language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | doocs__leetcode | solution/3400-3499/3499.Maximize Active Section with Trade I/Solution.py | {
"start": 0,
"end": 465
} | class ____:
def maxActiveSectionsAfterTrade(self, s: str) -> int:
n = len(s)
ans = i = 0
pre, mx = -inf, 0
while i < n:
j = i + 1
while j < n and s[j] == s[i]:
j += 1
cur = j - i
if s[i] == "1":
ans += cur
else:
mx = max(mx, pre + cur)
pre = cur
i = j
ans += mx
return ans
| Solution |
python | pypa__setuptools | pkg_resources/__init__.py | {
"start": 16497,
"end": 17536
} | class ____(Protocol):
def has_metadata(self, name: str) -> bool:
"""Does the package's distribution contain the named metadata?"""
...
def get_metadata(self, name: str) -> str:
"""The named metadata resource as a string"""
...
def get_metadata_lines(self, name: str) -> Iterator[str]:
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
...
def metadata_isdir(self, name: str) -> bool:
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
...
def metadata_listdir(self, name: str) -> list[str]:
"""List of metadata names in the directory (like ``os.listdir()``)"""
...
def run_script(self, script_name: str, namespace: dict[str, Any]) -> None:
"""Execute the named script in the supplied namespace dictionary"""
...
| IMetadataProvider |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 22679,
"end": 22934
} | class ____(NotFoundError):
"""
Optimized (vendor) Blas libraries are not found.
Falls back to netlib Blas library which has worse performance.
A better performance should be easily gained by switching
Blas library."""
| BlasOptNotFoundError |
python | sympy__sympy | sympy/polys/rootoftools.py | {
"start": 4534,
"end": 38387
} | class ____(RootOf):
"""Represents an indexed complex root of a polynomial.
Roots of a univariate polynomial separated into disjoint
real or complex intervals and indexed in a fixed order:
* real roots come first and are sorted in increasing order;
* complex roots come next and are sorted primarily by increasing
real part, secondarily by increasing imaginary part.
Currently only rational coefficients are allowed.
Can be imported as ``CRootOf``. To avoid confusion, the
generator must be a Symbol.
Examples
========
>>> from sympy import CRootOf, rootof
>>> from sympy.abc import x
CRootOf is a way to reference a particular root of a
polynomial. If there is a rational root, it will be returned:
>>> CRootOf.clear_cache() # for doctest reproducibility
>>> CRootOf(x**2 - 4, 0)
-2
Whether roots involving radicals are returned or not
depends on whether the ``radicals`` flag is true (which is
set to True with rootof):
>>> CRootOf(x**2 - 3, 0)
CRootOf(x**2 - 3, 0)
>>> CRootOf(x**2 - 3, 0, radicals=True)
-sqrt(3)
>>> rootof(x**2 - 3, 0)
-sqrt(3)
The following cannot be expressed in terms of radicals:
>>> r = rootof(4*x**5 + 16*x**3 + 12*x**2 + 7, 0); r
CRootOf(4*x**5 + 16*x**3 + 12*x**2 + 7, 0)
The root bounds can be seen, however, and they are used by the
evaluation methods to get numerical approximations for the root.
>>> interval = r._get_interval(); interval
(-1, 0)
>>> r.evalf(2)
-0.98
The evalf method refines the width of the root bounds until it
guarantees that any decimal approximation within those bounds
will satisfy the desired precision. It then stores the refined
interval so subsequent requests at or below the requested
precision will not have to recompute the root bounds and will
return very quickly.
Before evaluation above, the interval was
>>> interval
(-1, 0)
After evaluation it is now
>>> r._get_interval() # doctest: +SKIP
(-165/169, -206/211)
To reset all intervals for a given polynomial, the :meth:`_reset` method
can be called from any CRootOf instance of the polynomial:
>>> r._reset()
>>> r._get_interval()
(-1, 0)
The :meth:`eval_approx` method will also find the root to a given
precision but the interval is not modified unless the search
for the root fails to converge within the root bounds. And
the secant method is used to find the root. (The ``evalf``
method uses bisection and will always update the interval.)
>>> r.eval_approx(2)
-0.98
The interval needed to be slightly updated to find that root:
>>> r._get_interval()
(-1, -1/2)
The ``evalf_rational`` will compute a rational approximation
of the root to the desired accuracy or precision.
>>> r.eval_rational(n=2)
-69629/71318
>>> t = CRootOf(x**3 + 10*x + 1, 1)
>>> t.eval_rational(1e-1)
15/256 - 805*I/256
>>> t.eval_rational(1e-1, 1e-4)
3275/65536 - 414645*I/131072
>>> t.eval_rational(1e-4, 1e-4)
6545/131072 - 414645*I/131072
>>> t.eval_rational(n=2)
104755/2097152 - 6634255*I/2097152
Notes
=====
Although a PurePoly can be constructed from a non-symbol generator
RootOf instances of non-symbols are disallowed to avoid confusion
over what root is being represented.
>>> from sympy import exp, PurePoly
>>> PurePoly(x) == PurePoly(exp(x))
True
>>> CRootOf(x - 1, 0)
1
>>> CRootOf(exp(x) - 1, 0) # would correspond to x == 0
Traceback (most recent call last):
...
sympy.polys.polyerrors.PolynomialError: generator must be a Symbol
See Also
========
eval_approx
eval_rational
"""
__slots__ = ('index',)
is_complex = True
is_number = True
is_finite = True
is_algebraic = True
def __new__(cls, f, x, index=None, radicals=False, expand=True):
""" Construct an indexed complex root of a polynomial.
See ``rootof`` for the parameters.
The default value of ``radicals`` is ``False`` to satisfy
``eval(srepr(expr) == expr``.
"""
x = sympify(x)
if index is None and x.is_Integer:
x, index = None, x
else:
index = sympify(index)
if index is not None and index.is_Integer:
index = int(index)
else:
raise ValueError("expected an integer root index, got %s" % index)
poly = PurePoly(f, x, greedy=False, expand=expand)
if not poly.is_univariate:
raise PolynomialError("only univariate polynomials are allowed")
if not poly.gen.is_Symbol:
# PurePoly(sin(x) + 1) == PurePoly(x + 1) but the roots of
# x for each are not the same: issue 8617
raise PolynomialError("generator must be a Symbol")
degree = poly.degree()
if degree <= 0:
raise PolynomialError("Cannot construct CRootOf object for %s" % f)
if index < -degree or index >= degree:
raise IndexError("root index out of [%d, %d] range, got %d" %
(-degree, degree - 1, index))
elif index < 0:
index += degree
dom = poly.get_domain()
if not dom.is_Exact:
poly = poly.to_exact()
roots = cls._roots_trivial(poly, radicals)
if roots is not None:
return roots[index]
coeff, poly = preprocess_roots(poly)
dom = poly.get_domain()
if not dom.is_ZZ:
raise NotImplementedError("CRootOf is not supported over %s" % dom)
root = cls._indexed_root(poly, index, lazy=True)
return coeff * cls._postprocess_root(root, radicals)
@classmethod
def _new(cls, poly, index):
"""Construct new ``CRootOf`` object from raw data. """
obj = Expr.__new__(cls)
obj.poly = PurePoly(poly)
obj.index = index
try:
_reals_cache[obj.poly] = _reals_cache[poly]
_complexes_cache[obj.poly] = _complexes_cache[poly]
except KeyError:
pass
return obj
def _hashable_content(self):
return (self.poly, self.index)
@property
def expr(self):
return self.poly.as_expr()
@property
def args(self):
return (self.expr, Integer(self.index))
@property
def free_symbols(self):
# CRootOf currently only works with univariate expressions
# whose poly attribute should be a PurePoly with no free
# symbols
return set()
def _eval_is_real(self):
"""Return ``True`` if the root is real. """
self._ensure_reals_init()
return self.index < len(_reals_cache[self.poly])
def _eval_is_imaginary(self):
"""Return ``True`` if the root is imaginary. """
self._ensure_reals_init()
if self.index >= len(_reals_cache[self.poly]):
ivl = self._get_interval()
return ivl.ax*ivl.bx <= 0 # all others are on one side or the other
return False # XXX is this necessary?
@classmethod
def real_roots(cls, poly, radicals=True):
"""Get real roots of a polynomial. """
return cls._get_roots("_real_roots", poly, radicals)
@classmethod
def all_roots(cls, poly, radicals=True):
"""Get real and complex roots of a polynomial. """
return cls._get_roots("_all_roots", poly, radicals)
@classmethod
def _get_reals_sqf(cls, currentfactor, use_cache=True):
"""Get real root isolating intervals for a square-free factor."""
if use_cache and currentfactor in _reals_cache:
real_part = _reals_cache[currentfactor]
else:
_reals_cache[currentfactor] = real_part = \
dup_isolate_real_roots_sqf(
currentfactor.rep.to_list(), currentfactor.rep.dom, blackbox=True)
return real_part
@classmethod
def _get_complexes_sqf(cls, currentfactor, use_cache=True):
"""Get complex root isolating intervals for a square-free factor."""
if use_cache and currentfactor in _complexes_cache:
complex_part = _complexes_cache[currentfactor]
else:
_complexes_cache[currentfactor] = complex_part = \
dup_isolate_complex_roots_sqf(
currentfactor.rep.to_list(), currentfactor.rep.dom, blackbox=True)
return complex_part
@classmethod
def _get_reals(cls, factors, use_cache=True):
"""Compute real root isolating intervals for a list of factors. """
reals = []
for currentfactor, k in factors:
try:
if not use_cache:
raise KeyError
r = _reals_cache[currentfactor]
reals.extend([(i, currentfactor, k) for i in r])
except KeyError:
real_part = cls._get_reals_sqf(currentfactor, use_cache)
new = [(root, currentfactor, k) for root in real_part]
reals.extend(new)
reals = cls._reals_sorted(reals)
return reals
@classmethod
def _get_complexes(cls, factors, use_cache=True):
"""Compute complex root isolating intervals for a list of factors. """
complexes = []
for currentfactor, k in ordered(factors):
try:
if not use_cache:
raise KeyError
c = _complexes_cache[currentfactor]
complexes.extend([(i, currentfactor, k) for i in c])
except KeyError:
complex_part = cls._get_complexes_sqf(currentfactor, use_cache)
new = [(root, currentfactor, k) for root in complex_part]
complexes.extend(new)
complexes = cls._complexes_sorted(complexes)
return complexes
@classmethod
def _reals_sorted(cls, reals):
"""Make real isolating intervals disjoint and sort roots. """
cache = {}
for i, (u, f, k) in enumerate(reals):
for j, (v, g, m) in enumerate(reals[i + 1:]):
u, v = u.refine_disjoint(v)
reals[i + j + 1] = (v, g, m)
reals[i] = (u, f, k)
reals = sorted(reals, key=lambda r: r[0].a)
for root, currentfactor, _ in reals:
if currentfactor in cache:
cache[currentfactor].append(root)
else:
cache[currentfactor] = [root]
for currentfactor, root in cache.items():
_reals_cache[currentfactor] = root
return reals
@classmethod
def _refine_imaginary(cls, complexes):
sifted = sift(complexes, lambda c: c[1])
complexes = []
for f in ordered(sifted):
nimag = _imag_count_of_factor(f)
if nimag == 0:
# refine until xbounds are neg or pos
for u, f, k in sifted[f]:
while u.ax*u.bx <= 0:
u = u._inner_refine()
complexes.append((u, f, k))
else:
# refine until all but nimag xbounds are neg or pos
potential_imag = list(range(len(sifted[f])))
while True:
assert len(potential_imag) > 1
for i in list(potential_imag):
u, f, k = sifted[f][i]
if u.ax*u.bx > 0:
potential_imag.remove(i)
elif u.ax != u.bx:
u = u._inner_refine()
sifted[f][i] = u, f, k
if len(potential_imag) == nimag:
break
complexes.extend(sifted[f])
return complexes
@classmethod
def _refine_complexes(cls, complexes):
"""return complexes such that no bounding rectangles of non-conjugate
roots would intersect. In addition, assure that neither ay nor by is
0 to guarantee that non-real roots are distinct from real roots in
terms of the y-bounds.
"""
# get the intervals pairwise-disjoint.
# If rectangles were drawn around the coordinates of the bounding
# rectangles, no rectangles would intersect after this procedure.
for i, (u, f, k) in enumerate(complexes):
for j, (v, g, m) in enumerate(complexes[i + 1:]):
u, v = u.refine_disjoint(v)
complexes[i + j + 1] = (v, g, m)
complexes[i] = (u, f, k)
# refine until the x-bounds are unambiguously positive or negative
# for non-imaginary roots
complexes = cls._refine_imaginary(complexes)
# make sure that all y bounds are off the real axis
# and on the same side of the axis
for i, (u, f, k) in enumerate(complexes):
while u.ay*u.by <= 0:
u = u.refine()
complexes[i] = u, f, k
return complexes
@classmethod
def _complexes_sorted(cls, complexes):
"""Make complex isolating intervals disjoint and sort roots. """
complexes = cls._refine_complexes(complexes)
# XXX don't sort until you are sure that it is compatible
# with the indexing method but assert that the desired state
# is not broken
C, F = 0, 1 # location of ComplexInterval and factor
fs = {i[F] for i in complexes}
for i in range(1, len(complexes)):
if complexes[i][F] != complexes[i - 1][F]:
# if this fails the factors of a root were not
# contiguous because a discontinuity should only
# happen once
fs.remove(complexes[i - 1][F])
for i, cmplx in enumerate(complexes):
# negative im part (conj=True) comes before
# positive im part (conj=False)
assert cmplx[C].conj is (i % 2 == 0)
# update cache
cache = {}
# -- collate
for root, currentfactor, _ in complexes:
cache.setdefault(currentfactor, []).append(root)
# -- store
for currentfactor, root in cache.items():
_complexes_cache[currentfactor] = root
return complexes
@classmethod
def _reals_index(cls, reals, index):
"""
Map initial real root index to an index in a factor where
the root belongs.
"""
i = 0
for j, (_, currentfactor, k) in enumerate(reals):
if index < i + k:
poly, index = currentfactor, 0
for _, currentfactor, _ in reals[:j]:
if currentfactor == poly:
index += 1
return poly, index
else:
i += k
@classmethod
def _complexes_index(cls, complexes, index):
"""
Map initial complex root index to an index in a factor where
the root belongs.
"""
i = 0
for j, (_, currentfactor, k) in enumerate(complexes):
if index < i + k:
poly, index = currentfactor, 0
for _, currentfactor, _ in complexes[:j]:
if currentfactor == poly:
index += 1
index += len(_reals_cache[poly])
return poly, index
else:
i += k
@classmethod
def _count_roots(cls, roots):
"""Count the number of real or complex roots with multiplicities."""
return sum(k for _, _, k in roots)
@classmethod
def _indexed_root(cls, poly, index, lazy=False):
"""Get a root of a composite polynomial by index. """
factors = _pure_factors(poly)
# If the given poly is already irreducible, then the index does not
# need to be adjusted, and we can postpone the heavy lifting of
# computing and refining isolating intervals until that is needed.
# Note, however, that `_pure_factors()` extracts a negative leading
# coeff if present, so `factors[0][0]` may differ from `poly`, and
# is the "normalized" version of `poly` that we must return.
if lazy and len(factors) == 1 and factors[0][1] == 1:
return factors[0][0], index
reals = cls._get_reals(factors)
reals_count = cls._count_roots(reals)
if index < reals_count:
return cls._reals_index(reals, index)
else:
complexes = cls._get_complexes(factors)
return cls._complexes_index(complexes, index - reals_count)
def _ensure_reals_init(self):
"""Ensure that our poly has entries in the reals cache. """
if self.poly not in _reals_cache:
self._indexed_root(self.poly, self.index)
def _ensure_complexes_init(self):
"""Ensure that our poly has entries in the complexes cache. """
if self.poly not in _complexes_cache:
self._indexed_root(self.poly, self.index)
@classmethod
def _real_roots(cls, poly):
"""Get real roots of a composite polynomial. """
factors = _pure_factors(poly)
reals = cls._get_reals(factors)
reals_count = cls._count_roots(reals)
roots = []
for index in range(0, reals_count):
roots.append(cls._reals_index(reals, index))
return roots
def _reset(self):
"""
Reset all intervals
"""
factors = _pure_factors(self.poly)
self._get_reals(factors, use_cache=False)
self._get_complexes(factors, use_cache=False)
@classmethod
def _all_roots(cls, poly, use_cache=True):
"""Get real and complex roots of a composite polynomial. """
factors = _pure_factors(poly)
roots = []
if len(factors) == 1:
f, multiplicity = factors[0]
deg = f.degree()
roots.extend((f, i) for i in range(deg) for _ in range(multiplicity))
else:
reals = cls._get_reals(factors, use_cache=use_cache)
reals_count = cls._count_roots(reals)
for index in range(0, reals_count):
roots.append(cls._reals_index(reals, index))
complexes = cls._get_complexes(factors, use_cache=use_cache)
complexes_count = cls._count_roots(complexes)
for index in range(0, complexes_count):
roots.append(cls._complexes_index(complexes, index))
return roots
@classmethod
@cacheit
def _roots_trivial(cls, poly, radicals):
"""Compute roots in linear, quadratic and binomial cases. """
if poly.degree() == 1:
return roots_linear(poly)
if not radicals:
return None
if poly.degree() == 2:
return roots_quadratic(poly)
elif poly.length() == 2 and poly.TC():
return roots_binomial(poly)
else:
return None
@classmethod
def _preprocess_roots(cls, poly):
"""Take heroic measures to make ``poly`` compatible with ``CRootOf``."""
dom = poly.get_domain()
if not dom.is_Exact:
poly = poly.to_exact()
coeff, poly = preprocess_roots(poly)
dom = poly.get_domain()
if not dom.is_ZZ:
raise NotImplementedError(
"sorted roots not supported over %s" % dom)
return coeff, poly
@classmethod
def _postprocess_root(cls, root, radicals):
"""Return the root if it is trivial or a ``CRootOf`` object. """
poly, index = root
roots = cls._roots_trivial(poly, radicals)
if roots is not None:
return roots[index]
else:
return cls._new(poly, index)
@classmethod
def _get_roots(cls, method: str, poly: Poly, radicals: bool) -> list[Expr]:
"""Return postprocessed roots of specified kind. """
if not poly.is_univariate:
raise PolynomialError("only univariate polynomials are allowed")
dom = poly.get_domain()
# get rid of gen and it's free symbol
d = Dummy()
poly = poly.per(poly.rep, gens=(d,))
x = symbols('x')
# see what others are left and select x or a numbered x
# that doesn't clash
free_names = {str(i) for i in poly.free_symbols}
for x in chain((symbols('x'),), numbered_symbols('x')):
if x.name not in free_names:
poly = poly.replace(d, x)
break
if dom.is_QQ or dom.is_ZZ:
return cls._get_roots_qq(method, poly, radicals)
elif dom.is_ZZ_I or dom.is_QQ_I:
coeffs = poly.rep.to_list()
if all(c.y == 0 for c in coeffs):
poly = poly.set_domain(dom.dom)
return cls._get_roots_qq(method, poly, radicals)
elif all(c.x == 0 for c in coeffs):
poly = (I*poly).set_domain(dom.dom)
return cls._get_roots_qq(method, poly, radicals)
else:
return cls._get_roots_alg(method, poly, radicals)
elif dom.is_AlgebraicField:
return cls._get_roots_alg(method, poly, radicals)
else:
# XXX: not sure how to handle ZZ[x] which appears in some tests?
# this makes the tests pass alright but has to be a better way?
return cls._get_roots_qq(method, poly, radicals)
@classmethod
def _get_roots_qq(cls, method, poly, radicals):
"""Return postprocessed roots of specified kind
for polynomials with rational coefficients. """
coeff, poly = cls._preprocess_roots(poly)
roots = []
for root in getattr(cls, method)(poly):
roots.append(coeff*cls._postprocess_root(root, radicals))
return roots
@classmethod
def _get_roots_alg(cls, method, poly, radicals):
"""Return postprocessed roots of specified kind
for polynomials with algebraic coefficients. It assumes
the domain is already an algebraic field. First it
finds the roots using _get_roots_qq, then uses the
square-free factors to filter roots and get the correct
multiplicity.
"""
# Existing QQ code can find and sort the roots
roots = cls._get_roots_qq(method, poly.lift(), radicals)
subroots = {}
for f, m in poly.sqf_list()[1]:
if method == "_real_roots":
roots_filt = f.which_real_roots(roots)
elif method == "_all_roots":
roots_filt = f.which_all_roots(roots)
else:
raise TypeError("Unknown method")
for r in roots_filt:
subroots[r] = m
roots_seen = set()
roots_flat = []
for r in roots:
if r in subroots and r not in roots_seen:
m = subroots[r]
roots_flat.extend([r] * m)
roots_seen.add(r)
return roots_flat
@classmethod
def clear_cache(cls):
"""Reset cache for reals and complexes.
The intervals used to approximate a root instance are updated
as needed. When a request is made to see the intervals, the
most current values are shown. `clear_cache` will reset all
CRootOf instances back to their original state.
See Also
========
_reset
"""
global _reals_cache, _complexes_cache
_reals_cache = _pure_key_dict()
_complexes_cache = _pure_key_dict()
def _get_interval(self):
"""Internal function for retrieving isolation interval from cache. """
self._ensure_reals_init()
if self.is_real:
return _reals_cache[self.poly][self.index]
else:
reals_count = len(_reals_cache[self.poly])
self._ensure_complexes_init()
return _complexes_cache[self.poly][self.index - reals_count]
def _set_interval(self, interval):
"""Internal function for updating isolation interval in cache. """
self._ensure_reals_init()
if self.is_real:
_reals_cache[self.poly][self.index] = interval
else:
reals_count = len(_reals_cache[self.poly])
self._ensure_complexes_init()
_complexes_cache[self.poly][self.index - reals_count] = interval
def _eval_subs(self, old, new):
# don't allow subs to change anything
return self
def _eval_conjugate(self):
if self.is_real:
return self
expr, i = self.args
return self.func(expr, i + (1 if self._get_interval().conj else -1))
def eval_approx(self, n, return_mpmath=False):
"""Evaluate this complex root to the given precision.
This uses secant method and root bounds are used to both
generate an initial guess and to check that the root
returned is valid. If ever the method converges outside the
root bounds, the bounds will be made smaller and updated.
"""
prec = dps_to_prec(n)
with workprec(prec):
g = self.poly.gen
if not g.is_Symbol:
d = Dummy('x')
if self.is_imaginary:
d *= I
func = lambdify(d, self.expr.subs(g, d))
else:
expr = self.expr
if self.is_imaginary:
expr = self.expr.subs(g, I*g)
func = lambdify(g, expr)
interval = self._get_interval()
while True:
if self.is_real:
a = mpf(str(interval.a))
b = mpf(str(interval.b))
if a == b:
root = a
break
x0 = mpf(str(interval.center))
x1 = x0 + mpf(str(interval.dx))/4
elif self.is_imaginary:
a = mpf(str(interval.ay))
b = mpf(str(interval.by))
if a == b:
root = mpc(mpf('0'), a)
break
x0 = mpf(str(interval.center[1]))
x1 = x0 + mpf(str(interval.dy))/4
else:
ax = mpf(str(interval.ax))
bx = mpf(str(interval.bx))
ay = mpf(str(interval.ay))
by = mpf(str(interval.by))
if ax == bx and ay == by:
root = mpc(ax, ay)
break
x0 = mpc(*map(str, interval.center))
x1 = x0 + mpc(*map(str, (interval.dx, interval.dy)))/4
try:
# without a tolerance, this will return when (to within
# the given precision) x_i == x_{i-1}
root = findroot(func, (x0, x1))
# If the (real or complex) root is not in the 'interval',
# then keep refining the interval. This happens if findroot
# accidentally finds a different root outside of this
# interval because our initial estimate 'x0' was not close
# enough. It is also possible that the secant method will
# get trapped by a max/min in the interval; the root
# verification by findroot will raise a ValueError in this
# case and the interval will then be tightened -- and
# eventually the root will be found.
#
# It is also possible that findroot will not have any
# successful iterations to process (in which case it
# will fail to initialize a variable that is tested
# after the iterations and raise an UnboundLocalError).
if self.is_real or self.is_imaginary:
if not bool(root.imag) == self.is_real and (
a <= root <= b):
if self.is_imaginary:
root = mpc(mpf('0'), root.real)
break
elif (ax <= root.real <= bx and ay <= root.imag <= by):
break
except (UnboundLocalError, ValueError):
pass
interval = interval.refine()
# update the interval so we at least (for this precision or
# less) don't have much work to do to recompute the root
self._set_interval(interval)
if return_mpmath:
return root
return (Float._new(root.real._mpf_, prec) +
I*Float._new(root.imag._mpf_, prec))
def _eval_evalf(self, prec, **kwargs):
"""Evaluate this complex root to the given precision."""
# all kwargs are ignored
return self.eval_rational(n=prec_to_dps(prec))._evalf(prec)
def eval_rational(self, dx=None, dy=None, n=15):
"""
Return a Rational approximation of ``self`` that has real
and imaginary component approximations that are within ``dx``
and ``dy`` of the true values, respectively. Alternatively,
``n`` digits of precision can be specified.
The interval is refined with bisection and is sure to
converge. The root bounds are updated when the refinement
is complete so recalculation at the same or lesser precision
will not have to repeat the refinement and should be much
faster.
The following example first obtains Rational approximation to
1e-8 accuracy for all roots of the 4-th order Legendre
polynomial. Since the roots are all less than 1, this will
ensure the decimal representation of the approximation will be
correct (including rounding) to 6 digits:
>>> from sympy import legendre_poly, Symbol
>>> x = Symbol("x")
>>> p = legendre_poly(4, x, polys=True)
>>> r = p.real_roots()[-1]
>>> r.eval_rational(10**-8).n(6)
0.861136
It is not necessary to a two-step calculation, however: the
decimal representation can be computed directly:
>>> r.evalf(17)
0.86113631159405258
"""
dy = dy or dx
if dx:
rtol = None
dx = dx if isinstance(dx, Rational) else Rational(str(dx))
dy = dy if isinstance(dy, Rational) else Rational(str(dy))
else:
# 5 binary (or 2 decimal) digits are needed to ensure that
# a given digit is correctly rounded
# prec_to_dps(dps_to_prec(n) + 5) - n <= 2 (tested for
# n in range(1000000)
rtol = S(10)**-(n + 2) # +2 for guard digits
interval = self._get_interval()
while True:
if self.is_real:
if rtol:
dx = abs(interval.center*rtol)
interval = interval.refine_size(dx=dx)
c = interval.center
real = Rational(c)
imag = S.Zero
if not rtol or interval.dx < abs(c*rtol):
break
elif self.is_imaginary:
if rtol:
dy = abs(interval.center[1]*rtol)
dx = 1
interval = interval.refine_size(dx=dx, dy=dy)
c = interval.center[1]
imag = Rational(c)
real = S.Zero
if not rtol or interval.dy < abs(c*rtol):
break
else:
if rtol:
dx = abs(interval.center[0]*rtol)
dy = abs(interval.center[1]*rtol)
interval = interval.refine_size(dx, dy)
c = interval.center
real, imag = map(Rational, c)
if not rtol or (
interval.dx < abs(c[0]*rtol) and
interval.dy < abs(c[1]*rtol)):
break
# update the interval so we at least (for this precision or
# less) don't have much work to do to recompute the root
self._set_interval(interval)
return real + I*imag
CRootOf = ComplexRootOf
@dispatch(ComplexRootOf, ComplexRootOf)
def _eval_is_eq(lhs, rhs): # noqa:F811
# if we use is_eq to check here, we get infinite recursion
return lhs == rhs
@dispatch(ComplexRootOf, Basic) # type:ignore
def _eval_is_eq(lhs, rhs): # noqa:F811
# CRootOf represents a Root, so if rhs is that root, it should set
# the expression to zero *and* it should be in the interval of the
# CRootOf instance. It must also be a number that agrees with the
# is_real value of the CRootOf instance.
if not rhs.is_number:
return None
if not rhs.is_finite:
return False
z = lhs.expr.subs(lhs.expr.free_symbols.pop(), rhs).is_zero
if z is False: # all roots will make z True but we don't know
# whether this is the right root if z is True
return False
o = rhs.is_real, rhs.is_imaginary
s = lhs.is_real, lhs.is_imaginary
assert None not in s # this is part of initial refinement
if o != s and None not in o:
return False
re, im = rhs.as_real_imag()
if lhs.is_real:
if im:
return False
i = lhs._get_interval()
a, b = [Rational(str(_)) for _ in (i.a, i.b)]
return sympify(a <= rhs and rhs <= b)
i = lhs._get_interval()
r1, r2, i1, i2 = [Rational(str(j)) for j in (
i.ax, i.bx, i.ay, i.by)]
return is_le(r1, re) and is_le(re,r2) and is_le(i1,im) and is_le(im,i2)
@public
| ComplexRootOf |
python | has2k1__plotnine | plotnine/scales/scale_xy.py | {
"start": 9407,
"end": 9579
} | class ____(scale_x_continuous):
"""
Continuous x position reverse transformed scale
"""
trans: TransUser = "reverse"
@dataclass(kw_only=True)
| scale_x_reverse |
python | pypa__warehouse | tests/unit/oidc/models/test_google.py | {
"start": 240,
"end": 8014
} | class ____:
def test_publisher_name(self):
publisher = google.GooglePublisher(email="fake@example.com")
assert publisher.publisher_name == "Google"
def test_publisher_base_url(self):
publisher = google.GooglePublisher(email="fake@example.com")
assert publisher.publisher_base_url is None
def test_publisher_url(self):
publisher = google.GooglePublisher(email="fake@example.com")
assert publisher.publisher_url() is None
def test_stored_claims(self):
publisher = google.GooglePublisher(email="fake@example.com")
assert publisher.stored_claims() == {}
def test_stringifies_as_email(self):
publisher = google.GooglePublisher(email="fake@example.com")
assert str(publisher) == publisher.email
def test_google_publisher_admin_details_with_sub(self):
publisher = google.GooglePublisher(
email="fake@example.com",
sub="fakesubject",
)
assert publisher.admin_details == [
("Email", "fake@example.com"),
("Subject", "fakesubject"),
]
def test_google_publisher_admin_details_without_sub(self):
publisher = google.GooglePublisher(
email="fake@example.com",
sub=None,
)
assert publisher.admin_details == [
("Email", "fake@example.com"),
]
def test_google_publisher_all_known_claims(self):
assert google.GooglePublisher.all_known_claims() == {
# verifiable claims
"email",
"email_verified",
# optional verifiable claims
"sub",
# preverified claims
"iss",
"iat",
"nbf",
"exp",
"aud",
# unchecked claims
"azp",
"google",
}
def test_google_publisher_unaccounted_claims(self, monkeypatch):
scope = pretend.stub()
sentry_sdk = pretend.stub(
capture_message=pretend.call_recorder(lambda s: None),
new_scope=pretend.call_recorder(
lambda: pretend.stub(
__enter__=lambda *a: scope, __exit__=lambda *a: None
)
),
)
monkeypatch.setattr(_core, "sentry_sdk", sentry_sdk)
# We don't care if these actually verify, only that they're present.
signed_claims = {
claim_name: "fake"
for claim_name in google.GooglePublisher.all_known_claims()
}
signed_claims["fake-claim"] = "fake"
signed_claims["another-fake-claim"] = "also-fake"
google.GooglePublisher.check_claims_existence(signed_claims)
assert sentry_sdk.capture_message.calls == [
pretend.call(
"JWT for GooglePublisher has unaccounted claims: "
"['another-fake-claim', 'fake-claim']"
)
]
assert scope.fingerprint == ["another-fake-claim", "fake-claim"]
@pytest.mark.parametrize(
"missing",
google.GooglePublisher.__required_verifiable_claims__.keys()
| google.GooglePublisher.__required_unverifiable_claims__,
)
def test_google_publisher_missing_claims(self, monkeypatch, missing):
scope = pretend.stub()
sentry_sdk = pretend.stub(
capture_message=pretend.call_recorder(lambda s: None),
new_scope=pretend.call_recorder(
lambda: pretend.stub(
__enter__=lambda *a: scope, __exit__=lambda *a: None
)
),
)
monkeypatch.setattr(_core, "sentry_sdk", sentry_sdk)
signed_claims = {
claim_name: "fake"
for claim_name in google.GooglePublisher.all_known_claims()
}
# Pop the first signed claim, so that it's the first one to fail.
signed_claims.pop(missing)
assert missing not in signed_claims
assert google.GooglePublisher.__required_verifiable_claims__
with pytest.raises(errors.InvalidPublisherError) as e:
google.GooglePublisher.check_claims_existence(signed_claims)
assert str(e.value) == f"Missing claim '{missing}'"
assert sentry_sdk.capture_message.calls == [
pretend.call(f"JWT for GooglePublisher is missing claim: {missing}")
]
assert scope.fingerprint == [missing]
@pytest.mark.parametrize(
("email_verified", "valid"),
[(False, False), ("truthy-but-not-bool", False), ("", False), (True, True)],
)
def test_google_publisher_email_verified(self, email_verified, valid):
publisher = google.GooglePublisher(
sub="fakesubject",
email="fake@example.com",
)
signed_claims = {
"sub": "fakesubject",
"email": "fake@example.com",
"email_verified": email_verified,
}
if valid:
# Does not raise
publisher.verify_claims(
signed_claims=signed_claims, publisher_service=pretend.stub()
)
else:
with pytest.raises(errors.InvalidPublisherError) as e:
publisher.verify_claims(
signed_claims=signed_claims, publisher_service=pretend.stub()
)
assert str(e.value) == "Check failed for required claim 'email_verified'"
@pytest.mark.parametrize(
("expected_sub", "actual_sub", "valid"),
[
# Both present: must match.
("fakesubject", "fakesubject", True),
("fakesubject", "wrongsubject", False),
# Publisher configured without subject: any subject is acceptable.
("", "anysubject", True),
# Publisher configured with subject, none provided: must fail.
("fakesubject", None, False),
],
)
def test_google_publisher_sub_is_optional(self, expected_sub, actual_sub, valid):
publisher = google.GooglePublisher(
sub=expected_sub,
email="fake@example.com",
)
signed_claims = {
"sub": actual_sub,
"email": "fake@example.com",
"email_verified": True,
}
if valid:
# Does not raise
publisher.verify_claims(
signed_claims=signed_claims, publisher_service=pretend.stub()
)
else:
with pytest.raises(errors.InvalidPublisherError) as e:
publisher.verify_claims(
signed_claims=signed_claims, publisher_service=pretend.stub()
)
assert str(e.value) == "Check failed for optional claim 'sub'"
def test_lookup_no_matching_publishers(self, db_request):
signed_claims = {
"email": "fake@example.com",
"email_verified": True,
}
with pytest.raises(errors.InvalidPublisherError) as e:
google.GooglePublisher.lookup_by_claims(db_request.db, signed_claims)
assert str(e.value) == "Publisher with matching claims was not found"
@pytest.mark.parametrize("exists_in_db", [True, False])
def test_exists(self, db_request, exists_in_db):
publisher = google.GooglePublisher(
email="fake@example.com",
sub="fakesubject",
)
if exists_in_db:
db_request.db.add(publisher)
db_request.db.flush()
assert publisher.exists(db_request.db) == exists_in_db
def test_google_publisher_attestation_identity(self):
publisher = google.GooglePublisher(email="wu@tang.net")
identity = publisher.attestation_identity
assert identity.email == publisher.email
| TestGooglePublisher |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_pii.py | {
"start": 1731,
"end": 3017
} | class ____:
"""Test credit card detection with Luhn validation."""
def test_detect_valid_credit_card(self):
# Valid Visa test number
content = "Card: 4532015112830366"
matches = detect_credit_card(content)
assert len(matches) == 1
assert matches[0]["type"] == "credit_card"
assert matches[0]["value"] == "4532015112830366"
def test_detect_credit_card_with_spaces(self):
# Valid Mastercard test number
content = "Card: 5425233430109903"
# Add spaces
spaced_content = "Card: 5425 2334 3010 9903"
matches = detect_credit_card(spaced_content)
assert len(matches) == 1
assert "5425 2334 3010 9903" in matches[0]["value"]
def test_detect_credit_card_with_dashes(self):
content = "Card: 4532-0151-1283-0366"
matches = detect_credit_card(content)
assert len(matches) == 1
def test_invalid_luhn_not_detected(self):
# Invalid Luhn checksum
content = "Card: 1234567890123456"
matches = detect_credit_card(content)
assert len(matches) == 0
def test_no_credit_card(self):
content = "No cards here."
matches = detect_credit_card(content)
assert len(matches) == 0
| TestCreditCardDetection |
python | getsentry__sentry | src/sentry/integrations/api/endpoints/organization_integration_details.py | {
"start": 1893,
"end": 6458
} | class ____(OrganizationIntegrationBaseEndpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"DELETE": ApiPublishStatus.PUBLIC,
"GET": ApiPublishStatus.PUBLIC,
"POST": ApiPublishStatus.PRIVATE,
}
@extend_schema(
operation_id="Retrieve an Integration for an Organization",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.INTEGRATION_ID,
],
responses={
200: inline_sentry_response_serializer(
"OrganizationIntegrationResponse", OrganizationIntegrationResponse
),
},
examples=IntegrationExamples.GET_INTEGRATION,
)
@set_referrer_policy("strict-origin-when-cross-origin")
@method_decorator(never_cache)
def get(
self,
request: Request,
organization_context: RpcUserOrganizationContext,
integration_id: int,
**kwds: Any,
) -> Response:
org_integration = self.get_organization_integration(
organization_context.organization.id, integration_id
)
return self.respond(
serialize(
org_integration, request.user, OrganizationIntegrationSerializer(params=request.GET)
)
)
@extend_schema(
operation_id="Delete an Integration for an Organization",
parameters=[GlobalParams.ORG_ID_OR_SLUG, GlobalParams.INTEGRATION_ID],
responses={
204: RESPONSE_NO_CONTENT,
404: RESPONSE_NOT_FOUND,
},
)
@set_referrer_policy("strict-origin-when-cross-origin")
@method_decorator(never_cache)
def delete(
self,
request: Request,
organization_context: RpcUserOrganizationContext,
integration_id: int,
**kwds: Any,
) -> Response:
# Removing the integration removes the organization
# integrations and all linked issues.
# NOTE(hybrid-cloud): Deletions require the ORM object, not API versions
org_integration: OrganizationIntegration | None = OrganizationIntegration.objects.filter(
integration_id=integration_id, organization_id=organization_context.organization.id
).first()
if not org_integration:
raise Http404
integration = self.get_integration(organization_context.organization.id, integration_id)
# do any integration specific deleting steps
integration.get_installation(
organization_id=organization_context.organization.id
).uninstall()
with transaction.atomic(using=router.db_for_write(OrganizationIntegration)):
updated = False
for oi in OrganizationIntegration.objects.filter(
id=org_integration.id, status__in=[ObjectStatus.ACTIVE, ObjectStatus.DISABLED]
):
oi.update(status=ObjectStatus.PENDING_DELETION)
updated = True
if updated:
ScheduledDeletion.schedule(org_integration, days=0, actor=request.user)
create_audit_entry(
request=request,
organization_id=organization_context.organization.id,
target_object=integration.id,
event=audit_log.get_event_id("INTEGRATION_REMOVE"),
data={"provider": integration.provider, "name": integration.name},
)
return self.respond(status=204)
@set_referrer_policy("strict-origin-when-cross-origin")
@method_decorator(never_cache)
def post(
self,
request: Request,
organization_context: RpcUserOrganizationContext,
integration_id: int,
**kwds: Any,
) -> Response:
organization = organization_context.organization
integration = self.get_integration(organization.id, integration_id)
installation = integration.get_installation(organization_id=organization.id)
try:
installation.update_organization_config(request.data)
except (IntegrationError, ApiError) as e:
sentry_sdk.capture_exception(e)
return self.respond({"detail": [str(e)]}, status=400)
self.create_audit_entry(
request=request,
organization=organization,
target_object=integration.id,
event=audit_log.get_event_id("INTEGRATION_EDIT"),
data={"provider": integration.provider, "name": "config"},
)
return self.respond(status=200)
| OrganizationIntegrationDetailsEndpoint |
python | getsentry__sentry | tests/sentry/workflow_engine/handlers/action/test_action_handlers.py | {
"start": 473,
"end": 3813
} | class ____(MetricAlertHandlerBase):
def setUp(self) -> None:
super().setUp()
self.project = self.create_project()
self.detector = self.create_detector(project=self.project)
self.action = Action(type=Action.Type.DISCORD)
self.group, self.event, self.group_event = self.create_group_event()
self.event_data = WorkflowEventData(event=self.group_event, group=self.group)
@mock.patch(
"sentry.notifications.notification_action.registry.group_type_notification_registry.get"
)
def test_execute_error_group_type(self, mock_registry_get: mock.MagicMock) -> None:
"""Test that execute calls correct handler for ErrorGroupType"""
self.detector.type = ErrorGroupType.slug
self.detector.save()
mock_handler = mock.Mock()
mock_registry_get.return_value = mock_handler
self.action.trigger(self.event_data)
mock_registry_get.assert_called_once_with(ErrorGroupType.slug)
mock_handler.handle_workflow_action.assert_called_once_with(
self.event_data, self.action, self.detector
)
@mock.patch(
"sentry.notifications.notification_action.registry.group_type_notification_registry.get"
)
def test_execute_metric_alert_type(self, mock_registry_get: mock.MagicMock) -> None:
"""Test that execute calls correct handler for MetricIssue"""
self.detector.type = MetricIssue.slug
self.detector.config = {"threshold_period": 1, "detection_type": "static"}
self.detector.save()
self.group.type = MetricIssue.type_id
self.group.save()
group, _, group_event = self.create_group_event(
group_type_id=MetricIssue.type_id,
occurrence=self.create_issue_occurrence(
priority=PriorityLevel.HIGH.value,
level="error",
evidence_data={
"detector_id": self.detector.id,
},
),
)
self.event_data = WorkflowEventData(event=group_event, group=group)
mock_handler = mock.Mock()
mock_registry_get.return_value = mock_handler
self.action.trigger(self.event_data)
mock_registry_get.assert_called_once_with(MetricIssue.slug)
mock_handler.handle_workflow_action.assert_called_once_with(
self.event_data, self.action, self.detector
)
@mock.patch("sentry.notifications.notification_action.utils.execute_via_issue_alert_handler")
@mock.patch(
"sentry.notifications.notification_action.registry.group_type_notification_registry.get",
side_effect=NoRegistrationExistsError,
)
@mock.patch("sentry.notifications.notification_action.utils.logger")
def test_execute_unknown_detector(
self,
mock_logger: mock.MagicMock,
mock_registry_get: mock.MagicMock,
mock_execute_via_issue_alert_handler: mock.MagicMock,
) -> None:
"""Test that execute does nothing when we can't find the detector"""
self.action.trigger(self.event_data)
mock_logger.warning.assert_called_once_with(
"group_type_notification_registry.get.NoRegistrationExistsError",
extra={"detector_id": self.detector.id, "action_id": self.action.id},
)
| TestNotificationActionHandler |
python | Netflix__metaflow | metaflow/flowspec.py | {
"start": 1402,
"end": 1765
} | class ____(MetaflowException):
headline = "Invalid self.next() transition detected"
def __init__(self, msg):
# NOTE this assume that InvalidNextException is only raised
# at the top level of next()
_, line_no, _, _ = traceback.extract_stack()[-3]
super(InvalidNextException, self).__init__(msg, line_no)
| InvalidNextException |
python | getsentry__sentry | src/sentry/api/endpoints/project_plugin_details.py | {
"start": 1248,
"end": 7358
} | class ____(ProjectEndpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
"GET": ApiPublishStatus.PRIVATE,
"PUT": ApiPublishStatus.PRIVATE,
"POST": ApiPublishStatus.PRIVATE,
}
def convert_args(
self,
request: Request,
organization_id_or_slug: int | str,
project_id_or_slug: int | str,
plugin_id: str,
*args,
**kwargs,
):
(args, kwargs) = super().convert_args(
request, organization_id_or_slug, project_id_or_slug, *args, **kwargs
)
try:
plugin = plugins.get(plugin_id)
except KeyError:
raise ResourceDoesNotExist
kwargs["plugin"] = plugin
return (args, kwargs)
def get(self, request: Request, project, plugin: Plugin | Plugin2) -> Response:
try:
context = serialize(plugin, request.user, PluginWithConfigSerializer(project))
except PluginIdentityRequired as e:
context = serialize(plugin, request.user, PluginSerializer(project))
context["config_error"] = str(e)
# Use an absolute URI so that oauth redirects work.
context["auth_url"] = absolute_uri(reverse("socialauth_associate", args=[plugin.slug]))
if context["isDeprecated"]:
raise Http404
return Response(context)
def post(self, request: Request, project, plugin: Plugin | Plugin2) -> Response:
"""
Enable plugin, Test plugin or Reset plugin values
"""
if request.data.get("test") and plugin.is_testable():
test_results = plugin.test_configuration_and_get_test_results(project)
return Response({"detail": test_results}, status=200)
if request.data.get("reset"):
plugin.reset_options(project=project)
context = serialize(plugin, request.user, PluginWithConfigSerializer(project))
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=project.id,
event=audit_log.get_event_id("INTEGRATION_EDIT"),
data={"integration": plugin.slug, "project": project.slug},
)
return Response(context, status=200)
if not plugin.can_disable:
return Response({"detail": ERR_ALWAYS_ENABLED}, status=400)
# Currently, only data forwarding plugins need feature check. If there will be plugins with other feature gates,
# we will need to add the relevant check. However, this is unlikely to happen.
if any(
[
fd.featureGate == IntegrationFeatures.DATA_FORWARDING
for fd in plugin.feature_descriptions
]
) and not features.has("organizations:data-forwarding", project.organization):
return Response(
{"detail": ERR_FEATURE_REQUIRED % "organizations:data-forwarding"}, status=403
)
plugin.enable(project)
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=project.id,
event=audit_log.get_event_id("INTEGRATION_ADD"),
data={"integration": plugin.slug, "project": project.slug},
)
return Response(status=201)
def delete(self, request: Request, project, plugin: Plugin | Plugin2) -> Response:
"""
Disable plugin
"""
if not plugin.can_disable:
return Response({"detail": ERR_ALWAYS_ENABLED}, status=400)
plugin.disable(project)
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=project.id,
event=audit_log.get_event_id("INTEGRATION_REMOVE"),
data={"integration": plugin.slug, "project": project.slug},
)
return Response(status=204)
def put(self, request: Request, project, plugin: Plugin | Plugin2) -> Response:
config = [
serialize_field(project, plugin, c)
for c in plugin.get_config(project=project, user=request.user, initial=request.data)
]
cleaned = {}
errors = {}
for field in config:
key = field["name"]
value = request.data.get(key)
if field.get("required") and not value:
errors[key] = ERR_FIELD_REQUIRED
try:
value = plugin.validate_config_field(
project=project, name=key, value=value, actor=request.user
)
except (
forms.ValidationError,
serializers.ValidationError,
InvalidIdentity,
PluginError,
) as e:
errors[key] = str(e)
if not errors.get(key):
cleaned[key] = value
if not errors:
try:
cleaned = plugin.validate_config(
project=project, config=cleaned, actor=request.user
)
except (InvalidIdentity, PluginError) as e:
errors["__all__"] = str(e)
if errors:
return Response({"errors": errors}, status=400)
for key, value in cleaned.items():
if value is None:
plugin.unset_option(project=project, key=key)
else:
plugin.set_option(project=project, key=key, value=value)
context = serialize(plugin, request.user, PluginWithConfigSerializer(project))
plugin_enabled.send(plugin=plugin, project=project, user=request.user, sender=self)
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=project.id,
event=audit_log.get_event_id("INTEGRATION_EDIT"),
data={"integration": plugin.slug, "project": project.slug},
)
return Response(context)
| ProjectPluginDetailsEndpoint |
python | paramiko__paramiko | paramiko/config.py | {
"start": 1288,
"end": 23861
} | class ____:
"""
Representation of config information as stored in the format used by
OpenSSH. Queries can be made via `lookup`. The format is described in
OpenSSH's ``ssh_config`` man page. This class is provided primarily as a
convenience to posix users (since the OpenSSH format is a de-facto
standard on posix) but should work fine on Windows too.
.. versionadded:: 1.6
"""
SETTINGS_REGEX = re.compile(r"(\w+)(?:\s*=\s*|\s+)(.+)")
# TODO: do a full scan of ssh.c & friends to make sure we're fully
# compatible across the board, e.g. OpenSSH 8.1 added %n to ProxyCommand.
TOKENS_BY_CONFIG_KEY = {
"controlpath": ["%C", "%h", "%l", "%L", "%n", "%p", "%r", "%u"],
"hostname": ["%h"],
"identityfile": ["%C", "~", "%d", "%h", "%l", "%u", "%r"],
"proxycommand": ["~", "%h", "%p", "%r"],
"proxyjump": ["%h", "%p", "%r"],
# Doesn't seem worth making this 'special' for now, it will fit well
# enough (no actual match-exec config key to be confused with).
"match-exec": ["%C", "%d", "%h", "%L", "%l", "%n", "%p", "%r", "%u"],
}
def __init__(self):
"""
Create a new OpenSSH config object.
Note: the newer alternate constructors `from_path`, `from_file` and
`from_text` are simpler to use, as they parse on instantiation. For
example, instead of::
config = SSHConfig()
config.parse(open("some-path.config")
you could::
config = SSHConfig.from_file(open("some-path.config"))
# Or more directly:
config = SSHConfig.from_path("some-path.config")
# Or if you have arbitrary ssh_config text from some other source:
config = SSHConfig.from_text("Host foo\\n\\tUser bar")
"""
self._config = []
@classmethod
def from_text(cls, text):
"""
Create a new, parsed `SSHConfig` from ``text`` string.
.. versionadded:: 2.7
"""
return cls.from_file(StringIO(text))
@classmethod
def from_path(cls, path):
"""
Create a new, parsed `SSHConfig` from the file found at ``path``.
.. versionadded:: 2.7
"""
with open(path) as flo:
return cls.from_file(flo)
@classmethod
def from_file(cls, flo):
"""
Create a new, parsed `SSHConfig` from file-like object ``flo``.
.. versionadded:: 2.7
"""
obj = cls()
obj.parse(flo)
return obj
def parse(self, file_obj):
"""
Read an OpenSSH config from the given file object.
:param file_obj: a file-like object to read the config file from
"""
# Start out w/ implicit/anonymous global host-like block to hold
# anything not contained by an explicit one.
context = {"host": ["*"], "config": {}}
for line in file_obj:
# Strip any leading or trailing whitespace from the line.
# Refer to https://github.com/paramiko/paramiko/issues/499
line = line.strip()
# Skip blanks, comments
if not line or line.startswith("#"):
continue
# Parse line into key, value
match = re.match(self.SETTINGS_REGEX, line)
if not match:
raise ConfigParseError("Unparsable line {}".format(line))
key = match.group(1).lower()
value = match.group(2)
# Host keyword triggers switch to new block/context
if key in ("host", "match"):
self._config.append(context)
context = {"config": {}}
if key == "host":
# TODO 4.0: make these real objects or at least name this
# "hosts" to acknowledge it's an iterable. (Doing so prior
# to 3.0, despite it being a private API, feels bad -
# surely such an old codebase has folks actually relying on
# these keys.)
context["host"] = self._get_hosts(value)
else:
context["matches"] = self._get_matches(value)
# Special-case for noop ProxyCommands
elif key == "proxycommand" and value.lower() == "none":
# Store 'none' as None - not as a string implying that the
# proxycommand is the literal shell command "none"!
context["config"][key] = None
# All other keywords get stored, directly or via append
else:
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
# identityfile, localforward, remoteforward keys are special
# cases, since they are allowed to be specified multiple times
# and they should be tried in order of specification.
if key in ["identityfile", "localforward", "remoteforward"]:
if key in context["config"]:
context["config"][key].append(value)
else:
context["config"][key] = [value]
elif key not in context["config"]:
context["config"][key] = value
# Store last 'open' block and we're done
self._config.append(context)
def lookup(self, hostname):
"""
Return a dict (`SSHConfigDict`) of config options for a given hostname.
The host-matching rules of OpenSSH's ``ssh_config`` man page are used:
For each parameter, the first obtained value will be used. The
configuration files contain sections separated by ``Host`` and/or
``Match`` specifications, and that section is only applied for hosts
which match the given patterns or keywords
Since the first obtained value for each parameter is used, more host-
specific declarations should be given near the beginning of the file,
and general defaults at the end.
The keys in the returned dict are all normalized to lowercase (look for
``"port"``, not ``"Port"``. The values are processed according to the
rules for substitution variable expansion in ``ssh_config``.
Finally, please see the docs for `SSHConfigDict` for deeper info on
features such as optional type conversion methods, e.g.::
conf = my_config.lookup('myhost')
assert conf['passwordauthentication'] == 'yes'
assert conf.as_bool('passwordauthentication') is True
.. note::
If there is no explicitly configured ``HostName`` value, it will be
set to the being-looked-up hostname, which is as close as we can
get to OpenSSH's behavior around that particular option.
:param str hostname: the hostname to lookup
.. versionchanged:: 2.5
Returns `SSHConfigDict` objects instead of dict literals.
.. versionchanged:: 2.7
Added canonicalization support.
.. versionchanged:: 2.7
Added ``Match`` support.
.. versionchanged:: 3.3
Added ``Match final`` support.
"""
# First pass
options = self._lookup(hostname=hostname)
# Inject HostName if it was not set (this used to be done incidentally
# during tokenization, for some reason).
if "hostname" not in options:
options["hostname"] = hostname
# Handle canonicalization
canon = options.get("canonicalizehostname", None) in ("yes", "always")
maxdots = int(options.get("canonicalizemaxdots", 1))
if canon and hostname.count(".") <= maxdots:
# NOTE: OpenSSH manpage does not explicitly state this, but its
# implementation for CanonicalDomains is 'split on any whitespace'.
domains = options["canonicaldomains"].split()
hostname = self.canonicalize(hostname, options, domains)
# Overwrite HostName again here (this is also what OpenSSH does)
options["hostname"] = hostname
options = self._lookup(
hostname, options, canonical=True, final=True
)
else:
options = self._lookup(
hostname, options, canonical=False, final=True
)
return options
def _lookup(self, hostname, options=None, canonical=False, final=False):
# Init
if options is None:
options = SSHConfigDict()
# Iterate all stanzas, applying any that match, in turn (so that things
# like Match can reference currently understood state)
for context in self._config:
if not (
self._pattern_matches(context.get("host", []), hostname)
or self._does_match(
context.get("matches", []),
hostname,
canonical,
final,
options,
)
):
continue
for key, value in context["config"].items():
if key not in options:
# Create a copy of the original value,
# else it will reference the original list
# in self._config and update that value too
# when the extend() is being called.
options[key] = value[:] if value is not None else value
elif key == "identityfile":
options[key].extend(
x for x in value if x not in options[key]
)
if final:
# Expand variables in resulting values
# (besides 'Match exec' which was already handled above)
options = self._expand_variables(options, hostname)
return options
def canonicalize(self, hostname, options, domains):
"""
Return canonicalized version of ``hostname``.
:param str hostname: Target hostname.
:param options: An `SSHConfigDict` from a previous lookup pass.
:param domains: List of domains (e.g. ``["paramiko.org"]``).
:returns: A canonicalized hostname if one was found, else ``None``.
.. versionadded:: 2.7
"""
found = False
for domain in domains:
candidate = "{}.{}".format(hostname, domain)
family_specific = _addressfamily_host_lookup(candidate, options)
if family_specific is not None:
# TODO: would we want to dig deeper into other results? e.g. to
# find something that satisfies PermittedCNAMEs when that is
# implemented?
found = family_specific[0]
else:
# TODO: what does ssh use here and is there a reason to use
# that instead of gethostbyname?
try:
found = socket.gethostbyname(candidate)
except socket.gaierror:
pass
if found:
# TODO: follow CNAME (implied by found != candidate?) if
# CanonicalizePermittedCNAMEs allows it
return candidate
# If we got here, it means canonicalization failed.
# When CanonicalizeFallbackLocal is undefined or 'yes', we just spit
# back the original hostname.
if options.get("canonicalizefallbacklocal", "yes") == "yes":
return hostname
# And here, we failed AND fallback was set to a non-yes value, so we
# need to get mad.
raise CouldNotCanonicalize(hostname)
def get_hostnames(self):
"""
Return the set of literal hostnames defined in the SSH config (both
explicit hostnames and wildcard entries).
"""
hosts = set()
for entry in self._config:
hosts.update(entry["host"])
return hosts
def _pattern_matches(self, patterns, target):
# Convenience auto-splitter if not already a list
if hasattr(patterns, "split"):
patterns = patterns.split(",")
match = False
for pattern in patterns:
# Short-circuit if target matches a negated pattern
if pattern.startswith("!") and fnmatch.fnmatch(
target, pattern[1:]
):
return False
# Flag a match, but continue (in case of later negation) if regular
# match occurs
elif fnmatch.fnmatch(target, pattern):
match = True
return match
def _does_match(
self, match_list, target_hostname, canonical, final, options
):
matched = []
candidates = match_list[:]
local_username = getpass.getuser()
while candidates:
candidate = candidates.pop(0)
passed = None
# Obtain latest host/user value every loop, so later Match may
# reference values assigned within a prior Match.
configured_host = options.get("hostname", None)
configured_user = options.get("user", None)
type_, param = candidate["type"], candidate["param"]
# Canonical is a hard pass/fail based on whether this is a
# canonicalized re-lookup.
if type_ == "canonical":
if self._should_fail(canonical, candidate):
return False
if type_ == "final":
passed = final
# The parse step ensures we only see this by itself or after
# canonical, so it's also an easy hard pass. (No negation here as
# that would be uh, pretty weird?)
elif type_ == "all":
return True
# From here, we are testing various non-hard criteria,
# short-circuiting only on fail
elif type_ == "host":
hostval = configured_host or target_hostname
passed = self._pattern_matches(param, hostval)
elif type_ == "originalhost":
passed = self._pattern_matches(param, target_hostname)
elif type_ == "user":
user = configured_user or local_username
passed = self._pattern_matches(param, user)
elif type_ == "localuser":
passed = self._pattern_matches(param, local_username)
elif type_ == "exec":
exec_cmd = self._tokenize(
options, target_hostname, "match-exec", param
)
# This is the laziest spot in which we can get mad about an
# inability to import Invoke.
if invoke is None:
raise invoke_import_error
# Like OpenSSH, we 'redirect' stdout but let stderr bubble up
passed = invoke.run(exec_cmd, hide="stdout", warn=True).ok
# Tackle any 'passed, but was negated' results from above
if passed is not None and self._should_fail(passed, candidate):
return False
# Made it all the way here? Everything matched!
matched.append(candidate)
# Did anything match? (To be treated as bool, usually.)
return matched
def _should_fail(self, would_pass, candidate):
return would_pass if candidate["negate"] else not would_pass
def _tokenize(self, config, target_hostname, key, value):
"""
Tokenize a string based on current config/hostname data.
:param config: Current config data.
:param target_hostname: Original target connection hostname.
:param key: Config key being tokenized (used to filter token list).
:param value: Config value being tokenized.
:returns: The tokenized version of the input ``value`` string.
"""
allowed_tokens = self._allowed_tokens(key)
# Short-circuit if no tokenization possible
if not allowed_tokens:
return value
# Obtain potentially configured hostname, for use with %h.
# Special-case where we are tokenizing the hostname itself, to avoid
# replacing %h with a %h-bearing value, etc.
configured_hostname = target_hostname
if key != "hostname":
configured_hostname = config.get("hostname", configured_hostname)
# Ditto the rest of the source values
if "port" in config:
port = config["port"]
else:
port = SSH_PORT
user = getpass.getuser()
if "user" in config:
remoteuser = config["user"]
else:
remoteuser = user
local_hostname = socket.gethostname().split(".")[0]
local_fqdn = LazyFqdn(config, local_hostname)
homedir = os.path.expanduser("~")
tohash = local_hostname + target_hostname + repr(port) + remoteuser
# The actual tokens!
replacements = {
# TODO: %%???
"%C": sha1(tohash.encode()).hexdigest(),
"%d": homedir,
"%h": configured_hostname,
# TODO: %i?
"%L": local_hostname,
"%l": local_fqdn,
# also this is pseudo buggy when not in Match exec mode so document
# that. also WHY is that the case?? don't we do all of this late?
"%n": target_hostname,
"%p": port,
"%r": remoteuser,
# TODO: %T? don't believe this is possible however
"%u": user,
"~": homedir,
}
# Do the thing with the stuff
tokenized = value
for find, replace in replacements.items():
if find not in allowed_tokens:
continue
tokenized = tokenized.replace(find, str(replace))
# TODO: log? eg that value -> tokenized
return tokenized
def _allowed_tokens(self, key):
"""
Given config ``key``, return list of token strings to tokenize.
.. note::
This feels like it wants to eventually go away, but is used to
preserve as-strict-as-possible compatibility with OpenSSH, which
for whatever reason only applies some tokens to some config keys.
"""
return self.TOKENS_BY_CONFIG_KEY.get(key, [])
def _expand_variables(self, config, target_hostname):
"""
Return a dict of config options with expanded substitutions
for a given original & current target hostname.
Please refer to :doc:`/api/config` for details.
:param dict config: the currently parsed config
:param str hostname: the hostname whose config is being looked up
"""
for k in config:
if config[k] is None:
continue
tokenizer = partial(self._tokenize, config, target_hostname, k)
if isinstance(config[k], list):
for i, value in enumerate(config[k]):
config[k][i] = tokenizer(value)
else:
config[k] = tokenizer(config[k])
return config
def _get_hosts(self, host):
"""
Return a list of host_names from host value.
"""
try:
return shlex.split(host)
except ValueError:
raise ConfigParseError("Unparsable host {}".format(host))
def _get_matches(self, match):
"""
Parse a specific Match config line into a list-of-dicts for its values.
Performs some parse-time validation as well.
"""
matches = []
tokens = shlex.split(match)
while tokens:
match = {"type": None, "param": None, "negate": False}
type_ = tokens.pop(0)
# Handle per-keyword negation
if type_.startswith("!"):
match["negate"] = True
type_ = type_[1:]
match["type"] = type_
# all/canonical have no params (everything else does)
if type_ in ("all", "canonical", "final"):
matches.append(match)
continue
if not tokens:
raise ConfigParseError(
"Missing parameter to Match '{}' keyword".format(type_)
)
match["param"] = tokens.pop(0)
matches.append(match)
# Perform some (easier to do now than in the middle) validation that is
# better handled here than at lookup time.
keywords = [x["type"] for x in matches]
if "all" in keywords:
allowable = ("all", "canonical")
ok, bad = (
list(filter(lambda x: x in allowable, keywords)),
list(filter(lambda x: x not in allowable, keywords)),
)
err = None
if any(bad):
err = "Match does not allow 'all' mixed with anything but 'canonical'" # noqa
elif "canonical" in ok and ok.index("canonical") > ok.index("all"):
err = "Match does not allow 'all' before 'canonical'"
if err is not None:
raise ConfigParseError(err)
return matches
def _addressfamily_host_lookup(hostname, options):
    """
    Try looking up ``hostname`` in an IPv4 or IPv6 specific manner.

    This is an odd duck due to needing use in two divergent use cases. It looks
    up ``AddressFamily`` in ``options`` and if it is ``inet`` or ``inet6``,
    this function uses `socket.getaddrinfo` to perform a family-specific
    lookup, returning the result if successful.

    In any other situation -- lookup failure, or ``AddressFamily`` being
    unspecified or ``any`` -- ``None`` is returned instead and the caller is
    expected to do something situation-appropriate like calling
    `socket.gethostbyname`.

    :param str hostname: Hostname to look up.
    :param options: `SSHConfigDict` instance w/ parsed options.
    :returns: ``getaddrinfo``-style tuples, or ``None``, depending.
    """
    address_family = options.get("addressfamily", "any").lower()
    if address_family == "any":
        # Implicit ``return None``: no family restriction requested.
        return
    try:
        family = socket.AF_INET6
        if address_family == "inet":
            family = socket.AF_INET
        return socket.getaddrinfo(
            hostname,
            None,
            family,
            socket.SOCK_DGRAM,
            socket.IPPROTO_IP,
            socket.AI_CANONNAME,
        )
    except socket.gaierror:
        # Resolution failed for the requested family; fall through to the
        # implicit None return so the caller can use its own fallback.
        pass
| SSHConfig |
python | fluentpython__example-code-2e | 23-descriptor/bulkfood/model_v5.py | {
"start": 500,
"end": 707
} | class ____(Validated):
"""a number greater than zero"""
def validate(self, name, value):  # <1>
    """Return *value* unchanged if it is strictly positive.

    :raises ValueError: if ``value`` is zero or negative.
    """
    if value <= 0:
        raise ValueError(f'{name} must be > 0')
    return value
| Quantity |
python | scikit-learn__scikit-learn | sklearn/compose/tests/test_column_transformer.py | {
"start": 1998,
"end": 59861
} | class ____(BaseEstimator):
def fit(self, X, y=None):
    # Always fail with a recognizable message so tests can assert that
    # ColumnTransformer propagates fit-time errors unchanged.
    raise ValueError("specific message")
def transform(self, X, y=None):
    # Always fail with a recognizable message so tests can assert that
    # ColumnTransformer propagates transform-time errors unchanged.
    raise ValueError("specific message")
def test_column_transformer():
    """Check fitting/transforming a plain array with every supported column
    specifier (scalar, list, array, slice, boolean mask, callable), plus
    transformer_weights scaling and the transformers_ attribute length."""
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_res_first1D = np.array([0, 1, 2])
    X_res_second1D = np.array([2, 4, 6])
    X_res_first = X_res_first1D.reshape(-1, 1)
    X_res_both = X_array

    cases = [
        # single column 1D / 2D
        (0, X_res_first),
        ([0], X_res_first),
        # list-like
        ([0, 1], X_res_both),
        (np.array([0, 1]), X_res_both),
        # slice
        (slice(0, 1), X_res_first),
        (slice(0, 2), X_res_both),
        # boolean mask
        (np.array([True, False]), X_res_first),
        ([True, False], X_res_first),
        (np.array([True, True]), X_res_both),
        ([True, True], X_res_both),
    ]

    for selection, res in cases:
        ct = ColumnTransformer([("trans", Trans(), selection)], remainder="drop")
        assert_array_equal(ct.fit_transform(X_array), res)
        assert_array_equal(ct.fit(X_array).transform(X_array), res)

        # callable that returns any of the allowed specifiers
        ct = ColumnTransformer(
            [("trans", Trans(), lambda x: selection)], remainder="drop"
        )
        assert_array_equal(ct.fit_transform(X_array), res)
        assert_array_equal(ct.fit(X_array).transform(X_array), res)

    ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
    assert_array_equal(ct.fit_transform(X_array), X_res_both)
    assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
    assert len(ct.transformers_) == 2

    # test with transformer_weights
    transformer_weights = {"trans1": 0.1, "trans2": 10}
    both = ColumnTransformer(
        [("trans1", Trans(), [0]), ("trans2", Trans(), [1])],
        transformer_weights=transformer_weights,
    )
    # each output column is scaled by its transformer's weight
    res = np.vstack(
        [
            transformer_weights["trans1"] * X_res_first1D,
            transformer_weights["trans2"] * X_res_second1D,
        ]
    ).T
    assert_array_equal(both.fit_transform(X_array), res)
    assert_array_equal(both.fit(X_array).transform(X_array), res)
    assert len(both.transformers_) == 2

    both = ColumnTransformer(
        [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1}
    )
    assert_array_equal(both.fit_transform(X_array), 0.1 * X_res_both)
    assert_array_equal(both.fit(X_array).transform(X_array), 0.1 * X_res_both)
    assert len(both.transformers_) == 1
def test_column_transformer_tuple_transformers_parameter():
    """Passing ``transformers`` as a tuple must behave like a list."""
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    transformers = [("trans1", Trans(), [0]), ("trans2", Trans(), [1])]

    as_list = ColumnTransformer(transformers)
    as_tuple = ColumnTransformer(tuple(transformers))

    assert_array_equal(
        as_list.fit_transform(X_array), as_tuple.fit_transform(X_array)
    )
    assert_array_equal(
        as_list.fit(X_array).transform(X_array),
        as_tuple.fit(X_array).transform(X_array),
    )
@pytest.mark.parametrize("constructor_name", ["dataframe", "polars"])
def test_column_transformer_dataframe(constructor_name):
    """Check column selection on dataframes (pandas and polars): label-based,
    positional, boolean and callable specifiers; transformer_weights; and
    that the container type handed to transformers is preserved."""
    if constructor_name == "dataframe":
        dataframe_lib = pytest.importorskip("pandas")
    else:
        dataframe_lib = pytest.importorskip(constructor_name)

    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_df = _convert_container(
        X_array, constructor_name, columns_name=["first", "second"]
    )

    X_res_first = np.array([0, 1, 2]).reshape(-1, 1)
    X_res_both = X_array

    cases = [
        # String keys: label based
        # list
        (["first"], X_res_first),
        (["first", "second"], X_res_both),
        # slice
        (slice("first", "second"), X_res_both),
        # int keys: positional
        # list
        ([0], X_res_first),
        ([0, 1], X_res_both),
        (np.array([0, 1]), X_res_both),
        # slice
        (slice(0, 1), X_res_first),
        (slice(0, 2), X_res_both),
        # boolean mask
        (np.array([True, False]), X_res_first),
        ([True, False], X_res_first),
    ]
    if constructor_name == "dataframe":
        # Scalars are only supported for pandas dataframes.
        cases.extend(
            [
                # scalar
                (0, X_res_first),
                ("first", X_res_first),
                (
                    dataframe_lib.Series([True, False], index=["first", "second"]),
                    X_res_first,
                ),
            ]
        )

    for selection, res in cases:
        ct = ColumnTransformer([("trans", Trans(), selection)], remainder="drop")
        assert_array_equal(ct.fit_transform(X_df), res)
        assert_array_equal(ct.fit(X_df).transform(X_df), res)

        # callable that returns any of the allowed specifiers
        ct = ColumnTransformer(
            [("trans", Trans(), lambda X: selection)], remainder="drop"
        )
        assert_array_equal(ct.fit_transform(X_df), res)
        assert_array_equal(ct.fit(X_df).transform(X_df), res)

    ct = ColumnTransformer(
        [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])]
    )
    assert_array_equal(ct.fit_transform(X_df), X_res_both)
    assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] != "remainder"

    ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
    assert_array_equal(ct.fit_transform(X_df), X_res_both)
    assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] != "remainder"

    # test with transformer_weights
    transformer_weights = {"trans1": 0.1, "trans2": 10}
    both = ColumnTransformer(
        [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])],
        transformer_weights=transformer_weights,
    )
    res = np.vstack(
        [
            transformer_weights["trans1"] * X_df["first"],
            transformer_weights["trans2"] * X_df["second"],
        ]
    ).T
    assert_array_equal(both.fit_transform(X_df), res)
    assert_array_equal(both.fit(X_df).transform(X_df), res)
    assert len(both.transformers_) == 2
    assert both.transformers_[-1][0] != "remainder"

    # test multiple columns
    both = ColumnTransformer(
        [("trans", Trans(), ["first", "second"])], transformer_weights={"trans": 0.1}
    )
    assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
    assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
    assert len(both.transformers_) == 1
    assert both.transformers_[-1][0] != "remainder"

    both = ColumnTransformer(
        [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1}
    )
    assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
    assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
    assert len(both.transformers_) == 1
    assert both.transformers_[-1][0] != "remainder"

    # ensure pandas object is passed through
    class TransAssert(BaseEstimator):
        # Identity transformer that asserts the container type it receives.
        def __init__(self, expected_type_transform):
            self.expected_type_transform = expected_type_transform

        def fit(self, X, y=None):
            return self

        def transform(self, X, y=None):
            assert isinstance(X, self.expected_type_transform)
            if isinstance(X, dataframe_lib.Series):
                X = X.to_frame()
            return X

    ct = ColumnTransformer(
        [
            (
                "trans",
                TransAssert(expected_type_transform=dataframe_lib.DataFrame),
                ["first", "second"],
            )
        ]
    )
    ct.fit_transform(X_df)

    if constructor_name == "dataframe":
        # DataFrame protocol does not have 1d columns, so we only test on Pandas
        # dataframes.
        ct = ColumnTransformer(
            [
                (
                    "trans",
                    TransAssert(expected_type_transform=dataframe_lib.Series),
                    "first",
                )
            ],
            remainder="drop",
        )
        ct.fit_transform(X_df)

        # Only test on pandas because the dataframe protocol requires string column
        # names
        # integer column spec + integer column names -> still use positional
        X_df2 = X_df.copy()
        X_df2.columns = [1, 0]
        ct = ColumnTransformer([("trans", Trans(), 0)], remainder="drop")
        assert_array_equal(ct.fit_transform(X_df2), X_res_first)
        assert_array_equal(ct.fit(X_df2).transform(X_df2), X_res_first)

        assert len(ct.transformers_) == 2
        assert ct.transformers_[-1][0] == "remainder"
        assert ct.transformers_[-1][1] == "drop"
        assert_array_equal(ct.transformers_[-1][2], [1])
@pytest.mark.parametrize("pandas", [True, False], ids=["pandas", "numpy"])
@pytest.mark.parametrize(
    "column_selection",
    [[], np.array([False, False]), [False, False]],
    ids=["list", "bool", "bool_int"],
)
@pytest.mark.parametrize("callable_column", [False, True])
def test_column_transformer_empty_columns(pandas, column_selection, callable_column):
    """A transformer selecting zero columns is kept but never called; here
    TransRaise would error if it were actually fit/transformed."""
    # test case that ensures that the column transformer does also work when
    # a given transformer doesn't have any columns to work on
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_res_both = X_array

    if pandas:
        pd = pytest.importorskip("pandas")
        X = pd.DataFrame(X_array, columns=["first", "second"])
    else:
        X = X_array

    if callable_column:
        column = lambda X: column_selection
    else:
        column = column_selection

    ct = ColumnTransformer(
        [("trans1", Trans(), [0, 1]), ("trans2", TransRaise(), column)]
    )
    assert_array_equal(ct.fit_transform(X), X_res_both)
    assert_array_equal(ct.fit(X).transform(X), X_res_both)
    assert len(ct.transformers_) == 2
    assert isinstance(ct.transformers_[1][1], TransRaise)

    ct = ColumnTransformer(
        [("trans1", TransRaise(), column), ("trans2", Trans(), [0, 1])]
    )
    assert_array_equal(ct.fit_transform(X), X_res_both)
    assert_array_equal(ct.fit(X).transform(X), X_res_both)
    assert len(ct.transformers_) == 2
    assert isinstance(ct.transformers_[0][1], TransRaise)

    ct = ColumnTransformer([("trans", TransRaise(), column)], remainder="passthrough")
    assert_array_equal(ct.fit_transform(X), X_res_both)
    assert_array_equal(ct.fit(X).transform(X), X_res_both)
    assert len(ct.transformers_) == 2  # including remainder
    assert isinstance(ct.transformers_[0][1], TransRaise)

    # with remainder="drop" nothing is selected at all -> (3, 0) output
    fixture = np.array([[], [], []])
    ct = ColumnTransformer([("trans", TransRaise(), column)], remainder="drop")
    assert_array_equal(ct.fit_transform(X), fixture)
    assert_array_equal(ct.fit(X).transform(X), fixture)
    assert len(ct.transformers_) == 2  # including remainder
    assert isinstance(ct.transformers_[0][1], TransRaise)
def test_column_transformer_output_indices():
    """output_indices_ maps each transformer name to the slice of output
    columns it produced, including empty slices for no-op entries."""
    # Checks for the output_indices_ attribute
    X_array = np.arange(6).reshape(3, 2)

    ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
    X_trans = ct.fit_transform(X_array)
    assert ct.output_indices_ == {
        "trans1": slice(0, 1),
        "trans2": slice(1, 2),
        "remainder": slice(0, 0),
    }
    assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]])
    assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]])

    # test with transformer_weights and multiple columns
    ct = ColumnTransformer(
        [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1}
    )
    X_trans = ct.fit_transform(X_array)
    assert ct.output_indices_ == {"trans": slice(0, 2), "remainder": slice(0, 0)}
    assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["trans"]])
    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])

    # test case that ensures that the attribute does also work when
    # a given transformer doesn't have any columns to work on
    ct = ColumnTransformer([("trans1", Trans(), [0, 1]), ("trans2", TransRaise(), [])])
    X_trans = ct.fit_transform(X_array)
    assert ct.output_indices_ == {
        "trans1": slice(0, 2),
        "trans2": slice(0, 0),
        "remainder": slice(0, 0),
    }
    assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["trans1"]])
    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["trans2"]])
    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])

    ct = ColumnTransformer([("trans", TransRaise(), [])], remainder="passthrough")
    X_trans = ct.fit_transform(X_array)
    assert ct.output_indices_ == {"trans": slice(0, 0), "remainder": slice(0, 2)}
    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["trans"]])
    assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["remainder"]])
def test_column_transformer_output_indices_df():
    """output_indices_ behaves the same for dataframe input, whether columns
    are selected by label or by position."""
    # Checks for the output_indices_ attribute with data frames
    pd = pytest.importorskip("pandas")

    X_df = pd.DataFrame(np.arange(6).reshape(3, 2), columns=["first", "second"])

    ct = ColumnTransformer(
        [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])]
    )
    X_trans = ct.fit_transform(X_df)
    assert ct.output_indices_ == {
        "trans1": slice(0, 1),
        "trans2": slice(1, 2),
        "remainder": slice(0, 0),
    }
    assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]])
    assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]])
    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])

    ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
    X_trans = ct.fit_transform(X_df)
    assert ct.output_indices_ == {
        "trans1": slice(0, 1),
        "trans2": slice(1, 2),
        "remainder": slice(0, 0),
    }
    assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]])
    assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]])
    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_column_transformer_sparse_array(csr_container):
    """Sparse input stays sparse through column selection, for both drop
    and passthrough remainders."""
    X_sparse = csr_container(sparse.eye(3, 2))

    # no distinction between 1D and 2D
    X_res_first = X_sparse[:, [0]]
    X_res_both = X_sparse

    for col in [(0,), [0], slice(0, 1)]:
        for remainder, res in [("drop", X_res_first), ("passthrough", X_res_both)]:
            ct = ColumnTransformer(
                [("trans", Trans(), col)], remainder=remainder, sparse_threshold=0.8
            )
            assert sparse.issparse(ct.fit_transform(X_sparse))
            assert_allclose_dense_sparse(ct.fit_transform(X_sparse), res)
            assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), res)

    for col in [[0, 1], slice(0, 2)]:
        ct = ColumnTransformer([("trans", Trans(), col)], sparse_threshold=0.8)
        assert sparse.issparse(ct.fit_transform(X_sparse))
        assert_allclose_dense_sparse(ct.fit_transform(X_sparse), X_res_both)
        assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), X_res_both)
def test_column_transformer_list():
    """ColumnTransformer accepts a plain Python list-of-lists as input."""
    rows = [[1, float("nan"), "a"], [0, 0, "b"]]
    # two scaled numeric columns, then the one-hot encoding of "a"/"b"
    expected = np.array(
        [
            [1, float("nan"), 1, 0],
            [-1, 0, 0, 1],
        ]
    )

    ct = ColumnTransformer(
        [
            ("numerical", StandardScaler(), [0, 1]),
            ("categorical", OneHotEncoder(), [2]),
        ]
    )

    assert_array_equal(ct.fit_transform(rows), expected)
    assert_array_equal(ct.fit(rows).transform(rows), expected)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
@pytest.mark.parametrize("constructor_name", ["array", "pandas", "polars"])
def test_column_transformer_sparse_stacking(csr_container, constructor_name):
    """sparse_threshold decides whether mixed dense/sparse transformer
    outputs are hstacked into a sparse matrix or densified."""
    X = np.array([[0, 1, 2], [2, 4, 6]]).T
    X = _convert_container(X, constructor_name, columns_name=["first", "second"])
    col_trans = ColumnTransformer(
        [("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(csr_container), 1)],
        sparse_threshold=0.8,
    )
    col_trans.fit(X)
    X_trans = col_trans.transform(X)
    # high threshold -> stacked output remains sparse
    assert sparse.issparse(X_trans)
    assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)
    assert_array_equal(X_trans.toarray()[:, 1:], np.eye(X_trans.shape[0]))
    assert len(col_trans.transformers_) == 2
    assert col_trans.transformers_[-1][0] != "remainder"

    col_trans = ColumnTransformer(
        [("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(csr_container), 1)],
        sparse_threshold=0.1,
    )
    col_trans.fit(X)
    X_trans = col_trans.transform(X)
    # low threshold -> output is densified
    assert not sparse.issparse(X_trans)
    assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)
    assert_array_equal(X_trans[:, 1:], np.eye(X_trans.shape[0]))
def test_column_transformer_mixed_cols_sparse():
    """Sparse output works when non-numeric columns can be coerced to
    numbers (booleans), and raises a clear error when they cannot (strings)."""
    df = np.array([["a", 1, True], ["b", 2, False]], dtype="O")

    ct = make_column_transformer(
        (OneHotEncoder(), [0]), ("passthrough", [1, 2]), sparse_threshold=1.0
    )

    # this shouldn't fail, since boolean can be coerced into a numeric
    # See: https://github.com/scikit-learn/scikit-learn/issues/11912
    X_trans = ct.fit_transform(df)
    assert X_trans.format == "csr"
    assert_array_equal(X_trans.toarray(), np.array([[1, 0, 1, 1], [0, 1, 2, 0]]))

    ct = make_column_transformer(
        (OneHotEncoder(), [0]), ("passthrough", [0]), sparse_threshold=1.0
    )
    with pytest.raises(ValueError, match="For a sparse output, all columns should"):
        # this fails since strings `a` and `b` cannot be
        # coerced into a numeric.
        ct.fit_transform(df)
def test_column_transformer_sparse_threshold():
    """The output is sparse iff the overall output density is below
    sparse_threshold; sparse_output_ records the decision."""
    X_array = np.array([["a", "b"], ["A", "B"]], dtype=object).T
    # above data has sparsity of 4 / 8 = 0.5

    # apply threshold even if all sparse
    col_trans = ColumnTransformer(
        [("trans1", OneHotEncoder(), [0]), ("trans2", OneHotEncoder(), [1])],
        sparse_threshold=0.2,
    )
    res = col_trans.fit_transform(X_array)
    assert not sparse.issparse(res)
    assert not col_trans.sparse_output_

    # mixed -> sparsity of (4 + 2) / 8 = 0.75
    for thres in [0.75001, 1]:
        col_trans = ColumnTransformer(
            [
                ("trans1", OneHotEncoder(sparse_output=True), [0]),
                ("trans2", OneHotEncoder(sparse_output=False), [1]),
            ],
            sparse_threshold=thres,
        )
        res = col_trans.fit_transform(X_array)
        assert sparse.issparse(res)
        assert col_trans.sparse_output_

    for thres in [0.75, 0]:
        col_trans = ColumnTransformer(
            [
                ("trans1", OneHotEncoder(sparse_output=True), [0]),
                ("trans2", OneHotEncoder(sparse_output=False), [1]),
            ],
            sparse_threshold=thres,
        )
        res = col_trans.fit_transform(X_array)
        assert not sparse.issparse(res)
        assert not col_trans.sparse_output_

    # if nothing is sparse -> no sparse
    for thres in [0.33, 0, 1]:
        col_trans = ColumnTransformer(
            [
                ("trans1", OneHotEncoder(sparse_output=False), [0]),
                ("trans2", OneHotEncoder(sparse_output=False), [1]),
            ],
            sparse_threshold=thres,
        )
        res = col_trans.fit_transform(X_array)
        assert not sparse.issparse(res)
        assert not col_trans.sparse_output_
def test_column_transformer_error_msg_1D():
    """Scalar column selectors feed 1D data to transformers; both the
    validation error and the transformer's own error must surface."""
    X = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T

    scaler_ct = ColumnTransformer([("trans", StandardScaler(), 0)])
    expected = "1D data passed to a transformer"
    with pytest.raises(ValueError, match=expected):
        scaler_ct.fit(X)
    with pytest.raises(ValueError, match=expected):
        scaler_ct.fit_transform(X)

    failing_ct = ColumnTransformer([("trans", TransRaise(), 0)])
    for method in (failing_ct.fit, failing_ct.fit_transform):
        with pytest.raises(ValueError, match="specific message"):
            method(X)
def test_2D_transformer_output():
    """A transformer that returns 1D output raises a ValueError naming it."""
    X = np.array([[0, 1, 2], [2, 4, 6]]).T

    # if one transformer is dropped, test that name is still correct
    ct = ColumnTransformer([("trans1", "drop", 0), ("trans2", TransNo2D(), 1)])
    expected = "the 'trans2' transformer should be 2D"

    with pytest.raises(ValueError, match=expected):
        ct.fit_transform(X)
    # because fit is also doing transform, this raises already on fit
    with pytest.raises(ValueError, match=expected):
        ct.fit(X)
def test_2D_transformer_output_pandas():
    """Same 2D-output check as above, with a pandas DataFrame input."""
    pd = pytest.importorskip("pandas")

    frame = pd.DataFrame(
        np.array([[0, 1, 2], [2, 4, 6]]).T, columns=["col1", "col2"]
    )

    # if one transformer is dropped, test that name is still correct
    ct = ColumnTransformer([("trans1", TransNo2D(), "col1")])
    expected = "the 'trans1' transformer should be 2D"

    with pytest.raises(ValueError, match=expected):
        ct.fit_transform(frame)
    # because fit is also doing transform, this raises already on fit
    with pytest.raises(ValueError, match=expected):
        ct.fit(frame)
@pytest.mark.parametrize("remainder", ["drop", "passthrough"])
def test_column_transformer_invalid_columns(remainder):
    """Invalid column specifiers raise at fit time, and transform rejects
    inputs whose feature count differs from fit."""
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T

    # general invalid
    for col in [1.5, ["string", 1], slice(1, "s"), np.array([1.0])]:
        ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
        with pytest.raises(ValueError, match="No valid specification"):
            ct.fit(X_array)

    # invalid for arrays
    for col in ["string", ["string", "other"], slice("a", "b")]:
        ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
        with pytest.raises(ValueError, match="Specifying the columns"):
            ct.fit(X_array)

    # transformed n_features does not match fitted n_features
    col = [0, 1]
    ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
    ct.fit(X_array)
    X_array_more = np.array([[0, 1, 2], [2, 4, 6], [3, 6, 9]]).T
    msg = "X has 3 features, but ColumnTransformer is expecting 2 features as input."
    with pytest.raises(ValueError, match=msg):
        ct.transform(X_array_more)
    X_array_fewer = np.array(
        [
            [0, 1, 2],
        ]
    ).T
    err_msg = (
        "X has 1 features, but ColumnTransformer is expecting 2 features as input."
    )
    with pytest.raises(ValueError, match=err_msg):
        ct.transform(X_array_fewer)
def test_column_transformer_invalid_transformer():
    """Estimators without a ``transform`` method are rejected at fit time."""

    class FitOnly(BaseEstimator):
        # has fit/predict but no transform -> not a valid transformer
        def fit(self, X, y=None):
            return self

        def predict(self, X):
            return X

    X = np.array([[0, 1, 2], [2, 4, 6]]).T
    ct = ColumnTransformer([("trans", FitOnly(), [0])])

    expected = "All estimators should implement fit and transform"
    with pytest.raises(TypeError, match=expected):
        ct.fit(X)
def test_make_column_transformer():
    """make_column_transformer derives lowercase names from the classes."""
    scaler = StandardScaler()
    norm = Normalizer()

    ct = make_column_transformer((scaler, "first"), (norm, ["second"]))

    first, second = ct.transformers
    assert first == ("standardscaler", scaler, "first")
    assert second == ("normalizer", norm, ["second"])
def test_make_column_transformer_pandas():
    """A pandas Index is a valid column specifier for both constructors."""
    pd = pytest.importorskip("pandas")
    X_df = pd.DataFrame(
        np.array([[0, 1, 2], [2, 4, 6]]).T, columns=["first", "second"]
    )

    explicit = ColumnTransformer([("norm", Normalizer(), X_df.columns)])
    shorthand = make_column_transformer((Normalizer(), X_df.columns))

    assert_almost_equal(explicit.fit_transform(X_df), shorthand.fit_transform(X_df))
def test_make_column_transformer_kwargs():
    """Keyword arguments are forwarded to ColumnTransformer; unknown ones
    (like transformer_weights) raise a TypeError."""
    scaler = StandardScaler()
    norm = Normalizer()
    ct = make_column_transformer(
        (scaler, "first"),
        (norm, ["second"]),
        n_jobs=3,
        remainder="drop",
        sparse_threshold=0.5,
    )
    assert (
        ct.transformers
        == make_column_transformer((scaler, "first"), (norm, ["second"])).transformers
    )
    assert ct.n_jobs == 3
    assert ct.remainder == "drop"
    assert ct.sparse_threshold == 0.5
    # invalid keyword parameters should raise an error message
    msg = re.escape(
        "make_column_transformer() got an unexpected "
        "keyword argument 'transformer_weights'"
    )
    with pytest.raises(TypeError, match=msg):
        make_column_transformer(
            (scaler, "first"),
            (norm, ["second"]),
            transformer_weights={"pca": 10, "Transf": 1},
        )
def test_make_column_transformer_remainder_transformer():
    """An estimator passed as ``remainder`` is stored on the instance."""
    remainder = StandardScaler()

    ct = make_column_transformer(
        (StandardScaler(), "first"),
        (Normalizer(), ["second"]),
        remainder=remainder,
    )

    assert ct.remainder == remainder
def test_column_transformer_get_set_params():
    """get_params exposes nested transformer params; set_params can both
    tweak nested params and replace a transformer with 'passthrough'."""
    ct = ColumnTransformer(
        [("trans1", StandardScaler(), [0]), ("trans2", StandardScaler(), [1])]
    )

    exp = {
        "n_jobs": None,
        "remainder": "drop",
        "sparse_threshold": 0.3,
        "trans1": ct.transformers[0][1],
        "trans1__copy": True,
        "trans1__with_mean": True,
        "trans1__with_std": True,
        "trans2": ct.transformers[1][1],
        "trans2__copy": True,
        "trans2__with_mean": True,
        "trans2__with_std": True,
        "transformers": ct.transformers,
        "transformer_weights": None,
        "verbose_feature_names_out": True,
        "verbose": False,
        "force_int_remainder_cols": "deprecated",
    }

    assert ct.get_params() == exp

    ct.set_params(trans1__with_mean=False)
    assert not ct.get_params()["trans1__with_mean"]

    ct.set_params(trans1="passthrough")
    # replacing trans1 removes its nested params from get_params output
    exp = {
        "n_jobs": None,
        "remainder": "drop",
        "sparse_threshold": 0.3,
        "trans1": "passthrough",
        "trans2": ct.transformers[1][1],
        "trans2__copy": True,
        "trans2__with_mean": True,
        "trans2__with_std": True,
        "transformers": ct.transformers,
        "transformer_weights": None,
        "verbose_feature_names_out": True,
        "verbose": False,
        "force_int_remainder_cols": "deprecated",
    }

    assert ct.get_params() == exp
def test_column_transformer_named_estimators():
    """named_transformers_ gives dict- and attribute-style access to the
    fitted (cloned) transformers, and only exists after fit."""
    X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
    ct = ColumnTransformer(
        [
            ("trans1", StandardScaler(), [0]),
            ("trans2", StandardScaler(with_std=False), [1]),
        ]
    )
    assert not hasattr(ct, "transformers_")
    ct.fit(X_array)
    assert hasattr(ct, "transformers_")
    assert isinstance(ct.named_transformers_["trans1"], StandardScaler)
    assert isinstance(ct.named_transformers_.trans1, StandardScaler)
    assert isinstance(ct.named_transformers_["trans2"], StandardScaler)
    assert isinstance(ct.named_transformers_.trans2, StandardScaler)
    assert not ct.named_transformers_.trans2.with_std
    # check it are fitted transformers
    assert ct.named_transformers_.trans1.mean_ == 1.0
def test_column_transformer_cloning():
    """fit/fit_transform clone the supplied transformers: the originals in
    ``transformers`` stay unfitted, the fitted clones live in
    ``transformers_``."""
    X = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T

    for method_name in ("fit", "fit_transform"):
        ct = ColumnTransformer([("trans", StandardScaler(), [0])])
        getattr(ct, method_name)(X)
        # user-supplied estimator untouched...
        assert not hasattr(ct.transformers[0][1], "mean_")
        # ...while its fitted clone is exposed via transformers_
        assert hasattr(ct.transformers_[0][1], "mean_")
def test_column_transformer_get_feature_names():
    """get_feature_names_out raises NotFittedError before fit and a clear
    AttributeError when a transformer does not implement it."""
    X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
    ct = ColumnTransformer([("trans", Trans(), [0, 1])])
    # raise correct error when not fitted
    with pytest.raises(NotFittedError):
        ct.get_feature_names_out()
    # raise correct error when no feature names are available
    ct.fit(X_array)
    msg = re.escape(
        "Transformer trans (type Trans) does not provide get_feature_names_out"
    )
    with pytest.raises(AttributeError, match=msg):
        ct.get_feature_names_out()
def test_column_transformer_special_strings():
    """The special transformer strings 'drop' and 'passthrough' remove or
    forward their columns while keeping an entry in transformers_."""
    # one 'drop' -> ignore
    X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
    ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", "drop", [1])])
    exp = np.array([[0.0], [1.0], [2.0]])
    assert_array_equal(ct.fit_transform(X_array), exp)
    assert_array_equal(ct.fit(X_array).transform(X_array), exp)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] != "remainder"

    # all 'drop' -> return shape 0 array
    ct = ColumnTransformer([("trans1", "drop", [0]), ("trans2", "drop", [1])])
    assert_array_equal(ct.fit(X_array).transform(X_array).shape, (3, 0))
    assert_array_equal(ct.fit_transform(X_array).shape, (3, 0))
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] != "remainder"

    # 'passthrough'
    X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
    ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", "passthrough", [1])])
    exp = X_array
    assert_array_equal(ct.fit_transform(X_array), exp)
    assert_array_equal(ct.fit(X_array).transform(X_array), exp)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] != "remainder"
def test_column_transformer_remainder():
    """The remainder option: default 'drop' discards unselected columns,
    'passthrough' appends them (at the end) via a FunctionTransformer, and
    a 'remainder' entry is always appended to transformers_."""
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T

    X_res_first = np.array([0, 1, 2]).reshape(-1, 1)
    X_res_second = np.array([2, 4, 6]).reshape(-1, 1)
    X_res_both = X_array

    # default drop
    ct = ColumnTransformer([("trans1", Trans(), [0])])
    assert_array_equal(ct.fit_transform(X_array), X_res_first)
    assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] == "remainder"
    assert ct.transformers_[-1][1] == "drop"
    assert_array_equal(ct.transformers_[-1][2], [1])

    # specify passthrough
    ct = ColumnTransformer([("trans", Trans(), [0])], remainder="passthrough")
    assert_array_equal(ct.fit_transform(X_array), X_res_both)
    assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] == "remainder"
    assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
    assert_array_equal(ct.transformers_[-1][2], [1])

    # column order is not preserved (passed through added to end)
    ct = ColumnTransformer([("trans1", Trans(), [1])], remainder="passthrough")
    assert_array_equal(ct.fit_transform(X_array), X_res_both[:, ::-1])
    assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both[:, ::-1])
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] == "remainder"
    assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
    assert_array_equal(ct.transformers_[-1][2], [0])

    # passthrough when all actual transformers are skipped
    ct = ColumnTransformer([("trans1", "drop", [0])], remainder="passthrough")
    assert_array_equal(ct.fit_transform(X_array), X_res_second)
    assert_array_equal(ct.fit(X_array).transform(X_array), X_res_second)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] == "remainder"
    assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
    assert_array_equal(ct.transformers_[-1][2], [1])

    # check default for make_column_transformer
    ct = make_column_transformer((Trans(), [0]))
    assert ct.remainder == "drop"
@pytest.mark.parametrize(
    "cols1, cols2, expected_remainder_cols",
    [
        ([0], [False, True, False], [2]),  # mix types
        ([0], [1], [2]),  # ints
        (lambda x: [0], lambda x: [1], [2]),  # callables
        (["A"], ["B"], ["C"]),  # all strings
        ([True, False, False], [False, True, False], [False, False, True]),  # all bools
    ],
)
def test_column_transformer_remainder_dtypes(cols1, cols2, expected_remainder_cols):
    """Check that the remainder columns format matches the format of the other
    columns when they're all strings or masks.

    :param cols1: column specifier for the first transformer
    :param cols2: column specifier for the second transformer
    :param expected_remainder_cols: expected remainder column representation
    """
    X = np.ones((1, 3))

    if isinstance(cols1, list) and isinstance(cols1[0], str):
        pd = pytest.importorskip("pandas")
        X = pd.DataFrame(X, columns=["A", "B", "C"])

    # if inputs are column names store remainder columns as column names
    ct = make_column_transformer(
        (Trans(), cols1),
        (Trans(), cols2),
        remainder="passthrough",
    )
    ct.fit_transform(X)
    assert ct.transformers_[-1][-1] == expected_remainder_cols
# TODO(1.9): remove this test
@pytest.mark.parametrize("force_int_remainder_cols", [True, False])
def test_force_int_remainder_cols_deprecation(force_int_remainder_cols):
    """Check that ColumnTransformer raises a FutureWarning when
    force_int_remainder_cols is set.
    """
    X = np.ones((1, 3))
    ct = ColumnTransformer(
        [("T1", Trans(), [0]), ("T2", Trans(), [1])],
        remainder="passthrough",
        force_int_remainder_cols=force_int_remainder_cols,
    )
    # any non-default value (True or False) must trigger the warning
    with pytest.warns(FutureWarning, match="`force_int_remainder_cols` is deprecated"):
        ct.fit(X)
@pytest.mark.parametrize(
    "key, expected_cols",
    [
        ([0], [1]),
        (np.array([0]), [1]),
        (slice(0, 1), [1]),
        (np.array([True, False]), [False, True]),
    ],
)
def test_column_transformer_remainder_numpy(key, expected_cols):
    """Remainder passthrough works for every array column-specifier kind
    and records the complementary columns in transformers_."""
    # test different ways that columns are specified with passthrough
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_res_both = X_array

    ct = ColumnTransformer(
        [("trans1", Trans(), key)],
        remainder="passthrough",
    )
    assert_array_equal(ct.fit_transform(X_array), X_res_both)
    assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] == "remainder"
    assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
    assert ct.transformers_[-1][2] == expected_cols
@pytest.mark.parametrize(
    "key, expected_cols",
    [
        ([0], [1]),
        (slice(0, 1), [1]),
        (np.array([True, False]), [False, True]),
        (["first"], ["second"]),
        ("pd-index", ["second"]),
        (np.array(["first"]), ["second"]),
        (np.array(["first"], dtype=object), ["second"]),
        (slice(None, "first"), ["second"]),
        (slice("first", "first"), ["second"]),
    ],
)
def test_column_transformer_remainder_pandas(key, expected_cols):
    """Remainder passthrough works for every dataframe column-specifier
    kind (positional, boolean, label list/array/Index, label slices)."""
    # test different ways that columns are specified with passthrough
    pd = pytest.importorskip("pandas")
    if isinstance(key, str) and key == "pd-index":
        # sentinel expanded here because an Index can't live in parametrize
        key = pd.Index(["first"])

    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_df = pd.DataFrame(X_array, columns=["first", "second"])
    X_res_both = X_array

    ct = ColumnTransformer(
        [("trans1", Trans(), key)],
        remainder="passthrough",
    )
    assert_array_equal(ct.fit_transform(X_df), X_res_both)
    assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] == "remainder"
    assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
    assert ct.transformers_[-1][2] == expected_cols
@pytest.mark.parametrize(
    "key, expected_cols",
    [
        ([0], [1, 2]),
        (np.array([0]), [1, 2]),
        (slice(0, 1), [1, 2]),
        (np.array([True, False, False]), [False, True, True]),
    ],
)
def test_column_transformer_remainder_transformer(key, expected_cols):
    data = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
    # DoubleTrans doubles whatever it receives; here that is the remainder
    # (second and third) columns, while Trans leaves column 0 untouched.
    expected = data.copy()
    expected[:, 1:3] *= 2

    ct = ColumnTransformer([("trans1", Trans(), key)], remainder=DoubleTrans())
    assert_array_equal(ct.fit_transform(data), expected)
    assert_array_equal(ct.fit(data).transform(data), expected)
    assert len(ct.transformers_) == 2
    name, trans, cols = ct.transformers_[-1]
    assert name == "remainder"
    assert isinstance(trans, DoubleTrans)
    assert cols == expected_cols
def test_column_transformer_no_remaining_remainder_transformer():
    # When the listed transformers already cover every column, the remainder
    # estimator has nothing to do and no "remainder" entry is recorded.
    data = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
    ct = ColumnTransformer([("trans1", Trans(), [0, 1, 2])], remainder=DoubleTrans())
    assert_array_equal(ct.fit_transform(data), data)
    assert_array_equal(ct.fit(data).transform(data), data)
    assert len(ct.transformers_) == 1
    assert ct.transformers_[-1][0] != "remainder"
def test_column_transformer_drops_all_remainder_transformer():
    data = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
    # Column 0 is dropped; DoubleTrans doubles the remaining two columns.
    expected = 2 * data.copy()[:, 1:3]

    ct = ColumnTransformer([("trans1", "drop", [0])], remainder=DoubleTrans())
    assert_array_equal(ct.fit_transform(data), expected)
    assert_array_equal(ct.fit(data).transform(data), expected)
    assert len(ct.transformers_) == 2
    name, trans, cols = ct.transformers_[-1]
    assert name == "remainder"
    assert isinstance(trans, DoubleTrans)
    assert_array_equal(cols, [1, 2])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_column_transformer_sparse_remainder_transformer(csr_container):
    data = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
    ct = ColumnTransformer(
        [("trans1", Trans(), [0])],
        remainder=SparseMatrixTrans(csr_container),
        sparse_threshold=0.8,
    )

    result = ct.fit_transform(data)
    assert sparse.issparse(result)
    # SparseMatrixTrans creates 3 features for each column. There is
    # one column in ``transformers``, thus:
    assert result.shape == (3, 3 + 1)
    expected = np.hstack((data[:, 0].reshape(-1, 1), np.eye(3)))
    assert_array_equal(result.toarray(), expected)

    assert len(ct.transformers_) == 2
    name, trans, cols = ct.transformers_[-1]
    assert name == "remainder"
    assert isinstance(trans, SparseMatrixTrans)
    assert_array_equal(cols, [1, 2])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_column_transformer_drop_all_sparse_remainder_transformer(csr_container):
    data = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
    ct = ColumnTransformer(
        [("trans1", "drop", [0])],
        remainder=SparseMatrixTrans(csr_container),
        sparse_threshold=0.8,
    )

    result = ct.fit_transform(data)
    assert sparse.issparse(result)
    # SparseMatrixTrans creates 3 features for each column, thus:
    assert result.shape == (3, 3)
    assert_array_equal(result.toarray(), np.eye(3))

    assert len(ct.transformers_) == 2
    name, trans, cols = ct.transformers_[-1]
    assert name == "remainder"
    assert isinstance(trans, SparseMatrixTrans)
    assert_array_equal(cols, [1, 2])
def test_column_transformer_get_set_params_with_remainder():
    # get_params must expose the remainder estimator's own parameters under
    # the "remainder__" prefix, exactly like a named transformer; set_params
    # must reach through that prefix as well.
    ct = ColumnTransformer(
        [("trans1", StandardScaler(), [0])], remainder=StandardScaler()
    )
    exp = {
        "n_jobs": None,
        "remainder": ct.remainder,
        "remainder__copy": True,
        "remainder__with_mean": True,
        "remainder__with_std": True,
        "sparse_threshold": 0.3,
        "trans1": ct.transformers[0][1],
        "trans1__copy": True,
        "trans1__with_mean": True,
        "trans1__with_std": True,
        "transformers": ct.transformers,
        "transformer_weights": None,
        "verbose_feature_names_out": True,
        "verbose": False,
        "force_int_remainder_cols": "deprecated",
    }
    assert ct.get_params() == exp
    ct.set_params(remainder__with_std=False)
    assert not ct.get_params()["remainder__with_std"]
    # Replacing trans1 by the "passthrough" string removes its nested
    # "trans1__*" parameters from the result.
    ct.set_params(trans1="passthrough")
    exp = {
        "n_jobs": None,
        "remainder": ct.remainder,
        "remainder__copy": True,
        "remainder__with_mean": True,
        "remainder__with_std": False,
        "sparse_threshold": 0.3,
        "trans1": "passthrough",
        "transformers": ct.transformers,
        "transformer_weights": None,
        "verbose_feature_names_out": True,
        "verbose": False,
        "force_int_remainder_cols": "deprecated",
    }
    assert ct.get_params() == exp
def test_column_transformer_no_estimators():
    # With an empty transformer list, every column is handled by the
    # remainder estimator.
    data = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).astype("float").T
    ct = ColumnTransformer([], remainder=StandardScaler())

    assert ct.get_params()["remainder__with_mean"]
    transformed = ct.fit_transform(data)
    assert transformed.shape == data.shape
    assert len(ct.transformers_) == 1
    assert ct.transformers_[-1][0] == "remainder"
    assert ct.transformers_[-1][2] == [0, 1, 2]
@pytest.mark.parametrize(
["est", "pattern"],
[
(
ColumnTransformer(
[("trans1", Trans(), [0]), ("trans2", Trans(), [1])],
remainder=DoubleTrans(),
),
(
r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n"
r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n"
r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$"
),
),
(
ColumnTransformer(
[("trans1", Trans(), [0]), ("trans2", Trans(), [1])],
remainder="passthrough",
),
(
r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n"
r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n"
r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$"
),
),
(
ColumnTransformer(
[("trans1", Trans(), [0]), ("trans2", "drop", [1])],
remainder="passthrough",
),
(
r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n"
r"\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$"
),
),
(
ColumnTransformer(
[("trans1", Trans(), [0]), ("trans2", "passthrough", [1])],
remainder="passthrough",
),
(
r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n"
r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n"
r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$"
),
),
(
ColumnTransformer([("trans1", Trans(), [0])], remainder="passthrough"),
(
r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n"
r"\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$"
),
),
(
ColumnTransformer(
[("trans1", Trans(), [0]), ("trans2", Trans(), [1])], remainder="drop"
),
(
r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n"
r"\[ColumnTransformer\].*\(2 of 2\) Processing trans2.* total=.*\n$"
),
),
(
ColumnTransformer([("trans1", Trans(), [0])], remainder="drop"),
r"\[ColumnTransformer\].*\(1 of 1\) Processing trans1.* total=.*\n$",
),
],
)
@pytest.mark.parametrize("method", ["fit", "fit_transform"])
def test_column_transformer_verbose(est, pattern, method, capsys):
X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
func = getattr(est, method)
est.set_params(verbose=False)
func(X_array)
assert not capsys.readouterr().out, "Got output for verbose=False"
est.set_params(verbose=True)
func(X_array)
assert re.match(pattern, capsys.readouterr()[0])
def test_column_transformer_no_estimators_set_params():
    # set_params must work even when the transformer list is empty.
    ct = ColumnTransformer([])
    ct.set_params(n_jobs=2)
    assert ct.n_jobs == 2
def test_column_transformer_callable_specifier():
    # A callable column specifier must be invoked with the full input array.
    data = np.array([[0, 1, 2], [2, 4, 6]]).T
    expected = np.array([[0, 1, 2]]).T

    def select_first(X):
        assert_array_equal(X, data)
        return [0]

    ct = ColumnTransformer([("trans", Trans(), select_first)], remainder="drop")
    assert_array_equal(ct.fit_transform(data), expected)
    assert_array_equal(ct.fit(data).transform(data), expected)
    # The callable itself stays in `transformers`; its resolved result is
    # stored in the fitted `transformers_`.
    assert callable(ct.transformers[0][2])
    assert ct.transformers_[0][2] == [0]
def test_column_transformer_callable_specifier_dataframe():
    # A callable column specifier must be invoked with the full dataframe.
    pd = pytest.importorskip("pandas")
    data = np.array([[0, 1, 2], [2, 4, 6]]).T
    expected = np.array([[0, 1, 2]]).T
    frame = pd.DataFrame(data, columns=["first", "second"])

    def select_first(X):
        assert_array_equal(X.columns, frame.columns)
        assert_array_equal(X.values, frame.values)
        return ["first"]

    ct = ColumnTransformer([("trans", Trans(), select_first)], remainder="drop")
    assert_array_equal(ct.fit_transform(frame), expected)
    assert_array_equal(ct.fit(frame).transform(frame), expected)
    # The callable itself stays in `transformers`; its resolved result is
    # stored in the fitted `transformers_`.
    assert callable(ct.transformers[0][2])
    assert ct.transformers_[0][2] == ["first"]
def test_column_transformer_negative_column_indexes():
    # A negative index must address the same column as its positive twin.
    continuous = np.random.randn(2, 2)
    categories = np.array([[1], [2]])
    X = np.concatenate([continuous, categories], axis=1)

    ohe = OneHotEncoder()
    tf_negative = ColumnTransformer([("ohe", ohe, [-1])], remainder="passthrough")
    tf_positive = ColumnTransformer([("ohe", ohe, [2])], remainder="passthrough")
    assert_array_equal(tf_negative.fit_transform(X), tf_positive.fit_transform(X))
@pytest.mark.parametrize("array_type", [np.asarray, *CSR_CONTAINERS])
def test_column_transformer_mask_indexing(array_type):
    # Regression test for #14510
    # Boolean array-like does not behave as boolean array with sparse matrices.
    mask = [False, True, False, True]
    X = array_type(np.transpose([[1, 2, 3], [4, 5, 6], [5, 6, 7], [8, 9, 10]]))
    ct = ColumnTransformer([("identity", FunctionTransformer(), mask)])
    # Two of the four columns are selected for all three rows.
    assert ct.fit_transform(X).shape == (3, 2)
def test_n_features_in():
    # n_features_in_ reflects the width of the fit input, and only exists
    # after fitting.
    X = [[1, 2], [3, 4], [5, 6]]
    ct = ColumnTransformer([("a", DoubleTrans(), [0]), ("b", DoubleTrans(), [1])])
    assert not hasattr(ct, "n_features_in_")
    ct.fit(X)
    assert ct.n_features_in_ == 2
@pytest.mark.parametrize(
"cols, pattern, include, exclude",
[
(["col_int", "col_float"], None, np.number, None),
(["col_int", "col_float"], None, None, [object, "string"]),
(["col_int", "col_float"], None, [int, float], None),
(["col_str"], None, [object, "string"], None),
(["col_float"], None, [float], None),
(["col_float"], None, float, None),
(["col_float"], "at$", [np.number], None),
(["col_int"], None, [int], None),
(["col_int"], "^col_int", [np.number], None),
(["col_float", "col_str"], "float|str", None, None),
(["col_str"], "^col_s", None, [int]),
([], "str$", float, None),
(
["col_int", "col_float", "col_str"],
None,
[np.number, object, "string"],
None,
),
],
)
def test_make_column_selector_with_select_dtypes(cols, pattern, include, exclude):
pd = pytest.importorskip("pandas")
X_df = pd.DataFrame(
{
"col_int": np.array([0, 1, 2], dtype=int),
"col_float": np.array([0.0, 1.0, 2.0], dtype=float),
"col_str": ["one", "two", "three"],
},
columns=["col_int", "col_float", "col_str"],
)
selector = make_column_selector(
dtype_include=include, dtype_exclude=exclude, pattern=pattern
)
assert_array_equal(selector(X_df), cols)
def test_column_transformer_with_make_column_selector():
# Functional test for column transformer + column selector
pd = pytest.importorskip("pandas")
X_df = pd.DataFrame(
{
"col_int": np.array([0, 1, 2], dtype=int),
"col_float": np.array([0.0, 1.0, 2.0], dtype=float),
"col_cat": ["one", "two", "one"],
"col_str": ["low", "middle", "high"],
},
columns=["col_int", "col_float", "col_cat", "col_str"],
)
X_df["col_str"] = X_df["col_str"].astype("category")
cat_selector = make_column_selector(dtype_include=["category", object, "string"])
num_selector = make_column_selector(dtype_include=np.number)
ohe = OneHotEncoder()
scaler = StandardScaler()
ct_selector = make_column_transformer((ohe, cat_selector), (scaler, num_selector))
ct_direct = make_column_transformer(
(ohe, ["col_cat", "col_str"]), (scaler, ["col_float", "col_int"])
)
X_selector = ct_selector.fit_transform(X_df)
X_direct = ct_direct.fit_transform(X_df)
assert_allclose(X_selector, X_direct)
def test_make_column_selector_error():
    # A plain ndarray carries no per-column dtypes, so the selector must
    # refuse it with an informative error.
    selector = make_column_selector(dtype_include=np.number)
    with pytest.raises(
        ValueError,
        match="make_column_selector can only be applied to pandas dataframes",
    ):
        selector(np.array([[0.1, 0.2]]))
def test_make_column_selector_pickle():
    # A selector must survive a pickle round-trip and select the same columns.
    pd = pytest.importorskip("pandas")
    frame = pd.DataFrame(
        {
            "col_int": np.array([0, 1, 2], dtype=int),
            "col_float": np.array([0.0, 1.0, 2.0], dtype=float),
            "col_str": ["one", "two", "three"],
        },
        columns=["col_int", "col_float", "col_str"],
    )

    selector = make_column_selector(dtype_include=[object, "string"])
    restored = pickle.loads(pickle.dumps(selector))
    assert_array_equal(selector(frame), restored(frame))
@pytest.mark.parametrize(
    "empty_col",
    [[], np.array([], dtype=int), lambda x: []],
    ids=["list", "array", "callable"],
)
def test_feature_names_empty_columns(empty_col):
    # A transformer selecting zero columns must contribute no output names,
    # whichever way the empty selection is expressed.
    pd = pytest.importorskip("pandas")
    frame = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]})

    ct = ColumnTransformer(
        transformers=[
            ("ohe", OneHotEncoder(), ["col1", "col2"]),
            ("empty_features", OneHotEncoder(), empty_col),
        ],
    )
    ct.fit(frame)
    assert_array_equal(
        ct.get_feature_names_out(), ["ohe__col1_a", "ohe__col1_b", "ohe__col2_z"]
    )
@pytest.mark.parametrize(
    "selector",
    [
        [1],
        lambda x: [1],
        ["col2"],
        lambda x: ["col2"],
        [False, True],
        lambda x: [False, True],
    ],
)
def test_feature_names_out_pandas(selector):
    """Checks name when selecting only the second column"""
    pd = pytest.importorskip("pandas")
    frame = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]})
    ct = ColumnTransformer([("ohe", OneHotEncoder(), selector)])
    ct.fit(frame)
    # Every selector flavor resolves to "col2", whose only category is "z".
    assert_array_equal(ct.get_feature_names_out(), ["ohe__col2_z"])
@pytest.mark.parametrize(
    "selector", [[1], lambda x: [1], [False, True], lambda x: [False, True]]
)
def test_feature_names_out_non_pandas(selector):
    """Checks name when selecting the second column with numpy array"""
    X = [["a", "z"], ["a", "z"], ["b", "z"]]
    ct = ColumnTransformer([("ohe", OneHotEncoder(), selector)])
    ct.fit(X)
    # Without column names the generated feature name is positional: x1.
    assert_array_equal(ct.get_feature_names_out(), ["ohe__x1_z"])
@pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()])
def test_sk_visual_block_remainder(remainder):
    # remainder='passthrough' or an estimator will be shown in repr_html
    ohe = OneHotEncoder()
    ct = ColumnTransformer(
        transformers=[("ohe", ohe, ["col1", "col2"])], remainder=remainder
    )
    block = ct._sk_visual_block_()
    assert block.names == ("ohe", "remainder")
    # Before fitting, the remainder's column detail is still empty.
    assert block.name_details == (["col1", "col2"], "")
    assert block.estimators == (ohe, remainder)
def test_sk_visual_block_remainder_drop():
    # The default remainder='drop' is omitted from the HTML representation.
    ohe = OneHotEncoder()
    ct = ColumnTransformer(transformers=[("ohe", ohe, ["col1", "col2"])])
    block = ct._sk_visual_block_()
    assert block.names == ("ohe",)
    assert block.name_details == (["col1", "col2"],)
    assert block.estimators == (ohe,)
@pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()])
def test_sk_visual_block_remainder_fitted_pandas(remainder):
# Remainder shows the columns after fitting
pd = pytest.importorskip("pandas")
ohe = OneHotEncoder()
ct = ColumnTransformer(
transformers=[("ohe", ohe, ["col1", "col2"])],
remainder=remainder,
)
df = pd.DataFrame(
{
"col1": ["a", "b", "c"],
"col2": ["z", "z", "z"],
"col3": [1, 2, 3],
"col4": [3, 4, 5],
}
)
ct.fit(df)
visual_block = ct._sk_visual_block_()
assert visual_block.names == ("ohe", "remainder")
assert visual_block.name_details == (["col1", "col2"], ["col3", "col4"])
assert visual_block.estimators == (ohe, remainder)
@pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()])
def test_sk_visual_block_remainder_fitted_numpy(remainder):
    # Remainder shows the indices after fitting
    data = np.array([[1, 2, 3], [4, 5, 6]], dtype=float)
    scaler = StandardScaler()
    ct = ColumnTransformer(
        transformers=[("scale", scaler, [0, 2])], remainder=remainder
    )
    ct.fit(data)
    block = ct._sk_visual_block_()
    assert block.names == ("scale", "remainder")
    # Column 1 is the only one not claimed by the "scale" transformer.
    assert block.name_details == ([0, 2], [1])
    assert block.estimators == (scaler, remainder)
@pytest.mark.parametrize("explicit_colname", ["first", "second", 0, 1])
@pytest.mark.parametrize("remainder", [Trans(), "passthrough", "drop"])
def test_column_transformer_reordered_column_names_remainder(
explicit_colname, remainder
):
"""Test the interaction between remainder and column transformer"""
pd = pytest.importorskip("pandas")
X_fit_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_fit_df = pd.DataFrame(X_fit_array, columns=["first", "second"])
X_trans_array = np.array([[2, 4, 6], [0, 1, 2]]).T
X_trans_df = pd.DataFrame(X_trans_array, columns=["second", "first"])
tf = ColumnTransformer([("bycol", Trans(), explicit_colname)], remainder=remainder)
tf.fit(X_fit_df)
X_fit_trans = tf.transform(X_fit_df)
# Changing the order still works
X_trans = tf.transform(X_trans_df)
assert_allclose(X_trans, X_fit_trans)
# extra columns are ignored
X_extended_df = X_fit_df.copy()
X_extended_df["third"] = [3, 6, 9]
X_trans = tf.transform(X_extended_df)
assert_allclose(X_trans, X_fit_trans)
if isinstance(explicit_colname, str):
# Raise error if columns are specified by names but input only allows
# to specify by position, e.g. numpy array instead of a pandas df.
X_array = X_fit_array.copy()
err_msg = "Specifying the columns"
with pytest.raises(ValueError, match=err_msg):
tf.transform(X_array)
def test_feature_name_validation_missing_columns_drop_passthough():
"""Test the interaction between {'drop', 'passthrough'} and
missing column names."""
pd = pytest.importorskip("pandas")
X = np.ones(shape=(3, 4))
df = pd.DataFrame(X, columns=["a", "b", "c", "d"])
df_dropped = df.drop("c", axis=1)
# with remainder='passthrough', all columns seen during `fit` must be
# present
tf = ColumnTransformer([("bycol", Trans(), [1])], remainder="passthrough")
tf.fit(df)
msg = r"columns are missing: {'c'}"
with pytest.raises(ValueError, match=msg):
tf.transform(df_dropped)
# with remainder='drop', it is allowed to have column 'c' missing
tf = ColumnTransformer([("bycol", Trans(), [1])], remainder="drop")
tf.fit(df)
df_dropped_trans = tf.transform(df_dropped)
df_fit_trans = tf.transform(df)
assert_allclose(df_dropped_trans, df_fit_trans)
# bycol drops 'c', thus it is allowed for 'c' to be missing
tf = ColumnTransformer([("bycol", "drop", ["c"])], remainder="passthrough")
tf.fit(df)
df_dropped_trans = tf.transform(df_dropped)
df_fit_trans = tf.transform(df)
assert_allclose(df_dropped_trans, df_fit_trans)
def test_feature_names_in_():
    """Feature names are stored in column transformer.

    Column transformer deliberately does not check for column name consistency.
    It only checks that the non-dropped names seen in `fit` are seen
    in `transform`. This behavior is already tested in
    `test_feature_name_validation_missing_columns_drop_passthough`"""
    pd = pytest.importorskip("pandas")
    columns = ["a", "c", "d"]
    frame = pd.DataFrame([[1, 2, 3]], columns=columns)
    ct = ColumnTransformer([("bycol", Trans(), ["a", "d"])], remainder="passthrough")
    ct.fit(frame)
    names = ct.feature_names_in_
    # All fit-time columns are recorded as an object-dtype ndarray.
    assert_array_equal(names, columns)
    assert isinstance(names, np.ndarray)
    assert names.dtype == object
| TransRaise |
python | jazzband__django-redis | django_redis/compressors/lzma.py | {
"start": 124,
"end": 550
class ____(BaseCompressor):
    # Values at or below this many bytes are stored uncompressed: the LZMA
    # container overhead would outweigh any savings on tiny payloads.
    min_length = 100
    # LZMA preset (0-9); 4 trades compression ratio for speed.
    preset = 4

    def compress(self, value: bytes) -> bytes:
        """Return *value* LZMA-compressed, or unchanged when it is short."""
        if len(value) > self.min_length:
            return lzma.compress(value, preset=self.preset)
        return value

    def decompress(self, value: bytes) -> bytes:
        """Reverse :meth:`compress`, wrapping LZMA failures in CompressorError.

        NOTE(review): values short enough to have been stored uncompressed
        will raise here — presumably the caller treats CompressorError as
        "not compressed"; confirm against the cache client.
        """
        try:
            return lzma.decompress(value)
        except lzma.LZMAError as e:
            raise CompressorError from e
| LzmaCompressor |
python | apache__airflow | providers/common/io/src/airflow/providers/common/io/operators/file_transfer.py | {
"start": 1308,
"end": 4159
class ____(BaseOperator):
    """
    Copies a file from a source to a destination.

    This streams the file from the source to the destination if required,
    so it does not need to fit into memory.

    :param src: The source file path or ObjectStoragePath object.
    :param dst: The destination file path or ObjectStoragePath object.
    :param source_conn_id: The optional source connection id.
    :param dest_conn_id: The optional destination connection id.
    :param overwrite: If False (the default), fail when the destination
        already exists as a file.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:FileTransferOperator`
    """

    template_fields: Sequence[str] = ("src", "dst")

    def __init__(
        self,
        *,
        src: str | ObjectStoragePath,
        dst: str | ObjectStoragePath,
        source_conn_id: str | None = None,
        dest_conn_id: str | None = None,
        overwrite: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.src = src
        self.dst = dst
        self.source_conn_id = source_conn_id
        self.dst_conn_id = dest_conn_id
        self.overwrite = overwrite

    def execute(self, context: Context) -> None:
        """Copy ``src`` to ``dst``, refusing to overwrite unless allowed."""
        src: ObjectStoragePath = self._get_path(self.src, self.source_conn_id)
        dst: ObjectStoragePath = self._get_path(self.dst, self.dst_conn_id)
        if not self.overwrite:
            # NOTE(review): only an existing *file* blocks the copy; an
            # existing directory destination is not rejected here — confirm
            # that is the intended semantics.
            if dst.exists() and dst.is_file():
                raise ValueError(f"Destination {dst} already exists")
        src.copy(dst)

    def get_openlineage_facets_on_start(self) -> OperatorLineage:
        """Report source and destination as OpenLineage input/output datasets."""
        # Imported lazily so the operator works without the openlineage
        # provider installed unless lineage is actually requested.
        from airflow.providers.common.compat.openlineage.facet import Dataset
        from airflow.providers.openlineage.extractors import OperatorLineage

        def _prepare_ol_dataset(path: ObjectStoragePath) -> Dataset:
            # Convert a storage path to an OpenLineage Dataset.
            if hasattr(path, "namespace"):
                # namespace has been added in Airflow 2.9.0; #36410
                return Dataset(namespace=path.namespace, name=path.key)
            # manually recreating namespace
            return Dataset(
                namespace=f"{path.protocol}://{path.bucket}" if path.bucket else path.protocol,
                name=path.key.lstrip(path.sep),
            )

        src: ObjectStoragePath = self._get_path(self.src, self.source_conn_id)
        dst: ObjectStoragePath = self._get_path(self.dst, self.dst_conn_id)

        input_dataset = _prepare_ol_dataset(src)
        output_dataset = _prepare_ol_dataset(dst)

        return OperatorLineage(
            inputs=[input_dataset],
            outputs=[output_dataset],
        )

    @staticmethod
    def _get_path(path: str | ObjectStoragePath, conn_id: str | None) -> ObjectStoragePath:
        # Strings are resolved with the given connection; ObjectStoragePath
        # inputs are returned unchanged (their own conn_id, if any, wins).
        if isinstance(path, str):
            return ObjectStoragePath(path, conn_id=conn_id)
        return path
| FileTransferOperator |
python | pandas-dev__pandas | pandas/tests/indexes/multi/test_indexing.py | {
"start": 7119,
"end": 21336
} | class ____:
def test_get_indexer(self):
major_axis = Index(np.arange(4))
minor_axis = Index(np.arange(2))
major_codes = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_codes = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(
levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
)
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
tm.assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method="pad")
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
tm.assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method="pad")
tm.assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method="ffill")
tm.assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method="backfill")
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
tm.assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method="backfill")
tm.assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method="bfill")
tm.assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
tm.assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(list(range(10)) + list(range(10)))
idx2 = Index(list(range(20)))
msg = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([("a", 1), ("b", 2)])
msg = (
"method='nearest' not implemented yet for MultiIndex; see GitHub issue 9365"
)
with pytest.raises(NotImplementedError, match=msg):
midx.get_indexer(["a"], method="nearest")
msg = "tolerance not implemented yet for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
midx.get_indexer(["a"], method="pad", tolerance=2)
def test_get_indexer_categorical_time(self):
# https://github.com/pandas-dev/pandas/issues/21390
midx = MultiIndex.from_product(
[
Categorical(["a", "b", "c"]),
Categorical(date_range("2012-01-01", periods=3, freq="h")),
]
)
result = midx.get_indexer(midx)
tm.assert_numpy_array_equal(result, np.arange(9, dtype=np.intp))
@pytest.mark.parametrize(
"index_arr,labels,expected",
[
(
[[1, np.nan, 2], [3, 4, 5]],
[1, np.nan, 2],
np.array([-1, -1, -1], dtype=np.intp),
),
([[1, np.nan, 2], [3, 4, 5]], [(np.nan, 4)], np.array([1], dtype=np.intp)),
([[1, 2, 3], [np.nan, 4, 5]], [(1, np.nan)], np.array([0], dtype=np.intp)),
(
[[1, 2, 3], [np.nan, 4, 5]],
[np.nan, 4, 5],
np.array([-1, -1, -1], dtype=np.intp),
),
],
)
def test_get_indexer_with_missing_value(self, index_arr, labels, expected):
# issue 19132
idx = MultiIndex.from_arrays(index_arr)
result = idx.get_indexer(labels)
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_methods(self):
# https://github.com/pandas-dev/pandas/issues/29896
# test getting an indexer for another index with different methods
# confirms that getting an indexer without a filling method, getting an
# indexer and backfilling, and getting an indexer and padding all behave
# correctly in the case where all of the target values fall in between
# several levels in the MultiIndex into which they are getting an indexer
#
# visually, the MultiIndexes used in this test are:
# mult_idx_1:
# 0: -1 0
# 1: 2
# 2: 3
# 3: 4
# 4: 0 0
# 5: 2
# 6: 3
# 7: 4
# 8: 1 0
# 9: 2
# 10: 3
# 11: 4
#
# mult_idx_2:
# 0: 0 1
# 1: 3
# 2: 4
mult_idx_1 = MultiIndex.from_product([[-1, 0, 1], [0, 2, 3, 4]])
mult_idx_2 = MultiIndex.from_product([[0], [1, 3, 4]])
indexer = mult_idx_1.get_indexer(mult_idx_2)
expected = np.array([-1, 6, 7], dtype=indexer.dtype)
tm.assert_almost_equal(expected, indexer)
backfill_indexer = mult_idx_1.get_indexer(mult_idx_2, method="backfill")
expected = np.array([5, 6, 7], dtype=backfill_indexer.dtype)
tm.assert_almost_equal(expected, backfill_indexer)
# ensure the legacy "bfill" option functions identically to "backfill"
backfill_indexer = mult_idx_1.get_indexer(mult_idx_2, method="bfill")
expected = np.array([5, 6, 7], dtype=backfill_indexer.dtype)
tm.assert_almost_equal(expected, backfill_indexer)
pad_indexer = mult_idx_1.get_indexer(mult_idx_2, method="pad")
expected = np.array([4, 6, 7], dtype=pad_indexer.dtype)
tm.assert_almost_equal(expected, pad_indexer)
# ensure the legacy "ffill" option functions identically to "pad"
pad_indexer = mult_idx_1.get_indexer(mult_idx_2, method="ffill")
expected = np.array([4, 6, 7], dtype=pad_indexer.dtype)
tm.assert_almost_equal(expected, pad_indexer)
@pytest.mark.parametrize("method", ["pad", "ffill", "backfill", "bfill", "nearest"])
def test_get_indexer_methods_raise_for_non_monotonic(self, method):
# 53452
mi = MultiIndex.from_arrays([[0, 4, 2], [0, 4, 2]])
if method == "nearest":
err = NotImplementedError
msg = "not implemented yet for MultiIndex"
else:
err = ValueError
msg = "index must be monotonic increasing or decreasing"
with pytest.raises(err, match=msg):
mi.get_indexer([(1, 1)], method=method)
def test_get_indexer_three_or_more_levels(self):
# https://github.com/pandas-dev/pandas/issues/29896
# tests get_indexer() on MultiIndexes with 3+ levels
# visually, these are
# mult_idx_1:
# 0: 1 2 5
# 1: 7
# 2: 4 5
# 3: 7
# 4: 6 5
# 5: 7
# 6: 3 2 5
# 7: 7
# 8: 4 5
# 9: 7
# 10: 6 5
# 11: 7
#
# mult_idx_2:
# 0: 1 1 8
# 1: 1 5 9
# 2: 1 6 7
# 3: 2 1 6
# 4: 2 7 6
# 5: 2 7 8
# 6: 3 6 8
mult_idx_1 = MultiIndex.from_product([[1, 3], [2, 4, 6], [5, 7]])
mult_idx_2 = MultiIndex.from_tuples(
[
(1, 1, 8),
(1, 5, 9),
(1, 6, 7),
(2, 1, 6),
(2, 7, 7),
(2, 7, 8),
(3, 6, 8),
]
)
# sanity check
assert mult_idx_1.is_monotonic_increasing
assert mult_idx_1.is_unique
assert mult_idx_2.is_monotonic_increasing
assert mult_idx_2.is_unique
# show the relationships between the two
assert mult_idx_2[0] < mult_idx_1[0]
assert mult_idx_1[3] < mult_idx_2[1] < mult_idx_1[4]
assert mult_idx_1[5] == mult_idx_2[2]
assert mult_idx_1[5] < mult_idx_2[3] < mult_idx_1[6]
assert mult_idx_1[5] < mult_idx_2[4] < mult_idx_1[6]
assert mult_idx_1[5] < mult_idx_2[5] < mult_idx_1[6]
assert mult_idx_1[-1] < mult_idx_2[6]
indexer_no_fill = mult_idx_1.get_indexer(mult_idx_2)
expected = np.array([-1, -1, 5, -1, -1, -1, -1], dtype=indexer_no_fill.dtype)
tm.assert_almost_equal(expected, indexer_no_fill)
# test with backfilling
indexer_backfilled = mult_idx_1.get_indexer(mult_idx_2, method="backfill")
expected = np.array([0, 4, 5, 6, 6, 6, -1], dtype=indexer_backfilled.dtype)
tm.assert_almost_equal(expected, indexer_backfilled)
# now, the same thing, but forward-filled (aka "padded")
indexer_padded = mult_idx_1.get_indexer(mult_idx_2, method="pad")
expected = np.array([-1, 3, 5, 5, 5, 5, 11], dtype=indexer_padded.dtype)
tm.assert_almost_equal(expected, indexer_padded)
# now, do the indexing in the other direction
assert mult_idx_2[0] < mult_idx_1[0] < mult_idx_2[1]
assert mult_idx_2[0] < mult_idx_1[1] < mult_idx_2[1]
assert mult_idx_2[0] < mult_idx_1[2] < mult_idx_2[1]
assert mult_idx_2[0] < mult_idx_1[3] < mult_idx_2[1]
assert mult_idx_2[1] < mult_idx_1[4] < mult_idx_2[2]
assert mult_idx_2[2] == mult_idx_1[5]
assert mult_idx_2[5] < mult_idx_1[6] < mult_idx_2[6]
assert mult_idx_2[5] < mult_idx_1[7] < mult_idx_2[6]
assert mult_idx_2[5] < mult_idx_1[8] < mult_idx_2[6]
assert mult_idx_2[5] < mult_idx_1[9] < mult_idx_2[6]
assert mult_idx_2[5] < mult_idx_1[10] < mult_idx_2[6]
assert mult_idx_2[5] < mult_idx_1[11] < mult_idx_2[6]
indexer = mult_idx_2.get_indexer(mult_idx_1)
expected = np.array(
[-1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1], dtype=indexer.dtype
)
tm.assert_almost_equal(expected, indexer)
backfill_indexer = mult_idx_2.get_indexer(mult_idx_1, method="bfill")
expected = np.array(
[1, 1, 1, 1, 2, 2, 6, 6, 6, 6, 6, 6], dtype=backfill_indexer.dtype
)
tm.assert_almost_equal(expected, backfill_indexer)
pad_indexer = mult_idx_2.get_indexer(mult_idx_1, method="pad")
expected = np.array(
[0, 0, 0, 0, 1, 2, 5, 5, 5, 5, 5, 5], dtype=pad_indexer.dtype
)
tm.assert_almost_equal(expected, pad_indexer)
def test_get_indexer_crossing_levels(self):
# https://github.com/pandas-dev/pandas/issues/29896
# tests a corner case with get_indexer() with MultiIndexes where, when we
# need to "carry" across levels, proper tuple ordering is respected
#
# the MultiIndexes used in this test, visually, are:
# mult_idx_1:
# 0: 1 1 1 1
# 1: 2
# 2: 2 1
# 3: 2
# 4: 1 2 1 1
# 5: 2
# 6: 2 1
# 7: 2
# 8: 2 1 1 1
# 9: 2
# 10: 2 1
# 11: 2
# 12: 2 2 1 1
# 13: 2
# 14: 2 1
# 15: 2
#
# mult_idx_2:
# 0: 1 3 2 2
# 1: 2 3 2 2
mult_idx_1 = MultiIndex.from_product([[1, 2]] * 4)
mult_idx_2 = MultiIndex.from_tuples([(1, 3, 2, 2), (2, 3, 2, 2)])
# show the tuple orderings, which get_indexer() should respect
assert mult_idx_1[7] < mult_idx_2[0] < mult_idx_1[8]
assert mult_idx_1[-1] < mult_idx_2[1]
indexer = mult_idx_1.get_indexer(mult_idx_2)
expected = np.array([-1, -1], dtype=indexer.dtype)
tm.assert_almost_equal(expected, indexer)
backfill_indexer = mult_idx_1.get_indexer(mult_idx_2, method="bfill")
expected = np.array([8, -1], dtype=backfill_indexer.dtype)
tm.assert_almost_equal(expected, backfill_indexer)
pad_indexer = mult_idx_1.get_indexer(mult_idx_2, method="ffill")
expected = np.array([7, 15], dtype=pad_indexer.dtype)
tm.assert_almost_equal(expected, pad_indexer)
def test_get_indexer_kwarg_validation(self):
# GH#41918
mi = MultiIndex.from_product([range(3), ["A", "B"]])
msg = "limit argument only valid if doing pad, backfill or nearest"
with pytest.raises(ValueError, match=msg):
mi.get_indexer(mi[:-1], limit=4)
msg = "tolerance argument only valid if doing pad, backfill or nearest"
with pytest.raises(ValueError, match=msg):
mi.get_indexer(mi[:-1], tolerance="piano")
def test_get_indexer_nan(self):
# GH#37222
idx1 = MultiIndex.from_product([["A"], [1.0, 2.0]], names=["id1", "id2"])
idx2 = MultiIndex.from_product([["A"], [np.nan, 2.0]], names=["id1", "id2"])
expected = np.array([-1, 1])
result = idx2.get_indexer(idx1)
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
result = idx1.get_indexer(idx2)
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_getitem(idx):
# scalar
assert idx[2] == ("bar", "one")
# slice
result = idx[2:5]
expected = idx[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = idx[[True, False, True, False, True, True]]
result2 = idx[np.array([True, False, True, False, True, True])]
expected = idx[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(idx):
sorted_idx, _ = idx.sortlevel(0)
assert sorted_idx.get_loc("baz") == slice(3, 4)
assert sorted_idx.get_loc("foo") == slice(0, 2)
@pytest.mark.parametrize("box", [list, Index])
def test_getitem_bool_index_all(box):
# GH#22533
ind1 = box([True] * 5)
idx = MultiIndex.from_tuples([(10, 1), (20, 2), (30, 3), (40, 4), (50, 5)])
tm.assert_index_equal(idx[ind1], idx)
ind2 = box([True, False, True, False, False])
expected = MultiIndex.from_tuples([(10, 1), (30, 3)])
tm.assert_index_equal(idx[ind2], expected)
@pytest.mark.parametrize("box", [list, Index])
def test_getitem_bool_index_single(box):
# GH#22533
ind1 = box([True])
idx = MultiIndex.from_tuples([(10, 1)])
tm.assert_index_equal(idx[ind1], idx)
ind2 = box([False])
expected = MultiIndex(
levels=[np.array([], dtype=np.int64), np.array([], dtype=np.int64)],
codes=[[], []],
)
tm.assert_index_equal(idx[ind2], expected)
| TestGetIndexer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVar7.py | {
"start": 2526,
"end": 2650
} | class ____:
def __add__(self, value: float) -> "Thing1": ...
def __radd__(self, value: float) -> "Thing1": ...
| Thing1 |
python | apache__airflow | providers/edge3/src/airflow/providers/edge3/worker_api/datamodels_ui.py | {
"start": 1669,
"end": 2304
} | class ____(EdgeJobBase):
"""Details of the job sent to the scheduler."""
state: Annotated[TaskInstanceState, Field(description="State of the job from the view of the executor.")]
queue: Annotated[
str,
Field(description="Queue for which the task is scheduled/running."),
]
queued_dttm: Annotated[datetime | None, Field(description="When the job was queued.")] = None
edge_worker: Annotated[
str | None, Field(description="The worker processing the job during execution.")
] = None
last_update: Annotated[datetime | None, Field(description="Last heartbeat of the job.")] = None
| Job |
python | spack__spack | lib/spack/spack/util/executable.py | {
"start": 17118,
"end": 17242
} | class ____(spack.error.SpackError):
"""Raised when :func:`which()` can't find a required executable."""
| CommandNotFoundError |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/tests/test_steps/test_simple_docker_step.py | {
"start": 726,
"end": 3448
} | class ____:
async def test_env_variables_set(self, context):
# Define test inputs
title = "test_env_variables_set"
env_variables = {"VAR1": "value1", "VAR2": "value2"}
# Create SimpleDockerStep instance
step = SimpleDockerStep(title=title, context=context, env_variables=env_variables)
# Initialize container
container = await step.init_container()
# Check if environment variables are set
for key, expected_value in env_variables.items():
stdout_value = await container.with_exec(["printenv", key], use_entrypoint=True).stdout()
actual_value = stdout_value.strip()
assert actual_value == expected_value
async def test_mount_paths(self, context):
# Define test inputs
title = "test_mount_paths"
path_to_current_file = Path(__file__).relative_to(Path.cwd())
invalid_path = Path("invalid_path")
paths_to_mount = [
MountPath(path=path_to_current_file, optional=False),
MountPath(path=invalid_path, optional=True),
]
# Create SimpleDockerStep instance
step = SimpleDockerStep(title=title, context=context, paths_to_mount=paths_to_mount)
# Initialize container
container = await step.init_container()
for path_to_mount in paths_to_mount:
exit_code, _stdout, _stderr = await get_exec_result(
container.with_exec(["test", "-f", f"{str(path_to_mount)}"], use_entrypoint=True)
)
expected_exit_code = 1 if path_to_mount.optional else 0
assert exit_code == expected_exit_code
async def test_invalid_mount_paths(self):
path_to_current_file = Path(__file__).relative_to(Path.cwd())
invalid_path = Path("invalid_path")
# No errors expected
MountPath(path=path_to_current_file, optional=False)
MountPath(path=invalid_path, optional=True)
# File not found error expected
with pytest.raises(FileNotFoundError):
MountPath(path=invalid_path, optional=False)
async def test_work_dir(self, context):
# Define test inputs
title = "test_work_dir"
working_directory = "/test"
# Create SimpleDockerStep instance
step = SimpleDockerStep(title=title, context=context, working_directory=working_directory)
# Initialize container
container = await step.init_container()
# Check if working directory is set
stdout_value = await container.with_exec(["pwd"], use_entrypoint=True).stdout()
actual_value = stdout_value.strip()
assert actual_value == working_directory
| TestSimpleDockerStep |
python | apache__airflow | providers/dbt/cloud/tests/unit/dbt/cloud/hooks/test_dbt.py | {
"start": 3004,
"end": 4895
} | class ____:
valid_job_run_statuses = [
1, # QUEUED
2, # STARTING
3, # RUNNING
10, # SUCCESS
20, # ERROR
30, # CANCELLED
[1, 2, 3], # QUEUED, STARTING, and RUNNING
{10, 20, 30}, # SUCCESS, ERROR, and CANCELLED
]
invalid_job_run_statuses = [
123, # Single invalid status
[123, 23, 65], # Multiple invalid statuses
[1, 2, 65], # Not all statuses are valid
"1", # String types are not valid
"12",
["1", "2", "65"],
]
def _get_ids(status_set: Any):
return [f"checking_status_{argval}" for argval in status_set]
@pytest.mark.parametrize(
argnames="statuses",
argvalues=valid_job_run_statuses,
ids=_get_ids(valid_job_run_statuses),
)
def test_valid_job_run_status(self, statuses):
DbtCloudJobRunStatus.check_is_valid(statuses)
@pytest.mark.parametrize(
argnames="statuses",
argvalues=invalid_job_run_statuses,
ids=_get_ids(invalid_job_run_statuses),
)
def test_invalid_job_run_status(self, statuses):
with pytest.raises(ValueError, match=NOT_VAILD_DBT_STATUS):
DbtCloudJobRunStatus.check_is_valid(statuses)
@pytest.mark.parametrize(
argnames="statuses",
argvalues=valid_job_run_statuses,
ids=_get_ids(valid_job_run_statuses),
)
def test_valid_terminal_job_run_status(self, statuses):
DbtCloudJobRunStatus.check_is_valid(statuses)
@pytest.mark.parametrize(
argnames="statuses",
argvalues=invalid_job_run_statuses,
ids=_get_ids(invalid_job_run_statuses),
)
def test_invalid_terminal_job_run_status(self, statuses):
with pytest.raises(ValueError, match=NOT_VAILD_DBT_STATUS):
DbtCloudJobRunStatus.check_is_valid(statuses)
| TestDbtCloudJobRunStatus |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 151888,
"end": 152309
} | class ____(BaseModel, extra="forbid"):
wal_capacity_mb: Optional[int] = Field(default=None, description="Size of a single WAL segment in MB")
wal_segments_ahead: Optional[int] = Field(
default=None, description="Number of WAL segments to create ahead of actually used ones"
)
wal_retain_closed: Optional[int] = Field(default=None, description="Number of closed WAL segments to retain")
| WalConfigDiff |
python | neetcode-gh__leetcode | python/0743-network-delay-time.py | {
"start": 0,
"end": 636
} | class ____:
def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:
edges = collections.defaultdict(list)
for u, v, w in times:
edges[u].append((v, w))
minHeap = [(0, k)]
visit = set()
t = 0
while minHeap:
w1, n1 = heapq.heappop(minHeap)
if n1 in visit:
continue
visit.add(n1)
t = w1
for n2, w2 in edges[n1]:
if n2 not in visit:
heapq.heappush(minHeap, (w1 + w2, n2))
return t if len(visit) == n else -1
# O(E * logV)
| Solution |
python | run-llama__llama_index | llama-index-integrations/indices/llama-index-indices-managed-lancedb/llama_index/indices/managed/lancedb/query_engine.py | {
"start": 918,
"end": 6042
} | class ____(RetrieverQueryEngine):
def __init__(
self,
retriever: LanceDBRetriever,
response_synthesizer: Optional[BaseSynthesizer] = None,
node_postprocessors: List[BaseNodePostprocessor] = None,
callback_manager: Optional[CallbackManager] = None,
):
super().__init__(
retriever, response_synthesizer, node_postprocessors, callback_manager
)
@override
def retrieve(self, query_bundle: ExtendedQueryBundle) -> List[NodeWithScore]:
nodes = self._retriever._retrieve(query_bundle)
return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)
@override
async def aretrieve(self, query_bundle: ExtendedQueryBundle) -> List[NodeWithScore]:
nodes = await self._retriever._aretrieve(query_bundle)
return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)
@override
@dispatcher.span
def _query(self, query_bundle: ExtendedQueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
nodes = self.retrieve(query_bundle)
response = self._response_synthesizer.synthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
@override
@dispatcher.span
async def _aquery(self, query_bundle: ExtendedQueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
nodes = await self.aretrieve(query_bundle)
response = await self._response_synthesizer.asynthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
@override
@dispatcher.span
def query(
self,
query_str: Optional[str] = None,
query_image: Optional[
Union[Image.Image, ImageBlock, ImageDocument, str]
] = None,
query_image_path: Optional[os.PathLike[str]] = None,
) -> RESPONSE_TYPE:
"""
Executes a query against the managed LanceDB index.
Args:
query_str (Optional[str]): The text query string to search for. Defaults to None.
query_image (Optional[Union[Image.Image, ImageBlock, ImageDocument, str]]): An image or image-like object to use as part of the query. Can be a PIL Image, ImageBlock, ImageDocument, or a file path as a string. Defaults to None.
query_image_path (Optional[os.PathLike[str]]): The file path to an image to use as part of the query. Defaults to None.
Returns:
RESPONSE_TYPE: The result of the query.
Notes:
- At least one of `query_str`, `query_image`, or `query_image_path` should be provided.
"""
qb = ExtendedQueryBundle(
query_str=query_str, image_path=query_image_path, image=query_image
)
dispatcher.event(QueryStartEvent(query=qb))
with self.callback_manager.as_trace("query"):
if not query_str:
query_str = ""
query_result = self._query(qb)
dispatcher.event(QueryEndEvent(query=qb, response=query_result))
return query_result
@override
@dispatcher.span
async def aquery(
self,
query_str: Optional[str] = None,
query_image: Optional[
Union[Image.Image, ImageBlock, ImageDocument, str]
] = None,
query_image_path: Optional[os.PathLike[str]] = None,
) -> RESPONSE_TYPE:
"""
Asynchronously executes a query against the managed LanceDB index.
Args:
query_str (Optional[str]): The text query string to search for. Defaults to None.
query_image (Optional[Union[Image.Image, ImageBlock, ImageDocument, str]]): An image or image-like object to use as part of the query. Can be a PIL Image, ImageBlock, ImageDocument, or a file path as a string. Defaults to None.
query_image_path (Optional[os.PathLike[str]]): The file path to an image to use as part of the query. Defaults to None.
Returns:
RESPONSE_TYPE: The result of the query.
Notes:
- At least one of `query_str`, `query_image`, or `query_image_path` should be provided.
"""
qb = ExtendedQueryBundle(
query_str=query_str, image_path=query_image_path, image=query_image
)
dispatcher.event(QueryStartEvent(query=qb))
with self.callback_manager.as_trace("query"):
if not query_str:
query_str = ""
query_result = await self._aquery(qb)
dispatcher.event(QueryEndEvent(query=qb, response=query_result))
return query_result
| LanceDBRetrieverQueryEngine |
python | wandb__wandb | wandb/vendor/pygments/lexers/jvm.py | {
"start": 5408,
"end": 18630
} | class ____(RegexLexer):
"""
For `Scala <http://www.scala-lang.org>`_ source code.
"""
name = 'Scala'
aliases = ['scala']
filenames = ['*.scala']
mimetypes = ['text/x-scala']
flags = re.MULTILINE | re.DOTALL
# don't use raw unicode strings!
op = (u'[-~\\^\\*!%&\\\\<>\\|+=:/?@\u00a6-\u00a7\u00a9\u00ac\u00ae\u00b0-\u00b1'
u'\u00b6\u00d7\u00f7\u03f6\u0482\u0606-\u0608\u060e-\u060f\u06e9'
u'\u06fd-\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0cf1-\u0cf2'
u'\u0d79\u0f01-\u0f03\u0f13-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38'
u'\u0fbe-\u0fc5\u0fc7-\u0fcf\u109e-\u109f\u1360\u1390-\u1399\u1940'
u'\u19e0-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2044\u2052\u207a-\u207c'
u'\u208a-\u208c\u2100-\u2101\u2103-\u2106\u2108-\u2109\u2114\u2116-\u2118'
u'\u211e-\u2123\u2125\u2127\u2129\u212e\u213a-\u213b\u2140-\u2144'
u'\u214a-\u214d\u214f\u2190-\u2328\u232b-\u244a\u249c-\u24e9\u2500-\u2767'
u'\u2794-\u27c4\u27c7-\u27e5\u27f0-\u2982\u2999-\u29d7\u29dc-\u29fb'
u'\u29fe-\u2b54\u2ce5-\u2cea\u2e80-\u2ffb\u3004\u3012-\u3013\u3020'
u'\u3036-\u3037\u303e-\u303f\u3190-\u3191\u3196-\u319f\u31c0-\u31e3'
u'\u3200-\u321e\u322a-\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u33ff'
u'\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ufb29\ufdfd\ufe62\ufe64-\ufe66'
u'\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe4\uffe8-\uffee\ufffc-\ufffd]+')
letter = (u'[a-zA-Z\\$_\u00aa\u00b5\u00ba\u00c0-\u00d6\u00d8-\u00f6'
u'\u00f8-\u02af\u0370-\u0373\u0376-\u0377\u037b-\u037d\u0386'
u'\u0388-\u03f5\u03f7-\u0481\u048a-\u0556\u0561-\u0587\u05d0-\u05f2'
u'\u0621-\u063f\u0641-\u064a\u066e-\u066f\u0671-\u06d3\u06d5'
u'\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5'
u'\u07b1\u07ca-\u07ea\u0904-\u0939\u093d\u0950\u0958-\u0961'
u'\u0972-\u097f\u0985-\u09b9\u09bd\u09ce\u09dc-\u09e1\u09f0-\u09f1'
u'\u0a05-\u0a39\u0a59-\u0a5e\u0a72-\u0a74\u0a85-\u0ab9\u0abd'
u'\u0ad0-\u0ae1\u0b05-\u0b39\u0b3d\u0b5c-\u0b61\u0b71\u0b83-\u0bb9'
u'\u0bd0\u0c05-\u0c3d\u0c58-\u0c61\u0c85-\u0cb9\u0cbd\u0cde-\u0ce1'
u'\u0d05-\u0d3d\u0d60-\u0d61\u0d7a-\u0d7f\u0d85-\u0dc6\u0e01-\u0e30'
u'\u0e32-\u0e33\u0e40-\u0e45\u0e81-\u0eb0\u0eb2-\u0eb3\u0ebd-\u0ec4'
u'\u0edc-\u0f00\u0f40-\u0f6c\u0f88-\u0f8b\u1000-\u102a\u103f'
u'\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070'
u'\u1075-\u1081\u108e\u10a0-\u10fa\u1100-\u135a\u1380-\u138f'
u'\u13a0-\u166c\u166f-\u1676\u1681-\u169a\u16a0-\u16ea\u16ee-\u1711'
u'\u1720-\u1731\u1740-\u1751\u1760-\u1770\u1780-\u17b3\u17dc'
u'\u1820-\u1842\u1844-\u18a8\u18aa-\u191c\u1950-\u19a9\u19c1-\u19c7'
u'\u1a00-\u1a16\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf'
u'\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1d00-\u1d2b\u1d62-\u1d77'
u'\u1d79-\u1d9a\u1e00-\u1fbc\u1fbe\u1fc2-\u1fcc\u1fd0-\u1fdb'
u'\u1fe0-\u1fec\u1ff2-\u1ffc\u2071\u207f\u2102\u2107\u210a-\u2113'
u'\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139'
u'\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c7c'
u'\u2c80-\u2ce4\u2d00-\u2d65\u2d80-\u2dde\u3006-\u3007\u3021-\u3029'
u'\u3038-\u303a\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff-\u318e'
u'\u31a0-\u31b7\u31f0-\u31ff\u3400-\u4db5\u4e00-\ua014\ua016-\ua48c'
u'\ua500-\ua60b\ua610-\ua61f\ua62a-\ua66e\ua680-\ua697\ua722-\ua76f'
u'\ua771-\ua787\ua78b-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822'
u'\ua840-\ua873\ua882-\ua8b3\ua90a-\ua925\ua930-\ua946\uaa00-\uaa28'
u'\uaa40-\uaa42\uaa44-\uaa4b\uac00-\ud7a3\uf900-\ufb1d\ufb1f-\ufb28'
u'\ufb2a-\ufd3d\ufd50-\ufdfb\ufe70-\ufefc\uff21-\uff3a\uff41-\uff5a'
u'\uff66-\uff6f\uff71-\uff9d\uffa0-\uffdc]')
upper = (u'[A-Z\\$_\u00c0-\u00d6\u00d8-\u00de\u0100\u0102\u0104\u0106\u0108'
u'\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c'
u'\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130'
u'\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145'
u'\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a'
u'\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e'
u'\u0170\u0172\u0174\u0176\u0178-\u0179\u017b\u017d\u0181-\u0182'
u'\u0184\u0186-\u0187\u0189-\u018b\u018e-\u0191\u0193-\u0194'
u'\u0196-\u0198\u019c-\u019d\u019f-\u01a0\u01a2\u01a4\u01a6-\u01a7'
u'\u01a9\u01ac\u01ae-\u01af\u01b1-\u01b3\u01b5\u01b7-\u01b8\u01bc'
u'\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9'
u'\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee'
u'\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204'
u'\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218'
u'\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c'
u'\u022e\u0230\u0232\u023a-\u023b\u023d-\u023e\u0241\u0243-\u0246'
u'\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u0386\u0388-\u038f'
u'\u0391-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0'
u'\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7'
u'\u03f9-\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a'
u'\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e'
u'\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a'
u'\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae'
u'\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0-\u04c1'
u'\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6'
u'\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea'
u'\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe'
u'\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512'
u'\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0531-\u0556'
u'\u10a0-\u10c5\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e'
u'\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22'
u'\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36'
u'\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a'
u'\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e'
u'\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72'
u'\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86'
u'\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2'
u'\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6'
u'\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca'
u'\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede'
u'\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2'
u'\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d'
u'\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59-\u1f5f'
u'\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb'
u'\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112'
u'\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133'
u'\u213e-\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67'
u'\u2c69\u2c6b\u2c6d-\u2c6f\u2c72\u2c75\u2c80\u2c82\u2c84\u2c86'
u'\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a'
u'\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae'
u'\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2'
u'\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6'
u'\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\ua640\ua642\ua644\ua646'
u'\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a'
u'\ua65c\ua65e\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682'
u'\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696'
u'\ua722\ua724\ua726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736'
u'\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a'
u'\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e'
u'\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b'
u'\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\uff21-\uff3a]')
idrest = u'%s(?:%s|[0-9])*(?:(?<=_)%s)?' % (letter, letter, op)
letter_letter_digit = u'%s(?:%s|\d)*' % (letter, letter)
tokens = {
'root': [
# method names
(r'(class|trait|object)(\s+)', bygroups(Keyword, Text), 'class'),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(u'@%s' % idrest, Name.Decorator),
(u'(abstract|ca(?:se|tch)|d(?:ef|o)|e(?:lse|xtends)|'
u'f(?:inal(?:ly)?|or(?:Some)?)|i(?:f|mplicit)|'
u'lazy|match|new|override|pr(?:ivate|otected)'
u'|re(?:quires|turn)|s(?:ealed|uper)|'
u't(?:h(?:is|row)|ry)|va[lr]|w(?:hile|ith)|yield)\\b|'
u'(<[%:-]|=>|>:|[#=@_\u21D2\u2190])(\\b|(?=\\s)|$)', Keyword),
(u':(?!%s)' % op, Keyword, 'type'),
(u'%s%s\\b' % (upper, idrest), Name.Class),
(r'(true|false|null)\b', Keyword.Constant),
(r'(import|package)(\s+)', bygroups(Keyword, Text), 'import'),
(r'(type)(\s+)', bygroups(Keyword, Text), 'type'),
(r'""".*?"""(?!")', String),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
(u"'%s" % idrest, Text.Symbol),
(r'[fs]"""', String, 'interptriplestring'), # interpolated strings
(r'[fs]"', String, 'interpstring'), # interpolated strings
(r'raw"(\\\\|\\"|[^"])*"', String), # raw strings
# (ur'(\.)(%s|%s|`[^`]+`)' % (idrest, op), bygroups(Operator,
# Name.Attribute)),
(idrest, Name),
(r'`[^`]+`', Name),
(r'\[', Operator, 'typeparam'),
(r'[(){};,.#]', Operator),
(op, Operator),
(r'([0-9][0-9]*\.[0-9]*|\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?',
Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'class': [
(u'(%s|%s|`[^`]+`)(\\s*)(\\[)' % (idrest, op),
bygroups(Name.Class, Text, Operator), 'typeparam'),
(r'\s+', Text),
(r'\{', Operator, '#pop'),
(r'\(', Operator, '#pop'),
(r'//.*?\n', Comment.Single, '#pop'),
(u'%s|%s|`[^`]+`' % (idrest, op), Name.Class, '#pop'),
],
'type': [
(r'\s+', Text),
(r'<[%:]|>:|[#_]|forSome|type', Keyword),
(u'([,);}]|=>|=|\u21d2)(\\s*)', bygroups(Operator, Text), '#pop'),
(r'[({]', Operator, '#push'),
(u'((?:%s|%s|`[^`]+`)(?:\\.(?:%s|%s|`[^`]+`))*)(\\s*)(\\[)' %
(idrest, op, idrest, op),
bygroups(Keyword.Type, Text, Operator), ('#pop', 'typeparam')),
(u'((?:%s|%s|`[^`]+`)(?:\\.(?:%s|%s|`[^`]+`))*)(\\s*)$' %
(idrest, op, idrest, op),
bygroups(Keyword.Type, Text), '#pop'),
(r'//.*?\n', Comment.Single, '#pop'),
(u'\\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
],
'typeparam': [
(r'[\s,]+', Text),
(u'<[%:]|=>|>:|[#_\u21D2]|forSome|type', Keyword),
(r'([\])}])', Operator, '#pop'),
(r'[(\[{]', Operator, '#push'),
(u'\\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
],
'comment': [
(r'[^/*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
'import': [
(u'(%s|\\.)+' % idrest, Name.Namespace, '#pop')
],
'interpstringcommon': [
(r'[^"$\\]+', String),
(r'\$\$', String),
(r'\$' + letter_letter_digit, String.Interpol),
(r'\$\{', String.Interpol, 'interpbrace'),
(r'\\.', String),
],
'interptriplestring': [
(r'"""(?!")', String, '#pop'),
(r'"', String),
include('interpstringcommon'),
],
'interpstring': [
(r'"', String, '#pop'),
include('interpstringcommon'),
],
'interpbrace': [
(r'\}', String.Interpol, '#pop'),
(r'\{', String.Interpol, '#push'),
include('root'),
],
}
| ScalaLexer |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_memorystore.py | {
"start": 67689,
"end": 71415
} | class ____(GoogleCloudBaseOperator):
"""
Updates the defined Memcached Parameters for an existing Instance.
This method only stages the parameters, it must be followed by apply_parameters
to apply the parameters to nodes of the Memcached Instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreMemcachedApplyParametersOperator`
:param update_mask: Required. Mask of fields to update.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param parameters: The parameters to apply to the instance.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.memcache_v1beta2.types.cloud_memcache.MemcacheParameters`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = (
"update_mask",
"parameters",
"location",
"instance_id",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (MemcachedInstanceDetailsLink(),)
def __init__(
self,
*,
update_mask: dict | FieldMask,
parameters: dict | cloud_memcache.MemcacheParameters,
location: str,
instance_id: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.update_mask = update_mask
self.parameters = parameters
self.location = location
self.instance_id = instance_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"instance_id": self.instance_id,
"location_id": self.location,
"project_id": self.project_id,
}
def execute(self, context: Context):
hook = CloudMemorystoreMemcachedHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
hook.update_parameters(
update_mask=self.update_mask,
parameters=self.parameters,
location=self.location,
instance_id=self.instance_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
MemcachedInstanceDetailsLink.persist(context=context)
| CloudMemorystoreMemcachedUpdateParametersOperator |
python | django__django | tests/template_tests/syntax_tests/i18n/base.py | {
"start": 381,
"end": 687
} | class ____(SimpleTestCase):
"""
Tests for template rendering when multiple locales are activated during the
lifetime of the same process.
"""
def setUp(self):
self._old_language = get_language()
self.addCleanup(activate, self._old_language)
| MultipleLocaleActivationTestCase |
python | streamlit__streamlit | lib/streamlit/elements/lib/column_types.py | {
"start": 5945,
"end": 6008
} | class ____(TypedDict):
type: Literal["json"]
| JsonColumnConfig |
python | gevent__gevent | src/greentest/3.14/test_httpservers.py | {
"start": 47124,
"end": 47390
} | class ____:
def __init__(self):
self.datas = []
def write(self, data):
self.datas.append(data)
def getData(self):
return b''.join(self.datas)
@property
def numWrites(self):
return len(self.datas)
| AuditableBytesIO |
python | giampaolo__psutil | tests/test_heap.py | {
"start": 4998,
"end": 5147
} | class ____(PsutilTestCase):
def setUp(self):
trim_memory()
@classmethod
def tearDownClass(cls):
trim_memory()
| HeapTestCase |
python | getsentry__sentry | tests/acceptance/test_organization_plugin_detail_view.py | {
"start": 326,
"end": 1696
} | class ____(AcceptanceTestCase):
@cached_property
def plugin(self) -> OpsGeniePlugin:
return OpsGeniePlugin()
def setUp(self) -> None:
super().setUp()
# need at least two projects
self.project = self.create_project(organization=self.organization, name="Back end")
self.create_project(organization=self.organization, name="Front End")
self.login_as(self.user)
def load_page(self, slug: str, configuration_tab: bool = False) -> None:
url = f"/settings/{self.organization.slug}/plugins/{slug}/"
if configuration_tab:
url += "?tab=configurations"
self.browser.get(url)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
def test_uninstallation(self) -> None:
self.plugin.set_option("api_key", "7c8951d1", self.project)
self.plugin.set_option("alert_url", "https://api.opsgenie.com/v2/alerts", self.project)
self.load_page("opsgenie", configuration_tab=True)
detail_view_page = OrganizationAbstractDetailViewPage(browser=self.browser)
assert self.browser.element_exists('[aria-label="Configure"]')
detail_view_page.uninstall()
self.browser.wait_until('[data-test-id="toast-success"]')
assert not self.browser.element_exists('[aria-label="Configure"]')
| OrganizationPluginDetailedView |
python | google__jax | jax/experimental/jax2tf/tests/primitives_test.py | {
"start": 3007,
"end": 7877
} | class ____(tf_test_util.JaxToTfTestCase):
# This test runs for all primitive harnesses. For each primitive "xxx" the
# test will be called "test_prim_xxx_..." and the custom parameters for
# the test are defined in the class method "jax2tf_limitations.Jax2TfLimitation.xxx".
# See more details in the comment at top of file and in Jax2TfLimitation class.
# If you want to run this test for only one harness, add parameter
# `one_containing="foo"` to parameterized below.
@test_harnesses.parameterized(
test_harnesses.all_harnesses,
include_jax_unimpl=False,
#one_containing="",
)
@jtu.ignore_warning(
category=UserWarning, message="Using reduced precision for gradient.*")
def test_prim(self, harness: test_harnesses.Harness):
limitations = Jax2TfLimitation.limitations_for_harness(harness)
device = jtu.device_under_test()
limitations = tuple(filter(lambda l: l.filter(device=device,
dtype=harness.dtype), limitations))
func_jax = harness.dyn_fun
args = harness.dyn_args_maker(self.rng())
if ("eigh" == harness.group_name and
np.complex64 == harness.dtype and
device == "tpu"):
raise unittest.SkipTest("b/264716764: error on tf.cast from c64 to f32")
if "eigh" == harness.group_name and device == "cpu":
raise unittest.SkipTest(
"Equality comparisons on eigendecompositions are not stable.")
if device == "gpu" and "lu" in harness.fullname:
raise unittest.SkipTest("b/269388847: lu failures on GPU")
def skipCustomCallTest(target: str):
raise unittest.SkipTest(
f"TODO(b/272239584): custom call target not guaranteed stable: {target}")
if device == "gpu":
if "custom_linear_solve_" in harness.fullname:
skipCustomCallTest("cusolver_geqrf, cublas_geqrf_batched")
if "svd_shape" in harness.fullname:
skipCustomCallTest("cusolver_gesvdj")
if "tridiagonal_solve_shape" in harness.fullname:
skipCustomCallTest("cusparse_gtsv2_f32, cusparse_gtsv2_f64")
associative_scan_reductions = harness.params.get("associative_scan_reductions", False)
try:
with jax.jax2tf_associative_scan_reductions(associative_scan_reductions):
self.ConvertAndCompare(func_jax, *args, limitations=limitations)
except Exception as e:
# TODO(b/264596006): custom calls are not registered properly with TF in OSS
if "does not work with custom calls" in str(e):
logging.warning("Suppressing error %s", e)
raise unittest.SkipTest("b/264596006: custom calls in native serialization fail in TF")
else:
raise e
# The rest of the test are checking special cases
@parameterized.named_parameters(
dict(testcase_name=f"_{f_jax.__name__}",
f_jax=f_jax)
for f_jax in [jnp.add, jnp.subtract, jnp.multiply, jnp.divide,
jnp.less, jnp.less_equal, jnp.equal, jnp.greater,
jnp.greater_equal, jnp.not_equal, jnp.maximum,
jnp.minimum])
def test_type_promotion(self, f_jax=jnp.add):
# We only test a few types here, as tensorflow does not support many
# types like uint* or bool in binary ops.
types = [dtypes.bfloat16, np.int32, np.int64, np.float32]
for x_dtype in types:
for y_dtype in types:
x = np.array([1, 2], dtype=x_dtype)
y = np.array([3, 4], dtype=y_dtype)
self.ConvertAndCompare(f_jax, x, y)
def test_boolean_gather(self):
values = np.array([[True, True], [False, True], [False, False]],
dtype=np.bool_)
indices = np.array([0, 1], dtype=np.int32)
for axis in [0, 1]:
f_jax = jax.jit(lambda v, i: jnp.take(v, i, axis=axis)) # pylint: disable=cell-var-from-loop
self.ConvertAndCompare(f_jax, values, indices)
def test_gather_rank_change(self):
params = jnp.array([[1.0, 1.5, 2.0], [2.0, 2.5, 3.0], [3.0, 3.5, 4.0]])
indices = jnp.array([[1, 1, 2], [0, 1, 0]])
f_jax = jax.jit(lambda i: params[i])
self.ConvertAndCompare(f_jax, indices)
@jtu.sample_product(f_jax=REDUCE)
def test_reduce_ops_with_numerical_input(self, f_jax):
values = np.array([1, 2, 3], dtype=np.float32)
self.ConvertAndCompare(f_jax, values)
@jtu.sample_product(op=["add", "max", "min", "multiply", "set"])
def test_scatter_static(self, op):
values = np.ones((5, 6), dtype=np.float32)
update = np.float32(6.)
f_jax = jax.jit(lambda v, u: getattr(v.at[::2, 3:], op)(u))
self.ConvertAndCompare(f_jax, values, update)
@jtu.sample_product(f_jax=REDUCE)
def test_reduce_ops_with_boolean_input(self, f_jax):
values = np.array([True, False, True], dtype=np.bool_)
self.ConvertAndCompare(f_jax, values)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| JaxPrimitiveTest |
python | aimacode__aima-python | nlp4e.py | {
"start": 14763,
"end": 20036
} | class ____(Problem):
def __init__(self, initial, grammar, goal='S'):
"""
:param initial: the initial state of words in a list.
:param grammar: a grammar object
:param goal: the goal state, usually S
"""
super(TextParsingProblem, self).__init__(initial, goal)
self.grammar = grammar
self.combinations = defaultdict(list) # article combinations
# backward lookup of rules
for rule in grammar.rules:
for comb in grammar.rules[rule]:
self.combinations[' '.join(comb)].append(rule)
def actions(self, state):
actions = []
categories = self.grammar.categories
# first change each word to the article of its category
for i in range(len(state)):
word = state[i]
if word in categories:
for X in categories[word]:
state[i] = X
actions.append(copy.copy(state))
state[i] = word
# if all words are replaced by articles, replace combinations of articles by inferring rules.
if not actions:
for start in range(len(state)):
for end in range(start, len(state) + 1):
# try combinations between (start, end)
articles = ' '.join(state[start:end])
for c in self.combinations[articles]:
actions.append(state[:start] + [c] + state[end:])
return actions
def result(self, state, action):
return action
def h(self, state):
# heuristic function
return len(state)
def astar_search_parsing(words, gramma):
"""bottom-up parsing using A* search to find whether a list of words is a sentence"""
# init the problem
problem = TextParsingProblem(words, gramma, 'S')
state = problem.initial
# init the searching frontier
frontier = [(len(state) + problem.h(state), state)]
heapq.heapify(frontier)
while frontier:
# search the frontier node with lowest cost first
cost, state = heapq.heappop(frontier)
actions = problem.actions(state)
for action in actions:
new_state = problem.result(state, action)
# update the new frontier node to the frontier
if new_state == [problem.goal]:
return problem.goal
if new_state != state:
heapq.heappush(frontier, (len(new_state) + problem.h(new_state), new_state))
return False
def beam_search_parsing(words, gramma, b=3):
"""bottom-up text parsing using beam search"""
# init problem
problem = TextParsingProblem(words, gramma, 'S')
# init frontier
frontier = [(len(problem.initial), problem.initial)]
heapq.heapify(frontier)
# explore the current frontier and keep b new states with lowest cost
def explore(frontier):
new_frontier = []
for cost, state in frontier:
# expand the possible children states of current state
if not problem.goal_test(' '.join(state)):
actions = problem.actions(state)
for action in actions:
new_state = problem.result(state, action)
if [len(new_state), new_state] not in new_frontier and new_state != state:
new_frontier.append([len(new_state), new_state])
else:
return problem.goal
heapq.heapify(new_frontier)
# only keep b states
return heapq.nsmallest(b, new_frontier)
while frontier:
frontier = explore(frontier)
if frontier == problem.goal:
return frontier
return False
# ______________________________________________________________________________
# 22.4 Augmented Grammar
g = Grammar("arithmetic_expression", # A Grammar of Arithmetic Expression
rules={
'Number_0': 'Digit_0', 'Number_1': 'Digit_1', 'Number_2': 'Digit_2',
'Number_10': 'Number_1 Digit_0', 'Number_11': 'Number_1 Digit_1',
'Number_100': 'Number_10 Digit_0',
'Exp_5': ['Number_5', '( Exp_5 )', 'Exp_1, Operator_+ Exp_4', 'Exp_2, Operator_+ Exp_3',
'Exp_0, Operator_+ Exp_5', 'Exp_3, Operator_+ Exp_2', 'Exp_4, Operator_+ Exp_1',
'Exp_5, Operator_+ Exp_0', 'Exp_1, Operator_* Exp_5'], # more possible combinations
'Operator_+': operator.add, 'Operator_-': operator.sub, 'Operator_*': operator.mul,
'Operator_/': operator.truediv,
'Digit_0': 0, 'Digit_1': 1, 'Digit_2': 2, 'Digit_3': 3, 'Digit_4': 4
},
lexicon={})
g = Grammar("Ali loves Bob", # A example grammer of Ali loves Bob example
rules={
"S_loves_ali_bob": "NP_ali, VP_x_loves_x_bob", "S_loves_bob_ali": "NP_bob, VP_x_loves_x_ali",
"VP_x_loves_x_bob": "Verb_xy_loves_xy NP_bob", "VP_x_loves_x_ali": "Verb_xy_loves_xy NP_ali",
"NP_bob": "Name_bob", "NP_ali": "Name_ali"
},
lexicon={
"Name_ali": "Ali", "Name_bob": "Bob", "Verb_xy_loves_xy": "loves"
})
| TextParsingProblem |
python | streamlit__streamlit | lib/streamlit/errors.py | {
"start": 12167,
"end": 12619
} | class ____(LocalizableStreamlitException):
"""Exception raised when the `min_value` is greater than the `value`."""
def __init__(
self,
value: int | float | date | time,
min_value: int | float | date | time,
) -> None:
super().__init__(
"The `value` {value} is less than the `min_value` {min_value}.",
value=value,
min_value=min_value,
)
| StreamlitValueBelowMinError |
python | bokeh__bokeh | tests/support/util/screenshot.py | {
"start": 1514,
"end": 1575
} | class ____(TypedDict):
url: str | None
text: str
| JSError |
python | coleifer__peewee | tests/models.py | {
"start": 169969,
"end": 170377
} | class ____(ModelTestCase):
database = get_in_memory_db()
requires = [User]
def test_execute_query(self):
for username in ('huey', 'zaizee'):
User.create(username=username)
query = User.select().order_by(User.username.desc())
cursor = self.database.execute(query)
self.assertEqual([row[1] for row in cursor], ['zaizee', 'huey'])
| TestDatabaseExecuteQuery |
python | celery__celery | celery/beat.py | {
"start": 1381,
"end": 2126
} | class ____:
"""A lazy function declared in 'beat_schedule' and called before sending to worker.
Example:
beat_schedule = {
'test-every-5-minutes': {
'task': 'test',
'schedule': 300,
'kwargs': {
"current": BeatCallBack(datetime.datetime.now)
}
}
}
"""
def __init__(self, func, *args, **kwargs):
self._func = func
self._func_params = {
"args": args,
"kwargs": kwargs
}
def __call__(self):
return self.delay()
def delay(self):
return self._func(*self._func_params["args"], **self._func_params["kwargs"])
@total_ordering
| BeatLazyFunc |
python | lepture__authlib | tests/flask/cache.py | {
"start": 87,
"end": 1715
} | class ____:
"""A SimpleCache for testing. Copied from Werkzeug."""
def __init__(self, threshold=500, default_timeout=300):
self.default_timeout = default_timeout
self._cache = {}
self.clear = self._cache.clear
self._threshold = threshold
def _prune(self):
if len(self._cache) > self._threshold:
now = time.time()
toremove = []
for idx, (key, (expires, _)) in enumerate(self._cache.items()):
if (expires != 0 and expires <= now) or idx % 3 == 0:
toremove.append(key)
for key in toremove:
self._cache.pop(key, None)
def _normalize_timeout(self, timeout):
if timeout is None:
timeout = self.default_timeout
if timeout > 0:
timeout = time.time() + timeout
return timeout
def get(self, key):
try:
expires, value = self._cache[key]
if expires == 0 or expires > time.time():
return pickle.loads(value)
except (KeyError, pickle.PickleError):
return None
def set(self, key, value, timeout=None):
expires = self._normalize_timeout(timeout)
self._prune()
self._cache[key] = (expires, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
return True
def delete(self, key):
return self._cache.pop(key, None) is not None
def has(self, key):
try:
expires, value = self._cache[key]
return expires == 0 or expires > time.time()
except KeyError:
return False
| SimpleCache |
python | python-openxml__python-docx | tests/test_shared.py | {
"start": 2900,
"end": 4121
} | class ____:
"""Unit-test suite for `docx.shared.RGBColor` objects."""
def it_is_natively_constructed_using_three_ints_0_to_255(self):
rgb_color = RGBColor(0x12, 0x34, 0x56)
assert isinstance(rgb_color, RGBColor)
# -- it is comparable to a tuple[int, int, int] --
assert rgb_color == (18, 52, 86)
def it_raises_with_helpful_error_message_on_wrong_types(self):
with pytest.raises(TypeError, match=r"RGBColor\(\) takes three integer valu"):
RGBColor("12", "34", "56") # pyright: ignore
with pytest.raises(ValueError, match=r"\(\) takes three integer values 0-255"):
RGBColor(-1, 34, 56)
with pytest.raises(ValueError, match=r"RGBColor\(\) takes three integer valu"):
RGBColor(12, 256, 56)
def it_can_construct_from_a_hex_string_rgb_value(self):
rgb = RGBColor.from_string("123456")
assert rgb == RGBColor(0x12, 0x34, 0x56)
def it_can_provide_a_hex_string_rgb_value(self):
assert str(RGBColor(0xF3, 0x8A, 0x56)) == "F38A56"
def it_has_a_custom_repr(self):
rgb_color = RGBColor(0x42, 0xF0, 0xBA)
assert repr(rgb_color) == "RGBColor(0x42, 0xf0, 0xba)"
| DescribeRGBColor |
python | getsentry__sentry | src/sentry/users/api/endpoints/user_emails.py | {
"start": 2565,
"end": 9536
} | class ____(UserEndpoint):
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
"GET": ApiPublishStatus.PRIVATE,
"PUT": ApiPublishStatus.PRIVATE,
"POST": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.UNOWNED
def get(self, request: Request, user: User) -> Response:
"""
Returns a list of emails. Primary email will have `isPrimary: true`
"""
emails = user.emails.all()
return self.respond(
serialize(list(emails), user=user, serializer=UserEmailSerializer()),
status=200,
)
@sudo_required
def post(self, request: Request, user: User) -> Response:
"""
Add a secondary email address to account
"""
validator = EmailValidator(data=request.data)
if not validator.is_valid():
return self.respond(validator.errors, status=400)
result = validator.validated_data
email = result["email"].lower().strip()
use_signed_urls = options.get("user-settings.signed-url-confirmation-emails")
try:
if use_signed_urls:
add_email_signed(email, user)
logger.info(
"user.email.add",
extra={
"user_id": user.id,
"ip_address": request.META["REMOTE_ADDR"],
"used_signed_url": True,
"email": email,
},
)
return self.respond(
{"detail": _("A verification email has been sent. Please check your inbox.")},
status=201,
)
else:
new_useremail = add_email(email, user)
logger.info(
"user.email.add",
extra={
"user_id": user.id,
"ip_address": request.META["REMOTE_ADDR"],
"used_signed_url": False,
"email": email,
},
)
return self.respond(
serialize(new_useremail, user=request.user, serializer=UserEmailSerializer()),
status=201,
)
except DuplicateEmailError:
return self.respond(
{"detail": _("That email address is already associated with your account.")},
status=409,
)
@sudo_required
def put(self, request: Request, user: User) -> Response:
"""
Update a primary email address
"""
validator = EmailValidator(data=request.data)
if not validator.is_valid():
return self.respond(validator.errors, status=400)
result = validator.validated_data
old_email = user.email.lower()
new_email = result["email"].lower()
new_useremail = user.emails.filter(email__iexact=new_email).first()
# If email doesn't exist for user, attempt to add new email
if not new_useremail:
try:
new_useremail = add_email(new_email, user)
except DuplicateEmailError:
new_useremail = user.emails.get(email__iexact=new_email)
else:
logger.info(
"user.email.add",
extra={
"user_id": user.id,
"ip_address": request.META["REMOTE_ADDR"],
"email": new_useremail.email,
},
)
new_email = new_useremail.email
# Check if email is in use
# TODO(dcramer): this needs rate limiting to avoid abuse
# TODO(dcramer): this needs a lock/constraint
if (
User.objects.filter(Q(email__iexact=new_email) | Q(username__iexact=new_email))
.exclude(id=user.id)
.exists()
):
return self.respond(
{"email": _("That email address is already associated with another account.")},
status=400,
)
if not new_useremail.is_verified:
return self.respond(
{"email": _("You must verify your email address before marking it as primary.")},
status=400,
)
options = UserOption.objects.filter(user=user, key="mail:email")
for option in options:
if option.value != old_email:
continue
option.update(value=new_email)
has_new_username = old_email == user.username
update_kwargs = {"email": new_email}
if has_new_username and not User.objects.filter(username__iexact=new_email).exists():
update_kwargs["username"] = new_email
# NOTE(mattrobenolt): When changing your primary email address,
# we explicitly want to invalidate existing lost password hashes,
# so that in the event of a compromised inbox, an outstanding
# password hash can't be used to gain access. We also feel this
# is a large enough of a security concern to force logging
# out other current sessions.
user.clear_lost_passwords()
user.refresh_session_nonce(request._request)
update_kwargs["session_nonce"] = user.session_nonce
user.update(**update_kwargs)
logger.info(
"user.email.edit",
extra={
"user_id": user.id,
"ip_address": request.META["REMOTE_ADDR"],
"email": new_email,
},
)
return self.respond(
serialize(new_useremail, user=request.user, serializer=UserEmailSerializer()),
status=200,
)
@sudo_required
def delete(self, request: Request, user: User) -> Response:
"""
Removes an email associated with the user account
"""
validator = EmailValidator(data=request.data)
if not validator.is_valid():
return self.respond(validator.errors, status=400)
email = validator.validated_data["email"]
primary_email = UserEmail.objects.get_primary_email(user)
del_email = UserEmail.objects.filter(user=user, email__iexact=email).first()
del_useroption_email_list = UserOption.objects.filter(
user=user, key="mail:email", value=email
)
# Don't allow deleting primary email?
if primary_email == del_email:
return self.respond({"detail": "Cannot remove primary email"}, status=400)
if del_email:
del_email.delete()
for useroption in del_useroption_email_list:
useroption.delete()
logger.info(
"user.email.remove",
extra={"user_id": user.id, "ip_address": request.META["REMOTE_ADDR"], "email": email},
)
return self.respond(status=204)
| UserEmailsEndpoint |
python | falconry__falcon | falcon/_typing.py | {
"start": 7192,
"end": 7387
} | class ____(Protocol[_AReqT, _ARespT]):
"""ASGI middleware with request handler."""
async def process_request(self, req: _AReqT, resp: _ARespT) -> None: ...
| AsgiMiddlewareWithProcessRequest |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc8628/grant_types/device_code.py | {
"start": 250,
"end": 4265
} | class ____(GrantTypeBase):
def create_authorization_response(
self, request: common.Request, token_handler: Callable
) -> tuple[dict, str, int]:
"""
Validate the device flow request -> create the access token
-> persist the token -> return the token.
"""
headers = self._get_default_headers()
try:
self.validate_token_request(request)
except rfc6749_errors.OAuth2Error as e:
headers.update(e.headers)
return headers, e.json, e.status_code
token = token_handler.create_token(request, refresh_token=False)
for modifier in self._token_modifiers:
token = modifier(token)
self.request_validator.save_token(token, request)
return self.create_token_response(request, token_handler)
def validate_token_request(self, request: common.Request) -> None:
"""
Performs the necessary check against the request to ensure
it's allowed to retrieve a token.
"""
for validator in self.custom_validators.pre_token:
validator(request)
if not getattr(request, "grant_type", None):
raise rfc6749_errors.InvalidRequestError(
"Request is missing grant type.", request=request
)
if request.grant_type != "urn:ietf:params:oauth:grant-type:device_code":
raise rfc6749_errors.UnsupportedGrantTypeError(request=request)
for param in ("grant_type", "scope"):
if param in request.duplicate_params:
raise rfc6749_errors.InvalidRequestError(
description=f"Duplicate {param} parameter.", request=request
)
if not self.request_validator.authenticate_client(request):
raise rfc6749_errors.InvalidClientError(request=request)
elif not hasattr(request.client, "client_id"):
raise NotImplementedError(
"Authenticate client must set the "
"request.client.client_id attribute "
"in authenticate_client."
)
# Ensure client is authorized use of this grant type
self.validate_grant_type(request)
request.client_id = request.client_id or request.client.client_id
self.validate_scopes(request)
for validator in self.custom_validators.post_token:
validator(request)
def create_token_response(
self, request: common.Request, token_handler: Callable
) -> tuple[dict, str, int]:
"""Return token or error in json format.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
If the access token request is valid and authorized, the
authorization server issues an access token and optional refresh
token as described in `Section 5.1`_. If the request failed client
authentication or is invalid, the authorization server returns an
error response as described in `Section 5.2`_.
.. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
"""
headers = self._get_default_headers()
try:
if self.request_validator.client_authentication_required(
request
) and not self.request_validator.authenticate_client(request):
raise rfc6749_errors.InvalidClientError(request=request)
self.validate_token_request(request)
except rfc6749_errors.OAuth2Error as e:
headers.update(e.headers)
return headers, e.json, e.status_code
token = token_handler.create_token(request, self.refresh_token)
self.request_validator.save_token(token, request)
return headers, json.dumps(token), 200
| DeviceCodeGrant |
python | ansible__ansible | test/integration/targets/config/lookup_plugins/casting_individual.py | {
"start": 1139,
"end": 1663
} | class ____(LookupBase):
def run(self, terms, variables=None, **kwargs):
for cast in (list, int, bool, str):
option = 'test_%s' % str(cast).replace("<class '", '').replace("'>", '')
if option in kwargs:
self.set_option(option, kwargs[option])
value = self.get_option(option)
if not isinstance(value, cast):
raise Exception('%s is not a %s: got %s/%s' % (option, cast, type(value), value))
return terms
| LookupModule |
python | realpython__materials | django-markdown/dmd_app/markdown_extensions.py | {
"start": 115,
"end": 494
} | class ____(LinkInlineProcessor):
def getLink(self, data, index):
href, title, index, handled = super().getLink(data, index)
if href.startswith("slug"):
slug = href.split(":")[1]
relative_url = reverse("markdown-content", args=[slug])
href = relative_url
return href, title, index, handled
| SlugFieldLinkInlineProcessor |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/links/test_sagemaker.py | {
"start": 1247,
"end": 2121
} | class ____(BaseAwsLinksTestCase):
link_class = SageMakerTransformJobLink
def test_extra_link(self, mock_supervisor_comms):
if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
mock_supervisor_comms.send.return_value = XComResult(
key="sagemaker_transform_job_details",
value={
"region_name": "us-east-1",
"aws_domain": BaseAwsLink.get_aws_domain("aws"),
**{"job_name": "test_job_name"},
},
)
self.assert_extra_link_url(
expected_url=(
"https://console.aws.amazon.com/sagemaker/home?region=us-east-1#/transform-jobs/test_job_name"
),
region_name="us-east-1",
aws_partition="aws",
job_name="test_job_name",
)
| TestSageMakerTransformDetailsLink |
python | ray-project__ray | python/ray/data/_internal/metadata_exporter.py | {
"start": 10458,
"end": 11517
} | class ____(ABC):
"""Abstract base class for dataset metadata exporters.
Implementations of this interface can export Ray Data metadata to various destinations
like log files, databases, or monitoring systems.
"""
@abstractmethod
def export_dataset_metadata(
self,
dataset_metadata: DatasetMetadata,
include_data_context: bool = True,
include_op_args: bool = True,
) -> None:
"""Export dataset metadata to the destination.
Args:
dataset_metadata: DatasetMetadata object containing dataset information.
include_data_context: If DataContext will be exported
include_op_args: If operator args will be exported
"""
pass
@classmethod
@abstractmethod
def create_if_enabled(cls) -> Optional["DatasetMetadataExporter"]:
"""Create an exporter instance if the export functionality is enabled.
Returns:
An exporter instance if enabled, None otherwise.
"""
pass
| DatasetMetadataExporter |
python | apache__airflow | providers/alibaba/src/airflow/providers/alibaba/cloud/operators/oss.py | {
"start": 5503,
"end": 6342
} | class ____(BaseOperator):
"""
This operator to delete an OSS object.
:param key: key of the object to delete.
:param region: OSS region
:param bucket_name: OSS bucket name
:param oss_conn_id: The Airflow connection used for OSS credentials.
"""
def __init__(
self,
key: str,
region: str,
bucket_name: str | None = None,
oss_conn_id: str = "oss_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.key = key
self.oss_conn_id = oss_conn_id
self.region = region
self.bucket_name = bucket_name
def execute(self, context: Context):
oss_hook = OSSHook(oss_conn_id=self.oss_conn_id, region=self.region)
oss_hook.delete_object(bucket_name=self.bucket_name, key=self.key)
| OSSDeleteObjectOperator |
python | scrapy__scrapy | tests/test_link.py | {
"start": 46,
"end": 1862
} | class ____:
def _assert_same_links(self, link1, link2):
assert link1 == link2
assert hash(link1) == hash(link2)
def _assert_different_links(self, link1, link2):
assert link1 != link2
assert hash(link1) != hash(link2)
def test_eq_and_hash(self):
l1 = Link("http://www.example.com")
l2 = Link("http://www.example.com/other")
l3 = Link("http://www.example.com")
self._assert_same_links(l1, l1)
self._assert_different_links(l1, l2)
self._assert_same_links(l1, l3)
l4 = Link("http://www.example.com", text="test")
l5 = Link("http://www.example.com", text="test2")
l6 = Link("http://www.example.com", text="test")
self._assert_same_links(l4, l4)
self._assert_different_links(l4, l5)
self._assert_same_links(l4, l6)
l7 = Link(
"http://www.example.com", text="test", fragment="something", nofollow=False
)
l8 = Link(
"http://www.example.com", text="test", fragment="something", nofollow=False
)
l9 = Link(
"http://www.example.com", text="test", fragment="something", nofollow=True
)
l10 = Link(
"http://www.example.com", text="test", fragment="other", nofollow=False
)
self._assert_same_links(l7, l8)
self._assert_different_links(l7, l9)
self._assert_different_links(l7, l10)
def test_repr(self):
l1 = Link(
"http://www.example.com", text="test", fragment="something", nofollow=True
)
l2 = eval(repr(l1)) # pylint: disable=eval-used
self._assert_same_links(l1, l2)
def test_bytes_url(self):
with pytest.raises(TypeError):
Link(b"http://www.example.com/\xc2\xa3")
| TestLink |
python | mlflow__mlflow | mlflow/store/tracking/dbmodels/models.py | {
"start": 57786,
"end": 59794
} | class ____(Base):
"""
DB model for entity associations.
"""
__tablename__ = "entity_associations"
ASSOCIATION_ID_PREFIX = "a-"
association_id = Column(String(36), nullable=False)
"""
Association ID: `String` (limit 36 characters).
"""
source_type = Column(String(36), nullable=False)
"""
Source entity type: `String` (limit 36 characters).
"""
source_id = Column(String(36), nullable=False)
"""
Source entity ID: `String` (limit 36 characters).
"""
destination_type = Column(String(36), nullable=False)
"""
Destination entity type: `String` (limit 36 characters).
"""
destination_id = Column(String(36), nullable=False)
"""
Destination entity ID: `String` (limit 36 characters).
"""
created_time = Column(BigInteger, default=get_current_time_millis)
"""
Creation time: `BigInteger`.
"""
__table_args__ = (
PrimaryKeyConstraint(
"source_type",
"source_id",
"destination_type",
"destination_id",
name="entity_associations_pk",
),
Index("index_entity_associations_association_id", "association_id"),
Index(
"index_entity_associations_reverse_lookup",
"destination_type",
"destination_id",
"source_type",
"source_id",
),
)
def __init__(self, **kwargs):
"""Initialize a new entity association with auto-generated ID if not provided."""
if "association_id" not in kwargs:
kwargs["association_id"] = self.generate_association_id()
super().__init__(**kwargs)
@staticmethod
def generate_association_id() -> str:
"""
Generate a unique ID for entity associations.
Returns:
A unique association ID with the format "a-<uuid_hex>".
"""
return f"{SqlEntityAssociation.ASSOCIATION_ID_PREFIX}{uuid.uuid4().hex}"
| SqlEntityAssociation |
python | getsentry__sentry | src/sentry/types/actor.py | {
"start": 681,
"end": 10217
} | class ____(RpcModel):
"""Can represent any model object with a foreign key to Actor."""
id: int
"""The id of the user/team this actor represents"""
actor_type: ActorType
"""Whether this actor is a User or Team"""
slug: str | None = None
class InvalidActor(ObjectDoesNotExist):
"""Raised when an Actor fails to resolve or be found"""
pass
@classmethod
def resolve_many(
cls, actors: Sequence["Actor"], filter_none: bool = True
) -> list["Team | RpcUser | None"]:
"""
Resolve a list of actors in a batch to the Team/User the Actor references.
Will generate more efficient queries to load actors than calling
Actor.resolve() individually will.
"""
from sentry.models.team import Team
from sentry.users.services.user.service import user_service
if not actors:
return []
actors_by_type: dict[ActorType, list[Actor]] = defaultdict(list)
for actor in actors:
actors_by_type[actor.actor_type].append(actor)
results: dict[tuple[ActorType, int], Team | RpcUser] = {}
for actor_type, actor_list in actors_by_type.items():
if actor_type == ActorType.USER:
for user in user_service.get_many_by_id(ids=[u.id for u in actor_list]):
results[(actor_type, user.id)] = user
if actor_type == ActorType.TEAM:
for team in Team.objects.filter(id__in=[t.id for t in actor_list]):
results[(actor_type, team.id)] = team
final_results = [results.get((actor.actor_type, actor.id)) for actor in actors]
if filter_none:
final_results = list(filter(None, final_results))
return final_results
@classmethod
def many_from_object(cls, objects: Iterable[ActorTarget]) -> list["Actor"]:
"""
Create a list of Actor instances based on a collection of 'objects'
Objects will be grouped by the kind of actor they would be related to.
Queries for actors are batched to increase efficiency. Users that are
missing actors will have actors generated.
"""
from sentry.models.team import Team
from sentry.organizations.services.organization import RpcTeam
from sentry.users.models.user import User
result: list["Actor"] = []
grouped_by_type: MutableMapping[str, list[int]] = defaultdict(list)
team_slugs: MutableMapping[int, str] = {}
for obj in objects:
if isinstance(obj, cls):
result.append(obj)
if isinstance(obj, (User, RpcUser)):
grouped_by_type[ActorType.USER].append(obj.id)
if isinstance(obj, (Team, RpcTeam)):
team_slugs[obj.id] = obj.slug
grouped_by_type[ActorType.TEAM].append(obj.id)
if grouped_by_type[ActorType.TEAM]:
team_ids = grouped_by_type[ActorType.TEAM]
for team_id in team_ids:
result.append(
Actor(
id=team_id,
actor_type=ActorType.TEAM,
slug=team_slugs.get(team_id),
)
)
if grouped_by_type[ActorType.USER]:
user_ids = grouped_by_type[ActorType.USER]
for user_id in user_ids:
result.append(Actor(id=user_id, actor_type=ActorType.USER))
return result
@classmethod
def from_object(cls, obj: ActorTarget) -> "Actor":
"""
fetch_actor: whether to make an extra query or call to fetch the actor id
Without the actor_id the Actor acts as a tuple of id and type.
"""
from sentry.models.team import Team
from sentry.organizations.services.organization import RpcTeam
from sentry.users.models.user import User
if isinstance(obj, cls):
return obj
if isinstance(obj, User):
return cls.from_orm_user(obj)
if isinstance(obj, Team):
return cls.from_orm_team(obj)
if isinstance(obj, RpcUser):
return cls.from_rpc_user(obj)
if isinstance(obj, RpcTeam):
return cls.from_rpc_team(obj)
raise TypeError(f"Cannot build Actor from {type(obj)}")
@classmethod
def from_orm_user(cls, user: "User") -> "Actor":
return cls(
id=user.id,
actor_type=ActorType.USER,
)
@classmethod
def from_rpc_user(cls, user: RpcUser) -> "Actor":
return cls(
id=user.id,
actor_type=ActorType.USER,
)
@classmethod
def from_orm_team(cls, team: "Team") -> "Actor":
return cls(id=team.id, actor_type=ActorType.TEAM, slug=team.slug)
@classmethod
def from_rpc_team(cls, team: "RpcTeam") -> "Actor":
return cls(id=team.id, actor_type=ActorType.TEAM, slug=team.slug)
@overload
@classmethod
def from_identifier(cls, id: None) -> None: ...
@overload
@classmethod
def from_identifier(cls, id: int | str) -> "Actor": ...
@overload
@classmethod
def from_identifier(cls, id: int | str, organization_id: int) -> "Actor": ...
@classmethod
def from_identifier(
cls, id: str | int | None, organization_id: int | None = None
) -> "Actor | None":
"""
Parse an actor identifier into an Actor
Forms `id` can take:
1231 -> look up User by id
"1231" -> look up User by id
"user:1231" -> look up User by id
"user:maiseythedog" -> look up user by username
"team:1231" -> look up Team by id
"team:team-name" -> look up Team by name (must provide organization_id)
"maiseythedog" -> look up User by username
"maisey@dogsrule.com" -> look up User by primary email
"""
from sentry.models.team import Team
from sentry.users.services.user.service import user_service
if not id:
return None
# If we have an integer, fall back to assuming it's a User
if isinstance(id, int):
return cls(id=id, actor_type=ActorType.USER)
# If the actor_identifier is a simple integer as a string,
# we're also a User
if id.isdigit():
return cls(id=int(id), actor_type=ActorType.USER)
if id.startswith("user:"):
remainder = id[5:]
if remainder.isdigit():
return cls(id=int(remainder), actor_type=ActorType.USER)
# pass this on to get to the user lookup below
id = remainder
if id.startswith("team:"):
remainder = id[5:]
if remainder.isdigit():
return cls(id=int(remainder), actor_type=ActorType.TEAM)
if organization_id is not None:
try:
team = Team.objects.get(name=remainder, organization_id=organization_id)
return cls(id=team.id, actor_type=ActorType.TEAM)
except Team.DoesNotExist:
pass
raise cls.InvalidActor(f"Unable to resolve team name: {remainder}")
try:
user = user_service.get_by_username(username=id)[0]
return cls(id=user.id, actor_type=ActorType.USER)
except IndexError as e:
raise cls.InvalidActor(f"Unable to resolve actor identifier: {e}")
@classmethod
def from_id(cls, user_id: int | None = None, team_id: int | None = None) -> "Actor | None":
if user_id and team_id:
raise cls.InvalidActor("You can only provide one of user_id and team_id")
if user_id:
return cls(id=user_id, actor_type=ActorType.USER)
if team_id:
return cls(id=team_id, actor_type=ActorType.TEAM)
return None
def __post_init__(self) -> None:
if not self.is_team and self.slug is not None:
raise ValueError("Slugs are expected for teams only")
def __hash__(self) -> int:
return hash((self.id, self.actor_type))
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, self.__class__)
and self.id == other.id
and self.actor_type == other.actor_type
)
def resolve(self) -> "Team | RpcUser":
"""
Resolve an Actor into the Team or RpcUser it represents.
Will raise Team.DoesNotExist or User.DoesNotExist when the actor is invalid
"""
from sentry.models.team import Team
from sentry.users.services.user.service import user_service
if self.is_team:
team = Team.objects.filter(id=self.id).first()
if team:
return team
raise Actor.InvalidActor(f"Cannot find a team with id={self.id}")
if self.is_user:
user = user_service.get_user(user_id=self.id)
if user:
return user
raise Actor.InvalidActor(f"Cannot find a User with id={self.id}")
# This should be un-reachable
raise Actor.InvalidActor("Cannot resolve an actor with an unknown type")
@property
def identifier(self) -> str:
return f"{self.actor_type.lower()}:{self.id}"
@property
def is_team(self) -> bool:
return self.actor_type == ActorType.TEAM
@property
def is_user(self) -> bool:
return self.actor_type == ActorType.USER
| Actor |
python | getsentry__sentry | src/sentry/seer/endpoints/group_autofix_update.py | {
"start": 733,
"end": 2484
} | class ____(GroupAiEndpoint):
publish_status = {
"POST": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.ML_AI
def post(self, request: Request, group: Group) -> Response:
"""
Send an update event to autofix for a given group.
"""
if not request.data:
return Response(status=400, data={"error": "Need a body with a run_id and payload"})
user = request.user
if isinstance(user, AnonymousUser):
return Response(
status=401,
data={"error": "You must be authenticated to use this endpoint"},
)
if not get_seer_org_acknowledgement(group.organization):
return Response(
status=403,
data={
"error": "Seer has not been enabled for this organization. Please open an issue at sentry.io/issues and set up Seer."
},
)
path = "/v1/automation/autofix/update"
body = orjson.dumps(
{
**request.data,
"invoking_user": (
{
"id": user.id,
"display_name": user.get_display_name(),
}
),
}
)
response = requests.post(
f"{settings.SEER_AUTOFIX_URL}{path}",
data=body,
headers={
"content-type": "application/json;charset=utf-8",
**sign_with_seer_secret(body),
},
)
response.raise_for_status()
group.update(seer_autofix_last_triggered=timezone.now())
return Response(status=202, data=response.json())
| GroupAutofixUpdateEndpoint |
python | scipy__scipy | scipy/stats/tests/test_mstats_basic.py | {
"start": 67384,
"end": 85369
} | class ____:
"""
Class to compare mstats results with stats results.
It is in general assumed that scipy.stats is at a more mature stage than
stats.mstats. If a routine in mstats results in similar results like in
scipy.stats, this is considered also as a proper validation of scipy.mstats
routine.
Different sample sizes are used for testing, as some problems between stats
and mstats are dependent on sample size.
Author: Alexander Loew
NOTE that some tests fail. This might be caused by
a) actual differences or bugs between stats and mstats
b) numerical inaccuracies
c) different definitions of routine interfaces
These failures need to be checked. Current workaround is to have disabled these
tests, but issuing reports on scipy-dev
"""
def get_n(self):
""" Returns list of sample sizes to be used for comparison. """
return [1000, 100, 10, 5]
def generate_xy_sample(self, n):
# This routine generates numpy arrays and corresponding masked arrays
# with the same data, but additional masked values
rng = np.random.RandomState(1234567)
x = rng.randn(n)
y = x + rng.randn(n)
xm = np.full(len(x) + 5, 1e16)
ym = np.full(len(y) + 5, 1e16)
xm[0:len(x)] = x
ym[0:len(y)] = y
mask = xm > 9e15
xm = np.ma.array(xm, mask=mask)
ym = np.ma.array(ym, mask=mask)
return x, y, xm, ym
def generate_xy_sample2D(self, n, nx):
x = np.full((n, nx), np.nan)
y = np.full((n, nx), np.nan)
xm = np.full((n+5, nx), np.nan)
ym = np.full((n+5, nx), np.nan)
for i in range(nx):
x[:, i], y[:, i], dx, dy = self.generate_xy_sample(n)
xm[0:n, :] = x[0:n]
ym[0:n, :] = y[0:n]
xm = np.ma.array(xm, mask=np.isnan(xm))
ym = np.ma.array(ym, mask=np.isnan(ym))
return x, y, xm, ym
def test_linregress(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
result1 = stats.linregress(x, y)
result2 = stats.mstats.linregress(xm, ym)
assert_allclose(np.asarray(result1), np.asarray(result2))
def test_pearsonr(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r, p = stats.pearsonr(x, y)
rm, pm = stats.mstats.pearsonr(xm, ym)
assert_almost_equal(r, rm, decimal=14)
assert_almost_equal(p, pm, decimal=14)
def test_spearmanr(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r, p = stats.spearmanr(x, y)
rm, pm = stats.mstats.spearmanr(xm, ym)
assert_almost_equal(r, rm, 14)
assert_almost_equal(p, pm, 14)
def test_spearmanr_backcompat_useties(self):
# A regression test to ensure we don't break backwards compat
# more than we have to (see gh-9204).
x = np.arange(6)
assert_raises(ValueError, mstats.spearmanr, x, x, False)
def test_gmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.gmean(abs(x))
rm = stats.mstats.gmean(abs(xm))
assert_allclose(r, rm, rtol=1e-13)
r = stats.gmean(abs(y))
rm = stats.mstats.gmean(abs(ym))
assert_allclose(r, rm, rtol=1e-13)
def test_hmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.hmean(abs(x))
rm = stats.mstats.hmean(abs(xm))
assert_almost_equal(r, rm, 10)
r = stats.hmean(abs(y))
rm = stats.mstats.hmean(abs(ym))
assert_almost_equal(r, rm, 10)
def test_skew(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.skew(x)
rm = stats.mstats.skew(xm)
assert_almost_equal(r, rm, 10)
r = stats.skew(y)
rm = stats.mstats.skew(ym)
assert_almost_equal(r, rm, 10)
def test_moment(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.moment(x)
rm = stats.mstats.moment(xm)
assert_almost_equal(r, rm, 10)
r = stats.moment(y)
rm = stats.mstats.moment(ym)
assert_almost_equal(r, rm, 10)
def test_zscore(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
# reference solution
zx = (x - x.mean()) / x.std()
zy = (y - y.mean()) / y.std()
# validate stats
assert_allclose(stats.zscore(x), zx, rtol=1e-10)
assert_allclose(stats.zscore(y), zy, rtol=1e-10)
# compare stats and mstats
assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]),
rtol=1e-10)
assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]),
rtol=1e-10)
def test_kurtosis(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.kurtosis(x)
rm = stats.mstats.kurtosis(xm)
assert_almost_equal(r, rm, 10)
r = stats.kurtosis(y)
rm = stats.mstats.kurtosis(ym)
assert_almost_equal(r, rm, 10)
def test_sem(self):
# example from stats.sem doc
a = np.arange(20).reshape(5, 4)
am = np.ma.array(a)
r = stats.sem(a, ddof=1)
rm = stats.mstats.sem(am, ddof=1)
assert_allclose(r, 2.82842712, atol=1e-5)
assert_allclose(rm, 2.82842712, atol=1e-5)
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0),
stats.sem(x, axis=None, ddof=0), decimal=13)
assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0),
stats.sem(y, axis=None, ddof=0), decimal=13)
assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1),
stats.sem(x, axis=None, ddof=1), decimal=13)
assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1),
stats.sem(y, axis=None, ddof=1), decimal=13)
def test_describe(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.describe(x, ddof=1)
rm = stats.mstats.describe(xm, ddof=1)
for ii in range(6):
assert_almost_equal(np.asarray(r[ii]),
np.asarray(rm[ii]),
decimal=12)
def test_describe_result_attributes(self):
actual = mstats.describe(np.arange(5))
attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis')
check_named_results(actual, attributes, ma=True)
def test_rankdata(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.rankdata(x)
rm = stats.mstats.rankdata(x)
assert_allclose(r, rm)
def test_tmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tmean(x),stats.mstats.tmean(xm), 14)
assert_almost_equal(stats.tmean(y),stats.mstats.tmean(ym), 14)
def test_tmax(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tmax(x,2.),
stats.mstats.tmax(xm,2.), 10)
assert_almost_equal(stats.tmax(y,2.),
stats.mstats.tmax(ym,2.), 10)
assert_almost_equal(stats.tmax(x, upperlimit=3.),
stats.mstats.tmax(xm, upperlimit=3.), 10)
assert_almost_equal(stats.tmax(y, upperlimit=3.),
stats.mstats.tmax(ym, upperlimit=3.), 10)
def test_tmin(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_equal(stats.tmin(x), stats.mstats.tmin(xm))
assert_equal(stats.tmin(y), stats.mstats.tmin(ym))
assert_almost_equal(stats.tmin(x, lowerlimit=-1.),
stats.mstats.tmin(xm, lowerlimit=-1.), 10)
assert_almost_equal(stats.tmin(y, lowerlimit=-1.),
stats.mstats.tmin(ym, lowerlimit=-1.), 10)
def test_zmap(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
z = stats.zmap(x, y)
zm = stats.mstats.zmap(xm, ym)
assert_allclose(z, zm[0:len(z)], atol=1e-10)
def test_variation(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.variation(x), stats.mstats.variation(xm),
decimal=12)
assert_almost_equal(stats.variation(y), stats.mstats.variation(ym),
decimal=12)
def test_tvar(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm),
decimal=12)
assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym),
decimal=12)
def test_trimboth(self):
a = np.arange(20)
b = stats.trimboth(a, 0.1)
bm = stats.mstats.trimboth(a, 0.1)
assert_allclose(np.sort(b), bm.data[~bm.mask])
def test_tsem(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tsem(x), stats.mstats.tsem(xm),
decimal=14)
assert_almost_equal(stats.tsem(y), stats.mstats.tsem(ym),
decimal=14)
assert_almost_equal(stats.tsem(x, limits=(-2., 2.)),
stats.mstats.tsem(xm, limits=(-2., 2.)),
decimal=14)
def test_skewtest(self):
# this test is for 1D data
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.skewtest(x)
rm = stats.mstats.skewtest(xm)
assert_allclose(r, rm)
def test_skewtest_result_attributes(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
res = mstats.skewtest(x)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_skewtest_2D_notmasked(self):
# a normal ndarray is passed to the masked function
rng = np.random.default_rng(2790153686)
x = rng.random((20, 2)) * 20.
r = stats.skewtest(x)
rm = stats.mstats.skewtest(x)
assert_allclose(np.asarray(r), np.asarray(rm))
def test_skewtest_2D_WithMask(self):
nx = 2
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample2D(n, nx)
r = stats.skewtest(x)
rm = stats.mstats.skewtest(xm)
assert_allclose(r[0][0], rm[0][0], rtol=1e-14)
assert_allclose(r[0][1], rm[0][1], rtol=1e-14)
def test_normaltest(self):
with np.errstate(over='raise'), warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "`kurtosistest` p-value may be inaccurate", UserWarning)
warnings.filterwarnings(
"ignore", "kurtosistest only valid for n>=20", UserWarning)
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.normaltest(x)
rm = stats.mstats.normaltest(xm)
assert_allclose(np.asarray(r), np.asarray(rm))
def test_find_repeats(self):
x = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4]).astype('float')
tmp = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5]).astype('float')
mask = (tmp == 5.)
xm = np.ma.array(tmp, mask=mask)
x_orig, xm_orig = x.copy(), xm.copy()
unique, unique_counts = np.unique(x, return_counts=True)
r = unique[unique_counts > 1], unique_counts[unique_counts > 1]
rm = stats.mstats.find_repeats(xm)
assert_equal(r, rm)
assert_equal(x, x_orig)
assert_equal(xm, xm_orig)
# This crazy behavior is expected by count_tied_groups, but is not
# in the docstring...
_, counts = stats.mstats.find_repeats([])
assert_equal(counts, np.array(0, dtype=np.intp))
def test_kendalltau(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.kendalltau(x, y)
rm = stats.mstats.kendalltau(xm, ym)
assert_almost_equal(r[0], rm[0], decimal=10)
assert_almost_equal(r[1], rm[1], decimal=7)
def test_obrientransform(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.obrientransform(x)
rm = stats.mstats.obrientransform(xm)
assert_almost_equal(r.T, rm[0:len(x)])
def test_ks_1samp(self):
"""Checks that mstats.ks_1samp and stats.ks_1samp agree on masked arrays."""
for mode in ['auto', 'exact', 'asymp']:
with warnings.catch_warnings():
for alternative in ['less', 'greater', 'two-sided']:
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
res1 = stats.ks_1samp(x, stats.norm.cdf,
alternative=alternative, mode=mode)
res2 = stats.mstats.ks_1samp(xm, stats.norm.cdf,
alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res2))
res3 = stats.ks_1samp(xm, stats.norm.cdf,
alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res3))
def test_kstest_1samp(self):
"""
Checks that 1-sample mstats.kstest and stats.kstest agree on masked arrays.
"""
for mode in ['auto', 'exact', 'asymp']:
with warnings.catch_warnings():
for alternative in ['less', 'greater', 'two-sided']:
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
res1 = stats.kstest(x, 'norm',
alternative=alternative, mode=mode)
res2 = stats.mstats.kstest(xm, 'norm',
alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res2))
res3 = stats.kstest(xm, 'norm',
alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res3))
def test_ks_2samp(self):
"""Checks that mstats.ks_2samp and stats.ks_2samp agree on masked arrays.
gh-8431"""
for mode in ['auto', 'exact', 'asymp']:
with warnings.catch_warnings():
if mode in ['auto', 'exact']:
message = "ks_2samp: Exact calculation unsuccessful."
warnings.filterwarnings("ignore", message, RuntimeWarning)
for alternative in ['less', 'greater', 'two-sided']:
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
res1 = stats.ks_2samp(x, y,
alternative=alternative, mode=mode)
res2 = stats.mstats.ks_2samp(xm, ym,
alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res2))
res3 = stats.ks_2samp(xm, y,
alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res3))
def test_kstest_2samp(self):
"""
Checks that 2-sample mstats.kstest and stats.kstest agree on masked arrays.
"""
for mode in ['auto', 'exact', 'asymp']:
with warnings.catch_warnings():
if mode in ['auto', 'exact']:
message = "ks_2samp: Exact calculation unsuccessful."
warnings.filterwarnings("ignore", message, RuntimeWarning)
for alternative in ['less', 'greater', 'two-sided']:
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
res1 = stats.kstest(x, y,
alternative=alternative, mode=mode)
res2 = stats.mstats.kstest(xm, ym,
alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res2))
res3 = stats.kstest(xm, y,
alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res3))
| TestCompareWithStats |
python | davidhalter__parso | parso/python/errors.py | {
"start": 21201,
"end": 21529
} | class ____(SyntaxRule):
message = 'cannot assign to __debug__'
message_none = 'cannot assign to None'
def is_issue(self, leaf):
self._normalizer.context.add_name(leaf)
if leaf.value == '__debug__' and leaf.is_definition():
return True
@ErrorFinder.register_rule(type='string')
| _NameChecks |
python | google__jax | tests/batching_test.py | {
"start": 51241,
"end": 54131
} | class ____(jtu.JaxTestCase):
@parameterized.parameters([False, True])
def test_basic(self, jit):
with temporarily_register_named_array_vmappable():
def f(x):
return named_mul(x, x)
if jit:
f = jax.jit(f)
x = NamedArray(['i', 'j'], jnp.arange(12.).reshape(3, 4))
g = jax.vmap(f,
in_axes=NamedMapSpec('i', 0),
out_axes=NamedMapSpec('i', 1),
axis_size=3)
ans = g(x)
expected = NamedArray(['j', 'i'], jnp.arange(12.).reshape(3, 4).T ** 2)
self.assertEqual(ans.names, expected.names)
self.assertAllClose(ans.data, expected.data)
def test_to_elt_that_binds_primitives(self):
class A:
data: Array
def __init__(self, data):
self.data = data
def to_elt(cont, _, val, spec):
return cont(val.data + 1, spec)
def from_elt(cont, size, elt, spec):
assert False
@jax.jit
def f():
a = A(jnp.arange(3.))
return jax.vmap(lambda x: x - 1, axis_size=3)(a)
try:
batching.register_vmappable(A, int, int, to_elt, from_elt, None)
ans = f()
finally:
batching.unregister_vmappable(A)
self.assertAllClose(ans, jnp.arange(3.))
def test_from_elt_that_binds_primitives(self):
class A:
data: Array
def __init__(self, data):
self.data = data
def to_elt(cont, _, val, spec):
return A(cont(val.data, spec))
def from_elt(cont, size, elt, spec):
return A(cont(size, elt.data + 1, spec))
@jax.jit
def f():
a = A(jnp.arange(3.))
return jax.vmap(lambda x: x, axis_size=3)(a).data
try:
batching.register_vmappable(A, int, int, to_elt, from_elt, None)
ans = f()
finally:
batching.unregister_vmappable(A)
self.assertAllClose(ans, jnp.arange(3.) + 1)
def test_types_with_same_spec(self):
# We register NamedArray.
batching.register_vmappable(NamedArray, NamedMapSpec, int,
named_to_elt, named_from_elt, None)
# We then register another type that uses NamedMapSpec as the spec_type too,
# and immediately unregister it.
class Foo:
pass
batching.register_vmappable(Foo, NamedMapSpec, int,
named_to_elt, named_from_elt, None)
batching.unregister_vmappable(Foo)
# We should still be able to use vmap on NamedArray.
def f(x):
return named_mul(x, x)
x = NamedArray(['i', 'j'], jnp.arange(12.).reshape(3, 4))
ans = jax.jit(f)(x)
expected = NamedArray(['i', 'j'], jnp.arange(12.).reshape(3, 4) ** 2)
self.assertEqual(ans.names, expected.names)
self.assertAllClose(ans.data, expected.data)
# And unregister NamedArray without exceptions.
batching.unregister_vmappable(NamedArray)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| VmappableTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/base.py | {
"start": 95758,
"end": 96402
} | class ____(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS_MYSQL
def __init__(
self,
dialect: default.DefaultDialect,
server_ansiquotes: bool = False,
**kw: Any,
):
if not server_ansiquotes:
quote = "`"
else:
quote = '"'
super().__init__(dialect, initial_quote=quote, escape_quote=quote)
def _quote_free_identifiers(self, *ids: Optional[str]) -> tuple[str, ...]:
"""Unilaterally identifier-quote any number of strings."""
return tuple([self.quote_identifier(i) for i in ids if i is not None])
| MySQLIdentifierPreparer |
python | ansible__ansible | lib/ansible/playbook/role/include.py | {
"start": 943,
"end": 2091
} | class ____(RoleDefinition, Delegatable):
"""
A derivative of RoleDefinition, used by playbook code when a role
is included for execution in a play.
"""
def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None, collection_list=None):
super(RoleInclude, self).__init__(play=play, role_basedir=role_basedir, variable_manager=variable_manager,
loader=loader, collection_list=collection_list)
@staticmethod
def load(data, play, current_role_path=None, parent_role=None, variable_manager=None, loader=None, collection_list=None):
if not (isinstance(data, str) or isinstance(data, dict)):
raise AnsibleParserError("Invalid role definition.", obj=data)
if isinstance(data, str) and ',' in data:
raise AnsibleError("Invalid old style role requirement: %s" % data)
ri = RoleInclude(play=play, role_basedir=current_role_path, variable_manager=variable_manager, loader=loader, collection_list=collection_list)
return ri.load_data(data, variable_manager=variable_manager, loader=loader)
| RoleInclude |
python | tiangolo__fastapi | tests/test_inherited_custom_class.py | {
"start": 185,
"end": 3093
} | class ____:
def __init__(self, uuid_string: str):
self.uuid = uuid_string
def __str__(self):
return self.uuid
@property # type: ignore
def __class__(self):
return uuid.UUID
@property
def __dict__(self):
"""Spoof a missing __dict__ by raising TypeError, this is how
asyncpg.pgroto.pgproto.UUID behaves"""
raise TypeError("vars() argument must have __dict__ attribute")
@needs_pydanticv2
def test_pydanticv2():
from pydantic import field_serializer
app = FastAPI()
@app.get("/fast_uuid")
def return_fast_uuid():
asyncpg_uuid = MyUuid("a10ff360-3b1e-4984-a26f-d3ab460bdb51")
assert isinstance(asyncpg_uuid, uuid.UUID)
assert type(asyncpg_uuid) is not uuid.UUID
with pytest.raises(TypeError):
vars(asyncpg_uuid)
return {"fast_uuid": asyncpg_uuid}
class SomeCustomClass(BaseModel):
model_config = {"arbitrary_types_allowed": True}
a_uuid: MyUuid
@field_serializer("a_uuid")
def serialize_a_uuid(self, v):
return str(v)
@app.get("/get_custom_class")
def return_some_user():
# Test that the fix also works for custom pydantic classes
return SomeCustomClass(a_uuid=MyUuid("b8799909-f914-42de-91bc-95c819218d01"))
client = TestClient(app)
with client:
response_simple = client.get("/fast_uuid")
response_pydantic = client.get("/get_custom_class")
assert response_simple.json() == {
"fast_uuid": "a10ff360-3b1e-4984-a26f-d3ab460bdb51"
}
assert response_pydantic.json() == {
"a_uuid": "b8799909-f914-42de-91bc-95c819218d01"
}
# TODO: remove when deprecating Pydantic v1
@needs_pydanticv1
def test_pydanticv1():
app = FastAPI()
@app.get("/fast_uuid")
def return_fast_uuid():
asyncpg_uuid = MyUuid("a10ff360-3b1e-4984-a26f-d3ab460bdb51")
assert isinstance(asyncpg_uuid, uuid.UUID)
assert type(asyncpg_uuid) is not uuid.UUID
with pytest.raises(TypeError):
vars(asyncpg_uuid)
return {"fast_uuid": asyncpg_uuid}
class SomeCustomClass(BaseModel):
class Config:
arbitrary_types_allowed = True
json_encoders = {uuid.UUID: str}
a_uuid: MyUuid
@app.get("/get_custom_class")
def return_some_user():
# Test that the fix also works for custom pydantic classes
return SomeCustomClass(a_uuid=MyUuid("b8799909-f914-42de-91bc-95c819218d01"))
client = TestClient(app)
with client:
response_simple = client.get("/fast_uuid")
response_pydantic = client.get("/get_custom_class")
assert response_simple.json() == {
"fast_uuid": "a10ff360-3b1e-4984-a26f-d3ab460bdb51"
}
assert response_pydantic.json() == {
"a_uuid": "b8799909-f914-42de-91bc-95c819218d01"
}
| MyUuid |
python | Lightning-AI__lightning | src/lightning/fabric/strategies/xla_fsdp.py | {
"start": 2151,
"end": 29346
} | class ____(ParallelStrategy, _Sharded):
r"""Strategy for training multiple XLA devices using the
:func:`torch_xla.distributed.xla_fully_sharded_data_parallel.XlaFullyShardedDataParallel` method.
.. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.
For more information check out https://github.com/pytorch/xla/blob/v2.5.0/docs/fsdp.md
Args:
auto_wrap_policy: Same as ``auto_wrap_policy`` parameter in
:class:`torch_xla.distributed.fsdp.XlaFullyShardedDataParallel`.
For convenience, this also accepts a set of the layer classes to wrap.
activation_checkpointing_policy: Used when selecting the modules for
which you want to enable activation checkpointing. Enabling this can free up a significant amount of memory
at the cost of speed since activations in these layers need to be recomputed during backpropagation.
This accepts a set of the layer classes to wrap.
state_dict_type: The format in which the state of the model and optimizers gets saved into the checkpoint.
- ``"full"``: The full weights and optimizer states get assembled on rank 0 and saved to a single file.
- ``"sharded"``: Each rank saves its shard of weights and optimizer states to a file. The checkpoint is
a folder with files for each shard in the host. Note that TPU VM multihost does not have a shared
filesystem.
sequential_save: With this enabled, individual ranks consecutively save their state dictionary shards, reducing
peak system RAM usage, although it elongates the saving process.
\**kwargs: See available parameters in :class:`torch_xla.distributed.fsdp.XlaFullyShardedDataParallel`.
"""
def __init__(
self,
accelerator: Optional[Accelerator] = None,
parallel_devices: Optional[list[torch.device]] = None,
checkpoint_io: Optional[XLACheckpointIO] = None,
precision: Optional[XLAPrecision] = None,
auto_wrap_policy: Optional[_POLICY] = None,
activation_checkpointing_policy: Optional[_POLICY_SET] = None,
state_dict_type: Literal["full", "sharded"] = "sharded",
sequential_save: bool = False,
**kwargs: Any,
) -> None:
if not _XLA_AVAILABLE:
raise ModuleNotFoundError(str(_XLA_AVAILABLE))
super().__init__(
accelerator=accelerator,
parallel_devices=parallel_devices,
cluster_environment=XLAEnvironment(),
checkpoint_io=checkpoint_io,
precision=precision,
)
self._backward_sync_control = _XLAFSDPBackwardSyncControl()
self._auto_wrap_policy = auto_wrap_policy
self._activation_checkpointing_policy = activation_checkpointing_policy
self._fsdp_kwargs = kwargs
self._state_dict_type = state_dict_type
self._sequential_save = sequential_save
self._launched = False
@property
@override
def root_device(self) -> torch.device:
if not self._launched:
raise RuntimeError("Accessing the XLA device before processes have spawned is not allowed.")
import torch_xla.core.xla_model as xm
return xm.xla_device()
@property
def num_processes(self) -> int:
return len(self.parallel_devices) if self.parallel_devices is not None else 0
@property
@override
def checkpoint_io(self) -> XLACheckpointIO:
plugin = self._checkpoint_io
if plugin is not None:
assert isinstance(plugin, XLACheckpointIO)
return plugin
return XLACheckpointIO()
@checkpoint_io.setter
@override
def checkpoint_io(self, io: Optional[CheckpointIO]) -> None:
if io is not None and not isinstance(io, XLACheckpointIO):
raise TypeError(f"The XLA strategy can only work with the `XLACheckpointIO` plugin, found {io}")
self._checkpoint_io = io
@property
@override
def precision(self) -> XLAPrecision:
plugin = self._precision
if plugin is not None:
assert isinstance(plugin, XLAPrecision)
return plugin
return XLAPrecision("32-true")
@precision.setter
@override
def precision(self, precision: Optional[Precision]) -> None:
if precision is not None and not isinstance(precision, XLAPrecision):
raise TypeError(f"The XLA FSDP strategy can only work with the `XLAPrecision` plugin, found {precision}")
self._precision = precision
@property
@override
def global_rank(self) -> int:
return super().global_rank if self._launched else 0
@property
@override
def local_rank(self) -> int:
return super().local_rank if self._launched else 0
@property
@override
def node_rank(self) -> int:
return super().node_rank if self._launched else 0
@property
@override
def world_size(self) -> int:
return super().world_size if self._launched else 1
@override
def _configure_launcher(self) -> None:
self._launcher = _XLALauncher(self)
@override
def setup_environment(self) -> None:
assert self.parallel_devices is not None
if len(self.parallel_devices) == 1:
# spawning only 1 device with PjRT is not supported:
# https://github.com/Lightning-AI/pytorch-lightning/pull/17408#discussion_r1170671732
raise NotImplementedError(
f"The {type(self).__name__} does not support running on a single device with the PjRT runtime."
" Try using all devices or the `SingleDeviceXLAStrategy` strategy"
)
self._launched = True
rank_zero_only.rank = self.global_rank
super().setup_environment()
@override
def setup_module_and_optimizers(
self, module: Module, optimizers: list[Optimizer], scheduler: Optional["_LRScheduler"] = None
) -> tuple[Module, list[Optimizer], Optional["_LRScheduler"]]:
"""Returns NotImplementedError since for XLAFSDP optimizer setup must happen after module setup."""
raise NotImplementedError(
f"The `{type(self).__name__}` does not support the joint setup of module and optimizer(s)."
" Please do it in this order: Create the model, call `setup_module`, create the optimizer,"
" call `setup_optimizer`."
)
@override
def setup_module(self, module: Module) -> Module:
from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as XLAFSDP
kwargs = self._parse_fsdp_kwargs()
if any(isinstance(mod, XLAFSDP) for mod in module.modules()) and "auto_wrap_policy" in kwargs:
rank_zero_warn(
"A XLAFSDP `auto_wrap_policy` is set, but at least one submodule is already wrapped."
" The policy will be ignored."
)
del kwargs["auto_wrap_policy"]
# XLA FSDP requires that the root is wrapped, even if submodules are already wrapped
if not isinstance(module, XLAFSDP):
module = XLAFSDP(module=module, **kwargs)
return module
@override
def module_to_device(self, module: Module) -> None:
pass
def module_init_context(self, empty_init: Optional[bool] = None) -> AbstractContextManager:
precision_init_ctx = self.precision.module_init_context()
module_sharded_ctx = self.module_sharded_context()
stack = ExitStack()
stack.enter_context(_EmptyInit(enabled=bool(empty_init)))
stack.enter_context(precision_init_ctx)
stack.enter_context(module_sharded_ctx)
return stack
@override
def module_sharded_context(self) -> AbstractContextManager:
return nullcontext()
@override
def process_dataloader(self, dataloader: DataLoader) -> "MpDeviceLoader":
from torch_xla.distributed.parallel_loader import MpDeviceLoader
if isinstance(dataloader, MpDeviceLoader):
# dataloader is already wrapped by MpDeviceLoader
return dataloader
dataloader = MpDeviceLoader(dataloader, self.root_device)
# Mimic interface to torch.utils.data.DataLoader
dataloader.dataset = dataloader._loader.dataset
dataloader.batch_sampler = getattr(dataloader._loader, "batch_sampler", None)
return dataloader
@override
def setup_optimizer(self, optimizer: Optimizer) -> Optimizer:
"""Set up an optimizer for a model wrapped with XLAFSDP.
This setup method doesn't modify the optimizer or wrap the optimizer. The only thing it currently does is verify
that the optimizer was created after the model was wrapped with :meth:`setup_module` with a reference to the
flattened parameters.
"""
if any(getattr(p, "_is_sharded", False) for group in optimizer.param_groups for p in group["params"]):
return optimizer
raise ValueError(
"The optimizer does not seem to reference any XLAFSDP parameters. HINT: Make sure to create the optimizer"
" after setting up the model."
)
@override
def optimizer_step(self, optimizer: Optimizable, **kwargs: Any) -> Any:
"""Overrides default tpu optimizer_step since FSDP should not call `torch_xla.core.xla_model.optimizer_step`.
Performs the actual optimizer step.
Args:
optimizer: the optimizer performing the step
**kwargs: Any extra arguments to ``optimizer.step``
"""
loss = optimizer.step(**kwargs)
import torch_xla.core.xla_model as xm
xm.mark_step()
return loss
@override
def clip_gradients_norm(
self,
module: Module,
optimizer: Optimizer,
max_norm: Union[float, int],
norm_type: Union[float, int] = 2.0,
error_if_nonfinite: bool = True,
) -> Tensor:
"""Clip gradients by norm."""
self.precision.unscale_gradients(optimizer)
assert callable(module.clip_grad_norm_)
return module.clip_grad_norm_(max_norm=max_norm, norm_type=norm_type)
@override
def clip_gradients_value(self, module: Module, optimizer: Optimizer, clip_val: Union[float, int]) -> None:
"""Clip gradients by value."""
raise NotImplementedError(
"XLA's FSDP strategy does not support to clip gradients by value."
" Consider clipping by norm instead or choose another strategy!"
)
@override
def all_gather(self, tensor: Tensor, group: Optional[Any] = None, sync_grads: bool = False) -> Tensor:
    """Function to gather a tensor from several distributed processes.

    Args:
        tensor: tensor to all-gather.
        group: unused.
        sync_grads: flag that allows users to synchronize gradients for the all-gather operation.

    Return:
        A tensor of shape (world_size, ...)

    """
    # Before launch there is nothing to gather from; return the input untouched.
    if not self._launched:
        return tensor
    if not isinstance(tensor, Tensor):
        raise NotImplementedError(
            f"`{type(self).__name__}.all_gather` is only implemented for tensors. Given {tensor}"
        )
    if tensor.dim() == 0:
        # Scalars get a length-1 dim so the gathered result has shape (world_size,).
        tensor = tensor.unsqueeze(0)
    original_device = tensor.device
    # The XLA collective requires the tensor to live on the XLA device.
    tensor = tensor.to(self.root_device)

    import torch_xla.core.functions as xf
    import torch_xla.core.xla_model as xm

    # `xf.all_gather` is chosen when gradients must flow through the gather
    # (presumably the autograd-aware variant — see torch_xla docs to confirm).
    tensor = xf.all_gather(tensor) if sync_grads else xm.all_gather(tensor)
    # Return the result on the caller's original device.
    tensor = tensor.to(original_device)
    return tensor
@override
def all_reduce(
    self, output: Union[Tensor, Any], group: Optional[Any] = None, reduce_op: Optional[Union[ReduceOp, str]] = None
) -> Tensor:
    """Reduce ``output`` across all processes; only sum/mean(/avg) reductions are supported.

    Non-tensor inputs are converted to a tensor on the XLA device first.
    ``group`` is unused.
    """
    if not isinstance(output, Tensor):
        output = torch.tensor(output, device=self.root_device)

    invalid_reduce_op = isinstance(reduce_op, ReduceOp) and reduce_op != ReduceOp.SUM
    invalid_reduce_op_str = isinstance(reduce_op, str) and reduce_op.lower() not in ("sum", "mean", "avg")
    if invalid_reduce_op or invalid_reduce_op_str:
        raise ValueError(
            "Currently, the XLAFSDPStrategy only supports `sum`, `mean`, `avg` for the reduce operation, got:"
            f" {reduce_op}"
        )
    import torch_xla.core.xla_model as xm

    # Always reduce with `sum` under the "reduce" tag; mean/avg divide afterwards.
    output = xm.mesh_reduce("reduce", output, sum)

    if isinstance(reduce_op, str) and reduce_op.lower() in ("avg", "mean"):
        output = output / self.world_size

    return output
@override
def barrier(self, name: Optional[str] = None, *args: Any, **kwargs: Any) -> None:
    """Synchronize all processes via ``xm.rendezvous``; a no-op before launch.

    Extra positional/keyword arguments are accepted for interface compatibility and ignored.
    """
    if not self._launched:
        return
    import torch_xla.core.xla_model as xm

    if name is None:
        # `None` is not supported: "TypeError: _xla_rendezvous(): incompatible function arguments"
        name = ""
    xm.rendezvous(name)
@override
def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast:
    """Broadcast ``obj`` from rank ``src`` to all processes.

    Tensors are moved to the XLA device for the collective and returned on their
    original device. Arbitrary picklable objects are serialized via ``torch.save``
    into a byte tensor, broadcast, and deserialized on the receiving side.
    """
    if not self._launched:
        return obj

    import torch_xla.core.xla_model as xm

    is_tensor = isinstance(obj, Tensor)
    if is_tensor:
        if obj.dim() == 0:
            # collective_broadcast needs at least one dimension
            obj = obj.unsqueeze(0)
        original_device = obj.device
        # XLA distributed requires that the data is on the XLA device
        obj = obj.to(self.root_device)
    else:
        # support for arbitrary pickle-ables
        buffer = io.BytesIO()
        torch.save(obj, buffer)
        obj = torch.tensor(  # type: ignore[assignment]
            bytearray(buffer.getbuffer()), device=self.root_device, dtype=torch.float
        )

    obj = [obj]
    xm.collective_broadcast(obj, root_ordinal=src)
    obj = obj[0]

    if not is_tensor:
        # this will preserve the dtype and device of any tensors
        buffer = io.BytesIO(obj.cpu().byte().numpy())
        obj = torch.load(buffer)
    else:
        obj = obj.to(original_device)

    return obj
@override
def save_checkpoint(
    self,
    path: _PATH,
    state: dict[str, Union[Module, Optimizer, Any]],
    storage_options: Optional[Any] = None,
    filter: Optional[dict[str, Callable[[str, Any], bool]]] = None,
) -> None:
    """Save model, optimizer, and other state in the provided checkpoint directory.

    If the user specifies sharded checkpointing, the directory will contain one file per process, with model- and
    optimizer shards stored per file. If the user specifies full checkpointing, the directory will contain a
    consolidated checkpoint combining all of the sharded checkpoints.
    """
    # broadcast the path from rank 0 to ensure all the states are saved in a common path
    path = Path(self.broadcast(path))
    if path.is_dir() and any(path.iterdir()):
        raise FileExistsError(f"The checkpoint directory already exists and is not empty: {path}")
    from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as XLAFSDP

    # Exactly one XLAFSDP-wrapped model must be present in the state.
    modules = [module for module in state.values() if isinstance(module, XLAFSDP)]
    if len(modules) == 0:
        raise ValueError(
            "Could not find a XLAFSDP model in the provided checkpoint state. Please provide the model as"
            " part of the state like so: `save_checkpoint(..., state={'model': model, ...})`. Make sure"
            " you set up the model (and optimizers if any) through the strategy before saving the checkpoint."
        )
    if len(modules) > 1:
        raise ValueError(
            "Found multiple XLAFSDP modules in the given state. Saving checkpoints with FSDP is"
            " currently limited to a single model per checkpoint. To save multiple models, call the"
            " save method for each model separately with a different path."
        )
    import torch_xla.core.xla_model as xm

    # ensure model parameters are updated
    xm.mark_step()
    parallel_devices = self.parallel_devices
    assert parallel_devices is not None
    if self._sequential_save:
        # each host runs this in parallel, but the ranks in the host run it sequentially
        # (presumably to limit peak host memory while writing shards — see `_sequential_save`)
        for rank in range(len(parallel_devices)):
            if rank == self.local_rank:
                self._save_checkpoint_shard(path, state, storage_options, filter)
            self.barrier(f"wait-for-{rank}-save")
    else:
        self._save_checkpoint_shard(path, state, storage_options, filter)

    if self._state_dict_type == "full":
        ckpt_prefix = str(path / "checkpoint")
        ckpt_suffix = "_rank-*-of-*.pth"
        if len(parallel_devices) != self.world_size:  # multihost
            raise OSError(
                "Multihost setups do not have a shared filesystem, so the checkpoint shards cannot be consolidated"
                " into a single checkpoint after saving them. Please switch to"
                " `XLAFSDPStrategy(state_dict_type='sharded')`. TIP: You can consolidate them manually by getting"
                " them together into a single directory and running `python -m"
                f" torch_xla.distributed.fsdp.consolidate_sharded_ckpts --ckpt_prefix {ckpt_prefix!r} --ckpt_suffix"
                f" {ckpt_suffix!r} --save_path 'path/to/consolidated.ckpt'`."
            )
        from torch_xla.distributed.fsdp import consolidate_sharded_model_checkpoints

        # all ranks must have finished writing their shard before consolidation starts
        self.barrier("before_ckpt_consolidation")
        if self.is_global_zero:
            save_path = path.parent / "consolidated.ckpt"
            # save consolidated checkpoint separate to the shards
            consolidate_sharded_model_checkpoints(ckpt_prefix, ckpt_suffix, str(save_path))
            # remove the shards directory
            self.checkpoint_io.remove_checkpoint(path)
            # mv the consolidated checkpoint where the user would expect it
            get_filesystem(save_path).mv(str(save_path), str(path))
        self.barrier("after_ckpt_consolidation")
def _save_checkpoint_shard(
    self,
    path: Path,
    state: dict[str, Union[Module, Optimizer, Any]],
    storage_options: Optional[Any],
    filter: Optional[dict[str, Callable[[str, Any], bool]]],
) -> None:
    """Write this rank's shard of ``state`` into ``path``.

    XLAFSDP modules contribute their (sharded) ``state_dict`` plus ``shard_metadata``
    (presumably needed later by the consolidation utility — confirm); optimizers
    contribute their ``state_dict``; anything else is stored as-is. Entries are
    filtered per-key via ``filter`` before being written.
    """
    from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as XLAFSDP

    converted_state: dict[str, Any] = {}
    for key, obj in state.items():
        # convert the state
        if isinstance(obj, Module) and isinstance(obj, XLAFSDP):
            converted = obj.state_dict()
            # add shard_metadata to state
            converted_state["shard_metadata"] = obj.get_shard_metadata()
        elif isinstance(obj, Optimizer):
            converted = obj.state_dict()
        else:
            converted = obj
        _apply_filter(key, filter or {}, converted, converted_state)

    self.checkpoint_io.save_checkpoint(
        converted_state,
        path / f"checkpoint_rank-{self.global_rank:08d}-of-{self.world_size:08d}.pth",
        storage_options=storage_options,
    )
@override
def load_checkpoint(
    self,
    path: _PATH,
    state: Optional[Union[Module, Optimizer, dict[str, Union[Module, Optimizer, Any]]]] = None,
    strict: bool = True,
    weights_only: Optional[bool] = None,
) -> dict[str, Any]:
    """Given a folder, load the contents from a checkpoint and restore the state of the given objects.

    The strategy currently only supports saving and loading sharded checkpoints which are stored in form of a
    directory of multiple files rather than a single file.

    Args:
        path: directory of shard files ("sharded") or a single consolidated file ("full").
        state: mapping with exactly one XLAFSDP model (sharded) or one plain module under
            ``"model"`` (full), plus optimizers / extra metadata to restore in place.
        strict: whether to require an exact match between checkpoint and requested keys.
        weights_only: forwarded to ``torch.load`` to restrict unpickling.

    Returns:
        Metadata found in the checkpoint but not requested via ``state``.
    """
    if not state:
        raise ValueError(
            f"Got `XLAFSDPStrategy.load_checkpoint(..., state={state!r})` but a state with at least "
            " a model instance to reload is required. Pass it in like so:"
            " `FSDPStrategy.load_checkpoint(..., state={'model': model, ...})`"
        )

    # broadcast the path from rank 0 to ensure all the states are loaded from a common path
    path = Path(self.broadcast(path))

    if isinstance(state, (Module, Optimizer)):
        raise NotImplementedError(
            "Loading a single module or optimizer object from a checkpoint"
            " is not supported yet with the XLAFSDP strategy."
        )

    from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as XLAFSDP

    modules = {key: module for key, module in state.items() if isinstance(module, XLAFSDP)}
    optimizers = {key: optim for key, optim in state.items() if isinstance(optim, Optimizer)}
    if self._state_dict_type == "sharded":
        # each rank loads its own shard file
        file = path / f"checkpoint_rank-{self.global_rank:08d}-of-{self.world_size:08d}.pth"
        if not file.is_file():
            raise ValueError(
                f"The path {str(file)!r} does not point to valid sharded checkpoints. Make sure the path points to"
                " a directory with XLAFSDP checkpoint shards."
            )
        if len(modules) == 0:
            raise ValueError(
                "Could not find a XLAFSDP model in the provided checkpoint state. Please provide the model as"
                " part of the state like so: `load_checkpoint(..., state={'model': model, ...})`. Make sure"
                " you set up the model (and optimizers if any) through the strategy before loading the checkpoint."
            )
        if len(modules) > 1:
            raise ValueError(
                "Found multiple XLAFSDP modules in the given state. Loading checkpoints with FSDP is"
                " currently limited to a single model per checkpoint. To load multiple models, call the"
                " load method for each model separately with a different path."
            )
        _, module = list(modules.items())[0]
        # FIX: honor the `weights_only` argument here as well — it was previously
        # only applied in the "full" branch below, silently ignoring the caller's choice.
        sharded_ckpt = torch.load(file, weights_only=weights_only)

        module.load_state_dict(sharded_ckpt["model"], strict=strict)
        for opt_key, opt in optimizers.items():
            opt.load_state_dict(sharded_ckpt[opt_key])

        # Load anything leftover from sharded_ckpt
        loaded_metadata_keys = sharded_ckpt.keys() - modules.keys() - optimizers.keys()
        requested_metadata_keys = state.keys() - modules.keys() - optimizers.keys()
        _validate_keys_for_strict_loading(requested_metadata_keys, loaded_metadata_keys, strict=strict)
        for key in requested_metadata_keys:
            if key in loaded_metadata_keys:
                # restore requested metadata in place on the caller's `state` dict
                state[key] = sharded_ckpt[key]
                loaded_metadata_keys.remove(key)

        metadata = {}
        if len(loaded_metadata_keys):
            for key in loaded_metadata_keys:
                metadata[key] = sharded_ckpt[key]

        # remove "shard_metadata" that is loaded in
        if "shard_metadata" in metadata:
            metadata.pop("shard_metadata")

        return metadata

    if self._state_dict_type == "full":
        if not path.is_file():
            raise ValueError(
                f"The path {str(path)!r} does not point to a valid full checkpoint. Make sure the path points to a"
                " directory with a full XLAFSDP checkpoint."
            )
        if len(optimizers) > 0 or len(state.keys() - modules.keys() - optimizers.keys()) > 0:
            rank_zero_warn(
                "Loading a full checkpoint will only load the full model."
                " The optimizer and any additional metadata are not included."
            )
        if len(modules) > 0:
            raise ValueError(
                "Found a XLAFSDP model in the provided checkpoint state."
                " Please provide the model without any XLAFSDP wrapper."
            )
        if "model" not in state or not isinstance(model := state["model"], torch.nn.Module):
            raise NotImplementedError("XLAFSDP only supports a single model instance with 'model' as the key.")
        full_ckpt = torch.load(path, weights_only=weights_only)
        model.load_state_dict(full_ckpt.pop("model"), strict=strict)
        return full_ckpt

    raise ValueError(f"Unknown state_dict_type: {self._state_dict_type}")
@classmethod
@override
def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None:
    """Register this strategy in ``strategy_registry`` under the ``xla_fsdp`` name."""
    strategy_registry.register("xla_fsdp", cls, description=cls.__name__)
def _parse_fsdp_kwargs(self) -> dict:
    """Assemble the kwargs forwarded to the XLAFSDP wrapper (compute dtype, wrap policy, checkpointing)."""
    # this needs to be delayed because `self.precision` isn't available at init
    kwargs = self._fsdp_kwargs.copy()
    precision = self.precision
    if isinstance(precision, XLAPrecision):
        # the `compute_dtype` will be passed to the `auto_wrapper_callable` automatically, so we don't need to pass
        # it when creating it
        kwargs.setdefault("compute_dtype", precision._desired_dtype)
    kwargs = _auto_wrap_policy_kwargs(self._auto_wrap_policy, kwargs)
    return _activation_checkpointing_kwargs(self._activation_checkpointing_policy, kwargs)
def _auto_wrap_policy_kwargs(policy: Optional["_POLICY"], kwargs: dict) -> dict:
    """Normalize ``policy`` and store it under ``kwargs["auto_wrap_policy"]``.

    A ``set`` of layer classes is converted into a callable policy; ``None``
    leaves ``kwargs`` untouched. The input dict is mutated and returned.
    """
    if policy is None:
        return kwargs
    wrap_policy = policy
    if isinstance(wrap_policy, set):
        from torch_xla.distributed.fsdp.wrap import transformer_auto_wrap_policy

        # despite its name, this policy is not specific to transformers
        wrap_policy = partial(transformer_auto_wrap_policy, transformer_layer_cls=wrap_policy)
    kwargs["auto_wrap_policy"] = wrap_policy
    return kwargs
def _activation_checkpointing_auto_wrapper(policy: _POLICY_SET, module: Module, *args: Any, **kwargs: Any) -> Module:
    """Wrap ``module`` with XLAFSDP, first enabling activation checkpointing when its class is in ``policy``."""
    from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as XLAFSDP
    from torch_xla.distributed.fsdp import checkpoint_module

    module = checkpoint_module(module) if isinstance(module, tuple(policy)) else module
    return XLAFSDP(module, *args, **kwargs)
def _activation_checkpointing_kwargs(policy: Optional[_POLICY_SET], kwargs: dict) -> dict:
    """Translate an activation-checkpointing ``policy`` into an ``auto_wrapper_callable``.

    A falsy policy leaves ``kwargs`` untouched. Setting the policy together with an
    explicit ``auto_wrapper_callable`` is rejected, as is any non-set policy.
    The input dict is mutated and returned.
    """
    if not policy:
        return kwargs
    if "auto_wrapper_callable" in kwargs:
        raise ValueError(
            "You cannot set both `auto_wrapper_callable` and `activation_checkpointing_policy`. Choose one"
        )
    if not isinstance(policy, set):
        raise TypeError(
            f"`activation_checkpointing_policy` must be a set, found {policy}. You can try defining and"
            " passing `auto_wrapper_callable` instead."
        )
    wrapper = partial(_activation_checkpointing_auto_wrapper, policy)
    kwargs["auto_wrapper_callable"] = wrapper
    return kwargs
| XLAFSDPStrategy |
python | pandas-dev__pandas | pandas/io/sql.py | {
"start": 56341,
"end": 75157
} | class ____(PandasSQL):
"""
This class enables conversion between DataFrame and SQL databases
using SQLAlchemy to handle DataBase abstraction.
Parameters
----------
con : SQLAlchemy Connectable or URI string.
Connectable to connect with the database. Using SQLAlchemy makes it
possible to use any DB supported by that library.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
need_transaction : bool, default False
If True, SQLDatabase will create a transaction.
"""
def __init__(
    self, con, schema: str | None = None, need_transaction: bool = False
) -> None:
    """Normalize ``con`` (URI string, Engine, or Connection) into a live Connection."""
    from sqlalchemy import create_engine
    from sqlalchemy.engine import Engine
    from sqlalchemy.schema import MetaData

    # self.exit_stack cleans up the Engine and Connection and commits the
    # transaction if any of those objects was created below.
    # Cleanup happens either in self.__exit__ or at the end of the iterator
    # returned by read_sql when chunksize is not None.
    self.exit_stack = ExitStack()
    if isinstance(con, str):
        # a URI string: create (and own) a throwaway Engine
        con = create_engine(con)
        self.exit_stack.callback(con.dispose)
    if isinstance(con, Engine):
        con = self.exit_stack.enter_context(con.connect())
        if need_transaction and not con.in_transaction():
            self.exit_stack.enter_context(con.begin())
    self.con = con
    self.meta = MetaData(schema=schema)
    # Flipped to True when a chunked reader hands cleanup ownership to its generator.
    self.returns_generator = False
def __exit__(self, *args) -> None:
    # When a chunked read returned a generator, that generator owns the cleanup
    # (see _query_iterator); closing here would invalidate the connection mid-iteration.
    if not self.returns_generator:
        self.exit_stack.close()
@contextmanager
def run_transaction(self):
    """Yield the connection inside a transaction, beginning one only if none is active."""
    if not self.con.in_transaction():
        with self.con.begin():
            yield self.con
    else:
        yield self.con
def execute(self, sql: str | Select | TextClause | Delete, params=None):
    """Simple passthrough to SQLAlchemy connectable"""
    from sqlalchemy.exc import SQLAlchemyError

    args = [] if params is None else [params]
    if isinstance(sql, str):
        # raw strings go through the driver directly (no SQLAlchemy compilation)
        execute_function = self.con.exec_driver_sql
    else:
        execute_function = self.con.execute
    try:
        return execute_function(sql, *args)
    except SQLAlchemyError as exc:
        # normalize driver errors into pandas' DatabaseError, keeping the cause chained
        raise DatabaseError(f"Execution failed on sql '{sql}': {exc}") from exc
def read_table(
    self,
    table_name: str,
    index_col: str | list[str] | None = None,
    coerce_float: bool = True,
    parse_dates=None,
    columns=None,
    schema: str | None = None,
    chunksize: int | None = None,
    dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
) -> DataFrame | Iterator[DataFrame]:
    """
    Read SQL database table into a DataFrame.

    Parameters
    ----------
    table_name : str
        Name of SQL table in database.
    index_col : string, optional, default: None
        Column to set as index.
    coerce_float : bool, default True
        Attempts to convert values of non-string, non-numeric objects
        (like decimal.Decimal) to floating point. This can result in
        loss of precision.
    parse_dates : list or dict, default: None
        - List of column names to parse as dates.
        - Dict of ``{column_name: format string}`` where format string is
          strftime compatible in case of parsing string times, or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps.
        - Dict of ``{column_name: arg}``, where the arg corresponds
          to the keyword arguments of :func:`pandas.to_datetime`.
          Especially useful with databases without native Datetime support,
          such as SQLite.
    columns : list, default: None
        List of column names to select from SQL table.
    schema : string, default None
        Name of SQL schema in database to query (if database flavor
        supports this). If specified, this overwrites the default
        schema of the SQL database object.
    chunksize : int, default None
        If specified, return an iterator where `chunksize` is the number
        of rows to include in each chunk.
    dtype_backend : {'numpy_nullable', 'pyarrow'}
        Back-end data type applied to the resultant :class:`DataFrame`
        (still experimental). If not specified, the default behavior
        is to not use nullable data types. If specified, the behavior
        is as follows:

        * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
        * ``"pyarrow"``: returns pyarrow-backed nullable
          :class:`ArrowDtype` :class:`DataFrame`

        .. versionadded:: 2.0

    Returns
    -------
    DataFrame

    See Also
    --------
    pandas.read_sql_table
    SQLDatabase.read_query

    """
    # reflect only the requested table (views included) into self.meta
    self.meta.reflect(bind=self.con, only=[table_name], views=True)
    table = SQLTable(table_name, self, index=index_col, schema=schema)
    if chunksize is not None:
        # the returned generator takes ownership of closing self.exit_stack
        self.returns_generator = True
    return table.read(
        self.exit_stack,
        coerce_float=coerce_float,
        parse_dates=parse_dates,
        columns=columns,
        chunksize=chunksize,
        dtype_backend=dtype_backend,
    )
@staticmethod
def _query_iterator(
    result,
    exit_stack: ExitStack,
    chunksize: int,
    columns,
    index_col=None,
    coerce_float: bool = True,
    parse_dates=None,
    dtype: DtypeArg | None = None,
    dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
) -> Generator[DataFrame]:
    """Return generator through chunked result set"""
    has_read_data = False
    # This generator owns `exit_stack`: the connection/engine are released when
    # the generator is exhausted or closed.
    with exit_stack:
        while True:
            data = result.fetchmany(chunksize)
            if not data:
                if not has_read_data:
                    # empty result set: still yield one empty, correctly-typed frame
                    yield _wrap_result(
                        [],
                        columns,
                        index_col=index_col,
                        coerce_float=coerce_float,
                        parse_dates=parse_dates,
                        dtype=dtype,
                        dtype_backend=dtype_backend,
                    )
                break

            has_read_data = True
            yield _wrap_result(
                data,
                columns,
                index_col=index_col,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
                dtype=dtype,
                dtype_backend=dtype_backend,
            )
def read_query(
    self,
    sql: str,
    index_col: str | list[str] | None = None,
    coerce_float: bool = True,
    parse_dates=None,
    params=None,
    chunksize: int | None = None,
    dtype: DtypeArg | None = None,
    dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
) -> DataFrame | Iterator[DataFrame]:
    """
    Read SQL query into a DataFrame.

    Parameters
    ----------
    sql : str
        SQL query to be executed.
    index_col : string, optional, default: None
        Column name to use as index for the returned DataFrame object.
    coerce_float : bool, default True
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point, useful for SQL result sets.
    params : list, tuple or dict, optional, default: None
        List of parameters to pass to execute method.  The syntax used
        to pass parameters is database driver dependent. Check your
        database driver documentation for which of the five syntax styles,
        described in PEP 249's paramstyle, is supported.
        Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
    parse_dates : list or dict, default: None
        - List of column names to parse as dates.
        - Dict of ``{column_name: format string}`` where format string is
          strftime compatible in case of parsing string times, or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps.
        - Dict of ``{column_name: arg dict}``, where the arg dict
          corresponds to the keyword arguments of
          :func:`pandas.to_datetime` Especially useful with databases
          without native Datetime support, such as SQLite.
    chunksize : int, default None
        If specified, return an iterator where `chunksize` is the number
        of rows to include in each chunk.
    dtype : Type name or dict of columns
        Data type for data or columns. E.g. np.float64 or
        {'a': np.float64, 'b': np.int32, 'c': 'Int64'}
    dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy'
        Back-end data type applied to the resultant :class:`DataFrame`
        (still experimental).

    Returns
    -------
    DataFrame

    See Also
    --------
    read_sql_table : Read SQL database table into a DataFrame.
    read_sql

    """
    result = self.execute(sql, params)
    columns = result.keys()

    if chunksize is not None:
        # the returned generator takes ownership of closing self.exit_stack
        self.returns_generator = True
        return self._query_iterator(
            result,
            self.exit_stack,
            chunksize,
            columns,
            index_col=index_col,
            coerce_float=coerce_float,
            parse_dates=parse_dates,
            dtype=dtype,
            dtype_backend=dtype_backend,
        )
    else:
        data = result.fetchall()
        frame = _wrap_result(
            data,
            columns,
            index_col=index_col,
            coerce_float=coerce_float,
            parse_dates=parse_dates,
            dtype=dtype,
            dtype_backend=dtype_backend,
        )
        return frame

# alias kept for the generic `read_sql` dispatch
read_sql = read_query
def prep_table(
    self,
    frame,
    name: str,
    if_exists: Literal["fail", "replace", "append", "delete_rows"] = "fail",
    index: bool | str | list[str] | None = True,
    index_label=None,
    schema=None,
    dtype: DtypeArg | None = None,
) -> SQLTable:
    """
    Prepares table in the database for data insertion. Creates it if needed, etc.
    """
    if dtype:
        if not is_dict_like(dtype):
            # a single type: apply it to every column
            # error: Value expression in dictionary comprehension has incompatible
            # type "Union[ExtensionDtype, str, dtype[Any], Type[object],
            # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
            # Type[str], Type[float], Type[int], Type[complex], Type[bool],
            # Type[object]]]]"; expected type "Union[ExtensionDtype, str,
            # dtype[Any], Type[object]]"
            dtype = dict.fromkeys(frame, dtype)  # type: ignore[arg-type]
        else:
            dtype = cast(dict, dtype)

        from sqlalchemy.types import TypeEngine

        # every value must be a SQLAlchemy type class or instance
        for col, my_type in dtype.items():
            if isinstance(my_type, type) and issubclass(my_type, TypeEngine):
                pass
            elif isinstance(my_type, TypeEngine):
                pass
            else:
                raise ValueError(f"The type of {col} is not a SQLAlchemy type")

    table = SQLTable(
        name,
        self,
        frame=frame,
        index=index,
        if_exists=if_exists,
        index_label=index_label,
        schema=schema,
        dtype=dtype,
    )
    table.create()
    return table
def check_case_sensitive(
    self,
    name: str,
    schema: str | None,
) -> None:
    """
    Warn when a just-written table name cannot be found back in the database.

    Called after data is inserted. Mixed-case names may be folded by the
    backend (GH7815), so look the name up and warn when it is missing.
    Purely numeric or all-lowercase names cannot be affected and are skipped.
    """
    if name.isdigit() or name.islower():
        # numbers and lower-case names are immune to case folding
        return
    from sqlalchemy import inspect as sqlalchemy_inspect

    insp = sqlalchemy_inspect(self.con)
    table_names = insp.get_table_names(schema=schema or self.meta.schema)
    if name in table_names:
        return
    msg = (
        f"The provided table name '{name}' is not found exactly as "
        "such in the database after writing the table, possibly "
        "due to case sensitivity issues. Consider using lower "
        "case table names."
    )
    warnings.warn(
        msg,
        UserWarning,
        stacklevel=find_stack_level(),
    )
def to_sql(
    self,
    frame,
    name: str,
    if_exists: Literal["fail", "replace", "append", "delete_rows"] = "fail",
    index: bool = True,
    index_label=None,
    schema: str | None = None,
    chunksize: int | None = None,
    dtype: DtypeArg | None = None,
    method: Literal["multi"] | Callable | None = None,
    engine: str = "auto",
    **engine_kwargs,
) -> int | None:
    """
    Write records stored in a DataFrame to a SQL database.

    Parameters
    ----------
    frame : DataFrame
    name : string
        Name of SQL table.
    if_exists : {'fail', 'replace', 'append', 'delete_rows'}, default 'fail'
        - fail: If table exists, do nothing.
        - replace: If table exists, drop it, recreate it, and insert data.
        - append: If table exists, insert data. Create if does not exist.
        - delete_rows: If a table exists, delete all records and insert data.
    index : boolean, default True
        Write DataFrame index as a column.
    index_label : string or sequence, default None
        Column label for index column(s). If None is given (default) and
        `index` is True, then the index names are used.
        A sequence should be given if the DataFrame uses MultiIndex.
    schema : string, default None
        Name of SQL schema in database to write to (if database flavor
        supports this). If specified, this overwrites the default
        schema of the SQLDatabase object.
    chunksize : int, default None
        If not None, then rows will be written in batches of this size at a
        time. If None, all rows will be written at once.
    dtype : single type or dict of column name to SQL type, default None
        Optional specifying the datatype for columns. The SQL type should
        be a SQLAlchemy type. If all columns are of the same type, one
        single value can be used.
    method : {None', 'multi', callable}, default None
        Controls the SQL insertion clause used:

        * None : Uses standard SQL ``INSERT`` clause (one per row).
        * 'multi': Pass multiple values in a single ``INSERT`` clause.
        * callable with signature ``(pd_table, conn, keys, data_iter)``.

        Details and a sample callable implementation can be found in the
        section :ref:`insert method <io.sql.method>`.
    engine : {'auto', 'sqlalchemy'}, default 'auto'
        SQL engine library to use. If 'auto', then the option
        ``io.sql.engine`` is used. The default ``io.sql.engine``
        behavior is 'sqlalchemy'
    **engine_kwargs
        Any additional kwargs are passed to the engine.

    Returns
    -------
    int or None
        The value reported by the engine's ``insert_records`` (typically the
        number of rows inserted), or None when not reported.
    """
    sql_engine = get_engine(engine)
    # create (or replace/truncate, per `if_exists`) the destination table first
    table = self.prep_table(
        frame=frame,
        name=name,
        if_exists=if_exists,
        index=index,
        index_label=index_label,
        schema=schema,
        dtype=dtype,
    )
    total_inserted = sql_engine.insert_records(
        table=table,
        con=self.con,
        frame=frame,
        name=name,
        index=index,
        schema=schema,
        chunksize=chunksize,
        method=method,
        **engine_kwargs,
    )
    # warn if the written name cannot be found back (case-folding backends, GH7815)
    self.check_case_sensitive(name=name, schema=schema)
    return total_inserted
@property
def tables(self):
    """The tables currently reflected into this database's MetaData."""
    return self.meta.tables
def has_table(self, name: str, schema: str | None = None) -> bool:
    """Return True if table ``name`` exists in ``schema`` (or the default schema)."""
    from sqlalchemy import inspect as sqlalchemy_inspect

    insp = sqlalchemy_inspect(self.con)
    return insp.has_table(name, schema or self.meta.schema)
def get_table(self, table_name: str, schema: str | None = None) -> Table:
    """Reflect and return the SQLAlchemy ``Table`` for ``table_name``."""
    from sqlalchemy import (
        Numeric,
        Table,
    )

    schema = schema or self.meta.schema
    tbl = Table(table_name, self.meta, autoload_with=self.con, schema=schema)
    for column in tbl.columns:
        if isinstance(column.type, Numeric):
            # return floats instead of decimal.Decimal for numeric columns
            column.type.asdecimal = False
    return tbl
def drop_table(self, table_name: str, schema: str | None = None) -> None:
    """Drop ``table_name`` if it exists; a no-op otherwise."""
    schema = schema or self.meta.schema
    if self.has_table(table_name, schema):
        # reflect first so get_table below can resolve the table object
        self.meta.reflect(
            bind=self.con, only=[table_name], schema=schema, views=True
        )
        with self.run_transaction():
            self.get_table(table_name, schema).drop(bind=self.con)
        # drop cached reflection state so later reflects start fresh
        self.meta.clear()
def delete_rows(self, table_name: str, schema: str | None = None) -> None:
    """Delete all rows from ``table_name`` (keeping the table); a no-op if it does not exist."""
    schema = schema or self.meta.schema
    if self.has_table(table_name, schema):
        self.meta.reflect(
            bind=self.con, only=[table_name], schema=schema, views=True
        )
        table = self.get_table(table_name, schema)
        # close the result immediately; DELETE returns no rows we care about
        self.execute(table.delete()).close()
        self.meta.clear()
def _create_sql_schema(
    self,
    frame: DataFrame,
    table_name: str,
    keys: list[str] | None = None,
    dtype: DtypeArg | None = None,
    schema: str | None = None,
) -> str:
    """Return the CREATE TABLE statement for ``frame`` as a string, without executing it."""
    table = SQLTable(
        table_name,
        self,
        frame=frame,
        index=False,
        keys=keys,
        dtype=dtype,
        schema=schema,
    )
    return str(table.sql_schema())
# ---- SQL without SQLAlchemy ---
| SQLDatabase |
python | davidhalter__parso | parso/python/errors.py | {
"start": 21529,
"end": 22838
} | class ____(SyntaxRule):
if sys.version_info < (3, 10):
message = "bytes can only contain ASCII literal characters."
else:
message = "bytes can only contain ASCII literal characters"
def is_issue(self, leaf):
    """Check a string literal ``leaf`` for invalid content.

    Returns True only for bytes literals containing non-ASCII characters
    (the rule's class-level message applies). Bad escape sequences are
    reported directly via ``add_issue`` instead of through the return value.
    """
    string_prefix = leaf.string_prefix.lower()
    if 'b' in string_prefix \
            and any(c for c in leaf.value if ord(c) > 127):
        # b'ä'
        return True

    if 'r' not in string_prefix:
        # Raw strings don't need to be checked if they have proper
        # escaping.
        payload = leaf._get_payload()
        if 'b' in string_prefix:
            payload = payload.encode('utf-8')
            # NOTE(review): codecs.escape_decode is an undocumented CPython API —
            # confirm it stays available on all supported Python versions.
            func = codecs.escape_decode
        else:
            func = codecs.unicode_escape_decode

        try:
            with warnings.catch_warnings():
                # The warnings from parsing strings are not relevant.
                warnings.filterwarnings('ignore')
                func(payload)
        except UnicodeDecodeError as e:
            self.add_issue(leaf, message='(unicode error) ' + str(e))
        except ValueError as e:
            self.add_issue(leaf, message='(value error) ' + str(e))
@ErrorFinder.register_rule(value='*')
| _StringChecks |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/streaming/_beta_messages.py | {
"start": 6223,
"end": 10321
} | class ____(Generic[ResponseFormatT]):
text_stream: AsyncIterator[str]
"""Async iterator over just the text deltas in the stream.
```py
async for text in stream.text_stream:
print(text, end="", flush=True)
print()
```
"""
def __init__(
    self,
    raw_stream: AsyncStream[BetaRawMessageStreamEvent],
    output_format: ResponseFormatT | NotGiven,
) -> None:
    """Wrap a raw SSE event stream with message accumulation and typed event helpers."""
    self._raw_stream = raw_stream
    # Public async iterator over just the text deltas (see the class attribute docs).
    self.text_stream = self.__stream_text__()
    # Single shared iterator backing both __anext__ and __aiter__.
    self._iterator = self.__stream__()
    # Running Message accumulated from events seen so far; None until the first event.
    self.__final_message_snapshot: ParsedBetaMessage[ResponseFormatT] | None = None
    self.__output_format = output_format
@property
def response(self) -> httpx.Response:
    """The underlying HTTP response of the raw stream."""
    return self._raw_stream.response
@property
def request_id(self) -> str | None:
    """The ``request-id`` response header, if present."""
    return self.response.headers.get("request-id")  # type: ignore[no-any-return]
async def __anext__(self) -> ParsedBetaMessageStreamEvent[ResponseFormatT]:
    # Delegate to the single shared iterator so anext() and `async for` stay in sync.
    return await self._iterator.__anext__()
async def __aiter__(self) -> AsyncIterator[ParsedBetaMessageStreamEvent[ResponseFormatT]]:
    # Re-yield from the shared iterator (rather than returning it) so this stays
    # an async generator and consumes the same underlying state as __anext__.
    async for item in self._iterator:
        yield item
async def __aenter__(self) -> Self:
    # Support `async with`; the connection is released in __aexit__.
    return self
async def __aexit__(
    self,
    exc_type: type[BaseException] | None,
    exc: BaseException | None,
    exc_tb: TracebackType | None,
) -> None:
    # Always release the connection, regardless of exceptions in the body.
    await self.close()
async def close(self) -> None:
    """
    Close the response and release the connection.

    Automatically called if the response body is read to completion.
    """
    # Delegates to the raw stream, which owns the underlying HTTP connection.
    await self._raw_stream.close()
async def get_final_message(self) -> ParsedBetaMessage[ResponseFormatT]:
    """Waits until the stream has been read to completion and returns
    the accumulated `Message` object.
    """
    await self.until_done()
    # __stream__ populates the snapshot as events arrive; consuming to
    # completion should therefore have set it.
    assert self.__final_message_snapshot is not None
    return self.__final_message_snapshot
async def get_final_text(self) -> str:
    """Returns all `text` content blocks concatenated together.

    > [!NOTE]
    > Currently the API will only respond with a single content block.

    Will raise an error if no `text` content blocks were returned.
    """
    message = await self.get_final_message()
    pieces = [block.text for block in message.content if block.type == "text"]
    if not pieces:
        raise RuntimeError(
            f".get_final_text() can only be called when the API returns a `text` content block.\nThe API returned {','.join([b.type for b in message.content])} content block type(s) that you can access by calling get_final_message().content"
        )
    return "".join(pieces)
async def until_done(self) -> None:
    """Waits until the stream has been consumed"""
    # Drain the event iterator purely for its side effect of accumulating the snapshot.
    await consume_async_iterator(self)
# properties
@property
def current_message_snapshot(self) -> ParsedBetaMessage[ResponseFormatT]:
    """The message accumulated from events so far; only valid once at least one event has arrived."""
    assert self.__final_message_snapshot is not None
    return self.__final_message_snapshot
async def __stream__(self) -> AsyncIterator[ParsedBetaMessageStreamEvent[ResponseFormatT]]:
    """Translate raw SSE events into typed events while accumulating the message snapshot."""
    async for sse_event in self._raw_stream:
        # Fold the raw event into the running Message snapshot first...
        self.__final_message_snapshot = accumulate_event(
            event=sse_event,
            current_snapshot=self.__final_message_snapshot,
            request_headers=self.response.request.headers,
            output_format=self.__output_format,
        )
        # ...then derive the user-facing event(s); one raw event may yield several.
        events_to_fire = build_events(event=sse_event, message_snapshot=self.current_message_snapshot)
        for event in events_to_fire:
            yield event
async def __stream_text__(self) -> AsyncIterator[str]:
    """Yield only the text deltas from the event stream (backs ``text_stream``)."""
    async for chunk in self:
        if chunk.type == "content_block_delta" and chunk.delta.type == "text_delta":
            yield chunk.delta.text
| BetaAsyncMessageStream |
python | numba__numba | numba/cuda/vectorizers.py | {
"start": 2900,
"end": 4196
} | class ____(GUFuncCallSteps):
__slots__ = [
'_stream',
]
def __init__(self, nin, nout, args, kwargs):
super().__init__(nin, nout, args, kwargs)
self._stream = kwargs.get('stream', 0)
def is_device_array(self, obj):
return cuda.is_cuda_array(obj)
def as_device_array(self, obj):
# We don't want to call as_cuda_array on objects that are already Numba
# device arrays, because this results in exporting the array as a
# Producer then importing it as a Consumer, which causes a
# synchronization on the array's stream (if it has one) by default.
# When we have a Numba device array, we can simply return it.
if cuda.cudadrv.devicearray.is_cuda_ndarray(obj):
return obj
return cuda.as_cuda_array(obj)
def to_device(self, hostary):
return cuda.to_device(hostary, stream=self._stream)
def to_host(self, devary, hostary):
out = devary.copy_to_host(hostary, stream=self._stream)
return out
def allocate_device_array(self, shape, dtype):
return cuda.device_array(shape=shape, dtype=dtype, stream=self._stream)
def launch_kernel(self, kernel, nelem, args):
kernel.forall(nelem, stream=self._stream)(*args)
| _CUDAGUFuncCallSteps |
python | getsentry__sentry | src/sentry/feedback/endpoints/project_user_reports.py | {
"start": 1331,
"end": 6588
} | class ____(ProjectEndpoint):
owner = ApiOwner.FEEDBACK
publish_status = {
"GET": ApiPublishStatus.PRIVATE, # TODO: deprecate
"POST": ApiPublishStatus.PRIVATE, # TODO: deprecate
}
authentication_classes = ProjectEndpoint.authentication_classes + (DSNAuthentication,)
def get(self, request: Request, project) -> Response:
"""
List a Project's User Feedback
``````````````````````````````
Return a list of user feedback items within this project.
*This list does not include submissions from the [User Feedback Widget](https://docs.sentry.io/product/user-feedback/#user-feedback-widget). This is because it is based on an older format called User Reports - read more [here](https://develop.sentry.dev/application/feedback-architecture/#user-reports). To return a list of user feedback items from the widget, please use the [issue API](https://docs.sentry.io/api/events/list-a-projects-issues/) with the filter `issue.category:feedback`.*
:pparam string organization_id_or_slug: the id or slug of the organization.
:pparam string project_id_or_slug: the id or slug of the project.
:auth: required
"""
# we don't allow read permission with DSNs
if request.auth is not None and request.auth.kind == "project_key":
return self.respond(status=401)
paginate_kwargs: _PaginateKwargs = {}
try:
environment = get_environment(request, project.organization_id)
except Environment.DoesNotExist:
queryset = UserReport.objects.none()
else:
retention = quotas.backend.get_event_retention(organization=project.organization)
start = datetime.now(UTC) - timedelta(days=retention) if retention else epoch
queryset = UserReport.objects.filter(
project_id=project.id, group_id__isnull=False, date_added__gte=start
)
if environment is not None:
queryset = queryset.filter(environment_id=environment.id)
status = request.GET.get("status", "unresolved")
if status == "unresolved":
paginate_kwargs["post_query_filter"] = user_reports_filter_to_unresolved
elif status:
return self.respond({"status": "Invalid status choice"}, status=400)
return self.paginate(
request=request,
queryset=queryset,
order_by="-date_added",
on_results=lambda x: serialize(
x,
request.user,
UserReportWithGroupSerializer(
environment_func=get_environment_func(request, project.organization_id)
),
),
paginator_cls=DateTimePaginator,
**paginate_kwargs,
)
def post(self, request: Request, project) -> Response:
"""
Submit User Feedback
````````````````````
*We only recommend this endpoint if your Sentry SDK does not support the User Feedback API. See the [list of supported platforms](https://docs.sentry.io/product/user-feedback/setup/#supported-sdks-for-user-feedback-api).*
Submit and associate user feedback with an issue.
Feedback must be received by the server no more than 30 minutes after the event was saved.
Additionally, within 5 minutes of submitting feedback it may also be overwritten. This is useful
in situations where you may need to retry sending a request due to network failures.
If feedback is rejected due to a mutability threshold, a 409 status code will be returned.
Note: Feedback may be submitted with DSN authentication (see auth documentation).
:pparam string organization_id_or_slug: the id or slug of the organization.
:pparam string project_id_or_slug: the id or slug of the project.
:auth: required
:param string event_id: the event ID
:param string name: user's name
:param string email: user's email address
:param string comments: comments supplied by user
"""
if (
request.auth is not None
and request.auth.project_id is not None
and project.id != request.auth.project_id
):
return self.respond(status=401)
serializer = UserReportSerializer(data=request.data)
if not serializer.is_valid():
return self.respond(serializer.errors, status=400)
report = serializer.validated_data
try:
report_instance = save_userreport(
project, report, FeedbackCreationSource.USER_REPORT_DJANGO_ENDPOINT
)
except Conflict as e:
return self.respond({"detail": str(e)}, status=409)
if request.auth is not None and request.auth.kind == "project_key":
return self.respond(status=200)
return self.respond(
serialize(
report_instance,
request.user,
UserReportWithGroupSerializer(
environment_func=get_environment_func(request, project.organization_id)
),
)
)
| ProjectUserReportsEndpoint |
python | vyperlang__vyper | vyper/semantics/types/base.py | {
"start": 13892,
"end": 15535
} | class ____(VyperType):
def __init__(self, typedef):
super().__init__()
self.typedef = typedef
def to_dict(self):
return {"type_t": self.typedef.to_dict()}
def __repr__(self):
return f"type({self.typedef})"
def check_modifiability_for_call(self, node, modifiability):
if hasattr(self.typedef, "_ctor_modifiability_for_call"):
return self.typedef._ctor_modifiability_for_call(node, modifiability)
raise StructureException("Value is not callable", node)
# dispatch into ctor if it's called
def fetch_call_return(self, node):
if hasattr(self.typedef, "_ctor_call_return"):
return self.typedef._ctor_call_return(node)
raise StructureException("Value is not callable", node)
def infer_arg_types(self, node, expected_return_typ=None):
if hasattr(self.typedef, "_ctor_arg_types"):
return self.typedef._ctor_arg_types(node)
raise StructureException("Value is not callable", node)
def infer_kwarg_types(self, node):
if hasattr(self.typedef, "_ctor_kwarg_types"):
return self.typedef._ctor_kwarg_types(node)
raise StructureException("Value is not callable", node)
# dispatch into get_type_member if it's dereferenced, ex.
# MyFlag.FOO
def get_member(self, key, node):
if hasattr(self.typedef, "get_type_member"):
return self.typedef.get_type_member(key, node)
raise UnknownAttribute("Value is not attributable", node)
def is_type_t(x: VyperType, t: type) -> bool:
return isinstance(x, TYPE_T) and isinstance(x.typedef, t)
| TYPE_T |
python | ray-project__ray | python/ray/train/v2/_internal/execution/checkpoint/checkpoint_manager.py | {
"start": 1053,
"end": 1208
} | class ____(BaseModel):
# Increment version if the schema changes
version: int = 0
checkpoint_dir_name: str
metrics: dict
| _TrainingResultState |
python | pytorch__pytorch | torch/distributions/transforms.py | {
"start": 23091,
"end": 26596
} | class ____(Transform):
r"""
Transform via the pointwise affine mapping :math:`y = \text{loc} + \text{scale} \times x`.
Args:
loc (Tensor or float): Location parameter.
scale (Tensor or float): Scale parameter.
event_dim (int): Optional size of `event_shape`. This should be zero
for univariate random variables, 1 for distributions over vectors,
2 for distributions over matrices, etc.
"""
bijective = True
def __init__(
self,
loc: Union[Tensor, float],
scale: Union[Tensor, float],
event_dim: int = 0,
cache_size: int = 0,
) -> None:
super().__init__(cache_size=cache_size)
self.loc = loc
self.scale = scale
self._event_dim = event_dim
@property
def event_dim(self) -> int:
return self._event_dim
@constraints.dependent_property(is_discrete=False)
# pyrefly: ignore [bad-override]
def domain(self):
if self.event_dim == 0:
return constraints.real
return constraints.independent(constraints.real, self.event_dim)
@constraints.dependent_property(is_discrete=False)
# pyrefly: ignore [bad-override]
def codomain(self):
if self.event_dim == 0:
return constraints.real
return constraints.independent(constraints.real, self.event_dim)
def with_cache(self, cache_size=1):
if self._cache_size == cache_size:
return self
return AffineTransform(
self.loc, self.scale, self.event_dim, cache_size=cache_size
)
def __eq__(self, other):
if not isinstance(other, AffineTransform):
return False
if isinstance(self.loc, _Number) and isinstance(other.loc, _Number):
if self.loc != other.loc:
return False
else:
if not (self.loc == other.loc).all().item(): # type: ignore[union-attr]
return False
if isinstance(self.scale, _Number) and isinstance(other.scale, _Number):
if self.scale != other.scale:
return False
else:
if not (self.scale == other.scale).all().item(): # type: ignore[union-attr]
return False
return True
@property
def sign(self) -> Union[Tensor, int]: # type: ignore[override]
if isinstance(self.scale, _Number):
return 1 if float(self.scale) > 0 else -1 if float(self.scale) < 0 else 0
return self.scale.sign()
def _call(self, x):
return self.loc + self.scale * x
def _inverse(self, y):
return (y - self.loc) / self.scale
def log_abs_det_jacobian(self, x, y):
shape = x.shape
scale = self.scale
if isinstance(scale, _Number):
result = torch.full_like(x, math.log(abs(scale)))
else:
result = torch.abs(scale).log()
if self.event_dim:
result_size = result.size()[: -self.event_dim] + (-1,)
result = result.view(result_size).sum(-1)
shape = shape[: -self.event_dim]
return result.expand(shape)
def forward_shape(self, shape):
return torch.broadcast_shapes(
shape, getattr(self.loc, "shape", ()), getattr(self.scale, "shape", ())
)
def inverse_shape(self, shape):
return torch.broadcast_shapes(
shape, getattr(self.loc, "shape", ()), getattr(self.scale, "shape", ())
)
| AffineTransform |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 162922,
"end": 164128
} | class ____(torch.nn.Module):
def forward(self, L_inputs_: "f32[1, 1]", L_model_modules_l1_parameters_weight_: "f32[1, 1]", L_model_modules_l1_parameters_bias_: "f32[1]", L_model_buffers_buffer_: "f32[1]"):
l_inputs_ = L_inputs_
l_model_modules_l1_parameters_weight_ = L_model_modules_l1_parameters_weight_
l_model_modules_l1_parameters_bias_ = L_model_modules_l1_parameters_bias_
l_model_buffers_buffer_ = L_model_buffers_buffer_
linear: "f32[1, 1]" = torch._C._nn.linear(l_inputs_, l_model_modules_l1_parameters_weight_, l_model_modules_l1_parameters_bias_); l_inputs_ = l_model_modules_l1_parameters_weight_ = l_model_modules_l1_parameters_bias_ = None
add: "f32[1, 1]" = linear + l_model_buffers_buffer_; linear = l_model_buffers_buffer_ = None
return (add,)
"""
# We found Windows/Linux have some empty line difference, empty_line_normalizer will help fix it.
self.assertExpectedInline(
empty_line_normalizer(actual),
empty_line_normalizer(normalize_gm(expected)),
)
else:
self.assertExpectedInline(
actual,
"""\
| GraphModule |
python | huggingface__transformers | src/transformers/models/perceiver/modeling_perceiver.py | {
"start": 4382,
"end": 4729
} | class ____(nn.Module):
"""Construct the latent embeddings."""
def __init__(self, config):
super().__init__()
self.latents = nn.Parameter(torch.randn(config.num_latents, config.d_latents))
def forward(self, batch_size: int):
return self.latents.expand(batch_size, -1, -1) # Thanks, Phil Wang
| PerceiverEmbeddings |
python | getsentry__sentry | tests/sentry/sentry_metrics/consumers/test_routing_producer.py | {
"start": 431,
"end": 2108
} | class ____(MessageRouter):
def __init__(self) -> None:
self.all_producers: MutableSequence[Producer] = []
for _ in range(3):
self.all_producers.append(Producer({"bootstrap.servers": "127.0.0.1:9092"}))
def get_all_producers(self) -> Sequence[Producer]:
return self.all_producers
def get_route_for_message(self, message: Message[RoutingPayload]) -> MessageRoute:
routing_key = message.payload.routing_header["key"]
dest_id = routing_key % len(self.all_producers)
return MessageRoute(self.all_producers[dest_id], Topic(f"result-topic-{dest_id}"))
@pytest.mark.skip("Check whether this test is failing in CI")
def test_routing_producer() -> None:
"""
Test that the routing producer step correctly routes messages to the desired
producer and topic. This uses the RoundRobinRouter, which routes messages to
three different producers and topics
"""
epoch = datetime(1970, 1, 1)
orig_topic = Topic("orig-topic")
commit = mock.Mock()
router = RoundRobinRouter()
strategy = RoutingProducerStep(
commit_function=commit,
message_router=router,
)
for i in range(3):
value = b'{"something": "something"}'
data = RoutingPayload(
routing_header={"key": i}, routing_message=KafkaPayload(None, value, [])
)
message = Message(
BrokerValue(
data,
Partition(orig_topic, 0),
1,
epoch,
)
)
strategy.submit(message)
strategy.poll()
strategy.join()
assert commit.call_count >= 3
| RoundRobinRouter |
python | pikepdf__pikepdf | tests/test_matrix.py | {
"start": 428,
"end": 4188
} | class ____:
def test_default_is_identity(self):
assert Matrix() == Matrix(1, 0, 0, 1, 0, 0) == Matrix.identity()
def test_not_enough_args(self):
with pytest.raises(TypeError):
Matrix(1, 2, 3, 4, 5)
def test_tuple(self):
assert Matrix() == Matrix((1, 0, 0, 1, 0, 0))
with pytest.raises(ValueError):
Matrix((1, 2, 3, 4, 5))
def test_failed_object_conversion(self):
with pytest.raises(ValueError):
assert Matrix(Array([1, 2, 3]))
with pytest.raises(ValueError):
assert Matrix(Dictionary(Foo=1))
def test_accessors(self):
m = Matrix(1, 2, 3, 4, 5, 6)
assert m.a == 1
assert m.b == 2
assert m.c == 3
assert m.d == 4
assert m.e == 5
assert m.f == 6
def test_init(self):
m = Matrix()
m2 = m.scaled(2, 2)
m2t = m2.translated(2, 3)
assert repr(m2t) == 'pikepdf.Matrix(2, 0, 0, 2, 4, 6)'
m2tr = m2t.rotated(90)
expected = Matrix(0, 2, -2, 0, 4, 6)
assert allclose(m2tr, expected)
def test_init_copy(self):
m = Matrix(1, 2, 3, 4, 5, 6)
m2 = Matrix(m)
assert m == m2
def test_init_from_objlist(self):
a = Array([1, 2, 3, 4, 5, 6])
assert Matrix(a.as_list()).shorthand == (1, 2, 3, 4, 5, 6)
b = Array([1, 2, 3, 4, 5])
with pytest.raises(ValueError, match='must have 6 elements'):
Matrix(b.as_list())
c = Array([1, 2, b"foo", 4, 5, 6])
with pytest.raises(ValueError, match='must be numeric'):
Matrix(c.as_list())
def test_matmul(self):
m = Matrix()
scale = Matrix().scaled(3, 3)
translate = Matrix().translated(10, 10)
assert allclose(translate @ scale @ m, Matrix(3, 0, 0, 3, 30, 30))
assert allclose(scale @ translate @ m, Matrix(3, 0, 0, 3, 10, 10))
assert allclose(m.scaled(3, 3).translated(10, 10), Matrix(3, 0, 0, 3, 30, 30))
def test_inverse(self):
m = Matrix().rotated(45)
minv_m = m.inverse() @ m
assert allclose(minv_m, Matrix())
def test_non_invertible(self):
m = Matrix(4, 4, 4, 4, 0, 0)
with pytest.raises(ValueError, match='not invertible'):
m.inverse()
def test_numpy(self):
np = pytest.importorskip('numpy')
m = Matrix(1, 0, 0, 2, 7, 0)
a = np.array([[1, 0, 0], [0, 2, 0], [7, 0, 1]])
arr = np.array(m)
assert np.array_equal(arr, a)
def test_bool(self):
with pytest.raises(ValueError):
bool(Matrix(1, 0, 0, 1, 0, 0))
def test_pickle(self):
assert Matrix(1, 0, 0, 1, 42, 0) == pickle.loads(
pickle.dumps(Matrix(1, 0, 0, 1, 42, 0))
)
def test_encode(self):
assert Matrix((1, 2, 3, 4, 0, 0)).encode() == b'1 2 3 4 0 0'
def test_from_object_array(self):
assert Matrix(Array([1, 2, 3, 4, 5, 6])).shorthand == (1, 2, 3, 4, 5, 6)
def test_transform_point(self):
m = Matrix(1, 0, 0, 1, 0, 0)
assert m.transform((0, 0)) == (0, 0)
assert m.transform((1, 1)) == (1, 1)
m = Matrix(2, 0, 0, 2, 0, 1)
assert m.transform((0, 0)) == (0, 1)
assert m.transform((1, 1)) == (2, 3)
def test_transform_rect(self):
m = Matrix(2, 0, 0, 2, 1, 1)
assert m.transform(Rectangle(0, 0, 1, 1)) == Rectangle(1, 1, 3, 3)
def test_rotated_ccw(self):
m = Matrix().rotated(45)
assert (0, 0) < m.transform((1, 0)) < (1, 1)
m = Matrix().rotated(-45)
assert (0, 0) < m.transform((1, 0)) < (1, -1)
def test_latex(self):
assert '\\begin' in Matrix(1, 0, 0, 1, 0, 0)._repr_latex_()
| TestMatrix |
python | pytorch__pytorch | tools/testing/target_determination/heuristics/mentioned_in_pr.py | {
"start": 739,
"end": 2298
} | class ____(HeuristicInterface):
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
def _search_for_linked_issues(self, s: str) -> list[str]:
return re.findall(r"#(\d+)", s) + re.findall(r"/pytorch/pytorch/.*/(\d+)", s)
def get_prediction_confidence(self, tests: list[str]) -> TestPrioritizations:
try:
commit_messages = get_git_commit_info()
except Exception as e:
print(f"Can't get commit info due to {e}")
commit_messages = ""
try:
pr_number = get_pr_number()
if pr_number is not None:
pr_body = get_issue_or_pr_body(pr_number)
else:
pr_body = ""
except Exception as e:
print(f"Can't get PR body due to {e}")
pr_body = ""
# Search for linked issues or PRs
linked_issue_bodies: list[str] = []
for issue in self._search_for_linked_issues(
commit_messages
) + self._search_for_linked_issues(pr_body):
try:
linked_issue_bodies.append(get_issue_or_pr_body(int(issue)))
except Exception:
pass
mentioned = []
for test in tests:
if (
test in commit_messages
or test in pr_body
or any(test in body for body in linked_issue_bodies)
):
mentioned.append(test)
return TestPrioritizations(tests, {TestRun(test): 1 for test in mentioned})
| MentionedInPR |
python | TheAlgorithms__Python | data_structures/linked_list/merge_two_lists.py | {
"start": 303,
"end": 358
} | class ____:
data: int
next_node: Node | None
| Node |
python | pypa__setuptools | setuptools/_vendor/jaraco/collections/__init__.py | {
"start": 21954,
"end": 22828
} | class ____(ItemsAsAttributes, BijectiveMap):
"""
A convenient way to provide enumerated values
>>> e = Enumeration('a b c')
>>> e['a']
0
>>> e.a
0
>>> e[1]
'b'
>>> set(e.names) == set('abc')
True
>>> set(e.codes) == set(range(3))
True
>>> e.get('d') is None
True
Codes need not start with 0
>>> e = Enumeration('a b c', range(1, 4))
>>> e['a']
1
>>> e[3]
'c'
"""
def __init__(self, names, codes=None):
if isinstance(names, str):
names = names.split()
if codes is None:
codes = itertools.count()
super().__init__(zip(names, codes))
@property
def names(self):
return (key for key in self if isinstance(key, str))
@property
def codes(self):
return (self[name] for name in self.names)
| Enumeration |
python | bokeh__bokeh | src/bokeh/application/handlers/code.py | {
"start": 2265,
"end": 6974
} | class ____(Handler):
''' Run source code which modifies a Document
'''
# These functions, if present in the supplied code, will be monkey patched
# to be no-ops, with a warning.
_io_functions = ['output_notebook', 'output_file', 'show', 'save', 'reset_output']
_loggers: dict[str, Callable[..., None]]
_logger_text: ClassVar[str]
_origin: ClassVar[str]
def __init__(self, *, source: str, filename: PathLike, argv: list[str] = [], package: ModuleType | None = None) -> None:
'''
Args:
source (str) : python source code
filename (str) : a filename to use in any debugging or error output
argv (list[str], optional) : a list of string arguments to make
available as ``sys.argv`` when the code executes
'''
super().__init__()
self._runner = CodeRunner(source, filename, argv, package=package)
self._loggers = {}
for f in CodeHandler._io_functions:
self._loggers[f] = self._make_io_logger(f)
# Properties --------------------------------------------------------------
@property
def error(self) -> str | None:
''' If the handler fails, may contain a related error message.
'''
return self._runner.error
@property
def error_detail(self) -> str | None:
''' If the handler fails, may contain a traceback or other details.
'''
return self._runner.error_detail
@property
def failed(self) -> bool:
''' ``True`` if the handler failed to modify the doc
'''
return self._runner.failed
@property
def safe_to_fork(self) -> bool:
''' Whether it is still safe for the Bokeh server to fork new workers.
``False`` if the code has already been executed.
'''
return not self._runner.ran
# Public methods ----------------------------------------------------------
def modify_document(self, doc: Document) -> None:
''' Run Bokeh application code to update a ``Document``
Args:
doc (Document) : a ``Document`` to update
'''
module = self._runner.new_module()
# If no module was returned it means the code runner has some permanent
# unfixable problem, e.g. the configured source code has a syntax error
if module is None:
return
# One reason modules are stored is to prevent the module from being gc'd
# before the document is. A symptom of a gc'd module is that its globals
# become None. Additionally stored modules are used to provide correct
# paths to custom models resolver.
doc.modules.add(module)
with _monkeypatch_io(self._loggers):
with patch_curdoc(doc):
self._runner.run(module, self._make_post_doc_check(doc))
def url_path(self) -> str | None:
''' The last path component for the basename of the configured filename.
'''
if self.failed:
return None
# TODO should fix invalid URL characters
return '/' + splitext(basename(self._runner.path))[0]
# Private methods ---------------------------------------------------------
# subclasses must define self._logger_text
def _make_io_logger(self, name: str) -> Callable[..., None]:
def logger(*args: Any, **kwargs: Any) -> None:
log.info(self._logger_text, self._runner.path, name)
return logger
# script is supposed to edit the doc not replace it
def _make_post_doc_check(self, doc: Document) -> Callable[[], None]:
def func() -> None:
if curdoc() is not doc:
raise RuntimeError(f"{self._origin} at {self._runner.path!r} replaced the output document")
return func
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
# monkeypatching is a little ugly, but in this case there's no reason any legitimate
# code should be calling these functions, and we're only making a best effort to
# warn people so no big deal if we fail.
@contextmanager
def _monkeypatch_io(loggers: dict[str, Callable[..., None]]) -> dict[str, Any]:
import bokeh.io as io
old: dict[str, Any] = {}
for f in CodeHandler._io_functions:
old[f] = getattr(io, f)
setattr(io, f, loggers[f])
yield
for f in old:
setattr(io, f, old[f])
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| CodeHandler |
python | realpython__materials | nearbyshops/shops/migrations/0001_initial.py | {
"start": 135,
"end": 876
} | class ____(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Shop',
fields=[
('id', models.AutoField(auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('name', models.CharField(max_length=70)),
('location',
django.contrib.gis.db.models.fields.PointField(srid=4326)),
('address', models.CharField(max_length=100)),
('city', models.CharField(max_length=50)),
],
)
]
| Migration |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor6.py | {
"start": 1115,
"end": 1486
} | class ____(Generic[_T1]):
@overload
def __init__(
self: "ForeignKey[_T2]", to: Type[_T2], *, null: Literal[False] = ...
) -> None: ...
@overload
def __init__(
self: "ForeignKey[Optional[_T2]]", to: Type[_T2], *, null: Literal[True]
) -> None: ...
def __init__(self, to: Type[_T2], *, null: bool = False) -> None: ...
| ForeignKey |
python | imageio__imageio | imageio/plugins/freeimage.py | {
"start": 9270,
"end": 13621
} | class ____(FreeimageFormat):
"""A JPEG format based on the Freeimage library.
This format supports grayscale and RGB images.
The freeimage plugin requires a `freeimage` binary. If this binary
not available on the system, it can be downloaded manually from
<https://github.com/imageio/imageio-binaries> by either
- the command line script ``imageio_download_bin freeimage``
- the Python method ``imageio.plugins.freeimage.download()``
Parameters for reading
----------------------
exifrotate : bool
Automatically rotate the image according to the exif flag.
Default True. If 2 is given, do the rotation in Python instead
of freeimage.
quickread : bool
Read the image more quickly, at the expense of quality.
Default False.
Parameters for saving
---------------------
quality : scalar
The compression factor of the saved image (1..100), higher
numbers result in higher quality but larger file size. Default 75.
progressive : bool
Save as a progressive JPEG file (e.g. for images on the web).
Default False.
optimize : bool
On saving, compute optimal Huffman coding tables (can reduce a
few percent of file size). Default False.
baseline : bool
Save basic JPEG, without metadata or any markers. Default False.
"""
class Reader(FreeimageFormat.Reader):
def _open(self, flags=0, exifrotate=True, quickread=False):
# Build flags from kwargs
flags = int(flags)
if exifrotate and exifrotate != 2:
flags |= IO_FLAGS.JPEG_EXIFROTATE
if not quickread:
flags |= IO_FLAGS.JPEG_ACCURATE
# Enter as usual, with modified flags
return FreeimageFormat.Reader._open(self, flags)
def _get_data(self, index):
im, meta = FreeimageFormat.Reader._get_data(self, index)
im = self._rotate(im, meta)
return im, meta
def _rotate(self, im, meta):
"""Use Orientation information from EXIF meta data to
orient the image correctly. Freeimage is also supposed to
support that, and I am pretty sure it once did, but now it
does not, so let's just do it in Python.
Edit: and now it works again, just leave in place as a fallback.
"""
if self.request.kwargs.get("exifrotate", None) == 2:
try:
ori = meta["EXIF_MAIN"]["Orientation"]
except KeyError: # pragma: no cover
pass # Orientation not available
else: # pragma: no cover - we cannot touch all cases
# www.impulseadventure.com/photo/exif-orientation.html
if ori in [1, 2]:
pass
if ori in [3, 4]:
im = np.rot90(im, 2)
if ori in [5, 6]:
im = np.rot90(im, 3)
if ori in [7, 8]:
im = np.rot90(im)
if ori in [2, 4, 5, 7]: # Flipped cases (rare)
im = np.fliplr(im)
return im
# --
class Writer(FreeimageFormat.Writer):
def _open(
self, flags=0, quality=75, progressive=False, optimize=False, baseline=False
):
# Test quality
quality = int(quality)
if quality < 1 or quality > 100:
raise ValueError("JPEG quality should be between 1 and 100.")
# Build flags from kwargs
flags = int(flags)
flags |= quality
if progressive:
flags |= IO_FLAGS.JPEG_PROGRESSIVE
if optimize:
flags |= IO_FLAGS.JPEG_OPTIMIZE
if baseline:
flags |= IO_FLAGS.JPEG_BASELINE
# Act as usual, but with modified flags
return FreeimageFormat.Writer._open(self, flags)
def _append_data(self, im, meta):
if im.ndim == 3 and im.shape[-1] == 4:
raise IOError("JPEG does not support alpha channel.")
im = image_as_uint(im, bitdepth=8)
return FreeimageFormat.Writer._append_data(self, im, meta)
| FreeimageJpegFormat |
python | getsentry__sentry | src/sentry/models/groupcommitresolution.py | {
"start": 223,
"end": 775
} | class ____(Model):
"""
When a Group is referenced via a commit, its association is stored here.
"""
__relocation_scope__ = RelocationScope.Excluded
group_id = BoundedBigIntegerField()
commit_id = BoundedBigIntegerField(db_index=True)
datetime = models.DateTimeField(default=timezone.now, db_index=True)
class Meta:
db_table = "sentry_groupcommitresolution"
app_label = "sentry"
unique_together = (("group_id", "commit_id"),)
__repr__ = sane_repr("group_id", "commit_id")
| GroupCommitResolution |
python | milvus-io__pymilvus | pymilvus/exceptions.py | {
"start": 3188,
"end": 3284
} | class ____(MilvusException):
"""Raise when clusteringkey are invalid"""
| ClusteringKeyException |
python | ray-project__ray | python/ray/llm/_internal/serve/core/configs/openai_api_models.py | {
"start": 5490,
"end": 5834
} | class ____(BaseModel):
model_config = ConfigDict(
protected_namespaces=tuple(), arbitrary_types_allowed=True
)
id: str
object: str
owned_by: str
permission: List[str]
metadata: Dict[str, Any]
@property
def model_type(self) -> str:
return self.metadata["engine_config"]["model_type"]
| ModelCard |
python | charliermarsh__ruff | crates/ty_python_semantic/resources/corpus/25_func_annotations_same_name.py | {
"start": 39,
"end": 114
} | class ____: ...
def MyClass() -> MyClass: ...
def x(self) -> x: ...
| MyClass |
python | viewflow__viewflow | viewflow/forms/renderers.py | {
"start": 3507,
"end": 3583
} | class ____(InputRenderer):
tag = "vf-field-password"
| PasswordInputRenderer |
python | doocs__leetcode | solution/3000-3099/3026.Maximum Good Subarray Sum/Solution.py | {
"start": 0,
"end": 505
} | class ____:
def maximumSubarraySum(self, nums: List[int], k: int) -> int:
ans = -inf
p = {nums[0]: 0}
s, n = 0, len(nums)
for i, x in enumerate(nums):
s += x
if x - k in p:
ans = max(ans, s - p[x - k])
if x + k in p:
ans = max(ans, s - p[x + k])
if i + 1 < n and (nums[i + 1] not in p or p[nums[i + 1]] > s):
p[nums[i + 1]] = s
return 0 if ans == -inf else ans
| Solution |
python | has2k1__plotnine | plotnine/scales/scale_color.py | {
"start": 15432,
"end": 15501
} | class ____(scale_color_datetime):
pass
@alias
| scale_colour_datetime |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/queries/snippets_models.py | {
"start": 1020,
"end": 1160
} | class ____(ndb.Model):
title = ndb.StringProperty()
stars = ndb.IntegerProperty()
tags = ndb.StringProperty(repeated=True)
| Article |
python | django__django | tests/check_framework/test_templates.py | {
"start": 945,
"end": 3430
} | class ____(SimpleTestCase):
TEMPLATES_STRING_IF_INVALID = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"NAME": "backend_1",
"OPTIONS": {
"string_if_invalid": False,
},
},
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"NAME": "backend_2",
"OPTIONS": {
"string_if_invalid": 42,
},
},
]
def _get_error_for_engine(self, engine):
value = engine.engine.string_if_invalid
return Error(
"'string_if_invalid' in TEMPLATES OPTIONS must be a string but got: %r "
"(%s)." % (value, type(value)),
obj=engine,
id="templates.E002",
)
def _check_engines(self, engines):
return list(
chain.from_iterable(e._check_string_if_invalid_is_string() for e in engines)
)
@override_settings(TEMPLATES=TEMPLATES_STRING_IF_INVALID)
def test_string_if_invalid_not_string(self):
_engines = engines.all()
errors = [
self._get_error_for_engine(_engines[0]),
self._get_error_for_engine(_engines[1]),
]
self.assertEqual(self._check_engines(_engines), errors)
def test_string_if_invalid_first_is_string(self):
TEMPLATES = deepcopy(self.TEMPLATES_STRING_IF_INVALID)
TEMPLATES[0]["OPTIONS"]["string_if_invalid"] = "test"
with self.settings(TEMPLATES=TEMPLATES):
_engines = engines.all()
errors = [self._get_error_for_engine(_engines[1])]
self.assertEqual(self._check_engines(_engines), errors)
def test_string_if_invalid_both_are_strings(self):
TEMPLATES = deepcopy(self.TEMPLATES_STRING_IF_INVALID)
TEMPLATES[0]["OPTIONS"]["string_if_invalid"] = "test"
TEMPLATES[1]["OPTIONS"]["string_if_invalid"] = "test"
with self.settings(TEMPLATES=TEMPLATES):
self.assertEqual(self._check_engines(engines.all()), [])
def test_string_if_invalid_not_specified(self):
TEMPLATES = deepcopy(self.TEMPLATES_STRING_IF_INVALID)
del TEMPLATES[1]["OPTIONS"]["string_if_invalid"]
with self.settings(TEMPLATES=TEMPLATES):
_engines = engines.all()
errors = [self._get_error_for_engine(_engines[0])]
self.assertEqual(self._check_engines(_engines), errors)
| CheckTemplateStringIfInvalidTest |
python | cython__cython | docs/examples/tutorial/cdef_classes/nonecheck.py | {
"start": 97,
"end": 458
} | class ____:
pass
# Turn off nonecheck locally for the function
@cython.nonecheck(False)
def func():
obj: MyClass = None
try:
# Turn nonecheck on again for a block
with cython.nonecheck(True):
print(obj.myfunc()) # Raises exception
except AttributeError:
pass
print(obj.myfunc()) # Hope for a crash!
| MyClass |
python | rapidsai__cudf | python/cudf/cudf/core/column/datetime.py | {
"start": 1916,
"end": 27767
} | class ____(TemporalBaseColumn):
"""
A Column implementation for Date-time types.
Parameters
----------
data : Buffer
The datetime values
dtype : np.dtype
The data type
mask : Buffer; optional
The validity mask
"""
_NP_SCALAR = np.datetime64
_PD_SCALAR = pd.Timestamp
_VALID_BINARY_OPERATIONS = {
"__eq__",
"__ne__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__add__",
"__sub__",
"__radd__",
"__rsub__",
}
_VALID_PLC_TYPES = {
plc.TypeId.TIMESTAMP_SECONDS,
plc.TypeId.TIMESTAMP_MILLISECONDS,
plc.TypeId.TIMESTAMP_MICROSECONDS,
plc.TypeId.TIMESTAMP_NANOSECONDS,
}
def __init__(
self,
plc_column: plc.Column,
size: int,
dtype: np.dtype | pd.DatetimeTZDtype,
offset: int,
null_count: int,
exposed: bool,
) -> None:
dtype = self._validate_dtype_instance(dtype)
super().__init__(
plc_column=plc_column,
size=size,
dtype=dtype,
offset=offset,
null_count=null_count,
exposed=exposed,
)
def _clear_cache(self) -> None:
super()._clear_cache()
attrs = (
"days_in_month",
"is_year_start",
"is_leap_year",
"is_year_end",
"is_quarter_start",
"is_quarter_end",
"is_month_start",
"is_month_end",
"day_of_year",
"weekday",
"nanosecond",
"microsecond",
"millisecond",
"second",
"minute",
"hour",
"day",
"month",
"year",
"quarter",
"time_unit",
)
for attr in attrs:
try:
delattr(self, attr)
except AttributeError:
# attr was not called yet, so ignore.
pass
def _scan(self, op: str) -> ColumnBase:
if op not in {"cummin", "cummax"}:
raise TypeError(
f"Accumulation {op} not supported for {self.dtype}"
)
return self.scan(op.replace("cum", ""), True)._with_type_metadata(
self.dtype
)
@staticmethod
def _validate_dtype_instance(dtype: np.dtype) -> np.dtype:
if (
cudf.get_option("mode.pandas_compatible") and not dtype.kind == "M"
) or (
not cudf.get_option("mode.pandas_compatible")
and not (isinstance(dtype, np.dtype) and dtype.kind == "M")
):
raise ValueError(f"dtype must be a datetime, got {dtype}")
return dtype
def __contains__(self, item: ScalarLike) -> bool:
try:
ts = self._PD_SCALAR(item).as_unit(self.time_unit)
except Exception:
# pandas can raise a variety of errors
# item cannot exist in self.
return False
if ts.tzinfo is None and isinstance(self.dtype, pd.DatetimeTZDtype):
return False
elif ts.tzinfo is not None:
ts = ts.tz_convert(None)
return super().__contains__(ts.to_numpy())
@functools.cached_property
@acquire_spill_lock()
def quarter(self) -> ColumnBase:
return type(self).from_pylibcudf(
plc.datetime.extract_quarter(self.to_pylibcudf(mode="read"))
)
@functools.cached_property
def year(self) -> ColumnBase:
return self._get_dt_field(plc.datetime.DatetimeComponent.YEAR)
@functools.cached_property
def month(self) -> ColumnBase:
return self._get_dt_field(plc.datetime.DatetimeComponent.MONTH)
@functools.cached_property
def day(self) -> ColumnBase:
return self._get_dt_field(plc.datetime.DatetimeComponent.DAY)
@functools.cached_property
def hour(self) -> ColumnBase:
return self._get_dt_field(plc.datetime.DatetimeComponent.HOUR)
@functools.cached_property
def minute(self) -> ColumnBase:
return self._get_dt_field(plc.datetime.DatetimeComponent.MINUTE)
@functools.cached_property
def second(self) -> ColumnBase:
return self._get_dt_field(plc.datetime.DatetimeComponent.SECOND)
@functools.cached_property
def millisecond(self) -> ColumnBase:
return self._get_dt_field(plc.datetime.DatetimeComponent.MILLISECOND)
@functools.cached_property
def microsecond(self) -> ColumnBase:
return self._get_dt_field(plc.datetime.DatetimeComponent.MICROSECOND)
@functools.cached_property
def nanosecond(self) -> ColumnBase:
return self._get_dt_field(plc.datetime.DatetimeComponent.NANOSECOND)
@functools.cached_property
def weekday(self) -> ColumnBase:
# pandas counts Monday-Sunday as 0-6
# while libcudf counts Monday-Sunday as 1-7
result = self._get_dt_field(plc.datetime.DatetimeComponent.WEEKDAY)
return result - result.dtype.type(1)
@functools.cached_property
@acquire_spill_lock()
def day_of_year(self) -> ColumnBase:
return type(self).from_pylibcudf(
plc.datetime.day_of_year(self.to_pylibcudf(mode="read"))
)
@functools.cached_property
def is_month_start(self) -> ColumnBase:
return (self.day == 1).fillna(False)
@functools.cached_property
def is_month_end(self) -> ColumnBase:
with acquire_spill_lock():
last_day_col = type(self).from_pylibcudf(
plc.datetime.last_day_of_month(self.to_pylibcudf(mode="read"))
)
return (self.day == last_day_col.day).fillna(False)
@functools.cached_property
def is_quarter_end(self) -> ColumnBase:
last_month = self.month.isin([3, 6, 9, 12])
return (self.is_month_end & last_month).fillna(False)
@functools.cached_property
def is_quarter_start(self) -> ColumnBase:
first_month = self.month.isin([1, 4, 7, 10])
return (self.is_month_start & first_month).fillna(False)
@functools.cached_property
def is_year_end(self) -> ColumnBase:
day_of_year = self.day_of_year
leap_dates = self.is_leap_year
leap = day_of_year == 366
non_leap = day_of_year == 365
return leap.copy_if_else(non_leap, leap_dates).fillna(False)
@functools.cached_property
@acquire_spill_lock()
def is_leap_year(self) -> ColumnBase:
return type(self).from_pylibcudf(
plc.datetime.is_leap_year(self.to_pylibcudf(mode="read"))
)
@functools.cached_property
def is_year_start(self) -> ColumnBase:
return (self.day_of_year == 1).fillna(False)
@functools.cached_property
@acquire_spill_lock()
def days_in_month(self) -> ColumnBase:
return type(self).from_pylibcudf(
plc.datetime.days_in_month(self.to_pylibcudf(mode="read"))
)
@functools.cached_property
def day_of_week(self) -> ColumnBase:
raise NotImplementedError("day_of_week is currently not implemented.")
@functools.cached_property
def tz(self):
"""
Return the timezone.
Returns
-------
datetime.tzinfo or None
Returns None when the array is tz-naive.
"""
if isinstance(self.dtype, pd.DatetimeTZDtype):
return self.dtype.tz
return None
@functools.cached_property
def time_unit(self) -> str:
return np.datetime_data(self.dtype)[0]
@functools.cached_property
def freq(self) -> str | None:
raise NotImplementedError("freq is not yet implemented.")
@functools.cached_property
def date(self):
raise NotImplementedError("date is not yet implemented.")
@functools.cached_property
def time(self):
raise NotImplementedError("time is not yet implemented.")
@functools.cached_property
def timetz(self):
raise NotImplementedError("timetz is not yet implemented.")
@functools.cached_property
def is_normalized(self) -> bool:
raise NotImplementedError(
"is_normalized is currently not implemented."
)
def to_julian_date(self) -> ColumnBase:
raise NotImplementedError(
"to_julian_date is currently not implemented."
)
def normalize(self) -> ColumnBase:
raise NotImplementedError("normalize is currently not implemented.")
@acquire_spill_lock()
def _get_dt_field(
self, field: plc.datetime.DatetimeComponent
) -> ColumnBase:
return type(self).from_pylibcudf(
plc.datetime.extract_datetime_component(
self.to_pylibcudf(mode="read"),
field,
)
)
def _get_field_names(
self,
field: Literal["month", "weekday"],
labels: list[str],
locale: str | None = None,
) -> ColumnBase:
if locale is not None:
raise NotImplementedError(
"Setting a locale is currently not supported. "
"Results will be returned in your current locale."
)
col_labels = as_column(labels)
indices = getattr(self, field)
has_nulls = indices.has_nulls()
if has_nulls:
indices = indices.fillna(len(col_labels))
return col_labels.take(indices, nullify=True, check_bounds=has_nulls)
def get_day_names(self, locale: str | None = None) -> ColumnBase:
return self._get_field_names(
"weekday", list(calendar.day_name), locale=locale
)
def get_month_names(self, locale: str | None = None) -> ColumnBase:
return self._get_field_names(
"month", list(calendar.month_name), locale=locale
)
def _round_dt(
self,
round_func: Callable[
[plc.Column, plc.datetime.RoundingFrequency], plc.Column
],
freq: str,
) -> ColumnBase:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Timedelta.resolution_string.html
old_to_new_freq_map = {
"H": "h",
"N": "ns",
"T": "min",
"L": "ms",
"U": "us",
"S": "s",
}
if freq in old_to_new_freq_map:
warnings.warn(
f"{freq} is deprecated and will be "
"removed in a future version, please use "
f"{old_to_new_freq_map[freq]} instead.",
FutureWarning,
)
freq = old_to_new_freq_map[freq]
rounding_fequency_map = {
"D": plc.datetime.RoundingFrequency.DAY,
"h": plc.datetime.RoundingFrequency.HOUR,
"min": plc.datetime.RoundingFrequency.MINUTE,
"s": plc.datetime.RoundingFrequency.SECOND,
"ms": plc.datetime.RoundingFrequency.MILLISECOND,
"us": plc.datetime.RoundingFrequency.MICROSECOND,
"ns": plc.datetime.RoundingFrequency.NANOSECOND,
}
if (plc_freq := rounding_fequency_map.get(freq)) is None:
raise ValueError(f"Invalid resolution: '{freq}'")
with acquire_spill_lock():
return type(self).from_pylibcudf(
round_func(
self.to_pylibcudf(mode="read"),
plc_freq,
)
)
def ceil(self, freq: str) -> ColumnBase:
return self._round_dt(plc.datetime.ceil_datetimes, freq)
def floor(self, freq: str) -> ColumnBase:
return self._round_dt(plc.datetime.floor_datetimes, freq)
def round(self, freq: str) -> ColumnBase:
return self._round_dt(plc.datetime.round_datetimes, freq)
def isocalendar(self) -> dict[str, ColumnBase]:
return {
field: self.strftime(format=directive).astype(np.dtype(np.uint32))
for field, directive in zip(
["year", "week", "day"], ["%G", "%V", "%u"], strict=True
)
}
def as_datetime_column(self, dtype: np.dtype) -> DatetimeColumn:
if dtype == self.dtype:
return self
elif isinstance(dtype, pd.DatetimeTZDtype):
raise TypeError(
"Cannot use .astype to convert from timezone-naive dtype to timezone-aware dtype. "
"Use tz_localize instead."
)
return self.cast(dtype=dtype) # type: ignore[return-value]
def as_timedelta_column(self, dtype: np.dtype) -> None: # type: ignore[override]
raise TypeError(
f"cannot astype a datetimelike from {self.dtype} to {dtype}"
)
@functools.cached_property
def _strftime_names(self) -> plc.Column:
"""Strftime names for %A, %a, %B, %b"""
return plc.Column.from_iterable_of_py(
[
nl_langinfo(loc)
for loc in (
locale.AM_STR,
locale.PM_STR,
locale.DAY_1,
locale.DAY_2,
locale.DAY_3,
locale.DAY_4,
locale.DAY_5,
locale.DAY_6,
locale.DAY_7,
locale.ABDAY_1,
locale.ABDAY_2,
locale.ABDAY_3,
locale.ABDAY_4,
locale.ABDAY_5,
locale.ABDAY_6,
locale.ABDAY_7,
locale.MON_1,
locale.MON_2,
locale.MON_3,
locale.MON_4,
locale.MON_5,
locale.MON_6,
locale.MON_7,
locale.MON_8,
locale.MON_9,
locale.MON_10,
locale.MON_11,
locale.MON_12,
locale.ABMON_1,
locale.ABMON_2,
locale.ABMON_3,
locale.ABMON_4,
locale.ABMON_5,
locale.ABMON_6,
locale.ABMON_7,
locale.ABMON_8,
locale.ABMON_9,
locale.ABMON_10,
locale.ABMON_11,
locale.ABMON_12,
)
]
)
def strftime(self, format: str) -> StringColumn:
if len(self) == 0:
return super().strftime(format)
if re.search("%[aAbB]", format):
names = self._strftime_names
else:
names = plc.Column.from_scalar(
plc.Scalar.from_py(None, plc.DataType(plc.TypeId.STRING)), 0
)
with acquire_spill_lock():
return type(self).from_pylibcudf( # type: ignore[return-value]
plc.strings.convert.convert_datetime.from_timestamps(
self.to_pylibcudf(mode="read"),
format,
names,
)
)
def as_string_column(self, dtype: DtypeObj) -> StringColumn:
format = _dtype_to_format_conversion.get(
self.dtype.name, "%Y-%m-%d %H:%M:%S"
)
if cudf.get_option("mode.pandas_compatible"):
if isinstance(dtype, np.dtype) and dtype.kind == "O":
raise TypeError(
f"Cannot astype a datetimelike from {self.dtype} to {dtype}"
)
if format.endswith("f"):
sub_second_res_len = 3
else:
sub_second_res_len = 0
has_nanos = self.time_unit == "ns" and self.nanosecond.any()
has_micros = (
self.time_unit in {"ns", "us"} and self.microsecond.any()
)
has_millis = (
self.time_unit in {"ns", "us", "ms"} and self.millisecond.any()
)
has_seconds = self.second.any()
has_minutes = self.minute.any()
has_hours = self.hour.any()
if sub_second_res_len:
if has_nanos:
# format should be intact and rest of the
# following conditions shouldn't execute.
pass
elif has_micros:
format = format[:-sub_second_res_len] + "%6f"
elif has_millis:
format = format[:-sub_second_res_len] + "%3f"
elif has_seconds or has_minutes or has_hours:
format = format[:-4]
else:
format = format.split(" ")[0]
elif not (has_seconds or has_minutes or has_hours):
format = format.split(" ")[0]
return self.strftime(format)
def _binaryop(self, other: ColumnBinaryOperand, op: str) -> ColumnBase:
reflect, op = self._check_reflected_op(op)
if isinstance(other, cudf.DateOffset):
return other._datetime_binop(self, op, reflect=reflect)
other = self._normalize_binop_operand(other)
if other is NotImplemented:
return NotImplemented
if reflect:
lhs = other
rhs = self
if isinstance(lhs, pa.Scalar):
lhs_unit = lhs.type.unit
other_dtype = cudf_dtype_from_pa_type(lhs.type)
else:
lhs_unit = getattr(lhs, "time_unit", None)
other_dtype = lhs.dtype
rhs_unit = getattr(rhs, "time_unit", None)
else:
lhs = self
rhs = other # type: ignore[assignment]
if isinstance(rhs, pa.Scalar):
rhs_unit = rhs.type.unit
other_dtype = cudf_dtype_from_pa_type(rhs.type)
else:
rhs_unit = getattr(rhs, "time_unit", None)
other_dtype = rhs.dtype
lhs_unit = getattr(lhs, "time_unit", None)
other_is_timedelta = other_dtype.kind == "m"
other_is_datetime64 = other_dtype.kind == "M"
out_dtype = None
if (
op
in {
"__ne__",
"__lt__",
"__gt__",
"__le__",
"__ge__",
}
and other_is_datetime64
):
out_dtype = get_dtype_of_same_kind(self.dtype, np.dtype(np.bool_))
elif op == "__add__" and other_is_timedelta:
# The only thing we can add to a datetime is a timedelta. This
# operation is symmetric, i.e. we allow `datetime + timedelta` or
# `timedelta + datetime`. Both result in DatetimeColumns.
out_dtype = get_dtype_of_same_kind(
self.dtype,
np.dtype(
f"datetime64[{_resolve_binop_resolution(lhs_unit, rhs_unit)}]" # type: ignore[arg-type]
),
)
elif op == "__sub__":
# Subtracting a datetime from a datetime results in a timedelta.
if other_is_datetime64:
out_dtype = get_dtype_of_same_kind(
self.dtype,
np.dtype(
f"timedelta64[{_resolve_binop_resolution(lhs_unit, rhs_unit)}]" # type: ignore[arg-type]
),
)
# We can subtract a timedelta from a datetime, but not vice versa.
# Not only is subtraction antisymmetric (as is normal), it is only
# well-defined if this operation was not invoked via reflection.
elif other_is_timedelta and not reflect:
out_dtype = get_dtype_of_same_kind(
self.dtype,
np.dtype(
f"datetime64[{_resolve_binop_resolution(lhs_unit, rhs_unit)}]" # type: ignore[arg-type]
),
)
elif op in {
"__eq__",
"__ne__",
"NULL_EQUALS",
"NULL_NOT_EQUALS",
}:
out_dtype = get_dtype_of_same_kind(self.dtype, np.dtype(np.bool_))
if isinstance(other, ColumnBase) and not isinstance(
other, DatetimeColumn
):
fill_value = op in ("__ne__", "NULL_NOT_EQUALS")
result = self._all_bools_with_nulls(
other, bool_fill_value=fill_value
)
if cudf.get_option("mode.pandas_compatible"):
result = result.fillna(fill_value)
return result
if out_dtype is None:
return NotImplemented
lhs_binop: plc.Scalar | ColumnBase = (
pa_scalar_to_plc_scalar(lhs) if isinstance(lhs, pa.Scalar) else lhs
)
rhs_binop: plc.Scalar | ColumnBase = (
pa_scalar_to_plc_scalar(rhs) if isinstance(rhs, pa.Scalar) else rhs
)
result_col = binaryop.binaryop(lhs_binop, rhs_binop, op, out_dtype)
if out_dtype.kind != "b" and op == "__add__":
return result_col
elif (
cudf.get_option("mode.pandas_compatible") and out_dtype.kind == "b"
):
return result_col.fillna(op == "__ne__")
else:
return result_col
def _with_type_metadata(self, dtype: DtypeObj) -> DatetimeColumn:
if isinstance(dtype, pd.DatetimeTZDtype):
return DatetimeTZColumn(
plc_column=self.plc_column,
size=self.size,
dtype=dtype,
offset=self.offset,
null_count=self.null_count,
exposed=False,
)
if cudf.get_option("mode.pandas_compatible"):
self._dtype = get_dtype_of_same_type(dtype, self.dtype)
return self
def _find_ambiguous_and_nonexistent(
self, zone_name: str
) -> tuple[NumericalColumn, NumericalColumn] | tuple[bool, bool]:
"""
Recognize ambiguous and nonexistent timestamps for the given timezone.
Returns a tuple of columns, both of "bool" dtype and of the same
size as `self`, that respectively indicate ambiguous and
nonexistent timestamps in `self` with the value `True`.
Ambiguous and/or nonexistent timestamps are only possible if any
transitions occur in the time zone database for the given timezone.
If no transitions occur, the tuple `(False, False)` is returned.
"""
transition_times, offsets = get_tz_data(zone_name)
offsets = offsets.astype(np.dtype(f"timedelta64[{self.time_unit}]")) # type: ignore[assignment]
if len(offsets) == 1: # no transitions
return False, False
transition_times, offsets, old_offsets = (
transition_times.slice(1, len(transition_times)),
offsets.slice(1, len(offsets)),
offsets.slice(0, len(offsets) - 1),
)
# Assume we have two clocks at the moment of transition:
# - Clock 1 is turned forward or backwards correctly
# - Clock 2 makes no changes
clock_1 = transition_times + offsets
clock_2 = transition_times + old_offsets
# At the start of an ambiguous time period, Clock 1 (which has
# been turned back) reads less than Clock 2:
cond = clock_1 < clock_2
ambiguous_begin = clock_1.apply_boolean_mask(cond)
# The end of an ambiguous time period is what Clock 2 reads at
# the moment of transition:
ambiguous_end = clock_2.apply_boolean_mask(cond)
ambiguous = self.label_bins(
left_edge=ambiguous_begin,
left_inclusive=True,
right_edge=ambiguous_end,
right_inclusive=False,
).notnull()
# At the start of a non-existent time period, Clock 2 reads less
# than Clock 1 (which has been turned forward):
cond = clock_1 > clock_2
nonexistent_begin = clock_2.apply_boolean_mask(cond)
# The end of the non-existent time period is what Clock 1 reads
# at the moment of transition:
nonexistent_end = clock_1.apply_boolean_mask(cond)
nonexistent = self.label_bins(
left_edge=nonexistent_begin,
left_inclusive=True,
right_edge=nonexistent_end,
right_inclusive=False,
).notnull()
return ambiguous, nonexistent # type: ignore[return-value]
def tz_localize(
self,
tz: str | None,
ambiguous: Literal["NaT"] = "NaT",
nonexistent: Literal["NaT"] = "NaT",
) -> DatetimeColumn:
if tz is None:
return self.copy()
ambiguous, nonexistent = check_ambiguous_and_nonexistent(
ambiguous, nonexistent
)
dtype = get_compatible_timezone(pd.DatetimeTZDtype(self.time_unit, tz))
tzname = dtype.tz.key
ambiguous_col, nonexistent_col = self._find_ambiguous_and_nonexistent(
tzname
)
localized = self._scatter_by_column(
self.isnull() | (ambiguous_col | nonexistent_col),
pa_scalar_to_plc_scalar(
pa.scalar(None, type=cudf_dtype_to_pa_type(self.dtype))
),
)
transition_times, offsets = get_tz_data(tzname)
transition_times_local = (transition_times + offsets).astype(
localized.dtype
)
indices = (
transition_times_local.searchsorted(localized, side="right") - 1
)
offsets_to_utc = offsets.take(indices, nullify=True)
gmt_data = localized - offsets_to_utc
return gmt_data._with_type_metadata(dtype)
def tz_convert(self, tz: str | None) -> DatetimeColumn:
raise TypeError(
"Cannot convert tz-naive timestamps, use tz_localize to localize"
)
| DatetimeColumn |
python | scipy__scipy | scipy/integrate/tests/test_integrate.py | {
"start": 12956,
"end": 13317
} | class ____(ODE):
r"""The equation :lm:`\dot u = i u`"""
stop_t = 1.23*pi
z0 = exp([1j, 2j, 3j, 4j, 5j])
cmplx = True
def f(self, z, t):
return 1j*z
def jac(self, z, t):
return 1j*eye(5)
def verify(self, zs, t):
u = self.z0 * exp(1j*t)
return allclose(u, zs, atol=self.atol, rtol=self.rtol)
| ComplexExp |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 921217,
"end": 921730
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of RejectDeployments"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "deployments")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
deployments = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null("Deployment")), graphql_name="deployments")
"""The affected deployments."""
| RejectDeploymentsPayload |
python | django__django | tests/serializers/test_yaml.py | {
"start": 1258,
"end": 2914
} | class ____(SimpleTestCase):
"""Not having pyyaml installed provides a misleading error
Refs: #12756
"""
@classmethod
def setUpClass(cls):
"""Removes imported yaml and stubs importlib.import_module"""
super().setUpClass()
cls._import_module_mock = YamlImportModuleMock()
importlib.import_module = cls._import_module_mock.import_module
# clear out cached serializers to emulate yaml missing
serializers._serializers = {}
@classmethod
def tearDownClass(cls):
"""Puts yaml back if necessary"""
super().tearDownClass()
importlib.import_module = cls._import_module_mock._import_module
# clear out cached serializers to clean out BadSerializer instances
serializers._serializers = {}
def test_serializer_pyyaml_error_message(self):
"""Using yaml serializer without pyyaml raises ImportError"""
jane = Author(name="Jane")
with self.assertRaises(ImportError):
serializers.serialize("yaml", [jane])
def test_deserializer_pyyaml_error_message(self):
"""Using yaml deserializer without pyyaml raises ImportError"""
with self.assertRaises(ImportError):
serializers.deserialize("yaml", "")
def test_dumpdata_pyyaml_error_message(self):
"""Calling dumpdata produces an error when yaml package missing"""
with self.assertRaisesMessage(
management.CommandError, YAML_IMPORT_ERROR_MESSAGE
):
management.call_command("dumpdata", format="yaml")
@unittest.skipUnless(HAS_YAML, "No yaml library detected")
| NoYamlSerializerTestCase |
python | neetcode-gh__leetcode | python/0253-meeting-rooms.py | {
"start": 0,
"end": 577
} | class ____:
"""
@param intervals: an array of meeting time intervals
@return: the minimum number of conference rooms required
"""
def minMeetingRooms(self, intervals):
start = sorted([i[0] for i in intervals])
end = sorted([i[1] for i in intervals])
res, count = 0, 0
s, e = 0, 0
while s < len(intervals):
if start[s] < end[e]:
s += 1
count += 1
else:
e += 1
count -= 1
res = max(res, count)
return res
| Solution |
python | getlogbook__logbook | benchmark/bench_disabled_introspection.py | {
"start": 125,
"end": 343
} | class ____(NullHandler):
blackhole = False
def run():
with Flags(introspection=False):
with DummyHandler():
for _ in range(500):
log.warning("this is not handled")
| DummyHandler |
python | google__pytype | pytype/rewrite/abstract/classes.py | {
"start": 6754,
"end": 7437
} | class ____(BaseInstance):
"""Instance of a class."""
members: dict[str, base.BaseValue]
def __init__(self, ctx: base.ContextType, cls: SimpleClass):
super().__init__(ctx, cls, {})
def __repr__(self):
return f'MutableInstance({self.cls.name})'
@property
def _attrs(self):
return (self.cls.full_name, datatypes.immutabledict(self.members))
def set_attribute(self, name: str, value: base.BaseValue) -> None:
if name in self.members:
self.members[name] = base.Union(self._ctx, (self.members[name], value))
else:
self.members[name] = value
def freeze(self) -> 'FrozenInstance':
return FrozenInstance(self._ctx, self)
| MutableInstance |
python | pydata__xarray | xarray/tests/test_pandas_to_xarray.py | {
"start": 4478,
"end": 6901
} | class ____:
@pytest.fixture
def df(self):
return DataFrame(
{
"a": list("abcd"),
"b": list(range(1, 5)),
"c": np.arange(3, 7).astype("u1"),
"d": np.arange(4.0, 8.0, dtype="float64"),
"e": [True, False, True, False],
"f": Categorical(list("abcd")),
"g": date_range("20130101", periods=4),
"h": date_range("20130101", periods=4, tz="US/Eastern"),
}
)
def test_to_xarray_index_types(self, index_flat, df):
index = index_flat
# MultiIndex is tested in test_to_xarray_with_multiindex
if len(index) == 0:
pytest.skip("Test doesn't make sense for empty index")
from xarray import Dataset
df.index = index[:4]
df.index.name = "foo"
df.columns.name = "bar"
result = df.to_xarray()
assert result.sizes["foo"] == 4
assert len(result.coords) == 1
assert len(result.data_vars) == 8
tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
assert isinstance(result, Dataset)
# idempotency
# datetimes w/tz are preserved
# column names are lost
expected = df.copy()
expected.columns.name = None
tm.assert_frame_equal(result.to_dataframe(), expected)
def test_to_xarray_empty(self, df):
from xarray import Dataset
df.index.name = "foo"
result = df[0:0].to_xarray()
assert result.sizes["foo"] == 0
assert isinstance(result, Dataset)
def test_to_xarray_with_multiindex(self, df):
from xarray import Dataset
# MultiIndex
df.index = MultiIndex.from_product([["a"], range(4)], names=["one", "two"])
result = df.to_xarray()
assert result.sizes["one"] == 1
assert result.sizes["two"] == 4
assert len(result.coords) == 2
assert len(result.data_vars) == 8
tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])
assert isinstance(result, Dataset)
result = result.to_dataframe()
expected = df.copy()
expected["f"] = expected["f"].astype(
object if Version(pd.__version__) < Version("3.0.0dev0") else str
)
expected.columns.name = None
tm.assert_frame_equal(result, expected)
| TestDataFrameToXArray |
python | huggingface__transformers | tests/models/vits/test_modeling_vits.py | {
"start": 1977,
"end": 5560
} | class ____:
def __init__(
self,
parent,
batch_size=2,
seq_length=7,
is_training=False,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=64,
flow_size=16,
vocab_size=38,
spectrogram_bins=8,
duration_predictor_num_flows=2,
duration_predictor_filter_channels=16,
prior_encoder_num_flows=2,
upsample_initial_channel=16,
upsample_rates=[8, 2],
upsample_kernel_sizes=[16, 4],
resblock_kernel_sizes=[3, 7],
resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.flow_size = flow_size
self.vocab_size = vocab_size
self.spectrogram_bins = spectrogram_bins
self.duration_predictor_num_flows = duration_predictor_num_flows
self.duration_predictor_filter_channels = duration_predictor_filter_channels
self.prior_encoder_num_flows = prior_encoder_num_flows
self.upsample_initial_channel = upsample_initial_channel
self.upsample_rates = upsample_rates
self.upsample_kernel_sizes = upsample_kernel_sizes
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(2)
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def get_config(self):
return VitsConfig(
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
ffn_dim=self.intermediate_size,
flow_size=self.flow_size,
vocab_size=self.vocab_size,
spectrogram_bins=self.spectrogram_bins,
duration_predictor_num_flows=self.duration_predictor_num_flows,
prior_encoder_num_flows=self.prior_encoder_num_flows,
duration_predictor_filter_channels=self.duration_predictor_filter_channels,
posterior_encoder_num_wavenet_layers=self.num_hidden_layers,
upsample_initial_channel=self.upsample_initial_channel,
upsample_rates=self.upsample_rates,
upsample_kernel_sizes=self.upsample_kernel_sizes,
resblock_kernel_sizes=self.resblock_kernel_sizes,
resblock_dilation_sizes=self.resblock_dilation_sizes,
)
def create_and_check_model_forward(self, config, inputs_dict):
model = VitsModel(config=config).to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
result = model(input_ids, attention_mask=attention_mask)
self.parent.assertEqual((self.batch_size, 624), result.waveform.shape)
@require_torch
| VitsModelTester |
python | sympy__sympy | sympy/matrices/expressions/matexpr.py | {
"start": 23055,
"end": 27895
} | class ____:
r"""
Helper class to compute matrix derivatives.
The logic: when an expression is derived by a matrix `X_{mn}`, two lines of
matrix multiplications are created: the one contracted to `m` (first line),
and the one contracted to `n` (second line).
Transposition flips the side by which new matrices are connected to the
lines.
The trace connects the end of the two lines.
"""
def __init__(self, lines, higher=S.One):
self._lines = list(lines)
self._first_pointer_parent = self._lines
self._first_pointer_index = 0
self._first_line_index = 0
self._second_pointer_parent = self._lines
self._second_pointer_index = 1
self._second_line_index = 1
self.higher = higher
@property
def first_pointer(self):
return self._first_pointer_parent[self._first_pointer_index]
@first_pointer.setter
def first_pointer(self, value):
self._first_pointer_parent[self._first_pointer_index] = value
@property
def second_pointer(self):
return self._second_pointer_parent[self._second_pointer_index]
@second_pointer.setter
def second_pointer(self, value):
self._second_pointer_parent[self._second_pointer_index] = value
def __repr__(self):
built = [self._build(i) for i in self._lines]
return "_LeftRightArgs(lines=%s, higher=%s)" % (
built,
self.higher,
)
def transpose(self):
self._first_pointer_parent, self._second_pointer_parent = self._second_pointer_parent, self._first_pointer_parent
self._first_pointer_index, self._second_pointer_index = self._second_pointer_index, self._first_pointer_index
self._first_line_index, self._second_line_index = self._second_line_index, self._first_line_index
return self
@staticmethod
def _build(expr):
if isinstance(expr, ExprBuilder):
return expr.build()
if isinstance(expr, list):
if len(expr) == 1:
return expr[0]
else:
return expr[0](*[_LeftRightArgs._build(i) for i in expr[1]])
else:
return expr
def build(self):
data = [self._build(i) for i in self._lines]
if self.higher != 1:
data += [self._build(self.higher)]
data = list(data)
return data
def matrix_form(self):
if self.first != 1 and self.higher != 1:
raise ValueError("higher dimensional array cannot be represented")
def _get_shape(elem):
if isinstance(elem, MatrixExpr):
return elem.shape
return (None, None)
if _get_shape(self.first)[1] != _get_shape(self.second)[1]:
# Remove one-dimensional identity matrices:
# (this is needed by `a.diff(a)` where `a` is a vector)
if _get_shape(self.second) == (1, 1):
return self.first*self.second[0, 0]
if _get_shape(self.first) == (1, 1):
return self.first[1, 1]*self.second.T
raise ValueError("incompatible shapes")
if self.first != 1:
return self.first*self.second.T
else:
return self.higher
def rank(self):
"""
Number of dimensions different from trivial (warning: not related to
matrix rank).
"""
rank = 0
if self.first != 1:
rank += sum(i != 1 for i in self.first.shape)
if self.second != 1:
rank += sum(i != 1 for i in self.second.shape)
if self.higher != 1:
rank += 2
return rank
def _multiply_pointer(self, pointer, other):
from ...tensor.array.expressions.array_expressions import ArrayTensorProduct
from ...tensor.array.expressions.array_expressions import ArrayContraction
subexpr = ExprBuilder(
ArrayContraction,
[
ExprBuilder(
ArrayTensorProduct,
[
pointer,
other
]
),
(1, 2)
],
validator=ArrayContraction._validate
)
return subexpr
def append_first(self, other):
self.first_pointer *= other
def append_second(self, other):
self.second_pointer *= other
def _make_matrix(x):
from sympy.matrices.immutable import ImmutableDenseMatrix
if isinstance(x, MatrixExpr):
return x
return ImmutableDenseMatrix([[x]])
from .matmul import MatMul
from .matadd import MatAdd
from .matpow import MatPow
from .transpose import Transpose
from .inverse import Inverse
from .special import ZeroMatrix, Identity
from .determinant import Determinant
| _LeftRightArgs |
python | huggingface__transformers | src/transformers/models/longformer/modeling_longformer.py | {
"start": 18588,
"end": 23684
class ____(ModelOutput):
    r"""
    Base class for outputs of token classification models.

    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Classification loss.
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
        Classification scores (before SoftMax).
    hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
        attention_window + 1)`, where `x` is the number of tokens with global attention mask.

        Local attentions weights after the attention softmax, used to compute the weighted average in the
        self-attention heads. Those are the attention weights from every token in the sequence to every token with
        global attention (first `x` values) and to every token in the attention window (remaining `attention_window
        + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
        remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
        token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
        (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
        If the attention window contains a token with global attention, the attention weight at the corresponding
        index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
        attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
        accessed from `global_attentions`.
    global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
        where `x` is the number of tokens with global attention mask.

        Global attentions weights after the attention softmax, used to compute the weighted average in the
        self-attention heads. Those are the attention weights from every token with global attention to every token
        in the sequence.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None
    global_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
def _get_question_end_index(input_ids, sep_token_id):
"""
Computes the index of the first occurrence of `sep_token_id`.
"""
sep_token_indices = (input_ids == sep_token_id).nonzero()
batch_size = input_ids.shape[0]
assert sep_token_indices.shape[1] == 2, "`input_ids` should have two dimensions"
assert sep_token_indices.shape[0] == 3 * batch_size, (
f"There should be exactly three separator tokens: {sep_token_id} in every sample for questions answering. You"
" might also consider to set `global_attention_mask` manually in the forward function to avoid this error."
)
return sep_token_indices.view(batch_size, 3, 2)[:, 0, 1]
def _compute_global_attention_mask(input_ids, sep_token_id, before_sep_token=True):
    """
    Computes global attention mask by putting attention on all tokens before `sep_token_id` if `before_sep_token is
    True` else after `sep_token_id`.
    """
    question_end = _get_question_end_index(input_ids, sep_token_id).unsqueeze(dim=1)  # size: batch_size x 1
    positions = torch.arange(input_ids.shape[1], device=input_ids.device)
    expanded = positions.expand_as(input_ids)
    if before_sep_token is True:
        # bool mask with True on every token strictly before the first separator
        mask = (expanded < question_end).to(torch.bool)
    else:
        # last token is separation token and should not be counted and in the middle are two separation tokens
        mask = (expanded > (question_end + 1)).to(torch.bool) * (expanded < input_ids.shape[-1]).to(torch.bool)
    return mask
def create_position_ids_from_input_ids(input_ids, padding_idx):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids: torch.Tensor of token ids
        padding_idx: id of the padding token

    Returns: torch.Tensor of position ids, same shape as `input_ids`
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    non_pad = input_ids.ne(padding_idx).int()
    # cumulative count of non-pad tokens, zeroed out again at pad positions
    positions = torch.cumsum(non_pad, dim=1).type_as(non_pad) * non_pad
    return positions.long() + padding_idx
| LongformerTokenClassifierOutput |
python | kamyu104__LeetCode-Solutions | Python/day-of-the-week.py | {
"start": 29,
"end": 545
} | class ____(object):
def dayOfTheWeek(self, day, month, year):
"""
:type day: int
:type month: int
:type year: int
:rtype: str
"""
DAYS = ["Sunday", "Monday", "Tuesday", "Wednesday", \
"Thursday", "Friday", "Saturday"]
# Zeller Formula
if month < 3:
month += 12
year -= 1
c, y = divmod(year, 100)
w = (c//4 - 2*c + y + y//4 + 13*(month+1)//5 + day - 1) % 7
return DAYS[w]
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.