language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sympy__sympy | sympy/polys/matrices/_dfm.py | {
"start": 1798,
"end": 32971
} | class ____:
"""
Dense FLINT matrix. This class is a wrapper for matrices from python-flint.
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.matrices.dfm import DFM
>>> dfm = DFM([[ZZ(1), ZZ(2)], [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> dfm
[[1, 2], [3, 4]]
>>> dfm.rep
[1, 2]
[3, 4]
>>> type(dfm.rep) # doctest: +SKIP
<class 'flint._flint.fmpz_mat'>
Usually, the DFM class is not instantiated directly, but is created as the
internal representation of :class:`~.DomainMatrix`. When
`SYMPY_GROUND_TYPES` is set to `flint` and `python-flint` is installed, the
:class:`DFM` class is used automatically as the internal representation of
:class:`~.DomainMatrix` in dense format if the domain is supported by
python-flint.
>>> from sympy.polys.matrices.domainmatrix import DM
>>> dM = DM([[1, 2], [3, 4]], ZZ)
>>> dM.rep
[[1, 2], [3, 4]]
A :class:`~.DomainMatrix` can be converted to :class:`DFM` by calling the
:meth:`to_dfm` method:
>>> dM.to_dfm()
[[1, 2], [3, 4]]
"""
fmt = 'dense'
is_DFM = True
is_DDM = False
def __new__(cls, rowslist, shape, domain):
"""Construct from a nested list."""
flint_mat = cls._get_flint_func(domain)
if 0 not in shape:
try:
rep = flint_mat(rowslist)
except (ValueError, TypeError):
raise DMBadInputError(f"Input should be a list of list of {domain}")
else:
rep = flint_mat(*shape)
return cls._new(rep, shape, domain)
@classmethod
def _new(cls, rep, shape, domain):
"""Internal constructor from a flint matrix."""
cls._check(rep, shape, domain)
obj = object.__new__(cls)
obj.rep = rep
obj.shape = obj.rows, obj.cols = shape
obj.domain = domain
return obj
def _new_rep(self, rep):
"""Create a new DFM with the same shape and domain but a new rep."""
return self._new(rep, self.shape, self.domain)
@classmethod
def _check(cls, rep, shape, domain):
repshape = (rep.nrows(), rep.ncols())
if repshape != shape:
raise DMBadInputError("Shape of rep does not match shape of DFM")
if domain == ZZ and not isinstance(rep, flint.fmpz_mat):
raise RuntimeError("Rep is not a flint.fmpz_mat")
elif domain == QQ and not isinstance(rep, flint.fmpq_mat):
raise RuntimeError("Rep is not a flint.fmpq_mat")
elif domain.is_FF and not isinstance(rep, (flint.fmpz_mod_mat, flint.nmod_mat)):
raise RuntimeError("Rep is not a flint.fmpz_mod_mat or flint.nmod_mat")
elif domain not in (ZZ, QQ) and not domain.is_FF:
raise NotImplementedError("Only ZZ and QQ are supported by DFM")
@classmethod
def _supports_domain(cls, domain):
"""Return True if the given domain is supported by DFM."""
return domain in (ZZ, QQ) or domain.is_FF and domain._is_flint
@classmethod
def _get_flint_func(cls, domain):
"""Return the flint matrix class for the given domain."""
if domain == ZZ:
return flint.fmpz_mat
elif domain == QQ:
return flint.fmpq_mat
elif domain.is_FF:
c = domain.characteristic()
if isinstance(domain.one, flint.nmod):
_cls = flint.nmod_mat
def _func(*e):
if len(e) == 1 and isinstance(e[0], flint.nmod_mat):
return _cls(e[0])
else:
return _cls(*e, c)
else:
m = flint.fmpz_mod_ctx(c)
_func = lambda *e: flint.fmpz_mod_mat(*e, m)
return _func
else:
raise NotImplementedError("Only ZZ and QQ are supported by DFM")
@property
def _func(self):
"""Callable to create a flint matrix of the same domain."""
return self._get_flint_func(self.domain)
def __str__(self):
"""Return ``str(self)``."""
return str(self.to_ddm())
def __repr__(self):
"""Return ``repr(self)``."""
return f'DFM{repr(self.to_ddm())[3:]}'
def __eq__(self, other):
"""Return ``self == other``."""
if not isinstance(other, DFM):
return NotImplemented
# Compare domains first because we do *not* want matrices with
# different domains to be equal but e.g. a flint fmpz_mat and fmpq_mat
# with the same entries will compare equal.
return self.domain == other.domain and self.rep == other.rep
@classmethod
def from_list(cls, rowslist, shape, domain):
"""Construct from a nested list."""
return cls(rowslist, shape, domain)
def to_list(self):
"""Convert to a nested list."""
return self.rep.tolist()
def copy(self):
"""Return a copy of self."""
return self._new_rep(self._func(self.rep))
def to_ddm(self):
"""Convert to a DDM."""
return DDM.from_list(self.to_list(), self.shape, self.domain)
def to_sdm(self):
"""Convert to a SDM."""
return SDM.from_list(self.to_list(), self.shape, self.domain)
def to_dfm(self):
"""Return self."""
return self
def to_dfm_or_ddm(self):
"""
Convert to a :class:`DFM`.
This :class:`DFM` method exists to parallel the :class:`~.DDM` and
:class:`~.SDM` methods. For :class:`DFM` it will always return self.
See Also
========
to_ddm
to_sdm
sympy.polys.matrices.domainmatrix.DomainMatrix.to_dfm_or_ddm
"""
return self
@classmethod
def from_ddm(cls, ddm):
"""Convert from a DDM."""
return cls.from_list(ddm.to_list(), ddm.shape, ddm.domain)
@classmethod
def from_list_flat(cls, elements, shape, domain):
"""Inverse of :meth:`to_list_flat`."""
func = cls._get_flint_func(domain)
try:
rep = func(*shape, elements)
except ValueError:
raise DMBadInputError(f"Incorrect number of elements for shape {shape}")
except TypeError:
raise DMBadInputError(f"Input should be a list of {domain}")
return cls(rep, shape, domain)
def to_list_flat(self):
"""Convert to a flat list."""
return self.rep.entries()
def to_flat_nz(self):
"""Convert to a flat list of non-zeros."""
return self.to_ddm().to_flat_nz()
@classmethod
def from_flat_nz(cls, elements, data, domain):
"""Inverse of :meth:`to_flat_nz`."""
return DDM.from_flat_nz(elements, data, domain).to_dfm()
def to_dod(self):
"""Convert to a DOD."""
return self.to_ddm().to_dod()
@classmethod
def from_dod(cls, dod, shape, domain):
"""Inverse of :meth:`to_dod`."""
return DDM.from_dod(dod, shape, domain).to_dfm()
def to_dok(self):
"""Convert to a DOK."""
return self.to_ddm().to_dok()
@classmethod
def from_dok(cls, dok, shape, domain):
"""Inverse of :math:`to_dod`."""
return DDM.from_dok(dok, shape, domain).to_dfm()
def iter_values(self):
"""Iterate over the non-zero values of the matrix."""
m, n = self.shape
rep = self.rep
for i in range(m):
for j in range(n):
repij = rep[i, j]
if repij:
yield rep[i, j]
def iter_items(self):
"""Iterate over indices and values of nonzero elements of the matrix."""
m, n = self.shape
rep = self.rep
for i in range(m):
for j in range(n):
repij = rep[i, j]
if repij:
yield ((i, j), repij)
def convert_to(self, domain):
"""Convert to a new domain."""
if domain == self.domain:
return self.copy()
elif domain == QQ and self.domain == ZZ:
return self._new(flint.fmpq_mat(self.rep), self.shape, domain)
elif self._supports_domain(domain):
# XXX: Use more efficient conversions when possible.
return self.to_ddm().convert_to(domain).to_dfm()
else:
# It is the callers responsibility to convert to DDM before calling
# this method if the domain is not supported by DFM.
raise NotImplementedError("Only ZZ and QQ are supported by DFM")
def getitem(self, i, j):
"""Get the ``(i, j)``-th entry."""
# XXX: flint matrices do not support negative indices
# XXX: They also raise ValueError instead of IndexError
m, n = self.shape
if i < 0:
i += m
if j < 0:
j += n
try:
return self.rep[i, j]
except ValueError:
raise IndexError(f"Invalid indices ({i}, {j}) for Matrix of shape {self.shape}")
def setitem(self, i, j, value):
"""Set the ``(i, j)``-th entry."""
# XXX: flint matrices do not support negative indices
# XXX: They also raise ValueError instead of IndexError
m, n = self.shape
if i < 0:
i += m
if j < 0:
j += n
try:
self.rep[i, j] = value
except ValueError:
raise IndexError(f"Invalid indices ({i}, {j}) for Matrix of shape {self.shape}")
def _extract(self, i_indices, j_indices):
"""Extract a submatrix with no checking."""
# Indices must be positive and in range.
M = self.rep
lol = [[M[i, j] for j in j_indices] for i in i_indices]
shape = (len(i_indices), len(j_indices))
return self.from_list(lol, shape, self.domain)
def extract(self, rowslist, colslist):
"""Extract a submatrix."""
# XXX: flint matrices do not support fancy indexing or negative indices
#
# Check and convert negative indices before calling _extract.
m, n = self.shape
new_rows = []
new_cols = []
for i in rowslist:
if i < 0:
i_pos = i + m
else:
i_pos = i
if not 0 <= i_pos < m:
raise IndexError(f"Invalid row index {i} for Matrix of shape {self.shape}")
new_rows.append(i_pos)
for j in colslist:
if j < 0:
j_pos = j + n
else:
j_pos = j
if not 0 <= j_pos < n:
raise IndexError(f"Invalid column index {j} for Matrix of shape {self.shape}")
new_cols.append(j_pos)
return self._extract(new_rows, new_cols)
def extract_slice(self, rowslice, colslice):
"""Slice a DFM."""
# XXX: flint matrices do not support slicing
m, n = self.shape
i_indices = range(m)[rowslice]
j_indices = range(n)[colslice]
return self._extract(i_indices, j_indices)
def neg(self):
"""Negate a DFM matrix."""
return self._new_rep(-self.rep)
def add(self, other):
"""Add two DFM matrices."""
return self._new_rep(self.rep + other.rep)
def sub(self, other):
"""Subtract two DFM matrices."""
return self._new_rep(self.rep - other.rep)
def mul(self, other):
"""Multiply a DFM matrix from the right by a scalar."""
return self._new_rep(self.rep * other)
def rmul(self, other):
"""Multiply a DFM matrix from the left by a scalar."""
return self._new_rep(other * self.rep)
def mul_elementwise(self, other):
"""Elementwise multiplication of two DFM matrices."""
# XXX: flint matrices do not support elementwise multiplication
return self.to_ddm().mul_elementwise(other.to_ddm()).to_dfm()
def matmul(self, other):
"""Multiply two DFM matrices."""
shape = (self.rows, other.cols)
return self._new(self.rep * other.rep, shape, self.domain)
# XXX: For the most part DomainMatrix does not expect DDM, SDM, or DFM to
# have arithmetic operators defined. The only exception is negation.
# Perhaps that should be removed.
def __neg__(self):
"""Negate a DFM matrix."""
return self.neg()
@classmethod
def zeros(cls, shape, domain):
"""Return a zero DFM matrix."""
func = cls._get_flint_func(domain)
return cls._new(func(*shape), shape, domain)
# XXX: flint matrices do not have anything like ones or eye
# In the methods below we convert to DDM and then back to DFM which is
# probably about as efficient as implementing these methods directly.
@classmethod
def ones(cls, shape, domain):
"""Return a one DFM matrix."""
# XXX: flint matrices do not have anything like ones
return DDM.ones(shape, domain).to_dfm()
@classmethod
def eye(cls, n, domain):
"""Return the identity matrix of size n."""
# XXX: flint matrices do not have anything like eye
return DDM.eye(n, domain).to_dfm()
@classmethod
def diag(cls, elements, domain):
"""Return a diagonal matrix."""
return DDM.diag(elements, domain).to_dfm()
def applyfunc(self, func, domain):
"""Apply a function to each entry of a DFM matrix."""
return self.to_ddm().applyfunc(func, domain).to_dfm()
def transpose(self):
"""Transpose a DFM matrix."""
return self._new(self.rep.transpose(), (self.cols, self.rows), self.domain)
def hstack(self, *others):
"""Horizontally stack matrices."""
return self.to_ddm().hstack(*[o.to_ddm() for o in others]).to_dfm()
def vstack(self, *others):
"""Vertically stack matrices."""
return self.to_ddm().vstack(*[o.to_ddm() for o in others]).to_dfm()
def diagonal(self):
"""Return the diagonal of a DFM matrix."""
M = self.rep
m, n = self.shape
return [M[i, i] for i in range(min(m, n))]
def is_upper(self):
"""Return ``True`` if the matrix is upper triangular."""
M = self.rep
for i in range(self.rows):
for j in range(min(i, self.cols)):
if M[i, j]:
return False
return True
def is_lower(self):
"""Return ``True`` if the matrix is lower triangular."""
M = self.rep
for i in range(self.rows):
for j in range(i + 1, self.cols):
if M[i, j]:
return False
return True
def is_diagonal(self):
"""Return ``True`` if the matrix is diagonal."""
return self.is_upper() and self.is_lower()
def is_zero_matrix(self):
"""Return ``True`` if the matrix is the zero matrix."""
M = self.rep
for i in range(self.rows):
for j in range(self.cols):
if M[i, j]:
return False
return True
def nnz(self):
"""Return the number of non-zero elements in the matrix."""
return self.to_ddm().nnz()
def scc(self):
"""Return the strongly connected components of the matrix."""
return self.to_ddm().scc()
@doctest_depends_on(ground_types='flint')
def det(self):
"""
Compute the determinant of the matrix using FLINT.
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2], [3, 4]])
>>> dfm = M.to_DM().to_dfm()
>>> dfm
[[1, 2], [3, 4]]
>>> dfm.det()
-2
Notes
=====
Calls the ``.det()`` method of the underlying FLINT matrix.
For :ref:`ZZ` or :ref:`QQ` this calls ``fmpz_mat_det`` or
``fmpq_mat_det`` respectively.
At the time of writing the implementation of ``fmpz_mat_det`` uses one
of several algorithms depending on the size of the matrix and bit size
of the entries. The algorithms used are:
- Cofactor for very small (up to 4x4) matrices.
- Bareiss for small (up to 25x25) matrices.
- Modular algorithms for larger matrices (up to 60x60) or for larger
matrices with large bit sizes.
- Modular "accelerated" for larger matrices (60x60 upwards) if the bit
size is smaller than the dimensions of the matrix.
The implementation of ``fmpq_mat_det`` clears denominators from each
row (not the whole matrix) and then calls ``fmpz_mat_det`` and divides
by the product of the denominators.
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.det
Higher level interface to compute the determinant of a matrix.
"""
# XXX: At least the first three algorithms described above should also
# be implemented in the pure Python DDM and SDM classes which at the
# time of writng just use Bareiss for all matrices and domains.
# Probably in Python the thresholds would be different though.
return self.rep.det()
@doctest_depends_on(ground_types='flint')
def charpoly(self):
"""
Compute the characteristic polynomial of the matrix using FLINT.
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2], [3, 4]])
>>> dfm = M.to_DM().to_dfm() # need ground types = 'flint'
>>> dfm
[[1, 2], [3, 4]]
>>> dfm.charpoly()
[1, -5, -2]
Notes
=====
Calls the ``.charpoly()`` method of the underlying FLINT matrix.
For :ref:`ZZ` or :ref:`QQ` this calls ``fmpz_mat_charpoly`` or
``fmpq_mat_charpoly`` respectively.
At the time of writing the implementation of ``fmpq_mat_charpoly``
clears a denominator from the whole matrix and then calls
``fmpz_mat_charpoly``. The coefficients of the characteristic
polynomial are then multiplied by powers of the denominator.
The ``fmpz_mat_charpoly`` method uses a modular algorithm with CRT
reconstruction. The modular algorithm uses ``nmod_mat_charpoly`` which
uses Berkowitz for small matrices and non-prime moduli or otherwise
the Danilevsky method.
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.charpoly
Higher level interface to compute the characteristic polynomial of
a matrix.
"""
# FLINT polynomial coefficients are in reverse order compared to SymPy.
return self.rep.charpoly().coeffs()[::-1]
@doctest_depends_on(ground_types='flint')
def inv(self):
"""
Compute the inverse of a matrix using FLINT.
Examples
========
>>> from sympy import Matrix, QQ
>>> M = Matrix([[1, 2], [3, 4]])
>>> dfm = M.to_DM().to_dfm().convert_to(QQ)
>>> dfm
[[1, 2], [3, 4]]
>>> dfm.inv()
[[-2, 1], [3/2, -1/2]]
>>> dfm.matmul(dfm.inv())
[[1, 0], [0, 1]]
Notes
=====
Calls the ``.inv()`` method of the underlying FLINT matrix.
For now this will raise an error if the domain is :ref:`ZZ` but will
use the FLINT method for :ref:`QQ`.
The FLINT methods for :ref:`ZZ` and :ref:`QQ` are ``fmpz_mat_inv`` and
``fmpq_mat_inv`` respectively. The ``fmpz_mat_inv`` method computes an
inverse with denominator. This is implemented by calling
``fmpz_mat_solve`` (see notes in :meth:`lu_solve` about the algorithm).
The ``fmpq_mat_inv`` method clears denominators from each row and then
multiplies those into the rhs identity matrix before calling
``fmpz_mat_solve``.
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.inv
Higher level method for computing the inverse of a matrix.
"""
# TODO: Implement similar algorithms for DDM and SDM.
#
# XXX: The flint fmpz_mat and fmpq_mat inv methods both return fmpq_mat
# by default. The fmpz_mat method has an optional argument to return
# fmpz_mat instead for unimodular matrices.
#
# The convention in DomainMatrix is to raise an error if the matrix is
# not over a field regardless of whether the matrix is invertible over
# its domain or over any associated field. Maybe DomainMatrix.inv
# should be changed to always return a matrix over an associated field
# except with a unimodular argument for returning an inverse over a
# ring if possible.
#
# For now we follow the existing DomainMatrix convention...
K = self.domain
m, n = self.shape
if m != n:
raise DMNonSquareMatrixError("cannot invert a non-square matrix")
if K == ZZ:
raise DMDomainError("field expected, got %s" % K)
elif K == QQ or K.is_FF:
try:
return self._new_rep(self.rep.inv())
except ZeroDivisionError:
raise DMNonInvertibleMatrixError("matrix is not invertible")
else:
# If more domains are added for DFM then we will need to consider
# what happens here.
raise NotImplementedError("DFM.inv() is not implemented for %s" % K)
def lu(self):
"""Return the LU decomposition of the matrix."""
L, U, swaps = self.to_ddm().lu()
return L.to_dfm(), U.to_dfm(), swaps
def qr(self):
"""Return the QR decomposition of the matrix."""
Q, R = self.to_ddm().qr()
return Q.to_dfm(), R.to_dfm()
# XXX: The lu_solve function should be renamed to solve. Whether or not it
# uses an LU decomposition is an implementation detail. A method called
# lu_solve would make sense for a situation in which an LU decomposition is
# reused several times to solve with different rhs but that would imply a
# different call signature.
#
# The underlying python-flint method has an algorithm= argument so we could
# use that and have e.g. solve_lu and solve_modular or perhaps also a
# method= argument to choose between the two. Flint itself has more
# possible algorithms to choose from than are exposed by python-flint.
@doctest_depends_on(ground_types='flint')
def lu_solve(self, rhs):
"""
Solve a matrix equation using FLINT.
Examples
========
>>> from sympy import Matrix, QQ
>>> M = Matrix([[1, 2], [3, 4]])
>>> dfm = M.to_DM().to_dfm().convert_to(QQ)
>>> dfm
[[1, 2], [3, 4]]
>>> rhs = Matrix([1, 2]).to_DM().to_dfm().convert_to(QQ)
>>> dfm.lu_solve(rhs)
[[0], [1/2]]
Notes
=====
Calls the ``.solve()`` method of the underlying FLINT matrix.
For now this will raise an error if the domain is :ref:`ZZ` but will
use the FLINT method for :ref:`QQ`.
The FLINT methods for :ref:`ZZ` and :ref:`QQ` are ``fmpz_mat_solve``
and ``fmpq_mat_solve`` respectively. The ``fmpq_mat_solve`` method
uses one of two algorithms:
- For small matrices (<25 rows) it clears denominators between the
matrix and rhs and uses ``fmpz_mat_solve``.
- For larger matrices it uses ``fmpq_mat_solve_dixon`` which is a
modular approach with CRT reconstruction over :ref:`QQ`.
The ``fmpz_mat_solve`` method uses one of four algorithms:
- For very small (<= 3x3) matrices it uses a Cramer's rule.
- For small (<= 15x15) matrices it uses a fraction-free LU solve.
- Otherwise it uses either Dixon or another multimodular approach.
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.lu_solve
Higher level interface to solve a matrix equation.
"""
if not self.domain == rhs.domain:
raise DMDomainError("Domains must match: %s != %s" % (self.domain, rhs.domain))
# XXX: As for inv we should consider whether to return a matrix over
# over an associated field or attempt to find a solution in the ring.
# For now we follow the existing DomainMatrix convention...
if not self.domain.is_Field:
raise DMDomainError("Field expected, got %s" % self.domain)
m, n = self.shape
j, k = rhs.shape
if m != j:
raise DMShapeError("Matrix size mismatch: %s * %s vs %s * %s" % (m, n, j, k))
sol_shape = (n, k)
# XXX: The Flint solve method only handles square matrices. Probably
# Flint has functions that could be used to solve non-square systems
# but they are not exposed in python-flint yet. Alternatively we could
# put something here using the features that are available like rref.
if m != n:
return self.to_ddm().lu_solve(rhs.to_ddm()).to_dfm()
try:
sol = self.rep.solve(rhs.rep)
except ZeroDivisionError:
raise DMNonInvertibleMatrixError("Matrix det == 0; not invertible.")
return self._new(sol, sol_shape, self.domain)
def fflu(self):
"""
Fraction-free LU decomposition of DFM.
Explanation
===========
Uses `python-flint` if possible for a matrix of
integers otherwise uses the DDM method.
See Also
========
sympy.polys.matrices.ddm.DDM.fflu
"""
if self.domain == ZZ:
fflu = getattr(self.rep, 'fflu', None)
if fflu is not None:
P, L, D, U = self.rep.fflu()
m, n = self.shape
return (
self._new(P, (m, m), self.domain),
self._new(L, (m, m), self.domain),
self._new(D, (m, m), self.domain),
self._new(U, self.shape, self.domain)
)
ddm_p, ddm_l, ddm_d, ddm_u = self.to_ddm().fflu()
P = ddm_p.to_dfm()
L = ddm_l.to_dfm()
D = ddm_d.to_dfm()
U = ddm_u.to_dfm()
return P, L, D, U
def nullspace(self):
"""Return a basis for the nullspace of the matrix."""
# Code to compute nullspace using flint:
#
# V, nullity = self.rep.nullspace()
# V_dfm = self._new_rep(V)._extract(range(self.rows), range(nullity))
#
# XXX: That gives the nullspace but does not give us nonpivots. So we
# use the slower DDM method anyway. It would be better to change the
# signature of the nullspace method to not return nonpivots.
#
# XXX: Also python-flint exposes a nullspace method for fmpz_mat but
# not for fmpq_mat. This is the reverse of the situation for DDM etc
# which only allow nullspace over a field. The nullspace method for
# DDM, SDM etc should be changed to allow nullspace over ZZ as well.
# The DomainMatrix nullspace method does allow the domain to be a ring
# but does not directly call the lower-level nullspace methods and uses
# rref_den instead. Nullspace methods should also be added to all
# matrix types in python-flint.
ddm, nonpivots = self.to_ddm().nullspace()
return ddm.to_dfm(), nonpivots
def nullspace_from_rref(self, pivots=None):
"""Return a basis for the nullspace of the matrix."""
# XXX: Use the flint nullspace method!!!
sdm, nonpivots = self.to_sdm().nullspace_from_rref(pivots=pivots)
return sdm.to_dfm(), nonpivots
def particular(self):
"""Return a particular solution to the system."""
return self.to_ddm().particular().to_dfm()
def _lll(self, transform=False, delta=0.99, eta=0.51, rep='zbasis', gram='approx'):
"""Call the fmpz_mat.lll() method but check rank to avoid segfaults."""
# XXX: There are tests that pass e.g. QQ(5,6) for delta. That fails
# with a TypeError in flint because if QQ is fmpq then conversion with
# float fails. We handle that here but there are two better fixes:
#
# - Make python-flint's fmpq convert with float(x)
# - Change the tests because delta should just be a float.
def to_float(x):
if QQ.of_type(x):
return float(x.numerator) / float(x.denominator)
else:
return float(x)
delta = to_float(delta)
eta = to_float(eta)
if not 0.25 < delta < 1:
raise DMValueError("delta must be between 0.25 and 1")
# XXX: The flint lll method segfaults if the matrix is not full rank.
m, n = self.shape
if self.rep.rank() != m:
raise DMRankError("Matrix must have full row rank for Flint LLL.")
# Actually call the flint method.
return self.rep.lll(transform=transform, delta=delta, eta=eta, rep=rep, gram=gram)
@doctest_depends_on(ground_types='flint')
def lll(self, delta=0.75):
"""Compute LLL-reduced basis using FLINT.
See :meth:`lll_transform` for more information.
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2, 3], [4, 5, 6]])
>>> M.to_DM().to_dfm().lll()
[[2, 1, 0], [-1, 1, 3]]
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.lll
Higher level interface to compute LLL-reduced basis.
lll_transform
Compute LLL-reduced basis and transform matrix.
"""
if self.domain != ZZ:
raise DMDomainError("ZZ expected, got %s" % self.domain)
elif self.rows > self.cols:
raise DMShapeError("Matrix must not have more rows than columns.")
rep = self._lll(delta=delta)
return self._new_rep(rep)
@doctest_depends_on(ground_types='flint')
def lll_transform(self, delta=0.75):
"""Compute LLL-reduced basis and transform using FLINT.
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2, 3], [4, 5, 6]]).to_DM().to_dfm()
>>> M_lll, T = M.lll_transform()
>>> M_lll
[[2, 1, 0], [-1, 1, 3]]
>>> T
[[-2, 1], [3, -1]]
>>> T.matmul(M) == M_lll
True
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.lll
Higher level interface to compute LLL-reduced basis.
lll
Compute LLL-reduced basis without transform matrix.
"""
if self.domain != ZZ:
raise DMDomainError("ZZ expected, got %s" % self.domain)
elif self.rows > self.cols:
raise DMShapeError("Matrix must not have more rows than columns.")
rep, T = self._lll(transform=True, delta=delta)
basis = self._new_rep(rep)
T_dfm = self._new(T, (self.rows, self.rows), self.domain)
return basis, T_dfm
# Avoid circular imports
from sympy.polys.matrices.ddm import DDM
from sympy.polys.matrices.ddm import SDM
| DFM |
python | euske__pdfminer | pdfminer/pdfdocument.py | {
"start": 5969,
"end": 8398
} | class ____(PDFBaseXRef):
debug = False
def __init__(self):
self.data = None
self.entlen = None
self.fl1 = self.fl2 = self.fl3 = None
self.ranges = []
return
def __repr__(self):
return '<PDFXRefStream: ranges=%r>' % (self.ranges)
def load(self, parser):
(_, objid) = parser.nexttoken() # ignored
(_, genno) = parser.nexttoken() # ignored
(_, kwd) = parser.nexttoken()
(_, stream) = parser.nextobject()
if not isinstance(stream, PDFStream) or stream['Type'] is not LITERAL_XREF:
raise PDFNoValidXRef('Invalid PDF stream spec.')
size = stream['Size']
index_array = stream.get('Index', (0, size))
if len(index_array) % 2 != 0:
raise PDFSyntaxError('Invalid index number')
self.ranges.extend(choplist(2, index_array))
(self.fl1, self.fl2, self.fl3) = stream['W']
self.data = stream.get_data()
self.entlen = self.fl1+self.fl2+self.fl3
self.trailer = stream.attrs
if self.debug:
logging.info('xref stream: objid=%s, fields=%d,%d,%d' %
(', '.join(map(repr, self.ranges)),
self.fl1, self.fl2, self.fl3))
return
def get_trailer(self):
return self.trailer
def get_objids(self):
for (start, nobjs) in self.ranges:
for i in range(nobjs):
offset = self.entlen * i
ent = self.data[offset:offset+self.entlen]
f1 = nunpack(ent[:self.fl1], 1)
if f1 == 1 or f1 == 2:
yield start+i
return
def get_pos(self, objid):
index = 0
for (start, nobjs) in self.ranges:
if start <= objid and objid < start+nobjs:
index += objid - start
break
else:
index += nobjs
else:
raise KeyError(objid)
offset = self.entlen * index
ent = self.data[offset:offset+self.entlen]
f1 = nunpack(ent[:self.fl1], 1)
f2 = nunpack(ent[self.fl1:self.fl1+self.fl2])
f3 = nunpack(ent[self.fl1+self.fl2:])
if f1 == 1:
return (None, f2, f3)
elif f1 == 2:
return (f2, f3, 0)
else:
# this is a free object
raise KeyError(objid)
## PDFSecurityHandler
##
| PDFXRefStream |
python | google__pytype | pytype/overlays/abc_overlay.py | {
"start": 2642,
"end": 3005
} | class ____(special_builtins.StaticMethod):
"""Implements abc.abstractstaticmethod."""
@classmethod
def make(cls, ctx, module):
return super().make_alias("abstractstaticmethod", ctx, module)
def call(self, node, func, args, alias_map=None):
_ = _set_abstract(args, "callable")
return super().call(node, func, args, alias_map)
| AbstractStaticMethod |
python | kamyu104__LeetCode-Solutions | Python/minimum-number-of-pushes-to-type-word-i.py | {
"start": 38,
"end": 410
} | class ____(object):
def minimumPushes(self, word):
"""
:type word: str
:rtype: int
"""
def ceil_divide(a, b):
return (a+b-1)//b
return sum((i+1)*min(len(word)-i*(9-2+1), (9-2+1)) for i in xrange(ceil_divide(len(word), (9-2+1))))
# Time: O(26)
# Space: O(26)
import collections
# freq table, greedy
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_9/models.py | {
"start": 76737,
"end": 79590
} | class ____(Request):
"""
Set the model ready flag to True. If the model is an output model of a task then try to publish the task.
:param model: Model id
:type model: str
:param force_publish_task: Publish the associated task (if exists) even if it
is not in the 'stopped' state. Optional, the default value is False.
:type force_publish_task: bool
:param publish_task: Indicates that the associated task (if exists) should be
published. Optional, the default value is True.
:type publish_task: bool
"""
_service = "models"
_action = "set_ready"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"force_publish_task": {
"description": "Publish the associated task (if exists) even if it is not in the 'stopped' state. Optional, the default value is False.",
"type": "boolean",
},
"model": {"description": "Model id", "type": "string"},
"publish_task": {
"description": "Indicates that the associated task (if exists) should be published. Optional, the default value is True.",
"type": "boolean",
},
},
"required": ["model"],
"type": "object",
}
def __init__(
self, model: str, force_publish_task: Optional[bool] = None, publish_task: Optional[bool] = None, **kwargs: Any
) -> None:
super(SetReadyRequest, self).__init__(**kwargs)
self.model = model
self.force_publish_task = force_publish_task
self.publish_task = publish_task
@schema_property("model")
def model(self) -> str:
return self._property_model
@model.setter
def model(self, value: str) -> None:
if value is None:
self._property_model = None
return
self.assert_isinstance(value, "model", six.string_types)
self._property_model = value
@schema_property("force_publish_task")
def force_publish_task(self) -> Optional[bool]:
return self._property_force_publish_task
@force_publish_task.setter
def force_publish_task(self, value: Optional[bool]) -> None:
if value is None:
self._property_force_publish_task = None
return
self.assert_isinstance(value, "force_publish_task", (bool,))
self._property_force_publish_task = value
@schema_property("publish_task")
def publish_task(self) -> Optional[bool]:
return self._property_publish_task
@publish_task.setter
def publish_task(self, value: Optional[bool]) -> None:
if value is None:
self._property_publish_task = None
return
self.assert_isinstance(value, "publish_task", (bool,))
self._property_publish_task = value
| SetReadyRequest |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/model_query.py | {
"start": 1715,
"end": 3604
} | class ____(AttributeTestClass7):
nomatch_attribute2 = ...
def __init__(self):
self.nomatch_instance2 = None
def alarm_1(x: AttributeTestClass1):
# should trigger SourceA -> Test
_test_sink(x.attribute)
def alarm_2(x: AttributeTestClass1):
# should trigger SourceA -> Test
_test_sink(x.instance)
def alarm_3(x: AttributeTestClass2):
# should trigger SourceA -> test
_test_sink(x.attribute)
def alarm_4(x: AttributeTestClass2):
# should trigger SourceA -> test
_test_sink(x.instance)
def alarm_5(x: AttributeTestClass3, source):
# should trigger Test -> SinkA and Test -> SinkB
x.attribute = source
def alarm_6(x: AttributeTestClass3):
# should trigger Test -> SinkA and Test -> SinkC
x.instance = _test_source()
def alarm_7(x: AttributeTestClass4):
# should trigger SourceC -> Test and SourceD -> Test
return x.attribute
def alarm_8(x: AttributeTestClass4):
# should trigger SourceC -> Test and SourceE -> Test
_test_sink(x.instance)
def alarm_9(x: AttributeTestClass5):
# should trigger SourceB -> Test
_test_sink(x.foo_attribute)
def alarm_10(x: AttributeTestClass5):
# should trigger SourceB -> Test
_test_sink(x.foo_instance)
def alarm_11(x: AttributeTestClass6):
# should trigger SourceB -> Test
_test_sink(x.foo_attribute)
def alarm_12(x: AttributeTestClass6):
# should trigger SourceB -> Test
_test_sink(x.foo_instance)
def no_alarm_1(x: AttributeTestClass7):
_test_sink(x.nomatch_attribute1)
_test_sink(x.nomatch_instance1)
def no_alarm_2(x: AttributeTestClass8):
_test_sink(x.nomatch_instance2)
_test_sink(x.nomatch_instance2)
def function_test1_alarm1():
return 0
def function_test1_alarm2():
return 0
def function_test1_noalarm1():
return 0
def function_test1_noalarm2():
return 0
| AttributeTestClass8 |
python | numba__numba | numba/core/ir.py | {
"start": 37148,
"end": 40097
} | class ____(EqualityCheckMixin):
"""A code block
"""
def __init__(self, scope, loc):
assert isinstance(scope, Scope)
assert isinstance(loc, Loc)
self.scope = scope
self.body = []
self.loc = loc
def copy(self):
block = Block(self.scope, self.loc)
block.body = self.body[:]
return block
def find_exprs(self, op=None):
"""
Iterate over exprs of the given *op* in this block.
"""
for inst in self.body:
if isinstance(inst, Assign):
expr = inst.value
if isinstance(expr, Expr):
if op is None or expr.op == op:
yield expr
def find_insts(self, cls=None):
"""
Iterate over insts of the given class in this block.
"""
for inst in self.body:
if isinstance(inst, cls):
yield inst
def find_variable_assignment(self, name):
"""
Returns the assignment inst associated with variable "name", None if
it cannot be found.
"""
for x in self.find_insts(cls=Assign):
if x.target.name == name:
return x
return None
def prepend(self, inst):
assert isinstance(inst, Stmt)
self.body.insert(0, inst)
def append(self, inst):
assert isinstance(inst, Stmt)
self.body.append(inst)
def remove(self, inst):
assert isinstance(inst, Stmt)
del self.body[self.body.index(inst)]
def clear(self):
del self.body[:]
def dump(self, file=None):
# Avoid early bind of sys.stdout as default value
file = file or sys.stdout
for inst in self.body:
if hasattr(inst, 'dump'):
inst.dump(file)
else:
inst_vars = sorted(str(v) for v in inst.list_vars())
print(' %-40s %s' % (inst, inst_vars), file=file)
@property
def terminator(self):
return self.body[-1]
@property
def is_terminated(self):
return self.body and self.body[-1].is_terminator
def verify(self):
if not self.is_terminated:
raise VerificationError("Missing block terminator")
# Only the last instruction can be a terminator
for inst in self.body[:-1]:
if inst.is_terminator:
raise VerificationError("Terminator before the last "
"instruction")
def insert_after(self, stmt, other):
"""
Insert *stmt* after *other*.
"""
index = self.body.index(other)
self.body.insert(index + 1, stmt)
def insert_before_terminator(self, stmt):
assert isinstance(stmt, Stmt)
assert self.is_terminated
self.body.insert(-1, stmt)
def __repr__(self):
return "<ir.Block at %s>" % (self.loc,)
| Block |
python | apache__airflow | airflow-core/tests/unit/utils/test_helpers.py | {
"start": 9240,
"end": 9579
} | class ____(BaseJobRunner):
job_type = "MockJob"
def __init__(self, job: Job, func=None):
super().__init__(job)
self.job = job
self.job.job_type = self.job_type
self.func = func
def _execute(self):
if self.func is not None:
return self.func()
return None
| MockJobRunner |
python | python-pillow__Pillow | src/PIL/TiffImagePlugin.py | {
"start": 42134,
"end": 74172
} | class ____(ImageFile.ImageFile):
format = "TIFF"
format_description = "Adobe TIFF"
_close_exclusive_fp_after_loading = False
def __init__(
self,
fp: StrOrBytesPath | IO[bytes],
filename: str | bytes | None = None,
) -> None:
self.tag_v2: ImageFileDirectory_v2
""" Image file directory (tag dictionary) """
self.tag: ImageFileDirectory_v1
""" Legacy tag entries """
super().__init__(fp, filename)
def _open(self) -> None:
"""Open the first image in a TIFF file"""
# Header
assert self.fp is not None
ifh = self.fp.read(8)
if ifh[2] == 43:
ifh += self.fp.read(8)
self.tag_v2 = ImageFileDirectory_v2(ifh)
# setup frame pointers
self.__first = self.__next = self.tag_v2.next
self.__frame = -1
self._fp = self.fp
self._frame_pos: list[int] = []
self._n_frames: int | None = None
logger.debug("*** TiffImageFile._open ***")
logger.debug("- __first: %s", self.__first)
logger.debug("- ifh: %s", repr(ifh)) # Use repr to avoid str(bytes)
# and load the first frame
self._seek(0)
@property
def n_frames(self) -> int:
current_n_frames = self._n_frames
if current_n_frames is None:
current = self.tell()
self._seek(len(self._frame_pos))
while self._n_frames is None:
self._seek(self.tell() + 1)
self.seek(current)
assert self._n_frames is not None
return self._n_frames
def seek(self, frame: int) -> None:
"""Select a given frame as current image"""
if not self._seek_check(frame):
return
self._seek(frame)
if self._im is not None and (
self.im.size != self._tile_size
or self.im.mode != self.mode
or self.readonly
):
self._im = None
def _seek(self, frame: int) -> None:
if isinstance(self._fp, DeferredError):
raise self._fp.ex
self.fp = self._fp
while len(self._frame_pos) <= frame:
if not self.__next:
msg = "no more images in TIFF file"
raise EOFError(msg)
logger.debug(
"Seeking to frame %s, on frame %s, __next %s, location: %s",
frame,
self.__frame,
self.__next,
self.fp.tell(),
)
if self.__next >= 2**63:
msg = "Unable to seek to frame"
raise ValueError(msg)
self.fp.seek(self.__next)
self._frame_pos.append(self.__next)
logger.debug("Loading tags, location: %s", self.fp.tell())
self.tag_v2.load(self.fp)
if self.tag_v2.next in self._frame_pos:
# This IFD has already been processed
# Declare this to be the end of the image
self.__next = 0
else:
self.__next = self.tag_v2.next
if self.__next == 0:
self._n_frames = frame + 1
if len(self._frame_pos) == 1:
self.is_animated = self.__next != 0
self.__frame += 1
self.fp.seek(self._frame_pos[frame])
self.tag_v2.load(self.fp)
if XMP in self.tag_v2:
xmp = self.tag_v2[XMP]
if isinstance(xmp, tuple) and len(xmp) == 1:
xmp = xmp[0]
self.info["xmp"] = xmp
elif "xmp" in self.info:
del self.info["xmp"]
self._reload_exif()
# fill the legacy tag/ifd entries
self.tag = self.ifd = ImageFileDirectory_v1.from_v2(self.tag_v2)
self.__frame = frame
self._setup()
def tell(self) -> int:
"""Return the current frame number"""
return self.__frame
def get_photoshop_blocks(self) -> dict[int, dict[str, bytes]]:
"""
Returns a dictionary of Photoshop "Image Resource Blocks".
The keys are the image resource ID. For more information, see
https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#50577409_pgfId-1037727
:returns: Photoshop "Image Resource Blocks" in a dictionary.
"""
blocks = {}
val = self.tag_v2.get(ExifTags.Base.ImageResources)
if val:
while val.startswith(b"8BIM"):
id = i16(val[4:6])
n = math.ceil((val[6] + 1) / 2) * 2
size = i32(val[6 + n : 10 + n])
data = val[10 + n : 10 + n + size]
blocks[id] = {"data": data}
val = val[math.ceil((10 + n + size) / 2) * 2 :]
return blocks
def load(self) -> Image.core.PixelAccess | None:
if self.tile and self.use_load_libtiff:
return self._load_libtiff()
return super().load()
def load_prepare(self) -> None:
if self._im is None:
Image._decompression_bomb_check(self._tile_size)
self.im = Image.core.new(self.mode, self._tile_size)
ImageFile.ImageFile.load_prepare(self)
def load_end(self) -> None:
# allow closing if we're on the first frame, there's no next
# This is the ImageFile.load path only, libtiff specific below.
if not self.is_animated:
self._close_exclusive_fp_after_loading = True
# load IFD data from fp before it is closed
exif = self.getexif()
for key in TiffTags.TAGS_V2_GROUPS:
if key not in exif:
continue
exif.get_ifd(key)
ImageOps.exif_transpose(self, in_place=True)
if ExifTags.Base.Orientation in self.tag_v2:
del self.tag_v2[ExifTags.Base.Orientation]
def _load_libtiff(self) -> Image.core.PixelAccess | None:
"""Overload method triggered when we detect a compressed tiff
Calls out to libtiff"""
Image.Image.load(self)
self.load_prepare()
if not len(self.tile) == 1:
msg = "Not exactly one tile"
raise OSError(msg)
# (self._compression, (extents tuple),
# 0, (rawmode, self._compression, fp))
extents = self.tile[0][1]
args = self.tile[0][3]
# To be nice on memory footprint, if there's a
# file descriptor, use that instead of reading
# into a string in python.
assert self.fp is not None
try:
fp = hasattr(self.fp, "fileno") and self.fp.fileno()
# flush the file descriptor, prevents error on pypy 2.4+
# should also eliminate the need for fp.tell
# in _seek
if hasattr(self.fp, "flush"):
self.fp.flush()
except OSError:
# io.BytesIO have a fileno, but returns an OSError if
# it doesn't use a file descriptor.
fp = False
if fp:
assert isinstance(args, tuple)
args_list = list(args)
args_list[2] = fp
args = tuple(args_list)
decoder = Image._getdecoder(self.mode, "libtiff", args, self.decoderconfig)
try:
decoder.setimage(self.im, extents)
except ValueError as e:
msg = "Couldn't set the image"
raise OSError(msg) from e
close_self_fp = self._exclusive_fp and not self.is_animated
if hasattr(self.fp, "getvalue"):
# We've got a stringio like thing passed in. Yay for all in memory.
# The decoder needs the entire file in one shot, so there's not
# a lot we can do here other than give it the entire file.
# unless we could do something like get the address of the
# underlying string for stringio.
#
# Rearranging for supporting byteio items, since they have a fileno
# that returns an OSError if there's no underlying fp. Easier to
# deal with here by reordering.
logger.debug("have getvalue. just sending in a string from getvalue")
n, err = decoder.decode(self.fp.getvalue())
elif fp:
# we've got a actual file on disk, pass in the fp.
logger.debug("have fileno, calling fileno version of the decoder.")
if not close_self_fp:
self.fp.seek(0)
# Save and restore the file position, because libtiff will move it
# outside of the Python runtime, and that will confuse
# io.BufferedReader and possible others.
# NOTE: This must use os.lseek(), and not fp.tell()/fp.seek(),
# because the buffer read head already may not equal the actual
# file position, and fp.seek() may just adjust it's internal
# pointer and not actually seek the OS file handle.
pos = os.lseek(fp, 0, os.SEEK_CUR)
# 4 bytes, otherwise the trace might error out
n, err = decoder.decode(b"fpfp")
os.lseek(fp, pos, os.SEEK_SET)
else:
# we have something else.
logger.debug("don't have fileno or getvalue. just reading")
self.fp.seek(0)
# UNDONE -- so much for that buffer size thing.
n, err = decoder.decode(self.fp.read())
self.tile = []
self.readonly = 0
self.load_end()
if close_self_fp:
self.fp.close()
self.fp = None # might be shared
if err < 0:
msg = f"decoder error {err}"
raise OSError(msg)
return Image.Image.load(self)
def _setup(self) -> None:
"""Setup this image object based on current tags"""
if 0xBC01 in self.tag_v2:
msg = "Windows Media Photo files not yet supported"
raise OSError(msg)
# extract relevant tags
self._compression = COMPRESSION_INFO[self.tag_v2.get(COMPRESSION, 1)]
self._planar_configuration = self.tag_v2.get(PLANAR_CONFIGURATION, 1)
# photometric is a required tag, but not everyone is reading
# the specification
photo = self.tag_v2.get(PHOTOMETRIC_INTERPRETATION, 0)
# old style jpeg compression images most certainly are YCbCr
if self._compression == "tiff_jpeg":
photo = 6
fillorder = self.tag_v2.get(FILLORDER, 1)
logger.debug("*** Summary ***")
logger.debug("- compression: %s", self._compression)
logger.debug("- photometric_interpretation: %s", photo)
logger.debug("- planar_configuration: %s", self._planar_configuration)
logger.debug("- fill_order: %s", fillorder)
logger.debug("- YCbCr subsampling: %s", self.tag_v2.get(YCBCRSUBSAMPLING))
# size
try:
xsize = self.tag_v2[IMAGEWIDTH]
ysize = self.tag_v2[IMAGELENGTH]
except KeyError as e:
msg = "Missing dimensions"
raise TypeError(msg) from e
if not isinstance(xsize, int) or not isinstance(ysize, int):
msg = "Invalid dimensions"
raise ValueError(msg)
self._tile_size = xsize, ysize
orientation = self.tag_v2.get(ExifTags.Base.Orientation)
if orientation in (5, 6, 7, 8):
self._size = ysize, xsize
else:
self._size = xsize, ysize
logger.debug("- size: %s", self.size)
sample_format = self.tag_v2.get(SAMPLEFORMAT, (1,))
if len(sample_format) > 1 and max(sample_format) == min(sample_format) == 1:
# SAMPLEFORMAT is properly per band, so an RGB image will
# be (1,1,1). But, we don't support per band pixel types,
# and anything more than one band is a uint8. So, just
# take the first element. Revisit this if adding support
# for more exotic images.
sample_format = (1,)
bps_tuple = self.tag_v2.get(BITSPERSAMPLE, (1,))
extra_tuple = self.tag_v2.get(EXTRASAMPLES, ())
if photo in (2, 6, 8): # RGB, YCbCr, LAB
bps_count = 3
elif photo == 5: # CMYK
bps_count = 4
else:
bps_count = 1
bps_count += len(extra_tuple)
bps_actual_count = len(bps_tuple)
samples_per_pixel = self.tag_v2.get(
SAMPLESPERPIXEL,
3 if self._compression == "tiff_jpeg" and photo in (2, 6) else 1,
)
if samples_per_pixel > MAX_SAMPLESPERPIXEL:
# DOS check, samples_per_pixel can be a Long, and we extend the tuple below
logger.error(
"More samples per pixel than can be decoded: %s", samples_per_pixel
)
msg = "Invalid value for samples per pixel"
raise SyntaxError(msg)
if samples_per_pixel < bps_actual_count:
# If a file has more values in bps_tuple than expected,
# remove the excess.
bps_tuple = bps_tuple[:samples_per_pixel]
elif samples_per_pixel > bps_actual_count and bps_actual_count == 1:
# If a file has only one value in bps_tuple, when it should have more,
# presume it is the same number of bits for all of the samples.
bps_tuple = bps_tuple * samples_per_pixel
if len(bps_tuple) != samples_per_pixel:
msg = "unknown data organization"
raise SyntaxError(msg)
# mode: check photometric interpretation and bits per pixel
key = (
self.tag_v2.prefix,
photo,
sample_format,
fillorder,
bps_tuple,
extra_tuple,
)
logger.debug("format key: %s", key)
try:
self._mode, rawmode = OPEN_INFO[key]
except KeyError as e:
logger.debug("- unsupported format")
msg = "unknown pixel mode"
raise SyntaxError(msg) from e
logger.debug("- raw mode: %s", rawmode)
logger.debug("- pil mode: %s", self.mode)
self.info["compression"] = self._compression
xres = self.tag_v2.get(X_RESOLUTION, 1)
yres = self.tag_v2.get(Y_RESOLUTION, 1)
if xres and yres:
resunit = self.tag_v2.get(RESOLUTION_UNIT)
if resunit == 2: # dots per inch
self.info["dpi"] = (xres, yres)
elif resunit == 3: # dots per centimeter. convert to dpi
self.info["dpi"] = (xres * 2.54, yres * 2.54)
elif resunit is None: # used to default to 1, but now 2)
self.info["dpi"] = (xres, yres)
# For backward compatibility,
# we also preserve the old behavior
self.info["resolution"] = xres, yres
else: # No absolute unit of measurement
self.info["resolution"] = xres, yres
# build tile descriptors
x = y = layer = 0
self.tile = []
self.use_load_libtiff = READ_LIBTIFF or self._compression != "raw"
if self.use_load_libtiff:
# Decoder expects entire file as one tile.
# There's a buffer size limit in load (64k)
# so large g4 images will fail if we use that
# function.
#
# Setup the one tile for the whole image, then
# use the _load_libtiff function.
# libtiff handles the fillmode for us, so 1;IR should
# actually be 1;I. Including the R double reverses the
# bits, so stripes of the image are reversed. See
# https://github.com/python-pillow/Pillow/issues/279
if fillorder == 2:
# Replace fillorder with fillorder=1
key = key[:3] + (1,) + key[4:]
logger.debug("format key: %s", key)
# this should always work, since all the
# fillorder==2 modes have a corresponding
# fillorder=1 mode
self._mode, rawmode = OPEN_INFO[key]
# YCbCr images with new jpeg compression with pixels in one plane
# unpacked straight into RGB values
if (
photo == 6
and self._compression == "jpeg"
and self._planar_configuration == 1
):
rawmode = "RGB"
# libtiff always returns the bytes in native order.
# we're expecting image byte order. So, if the rawmode
# contains I;16, we need to convert from native to image
# byte order.
elif rawmode == "I;16":
rawmode = "I;16N"
elif rawmode.endswith((";16B", ";16L")):
rawmode = rawmode[:-1] + "N"
# Offset in the tile tuple is 0, we go from 0,0 to
# w,h, and we only do this once -- eds
a = (rawmode, self._compression, False, self.tag_v2.offset)
self.tile.append(ImageFile._Tile("libtiff", (0, 0, xsize, ysize), 0, a))
elif STRIPOFFSETS in self.tag_v2 or TILEOFFSETS in self.tag_v2:
# striped image
if STRIPOFFSETS in self.tag_v2:
offsets = self.tag_v2[STRIPOFFSETS]
h = self.tag_v2.get(ROWSPERSTRIP, ysize)
w = xsize
else:
# tiled image
offsets = self.tag_v2[TILEOFFSETS]
tilewidth = self.tag_v2.get(TILEWIDTH)
h = self.tag_v2.get(TILELENGTH)
if not isinstance(tilewidth, int) or not isinstance(h, int):
msg = "Invalid tile dimensions"
raise ValueError(msg)
w = tilewidth
if w == xsize and h == ysize and self._planar_configuration != 2:
# Every tile covers the image. Only use the last offset
offsets = offsets[-1:]
for offset in offsets:
if x + w > xsize:
stride = w * sum(bps_tuple) / 8 # bytes per line
else:
stride = 0
tile_rawmode = rawmode
if self._planar_configuration == 2:
# each band on it's own layer
tile_rawmode = rawmode[layer]
# adjust stride width accordingly
stride /= bps_count
args = (tile_rawmode, int(stride), 1)
self.tile.append(
ImageFile._Tile(
self._compression,
(x, y, min(x + w, xsize), min(y + h, ysize)),
offset,
args,
)
)
x += w
if x >= xsize:
x, y = 0, y + h
if y >= ysize:
y = 0
layer += 1
else:
logger.debug("- unsupported data organization")
msg = "unknown data organization"
raise SyntaxError(msg)
# Fix up info.
if ICCPROFILE in self.tag_v2:
self.info["icc_profile"] = self.tag_v2[ICCPROFILE]
# fixup palette descriptor
if self.mode in ["P", "PA"]:
palette = [o8(b // 256) for b in self.tag_v2[COLORMAP]]
self.palette = ImagePalette.raw("RGB;L", b"".join(palette))
#
# --------------------------------------------------------------------
# Write TIFF files
# little endian is default except for image modes with
# explicit big endian byte-order
SAVE_INFO = {
# mode => rawmode, byteorder, photometrics,
# sampleformat, bitspersample, extra
"1": ("1", II, 1, 1, (1,), None),
"L": ("L", II, 1, 1, (8,), None),
"LA": ("LA", II, 1, 1, (8, 8), 2),
"P": ("P", II, 3, 1, (8,), None),
"PA": ("PA", II, 3, 1, (8, 8), 2),
"I": ("I;32S", II, 1, 2, (32,), None),
"I;16": ("I;16", II, 1, 1, (16,), None),
"I;16L": ("I;16L", II, 1, 1, (16,), None),
"F": ("F;32F", II, 1, 3, (32,), None),
"RGB": ("RGB", II, 2, 1, (8, 8, 8), None),
"RGBX": ("RGBX", II, 2, 1, (8, 8, 8, 8), 0),
"RGBA": ("RGBA", II, 2, 1, (8, 8, 8, 8), 2),
"CMYK": ("CMYK", II, 5, 1, (8, 8, 8, 8), None),
"YCbCr": ("YCbCr", II, 6, 1, (8, 8, 8), None),
"LAB": ("LAB", II, 8, 1, (8, 8, 8), None),
"I;16B": ("I;16B", MM, 1, 1, (16,), None),
}
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
try:
rawmode, prefix, photo, format, bits, extra = SAVE_INFO[im.mode]
except KeyError as e:
msg = f"cannot write mode {im.mode} as TIFF"
raise OSError(msg) from e
encoderinfo = im.encoderinfo
encoderconfig = im.encoderconfig
ifd = ImageFileDirectory_v2(prefix=prefix)
if encoderinfo.get("big_tiff"):
ifd._bigtiff = True
try:
compression = encoderinfo["compression"]
except KeyError:
compression = im.info.get("compression")
if isinstance(compression, int):
# compression value may be from BMP. Ignore it
compression = None
if compression is None:
compression = "raw"
elif compression == "tiff_jpeg":
# OJPEG is obsolete, so use new-style JPEG compression instead
compression = "jpeg"
elif compression == "tiff_deflate":
compression = "tiff_adobe_deflate"
libtiff = WRITE_LIBTIFF or compression != "raw"
# required for color libtiff images
ifd[PLANAR_CONFIGURATION] = 1
ifd[IMAGEWIDTH] = im.size[0]
ifd[IMAGELENGTH] = im.size[1]
# write any arbitrary tags passed in as an ImageFileDirectory
if "tiffinfo" in encoderinfo:
info = encoderinfo["tiffinfo"]
elif "exif" in encoderinfo:
info = encoderinfo["exif"]
if isinstance(info, bytes):
exif = Image.Exif()
exif.load(info)
info = exif
else:
info = {}
logger.debug("Tiffinfo Keys: %s", list(info))
if isinstance(info, ImageFileDirectory_v1):
info = info.to_v2()
for key in info:
if isinstance(info, Image.Exif) and key in TiffTags.TAGS_V2_GROUPS:
ifd[key] = info.get_ifd(key)
else:
ifd[key] = info.get(key)
try:
ifd.tagtype[key] = info.tagtype[key]
except Exception:
pass # might not be an IFD. Might not have populated type
legacy_ifd = {}
if hasattr(im, "tag"):
legacy_ifd = im.tag.to_v2()
supplied_tags = {**legacy_ifd, **getattr(im, "tag_v2", {})}
for tag in (
# IFD offset that may not be correct in the saved image
EXIFIFD,
# Determined by the image format and should not be copied from legacy_ifd.
SAMPLEFORMAT,
):
if tag in supplied_tags:
del supplied_tags[tag]
# additions written by Greg Couch, gregc@cgl.ucsf.edu
# inspired by image-sig posting from Kevin Cazabon, kcazabon@home.com
if hasattr(im, "tag_v2"):
# preserve tags from original TIFF image file
for key in (
RESOLUTION_UNIT,
X_RESOLUTION,
Y_RESOLUTION,
IPTC_NAA_CHUNK,
PHOTOSHOP_CHUNK,
XMP,
):
if key in im.tag_v2:
if key == IPTC_NAA_CHUNK and im.tag_v2.tagtype[key] not in (
TiffTags.BYTE,
TiffTags.UNDEFINED,
):
del supplied_tags[key]
else:
ifd[key] = im.tag_v2[key]
ifd.tagtype[key] = im.tag_v2.tagtype[key]
# preserve ICC profile (should also work when saving other formats
# which support profiles as TIFF) -- 2008-06-06 Florian Hoech
icc = encoderinfo.get("icc_profile", im.info.get("icc_profile"))
if icc:
ifd[ICCPROFILE] = icc
for key, name in [
(IMAGEDESCRIPTION, "description"),
(X_RESOLUTION, "resolution"),
(Y_RESOLUTION, "resolution"),
(X_RESOLUTION, "x_resolution"),
(Y_RESOLUTION, "y_resolution"),
(RESOLUTION_UNIT, "resolution_unit"),
(SOFTWARE, "software"),
(DATE_TIME, "date_time"),
(ARTIST, "artist"),
(COPYRIGHT, "copyright"),
]:
if name in encoderinfo:
ifd[key] = encoderinfo[name]
dpi = encoderinfo.get("dpi")
if dpi:
ifd[RESOLUTION_UNIT] = 2
ifd[X_RESOLUTION] = dpi[0]
ifd[Y_RESOLUTION] = dpi[1]
if bits != (1,):
ifd[BITSPERSAMPLE] = bits
if len(bits) != 1:
ifd[SAMPLESPERPIXEL] = len(bits)
if extra is not None:
ifd[EXTRASAMPLES] = extra
if format != 1:
ifd[SAMPLEFORMAT] = format
if PHOTOMETRIC_INTERPRETATION not in ifd:
ifd[PHOTOMETRIC_INTERPRETATION] = photo
elif im.mode in ("1", "L") and ifd[PHOTOMETRIC_INTERPRETATION] == 0:
if im.mode == "1":
inverted_im = im.copy()
px = inverted_im.load()
if px is not None:
for y in range(inverted_im.height):
for x in range(inverted_im.width):
px[x, y] = 0 if px[x, y] == 255 else 255
im = inverted_im
else:
im = ImageOps.invert(im)
if im.mode in ["P", "PA"]:
lut = im.im.getpalette("RGB", "RGB;L")
colormap = []
colors = len(lut) // 3
for i in range(3):
colormap += [v * 256 for v in lut[colors * i : colors * (i + 1)]]
colormap += [0] * (256 - colors)
ifd[COLORMAP] = colormap
# data orientation
w, h = ifd[IMAGEWIDTH], ifd[IMAGELENGTH]
stride = len(bits) * ((w * bits[0] + 7) // 8)
if ROWSPERSTRIP not in ifd:
# aim for given strip size (64 KB by default) when using libtiff writer
if libtiff:
im_strip_size = encoderinfo.get("strip_size", STRIP_SIZE)
rows_per_strip = 1 if stride == 0 else min(im_strip_size // stride, h)
# JPEG encoder expects multiple of 8 rows
if compression == "jpeg":
rows_per_strip = min(((rows_per_strip + 7) // 8) * 8, h)
else:
rows_per_strip = h
if rows_per_strip == 0:
rows_per_strip = 1
ifd[ROWSPERSTRIP] = rows_per_strip
strip_byte_counts = 1 if stride == 0 else stride * ifd[ROWSPERSTRIP]
strips_per_image = (h + ifd[ROWSPERSTRIP] - 1) // ifd[ROWSPERSTRIP]
if strip_byte_counts >= 2**16:
ifd.tagtype[STRIPBYTECOUNTS] = TiffTags.LONG
ifd[STRIPBYTECOUNTS] = (strip_byte_counts,) * (strips_per_image - 1) + (
stride * h - strip_byte_counts * (strips_per_image - 1),
)
ifd[STRIPOFFSETS] = tuple(
range(0, strip_byte_counts * strips_per_image, strip_byte_counts)
) # this is adjusted by IFD writer
# no compression by default:
ifd[COMPRESSION] = COMPRESSION_INFO_REV.get(compression, 1)
if im.mode == "YCbCr":
for tag, default_value in {
YCBCRSUBSAMPLING: (1, 1),
REFERENCEBLACKWHITE: (0, 255, 128, 255, 128, 255),
}.items():
ifd.setdefault(tag, default_value)
blocklist = [TILEWIDTH, TILELENGTH, TILEOFFSETS, TILEBYTECOUNTS]
if libtiff:
if "quality" in encoderinfo:
quality = encoderinfo["quality"]
if not isinstance(quality, int) or quality < 0 or quality > 100:
msg = "Invalid quality setting"
raise ValueError(msg)
if compression != "jpeg":
msg = "quality setting only supported for 'jpeg' compression"
raise ValueError(msg)
ifd[JPEGQUALITY] = quality
logger.debug("Saving using libtiff encoder")
logger.debug("Items: %s", sorted(ifd.items()))
_fp = 0
if hasattr(fp, "fileno"):
try:
fp.seek(0)
_fp = fp.fileno()
except io.UnsupportedOperation:
pass
# optional types for non core tags
types = {}
# STRIPOFFSETS and STRIPBYTECOUNTS are added by the library
# based on the data in the strip.
# OSUBFILETYPE is deprecated.
# The other tags expect arrays with a certain length (fixed or depending on
# BITSPERSAMPLE, etc), passing arrays with a different length will result in
# segfaults. Block these tags until we add extra validation.
# SUBIFD may also cause a segfault.
blocklist += [
OSUBFILETYPE,
REFERENCEBLACKWHITE,
STRIPBYTECOUNTS,
STRIPOFFSETS,
TRANSFERFUNCTION,
SUBIFD,
]
# bits per sample is a single short in the tiff directory, not a list.
atts: dict[int, Any] = {BITSPERSAMPLE: bits[0]}
# Merge the ones that we have with (optional) more bits from
# the original file, e.g x,y resolution so that we can
# save(load('')) == original file.
for tag, value in itertools.chain(ifd.items(), supplied_tags.items()):
# Libtiff can only process certain core items without adding
# them to the custom dictionary.
# Custom items are supported for int, float, unicode, string and byte
# values. Other types and tuples require a tagtype.
if tag not in TiffTags.LIBTIFF_CORE:
if tag in TiffTags.TAGS_V2_GROUPS:
types[tag] = TiffTags.LONG8
elif tag in ifd.tagtype:
types[tag] = ifd.tagtype[tag]
elif isinstance(value, (int, float, str, bytes)) or (
isinstance(value, tuple)
and all(isinstance(v, (int, float, IFDRational)) for v in value)
):
type = TiffTags.lookup(tag).type
if type:
types[tag] = type
if tag not in atts and tag not in blocklist:
if isinstance(value, str):
atts[tag] = value.encode("ascii", "replace") + b"\0"
elif isinstance(value, IFDRational):
atts[tag] = float(value)
else:
atts[tag] = value
if SAMPLEFORMAT in atts and len(atts[SAMPLEFORMAT]) == 1:
atts[SAMPLEFORMAT] = atts[SAMPLEFORMAT][0]
logger.debug("Converted items: %s", sorted(atts.items()))
# libtiff always expects the bytes in native order.
# we're storing image byte order. So, if the rawmode
# contains I;16, we need to convert from native to image
# byte order.
if im.mode in ("I;16", "I;16B", "I;16L"):
rawmode = "I;16N"
# Pass tags as sorted list so that the tags are set in a fixed order.
# This is required by libtiff for some tags. For example, the JPEGQUALITY
# pseudo tag requires that the COMPRESS tag was already set.
tags = list(atts.items())
tags.sort()
a = (rawmode, compression, _fp, filename, tags, types)
encoder = Image._getencoder(im.mode, "libtiff", a, encoderconfig)
encoder.setimage(im.im, (0, 0) + im.size)
while True:
errcode, data = encoder.encode(ImageFile.MAXBLOCK)[1:]
if not _fp:
fp.write(data)
if errcode:
break
if errcode < 0:
msg = f"encoder error {errcode} when writing image file"
raise OSError(msg)
else:
for tag in blocklist:
del ifd[tag]
offset = ifd.save(fp)
ImageFile._save(
im,
fp,
[ImageFile._Tile("raw", (0, 0) + im.size, offset, (rawmode, stride, 1))],
)
# -- helper for multi-page save --
if "_debug_multipage" in encoderinfo:
# just to access o32 and o16 (using correct byte order)
setattr(im, "_debug_multipage", ifd)
| TiffImageFile |
python | huggingface__transformers | tests/models/mbart/test_modeling_mbart.py | {
"start": 27849,
"end": 29015
} | class ____(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (MBartDecoder, MBartForCausalLM) if is_torch_available() else ()
is_encoder_decoder = False
def setUp(
self,
):
self.model_tester = MBartStandaloneDecoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=MBartConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def test_decoder_model_attn_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
@unittest.skip(reason="Decoder cannot retain gradients")
def test_retain_grad_hidden_states_attentions(self):
return
@unittest.skip(reason="Decoder cannot retain gradients")
def test_flex_attention_with_grads(self):
return
| MBartStandaloneDecoderModelTest |
python | aio-libs__aiohttp | aiohttp/client_exceptions.py | {
"start": 7992,
"end": 8071
} | class ____(InvalidURL):
"""Invalid URL client error."""
| InvalidUrlClientError |
python | mwaskom__seaborn | tests/test_distributions.py | {
"start": 35842,
"end": 59496
} | class ____(SharedAxesLevelTests):
func = staticmethod(histplot)
def get_last_color(self, ax, element="bars", fill=True):
if element == "bars":
if fill:
return ax.patches[-1].get_facecolor()
else:
return ax.patches[-1].get_edgecolor()
else:
if fill:
artist = ax.collections[-1]
facecolor = artist.get_facecolor()
edgecolor = artist.get_edgecolor()
assert_colors_equal(facecolor, edgecolor, check_alpha=False)
return facecolor
else:
return ax.lines[-1].get_color()
@pytest.mark.parametrize(
"element,fill",
itertools.product(["bars", "step", "poly"], [True, False]),
)
def test_color(self, long_df, element, fill):
super().test_color(long_df, element=element, fill=fill)
@pytest.mark.parametrize(
"variable", ["x", "y"],
)
def test_long_vectors(self, long_df, variable):
vector = long_df[variable]
vectors = [
variable, vector, vector.to_numpy(), vector.to_list(),
]
f, axs = plt.subplots(3)
for vector, ax in zip(vectors, axs):
histplot(data=long_df, ax=ax, **{variable: vector})
bars = [ax.patches for ax in axs]
for a_bars, b_bars in itertools.product(bars, bars):
for a, b in zip(a_bars, b_bars):
assert_array_equal(a.get_height(), b.get_height())
assert_array_equal(a.get_xy(), b.get_xy())
def test_wide_vs_long_data(self, wide_df):
f, (ax1, ax2) = plt.subplots(2)
histplot(data=wide_df, ax=ax1, common_bins=False)
for col in wide_df.columns[::-1]:
histplot(data=wide_df, x=col, ax=ax2)
for a, b in zip(ax1.patches, ax2.patches):
assert a.get_height() == b.get_height()
assert a.get_xy() == b.get_xy()
def test_flat_vector(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
histplot(data=long_df["x"], ax=ax1)
histplot(data=long_df, x="x", ax=ax2)
for a, b in zip(ax1.patches, ax2.patches):
assert a.get_height() == b.get_height()
assert a.get_xy() == b.get_xy()
def test_empty_data(self):
ax = histplot(x=[])
assert not ax.patches
def test_variable_assignment(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
histplot(data=long_df, x="x", ax=ax1)
histplot(data=long_df, y="x", ax=ax2)
for a, b in zip(ax1.patches, ax2.patches):
assert a.get_height() == b.get_width()
@pytest.mark.parametrize("element", ["bars", "step", "poly"])
@pytest.mark.parametrize("multiple", ["layer", "dodge", "stack", "fill"])
def test_hue_fill_colors(self, long_df, multiple, element):
ax = histplot(
data=long_df, x="x", hue="a",
multiple=multiple, bins=1,
fill=True, element=element, legend=False,
)
palette = color_palette()
if multiple == "layer":
if element == "bars":
a = .5
else:
a = .25
else:
a = .75
for bar, color in zip(ax.patches[::-1], palette):
assert_colors_equal(bar.get_facecolor(), to_rgba(color, a))
for poly, color in zip(ax.collections[::-1], palette):
assert_colors_equal(poly.get_facecolor(), to_rgba(color, a))
def test_hue_stack(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
n = 10
kws = dict(data=long_df, x="x", hue="a", bins=n, element="bars")
histplot(**kws, multiple="layer", ax=ax1)
histplot(**kws, multiple="stack", ax=ax2)
layer_heights = np.reshape([b.get_height() for b in ax1.patches], (-1, n))
stack_heights = np.reshape([b.get_height() for b in ax2.patches], (-1, n))
assert_array_equal(layer_heights, stack_heights)
stack_xys = np.reshape([b.get_xy() for b in ax2.patches], (-1, n, 2))
assert_array_equal(
stack_xys[..., 1] + stack_heights,
stack_heights.cumsum(axis=0),
)
def test_hue_fill(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
n = 10
kws = dict(data=long_df, x="x", hue="a", bins=n, element="bars")
histplot(**kws, multiple="layer", ax=ax1)
histplot(**kws, multiple="fill", ax=ax2)
layer_heights = np.reshape([b.get_height() for b in ax1.patches], (-1, n))
stack_heights = np.reshape([b.get_height() for b in ax2.patches], (-1, n))
assert_array_almost_equal(
layer_heights / layer_heights.sum(axis=0), stack_heights
)
stack_xys = np.reshape([b.get_xy() for b in ax2.patches], (-1, n, 2))
assert_array_almost_equal(
(stack_xys[..., 1] + stack_heights) / stack_heights.sum(axis=0),
stack_heights.cumsum(axis=0),
)
def test_hue_dodge(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
bw = 2
kws = dict(data=long_df, x="x", hue="c", binwidth=bw, element="bars")
histplot(**kws, multiple="layer", ax=ax1)
histplot(**kws, multiple="dodge", ax=ax2)
layer_heights = [b.get_height() for b in ax1.patches]
dodge_heights = [b.get_height() for b in ax2.patches]
assert_array_equal(layer_heights, dodge_heights)
layer_xs = np.reshape([b.get_x() for b in ax1.patches], (2, -1))
dodge_xs = np.reshape([b.get_x() for b in ax2.patches], (2, -1))
assert_array_almost_equal(layer_xs[1], dodge_xs[1])
assert_array_almost_equal(layer_xs[0], dodge_xs[0] - bw / 2)
def test_hue_as_numpy_dodged(self, long_df):
# https://github.com/mwaskom/seaborn/issues/2452
ax = histplot(
long_df,
x="y", hue=long_df["a"].to_numpy(),
multiple="dodge", bins=1,
)
# Note hue order reversal
assert ax.patches[1].get_x() < ax.patches[0].get_x()
def test_multiple_input_check(self, flat_series):
with pytest.raises(ValueError, match="`multiple` must be"):
histplot(flat_series, multiple="invalid")
def test_element_input_check(self, flat_series):
with pytest.raises(ValueError, match="`element` must be"):
histplot(flat_series, element="invalid")
def test_count_stat(self, flat_series):
ax = histplot(flat_series, stat="count")
bar_heights = [b.get_height() for b in ax.patches]
assert sum(bar_heights) == len(flat_series)
def test_density_stat(self, flat_series):
ax = histplot(flat_series, stat="density")
bar_heights = [b.get_height() for b in ax.patches]
bar_widths = [b.get_width() for b in ax.patches]
assert np.multiply(bar_heights, bar_widths).sum() == pytest.approx(1)
def test_density_stat_common_norm(self, long_df):
ax = histplot(
data=long_df, x="x", hue="a",
stat="density", common_norm=True, element="bars",
)
bar_heights = [b.get_height() for b in ax.patches]
bar_widths = [b.get_width() for b in ax.patches]
assert np.multiply(bar_heights, bar_widths).sum() == pytest.approx(1)
def test_density_stat_unique_norm(self, long_df):
n = 10
ax = histplot(
data=long_df, x="x", hue="a",
stat="density", bins=n, common_norm=False, element="bars",
)
bar_groups = ax.patches[:n], ax.patches[-n:]
for bars in bar_groups:
bar_heights = [b.get_height() for b in bars]
bar_widths = [b.get_width() for b in bars]
bar_areas = np.multiply(bar_heights, bar_widths)
assert bar_areas.sum() == pytest.approx(1)
@pytest.fixture(params=["probability", "proportion"])
def height_norm_arg(self, request):
return request.param
def test_probability_stat(self, flat_series, height_norm_arg):
ax = histplot(flat_series, stat=height_norm_arg)
bar_heights = [b.get_height() for b in ax.patches]
assert sum(bar_heights) == pytest.approx(1)
def test_probability_stat_common_norm(self, long_df, height_norm_arg):
ax = histplot(
data=long_df, x="x", hue="a",
stat=height_norm_arg, common_norm=True, element="bars",
)
bar_heights = [b.get_height() for b in ax.patches]
assert sum(bar_heights) == pytest.approx(1)
def test_probability_stat_unique_norm(self, long_df, height_norm_arg):
n = 10
ax = histplot(
data=long_df, x="x", hue="a",
stat=height_norm_arg, bins=n, common_norm=False, element="bars",
)
bar_groups = ax.patches[:n], ax.patches[-n:]
for bars in bar_groups:
bar_heights = [b.get_height() for b in bars]
assert sum(bar_heights) == pytest.approx(1)
def test_percent_stat(self, flat_series):
ax = histplot(flat_series, stat="percent")
bar_heights = [b.get_height() for b in ax.patches]
assert sum(bar_heights) == 100
def test_common_bins(self, long_df):
n = 10
ax = histplot(
long_df, x="x", hue="a", common_bins=True, bins=n, element="bars",
)
bar_groups = ax.patches[:n], ax.patches[-n:]
assert_array_equal(
[b.get_xy() for b in bar_groups[0]],
[b.get_xy() for b in bar_groups[1]]
)
def test_unique_bins(self, wide_df):
ax = histplot(wide_df, common_bins=False, bins=10, element="bars")
bar_groups = np.split(np.array(ax.patches), len(wide_df.columns))
for i, col in enumerate(wide_df.columns[::-1]):
bars = bar_groups[i]
start = bars[0].get_x()
stop = bars[-1].get_x() + bars[-1].get_width()
assert_array_almost_equal(start, wide_df[col].min())
assert_array_almost_equal(stop, wide_df[col].max())
def test_range_with_inf(self, rng):
x = rng.normal(0, 1, 20)
ax = histplot([-np.inf, *x])
leftmost_edge = min(p.get_x() for p in ax.patches)
assert leftmost_edge == x.min()
def test_weights_with_missing(self, null_df):
ax = histplot(null_df, x="x", weights="s", bins=5)
bar_heights = [bar.get_height() for bar in ax.patches]
total_weight = null_df[["x", "s"]].dropna()["s"].sum()
assert sum(bar_heights) == pytest.approx(total_weight)
def test_weight_norm(self, rng):
vals = rng.normal(0, 1, 50)
x = np.concatenate([vals, vals])
w = np.repeat([1, 2], 50)
ax = histplot(
x=x, weights=w, hue=w, common_norm=True, stat="density", bins=5
)
# Recall that artists are added in reverse of hue order
y1 = [bar.get_height() for bar in ax.patches[:5]]
y2 = [bar.get_height() for bar in ax.patches[5:]]
assert sum(y1) == 2 * sum(y2)
def test_discrete(self, long_df):
ax = histplot(long_df, x="s", discrete=True)
data_min = long_df["s"].min()
data_max = long_df["s"].max()
assert len(ax.patches) == (data_max - data_min + 1)
for i, bar in enumerate(ax.patches):
assert bar.get_width() == 1
assert bar.get_x() == (data_min + i - .5)
def test_discrete_categorical_default(self, long_df):
ax = histplot(long_df, x="a")
for i, bar in enumerate(ax.patches):
assert bar.get_width() == 1
def test_categorical_yaxis_inversion(self, long_df):
ax = histplot(long_df, y="a")
ymax, ymin = ax.get_ylim()
assert ymax > ymin
def test_datetime_scale(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
histplot(x=long_df["t"], fill=True, ax=ax1)
histplot(x=long_df["t"], fill=False, ax=ax2)
assert ax1.get_xlim() == ax2.get_xlim()
@pytest.mark.parametrize("stat", ["count", "density", "probability"])
def test_kde(self, flat_series, stat):
ax = histplot(
flat_series, kde=True, stat=stat, kde_kws={"cut": 10}
)
bar_widths = [b.get_width() for b in ax.patches]
bar_heights = [b.get_height() for b in ax.patches]
hist_area = np.multiply(bar_widths, bar_heights).sum()
density, = ax.lines
kde_area = integrate(density.get_ydata(), density.get_xdata())
assert kde_area == pytest.approx(hist_area)
@pytest.mark.parametrize("multiple", ["layer", "dodge"])
@pytest.mark.parametrize("stat", ["count", "density", "probability"])
def test_kde_with_hue(self, long_df, stat, multiple):
n = 10
ax = histplot(
long_df, x="x", hue="c", multiple=multiple,
kde=True, stat=stat, element="bars",
kde_kws={"cut": 10}, bins=n,
)
bar_groups = ax.patches[:n], ax.patches[-n:]
for i, bars in enumerate(bar_groups):
bar_widths = [b.get_width() for b in bars]
bar_heights = [b.get_height() for b in bars]
hist_area = np.multiply(bar_widths, bar_heights).sum()
x, y = ax.lines[i].get_xydata().T
kde_area = integrate(y, x)
if multiple == "layer":
assert kde_area == pytest.approx(hist_area)
elif multiple == "dodge":
assert kde_area == pytest.approx(hist_area * 2)
def test_kde_default_cut(self, flat_series):
ax = histplot(flat_series, kde=True)
support = ax.lines[0].get_xdata()
assert support.min() == flat_series.min()
assert support.max() == flat_series.max()
def test_kde_hue(self, long_df):
n = 10
ax = histplot(data=long_df, x="x", hue="a", kde=True, bins=n)
for bar, line in zip(ax.patches[::n], ax.lines):
assert_colors_equal(
bar.get_facecolor(), line.get_color(), check_alpha=False
)
def test_kde_yaxis(self, flat_series):
f, ax = plt.subplots()
histplot(x=flat_series, kde=True)
histplot(y=flat_series, kde=True)
x, y = ax.lines
assert_array_equal(x.get_xdata(), y.get_ydata())
assert_array_equal(x.get_ydata(), y.get_xdata())
def test_kde_line_kws(self, flat_series):
lw = 5
ax = histplot(flat_series, kde=True, line_kws=dict(lw=lw))
assert ax.lines[0].get_linewidth() == lw
def test_kde_singular_data(self):
with warnings.catch_warnings():
warnings.simplefilter("error")
ax = histplot(x=np.ones(10), kde=True)
assert not ax.lines
with warnings.catch_warnings():
warnings.simplefilter("error")
ax = histplot(x=[5], kde=True)
assert not ax.lines
def test_element_default(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
histplot(long_df, x="x", ax=ax1)
histplot(long_df, x="x", ax=ax2, element="bars")
assert len(ax1.patches) == len(ax2.patches)
f, (ax1, ax2) = plt.subplots(2)
histplot(long_df, x="x", hue="a", ax=ax1)
histplot(long_df, x="x", hue="a", ax=ax2, element="bars")
assert len(ax1.patches) == len(ax2.patches)
def test_bars_no_fill(self, flat_series):
alpha = .5
ax = histplot(flat_series, element="bars", fill=False, alpha=alpha)
for bar in ax.patches:
assert bar.get_facecolor() == (0, 0, 0, 0)
assert bar.get_edgecolor()[-1] == alpha
def test_step_fill(self, flat_series):
f, (ax1, ax2) = plt.subplots(2)
n = 10
histplot(flat_series, element="bars", fill=True, bins=n, ax=ax1)
histplot(flat_series, element="step", fill=True, bins=n, ax=ax2)
bar_heights = [b.get_height() for b in ax1.patches]
bar_widths = [b.get_width() for b in ax1.patches]
bar_edges = [b.get_x() for b in ax1.patches]
fill = ax2.collections[0]
x, y = fill.get_paths()[0].vertices[::-1].T
assert_array_equal(x[1:2 * n:2], bar_edges)
assert_array_equal(y[1:2 * n:2], bar_heights)
assert x[n * 2] == bar_edges[-1] + bar_widths[-1]
assert y[n * 2] == bar_heights[-1]
def test_poly_fill(self, flat_series):
f, (ax1, ax2) = plt.subplots(2)
n = 10
histplot(flat_series, element="bars", fill=True, bins=n, ax=ax1)
histplot(flat_series, element="poly", fill=True, bins=n, ax=ax2)
bar_heights = np.array([b.get_height() for b in ax1.patches])
bar_widths = np.array([b.get_width() for b in ax1.patches])
bar_edges = np.array([b.get_x() for b in ax1.patches])
fill = ax2.collections[0]
x, y = fill.get_paths()[0].vertices[::-1].T
assert_array_equal(x[1:n + 1], bar_edges + bar_widths / 2)
assert_array_equal(y[1:n + 1], bar_heights)
def test_poly_no_fill(self, flat_series):
f, (ax1, ax2) = plt.subplots(2)
n = 10
histplot(flat_series, element="bars", fill=False, bins=n, ax=ax1)
histplot(flat_series, element="poly", fill=False, bins=n, ax=ax2)
bar_heights = np.array([b.get_height() for b in ax1.patches])
bar_widths = np.array([b.get_width() for b in ax1.patches])
bar_edges = np.array([b.get_x() for b in ax1.patches])
x, y = ax2.lines[0].get_xydata().T
assert_array_equal(x, bar_edges + bar_widths / 2)
assert_array_equal(y, bar_heights)
def test_step_no_fill(self, flat_series):
f, (ax1, ax2) = plt.subplots(2)
histplot(flat_series, element="bars", fill=False, ax=ax1)
histplot(flat_series, element="step", fill=False, ax=ax2)
bar_heights = [b.get_height() for b in ax1.patches]
bar_widths = [b.get_width() for b in ax1.patches]
bar_edges = [b.get_x() for b in ax1.patches]
x, y = ax2.lines[0].get_xydata().T
assert_array_equal(x[:-1], bar_edges)
assert_array_equal(y[:-1], bar_heights)
assert x[-1] == bar_edges[-1] + bar_widths[-1]
assert y[-1] == y[-2]
def test_step_fill_xy(self, flat_series):
f, ax = plt.subplots()
histplot(x=flat_series, element="step", fill=True)
histplot(y=flat_series, element="step", fill=True)
xverts = ax.collections[0].get_paths()[0].vertices
yverts = ax.collections[1].get_paths()[0].vertices
assert_array_equal(xverts, yverts[:, ::-1])
def test_step_no_fill_xy(self, flat_series):
f, ax = plt.subplots()
histplot(x=flat_series, element="step", fill=False)
histplot(y=flat_series, element="step", fill=False)
xline, yline = ax.lines
assert_array_equal(xline.get_xdata(), yline.get_ydata())
assert_array_equal(xline.get_ydata(), yline.get_xdata())
def test_weighted_histogram(self):
ax = histplot(x=[0, 1, 2], weights=[1, 2, 3], discrete=True)
bar_heights = [b.get_height() for b in ax.patches]
assert bar_heights == [1, 2, 3]
def test_weights_with_auto_bins(self, long_df):
with pytest.warns(UserWarning):
ax = histplot(long_df, x="x", weights="f")
assert len(ax.patches) == 10
def test_shrink(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
bw = 2
shrink = .4
histplot(long_df, x="x", binwidth=bw, ax=ax1)
histplot(long_df, x="x", binwidth=bw, shrink=shrink, ax=ax2)
for p1, p2 in zip(ax1.patches, ax2.patches):
w1, w2 = p1.get_width(), p2.get_width()
assert w2 == pytest.approx(shrink * w1)
x1, x2 = p1.get_x(), p2.get_x()
assert (x2 + w2 / 2) == pytest.approx(x1 + w1 / 2)
def test_log_scale_explicit(self, rng):
x = rng.lognormal(0, 2, 1000)
ax = histplot(x, log_scale=True, binrange=(-3, 3), binwidth=1)
bar_widths = [b.get_width() for b in ax.patches]
steps = np.divide(bar_widths[1:], bar_widths[:-1])
assert np.allclose(steps, 10)
def test_log_scale_implicit(self, rng):
x = rng.lognormal(0, 2, 1000)
f, ax = plt.subplots()
ax.set_xscale("log")
histplot(x, binrange=(-3, 3), binwidth=1, ax=ax)
bar_widths = [b.get_width() for b in ax.patches]
steps = np.divide(bar_widths[1:], bar_widths[:-1])
assert np.allclose(steps, 10)
def test_log_scale_dodge(self, rng):
x = rng.lognormal(0, 2, 100)
hue = np.repeat(["a", "b"], 50)
ax = histplot(x=x, hue=hue, bins=5, log_scale=True, multiple="dodge")
x_min = np.log([b.get_x() for b in ax.patches])
x_max = np.log([b.get_x() + b.get_width() for b in ax.patches])
assert np.unique(np.round(x_max - x_min, 10)).size == 1
def test_log_scale_kde(self, rng):
x = rng.lognormal(0, 1, 1000)
ax = histplot(x=x, log_scale=True, kde=True, bins=20)
bar_height = max(p.get_height() for p in ax.patches)
kde_height = max(ax.lines[0].get_ydata())
assert bar_height == pytest.approx(kde_height, rel=.1)
@pytest.mark.parametrize(
"fill", [True, False],
)
def test_auto_linewidth(self, flat_series, fill):
get_lw = lambda ax: ax.patches[0].get_linewidth() # noqa: E731
kws = dict(element="bars", fill=fill)
f, (ax1, ax2) = plt.subplots(2)
histplot(flat_series, **kws, bins=10, ax=ax1)
histplot(flat_series, **kws, bins=100, ax=ax2)
assert get_lw(ax1) > get_lw(ax2)
f, ax1 = plt.subplots(figsize=(10, 5))
f, ax2 = plt.subplots(figsize=(2, 5))
histplot(flat_series, **kws, bins=30, ax=ax1)
histplot(flat_series, **kws, bins=30, ax=ax2)
assert get_lw(ax1) > get_lw(ax2)
f, ax1 = plt.subplots(figsize=(4, 5))
f, ax2 = plt.subplots(figsize=(4, 5))
histplot(flat_series, **kws, bins=30, ax=ax1)
histplot(10 ** flat_series, **kws, bins=30, log_scale=True, ax=ax2)
assert get_lw(ax1) == pytest.approx(get_lw(ax2))
f, ax1 = plt.subplots(figsize=(4, 5))
f, ax2 = plt.subplots(figsize=(4, 5))
histplot(y=[0, 1, 1], **kws, discrete=True, ax=ax1)
histplot(y=["a", "b", "b"], **kws, ax=ax2)
assert get_lw(ax1) == pytest.approx(get_lw(ax2))
def test_bar_kwargs(self, flat_series):
lw = 2
ec = (1, .2, .9, .5)
ax = histplot(flat_series, binwidth=1, ec=ec, lw=lw)
for bar in ax.patches:
assert_colors_equal(bar.get_edgecolor(), ec)
assert bar.get_linewidth() == lw
def test_step_fill_kwargs(self, flat_series):
lw = 2
ec = (1, .2, .9, .5)
ax = histplot(flat_series, element="step", ec=ec, lw=lw)
poly = ax.collections[0]
assert_colors_equal(poly.get_edgecolor(), ec)
assert poly.get_linewidth() == lw
def test_step_line_kwargs(self, flat_series):
lw = 2
ls = "--"
ax = histplot(flat_series, element="step", fill=False, lw=lw, ls=ls)
line = ax.lines[0]
assert line.get_linewidth() == lw
assert line.get_linestyle() == ls
def test_label(self, flat_series):
ax = histplot(flat_series, label="a label")
handles, labels = ax.get_legend_handles_labels()
assert len(handles) == 1
assert labels == ["a label"]
def test_default_color_scout_cleanup(self, flat_series):
ax = histplot(flat_series)
assert len(ax.containers) == 1
| TestHistPlotUnivariate |
python | tiangolo__fastapi | docs_src/path_operation_advanced_configuration/tutorial007.py | {
"start": 156,
"end": 822
} | class ____(BaseModel):
name: str
tags: List[str]
@app.post(
"/items/",
openapi_extra={
"requestBody": {
"content": {"application/x-yaml": {"schema": Item.model_json_schema()}},
"required": True,
},
},
)
async def create_item(request: Request):
raw_body = await request.body()
try:
data = yaml.safe_load(raw_body)
except yaml.YAMLError:
raise HTTPException(status_code=422, detail="Invalid YAML")
try:
item = Item.model_validate(data)
except ValidationError as e:
raise HTTPException(status_code=422, detail=e.errors(include_url=False))
return item
| Item |
python | getsentry__sentry | src/sentry/grouping/component.py | {
"start": 17080,
"end": 18520
} | class ____(BaseGroupingComponent[ContributingComponent]):
def __init__(
self,
variant_name: str,
hint: str | None = None,
contributes: bool | None = None,
values: Sequence[ContributingComponent] | None = None,
):
super().__init__(hint, contributes, values)
self.variant_name = variant_name
@property
def id(self) -> str:
return self.variant_name
@property
def key(self) -> str:
variant_name = self.variant_name
if not self.values: # Insurance - shouldn't ever happen
return variant_name
# Variant root components which don't contribute won't have any contributing children, but
# we can find the component which would be the contributing component, were the root
# component itself contributing. Strategies are run in descending order of priority, and
# added into `values` in order, so the highest-priority option will always be first.
would_be_contributing_component = self.values[0]
return would_be_contributing_component.key
def __repr__(self) -> str:
base_repr = super().__repr__()
# Fake the class name so that instead of showing as `RootGroupingComponent` in the repr it
# shows as `AppGroupingComponent`/`SystemGroupingComponent`/`DefaultGroupingComponent`
return base_repr.replace("Root", self.id.title())
| RootGroupingComponent |
python | milvus-io__pymilvus | pymilvus/client/interceptor.py | {
"start": 1157,
"end": 3099
} | class ____(
grpc.UnaryUnaryClientInterceptor,
grpc.UnaryStreamClientInterceptor,
grpc.StreamUnaryClientInterceptor,
grpc.StreamStreamClientInterceptor,
):
def __init__(self, interceptor_function: Callable) -> None:
super().__init__()
self._fn = interceptor_function
def intercept_unary_unary(self, continuation: Callable, client_call_details: Any, request: Any):
new_details, new_request_iterator, postprocess = self._fn(
client_call_details, iter((request,))
)
response = continuation(new_details, next(new_request_iterator))
return postprocess(response) if postprocess else response
def intercept_unary_stream(
self,
continuation: Callable,
client_call_details: Any,
request: Any,
):
new_details, new_request_iterator, postprocess = self._fn(
client_call_details, iter((request,))
)
response_it = continuation(new_details, next(new_request_iterator))
return postprocess(response_it) if postprocess else response_it
def intercept_stream_unary(
self,
continuation: Callable,
client_call_details: Any,
request_iterator: Any,
):
new_details, new_request_iterator, postprocess = self._fn(
client_call_details, request_iterator
)
response = continuation(new_details, new_request_iterator)
return postprocess(response) if postprocess else response
def intercept_stream_stream(
self,
continuation: Callable,
client_call_details: Any,
request_iterator: Any,
):
new_details, new_request_iterator, postprocess = self._fn(
client_call_details, request_iterator
)
response_it = continuation(new_details, new_request_iterator)
return postprocess(response_it) if postprocess else response_it
| _GenericClientInterceptor |
python | doocs__leetcode | solution/1600-1699/1678.Goal Parser Interpretation/Solution.py | {
"start": 0,
"end": 126
} | class ____:
def interpret(self, command: str) -> str:
return command.replace('()', 'o').replace('(al)', 'al')
| Solution |
python | allegroai__clearml | clearml/utilities/process/mp.py | {
"start": 8747,
"end": 15125
} | class ____(object):
"""
Many writers Single Reader multiprocessing safe Queue
"""
__thread_pool = SingletonThreadPool()
def __init__(self, *args: Any, **kwargs: Any) -> None:
self._reader_thread = None
self._reader_thread_started = False
# Fix the python Queue and Use SimpleQueue write so it uses a single OS write,
# making it atomic message passing
self._q = SimpleQueue(*args, **kwargs)
# on Windows, queue communication is done via pipes, no need to override the _send_bytes method
if sys.platform != "win32":
# noinspection PyBroadException
try:
# noinspection PyUnresolvedReferences,PyProtectedMember
self._q._writer._send_bytes = partial(SafeQueue._pipe_override_send_bytes, self._q._writer)
except Exception:
pass
self._internal_q = None
# Note we should Never! assign a new object to `self._q_size`, just work with the initial object
self._q_size = [] # list of PIDs we pushed, so this is atomic.
def empty(self) -> bool:
return self._q.empty() and (not self._internal_q or self._internal_q.empty())
def is_pending(self) -> bool:
# check if we have pending requests to be pushed (it does not mean they were pulled)
# only call from main put process
return self._get_q_size_len() > 0
def close(self, event: Optional["SafeEvent"], timeout: float = 3.0) -> None:
# wait until all pending requests pushed
tic = time()
pid = os.getpid()
prev_q_size = self._get_q_size_len(pid)
while self.is_pending():
if event:
event.set()
if not self.__thread_pool.is_active():
break
sleep(0.1)
# timeout is for the maximum time to pull a single object from the queue,
# this way if we get stuck we notice quickly and abort
if timeout and (time() - tic) > timeout:
if prev_q_size == self._get_q_size_len(pid):
break
else:
prev_q_size = self._get_q_size_len(pid)
tic = time()
def get(self, *args: Any, **kwargs: Any) -> Any:
return self._get_internal_queue(*args, **kwargs)
def batch_get(
self,
max_items: int = 1000,
timeout: float = 0.2,
throttle_sleep: float = 0.1,
) -> List[Any]:
buffer = []
timeout_count = int(timeout / throttle_sleep)
empty_count = timeout_count
while len(buffer) < max_items:
while not self.empty() and len(buffer) < max_items:
try:
buffer.append(self._get_internal_queue(block=False))
empty_count = 0
except Empty:
break
empty_count += 1
if empty_count > timeout_count or len(buffer) >= max_items:
break
sleep(throttle_sleep)
return buffer
def put(self, obj: Any) -> None:
# not atomic when forking for the first time
# GIL will make sure it is atomic
self._q_size.append(os.getpid())
try:
# make sure the block put is done in the thread pool i.e. in the background
obj = pickle.dumps(obj)
if BackgroundMonitor.get_at_exit_state():
self._q_put(obj)
return
self.__thread_pool.get().apply_async(self._q_put, args=(obj, False))
except: # noqa
pid = os.getpid()
p = None
while p != pid and self._q_size:
p = self._q_size.pop()
def _get_q_size_len(self, pid: Optional[int] = None) -> int:
pid = pid or os.getpid()
return len([p for p in self._q_size if p == pid])
def _q_put(self, obj: Any, allow_raise: bool = True) -> None:
# noinspection PyBroadException
try:
self._q.put(obj)
except BaseException:
# make sure we zero the _q_size of the process dies (i.e. queue put fails)
self._q_size.clear()
if allow_raise:
raise
return
pid = os.getpid()
# GIL will make sure it is atomic
# pop the First "counter" that is ours (i.e. pid == os.getpid())
p = None
while p != pid and self._q_size:
p = self._q_size.pop()
def _init_reader_thread(self) -> None:
if not self._internal_q:
self._internal_q = ForkQueue()
if not self._reader_thread or not self._reader_thread.is_alive():
# read before we start the thread
self._reader_thread = Thread(target=self._reader_daemon)
self._reader_thread.daemon = True
self._reader_thread.start()
# if we have waiting results
# wait until thread is up and pushed some results
while not self._reader_thread_started:
sleep(0.2)
# just in case make sure we pulled some stuff if we had any
# todo: wait until a queue is not empty, but for some reason that might fail
sleep(1.0)
def _get_internal_queue(self, *args: Any, **kwargs: Any) -> Any:
self._init_reader_thread()
obj = self._internal_q.get(*args, **kwargs)
# deserialize
return pickle.loads(obj)
def _reader_daemon(self) -> None:
self._reader_thread_started = True
# pull from process queue and push into thread queue
while True:
# noinspection PyBroadException
try:
obj = self._q.get()
if obj is None:
break
except Exception:
break
self._internal_q.put(obj)
@staticmethod
def _pipe_override_send_bytes(self, buf: bytes) -> None:
n = len(buf)
# For wire compatibility with 3.2 and lower
header = struct.pack("!i", n)
# Issue #20540: concatenate before sending, to avoid delays due
# to Nagle's algorithm on a TCP socket.
# Also note we want to avoid sending a 0-length buffer separately,
# to avoid "broken pipe" errors if the other end closed the pipe.
self._send(header + buf)
| SafeQueue |
python | huggingface__transformers | src/transformers/models/deformable_detr/modeling_deformable_detr.py | {
"start": 78022,
"end": 79015
} | class ____(nn.Module):
"""
Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
height and width of a bounding box w.r.t. an image.
Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py
"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
@auto_docstring(
custom_intro="""
Deformable DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on
top, for tasks such as COCO detection.
"""
)
| DeformableDetrMLPPredictionHead |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/v1_consumer/package.py | {
"start": 216,
"end": 463
} | class ____(Package):
"""Mimic the real netlib-lapack, that may be built on top of an
optimized blas.
"""
homepage = "https://dev.null"
version("1.0")
depends_on("v2")
depends_on("v1")
provides("somelang")
| V1Consumer |
python | huggingface__transformers | src/transformers/models/conditional_detr/modeling_conditional_detr.py | {
"start": 91702,
"end": 93288
} | class ____(nn.Module):
"""This is a 2D attention module, which only returns the attention softmax (no multiplication by value)"""
def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True, std=None):
super().__init__()
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.dropout = nn.Dropout(dropout)
self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5
def forward(self, q, k, mask: Optional[Tensor] = None):
q = self.q_linear(q)
k = nn.functional.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias)
queries_per_head = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads)
keys_per_head = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1])
weights = torch.einsum("bqnc,bnchw->bqnhw", queries_per_head * self.normalize_fact, keys_per_head)
if mask is not None:
weights = weights.masked_fill(mask.unsqueeze(1).unsqueeze(1), torch.finfo(weights.dtype).min)
weights = nn.functional.softmax(weights.flatten(2), dim=-1).view(weights.size())
weights = self.dropout(weights)
return weights
__all__ = [
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
| ConditionalDetrMHAttentionMap |
python | tensorflow__tensorflow | tensorflow/python/saved_model/fingerprinting.py | {
"start": 1113,
"end": 7306
} | class ____:
"""The SavedModel fingerprint.
Each attribute of this class is named after a field name in the
FingerprintDef proto and contains the value of the respective field in the
protobuf.
Attributes:
saved_model_checksum: A uint64 containing the `saved_model_checksum`.
graph_def_program_hash: A uint64 containing `graph_def_program_hash`.
signature_def_hash: A uint64 containing the `signature_def_hash`.
saved_object_graph_hash: A uint64 containing the `saved_object_graph_hash`.
checkpoint_hash: A uint64 containing the`checkpoint_hash`.
version: An int32 containing the producer field of the VersionDef.
"""
def __init__(
self,
saved_model_checksum: int = None,
graph_def_program_hash: int = None,
signature_def_hash: int = None,
saved_object_graph_hash: int = None,
checkpoint_hash: int = None,
version: int = None,
):
"""Initializes the instance based on values in the SavedModel fingerprint.
Args:
saved_model_checksum: Value of the`saved_model_checksum`.
graph_def_program_hash: Value of the `graph_def_program_hash`.
signature_def_hash: Value of the `signature_def_hash`.
saved_object_graph_hash: Value of the `saved_object_graph_hash`.
checkpoint_hash: Value of the `checkpoint_hash`.
version: Value of the producer field of the VersionDef.
"""
self.saved_model_checksum = saved_model_checksum
self.graph_def_program_hash = graph_def_program_hash
self.signature_def_hash = signature_def_hash
self.saved_object_graph_hash = saved_object_graph_hash
self.checkpoint_hash = checkpoint_hash
self.version = version
@classmethod
def from_proto(cls, proto: fingerprint_pb2.FingerprintDef) -> "Fingerprint":
"""Constructs Fingerprint object from protocol buffer message."""
if isinstance(proto, bytes):
proto = fingerprint_pb2.FingerprintDef.FromString(proto)
try:
return Fingerprint(
proto.saved_model_checksum,
proto.graph_def_program_hash,
proto.signature_def_hash,
proto.saved_object_graph_hash,
proto.checkpoint_hash,
proto.version)
except AttributeError as e:
raise ValueError(
f"Given proto could not be deserialized as fingerprint."
f"{e}") from None
def __eq__(self, other: Any) -> bool:
if (isinstance(other, Fingerprint) or
isinstance(other, fingerprint_pb2.FingerprintDef)):
try:
return (
self.saved_model_checksum == other.saved_model_checksum and
self.graph_def_program_hash == other.graph_def_program_hash and
self.signature_def_hash == other.signature_def_hash and
self.saved_object_graph_hash == other.saved_object_graph_hash and
self.checkpoint_hash == other.checkpoint_hash)
except AttributeError:
pass
return False
def __str__(self) -> str:
return "\n".join([
"SavedModel Fingerprint",
f" saved_model_checksum: {self.saved_model_checksum}",
f" graph_def_program_hash: {self.graph_def_program_hash}",
f" signature_def_hash: {self.signature_def_hash}",
f" saved_object_graph_hash: {self.saved_object_graph_hash}",
f" checkpoint_hash: {self.checkpoint_hash}"
])
def __repr__(self) -> str:
return (f"Fingerprint({self.saved_model_checksum}, "
f"{self.graph_def_program_hash}, "
f"{self.signature_def_hash}, "
f"{self.saved_object_graph_hash}, "
f"{self.checkpoint_hash})")
def singleprint(self) -> fingerprinting_pywrap.Singleprint:
"""Canonical fingerprinting ID for a SavedModel.
Uniquely identifies a SavedModel based on the regularized fingerprint
attributes. (saved_model_checksum is sensitive to immaterial changes and
thus non-deterministic.)
Returns:
The string concatenation of `graph_def_program_hash`,
`signature_def_hash`, `saved_object_graph_hash`, and `checkpoint_hash`
fingerprint attributes (separated by '/').
Raises:
ValueError: If the fingerprint fields cannot be used to construct the
singleprint.
"""
try:
return fingerprinting_pywrap.Singleprint(self.graph_def_program_hash,
self.signature_def_hash,
self.saved_object_graph_hash,
self.checkpoint_hash)
except (TypeError, fingerprinting_pywrap.FingerprintException) as e:
raise ValueError(
f"Encounted invalid fingerprint values when constructing singleprint."
f"graph_def_program_hash: {self.graph_def_program_hash}"
f"signature_def_hash: {self.signature_def_hash}"
f"saved_object_graph_hash: {self.saved_object_graph_hash}"
f"checkpoint_hash: {self.checkpoint_hash}"
f"{e}") from None
@tf_export("saved_model.experimental.read_fingerprint", v1=[])
def read_fingerprint(export_dir: str) -> Fingerprint:
"""Reads the fingerprint of a SavedModel in `export_dir`.
Returns a `tf.saved_model.experimental.Fingerprint` object that contains
the values of the SavedModel fingerprint, which is persisted on disk in the
`fingerprint.pb` file in the `export_dir`.
Read more about fingerprints in the SavedModel guide at
https://www.tensorflow.org/guide/saved_model.
Args:
export_dir: The directory that contains the SavedModel.
Returns:
A `tf.saved_model.experimental.Fingerprint`.
Raises:
FileNotFoundError: If no or an invalid fingerprint is found.
"""
try:
fingerprint = fingerprinting_pywrap.ReadSavedModelFingerprint(export_dir)
except fingerprinting_pywrap.FileNotFoundException as e:
raise FileNotFoundError(f"SavedModel Fingerprint Error: {e}") from None # pylint: disable=raise-missing-from
except fingerprinting_pywrap.FingerprintException as e:
raise RuntimeError(f"SavedModel Fingerprint Error: {e}") from None # pylint: disable=raise-missing-from
return Fingerprint.from_proto(
fingerprint_pb2.FingerprintDef().FromString(fingerprint))
| Fingerprint |
python | joke2k__faker | faker/providers/address/fil_PH/__init__.py | {
"start": 54,
"end": 164
} | class ____(EnPhAddressProvider):
"""No difference from Address Provider for en_PH locale"""
pass
| Provider |
python | gevent__gevent | src/gevent/tests/test__monkey_ssl_warning3.py | {
"start": 180,
"end": 380
} | class ____(ssl.SSLContext):
pass
# This file should only have this one test in it
# because we have to be careful about our imports
# and because we need to be careful about our patching.
| MySubclass |
python | kamyu104__LeetCode-Solutions | Python/number-of-distinct-islands-ii.py | {
"start": 52,
"end": 1604
} | class ____(object):
def numDistinctIslands2(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
directions = [(0, -1), (0, 1), (-1, 0), (1, 0)]
def dfs(i, j, grid, island):
if not (0 <= i < len(grid) and \
0 <= j < len(grid[0]) and \
grid[i][j] > 0):
return False
grid[i][j] *= -1
island.append((i, j))
for d in directions:
dfs(i+d[0], j+d[1], grid, island)
return True
def normalize(island):
shapes = [[] for _ in xrange(8)]
for x, y in island:
rotations_and_reflections = [[ x, y], [ x, -y], [-x, y], [-x, -y],
[ y, x], [ y, -x], [-y, x], [-y, -x]]
for i in xrange(len(rotations_and_reflections)):
shapes[i].append(rotations_and_reflections[i])
for shape in shapes:
shape.sort() # Time: O(ilogi), i is the size of the island, the max would be (m * n)
origin = list(shape[0])
for p in shape:
p[0] -= origin[0]
p[1] -= origin[1]
return min(shapes)
islands = set()
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
island = []
if dfs(i, j, grid, island):
islands.add(str(normalize(island)))
return len(islands)
| Solution |
python | huggingface__transformers | tests/models/aimv2/test_modeling_aimv2.py | {
"start": 10883,
"end": 11588
} | class ____(Aimv2ModelTesterMixin, unittest.TestCase):
all_model_classes = (Aimv2TextModel,) if is_torch_available() else ()
test_resize_embeddings = False
def setUp(self):
self.model_tester = Aimv2TextModelTester(self)
self.config_tester = ConfigTester(self, config_class=Aimv2TextConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Aimv2 does not use inputs_embeds")
def test_inputs_embeds(self):
pass
| Aimv2TextModelTest |
python | pallets__jinja | src/jinja2/nodes.py | {
"start": 32638,
"end": 32735
} | class ____(Stmt):
"""An artificial scope."""
fields = ("body",)
body: list[Node]
| Scope |
python | huggingface__transformers | src/transformers/models/rembert/modeling_rembert.py | {
"start": 15728,
"end": 19054
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.embedding_hidden_mapping_in = nn.Linear(config.input_embedding_size, config.hidden_size)
self.layer = nn.ModuleList([RemBertLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_values,
output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
past_key_values,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->RemBert
| RemBertEncoder |
python | SmileyChris__easy-thumbnails | easy_thumbnails/tests/test_aliases.py | {
"start": 5801,
"end": 6394
} | class ____(BaseTest):
create_file = True
def test_thumbnailer(self):
thumbnailer = files.get_thumbnailer(self.storage, 'avatars/test.jpg')
thumbnailer.thumbnail_storage = self.storage
thumb = thumbnailer['small']
self.assertEqual((thumb.width, thumb.height), (100, 75))
def test_thumbnailer_fieldfile(self):
profile = models.Profile(avatar='avatars/test.jpg')
thumbnailer = files.get_thumbnailer(profile.avatar)
thumb = thumbnailer['small']
self.assertEqual((thumb.width, thumb.height), (20, 20))
| AliasThumbnailerTest |
python | encode__django-rest-framework | tests/test_relations.py | {
"start": 6412,
"end": 9892
} | class ____(APISimpleTestCase):
def setUp(self):
self.queryset = MockQueryset([
MockObject(pk=1, name='foobar'),
MockObject(pk=2, name='bazABCqux'),
MockObject(pk=2, name='bazABC qux'),
])
self.field = serializers.HyperlinkedRelatedField(
view_name='example',
lookup_field='name',
lookup_url_kwarg='name',
queryset=self.queryset,
)
self.field.reverse = mock_reverse
self.field._context = {'request': True}
def test_representation_unsaved_object_with_non_nullable_pk(self):
representation = self.field.to_representation(MockObject(pk=''))
assert representation is None
def test_serialize_empty_relationship_attribute(self):
class TestSerializer(serializers.Serializer):
via_unreachable = serializers.HyperlinkedRelatedField(
source='does_not_exist.unreachable',
view_name='example',
read_only=True,
)
class TestSerializable:
@property
def does_not_exist(self):
raise ObjectDoesNotExist
serializer = TestSerializer(TestSerializable())
assert serializer.data == {'via_unreachable': None}
def test_hyperlinked_related_lookup_exists(self):
instance = self.field.to_internal_value('http://example.org/example/foobar/')
assert instance is self.queryset.items[0]
def test_hyperlinked_related_lookup_url_encoded_exists(self):
instance = self.field.to_internal_value('http://example.org/example/baz%41%42%43qux/')
assert instance is self.queryset.items[1]
def test_hyperlinked_related_lookup_url_space_encoded_exists(self):
instance = self.field.to_internal_value('http://example.org/example/bazABC%20qux/')
assert instance is self.queryset.items[2]
def test_hyperlinked_related_lookup_does_not_exist(self):
with pytest.raises(serializers.ValidationError) as excinfo:
self.field.to_internal_value('http://example.org/example/doesnotexist/')
msg = excinfo.value.detail[0]
assert msg == 'Invalid hyperlink - Object does not exist.'
def test_hyperlinked_related_internal_type_error(self):
class Field(serializers.HyperlinkedRelatedField):
def get_object(self, incorrect, signature):
raise NotImplementedError()
field = Field(view_name='example', queryset=self.queryset)
with pytest.raises(TypeError):
field.to_internal_value('http://example.org/example/doesnotexist/')
def hyperlinked_related_queryset_error(self, exc_type):
class QuerySet:
def get(self, *args, **kwargs):
raise exc_type
field = serializers.HyperlinkedRelatedField(
view_name='example',
lookup_field='name',
queryset=QuerySet(),
)
with pytest.raises(serializers.ValidationError) as excinfo:
field.to_internal_value('http://example.org/example/doesnotexist/')
msg = excinfo.value.detail[0]
assert msg == 'Invalid hyperlink - Object does not exist.'
def test_hyperlinked_related_queryset_type_error(self):
self.hyperlinked_related_queryset_error(TypeError)
def test_hyperlinked_related_queryset_value_error(self):
self.hyperlinked_related_queryset_error(ValueError)
| TestHyperlinkedRelatedField |
python | ZoranPandovski__al-go-rithms | data_structures/Graphs/graph/Python/prims-algorithm.py | {
"start": 253,
"end": 3021
} | class ____():
def __init__(self, vertices):
self.V = vertices
self.graph = [[0 for column in range(vertices)]
for row in range(vertices)]
# A utility function to print the constructed MST stored in parent[]
def printMST(self, parent):
print("Edge \tWeight")
for i in range(1,self.V):
print(parent[i],"-",i,"\t",self.graph[i][ parent[i] ])
# A utility function to find the vertex with
# minimum distance value, from the set of vertices
# not yet included in shortest path tree
def minKey(self, key, mstSet):
# Initilaize min value
min = sys.maxint
for v in range(self.V):
if key[v] < min and mstSet[v] == False:
min = key[v]
min_index = v
return min_index
# Function to construct and print MST for a graph
# represented using adjacency matrix representation
def primMST(self):
#Key values used to pick minimum weight edge in cut
key = [sys.maxint] * self.V
parent = [None] * self.V # Array to store constructed MST
# Make key 0 so that this vertex is picked as first vertex
key[0] = 0
mstSet = [False] * self.V
parent[0] = -1 # First node is always the root of
for cout in range(self.V):
# Pick the minimum distance vertex from
# the set of vertices not yet processed.
# u is always equal to src in first iteration
u = self.minKey(key, mstSet)
# Put the minimum distance vertex in
# the shortest path tree
mstSet[u] = True
# Update dist value of the adjacent vertices
# of the picked vertex only if the current
# distance is greater than new distance and
# the vertex in not in the shotest path tree
for v in range(self.V):
# graph[u][v] is non zero only for adjacent vertices of m
# mstSet[v] is false for vertices not yet included in MST
# Update the key only if graph[u][v] is smaller than key[v]
if self.graph[u][v] > 0 and mstSet[v] == False and key[v] > self.graph[u][v]:
key[v] = self.graph[u][v]
parent[v] = u
self.printMST(parent)
def main():
# example data set can be altered according to need
g = Graph(5)
g.graph = [ [0, 2, 0, 6, 0],
[2, 0, 3, 8, 5],
[0, 3, 0, 0, 7],
[6, 8, 0, 0, 9],
[0, 5, 7, 9, 0]]
g.primMST();
if __name__ == '__main__':
main() | Graph |
python | pytorch__pytorch | tools/test/test_selective_build.py | {
"start": 237,
"end": 10421
} | class ____(unittest.TestCase):
def test_selective_build_operator(self) -> None:
op = SelectiveBuildOperator(
"aten::add.int",
is_root_operator=True,
is_used_for_training=False,
include_all_overloads=False,
_debug_info=None,
)
self.assertTrue(op.is_root_operator)
self.assertFalse(op.is_used_for_training)
self.assertFalse(op.include_all_overloads)
def test_selector_factory(self) -> None:
yaml_config_v1 = """
debug_info:
- model1@v100
- model2@v51
operators:
aten::add:
is_used_for_training: No
is_root_operator: Yes
include_all_overloads: Yes
aten::add.int:
is_used_for_training: Yes
is_root_operator: No
include_all_overloads: No
aten::mul.int:
is_used_for_training: Yes
is_root_operator: No
include_all_overloads: No
"""
yaml_config_v2 = """
debug_info:
- model1@v100
- model2@v51
operators:
aten::sub:
is_used_for_training: No
is_root_operator: Yes
include_all_overloads: No
debug_info:
- model1@v100
aten::sub.int:
is_used_for_training: Yes
is_root_operator: No
include_all_overloads: No
"""
yaml_config_all = "include_all_operators: Yes"
yaml_config_invalid = "invalid:"
selector1 = SelectiveBuilder.from_yaml_str(yaml_config_v1)
self.assertTrue(selector1.is_operator_selected("aten::add"))
self.assertTrue(selector1.is_operator_selected("aten::add.int"))
# Overload name is not used for checking in v1.
self.assertTrue(selector1.is_operator_selected("aten::add.float"))
def gen():
return SelectiveBuilder.from_yaml_str(yaml_config_invalid)
self.assertRaises(Exception, gen)
selector_all = SelectiveBuilder.from_yaml_str(yaml_config_all)
self.assertTrue(selector_all.is_operator_selected("aten::add"))
self.assertTrue(selector_all.is_operator_selected("aten::sub"))
self.assertTrue(selector_all.is_operator_selected("aten::sub.int"))
self.assertTrue(selector_all.is_kernel_dtype_selected("add_kernel", "int32"))
selector2 = SelectiveBuilder.from_yaml_str(yaml_config_v2)
self.assertFalse(selector2.is_operator_selected("aten::add"))
self.assertTrue(selector2.is_operator_selected("aten::sub"))
self.assertTrue(selector2.is_operator_selected("aten::sub.int"))
selector_legacy_v1 = SelectiveBuilder.from_legacy_op_registration_allow_list(
# pyrefly: ignore [bad-argument-type]
["aten::add", "aten::add.int", "aten::mul.int"],
False,
False,
)
self.assertTrue(selector_legacy_v1.is_operator_selected("aten::add.float"))
self.assertTrue(selector_legacy_v1.is_operator_selected("aten::add"))
self.assertTrue(selector_legacy_v1.is_operator_selected("aten::add.int"))
self.assertFalse(selector_legacy_v1.is_operator_selected("aten::sub"))
self.assertFalse(selector_legacy_v1.is_root_operator("aten::add"))
self.assertFalse(
selector_legacy_v1.is_operator_selected_for_training("aten::add")
)
selector_legacy_v1 = SelectiveBuilder.from_legacy_op_registration_allow_list(
# pyrefly: ignore [bad-argument-type]
["aten::add", "aten::add.int", "aten::mul.int"],
True,
False,
)
self.assertTrue(selector_legacy_v1.is_root_operator("aten::add"))
self.assertFalse(
selector_legacy_v1.is_operator_selected_for_training("aten::add")
)
self.assertTrue(selector_legacy_v1.is_root_operator("aten::add.float"))
self.assertFalse(
selector_legacy_v1.is_operator_selected_for_training("aten::add.float")
)
selector_legacy_v1 = SelectiveBuilder.from_legacy_op_registration_allow_list(
# pyrefly: ignore [bad-argument-type]
["aten::add", "aten::add.int", "aten::mul.int"],
False,
True,
)
self.assertFalse(selector_legacy_v1.is_root_operator("aten::add"))
self.assertTrue(
selector_legacy_v1.is_operator_selected_for_training("aten::add")
)
self.assertFalse(selector_legacy_v1.is_root_operator("aten::add.float"))
self.assertTrue(
selector_legacy_v1.is_operator_selected_for_training("aten::add.float")
)
def test_operator_combine(self) -> None:
op1 = SelectiveBuildOperator(
"aten::add.int",
is_root_operator=True,
is_used_for_training=False,
include_all_overloads=False,
_debug_info=None,
)
op2 = SelectiveBuildOperator(
"aten::add.int",
is_root_operator=False,
is_used_for_training=False,
include_all_overloads=False,
_debug_info=None,
)
op3 = SelectiveBuildOperator(
"aten::add",
is_root_operator=True,
is_used_for_training=False,
include_all_overloads=False,
_debug_info=None,
)
op4 = SelectiveBuildOperator(
"aten::add.int",
is_root_operator=True,
is_used_for_training=True,
include_all_overloads=False,
_debug_info=None,
)
op5 = combine_operators(op1, op2)
self.assertTrue(op5.is_root_operator)
self.assertFalse(op5.is_used_for_training)
op6 = combine_operators(op1, op4)
self.assertTrue(op6.is_root_operator)
self.assertTrue(op6.is_used_for_training)
def gen_new_op():
return combine_operators(op1, op3)
self.assertRaises(Exception, gen_new_op)
def test_training_op_fetch(self) -> None:
yaml_config = """
operators:
aten::add.int:
is_used_for_training: No
is_root_operator: Yes
include_all_overloads: No
aten::add:
is_used_for_training: Yes
is_root_operator: No
include_all_overloads: Yes
"""
selector = SelectiveBuilder.from_yaml_str(yaml_config)
self.assertTrue(selector.is_operator_selected_for_training("aten::add.int"))
self.assertTrue(selector.is_operator_selected_for_training("aten::add"))
def test_kernel_dtypes(self) -> None:
yaml_config = """
kernel_metadata:
add_kernel:
- int8
- int32
sub_kernel:
- int16
- int32
add/sub_kernel:
- float
- complex
"""
selector = SelectiveBuilder.from_yaml_str(yaml_config)
self.assertTrue(selector.is_kernel_dtype_selected("add_kernel", "int32"))
self.assertTrue(selector.is_kernel_dtype_selected("add_kernel", "int8"))
self.assertFalse(selector.is_kernel_dtype_selected("add_kernel", "int16"))
self.assertFalse(selector.is_kernel_dtype_selected("add1_kernel", "int32"))
self.assertFalse(selector.is_kernel_dtype_selected("add_kernel", "float"))
self.assertTrue(selector.is_kernel_dtype_selected("add/sub_kernel", "float"))
self.assertTrue(selector.is_kernel_dtype_selected("add/sub_kernel", "complex"))
self.assertFalse(selector.is_kernel_dtype_selected("add/sub_kernel", "int16"))
self.assertFalse(selector.is_kernel_dtype_selected("add/sub_kernel", "int32"))
def test_merge_kernel_dtypes(self) -> None:
yaml_config1 = """
kernel_metadata:
add_kernel:
- int8
add/sub_kernel:
- float
- complex
- none
mul_kernel:
- int8
"""
yaml_config2 = """
kernel_metadata:
add_kernel:
- int32
sub_kernel:
- int16
- int32
add/sub_kernel:
- float
- complex
"""
selector1 = SelectiveBuilder.from_yaml_str(yaml_config1)
selector2 = SelectiveBuilder.from_yaml_str(yaml_config2)
selector = combine_selective_builders(selector1, selector2)
self.assertTrue(selector.is_kernel_dtype_selected("add_kernel", "int32"))
self.assertTrue(selector.is_kernel_dtype_selected("add_kernel", "int8"))
self.assertFalse(selector.is_kernel_dtype_selected("add_kernel", "int16"))
self.assertFalse(selector.is_kernel_dtype_selected("add1_kernel", "int32"))
self.assertFalse(selector.is_kernel_dtype_selected("add_kernel", "float"))
self.assertTrue(selector.is_kernel_dtype_selected("add/sub_kernel", "float"))
self.assertTrue(selector.is_kernel_dtype_selected("add/sub_kernel", "complex"))
self.assertTrue(selector.is_kernel_dtype_selected("add/sub_kernel", "none"))
self.assertFalse(selector.is_kernel_dtype_selected("add/sub_kernel", "int16"))
self.assertFalse(selector.is_kernel_dtype_selected("add/sub_kernel", "int32"))
self.assertTrue(selector.is_kernel_dtype_selected("mul_kernel", "int8"))
self.assertFalse(selector.is_kernel_dtype_selected("mul_kernel", "int32"))
def test_all_kernel_dtypes_selected(self) -> None:
yaml_config = """
include_all_non_op_selectives: True
"""
selector = SelectiveBuilder.from_yaml_str(yaml_config)
self.assertTrue(selector.is_kernel_dtype_selected("add_kernel", "int32"))
self.assertTrue(selector.is_kernel_dtype_selected("add_kernel", "int8"))
self.assertTrue(selector.is_kernel_dtype_selected("add_kernel", "int16"))
self.assertTrue(selector.is_kernel_dtype_selected("add1_kernel", "int32"))
self.assertTrue(selector.is_kernel_dtype_selected("add_kernel", "float"))
def test_custom_namespace_selected_correctly(self) -> None:
yaml_config = """
operators:
aten::add.int:
is_used_for_training: No
is_root_operator: Yes
include_all_overloads: No
custom::add:
is_used_for_training: Yes
is_root_operator: No
include_all_overloads: Yes
"""
selector = SelectiveBuilder.from_yaml_str(yaml_config)
native_function, _ = NativeFunction.from_yaml(
{"func": "custom::add() -> Tensor"},
loc=Location(__file__, 1),
valid_tags=set(),
)
self.assertTrue(selector.is_native_function_selected(native_function))
| TestSelectiveBuild |
python | astropy__astropy | astropy/io/misc/yaml.py | {
"start": 6652,
"end": 13283
} | class ____(yaml.SafeDumper):
"""
Custom SafeDumper that represents astropy core objects as well
as Python tuple and unicode objects.
This class is not directly instantiated by user code, but instead is
used to maintain the available representer functions that are
called when generating a YAML stream from an object. See the
`PyYaml documentation <https://pyyaml.org/wiki/PyYAMLDocumentation>`_
for details of the class signature.
"""
def _represent_tuple(self, data):
return self.represent_sequence("tag:yaml.org,2002:python/tuple", data)
def represent_float(self, data):
# Override to change repr(data) to str(data) since otherwise all the
# numpy scalars fail in not NUMPY_LT_2_0.
# otherwise, this function is identical to yaml.SafeDumper.represent_float
# (as of pyyaml 6.0.1)
if data != data or (data == 0.0 and data == 1.0):
value = ".nan"
elif data == self.inf_value:
value = ".inf"
elif data == -self.inf_value:
value = "-.inf"
else:
value = str(data).lower()
# Note that in some cases `repr(data)` represents a float number
# without the decimal parts. For instance:
# >>> repr(1e17)
# '1e17'
# Unfortunately, this is not a valid float representation according
# to the definition of the `!!float` tag. We fix this by adding
# '.0' before the 'e' symbol.
if "." not in value and "e" in value:
value = value.replace("e", ".0e", 1)
return self.represent_scalar("tag:yaml.org,2002:float", value)
AstropyDumper.add_multi_representer(u.UnitBase, _unit_representer)
AstropyDumper.add_multi_representer(u.FunctionUnitBase, _unit_representer)
AstropyDumper.add_multi_representer(u.StructuredUnit, _unit_representer)
AstropyDumper.add_representer(tuple, AstropyDumper._represent_tuple)
AstropyDumper.add_representer(np.ndarray, _ndarray_representer)
AstropyDumper.add_representer(np.void, _void_representer)
AstropyDumper.add_representer(Time, _time_representer)
AstropyDumper.add_representer(TimeDelta, _timedelta_representer)
AstropyDumper.add_representer(coords.SkyCoord, _skycoord_representer)
AstropyDumper.add_representer(SerializedColumn, _serialized_column_representer)
# Numpy dtypes
AstropyDumper.add_representer(np.bool_, yaml.representer.SafeRepresenter.represent_bool)
AstropyDumper.add_representer(np.str_, yaml.representer.SafeRepresenter.represent_str)
for np_type in [
np.intc,
np.intp,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
]:
AstropyDumper.add_representer(
np_type, yaml.representer.SafeRepresenter.represent_int
)
for np_type in [np.float16, np.float32, np.float64, np.longdouble]:
AstropyDumper.add_representer(np_type, AstropyDumper.represent_float)
for np_type in [complex, np.complex64, np.complex128]:
AstropyDumper.add_representer(np_type, _complex_representer)
AstropyLoader.add_constructor("tag:yaml.org,2002:python/complex", _complex_constructor)
AstropyLoader.add_constructor(
"tag:yaml.org,2002:python/tuple", AstropyLoader._construct_python_tuple
)
AstropyLoader.add_constructor(
"tag:yaml.org,2002:python/unicode", AstropyLoader._construct_python_unicode
)
AstropyLoader.add_constructor("!astropy.units.Unit", _unit_constructor)
AstropyLoader.add_constructor("!numpy.ndarray", _ndarray_constructor)
AstropyLoader.add_constructor("!numpy.void", _void_constructor)
AstropyLoader.add_constructor("!astropy.time.Time", _time_constructor)
AstropyLoader.add_constructor("!astropy.time.TimeDelta", _timedelta_constructor)
AstropyLoader.add_constructor(
"!astropy.coordinates.sky_coordinate.SkyCoord", _skycoord_constructor
)
AstropyLoader.add_constructor(
"!astropy.table.SerializedColumn", _serialized_column_constructor
)
for cls, tag in (
(u.Quantity, "!astropy.units.Quantity"),
(u.Magnitude, "!astropy.units.Magnitude"),
(u.Dex, "!astropy.units.Dex"),
(u.Decibel, "!astropy.units.Decibel"),
(coords.Angle, "!astropy.coordinates.Angle"),
(coords.Latitude, "!astropy.coordinates.Latitude"),
(coords.Longitude, "!astropy.coordinates.Longitude"),
(coords.EarthLocation, "!astropy.coordinates.earth.EarthLocation"),
):
AstropyDumper.add_multi_representer(cls, _quantity_representer(tag))
AstropyLoader.add_constructor(tag, _quantity_constructor(cls))
# Add representations, differentials, and built-in frames defined in astropy and in the
# ``astropy.coordinates`` public API.
cls_coords = [
cls
for cls in itertools.chain(
coords.representation.REPRESENTATION_CLASSES.values(),
coords.representation.DIFFERENTIAL_CLASSES.values(),
coords.frame_transform_graph.frame_set,
)
if cls.__name__ in dir(coords)
]
for cls in cls_coords:
name = cls.__name__
tag = "!astropy.coordinates." + name
AstropyDumper.add_multi_representer(cls, _quantity_representer(tag))
AstropyLoader.add_constructor(tag, _quantity_constructor(cls))
def load(stream):
"""Parse the first YAML document in a stream using the AstropyLoader and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like
YAML input
Returns
-------
obj : object
Object corresponding to YAML document
"""
return yaml.load(stream, Loader=AstropyLoader)
def load_all(stream):
"""Parse the all YAML documents in a stream using the AstropyLoader class and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like
YAML input
Returns
-------
obj : object
Object corresponding to YAML document
"""
return yaml.load_all(stream, Loader=AstropyLoader)
def dump(data, stream=None, **kwargs):
"""Serialize a Python object into a YAML stream using the AstropyDumper class.
If stream is None, return the produced string instead.
Parameters
----------
data : object
Object to serialize to YAML
stream : file-like, optional
YAML output (if not supplied a string is returned)
**kwargs
Other keyword arguments that get passed to yaml.dump()
Returns
-------
out : str or None
If no ``stream`` is supplied then YAML output is returned as str
"""
kwargs["Dumper"] = AstropyDumper
kwargs.setdefault("default_flow_style", None)
return yaml.dump(data, stream=stream, **kwargs)
| AstropyDumper |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 87088,
"end": 87657
} | class ____:
xlCenterPoint = 5 # from enum XlPieSliceIndex
xlInnerCenterPoint = 8 # from enum XlPieSliceIndex
xlInnerClockwisePoint = 7 # from enum XlPieSliceIndex
xlInnerCounterClockwisePoint = 9 # from enum XlPieSliceIndex
xlMidClockwiseRadiusPoint = 4 # from enum XlPieSliceIndex
xlMidCounterClockwiseRadiusPoint = 6 # from enum XlPieSliceIndex
xlOuterCenterPoint = 2 # from enum XlPieSliceIndex
xlOuterClockwisePoint = 3 # from enum XlPieSliceIndex
xlOuterCounterClockwisePoint = 1 # from enum XlPieSliceIndex
| PieSliceIndex |
python | doocs__leetcode | solution/0500-0599/0581.Shortest Unsorted Continuous Subarray/Solution2.py | {
"start": 0,
"end": 432
} | class ____:
def findUnsortedSubarray(self, nums: List[int]) -> int:
mi, mx = inf, -inf
l = r = -1
n = len(nums)
for i, x in enumerate(nums):
if mx > x:
r = i
else:
mx = x
if mi < nums[n - i - 1]:
l = n - i - 1
else:
mi = nums[n - i - 1]
return 0 if r == -1 else r - l + 1
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 226194,
"end": 231048
} | class ____(Request):
"""
Add or update task configuration
:param task: Task ID
:type task: str
:param configuration: Task configuration items. The new ones will be added and
the already existing ones will be updated
:type configuration: Sequence[ConfigurationItem]
:param replace_configuration: If set then the all the configuration items will
be replaced with the provided ones. Otherwise only the provided configuration
items will be updated or added
:type replace_configuration: bool
:param force: If set to True then both new and running task configuration can
be edited. Otherwise only the new task ones. Default is False
:type force: bool
"""
_service = "tasks"
_action = "edit_configuration"
_version = "2.20"
_schema = {
"definitions": {
"configuration_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. Should be unique",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"configuration": {
"description": "Task configuration items. The new ones will be added and the already existing ones will be updated",
"items": {"$ref": "#/definitions/configuration_item"},
"type": "array",
},
"force": {
"description": "If set to True then both new and running task configuration can be edited. Otherwise only the new task ones. Default is False",
"type": "boolean",
},
"replace_configuration": {
"description": "If set then the all the configuration items will be replaced with the provided ones. Otherwise only the provided configuration items will be updated or added",
"type": "boolean",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task", "configuration"],
"type": "object",
}
def __init__(
self,
task: str,
configuration: List[Any],
replace_configuration: Optional[bool] = None,
force: Optional[bool] = None,
**kwargs: Any
) -> None:
super(EditConfigurationRequest, self).__init__(**kwargs)
self.task = task
self.configuration = configuration
self.replace_configuration = replace_configuration
self.force = force
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("configuration")
def configuration(self) -> List[Any]:
return self._property_configuration
@configuration.setter
def configuration(self, value: List[Any]) -> None:
if value is None:
self._property_configuration = None
return
self.assert_isinstance(value, "configuration", (dict, ConfigurationItem), is_array=True)
value = [ConfigurationItem(**v) if isinstance(v, dict) else v for v in value]
self._property_configuration = value
@schema_property("replace_configuration")
def replace_configuration(self) -> Optional[bool]:
return self._property_replace_configuration
@replace_configuration.setter
def replace_configuration(self, value: Optional[bool]) -> None:
if value is None:
self._property_replace_configuration = None
return
self.assert_isinstance(value, "replace_configuration", (bool,))
self._property_replace_configuration = value
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
| EditConfigurationRequest |
python | pytorch__pytorch | torch/distributions/constraints.py | {
"start": 16074,
"end": 16340
} | class ____(Constraint):
"""
Constrain to lower-triangular square matrices.
"""
event_dim = 2
def check(self, value):
value_tril = value.tril()
return (value_tril == value).view(value.shape[:-2] + (-1,)).min(-1)[0]
| _LowerTriangular |
python | getsentry__sentry | src/sentry/backup/sanitize.py | {
"start": 1348,
"end": 1525
} | class ____(SanitizationError):
"""
Thrown when the model does not match that specified by the field supplied by a `set_*`
function caller.
"""
| UnexpectedModelError |
python | pypa__pip | src/pip/_vendor/pygments/lexer.py | {
"start": 28364,
"end": 33354
} | class ____(RegexLexer):
"""
A RegexLexer that uses a context object to store its state.
"""
def get_tokens_unprocessed(self, text=None, context=None):
"""
Split ``text`` into (tokentype, text) pairs.
If ``context`` is given, use this lexer context instead.
"""
tokendefs = self._tokens
if not context:
ctx = LexerContext(text, 0)
statetokens = tokendefs['root']
else:
ctx = context
statetokens = tokendefs[ctx.stack[-1]]
text = ctx.text
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, ctx.pos, ctx.end)
if m:
if action is not None:
if type(action) is _TokenType:
yield ctx.pos, action, m.group()
ctx.pos = m.end()
else:
yield from action(self, m, ctx)
if not new_state:
# altered the state stack?
statetokens = tokendefs[ctx.stack[-1]]
# CAUTION: callback must set ctx.pos!
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
if len(ctx.stack) > 1:
ctx.stack.pop()
elif state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
ctx.stack.append(state)
elif isinstance(new_state, int):
# see RegexLexer for why this check is made
if abs(new_state) >= len(ctx.stack):
del ctx.stack[1:]
else:
del ctx.stack[new_state:]
elif new_state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
assert False, f"wrong state def: {new_state!r}"
statetokens = tokendefs[ctx.stack[-1]]
break
else:
try:
if ctx.pos >= ctx.end:
break
if text[ctx.pos] == '\n':
# at EOL, reset state to "root"
ctx.stack = ['root']
statetokens = tokendefs['root']
yield ctx.pos, Text, '\n'
ctx.pos += 1
continue
yield ctx.pos, Error, text[ctx.pos]
ctx.pos += 1
except IndexError:
break
def do_insertions(insertions, tokens):
"""
Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here.
"""
insertions = iter(insertions)
try:
index, itokens = next(insertions)
except StopIteration:
# no insertions
yield from tokens
return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
# first iteration. store the position of first item
if realpos is None:
realpos = i
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
if tmpval:
yield realpos, t, tmpval
realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
oldi = index - i
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
if oldi < len(v):
yield realpos, t, v[oldi:]
realpos += len(v) - oldi
# leftover tokens
while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
| ExtendedRegexLexer |
python | pyinstaller__pyinstaller | bootloader/waflib/Context.py | {
"start": 1513,
"end": 14805
} | class ____(ctx):
errors = Errors
tools = {}
def __init__(self, **kw):
try:
rd = kw['run_dir']
except KeyError:
rd = run_dir
self.node_class = type('Nod3', (waflib.Node.Node,), {})
self.node_class.__module__ = 'waflib.Node'
self.node_class.ctx = self
self.root = self.node_class('', None)
self.cur_script = None
self.path = self.root.find_dir(rd)
self.stack_path = []
self.exec_dict = {'ctx': self, 'conf': self, 'bld': self, 'opt': self}
self.logger = None
def finalize(self):
try:
logger = self.logger
except AttributeError:
pass
else:
Logs.free_logger(logger)
delattr(self, 'logger')
def load(self, tool_list, *k, **kw):
tools = Utils.to_list(tool_list)
path = Utils.to_list(kw.get('tooldir', ''))
with_sys_path = kw.get('with_sys_path', True)
for t in tools:
module = load_tool(t, path, with_sys_path=with_sys_path)
fun = getattr(module, kw.get('name', self.fun), None)
if fun:
fun(self)
def execute(self):
self.recurse([os.path.dirname(g_module.root_path)])
def pre_recurse(self, node):
self.stack_path.append(self.cur_script)
self.cur_script = node
self.path = node.parent
def post_recurse(self, node):
self.cur_script = self.stack_path.pop()
if self.cur_script:
self.path = self.cur_script.parent
def recurse(self, dirs, name=None, mandatory=True, once=True, encoding=None):
try:
cache = self.recurse_cache
except AttributeError:
cache = self.recurse_cache = {}
for d in Utils.to_list(dirs):
if not os.path.isabs(d):
d = os.path.join(self.path.abspath(), d)
WSCRIPT = os.path.join(d, WSCRIPT_FILE)
WSCRIPT_FUN = WSCRIPT + '_' + (name or self.fun)
node = self.root.find_node(WSCRIPT_FUN)
if node and (not once or node not in cache):
cache[node] = True
self.pre_recurse(node)
try:
function_code = node.read('r', encoding)
exec(compile(function_code, node.abspath(), 'exec'), self.exec_dict)
finally:
self.post_recurse(node)
elif not node:
node = self.root.find_node(WSCRIPT)
tup = (node, name or self.fun)
if node and (not once or tup not in cache):
cache[tup] = True
self.pre_recurse(node)
try:
wscript_module = load_module(node.abspath(), encoding=encoding)
user_function = getattr(wscript_module, (name or self.fun), None)
if not user_function:
if not mandatory:
continue
raise Errors.WafError('No function %r defined in %s' % (name or self.fun, node.abspath()))
user_function(self)
finally:
self.post_recurse(node)
elif not node:
if not mandatory:
continue
try:
os.listdir(d)
except OSError:
raise Errors.WafError('Cannot read the folder %r' % d)
raise Errors.WafError('No wscript file in directory %s' % d)
def log_command(self, cmd, kw):
if Logs.verbose:
fmt = os.environ.get('WAF_CMD_FORMAT')
if fmt == 'string':
if not isinstance(cmd, str):
cmd = Utils.shell_escape(cmd)
Logs.debug('runner: %r', cmd)
Logs.debug('runner_env: kw=%s', kw)
def exec_command(self, cmd, **kw):
subprocess = Utils.subprocess
kw['shell'] = isinstance(cmd, str)
self.log_command(cmd, kw)
if self.logger:
self.logger.info(cmd)
if 'stdout' not in kw:
kw['stdout'] = subprocess.PIPE
if 'stderr' not in kw:
kw['stderr'] = subprocess.PIPE
if Logs.verbose and not kw['shell'] and not Utils.check_exe(cmd[0]):
raise Errors.WafError('Program %s not found!' % cmd[0])
cargs = {}
if 'timeout' in kw:
if sys.hexversion >= 0x3030000:
cargs['timeout'] = kw['timeout']
if not 'start_new_session' in kw:
kw['start_new_session'] = True
del kw['timeout']
if 'input' in kw:
if kw['input']:
cargs['input'] = kw['input']
kw['stdin'] = subprocess.PIPE
del kw['input']
if 'cwd' in kw:
if not isinstance(kw['cwd'], str):
kw['cwd'] = kw['cwd'].abspath()
encoding = kw.pop('decode_as', default_encoding)
try:
ret, out, err = Utils.run_process(cmd, kw, cargs)
except Exception as e:
raise Errors.WafError('Execution failure: %s' % str(e), ex=e)
if out:
if not isinstance(out, str):
out = out.decode(encoding, errors='replace')
if self.logger:
self.logger.debug('out: %s', out)
else:
Logs.info(out, extra={'stream': sys.stdout, 'c1': ''})
if err:
if not isinstance(err, str):
err = err.decode(encoding, errors='replace')
if self.logger:
self.logger.error('err: %s' % err)
else:
Logs.info(err, extra={'stream': sys.stderr, 'c1': ''})
return ret
def cmd_and_log(self, cmd, **kw):
subprocess = Utils.subprocess
kw['shell'] = isinstance(cmd, str)
self.log_command(cmd, kw)
quiet = kw.pop('quiet', None)
to_ret = kw.pop('output', STDOUT)
if Logs.verbose and not kw['shell'] and not Utils.check_exe(cmd[0]):
raise Errors.WafError('Program %r not found!' % cmd[0])
kw['stdout'] = kw['stderr'] = subprocess.PIPE
if quiet is None:
self.to_log(cmd)
cargs = {}
if 'timeout' in kw:
if sys.hexversion >= 0x3030000:
cargs['timeout'] = kw['timeout']
if not 'start_new_session' in kw:
kw['start_new_session'] = True
del kw['timeout']
if 'input' in kw:
if kw['input']:
cargs['input'] = kw['input']
kw['stdin'] = subprocess.PIPE
del kw['input']
if 'cwd' in kw:
if not isinstance(kw['cwd'], str):
kw['cwd'] = kw['cwd'].abspath()
encoding = kw.pop('decode_as', default_encoding)
try:
ret, out, err = Utils.run_process(cmd, kw, cargs)
except Exception as e:
raise Errors.WafError('Execution failure: %s' % str(e), ex=e)
if not isinstance(out, str):
out = out.decode(encoding, errors='replace')
if not isinstance(err, str):
err = err.decode(encoding, errors='replace')
if out and quiet != STDOUT and quiet != BOTH:
self.to_log('out: %s' % out)
if err and quiet != STDERR and quiet != BOTH:
self.to_log('err: %s' % err)
if ret:
e = Errors.WafError('Command %r returned %r' % (cmd, ret))
e.returncode = ret
e.stderr = err
e.stdout = out
raise e
if to_ret == BOTH:
return (out, err)
elif to_ret == STDERR:
return err
return out
def fatal(self, msg, ex=None):
if self.logger:
self.logger.info('from %s: %s' % (self.path.abspath(), msg))
try:
logfile = self.logger.handlers[0].baseFilename
except AttributeError:
pass
else:
if os.environ.get('WAF_PRINT_FAILURE_LOG'):
msg = 'Log from (%s):\n%s\n' % (logfile, Utils.readf(logfile))
else:
msg = '%s\n(complete log in %s)' % (msg, logfile)
raise self.errors.ConfigurationError(msg, ex=ex)
def to_log(self, msg):
if not msg:
return
if self.logger:
self.logger.info(msg)
else:
sys.stderr.write(str(msg))
sys.stderr.flush()
def msg(self, *k, **kw):
try:
msg = kw['msg']
except KeyError:
msg = k[0]
self.start_msg(msg, **kw)
try:
result = kw['result']
except KeyError:
result = k[1]
color = kw.get('color')
if not isinstance(color, str):
color = result and 'GREEN' or 'YELLOW'
self.end_msg(result, color, **kw)
def start_msg(self, *k, **kw):
if kw.get('quiet'):
return
msg = kw.get('msg') or k[0]
try:
if self.in_msg:
self.in_msg += 1
return
except AttributeError:
self.in_msg = 0
self.in_msg += 1
try:
self.line_just = max(self.line_just, len(msg))
except AttributeError:
self.line_just = max(40, len(msg))
for x in (self.line_just * '-', msg):
self.to_log(x)
Logs.pprint('NORMAL', "%s :" % msg.ljust(self.line_just), sep='')
def end_msg(self, *k, **kw):
if kw.get('quiet'):
return
self.in_msg -= 1
if self.in_msg:
return
result = kw.get('result') or k[0]
defcolor = 'GREEN'
if result is True:
msg = 'ok'
elif not result:
msg = 'not found'
defcolor = 'YELLOW'
else:
msg = str(result)
self.to_log(msg)
try:
color = kw['color']
except KeyError:
if len(k) > 1 and k[1] in Logs.colors_lst:
color = k[1]
else:
color = defcolor
Logs.pprint(color, msg)
def load_special_tools(self, var, ban=[]):
if os.path.isdir(waf_dir):
lst = self.root.find_node(waf_dir).find_node('waflib/extras').ant_glob(var)
for x in lst:
if not x.name in ban:
load_tool(x.name.replace('.py', ''))
else:
from zipfile import PyZipFile
waflibs = PyZipFile(waf_dir)
lst = waflibs.namelist()
for x in lst:
if not re.match('waflib/extras/%s' % var.replace('*', '.*'), var):
continue
f = os.path.basename(x)
doban = False
for b in ban:
r = b.replace('*', '.*')
if re.match(r, f):
doban = True
if not doban:
f = f.replace('.py', '')
load_tool(f)
cache_modules = {}
def load_module(path, encoding=None):
try:
return cache_modules[path]
except KeyError:
pass
module = imp.new_module(WSCRIPT_FILE)
try:
code = Utils.readf(path, m='r', encoding=encoding)
except EnvironmentError:
raise Errors.WafError('Could not read the file %r' % path)
module_dir = os.path.dirname(path)
sys.path.insert(0, module_dir)
try:
exec(compile(code, path, 'exec'), module.__dict__)
finally:
sys.path.remove(module_dir)
cache_modules[path] = module
return module
def load_tool(tool, tooldir=None, ctx=None, with_sys_path=True):
if tool == 'java':
tool = 'javaw'
else:
tool = tool.replace('++', 'xx')
if not with_sys_path:
back_path = sys.path
sys.path = []
try:
if tooldir:
assert isinstance(tooldir, list)
sys.path = tooldir + sys.path
try:
__import__(tool)
except ImportError as e:
e.waf_sys_path = list(sys.path)
raise
finally:
for d in tooldir:
sys.path.remove(d)
ret = sys.modules[tool]
Context.tools[tool] = ret
return ret
else:
if not with_sys_path:
sys.path.insert(0, waf_dir)
try:
for x in ('waflib.Tools.%s', 'waflib.extras.%s', 'waflib.%s', '%s'):
try:
__import__(x % tool)
break
except ImportError:
x = None
else:
__import__(tool)
except ImportError as e:
e.waf_sys_path = list(sys.path)
raise
finally:
if not with_sys_path:
sys.path.remove(waf_dir)
ret = sys.modules[x % tool]
Context.tools[tool] = ret
return ret
finally:
if not with_sys_path:
sys.path += back_path
| Context |
python | matplotlib__matplotlib | lib/matplotlib/contour.py | {
"start": 23794,
"end": 52551
} | class ____(ContourLabeler, mcoll.Collection):
"""
Store a set of contour lines or filled regions.
User-callable method: `~.Axes.clabel`
Parameters
----------
ax : `~matplotlib.axes.Axes`
levels : [level0, level1, ..., leveln]
A list of floating point numbers indicating the contour levels.
allsegs : [level0segs, level1segs, ...]
List of all the polygon segments for all the *levels*.
For contour lines ``len(allsegs) == len(levels)``, and for
filled contour regions ``len(allsegs) = len(levels)-1``. The lists
should look like ::
level0segs = [polygon0, polygon1, ...]
polygon0 = [[x0, y0], [x1, y1], ...]
allkinds : ``None`` or [level0kinds, level1kinds, ...]
Optional list of all the polygon vertex kinds (code types), as
described and used in Path. This is used to allow multiply-
connected paths such as holes within filled polygons.
If not ``None``, ``len(allkinds) == len(allsegs)``. The lists
should look like ::
level0kinds = [polygon0kinds, ...]
polygon0kinds = [vertexcode0, vertexcode1, ...]
If *allkinds* is not ``None``, usually all polygons for a
particular contour level are grouped together so that
``level0segs = [polygon0]`` and ``level0kinds = [polygon0kinds]``.
**kwargs
Keyword arguments are as described in the docstring of
`~.Axes.contour`.
%(contour_set_attributes)s
"""
def __init__(self, ax, *args,
levels=None, filled=False, linewidths=None, linestyles=None,
hatches=(None,), alpha=None, origin=None, extent=None,
cmap=None, colors=None, norm=None, vmin=None, vmax=None,
colorizer=None, extend='neither', antialiased=None, nchunk=0,
locator=None, transform=None, negative_linestyles=None,
**kwargs):
"""
Draw contour lines or filled regions, depending on
whether keyword arg *filled* is ``False`` (default) or ``True``.
Call signature::
ContourSet(ax, levels, allsegs, [allkinds], **kwargs)
Parameters
----------
ax : `~matplotlib.axes.Axes`
The `~.axes.Axes` object to draw on.
levels : [level0, level1, ..., leveln]
A list of floating point numbers indicating the contour
levels.
allsegs : [level0segs, level1segs, ...]
List of all the polygon segments for all the *levels*.
For contour lines ``len(allsegs) == len(levels)``, and for
filled contour regions ``len(allsegs) = len(levels)-1``. The lists
should look like ::
level0segs = [polygon0, polygon1, ...]
polygon0 = [[x0, y0], [x1, y1], ...]
allkinds : [level0kinds, level1kinds, ...], optional
Optional list of all the polygon vertex kinds (code types), as
described and used in Path. This is used to allow multiply-
connected paths such as holes within filled polygons.
If not ``None``, ``len(allkinds) == len(allsegs)``. The lists
should look like ::
level0kinds = [polygon0kinds, ...]
polygon0kinds = [vertexcode0, vertexcode1, ...]
If *allkinds* is not ``None``, usually all polygons for a
particular contour level are grouped together so that
``level0segs = [polygon0]`` and ``level0kinds = [polygon0kinds]``.
**kwargs
Keyword arguments are as described in the docstring of
`~.Axes.contour`.
"""
if antialiased is None and filled:
# Eliminate artifacts; we are not stroking the boundaries.
antialiased = False
# The default for line contours will be taken from the
# LineCollection default, which uses :rc:`lines.antialiased`.
super().__init__(
antialiaseds=antialiased,
alpha=alpha,
transform=transform,
colorizer=colorizer,
)
self.axes = ax
self.levels = levels
self.filled = filled
self.hatches = hatches
self.origin = origin
self.extent = extent
self.colors = colors
self.extend = extend
self.nchunk = nchunk
self.locator = locator
if "color" in kwargs:
raise _api.kwarg_error("ContourSet.__init__", "color")
if colorizer:
self._set_colorizer_check_keywords(colorizer, cmap=cmap,
norm=norm, vmin=vmin,
vmax=vmax, colors=colors)
norm = colorizer.norm
cmap = colorizer.cmap
if (isinstance(norm, mcolors.LogNorm)
or isinstance(self.locator, ticker.LogLocator)):
self.logscale = True
if norm is None:
norm = mcolors.LogNorm()
else:
self.logscale = False
_api.check_in_list([None, 'lower', 'upper', 'image'], origin=origin)
if self.extent is not None and len(self.extent) != 4:
raise ValueError(
"If given, 'extent' must be None or (x0, x1, y0, y1)")
if self.colors is not None and cmap is not None:
raise ValueError('Either colors or cmap must be None')
if self.origin == 'image':
self.origin = mpl.rcParams['image.origin']
self._orig_linestyles = linestyles # Only kept for user access.
self.negative_linestyles = mpl._val_or_rc(negative_linestyles,
'contour.negative_linestyle')
kwargs = self._process_args(*args, **kwargs)
self._process_levels()
self._extend_min = self.extend in ['min', 'both']
self._extend_max = self.extend in ['max', 'both']
if self.colors is not None:
if mcolors.is_color_like(self.colors):
color_sequence = [self.colors]
else:
color_sequence = self.colors
ncolors = len(self.levels)
if self.filled:
ncolors -= 1
i0 = 0
# Handle the case where colors are given for the extended
# parts of the contour.
use_set_under_over = False
# if we are extending the lower end, and we've been given enough
# colors then skip the first color in the resulting cmap. For the
# extend_max case we don't need to worry about passing more colors
# than ncolors as ListedColormap will clip.
total_levels = (ncolors +
int(self._extend_min) +
int(self._extend_max))
if (len(color_sequence) == total_levels and
(self._extend_min or self._extend_max)):
use_set_under_over = True
if self._extend_min:
i0 = 1
cmap = mcolors.ListedColormap(
cbook._resize_sequence(color_sequence[i0:], ncolors),
under=(color_sequence[0]
if use_set_under_over and self._extend_min else None),
over=(color_sequence[-1]
if use_set_under_over and self._extend_max else None),
)
# label lists must be initialized here
self.labelTexts = []
self.labelCValues = []
self.set_cmap(cmap)
if norm is not None:
self.set_norm(norm)
with self.norm.callbacks.blocked(signal="changed"):
if vmin is not None:
self.norm.vmin = vmin
if vmax is not None:
self.norm.vmax = vmax
self.norm._changed()
self._process_colors()
if self._paths is None:
self._paths = self._make_paths_from_contour_generator()
if self.filled:
if linewidths is not None:
_api.warn_external('linewidths is ignored by contourf')
# Lower and upper contour levels.
lowers, uppers = self._get_lowers_and_uppers()
self.set(edgecolor="none")
else:
self.set(
facecolor="none",
linewidths=self._process_linewidths(linewidths),
linestyle=self._process_linestyles(linestyles),
label="_nolegend_",
# Default zorder taken from LineCollection, which is higher
# than for filled contours so that lines are displayed on top.
zorder=2,
)
self.set(**kwargs) # Let user-set values override defaults.
self.axes.add_collection(self, autolim=False)
self.sticky_edges.x[:] = [self._mins[0], self._maxs[0]]
self.sticky_edges.y[:] = [self._mins[1], self._maxs[1]]
self.axes.update_datalim([self._mins, self._maxs])
self.axes.autoscale_view(tight=True)
self.changed() # set the colors
allsegs = property(lambda self: [
[subp.vertices for subp in p._iter_connected_components()]
for p in self.get_paths()])
allkinds = property(lambda self: [
[subp.codes for subp in p._iter_connected_components()]
for p in self.get_paths()])
alpha = property(lambda self: self.get_alpha())
linestyles = property(lambda self: self._orig_linestyles)
def get_transform(self):
"""Return the `.Transform` instance used by this ContourSet."""
if self._transform is None:
self._transform = self.axes.transData
elif (not isinstance(self._transform, mtransforms.Transform)
and hasattr(self._transform, '_as_mpl_transform')):
self._transform = self._transform._as_mpl_transform(self.axes)
return self._transform
def __getstate__(self):
state = self.__dict__.copy()
# the C object _contour_generator cannot currently be pickled. This
# isn't a big issue as it is not actually used once the contour has
# been calculated.
state['_contour_generator'] = None
return state
def legend_elements(self, variable_name='x', str_format=str):
"""
Return a list of artists and labels suitable for passing through
to `~.Axes.legend` which represent this ContourSet.
The labels have the form "0 < x <= 1" stating the data ranges which
the artists represent.
Parameters
----------
variable_name : str
The string used inside the inequality used on the labels.
str_format : function: float -> str
Function used to format the numbers in the labels.
Returns
-------
artists : list[`.Artist`]
A list of the artists.
labels : list[str]
A list of the labels.
"""
artists = []
labels = []
if self.filled:
lowers, uppers = self._get_lowers_and_uppers()
n_levels = len(self._paths)
for idx in range(n_levels):
artists.append(mpatches.Rectangle(
(0, 0), 1, 1,
facecolor=self.get_facecolor()[idx],
hatch=self.hatches[idx % len(self.hatches)],
))
lower = str_format(lowers[idx])
upper = str_format(uppers[idx])
if idx == 0 and self.extend in ('min', 'both'):
labels.append(fr'${variable_name} \leq {lower}s$')
elif idx == n_levels - 1 and self.extend in ('max', 'both'):
labels.append(fr'${variable_name} > {upper}s$')
else:
labels.append(fr'${lower} < {variable_name} \leq {upper}$')
else:
for idx, level in enumerate(self.levels):
artists.append(Line2D(
[], [],
color=self.get_edgecolor()[idx],
linewidth=self.get_linewidths()[idx],
linestyle=self.get_linestyles()[idx],
))
labels.append(fr'${variable_name} = {str_format(level)}$')
return artists, labels
def _process_args(self, *args, **kwargs):
"""
Process *args* and *kwargs*; override in derived classes.
Must set self.levels, self.zmin and self.zmax, and update Axes limits.
"""
self.levels = args[0]
allsegs = args[1]
allkinds = args[2] if len(args) > 2 else None
self.zmax = np.max(self.levels)
self.zmin = np.min(self.levels)
if allkinds is None:
allkinds = [[None] * len(segs) for segs in allsegs]
# Check lengths of levels and allsegs.
if self.filled:
if len(allsegs) != len(self.levels) - 1:
raise ValueError('must be one less number of segments as '
'levels')
else:
if len(allsegs) != len(self.levels):
raise ValueError('must be same number of segments as levels')
# Check length of allkinds.
if len(allkinds) != len(allsegs):
raise ValueError('allkinds has different length to allsegs')
# Determine x, y bounds and update axes data limits.
flatseglist = [s for seg in allsegs for s in seg]
points = np.concatenate(flatseglist, axis=0)
self._mins = points.min(axis=0)
self._maxs = points.max(axis=0)
# Each entry in (allsegs, allkinds) is a list of (segs, kinds): segs is a list
# of (N, 2) arrays of xy coordinates, kinds is a list of arrays of corresponding
# pathcodes. However, kinds can also be None; in which case all paths in that
# list are codeless (this case is normalized above). These lists are used to
# construct paths, which then get concatenated.
self._paths = [Path.make_compound_path(*map(Path, segs, kinds))
for segs, kinds in zip(allsegs, allkinds)]
return kwargs
def _make_paths_from_contour_generator(self):
"""Compute ``paths`` using C extension."""
if self._paths is not None:
return self._paths
cg = self._contour_generator
empty_path = Path(np.empty((0, 2)))
vertices_and_codes = (
map(cg.create_filled_contour, *self._get_lowers_and_uppers())
if self.filled else
map(cg.create_contour, self.levels))
return [Path(np.concatenate(vs), np.concatenate(cs)) if len(vs) else empty_path
for vs, cs in vertices_and_codes]
def _get_lowers_and_uppers(self):
"""
Return ``(lowers, uppers)`` for filled contours.
"""
lowers = self._levels[:-1]
if self.zmin == lowers[0]:
# Include minimum values in lowest interval
lowers = lowers.copy() # so we don't change self._levels
if self.logscale:
lowers[0] = 0.99 * self.zmin
else:
lowers[0] -= 1
uppers = self._levels[1:]
return (lowers, uppers)
def changed(self):
if not hasattr(self, "cvalues"):
self._process_colors() # Sets cvalues.
# Force an autoscale immediately because self.to_rgba() calls
# autoscale_None() internally with the data passed to it,
# so if vmin/vmax are not set yet, this would override them with
# content from *cvalues* rather than levels like we want
self.norm.autoscale_None(self.levels)
self.set_array(self.cvalues)
self.update_scalarmappable()
alphas = np.broadcast_to(self.get_alpha(), len(self.cvalues))
for label, cv, alpha in zip(self.labelTexts, self.labelCValues, alphas):
label.set_alpha(alpha)
label.set_color(self.labelMappable.to_rgba(cv))
super().changed()
def _ensure_locator_exists(self, N):
"""
Set a locator on this ContourSet if it's not already set.
Parameters
----------
N : int or None
If *N* is an int, it is used as the target number of levels.
Otherwise when *N* is None, a reasonable default is chosen;
for logscales the LogLocator chooses, N=7 is the default
otherwise.
"""
if self.locator is None:
if self.logscale:
self.locator = ticker.LogLocator(numticks=N)
else:
if N is None:
N = 7 # Hard coded default
self.locator = ticker.MaxNLocator(N + 1, min_n_ticks=1)
def _autolev(self):
"""
Select contour levels to span the data.
We need two more levels for filled contours than for
line contours, because for the latter we need to specify
the lower and upper boundary of each range. For example,
a single contour boundary, say at z = 0, requires only
one contour line, but two filled regions, and therefore
three levels to provide boundaries for both regions.
"""
lev = self.locator.tick_values(self.zmin, self.zmax)
try:
if self.locator._symmetric:
return lev
except AttributeError:
pass
# Trim excess levels the locator may have supplied.
under = np.nonzero(lev < self.zmin)[0]
i0 = under[-1] if len(under) else 0
over = np.nonzero(lev > self.zmax)[0]
i1 = over[0] + 1 if len(over) else len(lev)
if self.extend in ('min', 'both'):
i0 += 1
if self.extend in ('max', 'both'):
i1 -= 1
if i1 - i0 < 3:
i0, i1 = 0, len(lev)
return lev[i0:i1]
def _process_contour_level_args(self, args, z_dtype):
"""
Determine the contour levels and store in self.levels.
"""
levels_arg = self.levels
if levels_arg is None:
if args:
# Set if levels manually provided
levels_arg = args[0]
elif np.issubdtype(z_dtype, bool):
# Set default values for bool data types
levels_arg = [0, .5, 1] if self.filled else [.5]
if isinstance(levels_arg, Integral) or levels_arg is None:
self._ensure_locator_exists(levels_arg)
self.levels = self._autolev()
else:
self.levels = np.asarray(levels_arg, np.float64)
if self.filled and len(self.levels) < 2:
raise ValueError("Filled contours require at least 2 levels.")
if len(self.levels) > 1 and np.min(np.diff(self.levels)) <= 0.0:
raise ValueError("Contour levels must be increasing")
def _process_levels(self):
"""
Assign values to :attr:`layers` based on :attr:`levels`,
adding extended layers as needed if contours are filled.
For line contours, layers simply coincide with levels;
a line is a thin layer. No extended levels are needed
with line contours.
"""
# Make a private _levels to include extended regions; we
# want to leave the original levels attribute unchanged.
# (Colorbar needs this even for line contours.)
self._levels = list(self.levels)
if self.logscale:
lower, upper = 1e-250, 1e250
else:
lower, upper = -1e250, 1e250
if self.extend in ('both', 'min'):
self._levels.insert(0, lower)
if self.extend in ('both', 'max'):
self._levels.append(upper)
self._levels = np.asarray(self._levels)
if not self.filled:
self.layers = self.levels
return
# Layer values are mid-way between levels in screen space.
if self.logscale:
# Avoid overflow by taking sqrt before multiplying.
self.layers = (np.sqrt(self._levels[:-1])
* np.sqrt(self._levels[1:]))
else:
self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
def _process_colors(self):
"""
Color argument processing for contouring.
Note that we base the colormapping on the contour levels
and layers, not on the actual range of the Z values. This
means we don't have to worry about bad values in Z, and we
always have the full dynamic range available for the selected
levels.
The color is based on the midpoint of the layer, except for
extended end layers. By default, the norm vmin and vmax
are the extreme values of the non-extended levels. Hence,
the layer color extremes are not the extreme values of
the colormap itself, but approach those values as the number
of levels increases. An advantage of this scheme is that
line contours, when added to filled contours, take on
colors that are consistent with those of the filled regions;
for example, a contour line on the boundary between two
regions will have a color intermediate between those
of the regions.
"""
self.monochrome = self.cmap.monochrome
if self.colors is not None:
# Generate integers for direct indexing.
i0, i1 = 0, len(self.levels)
if self.filled:
i1 -= 1
# Out of range indices for over and under:
if self.extend in ('both', 'min'):
i0 -= 1
if self.extend in ('both', 'max'):
i1 += 1
self.cvalues = list(range(i0, i1))
self.set_norm(mcolors.NoNorm())
else:
self.cvalues = self.layers
self.norm.autoscale_None(self.levels)
self.set_array(self.cvalues)
self.update_scalarmappable()
if self.extend in ('both', 'max', 'min'):
self.norm.clip = False
def _process_linewidths(self, linewidths):
Nlev = len(self.levels)
if linewidths is None:
default_linewidth = mpl.rcParams['contour.linewidth']
if default_linewidth is None:
default_linewidth = mpl.rcParams['lines.linewidth']
return [default_linewidth] * Nlev
elif not np.iterable(linewidths):
return [linewidths] * Nlev
else:
linewidths = list(linewidths)
return (linewidths * math.ceil(Nlev / len(linewidths)))[:Nlev]
def _process_linestyles(self, linestyles):
Nlev = len(self.levels)
if linestyles is None:
tlinestyles = ['solid'] * Nlev
if self.monochrome:
eps = - (self.zmax - self.zmin) * 1e-15
for i, lev in enumerate(self.levels):
if lev < eps:
tlinestyles[i] = self.negative_linestyles
else:
if isinstance(linestyles, str):
tlinestyles = [linestyles] * Nlev
elif np.iterable(linestyles):
tlinestyles = list(linestyles)
if len(tlinestyles) < Nlev:
nreps = int(np.ceil(Nlev / len(linestyles)))
tlinestyles = tlinestyles * nreps
if len(tlinestyles) > Nlev:
tlinestyles = tlinestyles[:Nlev]
else:
raise ValueError("Unrecognized type for linestyles kwarg")
return tlinestyles
def _find_nearest_contour(self, xy, indices=None):
"""
Find the point in the unfilled contour plot that is closest (in screen
space) to point *xy*.
Parameters
----------
xy : tuple[float, float]
The reference point (in screen space).
indices : list of int or None, default: None
Indices of contour levels to consider. If None (the default), all levels
are considered.
Returns
-------
idx_level_min : int
The index of the contour level closest to *xy*.
idx_vtx_min : int
The index of the `.Path` segment closest to *xy* (at that level).
proj : (float, float)
The point in the contour plot closest to *xy*.
"""
# Convert each contour segment to pixel coordinates and then compare the given
# point to those coordinates for each contour. This is fast enough in normal
# cases, but speedups may be possible.
if self.filled:
raise ValueError("Method does not support filled contours")
if indices is None:
indices = range(len(self._paths))
d2min = np.inf
idx_level_min = idx_vtx_min = proj_min = None
for idx_level in indices:
path = self._paths[idx_level]
idx_vtx_start = 0
for subpath in path._iter_connected_components():
if not len(subpath.vertices):
continue
lc = self.get_transform().transform(subpath.vertices)
d2, proj, leg = _find_closest_point_on_path(lc, xy)
if d2 < d2min:
d2min = d2
idx_level_min = idx_level
idx_vtx_min = leg[1] + idx_vtx_start
proj_min = proj
idx_vtx_start += len(subpath)
return idx_level_min, idx_vtx_min, proj_min
def find_nearest_contour(self, x, y, indices=None, pixel=True):
"""
Find the point in the contour plot that is closest to ``(x, y)``.
This method does not support filled contours.
Parameters
----------
x, y : float
The reference point.
indices : list of int or None, default: None
Indices of contour levels to consider. If None (the default), all
levels are considered.
pixel : bool, default: True
If *True*, measure distance in pixel (screen) space, which is
useful for manual contour labeling; else, measure distance in axes
space.
Returns
-------
path : int
The index of the path that is closest to ``(x, y)``. Each path corresponds
to one contour level.
subpath : int
The index within that closest path of the subpath that is closest to
``(x, y)``. Each subpath corresponds to one unbroken contour line.
index : int
The index of the vertices within that subpath that are closest to
``(x, y)``.
xmin, ymin : float
The point in the contour plot that is closest to ``(x, y)``.
d2 : float
The squared distance from ``(xmin, ymin)`` to ``(x, y)``.
"""
segment = index = d2 = None
with ExitStack() as stack:
if not pixel:
# _find_nearest_contour works in pixel space. We want axes space, so
# effectively disable the transformation here by setting to identity.
stack.enter_context(self._cm_set(
transform=mtransforms.IdentityTransform()))
i_level, i_vtx, (xmin, ymin) = self._find_nearest_contour((x, y), indices)
if i_level is not None:
cc_cumlens = np.cumsum(
[*map(len, self._paths[i_level]._iter_connected_components())])
segment = cc_cumlens.searchsorted(i_vtx, "right")
index = i_vtx if segment == 0 else i_vtx - cc_cumlens[segment - 1]
d2 = (xmin-x)**2 + (ymin-y)**2
return (i_level, segment, index, xmin, ymin, d2)
@artist.allow_rasterization
def draw(self, renderer):
paths = self._paths
n_paths = len(paths)
if not self.filled or all(hatch is None for hatch in self.hatches):
super().draw(renderer)
return
# In presence of hatching, draw contours one at a time.
edgecolors = self.get_edgecolors()
if edgecolors.size == 0:
edgecolors = ("none",)
for idx in range(n_paths):
with self._cm_set(
paths=[paths[idx]],
hatch=self.hatches[idx % len(self.hatches)],
array=[self.get_array()[idx]],
linewidths=[self.get_linewidths()[idx % len(self.get_linewidths())]],
linestyles=[self.get_linestyles()[idx % len(self.get_linestyles())]],
edgecolors=edgecolors[idx % len(edgecolors)],
):
super().draw(renderer)
@_docstring.interpd
| ContourSet |
python | walkccc__LeetCode | solutions/1728. Cat and Mouse II/1728.py | {
"start": 0,
"end": 2280
} | class ____:
def canMouseWin(self, grid: list[str], catJump: int, mouseJump: int) -> bool:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
m = len(grid)
n = len(grid[0])
nFloors = 0
cat = 0 # cat's position
mouse = 0 # mouse's position
def hash(i: int, j: int) -> int:
return i * n + j
for i in range(m):
for j in range(n):
if grid[i][j] != '#':
nFloors += 1
if grid[i][j] == 'C':
cat = hash(i, j)
elif grid[i][j] == 'M':
mouse = hash(i, j)
@functools.lru_cache(None)
def dp(cat: int, mouse: int, turn: int) -> bool:
"""
Returns True if the mouse can win, where the cat is on (i / 8, i % 8), the
mouse is on (j / 8, j % 8), and the turns is k.
"""
# We already search the whole touchable grid.
if turn == nFloors * 2:
return False
if turn % 2 == 0:
# the mouse's turn
i = mouse // n
j = mouse % n
for dx, dy in DIRS:
for jump in range(mouseJump + 1):
x = i + dx * jump
y = j + dy * jump
if x < 0 or x == m or y < 0 or y == n:
break
if grid[x][y] == '#':
break
# The mouse eats the food, so the mouse wins.
if grid[x][y] == 'F':
return True
if dp(cat, hash(x, y), turn + 1):
return True
# The mouse can't win, so the mouse loses.
return False
else:
# the cat's turn
i = cat // n
j = cat % n
for dx, dy in DIRS:
for jump in range(catJump + 1):
x = i + dx * jump
y = j + dy * jump
if x < 0 or x == m or y < 0 or y == n:
break
if grid[x][y] == '#':
break
# The cat eats the food, so the mouse loses.
if grid[x][y] == 'F':
return False
nextCat = hash(x, y)
# The cat catches the mouse, so the mouse loses.
if nextCat == mouse:
return False
if not dp(nextCat, mouse, turn + 1):
return False
# The cat can't win, so the mouse wins.
return True
return dp(cat, mouse, 0)
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py | {
"start": 3516,
"end": 3595
} | class ____(IncrementalShopifyStream):
data_field = "draft_orders"
| DraftOrders |
python | numba__numba | numba/cuda/tests/cudapy/test_debuginfo.py | {
"start": 295,
"end": 7875
} | class ____(CUDATestCase):
"""
These tests only checks the compiled PTX for debuginfo section
"""
def setUp(self):
super().setUp()
# If we're using LTO then we can't check the PTX in these tests,
# because we produce LTO-IR, which is opaque to the user.
# Additionally, LTO optimizes away the exception status due to an
# oversight in the way we generate it (it is not added to the used
# list).
self.skip_if_lto("Exceptions not supported with LTO")
def _getasm(self, fn, sig):
fn.compile(sig)
return fn.inspect_asm(sig)
def _check(self, fn, sig, expect):
asm = self._getasm(fn, sig=sig)
re_section_dbginfo = re.compile(r"\.section\s+\.debug_info\s+{")
match = re_section_dbginfo.search(asm)
assertfn = self.assertIsNotNone if expect else self.assertIsNone
assertfn(match, msg=asm)
def test_no_debuginfo_in_asm(self):
@cuda.jit(debug=False)
def foo(x):
x[0] = 1
self._check(foo, sig=(types.int32[:],), expect=False)
def test_debuginfo_in_asm(self):
@cuda.jit(debug=True, opt=False)
def foo(x):
x[0] = 1
self._check(foo, sig=(types.int32[:],), expect=True)
def test_environment_override(self):
with override_config('CUDA_DEBUGINFO_DEFAULT', 1):
# Using default value
@cuda.jit(opt=False)
def foo(x):
x[0] = 1
self._check(foo, sig=(types.int32[:],), expect=True)
# User override default value
@cuda.jit(debug=False)
def bar(x):
x[0] = 1
self._check(bar, sig=(types.int32[:],), expect=False)
def test_issue_5835(self):
# Invalid debug metadata would segfault NVVM when any function was
# compiled with debug turned on and optimization off. This eager
# compilation should not crash anything.
@cuda.jit((types.int32[::1],), debug=True, opt=False)
def f(x):
x[0] = 0
def test_wrapper_has_debuginfo(self):
sig = (types.int32[::1],)
@cuda.jit(sig, debug=True, opt=0)
def f(x):
x[0] = 1
llvm_ir = f.inspect_llvm(sig)
defines = [line for line in llvm_ir.splitlines()
if 'define void @"_ZN6cudapy' in line]
# Make sure we only found one definition
self.assertEqual(len(defines), 1)
wrapper_define = defines[0]
self.assertIn('!dbg', wrapper_define)
def test_debug_function_calls_internal_impl(self):
# Calling a function in a module generated from an implementation
# internal to Numba requires multiple modules to be compiled with NVVM -
# the internal implementation, and the caller. This example uses two
# modules because the `in (2, 3)` is implemented with:
#
# numba::cpython::listobj::in_seq::$3clocals$3e::seq_contains_impl$242(
# UniTuple<long long, 2>,
# int
# )
#
# This is condensed from this reproducer in Issue 5311:
# https://github.com/numba/numba/issues/5311#issuecomment-674206587
@cuda.jit((types.int32[:], types.int32[:]), debug=True, opt=False)
def f(inp, outp):
outp[0] = 1 if inp[0] in (2, 3) else 3
def test_debug_function_calls_device_function(self):
# Calling a device function requires compilation of multiple modules
# with NVVM - one for the caller and one for the callee. This checks
# that we don't cause an NVVM error in this case.
@cuda.jit(device=True, debug=True, opt=0)
def threadid():
return cuda.blockDim.x * cuda.blockIdx.x + cuda.threadIdx.x
@cuda.jit((types.int32[:],), debug=True, opt=0)
def kernel(arr):
i = cuda.grid(1)
if i < len(arr):
arr[i] = threadid()
def _test_chained_device_function(self, kernel_debug, f1_debug, f2_debug):
@cuda.jit(device=True, debug=f2_debug, opt=False)
def f2(x):
return x + 1
@cuda.jit(device=True, debug=f1_debug, opt=False)
def f1(x, y):
return x - f2(y)
@cuda.jit((types.int32, types.int32), debug=kernel_debug, opt=False)
def kernel(x, y):
f1(x, y)
kernel[1, 1](1, 2)
def test_chained_device_function(self):
# Calling a device function that calls another device function from a
# kernel with should succeed regardless of which jit decorators have
# debug=True. See Issue #7159.
debug_opts = itertools.product(*[(True, False)] * 3)
for kernel_debug, f1_debug, f2_debug in debug_opts:
with self.subTest(kernel_debug=kernel_debug,
f1_debug=f1_debug,
f2_debug=f2_debug):
self._test_chained_device_function(kernel_debug,
f1_debug,
f2_debug)
def _test_chained_device_function_two_calls(self, kernel_debug, f1_debug,
f2_debug):
@cuda.jit(device=True, debug=f2_debug, opt=False)
def f2(x):
return x + 1
@cuda.jit(device=True, debug=f1_debug, opt=False)
def f1(x, y):
return x - f2(y)
@cuda.jit(debug=kernel_debug, opt=False)
def kernel(x, y):
f1(x, y)
f2(x)
kernel[1, 1](1, 2)
def test_chained_device_function_two_calls(self):
# Calling a device function that calls a leaf device function from a
# kernel, and calling the leaf device function from the kernel should
# succeed, regardless of which jit decorators have debug=True. See
# Issue #7159.
debug_opts = itertools.product(*[(True, False)] * 3)
for kernel_debug, f1_debug, f2_debug in debug_opts:
with self.subTest(kernel_debug=kernel_debug,
f1_debug=f1_debug,
f2_debug=f2_debug):
self._test_chained_device_function_two_calls(kernel_debug,
f1_debug,
f2_debug)
def test_chained_device_three_functions(self):
# Like test_chained_device_function, but with enough functions (three)
# to ensure that the recursion visits all the way down the call tree
# when fixing linkage of functions for debug.
def three_device_fns(kernel_debug, leaf_debug):
@cuda.jit(device=True, debug=leaf_debug, opt=False)
def f3(x):
return x * x
@cuda.jit(device=True)
def f2(x):
return f3(x) + 1
@cuda.jit(device=True)
def f1(x, y):
return x - f2(y)
@cuda.jit(debug=kernel_debug, opt=False)
def kernel(x, y):
f1(x, y)
kernel[1, 1](1, 2)
# Check when debug on the kernel, on the leaf, and not on any function.
three_device_fns(kernel_debug=True, leaf_debug=True)
three_device_fns(kernel_debug=True, leaf_debug=False)
three_device_fns(kernel_debug=False, leaf_debug=True)
three_device_fns(kernel_debug=False, leaf_debug=False)
if __name__ == '__main__':
unittest.main()
| TestCudaDebugInfo |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/recursiveTypeAlias11.py | {
"start": 178,
"end": 240
} | class ____:
pass
T1 = TypeVar("T1", bound=ClassA1)
| ClassA1 |
python | pyca__cryptography | tests/x509/test_x509.py | {
"start": 228005,
"end": 231009
} | class ____:
@pytest.mark.parametrize(
("path", "loader_func"),
[
[
os.path.join("x509", "requests", "ec_sha256.pem"),
x509.load_pem_x509_csr,
],
[
os.path.join("x509", "requests", "ec_sha256.der"),
x509.load_der_x509_csr,
],
],
)
def test_load_ecdsa_certificate_request(self, path, loader_func, backend):
_skip_curve_unsupported(backend, ec.SECP384R1())
request = _load_cert(path, loader_func)
assert isinstance(request.signature_hash_algorithm, hashes.SHA256)
public_key = request.public_key()
assert isinstance(public_key, ec.EllipticCurvePublicKey)
subject = request.subject
assert isinstance(subject, x509.Name)
assert list(subject) == [
x509.NameAttribute(NameOID.COMMON_NAME, "cryptography.io"),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, "PyCA"),
x509.NameAttribute(NameOID.COUNTRY_NAME, "US"),
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, "Texas"),
x509.NameAttribute(NameOID.LOCALITY_NAME, "Austin"),
]
def test_signature(self, backend):
_skip_curve_unsupported(backend, ec.SECP384R1())
request = _load_cert(
os.path.join("x509", "requests", "ec_sha256.pem"),
x509.load_pem_x509_csr,
)
assert request.signature == binascii.unhexlify(
b"306502302c1a9f7de8c1787332d2307a886b476a59f172b9b0e250262f3238b1"
b"b45ee112bb6eb35b0fb56a123b9296eb212dffc302310094cf440c95c52827d5"
b"56ae6d76500e3008255d47c29f7ee782ed7558e51bfd76aa45df6d999ed5c463"
b"347fe2382d1751"
)
def test_tbs_certrequest_bytes(self, backend):
_skip_curve_unsupported(backend, ec.SECP384R1())
request = _load_cert(
os.path.join("x509", "requests", "ec_sha256.pem"),
x509.load_pem_x509_csr,
)
assert request.tbs_certrequest_bytes == binascii.unhexlify(
b"3081d602010030573118301606035504030c0f63727970746f6772617068792"
b"e696f310d300b060355040a0c0450794341310b300906035504061302555331"
b"0e300c06035504080c055465786173310f300d06035504070c0641757374696"
b"e3076301006072a8648ce3d020106052b8104002203620004de19b514c0b3c3"
b"ae9b398ea3e26b5e816bdcf9102cad8f12fe02f9e4c9248724b39297ed7582e"
b"04d8b32a551038d09086803a6d3fb91a1a1167ec02158b00efad39c9396462f"
b"accff0ffaf7155812909d3726bd59fde001cff4bb9b2f5af8cbaa000"
)
assert request.signature_hash_algorithm is not None
public_key = request.public_key()
assert isinstance(public_key, ec.EllipticCurvePublicKey)
public_key.verify(
request.signature,
request.tbs_certrequest_bytes,
ec.ECDSA(request.signature_hash_algorithm),
)
| TestECDSACertificateRequest |
python | doocs__leetcode | solution/1400-1499/1400.Construct K Palindrome Strings/Solution.py | {
"start": 0,
"end": 194
} | class ____:
def canConstruct(self, s: str, k: int) -> bool:
if len(s) < k:
return False
cnt = Counter(s)
return sum(v & 1 for v in cnt.values()) <= k
| Solution |
python | miyuchina__mistletoe | mistletoe/block_tokenizer.py | {
"start": 47,
"end": 3048
} | class ____:
def __init__(self, lines, start_line=1):
self.lines = lines if isinstance(lines, list) else list(lines)
self.start_line = start_line
self._index = -1
self._anchor = 0
def __next__(self):
if self._index + 1 < len(self.lines):
self._index += 1
return self.lines[self._index]
raise StopIteration
def __iter__(self):
return self
def __repr__(self):
return repr(self.lines[self._index + 1:])
def get_pos(self):
"""Returns the current reading position.
The result is an opaque value which can be passed to `set_pos`."""
return self._index
def set_pos(self, pos):
"""Sets the current reading position."""
self._index = pos
def anchor(self):
"""@deprecated use `get_pos` instead"""
self._anchor = self.get_pos()
def reset(self):
"""@deprecated use `set_pos` instead"""
self.set_pos(self._anchor)
def peek(self):
if self._index + 1 < len(self.lines):
return self.lines[self._index + 1]
return None
def backstep(self):
if self._index != -1:
self._index -= 1
def line_number(self):
return self.start_line + self._index
def tokenize(iterable, token_types):
"""
Searches for token_types in iterable.
Args:
iterable (list): user input lines to be parsed.
token_types (list): a list of block-level token constructors.
Returns:
block-level token instances.
"""
return make_tokens(tokenize_block(iterable, token_types))
def tokenize_block(iterable, token_types, start_line=1):
"""
Returns a list of tuples (token_type, read_result, line_number).
Footnotes are parsed here, but span-level parsing has not
started yet.
"""
lines = FileWrapper(iterable, start_line=start_line)
parse_buffer = ParseBuffer()
line = lines.peek()
while line is not None:
for token_type in token_types:
if token_type.start(line):
line_number = lines.line_number() + 1
result = token_type.read(lines)
if result is not None:
parse_buffer.append((token_type, result, line_number))
break
else: # unmatched newlines
next(lines)
parse_buffer.loose = True
line = lines.peek()
return parse_buffer
def make_tokens(parse_buffer):
"""
Takes a list of tuples (token_type, read_result, line_number),
applies token_type(read_result), and sets the line_number attribute.
Footnotes are already parsed before this point,
and span-level parsing is started here.
"""
tokens = []
for token_type, result, line_number in parse_buffer:
token = token_type(result)
if token is not None:
token.line_number = line_number
tokens.append(token)
return tokens
| FileWrapper |
python | gevent__gevent | src/gevent/resolver/thread.py | {
"start": 187,
"end": 2487
} | class ____(object):
"""
Implementation of the resolver API using native threads and native resolution
functions.
Using the native resolution mechanisms ensures the highest
compatibility with what a non-gevent program would return
including good support for platform specific configuration
mechanisms. The use of native (non-greenlet) threads ensures that
a caller doesn't block other greenlets.
This implementation also has the benefit of being very simple in comparison to
:class:`gevent.resolver_ares.Resolver`.
.. tip::
Most users find this resolver to be quite reliable in a
properly monkey-patched environment. However, there have been
some reports of long delays, slow performance or even hangs,
particularly in long-lived programs that make many, many DNS
requests. If you suspect that may be happening to you, try the
dnspython or ares resolver (and submit a bug report).
"""
def __init__(self, hub=None):
if hub is None:
hub = get_hub()
self.pool = hub.threadpool
if _socket.gaierror not in hub.NOT_ERROR:
# Do not cause lookup failures to get printed by the default
# error handler. This can be very noisy.
hub.NOT_ERROR += (_socket.gaierror, _socket.herror)
def __repr__(self):
return '<%s.%s at 0x%x pool=%r>' % (type(self).__module__,
type(self).__name__,
id(self), self.pool)
def close(self):
pass
# from briefly reading socketmodule.c, it seems that all of the functions
# below are thread-safe in Python, even if they are not thread-safe in C.
def gethostbyname(self, *args):
return self.pool.apply(_socket.gethostbyname, args)
def gethostbyname_ex(self, *args):
return self.pool.apply(_socket.gethostbyname_ex, args)
def getaddrinfo(self, *args, **kwargs):
return self.pool.apply(_socket.getaddrinfo, args, kwargs)
def gethostbyaddr(self, *args, **kwargs):
return self.pool.apply(_socket.gethostbyaddr, args, kwargs)
def getnameinfo(self, *args, **kwargs):
return self.pool.apply(_socket.getnameinfo, args, kwargs)
| Resolver |
python | readthedocs__readthedocs.org | readthedocs/telemetry/models.py | {
"start": 113,
"end": 3447
} | class ____(models.Manager):
"""Manager for the BuildData model."""
def collect(self, build, data):
"""
Save the collected information from a build.
We fill other fields from data we have access to
before saving it, like the project, version, organization, etc.
The final JSON structure should look like:
.. code-block:: json
{
"os": "ubuntu-18.04.5"
"python": "3.10.2",
"organization": {
"id": 1,
"slug": "org"
},
"project": {
"id": 2,
"slug": "docs"
},
"version": {
"id": 1,
"slug": "latest"
},
"build": {
"id": 3,
"start": "2021-04-20-...", # Date in isoformat
"length": "600", # Build length in seconds
"commit": "abcd1234"
"success": true,
},
"config": {
"user": {},
"final": {}
},
"packages": {
"pip": {
"user": [
{
"name": "sphinx",
"version": "3.4.5"
},
],
"all": [
{
"name": "sphinx",
"version": "3.4.5"
},
],
},
"conda": {
"all": [
{
"name": "sphinx",
"channel": "conda-forge",
"version": "0.1"
},
],
},
"apt": {
"user": [
{
"name": "python3-dev",
"version": "3.8.2-0ubuntu2"
},
],
"all": [
{
"name": "python3-dev",
"version": "3.8.2-0ubuntu2"
},
],
},
},
}
"""
data["build"] = {
"id": build.id,
"start": build.date.isoformat(),
"length": build.length,
"commit": build.commit,
"success": build.success,
}
data["project"] = {"id": build.project.id, "slug": build.project.slug}
if build.version:
data["version"] = {
"id": build.version.id,
"slug": build.version.slug,
}
org = build.project.organizations.first()
if org:
data["organization"] = {
"id": org.id,
"slug": org.slug,
}
data["config"]["final"] = build.config
return self.create(data=data)
| BuildDataManager |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 3326,
"end": 3449
} | class ____(BaseModel, extra="forbid"):
binary: "BinaryQuantizationConfig" = Field(..., description="")
| BinaryQuantization |
python | pallets__werkzeug | src/werkzeug/exceptions.py | {
"start": 24114,
"end": 24364
} | class ____(HTTPException):
"""*504* `Gateway Timeout`
Status code you should return if a connection to an upstream server
times out.
"""
code = 504
description = "The connection to an upstream server timed out."
| GatewayTimeout |
python | matplotlib__matplotlib | lib/mpl_toolkits/axisartist/axislines.py | {
"start": 9028,
"end": 9781
} | class ____:
def __init__(self):
self._old_limits = None
super().__init__()
def update_lim(self, axes):
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
if self._old_limits != (x1, x2, y1, y2):
self._update_grid(Bbox.from_extents(x1, y1, x2, y2))
self._old_limits = (x1, x2, y1, y2)
def _update_grid(self, bbox):
"""Cache relevant computations when the axes limits have changed."""
def get_gridlines(self, which, axis):
"""
Return list of grid lines as a list of paths (list of points).
Parameters
----------
which : {"both", "major", "minor"}
axis : {"both", "x", "y"}
"""
return []
| GridHelperBase |
python | getsentry__sentry | src/sentry/issues/endpoints/group_integration_details.py | {
"start": 1759,
"end": 2516
} | class ____(IntegrationSerializer):
def __init__(
self,
group: Group,
action: str,
config: Mapping[str, Any],
) -> None:
self.group = group
self.action = action
self.config = config
def serialize(
self,
obj: Integration | RpcIntegration,
attrs: Mapping[str, Any],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> MutableMapping[str, Any]:
data = super().serialize(obj, attrs, user)
if self.action == "link":
data["linkIssueConfig"] = self.config
if self.action == "create":
data["createIssueConfig"] = self.config
return data
@region_silo_endpoint
| IntegrationIssueConfigSerializer |
python | kamyu104__LeetCode-Solutions | Python/longest-subsequence-with-limited-sum.py | {
"start": 87,
"end": 422
} | class ____(object):
def answerQueries(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[int]
:rtype: List[int]
"""
nums.sort()
for i in xrange(len(nums)-1):
nums[i+1] += nums[i]
return [bisect.bisect_right(nums, q) for q in queries]
| Solution |
python | pennersr__django-allauth | allauth/headless/account/response.py | {
"start": 912,
"end": 1122
} | class ____(APIResponse):
def __init__(self, request, email_addresses):
data = [email_address_data(addr) for addr in email_addresses]
super().__init__(request, data=data)
| EmailAddressesResponse |
python | matplotlib__matplotlib | lib/matplotlib/transforms.py | {
"start": 92179,
"end": 93277
} | class ____(Affine2DBase):
r"""
A transform wrapper for transforming displacements between pairs of points.
This class is intended to be used to transform displacements ("position
deltas") between pairs of points (e.g., as the ``offset_transform``
of `.Collection`\s): given a transform ``t`` such that ``t =
AffineDeltaTransform(t) + offset``, ``AffineDeltaTransform``
satisfies ``AffineDeltaTransform(a - b) == AffineDeltaTransform(a) -
AffineDeltaTransform(b)``.
This is implemented by forcing the offset components of the transform
matrix to zero.
This class is experimental as of 3.3, and the API may change.
"""
pass_through = True
def __init__(self, transform, **kwargs):
super().__init__(**kwargs)
self._base_transform = transform
self.set_children(transform)
__str__ = _make_str_method("_base_transform")
def get_matrix(self):
if self._invalid:
self._mtx = self._base_transform.get_matrix().copy()
self._mtx[:2, -1] = 0
return self._mtx
| AffineDeltaTransform |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeParams5.py | {
"start": 503,
"end": 619
} | class ____[*Ts: int]: ...
# This should generate an error because ParamSpecs don't
# support bound expressions.
| ClassF |
python | kamyu104__LeetCode-Solutions | Python/difference-between-element-sum-and-digit-sum-of-an-array.py | {
"start": 56,
"end": 398
} | class ____(object):
def differenceOfSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def total(x):
result = 0
while x:
result += x%10
x //= 10
return result
return abs(sum(nums)-sum(total(x) for x in nums))
| Solution |
python | doocs__leetcode | solution/3500-3599/3541.Find Most Frequent Vowel and Consonant/Solution.py | {
"start": 0,
"end": 261
} | class ____:
def maxFreqSum(self, s: str) -> int:
cnt = Counter(s)
a = b = 0
for c, v in cnt.items():
if c in "aeiou":
a = max(a, v)
else:
b = max(b, v)
return a + b
| Solution |
python | mamba-org__mamba | micromamba/tests/test_config.py | {
"start": 12815,
"end": 24064
} | class ____:
def test_file_set_single_input(self, rc_file):
config("set", "json", "true", "--file", rc_file)
assert config("get", "json", "--file", rc_file).splitlines() == "json: true".splitlines()
def test_file_set_change_key_value(self, rc_file):
config("set", "json", "true", "--file", rc_file)
config("set", "json", "false", "--file", rc_file)
assert config("get", "json", "--file", rc_file).splitlines() == "json: false".splitlines()
def test_file_set_invalit_input(self, rc_file):
assert (
config("set", "$%#@abc", "--file", rc_file).splitlines()
== "Key is invalid or more than one key was received".splitlines()
)
def test_file_set_multiple_inputs(self, rc_file):
assert (
config(
"set",
"json",
"true",
"clean_tarballs",
"true",
"--file",
rc_file,
).splitlines()
== "Key is invalid or more than one key was received".splitlines()
)
def test_file_remove_single_input(self, rc_file):
config("set", "json", "true", "--file", rc_file)
assert config("remove-key", "json", "--file", rc_file).splitlines() == []
def test_file_remove_non_existent_key(self, rc_file):
assert (
config("remove-key", "json", "--file", rc_file).splitlines()
== "Key is not present in file".splitlines()
)
def test_file_remove_invalid_key(self, rc_file):
assert (
config("remove-key", "^&*&^def", "--file", rc_file).splitlines()
== "Key is not present in file".splitlines()
)
def test_file_remove_vector(self, rc_file):
config("append", "channels", "flowers", "--file", rc_file)
config("remove-key", "channels", "--file", rc_file)
assert (
config("get", "channels", "--file", rc_file).splitlines()
== "Key is not present in file".splitlines()
)
def test_file_remove_vector_value(self, rc_file):
# Backward test compatibility: when an empty file exists, the formatting is different
rc_file.unlink()
config("append", "channels", "totoro", "--file", rc_file)
config("append", "channels", "haku", "--file", rc_file)
config("remove", "channels", "totoro", "--file", rc_file)
assert config("get", "channels", "--file", rc_file).splitlines() == [
"channels:",
" - haku",
]
# TODO: This behavior should be fixed "channels: []"
def test_file_remove_vector_all_values(self, rc_file):
config("append", "channels", "haku", "--file", rc_file)
config("remove", "channels", "haku", "--file", rc_file)
assert config("get", "channels", "--file", rc_file).splitlines() == [
"Key is not present in file"
]
def test_file_remove_vector_nonexistent_value(self, rc_file):
config("append", "channels", "haku", "--file", rc_file)
assert (
config(
"remove",
"channels",
"chihiro",
"--file",
rc_file,
).splitlines()
== "Key is not present in file".splitlines()
)
def test_file_remove_vector_multiple_values(self, rc_file):
config("append", "channels", "haku", "--file", rc_file)
assert (
config(
"remove",
"channels",
"haku",
"chihiro",
"--file",
rc_file,
).splitlines()
== "Only one value can be removed at a time".splitlines()
)
def test_file_append_single_input(self, rc_file):
# Backward test compatibility: when an empty file exists, the formatting is different
rc_file.unlink()
config("append", "channels", "flowers", "--file", rc_file)
assert config("get", "channels", "--file", rc_file).splitlines() == [
"channels:",
" - flowers",
]
def test_file_append_multiple_inputs(self, rc_file):
with open(rc_file, "w") as f:
f.write("channels:\n - foo")
config(
"append",
"channels",
"condesc,mambesc",
"--file",
rc_file,
)
assert (
config("get", "channels", "--file", rc_file).splitlines()
== "channels:\n - foo\n - condesc\n - mambesc".splitlines()
)
def test_file_append_multiple_keys(self, rc_file):
with open(rc_file, "w") as f:
f.write("channels:\n - foo\ndefault_channels:\n - bar")
config(
"append",
"channels",
"condesc,mambesc",
"default_channels",
"condescd,mambescd",
"--file",
rc_file,
)
assert (
config("get", "channels", "--file", rc_file).splitlines()
== "channels:\n - foo\n - condesc\n - mambesc".splitlines()
)
assert (
config("get", "default_channels", "--file", rc_file).splitlines()
== "default_channels:\n - bar\n - condescd\n - mambescd".splitlines()
)
def test_file_append_invalid_input(self, rc_file):
with pytest.raises(subprocess.CalledProcessError):
config("append", "--file", rc_file)
with pytest.raises(subprocess.CalledProcessError):
config("append", "@#A321", "--file", rc_file)
with pytest.raises(subprocess.CalledProcessError):
config("append", "json", "true", "--file", rc_file)
with pytest.raises(subprocess.CalledProcessError):
config(
"append",
"channels",
"foo,bar",
"json",
"true",
"--file",
rc_file,
)
def test_file_prepend_single_input(self, rc_file):
# Backward test compatibility: when an empty file exists, the formatting is different
rc_file.unlink()
config("prepend", "channels", "flowers", "--file", rc_file)
assert config("get", "channels", "--file", rc_file).splitlines() == [
"channels:",
" - flowers",
]
def test_file_prepend_multiple_inputs(self, rc_file):
with open(rc_file, "w") as f:
f.write("channels:\n - foo")
config(
"prepend",
"channels",
"condesc,mambesc",
"--file",
rc_file,
)
assert (
config("get", "channels", "--file", rc_file).splitlines()
== "channels:\n - condesc\n - mambesc\n - foo".splitlines()
)
def test_file_prepend_multiple_keys(self, rc_file):
with open(rc_file, "w") as f:
f.write("channels:\n - foo\ndefault_channels:\n - bar")
config(
"prepend",
"channels",
"condesc,mambesc",
"default_channels",
"condescd,mambescd",
"--file",
rc_file,
)
assert (
config("get", "channels", "--file", rc_file).splitlines()
== "channels:\n - condesc\n - mambesc\n - foo".splitlines()
)
assert (
config("get", "default_channels", "--file", rc_file).splitlines()
== "default_channels:\n - condescd\n - mambescd\n - bar".splitlines()
)
def test_file_prepend_invalid_input(self, rc_file):
with pytest.raises(subprocess.CalledProcessError):
config("prepend", "--file", rc_file)
with pytest.raises(subprocess.CalledProcessError):
config("prepend", "@#A321", "--file", rc_file)
with pytest.raises(subprocess.CalledProcessError):
config("prepend", "json", "true", "--file", rc_file)
with pytest.raises(subprocess.CalledProcessError):
config(
"prepend",
"channels",
"foo,bar",
"json",
"true",
"--file",
rc_file,
)
def test_file_append_and_prepend_inputs(self, rc_file):
# Backward test compatibility: when an empty file exists, the formatting is different
rc_file.unlink()
config("append", "channels", "flowers", "--file", rc_file)
config("prepend", "channels", "powers", "--file", rc_file)
assert config("get", "channels", "--file", rc_file).splitlines() == [
"channels:",
" - powers",
" - flowers",
]
def test_file_set_and_append_inputs(self, rc_file):
# Backward test compatibility: when an empty file exists, the formatting is different
rc_file.unlink()
config("set", "experimental", "true", "--file", rc_file)
config("append", "channels", "gandalf", "--file", rc_file)
config("append", "channels", "legolas", "--file", rc_file)
assert (
config("get", "experimental", "--file", rc_file).splitlines()
== "experimental: true".splitlines()
)
assert config("get", "channels", "--file", rc_file).splitlines() == [
"channels:",
" - gandalf",
" - legolas",
]
def test_file_set_and_prepend_inputs(self, rc_file):
# Backward test compatibility: when an empty file exists, the formatting is different
rc_file.unlink()
config("set", "experimental", "false", "--file", rc_file)
config("prepend", "channels", "zelda", "--file", rc_file)
config("prepend", "channels", "link", "--file", rc_file)
assert (
config("get", "experimental", "--file", rc_file).splitlines()
== "experimental: false".splitlines()
)
assert config("get", "channels", "--file", rc_file).splitlines() == [
"channels:",
" - link",
" - zelda",
]
def test_flag_env_set(self, rc_file):
config("set", "experimental", "false", "--env")
assert (
config("get", "experimental", "--env").splitlines()
== "experimental: false".splitlines()
)
def test_flag_env_file_remove_vector(self, rc_file):
config("prepend", "channels", "thinga-madjiga", "--env")
config("remove-key", "channels", "--env")
assert (
config("get", "channels", "--env").splitlines()
== "Key is not present in file".splitlines()
)
def test_flag_env_file_set_and_append_inputs(self, rc_file):
config("set", "local_repodata_ttl", "2", "--env")
config("append", "channels", "finn", "--env")
config("append", "channels", "jake", "--env")
assert (
config("get", "local_repodata_ttl", "--env").splitlines()
== "local_repodata_ttl: 2".splitlines()
)
assert config("get", "channels", "--env").splitlines() == [
"channels:",
" - finn",
" - jake",
]
| TestConfigModifiers |
python | wandb__wandb | tests/unit_tests/test_launch/test_agent/test_agent.py | {
"start": 388,
"end": 25760
} | class ____(MagicMock):
async def __call__(self, *args, **kwargs):
return super().__call__(*args, **kwargs)
@pytest.fixture
def clean_agent():
LaunchAgent._instance = None
yield
LaunchAgent._instance = None
def _setup(mocker):
mocker.api = MagicMock()
mock_agent_response = {"name": "test-name", "stopPolling": False}
mocker.api.get_launch_agent = MagicMock(return_value=mock_agent_response)
mocker.api.fail_run_queue_item = MagicMock(side_effect=KeyboardInterrupt)
mocker.termlog = MagicMock()
mocker.termwarn = MagicMock()
mocker.termerror = MagicMock()
mocker.wandb_init = MagicMock()
mocker.patch("wandb.termlog", mocker.termlog)
mocker.patch("wandb.termwarn", mocker.termwarn)
mocker.patch("wandb.termerror", mocker.termerror)
mocker.patch("wandb.init", mocker.wandb_init)
mocker.logger = MagicMock()
mocker.patch("wandb.sdk.launch.agent.agent._logger", mocker.logger)
mocker.status = MagicMock()
mocker.status.state = "running"
mocker.run = MagicMock()
# async def _mock_get_status(*args, **kwargs):
# return mocker.status
mocker.run.get_status = AsyncMock(return_value=mocker.status)
mocker.runner = MagicMock()
async def _mock_runner_run(*args, **kwargs):
return mocker.run
mocker.runner.run = _mock_runner_run
mocker.patch(
"wandb.sdk.launch.agent.agent.loader.runner_from_config",
return_value=mocker.runner,
)
@pytest.mark.asyncio
async def test_loop_capture_stack_trace(mocker, clean_agent):
_setup(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
agent = LaunchAgent(api=mocker.api, config=mock_config)
agent.run_job = AsyncMock()
agent.run_job.side_effect = [None, None, Exception("test exception")]
agent.pop_from_queue = AsyncMock(return_value=MagicMock())
await agent.loop()
assert "Traceback (most recent call last):" in mocker.termerror.call_args[0][0]
@pytest.mark.asyncio
async def test_run_job_secure_mode(mocker, clean_agent):
_setup(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
"secure_mode": True,
}
agent = LaunchAgent(api=mocker.api, config=mock_config)
jobs = [
{
"runSpec": {
"resource_args": {
"kubernetes": {"spec": {"template": {"spec": {"hostPID": True}}}}
}
}
},
{
"runSpec": {
"resource_args": {
"kubernetes": {
"spec": {
"template": {
"spec": {
"containers": [{}, {"command": ["some", "code"]}]
}
}
}
}
}
}
},
{"runSpec": {"overrides": {"entry_point": ["some", "code"]}}},
]
errors = [
'This agent is configured to lock "hostPID" in pod spec but the job specification attempts to override it.',
'This agent is configured to lock "command" in container spec but the job specification attempts to override it.',
'This agent is configured to lock the "entrypoint" override but the job specification attempts to override it.',
]
mock_file_saver = MagicMock()
for job, error in zip(jobs, errors):
with pytest.raises(ValueError, match=error):
await agent.run_job(job, "test-queue", mock_file_saver)
def _setup_requeue(mocker):
_setup(mocker)
mocker.event = MagicMock()
mocker.event.is_set = MagicMock(return_value=True)
mocker.status = MagicMock()
mocker.status.state = "preempted"
mocker.run = MagicMock()
_mock_get_status = AsyncMock(return_value=mocker.status)
mocker.run.get_status = _mock_get_status
mocker.runner = MagicMock()
mocker.runner.run = AsyncMock(return_value=mocker.run)
mocker.launch_add = MagicMock()
mocker.project = MagicMock()
mocker.project.target_entity = "test-entity"
mocker.project.run_id = "test-run-id"
mocker.patch(
"wandb.sdk.launch.agent.agent.LaunchProject.from_spec",
return_value=mocker.project,
)
mocker.patch(
"wandb.sdk.launch.agent.agent.loader.builder_from_config",
return_value=None,
)
mocker.patch(
"wandb.sdk.launch.agent.agent.loader.runner_from_config",
return_value=mocker.runner,
)
mocker.api.fail_run_queue_item = MagicMock()
mocker.patch("wandb.sdk.launch.agent.agent.launch_add", mocker.launch_add)
@pytest.mark.asyncio
async def test_requeue_on_preemption(mocker, clean_agent):
_setup_requeue(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
mock_job = {
"runQueueItemId": "test-id",
}
mock_launch_spec = {}
agent = LaunchAgent(api=mocker.api, config=mock_config)
job_tracker = JobAndRunStatusTracker(
mock_job["runQueueItemId"], "test-queue", MagicMock(), entity="test-entity"
)
assert job_tracker.entity == "test-entity"
await agent.task_run_job(
launch_spec=mock_launch_spec,
job=mock_job,
default_config={},
api=mocker.api,
job_tracker=job_tracker,
)
expected_config = {"run_id": "test-run-id", "_resume_count": 1}
mocker.launch_add.assert_called_once_with(
config=expected_config,
project_queue=LAUNCH_DEFAULT_PROJECT,
queue_name="test-queue",
)
def test_team_entity_warning(mocker, clean_agent):
_setup(mocker)
mocker.api.entity_is_team = MagicMock(return_value=True)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
_ = LaunchAgent(api=mocker.api, config=mock_config)
assert "Agent is running on team entity" in mocker.termwarn.call_args[0][0]
def test_non_team_entity_no_warning(mocker, clean_agent):
_setup(mocker)
mocker.api.entity_is_team = MagicMock(return_value=False)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
_ = LaunchAgent(api=mocker.api, config=mock_config)
assert not mocker.termwarn.call_args
@pytest.mark.parametrize(
"num_schedulers",
[0, -1, 1000000, "8", None],
)
def test_max_scheduler_setup(mocker, num_schedulers, clean_agent):
_setup(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
"max_schedulers": num_schedulers,
}
agent = LaunchAgent(api=mocker.api, config=mock_config)
if num_schedulers is None:
num_schedulers = 1 # default for none
elif num_schedulers == -1:
num_schedulers = float("inf")
elif isinstance(num_schedulers, str):
num_schedulers = int(num_schedulers)
assert agent._max_schedulers == num_schedulers
@pytest.mark.parametrize(
"num_schedulers",
[-29, "weird"],
)
def test_max_scheduler_setup_fail(mocker, num_schedulers, clean_agent):
_setup(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
"max_schedulers": num_schedulers,
}
with pytest.raises(LaunchError):
LaunchAgent(api=mocker.api, config=mock_config)
def _setup_thread_finish(mocker):
mocker.api = MagicMock()
mock_agent_response = {"name": "test-name", "stopPolling": False}
mocker.api.get_launch_agent = MagicMock(return_value=mock_agent_response)
mocker.api.fail_run_queue_item = MagicMock()
mocker.termlog = MagicMock()
mocker.termerror = MagicMock()
mocker.wandb_init = MagicMock()
mocker.patch("wandb.termlog", mocker.termlog)
mocker.patch("wandb.termerror", mocker.termerror)
mocker.patch("wandb.init", mocker.wandb_init)
@pytest.mark.asyncio
async def test_thread_finish_no_fail(mocker, clean_agent):
_setup_thread_finish(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
mocker.api.get_run_state = MagicMock(return_value=lambda x: True)
agent = LaunchAgent(api=mocker.api, config=mock_config)
mock_saver = MagicMock()
job = JobAndRunStatusTracker("run_queue_item_id", "test-queue", mock_saver)
job.run_id = "test_run_id"
job.project = MagicMock()
agent._jobs = {"thread_1": job}
await agent.finish_thread_id("thread_1")
assert len(agent._jobs) == 0
assert not mocker.api.fail_run_queue_item.called
assert not mock_saver.save_contents.called
@pytest.mark.asyncio
async def test_thread_finish_sweep_fail(mocker, clean_agent):
"""Test thread finished with 0 exit status, but sweep didn't init."""
_setup_thread_finish(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
mocker.api.get_run_state = MagicMock(return_value="pending")
mocker.patch("wandb.sdk.launch.agent.agent.RUN_INFO_GRACE_PERIOD", 1)
agent = LaunchAgent(api=mocker.api, config=mock_config)
mock_saver = MagicMock()
job = JobAndRunStatusTracker("run_queue_item_id", "test-queue", mock_saver)
job.run_id = "test_run_id"
job.project = MagicMock()
run = MagicMock()
async def mock_get_logs():
return "logs"
run.get_logs = mock_get_logs
job.run = run
agent._jobs = {"thread_1": job}
await agent.finish_thread_id("thread_1")
assert len(agent._jobs) == 0
mocker.api.fail_run_queue_item.assert_called_once()
mock_saver.save_contents.assert_called_once()
@pytest.mark.asyncio
async def test_thread_finish_run_fail(mocker, clean_agent):
_setup_thread_finish(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
mocker.api.get_run_state.side_effect = CommError("failed")
mocker.patch("wandb.sdk.launch.agent.agent.RUN_INFO_GRACE_PERIOD", 1)
agent = LaunchAgent(api=mocker.api, config=mock_config)
mock_saver = MagicMock()
job = JobAndRunStatusTracker("run_queue_item_id", "test-queue", mock_saver)
job.run_id = "test_run_id"
job.project = MagicMock()
run = MagicMock()
async def mock_get_logs():
return "logs"
run.get_logs = mock_get_logs
job.run = run
agent._jobs = {"thread_1": job}
await agent.finish_thread_id("thread_1")
assert len(agent._jobs) == 0
mocker.api.fail_run_queue_item.assert_called_once()
mock_saver.save_contents.assert_called_once()
@pytest.mark.asyncio
async def test_thread_finish_run_fail_start(mocker, clean_agent):
"""Tests that if a run does not exist, the run queue item is failed."""
_setup_thread_finish(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
mocker.api.get_run_state.side_effect = CommError("failed")
mocker.patch("wandb.sdk.launch.agent.agent.RUN_INFO_GRACE_PERIOD", 1)
agent = LaunchAgent(api=mocker.api, config=mock_config)
mock_saver = MagicMock()
job = JobAndRunStatusTracker("run_queue_item_id", "test-queue", mock_saver)
job.run_id = "test_run_id"
job.project = "test-project"
run = MagicMock()
async def mock_get_logs():
return "logs"
run.get_logs = mock_get_logs
job.run = run
job.run_queue_item_id = "asdasd"
agent._jobs = {"thread_1": job}
agent._jobs_lock = MagicMock()
await agent.finish_thread_id("thread_1")
assert len(agent._jobs) == 0
mocker.api.fail_run_queue_item.assert_called_once()
mock_saver.save_contents.assert_called_once()
@pytest.mark.asyncio
async def test_thread_finish_run_fail_start_old_server(mocker, clean_agent):
"""Tests that if a run does not exist, the run queue item is not failed for old servers."""
_setup_thread_finish(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
mocker.api.get_run_state.side_effect = CommError("failed")
mocker.patch("wandb.sdk.launch.agent.agent.RUN_INFO_GRACE_PERIOD", 1)
agent = LaunchAgent(api=mocker.api, config=mock_config)
agent._gorilla_supports_fail_run_queue_items = False
mock_saver = MagicMock()
job = JobAndRunStatusTracker("run_queue_item_id", "test-queue", mock_saver)
job.run_id = "test_run_id"
job.run_queue_item_id = "asdasd"
job.project = "test-project"
run = MagicMock()
async def mock_get_logs():
return "logs"
run.get_logs = mock_get_logs
job.run = run
agent._jobs_lock = MagicMock()
agent._jobs = {"thread_1": job}
await agent.finish_thread_id("thread_1")
assert len(agent._jobs) == 0
mocker.api.fail_run_queue_item.assert_not_called()
@pytest.mark.asyncio
async def test_thread_finish_run_fail_different_entity(mocker, clean_agent):
"""Tests that no check is made if the agent entity does not match."""
_setup_thread_finish(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
agent = LaunchAgent(api=mocker.api, config=mock_config)
mock_saver = MagicMock()
job = JobAndRunStatusTracker("run_queue_item_id", "test-queue", mock_saver)
job.run_id = "test_run_id"
job.project = "test-project"
job.entity = "other-entity"
agent._jobs = {"thread_1": job}
agent._jobs_lock = MagicMock()
await agent.finish_thread_id("thread_1")
assert len(agent._jobs) == 0
assert not mocker.api.fail_run_queue_item.called
assert not mock_saver.save_contents.called
@pytest.mark.asyncio
async def test_agent_fails_sweep_state(mocker, clean_agent):
_setup_thread_finish(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
def mock_set_sweep_state(sweep, entity, project, state):
assert entity == "test-entity"
assert project == "test-project"
assert sweep == "test-sweep-id"
assert state == "CANCELED"
mocker.api.set_sweep_state = mock_set_sweep_state
agent = LaunchAgent(api=mocker.api, config=mock_config)
mock_saver = MagicMock()
job = JobAndRunStatusTracker("run_queue_item_id", "_queue", mock_saver)
job.completed_status = "failed"
job.run_id = "test-sweep-id"
job.is_scheduler = True
job.entity = "test-entity"
job.project = "test-project"
run = MagicMock()
run.get_status.return_value.state = "failed"
job.run = run
# should detect failed scheduler, set sweep state to CANCELED
out = await agent._check_run_finished(job, {})
assert job.completed_status == "failed"
assert out, "True when status successfully updated"
@pytest.mark.skipif(platform.system() == "Windows", reason="fails on windows")
@pytest.mark.asyncio
async def test_thread_finish_no_run(mocker, clean_agent):
"""Test that we fail RQI when the job exits 0 but there is no run."""
_setup_thread_finish(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
mocker.api.get_run_state.side_effect = CommError("failed")
agent = LaunchAgent(api=mocker.api, config=mock_config)
mock_saver = MagicMock()
job = JobAndRunStatusTracker(
"run_queue_item_id", "test-queue", mock_saver, run=MagicMock()
)
job.run_id = "test_run_id"
job.project = MagicMock()
job.completed_status = "finished"
agent._jobs = {"thread_1": job}
mocker.patch("wandb.sdk.launch.agent.agent.RUN_INFO_GRACE_PERIOD", 0)
await agent.finish_thread_id("thread_1")
assert mocker.api.fail_run_queue_item.called
assert mocker.api.fail_run_queue_item.call_args[0][0] == "run_queue_item_id"
assert (
mocker.api.fail_run_queue_item.call_args[0][1]
== "The submitted job exited successfully but failed to call wandb.init"
)
@pytest.mark.skipif(platform.system() == "Windows", reason="fails on windows")
@pytest.mark.asyncio
async def test_thread_failed_no_run(mocker, clean_agent):
"""Test that we fail RQI when the job exits non-zero but there is no run."""
_setup_thread_finish(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
mocker.api.get_run_state.side_effect = CommError("failed")
agent = LaunchAgent(api=mocker.api, config=mock_config)
mock_saver = MagicMock()
job = JobAndRunStatusTracker(
"run_queue_item_id", "test-queue", mock_saver, run=MagicMock()
)
job.run_id = "test_run_id"
job.project = MagicMock()
job.completed_status = "failed"
agent._jobs = {"thread_1": job}
mocker.patch("wandb.sdk.launch.agent.agent.RUN_INFO_GRACE_PERIOD", 0)
await agent.finish_thread_id("thread_1")
assert mocker.api.fail_run_queue_item.called
assert mocker.api.fail_run_queue_item.call_args[0][0] == "run_queue_item_id"
assert (
mocker.api.fail_run_queue_item.call_args[0][1]
== "The submitted run was not successfully started"
)
@pytest.mark.timeout(90)
@pytest.mark.asyncio
async def test_thread_finish_run_info_backoff(mocker, clean_agent):
"""Test that our retry + backoff logic for run info works.
This test should take at least 60 seconds.
"""
_setup_thread_finish(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
mocker.patch("asyncio.sleep", AsyncMock())
mocker.api.get_run_state.side_effect = CommError("failed")
agent = LaunchAgent(api=mocker.api, config=mock_config)
submitted_run = MagicMock()
submitted_run.get_logs = AsyncMock(return_value="test logs")
mock_saver = MagicMock()
job = JobAndRunStatusTracker(
"run_queue_item_id", "test-queue", mock_saver, run=submitted_run
)
job.run_id = "test_run_id"
job.project = MagicMock()
job.completed_status = "failed"
agent._jobs = {"thread_1": job}
agent._jobs_lock = MagicMock()
await agent.finish_thread_id("thread_1")
assert mocker.api.fail_run_queue_item.called
# we should be able to call get_run_state at 0, 1, 3, 7, 15, 31, 63 seconds
assert mocker.api.get_run_state.call_count == 7
@pytest.mark.parametrize(
"exception",
[
LaunchDockerError("launch docker error"),
LaunchError("launch error"),
Exception("exception"),
None,
],
)
@pytest.mark.asyncio
async def test_thread_run_job_calls_finish_thread_id(mocker, exception, clean_agent):
_setup(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
mock_saver = MagicMock()
job = JobAndRunStatusTracker(
"run_queue_item_id", "test-queue", mock_saver, run=MagicMock()
)
agent = LaunchAgent(api=mocker.api, config=mock_config)
def mock_thread_run_job(*args, **kwargs):
if exception is not None:
raise exception
return asyncio.sleep(0)
agent._task_run_job = mock_thread_run_job
mock_finish_thread_id = AsyncMock()
agent.finish_thread_id = mock_finish_thread_id
await agent.task_run_job({}, dict(runQueueItemId="rqi-xxxx"), {}, MagicMock(), job)
mock_finish_thread_id.assert_called_once_with("rqi-xxxx", exception)
@pytest.mark.asyncio
async def test_inner_thread_run_job(mocker, clean_agent):
_setup(mocker)
mocker.patch("wandb.sdk.launch.agent.agent.DEFAULT_STOPPED_RUN_TIMEOUT", new=0)
mocker.patch("wandb.sdk.launch.agent.agent.AGENT_POLLING_INTERVAL", new=0)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
mock_saver = MagicMock()
job = JobAndRunStatusTracker(
"run_queue_item_id", "test-queue", mock_saver, run=MagicMock()
)
agent = LaunchAgent(api=mocker.api, config=mock_config)
mock_spec = {
"docker": {"docker_image": "blah-blah:latest"},
"entity": "user",
"project": "test",
}
mocker.api.check_stop_requested = True
def _side_effect(*args, **kwargs):
job.completed_status = True
mocker.run.cancel = AsyncMock(side_effect=_side_effect)
await agent._task_run_job(
mock_spec,
{"runQueueItemId": "blah"},
{},
mocker.api,
threading.current_thread().ident,
job,
)
mocker.run.cancel.assert_called_once()
@pytest.mark.asyncio
async def test_raise_warnings(mocker, clean_agent):
_setup(mocker)
mocker.status = MagicMock()
mocker.status.state = "preempted"
mocker.status.messages = ["Test message"]
mocker.run = MagicMock()
_mock_get_status = AsyncMock(return_value=mocker.status)
mocker.run.get_status = _mock_get_status
mocker.runner = MagicMock()
mocker.runner.run = AsyncMock(return_value=mocker.run)
mocker.patch(
"wandb.sdk.launch.agent.agent.loader.runner_from_config",
return_value=mocker.runner,
)
mocker.patch("wandb.sdk.launch.agent.agent.DEFAULT_STOPPED_RUN_TIMEOUT", new=0)
mocker.patch("wandb.sdk.launch.agent.agent.AGENT_POLLING_INTERVAL", new=0)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
job = JobAndRunStatusTracker(
"run_queue_item_id", "test-queue", MagicMock(), run=mocker.run
)
agent = LaunchAgent(api=mocker.api, config=mock_config)
mock_spec = {
"docker": {"docker_image": "blah-blah:latest"},
"entity": "user",
"project": "test",
}
await agent._task_run_job(
mock_spec,
{"runQueueItemId": "blah"},
{},
mocker.api,
threading.current_thread().ident,
job,
)
assert agent._known_warnings == ["Test message"]
mocker.api.update_run_queue_item_warning.assert_called_once_with(
"run_queue_item_id", "Test message", "Kubernetes", []
)
@pytest.mark.asyncio
async def test_get_job_and_queue(mocker):
_setup(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
"queues": ["queue-1", "queue-2", "queue-3"],
}
mock_job = {"test-key": "test-value"}
agent = LaunchAgent(api=mocker.api, config=mock_config)
agent.pop_from_queue = AsyncMock(return_value=mock_job)
job_and_queue = await agent.get_job_and_queue()
assert job_and_queue is not None
assert job_and_queue.job == mock_job
assert job_and_queue.queue == "queue-1"
assert agent._queues == ["queue-2", "queue-3", "queue-1"]
def test_get_agent_name(mocker, clean_agent):
with pytest.raises(LaunchError):
LaunchAgent.name()
_setup(mocker)
mock_config = {
"entity": "test-entity",
"project": "test-project",
}
LaunchAgent(api=mocker.api, config=mock_config)
assert LaunchAgent.name() == "test-name"
def test_agent_logger(mocker):
_setup(mocker)
# Normal logger
logger = InternalAgentLogger()
logger.error("test 1")
mocker.termerror.assert_not_called()
mocker.logger.error.assert_called_once_with(f"{LOG_PREFIX}test 1")
logger.warn("test 2")
mocker.termwarn.assert_not_called()
mocker.logger.warning.assert_called_once_with(f"{LOG_PREFIX}test 2")
logger.info("test 3")
mocker.termlog.assert_not_called()
mocker.logger.info.assert_called_once_with(f"{LOG_PREFIX}test 3")
logger.debug("test 4")
mocker.termlog.assert_not_called()
mocker.logger.debug.assert_called_once_with(f"{LOG_PREFIX}test 4")
# Verbose logger
logger = InternalAgentLogger(verbosity=2)
logger.error("test 5")
mocker.termerror.assert_called_with(f"{LOG_PREFIX}test 5")
mocker.logger.error.assert_called_with(f"{LOG_PREFIX}test 5")
logger.warn("test 6")
mocker.termwarn.assert_called_with(f"{LOG_PREFIX}test 6")
mocker.logger.warning.assert_called_with(f"{LOG_PREFIX}test 6")
logger.info("test 7")
mocker.termlog.assert_called_with(f"{LOG_PREFIX}test 7")
mocker.logger.info.assert_called_with(f"{LOG_PREFIX}test 7")
logger.debug("test 8")
mocker.termlog.assert_called_with(f"{LOG_PREFIX}test 8")
mocker.logger.debug.assert_called_with(f"{LOG_PREFIX}test 8")
def test_agent_inf_jobs(mocker):
config = {
"entity": "mock_server_entity",
"project": "test_project",
"queues": ["default"],
"max_jobs": -1,
}
mocker.patch(
"wandb.sdk.launch.agent.agent.LaunchAgent._init_agent_run", lambda x: None
)
agent = LaunchAgent(MagicMock(), config)
assert agent._max_jobs == float("inf")
@pytest.mark.asyncio
async def test_run_job_api_key_redaction(mocker):
"""Test that API keys are redacted when logging job details in run_job method."""
_setup(mocker)
mock_term_log = mocker.termlog
job_data = {
"runQueueItemId": "test-queue-item-id",
"runSpec": {
"_wandb_api_key": "test_api_key",
"docker": {"docker_image": "test-image"},
"project": "test-project",
},
}
agent = LaunchAgent(
api=mocker.api, config={"entity": "test-entity", "project": "test-project"}
)
agent.update_status = AsyncMock()
agent.task_run_job = AsyncMock()
await agent.run_job(job_data, "test-queue", MagicMock())
log_message = mock_term_log.call_args[0][0]
assert "<redacted>" in log_message
assert "test_api_key" not in log_message
| AsyncMock |
python | django-debug-toolbar__django-debug-toolbar | debug_toolbar/panels/sql/tracking.py | {
"start": 3773,
"end": 3967
} | class ____:
def __init__(self, cursor, db, logger):
super().__init__(cursor, db)
# logger must implement a ``record`` method
self.logger = logger
| DjDTCursorWrapperMixin |
python | gevent__gevent | src/greentest/3.14/test__interpreters.py | {
"start": 7475,
"end": 10620
} | class ____(TestBase):
def test_in_main(self):
id = _interpreters.create()
self.assertIsInstance(id, int)
after = [id for id, *_ in _interpreters.list_all()]
self.assertIn(id, after)
@unittest.skip('enable this test when working on pystate.c')
def test_unique_id(self):
seen = set()
for _ in range(100):
id = _interpreters.create()
_interpreters.destroy(id)
seen.add(id)
self.assertEqual(len(seen), 100)
@support.skip_if_sanitizer('gh-129824: race on tp_flags', thread=True)
def test_in_thread(self):
lock = threading.Lock()
id = None
def f():
nonlocal id
id = _interpreters.create()
lock.acquire()
lock.release()
t = threading.Thread(target=f)
with lock:
t.start()
t.join()
after = set(id for id, *_ in _interpreters.list_all())
self.assertIn(id, after)
def test_in_subinterpreter(self):
main, = [id for id, *_ in _interpreters.list_all()]
id1 = _interpreters.create()
out = _run_output(id1, dedent("""
import _interpreters
id = _interpreters.create()
print(id)
assert isinstance(id, int)
"""))
id2 = int(out.strip())
after = set(id for id, *_ in _interpreters.list_all())
self.assertEqual(after, {main, id1, id2})
def test_in_threaded_subinterpreter(self):
main, = [id for id, *_ in _interpreters.list_all()]
id1 = _interpreters.create()
id2 = None
def f():
nonlocal id2
out = _run_output(id1, dedent("""
import _interpreters
id = _interpreters.create()
print(id)
"""))
id2 = int(out.strip())
t = threading.Thread(target=f)
t.start()
t.join()
after = set(id for id, *_ in _interpreters.list_all())
self.assertEqual(after, {main, id1, id2})
def test_after_destroy_all(self):
before = set(id for id, *_ in _interpreters.list_all())
# Create 3 subinterpreters.
ids = []
for _ in range(3):
id = _interpreters.create()
ids.append(id)
# Now destroy them.
for id in ids:
_interpreters.destroy(id)
# Finally, create another.
id = _interpreters.create()
after = set(id for id, *_ in _interpreters.list_all())
self.assertEqual(after, before | {id})
def test_after_destroy_some(self):
before = set(id for id, *_ in _interpreters.list_all())
# Create 3 subinterpreters.
id1 = _interpreters.create()
id2 = _interpreters.create()
id3 = _interpreters.create()
# Now destroy 2 of them.
_interpreters.destroy(id1)
_interpreters.destroy(id3)
# Finally, create another.
id = _interpreters.create()
after = set(id for id, *_ in _interpreters.list_all())
self.assertEqual(after, before | {id, id2})
| CreateTests |
python | astropy__astropy | astropy/wcs/utils.py | {
"start": 8933,
"end": 46913
} | class ____:
def __init__(self, mappings=[]):
if callable(mappings):
mappings = [mappings]
FRAME_WCS_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
FRAME_WCS_MAPPINGS.pop()
def wcs_to_celestial_frame(wcs):
"""
For a given WCS, return the coordinate frame that matches the celestial
component of the WCS.
Parameters
----------
wcs : :class:`~astropy.wcs.WCS` instance
The WCS to find the frame for
Returns
-------
frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.BaseCoordinateFrame`
subclass instance that best matches the specified WCS.
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a :class:`~astropy.wcs.WCS`
instance and should return either an instance of a frame, or `None` if no
matching frame was found. You can register this function temporarily with::
>>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_wcs_to_frame_mappings
>>> with custom_wcs_to_frame_mappings(my_function):
... wcs_to_celestial_frame(...)
"""
for mapping_set in WCS_FRAME_MAPPINGS:
for func in mapping_set:
frame = func(wcs)
if frame is not None:
return frame
raise ValueError(
"Could not determine celestial frame corresponding to the specified WCS object"
)
def celestial_frame_to_wcs(frame, projection="TAN"):
"""
For a given coordinate frame, return the corresponding WCS object.
Note that the returned WCS object has only the elements corresponding to
coordinate frames set (e.g. ctype, equinox, radesys).
Parameters
----------
frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.BaseCoordinateFrame`
subclass instance for which to find the WCS
projection : str
Projection code to use in ctype, if applicable
Returns
-------
wcs : :class:`~astropy.wcs.WCS` instance
The corresponding WCS object
Examples
--------
::
>>> from astropy.wcs.utils import celestial_frame_to_wcs
>>> from astropy.coordinates import FK5
>>> frame = FK5(equinox='J2010')
>>> wcs = celestial_frame_to_wcs(frame)
>>> wcs.to_header()
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection
CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection
CRVAL1 = 0.0 / [deg] Coordinate value at reference point
CRVAL2 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
RADESYS = 'FK5' / Equatorial coordinate system
EQUINOX = 2010.0 / [yr] Equinox of equatorial coordinates
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a
:class:`~astropy.coordinates.BaseCoordinateFrame` subclass
instance and a projection (given as a string) and should return either a WCS
instance, or `None` if the WCS could not be determined. You can register
this function temporarily with::
>>> from astropy.wcs.utils import celestial_frame_to_wcs, custom_frame_to_wcs_mappings
>>> with custom_frame_to_wcs_mappings(my_function):
... celestial_frame_to_wcs(...)
"""
for mapping_set in FRAME_WCS_MAPPINGS:
for func in mapping_set:
wcs = func(frame, projection=projection)
if wcs is not None:
return wcs
raise ValueError(
"Could not determine WCS corresponding to the specified coordinate frame."
)
def proj_plane_pixel_scales(wcs):
"""
For a WCS returns pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
non-linear nature of most projections.
.. note::
In order to compute the scales corresponding to celestial axes only,
make sure that the input `~astropy.wcs.WCS` object contains
celestial axes only, e.g., by passing in the
`~astropy.wcs.WCS.celestial` WCS object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
scale : ndarray
A vector (`~numpy.ndarray`) of projection plane increments
corresponding to each pixel side (axis). The units of the returned
results are the same as the units of `~astropy.wcs.Wcsprm.cdelt`,
`~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` for
the celestial WCS and can be obtained by inquiring the value
of `~astropy.wcs.Wcsprm.cunit` property of the input
`~astropy.wcs.WCS` WCS object.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
"""
return np.sqrt((wcs.pixel_scale_matrix**2).sum(axis=0, dtype=float))
def proj_plane_pixel_area(wcs):
"""
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`) returns pixel
area of the image pixel at the ``CRPIX`` location once it is projected
onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
non-linear nature of most projections.
.. note::
In order to compute the area of pixels corresponding to celestial
axes only, this function uses the `~astropy.wcs.WCS.celestial` WCS
object of the input ``wcs``. This is different from the
`~astropy.wcs.utils.proj_plane_pixel_scales` function
that computes the scales for the axes of the input WCS itself.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
area : float
Area (in the projection plane) of the pixel at ``CRPIX`` location.
The units of the returned result are the same as the units of
the `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`,
and `~astropy.wcs.Wcsprm.cd` for the celestial WCS and can be
obtained by inquiring the value of `~astropy.wcs.Wcsprm.cunit`
property of the `~astropy.wcs.WCS.celestial` WCS object.
Raises
------
ValueError
Pixel area is defined only for 2D pixels. Most likely the
`~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
WCS is not a square matrix of second order.
Notes
-----
Depending on the application, square root of the pixel area can be used to
represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
"""
psm = wcs.celestial.pixel_scale_matrix
if psm.shape != (2, 2):
raise ValueError("Pixel area is defined only for 2D pixels.")
return np.abs(np.linalg.det(psm))
def is_proj_plane_distorted(wcs, maxerr=1.0e-5):
    r"""
    For a WCS returns `False` if square image (detector) pixels stay square
    when projected onto the "plane of intermediate world coordinates"
    as defined in
    `Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.

    It will return `True` if transformation from image (detector) coordinates
    to the focal plane coordinates is non-orthogonal or if WCS contains
    non-linear (e.g., SIP) distortions.

    .. note::
        Since this function is concerned **only** about the transformation
        "image plane"->"focal plane" and **not** about the transformation
        "celestial sphere"->"focal plane"->"image plane",
        this function ignores distortions arising due to non-linear nature
        of most projections.

    Let's denote by *C* either the original or the reconstructed
    (from ``PC`` and ``CDELT``) CD matrix. `is_proj_plane_distorted`
    verifies that the transformation from image (detector) coordinates
    to the focal plane coordinates is orthogonal using the following
    check:

    .. math::
        \left \| \frac{C \cdot C^{\mathrm{T}}}
        {| det(C)|} - I \right \|_{\mathrm{max}} < \epsilon .

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        World coordinate system object

    maxerr : float, optional
        Accuracy to which the CD matrix, **normalized** such
        that :math:`|det(CD)|=1`, should be close to being an
        orthogonal matrix as described in the above equation
        (see :math:`\epsilon`).

    Returns
    -------
    distorted : bool
        Returns `True` if focal (projection) plane is distorted and `False`
        otherwise.
    """
    celestial_wcs = wcs.celestial
    # Evaluate orthogonality first so that a singular / non-square CD matrix
    # raises ValueError before any distortion attributes are consulted
    # (matches the original short-circuit evaluation order).
    orthogonal = _is_cd_orthogonal(celestial_wcs.pixel_scale_matrix, maxerr)
    return (not orthogonal) or _has_distortion(celestial_wcs)
def _is_cd_orthogonal(cd, maxerr):
shape = cd.shape
if not (len(shape) == 2 and shape[0] == shape[1]):
raise ValueError("CD (or PC) matrix must be a 2D square matrix.")
pixarea = np.abs(np.linalg.det(cd))
if pixarea == 0.0:
raise ValueError("CD (or PC) matrix is singular.")
# NOTE: Technically, below we should use np.dot(cd, np.conjugate(cd.T))
# However, I am not aware of complex CD/PC matrices...
I = np.dot(cd, cd.T) / pixarea
cd_unitary_err = np.amax(np.abs(I - np.eye(shape[0])))
return cd_unitary_err < maxerr
def non_celestial_pixel_scales(inwcs):
    """
    Calculate the pixel scale along each axis of a non-celestial WCS,
    for example one with mixed spectral and spatial axes.

    Parameters
    ----------
    inwcs : `~astropy.wcs.WCS`
        The world coordinate system object.

    Returns
    -------
    scale : `numpy.ndarray`
        The pixel scale along each axis.
    """
    if inwcs.is_celestial:
        raise ValueError("WCS is celestial, use celestial_pixel_scales instead")

    pccd = inwcs.pixel_scale_matrix

    # Only a diagonal scale matrix has well-defined per-axis scales.
    off_diagonal = np.extract(1 - np.eye(*pccd.shape), pccd)
    if not np.allclose(off_diagonal, 0):
        raise ValueError("WCS is rotated, cannot determine consistent pixel scales")
    return np.abs(np.diagonal(pccd)) * u.deg
def _has_distortion(wcs):
"""
`True` if contains any SIP or image distortion components.
"""
return any(
getattr(wcs, dist_attr) is not None
for dist_attr in ["cpdis1", "cpdis2", "det2im1", "det2im2", "sip"]
)
# TODO: in future, we should think about how the following two functions can be
# integrated better into the WCS class.
def skycoord_to_pixel(coords, wcs, origin=0, mode="all"):
    """
    Convert a set of SkyCoord coordinates into pixels.

    Parameters
    ----------
    coords : `~astropy.coordinates.SkyCoord`
        The coordinates to convert.
    wcs : `~astropy.wcs.WCS`
        The WCS transformation to use.
    origin : int
        Whether to return 0 or 1-based pixel coordinates.
    mode : 'all' or 'wcs'
        Whether to do the transformation including distortions (``'all'``) or
        only including only the core WCS transformation (``'wcs'``).

    Returns
    -------
    xp, yp : `numpy.ndarray`
        The pixel coordinates

    See Also
    --------
    astropy.coordinates.SkyCoord.from_pixel
    """
    if _has_distortion(wcs) and wcs.naxis != 2:
        raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")

    # Keep only the celestial part of the axes, also re-orders lon/lat
    wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
    if wcs.naxis != 2:
        raise ValueError("WCS should contain celestial component")

    # Check which frame the WCS uses
    frame = wcs_to_celestial_frame(wcs)

    # Check what unit the WCS needs
    xw_unit = u.Unit(wcs.wcs.cunit[0])
    yw_unit = u.Unit(wcs.wcs.cunit[1])

    # Convert positions to frame
    coords = coords.transform_to(frame)

    # Extract longitude and latitude. We first try and use lon/lat directly,
    # but if the representation is not spherical or unit spherical this will
    # fail. We should then force the use of the unit spherical
    # representation. We don't do that directly to make sure that we preserve
    # custom lon/lat representations if available.
    try:
        lon = coords.data.lon.to(xw_unit)
        lat = coords.data.lat.to(yw_unit)
    except AttributeError:
        lon = coords.spherical.lon.to(xw_unit)
        lat = coords.spherical.lat.to(yw_unit)

    # Select the pixel transformation, then convert
    if mode == "all":
        world2pix = wcs.all_world2pix
    elif mode == "wcs":
        world2pix = wcs.wcs_world2pix
    else:
        raise ValueError("mode should be either 'all' or 'wcs'")
    xp, yp = world2pix(lon.value, lat.value, origin)

    return xp, yp
def pixel_to_skycoord(xp, yp, wcs, origin=0, mode="all", cls=None):
    """
    Convert a set of pixel coordinates into a `~astropy.coordinates.SkyCoord`
    coordinate.

    Parameters
    ----------
    xp, yp : float or ndarray
        The coordinates to convert.
    wcs : `~astropy.wcs.WCS`
        The WCS transformation to use.
    origin : int
        Whether to return 0 or 1-based pixel coordinates.
    mode : 'all' or 'wcs'
        Whether to do the transformation including distortions (``'all'``) or
        only including only the core WCS transformation (``'wcs'``).
    cls : class or None
        The class of object to create. Should be a
        `~astropy.coordinates.SkyCoord` subclass. If None, defaults to
        `~astropy.coordinates.SkyCoord`.

    Returns
    -------
    coords : `~astropy.coordinates.SkyCoord` subclass
        The celestial coordinates. Whatever ``cls`` type is.

    See Also
    --------
    astropy.coordinates.SkyCoord.from_pixel
    """
    # Import astropy.coordinates here to avoid circular imports
    from astropy.coordinates import SkyCoord, UnitSphericalRepresentation

    # we have to do this instead of actually setting the default to SkyCoord
    # because importing SkyCoord at the module-level leads to circular
    # dependencies.
    if cls is None:
        cls = SkyCoord

    if _has_distortion(wcs) and wcs.naxis != 2:
        raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")

    # Keep only the celestial part of the axes, also re-orders lon/lat
    wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
    if wcs.naxis != 2:
        raise ValueError("WCS should contain celestial component")

    # Check which frame the WCS uses
    frame = wcs_to_celestial_frame(wcs)

    # Check what unit the WCS gives
    lon_unit = u.Unit(wcs.wcs.cunit[0])
    lat_unit = u.Unit(wcs.wcs.cunit[1])

    # Select the pixel transformation, then convert
    if mode == "all":
        pix2world = wcs.all_pix2world
    elif mode == "wcs":
        pix2world = wcs.wcs_pix2world
    else:
        raise ValueError("mode should be either 'all' or 'wcs'")
    lon, lat = pix2world(xp, yp, origin)

    # Attach units and build a SkyCoord-like object in the WCS frame
    data = UnitSphericalRepresentation(lon=lon * lon_unit, lat=lat * lat_unit)
    return cls(frame.realize_frame(data))
def _unique_with_order_preserved(items):
"""
Return a list of unique items in the list provided, preserving the order
in which they are found.
"""
new_items = []
for item in items:
if item not in new_items:
new_items.append(item)
return new_items
def _pixel_to_world_correlation_matrix(wcs):
    """
    Return a correlation matrix between the pixel coordinates and the
    high level world coordinates, along with the list of high level world
    coordinate classes.

    The shape of the matrix is ``(n_world, n_pix)``, where ``n_world`` is the
    number of high level world coordinates.
    """
    low_level = wcs.low_level_wcs

    # Fetch these once up front as the properties can be expensive to compute.
    all_components = low_level.world_axis_object_components
    all_classes = low_level.world_axis_object_classes
    axis_matrix = low_level.axis_correlation_matrix

    # Collapse together the world dimensions that are combined into the same
    # high-level object (identified by the component name).
    names = _unique_with_order_preserved([comp[0] for comp in all_components])

    collapsed = np.zeros((len(names), wcs.pixel_n_dim), dtype=bool)
    for iworld in range(wcs.world_n_dim):
        row = names.index(all_components[iworld][0])
        collapsed[row] |= axis_matrix[iworld]

    classes = [all_classes[name][0] for name in names]
    return collapsed, classes
def _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out):
    """
    Correlation matrix between the input and output pixel coordinates for a
    pixel -> world -> pixel transformation specified by two WCS instances.

    The first WCS specified is the one used for the pixel -> world
    transformation and the second WCS specified is the one used for the world ->
    pixel transformation. The shape of the matrix is
    ``(n_pixel_out, n_pixel_in)``.
    """
    matrix1, classes1 = _pixel_to_world_correlation_matrix(wcs_in)
    matrix2, classes2 = _pixel_to_world_correlation_matrix(wcs_out)
    if len(classes1) != len(classes2):
        raise ValueError("The two WCS return a different number of world coordinates")

    # Try to match the world coordinate classes of the two WCS one-to-one.
    unique_match = True
    mapping = []
    for world_class in classes1:
        occurrences = classes2.count(world_class)
        if occurrences == 0:
            raise ValueError("The world coordinate types of the two WCS do not match")
        if occurrences > 1:
            # Ambiguous: the same class appears more than once in wcs_out.
            unique_match = False
            break
        mapping.append(classes2.index(world_class))

    if unique_match:
        # Classes are unique, so we need to re-order matrix2 along the world
        # axis using the mapping we found above.
        matrix2 = matrix2[mapping]
    elif classes1 != classes2:
        raise ValueError(
            "World coordinate order doesn't match and automatic matching is ambiguous"
        )

    return np.matmul(matrix2.T, matrix1)
def _split_matrix(matrix):
"""
Given an axis correlation matrix from a WCS object, return information about
the individual WCS that can be split out.
The output is a list of tuples, where each tuple contains a list of
pixel dimensions and a list of world dimensions that can be extracted to
form a new WCS. For example, in the case of a spectral cube with the first
two world coordinates being the celestial coordinates and the third
coordinate being an uncorrelated spectral axis, the matrix would look like::
array([[ True, True, False],
[ True, True, False],
[False, False, True]])
and this function will return ``[([0, 1], [0, 1]), ([2], [2])]``.
"""
pixel_used = []
split_info = []
for ipix in range(matrix.shape[1]):
if ipix in pixel_used:
continue
pixel_include = np.zeros(matrix.shape[1], dtype=bool)
pixel_include[ipix] = True
n_pix_prev, n_pix = 0, 1
while n_pix > n_pix_prev:
world_include = matrix[:, pixel_include].any(axis=1)
pixel_include = matrix[world_include, :].any(axis=0)
n_pix_prev, n_pix = n_pix, np.sum(pixel_include)
pixel_indices = list(np.nonzero(pixel_include)[0])
world_indices = list(np.nonzero(world_include)[0])
pixel_used.extend(pixel_indices)
split_info.append((pixel_indices, world_indices))
return split_info
def pixel_to_pixel(wcs_in, wcs_out, *inputs):
    """
    Transform pixel coordinates in a dataset with a WCS to pixel coordinates
    in another dataset with a different WCS.

    This function is designed to efficiently deal with input pixel arrays that
    are broadcasted views of smaller arrays, and is compatible with any
    APE14-compliant WCS.

    Parameters
    ----------
    wcs_in : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
        A WCS object for the original dataset which complies with the
        high-level shared APE 14 WCS API.
    wcs_out : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
        A WCS object for the target dataset which complies with the
        high-level shared APE 14 WCS API.
    *inputs :
        Scalars or arrays giving the pixel coordinates to transform.
    """
    # Shortcut for scalars
    if np.isscalar(inputs[0]):
        world = wcs_in.pixel_to_world(*inputs)
        if not isinstance(world, (tuple, list)):
            world = (world,)
        return wcs_out.world_to_pixel(*world)

    # Remember original shape
    original_shape = inputs[0].shape

    # Transform each independent group of correlated axes separately so that
    # broadcasted input views are only expanded where necessary.
    correlation = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
    split_info = _split_matrix(correlation)

    outputs = [None] * wcs_out.pixel_n_dim

    for in_indices, out_indices in split_info:
        subset = []
        for ipix in range(wcs_in.pixel_n_dim):
            if ipix in in_indices:
                subset.append(unbroadcast(inputs[ipix]))
            else:
                # Uncorrelated axes only need a representative scalar value.
                subset.append(inputs[ipix].flat[0])
        subset = np.broadcast_arrays(*subset)

        world = wcs_in.pixel_to_world(*subset)
        if not isinstance(world, (tuple, list)):
            world = (world,)

        pixel_out = wcs_out.world_to_pixel(*world)
        if wcs_out.pixel_n_dim == 1:
            pixel_out = (pixel_out,)

        for ipix in out_indices:
            outputs[ipix] = np.broadcast_to(pixel_out[ipix], original_shape)

    return outputs[0] if wcs_out.pixel_n_dim == 1 else outputs
def local_partial_pixel_derivatives(wcs, *pixel, normalize_by_world=False):
    """
    Return a matrix of shape ``(world_n_dim, pixel_n_dim)`` where each entry
    ``[i, j]`` is the partial derivative d(world_i)/d(pixel_j) at the requested
    pixel position.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The WCS transformation to evaluate the derivatives for.
    *pixel : float
        The scalar pixel coordinates at which to evaluate the derivatives.
    normalize_by_world : bool
        If `True`, the matrix is normalized so that for each world entry
        the derivatives add up to 1.
    """
    # World coordinates at the reference pixel
    pixel_ref = np.array(pixel)
    world_ref = np.array(wcs.pixel_to_world_values(*pixel_ref))

    # Finite differences: offset each pixel axis by one pixel in turn
    jacobian = np.zeros((wcs.world_n_dim, wcs.pixel_n_dim))
    for axis in range(wcs.pixel_n_dim):
        shifted = pixel_ref.copy()
        shifted[axis] += 1
        world_shifted = np.array(wcs.pixel_to_world_values(*shifted))
        jacobian[:, axis] = world_shifted - world_ref

    if normalize_by_world:
        jacobian /= jacobian.sum(axis=1)[:, np.newaxis]

    return jacobian
def _linear_wcs_fit(params, lon, lat, x, y, w_obj):
"""
Objective function for fitting linear terms.
Parameters
----------
params : array
6 element array. First 4 elements are PC matrix, last 2 are CRPIX.
lon, lat: array
Sky coordinates.
x, y: array
Pixel coordinates
w_obj: `~astropy.wcs.WCS`
WCS object
"""
cd = params[0:4]
crpix = params[4:6]
w_obj.wcs.cd = ((cd[0], cd[1]), (cd[2], cd[3]))
w_obj.wcs.crpix = crpix
lon2, lat2 = w_obj.wcs_pix2world(x, y, 0)
lat_resids = lat - lat2
lon_resids = lon - lon2
# In case the longitude has wrapped around
lon_resids = np.mod(lon_resids - 180.0, 360.0) - 180.0
resids = np.concatenate((lon_resids * np.cos(np.radians(lat)), lat_resids))
return resids
def _sip_fit(params, lon, lat, u, v, w_obj, order, coeff_names):
    """Objective function for fitting SIP.

    Parameters
    ----------
    params : array
        Fittable parameters. First 2 elements are CRPIX, the next 4 are the
        flattened CD matrix, and the remainder are the SIP A then B
        polynomial coefficients (``len(coeff_names)`` of each).
    lon, lat: array
        Sky coordinates.
    u, v: array
        Pixel coordinates
    w_obj: `~astropy.wcs.WCS`
        WCS object
    """
    from astropy.modeling.models import SIP  # here to avoid circular import

    # unpack params
    crpix = params[0:2]
    cdx = params[2:6].reshape((2, 2))
    n_coeffs = len(coeff_names)
    a_params = params[6 : 6 + n_coeffs]
    b_params = params[6 + n_coeffs :]

    # assign to wcs, used for transformations in this function
    w_obj.wcs.cd = cdx
    w_obj.wcs.crpix = crpix

    a_coeff = {"A_" + name: value for name, value in zip(coeff_names, a_params)}
    b_coeff = {"B_" + name: value for name, value in zip(coeff_names, b_params)}

    sip = SIP(
        crpix=crpix, a_order=order, b_order=order, a_coeff=a_coeff, b_coeff=b_coeff
    )
    fuv, guv = sip(u, v)
    # Model: intermediate coordinates of the distorted pixel positions
    xo, yo = np.dot(cdx, np.array([u + fuv - crpix[0], v + guv - crpix[1]]))

    # use all pix2world in case `projection` contains distortion table
    x, y = w_obj.all_world2pix(lon, lat, 0)
    x, y = np.dot(w_obj.wcs.cd, (x - w_obj.wcs.crpix[0], y - w_obj.wcs.crpix[1]))

    return np.concatenate((x - xo, y - yo))
def fit_wcs_from_points(
    xy, world_coords, proj_point="center", projection="TAN", sip_degree=None
):
    """
    Given two matching sets of coordinates on detector and sky,
    compute the WCS.

    Fits a WCS object to matched set of input detector and sky coordinates.
    Optionally, a SIP can be fit to account for geometric
    distortion. Returns an `~astropy.wcs.WCS` object with the best fit
    parameters for mapping between input pixel and sky coordinates.

    The projection type (default 'TAN') can passed in as a string, one of
    the valid three-letter projection codes - or as a WCS object with
    projection keywords already set. Note that if an input WCS has any
    non-polynomial distortion, this will be applied and reflected in the
    fit terms and coefficients. Passing in a WCS object in this way essentially
    allows it to be refit based on the matched input coordinates and projection
    point, but take care when using this option as non-projection related
    keywords in the input might cause unexpected behavior.

    Notes
    -----
    - The fiducial point for the spherical projection can be set to 'center'
      to use the mean position of input sky coordinates, or as an
      `~astropy.coordinates.SkyCoord` object.
    - Units in all output WCS objects will always be in degrees.
    - If the coordinate frame differs between `~astropy.coordinates.SkyCoord`
      objects passed in for ``world_coords`` and ``proj_point``, the frame for
      ``world_coords`` will override as the frame for the output WCS.
    - If a WCS object is passed in to ``projection`` the CD/PC matrix will
      be used as an initial guess for the fit. If this is known to be
      significantly off and may throw off the fit, set to the identity matrix
      (for example, by doing wcs.wcs.pc = [(1., 0.,), (0., 1.)])

    Parameters
    ----------
    xy : (`numpy.ndarray`, `numpy.ndarray`) tuple
        x & y pixel coordinates. These should be in FITS convention, starting
        from (1,1) as the center of the bottom-left pixel.
    world_coords : `~astropy.coordinates.SkyCoord`
        Skycoord object with world coordinates.
    proj_point : 'center' or ~astropy.coordinates.SkyCoord`
        Defaults to 'center', in which the geometric center of input world
        coordinates will be used as the projection point. To specify an exact
        point for the projection, a Skycoord object with a coordinate pair can
        be passed in. For consistency, the units and frame of these coordinates
        will be transformed to match ``world_coords`` if they don't.
    projection : str or `~astropy.wcs.WCS`
        Three letter projection code, of any of standard projections defined
        in the FITS WCS standard. Optionally, a WCS object with projection
        keywords set may be passed in.
    sip_degree : None or int
        If set to a non-zero integer value, will fit SIP of degree
        ``sip_degree`` to model geometric distortion. Defaults to None, meaning
        no distortion corrections will be fit.

    Returns
    -------
    wcs : `~astropy.wcs.WCS`
        The best-fit WCS to the points given.
    """
    from scipy.optimize import least_squares

    import astropy.units as u
    from astropy.coordinates import SkyCoord  # here to avoid circular import

    from .wcs import Sip

    xp, yp = xy
    try:
        lon, lat = world_coords.data.lon.deg, world_coords.data.lat.deg
    except AttributeError:
        unit_sph = world_coords.unit_spherical
        lon, lat = unit_sph.lon.deg, unit_sph.lat.deg

    # verify input
    if (type(proj_point) != type(world_coords)) and (proj_point != "center"):
        raise ValueError(
            "proj_point must be set to 'center', or an"
            "`~astropy.coordinates.SkyCoord` object with "
            "a pair of points."
        )

    use_center_as_proj_point = str(proj_point) == "center"

    if not use_center_as_proj_point:
        assert proj_point.size == 1

    proj_codes = [
        "AZP",
        "SZP",
        "TAN",
        "STG",
        "SIN",
        "ARC",
        "ZEA",
        "AIR",
        "CYP",
        "CEA",
        "CAR",
        "MER",
        "SFL",
        "PAR",
        "MOL",
        "AIT",
        "COP",
        "COE",
        "COD",
        "COO",
        "BON",
        "PCO",
        "TSC",
        "CSC",
        "QSC",
        "HPX",
        "XPH",
    ]
    if isinstance(projection, str):
        if projection not in proj_codes:
            raise ValueError(
                "Must specify valid projection code from list of supported types: ",
                ", ".join(proj_codes),
            )
        # empty wcs to fill in with fit values
        wcs = celestial_frame_to_wcs(frame=world_coords.frame, projection=projection)
    else:  # if projection is not string, should be wcs object. use as template.
        wcs = copy.deepcopy(projection)
        wcs.wcs.cdelt = (1.0, 1.0)  # make sure cdelt is 1
        wcs.sip = None

    # Change PC to CD, since cdelt will be set to 1
    if wcs.wcs.has_pc():
        wcs.wcs.cd = wcs.wcs.pc
        wcs.wcs.__delattr__("pc")

    if (sip_degree is not None) and not isinstance(sip_degree, int):
        raise ValueError("sip_degree must be None, or integer.")

    # compute bounding box for sources in image coordinates:
    xpmin, xpmax, ypmin, ypmax = xp.min(), xp.max(), yp.min(), yp.max()

    # set pixel_shape to span of input points
    wcs.pixel_shape = (
        1 if xpmax <= 0.0 else int(np.ceil(xpmax)),
        1 if ypmax <= 0.0 else int(np.ceil(ypmax)),
    )

    # determine CRVAL from input
    close = lambda l, p: p[np.argmin(np.abs(l))]
    if use_center_as_proj_point:  # use center of input points
        sc1 = SkyCoord(lon.min() * u.deg, lat.max() * u.deg)
        sc2 = SkyCoord(lon.max() * u.deg, lat.min() * u.deg)
        pa = sc1.position_angle(sc2)
        sep = sc1.separation(sc2)
        midpoint_sc = sc1.directional_offset_by(pa, sep / 2)
        wcs.wcs.crval = (midpoint_sc.data.lon.deg, midpoint_sc.data.lat.deg)
        wcs.wcs.crpix = ((xpmax + xpmin) / 2.0, (ypmax + ypmin) / 2.0)
    else:  # convert units, initial guess for crpix
        # BUG FIX: ``transform_to`` returns a new object; previously the
        # result was discarded, so proj_point was never actually converted
        # to the frame of ``world_coords`` as documented.
        proj_point = proj_point.transform_to(world_coords)
        wcs.wcs.crval = (proj_point.data.lon.deg, proj_point.data.lat.deg)
        # BUG FIX: the y-axis initial guess must use the *latitude* offset
        # from CRVAL2 (previously ``lon`` was used for both axes).
        wcs.wcs.crpix = (
            close(lon - wcs.wcs.crval[0], xp + 1),
            close(lat - wcs.wcs.crval[1], yp + 1),
        )

    # fit linear terms, assign to wcs
    # use (1, 0, 0, 1) as initial guess, in case input wcs was passed in
    # and cd terms are way off.
    # Use bounds to require that the fit center pixel is on the input image
    if xpmin == xpmax:
        xpmin, xpmax = xpmin - 0.5, xpmax + 0.5
    if ypmin == ypmax:
        ypmin, ypmax = ypmin - 0.5, ypmax + 0.5

    p0 = np.concatenate([wcs.wcs.cd.flatten(), wcs.wcs.crpix.flatten()])
    fit = least_squares(
        _linear_wcs_fit,
        p0,
        args=(lon, lat, xp, yp, wcs),
        bounds=[
            [-np.inf, -np.inf, -np.inf, -np.inf, xpmin + 1, ypmin + 1],
            [np.inf, np.inf, np.inf, np.inf, xpmax + 1, ypmax + 1],
        ],
    )
    wcs.wcs.crpix = np.array(fit.x[4:6])
    wcs.wcs.cd = np.array(fit.x[0:4].reshape((2, 2)))

    # fit SIP, if specified. Only fit forward coefficients
    if sip_degree:
        degree = sip_degree
        if "-SIP" not in wcs.wcs.ctype[0]:
            wcs.wcs.ctype = [x + "-SIP" for x in wcs.wcs.ctype]

        # All cross terms i_j with 2 <= i + j <= degree
        coef_names = [
            f"{i}_{j}"
            for i in range(degree + 1)
            for j in range(degree + 1)
            if (i + j) < (degree + 1) and (i + j) > 1
        ]
        p0 = np.concatenate(
            (
                np.array(wcs.wcs.crpix),
                wcs.wcs.cd.flatten(),
                np.zeros(2 * len(coef_names)),
            )
        )

        fit = least_squares(
            _sip_fit,
            p0,
            args=(lon, lat, xp, yp, wcs, degree, coef_names),
            bounds=[
                [xpmin + 1, ypmin + 1] + [-np.inf] * (4 + 2 * len(coef_names)),
                [xpmax + 1, ypmax + 1] + [np.inf] * (4 + 2 * len(coef_names)),
            ],
        )
        coef_fit = (
            list(fit.x[6 : 6 + len(coef_names)]),
            list(fit.x[6 + len(coef_names) :]),
        )

        # put fit values in wcs
        wcs.wcs.cd = fit.x[2:6].reshape((2, 2))
        wcs.wcs.crpix = fit.x[0:2]

        a_vals = np.zeros((degree + 1, degree + 1))
        b_vals = np.zeros((degree + 1, degree + 1))

        for coef_name in coef_names:
            a_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[0].pop(0)
            b_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[1].pop(0)

        wcs.sip = Sip(
            a_vals,
            b_vals,
            np.zeros((degree + 1, degree + 1)),
            np.zeros((degree + 1, degree + 1)),
            wcs.wcs.crpix,
        )

    return wcs
def obsgeo_to_frame(obsgeo, obstime):
    """
    Convert a WCS obsgeo property into an ITRS coordinate frame.

    Parameters
    ----------
    obsgeo : array-like
        A shape ``(6, )`` array representing ``OBSGEO-[XYZ], OBSGEO-[BLH]`` as
        returned by ``WCS.wcs.obsgeo``.
    obstime : time-like
        The time associated with the coordinate, will be passed to
        `~astropy.coordinates.ITRS` as the obstime keyword.

    Returns
    -------
    ~astropy.coordinates.ITRS
        An `~astropy.coordinates.ITRS` coordinate frame
        representing the coordinates.

    Notes
    -----
    The obsgeo array as accessed on a `.WCS` object is a length 6 numpy array
    where the first three elements are the coordinate in a cartesian
    representation and the second 3 are the coordinate in a spherical
    representation.

    This function priorities reading the cartesian coordinates, and will only
    read the spherical coordinates if the cartesian coordinates are either all
    zero or any of the cartesian coordinates are non-finite.

    In the case where both the spherical and cartesian coordinates have some
    non-finite values the spherical coordinates will be returned with the
    non-finite values included.
    """
    unusable = (
        obsgeo is None
        or len(obsgeo) != 6
        or np.all(np.array(obsgeo) == 0)
        or np.all(~np.isfinite(obsgeo))
    )
    if unusable:
        raise ValueError(
            f"Can not parse the 'obsgeo' location ({obsgeo}). "
            "obsgeo should be a length 6 non-zero, finite numpy array"
        )

    cartesian = obsgeo[:3]
    # If the cartesian coords are zero or have NaNs in them use the spherical ones
    if np.all(cartesian == 0) or np.any(~np.isfinite(cartesian)):
        representation = SphericalRepresentation(*(obsgeo[3:] * (u.deg, u.deg, u.m)))
    # Otherwise we assume the cartesian ones are valid
    else:
        representation = CartesianRepresentation(*cartesian * u.m)

    return ITRS(representation, obstime=obstime)
| custom_frame_to_wcs_mappings |
python | pyqtgraph__pyqtgraph | pyqtgraph/dockarea/Container.py | {
"start": 3802,
"end": 5017
class ____(Container, QtWidgets.QSplitter):
    """Horizontal or vertical splitter with some changes:
    - save/restore works correctly
    """
    sigStretchChanged = QtCore.Signal()

    def __init__(self, area, orientation):
        QtWidgets.QSplitter.__init__(self)
        self.setOrientation(orientation)
        Container.__init__(self, area)

    def _insertItem(self, item, index):
        self.insertWidget(index, item)
        # Re-show: the item may have been hidden earlier by a tab container.
        item.show()

    def saveState(self):
        sizes = self.sizes()
        # All-zero sizes (fully collapsed splitter) would restore to nothing
        # visible, so substitute a uniform non-zero layout.
        if not any(sizes):
            sizes = [10] * len(sizes)
        return {'sizes': sizes}

    def restoreState(self, state):
        sizes = state['sizes']
        self.setSizes(sizes)
        for index, size in enumerate(sizes):
            self.setStretchFactor(index, size)

    def childEvent(self, ev):
        super().childEvent(ev)  # call QSplitter.childEvent()
        Container.childEvent_(self, ev)
| SplitContainer |
python | streamlit__streamlit | lib/streamlit/elements/widgets/button_group.py | {
"start": 5240,
"end": 9367
class ____(Generic[T]):
    """A serde that can handle both single and multi select options.

    It uses the same proto to wire the data, so that we can send and receive
    single values via a list. We have different serdes for both cases though so
    that when setting / getting the value via session_state, it is mapped correctly.

    So for single select, the value will be a single value and for multi select, it will
    be a list of values.
    """

    def __init__(
        self,
        options: Sequence[T],
        default_values: list[int],
        type: Literal["single", "multi"],
    ) -> None:
        self.options = options
        self.default_values = default_values
        self.type = type
        # Delegate to the mode-specific serde; both speak the same
        # list-of-indices wire format.
        self.serde: _SingleSelectSerde[T] | _MultiSelectSerde[T]
        if type == "single":
            self.serde = _SingleSelectSerde(options, default_value=default_values)
        else:
            self.serde = _MultiSelectSerde(options, default_values)

    def serialize(self, value: T | list[T] | None) -> list[int]:
        return self.serde.serialize(cast("Any", value))

    def deserialize(self, ui_value: list[int] | None) -> list[T] | T | None:
        return self.serde.deserialize(ui_value)
def get_mapped_options(
    feedback_option: Literal["thumbs", "faces", "stars"],
) -> tuple[list[ButtonGroupProto.Option], list[int]]:
    # options object understandable by the web app
    options: list[ButtonGroupProto.Option] = []
    # we use the option index in the webapp communication to
    # indicate which option is selected
    options_indices: list[int] = []

    if feedback_option == "thumbs":
        # reversing the index mapping to have thumbs up first (but still with the higher
        # index (=sentiment) in the list)
        options_indices = list(reversed(range(len(_THUMB_ICONS))))
        options = [ButtonGroupProto.Option(content_icon=icon) for icon in _THUMB_ICONS]
    elif feedback_option == "faces":
        options_indices = list(range(len(_FACES_ICONS)))
        options = [ButtonGroupProto.Option(content_icon=icon) for icon in _FACES_ICONS]
    elif feedback_option == "stars":
        options_indices = list(range(_NUMBER_STARS))
        star_option = ButtonGroupProto.Option(
            content_icon=_STAR_ICON,
            selected_content_icon=_SELECTED_STAR_ICON,
        )
        options = [star_option] * _NUMBER_STARS

    return options, options_indices
def _build_proto(
    widget_id: str,
    formatted_options: Sequence[ButtonGroupProto.Option],
    default_values: list[int],
    disabled: bool,
    current_form_id: str,
    click_mode: ButtonGroupProto.ClickMode.ValueType,
    selection_visualization: ButtonGroupProto.SelectionVisualization.ValueType = (
        ButtonGroupProto.SelectionVisualization.ONLY_SELECTED
    ),
    style: Literal["borderless", "pills", "segmented_control"] = "pills",
    label: str | None = None,
    label_visibility: LabelVisibility = "visible",
    help: str | None = None,
) -> ButtonGroupProto:
    """Assemble a ButtonGroup proto message from the given widget settings."""
    proto = ButtonGroupProto()

    proto.id = widget_id
    proto.default[:] = default_values
    proto.form_id = current_form_id
    proto.disabled = disabled
    proto.click_mode = click_mode
    proto.style = ButtonGroupProto.Style.Value(style.upper())

    if label is not None:
        # not passing the label looks the same as a collapsed label
        proto.label = label
        proto.label_visibility.value = get_label_visibility_proto_value(
            label_visibility
        )

    if help is not None:
        proto.help = help

    proto.options.extend(formatted_options)
    proto.selection_visualization = selection_visualization
    return proto
def _maybe_raise_selection_mode_warning(selection_mode: SelectionMode) -> None:
    """Check if the selection_mode value is valid or raise exception otherwise."""
    if selection_mode in ("single", "multi"):
        return
    raise StreamlitAPIException(
        "The selection_mode argument must be one of ['single', 'multi']. "
        f"The argument passed was '{selection_mode}'."
    )
| ButtonGroupSerde |
python | doocs__leetcode | solution/2500-2599/2580.Count Ways to Group Overlapping Ranges/Solution.py | {
"start": 0,
"end": 291
} | class ____:
def countWays(self, ranges: List[List[int]]) -> int:
ranges.sort()
cnt, mx = 0, -1
for start, end in ranges:
if start > mx:
cnt += 1
mx = max(mx, end)
mod = 10**9 + 7
return pow(2, cnt, mod)
| Solution |
python | plotly__plotly.py | plotly/graph_objs/layout/ternary/baxis/_tickfont.py | {
"start": 235,
"end": 9925
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.ternary.baxis"
_path_str = "layout.ternary.baxis.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the tick font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.ternary
.baxis.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.ternary.baxis.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.ternary.baxis.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | lazyprogrammer__machine_learning_examples | rl2/mountaincar/n_step.py | {
"start": 841,
"end": 4759
} | class ____:
def __init__(self, **kwargs):
self.w = None
self.lr = 1e-2
def partial_fit(self, X, Y):
if self.w is None:
D = X.shape[1]
self.w = np.random.randn(D) / np.sqrt(D)
self.w += self.lr*(Y - X.dot(self.w)).dot(X)
def predict(self, X):
return X.dot(self.w)
# replace SKLearn Regressor
q_learning.SGDRegressor = SGDRegressor
# calculate everything up to max[Q(s,a)]
# Ex.
# R(t) + gamma*R(t+1) + ... + (gamma^(n-1))*R(t+n-1) + (gamma^n)*max[Q(s(t+n), a(t+n))]
# def calculate_return_before_prediction(rewards, gamma):
# ret = 0
# for r in reversed(rewards[1:]):
# ret += r + gamma*ret
# ret += rewards[0]
# return ret
# returns a list of states_and_rewards, and the total reward
def play_one(model, eps, gamma, n=5):
observation = env.reset()[0]
done = False
totalreward = 0
rewards = []
states = []
actions = []
iters = 0
# array of [gamma^0, gamma^1, ..., gamma^(n-1)]
multiplier = np.array([gamma]*n)**np.arange(n)
# while not done and iters < 200:
while not done and iters < 10000:
# in earlier versions of gym, episode doesn't automatically
# end when you hit 200 steps
action = model.sample_action(observation, eps)
states.append(observation)
actions.append(action)
prev_observation = observation
observation, reward, done, truncated, info = env.step(action)
rewards.append(reward)
# update the model
if len(rewards) >= n:
# return_up_to_prediction = calculate_return_before_prediction(rewards, gamma)
return_up_to_prediction = multiplier.dot(rewards[-n:])
action_values = model.predict(observation)[0]
# print("action_values.shape:", action_values.shape)
G = return_up_to_prediction + (gamma**n)*np.max(action_values)
# print("G:", G)
model.update(states[-n], actions[-n], G)
# if len(rewards) > n:
# rewards.pop(0)
# states.pop(0)
# actions.pop(0)
# assert(len(rewards) <= n)
totalreward += reward
iters += 1
# empty the cache
if n == 1:
rewards = []
states = []
actions = []
else:
rewards = rewards[-n+1:]
states = states[-n+1:]
actions = actions[-n+1:]
# unfortunately, new version of gym cuts you off at 200 steps
# even if you haven't reached the goal.
# it's not good to do this UNLESS you've reached the goal.
# we are "really done" if position >= 0.5
if observation[0] >= 0.5:
# we actually made it to the goal
# print("made it!")
while len(rewards) > 0:
G = multiplier[:len(rewards)].dot(rewards)
model.update(states[0], actions[0], G)
rewards.pop(0)
states.pop(0)
actions.pop(0)
else:
# we did not make it to the goal
# print("didn't make it...")
while len(rewards) > 0:
guess_rewards = rewards + [-1]*(n - len(rewards))
G = multiplier.dot(guess_rewards)
model.update(states[0], actions[0], G)
rewards.pop(0)
states.pop(0)
actions.pop(0)
return totalreward
if __name__ == '__main__':
env = gym.make('MountainCar-v0')
ft = FeatureTransformer(env)
model = Model(env, ft, "constant")
gamma = 0.99
if 'monitor' in sys.argv:
filename = os.path.basename(__file__).split('.')[0]
monitor_dir = './' + filename + '_' + str(datetime.now())
env = wrappers.Monitor(env, monitor_dir)
N = 300
totalrewards = np.empty(N)
costs = np.empty(N)
for n in range(N):
# eps = 1.0/(0.1*n+1)
eps = 0.1*(0.97**n)
totalreward = play_one(model, eps, gamma)
totalrewards[n] = totalreward
print("episode:", n, "total reward:", totalreward)
print("avg reward for last 100 episodes:", totalrewards[-100:].mean())
print("total steps:", -totalrewards.sum())
plt.plot(totalrewards)
plt.title("Rewards")
plt.show()
plot_running_avg(totalrewards)
# plot the optimal state-value function
plot_cost_to_go(env, model)
| SGDRegressor |
python | plotly__plotly.py | plotly/graph_objs/scattermapbox/marker/colorbar/title/_font.py | {
"start": 233,
"end": 9979
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattermapbox.marker.colorbar.title"
_path_str = "scattermapbox.marker.colorbar.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattermapbox.
marker.colorbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattermapbox.marker.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermapbox.marker.colorbar.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | django__django | tests/forms_tests/tests/test_error_messages.py | {
"start": 11830,
"end": 13446
} | class ____(TestCase, AssertFormErrorsMixin):
def test_modelchoicefield(self):
# Create choices for the model choice field tests below.
ChoiceModel.objects.create(pk=1, name="a")
ChoiceModel.objects.create(pk=2, name="b")
ChoiceModel.objects.create(pk=3, name="c")
# ModelChoiceField
e = {
"required": "REQUIRED",
"invalid_choice": "INVALID CHOICE",
}
f = ModelChoiceField(queryset=ChoiceModel.objects.all(), error_messages=e)
self.assertFormErrors(["REQUIRED"], f.clean, "")
self.assertFormErrors(["INVALID CHOICE"], f.clean, "4")
# ModelMultipleChoiceField
e = {
"required": "REQUIRED",
"invalid_choice": "%(value)s IS INVALID CHOICE",
"invalid_list": "NOT A LIST OF VALUES",
}
f = ModelMultipleChoiceField(
queryset=ChoiceModel.objects.all(), error_messages=e
)
self.assertFormErrors(["REQUIRED"], f.clean, "")
self.assertFormErrors(["NOT A LIST OF VALUES"], f.clean, "3")
self.assertFormErrors(["4 IS INVALID CHOICE"], f.clean, ["4"])
def test_modelchoicefield_value_placeholder(self):
f = ModelChoiceField(
queryset=ChoiceModel.objects.all(),
error_messages={
"invalid_choice": '"%(value)s" is not one of the available choices.',
},
)
self.assertFormErrors(
['"invalid" is not one of the available choices.'],
f.clean,
"invalid",
)
| ModelChoiceFieldErrorMessagesTestCase |
python | apache__airflow | providers/ydb/tests/integration/ydb/operators/test_ydb.py | {
"start": 1617,
"end": 3287
} | class ____:
def setup_method(self):
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG("test_ydb_dag_id", default_args=args)
self.mock_context = MagicMock()
def test_execute_hello(self):
operator = YDBExecuteQueryOperator(
task_id="simple_sql", sql="select 987", is_ddl=False, handler=fetch_all_handler
)
results = operator.execute(self.mock_context)
assert results == [(987,)]
def test_bulk_upsert(self):
create_table_op = YDBExecuteQueryOperator(
task_id="create",
sql="""
CREATE TABLE team (
id INT,
name TEXT,
age UINT32,
PRIMARY KEY (id)
);""",
is_ddl=True,
)
create_table_op.execute(self.mock_context)
age_sum_op = YDBExecuteQueryOperator(task_id="age_sum", sql="SELECT SUM(age) as age_sum FROM team")
hook = age_sum_op.get_db_hook()
column_types = (
ydb.BulkUpsertColumns()
.add_column("id", ydb.OptionalType(ydb.PrimitiveType.Int32))
.add_column("name", ydb.OptionalType(ydb.PrimitiveType.Utf8))
.add_column("age", ydb.OptionalType(ydb.PrimitiveType.Uint32))
)
rows = [
{"id": 1, "name": "rabbits", "age": 17},
{"id": 2, "name": "bears", "age": 22},
{"id": 3, "name": "foxes", "age": 9},
]
hook.bulk_upsert("team", rows=rows, column_types=column_types)
result = age_sum_op.execute(self.mock_context)
assert result == [(48,)]
| TestYDBExecuteQueryOperator |
python | ray-project__ray | python/ray/tune/trainable/metadata.py | {
"start": 259,
"end": 3522
} | class ____:
"""Serializable struct for holding runtime trial metadata.
Runtime metadata is data that changes and is updated on runtime. This includes
e.g. the last result, the currently available checkpoints, and the number
of errors encountered for a trial.
"""
def __init__(self, n_steps: Tuple[int] = (5, 10)):
# General metadata
self.start_time = None
# Errors
self.num_failures = 0
self.num_failures_after_restore = 0
self.error_filename = None
self.pickled_error_filename = None
# Results and metrics
self.last_result = {}
self.last_result_time = -float("inf")
# stores in memory max/min/avg/last-n-avg/last result for each
# metric by trial
self.metric_analysis = {}
self._n_steps = n_steps
self.metric_n_steps = {}
# Checkpoints
self.checkpoint_manager: Optional[_CheckpointManager] = None
self._cached_json = None
def invalidate_cache(self):
self._cached_json = None
def update_metric(self, metric: str, value: Number, step: Optional[int] = 1):
if metric not in self.metric_analysis:
self.metric_analysis[metric] = {
"max": value,
"min": value,
"avg": value,
"last": value,
}
self.metric_n_steps[metric] = {}
for n in self._n_steps:
key = "last-{:d}-avg".format(n)
self.metric_analysis[metric][key] = value
# Store n as string for correct restore.
self.metric_n_steps[metric][str(n)] = deque([value], maxlen=n)
else:
step = step or 1
self.metric_analysis[metric]["max"] = max(
value, self.metric_analysis[metric]["max"]
)
self.metric_analysis[metric]["min"] = min(
value, self.metric_analysis[metric]["min"]
)
self.metric_analysis[metric]["avg"] = (
1 / step * (value + (step - 1) * self.metric_analysis[metric]["avg"])
)
self.metric_analysis[metric]["last"] = value
for n in self._n_steps:
key = "last-{:d}-avg".format(n)
self.metric_n_steps[metric][str(n)].append(value)
self.metric_analysis[metric][key] = sum(
self.metric_n_steps[metric][str(n)]
) / len(self.metric_n_steps[metric][str(n)])
self.invalidate_cache()
def __setattr__(self, key, value):
super().__setattr__(key, value)
if key not in {"_cached_json"}:
self.invalidate_cache()
def get_json_state(self) -> str:
if self._cached_json is None:
data = self.__dict__
data.pop("_cached_json", None)
self._cached_json = json.dumps(data, indent=2, cls=TuneFunctionEncoder)
return self._cached_json
@classmethod
def from_json_state(cls, json_state: str) -> "_TrainingRunMetadata":
state = json.loads(json_state, cls=TuneFunctionDecoder)
run_metadata = cls()
run_metadata.__dict__.update(state)
return run_metadata
| _TrainingRunMetadata |
python | FactoryBoy__factory_boy | tests/test_django.py | {
"start": 11593,
"end": 12689
} | class ____(DjangoResetTestCase):
def test_auto_sequence_son(self):
"""The sequence of the concrete son of an abstract model should be autonomous."""
obj = ConcreteSonFactory()
self.assertEqual(1, obj.pk)
def test_auto_sequence_grandson(self):
"""The sequence of the concrete grandson of an abstract model should be autonomous."""
obj = ConcreteGrandSonFactory()
self.assertEqual(1, obj.pk)
def test_optional_abstract(self):
"""Users need not describe the factory for an abstract model as abstract."""
class AbstractBaseFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.AbstractBase
foo = factory.Sequence(lambda n: "foo%d" % n)
class ConcreteSonFactory(AbstractBaseFactory):
class Meta:
model = models.ConcreteSon
self.reset_database_sequences(models.ConcreteSon)
obj = ConcreteSonFactory()
self.assertEqual(1, obj.pk)
self.assertEqual("foo0", obj.foo)
| DjangoAbstractBaseSequenceTestCase |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_theme03.py | {
"start": 315,
"end": 1731
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("theme03.xlsx")
def test_create_file(self):
"""Test the addition of a theme file."""
workbook = Workbook(
self.got_filename,
{
"default_format_properties": {
"font_name": "Aptos Narrow",
"font_size": 11,
"font_scheme": "minor",
},
"default_row_height": 20,
"default_column_width": 64,
},
)
workbook.use_custom_theme(self.theme_dir + "office_2023.xml")
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [55993088, 55995008]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | getsentry__sentry | src/sentry/auth_v2/endpoints/csrf.py | {
"start": 703,
"end": 2889
} | class ____(Endpoint):
"""
NOTE: This endpoint is not protected by the feature flag in AuthV2Endpoint!
"""
owner = ApiOwner.ENTERPRISE
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
"PUT": ApiPublishStatus.EXPERIMENTAL,
}
permission_classes = ()
enforce_rate_limit = True
rate_limits = RateLimitConfig(
limit_overrides={
"GET": {
RateLimitCategory.USER: RateLimit(limit=10, window=60), # 10 per minute per user
RateLimitCategory.IP: RateLimit(limit=20, window=60), # 20 per minute per IP
},
"PUT": {
RateLimitCategory.USER: RateLimit(limit=10, window=60), # 10 per minute per user
RateLimitCategory.IP: RateLimit(limit=20, window=60), # 20 per minute per IP
},
}
)
@extend_schema(
operation_id="Retrieve the CSRF token in your session",
parameters=[],
responses={
"detail": "string",
"session": SessionSerializer,
},
)
@method_decorator(ensure_csrf_cookie)
def get(self, request, *args, **kwargs):
return self.respond(
{
"detail": "Set CSRF cookie",
"session": SessionSerializer().serialize(request, {}, request.user),
},
status=status.HTTP_200_OK,
)
@extend_schema(
operation_id="Rotate the CSRF token in your session",
parameters=[],
responses={
"detail": "string",
"session": SessionSerializer,
},
)
@method_decorator(ensure_csrf_cookie)
def put(self, request, *args, **kwargs):
rotate_token(request)
if referrer := request.GET.get("referrer"):
analytics.record(
AuthV2CsrfTokenRotated(
event=referrer,
)
)
return self.respond(
{
"detail": "Rotated CSRF cookie",
"session": SessionSerializer().serialize(request, {}, request.user),
},
status=status.HTTP_200_OK,
)
| CsrfTokenEndpoint |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol33.py | {
"start": 309,
"end": 454
} | class ____(Generic[T, U]):
def f(self) -> T | U:
raise NotImplementedError
def g(self) -> AProto[T, U]:
return A[T, U]()
| A |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 5735,
"end": 5957
} | class ____(PrefectBaseModel):
"""Filter by `FlowRun.flow_version`."""
any_: Optional[List[str]] = Field(
default=None, description="A list of flow run flow_versions to include"
)
| FlowRunFilterFlowVersion |
python | kamyu104__LeetCode-Solutions | Python/equal-score-substrings.py | {
"start": 42,
"end": 380
} | class ____(object):
def scoreBalance(self, s):
"""
:type s: str
:rtype: bool
"""
total = sum(ord(x)-ord('a')+1 for x in s)
prefix = 0
for x in s:
prefix += ord(x)-ord('a')+1
if prefix == total-prefix:
return True
return False
| Solution |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 609782,
"end": 610351
} | class ____(CoerceToBooleanNode):
"""Special 'CoerceToBooleanNode' for C string arguments which checks the pointer
and additionally that the C string is non-empty.
"""
def __init__(self, arg, env):
assert arg.type.is_string, arg.type
arg = arg.coerce_to_simple(env)
super().__init__(arg, env)
def calculate_result_code(self):
arg = self.arg.result()
if self.arg.type.is_array:
return f"(({arg})[0] != 0)"
else:
return f"({arg} != 0 && ({arg})[0] != 0)"
| CoerceCStringToBooleanNode |
python | kamyu104__LeetCode-Solutions | Python/count-beautiful-substrings-ii.py | {
"start": 100,
"end": 947
} | class ____(object):
def beautifulSubstrings(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
VOWELS = set("aeiou")
prefix = [0]*(len(s)+1)
for i in xrange(len(s)):
prefix[i+1] = prefix[i]+(+1 if s[i] in VOWELS else -1)
new_k = 1
x = k
for i in xrange(2, k+1):
if i*i > k:
break
cnt = 0
while x%i == 0:
x //= i
cnt += 1
if cnt:
new_k *= i**((cnt+1)//2+int(i == 2))
if x != 1:
new_k *= x**((1+1)//2+int(x == 2))
cnt = collections.Counter()
result = 0
for i, p in enumerate(prefix):
result += cnt[p, i%new_k]
cnt[p, i%new_k] += 1
return result
| Solution |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/update/tutorial002.py | {
"start": 296,
"end": 429
} | class ____(HeroBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
hashed_password: str = Field()
| Hero |
python | bokeh__bokeh | src/bokeh/models/labeling.py | {
"start": 1750,
"end": 1990
} | class ____(LabelingPolicy):
""" Select all labels even if they overlap. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| AllLabels |
python | OmkarPathak__pygorithm | pygorithm/geometry/rect_broad_phase.py | {
"start": 205,
"end": 358
} | class ____:
"""Coord
Class to initialize Coordinate of one point
"""
def __init__(self, x, y):
self.x = x
self.y = y
| Coord |
python | sympy__sympy | sympy/series/formal.py | {
"start": 25824,
"end": 40905
} | class ____(SeriesBase):
"""
Represents Formal Power Series of a function.
Explanation
===========
No computation is performed. This class should only to be used to represent
a series. No checks are performed.
For computing a series use :func:`fps`.
See Also
========
sympy.series.formal.fps
"""
def __new__(cls, *args):
args = map(sympify, args)
return Expr.__new__(cls, *args)
def __init__(self, *args):
ak = args[4][0]
k = ak.variables[0]
self.ak_seq = sequence(ak.formula, (k, 1, oo))
self.fact_seq = sequence(factorial(k), (k, 1, oo))
self.bell_coeff_seq = self.ak_seq * self.fact_seq
self.sign_seq = sequence((-1, 1), (k, 1, oo))
@property
def function(self):
return self.args[0]
@property
def x(self):
return self.args[1]
@property
def x0(self):
return self.args[2]
@property
def dir(self):
return self.args[3]
@property
def ak(self):
return self.args[4][0]
@property
def xk(self):
return self.args[4][1]
@property
def ind(self):
return self.args[4][2]
@property
def interval(self):
return Interval(0, oo)
@property
def start(self):
return self.interval.inf
@property
def stop(self):
return self.interval.sup
@property
def length(self):
return oo
@property
def infinite(self):
"""Returns an infinite representation of the series"""
from sympy.concrete import Sum
ak, xk = self.ak, self.xk
k = ak.variables[0]
inf_sum = Sum(ak.formula * xk.formula, (k, ak.start, ak.stop))
return self.ind + inf_sum
def _get_pow_x(self, term):
"""Returns the power of x in a term."""
xterm, pow_x = term.as_independent(self.x)[1].as_base_exp()
if not xterm.has(self.x):
return S.Zero
return pow_x
def polynomial(self, n=6):
"""
Truncated series as polynomial.
Explanation
===========
Returns series expansion of ``f`` upto order ``O(x**n)``
as a polynomial(without ``O`` term).
"""
terms = []
sym = self.free_symbols
for i, t in enumerate(self):
xp = self._get_pow_x(t)
if xp.has(*sym):
xp = xp.as_coeff_add(*sym)[0]
if xp >= n:
break
elif xp.is_integer is True and i == n + 1:
break
elif t is not S.Zero:
terms.append(t)
return Add(*terms)
def truncate(self, n=6):
"""
Truncated series.
Explanation
===========
Returns truncated series expansion of f upto
order ``O(x**n)``.
If n is ``None``, returns an infinite iterator.
"""
if n is None:
return iter(self)
x, x0 = self.x, self.x0
pt_xk = self.xk.coeff(n)
if x0 is S.NegativeInfinity:
x0 = S.Infinity
return self.polynomial(n) + Order(pt_xk, (x, x0))
def zero_coeff(self):
return self._eval_term(0)
def _eval_term(self, pt):
try:
pt_xk = self.xk.coeff(pt)
pt_ak = self.ak.coeff(pt).simplify() # Simplify the coefficients
except IndexError:
term = S.Zero
else:
term = (pt_ak * pt_xk)
if self.ind:
ind = S.Zero
sym = self.free_symbols
for t in Add.make_args(self.ind):
pow_x = self._get_pow_x(t)
if pow_x.has(*sym):
pow_x = pow_x.as_coeff_add(*sym)[0]
if pt == 0 and pow_x < 1:
ind += t
elif pow_x >= pt and pow_x < pt + 1:
ind += t
term += ind
return term.collect(self.x)
def _eval_subs(self, old, new):
x = self.x
if old.has(x):
return self
def _eval_as_leading_term(self, x, logx, cdir):
for t in self:
if t is not S.Zero:
return t
def _eval_derivative(self, x):
f = self.function.diff(x)
ind = self.ind.diff(x)
pow_xk = self._get_pow_x(self.xk.formula)
ak = self.ak
k = ak.variables[0]
if ak.formula.has(x):
form = []
for e, c in ak.formula.args:
temp = S.Zero
for t in Add.make_args(e):
pow_x = self._get_pow_x(t)
temp += t * (pow_xk + pow_x)
form.append((temp, c))
form = Piecewise(*form)
ak = sequence(form.subs(k, k + 1), (k, ak.start - 1, ak.stop))
else:
ak = sequence((ak.formula * pow_xk).subs(k, k + 1),
(k, ak.start - 1, ak.stop))
return self.func(f, self.x, self.x0, self.dir, (ak, self.xk, ind))
def integrate(self, x=None, **kwargs):
"""
Integrate Formal Power Series.
Examples
========
>>> from sympy import fps, sin, integrate
>>> from sympy.abc import x
>>> f = fps(sin(x))
>>> f.integrate(x).truncate()
-1 + x**2/2 - x**4/24 + O(x**6)
>>> integrate(f, (x, 0, 1))
1 - cos(1)
"""
from sympy.integrals import integrate
if x is None:
x = self.x
elif iterable(x):
return integrate(self.function, x)
f = integrate(self.function, x)
ind = integrate(self.ind, x)
ind += (f - ind).limit(x, 0) # constant of integration
pow_xk = self._get_pow_x(self.xk.formula)
ak = self.ak
k = ak.variables[0]
if ak.formula.has(x):
form = []
for e, c in ak.formula.args:
temp = S.Zero
for t in Add.make_args(e):
pow_x = self._get_pow_x(t)
temp += t / (pow_xk + pow_x + 1)
form.append((temp, c))
form = Piecewise(*form)
ak = sequence(form.subs(k, k - 1), (k, ak.start + 1, ak.stop))
else:
ak = sequence((ak.formula / (pow_xk + 1)).subs(k, k - 1),
(k, ak.start + 1, ak.stop))
return self.func(f, self.x, self.x0, self.dir, (ak, self.xk, ind))
def product(self, other, x=None, n=6):
"""
Multiplies two Formal Power Series, using discrete convolution and
return the truncated terms upto specified order.
Parameters
==========
n : Number, optional
Specifies the order of the term up to which the polynomial should
be truncated.
Examples
========
>>> from sympy import fps, sin, exp
>>> from sympy.abc import x
>>> f1 = fps(sin(x))
>>> f2 = fps(exp(x))
>>> f1.product(f2, x).truncate(4)
x + x**2 + x**3/3 + O(x**4)
See Also
========
sympy.discrete.convolutions
sympy.series.formal.FormalPowerSeriesProduct
"""
if n is None:
return iter(self)
other = sympify(other)
if not isinstance(other, FormalPowerSeries):
raise ValueError("Both series should be an instance of FormalPowerSeries"
" class.")
if self.dir != other.dir:
raise ValueError("Both series should be calculated from the"
" same direction.")
elif self.x0 != other.x0:
raise ValueError("Both series should be calculated about the"
" same point.")
elif self.x != other.x:
raise ValueError("Both series should have the same symbol.")
return FormalPowerSeriesProduct(self, other)
def coeff_bell(self, n):
r"""
self.coeff_bell(n) returns a sequence of Bell polynomials of the second kind.
Note that ``n`` should be a integer.
The second kind of Bell polynomials (are sometimes called "partial" Bell
polynomials or incomplete Bell polynomials) are defined as
.. math::
B_{n,k}(x_1, x_2,\dotsc x_{n-k+1}) =
\sum_{j_1+j_2+j_2+\dotsb=k \atop j_1+2j_2+3j_2+\dotsb=n}
\frac{n!}{j_1!j_2!\dotsb j_{n-k+1}!}
\left(\frac{x_1}{1!} \right)^{j_1}
\left(\frac{x_2}{2!} \right)^{j_2} \dotsb
\left(\frac{x_{n-k+1}}{(n-k+1)!} \right) ^{j_{n-k+1}}.
* ``bell(n, k, (x1, x2, ...))`` gives Bell polynomials of the second kind,
`B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})`.
See Also
========
sympy.functions.combinatorial.numbers.bell
"""
inner_coeffs = [bell(n, j, tuple(self.bell_coeff_seq[:n-j+1])) for j in range(1, n+1)]
k = Dummy('k')
return sequence(tuple(inner_coeffs), (k, 1, oo))
def compose(self, other, x=None, n=6):
r"""
Returns the truncated terms of the formal power series of the composed function,
up to specified ``n``.
Explanation
===========
If ``f`` and ``g`` are two formal power series of two different functions,
then the coefficient sequence ``ak`` of the composed formal power series `fp`
will be as follows.
.. math::
\sum\limits_{k=0}^{n} b_k B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})
Parameters
==========
n : Number, optional
Specifies the order of the term up to which the polynomial should
be truncated.
Examples
========
>>> from sympy import fps, sin, exp
>>> from sympy.abc import x
>>> f1 = fps(exp(x))
>>> f2 = fps(sin(x))
>>> f1.compose(f2, x).truncate()
1 + x + x**2/2 - x**4/8 - x**5/15 + O(x**6)
>>> f1.compose(f2, x).truncate(8)
1 + x + x**2/2 - x**4/8 - x**5/15 - x**6/240 + x**7/90 + O(x**8)
See Also
========
sympy.functions.combinatorial.numbers.bell
sympy.series.formal.FormalPowerSeriesCompose
References
==========
.. [1] Comtet, Louis: Advanced combinatorics; the art of finite and infinite expansions. Reidel, 1974.
"""
if n is None:
return iter(self)
other = sympify(other)
if not isinstance(other, FormalPowerSeries):
raise ValueError("Both series should be an instance of FormalPowerSeries"
" class.")
if self.dir != other.dir:
raise ValueError("Both series should be calculated from the"
" same direction.")
elif self.x0 != other.x0:
raise ValueError("Both series should be calculated about the"
" same point.")
elif self.x != other.x:
raise ValueError("Both series should have the same symbol.")
if other._eval_term(0).as_coeff_mul(other.x)[0] is not S.Zero:
raise ValueError("The formal power series of the inner function should not have any "
"constant coefficient term.")
return FormalPowerSeriesCompose(self, other)
def inverse(self, x=None, n=6):
r"""
Returns the truncated terms of the inverse of the formal power series,
up to specified ``n``.
Explanation
===========
If ``f`` and ``g`` are two formal power series of two different functions,
then the coefficient sequence ``ak`` of the composed formal power series ``fp``
will be as follows.
.. math::
\sum\limits_{k=0}^{n} (-1)^{k} x_0^{-k-1} B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})
Parameters
==========
n : Number, optional
Specifies the order of the term up to which the polynomial should
be truncated.
Examples
========
>>> from sympy import fps, exp, cos
>>> from sympy.abc import x
>>> f1 = fps(exp(x))
>>> f2 = fps(cos(x))
>>> f1.inverse(x).truncate()
1 - x + x**2/2 - x**3/6 + x**4/24 - x**5/120 + O(x**6)
>>> f2.inverse(x).truncate(8)
1 + x**2/2 + 5*x**4/24 + 61*x**6/720 + O(x**8)
See Also
========
sympy.functions.combinatorial.numbers.bell
sympy.series.formal.FormalPowerSeriesInverse
References
==========
.. [1] Comtet, Louis: Advanced combinatorics; the art of finite and infinite expansions. Reidel, 1974.
"""
if n is None:
return iter(self)
if self._eval_term(0).is_zero:
raise ValueError("Constant coefficient should exist for an inverse of a formal"
" power series to exist.")
return FormalPowerSeriesInverse(self)
def __add__(self, other):
other = sympify(other)
if isinstance(other, FormalPowerSeries):
if self.dir != other.dir:
raise ValueError("Both series should be calculated from the"
" same direction.")
elif self.x0 != other.x0:
raise ValueError("Both series should be calculated about the"
" same point.")
x, y = self.x, other.x
f = self.function + other.function.subs(y, x)
if self.x not in f.free_symbols:
return f
ak = self.ak + other.ak
if self.ak.start > other.ak.start:
seq = other.ak
s, e = other.ak.start, self.ak.start
else:
seq = self.ak
s, e = self.ak.start, other.ak.start
save = Add(*[z[0]*z[1] for z in zip(seq[0:(e - s)], self.xk[s:e])])
ind = self.ind + other.ind + save
return self.func(f, x, self.x0, self.dir, (ak, self.xk, ind))
elif not other.has(self.x):
f = self.function + other
ind = self.ind + other
return self.func(f, self.x, self.x0, self.dir,
(self.ak, self.xk, ind))
return Add(self, other)
def __radd__(self, other):
return self.__add__(other)
def __neg__(self):
return self.func(-self.function, self.x, self.x0, self.dir,
(-self.ak, self.xk, -self.ind))
def __sub__(self, other):
return self.__add__(-other)
def __rsub__(self, other):
return (-self).__add__(other)
def __mul__(self, other):
other = sympify(other)
if other.has(self.x):
return Mul(self, other)
f = self.function * other
ak = self.ak.coeff_mul(other)
ind = self.ind * other
return self.func(f, self.x, self.x0, self.dir, (ak, self.xk, ind))
def __rmul__(self, other):
return self.__mul__(other)
| FormalPowerSeries |
python | scipy__scipy | benchmarks/benchmarks/io_matlab.py | {
"start": 242,
"end": 2260
} | class ____(Benchmark):
param_names = ['size', 'compressed']
timeout = 4*60
unit = "actual/optimal memory usage ratio"
@property
def params(self):
return [list(self._get_sizes().keys()), [True, False]]
def _get_sizes(self):
sizes = {
'1M': 1e6,
'10M': 10e6,
'100M': 100e6,
'300M': 300e6,
# '500M': 500e6,
# '1000M': 1000e6,
}
return sizes
def setup(self, size, compressed):
set_mem_rlimit()
self.sizes = self._get_sizes()
size = int(self.sizes[size])
mem_info = get_mem_info()
try:
mem_available = mem_info['memavailable']
except KeyError:
mem_available = mem_info['memtotal']
max_size = int(mem_available * 0.7)//4
if size > max_size:
raise NotImplementedError()
# Setup temp file
f = tempfile.NamedTemporaryFile(delete=False, suffix='.mat')
f.close()
self.filename = f.name
def teardown(self, size, compressed):
os.unlink(self.filename)
def track_loadmat(self, size, compressed):
size = int(self.sizes[size])
x = np.random.rand(size//8).view(dtype=np.uint8)
savemat(self.filename, dict(x=x), do_compression=compressed, oned_as='row')
del x
code = f"""
from scipy.io import loadmat
loadmat('{self.filename}')
"""
time, peak_mem = run_monitored(code)
return peak_mem / size
def track_savemat(self, size, compressed):
size = int(self.sizes[size])
code = f"""
import numpy as np
from scipy.io import savemat
x = np.random.rand({size}//8).view(dtype=np.uint8)
savemat(
'{self.filename}',
dict(x=x),
do_compression={compressed},
oned_as='row'
)
"""
time, peak_mem = run_monitored(code)
return peak_mem / size
| MemUsage |
python | pallets__werkzeug | tests/test_datastructures.py | {
"start": 32634,
"end": 33355
} | class ____:
def test_construct(self):
csp = ds.ContentSecurityPolicy([("font-src", "'self'"), ("media-src", "*")])
assert csp.font_src == "'self'"
assert csp.media_src == "*"
policies = [policy.strip() for policy in csp.to_header().split(";")]
assert "font-src 'self'" in policies
assert "media-src *" in policies
def test_properties(self):
csp = ds.ContentSecurityPolicy()
csp.default_src = "* 'self' quart.com"
csp.img_src = "'none'"
policies = [policy.strip() for policy in csp.to_header().split(";")]
assert "default-src * 'self' quart.com" in policies
assert "img-src 'none'" in policies
| TestContentSecurityPolicy |
python | pypa__packaging | src/packaging/_tokenizer.py | {
"start": 245,
"end": 2165
} | class ____(Exception):
"""The provided source text could not be parsed correctly."""
def __init__(
self,
message: str,
*,
source: str,
span: tuple[int, int],
) -> None:
self.span = span
self.message = message
self.source = source
super().__init__()
def __str__(self) -> str:
marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^"
return f"{self.message}\n {self.source}\n {marker}"
DEFAULT_RULES: dict[str, str | re.Pattern[str]] = {
"LEFT_PARENTHESIS": r"\(",
"RIGHT_PARENTHESIS": r"\)",
"LEFT_BRACKET": r"\[",
"RIGHT_BRACKET": r"\]",
"SEMICOLON": r";",
"COMMA": r",",
"QUOTED_STRING": re.compile(
r"""
(
('[^']*')
|
("[^"]*")
)
""",
re.VERBOSE,
),
"OP": r"(===|==|~=|!=|<=|>=|<|>)",
"BOOLOP": r"\b(or|and)\b",
"IN": r"\bin\b",
"NOT": r"\bnot\b",
"VARIABLE": re.compile(
r"""
\b(
python_version
|python_full_version
|os[._]name
|sys[._]platform
|platform_(release|system)
|platform[._](version|machine|python_implementation)
|python_implementation
|implementation_(name|version)
|extras?
|dependency_groups
)\b
""",
re.VERBOSE,
),
"SPECIFIER": re.compile(
Specifier._operator_regex_str + Specifier._version_regex_str,
re.VERBOSE | re.IGNORECASE,
),
"AT": r"\@",
"URL": r"[^ \t]+",
"IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
"VERSION_PREFIX_TRAIL": r"\.\*",
"VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*",
"WS": r"[ \t]+",
"END": r"$",
}
| ParserSyntaxError |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/data_version.py | {
"start": 1232,
"end": 1638
} | class ____(
NamedTuple(
"_DataVersion",
[("value", str)],
)
):
"""Represents a data version for an asset.
Args:
value (str): An arbitrary string representing a data version.
"""
def __new__(
cls,
value: str,
):
return super().__new__(
cls,
value=check.str_param(value, "value"),
)
@beta
| DataVersion |
python | fastapi__sqlmodel | docs_src/tutorial/relationship_attributes/cascade_delete_relationships/tutorial001_py310.py | {
"start": 321,
"end": 3337
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
team_id: int | None = Field(default=None, foreign_key="team.id", ondelete="CASCADE")
team: Team | None = Relationship(back_populates="heroes")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team=team_z_force
)
hero_rusty_man = Hero(
name="Rusty-Man", secret_name="Tommy Sharp", age=48, team=team_preventers
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
hero_spider_boy.team = team_preventers
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_spider_boy)
print("Updated hero:", hero_spider_boy)
hero_black_lion = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_sure_e = Hero(name="Princess Sure-E", secret_name="Sure-E")
team_wakaland = Team(
name="Wakaland",
headquarters="Wakaland Capital City",
heroes=[hero_black_lion, hero_sure_e],
)
session.add(team_wakaland)
session.commit()
session.refresh(team_wakaland)
print("Team Wakaland:", team_wakaland)
def delete_team():
with Session(engine) as session:
statement = select(Team).where(Team.name == "Wakaland")
team = session.exec(statement).one()
session.delete(team)
session.commit()
print("Deleted team:", team)
def select_deleted_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.name == "Black Lion")
result = session.exec(statement)
hero = result.first()
print("Black Lion not found:", hero)
statement = select(Hero).where(Hero.name == "Princess Sure-E")
result = session.exec(statement)
hero = result.first()
print("Princess Sure-E not found:", hero)
def main():
create_db_and_tables()
create_heroes()
delete_team()
select_deleted_heroes()
if __name__ == "__main__":
main()
| Hero |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/test_post_votes.py | {
"start": 6825,
"end": 13249
} | class ____(TestCase):
@property
def _config(self):
return (
ConfigBuilder()
.with_basic_auth_credentials("user@example.com", "password")
.with_subdomain("d3v-airbyte")
.with_start_date(ab_datetime_now().subtract(timedelta(weeks=104)))
.build()
)
def _get_authenticator(self, config):
return ApiTokenAuthenticator(email=config["credentials"]["email"], password=config["credentials"]["api_token"])
@HttpMocker()
def test_given_no_state_and_successful_sync_when_read_then_set_state_to_now(self, http_mocker):
"""
A normal incremental sync without pagination
"""
api_token_authenticator = self._get_authenticator(self._config)
# todo: Add this back once the CDK supports conditional streams on an endpoint
# _ = given_ticket_forms(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
posts_record_builder = given_posts(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
post = posts_record_builder.build()
post_votes_record_builder = PostsVotesRecordBuilder.posts_votes_record()
http_mocker.get(
PostsVotesRequestBuilder.posts_votes_endpoint(api_token_authenticator, post["id"])
.with_start_time(self._config["start_date"])
.with_page_size(100)
.build(),
PostsVotesResponseBuilder.posts_votes_response().with_record(post_votes_record_builder).build(),
)
output = read_stream("post_votes", SyncMode.incremental, self._config)
assert len(output.records) == 1
post_vote = post_votes_record_builder.build()
assert output.most_recent_state.stream_descriptor.name == "post_votes"
post_comments_state_value = str(int(string_to_datetime(post_vote["updated_at"]).timestamp()))
assert (
output.most_recent_state.stream_state
== AirbyteStateBlob(
{
"lookback_window": 0,
"parent_state": {
"posts": {"updated_at": post["updated_at"]}
}, # note that this state does not have the concurrent format because SubstreamPartitionRouter is still relying on the declarative cursor
"state": {"updated_at": post_comments_state_value},
"states": [
{
"partition": {
"parent_slice": {},
"post_id": post["id"],
},
"cursor": {
"updated_at": post_comments_state_value,
},
}
],
"use_global_cursor": False,
}
)
)
@HttpMocker()
def test_given_state_and_pagination_when_read_then_return_records(self, http_mocker):
"""
A normal incremental sync with state and pagination
"""
api_token_authenticator = self._get_authenticator(self._config)
state_start_date = ab_datetime_parse(self._config["start_date"]).add(timedelta(weeks=52))
first_page_record_updated_at = state_start_date.add(timedelta(weeks=4))
last_page_record_updated_at = first_page_record_updated_at.add(timedelta(weeks=8))
state = {"updated_at": datetime_to_string(state_start_date)}
posts_record_builder = given_posts(http_mocker, state_start_date, api_token_authenticator)
post = posts_record_builder.build()
post_votes_first_record_builder = PostsVotesRecordBuilder.posts_votes_record().with_field(
FieldPath("updated_at"), datetime_to_string(first_page_record_updated_at)
)
# Read first page request mock
http_mocker.get(
PostsVotesRequestBuilder.posts_votes_endpoint(api_token_authenticator, post["id"])
.with_start_time(datetime_to_string(state_start_date))
.with_page_size(100)
.build(),
PostsVotesResponseBuilder.posts_votes_response(
PostsVotesRequestBuilder.posts_votes_endpoint(api_token_authenticator, post["id"]).with_page_size(100).build()
)
.with_pagination()
.with_record(post_votes_first_record_builder)
.build(),
)
post_votes_last_record_builder = (
PostsVotesRecordBuilder.posts_votes_record()
.with_id("last_record_id_from_last_page")
.with_field(FieldPath("updated_at"), datetime_to_string(last_page_record_updated_at))
)
# Read second page request mock
http_mocker.get(
PostsVotesRequestBuilder.posts_votes_endpoint(api_token_authenticator, post["id"])
.with_page_after("after-cursor")
.with_page_size(100)
.build(),
PostsVotesResponseBuilder.posts_votes_response().with_record(post_votes_last_record_builder).build(),
)
output = read_stream(
"post_votes", SyncMode.incremental, self._config, StateBuilder().with_stream_state("post_votes", state).build()
)
assert len(output.records) == 2
assert output.most_recent_state.stream_descriptor.name == "post_votes"
post_comments_state_value = str(int(last_page_record_updated_at.timestamp()))
assert output.most_recent_state.stream_state == AirbyteStateBlob(
{
"lookback_window": 0,
"parent_state": {"posts": {"updated_at": post["updated_at"]}},
# note that this state does not have the concurrent format because SubstreamPartitionRouter is still relying on the declarative cursor
"state": {"updated_at": post_comments_state_value},
"states": [
{
"partition": {
"parent_slice": {},
"post_id": post["id"],
},
"cursor": {
"updated_at": post_comments_state_value,
},
}
],
"use_global_cursor": False,
}
)
| TestPostsVotesStreamIncremental |
python | huggingface__transformers | src/transformers/models/longt5/modeling_longt5.py | {
"start": 54883,
"end": 59458
} | class ____(PreTrainedModel):
config: LongT5Config
base_model_prefix = "transformer"
supports_gradient_checkpointing = True
_no_split_modules = ["LongT5Block"]
_can_compile_fullgraph = False # TODO: @raushan more involved due to local/global attn
@property
# Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {
"decoder_input_ids": input_ids,
"input_ids": input_ids,
"decoder_attention_mask": input_mask,
}
return dummy_inputs
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor # Used for testing weights initialization
if isinstance(module, LongT5LayerNorm):
init.constant_(module.weight, factor * 1.0)
elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)):
init.normal_(module.shared.weight, mean=0.0, std=factor * 1.0)
if hasattr(module, "lm_head") and not self.config.tie_word_embeddings:
init.normal_(module.lm_head.weight, mean=0.0, std=factor * 1.0)
elif isinstance(module, LongT5DenseActDense):
init.normal_(module.wi.weight, mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi, "bias") and module.wi.bias is not None:
init.zeros_(module.wi.bias)
init.normal_(module.wo.weight, mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
init.zeros_(module.wo.bias)
elif isinstance(module, LongT5DenseGatedActDense):
init.normal_(module.wi_0.weight, mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
init.zeros_(module.wi_0.bias)
init.normal_(module.wi_1.weight, mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
init.zeros_(module.wi_1.bias)
init.normal_(module.wo.weight, mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
init.zeros_(module.wo.bias)
elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)):
d_model = self.config.d_model
key_value_proj_dim = self.config.d_kv
n_heads = self.config.num_heads
init.normal_(module.q.weight, mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
init.normal_(module.k.weight, mean=0.0, std=factor * (d_model**-0.5))
init.normal_(module.v.weight, mean=0.0, std=factor * (d_model**-0.5))
init.normal_(module.o.weight, mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
if module.has_relative_attention_bias:
init.normal_(module.relative_attention_bias.weight, mean=0.0, std=factor * ((d_model) ** -0.5))
if isinstance(module, LongT5TransientGlobalAttention):
init.normal_(
module.global_relative_attention_bias.weight, mean=0.0, std=factor * ((d_model) ** -0.5)
)
# Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
if decoder_start_token_id is None:
raise ValueError(
"self.model.config.decoder_start_token_id has to be defined. In LongT5 it is usually set to the pad_token_id. "
"See LongT5 docs for more information."
)
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("self.model.config.pad_token_id has to be defined.")
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
| LongT5PreTrainedModel |
python | boto__boto3 | tests/unit/test_crt.py | {
"start": 2429,
"end": 7756
} | class ____:
@requires_crt()
def test_create_crt_transfer_manager_with_lock_in_use(
self,
mock_crt_process_lock,
mock_crt_client_singleton,
mock_serializer_singleton,
):
mock_crt_process_lock.return_value.acquire.side_effect = RuntimeError
# Verify we can't create a second CRT client
tm = boto3.crt.create_crt_transfer_manager(USW2_S3_CLIENT, None)
assert tm is None
@requires_crt()
def test_create_crt_transfer_manager(
self,
mock_crt_process_lock,
mock_crt_client_singleton,
mock_serializer_singleton,
):
tm = boto3.crt.create_crt_transfer_manager(USW2_S3_CLIENT, None)
assert isinstance(tm, s3transfer.crt.CRTTransferManager)
@requires_crt()
def test_crt_singleton_is_returned_every_call(
self,
mock_crt_process_lock,
mock_crt_client_singleton,
mock_serializer_singleton,
):
first_s3_client = boto3.crt.get_crt_s3_client(USW2_S3_CLIENT, None)
second_s3_client = boto3.crt.get_crt_s3_client(USW2_S3_CLIENT, None)
assert isinstance(first_s3_client, boto3.crt.CRTS3Client)
assert first_s3_client is second_s3_client
assert first_s3_client.crt_client is second_s3_client.crt_client
@requires_crt()
def test_create_crt_transfer_manager_w_client_in_wrong_region(
self,
mock_crt_process_lock,
mock_crt_client_singleton,
mock_serializer_singleton,
):
"""Ensure we don't return the crt transfer manager if client is in
different region. The CRT isn't able to handle region redirects and
will consistently fail.
We can remove this test once we have this fixed on the CRT side.
"""
usw2_s3_client = boto3.crt.create_crt_transfer_manager(
USW2_S3_CLIENT, None
)
assert isinstance(usw2_s3_client, boto3.crt.CRTTransferManager)
use1_s3_client = boto3.crt.create_crt_transfer_manager(
USE1_S3_CLIENT, None
)
assert use1_s3_client is None
@pytest.mark.parametrize(
"boto3_tuple,crt_tuple,matching",
(
(
("access", "secret", "token"),
("access", "secret", "token"),
True,
),
(
("access", "secret", "token"),
("noaccess", "secret", "token"),
False,
),
(
("access", "secret", "token"),
("access", "nosecret", "token"),
False,
),
(
("access", "secret", "token"),
("access", "secret", "notoken"),
False,
),
),
)
@requires_crt()
def test_compare_identities(self, boto3_tuple, crt_tuple, matching):
boto3_creds = Credentials(*boto3_tuple)
crt_creds = Credentials(*crt_tuple)
crt_creds_wrapper = BotocoreCRTCredentialsWrapper(crt_creds)
assert (
boto3.crt.compare_identity(boto3_creds, crt_creds_wrapper)
is matching
)
@requires_crt()
def test_compare_idenities_no_credentials(self):
def no_credentials():
raise botocore.exceptions.NoCredentialsError()
boto3_creds = Credentials("access", "secret", "token")
crt_creds_wrapper = no_credentials
assert (
boto3.crt.compare_identity(boto3_creds, crt_creds_wrapper) is False
)
@requires_crt()
def test_get_crt_s3_client(
self,
mock_crt_process_lock,
mock_crt_client_singleton,
mock_serializer_singleton,
):
config = TransferConfig()
crt_s3_client = boto3.crt.get_crt_s3_client(USW2_S3_CLIENT, config)
assert isinstance(crt_s3_client, boto3.crt.CRTS3Client)
assert isinstance(crt_s3_client.process_lock, CrossProcessLockClass)
assert crt_s3_client.region == "us-west-2"
assert isinstance(
crt_s3_client.cred_provider, BotocoreCRTCredentialsWrapper
)
@requires_crt()
def test_get_crt_s3_client_w_wrong_region(
self,
mock_crt_process_lock,
mock_crt_client_singleton,
mock_serializer_singleton,
):
config = TransferConfig()
crt_s3_client = boto3.crt.get_crt_s3_client(USW2_S3_CLIENT, config)
assert isinstance(crt_s3_client, boto3.crt.CRTS3Client)
# Ensure we don't create additional CRT clients
use1_crt_s3_client = boto3.crt.get_crt_s3_client(
USE1_S3_CLIENT, config
)
assert use1_crt_s3_client is crt_s3_client
assert use1_crt_s3_client.region == "us-west-2"
@requires_crt()
@mock.patch('boto3.crt.TRANSFER_CONFIG_SUPPORTS_CRT', False)
def test_config_without_crt_support_emits_warning(
self,
mock_crt_process_lock,
mock_crt_client_singleton,
mock_serializer_singleton,
caplog,
):
config = TransferConfig()
boto3.crt.create_crt_transfer_manager(USW2_S3_CLIENT, config)
assert any(
[
'requires s3transfer >= 0.16.0' in r.message
for r in caplog.records
]
)
| TestCRTTransferManager |
python | redis__redis-py | redis/commands/core.py | {
"start": 114337,
"end": 121559
} | class ____(CommandsProtocol):
"""
Redis SCAN commands.
see: https://redis.io/commands/scan
"""
def scan(
self,
cursor: int = 0,
match: Union[PatternT, None] = None,
count: Optional[int] = None,
_type: Optional[str] = None,
**kwargs,
) -> ResponseT:
"""
Incrementally return lists of key names. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` provides a hint to Redis about the number of keys to
return per batch.
``_type`` filters the returned values by a particular Redis type.
Stock Redis instances allow for the following types:
HASH, LIST, SET, STREAM, STRING, ZSET
Additionally, Redis modules can expose other types as well.
For more information, see https://redis.io/commands/scan
"""
pieces: list[EncodableT] = [cursor]
if match is not None:
pieces.extend([b"MATCH", match])
if count is not None:
pieces.extend([b"COUNT", count])
if _type is not None:
pieces.extend([b"TYPE", _type])
return self.execute_command("SCAN", *pieces, **kwargs)
def scan_iter(
self,
match: Union[PatternT, None] = None,
count: Optional[int] = None,
_type: Optional[str] = None,
**kwargs,
) -> Iterator:
"""
Make an iterator using the SCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` provides a hint to Redis about the number of keys to
return per batch.
``_type`` filters the returned values by a particular Redis type.
Stock Redis instances allow for the following types:
HASH, LIST, SET, STREAM, STRING, ZSET
Additionally, Redis modules can expose other types as well.
"""
cursor = "0"
while cursor != 0:
cursor, data = self.scan(
cursor=cursor, match=match, count=count, _type=_type, **kwargs
)
yield from data
def sscan(
self,
name: KeyT,
cursor: int = 0,
match: Union[PatternT, None] = None,
count: Optional[int] = None,
) -> ResponseT:
"""
Incrementally return lists of elements in a set. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` allows for hint the minimum number of returns
For more information, see https://redis.io/commands/sscan
"""
pieces: list[EncodableT] = [name, cursor]
if match is not None:
pieces.extend([b"MATCH", match])
if count is not None:
pieces.extend([b"COUNT", count])
return self.execute_command("SSCAN", *pieces)
def sscan_iter(
self,
name: KeyT,
match: Union[PatternT, None] = None,
count: Optional[int] = None,
) -> Iterator:
"""
Make an iterator using the SSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` allows for hint the minimum number of returns
"""
cursor = "0"
while cursor != 0:
cursor, data = self.sscan(name, cursor=cursor, match=match, count=count)
yield from data
def hscan(
self,
name: KeyT,
cursor: int = 0,
match: Union[PatternT, None] = None,
count: Optional[int] = None,
no_values: Union[bool, None] = None,
) -> ResponseT:
"""
Incrementally return key/value slices in a hash. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` allows for hint the minimum number of returns
``no_values`` indicates to return only the keys, without values.
For more information, see https://redis.io/commands/hscan
"""
pieces: list[EncodableT] = [name, cursor]
if match is not None:
pieces.extend([b"MATCH", match])
if count is not None:
pieces.extend([b"COUNT", count])
if no_values is not None:
pieces.extend([b"NOVALUES"])
return self.execute_command("HSCAN", *pieces, no_values=no_values)
def hscan_iter(
self,
name: str,
match: Union[PatternT, None] = None,
count: Optional[int] = None,
no_values: Union[bool, None] = None,
) -> Iterator:
"""
Make an iterator using the HSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` allows for hint the minimum number of returns
``no_values`` indicates to return only the keys, without values
"""
cursor = "0"
while cursor != 0:
cursor, data = self.hscan(
name, cursor=cursor, match=match, count=count, no_values=no_values
)
if no_values:
yield from data
else:
yield from data.items()
def zscan(
self,
name: KeyT,
cursor: int = 0,
match: Union[PatternT, None] = None,
count: Optional[int] = None,
score_cast_func: Union[type, Callable] = float,
) -> ResponseT:
"""
Incrementally return lists of elements in a sorted set. Also return a
cursor indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` allows for hint the minimum number of returns
``score_cast_func`` a callable used to cast the score return value
For more information, see https://redis.io/commands/zscan
"""
pieces = [name, cursor]
if match is not None:
pieces.extend([b"MATCH", match])
if count is not None:
pieces.extend([b"COUNT", count])
options = {"score_cast_func": score_cast_func}
return self.execute_command("ZSCAN", *pieces, **options)
def zscan_iter(
self,
name: KeyT,
match: Union[PatternT, None] = None,
count: Optional[int] = None,
score_cast_func: Union[type, Callable] = float,
) -> Iterator:
"""
Make an iterator using the ZSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` allows for hint the minimum number of returns
``score_cast_func`` a callable used to cast the score return value
"""
cursor = "0"
while cursor != 0:
cursor, data = self.zscan(
name,
cursor=cursor,
match=match,
count=count,
score_cast_func=score_cast_func,
)
yield from data
| ScanCommands |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/pythonic_config/typing_utils.py | {
"start": 3573,
"end": 6877
} | class ____(BaseConfigMeta):
"""Custom metaclass for Resource and PartialResource. This metaclass is responsible for
transforming the type annotations on the class so that Pydantic constructor-time validation
does not error when users provide partially configured resources to resource params.
For example, the following code would ordinarily fail Pydantic validation:
.. code-block:: python
class FooResource(ConfigurableResource):
bar: BarResource
# Types as PartialResource[BarResource]
partial_bar = BarResource.configure_at_runtime()
# Pydantic validation fails because bar is not a BarResource
foo = FooResource(bar=partial_bar)
This metaclass transforms the type annotations on the class so that Pydantic validation
accepts either a PartialResource or a Resource as a value for the resource dependency.
"""
def __new__(cls, name, bases, namespaces, **kwargs) -> Any:
from pydantic.fields import FieldInfo
# Gather all type annotations from the class and its base classes
annotations = namespaces.get("__annotations__", {})
for field in annotations:
if not field.startswith("__"):
# Check if the annotation is a ResourceDependency
if (
get_origin(annotations[field])
== LateBoundTypesForResourceTypeChecking.get_resource_rep_type()
):
# arg = get_args(annotations[field])[0]
# If so, we treat it as a Union of a PartialResource and a Resource
# for Pydantic's sake.
annotations[field] = Annotated[Any, "resource_dependency"]
elif safe_is_subclass(
annotations[field], LateBoundTypesForResourceTypeChecking.get_resource_type()
):
# If the annotation is a Resource, we treat it as a Union of a PartialResource
# and a Resource for Pydantic's sake, so that a user can pass in a partially
# configured resource.
base = annotations[field]
annotations[field] = Annotated[
Union[
base,
LateBoundTypesForResourceTypeChecking.get_partial_resource_type(base),
],
"resource_dependency",
]
# Pydantic 2.5.0 changed the default union mode to "smart", which causes
# partial resource initialization to fail, since Pydantic would attempt to
# initialize a PartialResource with parameters which are invalid.
# https://github.com/pydantic/pydantic-core/pull/867
# Here, we explicitly set the union mode to "left_to_right".
# https://docs.pydantic.dev/latest/concepts/unions/#left-to-right-mode
namespaces[field] = FieldInfo(
union_mode="left_to_right", annotation=annotations[field]
)
namespaces["__annotations__"] = annotations
return super().__new__(cls, name, bases, namespaces, **kwargs)
| BaseResourceMeta |
python | sqlalchemy__sqlalchemy | test/orm/test_eager_relations.py | {
"start": 161257,
"end": 165568
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"a_table",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
Table(
"b_table",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("parent_b1_id", Integer, ForeignKey("b_table.id")),
Column("parent_a_id", Integer, ForeignKey("a_table.id")),
Column("parent_b2_id", Integer, ForeignKey("b_table.id")),
)
@classmethod
def setup_mappers(cls):
b_table, a_table = cls.tables.b_table, cls.tables.a_table
class A(cls.Comparable):
pass
class B(cls.Comparable):
pass
cls.mapper_registry.map_imperatively(A, a_table)
cls.mapper_registry.map_imperatively(
B,
b_table,
properties={
"parent_b1": relationship(
B,
remote_side=[b_table.c.id],
primaryjoin=(b_table.c.parent_b1_id == b_table.c.id),
order_by=b_table.c.id,
),
"parent_z": relationship(A, lazy=True),
"parent_b2": relationship(
B,
remote_side=[b_table.c.id],
primaryjoin=(b_table.c.parent_b2_id == b_table.c.id),
order_by=b_table.c.id,
),
},
)
@classmethod
def insert_data(cls, connection):
b_table, a_table = cls.tables.b_table, cls.tables.a_table
connection.execute(
a_table.insert(), [dict(id=1), dict(id=2), dict(id=3)]
)
connection.execute(
b_table.insert(),
[
dict(
id=1, parent_a_id=2, parent_b1_id=None, parent_b2_id=None
),
dict(id=2, parent_a_id=1, parent_b1_id=1, parent_b2_id=None),
dict(id=3, parent_a_id=1, parent_b1_id=1, parent_b2_id=2),
dict(id=4, parent_a_id=3, parent_b1_id=1, parent_b2_id=None),
dict(id=5, parent_a_id=3, parent_b1_id=None, parent_b2_id=2),
dict(id=6, parent_a_id=1, parent_b1_id=1, parent_b2_id=3),
dict(id=7, parent_a_id=2, parent_b1_id=None, parent_b2_id=3),
dict(id=8, parent_a_id=2, parent_b1_id=1, parent_b2_id=2),
dict(
id=9, parent_a_id=None, parent_b1_id=1, parent_b2_id=None
),
dict(id=10, parent_a_id=3, parent_b1_id=7, parent_b2_id=2),
dict(id=11, parent_a_id=3, parent_b1_id=1, parent_b2_id=8),
dict(id=12, parent_a_id=2, parent_b1_id=5, parent_b2_id=2),
dict(id=13, parent_a_id=3, parent_b1_id=4, parent_b2_id=4),
dict(id=14, parent_a_id=3, parent_b1_id=7, parent_b2_id=2),
],
)
def test_eager_load(self):
A, B = self.classes.A, self.classes.B
session = fixture_session()
def go():
eq_(
session.query(B)
.options(
joinedload(B.parent_b1),
joinedload(B.parent_b2),
joinedload(B.parent_z),
)
.filter(B.id.in_([2, 8, 11]))
.order_by(B.id)
.all(),
[
B(
id=2,
parent_z=A(id=1),
parent_b1=B(id=1),
parent_b2=None,
),
B(
id=8,
parent_z=A(id=2),
parent_b1=B(id=1),
parent_b2=B(id=2),
),
B(
id=11,
parent_z=A(id=3),
parent_b1=B(id=1),
parent_b2=B(id=8),
),
],
)
self.assert_sql_count(testing.db, go, 1)
| MixedSelfReferentialEagerTest |
python | run-llama__llama_index | llama-index-instrumentation/tests/test_dispatcher.py | {
"start": 2898,
"end": 35573
} | class ____:
@dispatcher.span
def func(self, a, b=3, **kwargs):
return a + b
@dispatcher.span
async def async_func(self, a, b=3, **kwargs):
return a + b
@dispatcher.span
def func_exc(self, a, b=3, c=4, **kwargs):
raise value_error
@dispatcher.span
async def async_func_exc(self, a, b=3, c=4, **kwargs):
raise cancelled_error
@dispatcher.span
def func_with_event(self, a, b=3, **kwargs):
dispatcher.event(_TestStartEvent())
@dispatcher.span
async def async_func_with_event(self, a, b=3, **kwargs):
dispatcher.event(_TestStartEvent())
await asyncio.sleep(0.1)
await self.async_func(1) # this should create a new span_id
# that is fine because we have dispatch_event
dispatcher.event(_TestEndEvent())
# Can remove this test once dispatcher.get_dispatch_event is safely dopped.
@dispatcher.span
def func_with_event_backwards_compat(self, a, b=3, **kwargs):
dispatch_event = dispatcher.get_dispatch_event()
dispatch_event(_TestStartEvent())
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_dispatcher_span_args(mock_uuid, mock_span_enter, mock_span_exit):
# arrange
mock_uuid.uuid4.return_value = "mock"
# act
result = func(3, c=5)
# assert
# span_enter
span_id = f"{func.__qualname__}-mock"
bound_args = inspect.signature(func).bind(3, c=5)
mock_span_enter.assert_called_once()
args, kwargs = mock_span_enter.call_args
assert args == ()
assert kwargs == {
"id_": span_id,
"bound_args": bound_args,
"instance": None,
"parent_id": None,
"tags": {},
}
# span_exit
args, kwargs = mock_span_exit.call_args
assert args == ()
assert kwargs == {
"id_": span_id,
"bound_args": bound_args,
"instance": None,
"result": result,
}
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_dispatcher_span_args_with_instance(mock_uuid, mock_span_enter, mock_span_exit):
# arrange
mock_uuid.uuid4.return_value = "mock"
# act
instance = _TestObject()
result = instance.func(3, c=5)
# assert
# span_enter
span_id = f"{instance.func.__qualname__}-mock"
bound_args = inspect.signature(instance.func).bind(3, c=5)
mock_span_enter.assert_called_once()
args, kwargs = mock_span_enter.call_args
assert args == ()
assert kwargs == {
"id_": span_id,
"bound_args": bound_args,
"instance": instance,
"parent_id": None,
"tags": {},
}
# span_exit
args, kwargs = mock_span_exit.call_args
assert args == ()
assert kwargs == {
"id_": span_id,
"bound_args": bound_args,
"instance": instance,
"result": result,
}
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_dispatcher_span_drop_args(
mock_uuid: MagicMock,
mock_span_enter: MagicMock,
mock_span_drop: MagicMock,
mock_span_exit: MagicMock,
):
# arrange
mock_uuid.uuid4.return_value = "mock"
instance = _TestObject()
with pytest.raises(ValueError):
_ = instance.func_exc(a=3, b=5, c=2, d=5)
# assert
# span_enter
mock_span_enter.assert_called_once()
# span_drop
mock_span_drop.assert_called_once()
span_id = f"{instance.func_exc.__qualname__}-mock"
bound_args = inspect.signature(instance.func_exc).bind(a=3, b=5, c=2, d=5)
args, kwargs = mock_span_drop.call_args
assert args == ()
assert kwargs == {
"id_": span_id,
"bound_args": bound_args,
"instance": instance,
"err": value_error,
}
# span_exit
mock_span_exit.assert_not_called()
@pytest.mark.asyncio
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
async def test_dispatcher_async_span_args(mock_uuid, mock_span_enter, mock_span_exit):
# arrange
mock_uuid.uuid4.return_value = "mock"
# act
result = await async_func(a=3, c=5)
# assert
# span_enter
span_id = f"{async_func.__qualname__}-mock"
bound_args = inspect.signature(async_func).bind(a=3, c=5)
mock_span_enter.assert_called_once()
args, kwargs = mock_span_enter.call_args
assert args == ()
assert kwargs == {
"id_": span_id,
"bound_args": bound_args,
"instance": None,
"parent_id": None,
"tags": {},
}
# span_exit
args, kwargs = mock_span_exit.call_args
assert args == ()
assert kwargs == {
"id_": span_id,
"bound_args": bound_args,
"instance": None,
"result": result,
}
@pytest.mark.asyncio
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
async def test_dispatcher_async_span_args_with_instance(
mock_uuid, mock_span_enter, mock_span_exit
):
# arrange
mock_uuid.uuid4.return_value = "mock"
# act
instance = _TestObject()
result = await instance.async_func(a=3, c=5)
# assert
# span_enter
span_id = f"{instance.async_func.__qualname__}-mock"
bound_args = inspect.signature(instance.async_func).bind(a=3, c=5)
mock_span_enter.assert_called_once()
args, kwargs = mock_span_enter.call_args
assert args == ()
assert kwargs == {
"id_": span_id,
"bound_args": bound_args,
"instance": instance,
"parent_id": None,
"tags": {},
}
# span_exit
args, kwargs = mock_span_exit.call_args
assert args == ()
assert kwargs == {
"id_": span_id,
"bound_args": bound_args,
"instance": instance,
"result": result,
}
@pytest.mark.asyncio
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
async def test_dispatcher_async_span_drop_args(
mock_uuid: MagicMock,
mock_span_enter: MagicMock,
mock_span_drop: MagicMock,
mock_span_exit: MagicMock,
):
# arrange
mock_uuid.uuid4.return_value = "mock"
with pytest.raises(CancelledError):
# act
_ = await async_func_exc(a=3, b=5, c=2, d=5)
# assert
# span_enter
mock_span_enter.assert_called_once()
# span_drop
mock_span_drop.assert_called_once()
span_id = f"{async_func_exc.__qualname__}-mock"
bound_args = inspect.signature(async_func_exc).bind(a=3, b=5, c=2, d=5)
args, kwargs = mock_span_drop.call_args
assert args == ()
assert kwargs == {
"id_": span_id,
"bound_args": bound_args,
"instance": None,
"err": cancelled_error,
}
# span_exit
mock_span_exit.assert_not_called()
@pytest.mark.asyncio
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
async def test_dispatcher_async_span_drop_args_with_instance(
mock_uuid: MagicMock,
mock_span_enter: MagicMock,
mock_span_drop: MagicMock,
mock_span_exit: MagicMock,
):
# arrange
mock_uuid.uuid4.return_value = "mock"
instance = _TestObject()
with pytest.raises(CancelledError):
_ = await instance.async_func_exc(a=3, b=5, c=2, d=5)
# assert
# span_enter
mock_span_enter.assert_called_once()
# span_drop
mock_span_drop.assert_called_once()
span_id = f"{instance.async_func_exc.__qualname__}-mock"
bound_args = inspect.signature(instance.async_func_exc).bind(a=3, b=5, c=2, d=5)
args, kwargs = mock_span_drop.call_args
assert args == ()
assert kwargs == {
"id_": span_id,
"bound_args": bound_args,
"instance": instance,
"err": cancelled_error,
}
# span_exit
mock_span_exit.assert_not_called()
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_dispatcher_fire_event(
mock_uuid: MagicMock,
mock_span_enter: MagicMock,
mock_span_drop: MagicMock,
mock_span_exit: MagicMock,
):
# arrange
mock_uuid.uuid4.return_value = "mock"
event_handler = _TestEventHandler()
dispatcher.add_event_handler(event_handler)
# act
_ = func_with_event(3, c=5)
# assert
span_id = f"{func_with_event.__qualname__}-mock"
assert all(e.span_id == span_id for e in event_handler.events)
# span_enter
mock_span_enter.assert_called_once()
# span
mock_span_drop.assert_not_called()
# span_exit
mock_span_exit.assert_called_once()
@pytest.mark.asyncio
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
async def test_dispatcher_async_fire_event(
mock_span_enter: MagicMock,
mock_span_drop: MagicMock,
mock_span_exit: MagicMock,
):
# arrange
event_handler = _TestEventHandler()
dispatcher.add_event_handler(event_handler)
# act
tasks = [
async_func_with_event(a=3, c=5),
async_func_with_event(5),
async_func_with_event(4),
]
_ = await asyncio.gather(*tasks)
# assert
span_ids = [e.span_id for e in event_handler.events]
id_counts = Counter(span_ids)
assert set(id_counts.values()) == {2}
# span_enter
assert mock_span_enter.call_count == 3
# span
mock_span_drop.assert_not_called()
# span_exit
assert mock_span_exit.call_count == 3
@pytest.mark.asyncio
@pytest.mark.parametrize("use_async", [True, False])
@patch.object(Dispatcher, "span_enter")
async def test_dispatcher_attaches_tags_to_events_and_spans(
mock_span_enter: MagicMock,
use_async: bool,
):
event_handler = _TestEventHandler()
dispatcher.add_event_handler(event_handler)
test_tags = {"test_tag_key": "test_tag_value"}
# Check that tags are set when using context manager
with instrument_tags(test_tags):
if use_async:
await async_func_with_event(a=3, c=5)
else:
func_with_event(a=3, c=5)
mock_span_enter.assert_called_once()
assert mock_span_enter.call_args[1]["tags"] == test_tags
assert all(e.tags == test_tags for e in event_handler.events)
@patch.object(Dispatcher, "span_enter")
def test_dispatcher_attaches_tags_to_concurrent_events(
mock_span_enter: MagicMock,
):
event_handler = _TestEventHandler()
dispatcher.add_event_handler(event_handler)
num_functions = 5
test_tags = [{"test_tag_key": num} for num in range(num_functions)]
test_tags_set = {str(tag) for tag in test_tags}
def run_func_with_tags(tag):
with instrument_tags(tag):
func_with_event(3, c=5)
# Run functions concurrently
futures = []
with ThreadPoolExecutor(max_workers=2) as executor:
for tag in test_tags:
futures.append(executor.submit(run_func_with_tags, tag))
for future in futures:
future.result()
# Ensure that each function recorded a span and event with the tags
assert len(mock_span_enter.call_args_list) == num_functions
assert len(event_handler.events) == num_functions
actual_span_tags = {
str(call_kwargs["tags"]) for _, call_kwargs in mock_span_enter.call_args_list
}
actual_event_tags = {str(e.tags) for e in event_handler.events}
assert actual_span_tags == test_tags_set
assert actual_event_tags == test_tags_set
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_dispatcher_fire_event_with_instance(
mock_uuid, mock_span_enter, mock_span_drop, mock_span_exit
):
# arrange
mock_uuid.uuid4.return_value = "mock"
event_handler = _TestEventHandler()
dispatcher.add_event_handler(event_handler)
# act
instance = _TestObject()
_ = instance.func_with_event(a=3, c=5)
# assert
span_id = f"{instance.func_with_event.__qualname__}-mock"
assert all(e.span_id == span_id for e in event_handler.events)
# span_enter
mock_span_enter.assert_called_once()
# span
mock_span_drop.assert_not_called()
# span_exit
mock_span_exit.assert_called_once()
@pytest.mark.asyncio
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
async def test_dispatcher_async_fire_event_with_instance(
mock_span_enter: MagicMock,
mock_span_drop: MagicMock,
mock_span_exit: MagicMock,
):
# arrange
# mock_uuid.return_value = "mock"
event_handler = _TestEventHandler()
dispatcher.add_event_handler(event_handler)
# act
instance = _TestObject()
tasks = [
instance.async_func_with_event(a=3, c=5),
instance.async_func_with_event(5),
]
_ = await asyncio.gather(*tasks)
# assert
span_ids = [e.span_id for e in event_handler.events]
id_counts = Counter(span_ids)
assert set(id_counts.values()) == {2}
# span_enter
assert mock_span_enter.call_count == 4
# span
mock_span_drop.assert_not_called()
# span_exit
assert mock_span_exit.call_count == 4
def test_context_nesting():
# arrange
# A binary tree of parent-child spans
h = 5 # height of binary tree
s = 2 ** (h + 1) - 1 # number of spans per tree
runs = 2 # number of trees (in parallel)
# Below is a tree (r=1) with h=3 (s=15).
# Tn: n-th span run in thread
# An: n-th span run in async
# A1
# βββββββββ΄ββββββββ
# A2 A3
# βββββ΄ββββ βββββ΄ββββ
# T4 T5 A6 A7
# βββ΄ββ βββ΄ββ βββ΄ββ βββ΄ββ
# T8 T9 A10 A11 T12 T13 A14 A15
# Note that child.n // 2 == parent.n, e.g. 11 // 2 == 5.
# We'll check that the parent-child associations are correct.
class Span(BaseSpan):
r: int # tree id
n: int # span id (per tree)
class Event(BaseEvent):
r: int # tree id
n: int # span id (per tree)
lock = Lock()
spans: Dict[str, Span] = {}
events: List[Event] = []
class SpanHandler(BaseSpanHandler):
def new_span(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
parent_span_id: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
r, n = bound_args.args[:2]
span = Span(r=r, n=n, id_=id_, parent_id=parent_span_id)
with lock:
spans[id_] = span
def prepare_to_drop_span(self, *args: Any, **kwargs: Any) -> None: ...
def prepare_to_exit_span(self, *args: Any, **kwargs: Any) -> None: ...
class EventHandler(BaseEventHandler):
def handle(self, event: Event, **kwargs) -> None: # type: ignore
with lock:
events.append(event)
dispatcher = Dispatcher(
event_handlers=[EventHandler()],
span_handlers=[SpanHandler()],
propagate=False,
)
@dispatcher.span
def bar(r: int, n: int, callback: Callable[[], None] = lambda: None) -> None:
dispatcher.event(Event(r=r, n=n))
if n > 2**h - 1:
callback()
return
if n % 2:
asyncio.run(_foo(r, n))
else:
t0 = Thread(target=bar, args=(r, n * 2))
t1 = Thread(target=bar, args=(r, n * 2 + 1))
t0.start()
t1.start()
time.sleep(0.01)
t0.join()
t1.join()
callback()
@dispatcher.span
async def foo(r: int, n: int) -> None:
dispatcher.event(Event(r=r, n=n))
if n > 2**h - 1:
return
if n % 2:
await _foo(r, n)
else:
q, loop = Queue(), get_event_loop()
Thread(target=bar, args=(r, n * 2, _callback(q, loop))).start()
Thread(target=bar, args=(r, n * 2 + 1, _callback(q, loop))).start()
await gather(q.get(), q.get())
async def _foo(r: int, n: int) -> None:
await gather(foo(r, n * 2), foo(r, n * 2 + 1), sleep(0.01))
def _callback(q: Queue, loop: AbstractEventLoop) -> Callable[[], None]:
return lambda: loop.call_soon_threadsafe(q.put_nowait(1)) # type: ignore
# act
# Use regular thread to ensure that `Token.MISSING` is being handled.
regular_threads = [
(
threading.Thread(target=asyncio.run, args=(foo(r, 1),))
if r % 2
else threading.Thread(target=bar, args=(r, 1))
)
for r in range(runs)
]
[t.start() for t in regular_threads]
[t.join() for t in regular_threads]
# assert
# parent-child associations should be correct
assert sorted(span.n for span in spans.values()) == sorted(
list(range(1, s + 1)) * runs
)
for span in spans.values():
if span.n > 1:
if not span.parent_id:
print(span)
assert span.r == spans[span.parent_id].r # same tree #type:ignore
assert span.n // 2 == spans[span.parent_id].n # type:ignore
# # event-span associations should be correct
# assert sorted(event.n for event in events) == sorted(list(range(1, s + 1)) * runs)
# for event in events:
# assert event.r == spans[event.span_id].r # same tree
# assert event.n == spans[event.span_id].n # same span
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_dispatcher_fire_event_backwards_compat(
mock_uuid: MagicMock,
mock_span_enter: MagicMock,
mock_span_drop: MagicMock,
mock_span_exit: MagicMock,
):
# arrange
mock_uuid.uuid4.return_value = "mock"
event_handler = _TestEventHandler()
dispatcher.add_event_handler(event_handler)
# act
_ = func_with_event_backwards_compat(3, c=5)
# assert
span_id = f"{func_with_event_backwards_compat.__qualname__}-mock"
assert all(e.span_id == span_id for e in event_handler.events)
# span_enter
mock_span_enter.assert_called_once()
# span
mock_span_drop.assert_not_called()
# span_exit
mock_span_exit.assert_called_once()
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_dispatcher_fire_event_with_instance_backwards_compat(
mock_uuid, mock_span_enter, mock_span_drop, mock_span_exit
):
# arrange
mock_uuid.uuid4.return_value = "mock"
event_handler = _TestEventHandler()
dispatcher.add_event_handler(event_handler)
# act
instance = _TestObject()
_ = instance.func_with_event_backwards_compat(a=3, c=5)
# assert
span_id = f"{instance.func_with_event_backwards_compat.__qualname__}-mock"
assert all(e.span_id == span_id for e in event_handler.events)
# span_enter
mock_span_enter.assert_called_once()
# span
mock_span_drop.assert_not_called()
# span_exit
mock_span_exit.assert_called_once()
@patch.object(Dispatcher, "span_enter")
def test_span_decorator_is_idempotent(mock_span_enter):
x, z = random(), dispatcher.span
assert z(z(z(lambda: x)))() == x
mock_span_enter.assert_called_once()
@patch.object(Dispatcher, "span_enter")
def test_span_decorator_is_idempotent_with_pass_through(mock_span_enter):
x, z = random(), dispatcher.span
a, b, c, d = (wrapt.decorator(lambda f, *_: f()) for _ in range(4))
assert z(a(b(z(c(d(z(lambda: x)))))))() == x
mock_span_enter.assert_called_once()
@patch.object(Dispatcher, "span_enter")
def test_mixin_decorates_abstract_method(mock_span_enter):
x, z = random(), abstractmethod
A = type("A", (DispatcherSpanMixin,), {"f": z(lambda _: ...)})
B = type("B", (A,), {"f": lambda _: x + 0})
C = type("C", (B,), {"f": lambda _: x + 1})
D = type("D", (C, B), {"f": lambda _: x + 2})
for i, T in enumerate((B, C, D)):
assert T().f() - i == pytest.approx(x) # type:ignore
assert mock_span_enter.call_count - i == 1
@patch.object(Dispatcher, "span_enter")
def test_mixin_decorates_overridden_method(mock_span_enter):
x, z = random(), dispatcher.span
A = type("A", (DispatcherSpanMixin,), {"f": z(lambda _: x)})
B = type("B", (A,), {"f": lambda _: x + 1})
C = type("C", (B,), {"f": lambda _: x + 2})
D = type("D", (C, B), {"f": lambda _: x + 3})
for i, T in enumerate((A, B, C, D)):
assert T().f() - i == pytest.approx(x) # type:ignore
assert mock_span_enter.call_count - i == 1
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_span_naming_with_inheritance(mock_uuid, mock_span_enter, mock_span_exit):
"""Test that span IDs use the runtime class name, not the definition class name."""
# arrange
mock_uuid.uuid4.return_value = "mock"
class BaseClass:
@dispatcher.span
def base_method(self, x):
return x * 2
@dispatcher.span
async def async_base_method(self, x):
return x * 3
class DerivedClass(BaseClass):
pass
class AnotherDerivedClass(BaseClass):
@dispatcher.span
def derived_method(self, x):
return x * 4
# act
base_instance = BaseClass()
derived_instance = DerivedClass()
another_derived_instance = AnotherDerivedClass()
base_result = base_instance.base_method(5)
derived_result = derived_instance.base_method(5)
another_derived_result = another_derived_instance.derived_method(5)
# assert
assert mock_span_enter.call_count == 3
# Check that span IDs use the actual runtime class names
calls = mock_span_enter.call_args_list
# BaseClass.base_method called on BaseClass instance
assert calls[0][1]["id_"] == "BaseClass.base_method-mock"
# BaseClass.base_method called on DerivedClass instance (should use DerivedClass)
assert calls[1][1]["id_"] == "DerivedClass.base_method-mock"
# AnotherDerivedClass.derived_method called on AnotherDerivedClass instance
assert calls[2][1]["id_"] == "AnotherDerivedClass.derived_method-mock"
@pytest.mark.asyncio
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
async def test_async_span_naming_with_inheritance(
mock_uuid, mock_span_enter, mock_span_exit
):
"""Test that async span IDs use the runtime class name, not the definition class name."""
# arrange
mock_uuid.uuid4.return_value = "mock"
class BaseClass:
@dispatcher.span
async def async_base_method(self, x):
return x * 3
class DerivedClass(BaseClass):
pass
# act
base_instance = BaseClass()
derived_instance = DerivedClass()
base_result = await base_instance.async_base_method(5)
derived_result = await derived_instance.async_base_method(5)
# assert
assert mock_span_enter.call_count == 2
calls = mock_span_enter.call_args_list
# BaseClass.async_base_method called on BaseClass instance
assert calls[0][1]["id_"] == "BaseClass.async_base_method-mock"
# BaseClass.async_base_method called on DerivedClass instance (should use DerivedClass)
assert calls[1][1]["id_"] == "DerivedClass.async_base_method-mock"
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_span_naming_regular_functions_unchanged(
mock_uuid, mock_span_enter, mock_span_exit
):
"""Test that regular functions (non-methods) still use __qualname__."""
# arrange
mock_uuid.uuid4.return_value = "mock"
@dispatcher.span
def regular_function(x):
return x * 5
# act
result = regular_function(10)
# assert
mock_span_enter.assert_called_once()
call_kwargs = mock_span_enter.call_args[1]
# Regular functions should still use __qualname__
assert call_kwargs["id_"] == f"{regular_function.__qualname__}-mock"
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_span_naming_complex_inheritance(mock_uuid, mock_span_enter, mock_span_exit):
"""Test span naming with multiple levels of inheritance."""
# arrange
mock_uuid.uuid4.return_value = "mock"
class GrandParent:
@dispatcher.span
def shared_method(self, x):
return x
class Parent(GrandParent):
pass
class Child(Parent):
@dispatcher.span
def child_method(self, x):
return x * 2
class GrandChild(Child):
pass
# act
instances = [GrandParent(), Parent(), Child(), GrandChild()]
# Call shared_method on all instances
for instance in instances:
instance.shared_method(1)
# Call child_method on child and grandchild
instances[2].child_method(1) # Child
instances[3].child_method(1) # GrandChild
# assert
assert mock_span_enter.call_count == 6
calls = mock_span_enter.call_args_list
# shared_method calls should use the runtime class names
assert calls[0][1]["id_"] == "GrandParent.shared_method-mock"
assert calls[1][1]["id_"] == "Parent.shared_method-mock"
assert calls[2][1]["id_"] == "Child.shared_method-mock"
assert calls[3][1]["id_"] == "GrandChild.shared_method-mock"
# child_method calls should use the runtime class names
assert calls[4][1]["id_"] == "Child.child_method-mock"
assert calls[5][1]["id_"] == "GrandChild.child_method-mock"
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_span_naming_with_method_override(mock_uuid, mock_span_enter, mock_span_exit):
"""Test span naming when methods are overridden in derived classes."""
# arrange
mock_uuid.uuid4.return_value = "mock"
class Base:
@dispatcher.span
def method(self, x):
return x
class Derived(Base):
@dispatcher.span
def method(self, x):
return x * 2
# act
base_instance = Base()
derived_instance = Derived()
base_instance.method(1)
derived_instance.method(1)
# assert
assert mock_span_enter.call_count == 2
calls = mock_span_enter.call_args_list
# Each should use their respective class names
assert calls[0][1]["id_"] == "Base.method-mock"
assert calls[1][1]["id_"] == "Derived.method-mock"
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_span_naming_with_nested_classes(mock_uuid, mock_span_enter, mock_span_exit):
"""Test span naming with nested classes."""
# arrange
mock_uuid.uuid4.return_value = "mock"
class Outer:
class Inner:
@dispatcher.span
def inner_method(self, x):
return x
@dispatcher.span
def outer_method(self, x):
return x * 2
# act
outer_instance = Outer()
inner_instance = Outer.Inner()
outer_instance.outer_method(1)
inner_instance.inner_method(1)
# assert
assert mock_span_enter.call_count == 2
calls = mock_span_enter.call_args_list
# Should use the simple class names (not qualified names)
assert calls[0][1]["id_"] == "Outer.outer_method-mock"
assert calls[1][1]["id_"] == "Inner.inner_method-mock"
def test_aevent_with_sync_handlers():
"""Test that aevent works with sync handlers via default ahandle implementation."""
# arrange
event_handler = _TestEventHandler()
test_dispatcher = Dispatcher(event_handlers=[event_handler], propagate=False)
event = _TestStartEvent()
# act
asyncio.run(test_dispatcher.aevent(event))
# assert
assert len(event_handler.events) == 1
assert event_handler.events[0] == event
@pytest.mark.asyncio
async def test_aevent_with_async_handlers():
"""Test that aevent works with async handlers."""
# arrange
event_handler = _TestAsyncEventHandler()
test_dispatcher = Dispatcher(event_handlers=[event_handler], propagate=False)
event = _TestStartEvent()
# act
await test_dispatcher.aevent(event)
# assert
assert len(event_handler.events) == 1
assert event_handler.events[0] == event
assert event_handler.async_calls == 1
@pytest.mark.asyncio
async def test_aevent_concurrent_handlers():
"""Test that aevent runs handlers concurrently."""
# arrange
handler1 = _TestAsyncEventHandler()
handler2 = _TestAsyncEventHandler()
test_dispatcher = Dispatcher(event_handlers=[handler1, handler2], propagate=False)
event = _TestStartEvent()
# act
start_time = time.time()
await test_dispatcher.aevent(event)
end_time = time.time()
# assert
# Should take ~0.01s (concurrent) not ~0.02s (sequential)
assert end_time - start_time < 0.015
assert len(handler1.events) == 1
assert len(handler2.events) == 1
assert handler1.async_calls == 1
assert handler2.async_calls == 1
@pytest.mark.asyncio
async def test_aevent_error_isolation():
"""Test that handler errors don't affect other handlers."""
# arrange
class FailingHandler(BaseEventHandler):
def handle(self, e: BaseEvent, **kwargs: Any) -> Any:
raise ValueError("Handler failed")
handler1 = _TestAsyncEventHandler()
handler2 = FailingHandler()
handler3 = _TestAsyncEventHandler()
test_dispatcher = Dispatcher(
event_handlers=[handler1, handler2, handler3], propagate=False
)
event = _TestStartEvent()
# act
await test_dispatcher.aevent(event)
# assert
# Both working handlers should have processed the event
assert len(handler1.events) == 1
assert len(handler3.events) == 1
assert handler1.async_calls == 1
assert handler3.async_calls == 1
@pytest.mark.asyncio
async def test_aevent_propagation():
"""Test that aevent respects propagation settings."""
# arrange
child_handler = _TestAsyncEventHandler()
parent_handler = _TestAsyncEventHandler()
child_dispatcher = Dispatcher(
name="child", event_handlers=[child_handler], propagate=True
)
parent_dispatcher = Dispatcher(
name="parent", event_handlers=[parent_handler], propagate=False
)
manager = Manager(parent_dispatcher)
manager.add_dispatcher(child_dispatcher)
child_dispatcher.manager = manager
child_dispatcher.parent_name = "parent"
event = _TestStartEvent()
# act
await child_dispatcher.aevent(event)
# assert
# Both handlers should have processed the event due to propagation
assert len(child_handler.events) == 1
assert len(parent_handler.events) == 1
assert child_handler.async_calls == 1
assert parent_handler.async_calls == 1
@pytest.mark.asyncio
async def test_aevent_no_propagation():
"""Test that aevent respects no-propagation settings."""
# arrange
child_handler = _TestAsyncEventHandler()
parent_handler = _TestAsyncEventHandler()
child_dispatcher = Dispatcher(
name="child", event_handlers=[child_handler], propagate=False
)
parent_dispatcher = Dispatcher(
name="parent", event_handlers=[parent_handler], propagate=False
)
manager = Manager(parent_dispatcher)
manager.add_dispatcher(child_dispatcher)
child_dispatcher.manager = manager
child_dispatcher.parent_name = "parent"
event = _TestStartEvent()
# act
await child_dispatcher.aevent(event)
# assert
# Only child handler should have processed the event
assert len(child_handler.events) == 1
assert len(parent_handler.events) == 0
assert child_handler.async_calls == 1
assert parent_handler.async_calls == 0
| _TestObject |
python | ray-project__ray | python/ray/data/_internal/planner/plan_expression/expression_evaluator.py | {
"start": 13996,
"end": 20147
} | class ____(ast.NodeVisitor):
"""AST visitor that converts string expressions to Ray Data expressions."""
def visit_Compare(self, node: ast.Compare) -> "Expr":
"""Handle comparison operations (e.g., a == b, a < b, a in b)."""
from ray.data.expressions import BinaryExpr, Operation
if len(node.ops) != 1 or len(node.comparators) != 1:
raise ValueError("Only simple binary comparisons are supported")
left = self.visit(node.left)
right = self.visit(node.comparators[0])
op = node.ops[0]
# Map AST comparison operators to Ray Data operations
op_map = {
ast.Eq: Operation.EQ,
ast.NotEq: Operation.NE,
ast.Lt: Operation.LT,
ast.LtE: Operation.LE,
ast.Gt: Operation.GT,
ast.GtE: Operation.GE,
ast.In: Operation.IN,
ast.NotIn: Operation.NOT_IN,
}
if type(op) not in op_map:
raise ValueError(f"Unsupported comparison operator: {type(op).__name__}")
return BinaryExpr(op_map[type(op)], left, right)
def visit_BoolOp(self, node: ast.BoolOp) -> "Expr":
"""Handle logical operations (e.g., a and b, a or b)."""
from ray.data.expressions import BinaryExpr, Operation
conditions = [self.visit(value) for value in node.values]
combined_expr = conditions[0]
for condition in conditions[1:]:
if isinstance(node.op, ast.And):
combined_expr = BinaryExpr(Operation.AND, combined_expr, condition)
elif isinstance(node.op, ast.Or):
combined_expr = BinaryExpr(Operation.OR, combined_expr, condition)
else:
raise ValueError(
f"Unsupported logical operator: {type(node.op).__name__}"
)
return combined_expr
def visit_UnaryOp(self, node: ast.UnaryOp) -> "Expr":
"""Handle unary operations (e.g., not a, -5)."""
from ray.data.expressions import Operation, UnaryExpr, lit
if isinstance(node.op, ast.Not):
operand = self.visit(node.operand)
return UnaryExpr(Operation.NOT, operand)
elif isinstance(node.op, ast.USub):
operand = self.visit(node.operand)
return operand * lit(-1)
else:
raise ValueError(f"Unsupported unary operator: {type(node.op).__name__}")
def visit_Name(self, node: ast.Name) -> "Expr":
"""Handle variable names (column references)."""
from ray.data.expressions import col
return col(node.id)
def visit_Constant(self, node: ast.Constant) -> "Expr":
"""Handle constant values (numbers, strings, booleans)."""
from ray.data.expressions import lit
return lit(node.value)
def visit_List(self, node: ast.List) -> "Expr":
"""Handle list literals."""
from ray.data.expressions import LiteralExpr, lit
# Visit all elements first
visited_elements = [self.visit(elt) for elt in node.elts]
# Try to extract constant values for literal list
elements = []
for elem in visited_elements:
if isinstance(elem, LiteralExpr):
elements.append(elem.value)
else:
# For compatibility with Arrow visitor, we need to support non-literals
# but Ray Data expressions may have limitations here
raise ValueError(
"List contains non-constant expressions. Ray Data expressions "
"currently only support lists of constant values."
)
return lit(elements)
def visit_Attribute(self, node: ast.Attribute) -> "Expr":
"""Handle attribute access (e.g., for nested column names)."""
from ray.data.expressions import col
# For nested column names like "user.age", combine them with dots
if isinstance(node.value, ast.Name):
return col(f"{node.value.id}.{node.attr}")
elif isinstance(node.value, ast.Attribute):
# Recursively handle nested attributes
left_expr = self.visit(node.value)
if isinstance(left_expr, ColumnExpr):
return col(f"{left_expr._name}.{node.attr}")
raise ValueError(
f"Unsupported attribute access: {node.attr}. Node details: {ast.dump(node)}"
)
def visit_Call(self, node: ast.Call) -> "Expr":
"""Handle function calls for operations like is_null, is_not_null, is_nan."""
from ray.data.expressions import BinaryExpr, Operation, UnaryExpr
func_name = node.func.id if isinstance(node.func, ast.Name) else str(node.func)
if func_name == "is_null":
if len(node.args) != 1:
raise ValueError("is_null() expects exactly one argument")
operand = self.visit(node.args[0])
return UnaryExpr(Operation.IS_NULL, operand)
# Adding this conditional to keep it consistent with the current implementation,
# of carrying Pyarrow's semantic of `is_valid`
elif func_name == "is_valid" or func_name == "is_not_null":
if len(node.args) != 1:
raise ValueError(f"{func_name}() expects exactly one argument")
operand = self.visit(node.args[0])
return UnaryExpr(Operation.IS_NOT_NULL, operand)
elif func_name == "is_nan":
if len(node.args) != 1:
raise ValueError("is_nan() expects exactly one argument")
operand = self.visit(node.args[0])
# Use x != x pattern for NaN detection (NaN != NaN is True)
return BinaryExpr(Operation.NE, operand, operand)
elif func_name == "is_in":
if len(node.args) != 2:
raise ValueError("is_in() expects exactly two arguments")
left = self.visit(node.args[0])
right = self.visit(node.args[1])
return BinaryExpr(Operation.IN, left, right)
else:
raise ValueError(f"Unsupported function: {func_name}")
| _ConvertToNativeExpressionVisitor |
python | python__mypy | mypyc/ir/func_ir.py | {
"start": 705,
"end": 1838
} | class ____:
"""Description of a function argument in IR.
Argument kind is one of ARG_* constants defined in mypy.nodes.
"""
def __init__(
self, name: str, typ: RType, kind: ArgKind = ARG_POS, pos_only: bool = False
) -> None:
self.name = name
self.type = typ
self.kind = kind
self.pos_only = pos_only
@property
def optional(self) -> bool:
return self.kind.is_optional()
def __repr__(self) -> str:
return "RuntimeArg(name={}, type={}, optional={!r}, pos_only={!r})".format(
self.name, self.type, self.optional, self.pos_only
)
def serialize(self) -> JsonDict:
return {
"name": self.name,
"type": self.type.serialize(),
"kind": int(self.kind.value),
"pos_only": self.pos_only,
}
@classmethod
def deserialize(cls, data: JsonDict, ctx: DeserMaps) -> RuntimeArg:
return RuntimeArg(
data["name"],
deserialize_type(data["type"], ctx),
ArgKind(data["kind"]),
data["pos_only"],
)
| RuntimeArg |
python | ray-project__ray | python/ray/llm/_internal/batch/processor/http_request_proc.py | {
"start": 461,
"end": 4158
} | class ____(ProcessorConfig):
"""The configuration for the HTTP request processor."""
batch_size: int = Field(
default=64,
description="The batch size.",
)
url: str = Field(
description="The URL to query.",
)
headers: Optional[Dict[str, Any]] = Field(
default=None,
description="The query header. Note that we will add "
"'Content-Type: application/json' to be the header for sure "
"because we only deal with requests body in JSON.",
)
qps: Optional[int] = Field(
default=None,
description="The maximum number of requests per second to avoid rate limit. "
"If None, the request will be sent sequentially.",
)
max_retries: int = Field(
default=0,
description="The maximum number of retries per request in the event of failures.",
)
base_retry_wait_time_in_s: float = Field(
default=1,
description="The base wait time for a retry during exponential backoff.",
)
# Since `session_factory` is a callable, we use type Any to avoid pydantic serialization issues
session_factory: Optional[Any] = Field(
default=None,
description="Optional session factory to be used for initializing a client session. Type: Callable[[], ClientSession]",
# exclude from JSON serialization since `session_factory` is a callable
exclude=True,
)
def build_http_request_processor(
config: HttpRequestProcessorConfig,
preprocess: Optional[UserDefinedFunction] = None,
postprocess: Optional[UserDefinedFunction] = None,
preprocess_map_kwargs: Optional[Dict[str, Any]] = None,
postprocess_map_kwargs: Optional[Dict[str, Any]] = None,
) -> Processor:
"""Construct a Processor and configure stages.
Args:
config: The configuration for the processor.
preprocess: An optional lambda function that takes a row (dict) as input
and returns a preprocessed row (dict). The output row must contain the
required fields for the following processing stages.
postprocess: An optional lambda function that takes a row (dict) as input
and returns a postprocessed row (dict).
preprocess_map_kwargs: Optional kwargs to pass to Dataset.map() for the
preprocess stage (e.g., num_cpus, memory, concurrency).
postprocess_map_kwargs: Optional kwargs to pass to Dataset.map() for the
postprocess stage (e.g., num_cpus, memory, concurrency).
Returns:
The constructed processor.
"""
stages = [
HttpRequestStage(
fn_constructor_kwargs=dict(
url=config.url,
additional_header=config.headers,
qps=config.qps,
max_retries=config.max_retries,
base_retry_wait_time_in_s=config.base_retry_wait_time_in_s,
session_factory=config.session_factory,
),
map_batches_kwargs=dict(
concurrency=config.concurrency,
),
)
]
telemetry_agent = get_or_create_telemetry_agent()
telemetry_agent.push_telemetry_report(
BatchModelTelemetry(
processor_config_name=type(config).__name__,
concurrency=config.concurrency,
)
)
processor = Processor(
config,
stages,
preprocess=preprocess,
postprocess=postprocess,
preprocess_map_kwargs=preprocess_map_kwargs,
postprocess_map_kwargs=postprocess_map_kwargs,
)
return processor
ProcessorBuilder.register(HttpRequestProcessorConfig, build_http_request_processor)
| HttpRequestProcessorConfig |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.