language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | eventlet__eventlet | tests/isolated/wsgi_connection_timeout.py | {
"start": 1941,
"end": 2861
} | class ____:
# new connection's socket.makefile
# eventlet *tends* to use socket.makefile, not raw socket methods.
# need to patch file operations
def __init__(self, conn):
self.conn = conn
self.conn._really_makefile = self.conn.makefile
self.conn.makefile = self
self.armed = False
self.file_reg = []
def unwrap(self):
self.conn.makefile = self.conn._really_makefile
del self.conn._really_makefile
def arm(self):
output_buffer.append("tick")
for i in self.file_reg:
i.arm()
def __call__(self, mode='r', bufsize=-1):
output_buffer.append(self.__class__.__name__ + ".__call__")
# file_obj = self.conn._really_makefile(*args, **kwargs)
file_obj = ExplodingSocketFile(self.conn._sock, mode, bufsize)
self.file_reg.append(file_obj)
return file_obj
| ExplodingConnectionWrap |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_private_ip_v6.py | {
"start": 1875,
"end": 4728
} | class ____(ColumnMapExpectation):
"""Expect column values to be private IP v6 addresses."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_private_ip": [
"fd1a:3092:04c5:d497:1001:2002:3003:4004",
"fd1a:3092:04c5:d497:aaaa:bbbb:cccc:dddd",
"fd1a:3092:04c5:d497:1234:1234:1234:1234",
"fd1a:3092:04c5:d497:abcd:abcd:abcd:abcd",
"fd1a:3092:04c5:d497:1111:2222:aaaa:bbbb",
],
"some_public": [
"a31a:3092:04c5:d497:1001:2002:3003:4004",
"bd1a:3092:04c5:d497:aaaa:bbbb:cccc:dddd",
"cd1a:3092:04c5:d497:1234:1234:1234:1234",
"dd1b:3092:04c5:d497:abcd:abcd:abcd:abcd",
"ed1a:3192:04c5:d497:1111:2222:aaaa:bbbb",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_private_ip"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "some_public", "mostly": 0.8},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_ipv6_private"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
}
if __name__ == "__main__":
ExpectColumnValuesToBePrivateIpV6().print_diagnostic_checklist()
| ExpectColumnValuesToBePrivateIpV6 |
python | scipy__scipy | scipy/interpolate/_rgi.py | {
"start": 2106,
"end": 32341
} | class ____:
"""Interpolator of specified order on a rectilinear grid in N ≥ 1 dimensions.
The data must be defined on a rectilinear grid; that is, a rectangular
grid with even or uneven spacing. Linear, nearest-neighbor, spline
interpolations are supported. After setting up the interpolator object,
the interpolation method may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions. The points in
each dimension (i.e. every elements of the points tuple) must be
strictly ascending or descending.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions. Complex data is
accepted.
method : str, optional
The method of interpolation to perform. Supported are "linear",
"nearest", "slinear", "cubic", "quintic" and "pchip". This
parameter will become the default for the object's ``__call__``
method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
Default is True.
fill_value : float or None, optional
The value to use for points outside of the interpolation domain.
If None, values outside the domain are extrapolated.
Default is ``np.nan``.
solver : callable, optional
Only used for methods "slinear", "cubic" and "quintic".
Sparse linear algebra solver for construction of the NdBSpline instance.
Default is the iterative solver `scipy.sparse.linalg.gcrotmk`.
.. versionadded:: 1.13
solver_args: dict, optional
Additional arguments to pass to `solver`, if any.
.. versionadded:: 1.13
Methods
-------
__call__
Attributes
----------
grid : tuple of ndarrays
The points defining the regular grid in n dimensions.
This tuple defines the full grid via
``np.meshgrid(*grid, indexing='ij')``
values : ndarray
Data values at the grid.
method : str
Interpolation method.
fill_value : float or ``None``
Use this value for out-of-bounds arguments to `__call__`.
bounds_error : bool
If ``True``, out-of-bounds argument raise a ``ValueError``.
Notes
-----
Contrary to `LinearNDInterpolator` and `NearestNDInterpolator`, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
In other words, this class assumes that the data is defined on a
*rectilinear* grid.
.. versionadded:: 0.14
The 'slinear'(k=1), 'cubic'(k=3), and 'quintic'(k=5) methods are
tensor-product spline interpolators, where `k` is the spline degree,
If any dimension has fewer points than `k` + 1, an error will be raised.
.. versionadded:: 1.9
If the input data is such that dimensions have incommensurate
units and differ by many orders of magnitude, the interpolant may have
numerical artifacts. Consider rescaling the data before interpolating.
**Choosing a solver for spline methods**
Spline methods, "slinear", "cubic" and "quintic" involve solving a
large sparse linear system at instantiation time. Depending on data,
the default solver may or may not be adequate. When it is not, you may
need to experiment with an optional `solver` argument, where you may
choose between the direct solver (`scipy.sparse.linalg.spsolve`) or
iterative solvers from `scipy.sparse.linalg`. You may need to supply
additional parameters via the optional `solver_args` parameter (for instance,
you may supply the starting value or target tolerance). See the
`scipy.sparse.linalg` documentation for the full list of available options.
Alternatively, you may instead use the legacy methods, "slinear_legacy",
"cubic_legacy" and "quintic_legacy". These methods allow faster construction
but evaluations will be much slower.
**Rounding rule at half points with `nearest` method**
The rounding rule with the `nearest` method at half points is rounding *down*.
Examples
--------
**Evaluate a function on the points of a 3-D grid**
As a first example, we evaluate a simple example function on the points of
a 3-D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> import numpy as np
>>> def f(x, y, z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> xg, yg ,zg = np.meshgrid(x, y, z, indexing='ij', sparse=True)
>>> data = f(xg, yg, zg)
``data`` is now a 3-D array with ``data[i, j, k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> interp = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3],
... [3.3, 5.2, 7.1]])
>>> interp(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
>>> f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)
(125.54200000000002, 145.894)
**Interpolate and extrapolate a 2D dataset**
As a second example, we interpolate and extrapolate a 2D data set:
>>> x, y = np.array([-2, 0, 4]), np.array([-2, 0, 2, 5])
>>> def ff(x, y):
... return x**2 + y**2
>>> xg, yg = np.meshgrid(x, y, indexing='ij')
>>> data = ff(xg, yg)
>>> interp = RegularGridInterpolator((x, y), data,
... bounds_error=False, fill_value=None)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(projection='3d')
>>> ax.scatter(xg.ravel(), yg.ravel(), data.ravel(),
... s=60, c='k', label='data')
Evaluate and plot the interpolator on a finer grid
>>> xx = np.linspace(-4, 9, 31)
>>> yy = np.linspace(-4, 9, 31)
>>> X, Y = np.meshgrid(xx, yy, indexing='ij')
>>> # interpolator
>>> ax.plot_wireframe(X, Y, interp((X, Y)), rstride=3, cstride=3,
... alpha=0.4, color='m', label='linear interp')
>>> # ground truth
>>> ax.plot_wireframe(X, Y, ff(X, Y), rstride=3, cstride=3,
... alpha=0.4, label='ground truth')
>>> plt.legend()
>>> plt.show()
Other examples are given
:ref:`in the tutorial <tutorial-interpolate_regular_grid_interpolator>`.
See Also
--------
NearestNDInterpolator : Nearest neighbor interpolator on *unstructured*
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolator on *unstructured* data
in N dimensions
interpn : a convenience function which wraps `RegularGridInterpolator`
scipy.ndimage.map_coordinates : interpolation on grids with equal spacing
(suitable for e.g., N-D image resampling)
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Wikipedia, "Trilinear interpolation",
https://en.wikipedia.org/wiki/Trilinear_interpolation
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
:doi:`10.1090/S0025-5718-1988-0917826-0`
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
_SPLINE_DEGREE_MAP = {"slinear": 1, "cubic": 3, "quintic": 5, 'pchip': 3,
"slinear_legacy": 1, "cubic_legacy": 3, "quintic_legacy": 5,}
_SPLINE_METHODS_recursive = {"slinear_legacy", "cubic_legacy",
"quintic_legacy", "pchip"}
_SPLINE_METHODS_ndbspl = {"slinear", "cubic", "quintic"}
_SPLINE_METHODS = list(_SPLINE_DEGREE_MAP.keys())
_ALL_METHODS = ["linear", "nearest"] + _SPLINE_METHODS
# generic type compatibility with scipy-stubs
__class_getitem__ = classmethod(GenericAlias)
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan, *, solver=None, solver_args=None):
if method not in self._ALL_METHODS:
raise ValueError(f"Method '{method}' is not defined")
elif method in self._SPLINE_METHODS:
self._validate_grid_dimensions(points, method) # NB: uses np.atleast_1d
try:
xp = array_namespace(*points, values)
except Exception as e:
# either "duck-type" values or a user error?
xp = array_namespace(*points) # still forbid mixed namespaces in `points`
try:
xp_v = array_namespace(values)
except Exception:
# "duck-type" values indeed, continue with `xp` as the namespace
pass
else:
# both `points` and `values` are array API objects, check consistency
if xp_v != xp:
raise e
self._asarray = xp.asarray
self.method = method
self._spline = None
self.bounds_error = bounds_error
self._grid, self._descending_dimensions = _check_points(points)
self._values = self._check_values(values)
self._check_dimensionality(self._grid, self._values)
self.fill_value = self._check_fill_value(self._values, fill_value)
if self._descending_dimensions:
self._values = np.flip(values, axis=self._descending_dimensions)
if self.method == "pchip" and np.iscomplexobj(self._values):
msg = ("`PchipInterpolator` only works with real values. If you are trying "
"to use the real components of the passed array, use `np.real` on "
"the array before passing to `RegularGridInterpolator`.")
raise ValueError(msg)
if method in self._SPLINE_METHODS_ndbspl:
if solver_args is None:
solver_args = {}
self._spline = self._construct_spline(method, solver, **solver_args)
else:
if solver is not None or solver_args:
raise ValueError(
f"{method =} does not accept the 'solver' argument. Got "
f" {solver = } and with arguments {solver_args}."
)
def _construct_spline(self, method, solver=None, **solver_args):
if solver is None:
solver = ssl.gcrotmk
spl = make_ndbspl(
self._grid, self._values, self._SPLINE_DEGREE_MAP[method],
solver=solver, **solver_args
)
return spl
def _check_dimensionality(self, grid, values):
_check_dimensionality(grid, values)
def _check_points(self, points):
return _check_points(points)
def _check_values(self, values):
if is_array_api_obj(values):
values = np.asarray(values)
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
return values
def _check_fill_value(self, values, fill_value):
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype') and not
np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
return fill_value
def __call__(self, xi, method=None, *, nu=None):
"""
Interpolation at coordinates.
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to evaluate the interpolator at.
method : str, optional
The method of interpolation to perform. Supported are "linear",
"nearest", "slinear", "cubic", "quintic" and "pchip". Default is
the method chosen when the interpolator was created.
nu : sequence of ints, length ndim, optional
If not None, the orders of the derivatives to evaluate.
Each entry must be non-negative.
Only allowed for methods "slinear", "cubic" and "quintic".
.. versionadded:: 1.13
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at `xi`. See notes for behaviour when
``xi.ndim == 1``.
Notes
-----
In the case that ``xi.ndim == 1`` a new axis is inserted into
the 0 position of the returned array, values_x, so its shape is
instead ``(1,) + values.shape[ndim:]``.
Examples
--------
Here we define a nearest-neighbor interpolator of a simple function
>>> import numpy as np
>>> x, y = np.array([0, 1, 2]), np.array([1, 3, 7])
>>> def f(x, y):
... return x**2 + y**2
>>> data = f(*np.meshgrid(x, y, indexing='ij', sparse=True))
>>> from scipy.interpolate import RegularGridInterpolator
>>> interp = RegularGridInterpolator((x, y), data, method='nearest')
By construction, the interpolator uses the nearest-neighbor
interpolation
>>> interp([[1.5, 1.3], [0.3, 4.5]])
array([2., 9.])
We can however evaluate the linear interpolant by overriding the
`method` parameter
>>> interp([[1.5, 1.3], [0.3, 4.5]], method='linear')
array([ 4.7, 24.3])
"""
_spline = self._spline
method = self.method if method is None else method
is_method_changed = self.method != method
if method not in self._ALL_METHODS:
raise ValueError(f"Method '{method}' is not defined")
if is_method_changed and method in self._SPLINE_METHODS_ndbspl:
_spline = self._construct_spline(method)
if nu is not None and method not in self._SPLINE_METHODS_ndbspl:
raise ValueError(
f"Can only compute derivatives for methods "
f"{self._SPLINE_METHODS_ndbspl}, got {method =}."
)
xi, xi_shape, ndim, nans, out_of_bounds = self._prepare_xi(xi)
if method == "linear":
indices, norm_distances = self._find_indices(xi.T)
if (ndim == 2 and hasattr(self._values, 'dtype') and
self._values.ndim == 2 and self._values.flags.writeable and
self._values.dtype in (np.float64, np.complex128) and
self._values.dtype.byteorder == '='):
# until cython supports const fused types, the fast path
# cannot support non-writeable values
# a fast path
out = np.empty(indices.shape[1], dtype=self._values.dtype)
result = evaluate_linear_2d(self._values,
indices,
norm_distances,
self._grid,
out)
else:
result = self._evaluate_linear(indices, norm_distances)
elif method == "nearest":
indices, norm_distances = self._find_indices(xi.T)
result = self._evaluate_nearest(indices, norm_distances)
elif method in self._SPLINE_METHODS:
if is_method_changed:
self._validate_grid_dimensions(self._grid, method)
if method in self._SPLINE_METHODS_recursive:
result = self._evaluate_spline(xi, method)
else:
result = _spline(xi, nu=nu)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
# f(nan) = nan, if any
if np.any(nans):
result[nans] = np.nan
return self._asarray(result.reshape(xi_shape[:-1] + self._values.shape[ndim:]))
@property
def grid(self):
return tuple(self._asarray(p) for p in self._grid)
@property
def values(self):
return self._asarray(self._values)
def _prepare_xi(self, xi):
ndim = len(self._grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != ndim:
raise ValueError("The requested sample points xi have dimension "
f"{xi.shape[-1]} but this "
f"RegularGridInterpolator has dimension {ndim}")
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
xi = np.asarray(xi, dtype=float)
# find nans in input
nans = np.any(np.isnan(xi), axis=-1)
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self._grid[i][0] <= p),
np.all(p <= self._grid[i][-1])):
raise ValueError(
f"One of the requested xi is out of bounds in dimension {i}"
)
out_of_bounds = None
else:
out_of_bounds = self._find_out_of_bounds(xi.T)
return xi, xi_shape, ndim, nans, out_of_bounds
def _evaluate_linear(self, indices, norm_distances):
# slice for broadcasting over trailing dimensions in self._values
vslice = (slice(None),) + (None,)*(self._values.ndim - len(indices))
# Compute shifting up front before zipping everything together
shift_norm_distances = [1 - yi for yi in norm_distances]
shift_indices = [i + 1 for i in indices]
# The formula for linear interpolation in 2d takes the form:
# values = self._values[(i0, i1)] * (1 - y0) * (1 - y1) + \
# self._values[(i0, i1 + 1)] * (1 - y0) * y1 + \
# self._values[(i0 + 1, i1)] * y0 * (1 - y1) + \
# self._values[(i0 + 1, i1 + 1)] * y0 * y1
# We pair i with 1 - yi (zipped1) and i + 1 with yi (zipped2)
zipped1 = zip(indices, shift_norm_distances)
zipped2 = zip(shift_indices, norm_distances)
# Take all products of zipped1 and zipped2 and iterate over them
# to get the terms in the above formula. This corresponds to iterating
# over the vertices of a hypercube.
hypercube = itertools.product(*zip(zipped1, zipped2))
value = np.array([0.])
for h in hypercube:
edge_indices, weights = zip(*h)
weight = np.array([1.])
for w in weights:
weight = weight * w
term = np.asarray(self._values[edge_indices]) * weight[vslice]
value = value + term # cannot use += because broadcasting
return value
def _evaluate_nearest(self, indices, norm_distances):
idx_res = [np.where(yi <= .5, i, i + 1)
for i, yi in zip(indices, norm_distances)]
return self._values[tuple(idx_res)]
def _validate_grid_dimensions(self, points, method):
k = self._SPLINE_DEGREE_MAP[method]
for i, point in enumerate(points):
ndim = len(np.atleast_1d(point))
if ndim <= k:
raise ValueError(f"There are {ndim} points in dimension {i},"
f" but method {method} requires at least "
f" {k+1} points per dimension.")
def _evaluate_spline(self, xi, method):
# ensure xi is 2D list of points to evaluate (`m` is the number of
# points and `n` is the number of interpolation dimensions,
# ``n == len(self._grid)``.)
if xi.ndim == 1:
xi = xi.reshape((1, xi.size))
m, n = xi.shape
# Reorder the axes: n-dimensional process iterates over the
# interpolation axes from the last axis downwards: E.g. for a 4D grid
# the order of axes is 3, 2, 1, 0. Each 1D interpolation works along
# the 0th axis of its argument array (for 1D routine it's its ``y``
# array). Thus permute the interpolation axes of `values` *and keep
# trailing dimensions trailing*.
axes = tuple(range(self._values.ndim))
axx = axes[:n][::-1] + axes[n:]
values = self._values.transpose(axx)
if method == 'pchip':
_eval_func = self._do_pchip
else:
_eval_func = self._do_spline_fit
k = self._SPLINE_DEGREE_MAP[method]
# Non-stationary procedure: difficult to vectorize this part entirely
# into numpy-level operations. Unfortunately this requires explicit
# looping over each point in xi.
# can at least vectorize the first pass across all points in the
# last variable of xi.
last_dim = n - 1
first_values = _eval_func(self._grid[last_dim],
values,
xi[:, last_dim],
k)
# the rest of the dimensions have to be on a per point-in-xi basis
shape = (m, *self._values.shape[n:])
result = np.empty(shape, dtype=self._values.dtype)
for j in range(m):
# Main process: Apply 1D interpolate in each dimension
# sequentially, starting with the last dimension.
# These are then "folded" into the next dimension in-place.
folded_values = first_values[j, ...]
for i in range(last_dim-1, -1, -1):
# Interpolate for each 1D from the last dimensions.
# This collapses each 1D sequence into a scalar.
folded_values = _eval_func(self._grid[i],
folded_values,
xi[j, i],
k)
result[j, ...] = folded_values
return result
@staticmethod
def _do_spline_fit(x, y, pt, k):
local_interp = make_interp_spline(x, y, k=k, axis=0)
values = local_interp(pt)
return values
@staticmethod
def _do_pchip(x, y, pt, k):
local_interp = PchipInterpolator(x, y, axis=0)
values = local_interp(pt)
return values
def _find_indices(self, xi):
return find_indices(self._grid, xi)
def _find_out_of_bounds(self, xi):
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self._grid):
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular or rectilinear grids.
Strictly speaking, not all regular grids are supported - this function
works on *rectilinear* grids, that is, a rectangular grid with even or
uneven spacing.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions. The points in
each dimension (i.e. every elements of the points tuple) must be
strictly ascending or descending.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions. Complex data is
accepted.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear",
"nearest", "slinear", "cubic", "quintic", "pchip", and "splinef2d".
"splinef2d" is only supported for 2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at `xi`. See notes for behaviour when
``xi.ndim == 1``.
See Also
--------
NearestNDInterpolator : Nearest neighbor interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : interpolation on a regular or rectilinear grid
in arbitrary dimensions (`interpn` wraps this
class).
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
scipy.ndimage.map_coordinates : interpolation on grids with equal spacing
(suitable for e.g., N-D image resampling)
Notes
-----
.. versionadded:: 0.14
In the case that ``xi.ndim == 1`` a new axis is inserted into
the 0 position of the returned array, values_x, so its shape is
instead ``(1,) + values.shape[ndim:]``.
If the input data is such that input dimensions have incommensurate
units and differ by many orders of magnitude, the interpolant may have
numerical artifacts. Consider rescaling the data before interpolation.
Examples
--------
Evaluate a simple example function on the points of a regular 3-D grid:
>>> import numpy as np
>>> from scipy.interpolate import interpn
>>> def value_func_3d(x, y, z):
... return 2 * x + 3 * y - z
>>> x = np.linspace(0, 4, 5)
>>> y = np.linspace(0, 5, 6)
>>> z = np.linspace(0, 6, 7)
>>> points = (x, y, z)
>>> values = value_func_3d(*np.meshgrid(*points, indexing='ij'))
Evaluate the interpolating function at a point
>>> point = np.array([2.21, 3.12, 1.15])
>>> print(interpn(points, values, point))
[12.63]
Compare with value at point by function
>>> value_func_3d(*point)
12.63 # up to rounding
Since the function is linear, the interpolation is exact using linear method.
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "cubic", "quintic", "pchip",
"splinef2d", "slinear",
"slinear_legacy", "cubic_legacy", "quintic_legacy"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', 'slinear', 'cubic', 'quintic', 'pchip', "
f"and 'splinef2d'. You provided {method}.")
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method splinef2d can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method splinef2d does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError(
f"There are {len(points)} point arrays, but values has {ndim} dimensions"
)
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method splinef2d can only be used for "
"scalar data with one point per coordinate")
grid, descending_dimensions = _check_points(points)
_check_dimensionality(grid, values)
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError(
f"The requested sample points xi have dimension {xi.shape[-1]}, "
f"but this RegularGridInterpolator has dimension {len(grid)}"
)
if bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError(
f"One of the requested xi is out of bounds in dimension {i}"
)
# perform interpolation
if method in RegularGridInterpolator._ALL_METHODS:
interp = RegularGridInterpolator(points, values, method=method,
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
else:
raise ValueError(f"unknown {method = }")
| RegularGridInterpolator |
python | google__jax | jax/experimental/mosaic/gpu/launch_context.py | {
"start": 9072,
"end": 14455
} | class ____:
"""Manages ops handling the GMEM scratch that contains the TMA descriptors.
TMA descriptors are created on the host and then copied to GMEM. So there
needs to be some code on the host to allocate and initialize the TMA
descriptors. However, we only know what descriptors we need after we have
lowered the entire kernel. This class helps manage everything needed to
correctly allocate and initialize the scratch.
To help reconcile the needs of kernels that use the dialect lowering with
those that use MGPU APIs directly, this class only creates the relevant ops
lazily. Eager creation would make them appear dead before dialect lowering
and MLIR's DCE would remove them.
During the lowering, we collect information about how many bytes are needed
and also how each descriptor should be initialized on the host. At the end
of the lowering, the finalize_size() method should be called to add the
necessary code on the host to allocate and initialize all descriptors.
Here's how the IR looks after the initial ops are created for the first time:
%1 = llvm.alloc_op {elem_type = !llvm.array<0 x i8>} -> !llvm.ptr
%2 = llvm.load_op (%1) : (!llvm.ptr) -> !llvm.array<0 x i8>
...
%3 = gpu.launch async
^bb0:
%4 = builtin.unrealized_conversion_cast_op(%2)
: (!llvm.array<256 x i8>) -> !llvm.ptr
And here is an example of how the IR could look like after finalize_size() is
called:
%11 = llvm.alloc_op {elem_type = !llvm.array<256 x i8>} -> !llvm.ptr
%22 = llvm.load_op (%11) : (!llvm.ptr) -> !llvm.array<256 x i8>
...
# Ops inserted to initialize the tma descriptors on the host:
...
%33 = llvm.getelementptr %11[0] : (!llvm.ptr) -> !llvm.ptr, i8
call @mosaic_gpu_init_tma_desc (%33, ...)
...
%44 = llvm.getelementptr %11[128] : (!llvm.ptr) -> !llvm.ptr, i8
call @mosaic_gpu_init_tma_desc (%44, ...)
...
%55 = gpu.launch async
^bb0:
%66 = builtin.unrealized_conversion_cast_op(%22)
: (!llvm.array<256 x i8>) -> !llvm.ptr
"""
def __init__(self, gpu_launch_op: _gpu_ops_gen.LaunchOp):
self.next_offset: int = 0
self.host_init: list[Callable[[ir.Value], None]] = []
self._ops_created = False
# Ideally, we would store the gpu.launch op directly. However, it gets
# invalidated by passes like "canonicalize". Thus we store the module and
# find the gpu.launch op from there when needed.
op = gpu_launch_op
while op.name != "builtin.module":
op = op.parent.opview
assert op is not None
self._module_op = op
def _find_first_op(
self, op_name: str, block: ir.Block, tag_attribute_name: str | None = None
) -> ir.OpView | None:
for op in block:
if op.name == op_name and (
tag_attribute_name is None or tag_attribute_name in op.attributes
):
return op
for region in op.regions:
for block in region:
child_op = self._find_first_op(op_name, block, tag_attribute_name)
if child_op is not None:
return child_op
return None
def _create_ops(self):
if self._ops_created:
return
self._ops_created = True
gpu_launch_op = self._find_first_op("gpu.launch", self._module_op.body)
assert gpu_launch_op is not None
ptr_ty = ir.Type.parse("!llvm.ptr")
empty_arr_ty = ir.Type.parse("!llvm.array<0 x i8>")
i64 = ir.IntegerType.get_signless(64)
with ir.InsertionPoint(gpu_launch_op):
alloc_op = llvm.AllocaOp(
ptr_ty, c(1, i64), empty_arr_ty,
alignment=TMA_DESCRIPTOR_ALIGNMENT
)
# Tag the alloc op with an attribute so that we can find it later.
alloc_op.attributes[MOSAIC_GPU_SMEM_ALLOC_ATTR] = ir.UnitAttr.get()
load_op = llvm.LoadOp(empty_arr_ty, alloc_op)
with ir.InsertionPoint.at_block_begin(gpu_launch_op.body.blocks[0]):
builtin.unrealized_conversion_cast([ptr_ty], [load_op])
def _find_alloc_load_and_device_ptr(
self,
) -> tuple[llvm.AllocaOp, llvm.LoadOp, ir.Value]:
if not self._ops_created:
self._create_ops()
alloc_op = self._find_first_op(
"llvm.alloca", self._module_op.body, MOSAIC_GPU_SMEM_ALLOC_ATTR
)
assert alloc_op is not None
[alloc_user] = alloc_op.result.uses
load_op = alloc_user.owner
assert load_op.operation.name == "llvm.load"
[load_op_user] = load_op.result.uses
device_ptr = load_op_user.owner
assert device_ptr.operation.name == "builtin.unrealized_conversion_cast"
return alloc_op, load_op, device_ptr.result
def device_ptr(self) -> ir.Value:
_, _, device_ptr = self._find_alloc_load_and_device_ptr()
return device_ptr
def finalize_size(self):
"""
Allocates and initializes the host buffer. This needs to be done after
lowering, i.e. after all TMA descriptors have been recorded. Only then we
know what the scratch contains.
"""
if self.next_offset == 0:
return
alloc_op, load_op, _ = self._find_alloc_load_and_device_ptr()
with ir.InsertionPoint(load_op):
gmem_scratch_bytes = self.next_offset
scratch_arr_ty = ir.Type.parse(f"!llvm.array<{gmem_scratch_bytes} x i8>")
alloc_op.elem_type = ir.TypeAttr.get(scratch_arr_ty)
load_op.result.set_type(scratch_arr_ty)
for init_callback in self.host_init:
init_callback(alloc_op.result)
| Scratch |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared_tests/test_modules.py | {
"start": 4859,
"end": 4944
} | class ____:
submodule_search_locations: Union[list[str], None] = None
| _FakeModuleSpec |
python | numba__numba | numba/cuda/tests/cudadrv/test_detect.py | {
"start": 594,
"end": 2774
} | class ____(CUDATestCase):
def run_cmd(self, cmdline, env):
popen = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
# finish in 5 minutes or kill it
timeout = threading.Timer(5 * 60., popen.kill)
try:
timeout.start()
out, err = popen.communicate()
# the process should exit with an error
return out.decode(), err.decode()
finally:
timeout.cancel()
return None, None
def run_test_in_separate_process(self, envvar, envvar_value):
env_copy = os.environ.copy()
env_copy[envvar] = str(envvar_value)
code = """if 1:
from numba import cuda
@cuda.jit('(int64,)')
def kernel(x):
pass
kernel(1,)
"""
cmdline = [sys.executable, "-c", code]
return self.run_cmd(cmdline, env_copy)
@skip_on_cudasim('Simulator does not hit device library search code path')
@unittest.skipIf(not sys.platform.startswith('linux'), "linux only")
def test_cuda_find_lib_errors(self):
"""
This tests that the find_libs works as expected in the case of an
environment variable being used to set the path.
"""
# one of these is likely to exist on linux, it's also unlikely that
# someone has extracted the contents of libdevice into here!
locs = ['lib', 'lib64']
looking_for = None
for l in locs:
looking_for = os.path.join(os.path.sep, l)
if os.path.exists(looking_for):
break
# This is the testing part, the test will only run if there's a valid
# path in which to look
if looking_for is not None:
out, err = self.run_test_in_separate_process("NUMBA_CUDA_DRIVER",
looking_for)
self.assertTrue(out is not None)
self.assertTrue(err is not None)
if __name__ == '__main__':
unittest.main()
| TestCUDAFindLibs |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/dependency.py | {
"start": 20883,
"end": 21195
} | class ____(ABC):
@abstractmethod
def get_node_dependencies(self) -> Sequence["DependencyDefinition"]:
pass
@abstractmethod
def is_fan_in(self) -> bool:
"""The result passed to the corresponding input will be a List made from different node outputs."""
@public
| IDependencyDefinition |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/variables.py | {
"start": 2305,
"end": 2483
} | class ____(BaseModel):
"""Import Variables serializer for responses."""
created_variable_keys: list[str]
import_count: int
created_count: int
| VariablesImportResponse |
python | django__django | tests/queries/tests.py | {
"start": 142807,
"end": 150494
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.d1 = ModelD.objects.create(name="foo")
d2 = ModelD.objects.create(name="bar")
cls.a1 = ModelA.objects.create(name="a1", d=cls.d1)
c = ModelC.objects.create(name="c")
b = ModelB.objects.create(name="b", c=c)
cls.a2 = ModelA.objects.create(name="a2", b=b, d=d2)
def test_ticket_17886(self):
# The first Q-object is generating the match, the rest of the filters
# should not remove the match even if they do not match anything. The
# problem here was that b__name generates a LOUTER JOIN, then
# b__c__name generates join to c, which the ORM tried to promote but
# failed as that join isn't nullable.
q_obj = Q(d__name="foo") | Q(b__name="foo") | Q(b__c__name="foo")
qset = ModelA.objects.filter(q_obj)
self.assertEqual(list(qset), [self.a1])
# We generate one INNER JOIN to D. The join is direct and not nullable
# so we can use INNER JOIN for it. However, we can NOT use INNER JOIN
# for the b->c join, as a->b is nullable.
self.assertEqual(str(qset.query).count("INNER JOIN"), 1)
def test_isnull_filter_promotion(self):
qs = ModelA.objects.filter(Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count("LEFT OUTER"), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count("LEFT OUTER"), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count("LEFT OUTER"), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(list(qs), [self.a2])
def test_null_join_demotion(self):
qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True))
self.assertIn(" INNER JOIN ", str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False))
self.assertIn(" INNER JOIN ", str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True))
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False))
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
def test_ticket_21366(self):
n = Note.objects.create(note="n", misc="m")
e = ExtraInfo.objects.create(info="info", note=n)
a = Author.objects.create(name="Author1", num=1, extra=e)
Ranking.objects.create(rank=1, author=a)
r1 = Report.objects.create(name="Foo", creator=a)
r2 = Report.objects.create(name="Bar")
Report.objects.create(name="Bar", creator=a)
qs = Report.objects.filter(
Q(creator__ranking__isnull=True) | Q(creator__ranking__rank=1, name="Foo")
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
self.assertEqual(str(qs.query).count(" JOIN "), 2)
self.assertSequenceEqual(qs.order_by("name"), [r2, r1])
def test_ticket_21748(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
i3 = Identifier.objects.create(name="i3")
Program.objects.create(identifier=i1)
Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
self.assertSequenceEqual(
Identifier.objects.filter(program=None, channel=None), [i3]
)
self.assertSequenceEqual(
Identifier.objects.exclude(program=None, channel=None).order_by("name"),
[i1, i2],
)
def test_ticket_21748_double_negated_and(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
Identifier.objects.create(name="i3")
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
# Check the ~~Q() (or equivalently .exclude(~Q)) works like Q() for
# join promotion.
qs1_doubleneg = Identifier.objects.exclude(
~Q(program__id=p1.id, channel__id=c1.id)
).order_by("pk")
qs1_filter = Identifier.objects.filter(
program__id=p1.id, channel__id=c1.id
).order_by("pk")
self.assertQuerySetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(
str(qs1_filter.query).count("JOIN"), str(qs1_doubleneg.query).count("JOIN")
)
self.assertEqual(2, str(qs1_doubleneg.query).count("INNER JOIN"))
self.assertEqual(
str(qs1_filter.query).count("INNER JOIN"),
str(qs1_doubleneg.query).count("INNER JOIN"),
)
def test_ticket_21748_double_negated_or(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
Identifier.objects.create(name="i3")
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
# Test OR + doubleneg. The expected result is that channel is LOUTER
# joined, program INNER joined
qs1_filter = Identifier.objects.filter(
Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id)
).order_by("pk")
qs1_doubleneg = Identifier.objects.exclude(
~Q(Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id))
).order_by("pk")
self.assertQuerySetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(
str(qs1_filter.query).count("JOIN"), str(qs1_doubleneg.query).count("JOIN")
)
self.assertEqual(1, str(qs1_doubleneg.query).count("INNER JOIN"))
self.assertEqual(
str(qs1_filter.query).count("INNER JOIN"),
str(qs1_doubleneg.query).count("INNER JOIN"),
)
def test_ticket_21748_complex_filter(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
Identifier.objects.create(name="i3")
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
# Finally, a more complex case, one time in a way where each
# NOT is pushed to lowest level in the boolean tree, and
# another query where this isn't done.
qs1 = Identifier.objects.filter(
~Q(~Q(program__id=p2.id, channel__id=c1.id) & Q(program__id=p1.id))
).order_by("pk")
qs2 = Identifier.objects.filter(
Q(Q(program__id=p2.id, channel__id=c1.id) | ~Q(program__id=p1.id))
).order_by("pk")
self.assertQuerySetEqual(qs1, qs2, lambda x: x)
self.assertEqual(str(qs1.query).count("JOIN"), str(qs2.query).count("JOIN"))
self.assertEqual(0, str(qs1.query).count("INNER JOIN"))
self.assertEqual(
str(qs1.query).count("INNER JOIN"), str(qs2.query).count("INNER JOIN")
)
| NullJoinPromotionOrTest |
python | doocs__leetcode | solution/1600-1699/1698.Number of Distinct Substrings in a String/Solution2.py | {
"start": 0,
"end": 473
} | class ____:
def countDistinct(self, s: str) -> int:
base = 131
n = len(s)
p = [0] * (n + 10)
h = [0] * (n + 10)
p[0] = 1
for i, c in enumerate(s):
p[i + 1] = p[i] * base
h[i + 1] = h[i] * base + ord(c)
ss = set()
for i in range(1, n + 1):
for j in range(i, n + 1):
t = h[j] - h[i - 1] * p[j - i + 1]
ss.add(t)
return len(ss)
| Solution |
python | python-visualization__folium | folium/map.py | {
"start": 23720,
"end": 25391
} | class ____(MacroElement):
"""
Creates a custom pane to hold map elements.
Behavior is as in https://leafletjs.com/examples/map-panes/
Parameters
----------
name: string
Name of the custom pane. Other map elements can be added
to the pane by specifying the 'pane' kwarg when constructing
them.
z_index: int or string, default 625
The z-index that will be associated with the pane, and will
determine which map elements lie over/under it. The default
(625) corresponds to between markers and tooltips. Default
panes and z-indexes can be found at
https://leafletjs.com/reference.html#map-pane
pointer_events: bool, default False
Whether or not layers in the pane should interact with the
cursor. Setting to False will prevent interfering with
pointer events associated with lower layers.
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
var {{ this.get_name() }} = {{ this._parent.get_name() }}.createPane(
{{ this.name|tojson }});
{{ this.get_name() }}.style.zIndex = {{ this.z_index|tojson }};
{% if not this.pointer_events %}
{{ this.get_name() }}.style.pointerEvents = 'none';
{% endif %}
{% endmacro %}
"""
)
def __init__(
self,
name: str,
z_index: Union[int, str] = 625,
pointer_events: bool = False,
):
super().__init__()
self._name = "Pane"
self.name = name
self.z_index = z_index
self.pointer_events = pointer_events
| CustomPane |
python | PrefectHQ__prefect | tests/runtime/test_task_run.py | {
"start": 5566,
"end": 6691
} | class ____:
@pytest.mark.parametrize("url_type", ["api_url", "ui_url"])
async def test_url_is_attribute(self, url_type: str):
assert url_type in dir(task_run)
@pytest.mark.parametrize("url_type", ["api_url", "ui_url"])
async def test_url_is_none_when_id_not_set(self, url_type: str):
assert getattr(task_run, url_type) is None
@pytest.mark.parametrize(
"url_type,",
["api_url", "ui_url"],
)
async def test_url_returns_correct_url_when_id_present(
self,
url_type: str,
):
test_id = "12345"
if url_type == "api_url":
base_url_value = PREFECT_API_URL.value()
elif url_type == "ui_url":
base_url_value = PREFECT_UI_URL.value()
else:
raise ValueError(f"Invalid url_type: {url_type}")
expected_url = f"{base_url_value}/runs/task-run/{test_id}"
with TaskRunContext.model_construct(
task_run=TaskRun.model_construct(id=test_id)
):
assert getattr(task_run, url_type) == expected_url
assert not getattr(task_run, url_type)
| TestURL |
python | run-llama__llama_index | llama-index-experimental/llama_index/experimental/query_engine/jsonalyze/jsonalyze_query_engine.py | {
"start": 7428,
"end": 13129
} | class ____(BaseQueryEngine):
"""
JSON List Shape Data Analysis Query Engine.
Converts natural language statasical queries to SQL within in-mem SQLite queries.
list_of_dict(List[Dict[str, Any]]): List of dictionaries to query.
jsonalyze_prompt (BasePromptTemplate): The JSONalyze prompt to use.
use_async (bool): Whether to use async.
analyzer (Callable): The analyzer that executes the query.
sql_parser (BaseSQLParser): The SQL parser that ensures valid SQL being parsed
from llm output.
synthesize_response (bool): Whether to synthesize a response.
response_synthesis_prompt (BasePromptTemplate): The response synthesis prompt
to use.
table_name (str): The table name to use.
verbose (bool): Whether to print verbose output.
"""
def __init__(
self,
list_of_dict: List[Dict[str, Any]],
llm: Optional[LLM] = None,
jsonalyze_prompt: Optional[BasePromptTemplate] = None,
use_async: bool = False,
analyzer: Optional[Callable] = None,
sql_parser: Optional[BaseSQLParser] = None,
synthesize_response: bool = True,
response_synthesis_prompt: Optional[BasePromptTemplate] = None,
table_name: str = DEFAULT_TABLE_NAME,
verbose: bool = False,
**kwargs: Any,
) -> None:
"""Initialize params."""
self._list_of_dict = list_of_dict
self._llm = llm or Settings.llm
self._jsonalyze_prompt = jsonalyze_prompt or DEFAULT_JSONALYZE_PROMPT
self._use_async = use_async
self._analyzer = load_jsonalyzer(use_async, analyzer)
self._sql_parser = sql_parser or DefaultSQLParser()
self._synthesize_response = synthesize_response
self._response_synthesis_prompt = (
response_synthesis_prompt or DEFAULT_RESPONSE_SYNTHESIS_PROMPT
)
self._table_name = table_name
self._verbose = verbose
super().__init__(callback_manager=Settings.callback_manager)
def _get_prompts(self) -> Dict[str, Any]:
"""Get prompts."""
return {
"jsonalyze_prompt": self._jsonalyze_prompt,
"response_synthesis_prompt": self._response_synthesis_prompt,
}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "jsonalyze_prompt" in prompts:
self._jsonalyze_prompt = prompts["jsonalyze_prompt"]
if "response_synthesis_prompt" in prompts:
self._response_synthesis_prompt = prompts["response_synthesis_prompt"]
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {}
def _query(self, query_bundle: QueryBundle) -> Response:
"""Answer an analytical query on the JSON List."""
query = query_bundle.query_str
if self._verbose:
print_text(f"Query: {query}\n", color="green")
# Perform the analysis
sql_query, table_schema, results = self._analyzer(
self._list_of_dict,
query_bundle,
self._llm,
table_name=self._table_name,
prompt=self._jsonalyze_prompt,
sql_parser=self._sql_parser,
)
if self._verbose:
print_text(f"SQL Query: {sql_query}\n", color="blue")
print_text(f"Table Schema: {table_schema}\n", color="cyan")
print_text(f"SQL Response: {results}\n", color="yellow")
if self._synthesize_response:
response_str = self._llm.predict(
self._response_synthesis_prompt,
sql_query=sql_query,
table_schema=table_schema,
sql_response=results,
query_str=query_bundle.query_str,
)
if self._verbose:
print_text(f"Response: {response_str}", color="magenta")
else:
response_str = str(results)
response_metadata = {"sql_query": sql_query, "table_schema": str(table_schema)}
return Response(response=response_str, metadata=response_metadata)
async def _aquery(self, query_bundle: QueryBundle) -> Response:
"""Answer an analytical query on the JSON List."""
query = query_bundle.query_str
if self._verbose:
print_text(f"Query: {query}", color="green")
# Perform the analysis
sql_query, table_schema, results = self._analyzer(
self._list_of_dict,
query,
self._llm,
table_name=self._table_name,
prompt=self._jsonalyze_prompt,
)
if self._verbose:
print_text(f"SQL Query: {sql_query}\n", color="blue")
print_text(f"Table Schema: {table_schema}\n", color="cyan")
print_text(f"SQL Response: {results}\n", color="yellow")
if self._synthesize_response:
response_str = await self._llm.apredict(
self._response_synthesis_prompt,
sql_query=sql_query,
table_schema=table_schema,
sql_response=results,
query_str=query_bundle.query_str,
)
if self._verbose:
print_text(f"Response: {response_str}", color="magenta")
else:
response_str = json.dumps(
{
"sql_query": sql_query,
"table_schema": table_schema,
"sql_response": results,
}
)
response_metadata = {"sql_query": sql_query, "table_schema": str(table_schema)}
return Response(response=response_str, metadata=response_metadata)
| JSONalyzeQueryEngine |
python | scipy__scipy | scipy/optimize/tests/test_least_squares.py | {
"start": 29714,
"end": 29812
} | class ____(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
method = 'dogbox'
| TestDogbox |
python | networkx__networkx | networkx/algorithms/approximation/tests/test_traveling_salesman.py | {
"start": 1082,
"end": 3635
} | class ____:
@classmethod
def setup_class(cls):
cls.DG = nx.DiGraph()
cls.DG.add_weighted_edges_from(
{
("A", "B", 3),
("A", "C", 17),
("A", "D", 14),
("B", "A", 3),
("B", "C", 12),
("B", "D", 16),
("C", "A", 13),
("C", "B", 12),
("C", "D", 4),
("D", "A", 14),
("D", "B", 15),
("D", "C", 2),
}
)
cls.DG_cycle = ["D", "C", "B", "A", "D"]
cls.DG_cost = 31.0
cls.DG2 = nx.DiGraph()
cls.DG2.add_weighted_edges_from(
{
("A", "B", 3),
("A", "C", 17),
("A", "D", 14),
("B", "A", 30),
("B", "C", 2),
("B", "D", 16),
("C", "A", 33),
("C", "B", 32),
("C", "D", 34),
("D", "A", 14),
("D", "B", 15),
("D", "C", 2),
}
)
cls.DG2_cycle = ["D", "A", "B", "C", "D"]
cls.DG2_cost = 53.0
cls.unweightedUG = nx.complete_graph(5, nx.Graph())
cls.unweightedDG = nx.complete_graph(5, nx.DiGraph())
cls.incompleteUG = nx.Graph()
cls.incompleteUG.add_weighted_edges_from({(0, 1, 1), (1, 2, 3)})
cls.incompleteDG = nx.DiGraph()
cls.incompleteDG.add_weighted_edges_from({(0, 1, 1), (1, 2, 3)})
cls.UG = nx.Graph()
cls.UG.add_weighted_edges_from(
{
("A", "B", 3),
("A", "C", 17),
("A", "D", 14),
("B", "C", 12),
("B", "D", 16),
("C", "D", 4),
}
)
cls.UG_cycle = ["D", "C", "B", "A", "D"]
cls.UG_cost = 33.0
cls.UG2 = nx.Graph()
cls.UG2.add_weighted_edges_from(
{
("A", "B", 1),
("A", "C", 15),
("A", "D", 5),
("B", "C", 16),
("B", "D", 8),
("C", "D", 3),
}
)
cls.UG2_cycle = ["D", "C", "B", "A", "D"]
cls.UG2_cost = 25.0
def validate_solution(soln, cost, exp_soln, exp_cost):
assert soln == exp_soln
assert cost == exp_cost
def validate_symmetric_solution(soln, cost, exp_soln, exp_cost):
assert soln == exp_soln or soln == exp_soln[::-1]
assert cost == exp_cost
| TestBase |
python | urllib3__urllib3 | src/urllib3/exceptions.py | {
"start": 7107,
"end": 7232
} | class ____(ProtocolError, ValueError):
"""Response needs to be chunked in order to read it as chunks."""
| ResponseNotChunked |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 15543,
"end": 16343
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, api_key: str, start_date: str):
"""Airbyte Source for Iterable.
Documentation can be found at https://docs.airbyte.com/integrations/sources/iterable
Args:
name (str): The name of the destination.
api_key (str): Iterable API Key. See the docs for more information on how to obtain this key.
start_date (str): The date from which you'd like to replicate data for Iterable, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.
"""
self.api_key = check.str_param(api_key, "api_key")
self.start_date = check.str_param(start_date, "start_date")
super().__init__("Iterable", name)
| IterableSource |
python | numpy__numpy | numpy/lib/_datasource.py | {
"start": 17036,
"end": 22731
} | class ____ (DataSource):
"""
Repository(baseurl, destpath='.')
A data repository where multiple DataSource's share a base
URL/directory.
`Repository` extends `DataSource` by prepending a base URL (or
directory) to all the files it handles. Use `Repository` when you will
be working with multiple files from one base URL. Initialize
`Repository` with the base URL, then refer to each file by its filename
only.
Parameters
----------
baseurl : str
Path to the local directory or remote location that contains the
data files.
destpath : str or None, optional
Path to the directory where the source file gets downloaded to for
use. If `destpath` is None, a temporary directory will be created.
The default path is the current directory.
Examples
--------
To analyze all files in the repository, do something like this
(note: this is not self-contained code)::
>>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
>>> for filename in filelist:
... fp = repos.open(filename)
... fp.analyze()
... fp.close()
Similarly you could use a URL for a repository::
>>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')
"""
def __init__(self, baseurl, destpath=os.curdir):
"""Create a Repository with a shared url or directory of baseurl."""
DataSource.__init__(self, destpath=destpath)
self._baseurl = baseurl
def __del__(self):
DataSource.__del__(self)
def _fullpath(self, path):
"""Return complete path for path. Prepends baseurl if necessary."""
splitpath = path.split(self._baseurl, 2)
if len(splitpath) == 1:
result = os.path.join(self._baseurl, path)
else:
result = path # path contains baseurl already
return result
def _findfile(self, path):
"""Extend DataSource method to prepend baseurl to ``path``."""
return DataSource._findfile(self, self._fullpath(path))
def abspath(self, path):
"""
Return absolute path of file in the Repository directory.
If `path` is an URL, then `abspath` will return either the location
the file exists locally or the location it would exist when opened
using the `open` method.
Parameters
----------
path : str or pathlib.Path
Can be a local file or a remote URL. This may, but does not
have to, include the `baseurl` with which the `Repository` was
initialized.
Returns
-------
out : str
Complete path, including the `DataSource` destination directory.
"""
return DataSource.abspath(self, self._fullpath(path))
def exists(self, path):
"""
Test if path exists prepending Repository base URL to path.
Test if `path` exists as (and in this order):
- a local file.
- a remote URL that has been downloaded and stored locally in the
`DataSource` directory.
- a remote URL that has not been downloaded, but is valid and
accessible.
Parameters
----------
path : str or pathlib.Path
Can be a local file or a remote URL. This may, but does not
have to, include the `baseurl` with which the `Repository` was
initialized.
Returns
-------
out : bool
True if `path` exists.
Notes
-----
When `path` is an URL, `exists` will return True if it's either
stored locally in the `DataSource` directory, or is a valid remote
URL. `DataSource` does not discriminate between the two, the file
is accessible if it exists in either location.
"""
return DataSource.exists(self, self._fullpath(path))
def open(self, path, mode='r', encoding=None, newline=None):
"""
Open and return file-like object prepending Repository base URL.
If `path` is an URL, it will be downloaded, stored in the
DataSource directory and opened from there.
Parameters
----------
path : str or pathlib.Path
Local file path or URL to open. This may, but does not have to,
include the `baseurl` with which the `Repository` was
initialized.
mode : {'r', 'w', 'a'}, optional
Mode to open `path`. Mode 'r' for reading, 'w' for writing,
'a' to append. Available modes depend on the type of object
specified by `path`. Default is 'r'.
encoding : {None, str}, optional
Open text file with given encoding. The default encoding will be
what `open` uses.
newline : {None, str}, optional
Newline to use when reading text file.
Returns
-------
out : file object
File object.
"""
return DataSource.open(self, self._fullpath(path), mode,
encoding=encoding, newline=newline)
def listdir(self):
"""
List files in the source Repository.
Returns
-------
files : list of str or pathlib.Path
List of file names (not containing a directory part).
Notes
-----
Does not currently work for remote repositories.
"""
if self._isurl(self._baseurl):
raise NotImplementedError(
"Directory listing of URLs, not supported yet.")
else:
return os.listdir(self._baseurl)
| Repository |
python | great-expectations__great_expectations | great_expectations/execution_engine/sqlalchemy_execution_engine.py | {
"start": 5515,
"end": 5664
} | class ____(ValueError):
def __init__(self, operator: Any) -> None:
super().__init__(f"Invalid operator: {operator!r}")
| InvalidOperatorError |
python | tensorflow__tensorflow | third_party/xla/third_party/gpus/find_cuda_config.py | {
"start": 2820,
"end": 22821
} | class ____(Exception):
pass
def _is_linux():
return platform.system() == "Linux"
def _is_windows():
return platform.system() == "Windows"
def _is_macos():
return platform.system() == "Darwin"
def _matches_version(actual_version, required_version):
"""Checks whether some version meets the requirements.
All elements of the required_version need to be present in the
actual_version.
required_version actual_version result
-----------------------------------------
1 1.1 True
1.2 1 False
1.2 1.3 False
1 True
Args:
required_version: The version specified by the user.
actual_version: The version detected from the CUDA installation.
Returns: Whether the actual version matches the required one.
"""
if actual_version is None:
return False
# Strip spaces from the versions.
actual_version = actual_version.strip()
required_version = required_version.strip()
return actual_version.startswith(required_version)
def _at_least_version(actual_version, required_version):
actual = [int(v) for v in actual_version.split(".")]
required = [int(v) for v in required_version.split(".")]
return actual >= required
def _get_header_version(path, name):
"""Returns preprocessor defines in C header file."""
for line in io.open(path, "r", encoding="utf-8").readlines():
match = re.match(r"\s*#\s*define %s\s+(\d+)" % name, line)
if match:
return match.group(1)
return ""
def _cartesian_product(first, second):
"""Returns all path combinations of first and second."""
return [os.path.join(f, s) for f in first for s in second]
def _get_ld_config_paths():
"""Returns all directories from 'ldconfig -p'."""
if not _is_linux():
return []
ldconfig_path = shutil.which("ldconfig") or "/sbin/ldconfig"
output = subprocess.check_output([ldconfig_path, "-p"])
pattern = re.compile(".* => (.*)")
result = set()
for line in output.splitlines():
try:
match = pattern.match(line.decode("ascii"))
except UnicodeDecodeError:
match = False
if match:
result.add(os.path.dirname(match.group(1)))
return sorted(list(result))
def _get_default_cuda_paths(cuda_version):
if not cuda_version:
cuda_version = "*"
elif not "." in cuda_version:
cuda_version = cuda_version + ".*"
if _is_windows():
return [
os.environ.get(
"CUDA_PATH",
"C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v%s\\" %
cuda_version)
]
return ["/usr/local/cuda-%s" % cuda_version, "/usr/local/cuda", "/usr",
"/usr/local/cudnn"] + _get_ld_config_paths()
def _header_paths():
"""Returns hard-coded set of relative paths to look for header files."""
return [
"",
"include",
"include/cuda",
"include/*-linux-gnu",
"extras/CUPTI/include",
"include/cuda/CUPTI",
"local/cuda/extras/CUPTI/include",
"targets/x86_64-linux/include",
]
def _library_paths():
"""Returns hard-coded set of relative paths to look for library files."""
return [
"",
"lib64",
"lib",
"lib/*-linux-gnu",
"lib/x64",
"extras/CUPTI/*",
"local/cuda/lib64",
"local/cuda/extras/CUPTI/lib64",
]
def _not_found_error(base_paths, relative_paths, filepattern):
base_paths = "".join(["\n '%s'" % path for path in sorted(base_paths)])
relative_paths = "".join(["\n '%s'" % path for path in relative_paths])
return ConfigError(
"Could not find any %s in any subdirectory:%s\nof:%s\n" %
(filepattern, relative_paths, base_paths))
def _find_file(base_paths, relative_paths, filepattern):
for path in _cartesian_product(base_paths, relative_paths):
for file in glob.glob(os.path.join(path, filepattern)):
return file
raise _not_found_error(base_paths, relative_paths, filepattern)
def _find_library(base_paths, library_name, required_version):
"""Returns first valid path to the requested library."""
if _is_windows():
filepattern = library_name + ".lib"
elif _is_macos():
filepattern = "%s*.dylib" % (".".join(["lib" + library_name] +
required_version.split(".")[:1]))
else:
filepattern = ".".join(["lib" + library_name, "so"] +
required_version.split(".")[:1]) + "*"
return _find_file(base_paths, _library_paths(), filepattern)
def _find_versioned_file(base_paths, relative_paths, filepatterns,
required_version, get_version):
"""Returns first valid path to a file that matches the requested version."""
if type(filepatterns) not in [list, tuple]:
filepatterns = [filepatterns]
for path in _cartesian_product(base_paths, relative_paths):
for filepattern in filepatterns:
for file in glob.glob(os.path.join(path, filepattern)):
actual_version = get_version(file)
if _matches_version(actual_version, required_version):
return file, actual_version
raise _not_found_error(
base_paths, relative_paths,
", ".join(filepatterns) + " matching version '%s'" % required_version)
def _find_header(base_paths, header_name, required_version, get_version):
"""Returns first valid path to a header that matches the requested version."""
return _find_versioned_file(base_paths, _header_paths(), header_name,
required_version, get_version)
def _find_cuda_config(base_paths, required_version):
def get_header_version(path):
version = int(_get_header_version(path, "CUDA_VERSION"))
if not version:
return None
return "%d.%d" % (version // 1000, version % 1000 // 10)
cuda_header_path, header_version = _find_header(base_paths, "cuda.h",
required_version,
get_header_version)
cuda_version = header_version # x.y, see above.
cuda_library_path = _find_library(base_paths, "cudart", cuda_version)
def get_nvcc_version(path):
pattern = r"Cuda compilation tools, release \d+\.\d+, V(\d+\.\d+\.\d+)"
for line in subprocess.check_output([path, "--version"]).splitlines():
match = re.match(pattern, line.decode("ascii"))
if match:
return match.group(1)
return None
nvcc_name = "nvcc.exe" if _is_windows() else "nvcc"
nvcc_path, nvcc_version = _find_versioned_file(base_paths, [
"",
"bin",
"local/cuda/bin",
], nvcc_name, cuda_version, get_nvcc_version)
nvvm_path = _find_file(base_paths, [
"nvvm/libdevice",
"share/cuda",
"lib/nvidia-cuda-toolkit/libdevice",
"local/cuda/nvvm/libdevice",
], "libdevice*.10.bc")
cupti_header_path = _find_file(base_paths, _header_paths(), "cupti.h")
nvml_header_dir = _find_file(base_paths, _header_paths(), "nvml.h")
cupti_library_path = _find_library(base_paths, "cupti", required_version)
cuda_binary_dir = os.path.dirname(nvcc_path)
nvvm_library_dir = os.path.dirname(nvvm_path)
# XLA requires the toolkit path to find ptxas and libdevice.
# TODO(csigg): pass in both directories instead.
cuda_toolkit_paths = (
os.path.normpath(os.path.join(cuda_binary_dir, "..")),
os.path.normpath(os.path.join(nvvm_library_dir, "../..")),
)
if cuda_toolkit_paths[0] != cuda_toolkit_paths[1]:
raise ConfigError("Inconsistent CUDA toolkit path: %s vs %s" %
cuda_toolkit_paths)
return {
"cuda_version": cuda_version,
"cuda_include_dir": os.path.dirname(cuda_header_path),
"cuda_library_dir": os.path.dirname(cuda_library_path),
"cuda_binary_dir": cuda_binary_dir,
"nvvm_library_dir": nvvm_library_dir,
"cupti_include_dir": os.path.dirname(cupti_header_path),
"cupti_library_dir": os.path.dirname(cupti_library_path),
"cuda_toolkit_path": cuda_toolkit_paths[0],
"nvml_header_dir": os.path.dirname(nvml_header_dir),
}
def _find_cublas_config(base_paths, required_version, cuda_version):
  """Locates the cuBLAS header and library and returns their directories."""
  if not _at_least_version(cuda_version, "10.1"):
    # No version macros exist before CUDA 10.1: just locate the header and
    # report the CUDA toolkit version (x.y) as the cuBLAS version.
    header_version = cuda_version
    header_path = _find_file(base_paths, _header_paths(), "cublas_api.h")
    library_path = _find_library(base_paths, "cublas", required_version)
  else:
    def read_version(path):
      macros = ("CUBLAS_VER_MAJOR", "CUBLAS_VER_MINOR", "CUBLAS_VER_PATCH")
      return ".".join(_get_header_version(path, macro) for macro in macros)

    header_path, header_version = _find_header(base_paths, "cublas_api.h",
                                               required_version, read_version)
    # The cuBLAS library is versioned by its major number only.
    library_path = _find_library(base_paths, "cublas",
                                 header_version.split(".")[0])
  return {
      "cublas_version": header_version,
      "cublas_include_dir": os.path.dirname(header_path),
      "cublas_library_dir": os.path.dirname(library_path),
  }
def _find_cusolver_config(base_paths, required_version, cuda_version):
  """Locates the cuSOLVER header and library and returns their directories."""
  if _at_least_version(cuda_version, "11.0"):
    def read_version(path):
      macros = ("CUSOLVER_VER_MAJOR", "CUSOLVER_VER_MINOR",
                "CUSOLVER_VER_PATCH")
      return ".".join(_get_header_version(path, macro) for macro in macros)

    header_path, header_version = _find_header(base_paths, "cusolver_common.h",
                                               required_version, read_version)
    # Shared libraries carry the major version only.
    lib_version = header_version.split(".")[0]
  else:
    # Pre-11.0 headers carry no version macros; report the CUDA version.
    header_version = cuda_version
    header_path = _find_file(base_paths, _header_paths(), "cusolver_common.h")
    lib_version = required_version
  library_path = _find_library(base_paths, "cusolver", lib_version)
  return {
      "cusolver_version": header_version,
      "cusolver_include_dir": os.path.dirname(header_path),
      "cusolver_library_dir": os.path.dirname(library_path),
  }
def _find_curand_config(base_paths, required_version, cuda_version):
  """Locates the cuRAND header and library and returns their directories."""
  if not _at_least_version(cuda_version, "11.0"):
    # Before CUDA 11.0 the header has no version macros of its own.
    header_path = _find_file(base_paths, _header_paths(), "curand.h")
    header_version = cuda_version
    curand_version = required_version
  else:
    def read_version(path):
      parts = [_get_header_version(path, name)
               for name in ("CURAND_VER_MAJOR", "CURAND_VER_MINOR",
                            "CURAND_VER_PATCH")]
      return ".".join(parts)

    header_path, header_version = _find_header(base_paths, "curand.h",
                                               required_version, read_version)
    curand_version = header_version.split(".")[0]  # libraries use major only
  library_path = _find_library(base_paths, "curand", curand_version)
  return {
      "curand_version": header_version,
      "curand_include_dir": os.path.dirname(header_path),
      "curand_library_dir": os.path.dirname(library_path),
  }
def _find_cufft_config(base_paths, required_version, cuda_version):
  """Locates the cuFFT header and library and returns their directories."""
  if _at_least_version(cuda_version, "11.0"):
    def read_version(path):
      return ".".join(
          _get_header_version(path, name)
          for name in ("CUFFT_VER_MAJOR", "CUFFT_VER_MINOR",
                       "CUFFT_VER_PATCH"))

    header_path, header_version = _find_header(base_paths, "cufft.h",
                                               required_version, read_version)
    # Shared libraries are versioned by the major number only.
    lib_version = header_version.split(".")[0]
  else:
    # No per-component version macros before CUDA 11.0.
    header_version = cuda_version
    header_path = _find_file(base_paths, _header_paths(), "cufft.h")
    lib_version = required_version
  library_path = _find_library(base_paths, "cufft", lib_version)
  return {
      "cufft_version": header_version,
      "cufft_include_dir": os.path.dirname(header_path),
      "cufft_library_dir": os.path.dirname(library_path),
  }
def _find_cudnn_config(base_paths, required_version):
  """Locates the cuDNN header and library and returns their directories."""

  def read_version(path):
    major, minor, patch = (_get_header_version(path, name)
                           for name in ("CUDNN_MAJOR", "CUDNN_MINOR",
                                        "CUDNN_PATCHLEVEL"))
    # A missing/empty CUDNN_MAJOR means this header carries no version info.
    if not major:
      return None
    return ".".join((major, minor, patch))

  # The version macros may live in either header name; probe both.
  header_path, header_version = _find_header(base_paths,
                                             ("cudnn.h", "cudnn_version.h"),
                                             required_version, read_version)
  major_version = header_version.split(".")[0]
  library_path = _find_library(base_paths, "cudnn", major_version)
  return {
      "cudnn_version": major_version,
      "cudnn_include_dir": os.path.dirname(header_path),
      "cudnn_library_dir": os.path.dirname(library_path),
  }
def _find_cusparse_config(base_paths, required_version, cuda_version):
  """Locates the cuSPARSE header and library and returns their directories."""
  if not _at_least_version(cuda_version, "11.0"):
    # No version macros before CUDA 11.0; fall back to the CUDA version.
    header_path = _find_file(base_paths, _header_paths(), "cusparse.h")
    header_version = cuda_version
    lib_version = required_version
  else:
    def read_version(path):
      macro_names = ("CUSPARSE_VER_MAJOR", "CUSPARSE_VER_MINOR",
                     "CUSPARSE_VER_PATCH")
      return ".".join(_get_header_version(path, m) for m in macro_names)

    header_path, header_version = _find_header(base_paths, "cusparse.h",
                                               required_version, read_version)
    lib_version = header_version.split(".")[0]  # libraries use major only
  library_path = _find_library(base_paths, "cusparse", lib_version)
  return {
      "cusparse_version": header_version,
      "cusparse_include_dir": os.path.dirname(header_path),
      "cusparse_library_dir": os.path.dirname(library_path),
  }
def _find_nccl_config(base_paths, required_version):
  """Locates the NCCL header and library and returns their directories."""

  def read_version(path):
    fields = (_get_header_version(path, name)
              for name in ("NCCL_MAJOR", "NCCL_MINOR", "NCCL_PATCH"))
    return ".".join(fields)

  header_path, header_version = _find_header(base_paths, "nccl.h",
                                             required_version, read_version)
  # NCCL shared libraries are versioned by their major number only.
  major_version = header_version.split(".")[0]
  library_path = _find_library(base_paths, "nccl", major_version)
  return {
      "nccl_version": major_version,
      "nccl_include_dir": os.path.dirname(header_path),
      "nccl_library_dir": os.path.dirname(library_path),
  }
def _find_tensorrt_config(base_paths, required_version):
  """Locates the TensorRT header and library and returns their directories."""

  def read_version(path):
    macros = ("NV_TENSORRT_MAJOR", "NV_TENSORRT_MINOR", "NV_TENSORRT_PATCH")
    # Materialize the values so we can both validate and join them.
    fields = [_get_header_version(path, macro) for macro in macros]
    if not all(fields):
      # Macros not found: return None so version matching rejects the header.
      return None
    return ".".join(fields)

  header_path, header_version = _find_header(base_paths, "NvInferVersion.h",
                                             required_version, read_version)
  # The nvinfer library is versioned by the major number only.
  major_version = header_version.split(".")[0]
  library_path = _find_library(base_paths, "nvinfer", major_version)
  return {
      "tensorrt_version": header_version,
      "tensorrt_include_dir": os.path.dirname(header_path),
      "tensorrt_library_dir": os.path.dirname(library_path),
  }
def _list_from_env(env_name, default=None):
  """Returns comma-separated list from environment variable.

  Args:
    env_name: name of the environment variable to read.
    default: list returned when the variable is unset; a fresh empty list
      when not provided.

  Returns:
    The value of `env_name` split on commas, or `default`.
  """
  if env_name in os.environ:
    return os.environ[env_name].split(",")
  # Use a None sentinel instead of a mutable default argument: with
  # `default=[]` the shared list was returned to callers, so mutating the
  # result would have corrupted every later call.
  return [] if default is None else default
def _get_legacy_path(env_name, default=None):
  """Returns a path specified by a legacy environment variable.

  CUDNN_INSTALL_PATH, NCCL_INSTALL_PATH, TENSORRT_INSTALL_PATH set to
  '/usr/lib/x86_64-linux-gnu' would previously find both library and header
  paths. Detect those and return '/usr', otherwise forward to
  _list_from_env().

  Args:
    env_name: name of the environment variable to read.
    default: fallback list passed to _list_from_env(); defaults to [].

  Returns:
    A list of base paths.
  """
  if env_name in os.environ:
    # Capture the whole install root before '/lib/<arch>-linux-gnu'. The
    # previous pattern '(/[^/ ]*)+' captured only the *last* repetition, so
    # '/usr/local/nccl/lib/x86_64-linux-gnu' wrongly yielded '/nccl'.
    match = re.match(r"^((?:/[^/ ]*)+)/lib/\w+-linux-gnu/?$",
                     os.environ[env_name])
    if match:
      return [match.group(1)]
  # Resolve the None sentinel here to avoid a mutable default argument.
  return _list_from_env(env_name, [] if default is None else default)
def _normalize_path(path):
  """Returns normalized path, with forward slashes on Windows."""
  resolved = os.path.realpath(path)
  if not _is_windows():
    return resolved
  # Windows paths are reported with forward slashes for consistency.
  return resolved.replace("\\", "/")
def find_cuda_config():
  """Returns a dictionary of CUDA library and header file paths.

  The components to locate ("cuda", "cudnn", "nccl", "tensorrt") are taken
  from the command line (sys.argv[1:]), lower-cased. Required versions are
  read from TF_*_VERSION environment variables (empty string means any
  version), and candidate directories from TF_CUDA_PATHS plus the
  per-component *_INSTALL_PATH variables.

  Returns:
    Dict mapping "<component>_version" / "<component>_include_dir" /
    "<component>_library_dir" (and toolkit paths) to strings; all entries
    ending in "_dir" or "_path" are normalized.

  Raises:
    ConfigError: raised by the _find_* helpers when a requested component
      cannot be found or the detected paths are inconsistent.
  """
  # Component names arrive as command-line arguments, e.g.
  # "find_cuda_config.py cuda cudnn".
  libraries = [argv.lower() for argv in sys.argv[1:]]
  cuda_version = os.environ.get("TF_CUDA_VERSION", "")
  base_paths = _list_from_env("TF_CUDA_PATHS",
                              _get_default_cuda_paths(cuda_version))
  # Drop candidate directories that do not exist on this machine.
  base_paths = [path for path in base_paths if os.path.exists(path)]
  result = {}
  if "cuda" in libraries:
    cuda_paths = _list_from_env("CUDA_TOOLKIT_PATH", base_paths)
    res = _find_cuda_config(cuda_paths, cuda_version)
    result.update(res)
    # Use the version actually detected for the component lookups below.
    cuda_version = result["cuda_version"]
    cublas_paths = base_paths
    if tuple(int(v) for v in cuda_version.split(".")) < (10, 1):
      # Before CUDA 10.1, cuBLAS was in the same directory as the toolkit.
      cublas_paths = cuda_paths
    cublas_version = os.environ.get("TF_CUBLAS_VERSION", "")
    result.update(
        _find_cublas_config(cublas_paths, cublas_version, cuda_version))
    cusolver_paths = base_paths
    if tuple(int(v) for v in cuda_version.split(".")) < (11, 0):
      # Before CUDA 11.0, search the toolkit directory (same as cuBLAS above).
      cusolver_paths = cuda_paths
    cusolver_version = os.environ.get("TF_CUSOLVER_VERSION", "")
    result.update(
        _find_cusolver_config(cusolver_paths, cusolver_version, cuda_version))
    curand_paths = base_paths
    if tuple(int(v) for v in cuda_version.split(".")) < (11, 0):
      curand_paths = cuda_paths
    curand_version = os.environ.get("TF_CURAND_VERSION", "")
    result.update(
        _find_curand_config(curand_paths, curand_version, cuda_version))
    cufft_paths = base_paths
    if tuple(int(v) for v in cuda_version.split(".")) < (11, 0):
      cufft_paths = cuda_paths
    cufft_version = os.environ.get("TF_CUFFT_VERSION", "")
    result.update(_find_cufft_config(cufft_paths, cufft_version, cuda_version))
    cusparse_paths = base_paths
    if tuple(int(v) for v in cuda_version.split(".")) < (11, 0):
      cusparse_paths = cuda_paths
    cusparse_version = os.environ.get("TF_CUSPARSE_VERSION", "")
    result.update(
        _find_cusparse_config(cusparse_paths, cusparse_version, cuda_version))
  if "cudnn" in libraries:
    cudnn_paths = _get_legacy_path("CUDNN_INSTALL_PATH", base_paths)
    cudnn_version = os.environ.get("TF_CUDNN_VERSION", "")
    result.update(_find_cudnn_config(cudnn_paths, cudnn_version))
  if "nccl" in libraries:
    nccl_paths = _get_legacy_path("NCCL_INSTALL_PATH", base_paths)
    nccl_version = os.environ.get("TF_NCCL_VERSION", "")
    result.update(_find_nccl_config(nccl_paths, nccl_version))
  if "tensorrt" in libraries:
    tensorrt_paths = _get_legacy_path("TENSORRT_INSTALL_PATH", base_paths)
    tensorrt_version = os.environ.get("TF_TENSORRT_VERSION", "")
    result.update(_find_tensorrt_config(tensorrt_paths, tensorrt_version))
  # Normalize every directory/path entry (realpath; forward slashes on
  # Windows) so downstream consumers see consistent separators.
  for k, v in result.items():
    if k.endswith("_dir") or k.endswith("_path"):
      result[k] = _normalize_path(v)
  return result
def main():
  """Prints the detected configuration, or exits 1 with the error message."""
  try:
    config = find_cuda_config()
  except ConfigError as exc:
    sys.stderr.write(str(exc) + '\n')
    sys.exit(1)
  # One "key: value" line per entry, in sorted key order.
  for key in sorted(config):
    print("%s: %s" % (key, config[key]))


if __name__ == "__main__":
  main()
| ConfigError |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pie/PIE790.py | {
"start": 2641,
"end": 2710
} | class ____:
...
'Lorem ipsum dolor sit amet.'
| PotentialDocstring2 |
python | mahmoud__boltons | tests/test_iterutils.py | {
"start": 11672,
"end": 18976
} | class ____:
def test_depth_one(self):
root = ['test']
assert get_path(root, (0,)) == 'test'
assert get_path(root, '0') == 'test'
root = {'key': 'value'}
assert get_path(root, ('key',)) == 'value'
assert get_path(root, 'key') == 'value'
def test_depth_two(self):
root = {'key': ['test']}
assert get_path(root, ('key', 0)) == 'test'
assert get_path(root, 'key.0') == 'test'
def test_research():
root = {}
with pytest.raises(TypeError):
research(root, query=None)
root = {'a': 'a'}
res = research(root, query=lambda p, k, v: v == 'a')
assert len(res) == 1
assert res[0] == (('a',), 'a')
def broken_query(p, k, v):
raise RuntimeError()
with pytest.raises(RuntimeError):
research(root, broken_query, reraise=True)
# empty results with default, reraise=False
assert research(root, broken_query) == []
def test_research_custom_enter():
# see #368
from types import SimpleNamespace as NS
root = NS(
a='a',
b='b',
c=NS(aa='aa') )
def query(path, key, value):
return value.startswith('a')
def custom_enter(path, key, value):
if isinstance(value, NS):
return [], value.__dict__.items()
return default_enter(path, key, value)
with pytest.raises(TypeError):
research(root, query)
assert research(root, query, enter=custom_enter) == [(('a',), 'a'), (('c', 'aa'), 'aa')]
def test_backoff_basic():
from boltons.iterutils import backoff
assert backoff(1, 16) == [1.0, 2.0, 4.0, 8.0, 16.0]
assert backoff(1, 1) == [1.0]
assert backoff(2, 15) == [2.0, 4.0, 8.0, 15.0]
def test_backoff_repeat():
from boltons.iterutils import backoff_iter
fives = []
for val in backoff_iter(5, 5, count='repeat'):
fives.append(val)
if len(fives) >= 1000:
break
assert fives == [5] * 1000
def test_backoff_zero_start():
from boltons.iterutils import backoff
assert backoff(0, 16) == [0.0, 1.0, 2.0, 4.0, 8.0, 16.0]
assert backoff(0, 15) == [0.0, 1.0, 2.0, 4.0, 8.0, 15.0]
slow_backoff = [round(x, 2) for x in backoff(0, 2.9, factor=1.2)]
assert slow_backoff == [0.0, 1.0, 1.2, 1.44, 1.73, 2.07, 2.49, 2.9]
def test_backoff_validation():
from boltons.iterutils import backoff
with pytest.raises(ValueError):
backoff(8, 2)
with pytest.raises(ValueError):
backoff(1, 0)
with pytest.raises(ValueError):
backoff(-1, 10)
with pytest.raises(ValueError):
backoff(2, 8, factor=0)
with pytest.raises(ValueError):
backoff(2, 8, jitter=20)
def test_backoff_jitter():
from boltons.iterutils import backoff
start, stop = 1, 256
unjittered = backoff(start, stop)
jittered = backoff(start, stop, jitter=True)
assert len(unjittered) == len(jittered)
assert [u >= j for u, j in zip(unjittered, jittered)]
neg_jittered = backoff(start, stop, jitter=-0.01)
assert len(unjittered) == len(neg_jittered)
assert [u <= j for u, j in zip(unjittered, neg_jittered)]
o_jittered = backoff(start, stop, jitter=-0.0)
assert len(unjittered) == len(o_jittered)
assert [u == j for u, j in zip(unjittered, o_jittered)]
nonconst_jittered = backoff(stop, stop, count=5, jitter=True)
assert len(nonconst_jittered) == 5
# no two should be equal realistically
assert len(set(nonconst_jittered)) == 5
def test_guiderator():
import string
from boltons.iterutils import GUIDerator
guid_iter = GUIDerator()
guid = next(guid_iter)
assert guid
assert len(guid) == guid_iter.size
assert all([c in string.hexdigits for c in guid])
guid2 = next(guid_iter)
assert guid != guid2
# custom size
guid_iter = GUIDerator(size=26)
assert len(next(guid_iter)) == 26
def test_seqguiderator():
import string
from boltons.iterutils import SequentialGUIDerator as GUIDerator
guid_iter = GUIDerator()
guid = next(guid_iter)
assert guid
assert len(guid) == guid_iter.size
assert all([c in string.hexdigits for c in guid])
guid2 = next(guid_iter)
assert guid != guid2
# custom size
for x in range(10000):
guid_iter = GUIDerator(size=26)
assert len(next(guid_iter)) == 26
def test_chunked_bytes():
# see #231
from boltons.iterutils import chunked
assert chunked(b'123', 2) in (['12', '3'], [b'12', b'3'])
def test_chunk_ranges():
from boltons.iterutils import chunk_ranges
assert list(chunk_ranges(input_offset=10, input_size=10, chunk_size=5)) == [(10, 15), (15, 20)]
assert list(chunk_ranges(input_offset=10, input_size=10, chunk_size=5, overlap_size=1)) == [(10, 15), (14, 19), (18, 20)]
assert list(chunk_ranges(input_offset=10, input_size=10, chunk_size=5, overlap_size=2)) == [(10, 15), (13, 18), (16, 20)]
assert list(chunk_ranges(input_offset=4, input_size=15, chunk_size=5, align=False)) == [(4, 9), (9, 14), (14, 19)]
assert list(chunk_ranges(input_offset=4, input_size=15, chunk_size=5, align=True)) == [(4, 5), (5, 10), (10, 15), (15, 19)]
assert list(chunk_ranges(input_offset=2, input_size=15, chunk_size=5, overlap_size=1, align=False)) == [(2, 7), (6, 11), (10, 15), (14, 17)]
assert list(chunk_ranges(input_offset=2, input_size=15, chunk_size=5, overlap_size=1, align=True)) == [(2, 5), (4, 9), (8, 13), (12, 17)]
assert list(chunk_ranges(input_offset=3, input_size=15, chunk_size=5, overlap_size=1, align=True)) == [(3, 5), (4, 9), (8, 13), (12, 17), (16, 18)]
assert list(chunk_ranges(input_offset=3, input_size=2, chunk_size=5, overlap_size=1, align=True)) == [(3, 5)]
def test_lstrip():
from boltons.iterutils import lstrip
assert lstrip([0,1,0,2,0,3,0],0) == [1,0,2,0,3,0]
assert lstrip([0,0,0,1,0,2,0,3,0],0) == [1,0,2,0,3,0]
assert lstrip([]) == []
def test_rstrip():
from boltons.iterutils import rstrip
assert rstrip([0,1,0,2,0,3,0],0) == [0,1,0,2,0,3]
assert rstrip([0,1,0,2,0,3,0,0,0],0) == [0,1,0,2,0,3]
assert rstrip([]) == []
def test_strip():
from boltons.iterutils import strip
assert strip([0,1,0,2,0,3,0],0) == [1,0,2,0,3]
assert strip([0,0,0,1,0,2,0,3,0,0,0],0) == [1,0,2,0,3]
assert strip([]) == []
def test_pairwise_filled():
assert pairwise(range(4)) == [(0, 1), (1, 2), (2, 3)]
assert pairwise(range(4), end=None) == [(0, 1), (1, 2), (2, 3), (3, None)]
assert pairwise([]) == []
assert pairwise([1], end=None) == [(1, None)]
assert list(pairwise_iter(range(4))) == [(0, 1), (1, 2), (2, 3)]
assert list(pairwise_iter(range(4), end=None)) == [(0, 1), (1, 2), (2, 3), (3, None)]
def test_windowed_filled():
assert windowed(range(4), 3) == [(0, 1, 2), (1, 2, 3)]
assert windowed(range(4), 3, fill=None) == [(0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
assert windowed([], 3) == []
assert windowed([], 3, fill=None) == []
assert windowed([1, 2], 3, fill=None) == [(1, 2, None), (2, None, None)]
assert list(windowed_iter(range(4), 3)) == [(0, 1, 2), (1, 2, 3)]
assert list(windowed_iter(range(4), 3, fill=None)) == [(0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
| TestGetPath |
python | walkccc__LeetCode | solutions/1631. Path With Minimum Effort/1631.py | {
"start": 0,
"end": 886
} | class ____:
def minimumEffortPath(self, heights: list[list[int]]) -> int:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
m = len(heights)
n = len(heights[0])
# diff[i][j] := the maximum absolute difference to reach (i, j)
diff = [[math.inf] * n for _ in range(m)]
seen = set()
minHeap = [(0, 0, 0)] # (d, i, j)
diff[0][0] = 0
while minHeap:
d, i, j = heapq.heappop(minHeap)
if i == m - 1 and j == n - 1:
return d
seen.add((i, j))
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == m or y < 0 or y == n:
continue
if (x, y) in seen:
continue
newDiff = abs(heights[i][j] - heights[x][y])
maxDiff = max(diff[i][j], newDiff)
if diff[x][y] > maxDiff:
diff[x][y] = maxDiff
heapq.heappush(minHeap, (diff[x][y], x, y))
| Solution |
python | psf__black | tests/data/cases/preview_long_strings__regression.py | {
"start": 35776,
"end": 37239
} | class ____(StepBase):
def who(self):
self.cmd = (
"SR AAAA-CORRECT NAME IS {last_name} {first_name}{middle_name}"
" {title}/P{passenger_association}".format(
last_name=last_name,
first_name=first_name,
middle_name=middle_name,
title=title,
passenger_association=passenger_association,
)
)
xxxxxxx_xxxxxx_xxxxxxx = xxx([
xxxxxxxxxxxx(
xxxxxx_xxxxxxx=(
'((x.aaaaaaaaa = "xxxxxx.xxxxxxxxxxxxxxxxxxxxx") || (x.xxxxxxxxx ='
' "xxxxxxxxxxxx")) && '
# xxxxx xxxxxxxxxxxx xxxx xxx (xxxxxxxxxxxxxxxx) xx x xxxxxxxxx xx xxxxxx.
"(x.bbbbbbbbbbbb.xxx != "
'"xxx:xxx:xxx::cccccccccccc:xxxxxxx-xxxx/xxxxxxxxxxx/xxxxxxxxxxxxxxxxx") && '
)
)
])
if __name__ == "__main__":
for i in range(4, 8):
cmd = (
r"for pid in $(ps aux | grep paster | grep -v grep | grep '\-%d' | awk"
r" '{print $2}'); do kill $pid; done" % (i)
)
def A():
def B():
def C():
def D():
def E():
def F():
def G():
assert (
c_float(val[0][0] / val[0][1]).value
== c_float(value[0][0] / value[0][1]).value
), "%s didn't roundtrip" % tag
| Step |
python | google__flatbuffers | goldens/py/flatbuffers/goldens/Galaxy.py | {
"start": 176,
"end": 1311
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Galaxy()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsGalaxy(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# Galaxy
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Galaxy
def NumStars(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
def GalaxyStart(builder):
builder.StartObject(1)
def Start(builder):
GalaxyStart(builder)
def GalaxyAddNumStars(builder, numStars):
builder.PrependInt64Slot(0, numStars, 0)
def AddNumStars(builder, numStars):
GalaxyAddNumStars(builder, numStars)
def GalaxyEnd(builder):
return builder.EndObject()
def End(builder):
return GalaxyEnd(builder)
| Galaxy |
python | catalyst-team__catalyst | catalyst/data/sampler.py | {
"start": 8599,
"end": 13723
} | class ____(Sampler):
"""
This kind of sampler can be used for classification tasks with significant
class imbalance.
The idea of this sampler that we start with the original class distribution
and gradually move to uniform class distribution like with downsampling.
Let's define :math: D_i = #C_i/ #C_min where :math: #C_i is a size of class
i and :math: #C_min is a size of the rarest class, so :math: D_i define
class distribution. Also define :math: g(n_epoch) is a exponential
scheduler. On each epoch current :math: D_i calculated as
:math: current D_i = D_i ^ g(n_epoch),
after this data samples according this distribution.
Notes:
In the end of the training, epochs will contain only
min_size_class * n_classes examples. So, possible it will not
necessary to do validation on each epoch. For this reason use
ControlFlowCallback.
Examples:
>>> import torch
>>> import numpy as np
>>> from catalyst.data import DynamicBalanceClassSampler
>>> from torch.utils import data
>>> features = torch.Tensor(np.random.random((200, 100)))
>>> labels = np.random.randint(0, 4, size=(200,))
>>> sampler = DynamicBalanceClassSampler(labels)
>>> labels = torch.LongTensor(labels)
>>> dataset = data.TensorDataset(features, labels)
>>> loader = data.dataloader.DataLoader(dataset, batch_size=8)
>>> for batch in loader:
>>> b_features, b_labels = batch
Sampler was inspired by https://arxiv.org/abs/1901.06783
"""
def __init__(
self,
labels: List[Union[int, str]],
exp_lambda: float = 0.9,
start_epoch: int = 0,
max_d: Optional[int] = None,
mode: Union[str, int] = "downsampling",
ignore_warning: bool = False,
):
"""
Args:
labels: list of labels for each elem in the dataset
exp_lambda: exponent figure for schedule
start_epoch: start epoch number, can be useful for multi-stage
experiments
max_d: if not None, limit on the difference between the most
frequent and the rarest classes, heuristic
mode: number of samples per class in the end of training. Must be
"downsampling" or number. Before change it, make sure that you
understand how does it work
ignore_warning: ignore warning about min class size
"""
assert isinstance(start_epoch, int)
assert 0 < exp_lambda < 1, "exp_lambda must be in (0, 1)"
super().__init__(labels)
self.exp_lambda = exp_lambda
if max_d is None:
max_d = np.inf
self.max_d = max_d
self.epoch = start_epoch
labels = np.array(labels)
samples_per_class = Counter(labels)
self.min_class_size = min(samples_per_class.values())
if self.min_class_size < 100 and not ignore_warning:
LOGGER.warning(
f"the smallest class contains only"
f" {self.min_class_size} examples. At the end of"
f" training, epochs will contain only"
f" {self.min_class_size * len(samples_per_class)}"
f" examples"
)
self.original_d = {
key: value / self.min_class_size for key, value in samples_per_class.items()
}
self.label2idxes = {
label: np.arange(len(labels))[labels == label].tolist()
for label in set(labels)
}
if isinstance(mode, int):
self.min_class_size = mode
else:
assert mode == "downsampling"
self.labels = labels
self._update()
def _update(self) -> None:
"""Update d coefficients."""
current_d = {
key: min(value ** self._exp_scheduler(), self.max_d)
for key, value in self.original_d.items()
}
samples_per_classes = {
key: int(value * self.min_class_size) for key, value in current_d.items()
}
self.samples_per_classes = samples_per_classes
self.length = np.sum(list(samples_per_classes.values()))
self.epoch += 1
def _exp_scheduler(self) -> float:
return self.exp_lambda ** self.epoch
def __iter__(self) -> Iterator[int]:
"""
Returns:
iterator of indices of stratified sample
"""
indices = []
for key in sorted(self.label2idxes):
samples_per_class = self.samples_per_classes[key]
replace_flag = samples_per_class > len(self.label2idxes[key])
indices += np.random.choice(
self.label2idxes[key], samples_per_class, replace=replace_flag
).tolist()
assert len(indices) == self.length
np.random.shuffle(indices)
self._update()
return iter(indices)
def __len__(self) -> int:
"""
Returns:
length of result sample
"""
return self.length
| DynamicBalanceClassSampler |
python | sqlalchemy__sqlalchemy | test/orm/test_versioning.py | {
"start": 27815,
"end": 30040
} | class ____(fixtures.MappedTest):
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"p",
metadata,
Column("id", String(10), primary_key=True),
Column("version_id", Integer, default=1, nullable=False),
Column("data", String(50)),
)
Table(
"c",
metadata,
Column("id", String(10), ForeignKey("p.id"), primary_key=True),
Column("version_id", Integer, default=1, nullable=False),
Column("data", String(50)),
)
@classmethod
def setup_classes(cls):
class P(cls.Basic):
pass
class C(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
p, c, C, P = cls.tables.p, cls.tables.c, cls.classes.C, cls.classes.P
cls.mapper_registry.map_imperatively(
P,
p,
version_id_col=p.c.version_id,
properties={
"c": relationship(
C, uselist=False, cascade="all, delete-orphan"
)
},
)
cls.mapper_registry.map_imperatively(
C, c, version_id_col=c.c.version_id
)
def test_row_switch(self):
P = self.classes.P
session = fixture_session()
session.add(P(id="P1", data="P version 1"))
session.commit()
session.close()
p = session.query(P).first()
session.delete(p)
session.add(P(id="P1", data="really a row-switch"))
with conditional_sane_rowcount_warnings(update=True):
session.commit()
def test_child_row_switch(self):
P, C = self.classes.P, self.classes.C
assert P.c.property.strategy.use_get
session = fixture_session()
session.add(P(id="P1", data="P version 1"))
session.commit()
session.close()
p = session.query(P).first()
p.c = C(data="child version 1")
session.commit()
p = session.query(P).first()
p.c = C(data="child row-switch")
with conditional_sane_rowcount_warnings(update=True):
session.commit()
| RowSwitchTest |
python | sympy__sympy | sympy/logic/boolalg.py | {
"start": 2207,
"end": 7588
} | class ____(Basic):
"""A Boolean object is an object for which logic operations make sense."""
__slots__ = ()
kind = BooleanKind
if TYPE_CHECKING:
def __new__(cls, *args: Basic | complex) -> Boolean:
...
@overload # type: ignore
def subs(self, arg1: Mapping[Basic | complex, Boolean | complex], arg2: None=None) -> Boolean: ...
@overload
def subs(self, arg1: Iterable[tuple[Basic | complex, Boolean | complex]], arg2: None=None, **kwargs: Any) -> Boolean: ...
@overload
def subs(self, arg1: Boolean | complex, arg2: Boolean | complex) -> Boolean: ...
@overload
def subs(self, arg1: Mapping[Basic | complex, Basic | complex], arg2: None=None, **kwargs: Any) -> Basic: ...
@overload
def subs(self, arg1: Iterable[tuple[Basic | complex, Basic | complex]], arg2: None=None, **kwargs: Any) -> Basic: ...
@overload
def subs(self, arg1: Basic | complex, arg2: Basic | complex, **kwargs: Any) -> Basic: ...
def subs(self, arg1: Mapping[Basic | complex, Basic | complex] | Basic | complex, # type: ignore
arg2: Basic | complex | None = None, **kwargs: Any) -> Basic:
...
def simplify(self, **kwargs) -> Boolean:
...
@sympify_return([('other', 'Boolean')], NotImplemented)
def __and__(self, other):
return And(self, other)
__rand__ = __and__
@sympify_return([('other', 'Boolean')], NotImplemented)
def __or__(self, other):
return Or(self, other)
__ror__ = __or__
def __invert__(self):
"""Overloading for ~"""
return Not(self)
@sympify_return([('other', 'Boolean')], NotImplemented)
def __rshift__(self, other):
return Implies(self, other)
@sympify_return([('other', 'Boolean')], NotImplemented)
def __lshift__(self, other):
return Implies(other, self)
__rrshift__ = __lshift__
__rlshift__ = __rshift__
@sympify_return([('other', 'Boolean')], NotImplemented)
def __xor__(self, other):
return Xor(self, other)
__rxor__ = __xor__
def equals(self, other):
"""
Returns ``True`` if the given formulas have the same truth table.
For two formulas to be equal they must have the same literals.
Examples
========
>>> from sympy.abc import A, B, C
>>> from sympy import And, Or, Not
>>> (A >> B).equals(~B >> ~A)
True
>>> Not(And(A, B, C)).equals(And(Not(A), Not(B), Not(C)))
False
>>> Not(And(A, Not(A))).equals(Or(B, Not(B)))
False
"""
from sympy.logic.inference import satisfiable
from sympy.core.relational import Relational
if self.has(Relational) or other.has(Relational):
raise NotImplementedError('handling of relationals')
return self.atoms() == other.atoms() and \
not satisfiable(Not(Equivalent(self, other)))
def to_nnf(self, simplify=True, form=None):
# override where necessary
return self
def as_set(self):
"""
Rewrites Boolean expression in terms of real sets.
Examples
========
>>> from sympy import Symbol, Eq, Or, And
>>> x = Symbol('x', real=True)
>>> Eq(x, 0).as_set()
{0}
>>> (x > 0).as_set()
Interval.open(0, oo)
>>> And(-2 < x, x < 2).as_set()
Interval.open(-2, 2)
>>> Or(x < -2, 2 < x).as_set()
Union(Interval.open(-oo, -2), Interval.open(2, oo))
"""
from sympy.calculus.util import periodicity
from sympy.core.relational import Relational
free = self.free_symbols
if len(free) == 1:
x = free.pop()
if x.kind is NumberKind:
reps = {}
for r in self.atoms(Relational):
if periodicity(r, x) not in (0, None):
s = r._eval_as_set()
if s in (S.EmptySet, S.UniversalSet, S.Reals):
reps[r] = s.as_relational(x)
continue
raise NotImplementedError(filldedent('''
as_set is not implemented for relationals
with periodic solutions
'''))
new = self.subs(reps)
if new.func != self.func:
return new.as_set() # restart with new obj
else:
return new._eval_as_set()
return self._eval_as_set()
else:
raise NotImplementedError("Sorry, as_set has not yet been"
" implemented for multivariate"
" expressions")
@property
def binary_symbols(self):
from sympy.core.relational import Eq, Ne
return set().union(*[i.binary_symbols for i in self.args
if i.is_Boolean or i.is_Symbol
or isinstance(i, (Eq, Ne))])
def _eval_refine(self, assumptions):
from sympy.assumptions import ask
ret = ask(self, assumptions)
if ret is True:
return true
elif ret is False:
return false
return None
| Boolean |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-toggl/llama_index/readers/toggl/base.py | {
"start": 240,
"end": 3496
} | class ____(BaseReader):
def __init__(
self, api_token: str, user_agent: str = "llama_index_toggl_reader"
) -> None:
"""Initialize with parameters."""
super().__init__()
self.api_token = api_token
self.user_agent = user_agent
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def load_data(
self,
workspace_id: str,
project_id: str,
start_date: Optional[datetime.datetime] = None,
end_date: Optional[datetime.datetime] = datetime.datetime.now(),
out_format: TogglOutFormat = TogglOutFormat.json,
) -> List[Document]:
"""
Load data from Toggl.
Args:
workspace_id (str): The workspace ID.
project_id (str): The project ID.
start_date (Optional[datetime.datetime]): The start date.
end_date (Optional[datetime.datetime]): The end date.
out_format (TogglOutFormat): The output format.
"""
return self.loop.run_until_complete(
self.aload_data(workspace_id, project_id, start_date, end_date, out_format)
)
async def aload_data(
self,
workspace_id: str,
project_id: str,
start_date: Optional[datetime.datetime],
end_date: Optional[datetime.datetime],
out_format: TogglOutFormat,
) -> List[Document]:
"""Load time entries from Toggl."""
from toggl.api_client import TogglClientApi
client = TogglClientApi(
{
"token": self.api_token,
"workspace_id": workspace_id,
"user_agent": self.user_agent,
}
)
project_times = client.get_project_times(project_id, start_date, end_date)
raw_items = [
TogglTrackItem.model_validate(raw_item)
for raw_item in project_times["data"]
]
items = []
for item in raw_items:
if out_format == TogglOutFormat.json:
text = item.model_dump_json()
elif out_format == TogglOutFormat.markdown:
text = f"""# {item.description}
**Start:** {item.start:%Y-%m-%d %H:%M:%S%z}
**End:** {item.end:%Y-%m-%d %H:%M:%S%z}
**Duration:** {self.milliseconds_to_postgresql_interval(item.dur)}
**Tags:** {",".join(item.tags)}
"""
doc = Document(text=text)
doc.metadata = {**doc.metadata, **item.dict()}
items.append(doc)
return items
def milliseconds_to_postgresql_interval(self, milliseconds):
seconds, milliseconds = divmod(milliseconds, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
interval = ""
if days > 0:
interval += f"{days}d"
if hours > 0:
interval += f"{hours}h"
if minutes > 0:
interval += f"{minutes}m"
if seconds > 0 or milliseconds > 0:
interval += f"{seconds}s"
if milliseconds > 0:
interval += f"{milliseconds}ms"
return interval
| TogglReader |
python | aimacode__aima-python | learning4e.py | {
"start": 14321,
"end": 14644
} | class ____:
"""A leaf of a decision tree holds just a result."""
def __init__(self, result):
self.result = result
def __call__(self, example):
return self.result
def display(self):
print('RESULT =', self.result)
def __repr__(self):
return repr(self.result)
| DecisionLeaf |
python | sqlalchemy__sqlalchemy | test/orm/declarative/test_typed_mapping.py | {
"start": 154731,
"end": 156121
} | class ____(AssertsCompiledSQL, fixtures.TestBase):
"""test the Generic support added as part of #8665"""
__dialect__ = "default"
@testing.fixture
def mapping(self):
# anno only: global T_Value
T_Value = TypeVar("T_Value")
class SomeBaseClass(DeclarativeBase):
pass
class GenericSetting(
MappedAsDataclass, SomeBaseClass, Generic[T_Value]
):
"""Represents key value pairs for settings or values"""
__tablename__ = "xx"
id: Mapped[int] = mapped_column(
Integer, primary_key=True, init=False
)
key: Mapped[str] = mapped_column(String, init=True)
value: Mapped[T_Value] = mapped_column(
MutableDict.as_mutable(JSON),
init=True,
default_factory=lambda: {},
)
return GenericSetting
def test_inspect(self, mapping):
GenericSetting = mapping
typ = GenericSetting[Dict[str, Any]]
is_(inspect(typ), GenericSetting.__mapper__)
def test_select(self, mapping):
GenericSetting = mapping
typ = GenericSetting[Dict[str, Any]]
self.assert_compile(
select(typ).where(typ.key == "x"),
"SELECT xx.id, xx.key, xx.value FROM xx WHERE xx.key = :key_1",
)
| GenericMappingQueryTest |
python | joke2k__faker | faker/sphinx/docstring.py | {
"start": 1174,
"end": 8527
} | class ____:
"""
Class that preprocesses provider method docstrings to generate sample usage and output
Notes on how samples are generated:
- If the docstring belongs to a standard provider method, sample usage and output will be
generated using a `Faker` object in the `DEFAULT_LOCALE`.
- If the docstring belongs to a localized provider method, the correct locale will be used.
- If the docstring does not belong to any provider method, docstring preprocessing will be skipped.
- Docstring lines will be parsed for potential sample sections, and the generation details of each
sample section will internally be represented as a ``Sample`` namedtuple.
- Each ``Sample`` will have info on the keyword arguments to pass to the provider method, how many
times the provider method will be called, and the initial seed value to ``Faker.seed()``.
"""
def __init__(self, app, what, name, obj, options, lines):
self._line_iter = iter(lines)
self._parsed_lines = []
self._samples = []
self._skipped = True
self._log_prefix = f"{inspect.getfile(obj)}:docstring of {name}: WARNING:"
if what != "method":
return
base_provider_method_match = _base_provider_method_pattern.match(name)
locale_provider_method_match = _locale_provider_method_pattern.match(name)
standard_provider_method_match = _standard_provider_method_pattern.match(name)
if base_provider_method_match:
groupdict = base_provider_method_match.groupdict()
self._method = groupdict["method"]
self._locale = DEFAULT_LOCALE
elif standard_provider_method_match:
groupdict = standard_provider_method_match.groupdict()
self._method = groupdict["method"]
self._locale = DEFAULT_LOCALE
elif locale_provider_method_match:
groupdict = locale_provider_method_match.groupdict()
self._method = groupdict["method"]
self._locale = groupdict["locale"]
else:
return
self._skipped = False
self._parse()
self._generate_samples()
def _log_warning(self, warning):
logger.warning(f"{self._log_prefix} {warning}")
def _parse(self):
while True:
try:
line = next(self._line_iter)
except StopIteration:
break
else:
self._parse_section(line)
def _parse_section(self, section):
# No-op if section does not look like the start of a sample section
if not section.startswith(":sample"):
self._parsed_lines.append(section)
return
try:
next_line = next(self._line_iter)
except StopIteration:
# No more lines left to consume, so save current sample section
self._process_sample_section(section)
return
# Next line is the start of a new sample section, so process
# current sample section, and start parsing the new section
if next_line.startswith(":sample"):
self._process_sample_section(section)
self._parse_section(next_line)
# Next line is an empty line indicating the end of
# current sample section, so process current section
elif next_line == "":
self._process_sample_section(section)
# Section is assumed to be multiline, so continue
# adding lines to current sample section
else:
section = section + next_line
self._parse_section(section)
def _process_sample_section(self, section):
match = _sample_line_pattern.match(section)
# Discard sample section if malformed
if not match:
msg = f"The section `{section}` is malformed and will be discarded."
self._log_warning(msg)
return
# Set sample generation defaults and do some beautification if necessary
groupdict = match.groupdict()
size = groupdict.get("size")
seed = groupdict.get("seed")
kwargs = groupdict.get("kwargs")
size = max(int(size), DEFAULT_SAMPLE_SIZE) if size else DEFAULT_SAMPLE_SIZE
seed = int(seed) if seed else DEFAULT_SEED
kwargs = self._beautify_kwargs(kwargs) if kwargs else ""
# Store sample generation details
sample = Sample(size, seed, kwargs)
self._samples.append(sample)
def _beautify_kwargs(self, kwargs):
def _repl_whitespace(match):
quoted = match.group(1) or match.group(2)
return quoted if quoted else ""
def _repl_comma(match):
quoted = match.group(1) or match.group(2)
return quoted if quoted else ", "
# First, remove all whitespaces and tabs not within quotes
result = re.sub(r'("[^"]*")|(\'[^\']*\')|[ \t]+', _repl_whitespace, kwargs)
# Next, insert a whitespace after each comma not within quotes
result = re.sub(r'("[^"]*")|(\'[^\']*\')|,', _repl_comma, result)
# Then return the result with all leading and trailing whitespaces stripped
return result.strip()
def _stringify_result(self, value):
return repr(value)
def _generate_eval_scope(self):
from collections import OrderedDict # noqa: F401 Do not remove! The eval command needs this reference.
return {
"generator": _fake[self._locale],
"OrderedDict": OrderedDict,
}
def _inject_default_sample_section(self):
default_sample = Sample(DEFAULT_SAMPLE_SIZE, DEFAULT_SEED, "")
self._samples.append(default_sample)
def _generate_samples(self):
if not self._samples:
self._inject_default_sample_section()
output = ""
eval_scope = self._generate_eval_scope()
for sample in self._samples:
command = _command_template.format(method=self._method, kwargs=sample.kwargs)
validator = SampleCodeValidator(command)
if validator.errors:
msg = (
f"Invalid code elements detected. Sample generation will be "
f"skipped for method `{self._method}` with arguments `{sample.kwargs}`."
)
self._log_warning(msg)
continue
try:
Faker.seed(sample.seed)
results = "\n".join([self._stringify_result(eval(command, eval_scope)) for _ in range(sample.size)])
except Exception:
msg = f"Sample generation failed for method `{self._method}` with arguments `{sample.kwargs}`."
self._log_warning(msg)
continue
else:
output += _sample_output_template.format(
seed=sample.seed,
method=self._method,
kwargs=sample.kwargs,
size=sample.size,
results=results,
)
if output:
output = ":examples:\n\n" + output
self._parsed_lines.extend(output.split("\n"))
@property
def skipped(self):
return self._skipped
@property
def lines(self):
return self._parsed_lines
| ProviderMethodDocstring |
python | rapidsai__cudf | docs/cudf/source/_ext/PandasCompat.py | {
"start": 670,
"end": 734
} | class ____(nodes.Admonition, nodes.Element):
pass
| PandasCompat |
python | django__django | tests/backends/sqlite/tests.py | {
"start": 7720,
"end": 9593
} | class ____(TestCase):
def test_no_interpolation(self):
# This shouldn't raise an exception (#17158)
query = "SELECT strftime('%Y', 'now');"
with connection.cursor() as cursor:
cursor.execute(query)
self.assertEqual(connection.queries[-1]["sql"], query)
def test_parameter_quoting(self):
# The implementation of last_executed_queries isn't optimal. It's
# worth testing that parameters are quoted (#14091).
query = "SELECT %s"
params = ["\"'\\"]
with connection.cursor() as cursor:
cursor.execute(query, params)
# Note that the single quote is repeated
substituted = "SELECT '\"''\\'"
self.assertEqual(connection.queries[-1]["sql"], substituted)
def test_parameter_count_exceeds_variable_or_column_limit(self):
sql = "SELECT MAX(%s)" % ", ".join(["%s"] * 1001)
params = list(range(1001))
for label, limit, current_limit in [
(
"variable",
sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER,
connection.features.max_query_params,
),
(
"column",
sqlite3.SQLITE_LIMIT_COLUMN,
connection.connection.getlimit(sqlite3.SQLITE_LIMIT_COLUMN),
),
]:
with self.subTest(limit=label):
connection.connection.setlimit(limit, 1000)
self.addCleanup(connection.connection.setlimit, limit, current_limit)
with connection.cursor() as cursor:
# This should not raise an exception.
cursor.db.ops.last_executed_query(cursor.cursor, sql, params)
connection.connection.setlimit(limit, current_limit)
@unittest.skipUnless(connection.vendor == "sqlite", "SQLite tests")
| LastExecutedQueryTest |
python | pytorch__pytorch | torch/_dynamo/variables/lazy.py | {
"start": 6515,
"end": 7661
} | class ____:
def __init__(
self, sym_node_variable: SymNodeVariable, fmt_spec_var: VariableTracker
) -> None:
from .constant import ConstantVariable
self.sym_node_var = sym_node_variable
self.fmt_var = ConstantVariable.create(
"{:" + fmt_spec_var.as_python_constant() + "}"
)
def __repr__(self) -> str:
return str.format(
self.fmt_var.as_python_constant(),
str(self.sym_node_var.evaluate_expr()),
)
def _create_realize_and_forward(
name: str,
) -> Callable[[LazyVariableTracker, Any, Any], Any]:
@functools.wraps(getattr(VariableTracker, name))
def realize_and_forward(
self: LazyVariableTracker, *args: Any, **kwargs: Any
) -> Any:
return getattr(self.realize(), name)(*args, **kwargs)
return realize_and_forward
def _populate() -> None:
for name, value in VariableTracker.__dict__.items():
if name not in LazyVariableTracker.__dict__:
if callable(value):
setattr(LazyVariableTracker, name, _create_realize_and_forward(name))
_populate()
| LazySymNodeFormatString |
python | prabhupant__python-ds | data_structures/graphs/cycle_in_directed_graph_using_colors_recursive.py | {
"start": 341,
"end": 1260
} | class ____:
def __init__(self, vertices):
self.graph = defaultdict(list)
self.vertices = vertices
def add_edge(self, u, v):
self.graph[u].append(v)
def dfs(self, vertex, colors):
colors[vertex] = 'Gray'
for v in self.graph[vertex]:
if colors[v] == 'Gray':
return True
elif colors[v] == 'White' and self.dfs(v, colors) == True:
return True
colors[vertex] = 'Black'
return False
def is_cyclic(self):
colors = ['White'] * self.vertices
for vertex in self.graph.keys():
if colors[vertex] == 'White':
if self.dfs(vertex, colors) == True:
return True
return False
g = Graph(4)
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
print(g.is_cyclic()) | Graph |
python | bokeh__bokeh | tests/unit/bokeh/embed/test_server__embed.py | {
"start": 10140,
"end": 10723
} | class ____:
def test_None(self) -> None:
assert bes._process_arguments(None) == ""
def test_args(self) -> None:
args = dict(foo=10, bar="baz")
r = bes._process_arguments(args)
# order unspecified
assert r == "&foo=10&bar=baz" or r == "&bar=baz&foo=10"
def test_args_ignores_bokeh_prefixed(self) -> None:
args = dict(foo=10, bar="baz")
args["bokeh-junk"] = 20
r = bes._process_arguments(args)
# order unspecified
assert r == "&foo=10&bar=baz" or r == "&bar=baz&foo=10"
| Test__process_arguments |
python | gevent__gevent | src/gevent/tests/test___monitor.py | {
"start": 9325,
"end": 9504
} | class ____(object):
def __init__(self, rss):
self.rss = rss
def memory_full_info(self):
return self
@skipWithoutPSUtil("Accessess memory info")
| MockProcess |
python | pydata__xarray | xarray/tests/test_coordinate_transform.py | {
"start": 295,
"end": 8362
} | class ____(CoordinateTransform):
"""Simple uniform scale transform in a 2D space (x/y coordinates)."""
def __init__(self, shape: tuple[int, int], scale: float, dtype: Any = None):
super().__init__(("x", "y"), {"x": shape[1], "y": shape[0]}, dtype=dtype)
self.scale = scale
# array dimensions in reverse order (y = rows, x = cols)
self.xy_dims = tuple(self.dims)
self.dims = (self.dims[1], self.dims[0])
def forward(self, dim_positions: dict[str, Any]) -> dict[Hashable, Any]:
assert set(dim_positions) == set(self.dims)
return {
name: dim_positions[dim] * self.scale
for name, dim in zip(self.coord_names, self.xy_dims, strict=False)
}
def reverse(self, coord_labels: dict[Hashable, Any]) -> dict[str, Any]:
return {dim: coord_labels[dim] / self.scale for dim in self.xy_dims}
def equals(
self, other: CoordinateTransform, exclude: frozenset[Hashable] | None = None
) -> bool:
if not isinstance(other, SimpleCoordinateTransform):
return False
return self.scale == other.scale
def __repr__(self) -> str:
return f"Scale({self.scale})"
def test_abstract_coordinate_transform() -> None:
tr = CoordinateTransform(["x"], {"x": 5})
with pytest.raises(NotImplementedError):
tr.forward({"x": [1, 2]})
with pytest.raises(NotImplementedError):
tr.reverse({"x": [3.0, 4.0]})
with pytest.raises(NotImplementedError):
tr.equals(CoordinateTransform(["x"], {"x": 5}))
def test_coordinate_transform_init() -> None:
tr = SimpleCoordinateTransform((4, 4), 2.0)
assert tr.coord_names == ("x", "y")
# array dimensions in reverse order (y = rows, x = cols)
assert tr.dims == ("y", "x")
assert tr.dim_size == {"x": 4, "y": 4}
assert tr.dtype == np.dtype(np.float64)
tr2 = SimpleCoordinateTransform((4, 4), 2.0, dtype=np.int64)
assert tr2.dtype == np.dtype(np.int64)
@pytest.mark.parametrize("dims", [None, ("y", "x")])
def test_coordinate_transform_generate_coords(dims) -> None:
tr = SimpleCoordinateTransform((2, 2), 2.0)
actual = tr.generate_coords(dims)
expected = {"x": [[0.0, 2.0], [0.0, 2.0]], "y": [[0.0, 0.0], [2.0, 2.0]]}
assert set(actual) == set(expected)
np.testing.assert_array_equal(actual["x"], expected["x"])
np.testing.assert_array_equal(actual["y"], expected["y"])
def create_coords(scale: float, shape: tuple[int, int]) -> xr.Coordinates:
"""Create x/y Xarray coordinate variables from a simple coordinate transform."""
tr = SimpleCoordinateTransform(shape, scale)
index = CoordinateTransformIndex(tr)
return xr.Coordinates.from_xindex(index)
def test_coordinate_transform_variable() -> None:
coords = create_coords(scale=2.0, shape=(2, 2))
assert coords["x"].dtype == np.dtype(np.float64)
assert coords["y"].dtype == np.dtype(np.float64)
assert coords["x"].shape == (2, 2)
assert coords["y"].shape == (2, 2)
np.testing.assert_array_equal(np.array(coords["x"]), [[0.0, 2.0], [0.0, 2.0]])
np.testing.assert_array_equal(np.array(coords["y"]), [[0.0, 0.0], [2.0, 2.0]])
def assert_repr(var: xr.Variable):
assert (
repr(var._data)
== "CoordinateTransformIndexingAdapter(transform=Scale(2.0))"
)
assert_repr(coords["x"].variable)
assert_repr(coords["y"].variable)
def test_coordinate_transform_variable_repr_inline() -> None:
var = create_coords(scale=2.0, shape=(2, 2))["x"].variable
actual = var._data._repr_inline_(70) # type: ignore[union-attr]
assert actual == "0.0 2.0 0.0 2.0"
# truncated inline repr
var2 = create_coords(scale=2.0, shape=(10, 10))["x"].variable
actual2 = var2._data._repr_inline_(70) # type: ignore[union-attr]
assert (
actual2 == "0.0 2.0 4.0 6.0 8.0 10.0 12.0 ... 6.0 8.0 10.0 12.0 14.0 16.0 18.0"
)
def test_coordinate_transform_variable_repr() -> None:
var = create_coords(scale=2.0, shape=(2, 2))["x"].variable
actual = repr(var)
expected = """
<xarray.Variable (y: 2, x: 2)> Size: 32B
[4 values with dtype=float64]
""".strip()
assert actual == expected
def test_coordinate_transform_variable_basic_outer_indexing() -> None:
var = create_coords(scale=2.0, shape=(4, 4))["x"].variable
assert var[0, 0] == 0.0
assert var[0, 1] == 2.0
assert var[0, -1] == 6.0
np.testing.assert_array_equal(var[:, 0:2], [[0.0, 2.0]] * 4)
with pytest.raises(IndexError, match="out of bounds index"):
var[5]
with pytest.raises(IndexError, match="out of bounds index"):
var[-5]
def test_coordinate_transform_variable_vectorized_indexing() -> None:
var = create_coords(scale=2.0, shape=(4, 4))["x"].variable
actual = var[{"x": xr.Variable("z", [0]), "y": xr.Variable("z", [0])}]
expected = xr.Variable("z", [0.0])
assert_equal(actual, expected)
with pytest.raises(IndexError, match="out of bounds index"):
var[{"x": xr.Variable("z", [5]), "y": xr.Variable("z", [5])}]
def test_coordinate_transform_setitem_error() -> None:
var = create_coords(scale=2.0, shape=(4, 4))["x"].variable
# basic indexing
with pytest.raises(TypeError, match="setting values is not supported"):
var[0, 0] = 1.0
# outer indexing
with pytest.raises(TypeError, match="setting values is not supported"):
var[[0, 2], 0] = [1.0, 2.0]
# vectorized indexing
with pytest.raises(TypeError, match="setting values is not supported"):
var[{"x": xr.Variable("z", [0]), "y": xr.Variable("z", [0])}] = 1.0
def test_coordinate_transform_transpose() -> None:
coords = create_coords(scale=2.0, shape=(2, 2))
actual = coords["x"].transpose().values
expected = [[0.0, 0.0], [2.0, 2.0]]
np.testing.assert_array_equal(actual, expected)
def test_coordinate_transform_equals() -> None:
ds1 = create_coords(scale=2.0, shape=(2, 2)).to_dataset()
ds2 = create_coords(scale=2.0, shape=(2, 2)).to_dataset()
ds3 = create_coords(scale=4.0, shape=(2, 2)).to_dataset()
# cannot use `assert_equal()` test utility function here yet
# (indexes invariant check are still based on IndexVariable, which
# doesn't work with coordinate transform index coordinate variables)
assert ds1.equals(ds2)
assert not ds1.equals(ds3)
def test_coordinate_transform_sel() -> None:
ds = create_coords(scale=2.0, shape=(4, 4)).to_dataset()
data = [
[0.0, 1.0, 2.0, 3.0],
[4.0, 5.0, 6.0, 7.0],
[8.0, 9.0, 10.0, 11.0],
[12.0, 13.0, 14.0, 15.0],
]
ds["data"] = (("y", "x"), data)
actual = ds.sel(
x=xr.Variable("z", [0.5, 5.5]), y=xr.Variable("z", [0.0, 0.5]), method="nearest"
)
expected = ds.isel(x=xr.Variable("z", [0, 3]), y=xr.Variable("z", [0, 0]))
# cannot use `assert_equal()` test utility function here yet
# (indexes invariant check are still based on IndexVariable, which
# doesn't work with coordinate transform index coordinate variables)
assert actual.equals(expected)
with pytest.raises(ValueError, match=r".*only supports selection.*nearest"):
ds.sel(x=xr.Variable("z", [0.5, 5.5]), y=xr.Variable("z", [0.0, 0.5]))
with pytest.raises(ValueError, match=r"missing labels for coordinate.*y"):
ds.sel(x=[0.5, 5.5], method="nearest")
with pytest.raises(TypeError, match=r".*only supports advanced.*indexing"):
ds.sel(x=[0.5, 5.5], y=[0.0, 0.5], method="nearest")
with pytest.raises(ValueError, match=r".*only supports advanced.*indexing"):
ds.sel(
x=xr.Variable("z", [0.5, 5.5]),
y=xr.Variable("z", [0.0, 0.5, 1.5]),
method="nearest",
)
def test_coordinate_transform_rename() -> None:
ds = xr.Dataset(coords=create_coords(scale=2.0, shape=(2, 2)))
roundtripped = ds.rename(x="u", y="v").rename(u="x", v="y")
assert_identical(ds, roundtripped, check_default_indexes=False)
| SimpleCoordinateTransform |
python | zarr-developers__zarr-python | tests/test_dtype/test_npy/test_string.py | {
"start": 2730,
"end": 4822
} | class ____(BaseTestZDType):
test_cls = FixedLengthUTF32
valid_dtype = (np.dtype(">U10"), np.dtype("<U10"))
invalid_dtype = (
np.dtype(np.int8),
np.dtype(np.float64),
np.dtype("|S10"),
)
valid_json_v2 = (
{"name": ">U10", "object_codec_id": None},
{"name": "<U10", "object_codec_id": None},
)
valid_json_v3 = ({"name": "fixed_length_utf32", "configuration": {"length_bytes": 320}},)
invalid_json_v2 = (
"|U",
"|S10",
"|f8",
)
invalid_json_v3 = (
{"name": "fixed_length_utf32", "configuration": {"length_bits": 0}},
{"name": "numpy.fixed_length_utf32", "configuration": {"length_bits": "invalid"}},
)
scalar_v2_params = ((FixedLengthUTF32(length=1), ""), (FixedLengthUTF32(length=2), "hi"))
scalar_v3_params = (
(FixedLengthUTF32(length=1), ""),
(FixedLengthUTF32(length=2), "hi"),
(FixedLengthUTF32(length=4), "hihi"),
)
cast_value_params = (
(FixedLengthUTF32(length=1), "", np.str_("")),
(FixedLengthUTF32(length=2), "hi", np.str_("hi")),
(FixedLengthUTF32(length=4), "hihi", np.str_("hihi")),
)
item_size_params = (
FixedLengthUTF32(length=1),
FixedLengthUTF32(length=4),
FixedLengthUTF32(length=10),
)
# anything can become a string
invalid_scalar_params = (None,)
@pytest.mark.parametrize(
"zdtype",
[
FixedLengthUTF32(length=10),
],
)
def test_unstable_dtype_warning(zdtype: FixedLengthUTF32 | VariableLengthUTF8) -> None:
"""
Test that we get a warning when serializing a dtype without a zarr v3 spec to json
when zarr_format is 3
"""
with pytest.warns(UnstableSpecificationWarning):
zdtype.to_json(zarr_format=3)
def test_invalid_size() -> None:
"""
Test that it's impossible to create a data type that has no length
"""
length = 0
msg = f"length must be >= 1, got {length}."
with pytest.raises(ValueError, match=msg):
FixedLengthUTF32(length=length)
| TestFixedLengthUTF32 |
python | nedbat__coveragepy | tests/test_arcs.py | {
"start": 63733,
"end": 64921
} | class ____(CoverageTest):
"""Tests of exclusions to indicate known partial branches."""
def test_default(self) -> None:
# A number of forms of pragma comment are accepted.
self.check_coverage(
"""\
a = 1
if a: #pragma: no branch
b = 3
c = 4
if c: # pragma NOBRANCH
d = 6
e = 7
if e:#\tpragma:\tno branch
f = 9
import typing
if typing.TYPE_CHECKING: # only for mypy
g = 12
else:
h = 14
""",
lines=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 14],
missing="",
branchz="23 24 56 57 89 8A BC BE",
branchz_missing="",
)
def test_custom_pragmas(self) -> None:
self.check_coverage(
"""\
a = 1
while a: # [only some]
c = 3
break
assert c == 5-2
""",
lines=[1, 2, 3, 4, 5],
partials=["only some"],
branchz="23 25",
branchz_missing="",
)
| ExcludeTest |
python | lazyprogrammer__machine_learning_examples | unsupervised_class3/dcgan_theano.py | {
"start": 2427,
"end": 4069
} | class ____:
def __init__(self, mi, mo, apply_batch_norm, filtersz=5, stride=2, f=T.nnet.relu):
# mi = input feature map size
# mo = output feature map size
W = 0.02*np.random.randn(mo, mi, filtersz, filtersz)
self.W = theano.shared(W)
self.b = theano.shared(np.zeros(mo))
self.params = [self.W, self.b]
self.updates = [] # in case we do batch norm
if apply_batch_norm:
self.gamma = theano.shared(np.ones(mo))
self.beta = theano.shared(np.zeros(mo))
self.params += [self.gamma, self.beta]
self.running_mean = theano.shared(np.zeros(mo))
self.running_var = theano.shared(np.zeros(mo))
self.f = f
self.stride = stride
self.apply_batch_norm = apply_batch_norm
def forward(self, X, is_training):
conv_out = conv2d(
input=X,
filters=self.W,
subsample=(self.stride, self.stride),
border_mode='half',
)
conv_out += self.b.dimshuffle('x', 0, 'x', 'x')
# apply batch normalization
if self.apply_batch_norm:
conv_out, new_running_mean, new_running_var = batch_norm(
conv_out,
self.gamma,
self.beta,
self.running_mean,
self.running_var,
is_training,
'spatial'
)
if is_training:
self.updates = [
(self.running_mean, new_running_mean),
(self.running_var, new_running_var),
]
return self.f(conv_out)
# regular convolution expects output size to be:
# new_dim = floor( (old_dim - filter_sz) / stride ) + 1
# therefore, for fs-conv, output size should be:
# new_dim = stride * (old_dim - 1) + filter_sz
| ConvLayer |
python | plotly__plotly.py | plotly/graph_objs/sunburst/_hoverlabel.py | {
"start": 233,
"end": 11248
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "sunburst"
_path_str = "sunburst.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.sunburst.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.sunburst.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sunburst.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.sunburst.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sunburst.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/sql_datasource.py | {
"start": 10478,
"end": 11263
} | class ____(_PartitionerOneColumnOneParam):
divisor: int
column_name: str
method_name: Literal["partition_on_divided_integer"] = "partition_on_divided_integer"
@property
@override
def param_names(self) -> List[str]:
return ["quotient"]
@override
def partitioner_method_kwargs(self) -> Dict[str, Any]:
return {"column_name": self.column_name, "divisor": self.divisor}
@override
def batch_parameters_to_batch_spec_kwarg_identifiers(
self, options: BatchParameters
) -> Dict[str, Any]:
if "quotient" not in options:
raise ValueError("'quotient' must be specified in the batch parameters") # noqa: TRY003 # FIXME CoP
return {self.column_name: options["quotient"]}
| SqlPartitionerDividedInteger |
python | huggingface__transformers | src/transformers/models/qwen3_moe/modeling_qwen3_moe.py | {
"start": 32059,
"end": 32164
} | class ____(GenericForTokenClassification, Qwen3MoePreTrainedModel):
pass
| Qwen3MoeForTokenClassification |
python | django-extensions__django-extensions | django_extensions/db/fields/__init__.py | {
"start": 20123,
"end": 21340
} | class ____(UUIDFieldMixin, CharField):
"""
ShortUUIDField
Generates concise (22 characters instead of 36), unambiguous, URL-safe UUIDs.
Based on `shortuuid`: https://github.com/stochastic-technologies/shortuuid
"""
DEFAULT_MAX_LENGTH = 22
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not HAS_SHORT_UUID:
raise ImproperlyConfigured(
"'shortuuid' module is required for ShortUUIDField. "
"(Do you have Python 2.5 or higher installed ?)"
)
kwargs.setdefault("max_length", self.DEFAULT_MAX_LENGTH)
def create_uuid(self):
if not self.version or self.version == 4:
return shortuuid.uuid()
elif self.version == 1:
return shortuuid.uuid()
elif self.version == 2:
raise UUIDVersionError("UUID version 2 is not supported.")
elif self.version == 3:
raise UUIDVersionError("UUID version 3 is not supported.")
elif self.version == 5:
return shortuuid.uuid(name=self.namespace)
else:
raise UUIDVersionError("UUID version %s is not valid." % self.version)
| ShortUUIDField |
python | django__django | tests/utils_tests/test_deconstruct.py | {
"start": 202,
"end": 364
} | class ____(DeconstructibleClass):
pass
@deconstructible(
path="utils_tests.deconstructible_classes.DeconstructibleWithPathClass"
)
| DeconstructibleChildClass |
python | jmcnamara__XlsxWriter | xlsxwriter/test/core/test_initialisation.py | {
"start": 291,
"end": 800
} | class ____(unittest.TestCase):
"""
Test initialisation of the Core class and call a method.
"""
def setUp(self):
self.fh = StringIO()
self.core = Core()
self.core._set_filehandle(self.fh)
def test_xml_declaration(self):
"""Test Core xml_declaration()"""
self.core._xml_declaration()
exp = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestInitialisation |
python | pytest-dev__pytest | testing/python/fixtures.py | {
"start": 130611,
"end": 166334
} | class ____:
"""Class of tests that ensure fixtures are ordered based on their scopes (#2405)"""
@pytest.mark.parametrize("variant", ["mark", "autouse"])
def test_func_closure_module_auto(
self, pytester: Pytester, variant, monkeypatch
) -> None:
"""Semantically identical to the example posted in #2405 when ``use_mark=True``"""
monkeypatch.setenv("FIXTURE_ACTIVATION_VARIANT", variant)
pytester.makepyfile(
"""
import warnings
import os
import pytest
VAR = 'FIXTURE_ACTIVATION_VARIANT'
VALID_VARS = ('autouse', 'mark')
VARIANT = os.environ.get(VAR)
if VARIANT is None or VARIANT not in VALID_VARS:
warnings.warn("{!r} is not in {}, assuming autouse".format(VARIANT, VALID_VARS) )
variant = 'mark'
@pytest.fixture(scope='module', autouse=VARIANT == 'autouse')
def m1(): pass
if VARIANT=='mark':
pytestmark = pytest.mark.usefixtures('m1')
@pytest.fixture(scope='function', autouse=True)
def f1(): pass
def test_func(m1):
pass
"""
)
items, _ = pytester.inline_genitems()
assert isinstance(items[0], Function)
request = TopRequest(items[0], _ispytest=True)
assert request.fixturenames == "m1 f1".split()
def test_func_closure_with_native_fixtures(
self, pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
"""Sanity check that verifies the order returned by the closures and the actual fixture execution order:
The execution order may differ because of fixture inter-dependencies.
"""
monkeypatch.setattr(pytest, "FIXTURE_ORDER", [], raising=False)
pytester.makepyfile(
"""
import pytest
FIXTURE_ORDER = pytest.FIXTURE_ORDER
@pytest.fixture(scope="session")
def s1():
FIXTURE_ORDER.append('s1')
@pytest.fixture(scope="package")
def p1():
FIXTURE_ORDER.append('p1')
@pytest.fixture(scope="module")
def m1():
FIXTURE_ORDER.append('m1')
@pytest.fixture(scope='session')
def my_tmp_path_factory():
FIXTURE_ORDER.append('my_tmp_path_factory')
@pytest.fixture
def my_tmp_path(my_tmp_path_factory):
FIXTURE_ORDER.append('my_tmp_path')
@pytest.fixture
def f1(my_tmp_path):
FIXTURE_ORDER.append('f1')
@pytest.fixture
def f2():
FIXTURE_ORDER.append('f2')
def test_foo(f1, p1, m1, f2, s1): pass
"""
)
items, _ = pytester.inline_genitems()
assert isinstance(items[0], Function)
request = TopRequest(items[0], _ispytest=True)
# order of fixtures based on their scope and position in the parameter list
assert (
request.fixturenames
== "s1 my_tmp_path_factory p1 m1 f1 f2 my_tmp_path".split()
)
pytester.runpytest()
# actual fixture execution differs: dependent fixtures must be created first ("my_tmp_path")
FIXTURE_ORDER = pytest.FIXTURE_ORDER # type: ignore[attr-defined]
assert FIXTURE_ORDER == "s1 my_tmp_path_factory p1 m1 my_tmp_path f1 f2".split()
def test_func_closure_module(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.fixture(scope='module')
def m1(): pass
@pytest.fixture(scope='function')
def f1(): pass
def test_func(f1, m1):
pass
"""
)
items, _ = pytester.inline_genitems()
assert isinstance(items[0], Function)
request = TopRequest(items[0], _ispytest=True)
assert request.fixturenames == "m1 f1".split()
def test_func_closure_scopes_reordered(self, pytester: Pytester) -> None:
"""Test ensures that fixtures are ordered by scope regardless of the order of the parameters, although
fixtures of same scope keep the declared order
"""
pytester.makepyfile(
"""
import pytest
@pytest.fixture(scope='session')
def s1(): pass
@pytest.fixture(scope='module')
def m1(): pass
@pytest.fixture(scope='function')
def f1(): pass
@pytest.fixture(scope='function')
def f2(): pass
class Test:
@pytest.fixture(scope='class')
def c1(cls): pass
def test_func(self, f2, f1, c1, m1, s1):
pass
"""
)
items, _ = pytester.inline_genitems()
assert isinstance(items[0], Function)
request = TopRequest(items[0], _ispytest=True)
assert request.fixturenames == "s1 m1 c1 f2 f1".split()
def test_func_closure_same_scope_closer_root_first(
self, pytester: Pytester
) -> None:
"""Auto-use fixtures of same scope are ordered by closer-to-root first"""
pytester.makeconftest(
"""
import pytest
@pytest.fixture(scope='module', autouse=True)
def m_conf(): pass
"""
)
pytester.makepyfile(
**{
"sub/conftest.py": """
import pytest
@pytest.fixture(scope='package', autouse=True)
def p_sub(): pass
@pytest.fixture(scope='module', autouse=True)
def m_sub(): pass
""",
"sub/__init__.py": "",
"sub/test_func.py": """
import pytest
@pytest.fixture(scope='module', autouse=True)
def m_test(): pass
@pytest.fixture(scope='function')
def f1(): pass
def test_func(m_test, f1):
pass
""",
}
)
items, _ = pytester.inline_genitems()
assert isinstance(items[0], Function)
request = TopRequest(items[0], _ispytest=True)
assert request.fixturenames == "p_sub m_conf m_sub m_test f1".split()
def test_func_closure_all_scopes_complex(self, pytester: Pytester) -> None:
"""Complex test involving all scopes and mixing autouse with normal fixtures"""
pytester.makeconftest(
"""
import pytest
@pytest.fixture(scope='session')
def s1(): pass
@pytest.fixture(scope='package', autouse=True)
def p1(): pass
"""
)
pytester.makepyfile(**{"__init__.py": ""})
pytester.makepyfile(
"""
import pytest
@pytest.fixture(scope='module', autouse=True)
def m1(): pass
@pytest.fixture(scope='module')
def m2(s1): pass
@pytest.fixture(scope='function')
def f1(): pass
@pytest.fixture(scope='function')
def f2(): pass
class Test:
@pytest.fixture(scope='class', autouse=True)
def c1(self):
pass
def test_func(self, f2, f1, m2):
pass
"""
)
items, _ = pytester.inline_genitems()
assert isinstance(items[0], Function)
request = TopRequest(items[0], _ispytest=True)
assert request.fixturenames == "s1 p1 m1 m2 c1 f2 f1".split()
def test_parametrized_package_scope_reordering(self, pytester: Pytester) -> None:
"""A parameterized package-scoped fixture correctly reorders items to
minimize setups & teardowns.
Regression test for #12328.
"""
pytester.makepyfile(
__init__="",
conftest="""
import pytest
@pytest.fixture(scope="package", params=["a", "b"])
def fix(request):
return request.param
""",
test_1="def test1(fix): pass",
test_2="def test2(fix): pass",
)
result = pytester.runpytest("--setup-plan")
assert result.ret == ExitCode.OK
result.stdout.fnmatch_lines(
[
" SETUP P fix['a']",
" test_1.py::test1[a] (fixtures used: fix, request)",
" test_2.py::test2[a] (fixtures used: fix, request)",
" TEARDOWN P fix['a']",
" SETUP P fix['b']",
" test_1.py::test1[b] (fixtures used: fix, request)",
" test_2.py::test2[b] (fixtures used: fix, request)",
" TEARDOWN P fix['b']",
],
)
def test_multiple_packages(self, pytester: Pytester) -> None:
"""Complex test involving multiple package fixtures. Make sure teardowns
are executed in order.
.
└── root
├── __init__.py
├── sub1
│ ├── __init__.py
│ ├── conftest.py
│ └── test_1.py
└── sub2
├── __init__.py
├── conftest.py
└── test_2.py
"""
root = pytester.mkdir("root")
root.joinpath("__init__.py").write_text("values = []", encoding="utf-8")
sub1 = root.joinpath("sub1")
sub1.mkdir()
sub1.joinpath("__init__.py").touch()
sub1.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
from .. import values
@pytest.fixture(scope="package")
def fix():
values.append("pre-sub1")
yield values
assert values.pop() == "pre-sub1"
"""
),
encoding="utf-8",
)
sub1.joinpath("test_1.py").write_text(
textwrap.dedent(
"""\
from .. import values
def test_1(fix):
assert values == ["pre-sub1"]
"""
),
encoding="utf-8",
)
sub2 = root.joinpath("sub2")
sub2.mkdir()
sub2.joinpath("__init__.py").touch()
sub2.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
from .. import values
@pytest.fixture(scope="package")
def fix():
values.append("pre-sub2")
yield values
assert values.pop() == "pre-sub2"
"""
),
encoding="utf-8",
)
sub2.joinpath("test_2.py").write_text(
textwrap.dedent(
"""\
from .. import values
def test_2(fix):
assert values == ["pre-sub2"]
"""
),
encoding="utf-8",
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=2)
def test_class_fixture_self_instance(self, pytester: Pytester) -> None:
"""Check that plugin classes which implement fixtures receive the plugin instance
as self (see #2270).
"""
pytester.makeconftest(
"""
import pytest
def pytest_configure(config):
config.pluginmanager.register(MyPlugin())
class MyPlugin():
def __init__(self):
self.arg = 1
@pytest.fixture(scope='function')
def myfix(self):
assert isinstance(self, MyPlugin)
return self.arg
"""
)
pytester.makepyfile(
"""
class TestClass(object):
def test_1(self, myfix):
assert myfix == 1
"""
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
def test_call_fixture_function_error():
"""Check if an error is raised if a fixture function is called directly (#4545)"""
@pytest.fixture
def fix():
raise NotImplementedError()
with pytest.raises(pytest.fail.Exception):
assert fix() == 1
def test_fixture_double_decorator(pytester: Pytester) -> None:
"""Check if an error is raised when using @pytest.fixture twice."""
pytester.makepyfile(
"""
import pytest
@pytest.fixture
@pytest.fixture
def fixt():
pass
"""
)
result = pytester.runpytest()
result.assert_outcomes(errors=1)
result.stdout.fnmatch_lines(
[
"E * ValueError: @pytest.fixture is being applied more than once to the same function 'fixt'"
]
)
def test_fixture_class(pytester: Pytester) -> None:
"""Check if an error is raised when using @pytest.fixture on a class."""
pytester.makepyfile(
"""
import pytest
@pytest.fixture
class A:
pass
"""
)
result = pytester.runpytest()
result.assert_outcomes(errors=1)
def test_fixture_param_shadowing(pytester: Pytester) -> None:
"""Parametrized arguments would be shadowed if a fixture with the same name also exists (#5036)"""
pytester.makepyfile(
"""
import pytest
@pytest.fixture(params=['a', 'b'])
def argroot(request):
return request.param
@pytest.fixture
def arg(argroot):
return argroot
# This should only be parametrized directly
@pytest.mark.parametrize("arg", [1])
def test_direct(arg):
assert arg == 1
# This should be parametrized based on the fixtures
def test_normal_fixture(arg):
assert isinstance(arg, str)
# Indirect should still work:
@pytest.fixture
def arg2(request):
return 2*request.param
@pytest.mark.parametrize("arg2", [1], indirect=True)
def test_indirect(arg2):
assert arg2 == 2
"""
)
# Only one test should have run
result = pytester.runpytest("-v")
result.assert_outcomes(passed=4)
result.stdout.fnmatch_lines(["*::test_direct[[]1[]]*"])
result.stdout.fnmatch_lines(["*::test_normal_fixture[[]a[]]*"])
result.stdout.fnmatch_lines(["*::test_normal_fixture[[]b[]]*"])
result.stdout.fnmatch_lines(["*::test_indirect[[]1[]]*"])
def test_fixture_named_request(pytester: Pytester) -> None:
pytester.copy_example("fixtures/test_fixture_named_request.py")
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*'request' is a reserved word for fixtures, use another name:",
" *test_fixture_named_request.py:8",
]
)
def test_indirect_fixture_does_not_break_scope(pytester: Pytester) -> None:
"""Ensure that fixture scope is respected when using indirect fixtures (#570)"""
pytester.makepyfile(
"""
import pytest
instantiated = []
@pytest.fixture(scope="session")
def fixture_1(request):
instantiated.append(("fixture_1", request.param))
@pytest.fixture(scope="session")
def fixture_2(request):
instantiated.append(("fixture_2", request.param))
scenarios = [
("A", "a1"),
("A", "a2"),
("B", "b1"),
("B", "b2"),
("C", "c1"),
("C", "c2"),
]
@pytest.mark.parametrize(
"fixture_1,fixture_2", scenarios, indirect=["fixture_1", "fixture_2"]
)
def test_create_fixtures(fixture_1, fixture_2):
pass
def test_check_fixture_instantiations():
assert instantiated == [
('fixture_1', 'A'),
('fixture_2', 'a1'),
('fixture_2', 'a2'),
('fixture_1', 'B'),
('fixture_2', 'b1'),
('fixture_2', 'b2'),
('fixture_1', 'C'),
('fixture_2', 'c1'),
('fixture_2', 'c2'),
]
"""
)
result = pytester.runpytest()
result.assert_outcomes(passed=7)
def test_fixture_parametrization_nparray(pytester: Pytester) -> None:
pytest.importorskip("numpy")
pytester.makepyfile(
"""
from numpy import linspace
from pytest import fixture
@fixture(params=linspace(1, 10, 10))
def value(request):
return request.param
def test_bug(value):
assert value == value
"""
)
result = pytester.runpytest()
result.assert_outcomes(passed=10)
def test_fixture_arg_ordering(pytester: Pytester) -> None:
"""
This test describes how fixtures in the same scope but without explicit dependencies
between them are created. While users should make dependencies explicit, often
they rely on this order, so this test exists to catch regressions in this regard.
See #6540 and #6492.
"""
p1 = pytester.makepyfile(
"""
import pytest
suffixes = []
@pytest.fixture
def fix_1(): suffixes.append("fix_1")
@pytest.fixture
def fix_2(): suffixes.append("fix_2")
@pytest.fixture
def fix_3(): suffixes.append("fix_3")
@pytest.fixture
def fix_4(): suffixes.append("fix_4")
@pytest.fixture
def fix_5(): suffixes.append("fix_5")
@pytest.fixture
def fix_combined(fix_1, fix_2, fix_3, fix_4, fix_5): pass
def test_suffix(fix_combined):
assert suffixes == ["fix_1", "fix_2", "fix_3", "fix_4", "fix_5"]
"""
)
result = pytester.runpytest("-vv", str(p1))
assert result.ret == 0
def test_yield_fixture_with_no_value(pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.fixture(name='custom')
def empty_yield():
if False:
yield
def test_fixt(custom):
pass
"""
)
expected = "E ValueError: custom did not yield a value"
result = pytester.runpytest()
result.assert_outcomes(errors=1)
result.stdout.fnmatch_lines([expected])
assert result.ret == ExitCode.TESTS_FAILED
def test_deduplicate_names() -> None:
items = deduplicate_names("abacd")
assert items == ("a", "b", "c", "d")
items = deduplicate_names((*items, "g", "f", "g", "e", "b"))
assert items == ("a", "b", "c", "d", "g", "f", "e")
def test_staticmethod_classmethod_fixture_instance(pytester: Pytester) -> None:
"""Ensure that static and class methods get and have access to a fresh
instance.
This also ensures `setup_method` works well with static and class methods.
Regression test for #12065.
"""
pytester.makepyfile(
"""
import pytest
class Test:
ran_setup_method = False
ran_fixture = False
def setup_method(self):
assert not self.ran_setup_method
self.ran_setup_method = True
@pytest.fixture(autouse=True)
def fixture(self):
assert not self.ran_fixture
self.ran_fixture = True
def test_method(self):
assert self.ran_setup_method
assert self.ran_fixture
@staticmethod
def test_1(request):
assert request.instance.ran_setup_method
assert request.instance.ran_fixture
@classmethod
def test_2(cls, request):
assert request.instance.ran_setup_method
assert request.instance.ran_fixture
"""
)
result = pytester.runpytest()
assert result.ret == ExitCode.OK
result.assert_outcomes(passed=3)
def test_scoped_fixture_caching(pytester: Pytester) -> None:
"""Make sure setup and finalization is only run once when using scoped fixture
multiple times."""
pytester.makepyfile(
"""
from __future__ import annotations
from typing import Generator
import pytest
executed: list[str] = []
@pytest.fixture(scope="class")
def fixture_1() -> Generator[None, None, None]:
executed.append("fix setup")
yield
executed.append("fix teardown")
class TestFixtureCaching:
def test_1(self, fixture_1: None) -> None:
assert executed == ["fix setup"]
def test_2(self, fixture_1: None) -> None:
assert executed == ["fix setup"]
def test_expected_setup_and_teardown() -> None:
assert executed == ["fix setup", "fix teardown"]
"""
)
result = pytester.runpytest()
assert result.ret == 0
def test_scoped_fixture_caching_exception(pytester: Pytester) -> None:
"""Make sure setup & finalization is only run once for scoped fixture, with a cached exception."""
pytester.makepyfile(
"""
from __future__ import annotations
import pytest
executed_crash: list[str] = []
@pytest.fixture(scope="class")
def fixture_crash(request: pytest.FixtureRequest) -> None:
executed_crash.append("fix_crash setup")
def my_finalizer() -> None:
executed_crash.append("fix_crash teardown")
request.addfinalizer(my_finalizer)
raise Exception("foo")
class TestFixtureCachingException:
@pytest.mark.xfail
def test_crash_1(self, fixture_crash: None) -> None:
...
@pytest.mark.xfail
def test_crash_2(self, fixture_crash: None) -> None:
...
def test_crash_expected_setup_and_teardown() -> None:
assert executed_crash == ["fix_crash setup", "fix_crash teardown"]
"""
)
result = pytester.runpytest()
assert result.ret == 0
def test_scoped_fixture_teardown_order(pytester: Pytester) -> None:
"""
Make sure teardowns happen in reverse order of setup with scoped fixtures, when
a later test only depends on a subset of scoped fixtures.
Regression test for https://github.com/pytest-dev/pytest/issues/1489
"""
pytester.makepyfile(
"""
from typing import Generator
import pytest
last_executed = ""
@pytest.fixture(scope="module")
def fixture_1() -> Generator[None, None, None]:
global last_executed
assert last_executed == ""
last_executed = "fixture_1_setup"
yield
assert last_executed == "fixture_2_teardown"
last_executed = "fixture_1_teardown"
@pytest.fixture(scope="module")
def fixture_2() -> Generator[None, None, None]:
global last_executed
assert last_executed == "fixture_1_setup"
last_executed = "fixture_2_setup"
yield
assert last_executed == "run_test"
last_executed = "fixture_2_teardown"
def test_fixture_teardown_order(fixture_1: None, fixture_2: None) -> None:
global last_executed
assert last_executed == "fixture_2_setup"
last_executed = "run_test"
def test_2(fixture_1: None) -> None:
# This would previously queue an additional teardown of fixture_1,
# despite fixture_1's value being cached, which caused fixture_1 to be
# torn down before fixture_2 - violating the rule that teardowns should
# happen in reverse order of setup.
pass
"""
)
result = pytester.runpytest()
assert result.ret == 0
def test_subfixture_teardown_order(pytester: Pytester) -> None:
"""
Make sure fixtures don't re-register their finalization in parent fixtures multiple
times, causing ordering failure in their teardowns.
Regression test for #12135
"""
pytester.makepyfile(
"""
import pytest
execution_order = []
@pytest.fixture(scope="class")
def fixture_1():
...
@pytest.fixture(scope="class")
def fixture_2(fixture_1):
execution_order.append("setup 2")
yield
execution_order.append("teardown 2")
@pytest.fixture(scope="class")
def fixture_3(fixture_1):
execution_order.append("setup 3")
yield
execution_order.append("teardown 3")
class TestFoo:
def test_initialize_fixtures(self, fixture_2, fixture_3):
...
# This would previously reschedule fixture_2's finalizer in the parent fixture,
# causing it to be torn down before fixture 3.
def test_reschedule_fixture_2(self, fixture_2):
...
# Force finalization directly on fixture_1
# Otherwise the cleanup would sequence 3&2 before 1 as normal.
@pytest.mark.parametrize("fixture_1", [None], indirect=["fixture_1"])
def test_finalize_fixture_1(self, fixture_1):
...
def test_result():
assert execution_order == ["setup 2", "setup 3", "teardown 3", "teardown 2"]
"""
)
result = pytester.runpytest()
assert result.ret == 0
def test_parametrized_fixture_scope_allowed(pytester: Pytester) -> None:
"""
Make sure scope from parametrize does not affect fixture's ability to be
depended upon.
Regression test for #13248
"""
pytester.makepyfile(
"""
import pytest
@pytest.fixture(scope="session")
def my_fixture(request):
return getattr(request, "param", None)
@pytest.fixture(scope="session")
def another_fixture(my_fixture):
return my_fixture
@pytest.mark.parametrize("my_fixture", ["a value"], indirect=True, scope="function")
def test_foo(another_fixture):
assert another_fixture == "a value"
"""
)
result = pytester.runpytest()
result.assert_outcomes(passed=1)
def test_collect_positional_only(pytester: Pytester) -> None:
"""Support the collection of tests with positional-only arguments (#13376)."""
pytester.makepyfile(
"""
import pytest
class Test:
@pytest.fixture
def fix(self):
return 1
def test_method(self, /, fix):
assert fix == 1
"""
)
result = pytester.runpytest()
result.assert_outcomes(passed=1)
def test_parametrization_dependency_pruning(pytester: Pytester) -> None:
"""Test that when a fixture is dynamically shadowed by parameterization, it
is properly pruned and not executed."""
pytester.makepyfile(
"""
import pytest
# This fixture should never run because shadowed_fixture is parametrized.
@pytest.fixture
def boom():
raise RuntimeError("BOOM!")
# This fixture is shadowed by metafunc.parametrize in pytest_generate_tests.
@pytest.fixture
def shadowed_fixture(boom):
return "fixture_value"
# Dynamically parametrize shadowed_fixture, replacing the fixture with direct values.
def pytest_generate_tests(metafunc):
if "shadowed_fixture" in metafunc.fixturenames:
metafunc.parametrize("shadowed_fixture", ["param1", "param2"])
# This test should receive shadowed_fixture as a parametrized value, and
# boom should not explode.
def test_shadowed(shadowed_fixture):
assert shadowed_fixture in ["param1", "param2"]
"""
)
result = pytester.runpytest()
result.assert_outcomes(passed=2)
def test_fixture_closure_with_overrides(pytester: Pytester) -> None:
"""Test that an item's static fixture closure properly includes transitive
dependencies through overridden fixtures (#13773)."""
pytester.makeconftest(
"""
import pytest
@pytest.fixture
def db(): pass
@pytest.fixture
def app(db): pass
"""
)
pytester.makepyfile(
"""
import pytest
# Overrides conftest-level `app` and requests it.
@pytest.fixture
def app(app): pass
class TestClass:
# Overrides module-level `app` and requests it.
@pytest.fixture
def app(self, app): pass
def test_something(self, request, app):
# Both dynamic and static fixture closures should include 'db'.
assert 'db' in request.fixturenames
assert 'db' in request.node.fixturenames
# No dynamic dependencies, should be equal.
assert set(request.fixturenames) == set(request.node.fixturenames)
"""
)
result = pytester.runpytest("-v")
result.assert_outcomes(passed=1)
def test_fixture_closure_with_overrides_and_intermediary(pytester: Pytester) -> None:
"""Test that an item's static fixture closure properly includes transitive
dependencies through overridden fixtures (#13773).
A more complicated case than test_fixture_closure_with_overrides, adds an
intermediary so the override chain is not direct.
"""
pytester.makeconftest(
"""
import pytest
@pytest.fixture
def db(): pass
@pytest.fixture
def app(db): pass
@pytest.fixture
def intermediate(app): pass
"""
)
pytester.makepyfile(
"""
import pytest
# Overrides conftest-level `app` and requests it.
@pytest.fixture
def app(intermediate): pass
class TestClass:
# Overrides module-level `app` and requests it.
@pytest.fixture
def app(self, app): pass
def test_something(self, request, app):
# Both dynamic and static fixture closures should include 'db'.
assert 'db' in request.fixturenames
assert 'db' in request.node.fixturenames
# No dynamic dependencies, should be equal.
assert set(request.fixturenames) == set(request.node.fixturenames)
"""
)
result = pytester.runpytest("-v")
result.assert_outcomes(passed=1)
def test_fixture_closure_with_broken_override_chain(pytester: Pytester) -> None:
"""Test that an item's static fixture closure properly includes transitive
dependencies through overridden fixtures (#13773).
A more complicated case than test_fixture_closure_with_overrides, one of the
fixtures in the chain doesn't call its super, so it shouldn't be included.
"""
pytester.makeconftest(
"""
import pytest
@pytest.fixture
def db(): pass
@pytest.fixture
def app(db): pass
"""
)
pytester.makepyfile(
"""
import pytest
# Overrides conftest-level `app` and *doesn't* request it.
@pytest.fixture
def app(): pass
class TestClass:
# Overrides module-level `app` and requests it.
@pytest.fixture
def app(self, app): pass
def test_something(self, request, app):
# Both dynamic and static fixture closures should include 'db'.
assert 'db' not in request.fixturenames
assert 'db' not in request.node.fixturenames
# No dynamic dependencies, should be equal.
assert set(request.fixturenames) == set(request.node.fixturenames)
"""
)
result = pytester.runpytest("-v")
result.assert_outcomes(passed=1)
def test_fixture_closure_handles_circular_dependencies(pytester: Pytester) -> None:
"""Test that getfixtureclosure properly handles circular dependencies.
The test will error in the runtest phase due to the fixture loop,
but the closure computation still completes.
"""
pytester.makepyfile(
"""
import pytest
# Direct circular dependency.
@pytest.fixture
def fix_a(fix_b): pass
@pytest.fixture
def fix_b(fix_a): pass
# Indirect circular dependency through multiple fixtures.
@pytest.fixture
def fix_x(fix_y): pass
@pytest.fixture
def fix_y(fix_z): pass
@pytest.fixture
def fix_z(fix_x): pass
def test_circular_deps(fix_a, fix_x):
pass
"""
)
items, _hookrec = pytester.inline_genitems()
assert isinstance(items[0], Function)
assert items[0].fixturenames == ["fix_a", "fix_x", "fix_b", "fix_y", "fix_z"]
def test_fixture_closure_handles_diamond_dependencies(pytester: Pytester) -> None:
"""Test that getfixtureclosure properly handles diamond dependencies."""
pytester.makepyfile(
"""
import pytest
@pytest.fixture
def db(): pass
@pytest.fixture
def user(db): pass
@pytest.fixture
def session(db): pass
@pytest.fixture
def app(user, session): pass
def test_diamond_deps(request, app):
assert request.node.fixturenames == ["request", "app", "user", "db", "session"]
assert request.fixturenames == ["request", "app", "user", "db", "session"]
"""
)
result = pytester.runpytest("-v")
result.assert_outcomes(passed=1)
def test_fixture_closure_with_complex_override_and_shared_deps(
pytester: Pytester,
) -> None:
"""Test that shared dependencies in override chains are processed only once."""
pytester.makeconftest(
"""
import pytest
@pytest.fixture
def db(): pass
@pytest.fixture
def cache(): pass
@pytest.fixture
def settings(): pass
@pytest.fixture
def app(db, cache, settings): pass
"""
)
pytester.makepyfile(
"""
import pytest
# Override app, but also directly use cache and settings.
# This creates multiple paths to the same fixtures.
@pytest.fixture
def app(app, cache, settings): pass
class TestClass:
# Another override that uses both app and cache.
@pytest.fixture
def app(self, app, cache): pass
def test_shared_deps(self, request, app):
assert request.node.fixturenames == ["request", "app", "db", "cache", "settings"]
"""
)
result = pytester.runpytest("-v")
result.assert_outcomes(passed=1)
def test_fixture_closure_with_parametrize_ignore(pytester: Pytester) -> None:
"""Test that getfixtureclosure properly handles parametrization argnames
which override a fixture."""
pytester.makepyfile(
"""
import pytest
@pytest.fixture
def fix1(fix2): pass
@pytest.fixture
def fix2(fix3): pass
@pytest.fixture
def fix3(): pass
@pytest.mark.parametrize('fix2', ['2'])
def test_it(request, fix1):
assert request.node.fixturenames == ["request", "fix1", "fix2"]
assert request.fixturenames == ["request", "fix1", "fix2"]
"""
)
result = pytester.runpytest("-v")
result.assert_outcomes(passed=1)
| TestScopeOrdering |
python | pandas-dev__pandas | pandas/core/indexes/interval.py | {
"start": 3787,
"end": 49890
} | class ____(ExtensionIndex):
"""
Immutable index of intervals that are closed on the same side.
Parameters
----------
data : array-like (1-dimensional)
Array-like (ndarray, :class:`DateTimeArray`, :class:`TimeDeltaArray`) containing
Interval objects from which to build the IntervalIndex.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both or
neither.
dtype : dtype or None, default None
If None, dtype will be inferred.
copy : bool, default False
Copy the input data.
name : object, optional
Name to be stored in the index.
verify_integrity : bool, default True
Verify that the IntervalIndex is valid.
Attributes
----------
left
right
closed
mid
length
is_empty
is_non_overlapping_monotonic
is_overlapping
values
Methods
-------
from_arrays
from_tuples
from_breaks
contains
overlaps
set_closed
to_tuples
See Also
--------
Index : The base pandas Index type.
Interval : A bounded slice-like interval; the elements of an IntervalIndex.
interval_range : Function to create a fixed frequency IntervalIndex.
cut : Bin values into discrete Intervals.
qcut : Bin values into equal-sized Intervals based on rank or sample quantiles.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#intervalindex>`__
for more.
Examples
--------
A new ``IntervalIndex`` is typically constructed using
:func:`interval_range`:
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
dtype='interval[int64, right]')
It may also be constructed using one of the constructor
methods: :meth:`IntervalIndex.from_arrays`,
:meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.
See further examples in the doc strings of ``interval_range`` and the
mentioned constructor methods.
"""
_typ = "intervalindex"
# annotate properties pinned via inherit_names
closed: IntervalClosedType
is_non_overlapping_monotonic: bool
closed_left: bool
closed_right: bool
open_left: bool
open_right: bool
_data: IntervalArray
_values: IntervalArray
_can_hold_strings = False
_data_cls = IntervalArray
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
data,
closed: IntervalClosedType | None = None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable | None = None,
verify_integrity: bool = True,
) -> Self:
name = maybe_extract_name(name, data, cls)
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray(
data,
closed=closed,
copy=copy,
dtype=dtype,
verify_integrity=verify_integrity,
)
return cls._simple_new(array, name)
    @classmethod
    def from_breaks(
        cls,
        breaks,
        closed: IntervalClosedType | None = "right",
        name: Hashable | None = None,
        copy: bool = False,
        dtype: Dtype | None = None,
    ) -> IntervalIndex:
        """
        Construct an IntervalIndex from an array of splits.
        Parameters
        ----------
        breaks : array-like (1-dimensional)
            Left and right bounds for each interval.
        closed : {'left', 'right', 'both', 'neither'}, default 'right'
            Whether the intervals are closed on the left-side, right-side, both
            or neither.
        name : str, optional
            Name of the resulting IntervalIndex.
        copy : bool, default False
            Copy the data.
        dtype : dtype or None, default None
            If None, dtype will be inferred.
        Returns
        -------
        IntervalIndex
        See Also
        --------
        interval_range : Function to create a fixed frequency IntervalIndex.
        IntervalIndex.from_arrays : Construct from a left and right array.
        IntervalIndex.from_tuples : Construct from a sequence of tuples.
        Examples
        --------
        >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])
        IntervalIndex([(0, 1], (1, 2], (2, 3]],
                      dtype='interval[int64, right]')
        """
        # Delegate construction/validation to IntervalArray.from_breaks and
        # re-brand any errors with this class's name.
        with rewrite_exception("IntervalArray", cls.__name__):
            array = IntervalArray.from_breaks(
                breaks, closed=closed, copy=copy, dtype=dtype
            )
        return cls._simple_new(array, name=name)
    @classmethod
    def from_arrays(
        cls,
        left,
        right,
        closed: IntervalClosedType = "right",
        name: Hashable | None = None,
        copy: bool = False,
        dtype: Dtype | None = None,
    ) -> IntervalIndex:
        """
        Construct from two arrays defining the left and right bounds.
        Parameters
        ----------
        left : array-like (1-dimensional)
            Left bounds for each interval.
        right : array-like (1-dimensional)
            Right bounds for each interval.
        closed : {'left', 'right', 'both', 'neither'}, default 'right'
            Whether the intervals are closed on the left-side, right-side, both
            or neither.
        name : str, optional
            Name of the resulting IntervalIndex.
        copy : bool, default False
            Copy the data.
        dtype : dtype, optional
            If None, dtype will be inferred.
        Returns
        -------
        IntervalIndex
        Raises
        ------
        ValueError
            When a value is missing in only one of `left` or `right`.
            When a value in `left` is greater than the corresponding value
            in `right`.
        See Also
        --------
        interval_range : Function to create a fixed frequency IntervalIndex.
        IntervalIndex.from_breaks : Construct an IntervalIndex from an array of
            splits.
        IntervalIndex.from_tuples : Construct an IntervalIndex from an
            array-like of tuples.
        Notes
        -----
        Each element of `left` must be less than or equal to the `right`
        element at the same position. If an element is missing, it must be
        missing in both `left` and `right`. A TypeError is raised when
        using an unsupported type for `left` or `right`. At the moment,
        'category', 'object', and 'string' subtypes are not supported.
        Examples
        --------
        >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])
        IntervalIndex([(0, 1], (1, 2], (2, 3]],
                      dtype='interval[int64, right]')
        """
        # Validation (matching NaNs, left <= right) happens inside
        # IntervalArray.from_arrays; errors are re-branded with this class name.
        with rewrite_exception("IntervalArray", cls.__name__):
            array = IntervalArray.from_arrays(
                left, right, closed, copy=copy, dtype=dtype
            )
        return cls._simple_new(array, name=name)
    @classmethod
    def from_tuples(
        cls,
        data,
        closed: IntervalClosedType = "right",
        name: Hashable | None = None,
        copy: bool = False,
        dtype: Dtype | None = None,
    ) -> IntervalIndex:
        """
        Construct an IntervalIndex from an array-like of tuples.
        Parameters
        ----------
        data : array-like (1-dimensional)
            Array of tuples.
        closed : {'left', 'right', 'both', 'neither'}, default 'right'
            Whether the intervals are closed on the left-side, right-side, both
            or neither.
        name : str, optional
            Name of the resulting IntervalIndex.
        copy : bool, default False
            By-default copy the data, this is compat only and ignored.
        dtype : dtype or None, default None
            If None, dtype will be inferred.
        Returns
        -------
        IntervalIndex
        See Also
        --------
        interval_range : Function to create a fixed frequency IntervalIndex.
        IntervalIndex.from_arrays : Construct an IntervalIndex from a left and
            right array.
        IntervalIndex.from_breaks : Construct an IntervalIndex from an array of
            splits.
        Examples
        --------
        >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])
        IntervalIndex([(0, 1], (1, 2]],
                      dtype='interval[int64, right]')
        """
        # Tuple unpacking/validation is delegated to IntervalArray.from_tuples;
        # errors are re-branded with this class's name.
        with rewrite_exception("IntervalArray", cls.__name__):
            arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)
        return cls._simple_new(arr, name=name)
# --------------------------------------------------------------------
# error: Return type "IntervalTree" of "_engine" incompatible with return type
# "Union[IndexEngine, ExtensionEngine]" in supertype "Index"
@cache_readonly
def _engine(self) -> IntervalTree: # type: ignore[override]
# IntervalTree does not supports numpy array unless they are 64 bit
left = self._maybe_convert_i8(self.left)
left = maybe_upcast_numeric_to_64bit(left)
right = self._maybe_convert_i8(self.right)
right = maybe_upcast_numeric_to_64bit(right)
return IntervalTree(left, right, closed=self.closed)
def __contains__(self, key: Any) -> bool:
"""
return a boolean if this key is IN the index
We *only* accept an Interval
Parameters
----------
key : Interval
Returns
-------
bool
"""
hash(key)
if not isinstance(key, Interval):
if is_valid_na_for_dtype(key, self.dtype):
return self.hasnans
return False
try:
self.get_loc(key)
return True
except KeyError:
return False
def _getitem_slice(self, slobj: slice) -> IntervalIndex:
"""
Fastpath for __getitem__ when we know we have a slice.
"""
res = self._data[slobj]
return type(self)._simple_new(res, name=self._name)
@cache_readonly
def _multiindex(self) -> MultiIndex:
return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"])
def __reduce__(self):
d = {
"left": self.left,
"right": self.right,
"closed": self.closed,
"name": self.name,
}
return _new_IntervalIndex, (type(self), d), None
    @property
    def inferred_type(self) -> str:
        """Return a string of the type inferred from the values"""
        # Always "interval": every element of an IntervalIndex is an Interval.
        return "interval"
    def memory_usage(self, deep: bool = False) -> int:
        """
        Memory usage of the values.
        Parameters
        ----------
        deep : bool, default False
            Introspect the data deeply, interrogate
            `object` dtypes for system-level memory consumption.
        Returns
        -------
        bytes used
            Returns memory usage of the values in the Index in bytes.
        See Also
        --------
        numpy.ndarray.nbytes : Total bytes consumed by the elements of the
            array.
        Notes
        -----
        Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False or if used on PyPy
        Examples
        --------
        >>> idx = pd.Index([1, 2, 3])
        >>> idx.memory_usage()
        24
        """
        # we don't use an explicit engine
        # so return the bytes here
        # Usage is the sum over the two endpoint Indexes (left and right).
        return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)
    # IntervalTree doesn't have a is_monotonic_decreasing, so have to override
    # the Index implementation
    @cache_readonly
    def is_monotonic_decreasing(self) -> bool:
        """
        Return True if the IntervalIndex is monotonic decreasing (only equal or
        decreasing values), else False
        """
        # A sequence is monotonic decreasing iff its reverse is monotonic
        # increasing, which the engine can answer directly.
        return self[::-1].is_monotonic_increasing
@cache_readonly
def is_unique(self) -> bool:
"""
Return True if the IntervalIndex contains unique elements, else False.
"""
left = self.left
right = self.right
if self.isna().sum() > 1:
return False
if left.is_unique or right.is_unique:
return True
seen_pairs = set()
check_idx = np.where(left.duplicated(keep=False))[0]
for idx in check_idx:
pair = (left[idx], right[idx])
if pair in seen_pairs:
return False
seen_pairs.add(pair)
return True
    @property
    def is_overlapping(self) -> bool:
        """
        Return True if the IntervalIndex has overlapping intervals, else False.
        Two intervals overlap if they share a common point, including closed
        endpoints. Intervals that only have an open endpoint in common do not
        overlap.
        Returns
        -------
        bool
            Boolean indicating if the IntervalIndex has overlapping intervals.
        See Also
        --------
        Interval.overlaps : Check whether two Interval objects overlap.
        IntervalIndex.overlaps : Check an IntervalIndex elementwise for
            overlaps.
        Examples
        --------
        >>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)])
        >>> index
        IntervalIndex([(0, 2], (1, 3], (4, 5]],
              dtype='interval[int64, right]')
        >>> index.is_overlapping
        True
        Intervals that share closed endpoints overlap:
        >>> index = pd.interval_range(0, 3, closed="both")
        >>> index
        IntervalIndex([[0, 1], [1, 2], [2, 3]],
              dtype='interval[int64, both]')
        >>> index.is_overlapping
        True
        Intervals that only have an open endpoint in common do not overlap:
        >>> index = pd.interval_range(0, 3, closed="left")
        >>> index
        IntervalIndex([[0, 1), [1, 2), [2, 3)],
              dtype='interval[int64, left]')
        >>> index.is_overlapping
        False
        """
        # GH 23309
        # Overlap detection is delegated to the IntervalTree engine.
        return self._engine.is_overlapping
def _needs_i8_conversion(self, key) -> bool:
"""
Check if a given key needs i8 conversion. Conversion is necessary for
Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An
Interval-like requires conversion if its endpoints are one of the
aforementioned types.
Assumes that any list-like data has already been cast to an Index.
Parameters
----------
key : scalar or Index-like
The key that should be checked for i8 conversion
Returns
-------
bool
"""
key_dtype = getattr(key, "dtype", None)
if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval):
return self._needs_i8_conversion(key.left)
i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)
return isinstance(key, i8_types)
    def _maybe_convert_i8(self, key):
        """
        Maybe convert a given key to its equivalent i8 value(s). Used as a
        preprocessing step prior to IntervalTree queries (self._engine), which
        expects numeric data.
        Parameters
        ----------
        key : scalar or list-like
            The key that should maybe be converted to i8.
        Returns
        -------
        scalar or list-like
            The original key if no conversion occurred, int if converted scalar,
            Index with an int64 dtype if converted list-like.
        """
        # Normalize list-likes to an Index with a 64-bit numeric dtype first.
        if is_list_like(key):
            key = ensure_index(key)
            key = maybe_upcast_numeric_to_64bit(key)
        if not self._needs_i8_conversion(key):
            return key
        scalar = is_scalar(key)
        key_dtype = getattr(key, "dtype", None)
        if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval):
            # convert left/right and reconstruct
            left = self._maybe_convert_i8(key.left)
            right = self._maybe_convert_i8(key.right)
            constructor = Interval if scalar else IntervalIndex.from_arrays
            return constructor(left, right, closed=self.closed)
        if scalar:
            # Timestamp/Timedelta
            key_dtype, key_i8 = infer_dtype_from_scalar(key)
            if isinstance(key, Period):
                key_i8 = key.ordinal
            elif isinstance(key_i8, Timestamp):
                key_i8 = key_i8._value
            elif isinstance(key_i8, (np.datetime64, np.timedelta64)):
                key_i8 = key_i8.view("i8")
        else:
            # DatetimeIndex/TimedeltaIndex
            key_dtype, key_i8 = key.dtype, Index(key.asi8)
            if key.hasnans:
                # convert NaT from its i8 value to np.nan so it's not viewed
                # as a valid value, maybe causing errors (e.g. is_overlapping)
                key_i8 = key_i8.where(~key._isnan)
        # ensure consistency with IntervalIndex subtype
        # error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any],
        # ExtensionDtype]" has no attribute "subtype"
        subtype = self.dtype.subtype  # type: ignore[union-attr]
        # Reject keys whose resolution/dtype differs from the index's subtype;
        # comparing raw i8 values across dtypes would be meaningless.
        if subtype != key_dtype:
            raise ValueError(
                f"Cannot index an IntervalIndex of subtype {subtype} with "
                f"values of dtype {key_dtype}"
            )
        return key_i8
    def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"):
        """
        Locate the insertion point for ``label`` on a non-overlapping,
        monotonic IntervalIndex by searching the appropriate endpoint Index.
        Raises
        ------
        KeyError
            If the intervals overlap or are not monotonic.
        NotImplementedError
            If ``label`` is itself interval-like.
        """
        if not self.is_non_overlapping_monotonic:
            raise KeyError(
                "can only get slices from an IntervalIndex if bounds are "
                "non-overlapping and all monotonic increasing or decreasing"
            )
        if isinstance(label, (IntervalMixin, IntervalIndex)):
            raise NotImplementedError("Interval objects are not currently supported")
        # GH 20921: "not is_monotonic_increasing" for the second condition
        # instead of "is_monotonic_decreasing" to account for single element
        # indexes being both increasing and decreasing
        if (side == "left" and self.left.is_monotonic_increasing) or (
            side == "right" and not self.left.is_monotonic_increasing
        ):
            sub_idx = self.right
            # For an open right endpoint a label equal to the bound falls
            # outside the interval, so search the next representable label.
            if self.open_right:
                label = _get_next_label(label)
        else:
            sub_idx = self.left
            if self.open_left:
                label = _get_prev_label(label)
        return sub_idx._searchsorted_monotonic(label, side)
# --------------------------------------------------------------------
# Indexing Methods
    def get_loc(self, key) -> int | slice | np.ndarray:
        """
        Get integer location, slice or boolean mask for requested label.
        The `get_loc` method is used to retrieve the integer index, a slice for
        slicing objects, or a boolean mask indicating the presence of the label
        in the `IntervalIndex`.
        Parameters
        ----------
        key : label
            The value or range to find in the IntervalIndex.
        Returns
        -------
        int if unique index, slice if monotonic index, else mask
            The position or positions found. This could be a single
            number, a range, or an array of true/false values
            indicating the position(s) of the label.
        See Also
        --------
        IntervalIndex.get_indexer_non_unique : Compute indexer and
            mask for new index given the current index.
        Index.get_loc : Similar method in the base Index class.
        Examples
        --------
        >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)
        >>> index = pd.IntervalIndex([i1, i2])
        >>> index.get_loc(1)
        0
        You can also supply a point inside an interval.
        >>> index.get_loc(1.5)
        1
        If a label is in several intervals, you get the locations of all the
        relevant intervals.
        >>> i3 = pd.Interval(0, 2)
        >>> overlapping_index = pd.IntervalIndex([i1, i2, i3])
        >>> overlapping_index.get_loc(0.5)
        array([ True, False,  True])
        Only exact matches will be returned if an interval is provided.
        >>> index.get_loc(pd.Interval(0, 1))
        0
        """
        self._check_indexing_error(key)
        if isinstance(key, Interval):
            # Interval keys require an exact match: same closed side and
            # identical endpoints.
            if self.closed != key.closed:
                raise KeyError(key)
            mask = (self.left == key.left) & (self.right == key.right)
        elif is_valid_na_for_dtype(key, self.dtype):
            mask = self.isna()
        else:
            # assume scalar
            # A scalar matches every interval that contains it; the comparison
            # operators depend on whether each side is closed.
            op_left = le if self.closed_left else lt
            op_right = le if self.closed_right else lt
            try:
                mask = op_left(self.left, key) & op_right(key, self.right)
            except TypeError as err:
                # scalar is not comparable to II subtype --> invalid label
                raise KeyError(key) from err
        # Number of intervals matching/containing the key.
        matches = mask.sum()
        if matches == 0:
            raise KeyError(key)
        if matches == 1:
            return mask.argmax()
        res = lib.maybe_booleans_to_slice(mask.view("u1"))
        if isinstance(res, slice) and res.stop is None:
            # TODO: DO this in maybe_booleans_to_slice?
            res = slice(res.start, len(self), res.step)
        return res
    def _get_indexer(
        self,
        target: Index,
        method: str | None = None,
        limit: int | None = None,
        tolerance: Any | None = None,
    ) -> npt.NDArray[np.intp]:
        """
        Compute positions in self for each element of ``target``; -1 marks
        entries with no match.
        """
        if isinstance(target, IntervalIndex):
            # We only get here with not self.is_overlapping
            # -> at most one match per interval in target
            # want exact matches -> need both left/right to match, so defer to
            # left/right get_indexer, compare elementwise, equality -> match
            if self.left.is_unique and self.right.is_unique:
                indexer = self._get_indexer_unique_sides(target)
            else:
                indexer = self._get_indexer_pointwise(target)[0]
        elif not (is_object_dtype(target.dtype) or is_string_dtype(target.dtype)):
            # homogeneous scalar index: use IntervalTree
            # we should always have self._should_partial_index(target) here
            target = self._maybe_convert_i8(target)
            indexer = self._engine.get_indexer(target.values)
        else:
            # heterogeneous scalar index: defer elementwise to get_loc
            # we should always have self._should_partial_index(target) here
            return self._get_indexer_pointwise(target)[0]
        return ensure_platform_int(indexer)
    def get_indexer_non_unique(
        self, target: Index
    ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
        """
        Compute indexer and mask for new index given the current index.
        The indexer should be then used as an input to ndarray.take to align the
        current data to the new index.
        Parameters
        ----------
        target : IntervalIndex or list of Intervals
            An iterable containing the values to be used for computing indexer.
        Returns
        -------
        indexer : np.ndarray[np.intp]
            Integers from 0 to n - 1 indicating that the index at these
            positions matches the corresponding target values. Missing values
            in the target are marked by -1.
        missing : np.ndarray[np.intp]
            An indexer into the target of the values not found.
            These correspond to the -1 in the indexer array.
        See Also
        --------
        Index.get_indexer : Computes indexer and mask for new index given
            the current index.
        Index.get_indexer_for : Returns an indexer even when non-unique.
        Examples
        --------
        >>> index = pd.Index(["c", "b", "a", "b", "b"])
        >>> index.get_indexer_non_unique(["b", "b"])
        (array([1, 3, 4, 1, 3, 4]), array([], dtype=int64))
        In the example below there are no matched values.
        >>> index = pd.Index(["c", "b", "a", "b", "b"])
        >>> index.get_indexer_non_unique(["q", "r", "t"])
        (array([-1, -1, -1]), array([0, 1, 2]))
        For this reason, the returned ``indexer`` contains only integers equal to -1.
        It demonstrates that there's no match between the index and the ``target``
        values at these positions. The mask [0, 1, 2] in the return value shows that
        the first, second, and third elements are missing.
        Notice that the return value is a tuple contains two items. In the example
        below the first item is an array of locations in ``index``. The second
        item is a mask shows that the first and third elements are missing.
        >>> index = pd.Index(["c", "b", "a", "b", "b"])
        >>> index.get_indexer_non_unique(["f", "b", "s"])
        (array([-1,  1,  3,  4, -1]), array([0, 2]))
        """
        target = ensure_index(target)
        if not self._should_compare(target) and not self._should_partial_index(target):
            # e.g. IntervalIndex with different closed or incompatible subtype
            # -> no matches
            return self._get_indexer_non_comparable(target, None, unique=False)
        elif isinstance(target, IntervalIndex):
            if self.left.is_unique and self.right.is_unique:
                # fastpath available even if we don't have self._index_as_unique
                indexer = self._get_indexer_unique_sides(target)
                # Positions where neither endpoint pair matched.
                missing = (indexer == -1).nonzero()[0]
            else:
                return self._get_indexer_pointwise(target)
        elif is_object_dtype(target.dtype) or not self._should_partial_index(target):
            # target might contain intervals: defer elementwise to get_loc
            return self._get_indexer_pointwise(target)
        else:
            # Note: this case behaves differently from other Index subclasses
            # because IntervalIndex does partial-int indexing
            target = self._maybe_convert_i8(target)
            indexer, missing = self._engine.get_indexer_non_unique(target.values)
        return ensure_platform_int(indexer), ensure_platform_int(missing)
def _get_indexer_unique_sides(self, target: IntervalIndex) -> npt.NDArray[np.intp]:
"""
_get_indexer specialized to the case where both of our sides are unique.
"""
# Caller is responsible for checking
# `self.left.is_unique and self.right.is_unique`
left_indexer = self.left.get_indexer(target.left)
right_indexer = self.right.get_indexer(target.right)
indexer = np.where(left_indexer == right_indexer, left_indexer, -1)
return indexer
    def _get_indexer_pointwise(
        self, target: Index
    ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
        """
        pointwise implementation for get_indexer and get_indexer_non_unique.
        """
        indexer, missing = [], []
        for i, key in enumerate(target):
            try:
                locs = self.get_loc(key)
                # get_loc may return an int, a slice, or a boolean mask;
                # normalize each to an integer position array.
                if isinstance(locs, slice):
                    # Only needed for get_indexer_non_unique
                    locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp")
                elif lib.is_integer(locs):
                    locs = np.array(locs, ndmin=1)
                else:
                    # otherwise we have ndarray[bool]
                    locs = np.where(locs)[0]
            except KeyError:
                missing.append(i)
                locs = np.array([-1])
            except InvalidIndexError:
                # i.e. non-scalar key e.g. a tuple.
                # see test_append_different_columns_types_raises
                missing.append(i)
                locs = np.array([-1])
            indexer.append(locs)
        indexer = np.concatenate(indexer)
        return ensure_platform_int(indexer), ensure_platform_int(missing)
    @cache_readonly
    def _index_as_unique(self) -> bool:
        # Treat the index as unique for lookup purposes only when no intervals
        # overlap and fewer than two entries are missing.
        return not self.is_overlapping and self._engine._na_count < 2
    # NOTE(review): presumably surfaced by the base Index machinery when a
    # unique-only operation hits an index failing _index_as_unique — confirm.
    _requires_unique_msg = (
        "cannot handle overlapping indices; use IntervalIndex.get_indexer_non_unique"
    )
def _convert_slice_indexer(self, key: slice, kind: Literal["loc", "getitem"]):
if not (key.step is None or key.step == 1):
# GH#31658 if label-based, we require step == 1,
# if positional, we disallow float start/stop
msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
if kind == "loc":
raise ValueError(msg)
if kind == "getitem":
if not is_valid_positional_slice(key):
# i.e. this cannot be interpreted as a positional slice
raise ValueError(msg)
return super()._convert_slice_indexer(key, kind)
    @cache_readonly
    def _should_fallback_to_positional(self) -> bool:
        # integer lookups in Series.__getitem__ are unambiguously
        # positional in this case
        # error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any],
        # ExtensionDtype]" has no attribute "subtype"
        # "mM" = datetime64/timedelta64 subtypes, where an integer can never
        # be a label, so positional interpretation is safe.
        return self.dtype.subtype.kind in "mM"  # type: ignore[union-attr]
def _maybe_cast_slice_bound(self, label, side: str):
return getattr(self, side)._maybe_cast_slice_bound(label, side)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
if not isinstance(dtype, IntervalDtype):
return False
common_subtype = find_common_type([self.dtype, dtype])
return not is_object_dtype(common_subtype)
# --------------------------------------------------------------------
    @cache_readonly
    def left(self) -> Index:
        """
        Return left bounds of the intervals in the IntervalIndex.
        The left bounds of each interval in the IntervalIndex are
        returned as an Index. The datatype of the left bounds is the
        same as the datatype of the endpoints of the intervals.
        Returns
        -------
        Index
            An Index containing the left bounds of the intervals.
        See Also
        --------
        IntervalIndex.right : Return the right bounds of the intervals
            in the IntervalIndex.
        IntervalIndex.mid : Return the mid-point of the intervals in
            the IntervalIndex.
        IntervalIndex.length : Return the length of the intervals in
            the IntervalIndex.
        Examples
        --------
        >>> iv_idx = pd.IntervalIndex.from_arrays([1, 2, 3], [4, 5, 6], closed="right")
        >>> iv_idx.left
        Index([1, 2, 3], dtype='int64')
        >>> iv_idx = pd.IntervalIndex.from_tuples(
        ...     [(1, 4), (2, 5), (3, 6)], closed="left"
        ... )
        >>> iv_idx.left
        Index([1, 2, 3], dtype='int64')
        """
        # copy=False: wrap the underlying array's left bounds without copying.
        return Index(self._data.left, copy=False)
    @cache_readonly
    def right(self) -> Index:
        """
        Return right bounds of the intervals in the IntervalIndex.
        The right bounds of each interval in the IntervalIndex are
        returned as an Index. The datatype of the right bounds is the
        same as the datatype of the endpoints of the intervals.
        Returns
        -------
        Index
            An Index containing the right bounds of the intervals.
        See Also
        --------
        IntervalIndex.left : Return the left bounds of the intervals
            in the IntervalIndex.
        IntervalIndex.mid : Return the mid-point of the intervals in
            the IntervalIndex.
        IntervalIndex.length : Return the length of the intervals in
            the IntervalIndex.
        Examples
        --------
        >>> iv_idx = pd.IntervalIndex.from_arrays([1, 2, 3], [4, 5, 6], closed="right")
        >>> iv_idx.right
        Index([4, 5, 6], dtype='int64')
        >>> iv_idx = pd.IntervalIndex.from_tuples(
        ...     [(1, 4), (2, 5), (3, 6)], closed="left"
        ... )
        >>> iv_idx.right
        Index([4, 5, 6], dtype='int64')
        """
        # copy=False: wrap the underlying array's right bounds without copying.
        return Index(self._data.right, copy=False)
    @cache_readonly
    def mid(self) -> Index:
        """
        Return the midpoint of each interval in the IntervalIndex as an Index.
        Each midpoint is calculated as the average of the left and right bounds
        of each interval. The midpoints are returned as a pandas Index object.
        Returns
        -------
        pandas.Index
            An Index containing the midpoints of each interval.
        See Also
        --------
        IntervalIndex.left : Return the left bounds of the intervals
            in the IntervalIndex.
        IntervalIndex.right : Return the right bounds of the intervals
            in the IntervalIndex.
        IntervalIndex.length : Return the length of the intervals in
            the IntervalIndex.
        Notes
        -----
        The midpoint is the average of the interval bounds, potentially resulting
        in a floating-point number even if bounds are integers. The returned Index
        will have a dtype that accurately holds the midpoints. This computation is
        the same regardless of whether intervals are open or closed.
        Examples
        --------
        >>> iv_idx = pd.IntervalIndex.from_arrays([1, 2, 3], [4, 5, 6])
        >>> iv_idx.mid
        Index([2.5, 3.5, 4.5], dtype='float64')
        >>> iv_idx = pd.IntervalIndex.from_tuples([(1, 4), (2, 5), (3, 6)])
        >>> iv_idx.mid
        Index([2.5, 3.5, 4.5], dtype='float64')
        """
        # Midpoint computation lives on the backing IntervalArray.
        return Index(self._data.mid, copy=False)
    @property
    def length(self) -> Index:
        """
        Calculate the length of each interval in the IntervalIndex.
        This method returns a new Index containing the lengths of each interval
        in the IntervalIndex. The length of an interval is defined as the difference
        between its end and its start.
        Returns
        -------
        Index
            An Index containing the lengths of each interval.
        See Also
        --------
        Interval.length : Return the length of the Interval.
        Examples
        --------
        >>> intervals = pd.IntervalIndex.from_arrays(
        ...     [1, 2, 3], [4, 5, 6], closed="right"
        ... )
        >>> intervals.length
        Index([3, 3, 3], dtype='int64')
        >>> intervals = pd.IntervalIndex.from_tuples([(1, 5), (6, 10), (11, 15)])
        >>> intervals.length
        Index([4, 4, 4], dtype='int64')
        """
        # Length computation lives on the backing IntervalArray.
        return Index(self._data.length, copy=False)
# --------------------------------------------------------------------
# Set Operations
def _intersection(self, other, sort: bool = False):
"""
intersection specialized to the case with matching dtypes.
"""
# For IntervalIndex we also know other.closed == self.closed
if self.left.is_unique and self.right.is_unique:
taken = self._intersection_unique(other)
elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:
# Swap other/self if other is unique and self does not have
# multiple NaNs
taken = other._intersection_unique(self)
else:
# duplicates
taken = self._intersection_non_unique(other)
if sort:
taken = taken.sort_values()
return taken
def _intersection_unique(self, other: IntervalIndex) -> IntervalIndex:
"""
Used when the IntervalIndex does not have any common endpoint,
no matter left or right.
Return the intersection with another IntervalIndex.
Parameters
----------
other : IntervalIndex
Returns
-------
IntervalIndex
"""
# Note: this is much more performant than super()._intersection(other)
lindexer = self.left.get_indexer(other.left)
rindexer = self.right.get_indexer(other.right)
match = (lindexer == rindexer) & (lindexer != -1)
indexer = lindexer.take(match.nonzero()[0])
indexer = unique(indexer)
return self.take(indexer)
def _intersection_non_unique(self, other: IntervalIndex) -> IntervalIndex:
"""
Used when the IntervalIndex does have some common endpoints,
on either sides.
Return the intersection with another IntervalIndex.
Parameters
----------
other : IntervalIndex
Returns
-------
IntervalIndex
"""
# Note: this is about 3.25x faster than super()._intersection(other)
# in IntervalIndexMethod.time_intersection_both_duplicate(1000)
mask = np.zeros(len(self), dtype=bool)
if self.hasnans and other.hasnans:
first_nan_loc = np.arange(len(self))[self.isna()][0]
mask[first_nan_loc] = True
other_tups = set(zip(other.left, other.right, strict=True))
for i, tup in enumerate(zip(self.left, self.right, strict=True)):
if tup in other_tups:
mask[i] = True
return self[mask]
# --------------------------------------------------------------------
    def _get_engine_target(self) -> np.ndarray:
        # Note: we _could_ use libjoin functions by either casting to object
        # dtype or constructing tuples (faster than constructing Intervals)
        # but the libjoin fastpaths are no longer fast in these cases.
        # Deliberately unsupported: interval data never flows into an
        # IndexEngine, so there is no meaningful ndarray target to return.
        raise NotImplementedError(
            "IntervalIndex does not use libjoin fastpaths or pass values to "
            "IndexEngine objects"
        )
    def _from_join_target(self, result):
        # Counterpart of _get_engine_target: join fastpaths are unsupported.
        raise NotImplementedError("IntervalIndex does not use libjoin fastpaths")
# TODO: arithmetic operations
def _is_valid_endpoint(endpoint) -> bool:
    """
    Helper for interval_range to check if start/end are valid types.
    """
    # Acceptable endpoints: unspecified (None), plain numbers, or
    # datetime-like scalars.
    if endpoint is None or is_number(endpoint):
        return True
    return isinstance(endpoint, (Timestamp, Timedelta))
def _is_type_compatible(a, b) -> bool:
    """
    Helper for interval_range to check type compat of start/end/freq.
    """

    def _ts_like(x) -> bool:
        return isinstance(x, (Timestamp, BaseOffset))

    def _td_like(x) -> bool:
        return isinstance(x, (Timedelta, BaseOffset))

    # A missing value is compatible with anything.
    if com.any_none(a, b):
        return True
    return (
        (is_number(a) and is_number(b))
        or (_ts_like(a) and _ts_like(b))
        or (_td_like(a) and _td_like(b))
    )
@set_module("pandas")
def interval_range(
    start=None,
    end=None,
    periods=None,
    freq=None,
    name: Hashable | None = None,
    closed: IntervalClosedType = "right",
) -> IntervalIndex:
    """
    Return a fixed frequency IntervalIndex.
    Parameters
    ----------
    start : numeric or datetime-like, default None
        Left bound for generating intervals.
    end : numeric or datetime-like, default None
        Right bound for generating intervals.
    periods : int, default None
        Number of periods to generate.
    freq : numeric, str, Timedelta, datetime.timedelta, or DateOffset, default None
        The length of each interval. Must be consistent with the type of start
        and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
        for numeric and 'D' for datetime-like.
    name : str, default None
        Name of the resulting IntervalIndex.
    closed : {'left', 'right', 'both', 'neither'}, default 'right'
        Whether the intervals are closed on the left-side, right-side, both
        or neither.
    Returns
    -------
    IntervalIndex
        Object with a fixed frequency.
    See Also
    --------
    IntervalIndex : An Index of intervals that are all closed on the same side.
    Notes
    -----
    Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified. If ``freq`` is omitted, the resulting
    ``IntervalIndex`` will have ``periods`` linearly spaced elements between
    ``start`` and ``end``, inclusively.
    To learn more about datetime-like frequency strings, please see
    :ref:`this link<timeseries.offset_aliases>`.
    Examples
    --------
    Numeric ``start`` and  ``end`` is supported.
    >>> pd.interval_range(start=0, end=5)
    IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
                  dtype='interval[int64, right]')
    Additionally, datetime-like input is also supported.
    >>> pd.interval_range(
    ...     start=pd.Timestamp("2017-01-01"), end=pd.Timestamp("2017-01-04")
    ... )
    IntervalIndex([(2017-01-01 00:00:00, 2017-01-02 00:00:00],
                   (2017-01-02 00:00:00, 2017-01-03 00:00:00],
                   (2017-01-03 00:00:00, 2017-01-04 00:00:00]],
                  dtype='interval[datetime64[us], right]')
    The ``freq`` parameter specifies the frequency between the left and right
    endpoints of the individual intervals within the ``IntervalIndex``. For
    numeric ``start`` and ``end``, the frequency must also be numeric.
    >>> pd.interval_range(start=0, periods=4, freq=1.5)
    IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
                  dtype='interval[float64, right]')
    Similarly, for datetime-like ``start`` and ``end``, the frequency must be
    convertible to a DateOffset.
    >>> pd.interval_range(start=pd.Timestamp("2017-01-01"), periods=3, freq="MS")
    IntervalIndex([(2017-01-01 00:00:00, 2017-02-01 00:00:00],
                   (2017-02-01 00:00:00, 2017-03-01 00:00:00],
                   (2017-03-01 00:00:00, 2017-04-01 00:00:00]],
                  dtype='interval[datetime64[us], right]')
    Specify ``start``, ``end``, and ``periods``; the frequency is generated
    automatically (linearly spaced).
    >>> pd.interval_range(start=0, end=6, periods=4)
    IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
                  dtype='interval[float64, right]')
    The ``closed`` parameter specifies which endpoints of the individual
    intervals within the ``IntervalIndex`` are closed.
    >>> pd.interval_range(end=5, periods=4, closed="both")
    IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
                  dtype='interval[int64, both]')
    """
    # Normalize stdlib datetime/timedelta scalars into pandas equivalents.
    start = maybe_box_datetimelike(start)
    end = maybe_box_datetimelike(end)
    endpoint = start if start is not None else end
    # Supply a default freq (1 for numeric, daily for datetime-like) only when
    # it is needed to complete the three-of-four specification.
    if freq is None and com.any_none(periods, start, end):
        freq = 1 if is_number(endpoint) else "D"
    if com.count_not_none(start, end, periods, freq) != 3:
        raise ValueError(
            "Of the four parameters: start, end, periods, and "
            "freq, exactly three must be specified"
        )
    if not _is_valid_endpoint(start):
        raise ValueError(f"start must be numeric or datetime-like, got {start}")
    if not _is_valid_endpoint(end):
        raise ValueError(f"end must be numeric or datetime-like, got {end}")
    periods = validate_periods(periods)
    if freq is not None and not is_number(freq):
        try:
            freq = to_offset(freq)
        except ValueError as err:
            raise ValueError(
                f"freq must be numeric or convertible to DateOffset, got {freq}"
            ) from err
    # verify type compatibility
    if not all(
        [
            _is_type_compatible(start, end),
            _is_type_compatible(start, freq),
            _is_type_compatible(end, freq),
        ]
    ):
        raise TypeError("start, end, freq need to be type compatible")
    # +1 to convert interval count to breaks count (n breaks = n-1 intervals)
    if periods is not None:
        periods += 1
    breaks: np.ndarray | TimedeltaIndex | DatetimeIndex
    if is_number(endpoint):
        dtype: np.dtype = np.dtype("int64")
        if com.all_not_none(start, end, freq):
            # Preserve the endpoints' numpy dtype when they agree; otherwise
            # any float input forces a float64 result.
            if (
                isinstance(start, (np.integer, np.floating))
                and isinstance(end, (np.integer, np.floating))
                and start.dtype == end.dtype
            ):
                dtype = start.dtype
            elif (
                isinstance(start, (float, np.floating))
                or isinstance(end, (float, np.floating))
                or isinstance(freq, (float, np.floating))
            ):
                dtype = np.dtype("float64")
            # 0.1 ensures we capture end
            breaks = np.arange(start, end + (freq * 0.1), freq)
            breaks = maybe_downcast_numeric(breaks, dtype)
        else:
            # compute the period/start/end if unspecified (at most one)
            if periods is None:
                periods = int((end - start) // freq) + 1
            elif start is None:
                start = end - (periods - 1) * freq
            elif end is None:
                end = start + (periods - 1) * freq
            breaks = np.linspace(start, end, periods)
            if all(is_integer(x) for x in com.not_none(start, end, freq)):
                # np.linspace always produces float output
                breaks = maybe_downcast_numeric(breaks, dtype)
    else:
        # delegate to the appropriate range function
        if isinstance(endpoint, Timestamp):
            breaks = date_range(start=start, end=end, periods=periods, freq=freq)
        else:
            breaks = timedelta_range(start=start, end=end, periods=periods, freq=freq)
    return IntervalIndex.from_breaks(
        breaks,
        name=name,
        closed=closed,
        dtype=IntervalDtype(subtype=breaks.dtype, closed=closed),
    )
| IntervalIndex |
python | pytorch__pytorch | test/distributed/test_dist2.py | {
"start": 512,
"end": 1422
} | class ____(TestCase):
def test_context_manager(self):
os.environ["RANK"] = str(0)
os.environ["WORLD_SIZE"] = str(1)
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = "29500"
pg1 = dist2.new_group(
backend="gloo",
timeout=timedelta(seconds=60),
device="cpu",
)
pg2 = dist2.new_group(
backend="gloo",
timeout=timedelta(seconds=60),
device="cpu",
)
self.assertIsNone(dist2.current_process_group())
with dist2.process_group(pg1):
self.assertIs(dist2.current_process_group(), pg1)
with dist2.process_group(pg2):
self.assertIs(dist2.current_process_group(), pg2)
self.assertIs(dist2.current_process_group(), pg1)
self.assertIsNone(dist2.current_process_group())
| ProcessGroupTest |
python | numba__numba | numba/misc/help/inspector.py | {
"start": 5602,
"end": 5855
} | class ____(object):
"""Base class for formatters.
"""
def __init__(self, fileobj):
self._fileobj = fileobj
def print(self, *args, **kwargs):
kwargs.setdefault('file', self._fileobj)
print(*args, **kwargs)
| Formatter |
python | dask__distributed | distributed/worker_state_machine.py | {
"start": 5055,
"end": 11136
} | class ____:
"""Holds volatile state relating to an individual Dask task.
Not to be confused with :class:`distributed.scheduler.TaskState`, which holds
similar information on the scheduler side.
"""
#: Task key. Mandatory.
key: Key
#: Task prefix (leftmost part of the key)
prefix: str = field(init=False)
#: Task run ID.
run_id: int = RUN_ID_SENTINEL
#: A tuple containing the ``function``, ``args``, ``kwargs`` and ``task``
#: associated with this `TaskState` instance. This defaults to ``None`` and can
#: remain empty if it is a dependency that this worker will receive from another
#: worker.
run_spec: T_runspec | None = None
#: The data needed by this key to run
dependencies: set[TaskState] = field(default_factory=set)
#: The keys that use this dependency
dependents: set[TaskState] = field(default_factory=set)
#: Subset of dependencies that are not in memory
waiting_for_data: set[TaskState] = field(default_factory=set)
#: Subset of dependents that are not in memory
waiters: set[TaskState] = field(default_factory=set)
#: The current state of the task
state: TaskStateState = "released"
#: The previous state of the task. It is not None iff :attr:`state` in
#: (cancelled, resumed).
previous: Literal["executing", "long-running", "flight", None] = None
#: The next state of the task. It is not None iff :attr:`state` == resumed.
next: Literal["fetch", "waiting", None] = None
#: The priority this task given by the scheduler. Determines run order.
priority: tuple[int, ...] | None = None
#: Addresses of workers that we believe have this data
who_has: set[str] = field(default_factory=set)
#: The worker that current task data is coming from if task is in flight
coming_from: str | None = None
#: Abstract resources required to run a task
resource_restrictions: dict[str, float] = field(default_factory=dict)
#: The exception caused by running a task if it erred (serialized)
exception: Serialize | None = None
#: The traceback caused by running a task if it erred (serialized)
traceback: Serialize | None = None
#: string representation of exception
exception_text: str = ""
#: string representation of traceback
traceback_text: str = ""
#: The type of a particular piece of data
type: type | None = None
#: The number of times a dependency has not been where we expected it
suspicious_count: int = 0
#: Log of transfer, load, and compute times for a task
startstops: list[StartStop] = field(default_factory=list)
#: Time at which task begins running
start_time: float | None = None
#: Time at which task finishes running
stop_time: float | None = None
#: Metadata related to the task.
#: Stored metadata should be msgpack serializable (e.g. int, string, list, dict).
metadata: dict = field(default_factory=dict)
#: The size of the value of the task, if in memory
nbytes: int | None = None
#: Arbitrary task annotations
annotations: dict | None = None
#: unique span id (see ``distributed.spans``).
#: Matches ``distributed.scheduler.TaskState.group.span_id``.
span_id: str | None = None
#: True if the :meth:`~WorkerBase.execute` or :meth:`~WorkerBase.gather_dep`
#: coroutine servicing this task completed; False otherwise. This flag changes
#: the behaviour of transitions out of the ``executing``, ``flight`` etc. states.
done: bool = False
_instances: ClassVar[weakref.WeakSet[TaskState]] = weakref.WeakSet()
# Support for weakrefs to a class with __slots__
__weakref__: Any = field(init=False)
def __post_init__(self) -> None:
TaskState._instances.add(self)
self.prefix = key_split(self.key)
def __repr__(self) -> str:
if self.state == "cancelled":
state = f"cancelled({self.previous})"
elif self.state == "resumed":
state = f"resumed({self.previous}->{self.next})"
else:
state = self.state
return f"<TaskState {self.key!r} {state}>"
def __hash__(self) -> int:
"""Override dataclass __hash__, reverting to the default behaviour
hash(o) == id(o).
Note that we also defined @dataclass(eq=False), which reverts to the default
behaviour (a == b) == (a is b).
On first thought, it would make sense to use TaskState.key for equality and
hashing. However, a task may be forgotten and a new TaskState object with the
same key may be created in its place later on. In the Worker state, you should
never have multiple TaskState objects with the same key; see
WorkerState.validate_state for relevant checks. We can't assert the same thing
in __eq__ though, as multiple objects with the same key may appear in
TaskState._instances for a brief period of time.
"""
return id(self)
def get_nbytes(self) -> int:
nbytes = self.nbytes
return nbytes if nbytes is not None else _default_data_size()
def _to_dict_no_nest(self, *, exclude: Container[str] = ()) -> dict:
"""Dictionary representation for debugging purposes.
Not type stable and not intended for roundtrips.
See also
--------
Client.dump_cluster_state
distributed.utils.recursive_to_dict
Notes
-----
This class uses ``_to_dict_no_nest`` instead of ``_to_dict``.
When a task references another task, just print the task repr. All tasks
should neatly appear under Worker.state.tasks. This also prevents a RecursionError
during particularly heavy loads, which have been observed to happen whenever
there's an acyclic dependency chain of ~200+ tasks.
"""
out = recursive_to_dict(self, exclude=exclude, members=True)
# Remove all Nones, empty containers, and derived attributes
return {k: v for k, v in out.items() if v and k != "prefix"}
@dataclass
| TaskState |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/taint_in_taint_out.py | {
"start": 1084,
"end": 1352
} | class ____:
def __init__(self, select):
self.init_list = [
f[0].target.attname for f in select[self.cols_start : self.cols_end]
]
self.related = get_related(select)
def get_related(select):
return Recursive(select)
| Recursive |
python | facebook__pyre-check | client/commands/tests/language_server_test.py | {
"start": 57503,
"end": 65811
} | class ____(ApiTestCase):
@setup.async_test
async def test_did_change__basic(self) -> None:
tracked_path = Path("/tracked.py")
for telemetry in (
features.TelemetryAvailability.ENABLED,
features.TelemetryAvailability.DISABLED,
):
querier = server_setup.MockDaemonQuerier()
setup = server_setup.create_pyre_language_server_api_setup(
opened_documents={
tracked_path: state.OpenedDocumentState(
code=server_setup.DEFAULT_FILE_CONTENTS
)
},
querier=querier,
server_options=server_setup.create_server_options(
language_server_features=features.LanguageServerFeatures(
telemetry=telemetry
),
),
)
api = setup.api
output_writer = setup.output_writer
await api.process_did_change_request(
parameters=lsp.DidChangeTextDocumentParameters(
text_document=lsp.TextDocumentIdentifier(
uri=lsp.DocumentUri.from_file_path(tracked_path).unparse(),
),
content_changes=[lsp.ContentChange(text="reveal_type(1)")],
),
)
# When unsaved changes is not enabled, we should send no requests.
self.assertEqual(
querier.requests,
[],
)
if telemetry.is_enabled():
expectations = [
self._expect_telemetry_event(
operation="didChange",
result=None,
),
]
else:
expectations = []
self._assert_output_messages(
output_writer,
expectations,
)
@setup.async_test
async def test_did_change__with_type_errors(self) -> None:
unsaved_file_content = "# some example code"
tracked_path = Path("/tracked.py")
for telemetry in (
features.TelemetryAvailability.ENABLED,
features.TelemetryAvailability.DISABLED,
):
querier = server_setup.MockDaemonQuerier(
mock_type_errors={
tracked_path: [
error.Error(
line=1,
column=1,
stop_line=2,
stop_column=2,
path=Path("/tracked.py"),
code=42,
name="name",
description="description",
),
]
}
)
setup = server_setup.create_pyre_language_server_api_setup(
opened_documents={
tracked_path: state.OpenedDocumentState(
code=unsaved_file_content,
)
},
querier=querier,
server_options=server_setup.create_server_options(
language_server_features=features.LanguageServerFeatures(
unsaved_changes=features.UnsavedChangesAvailability.ENABLED,
telemetry=telemetry,
),
),
)
api = setup.api
output_writer = setup.output_writer
await api.process_did_change_request(
parameters=lsp.DidChangeTextDocumentParameters(
text_document=lsp.TextDocumentIdentifier(
uri=lsp.DocumentUri.from_file_path(tracked_path).unparse(),
),
content_changes=[lsp.ContentChange(text=unsaved_file_content)],
),
)
# When unsaved changes is not enabled, we should send no requests.
self.assertEqual(
querier.requests,
[
{"path": tracked_path, "code": unsaved_file_content},
],
)
expect_diagnostics = self._expect_diagnostics(
uri="file:///tracked.py",
diagnostics=[
lsp.Diagnostic(
range=lsp.LspRange(
start=lsp.LspPosition(line=0, character=1),
end=lsp.LspPosition(line=1, character=2),
),
message="description",
severity=lsp.DiagnosticSeverity.ERROR,
code="name [42]",
source="Pyre",
code_description=lsp.CodeDescription(
href="https://pyre-check.org/docs/errors/#42-missing-overload-implementation"
),
)
],
)
if telemetry.is_enabled():
expectations = [
self._expect_telemetry_event(
operation="didChange",
result=None,
),
expect_diagnostics,
self._expect_telemetry_event(
operation="typeErrors",
result=None,
),
]
else:
expectations = [expect_diagnostics]
self._assert_output_messages(
output_writer,
expectations,
)
@setup.async_test
async def test_did_change__no_type_errors(self) -> None:
tracked_path = Path("/tracked.py")
for telemetry in (
features.TelemetryAvailability.ENABLED,
features.TelemetryAvailability.DISABLED,
):
querier = server_setup.MockDaemonQuerier(
mock_type_errors={},
)
setup = server_setup.create_pyre_language_server_api_setup(
opened_documents={
tracked_path: state.OpenedDocumentState(
code=server_setup.DEFAULT_FILE_CONTENTS
)
},
querier=querier,
server_options=server_setup.create_server_options(
language_server_features=features.LanguageServerFeatures(
unsaved_changes=features.UnsavedChangesAvailability.ENABLED,
telemetry=telemetry,
),
),
)
api = setup.api
output_writer = setup.output_writer
await api.process_did_change_request(
parameters=lsp.DidChangeTextDocumentParameters(
text_document=lsp.TextDocumentIdentifier(
uri=lsp.DocumentUri.from_file_path(tracked_path).unparse(),
),
content_changes=[
lsp.ContentChange(text=server_setup.DEFAULT_FILE_CONTENTS)
],
),
)
# When unsaved changes is not enabled, we should send no requests.
self.assertEqual(
querier.requests,
[
{"path": tracked_path, "code": server_setup.DEFAULT_FILE_CONTENTS},
],
)
expect_diagnostics = self._expect_diagnostics(
uri="file:///tracked.py",
diagnostics=[],
)
if telemetry.is_enabled():
expectations = [
self._expect_telemetry_event(
operation="didChange",
result=None,
),
expect_diagnostics,
self._expect_telemetry_event(
operation="typeErrors",
result=None,
),
]
else:
expectations = [expect_diagnostics]
self._assert_output_messages(
output_writer,
expectations,
)
| DidChangeTest |
python | xlwings__xlwings | tests/reports/test_report.py | {
"start": 1805,
"end": 5102
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.wb = render_template(
this_dir / "template1.xlsx", this_dir / "output.xlsx", **data
)
@classmethod
def tearDownClass(cls):
xw.Book("output.xlsx").app.quit()
def test_string(self):
self.assertEqual(self.wb.sheets[0]["A1"].value, "stringtest")
def test_float(self):
self.assertAlmostEqual(self.wb.sheets[0]["B1"].value, 12.12)
def test_substring(self):
self.assertEqual(
self.wb.sheets[0]["C1"].value, "This is text with a substringtest."
)
def test_df(self):
assert_frame_equal(
self.wb.sheets[0]["A2"].options(pd.DataFrame, expand="table").value,
df1.reset_index().set_index("index"),
)
def test_df_table(self):
df = self.wb.sheets["Sheet4"]["A1"].options(pd.DataFrame, expand="table").value
df.index.name = None
assert_frame_equal(df, df1)
self.assertIsNotNone(self.wb.sheets["Sheet4"]["A1"].table)
def test_var_operations(self):
assert_array_equal(
self.wb.sheets[1]["A1"].options(np.array, expand="table", ndim=2).value,
data["mydict"]["df"][:1].values,
)
def test_picture(self):
self.assertEqual(
self.wb.sheets[1].pictures[0].top, self.wb.sheets[1]["A17"].top
)
self.assertEqual(
self.wb.sheets[1].pictures[0].left, self.wb.sheets[1]["A17"].left
)
def test_matplotlib(self):
self.assertAlmostEqual(
self.wb.sheets[1].pictures[1].top, self.wb.sheets[1]["B33"].top, places=2
)
self.assertAlmostEqual(
self.wb.sheets[1].pictures[1].left, self.wb.sheets[1]["B33"].left, places=2
)
def test_used_range(self):
self.assertEqual(
self.wb.sheets[2]["B11"].value, "This is text with a substringtest."
)
self.assertEqual(self.wb.sheets[2]["A1"].value, None)
def test_different_vars_at_either_end(self):
self.assertEqual(self.wb.sheets[0]["I1"].value, "stringtest vs. stringtest")
def test_shape_text(self):
self.assertEqual(
self.wb.sheets[4].shapes["TextBox 1"].text,
"This is no template. So the formatting should be left untouched.",
)
self.assertEqual(
self.wb.sheets[4].shapes["Oval 2"].text, "This shows stringtest."
)
self.assertEqual(
self.wb.sheets[4].shapes["TextBox 3"].text, "This shows stringtest."
)
self.assertEqual(self.wb.sheets[4].shapes["TextBox 4"].text, "stringtest")
self.assertIsNone(self.wb.sheets[4].shapes["Oval 5"].text)
def test_markdown_cell(self):
self.assertEqual(
self.wb.sheets["Sheet6"]["A1"].value,
"Title\nText bold and italic\n\n• a first bullet\n• a second bullet\n\n"
"Another title\nthis has a line break\nnew line",
)
def test_markdown_shape(self):
self.assertEqual(
self.wb.sheets["Sheet6"].shapes[0].text,
"Title\nText bold and italic\n\n• a first bullet\n• a second bullet\n\n"
"Another title\nthis has a line break\nnew line",
)
| TestCreateReport |
python | ray-project__ray | python/ray/data/_internal/datasource/mcap_datasource.py | {
"start": 803,
"end": 1580
} | class ____:
"""Time range for filtering MCAP messages.
Attributes:
start_time: Start time in nanoseconds (inclusive).
end_time: End time in nanoseconds (exclusive).
"""
start_time: int
end_time: int
def __post_init__(self):
"""Validate time range after initialization."""
if self.start_time >= self.end_time:
raise ValueError(
f"start_time ({self.start_time}) must be less than "
f"end_time ({self.end_time})"
)
if self.start_time < 0 or self.end_time < 0:
raise ValueError(
f"time values must be non-negative, got start_time={self.start_time}, "
f"end_time={self.end_time}"
)
@DeveloperAPI
| TimeRange |
python | walkccc__LeetCode | solutions/1604. Alert Using Same Key-Card Three or More Times in a One Hour Period/1604.py | {
"start": 0,
"end": 722
} | class ____:
def alertNames(self, keyName: list[str], keyTime: list[str]) -> list[str]:
nameToMinutes = collections.defaultdict(list)
for name, time in zip(keyName, keyTime):
minutes = self._getMinutes(time)
nameToMinutes[name].append(minutes)
return sorted([name for name, minutes in nameToMinutes.items()
if self._hasAlert(minutes)])
def _hasAlert(self, minutes: list[int]) -> bool:
if len(minutes) > 70:
return True
minutes.sort()
for i in range(2, len(minutes)):
if minutes[i - 2] + 60 >= minutes[i]:
return True
return False
def _getMinutes(self, time: str) -> int:
h, m = map(int, time.split(':'))
return 60 * h + m
| Solution |
python | aimacode__aima-python | learning4e.py | {
"start": 18703,
"end": 19164
} | class ____:
"""k-NearestNeighbor: the k nearest neighbors vote."""
def __init__(self, dataset, k=1):
self.dataset = dataset
self.k = k
def predict(self, example):
"""Find the k closest items, and have them vote for the best."""
best = heapq.nsmallest(self.k, ((self.dataset.distance(e, example), e) for e in self.dataset.examples))
return mode(e[self.dataset.target] for (d, e) in best)
| NearestNeighborLearner |
python | getsentry__sentry | fixtures/safe_migrations_apps/good_flow_delete_pending_with_fk_constraints_app/migrations/0002_remove_constraints_and_pending.py | {
"start": 252,
"end": 968
} | class ____(CheckedMigration):
atomic = False
dependencies = [
("good_flow_delete_pending_with_fk_constraints_app", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="TestTable",
name="fk_table",
field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="good_flow_delete_pending_with_fk_constraints_app.fktable",
db_index=False,
db_constraint=False,
),
),
SafeDeleteModel(
name="TestTable",
deletion_action=DeletionAction.MOVE_TO_PENDING,
),
]
| Migration |
python | miyuchina__mistletoe | mistletoe/markdown_renderer.py | {
"start": 1177,
"end": 1747
} | class ____(block_token.Footnote):
"""
A sequence of link reference definitions.
This is a leaf block token. Its children are link reference definition tokens.
This class inherits from `Footnote` and modifies the behavior of the constructor,
to keep the tokens in the AST.
"""
def __new__(cls, *args, **kwargs):
obj = object.__new__(cls)
obj.__init__(*args, **kwargs)
return obj
def __init__(self, matches):
self.children = [LinkReferenceDefinition(match) for match in matches]
| LinkReferenceDefinitionBlock |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_include.py | {
"start": 1164,
"end": 1407
} | class ____(HasProps):
x = Int(12)
y = String("hello")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
| IsDelegate |
python | doocs__leetcode | solution/0700-0799/0716.Max Stack/Solution.py | {
"start": 0,
"end": 156
} | class ____:
def __init__(self, val=0):
self.val = val
self.prev: Union[Node, None] = None
self.next: Union[Node, None] = None
| Node |
python | getsentry__sentry | tests/sentry/utils/security/test_encrypted_field_key_store.py | {
"start": 12785,
"end": 15653
} | class ____:
def test_get_primary_fernet_returns_tuple(self, fernet_keys_store: tuple[str, bytes]) -> None:
"""Test get_primary_fernet returns (key_id, Fernet) tuple."""
key_id, _fernet_key = fernet_keys_store
with override_settings(
DATABASE_ENCRYPTION_SETTINGS={
"fernet_primary_key_id": key_id,
}
):
returned_key_id, fernet = FernetKeyStore.get_primary_fernet()
assert returned_key_id == key_id
def test_get_primary_fernet_raises_when_not_configured(self) -> None:
"""Test get_primary_fernet raises error when primary key ID not configured."""
with override_settings(DATABASE_ENCRYPTION_SETTINGS={}):
with pytest.raises(ValueError, match="Fernet primary key ID is not configured"):
FernetKeyStore.get_primary_fernet()
def test_get_primary_fernet_raises_when_key_not_found(
self, fernet_keys_store: tuple[str, bytes]
) -> None:
"""Test get_primary_fernet raises error when primary key doesn't exist."""
_key_id, _fernet_key = fernet_keys_store
with override_settings(
DATABASE_ENCRYPTION_SETTINGS={
"fernet_primary_key_id": "nonexistent_primary",
}
):
with pytest.raises(
ValueError, match="Encryption key with ID 'nonexistent_primary' not found"
):
FernetKeyStore.get_primary_fernet()
def test_get_primary_fernet_with_multiple_keys(
self, multi_fernet_keys_store: dict[str, bytes]
) -> None:
"""Test get_primary_fernet returns correct key when multiple keys exist."""
with override_settings(
DATABASE_ENCRYPTION_SETTINGS={
"fernet_primary_key_id": "key_secondary",
}
):
key_id, fernet = FernetKeyStore.get_primary_fernet()
assert key_id == "key_secondary"
assert isinstance(fernet, Fernet)
def test_get_primary_fernet_auto_loads_if_needed(self, temp_keys_dir: Path) -> None:
"""Test get_primary_fernet auto-loads keys if not already loaded."""
key = Fernet.generate_key()
(temp_keys_dir / "primary").write_text(key.decode("utf-8"))
with override_settings(
DATABASE_ENCRYPTION_SETTINGS={
"fernet_keys_location": str(temp_keys_dir),
"fernet_primary_key_id": "primary",
}
):
# Keys not loaded yet
assert FernetKeyStore._is_loaded is False
key_id, fernet = FernetKeyStore.get_primary_fernet()
# Should have auto-loaded
assert FernetKeyStore._is_loaded is True
assert key_id == "primary" # type: ignore[unreachable]
assert isinstance(fernet, Fernet)
| TestGetPrimaryFernet |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/selectable.py | {
"start": 238075,
"end": 242624
} | class ____(UnaryExpression[bool]):
"""Represent an ``EXISTS`` clause.
See :func:`_sql.exists` for a description of usage.
An ``EXISTS`` clause can also be constructed from a :func:`_sql.select`
instance by calling :meth:`_sql.SelectBase.exists`.
"""
inherit_cache = True
def __init__(
self,
__argument: Optional[
Union[_ColumnsClauseArgument[Any], SelectBase, ScalarSelect[Any]]
] = None,
/,
):
s: ScalarSelect[Any]
# TODO: this seems like we should be using coercions for this
if __argument is None:
s = Select(literal_column("*")).scalar_subquery()
elif isinstance(__argument, SelectBase):
s = __argument.scalar_subquery()
s._propagate_attrs = __argument._propagate_attrs
elif isinstance(__argument, ScalarSelect):
s = __argument
else:
s = Select(__argument).scalar_subquery()
UnaryExpression.__init__(
self,
s,
operator=operators.exists,
type_=type_api.BOOLEANTYPE,
)
@util.ro_non_memoized_property
def _from_objects(self) -> List[FromClause]:
return []
def _regroup(
self,
fn: Callable[[Select[Unpack[TupleAny]]], Select[Unpack[TupleAny]]],
) -> ScalarSelect[Any]:
assert isinstance(self.element, ScalarSelect)
element = self.element.element
if not isinstance(element, Select):
raise exc.InvalidRequestError(
"Can only apply this operation to a plain SELECT construct"
)
new_element = fn(element)
return_value = new_element.scalar_subquery()
return return_value
def select(self) -> Select[bool]:
r"""Return a SELECT of this :class:`_expression.Exists`.
e.g.::
stmt = exists(some_table.c.id).where(some_table.c.id == 5).select()
This will produce a statement resembling:
.. sourcecode:: sql
SELECT EXISTS (SELECT id FROM some_table WHERE some_table = :param) AS anon_1
.. seealso::
:func:`_expression.select` - general purpose
method which allows for arbitrary column lists.
""" # noqa
return Select(self)
def correlate(
self,
*fromclauses: Union[Literal[None, False], _FromClauseArgument],
) -> Self:
"""Apply correlation to the subquery noted by this
:class:`_sql.Exists`.
.. seealso::
:meth:`_sql.ScalarSelect.correlate`
"""
e = self._clone()
e.element = self._regroup(
lambda element: element.correlate(*fromclauses)
)
return e
def correlate_except(
self,
*fromclauses: Union[Literal[None, False], _FromClauseArgument],
) -> Self:
"""Apply correlation to the subquery noted by this
:class:`_sql.Exists`.
.. seealso::
:meth:`_sql.ScalarSelect.correlate_except`
"""
e = self._clone()
e.element = self._regroup(
lambda element: element.correlate_except(*fromclauses)
)
return e
def select_from(self, *froms: _FromClauseArgument) -> Self:
"""Return a new :class:`_expression.Exists` construct,
applying the given
expression to the :meth:`_expression.Select.select_from`
method of the select
statement contained.
.. note:: it is typically preferable to build a :class:`_sql.Select`
statement first, including the desired WHERE clause, then use the
:meth:`_sql.SelectBase.exists` method to produce an
:class:`_sql.Exists` object at once.
"""
e = self._clone()
e.element = self._regroup(lambda element: element.select_from(*froms))
return e
def where(self, *clause: _ColumnExpressionArgument[bool]) -> Self:
"""Return a new :func:`_expression.exists` construct with the
given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
.. note:: it is typically preferable to build a :class:`_sql.Select`
statement first, including the desired WHERE clause, then use the
:meth:`_sql.SelectBase.exists` method to produce an
:class:`_sql.Exists` object at once.
"""
e = self._clone()
e.element = self._regroup(lambda element: element.where(*clause))
return e
| Exists |
python | sqlalchemy__sqlalchemy | test/aaa_profiling/test_memusage.py | {
"start": 12337,
"end": 33088
} | class ____(fixtures.MappedTest, EnsureZeroed):
__requires__ = "cpython", "posix", "memory_process_intensive", "no_asyncio"
__sparse_backend__ = True
# ensure a pure growing test trips the assertion
@testing.fails_if(lambda: True)
def test_fixture(self):
class Foo:
pass
x = []
@profile_memory(maxtimes=10)
def go():
x[-1:] = [Foo(), Foo(), Foo(), Foo(), Foo(), Foo()]
go()
def test_session(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
metadata.create_all(self.engine)
m1 = self.mapper_registry.map_imperatively(
A,
table1,
properties={
"bs": relationship(
B, cascade="all, delete", order_by=table2.c.col1
)
},
)
m2 = self.mapper_registry.map_imperatively(B, table2)
@profile_memory()
def go():
with Session(self.engine) as sess:
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.commit()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.commit()
go()
metadata.drop_all(self.engine)
del m1, m2
assert_no_mappers()
def test_sessionmaker(self):
@profile_memory()
def go():
sessmaker = sessionmaker(bind=self.engine)
sess = sessmaker()
r = sess.execute(select(1))
r.close()
sess.close()
del sess
del sessmaker
go()
@testing.emits_warning("Compiled statement cache for mapper.*")
@testing.emits_warning("Compiled statement cache for lazy loader.*")
@testing.crashes("sqlite", ":memory: connection not suitable here")
def test_orm_many_engines(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
metadata.create_all(self.engine)
m1 = self.mapper_registry.map_imperatively(
A,
table1,
properties={
"bs": relationship(
B, cascade="all, delete", order_by=table2.c.col1
)
},
_compiled_cache_size=50,
)
m2 = self.mapper_registry.map_imperatively(
B, table2, _compiled_cache_size=50
)
@profile_memory()
def go():
engine = engines.testing_engine(
options={
"logging_name": "FOO",
"pool_logging_name": "BAR",
"use_reaper": False,
}
)
with Session(engine) as sess:
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.commit()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.commit()
engine.dispose()
go()
metadata.drop_all(self.engine)
del m1, m2
assert_no_mappers()
@testing.emits_warning("Compiled statement cache for.*")
def test_many_updates(self):
metadata = MetaData()
wide_table = Table(
"t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
*[Column("col%d" % i, Integer) for i in range(10)],
)
class Wide:
pass
self.mapper_registry.map_imperatively(
Wide, wide_table, _compiled_cache_size=10
)
metadata.create_all(self.engine)
with Session(self.engine) as session:
w1 = Wide()
session.add(w1)
session.commit()
del session
counter = [1]
@profile_memory()
def go():
with Session(self.engine) as session:
w1 = session.query(Wide).first()
x = counter[0]
dec = 10
while dec > 0:
# trying to count in binary here,
# works enough to trip the test case
if pow(2, dec) < x:
setattr(w1, "col%d" % dec, counter[0])
x -= pow(2, dec)
dec -= 1
session.commit()
counter[0] += 1
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.requires.savepoints
def test_savepoints(self):
metadata = MetaData()
some_table = Table(
"t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
class SomeClass:
pass
self.mapper_registry.map_imperatively(SomeClass, some_table)
metadata.create_all(self.engine)
with Session(self.engine) as session:
target_strings = (
session.connection().dialect.identifier_preparer._strings
)
@profile_memory(
assert_no_sessions=False,
get_num_objects=lambda: len(target_strings),
)
def go():
with Session(self.engine) as session, session.begin():
sc = SomeClass()
session.add(sc)
with session.begin_nested():
session.query(SomeClass).first()
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.crashes("mysql+cymysql", "blocking")
def test_unicode_warnings(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", Unicode(30)),
)
metadata.create_all(self.engine)
i = [1]
# the times here is cranked way up so that we can see
# pysqlite clearing out its internal buffer and allow
# the test to pass
@testing.emits_warning()
@profile_memory()
def go():
# execute with a non-unicode object. a warning is emitted,
# this warning shouldn't clog up memory.
with self.engine.connect() as conn:
conn.execute(
table1.select().where(table1.c.col2 == "foo%d" % i[0])
)
i[0] += 1
try:
go()
finally:
metadata.drop_all(self.engine)
def test_warnings_util(self):
counter = itertools.count()
import warnings
warnings.filterwarnings("ignore", "memusage warning.*")
@profile_memory()
def go():
util.warn_limited(
"memusage warning, param1: %s, param2: %s",
(next(counter), next(counter)),
)
go()
def test_mapper_reset(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
@profile_memory()
def go():
self.mapper_registry.map_imperatively(
A,
table1,
properties={"bs": relationship(B, order_by=table2.c.col1)},
)
self.mapper_registry.map_imperatively(B, table2)
sess = Session(self.engine, autoflush=False)
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.flush()
sess.close()
clear_mappers()
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_alias_pathing(self):
metadata = MetaData()
a = Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("bid", Integer, ForeignKey("b.id")),
Column("type", String(30)),
)
asub = Table(
"asub",
metadata,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("data", String(30)),
)
b = Table(
"b",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
self.mapper_registry.map_imperatively(
A, a, polymorphic_identity="a", polymorphic_on=a.c.type
)
self.mapper_registry.map_imperatively(
ASub, asub, inherits=A, polymorphic_identity="asub"
)
self.mapper_registry.map_imperatively(
B, b, properties={"as_": relationship(A)}
)
metadata.create_all(self.engine)
sess = Session(self.engine)
a1 = ASub(data="a1")
a2 = ASub(data="a2")
a3 = ASub(data="a3")
b1 = B(as_=[a1, a2, a3])
sess.add(b1)
sess.commit()
del sess
# sqlite has a slow enough growth here
# that we have to run it more times to see the
# "dip" again
@profile_memory(maxtimes=120)
def go():
sess = Session(self.engine)
sess.query(B).options(subqueryload(B.as_.of_type(ASub))).all()
sess.close()
del sess
try:
go()
finally:
metadata.drop_all(self.engine)
clear_mappers()
def test_path_registry(self):
metadata = MetaData()
a = Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
Column("bar", Integer),
)
b = Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("a_id", ForeignKey("a.id")),
)
m1 = self.mapper_registry.map_imperatively(
A, a, properties={"bs": relationship(B)}
)
self.mapper_registry.map_imperatively(B, b)
@profile_memory()
def go():
ma = sa.inspect(aliased(A))
m1._path_registry[m1.attrs.bs][ma][m1.attrs.bar]
go()
clear_mappers()
def test_with_inheritance(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
ForeignKey("mytable.col1"),
primary_key=True,
test_needs_autoincrement=True,
),
Column("col3", String(30)),
)
@profile_memory()
def go():
class A(ComparableEntity):
pass
class B(A):
pass
clear_mappers()
self.mapper_registry.map_imperatively(
A,
table1,
polymorphic_on=table1.c.col2,
polymorphic_identity="a",
)
self.mapper_registry.map_imperatively(
B, table2, inherits=A, polymorphic_identity="b"
)
sess = Session(self.engine, autoflush=False)
a1 = A()
a2 = A()
b1 = B(col3="b1")
b2 = B(col3="b2")
for x in [a1, a2, b1, b2]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_([A(), A(), B(col3="b1"), B(col3="b2")], alist)
for a in alist:
sess.delete(a)
sess.flush()
# don't need to clear_mappers()
del B
del A
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_with_manytomany(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table3 = Table(
"t1tot2",
metadata,
Column("t1", Integer, ForeignKey("mytable.col1")),
Column("t2", Integer, ForeignKey("mytable2.col1")),
)
@profile_memory()
def go():
class A(ComparableEntity):
pass
class B(ComparableEntity):
pass
self.mapper_registry.map_imperatively(
A,
table1,
properties={
"bs": relationship(
B, secondary=table3, backref="as", order_by=table3.c.t1
)
},
)
self.mapper_registry.map_imperatively(B, table2)
sess = Session(self.engine, autoflush=False)
a1 = A(col2="a1")
a2 = A(col2="a2")
b1 = B(col2="b1")
b2 = B(col2="b2")
a1.bs.append(b1)
a2.bs.append(b2)
for x in [a1, a2]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_([A(bs=[B(col2="b1")]), A(bs=[B(col2="b2")])], alist)
for a in alist:
sess.delete(a)
sess.flush()
# mappers necessarily find themselves in the compiled cache,
# so to allow them to be GC'ed clear out the cache
self.engine.clear_compiled_cache()
del B
del A
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_many_discarded_relationships(self):
"""a use case that really isn't supported, nonetheless we can
guard against memleaks here so why not"""
m1 = MetaData()
t1 = Table("t1", m1, Column("id", Integer, primary_key=True))
t2 = Table(
"t2",
m1,
Column("id", Integer, primary_key=True),
Column("t1id", ForeignKey("t1.id")),
)
class T1:
pass
t1_mapper = self.mapper_registry.map_imperatively(T1, t1)
@testing.emits_warning(r"This declarative base")
@testing.expect_deprecated(r"User-placed attribute .* is replacing")
@profile_memory()
def go():
class T2:
pass
t2_mapper = self.mapper_registry.map_imperatively(T2, t2)
t1_mapper.add_property("bar", relationship(t2_mapper))
s1 = Session(testing.db)
# this causes the path_registry to be invoked
s1.query(t1_mapper)._compile_context()
go()
# fails on newer versions of pysqlite due to unusual memory behavior
# in pysqlite itself. background at:
# https://thread.gmane.org/gmane.comp.python.db.pysqlite.user/2290
@testing.crashes("mysql+cymysql", "blocking")
def test_join_cache(self):
metadata = MetaData()
table1 = Table(
"table1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
table2 = Table(
"table2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("t1id", Integer, ForeignKey("table1.id")),
)
class Foo:
pass
class Bar:
pass
self.mapper_registry.map_imperatively(
Foo,
table1,
properties={
"bars": relationship(
self.mapper_registry.map_imperatively(Bar, table2)
)
},
)
metadata.create_all(self.engine)
session = sessionmaker(self.engine)
@profile_memory()
def go():
s = aliased(Bar, table2.select().subquery())
sess = session()
sess.query(Foo).join(s, Foo.bars).all()
sess.rollback()
try:
go()
finally:
metadata.drop_all(self.engine)
| MemUsageWBackendTest |
python | ray-project__ray | python/ray/serve/tests/test_task_processor.py | {
"start": 4958,
"end": 26678
} | class ____:
"""Test task consumer integration with Ray Serve."""
def test_task_consumer_as_serve_deployment(
self, temp_queue_directory, serve_instance, create_processor_config
):
"""Test that task consumers can be used as Ray Serve deployments."""
processor_config = create_processor_config()
@serve.deployment(max_ongoing_requests=1)
@task_consumer(task_processor_config=processor_config)
class ServeTaskConsumer:
def __init__(self):
self.data_received = None
self.task_received = False
@task_handler(name="process_request")
def process_request(self, data):
self.task_received = True
self.data_received = data
def assert_task_received(self):
assert self.task_received is True
assert self.data_received is not None
assert self.data_received == "test_data_1"
# Deploy the consumer as a Serve deployment
handle = serve.run(ServeTaskConsumer.bind())
send_request_to_queue.remote(processor_config, "test_data_1")
def assert_result():
try:
# `assert_task_received` will throw AssertionError if the task was not received or data is not as expected
handle.assert_task_received.remote().result()
return True
except Exception:
return False
wait_for_condition(assert_result)
def test_task_consumer_as_serve_deployment_with_failed_task(
self, temp_queue_directory, serve_instance, create_processor_config
):
"""Test that task consumers can be used as Ray Serve deployments."""
processor_config = create_processor_config(
failed_task_queue_name="my_failed_task_queue"
)
@serve.deployment(max_ongoing_requests=1)
@task_consumer(task_processor_config=processor_config)
class ServeTaskConsumer:
def __init__(self):
self.num_calls = 0
@task_handler(name="process_request")
def process_request(self, data):
self.num_calls += 1
raise ValueError("Task failed as expected")
def get_num_calls(self):
return self.num_calls
handle = serve.run(ServeTaskConsumer.bind())
task_id_ref = send_request_to_queue.remote(processor_config, "test_data_1")
task_id = ray.get(task_id_ref)
adapter_instance = instantiate_adapter_from_config(
task_processor_config=processor_config
)
def assert_result():
result = adapter_instance.get_task_status_sync(task_id)
if (
result.status == "FAILURE"
and result.result is not None
and isinstance(result.result, ValueError)
and str(result.result) == "Task failed as expected"
and handle.get_num_calls.remote().result()
== 1 + processor_config.max_retries
):
return True
else:
return False
wait_for_condition(assert_result, timeout=20)
def test_task_consumer_persistence_across_restarts(
self, temp_queue_directory, serve_instance, create_processor_config
):
"""Test that tasks persist in queue and get executed after deployment restart."""
# Setup
config = create_processor_config()
tracker = ProcessedTasksTracker.remote()
signal1 = SignalActor.remote()
@serve.deployment(
num_replicas=1, graceful_shutdown_timeout_s=60, max_ongoing_requests=1
)
@task_consumer(task_processor_config=config)
class TaskConsumer:
def __init__(self, tracker_ref, signal_ref):
self.tracker, self.signal = tracker_ref, signal_ref
self.local_processed = []
@task_handler(name="process_request")
def process_request(self, data):
ray.get(self.signal.wait.remote()) # Block until signal
self.local_processed.append(data)
ray.get(self.tracker.add_task.remote(data))
return f"Processed: {data}"
def get_local_processed(self):
return self.local_processed
# Deploy first version and send tasks
serve.run(TaskConsumer.bind(tracker, signal1), name="app_v1")
num_tasks = 20
for i in range(num_tasks):
ray.get(send_request_to_queue.remote(config, f"task_{i}"))
# Process exactly 1 task, then restart deployment
wait_for_condition(
lambda: ray.get(signal1.cur_num_waiters.remote()) == 1, timeout=10
)
ray.get(signal1.send.remote(clear=True)) # Allow 1 task to complete
wait_for_condition(lambda: ray.get(tracker.get_count.remote()) == 1, timeout=10)
# Shutdown first deployment
serve.delete("app_v1", _blocking=False)
ray.get(signal1.send.remote()) # Release any stuck tasks
wait_for_condition(
lambda: "app_v1" not in serve.status().applications, timeout=100
)
tasks_before_restart = ray.get(tracker.get_count.remote())
assert (
tasks_before_restart >= 2 and tasks_before_restart < num_tasks
), f"Expected at least 2 tasks processed and atleast one less than num_tasks, got {tasks_before_restart}"
# Deploy second version and process remaining tasks
signal2 = SignalActor.remote()
handle = serve.run(TaskConsumer.bind(tracker, signal2), name="app_v2")
wait_for_condition(
lambda: ray.get(signal2.cur_num_waiters.remote()) == 1, timeout=10
)
ray.get(signal2.send.remote()) # Process all remaining tasks
wait_for_condition(
lambda: ray.get(tracker.get_count.remote()) == num_tasks, timeout=100
)
# Verify all tasks were processed and distributed correctly
expected_tasks = {f"task_{i}" for i in range(num_tasks)}
final_tasks = ray.get(tracker.get_processed_tasks.remote())
second_deployment_tasks = handle.get_local_processed.remote().result()
assert (
final_tasks == expected_tasks
), f"Missing tasks: {expected_tasks - final_tasks}"
assert (
len(second_deployment_tasks) == num_tasks - tasks_before_restart
), f"Second deployment processed {len(second_deployment_tasks)} tasks, expected {num_tasks - tasks_before_restart}"
def test_task_consumer_as_serve_deployment_with_async_task_handler(
self, temp_queue_directory, serve_instance, create_processor_config
):
"""Test that task consumers properly raise NotImplementedError for async task handlers."""
processor_config = create_processor_config()
# Test that async task handlers raise NotImplementedError during decoration
with pytest.raises(
NotImplementedError,
match="Async task handlers are not supported yet",
):
@serve.deployment(max_ongoing_requests=1)
@task_consumer(task_processor_config=processor_config)
class ServeTaskConsumer:
def __init__(self):
self.data_received = None
self.task_received = False
# This async task handler should raise NotImplementedError during decoration
@task_handler(name="process_request")
async def process_request(self, data):
self.task_received = True
self.data_received = data
def test_task_consumer_metrics(
self, temp_queue_directory, serve_instance, create_processor_config
):
"""Test that task processor metrics are collected and exposed correctly."""
processor_config = create_processor_config()
@serve.deployment(max_ongoing_requests=1)
@task_consumer(task_processor_config=processor_config)
class ServeTaskConsumer:
def __init__(self):
self.task_received = False
@task_handler(name="process_request")
def process_request(self, data):
self.task_received = True
def get_task_received(self) -> bool:
return self.task_received
handle = serve.run(ServeTaskConsumer.bind())
send_request_to_queue.remote(processor_config, "test_data_1")
def assert_task_received():
return handle.get_task_received.remote().result()
wait_for_condition(assert_task_received, timeout=20)
adapter_instance = instantiate_adapter_from_config(
task_processor_config=processor_config
)
metrics = adapter_instance.get_metrics_sync()
assert len(metrics) == 1
worker_name = next(iter(metrics))
worker_stats = metrics[worker_name]
# Check that the total number of processed tasks is correct.
assert worker_stats["pool"]["threads"] == 1
assert worker_stats["pool"]["max-concurrency"] == 1
assert worker_stats["total"]["process_request"] == 1
assert worker_stats["broker"]["transport"] == "filesystem"
def test_task_consumer_health_check(
self, temp_queue_directory, serve_instance, create_processor_config
):
"""Test that the health check for the task processor works correctly."""
processor_config = create_processor_config()
@serve.deployment(max_ongoing_requests=1)
@task_consumer(task_processor_config=processor_config)
class ServeTaskConsumer:
pass
serve.run(ServeTaskConsumer.bind())
adapter_instance = instantiate_adapter_from_config(
task_processor_config=processor_config
)
def check_health():
health_status = adapter_instance.health_check_sync()
return len(health_status) > 0
# Wait for the worker to be ready
wait_for_condition(check_health, timeout=20)
health_status = adapter_instance.health_check_sync()
assert len(health_status) == 1
worker_reply = health_status[0]
assert len(worker_reply) == 1
worker_name = next(iter(worker_reply))
assert worker_reply[worker_name] == {"ok": "pong"}
def test_task_processor_with_cancel_tasks_and_app_custom_config(
self, external_redis, serve_instance # noqa: F811
):
"""Test the cancel task functionality with celery broker."""
redis_address = os.environ.get("RAY_REDIS_ADDRESS")
processor_config = TaskProcessorConfig(
queue_name="my_app_queue",
adapter_config=CeleryAdapterConfig(
broker_url=f"redis://{redis_address}/0",
backend_url=f"redis://{redis_address}/1",
app_custom_config={"worker_prefetch_multiplier": 1},
),
)
signal = SignalActor.remote()
@serve.deployment(max_ongoing_requests=1)
@task_consumer(task_processor_config=processor_config)
class MyTaskConsumer:
def __init__(self, signal_actor):
self._signal = signal_actor
self.message_received = []
@task_handler(name="process")
def process(self, data):
ray.get(self._signal.wait.remote())
self.message_received.append(data)
def get_message_received(self):
return self.message_received
handle = serve.run(MyTaskConsumer.bind(signal), name="app_v1")
task_ids = []
for i in range(2):
task_id_ref = send_request_to_queue.remote(
processor_config, f"test_data_{i}", task_name="process"
)
task_ids.append(ray.get(task_id_ref))
wait_for_condition(
lambda: ray.get(signal.cur_num_waiters.remote()) == 1, timeout=10
)
adapter_instance = instantiate_adapter_from_config(
task_processor_config=processor_config
)
adapter_instance.cancel_task_sync(task_ids[1])
ray.get(signal.send.remote())
def check_revoked():
status = adapter_instance.get_task_status_sync(task_ids[1])
return status.status == "REVOKED"
wait_for_condition(check_revoked, timeout=20)
assert "test_data_0" in handle.get_message_received.remote().result()
assert "test_data_1" not in handle.get_message_received.remote().result()
serve.delete("app_v1")
def test_task_consumer_with_task_custom_config(
self, temp_queue_directory, serve_instance, create_processor_config
):
"""Test that task consumer works with app custom config."""
processor_config = create_processor_config()
processor_config.adapter_config.task_custom_config = {
"retry_backoff_max": 1,
"retry_kwargs": {"max_retries": 10},
}
@serve.deployment(max_ongoing_requests=1)
@task_consumer(task_processor_config=processor_config)
class ServeTaskConsumer:
def __init__(self):
self.num_calls = 0
@task_handler(name="process_request")
def process_request(self, data):
self.num_calls += 1
raise ValueError("Task failed as expected")
def get_num_calls(self):
return self.num_calls
handle = serve.run(ServeTaskConsumer.bind())
send_request_to_queue.remote(processor_config, "test_data_0")
wait_for_condition(
lambda: handle.get_num_calls.remote().result() == 11, timeout=20
)
def test_task_consumer_failed_task_queue_consumption(
self, temp_queue_directory, serve_instance, create_processor_config
):
"""Test that failed tasks can be consumed from the failed task queue with the correct arguments."""
# Create first processor config with failed task queue
failed_queue_name = "failed_task_queue"
failing_processor_config = create_processor_config(
failed_task_queue_name=failed_queue_name
)
# Create second processor config that consumes from the failed queue
failed_processor_config = create_processor_config()
failed_processor_config.queue_name = failed_queue_name
# First consumer that always fails
@serve.deployment(max_ongoing_requests=1)
@task_consumer(task_processor_config=failing_processor_config)
class FailingTaskConsumer:
@task_handler(name="process_request")
def process_request(self, data):
raise ValueError("Test error message from first consumer")
# Second consumer that processes failed tasks
@serve.deployment(max_ongoing_requests=1)
@task_consumer(task_processor_config=failed_processor_config)
class FailedTaskConsumer:
def __init__(self):
self.received_error = None
self.received_task_id = None
self.received_original_args = None
@task_handler(name="process_request")
def process_request(self, task_id, exception_msg, args, kwargs, einfo):
self.received_task_id = task_id
self.received_error = exception_msg
self.received_original_args = args
def get_received_error(self):
return self.received_error
def get_received_task_id(self):
return self.received_task_id
def get_received_original_args(self):
return self.received_original_args
# Deploy both consumers
serve.run(
FailingTaskConsumer.bind(),
name="failing_task_consumer",
route_prefix="/failing_task_consumer",
)
handle_2 = serve.run(
FailedTaskConsumer.bind(),
name="failed_task_consumer",
route_prefix="/failed_task_consumer",
)
# Send a task to the first consumer (which will fail)
task_id = send_request_to_queue.remote(failing_processor_config, "test_data_1")
# Verify the received data
def assert_failed_task_received():
received_error = handle_2.get_received_error.remote().result()
received_task_id = handle_2.get_received_task_id.remote().result()
received_original_args = (
handle_2.get_received_original_args.remote().result()
)
args_data = "['test_data_1']"
err_msg = "ValueError: Test error message from first consumer"
assert err_msg in received_error
assert received_task_id == ray.get(task_id)
assert received_original_args == args_data
return True
wait_for_condition(assert_failed_task_received, timeout=20)
def test_multiple_task_consumers_in_single_app(
self, temp_queue_directory, serve_instance, create_processor_config
):
"""Test that multiple task consumers can coexist in a single Ray Serve application."""
orchestrator_config = create_processor_config()
orchestrator_config.queue_name = "orchestrator_queue"
worker_config = create_processor_config()
worker_config.queue_name = "worker_queue"
@serve.deployment(name="worker-deployment")
@task_consumer(task_processor_config=worker_config)
class WorkerTaskConsumer:
def __init__(self):
self.task_count = 0
@task_handler(name="process_data")
def process_data(self, payload):
self.task_count += 1
return f"Worker processed: {payload}"
def get_worker_task_count(self):
return self.task_count
@serve.deployment(name="orchestrator-deployment")
@task_consumer(task_processor_config=orchestrator_config)
class OrchestratorTaskConsumer:
def __init__(self, worker_deployment):
self.worker_deployment = worker_deployment
self.message_received = []
@task_handler(name="orchestrate_task")
def orchestrate_task(self, payload):
send_request_to_queue.remote(
worker_config, payload, task_name="process_data"
)
self.message_received.append(payload)
return f"Orchestrated complex task for payload: {payload}"
async def get_worker_task_count(self):
return await self.worker_deployment.get_worker_task_count.remote()
def get_message_received(self):
return self.message_received
worker_deployment = WorkerTaskConsumer.bind()
orchestrator_deployment = OrchestratorTaskConsumer.bind(worker_deployment)
handle = serve.run(orchestrator_deployment, name="multi_consumer_app")
num_tasks_to_send = 3
data_sent_to_orchestrator = []
for i in range(num_tasks_to_send):
data_id = f"data_{i}"
send_request_to_queue.remote(
orchestrator_config, data_id, task_name="orchestrate_task"
)
data_sent_to_orchestrator.append(data_id)
# Wait for tasks to be processed properly
def check_data_processed_properly():
worker_count = handle.get_worker_task_count.remote().result()
data_received_by_orchestrator = (
handle.get_message_received.remote().result()
)
return worker_count == num_tasks_to_send and set(
data_received_by_orchestrator
) == set(data_sent_to_orchestrator)
wait_for_condition(check_data_processed_properly, timeout=300)
def test_task_consumer_with_one_queue_and_multiple_different_tasks(
self, temp_queue_directory, serve_instance, create_processor_config
):
"""Test that task consumers can handle multiple different tasks in the same queue."""
processor_config = create_processor_config()
@serve.deployment
@task_consumer(task_processor_config=processor_config)
class MyTaskConsumer:
def __init__(self):
self.message_received = []
@task_handler(name="process_data")
def process_data(self, data):
self.message_received.append(data)
@task_handler(name="process_data2")
def process_data2(self, data):
self.message_received.append(data)
def get_message_received(self):
return self.message_received
handle = serve.run(MyTaskConsumer.bind())
send_request_to_queue.remote(
processor_config, "test_data_1", task_name="process_data"
)
send_request_to_queue.remote(
processor_config, "test_data_2", task_name="process_data2"
)
send_request_to_queue.remote(
processor_config, "test_data_3", task_name="process_data"
)
wait_for_condition(
lambda: "test_data_1" in handle.get_message_received.remote().result()
)
wait_for_condition(
lambda: "test_data_2" in handle.get_message_received.remote().result()
)
wait_for_condition(
lambda: "test_data_3" in handle.get_message_received.remote().result()
)
@pytest.mark.skipif(sys.platform == "win32", reason="Flaky on Windows.")
| TestTaskConsumerWithRayServe |
python | getsentry__sentry | tests/sentry/uptime/endpoints/test_validators.py | {
"start": 3941,
"end": 15521
} | class ____(UptimeTestCase):
def setUp(self):
super().setUp()
self.context = {
"organization": self.organization,
"project": self.project,
"request": self.make_request(user=self.user),
}
def get_valid_data(self, **kwargs):
return {
"name": kwargs.get("name", "Test Uptime Monitor"),
"type": UptimeDomainCheckFailure.slug,
"enabled": kwargs.get("enabled", True),
"config": kwargs.get(
"config",
{
"mode": UptimeMonitorMode.MANUAL.value,
"environment": None,
"recovery_threshold": DEFAULT_RECOVERY_THRESHOLD,
"downtime_threshold": DEFAULT_DOWNTIME_THRESHOLD,
},
),
"dataSources": kwargs.get(
"data_sources",
[
{
"url": "https://sentry.io",
"intervalSeconds": 60,
"timeoutMs": 1000,
}
],
),
}
def test_rejects_multiple_data_sources(self):
"""Test that multiple data sources are rejected for uptime monitors."""
data = self.get_valid_data(
data_sources=[
{
"url": "https://sentry.io",
"intervalSeconds": 60,
"timeoutMs": 1000,
},
{
"url": "https://example.com",
"intervalSeconds": 60,
"timeoutMs": 1000,
},
]
)
validator = UptimeDomainCheckFailureValidator(data=data, context=self.context)
assert not validator.is_valid()
assert "dataSources" in validator.errors
assert "Only one data source is allowed" in str(validator.errors["dataSources"])
@mock.patch(
"sentry.quotas.backend.assign_seat",
return_value=Outcome.ACCEPTED,
)
def test_create_enabled_assigns_seat(self, mock_assign_seat: mock.MagicMock) -> None:
"""Test that creating an enabled detector assigns a billing seat."""
validator = UptimeDomainCheckFailureValidator(
data=self.get_valid_data(enabled=True), context=self.context
)
assert validator.is_valid(), validator.errors
detector = validator.save()
detector.refresh_from_db()
assert detector.enabled is True
# Verify seat was assigned
mock_assign_seat.assert_called_with(DataCategory.UPTIME, detector)
@mock.patch(
"sentry.quotas.backend.assign_seat",
return_value=Outcome.RATE_LIMITED,
)
def test_create_enabled_no_seat_available(self, mock_assign_seat: mock.MagicMock) -> None:
"""Test that creating a detector with no seats available creates it but leaves it disabled."""
validator = UptimeDomainCheckFailureValidator(
data=self.get_valid_data(enabled=True), context=self.context
)
assert validator.is_valid(), validator.errors
detector = validator.save()
detector.refresh_from_db()
# Detector created but not enabled due to no seat assignment
assert detector.enabled is False
# Verify seat assignment was attempted
mock_assign_seat.assert_called_with(DataCategory.UPTIME, detector)
uptime_subscription = get_uptime_subscription(detector)
assert uptime_subscription.status == UptimeSubscription.Status.DISABLED.value
@mock.patch(
"sentry.quotas.backend.assign_seat",
return_value=Outcome.ACCEPTED,
)
def test_update_enable_assigns_seat(self, mock_assign_seat: mock.MagicMock) -> None:
"""Test that enabling a previously disabled detector assigns a seat."""
# Create a disabled detector
detector = self.create_uptime_detector(enabled=False)
validator = UptimeDomainCheckFailureValidator(
instance=detector, data={"enabled": True}, context=self.context, partial=True
)
assert validator.is_valid(), validator.errors
validator.save()
detector.refresh_from_db()
assert detector.enabled is True
# Verify seat was assigned
mock_assign_seat.assert_called_with(DataCategory.UPTIME, detector)
uptime_subscription = get_uptime_subscription(detector)
assert uptime_subscription.status == UptimeSubscription.Status.ACTIVE.value
@mock.patch(
"sentry.quotas.backend.check_assign_seat",
return_value=SeatAssignmentResult(assignable=False, reason="No seats available"),
)
def test_update_enable_no_seat_available(self, mock_check_assign_seat: mock.MagicMock) -> None:
"""Test that enabling fails with validation error when no seats are available."""
# Create a disabled detector
detector = self.create_uptime_detector(enabled=False)
validator = UptimeDomainCheckFailureValidator(
instance=detector, data={"enabled": True}, context=self.context, partial=True
)
# Validation should fail due to no seats available
assert not validator.is_valid()
assert "enabled" in validator.errors
assert validator.errors["enabled"] == ["No seats available"]
detector.refresh_from_db()
# Detector should still be disabled
assert detector.enabled is False
# Verify seat availability check was performed
mock_check_assign_seat.assert_called_with(DataCategory.UPTIME, detector)
@mock.patch("sentry.quotas.backend.disable_seat")
def test_update_disable_removes_seat(self, mock_disable_seat: mock.MagicMock) -> None:
"""Test that disabling a previously enabled detector removes the seat."""
# Create an enabled detector
detector = self.create_uptime_detector(enabled=True)
validator = UptimeDomainCheckFailureValidator(
instance=detector, data={"enabled": False}, context=self.context, partial=True
)
assert validator.is_valid(), validator.errors
validator.save()
detector.refresh_from_db()
assert detector.enabled is False
# Verify disable_seat was called
mock_disable_seat.assert_called_with(DataCategory.UPTIME, detector)
uptime_subscription = get_uptime_subscription(detector)
assert uptime_subscription.status == UptimeSubscription.Status.DISABLED.value
@mock.patch("sentry.quotas.backend.remove_seat")
def test_delete_removes_seat(self, mock_remove_seat: mock.MagicMock) -> None:
"""Test that deleting a detector removes its billing seat immediately."""
detector = self.create_uptime_detector(enabled=True)
validator = UptimeDomainCheckFailureValidator(
instance=detector, data={}, context=self.context
)
validator.delete()
# Verify remove_seat was called immediately
mock_remove_seat.assert_called_with(DataCategory.UPTIME, detector)
@mock.patch(
"sentry.quotas.backend.assign_seat",
return_value=Outcome.ACCEPTED,
)
def test_update_no_enable_change_no_seat_call(self, mock_assign_seat: mock.MagicMock) -> None:
"""Test that updating without changing enabled status doesn't trigger seat operations."""
# Create an enabled detector
detector = self.create_uptime_detector(enabled=True)
# Clear any previous mock calls from creation
mock_assign_seat.reset_mock()
validator = UptimeDomainCheckFailureValidator(
instance=detector, data={"name": "Updated Name"}, context=self.context, partial=True
)
assert validator.is_valid(), validator.errors
validator.save()
detector.refresh_from_db()
assert detector.name == "Updated Name"
assert detector.enabled is True
# Verify no seat operations were called
mock_assign_seat.assert_not_called()
def test_non_superuser_cannot_create_with_auto_detected_mode(self) -> None:
"""Test that non-superuser cannot create detector with AUTO_DETECTED mode."""
data = self.get_valid_data(
config={
"mode": UptimeMonitorMode.AUTO_DETECTED_ACTIVE.value,
"environment": None,
"recovery_threshold": DEFAULT_RECOVERY_THRESHOLD,
"downtime_threshold": DEFAULT_DOWNTIME_THRESHOLD,
}
)
validator = UptimeDomainCheckFailureValidator(data=data, context=self.context)
assert not validator.is_valid()
assert validator.errors["config"] == ["Only superusers can modify `mode`"]
def test_non_superuser_cannot_change_mode(self) -> None:
"""Test that non-superuser cannot change mode via update."""
# Create a detector with MANUAL mode
detector = self.create_uptime_detector(mode=UptimeMonitorMode.MANUAL)
data = {
"config": {
"mode": UptimeMonitorMode.AUTO_DETECTED_ACTIVE.value,
"environment": None,
"recovery_threshold": DEFAULT_RECOVERY_THRESHOLD,
"downtime_threshold": DEFAULT_DOWNTIME_THRESHOLD,
}
}
validator = UptimeDomainCheckFailureValidator(
instance=detector, data=data, context=self.context, partial=True
)
assert not validator.is_valid()
assert validator.errors["config"] == ["Only superusers can modify `mode`"]
# Verify mode was not changed
detector.refresh_from_db()
assert detector.config["mode"] == UptimeMonitorMode.MANUAL.value
def test_non_superuser_can_update_with_same_mode(self) -> None:
"""Test that non-superuser can pass config if mode doesn't change."""
# Create a detector with AUTO_DETECTED_ACTIVE mode (e.g., from autodetection)
detector = self.create_uptime_detector(mode=UptimeMonitorMode.AUTO_DETECTED_ACTIVE)
data = {
"config": {
"mode": UptimeMonitorMode.AUTO_DETECTED_ACTIVE.value, # Same mode
"environment": None,
"recovery_threshold": DEFAULT_RECOVERY_THRESHOLD,
"downtime_threshold": DEFAULT_DOWNTIME_THRESHOLD,
}
}
validator = UptimeDomainCheckFailureValidator(
instance=detector, data=data, context=self.context, partial=True
)
# Should be valid since mode hasn't changed
assert validator.is_valid(), validator.errors
def test_superuser_can_create_with_auto_detected_mode(self) -> None:
"""Test that superuser can create detector with AUTO_DETECTED mode."""
superuser = self.create_user(is_superuser=True)
self.context["request"] = self.make_request(user=superuser, is_superuser=True)
data = self.get_valid_data(
config={
"mode": UptimeMonitorMode.AUTO_DETECTED_ACTIVE.value,
"environment": None,
"recovery_threshold": DEFAULT_RECOVERY_THRESHOLD,
"downtime_threshold": DEFAULT_DOWNTIME_THRESHOLD,
}
)
validator = UptimeDomainCheckFailureValidator(data=data, context=self.context)
assert validator.is_valid(), validator.errors
detector = validator.save()
detector.refresh_from_db()
assert detector.config["mode"] == UptimeMonitorMode.AUTO_DETECTED_ACTIVE.value
| UptimeDomainCheckFailureValidatorTest |
python | pandas-dev__pandas | pandas/tests/tseries/offsets/test_custom_business_month.py | {
"start": 1442,
"end": 7218
} | class ____:
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = CBMonthBegin()
offset2 = CBMonthBegin(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(CBMonthBegin()) == "<CustomBusinessMonthBegin>"
assert repr(CBMonthBegin(2)) == "<2 * CustomBusinessMonthBegins>"
def test_add_datetime(self, dt):
assert CBMonthBegin(2) + dt == datetime(2008, 3, 3)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self, dt):
assert CBMonthBegin(10).rollback(dt) == datetime(2008, 1, 1)
def testRollforward1(self, dt):
assert CBMonthBegin(10).rollforward(dt) == datetime(2008, 1, 1)
def test_roll_date_object(self):
offset = CBMonthBegin()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 3)
result = offset.rollforward(dt)
assert result == datetime(2012, 10, 1)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthBegin(), datetime(2008, 1, 1), True),
(CBMonthBegin(), datetime(2008, 1, 31), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
apply_cases = [
(
CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 2, 7): datetime(2008, 3, 3),
},
),
(
2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 2, 7): datetime(2008, 4, 1),
},
),
(
-CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 12, 3),
datetime(2008, 2, 8): datetime(2008, 2, 1),
},
),
(
-2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 11, 1),
datetime(2008, 2, 9): datetime(2008, 1, 1),
},
),
(
CBMonthBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 7): datetime(2008, 2, 1),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthBegin(10)
assert result == datetime(2013, 8, 1)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthBegin() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 1)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 6, 1)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-02-01", datetime(2012, 2, 2), np.datetime64("2012-03-01")]
bm_offset = CBMonthBegin(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 2)
assert dt + 2 * bm_offset == datetime(2012, 2, 3)
@pytest.mark.parametrize(
"case",
[
(
CBMonthBegin(n=1, offset=timedelta(days=5)),
{
datetime(2021, 3, 1): datetime(2021, 4, 1) + timedelta(days=5),
datetime(2021, 4, 17): datetime(2021, 5, 3) + timedelta(days=5),
},
),
(
CBMonthBegin(n=2, offset=timedelta(days=40)),
{
datetime(2021, 3, 10): datetime(2021, 5, 3) + timedelta(days=40),
datetime(2021, 4, 30): datetime(2021, 6, 1) + timedelta(days=40),
},
),
(
CBMonthBegin(n=1, offset=timedelta(days=-5)),
{
datetime(2021, 3, 1): datetime(2021, 4, 1) - timedelta(days=5),
datetime(2021, 4, 11): datetime(2021, 5, 3) - timedelta(days=5),
},
),
(
-2 * CBMonthBegin(n=1, offset=timedelta(days=10)),
{
datetime(2021, 3, 1): datetime(2021, 1, 1) + timedelta(days=10),
datetime(2021, 4, 3): datetime(2021, 3, 1) + timedelta(days=10),
},
),
(
CBMonthBegin(n=0, offset=timedelta(days=1)),
{
datetime(2021, 3, 2): datetime(2021, 4, 1) + timedelta(days=1),
datetime(2021, 4, 1): datetime(2021, 4, 1) + timedelta(days=1),
},
),
(
CBMonthBegin(
n=1, holidays=["2021-04-01", "2021-04-02"], offset=timedelta(days=1)
),
{
datetime(2021, 3, 2): datetime(2021, 4, 5) + timedelta(days=1),
},
),
],
)
def test_apply_with_extra_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
| TestCustomBusinessMonthBegin |
python | django__django | django/contrib/gis/db/models/functions.py | {
"start": 13071,
"end": 13710
} | class ____(GeoFunc):
output_field = TextField()
def __init__(self, expression, precision=None, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, "precision", int))
super().__init__(*expressions, **extra)
def as_mysql(self, compiler, connection, **extra_context):
clone = self.copy()
# If no precision is provided, set it to the maximum.
if len(clone.source_expressions) < 2:
clone.source_expressions.append(Value(100))
return clone.as_sql(compiler, connection, **extra_context)
| GeoHash |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/selector.py | {
"start": 6395,
"end": 7364
} | class ____:
location_name: str
repository_name: str
schedule_name: str
def to_graphql_input(self):
return {
"repositoryLocationName": self.location_name,
"repositoryName": self.repository_name,
"scheduleName": self.schedule_name,
}
@property
def instigator_name(self) -> str:
return self.schedule_name
@staticmethod
def from_graphql_input(graphql_data):
return ScheduleSelector(
location_name=graphql_data["repositoryLocationName"],
repository_name=graphql_data["repositoryName"],
schedule_name=graphql_data["scheduleName"],
)
@staticmethod
def from_instigator_selector(selector: "InstigatorSelector"):
return ScheduleSelector(
location_name=selector.location_name,
repository_name=selector.repository_name,
schedule_name=selector.name,
)
@record
| ScheduleSelector |
python | huggingface__transformers | src/transformers/models/gemma2/modular_gemma2.py | {
"start": 10799,
"end": 10845
} | class ____(GemmaRMSNorm):
pass
| Gemma2RMSNorm |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_v2.py | {
"start": 141405,
"end": 147011
} | class ____(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple(
'WeightedCategoricalColumn',
('categorical_column', 'weight_feature_key', 'dtype'))):
"""See `weighted_categorical_column`."""
@property
def _is_v2_column(self):
return (
isinstance(self.categorical_column, fc_types.FeatureColumn)
and self.categorical_column._is_v2_column
) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_weighted_by_{}'.format(self.categorical_column.name,
self.weight_feature_key)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
config = self.categorical_column.parse_example_spec
if self.weight_feature_key in config:
raise ValueError('Parse config {} already exists for {}.'.format(
config[self.weight_feature_key], self.weight_feature_key))
config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
return config
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
config = self.categorical_column._parse_example_spec # pylint: disable=protected-access
if self.weight_feature_key in config:
raise ValueError('Parse config {} already exists for {}.'.format(
config[self.weight_feature_key], self.weight_feature_key))
config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
return config
@property
def num_buckets(self):
"""See `DenseColumn` base class."""
return self.categorical_column.num_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.categorical_column._num_buckets # pylint: disable=protected-access
def _transform_weight_tensor(self, weight_tensor):
if weight_tensor is None:
raise ValueError('Missing weights {}.'.format(self.weight_feature_key))
weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
weight_tensor)
if self.dtype != weight_tensor.dtype.base_dtype:
raise ValueError('Bad dtype, expected {}, but got {}.'.format(
self.dtype, weight_tensor.dtype))
if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor):
# The weight tensor can be a regular Tensor. In this case, sparsify it.
weight_tensor = _to_sparse_input_and_drop_ignore_values(
weight_tensor, ignore_value=0.0)
if not weight_tensor.dtype.is_floating:
weight_tensor = math_ops.cast(weight_tensor, dtypes.float32)
return weight_tensor
def transform_feature(self, transformation_cache, state_manager):
"""Applies weights to tensor generated from `categorical_column`'."""
weight_tensor = transformation_cache.get(self.weight_feature_key,
state_manager)
sparse_weight_tensor = self._transform_weight_tensor(weight_tensor)
sparse_categorical_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.categorical_column, state_manager))
return (sparse_categorical_tensor, sparse_weight_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
"""Applies weights to tensor generated from `categorical_column`'."""
weight_tensor = inputs.get(self.weight_feature_key)
weight_tensor = self._transform_weight_tensor(weight_tensor)
return (inputs.get(self.categorical_column), weight_tensor)
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
tensors = transformation_cache.get(self, state_manager)
return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
del weight_collections
del trainable
tensors = inputs.get(self)
return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column, self.weight_feature_key]
def get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['categorical_column'] = serialize_feature_column(
self.categorical_column)
config['dtype'] = self.dtype.name
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
@serialization.register_feature_column
| WeightedCategoricalColumn |
python | getsentry__sentry | src/sentry/analytics/events/alert_created.py | {
"start": 70,
"end": 609
} | class ____(analytics.Event):
user_id: int | None = None
default_user_id: int | str | None = None
organization_id: int
project_id: int
rule_id: int
rule_type: str
referrer: str | None = None
session_id: str | None = None
is_api_token: bool
# `alert_rule_ui_component` can be `alert-rule-action`
alert_rule_ui_component: str | None = None
duplicate_rule: str | None = None
wizard_v3: str | None = None
query_type: str | None = None
analytics.register(AlertCreatedEvent)
| AlertCreatedEvent |
python | huggingface__transformers | src/transformers/models/code_llama/tokenization_code_llama.py | {
"start": 1711,
"end": 16088
} | class ____(TokenizersBackend):
"""
Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding.
This uses notably ByteFallback and no normalization.
```python
>>> from transformers import CodeLlamaTokenizer
>>> tokenizer = CodeLlamaTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")
>>> tokenizer.encode("Hello this is a test")
[1, 15043, 445, 338, 263, 1243]
```
If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or
call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the
values of the first token and final token of an encoded sequence will not be correct). For more details, checkout
[post-processors] (https://huggingface.co/docs/tokenizers/api/post-processors) documentation.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods. The default configuration match that of
[meta-llama/CodeLlama-7b-Instruct-hf](https://huggingface.co/meta-llama/CodeLlama-7b-Instruct-hf/blob/main/tokenizer_config.json)
which supports prompt infilling.
Args:
clean_up_tokenization_spaces (`str`, *optional*, defaults to `False`):
Whether to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra
spaces.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
prefix_token (`str`, *optional*, defaults to `"▁<PRE>"`):
Prefix token used for infilling.
middle_token (`str`, *optional*, defaults to `"▁<MID>"`):
Middle token used for infilling.
suffix_token (`str`, *optional*, defaults to `"▁<SUF>"`):
Suffix token used for infilling.
eot_token (`str`, *optional*, defaults to `"▁<EOT>"`):
End of text token used for infilling.
fill_token (`str`, *optional*, defaults to `"<FILL_ME>"`):
The token used to split the input between the prefix and suffix.
additional_special_tokens (`list[str]`, *optional*):
Additional special tokens used by the tokenizer.
add_bos_token (`bool`, *optional*, defaults to `True`):
Whether to add a beginning of sequence token at the start of sequences.
add_eos_token (`bool`, *optional*, defaults to `False`):
Whether to add an end of sequence token at the end of sequences.
use_default_system_prompt (`bool`, *optional*, defaults to `False`):
Whether or not the default system prompt for Llama should be used.
add_prefix_space (`bool`, *optional*):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word.
vocab (`dict`, *optional*):
Custom vocabulary dictionary. If not provided, vocabulary is loaded from vocab_file.
merges (`list`, *optional*):
Custom merges list. If not provided, merges are loaded from merges_file.
vocab_file (`str`, *optional*):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
contains the vocabulary necessary to instantiate a tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
padding_side = "left"
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
clean_up_tokenization_spaces=False,
unk_token="<unk>",
bos_token="<s>",
eos_token="</s>",
prefix_token="▁<PRE>",
middle_token="▁<MID>",
suffix_token="▁<SUF>",
eot_token="▁<EOT>",
fill_token="<FILL_ME>",
additional_special_tokens=None,
add_bos_token=True,
add_eos_token=False,
use_default_system_prompt=False,
add_prefix_space=None,
vocab=None,
merges=None,
vocab_file=None,
**kwargs,
):
self.add_prefix_space = add_prefix_space if add_prefix_space is not None else True
self.use_default_system_prompt = use_default_system_prompt
additional_special_tokens = additional_special_tokens or []
for token in [prefix_token, middle_token, suffix_token, eot_token, fill_token]:
additional_special_tokens += [token] if token is not None else []
if vocab is not None:
self._vocab = (
{token: idx for idx, (token, _score) in enumerate(vocab)} if isinstance(vocab, list) else vocab
)
else:
self._vocab = {
str(unk_token): 0,
str(bos_token): 1,
str(eos_token): 2,
}
filtered_vocab = {
t: i for t, i in self._vocab.items() if t not in {str(eos_token), str(bos_token), str(unk_token)}
}
self._merges = merges if merges is not None else generate_merges(filtered_vocab)
self._tokenizer = Tokenizer(
BPE(
vocab=self._vocab,
merges=self._merges,
fuse_unk=True,
byte_fallback=True,
dropout=None,
unk_token=str(unk_token),
)
)
self._tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(
replacement="▁", prepend_scheme=_get_prepend_scheme(self.add_prefix_space, self), split=False
)
self._tokenizer.decoder = decoders.Sequence(
[decoders.Replace("▁", " "), decoders.ByteFallback(), decoders.Fuse(), decoders.Strip(content=" ", left=1)]
)
super().__init__(
tokenizer_object=self._tokenizer,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
add_bos_token=add_bos_token,
add_eos_token=add_eos_token,
use_default_system_prompt=use_default_system_prompt,
add_prefix_space=add_prefix_space,
prefix_token=prefix_token,
middle_token=middle_token,
suffix_token=suffix_token,
eot_token=eot_token,
fill_token=fill_token,
additional_special_tokens=additional_special_tokens,
**kwargs,
)
self._add_bos_token = add_bos_token
self._add_eos_token = add_eos_token
self.vocab_file = vocab_file
self._prefix_token = prefix_token
self._middle_token = middle_token
self._suffix_token = suffix_token
self._eot_token = eot_token
self.fill_token = fill_token
self._post_init()
def _post_init(self):
self._tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement="▁", prepend_scheme="first", split=False)
self._tokenizer.normalizer = None
# This matches LlamaTokenizer's behavior and is needed when loading from vocab/merges
self.add_tokens([AddedToken(token, special=True) for token in self.all_special_tokens])
self.update_post_processor()
super()._post_init()
@property
def prefix_token(self):
return self._prefix_token
@property
def prefix_id(self):
if self._prefix_token is None:
return None
return self.convert_tokens_to_ids(self.prefix_token)
@property
def middle_token(self):
return self._middle_token
@property
def middle_id(self):
if self._middle_token is None:
return None
return self.convert_tokens_to_ids(self.middle_token)
@property
def suffix_token(self):
return self._suffix_token
@property
def suffix_id(self):
if self._suffix_token is None:
return None
return self.convert_tokens_to_ids(self.suffix_token)
@property
def eot_id(self):
if self._eot_token is None:
return None
return self.convert_tokens_to_ids(self.eot_token)
@property
def eot_token(self):
return self._eot_token
def set_infilling_processor(self, reset, suffix_first=False, add_special_tokens=True):
"""
Updates the normalizer to make sure the prompt format for `infilling` is respected. The infilling format is the
following: if suffix_first
" <PRE> <SUF>{suf} <MID> {pre}"
else:
" <PRE> {pre} <SUF>{suf} <MID>"
If `reset` is set to `True`, the `normalizer` and `post_processor` are reset to their "normal" behaviour, which
is to add a prefix space for the normalizer, and add a `bos_token` to the input text for the `post_processor`.
"""
if reset:
self._tokenizer.normalizer = normalizers.Sequence(
[
normalizers.Prepend(prepend="▁"),
normalizers.Replace(pattern=" ", content="▁"),
]
)
self.update_post_processor()
return
self._tokenizer.normalizer = normalizers.Replace(pattern=" ", content="▁")
pair = [self.bos_token] if self.add_bos_token and add_special_tokens else []
special_tokens = [(self.bos_token, self.bos_token_id)] if self.add_bos_token and add_special_tokens else []
if suffix_first:
# format as " <PRE> <SUF>{suf} <MID> {pre}"
pair += [self.prefix_token, self.suffix_token, "$B", self.middle_token, "$A"]
special_tokens += [
(self.prefix_token, self.prefix_id),
(self.suffix_token, self.suffix_id),
(self.middle_token, self.middle_id),
]
else:
# format as " <PRE> {pre} <SUF>{suf} <MID>"
pair += [self.prefix_token, "$A", self.suffix_token, "$B", self.middle_token]
special_tokens += [
(self.prefix_token, self.prefix_id),
(self.suffix_token, self.suffix_id),
(self.middle_token, self.middle_id),
]
if self.add_eos_token and add_special_tokens:
pair += [self.eos_token]
special_tokens += [(self.eos_token, self.eos_token_id)]
self._tokenizer.post_processor = processors.TemplateProcessing(
single="$A", pair=pair, special_tokens=special_tokens
)
def tokenize(self, text, suffix=None, suffix_first=False, **kwargs):
# Handle fill_token splitting
if self.fill_token is not None and self.fill_token in text and suffix is None:
text, suffix = text.split(self.fill_token)
# If no suffix, use standard tokenization
if suffix is None or len(suffix) < 1:
return super().tokenize(text, **kwargs)
# Check that infilling tokens are available
if None in (self.prefix_id, self.middle_id, self.suffix_id):
raise ValueError(
"The input either includes a `prefix` and a `suffix` used for the infilling task,"
f" or can be split on the {self.fill_token} token, creating a suffix and prefix,"
" but the model does not support `infilling`."
)
# Temporarily set infilling processor
self.set_infilling_processor(False, suffix_first=suffix_first, add_special_tokens=False)
# Remove text_pair and pair from kwargs if present to avoid conflict
kwargs.pop("text_pair", None)
kwargs.pop("pair", None)
# Tokenize with infilling format
# The processor will handle the special token arrangement
# Use pair=suffix (not text_pair) since base class tokenize expects 'pair' parameter
result = super().tokenize(" " + text, pair=suffix, **kwargs)
# Reset processor
self.set_infilling_processor(True)
return result
def _encode_plus(self, text, text_pair=None, suffix=None, suffix_first=False, add_special_tokens=True, **kwargs):
is_infilling = False
if suffix is not None:
text_pair = suffix
is_infilling = True
elif "suffix" in kwargs:
text_pair = kwargs.pop("suffix")
is_infilling = True
if isinstance(text, str) and self.fill_token is not None and self.fill_token in text and text_pair is None:
text, text_pair = text.split(self.fill_token)
is_infilling = True
if not is_infilling:
return super()._encode_plus(text, text_pair=text_pair, add_special_tokens=add_special_tokens, **kwargs)
if (
text_pair is None
or (isinstance(text_pair, str) and len(text_pair) < 1)
or (isinstance(text_pair, list) and len(text_pair) == 0)
):
return super()._encode_plus(text, text_pair=text_pair, add_special_tokens=add_special_tokens, **kwargs)
if None in (self.prefix_id, self.middle_id, self.suffix_id):
raise ValueError(
"The input includes a `prefix` and a `suffix` used for the infilling task,"
" the `prefix_id, middle_id, suffix_id` must all be initialized. Current"
f" values : {self.prefix_id, self.middle_id, self.suffix_id}"
)
self.set_infilling_processor(False, suffix_first=suffix_first, add_special_tokens=add_special_tokens)
kwargs.pop("text_pair", None)
if isinstance(text, str):
text = " " + text
elif isinstance(text, list):
text = [" " + t if isinstance(t, str) else t for t in text]
result = super()._encode_plus(text, text_pair=text_pair, add_special_tokens=True, **kwargs)
self.set_infilling_processor(True)
return result
__all__ = ["CodeLlamaTokenizer", "CodeLlamaTokenizerFast"]
# Backward alias
CodeLlamaTokenizerFast = CodeLlamaTokenizer
| CodeLlamaTokenizer |
python | pypa__hatch | src/hatch/env/plugin/interface.py | {
"start": 38317,
"end": 48079
} | class ____:
def __init__(self, env: EnvironmentInterface, config: dict[str, Any]):
self.env = env
self.config = config
@cached_property
def parallel(self) -> bool:
parallel = self.config.get("parallel", True)
if not isinstance(parallel, bool):
message = f"Field `tool.hatch.envs.{self.env.name}.workspace.parallel` must be a boolean"
raise TypeError(message)
return parallel
def get_dependencies(self) -> list[str]:
static_members: list[WorkspaceMember] = []
dynamic_members: list[WorkspaceMember] = []
for member in self.members:
if member.has_static_dependencies:
static_members.append(member)
else:
dynamic_members.append(member)
all_dependencies = []
for member in static_members:
dependencies, features = member.get_dependencies()
all_dependencies.extend(dependencies)
for feature in member.features:
all_dependencies.extend(features.get(feature, []))
if self.parallel:
from concurrent.futures import ThreadPoolExecutor
def get_member_deps(member):
with self.env.app.status(f"Checking workspace member: {member.name}"):
dependencies, features = member.get_dependencies()
deps = list(dependencies)
for feature in member.features:
deps.extend(features.get(feature, []))
return deps
with ThreadPoolExecutor() as executor:
results = executor.map(get_member_deps, dynamic_members)
for deps in results:
all_dependencies.extend(deps)
else:
for member in dynamic_members:
with self.env.app.status(f"Checking workspace member: {member.name}"):
dependencies, features = member.get_dependencies()
all_dependencies.extend(dependencies)
for feature in member.features:
all_dependencies.extend(features.get(feature, []))
return all_dependencies
@cached_property
def members(self) -> list[WorkspaceMember]:
import fnmatch
from hatch.project.core import Project
from hatch.utils.fs import Path
from hatch.utils.metadata import normalize_project_name
raw_members = self.config.get("members", [])
if not isinstance(raw_members, list):
message = f"Field `tool.hatch.envs.{self.env.name}.workspace.members` must be an array"
raise TypeError(message)
# Get exclude patterns
exclude_patterns = self.config.get("exclude", [])
if not isinstance(exclude_patterns, list):
message = f"Field `tool.hatch.envs.{self.env.name}.workspace.exclude` must be an array"
raise TypeError(message)
# First normalize configuration with context expansion
member_data: list[dict[str, Any]] = []
with self.env.apply_context():
for i, data in enumerate(raw_members, 1):
if isinstance(data, str):
expanded_path = self.env.metadata.context.format(data)
member_data.append({"path": expanded_path, "features": ()})
elif isinstance(data, dict):
if "path" not in data:
message = (
f"Member #{i} of field `tool.hatch.envs.{self.env.name}.workspace.members` must define "
f"a `path` key"
)
raise TypeError(message)
path = data["path"]
if not isinstance(path, str):
message = (
f"Option `path` of member #{i} of field `tool.hatch.envs.{self.env.name}.workspace.members` "
f"must be a string"
)
raise TypeError(message)
if not path:
message = (
f"Option `path` of member #{i} of field `tool.hatch.envs.{self.env.name}.workspace.members` "
f"cannot be an empty string"
)
raise ValueError(message)
expanded_path = self.env.metadata.context.format(path)
features = data.get("features", [])
if not isinstance(features, list):
message = (
f"Option `features` of member #{i} of field `tool.hatch.envs.{self.env.name}.workspace."
f"members` must be an array of strings"
)
raise TypeError(message)
all_features: set[str] = set()
for j, feature in enumerate(features, 1):
if not isinstance(feature, str):
message = (
f"Feature #{j} of option `features` of member #{i} of field "
f"`tool.hatch.envs.{self.env.name}.workspace.members` must be a string"
)
raise TypeError(message)
if not feature:
message = (
f"Feature #{j} of option `features` of member #{i} of field "
f"`tool.hatch.envs.{self.env.name}.workspace.members` cannot be an empty string"
)
raise ValueError(message)
normalized_feature = normalize_project_name(feature)
if normalized_feature in all_features:
message = (
f"Feature #{j} of option `features` of member #{i} of field "
f"`tool.hatch.envs.{self.env.name}.workspace.members` is a duplicate"
)
raise ValueError(message)
all_features.add(normalized_feature)
member_data.append({"path": expanded_path, "features": tuple(sorted(all_features))})
else:
message = (
f"Member #{i} of field `tool.hatch.envs.{self.env.name}.workspace.members` must be "
f"a string or an inline table"
)
raise TypeError(message)
root = str(self.env.root)
member_paths: dict[str, WorkspaceMember] = {}
for data in member_data:
# Given root R and member spec M, we need to find:
#
# 1. The absolute path AP of R/M
# 2. The shared prefix SP of R and AP
# 3. The relative path RP of M from AP
#
# For example, if:
#
# R = /foo/bar/baz
# M = ../dir/pkg-*
#
# Then:
#
# AP = /foo/bar/dir/pkg-*
# SP = /foo/bar
# RP = dir/pkg-*
path_spec = data["path"]
normalized_path = os.path.normpath(os.path.join(root, path_spec))
absolute_path = os.path.abspath(normalized_path)
shared_prefix = os.path.commonprefix([root, absolute_path])
relative_path = os.path.relpath(absolute_path, shared_prefix)
# Now we have the necessary information to perform an optimized glob search for members
members_found = False
for member_path in find_members(root, relative_path.split(os.sep)):
# Check if member should be excluded
relative_member_path = os.path.relpath(member_path, root)
should_exclude = False
for exclude_pattern in exclude_patterns:
if fnmatch.fnmatch(relative_member_path, exclude_pattern) or fnmatch.fnmatch(
member_path, exclude_pattern
):
should_exclude = True
break
if should_exclude:
continue
project_file = os.path.join(member_path, "pyproject.toml")
if not os.path.isfile(project_file):
message = (
f"Member derived from `{path_spec}` of field "
f"`tool.hatch.envs.{self.env.name}.workspace.members` is not a project (no `pyproject.toml` "
f"file): {member_path}"
)
raise OSError(message)
members_found = True
if member_path in member_paths:
message = (
f"Member derived from `{path_spec}` of field "
f"`tool.hatch.envs.{self.env.name}.workspace.members` is a duplicate: {member_path}"
)
raise ValueError(message)
project = Project(Path(member_path), locate=False)
project.set_app(self.env.app)
member_paths[member_path] = WorkspaceMember(project, features=data["features"])
if not members_found:
message = (
f"No members could be derived from `{path_spec}` of field "
f"`tool.hatch.envs.{self.env.name}.workspace.members`: {absolute_path}"
)
raise OSError(message)
return list(member_paths.values())
| Workspace |
python | getsentry__sentry | src/sentry/api/endpoints/project_symbol_sources.py | {
"start": 8985,
"end": 15155
} | class ____(ProjectEndpoint):
owner = ApiOwner.OWNERS_INGEST
publish_status = {
"GET": ApiPublishStatus.PUBLIC,
"DELETE": ApiPublishStatus.PUBLIC,
"POST": ApiPublishStatus.PUBLIC,
"PUT": ApiPublishStatus.PUBLIC,
}
@extend_schema(
operation_id="Retrieve a Project's Symbol Sources",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.PROJECT_ID_OR_SLUG,
ProjectParams.source_id(
"The ID of the source to look up. If this is not provided, all sources are returned.",
False,
),
],
responses={
200: REDACTED_SOURCES_SCHEMA,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=ProjectExamples.GET_SYMBOL_SOURCES,
)
def get(self, request: Request, project: Project) -> Response:
"""
List custom symbol sources configured for a project.
"""
id = request.GET.get("id")
custom_symbol_sources_json = project.get_option("sentry:symbol_sources") or []
sources = parse_sources(custom_symbol_sources_json, filter_appconnect=False)
redacted = redact_source_secrets(sources)
if id:
for source in redacted:
if source["id"] == id:
return Response([source])
return Response(data={"error": f"Unknown source id: {id}"}, status=404)
return Response(redacted)
@extend_schema(
operation_id="Delete a Symbol Source from a Project",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.PROJECT_ID_OR_SLUG,
ProjectParams.source_id("The ID of the source to delete.", True),
],
responses={
204: RESPONSE_NO_CONTENT,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=ProjectExamples.DELETE_SYMBOL_SOURCE,
)
def delete(self, request: Request, project: Project) -> Response:
"""
Delete a custom symbol source from a project.
"""
id = request.GET.get("id")
custom_symbol_sources_json = project.get_option("sentry:symbol_sources") or []
sources = parse_sources(custom_symbol_sources_json, filter_appconnect=False)
if id:
filtered_sources = [src for src in sources if src["id"] != id]
if len(filtered_sources) == len(sources):
return Response(data={"error": f"Unknown source id: {id}"}, status=404)
serialized = orjson.dumps(filtered_sources).decode()
project.update_option("sentry:symbol_sources", serialized)
return Response(status=204)
return Response(data={"error": "Missing source id"}, status=404)
@extend_schema(
operation_id="Add a Symbol Source to a Project",
parameters=[GlobalParams.ORG_ID_OR_SLUG, GlobalParams.PROJECT_ID_OR_SLUG],
request=SourceSerializer,
responses={
201: REDACTED_SOURCE_SCHEMA,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
},
examples=ProjectExamples.ADD_SYMBOL_SOURCE,
)
def post(self, request: Request, project: Project) -> Response:
"""
Add a custom symbol source to a project.
"""
custom_symbol_sources_json = project.get_option("sentry:symbol_sources") or []
sources = parse_sources(custom_symbol_sources_json, filter_appconnect=False)
source = request.data
if "id" in source:
id = source["id"]
else:
id = str(uuid4())
source["id"] = id
sources.append(source)
try:
validate_sources(sources)
except InvalidSourcesError:
return Response(status=400)
serialized = orjson.dumps(sources).decode()
project.update_option("sentry:symbol_sources", serialized)
redacted = redact_source_secrets([source])
return Response(data=redacted[0], status=201)
@extend_schema(
operation_id="Update a Project's Symbol Source",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.PROJECT_ID_OR_SLUG,
ProjectParams.source_id("The ID of the source to update.", True),
],
request=SourceSerializer,
responses={
200: REDACTED_SOURCE_SCHEMA,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=ProjectExamples.UPDATE_SYMBOL_SOURCE,
)
def put(self, request: Request, project: Project) -> Response:
"""
Update a custom symbol source in a project.
"""
id = request.GET.get("id")
source = request.data
custom_symbol_sources_json = project.get_option("sentry:symbol_sources") or []
sources = parse_sources(custom_symbol_sources_json, filter_appconnect=False)
if id is None:
return Response(data={"error": "Missing source id"}, status=404)
if "id" not in source:
source["id"] = str(uuid4())
try:
sources_by_id = {src["id"]: src for src in sources}
backfill_source(source, sources_by_id)
except InvalidSourcesError:
return Response(status=400)
except KeyError:
return Response(status=400)
found = False
for i in range(len(sources)):
if sources[i]["id"] == id:
found = True
sources[i] = source
break
if not found:
return Response(data={"error": f"Unknown source id: {id}"}, status=404)
try:
validate_sources(sources)
except InvalidSourcesError as e:
return Response(data={"error": str(e)}, status=400)
serialized = orjson.dumps(sources).decode()
project.update_option("sentry:symbol_sources", serialized)
redacted = redact_source_secrets([source])
return Response(data=redacted[0], status=200)
| ProjectSymbolSourcesEndpoint |
python | spack__spack | lib/spack/spack/test/install.py | {
"start": 3717,
"end": 12854
} | class ____:
def __init__(self, wrapped_rm_prefix):
self.removed = False
self.wrapped_rm_prefix = wrapped_rm_prefix
def remove_prefix(self):
self.removed = True
self.wrapped_rm_prefix()
def test_partial_install_delete_prefix_and_stage(install_mockery, mock_fetch, working_env):
s = spack.concretize.concretize_one("canfail")
instance_rm_prefix = s.package.remove_prefix
s.package.remove_prefix = mock_remove_prefix
with pytest.raises(MockInstallError):
PackageInstaller([s.package], explicit=True).install()
assert os.path.isdir(s.package.prefix)
rm_prefix_checker = RemovePrefixChecker(instance_rm_prefix)
s.package.remove_prefix = rm_prefix_checker.remove_prefix
# must clear failure markings for the package before re-installing it
spack.store.STORE.failure_tracker.clear(s, True)
s.package.set_install_succeed()
PackageInstaller([s.package], explicit=True, restage=True).install()
assert rm_prefix_checker.removed
assert s.package.spec.installed
@pytest.mark.not_on_windows("Fails spuriously on Windows")
@pytest.mark.disable_clean_stage_check
def test_failing_overwrite_install_should_keep_previous_installation(
mock_fetch, install_mockery, working_env
):
"""
Make sure that whenever `spack install --overwrite` fails, spack restores
the original install prefix instead of cleaning it.
"""
# Do a successful install
s = spack.concretize.concretize_one("canfail")
s.package.set_install_succeed()
# Do a failing overwrite install
PackageInstaller([s.package], explicit=True).install()
s.package.set_install_fail()
kwargs = {"overwrite": [s.dag_hash()]}
with pytest.raises(Exception):
PackageInstaller([s.package], explicit=True, **kwargs).install()
assert s.package.spec.installed
assert os.path.exists(s.prefix)
def test_dont_add_patches_to_installed_package(install_mockery, mock_fetch, monkeypatch):
dependency = spack.concretize.concretize_one("dependency-install")
PackageInstaller([dependency.package], explicit=True).install()
dependency_hash = dependency.dag_hash()
dependent = spack.concretize.concretize_one("dependent-install ^/" + dependency_hash)
monkeypatch.setitem(
dependency.package.patches,
"dependency-install",
[spack.patch.UrlPatch(dependent.package, "file://fake.patch", sha256="unused-hash")],
)
assert dependent["dependency-install"] == dependency
def test_installed_dependency_request_conflicts(install_mockery, mock_fetch, mutable_mock_repo):
dependency = spack.concretize.concretize_one("dependency-install")
PackageInstaller([dependency.package], explicit=True).install()
dependency_hash = dependency.dag_hash()
dependent = Spec("conflicting-dependent ^/" + dependency_hash)
with pytest.raises(spack.error.UnsatisfiableSpecError):
spack.concretize.concretize_one(dependent)
def test_install_times(install_mockery, mock_fetch, mutable_mock_repo):
"""Test install times added."""
spec = spack.concretize.concretize_one("dev-build-test-install-phases")
PackageInstaller([spec.package], explicit=True).install()
# Ensure dependency directory exists after the installation.
install_times = os.path.join(spec.package.prefix, ".spack", spack_times_log)
assert os.path.isfile(install_times)
# Ensure the phases are included
with open(install_times, "r", encoding="utf-8") as timefile:
times = sjson.load(timefile.read())
# The order should be maintained
phases = [x["name"] for x in times["phases"]]
assert phases == ["stage", "one", "two", "three", "install", "post-install"]
assert all(isinstance(x["seconds"], float) for x in times["phases"])
@pytest.fixture()
def install_upstream(tmp_path_factory: pytest.TempPathFactory, gen_mock_layout, install_mockery):
"""Provides a function that installs a specified set of specs to an
upstream database. The function returns a store which points to the
upstream, as well as the upstream layout (for verifying that dependent
installs are using the upstream installs).
"""
mock_db_root = str(tmp_path_factory.mktemp("mock_db_root"))
upstream_layout = gen_mock_layout("a")
prepared_db = spack.database.Database(mock_db_root, layout=upstream_layout)
spack.config.CONFIG.push_scope(
spack.config.InternalConfigScope(
name="install-upstream-fixture",
data={"upstreams": {"mock1": {"install_tree": prepared_db.root}}},
)
)
def _install_upstream(*specs):
for spec_str in specs:
prepared_db.add(spack.concretize.concretize_one(spec_str))
downstream_root = str(tmp_path_factory.mktemp("mock_downstream_db_root"))
return downstream_root, upstream_layout
return _install_upstream
def test_installed_upstream_external(install_upstream, mock_fetch):
"""Check that when a dependency package is recorded as installed in
an upstream database that it is not reinstalled.
"""
store_root, _ = install_upstream("externaltool")
with spack.store.use_store(store_root):
dependent = spack.concretize.concretize_one("externaltest")
new_dependency = dependent["externaltool"]
assert new_dependency.external
assert new_dependency.prefix == os.path.sep + os.path.join("path", "to", "external_tool")
PackageInstaller([dependent.package], explicit=True).install()
assert not os.path.exists(new_dependency.prefix)
assert os.path.exists(dependent.prefix)
def test_installed_upstream(install_upstream, mock_fetch):
"""Check that when a dependency package is recorded as installed in
an upstream database that it is not reinstalled.
"""
store_root, upstream_layout = install_upstream("dependency-install")
with spack.store.use_store(store_root):
dependency = spack.concretize.concretize_one("dependency-install")
dependent = spack.concretize.concretize_one("dependent-install")
new_dependency = dependent["dependency-install"]
assert new_dependency.installed_upstream
assert new_dependency.prefix == upstream_layout.path_for_spec(dependency)
PackageInstaller([dependent.package], explicit=True).install()
assert not os.path.exists(new_dependency.prefix)
assert os.path.exists(dependent.prefix)
@pytest.mark.disable_clean_stage_check
def test_partial_install_keep_prefix(install_mockery, mock_fetch, monkeypatch, working_env):
s = spack.concretize.concretize_one("canfail")
# If remove_prefix is called at any point in this test, that is an error
monkeypatch.setattr(spack.package_base.PackageBase, "remove_prefix", mock_remove_prefix)
with pytest.raises(spack.build_environment.ChildError):
PackageInstaller([s.package], explicit=True, keep_prefix=True).install()
assert os.path.exists(s.package.prefix)
# must clear failure markings for the package before re-installing it
spack.store.STORE.failure_tracker.clear(s, True)
s.package.set_install_succeed()
PackageInstaller([s.package], explicit=True, keep_prefix=True).install()
assert s.package.spec.installed
def test_second_install_no_overwrite_first(install_mockery, mock_fetch, monkeypatch):
s = spack.concretize.concretize_one("canfail")
monkeypatch.setattr(spack.package_base.PackageBase, "remove_prefix", mock_remove_prefix)
s.package.set_install_succeed()
PackageInstaller([s.package], explicit=True).install()
assert s.package.spec.installed
# If Package.install is called after this point, it will fail
s.package.set_install_fail()
PackageInstaller([s.package], explicit=True).install()
def test_install_prefix_collision_fails(config, mock_fetch, mock_packages, tmp_path: pathlib.Path):
"""
Test that different specs with coinciding install prefixes will fail
to install.
"""
projections = {"projections": {"all": "one-prefix-per-package-{name}"}}
with spack.store.use_store(str(tmp_path), extra_data=projections):
with spack.config.override("config:checksum", False):
pkg_a = spack.concretize.concretize_one("libelf@0.8.13").package
pkg_b = spack.concretize.concretize_one("libelf@0.8.12").package
PackageInstaller([pkg_a], explicit=True, fake=True).install()
with pytest.raises(InstallError, match="Install prefix collision"):
PackageInstaller([pkg_b], explicit=True, fake=True).install()
def test_store(install_mockery, mock_fetch):
spec = spack.concretize.concretize_one("cmake-client")
pkg = spec.package
PackageInstaller([pkg], fake=True, explicit=True).install()
@pytest.mark.disable_clean_stage_check
def test_failing_build(install_mockery, mock_fetch, capfd):
spec = spack.concretize.concretize_one("failing-build")
pkg = spec.package
with pytest.raises(spack.build_environment.ChildError, match="Expected failure"):
PackageInstaller([pkg], explicit=True).install()
| RemovePrefixChecker |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 20125,
"end": 20230
} | class ____(BasicModule):
def forward(self, x):
return BasicModule.forward(self, x)
| SuperModule2 |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0125_update_naming.py | {
"start": 190,
"end": 3585
} | class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("projects", "0124_remove_zh_locale"),
]
operations = [
migrations.AlterField(
model_name="historicalproject",
name="repo",
field=models.CharField(
db_index=True,
help_text="Git repository URL",
max_length=255,
validators=[readthedocs.projects.validators.RepositoryURLValidator()],
verbose_name="Repository URL",
),
),
migrations.AlterField(
model_name="historicalproject",
name="versioning_scheme",
field=models.CharField(
choices=[
(
"multiple_versions_with_translations",
"Multiple versions with translations (/<language>/<version>/<filename>)",
),
(
"multiple_versions_without_translations",
"Multiple versions without translations (/<version>/<filename>)",
),
(
"single_version_without_translations",
"Single version without translations (/<filename>)",
),
],
default="multiple_versions_with_translations",
help_text="This affects URL your documentation is served from, and if it supports translations or versions. Changing the versioning scheme will break your current URLs, so you might need to create a redirect.",
max_length=120,
null=True,
verbose_name="URL versioning scheme",
),
),
migrations.AlterField(
model_name="project",
name="repo",
field=models.CharField(
db_index=True,
help_text="Git repository URL",
max_length=255,
validators=[readthedocs.projects.validators.RepositoryURLValidator()],
verbose_name="Repository URL",
),
),
migrations.AlterField(
model_name="project",
name="versioning_scheme",
field=models.CharField(
choices=[
(
"multiple_versions_with_translations",
"Multiple versions with translations (/<language>/<version>/<filename>)",
),
(
"multiple_versions_without_translations",
"Multiple versions without translations (/<version>/<filename>)",
),
(
"single_version_without_translations",
"Single version without translations (/<filename>)",
),
],
default="multiple_versions_with_translations",
help_text="This affects URL your documentation is served from, and if it supports translations or versions. Changing the versioning scheme will break your current URLs, so you might need to create a redirect.",
max_length=120,
null=True,
verbose_name="URL versioning scheme",
),
),
]
| Migration |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol53.py | {
"start": 1131,
"end": 1235
} | class ____(Proto_CoSelf):
def m[T: Impl_CoGenericExplicit2](self: T) -> T: ...
| Impl_CoGenericExplicit2 |
python | PyCQA__pylint | tests/functional/s/super/super_checks.py | {
"start": 4034,
"end": 4126
} | class ____(Parent):
def method(self):
print("Niece")
super().method()
| Niece |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/sparse_ops/sparse_tensors_map_ops_test.py | {
"start": 1585,
"end": 7929
} | class ____(test.TestCase):
def _SparseTensorPlaceholder(self, dtype=None):
if dtype is None:
dtype = dtypes.int32
return sparse_tensor_lib.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtype), array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_3x4(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2],
[2, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([3, 4]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_1x1x1(self):
ind = np.array([[0, 0, 0]]).astype(np.int64)
val = np.array([0]).astype(np.int32)
shape = np.array([3, 4, 5]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
@test_util.run_deprecated_v1
def testAddTakeMany(self):
with self.session(graph=ops.Graph(), use_gpu=False) as sess:
sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
handle0 = add_sparse_to_tensors_map(sp_input0, shared_name="a")
handle1 = add_sparse_to_tensors_map(sp_input1, shared_name="a")
self.assertEqual(handle0.get_shape(), ())
handles_concat = array_ops_stack.stack([handle0, handle1])
sp_out = take_many_sparse_from_tensors_map(
sparse_map_op=handle0.op, sparse_handles=handles_concat)
combined_indices, combined_values, combined_shape = self.evaluate(sp_out)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], sp_input0[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], sp_input1[0])
self.assertAllEqual(combined_values[:6], sp_input0[1])
self.assertAllEqual(combined_values[6:], sp_input1[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
@test_util.run_deprecated_v1
def testFeedAddTakeMany(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
handle = add_sparse_to_tensors_map(sp_input)
handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
sparse_handles = ops.convert_to_tensor(
[handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=sparse_handles)
combined_indices, combined_values, combined_shape = self.evaluate(
sp_roundtrip)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], input1_val[0])
self.assertAllEqual(combined_values[:6], input0_val[1])
self.assertAllEqual(combined_values[6:], input1_val[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
@test_util.run_deprecated_v1
def testAddManyTakeManyRoundTrip(self):
with self.session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
handles = add_many_sparse_to_tensors_map(sparse_tensor)
roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handles.op, sparse_handles=handles)
handles_value, roundtrip_value = sess.run(
[handles, roundtrip],
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertEqual(handles_value.shape, (4,))
self.assertAllEqual(roundtrip_value.indices, indices_value)
self.assertAllEqual(roundtrip_value.values, values_value)
self.assertAllEqual(roundtrip_value.dense_shape, shape_value)
@test_util.run_deprecated_v1
def testDeserializeFailsInconsistentRank(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_1x1x1()
handle = add_sparse_to_tensors_map(sp_input)
handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
handle_concat = ops.convert_to_tensor(
[handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=handle_concat)
with self.assertRaisesOpError(
r"Inconsistent rank across SparseTensors: rank prior to "
r"SparseTensor\[1\] was: 3 but rank of SparseTensor\[1\] is: 4"):
self.evaluate(sp_roundtrip)
@test_util.run_deprecated_v1
def testTakeManyFailsWrongInputOp(self):
with self.session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6(np.arange(6))
handle = add_sparse_to_tensors_map(input_val)
handle_value = self.evaluate(handle)
bad_handle = handle_value + 10
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=[handle_value, bad_handle])
with self.assertRaisesOpError(r"Unable to find SparseTensor: 10"):
self.evaluate(sp_roundtrip)
| SparseTensorsMapTest |
python | pypa__hatch | src/hatch/template/files_feature_ci.py | {
"start": 66,
"end": 1416
} | class ____(File):
TEMPLATE = """\
name: test
on:
push:
branches: [main, master]
pull_request:
branches: [main, master]
concurrency:
group: test-${{ github.head_ref }}
cancel-in-progress: true
env:
PYTHONUNBUFFERED: "1"
FORCE_COLOR: "1"
jobs:
run:
name: Python ${{ matrix.python-version }} on ${{ startsWith(matrix.os, 'macos-') && 'macOS' || startsWith(matrix.os, 'windows-') && 'Windows' || 'Linux' }}
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13']
steps:
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install Hatch
run: pip install --upgrade hatch
- name: Run static analysis
run: hatch fmt --check
- name: Run tests
run: hatch test --python ${{ matrix.python-version }} --cover --randomize --parallel --retries 2 --retry-delay 1
"""
def __init__(
self,
template_config: dict, # noqa: ARG002
plugin_config: dict, # noqa: ARG002
):
super().__init__(Path(".github", "workflows", "test.yml"), self.TEMPLATE)
| CommandLinePackage |
python | django__django | tests/invalid_models_tests/test_ordinary_fields.py | {
"start": 18699,
"end": 20487
} | class ____(SimpleTestCase):
maxDiff = None
def test_fix_default_value(self):
class Model(models.Model):
field_dt = models.DateTimeField(default=now())
field_d = models.DateTimeField(default=now().date())
field_now = models.DateTimeField(default=now)
field_dt = Model._meta.get_field("field_dt")
field_d = Model._meta.get_field("field_d")
field_now = Model._meta.get_field("field_now")
errors = field_dt.check()
errors.extend(field_d.check())
errors.extend(field_now.check()) # doesn't raise a warning
self.assertEqual(
errors,
[
DjangoWarning(
"Fixed default value provided.",
hint="It seems you set a fixed date / time / datetime "
"value as default for this field. This may not be "
"what you want. If you want to have the current date "
"as default, use `django.utils.timezone.now`",
obj=field_dt,
id="fields.W161",
),
DjangoWarning(
"Fixed default value provided.",
hint="It seems you set a fixed date / time / datetime "
"value as default for this field. This may not be "
"what you want. If you want to have the current date "
"as default, use `django.utils.timezone.now`",
obj=field_d,
id="fields.W161",
),
],
)
@override_settings(USE_TZ=True)
def test_fix_default_value_tz(self):
self.test_fix_default_value()
@isolate_apps("invalid_models_tests")
| DateTimeFieldTests |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0057_add_page_rank.py | {
"start": 179,
"end": 769
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0056_add_disable_analytics"),
]
operations = [
migrations.AddField(
model_name="importedfile",
name="rank",
field=models.IntegerField(
null=True,
validators=[
django.core.validators.MinValueValidator(-10),
django.core.validators.MaxValueValidator(10),
],
verbose_name="Page search rank",
),
),
]
| Migration |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorMetadataDefinitionV0.py | {
"start": 12478,
"end": 15033
} | class ____(BaseModel):
class Config:
extra = Extra.forbid
name: str
icon: Optional[str] = None
definitionId: UUID
connectorBuildOptions: Optional[ConnectorBuildOptions] = None
connectorTestSuitesOptions: Optional[List[ConnectorTestSuiteOptions]] = None
connectorType: Literal["destination", "source"]
dockerRepository: str
dockerImageTag: str
supportsDbt: Optional[bool] = None
supportsNormalization: Optional[bool] = None
license: str
documentationUrl: AnyUrl
externalDocumentationUrls: Optional[List[ExternalDocumentationUrl]] = Field(
None,
description="An array of external vendor documentation URLs (changelogs, API references, deprecation notices, etc.)",
)
githubIssueLabel: str
maxSecondsBetweenMessages: Optional[int] = Field(
None,
description="Maximum delay between 2 airbyte protocol messages, in second. The source will timeout if this delay is reached",
)
releaseDate: Optional[date] = Field(
None,
description="The date when this connector was first released, in yyyy-mm-dd format.",
)
protocolVersion: Optional[str] = Field(
None, description="the Airbyte Protocol version supported by the connector"
)
erdUrl: Optional[str] = Field(
None, description="The URL where you can visualize the ERD"
)
connectorSubtype: Literal[
"api",
"database",
"datalake",
"file",
"custom",
"message_queue",
"unknown",
"vectorstore",
]
releaseStage: ReleaseStage
supportLevel: Optional[SupportLevel] = None
tags: Optional[List[str]] = Field(
[],
description="An array of tags that describe the connector. E.g: language:python, keyword:rds, etc.",
)
registryOverrides: Optional[RegistryOverride] = None
allowedHosts: Optional[AllowedHosts] = None
releases: Optional[ConnectorReleases] = None
normalizationConfig: Optional[NormalizationDestinationDefinitionConfig] = None
suggestedStreams: Optional[SuggestedStreams] = None
resourceRequirements: Optional[ActorDefinitionResourceRequirements] = None
ab_internal: Optional[AirbyteInternal] = None
remoteRegistries: Optional[RemoteRegistries] = None
supportsRefreshes: Optional[bool] = False
generated: Optional[GeneratedFields] = None
supportsFileTransfer: Optional[bool] = False
supportsDataActivation: Optional[bool] = False
connectorIPCOptions: Optional[ConnectorIPCOptions] = None
| Data |
python | huggingface__transformers | src/transformers/models/lfm2_vl/modeling_lfm2_vl.py | {
"start": 3814,
"end": 5398
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for Lfm2Vl outputs, with hidden states and attentions.
"""
)
| Lfm2VlCausalLMOutputWithPast |
python | sdispater__pendulum | src/pendulum/duration.py | {
"start": 13207,
"end": 14771
} | class ____(Duration):
"""
Duration that expresses a time difference in absolute values.
"""
def __new__(
cls,
days: float = 0,
seconds: float = 0,
microseconds: float = 0,
milliseconds: float = 0,
minutes: float = 0,
hours: float = 0,
weeks: float = 0,
years: float = 0,
months: float = 0,
) -> AbsoluteDuration:
if not isinstance(years, int) or not isinstance(months, int):
raise ValueError("Float year and months are not supported")
self = timedelta.__new__(
cls, days, seconds, microseconds, milliseconds, minutes, hours, weeks
)
# We need to compute the total_seconds() value
# on a native timedelta object
delta = timedelta(
days, seconds, microseconds, milliseconds, minutes, hours, weeks
)
# Intuitive normalization
self._total = delta.total_seconds()
total = abs(self._total)
self._microseconds = round(total % 1 * 1e6)
days, self._seconds = divmod(int(total), SECONDS_PER_DAY)
self._days = abs(days + years * 365 + months * 30)
self._weeks, self._remaining_days = divmod(days, 7)
self._months = abs(months)
self._years = abs(years)
return self
def total_seconds(self) -> float:
return abs(self._total)
@property
def invert(self) -> bool:
if self._invert is None:
self._invert = self._total < 0
return self._invert
| AbsoluteDuration |
python | getsentry__sentry | src/sentry/integrations/msteams/card_builder/block.py | {
"start": 2661,
"end": 2743
} | class ____(TypedDict):
type: Literal["CodeBlock"]
codeSnippet: str
| CodeBlock |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 496627,
"end": 497096
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of CloseDiscussion"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "discussion")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
discussion = sgqlc.types.Field("Discussion", graphql_name="discussion")
"""The discussion that was closed."""
| CloseDiscussionPayload |
python | tensorflow__tensorflow | tensorflow/compiler/tests/stateless_random_ops_test.py | {
"start": 16361,
"end": 17812
} | class ____(test.Benchmark):
"""Microbenchmarks for the stateless random ops."""
def _benchmarkUniform(self, name, dtype, use_xla_jit):
def builder_fn():
shape = (10, 1000, 1000)
seed_var = variables.Variable((312, 456),
dtype=dtypes.int32,
name='input')
random_t = stateless.stateless_random_uniform(
shape, seed=seed_var, dtype=dtype)
return '%s.shape%s' % (name, shape), [random_t]
xla_test.Benchmark(self, builder_fn, use_xla_jit=use_xla_jit, device='cpu')
def benchmarkUniformF16(self):
self._benchmarkUniform(
'uniform_f16', dtype=dtypes.float16, use_xla_jit=False)
def benchmarkUniformF32(self):
self._benchmarkUniform(
'uniform_f32', dtype=dtypes.float32, use_xla_jit=False)
def benchmarkUniformF64(self):
self._benchmarkUniform(
'uniform_f64', dtype=dtypes.float64, use_xla_jit=False)
def benchmarkUniformF16XLA(self):
self._benchmarkUniform(
'uniform_f16', dtype=dtypes.float16, use_xla_jit=True)
def benchmarkUniformF32XLA(self):
self._benchmarkUniform(
'uniform_f32', dtype=dtypes.float32, use_xla_jit=True)
def benchmarkUniformF64XLA(self):
self._benchmarkUniform(
'uniform_f64', dtype=dtypes.float64, use_xla_jit=True)
if __name__ == '__main__':
config.set_soft_device_placement(False)
test.main()
| StatelessRandomOpsBenchmark |
python | apache__airflow | providers/google/tests/unit/google/suite/transfers/test_gcs_to_gdrive.py | {
"start": 1136,
"end": 9831
} | class ____:
@mock.patch(MODULE + ".GCSHook")
@mock.patch(MODULE + ".GoogleDriveHook")
@mock.patch(MODULE + ".tempfile.NamedTemporaryFile")
def test_should_copy_single_file(self, mock_named_temporary_file, mock_gdrive, mock_gcs_hook):
type(mock_named_temporary_file.return_value.__enter__.return_value).name = mock.PropertyMock(
side_effect=["TMP1"]
)
task = GCSToGoogleDriveOperator(
task_id="copy_single_file",
source_bucket="data",
source_object="sales/sales-2017/january.avro",
destination_object="copied_sales/2017/january-backup.avro",
impersonation_chain=None,
)
task.execute(mock.MagicMock())
mock_gcs_hook.assert_has_calls(
[
mock.call(
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
),
mock.call().download(
bucket_name="data", filename="TMP1", object_name="sales/sales-2017/january.avro"
),
]
)
mock_gdrive.assert_has_calls(
[
mock.call(
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
),
mock.call().upload_file(
local_location="TMP1",
remote_location="copied_sales/2017/january-backup.avro",
folder_id="root",
),
]
)
@mock.patch(MODULE + ".GCSHook")
@mock.patch(MODULE + ".GoogleDriveHook")
@mock.patch(MODULE + ".tempfile.NamedTemporaryFile")
def test_should_copy_single_file_with_folder(self, mock_named_temporary_file, mock_gdrive, mock_gcs_hook):
type(mock_named_temporary_file.return_value.__enter__.return_value).name = mock.PropertyMock(
side_effect=["TMP1"]
)
task = GCSToGoogleDriveOperator(
task_id="copy_single_file",
source_bucket="data",
source_object="sales/sales-2017/january.avro",
destination_object="copied_sales/2017/january-backup.avro",
destination_folder_id="aAopls6bE4tUllZVGJvRUU",
)
task.execute(mock.MagicMock())
mock_gcs_hook.assert_has_calls(
[
mock.call(
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
),
mock.call().download(
bucket_name="data", filename="TMP1", object_name="sales/sales-2017/january.avro"
),
]
)
mock_gdrive.assert_has_calls(
[
mock.call(
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
),
mock.call().upload_file(
local_location="TMP1",
remote_location="copied_sales/2017/january-backup.avro",
folder_id="aAopls6bE4tUllZVGJvRUU",
),
]
)
#
@mock.patch(MODULE + ".GCSHook")
@mock.patch(MODULE + ".GoogleDriveHook")
@mock.patch(MODULE + ".tempfile.NamedTemporaryFile")
def test_should_copy_files(self, mock_named_temporary_file, mock_gdrive, mock_gcs_hook):
mock_gcs_hook.return_value.list.return_value = ["sales/A.avro", "sales/B.avro", "sales/C.avro"]
type(mock_named_temporary_file.return_value.__enter__.return_value).name = mock.PropertyMock(
side_effect=["TMP1", "TMP2", "TMP3"]
)
task = GCSToGoogleDriveOperator(
task_id="copy_files",
source_bucket="data",
source_object="sales/sales-2017/*.avro",
destination_object="copied_sales/2017/",
impersonation_chain=IMPERSONATION_CHAIN,
)
task.execute(mock.MagicMock())
mock_gcs_hook.assert_has_calls(
[
mock.call(
gcp_conn_id="google_cloud_default",
impersonation_chain=IMPERSONATION_CHAIN,
),
mock.call().list("data", delimiter=".avro", prefix="sales/sales-2017/"),
# TODO: After deprecating delimiter and wildcards in source objects,
# remove previous line and uncomment the following:
# mock.call().list("data", match_glob="**/*.avro", prefix="sales/sales-2017/"),
mock.call().download(bucket_name="data", filename="TMP1", object_name="sales/A.avro"),
mock.call().download(bucket_name="data", filename="TMP2", object_name="sales/B.avro"),
mock.call().download(bucket_name="data", filename="TMP3", object_name="sales/C.avro"),
]
)
mock_gdrive.assert_has_calls(
[
mock.call(
gcp_conn_id="google_cloud_default",
impersonation_chain=IMPERSONATION_CHAIN,
),
mock.call().upload_file(
local_location="TMP1", remote_location="sales/A.avro", folder_id="root"
),
mock.call().upload_file(
local_location="TMP2", remote_location="sales/B.avro", folder_id="root"
),
mock.call().upload_file(
local_location="TMP3", remote_location="sales/C.avro", folder_id="root"
),
]
)
@mock.patch(MODULE + ".GCSHook")
@mock.patch(MODULE + ".GoogleDriveHook")
@mock.patch(MODULE + ".tempfile.NamedTemporaryFile")
def test_should_move_files(self, mock_named_temporary_file, mock_gdrive, mock_gcs_hook):
type(mock_named_temporary_file.return_value.__enter__.return_value).name = mock.PropertyMock(
side_effect=["TMP1", "TMP2", "TMP3"]
)
mock_gcs_hook.return_value.list.return_value = ["sales/A.avro", "sales/B.avro", "sales/C.avro"]
task = GCSToGoogleDriveOperator(
task_id="move_files",
source_bucket="data",
source_object="sales/sales-2017/*.avro",
move_object=True,
impersonation_chain=IMPERSONATION_CHAIN,
)
task.execute(mock.MagicMock())
mock_gcs_hook.assert_has_calls(
[
mock.call(
gcp_conn_id="google_cloud_default",
impersonation_chain=IMPERSONATION_CHAIN,
),
mock.call().list("data", delimiter=".avro", prefix="sales/sales-2017/"),
# TODO: After deprecating delimiter and wildcards in source objects,
# remove previous line and uncomment the following:
# mock.call().list("data", match_glob="**/*.avro", prefix="sales/sales-2017/"),
mock.call().download(bucket_name="data", filename="TMP1", object_name="sales/A.avro"),
mock.call().delete("data", "sales/A.avro"),
mock.call().download(bucket_name="data", filename="TMP2", object_name="sales/B.avro"),
mock.call().delete("data", "sales/B.avro"),
mock.call().download(bucket_name="data", filename="TMP3", object_name="sales/C.avro"),
mock.call().delete("data", "sales/C.avro"),
]
)
mock_gdrive.assert_has_calls(
[
mock.call(
gcp_conn_id="google_cloud_default",
impersonation_chain=IMPERSONATION_CHAIN,
),
mock.call().upload_file(
local_location="TMP1", remote_location="sales/A.avro", folder_id="root"
),
mock.call().upload_file(
local_location="TMP2", remote_location="sales/B.avro", folder_id="root"
),
mock.call().upload_file(
local_location="TMP3", remote_location="sales/C.avro", folder_id="root"
),
]
)
@mock.patch(MODULE + ".GCSHook")
@mock.patch(MODULE + ".GoogleDriveHook")
@mock.patch(MODULE + ".tempfile.NamedTemporaryFile")
def test_should_raise_exception_on_multiple_wildcard(
self, mock_named_temporary_file, mock_gdrive, mock_gcs_hook
):
task = GCSToGoogleDriveOperator(
task_id="move_files", source_bucket="data", source_object="sales/*/*.avro", move_object=True
)
with pytest.raises(AirflowException, match="Only one wildcard"):
task.execute(mock.MagicMock())
| TestGcsToGDriveOperator |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/pubsub.py | {
"start": 2301,
"end": 28026
} | class ____(GoogleBaseHook):
"""
Hook for accessing Google Pub/Sub.
The Google Cloud project against which actions are applied is determined by
the project embedded in the Connection referenced by gcp_conn_id.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
enable_message_ordering: bool = False,
**kwargs,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
**kwargs,
)
self.enable_message_ordering = enable_message_ordering
self._client = None
def get_conn(self) -> PublisherClient:
"""
Retrieve connection to Google Cloud Pub/Sub.
:return: Google Cloud Pub/Sub client object.
"""
if not self._client:
self._client = PublisherClient(
credentials=self.get_credentials(),
client_info=CLIENT_INFO,
publisher_options=PublisherOptions(
enable_message_ordering=self.enable_message_ordering,
),
)
return self._client
@cached_property
def subscriber_client(self) -> SubscriberClient:
"""
Creates SubscriberClient.
:return: Google Cloud Pub/Sub client object.
"""
return SubscriberClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
@GoogleBaseHook.fallback_to_default_project_id
def publish(
self,
topic: str,
messages: list[dict],
project_id: str = PROVIDE_PROJECT_ID,
) -> None:
"""
Publish messages to a Pub/Sub topic.
:param topic: the Pub/Sub topic to which to publish; do not
include the ``projects/{project}/topics/`` prefix.
:param messages: messages to publish; if the data field in a
message is set, it should be a bytestring (utf-8 encoded)
https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage
:param project_id: Optional, the Google Cloud project ID in which to publish.
If set to None or missing, the default project_id from the Google Cloud connection is used.
"""
self._validate_messages(messages)
publisher = self.get_conn()
topic_path = f"projects/{project_id}/topics/{topic}"
self.log.info("Publish %d messages to topic (path) %s", len(messages), topic_path)
try:
for message in messages:
future = publisher.publish(
topic=topic_path, data=message.get("data", b""), **message.get("attributes", {})
)
future.result()
except GoogleAPICallError as e:
raise PubSubException(f"Error publishing to topic {topic_path}", e)
self.log.info("Published %d messages to topic (path) %s", len(messages), topic_path)
@staticmethod
def _validate_messages(messages) -> None:
for message in messages:
# To warn about broken backward compatibility
# TODO: remove one day
if "data" in message and isinstance(message["data"], str):
try:
b64decode(message["data"])
warnings.warn(
"The base 64 encoded string as 'data' field has been deprecated. "
"You should pass bytestring (utf-8 encoded).",
AirflowProviderDeprecationWarning,
stacklevel=4,
)
except ValueError:
pass
if not isinstance(message, dict):
raise PubSubException("Wrong message type. Must be a dictionary.")
if "data" not in message and "attributes" not in message:
raise PubSubException("Wrong message. Dictionary must contain 'data' or 'attributes'.")
if "data" in message and not isinstance(message["data"], bytes):
raise PubSubException("Wrong message. 'data' must be send as a bytestring")
if ("data" not in message and "attributes" in message and not message["attributes"]) or (
"attributes" in message and not isinstance(message["attributes"], dict)
):
raise PubSubException(
"Wrong message. If 'data' is not provided 'attributes' must be a non empty dictionary."
)
@GoogleBaseHook.fallback_to_default_project_id
def create_topic(
self,
topic: str,
project_id: str = PROVIDE_PROJECT_ID,
fail_if_exists: bool = False,
labels: dict[str, str] | None = None,
message_storage_policy: dict | MessageStoragePolicy = None,
kms_key_name: str | None = None,
schema_settings: dict | SchemaSettings = None,
message_retention_duration: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Create a Pub/Sub topic, if it does not already exist.
:param topic: the Pub/Sub topic name to create; do not
include the ``projects/{project}/topics/`` prefix.
:param project_id: Optional, the Google Cloud project ID in which to create the topic
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param fail_if_exists: if set, raise an exception if the topic
already exists
:param labels: Client-assigned labels; see
https://cloud.google.com/pubsub/docs/labels
:param message_storage_policy: Policy constraining the set
of Google Cloud regions where messages published to
the topic may be stored. If not present, then no constraints
are in effect.
Union[dict, google.cloud.pubsub_v1.types.MessageStoragePolicy]
:param kms_key_name: The resource name of the Cloud KMS CryptoKey
to be used to protect access to messages published on this topic.
The expected format is
``projects/*/locations/*/keyRings/*/cryptoKeys/*``.
:param schema_settings: (Optional) Settings for validating messages published against an
existing schema. The expected format is ``projects/*/schemas/*``.
:param message_retention_duration: (Optional) Indicates the minimum duration to retain a
message after it is published to the topic. The expected format is a duration in
seconds with up to nine fractional digits, ending with 's'. Example: "3.5s".
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
publisher = self.get_conn()
topic_path = f"projects/{project_id}/topics/{topic}"
# Add airflow-version label to the topic
labels = labels or {}
labels["airflow-version"] = "v" + version.replace(".", "-").replace("+", "-")
self.log.info("Creating topic (path) %s", topic_path)
try:
publisher.create_topic(
request={
"name": topic_path,
"labels": labels,
"message_storage_policy": message_storage_policy,
"kms_key_name": kms_key_name,
"schema_settings": schema_settings,
"message_retention_duration": message_retention_duration,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
except AlreadyExists:
self.log.warning("Topic already exists: %s", topic)
if fail_if_exists:
raise PubSubException(f"Topic already exists: {topic}")
except GoogleAPICallError as e:
raise PubSubException(f"Error creating topic {topic}", e)
self.log.info("Created topic (path) %s", topic_path)
@GoogleBaseHook.fallback_to_default_project_id
def delete_topic(
self,
topic: str,
project_id: str = PROVIDE_PROJECT_ID,
fail_if_not_exists: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete a Pub/Sub topic if it exists.
:param topic: the Pub/Sub topic name to delete; do not
include the ``projects/{project}/topics/`` prefix.
:param project_id: Optional, the Google Cloud project ID in which to delete the topic.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param fail_if_not_exists: if set, raise an exception if the topic
does not exist
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
publisher = self.get_conn()
topic_path = f"projects/{project_id}/topics/{topic}"
self.log.info("Deleting topic (path) %s", topic_path)
try:
publisher.delete_topic(
request={"topic": topic_path}, retry=retry, timeout=timeout, metadata=metadata or ()
)
except NotFound:
self.log.warning("Topic does not exist: %s", topic_path)
if fail_if_not_exists:
raise PubSubException(f"Topic does not exist: {topic_path}")
except GoogleAPICallError as e:
raise PubSubException(f"Error deleting topic {topic}", e)
self.log.info("Deleted topic (path) %s", topic_path)
@GoogleBaseHook.fallback_to_default_project_id
def create_subscription(
self,
topic: str,
project_id: str = PROVIDE_PROJECT_ID,
subscription: str | None = None,
subscription_project_id: str | None = None,
ack_deadline_secs: int = 10,
fail_if_exists: bool = False,
push_config: dict | PushConfig | None = None,
retain_acked_messages: bool | None = None,
message_retention_duration: dict | Duration | None = None,
labels: dict[str, str] | None = None,
enable_message_ordering: bool = False,
expiration_policy: dict | ExpirationPolicy | None = None,
filter_: str | None = None,
dead_letter_policy: dict | DeadLetterPolicy | None = None,
retry_policy: dict | RetryPolicy | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> str:
"""
Create a Pub/Sub subscription, if it does not already exist.
:param topic: the Pub/Sub topic name that the subscription will be bound
to create; do not include the ``projects/{project}/subscriptions/`` prefix.
:param project_id: Optional, the Google Cloud project ID of the topic that the subscription will be
bound to. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:param subscription: the Pub/Sub subscription name. If empty, a random
name will be generated using the uuid module
:param subscription_project_id: the Google Cloud project ID where the subscription
will be created. If unspecified, ``project_id`` will be used.
:param ack_deadline_secs: Number of seconds that a subscriber has to
acknowledge each message pulled from the subscription
:param fail_if_exists: if set, raise an exception if the topic
already exists
:param push_config: If push delivery is used with this subscription,
this field is used to configure it. An empty ``pushConfig`` signifies
that the subscriber will pull and ack messages using API methods.
:param retain_acked_messages: Indicates whether to retain acknowledged
messages. If true, then messages are not expunged from the subscription's
backlog, even if they are acknowledged, until they fall out of the
``message_retention_duration`` window. This must be true if you would
like to Seek to a timestamp.
:param message_retention_duration: How long to retain unacknowledged messages
in the subscription's backlog, from the moment a message is published. If
``retain_acked_messages`` is true, then this also configures the
retention of acknowledged messages, and thus configures how far back in
time a ``Seek`` can be done. Defaults to 7 days. Cannot be more than 7
days or less than 10 minutes.
:param labels: Client-assigned labels; see
https://cloud.google.com/pubsub/docs/labels
:param enable_message_ordering: If true, messages published with the same
ordering_key in PubsubMessage will be delivered to the subscribers in the order
in which they are received by the Pub/Sub system. Otherwise, they may be
delivered in any order.
:param expiration_policy: A policy that specifies the conditions for this
subscription's expiration. A subscription is considered active as long as any
connected subscriber is successfully consuming messages from the subscription or
is issuing operations on the subscription. If expiration_policy is not set,
a default policy with ttl of 31 days will be used. The minimum allowed value for
expiration_policy.ttl is 1 day.
:param filter_: An expression written in the Cloud Pub/Sub filter language. If
non-empty, then only PubsubMessages whose attributes field matches the filter are
delivered on this subscription. If empty, then no messages are filtered out.
:param dead_letter_policy: A policy that specifies the conditions for dead lettering
messages in this subscription. If dead_letter_policy is not set, dead lettering is
disabled.
:param retry_policy: A policy that specifies how Pub/Sub retries message delivery
for this subscription. If not set, the default retry policy is applied. This
generally implies that messages will be retried as soon as possible for healthy
subscribers. RetryPolicy will be triggered on NACKs or acknowledgement deadline
exceeded events for a given message.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:return: subscription name which will be the system-generated value if
the ``subscription`` parameter is not supplied
"""
subscriber = self.subscriber_client
if not subscription:
subscription = f"sub-{uuid4()}"
if not subscription_project_id:
subscription_project_id = project_id
# Add airflow-version label to the subscription
labels = labels or {}
labels["airflow-version"] = "v" + version.replace(".", "-").replace("+", "-")
subscription_path = f"projects/{subscription_project_id}/subscriptions/{subscription}"
topic_path = f"projects/{project_id}/topics/{topic}"
self.log.info("Creating subscription (path) %s for topic (path) %a", subscription_path, topic_path)
try:
subscriber.create_subscription(
request={
"name": subscription_path,
"topic": topic_path,
"push_config": push_config,
"ack_deadline_seconds": ack_deadline_secs,
"retain_acked_messages": retain_acked_messages,
"message_retention_duration": message_retention_duration,
"labels": labels,
"enable_message_ordering": enable_message_ordering,
"expiration_policy": expiration_policy,
"filter": filter_,
"dead_letter_policy": dead_letter_policy,
"retry_policy": retry_policy,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
except AlreadyExists:
self.log.warning("Subscription already exists: %s", subscription_path)
if fail_if_exists:
raise PubSubException(f"Subscription already exists: {subscription_path}")
except GoogleAPICallError as e:
raise PubSubException(f"Error creating subscription {subscription_path}", e)
self.log.info("Created subscription (path) %s for topic (path) %s", subscription_path, topic_path)
return subscription
@GoogleBaseHook.fallback_to_default_project_id
def delete_subscription(
self,
subscription: str,
project_id: str = PROVIDE_PROJECT_ID,
fail_if_not_exists: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete a Pub/Sub subscription, if it exists.
:param subscription: the Pub/Sub subscription name to delete; do not
include the ``projects/{project}/subscriptions/`` prefix.
:param project_id: Optional, the Google Cloud project ID where the subscription exists
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param fail_if_not_exists: if set, raise an exception if the topic does not exist
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
subscriber = self.subscriber_client
# E501
subscription_path = f"projects/{project_id}/subscriptions/{subscription}"
self.log.info("Deleting subscription (path) %s", subscription_path)
try:
subscriber.delete_subscription(
request={"subscription": subscription_path},
retry=retry,
timeout=timeout,
metadata=metadata,
)
except NotFound:
self.log.warning("Subscription does not exist: %s", subscription_path)
if fail_if_not_exists:
raise PubSubException(f"Subscription does not exist: {subscription_path}")
except GoogleAPICallError as e:
raise PubSubException(f"Error deleting subscription {subscription_path}", e)
self.log.info("Deleted subscription (path) %s", subscription_path)
@GoogleBaseHook.fallback_to_default_project_id
def pull(
self,
subscription: str,
max_messages: int,
project_id: str = PROVIDE_PROJECT_ID,
return_immediately: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> list[ReceivedMessage]:
"""
Pull up to ``max_messages`` messages from Pub/Sub subscription.
:param subscription: the Pub/Sub subscription name to pull from; do not
include the 'projects/{project}/topics/' prefix.
:param max_messages: The maximum number of messages to return from
the Pub/Sub API.
:param project_id: Optional, the Google Cloud project ID where the subscription exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param return_immediately: If set, the Pub/Sub API will immediately
return if no messages are available. Otherwise, the request will
block for an undisclosed, but bounded period of time
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:return: A list of Pub/Sub ReceivedMessage objects each containing
an ``ackId`` property and a ``message`` property, which includes
the base64-encoded message content. See
https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#google.pubsub.v1.ReceivedMessage
"""
subscriber = self.subscriber_client
# E501
subscription_path = f"projects/{project_id}/subscriptions/{subscription}"
self.log.info("Pulling max %d messages from subscription (path) %s", max_messages, subscription_path)
try:
response = subscriber.pull(
request={
"subscription": subscription_path,
"max_messages": max_messages,
"return_immediately": return_immediately,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
result = getattr(response, "received_messages", [])
self.log.info("Pulled %d messages from subscription (path) %s", len(result), subscription_path)
return result
except (HttpError, GoogleAPICallError) as e:
raise PubSubException(f"Error pulling messages from subscription {subscription_path}", e)
@GoogleBaseHook.fallback_to_default_project_id
def acknowledge(
self,
subscription: str,
project_id: str,
ack_ids: list[str] | None = None,
messages: list[ReceivedMessage] | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Acknowledges the messages associated with the ``ack_ids`` from Pub/Sub subscription.
:param subscription: the Pub/Sub subscription name to delete; do not
include the 'projects/{project}/topics/' prefix.
:param ack_ids: List of ReceivedMessage ackIds from a previous pull response.
Mutually exclusive with ``messages`` argument.
:param messages: List of ReceivedMessage objects to acknowledge.
Mutually exclusive with ``ack_ids`` argument.
:param project_id: Optional, the Google Cloud project name or ID in which to create the topic
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
if ack_ids is not None and messages is None:
pass # use ack_ids as is
elif ack_ids is None and messages is not None:
ack_ids = [message.ack_id for message in messages] # extract ack_ids from messages
else:
raise ValueError("One and only one of 'ack_ids' and 'messages' arguments have to be provided")
subscriber = self.subscriber_client
# E501
subscription_path = f"projects/{project_id}/subscriptions/{subscription}"
self.log.info("Acknowledging %d ack_ids from subscription (path) %s", len(ack_ids), subscription_path)
try:
subscriber.acknowledge(
request={"subscription": subscription_path, "ack_ids": ack_ids},
retry=retry,
timeout=timeout,
metadata=metadata,
)
except (HttpError, GoogleAPICallError) as e:
raise PubSubException(
f"Error acknowledging {len(ack_ids)} messages pulled from subscription {subscription_path}",
e,
)
self.log.info("Acknowledged ack_ids from subscription (path) %s", subscription_path)
| PubSubHook |
python | fastai__fastai | fastai/callback/rnn.py | {
"start": 1162,
"end": 1918
} | class ____(Callback):
"Add AR and TAR regularization"
order,run_valid = RNNCallback.order+1,False
def __init__(self, alpha=0., beta=0.): store_attr()
def after_loss(self):
if not self.training: return
if self.alpha: self.learn.loss_grad += self.alpha * self.rnn.out.float().pow(2).mean()
if self.beta:
h = self.rnn.raw_out
if len(h)>1: self.learn.loss_grad += self.beta * (h[:,1:] - h[:,:-1]).float().pow(2).mean()
# %% ../../nbs/34_callback.rnn.ipynb 8
def rnn_cbs(alpha=0., beta=0.):
"All callbacks needed for (optionally regularized) RNN training"
reg = [RNNRegularizer(alpha=alpha, beta=beta)] if alpha or beta else []
return [ModelResetter(), RNNCallback()] + reg
| RNNRegularizer |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_align.py | {
"start": 193,
"end": 12243
} | class ____:
def test_frame_align_aware(self):
idx1 = date_range("2001", periods=5, freq="h", tz="US/Eastern")
idx2 = date_range("2001", periods=5, freq="2h", tz="US/Eastern")
df1 = DataFrame(np.random.default_rng(2).standard_normal((len(idx1), 3)), idx1)
df2 = DataFrame(np.random.default_rng(2).standard_normal((len(idx2), 3)), idx2)
new1, new2 = df1.align(df2)
assert df1.index.tz == new1.index.tz
assert df2.index.tz == new2.index.tz
# different timezones convert to UTC
# frame with frame
df1_central = df1.tz_convert("US/Central")
new1, new2 = df1.align(df1_central)
assert new1.index.tz is timezone.utc
assert new2.index.tz is timezone.utc
# frame with Series
new1, new2 = df1.align(df1_central[0], axis=0)
assert new1.index.tz is timezone.utc
assert new2.index.tz is timezone.utc
df1[0].align(df1_central, axis=0)
assert new1.index.tz is timezone.utc
assert new2.index.tz is timezone.utc
def test_align_float(self, float_frame):
af, bf = float_frame.align(float_frame)
assert af._mgr is not float_frame._mgr
af, bf = float_frame.align(float_frame)
assert af._mgr is not float_frame._mgr
# axis = 0
other = float_frame.iloc[:-5, :3]
af, bf = float_frame.align(other, axis=0, fill_value=-1)
tm.assert_index_equal(bf.columns, other.columns)
# test fill value
join_idx = float_frame.index.join(other.index)
diff_a = float_frame.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
assert (diff_a_vals == -1).all()
af, bf = float_frame.align(other, join="right", axis=0)
tm.assert_index_equal(bf.columns, other.columns)
tm.assert_index_equal(bf.index, other.index)
tm.assert_index_equal(af.index, other.index)
# axis = 1
other = float_frame.iloc[:-5, :3].copy()
af, bf = float_frame.align(other, axis=1)
tm.assert_index_equal(bf.columns, float_frame.columns)
tm.assert_index_equal(bf.index, other.index)
# test fill value
join_idx = float_frame.index.join(other.index)
diff_a = float_frame.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
assert (diff_a_vals == -1).all()
af, bf = float_frame.align(other, join="inner", axis=1)
tm.assert_index_equal(bf.columns, other.columns)
# Try to align DataFrame to Series along bad axis
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
float_frame.align(af.iloc[0, :3], join="inner", axis=2)
def test_align_frame_with_series(self, float_frame):
# align dataframe to series with broadcast or not
idx = float_frame.index
s = Series(range(len(idx)), index=idx)
left, right = float_frame.align(s, axis=0)
tm.assert_index_equal(left.index, float_frame.index)
tm.assert_index_equal(right.index, float_frame.index)
assert isinstance(right, Series)
def test_align_series_condition(self):
# see gh-9558
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
result = df[df["a"] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
result = df.where(df["a"] == 2, 0)
expected = DataFrame({"a": [0, 2, 0], "b": [0, 5, 0]})
tm.assert_frame_equal(result, expected)
def test_align_mixed_float(self, mixed_float_frame):
# mixed floats/ints
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = mixed_float_frame.align(
other.iloc[:, 0], join="inner", axis=1, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
def test_align_mixed_int(self, mixed_int_frame):
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = mixed_int_frame.align(
other.iloc[:, 0], join="inner", axis=1, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
@pytest.mark.parametrize(
"l_ordered,r_ordered,expected",
[
[True, True, pd.CategoricalIndex],
[True, False, Index],
[False, True, Index],
[False, False, pd.CategoricalIndex],
],
)
def test_align_categorical(self, l_ordered, r_ordered, expected):
# GH-28397
df_1 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
"B": Series(list("aabbca")).astype(
pd.CategoricalDtype(list("cab"), ordered=l_ordered)
),
}
).set_index("B")
df_2 = DataFrame(
{
"A": np.arange(5, dtype="int64"),
"B": Series(list("babca")).astype(
pd.CategoricalDtype(list("cab"), ordered=r_ordered)
),
}
).set_index("B")
aligned_1, aligned_2 = df_1.align(df_2)
assert isinstance(aligned_1.index, expected)
assert isinstance(aligned_2.index, expected)
tm.assert_index_equal(aligned_1.index, aligned_2.index)
def test_align_multiindex(self):
# GH#10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product(
[range(2), range(3), range(2)], names=("a", "b", "c")
)
idx = Index(range(2), name="b")
df1 = DataFrame(np.arange(12, dtype="int64"), index=midx)
df2 = DataFrame(np.arange(2, dtype="int64"), index=idx)
# these must be the same results (but flipped)
res1l, res1r = df1.align(df2, join="left")
res2l, res2r = df2.align(df1, join="right")
expl = df1
tm.assert_frame_equal(expl, res1l)
tm.assert_frame_equal(expl, res2r)
expr = DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_frame_equal(expr, res1r)
tm.assert_frame_equal(expr, res2l)
res1l, res1r = df1.align(df2, join="right")
res2l, res2r = df2.align(df1, join="left")
exp_idx = pd.MultiIndex.from_product(
[range(2), range(2), range(2)], names=("a", "b", "c")
)
expl = DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_frame_equal(expl, res1l)
tm.assert_frame_equal(expl, res2r)
expr = DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_frame_equal(expr, res1r)
tm.assert_frame_equal(expr, res2l)
def test_align_series_combinations(self):
df = DataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE"))
s = Series([1, 2, 4], index=list("ABD"), name="x")
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = DataFrame(
{"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]},
index=list("ABCDE"),
)
exp2 = Series([1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x")
tm.assert_frame_equal(res1, exp1)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
tm.assert_series_equal(res1, exp2)
tm.assert_frame_equal(res2, exp1)
def test_multiindex_align_to_series_with_common_index_level(self):
# GH-46001
foo_index = Index([1, 2, 3], name="foo")
bar_index = Index([1, 2], name="bar")
series = Series([1, 2], index=bar_index, name="foo_series")
df = DataFrame(
{"col": np.arange(6)},
index=pd.MultiIndex.from_product([foo_index, bar_index]),
)
expected_r = Series([1, 2] * 3, index=df.index, name="foo_series")
result_l, result_r = df.align(series, axis=0)
tm.assert_frame_equal(result_l, df)
tm.assert_series_equal(result_r, expected_r)
def test_multiindex_align_to_series_with_common_index_level_missing_in_left(self):
# GH-46001
foo_index = Index([1, 2, 3], name="foo")
bar_index = Index([1, 2], name="bar")
series = Series(
[1, 2, 3, 4], index=Index([1, 2, 3, 4], name="bar"), name="foo_series"
)
df = DataFrame(
{"col": np.arange(6)},
index=pd.MultiIndex.from_product([foo_index, bar_index]),
)
expected_r = Series([1, 2] * 3, index=df.index, name="foo_series")
result_l, result_r = df.align(series, axis=0)
tm.assert_frame_equal(result_l, df)
tm.assert_series_equal(result_r, expected_r)
def test_multiindex_align_to_series_with_common_index_level_missing_in_right(self):
# GH-46001
foo_index = Index([1, 2, 3], name="foo")
bar_index = Index([1, 2, 3, 4], name="bar")
series = Series([1, 2], index=Index([1, 2], name="bar"), name="foo_series")
df = DataFrame(
{"col": np.arange(12)},
index=pd.MultiIndex.from_product([foo_index, bar_index]),
)
expected_r = Series(
[1, 2, np.nan, np.nan] * 3, index=df.index, name="foo_series"
)
result_l, result_r = df.align(series, axis=0)
tm.assert_frame_equal(result_l, df)
tm.assert_series_equal(result_r, expected_r)
def test_multiindex_align_to_series_with_common_index_level_missing_in_both(self):
# GH-46001
foo_index = Index([1, 2, 3], name="foo")
bar_index = Index([1, 3, 4], name="bar")
series = Series(
[1, 2, 3], index=Index([1, 2, 4], name="bar"), name="foo_series"
)
df = DataFrame(
{"col": np.arange(9)},
index=pd.MultiIndex.from_product([foo_index, bar_index]),
)
expected_r = Series([1, np.nan, 3] * 3, index=df.index, name="foo_series")
result_l, result_r = df.align(series, axis=0)
tm.assert_frame_equal(result_l, df)
tm.assert_series_equal(result_r, expected_r)
def test_multiindex_align_to_series_with_common_index_level_non_unique_cols(self):
# GH-46001
foo_index = Index([1, 2, 3], name="foo")
bar_index = Index([1, 2], name="bar")
series = Series([1, 2], index=bar_index, name="foo_series")
df = DataFrame(
np.arange(18).reshape(6, 3),
index=pd.MultiIndex.from_product([foo_index, bar_index]),
)
df.columns = ["cfoo", "cbar", "cfoo"]
expected = Series([1, 2] * 3, index=df.index, name="foo_series")
result_left, result_right = df.align(series, axis=0)
tm.assert_series_equal(result_right, expected)
tm.assert_index_equal(result_left.columns, df.columns)
def test_missing_axis_specification_exception(self):
df = DataFrame(np.arange(50).reshape((10, 5)))
series = Series(np.arange(5))
with pytest.raises(ValueError, match=r"axis=0 or 1"):
df.align(series)
def test_align_series_check_copy(self):
# GH#
df = DataFrame({0: [1, 2]})
ser = Series([1], name=0)
expected = ser.copy()
result, other = df.align(ser, axis=1)
ser.iloc[0] = 100
tm.assert_series_equal(other, expected)
def test_align_identical_different_object(self):
# GH#51032
df = DataFrame({"a": [1, 2]})
ser = Series([3, 4])
result, result2 = df.align(ser, axis=0)
tm.assert_frame_equal(result, df)
tm.assert_series_equal(result2, ser)
assert df is not result
assert ser is not result2
def test_align_identical_different_object_columns(self):
# GH#51032
df = DataFrame({"a": [1, 2]})
ser = Series([1], index=["a"])
result, result2 = df.align(ser, axis=1)
tm.assert_frame_equal(result, df)
tm.assert_series_equal(result2, ser)
assert df is not result
assert ser is not result2
| TestDataFrameAlign |
python | huggingface__transformers | src/transformers/models/nystromformer/modeling_nystromformer.py | {
"start": 13897,
"end": 15522
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([NystromformerLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, attention_mask, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Nystromformer
| NystromformerEncoder |
python | django-debug-toolbar__django-debug-toolbar | tests/panels/test_settings.py | {
"start": 115,
"end": 1303
} | class ____(IntegrationTestCase):
def test_panel_title(self):
response = self.client.get("/regular/basic/")
# The settings module is None due to using Django's UserSettingsHolder
# in tests.
self.assertContains(
response,
"""
<li id="djdt-SettingsPanel" class="djDebugPanelButton">
<input type="checkbox" checked title="Disable for next and successive requests" data-cookie="djdtSettingsPanel">
<a class="SettingsPanel" href="#" title="Settings from None">Settings</a>
</li>
""",
html=True,
)
self.assertContains(
response,
"""
<div id="SettingsPanel" class="djdt-panelContent djdt-hidden">
<div class="djDebugPanelTitle">
<h3>Settings from None</h3>
<button type="button" class="djDebugClose">×</button>
</div>
<div class="djDebugPanelContent">
<div class="djdt-loader"></div>
<div class="djdt-scroll"></div>
</div>
</div>
""",
html=True,
)
| SettingsIntegrationTestCase |
python | apache__airflow | airflow-core/tests/unit/cli/commands/test_task_command.py | {
"start": 16182,
"end": 19153
} | class ____:
def setup_method(self) -> None:
self.dag_id = "test_logging_dag"
self.task_id = "test_task"
self.run_id = "test_run"
self.dag_path = os.path.join(ROOT_FOLDER, "dags", "test_logging_in_dag.py")
reset(self.dag_id)
self.logical_date = timezone.datetime(2017, 1, 1)
self.logical_date_str = self.logical_date.isoformat()
self.task_args = ["tasks", "run", self.dag_id, self.task_id, "--local", self.logical_date_str]
self.log_dir = conf.get_mandatory_value("logging", "base_log_folder")
self.log_filename = f"dag_id={self.dag_id}/run_id={self.run_id}/task_id={self.task_id}/attempt=1.log"
self.ti_log_file_path = os.path.join(self.log_dir, self.log_filename)
# Clearing the cache before calling it
cli_parser.get_parser.cache_clear()
self.parser = cli_parser.get_parser()
dag = DagBag().get_dag(self.dag_id)
data_interval = dag.timetable.infer_manual_data_interval(run_after=self.logical_date)
self.dr = dag.create_dagrun(
run_id=self.run_id,
logical_date=self.logical_date,
data_interval=data_interval,
run_after=self.logical_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
run_type=DagRunType.MANUAL,
triggered_by=DagRunTriggeredByType.TEST,
)
self.tis = self.dr.get_task_instances()
assert len(self.tis) == 1
self.ti = self.tis[0]
root = self.root_logger = logging.getLogger()
self.root_handlers = root.handlers.copy()
self.root_filters = root.filters.copy()
self.root_level = root.level
with contextlib.suppress(OSError):
os.remove(self.ti_log_file_path)
def teardown_method(self) -> None:
root = self.root_logger
root.setLevel(self.root_level)
root.handlers[:] = self.root_handlers
root.filters[:] = self.root_filters
reset(self.dag_id)
with contextlib.suppress(OSError):
os.remove(self.ti_log_file_path)
def assert_log_line(self, text, logs_list, expect_from_logging_mixin=False):
"""
Get Log Line and assert only 1 Entry exists with the given text. Also check that
"logging_mixin" line does not appear in that log line to avoid duplicate logging as below:
[2020-06-24 16:47:23,537] {logging_mixin.py:91} INFO - [2020-06-24 16:47:23,536] {python.py:135}
"""
log_lines = [log for log in logs_list if text in log]
assert len(log_lines) == 1
log_line = log_lines[0]
if not expect_from_logging_mixin:
# Logs from print statement still show with logging_mixing as filename
# Example: [2020-06-24 17:07:00,482] {logging_mixin.py:91} INFO - Log from Print statement
assert "logging_mixin.py" not in log_line
return log_line
| TestLogsfromTaskRunCommand |
python | cython__cython | Cython/Compiler/ParseTreeTransforms.py | {
"start": 146748,
"end": 148485
} | class ____(VisitorTransform, SkipDeclarations):
"""
Allow certain Python operations inside of nogil blocks by implicitly acquiring the GIL.
Must run before the AnalyseDeclarationsTransform to make sure the GILStatNodes get
set up, parallel sections know that the GIL is acquired inside of them, etc.
"""
nogil = False
# special node handling
def _inject_gil_in_nogil(self, node):
"""Allow the (Python statement) node in nogil sections by wrapping it in a 'with gil' block."""
if self.nogil:
node = Nodes.GILStatNode(node.pos, state='gil', body=node)
return node
visit_RaiseStatNode = _inject_gil_in_nogil
visit_PrintStatNode = _inject_gil_in_nogil # sadly, not the function
# further candidates:
# def visit_ReraiseStatNode(self, node):
# nogil tracking
def visit_GILStatNode(self, node):
was_nogil = self.nogil
self.nogil = (node.state == 'nogil')
self.visitchildren(node)
self.nogil = was_nogil
return node
def visit_CFuncDefNode(self, node):
was_nogil = self.nogil
if isinstance(node.declarator, Nodes.CFuncDeclaratorNode):
self.nogil = node.declarator.nogil and not node.declarator.with_gil
self.visitchildren(node)
self.nogil = was_nogil
return node
def visit_ParallelRangeNode(self, node):
was_nogil = self.nogil
self.nogil = node.nogil
self.visitchildren(node)
self.nogil = was_nogil
return node
def visit_ExprNode(self, node):
# No special GIL handling inside of expressions for now.
return node
visit_Node = VisitorTransform.recurse_to_children
| InjectGilHandling |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.