language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/maximum-height-of-a-triangle.py | {
"start": 39,
"end": 628
} | class ____(object):
def maxHeightOfTriangle(self, red, blue):
"""
:type red: int
:type blue: int
:rtype: int
"""
def f(x, y):
# odd level:
# (1+h)*((1+h)//2)//2 <= x
# => h <= int(2*x**0.5)-1
# even level:
# (2+h)*(h//2)//2 <= y
# => h <= int((4*y+1)**0.5)-1
a, b = int(2*x**0.5)-1, int((4*y+1)**0.5)-1
return min(a, b)+int(a != b)
return max(f(red, blue), f(blue, red))
# Time: O(sqrt(n))
# Space: O(1)
# simulation
| Solution |
python | scipy__scipy | scipy/sparse/linalg/_eigen/arpack/arpack.py | {
"start": 40157,
"end": 40712
} | class ____(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
self.shape = M.shape
self.dtype = M.dtype
def _matvec(self, x):
return lu_solve(self.M_lu, x)
def gmres_loose(A, b, tol):
"""
gmres with looser termination condition.
"""
b = np.asarray(b)
min_tol = 1000 * np.sqrt(b.size) * np.finfo(b.dtype).eps
return gmres(A, b, rtol=max(tol, min_tol), atol=0)
| LuInv |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_triangulation.py | {
"start": 370,
"end": 55271
} | class ____:
x = [-1, 0, 1, 0]
y = [0, -1, 0, 1]
triangles = [[0, 1, 2], [0, 2, 3]]
mask = [False, True]
@pytest.mark.parametrize('args, kwargs, expected', [
([x, y], {}, [x, y, None, None]),
([x, y, triangles], {}, [x, y, triangles, None]),
([x, y], dict(triangles=triangles), [x, y, triangles, None]),
([x, y], dict(mask=mask), [x, y, None, mask]),
([x, y, triangles], dict(mask=mask), [x, y, triangles, mask]),
([x, y], dict(triangles=triangles, mask=mask), [x, y, triangles, mask])
])
def test_extract_triangulation_params(self, args, kwargs, expected):
other_args = [1, 2]
other_kwargs = {'a': 3, 'b': '4'}
x_, y_, triangles_, mask_, args_, kwargs_ = \
mtri.Triangulation._extract_triangulation_params(
args + other_args, {**kwargs, **other_kwargs})
x, y, triangles, mask = expected
assert x_ is x
assert y_ is y
assert_array_equal(triangles_, triangles)
assert mask_ is mask
assert args_ == other_args
assert kwargs_ == other_kwargs
def test_extract_triangulation_positional_mask():
# mask cannot be passed positionally
mask = [True]
args = [[0, 2, 1], [0, 0, 1], [[0, 1, 2]], mask]
x_, y_, triangles_, mask_, args_, kwargs_ = \
mtri.Triangulation._extract_triangulation_params(args, {})
assert mask_ is None
assert args_ == [mask]
# the positional mask must be caught downstream because this must pass
# unknown args through
def test_triangulation_init():
x = [-1, 0, 1, 0]
y = [0, -1, 0, 1]
with pytest.raises(ValueError, match="x and y must be equal-length"):
mtri.Triangulation(x, [1, 2])
with pytest.raises(
ValueError,
match=r"triangles must be a \(N, 3\) int array, but found shape "
r"\(3,\)"):
mtri.Triangulation(x, y, [0, 1, 2])
with pytest.raises(
ValueError,
match=r"triangles must be a \(N, 3\) int array, not 'other'"):
mtri.Triangulation(x, y, 'other')
with pytest.raises(ValueError, match="found value 99"):
mtri.Triangulation(x, y, [[0, 1, 99]])
with pytest.raises(ValueError, match="found value -1"):
mtri.Triangulation(x, y, [[0, 1, -1]])
def test_triangulation_set_mask():
x = [-1, 0, 1, 0]
y = [0, -1, 0, 1]
triangles = [[0, 1, 2], [2, 3, 0]]
triang = mtri.Triangulation(x, y, triangles)
# Check neighbors, which forces creation of C++ triangulation
assert_array_equal(triang.neighbors, [[-1, -1, 1], [-1, -1, 0]])
# Set mask
triang.set_mask([False, True])
assert_array_equal(triang.mask, [False, True])
# Reset mask
triang.set_mask(None)
assert triang.mask is None
msg = r"mask array must have same length as triangles array"
for mask in ([False, True, False], [False], [True], False, True):
with pytest.raises(ValueError, match=msg):
triang.set_mask(mask)
def test_delaunay():
# No duplicate points, regular grid.
nx = 5
ny = 4
x, y = np.meshgrid(np.linspace(0.0, 1.0, nx), np.linspace(0.0, 1.0, ny))
x = x.ravel()
y = y.ravel()
npoints = nx*ny
ntriangles = 2 * (nx-1) * (ny-1)
nedges = 3*nx*ny - 2*nx - 2*ny + 1
# Create delaunay triangulation.
triang = mtri.Triangulation(x, y)
# The tests in the remainder of this function should be passed by any
# triangulation that does not contain duplicate points.
# Points - floating point.
assert_array_almost_equal(triang.x, x)
assert_array_almost_equal(triang.y, y)
# Triangles - integers.
assert len(triang.triangles) == ntriangles
assert np.min(triang.triangles) == 0
assert np.max(triang.triangles) == npoints-1
# Edges - integers.
assert len(triang.edges) == nedges
assert np.min(triang.edges) == 0
assert np.max(triang.edges) == npoints-1
# Neighbors - integers.
# Check that neighbors calculated by C++ triangulation class are the same
# as those returned from delaunay routine.
neighbors = triang.neighbors
triang._neighbors = None
assert_array_equal(triang.neighbors, neighbors)
# Is each point used in at least one triangle?
assert_array_equal(np.unique(triang.triangles), np.arange(npoints))
def test_delaunay_duplicate_points():
npoints = 10
duplicate = 7
duplicate_of = 3
np.random.seed(23)
x = np.random.random(npoints)
y = np.random.random(npoints)
x[duplicate] = x[duplicate_of]
y[duplicate] = y[duplicate_of]
# Create delaunay triangulation.
triang = mtri.Triangulation(x, y)
# Duplicate points should be ignored, so the index of the duplicate points
# should not appear in any triangle.
assert_array_equal(np.unique(triang.triangles),
np.delete(np.arange(npoints), duplicate))
def test_delaunay_points_in_line():
# Cannot triangulate points that are all in a straight line, but check
# that delaunay code fails gracefully.
x = np.linspace(0.0, 10.0, 11)
y = np.linspace(0.0, 10.0, 11)
with pytest.raises(RuntimeError):
mtri.Triangulation(x, y)
# Add an extra point not on the line and the triangulation is OK.
x = np.append(x, 2.0)
y = np.append(y, 8.0)
mtri.Triangulation(x, y)
@pytest.mark.parametrize('x, y', [
# Triangulation should raise a ValueError if passed less than 3 points.
([], []),
([1], [5]),
([1, 2], [5, 6]),
# Triangulation should also raise a ValueError if passed duplicate points
# such that there are less than 3 unique points.
([1, 2, 1], [5, 6, 5]),
([1, 2, 2], [5, 6, 6]),
([1, 1, 1, 2, 1, 2], [5, 5, 5, 6, 5, 6]),
])
def test_delaunay_insufficient_points(x, y):
with pytest.raises(ValueError):
mtri.Triangulation(x, y)
def test_delaunay_robust():
# Fails when mtri.Triangulation uses matplotlib.delaunay, works when using
# qhull.
tri_points = np.array([
[0.8660254037844384, -0.5000000000000004],
[0.7577722283113836, -0.5000000000000004],
[0.6495190528383288, -0.5000000000000003],
[0.5412658773652739, -0.5000000000000003],
[0.811898816047911, -0.40625000000000044],
[0.7036456405748561, -0.4062500000000004],
[0.5953924651018013, -0.40625000000000033]])
test_points = np.asarray([
[0.58, -0.46],
[0.65, -0.46],
[0.65, -0.42],
[0.7, -0.48],
[0.7, -0.44],
[0.75, -0.44],
[0.8, -0.48]])
# Utility function that indicates if a triangle defined by 3 points
# (xtri, ytri) contains the test point xy. Avoid calling with a point that
# lies on or very near to an edge of the triangle.
def tri_contains_point(xtri, ytri, xy):
tri_points = np.vstack((xtri, ytri)).T
return Path(tri_points).contains_point(xy)
# Utility function that returns how many triangles of the specified
# triangulation contain the test point xy. Avoid calling with a point that
# lies on or very near to an edge of any triangle in the triangulation.
def tris_contain_point(triang, xy):
return sum(tri_contains_point(triang.x[tri], triang.y[tri], xy)
for tri in triang.triangles)
# Using matplotlib.delaunay, an invalid triangulation is created with
# overlapping triangles; qhull is OK.
triang = mtri.Triangulation(tri_points[:, 0], tri_points[:, 1])
for test_point in test_points:
assert tris_contain_point(triang, test_point) == 1
# If ignore the first point of tri_points, matplotlib.delaunay throws a
# KeyError when calculating the convex hull; qhull is OK.
triang = mtri.Triangulation(tri_points[1:, 0], tri_points[1:, 1])
@image_comparison(['tripcolor1.png'])
def test_tripcolor():
x = np.asarray([0, 0.5, 1, 0, 0.5, 1, 0, 0.5, 1, 0.75])
y = np.asarray([0, 0, 0, 0.5, 0.5, 0.5, 1, 1, 1, 0.75])
triangles = np.asarray([
[0, 1, 3], [1, 4, 3],
[1, 2, 4], [2, 5, 4],
[3, 4, 6], [4, 7, 6],
[4, 5, 9], [7, 4, 9], [8, 7, 9], [5, 8, 9]])
# Triangulation with same number of points and triangles.
triang = mtri.Triangulation(x, y, triangles)
Cpoints = x + 0.5*y
xmid = x[triang.triangles].mean(axis=1)
ymid = y[triang.triangles].mean(axis=1)
Cfaces = 0.5*xmid + ymid
plt.subplot(121)
plt.tripcolor(triang, Cpoints, edgecolors='k')
plt.title('point colors')
plt.subplot(122)
plt.tripcolor(triang, facecolors=Cfaces, edgecolors='k')
plt.title('facecolors')
def test_tripcolor_color():
x = [-1, 0, 1, 0]
y = [0, -1, 0, 1]
fig, ax = plt.subplots()
with pytest.raises(TypeError, match=r"tripcolor\(\) missing 1 required "):
ax.tripcolor(x, y)
with pytest.raises(ValueError, match="The length of c must match either"):
ax.tripcolor(x, y, [1, 2, 3])
with pytest.raises(ValueError,
match="length of facecolors must match .* triangles"):
ax.tripcolor(x, y, facecolors=[1, 2, 3, 4])
with pytest.raises(ValueError,
match="'gouraud' .* at the points.* not at the faces"):
ax.tripcolor(x, y, facecolors=[1, 2], shading='gouraud')
with pytest.raises(ValueError,
match="'gouraud' .* at the points.* not at the faces"):
ax.tripcolor(x, y, [1, 2], shading='gouraud') # faces
with pytest.raises(TypeError,
match="positional.*'c'.*keyword-only.*'facecolors'"):
ax.tripcolor(x, y, C=[1, 2, 3, 4])
with pytest.raises(TypeError, match="Unexpected positional parameter"):
ax.tripcolor(x, y, [1, 2], 'unused_positional')
# smoke test for valid color specifications (via C or facecolors)
ax.tripcolor(x, y, [1, 2, 3, 4]) # edges
ax.tripcolor(x, y, [1, 2, 3, 4], shading='gouraud') # edges
ax.tripcolor(x, y, [1, 2]) # faces
ax.tripcolor(x, y, facecolors=[1, 2]) # faces
def test_tripcolor_clim():
np.random.seed(19680801)
a, b, c = np.random.rand(10), np.random.rand(10), np.random.rand(10)
ax = plt.figure().add_subplot()
clim = (0.25, 0.75)
norm = ax.tripcolor(a, b, c, clim=clim).norm
assert (norm.vmin, norm.vmax) == clim
def test_tripcolor_warnings():
x = [-1, 0, 1, 0]
y = [0, -1, 0, 1]
c = [0.4, 0.5]
fig, ax = plt.subplots()
# facecolors takes precedence over c
with pytest.warns(UserWarning, match="Positional parameter c .*no effect"):
ax.tripcolor(x, y, c, facecolors=c)
with pytest.warns(UserWarning, match="Positional parameter c .*no effect"):
ax.tripcolor(x, y, 'interpreted as c', facecolors=c)
def test_no_modify():
# Test that Triangulation does not modify triangles array passed to it.
triangles = np.array([[3, 2, 0], [3, 1, 0]], dtype=np.int32)
points = np.array([(0, 0), (0, 1.1), (1, 0), (1, 1)])
old_triangles = triangles.copy()
mtri.Triangulation(points[:, 0], points[:, 1], triangles).edges
assert_array_equal(old_triangles, triangles)
def test_trifinder():
# Test points within triangles of masked triangulation.
x, y = np.meshgrid(np.arange(4), np.arange(4))
x = x.ravel()
y = y.ravel()
triangles = [[0, 1, 4], [1, 5, 4], [1, 2, 5], [2, 6, 5], [2, 3, 6],
[3, 7, 6], [4, 5, 8], [5, 9, 8], [5, 6, 9], [6, 10, 9],
[6, 7, 10], [7, 11, 10], [8, 9, 12], [9, 13, 12], [9, 10, 13],
[10, 14, 13], [10, 11, 14], [11, 15, 14]]
mask = np.zeros(len(triangles))
mask[8:10] = 1
triang = mtri.Triangulation(x, y, triangles, mask)
trifinder = triang.get_trifinder()
xs = [0.25, 1.25, 2.25, 3.25]
ys = [0.25, 1.25, 2.25, 3.25]
xs, ys = np.meshgrid(xs, ys)
xs = xs.ravel()
ys = ys.ravel()
tris = trifinder(xs, ys)
assert_array_equal(tris, [0, 2, 4, -1, 6, -1, 10, -1,
12, 14, 16, -1, -1, -1, -1, -1])
tris = trifinder(xs-0.5, ys-0.5)
assert_array_equal(tris, [-1, -1, -1, -1, -1, 1, 3, 5,
-1, 7, -1, 11, -1, 13, 15, 17])
# Test points exactly on boundary edges of masked triangulation.
xs = [0.5, 1.5, 2.5, 0.5, 1.5, 2.5, 1.5, 1.5, 0.0, 1.0, 2.0, 3.0]
ys = [0.0, 0.0, 0.0, 3.0, 3.0, 3.0, 1.0, 2.0, 1.5, 1.5, 1.5, 1.5]
tris = trifinder(xs, ys)
assert_array_equal(tris, [0, 2, 4, 13, 15, 17, 3, 14, 6, 7, 10, 11])
# Test points exactly on boundary corners of masked triangulation.
xs = [0.0, 3.0]
ys = [0.0, 3.0]
tris = trifinder(xs, ys)
assert_array_equal(tris, [0, 17])
#
# Test triangles with horizontal colinear points. These are not valid
# triangulations, but we try to deal with the simplest violations.
#
# If +ve, triangulation is OK, if -ve triangulation invalid,
# if zero have colinear points but should pass tests anyway.
delta = 0.0
x = [1.5, 0, 1, 2, 3, 1.5, 1.5]
y = [-1, 0, 0, 0, 0, delta, 1]
triangles = [[0, 2, 1], [0, 3, 2], [0, 4, 3], [1, 2, 5], [2, 3, 5],
[3, 4, 5], [1, 5, 6], [4, 6, 5]]
triang = mtri.Triangulation(x, y, triangles)
trifinder = triang.get_trifinder()
xs = [-0.1, 0.4, 0.9, 1.4, 1.9, 2.4, 2.9]
ys = [-0.1, 0.1]
xs, ys = np.meshgrid(xs, ys)
tris = trifinder(xs, ys)
assert_array_equal(tris, [[-1, 0, 0, 1, 1, 2, -1],
[-1, 6, 6, 6, 7, 7, -1]])
#
# Test triangles with vertical colinear points. These are not valid
# triangulations, but we try to deal with the simplest violations.
#
# If +ve, triangulation is OK, if -ve triangulation invalid,
# if zero have colinear points but should pass tests anyway.
delta = 0.0
x = [-1, -delta, 0, 0, 0, 0, 1]
y = [1.5, 1.5, 0, 1, 2, 3, 1.5]
triangles = [[0, 1, 2], [0, 1, 5], [1, 2, 3], [1, 3, 4], [1, 4, 5],
[2, 6, 3], [3, 6, 4], [4, 6, 5]]
triang = mtri.Triangulation(x, y, triangles)
trifinder = triang.get_trifinder()
xs = [-0.1, 0.1]
ys = [-0.1, 0.4, 0.9, 1.4, 1.9, 2.4, 2.9]
xs, ys = np.meshgrid(xs, ys)
tris = trifinder(xs, ys)
assert_array_equal(tris, [[-1, -1], [0, 5], [0, 5], [0, 6], [1, 6], [1, 7],
[-1, -1]])
# Test that changing triangulation by setting a mask causes the trifinder
# to be reinitialised.
x = [0, 1, 0, 1]
y = [0, 0, 1, 1]
triangles = [[0, 1, 2], [1, 3, 2]]
triang = mtri.Triangulation(x, y, triangles)
trifinder = triang.get_trifinder()
xs = [-0.2, 0.2, 0.8, 1.2]
ys = [0.5, 0.5, 0.5, 0.5]
tris = trifinder(xs, ys)
assert_array_equal(tris, [-1, 0, 1, -1])
triang.set_mask([1, 0])
assert trifinder == triang.get_trifinder()
tris = trifinder(xs, ys)
assert_array_equal(tris, [-1, -1, 1, -1])
def test_triinterp():
# Test points within triangles of masked triangulation.
x, y = np.meshgrid(np.arange(4), np.arange(4))
x = x.ravel()
y = y.ravel()
z = 1.23*x - 4.79*y
triangles = [[0, 1, 4], [1, 5, 4], [1, 2, 5], [2, 6, 5], [2, 3, 6],
[3, 7, 6], [4, 5, 8], [5, 9, 8], [5, 6, 9], [6, 10, 9],
[6, 7, 10], [7, 11, 10], [8, 9, 12], [9, 13, 12], [9, 10, 13],
[10, 14, 13], [10, 11, 14], [11, 15, 14]]
mask = np.zeros(len(triangles))
mask[8:10] = 1
triang = mtri.Triangulation(x, y, triangles, mask)
linear_interp = mtri.LinearTriInterpolator(triang, z)
cubic_min_E = mtri.CubicTriInterpolator(triang, z)
cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
xs = np.linspace(0.25, 2.75, 6)
ys = [0.25, 0.75, 2.25, 2.75]
xs, ys = np.meshgrid(xs, ys) # Testing arrays with array.ndim = 2
for interp in (linear_interp, cubic_min_E, cubic_geom):
zs = interp(xs, ys)
assert_array_almost_equal(zs, (1.23*xs - 4.79*ys))
# Test points outside triangulation.
xs = [-0.25, 1.25, 1.75, 3.25]
ys = xs
xs, ys = np.meshgrid(xs, ys)
for interp in (linear_interp, cubic_min_E, cubic_geom):
zs = linear_interp(xs, ys)
assert_array_equal(zs.mask, [[True]*4]*4)
# Test mixed configuration (outside / inside).
xs = np.linspace(0.25, 1.75, 6)
ys = [0.25, 0.75, 1.25, 1.75]
xs, ys = np.meshgrid(xs, ys)
for interp in (linear_interp, cubic_min_E, cubic_geom):
zs = interp(xs, ys)
matest.assert_array_almost_equal(zs, (1.23*xs - 4.79*ys))
mask = (xs >= 1) * (xs <= 2) * (ys >= 1) * (ys <= 2)
assert_array_equal(zs.mask, mask)
# 2nd order patch test: on a grid with an 'arbitrary shaped' triangle,
# patch test shall be exact for quadratic functions and cubic
# interpolator if *kind* = user
(a, b, c) = (1.23, -4.79, 0.6)
def quad(x, y):
return a*(x-0.5)**2 + b*(y-0.5)**2 + c*x*y
def gradient_quad(x, y):
return (2*a*(x-0.5) + c*y, 2*b*(y-0.5) + c*x)
x = np.array([0.2, 0.33367, 0.669, 0., 1., 1., 0.])
y = np.array([0.3, 0.80755, 0.4335, 0., 0., 1., 1.])
triangles = np.array([[0, 1, 2], [3, 0, 4], [4, 0, 2], [4, 2, 5],
[1, 5, 2], [6, 5, 1], [6, 1, 0], [6, 0, 3]])
triang = mtri.Triangulation(x, y, triangles)
z = quad(x, y)
dz = gradient_quad(x, y)
# test points for 2nd order patch test
xs = np.linspace(0., 1., 5)
ys = np.linspace(0., 1., 5)
xs, ys = np.meshgrid(xs, ys)
cubic_user = mtri.CubicTriInterpolator(triang, z, kind='user', dz=dz)
interp_zs = cubic_user(xs, ys)
assert_array_almost_equal(interp_zs, quad(xs, ys))
(interp_dzsdx, interp_dzsdy) = cubic_user.gradient(x, y)
(dzsdx, dzsdy) = gradient_quad(x, y)
assert_array_almost_equal(interp_dzsdx, dzsdx)
assert_array_almost_equal(interp_dzsdy, dzsdy)
# Cubic improvement: cubic interpolation shall perform better than linear
# on a sufficiently dense mesh for a quadratic function.
n = 11
x, y = np.meshgrid(np.linspace(0., 1., n+1), np.linspace(0., 1., n+1))
x = x.ravel()
y = y.ravel()
z = quad(x, y)
triang = mtri.Triangulation(x, y, triangles=meshgrid_triangles(n+1))
xs, ys = np.meshgrid(np.linspace(0.1, 0.9, 5), np.linspace(0.1, 0.9, 5))
xs = xs.ravel()
ys = ys.ravel()
linear_interp = mtri.LinearTriInterpolator(triang, z)
cubic_min_E = mtri.CubicTriInterpolator(triang, z)
cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
zs = quad(xs, ys)
diff_lin = np.abs(linear_interp(xs, ys) - zs)
for interp in (cubic_min_E, cubic_geom):
diff_cubic = np.abs(interp(xs, ys) - zs)
assert np.max(diff_lin) >= 10 * np.max(diff_cubic)
assert (np.dot(diff_lin, diff_lin) >=
100 * np.dot(diff_cubic, diff_cubic))
def test_triinterpcubic_C1_continuity():
# Below the 4 tests which demonstrate C1 continuity of the
# TriCubicInterpolator (testing the cubic shape functions on arbitrary
# triangle):
#
# 1) Testing continuity of function & derivatives at corner for all 9
# shape functions. Testing also function values at same location.
# 2) Testing C1 continuity along each edge (as gradient is polynomial of
# 2nd order, it is sufficient to test at the middle).
# 3) Testing C1 continuity at triangle barycenter (where the 3 subtriangles
# meet)
# 4) Testing C1 continuity at median 1/3 points (midside between 2
# subtriangles)
# Utility test function check_continuity
def check_continuity(interpolator, loc, values=None):
"""
Checks the continuity of interpolator (and its derivatives) near
location loc. Can check the value at loc itself if *values* is
provided.
*interpolator* TriInterpolator
*loc* location to test (x0, y0)
*values* (optional) array [z0, dzx0, dzy0] to check the value at *loc*
"""
n_star = 24 # Number of continuity points in a boundary of loc
epsilon = 1.e-10 # Distance for loc boundary
k = 100. # Continuity coefficient
(loc_x, loc_y) = loc
star_x = loc_x + epsilon*np.cos(np.linspace(0., 2*np.pi, n_star))
star_y = loc_y + epsilon*np.sin(np.linspace(0., 2*np.pi, n_star))
z = interpolator([loc_x], [loc_y])[0]
(dzx, dzy) = interpolator.gradient([loc_x], [loc_y])
if values is not None:
assert_array_almost_equal(z, values[0])
assert_array_almost_equal(dzx[0], values[1])
assert_array_almost_equal(dzy[0], values[2])
diff_z = interpolator(star_x, star_y) - z
(tab_dzx, tab_dzy) = interpolator.gradient(star_x, star_y)
diff_dzx = tab_dzx - dzx
diff_dzy = tab_dzy - dzy
assert_array_less(diff_z, epsilon*k)
assert_array_less(diff_dzx, epsilon*k)
assert_array_less(diff_dzy, epsilon*k)
# Drawing arbitrary triangle (a, b, c) inside a unit square.
(ax, ay) = (0.2, 0.3)
(bx, by) = (0.33367, 0.80755)
(cx, cy) = (0.669, 0.4335)
x = np.array([ax, bx, cx, 0., 1., 1., 0.])
y = np.array([ay, by, cy, 0., 0., 1., 1.])
triangles = np.array([[0, 1, 2], [3, 0, 4], [4, 0, 2], [4, 2, 5],
[1, 5, 2], [6, 5, 1], [6, 1, 0], [6, 0, 3]])
triang = mtri.Triangulation(x, y, triangles)
for idof in range(9):
z = np.zeros(7, dtype=np.float64)
dzx = np.zeros(7, dtype=np.float64)
dzy = np.zeros(7, dtype=np.float64)
values = np.zeros([3, 3], dtype=np.float64)
case = idof//3
values[case, idof % 3] = 1.0
if case == 0:
z[idof] = 1.0
elif case == 1:
dzx[idof % 3] = 1.0
elif case == 2:
dzy[idof % 3] = 1.0
interp = mtri.CubicTriInterpolator(triang, z, kind='user',
dz=(dzx, dzy))
# Test 1) Checking values and continuity at nodes
check_continuity(interp, (ax, ay), values[:, 0])
check_continuity(interp, (bx, by), values[:, 1])
check_continuity(interp, (cx, cy), values[:, 2])
# Test 2) Checking continuity at midside nodes
check_continuity(interp, ((ax+bx)*0.5, (ay+by)*0.5))
check_continuity(interp, ((ax+cx)*0.5, (ay+cy)*0.5))
check_continuity(interp, ((cx+bx)*0.5, (cy+by)*0.5))
# Test 3) Checking continuity at barycenter
check_continuity(interp, ((ax+bx+cx)/3., (ay+by+cy)/3.))
# Test 4) Checking continuity at median 1/3-point
check_continuity(interp, ((4.*ax+bx+cx)/6., (4.*ay+by+cy)/6.))
check_continuity(interp, ((ax+4.*bx+cx)/6., (ay+4.*by+cy)/6.))
check_continuity(interp, ((ax+bx+4.*cx)/6., (ay+by+4.*cy)/6.))
def test_triinterpcubic_cg_solver():
# Now 3 basic tests of the Sparse CG solver, used for
# TriCubicInterpolator with *kind* = 'min_E'
# 1) A commonly used test involves a 2d Poisson matrix.
def poisson_sparse_matrix(n, m):
"""
Return the sparse, (n*m, n*m) matrix in COO format resulting from the
discretisation of the 2-dimensional Poisson equation according to a
finite difference numerical scheme on a uniform (n, m) grid.
"""
l = m*n
rows = np.concatenate([
np.arange(l, dtype=np.int32),
np.arange(l-1, dtype=np.int32), np.arange(1, l, dtype=np.int32),
np.arange(l-n, dtype=np.int32), np.arange(n, l, dtype=np.int32)])
cols = np.concatenate([
np.arange(l, dtype=np.int32),
np.arange(1, l, dtype=np.int32), np.arange(l-1, dtype=np.int32),
np.arange(n, l, dtype=np.int32), np.arange(l-n, dtype=np.int32)])
vals = np.concatenate([
4*np.ones(l, dtype=np.float64),
-np.ones(l-1, dtype=np.float64), -np.ones(l-1, dtype=np.float64),
-np.ones(l-n, dtype=np.float64), -np.ones(l-n, dtype=np.float64)])
# In fact +1 and -1 diags have some zeros
vals[l:2*l-1][m-1::m] = 0.
vals[2*l-1:3*l-2][m-1::m] = 0.
return vals, rows, cols, (n*m, n*m)
# Instantiating a sparse Poisson matrix of size 48 x 48:
(n, m) = (12, 4)
mat = mtri._triinterpolate._Sparse_Matrix_coo(*poisson_sparse_matrix(n, m))
mat.compress_csc()
mat_dense = mat.to_dense()
# Testing a sparse solve for all 48 basis vector
for itest in range(n*m):
b = np.zeros(n*m, dtype=np.float64)
b[itest] = 1.
x, _ = mtri._triinterpolate._cg(A=mat, b=b, x0=np.zeros(n*m),
tol=1.e-10)
assert_array_almost_equal(np.dot(mat_dense, x), b)
# 2) Same matrix with inserting 2 rows - cols with null diag terms
# (but still linked with the rest of the matrix by extra-diag terms)
(i_zero, j_zero) = (12, 49)
vals, rows, cols, _ = poisson_sparse_matrix(n, m)
rows = rows + 1*(rows >= i_zero) + 1*(rows >= j_zero)
cols = cols + 1*(cols >= i_zero) + 1*(cols >= j_zero)
# adding extra-diag terms
rows = np.concatenate([rows, [i_zero, i_zero-1, j_zero, j_zero-1]])
cols = np.concatenate([cols, [i_zero-1, i_zero, j_zero-1, j_zero]])
vals = np.concatenate([vals, [1., 1., 1., 1.]])
mat = mtri._triinterpolate._Sparse_Matrix_coo(vals, rows, cols,
(n*m + 2, n*m + 2))
mat.compress_csc()
mat_dense = mat.to_dense()
# Testing a sparse solve for all 50 basis vec
for itest in range(n*m + 2):
b = np.zeros(n*m + 2, dtype=np.float64)
b[itest] = 1.
x, _ = mtri._triinterpolate._cg(A=mat, b=b, x0=np.ones(n * m + 2),
tol=1.e-10)
assert_array_almost_equal(np.dot(mat_dense, x), b)
# 3) Now a simple test that summation of duplicate (i.e. with same rows,
# same cols) entries occurs when compressed.
vals = np.ones(17, dtype=np.float64)
rows = np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1],
dtype=np.int32)
cols = np.array([0, 1, 2, 1, 1, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
dtype=np.int32)
dim = (3, 3)
mat = mtri._triinterpolate._Sparse_Matrix_coo(vals, rows, cols, dim)
mat.compress_csc()
mat_dense = mat.to_dense()
assert_array_almost_equal(mat_dense, np.array([
[1., 2., 0.], [2., 1., 5.], [0., 5., 1.]], dtype=np.float64))
def test_triinterpcubic_geom_weights():
# Tests to check computation of weights for _DOF_estimator_geom:
# The weight sum per triangle can be 1. (in case all angles < 90 degrees)
# or (2*w_i) where w_i = 1-alpha_i/np.pi is the weight of apex i; alpha_i
# is the apex angle > 90 degrees.
(ax, ay) = (0., 1.687)
x = np.array([ax, 0.5*ax, 0., 1.])
y = np.array([ay, -ay, 0., 0.])
z = np.zeros(4, dtype=np.float64)
triangles = [[0, 2, 3], [1, 3, 2]]
sum_w = np.zeros([4, 2]) # 4 possibilities; 2 triangles
for theta in np.linspace(0., 2*np.pi, 14): # rotating the figure...
x_rot = np.cos(theta)*x + np.sin(theta)*y
y_rot = -np.sin(theta)*x + np.cos(theta)*y
triang = mtri.Triangulation(x_rot, y_rot, triangles)
cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
dof_estimator = mtri._triinterpolate._DOF_estimator_geom(cubic_geom)
weights = dof_estimator.compute_geom_weights()
# Testing for the 4 possibilities...
sum_w[0, :] = np.sum(weights, 1) - 1
for itri in range(3):
sum_w[itri+1, :] = np.sum(weights, 1) - 2*weights[:, itri]
assert_array_almost_equal(np.min(np.abs(sum_w), axis=0),
np.array([0., 0.], dtype=np.float64))
def test_triinterp_colinear():
# Tests interpolating inside a triangulation with horizontal colinear
# points (refer also to the tests :func:`test_trifinder` ).
#
# These are not valid triangulations, but we try to deal with the
# simplest violations (i. e. those handled by default TriFinder).
#
# Note that the LinearTriInterpolator and the CubicTriInterpolator with
# kind='min_E' or 'geom' still pass a linear patch test.
# We also test interpolation inside a flat triangle, by forcing
# *tri_index* in a call to :meth:`_interpolate_multikeys`.
# If +ve, triangulation is OK, if -ve triangulation invalid,
# if zero have colinear points but should pass tests anyway.
delta = 0.
x0 = np.array([1.5, 0, 1, 2, 3, 1.5, 1.5])
y0 = np.array([-1, 0, 0, 0, 0, delta, 1])
# We test different affine transformations of the initial figure; to
# avoid issues related to round-off errors we only use integer
# coefficients (otherwise the Triangulation might become invalid even with
# delta == 0).
transformations = [[1, 0], [0, 1], [1, 1], [1, 2], [-2, -1], [-2, 1]]
for transformation in transformations:
x_rot = transformation[0]*x0 + transformation[1]*y0
y_rot = -transformation[1]*x0 + transformation[0]*y0
(x, y) = (x_rot, y_rot)
z = 1.23*x - 4.79*y
triangles = [[0, 2, 1], [0, 3, 2], [0, 4, 3], [1, 2, 5], [2, 3, 5],
[3, 4, 5], [1, 5, 6], [4, 6, 5]]
triang = mtri.Triangulation(x, y, triangles)
xs = np.linspace(np.min(triang.x), np.max(triang.x), 20)
ys = np.linspace(np.min(triang.y), np.max(triang.y), 20)
xs, ys = np.meshgrid(xs, ys)
xs = xs.ravel()
ys = ys.ravel()
mask_out = (triang.get_trifinder()(xs, ys) == -1)
zs_target = np.ma.array(1.23*xs - 4.79*ys, mask=mask_out)
linear_interp = mtri.LinearTriInterpolator(triang, z)
cubic_min_E = mtri.CubicTriInterpolator(triang, z)
cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
for interp in (linear_interp, cubic_min_E, cubic_geom):
zs = interp(xs, ys)
assert_array_almost_equal(zs_target, zs)
# Testing interpolation inside the flat triangle number 4: [2, 3, 5]
# by imposing *tri_index* in a call to :meth:`_interpolate_multikeys`
itri = 4
pt1 = triang.triangles[itri, 0]
pt2 = triang.triangles[itri, 1]
xs = np.linspace(triang.x[pt1], triang.x[pt2], 10)
ys = np.linspace(triang.y[pt1], triang.y[pt2], 10)
zs_target = 1.23*xs - 4.79*ys
for interp in (linear_interp, cubic_min_E, cubic_geom):
zs, = interp._interpolate_multikeys(
xs, ys, tri_index=itri*np.ones(10, dtype=np.int32))
assert_array_almost_equal(zs_target, zs)
def test_triinterp_transformations():
# 1) Testing that the interpolation scheme is invariant by rotation of the
# whole figure.
# Note: This test is non-trivial for a CubicTriInterpolator with
# kind='min_E'. It does fail for a non-isotropic stiffness matrix E of
# :class:`_ReducedHCT_Element` (tested with E=np.diag([1., 1., 1.])), and
# provides a good test for :meth:`get_Kff_and_Ff`of the same class.
#
# 2) Also testing that the interpolation scheme is invariant by expansion
# of the whole figure along one axis.
n_angles = 20
n_radii = 10
min_radius = 0.15
def z(x, y):
r1 = np.hypot(0.5 - x, 0.5 - y)
theta1 = np.arctan2(0.5 - x, 0.5 - y)
r2 = np.hypot(-x - 0.2, -y - 0.2)
theta2 = np.arctan2(-x - 0.2, -y - 0.2)
z = -(2*(np.exp((r1/10)**2)-1)*30. * np.cos(7.*theta1) +
(np.exp((r2/10)**2)-1)*30. * np.cos(11.*theta2) +
0.7*(x**2 + y**2))
return (np.max(z)-z)/(np.max(z)-np.min(z))
# First create the x and y coordinates of the points.
radii = np.linspace(min_radius, 0.95, n_radii)
angles = np.linspace(0 + n_angles, 2*np.pi + n_angles,
n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += np.pi/n_angles
x0 = (radii*np.cos(angles)).flatten()
y0 = (radii*np.sin(angles)).flatten()
triang0 = mtri.Triangulation(x0, y0) # Delaunay triangulation
z0 = z(x0, y0)
# Then create the test points
xs0 = np.linspace(-1., 1., 23)
ys0 = np.linspace(-1., 1., 23)
xs0, ys0 = np.meshgrid(xs0, ys0)
xs0 = xs0.ravel()
ys0 = ys0.ravel()
interp_z0 = {}
for i_angle in range(2):
# Rotating everything
theta = 2*np.pi / n_angles * i_angle
x = np.cos(theta)*x0 + np.sin(theta)*y0
y = -np.sin(theta)*x0 + np.cos(theta)*y0
xs = np.cos(theta)*xs0 + np.sin(theta)*ys0
ys = -np.sin(theta)*xs0 + np.cos(theta)*ys0
triang = mtri.Triangulation(x, y, triang0.triangles)
linear_interp = mtri.LinearTriInterpolator(triang, z0)
cubic_min_E = mtri.CubicTriInterpolator(triang, z0)
cubic_geom = mtri.CubicTriInterpolator(triang, z0, kind='geom')
dic_interp = {'lin': linear_interp,
'min_E': cubic_min_E,
'geom': cubic_geom}
# Testing that the interpolation is invariant by rotation...
for interp_key in ['lin', 'min_E', 'geom']:
interp = dic_interp[interp_key]
if i_angle == 0:
interp_z0[interp_key] = interp(xs0, ys0) # storage
else:
interpz = interp(xs, ys)
matest.assert_array_almost_equal(interpz,
interp_z0[interp_key])
scale_factor = 987654.3210
for scaled_axis in ('x', 'y'):
# Scaling everything (expansion along scaled_axis)
if scaled_axis == 'x':
x = scale_factor * x0
y = y0
xs = scale_factor * xs0
ys = ys0
else:
x = x0
y = scale_factor * y0
xs = xs0
ys = scale_factor * ys0
triang = mtri.Triangulation(x, y, triang0.triangles)
linear_interp = mtri.LinearTriInterpolator(triang, z0)
cubic_min_E = mtri.CubicTriInterpolator(triang, z0)
cubic_geom = mtri.CubicTriInterpolator(triang, z0, kind='geom')
dic_interp = {'lin': linear_interp,
'min_E': cubic_min_E,
'geom': cubic_geom}
# Test that the interpolation is invariant by expansion along 1 axis...
for interp_key in ['lin', 'min_E', 'geom']:
interpz = dic_interp[interp_key](xs, ys)
matest.assert_array_almost_equal(interpz, interp_z0[interp_key])
@image_comparison(['tri_smooth_contouring.png'], remove_text=True, tol=0.072)
def test_tri_smooth_contouring():
# Image comparison based on example tricontour_smooth_user.
n_angles = 20
n_radii = 10
min_radius = 0.15
def z(x, y):
r1 = np.hypot(0.5 - x, 0.5 - y)
theta1 = np.arctan2(0.5 - x, 0.5 - y)
r2 = np.hypot(-x - 0.2, -y - 0.2)
theta2 = np.arctan2(-x - 0.2, -y - 0.2)
z = -(2*(np.exp((r1/10)**2)-1)*30. * np.cos(7.*theta1) +
(np.exp((r2/10)**2)-1)*30. * np.cos(11.*theta2) +
0.7*(x**2 + y**2))
return (np.max(z)-z)/(np.max(z)-np.min(z))
# First create the x and y coordinates of the points.
radii = np.linspace(min_radius, 0.95, n_radii)
angles = np.linspace(0 + n_angles, 2*np.pi + n_angles,
n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += np.pi/n_angles
x0 = (radii*np.cos(angles)).flatten()
y0 = (radii*np.sin(angles)).flatten()
triang0 = mtri.Triangulation(x0, y0) # Delaunay triangulation
z0 = z(x0, y0)
triang0.set_mask(np.hypot(x0[triang0.triangles].mean(axis=1),
y0[triang0.triangles].mean(axis=1))
< min_radius)
# Then the plot
refiner = mtri.UniformTriRefiner(triang0)
tri_refi, z_test_refi = refiner.refine_field(z0, subdiv=4)
levels = np.arange(0., 1., 0.025)
plt.triplot(triang0, lw=0.5, color='0.5')
plt.tricontour(tri_refi, z_test_refi, levels=levels, colors="black")
@image_comparison(['tri_smooth_gradient.png'], remove_text=True, tol=0.092)
def test_tri_smooth_gradient():
    # Image comparison based on example trigradient_demo.

    def dipole_potential(x, y):
        """An electric dipole potential V."""
        r_sq = x**2 + y**2
        theta = np.arctan2(y, x)
        z = np.cos(theta)/r_sq
        return (np.max(z)-z) / (np.max(z)-np.min(z))

    # Creating a Triangulation
    n_angles = 30
    n_radii = 10
    min_radius = 0.2
    radii = np.linspace(min_radius, 0.95, n_radii)
    angles = np.linspace(0, 2*np.pi, n_angles, endpoint=False)
    angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
    # Stagger every other radial column by half an angular step.
    angles[:, 1::2] += np.pi/n_angles
    x = (radii*np.cos(angles)).flatten()
    y = (radii*np.sin(angles)).flatten()
    V = dipole_potential(x, y)
    triang = mtri.Triangulation(x, y)
    # Mask out triangles whose barycenter lies inside the central hole.
    triang.set_mask(np.hypot(x[triang.triangles].mean(axis=1),
                             y[triang.triangles].mean(axis=1))
                    < min_radius)

    # Refine data - interpolates the electrical potential V
    refiner = mtri.UniformTriRefiner(triang)
    tri_refi, z_test_refi = refiner.refine_field(V, subdiv=3)

    # Computes the electrical field (Ex, Ey) as gradient of -V
    tci = mtri.CubicTriInterpolator(triang, -V)
    Ex, Ey = tci.gradient(triang.x, triang.y)
    E_norm = np.hypot(Ex, Ey)

    # Plot the triangulation, the potential iso-contours and the vector field
    plt.figure()
    plt.gca().set_aspect('equal')
    plt.triplot(triang, color='0.8')

    levels = np.arange(0., 1., 0.01)
    cmap = mpl.colormaps['hot']
    plt.tricontour(tri_refi, z_test_refi, levels=levels, cmap=cmap,
                   linewidths=[2.0, 1.0, 1.0, 1.0])
    # Plots direction of the electrical vector field
    plt.quiver(triang.x, triang.y, Ex/E_norm, Ey/E_norm,
               units='xy', scale=10., zorder=3, color='blue',
               width=0.007, headwidth=3., headlength=4.)
    # We are leaving ax.use_sticky_margins as True, so the
    # view limits are the contour data limits.
def test_tritools():
    # Tests TriAnalyzer.scale_factors on masked triangulation
    # Tests circle_ratios on equilateral and right-angled triangle.
    x = np.array([0., 1., 0.5, 0., 2.])
    y = np.array([0., 0., 0.5*np.sqrt(3.), -1., 1.])
    triangles = np.array([[0, 1, 2], [0, 1, 3], [1, 2, 4]], dtype=np.int32)
    mask = np.array([False, False, True], dtype=bool)
    triang = mtri.Triangulation(x, y, triangles, mask=mask)
    analyser = mtri.TriAnalyzer(triang)
    assert_array_almost_equal(analyser.scale_factors, [1, 1/(1+3**.5/2)])
    # Masked (third) triangle must come back masked with a nan fill.
    assert_array_almost_equal(
        analyser.circle_ratios(rescale=False),
        np.ma.masked_array([0.5, 1./(1.+np.sqrt(2.)), np.nan], mask))

    # Tests circle ratio of a flat triangle
    x = np.array([0., 1., 2.])
    y = np.array([1., 1.+3., 1.+6.])
    triangles = np.array([[0, 1, 2]], dtype=np.int32)
    triang = mtri.Triangulation(x, y, triangles)
    analyser = mtri.TriAnalyzer(triang)
    # A degenerate (collinear) triangle has a zero incircle/circumcircle ratio.
    assert_array_almost_equal(analyser.circle_ratios(), np.array([0.]))

    # Tests TriAnalyzer.get_flat_tri_mask
    # Creates a triangulation of [-1, 1] x [-1, 1] with contiguous groups of
    # 'flat' triangles at the 4 corners and at the center. Checks that only
    # those at the borders are eliminated by TriAnalyzer.get_flat_tri_mask
    n = 9

    def power(x, a):
        return np.abs(x)**a*np.sign(x)

    x = np.linspace(-1., 1., n+1)
    x, y = np.meshgrid(power(x, 2.), power(x, 0.25))
    x = x.ravel()
    y = y.ravel()

    triang = mtri.Triangulation(x, y, triangles=meshgrid_triangles(n+1))
    analyser = mtri.TriAnalyzer(triang)
    mask_flat = analyser.get_flat_tri_mask(0.2)
    verif_mask = np.zeros(162, dtype=bool)
    corners_index = [0, 1, 2, 3, 14, 15, 16, 17, 18, 19, 34, 35, 126, 127,
                     142, 143, 144, 145, 146, 147, 158, 159, 160, 161]
    verif_mask[corners_index] = True
    assert_array_equal(mask_flat, verif_mask)

    # Now including a hole (masked triangle) at the center. The center also
    # shall be eliminated by get_flat_tri_mask.
    mask = np.zeros(162, dtype=bool)
    mask[80] = True
    triang.set_mask(mask)
    mask_flat = analyser.get_flat_tri_mask(0.2)
    center_index = [44, 45, 62, 63, 78, 79, 80, 81, 82, 83, 98, 99, 116, 117]
    verif_mask[center_index] = True
    assert_array_equal(mask_flat, verif_mask)
def test_trirefine():
    # Testing subdiv=2 refinement
    n = 3
    subdiv = 2
    x = np.linspace(-1., 1., n+1)
    x, y = np.meshgrid(x, x)
    x = x.ravel()
    y = y.ravel()
    mask = np.zeros(2*n**2, dtype=bool)
    mask[n**2:] = True
    triang = mtri.Triangulation(x, y, triangles=meshgrid_triangles(n+1),
                                mask=mask)
    refiner = mtri.UniformTriRefiner(triang)
    refi_triang = refiner.refine_triangulation(subdiv=subdiv)
    x_refi = refi_triang.x
    y_refi = refi_triang.y

    n_refi = n * subdiv**2
    x_verif = np.linspace(-1., 1., n_refi+1)
    x_verif, y_verif = np.meshgrid(x_verif, x_verif)
    x_verif = x_verif.ravel()
    y_verif = y_verif.ravel()
    # x*(2.5+y) is injective on this grid, so rounded values act as keys to
    # compare the refined point set against the expected finer meshgrid.
    ind1d = np.isin(np.around(x_verif*(2.5+y_verif), 8),
                    np.around(x_refi*(2.5+y_refi), 8))
    assert_array_equal(ind1d, True)

    # Testing the mask of the refined triangulation
    refi_mask = refi_triang.mask
    refi_tri_barycenter_x = np.sum(refi_triang.x[refi_triang.triangles],
                                   axis=1) / 3.
    refi_tri_barycenter_y = np.sum(refi_triang.y[refi_triang.triangles],
                                   axis=1) / 3.
    tri_finder = triang.get_trifinder()
    refi_tri_indices = tri_finder(refi_tri_barycenter_x,
                                  refi_tri_barycenter_y)
    refi_tri_mask = triang.mask[refi_tri_indices]
    assert_array_equal(refi_mask, refi_tri_mask)

    # Testing that the numbering of triangles does not change the
    # interpolation result.
    x = np.asarray([0.0, 1.0, 0.0, 1.0])
    y = np.asarray([0.0, 0.0, 1.0, 1.0])
    triang = [mtri.Triangulation(x, y, [[0, 1, 3], [3, 2, 0]]),
              mtri.Triangulation(x, y, [[0, 1, 3], [2, 0, 3]])]
    z = np.hypot(x - 0.3, y - 0.4)
    # Refining the 2 triangulations and reordering the points
    xyz_data = []
    for i in range(2):
        refiner = mtri.UniformTriRefiner(triang[i])
        refined_triang, refined_z = refiner.refine_field(z, subdiv=1)
        xyz = np.dstack((refined_triang.x, refined_triang.y, refined_z))[0]
        xyz = xyz[np.lexsort((xyz[:, 1], xyz[:, 0]))]
        xyz_data += [xyz]
    assert_array_almost_equal(xyz_data[0], xyz_data[1])
@pytest.mark.parametrize('interpolator',
                         [mtri.LinearTriInterpolator,
                          mtri.CubicTriInterpolator],
                         ids=['linear', 'cubic'])
def test_trirefine_masked(interpolator):
    # Duplicated points mean there are fewer triangles than points, which
    # yields masked entries; refining such a field must not raise.
    xg, yg = np.mgrid[:2, :2]
    xs = np.repeat(xg.flatten(), 2)
    ys = np.repeat(yg.flatten(), 2)
    zs = np.zeros_like(xs)
    triangulation = mtri.Triangulation(xs, ys)
    interp = interpolator(triangulation, zs)
    refiner = mtri.UniformTriRefiner(triangulation)
    refiner.refine_field(zs, triinterpolator=interp, subdiv=2)
def meshgrid_triangles(n):
    """
    Return (2*(N-1)**2, 3) array of triangles to mesh (N, N)-point np.meshgrid.
    """
    # Each grid cell (i, j) has corners a (lower-left), b (lower-right),
    # c (upper-left), d (upper-right); split it along the a-d diagonal.
    quads = ((i + j * n, (i + 1) + j * n, i + (j + 1) * n, (i + 1) + (j + 1) * n)
             for i in range(n - 1) for j in range(n - 1))
    tris = [t for a, b, c, d in quads for t in ([a, b, d], [a, d, c])]
    return np.array(tris, dtype=np.int32)
def test_triplot_return():
    # triplot must hand back the artists it creates rather than None.
    triangulation = mtri.Triangulation(
        [0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0],
        triangles=[[0, 1, 3], [3, 2, 0]])
    fig = plt.figure()
    ax = fig.add_subplot()
    result = ax.triplot(triangulation, "b-")
    assert result is not None, 'triplot should return the artist it adds'
def test_trirefiner_fortran_contiguous_triangles():
    # github issue 4180. Test requires two arrays of triangles that are
    # identical except that one is C-contiguous and one is fortran-contiguous.
    triangles1 = np.array([[2, 0, 3], [2, 1, 0]])
    assert not np.isfortran(triangles1)

    triangles2 = np.array(triangles1, copy=True, order='F')
    assert np.isfortran(triangles2)

    x = np.array([0.39, 0.59, 0.43, 0.32])
    y = np.array([33.99, 34.01, 34.19, 34.18])
    triang1 = mtri.Triangulation(x, y, triangles1)
    triang2 = mtri.Triangulation(x, y, triangles2)

    refiner1 = mtri.UniformTriRefiner(triang1)
    refiner2 = mtri.UniformTriRefiner(triang2)

    fine_triang1 = refiner1.refine_triangulation(subdiv=1)
    fine_triang2 = refiner2.refine_triangulation(subdiv=1)

    # Memory layout must not affect the refinement result.
    assert_array_equal(fine_triang1.triangles, fine_triang2.triangles)
def test_qhull_triangle_orientation():
    # github issue 4437.
    xi = np.linspace(-2, 2, 100)
    x, y = map(np.ravel, np.meshgrid(xi, xi))
    w = (x > y - 1) & (x < -1.95) & (y > -1.2)
    x, y = x[w], y[w]
    # Rotate the point set so the triangulation is not axis-aligned.
    theta = np.radians(25)
    x1 = x*np.cos(theta) - y*np.sin(theta)
    y1 = x*np.sin(theta) + y*np.cos(theta)

    # Calculate Delaunay triangulation using Qhull.
    triang = mtri.Triangulation(x1, y1)

    # Neighbors returned by Qhull.
    qhull_neighbors = triang.neighbors

    # Obtain neighbors using own C++ calculation.
    triang._neighbors = None
    own_neighbors = triang.neighbors

    # Both computations must agree, which requires Qhull triangles to be
    # consistently oriented.
    assert_array_equal(qhull_neighbors, own_neighbors)
def test_trianalyzer_mismatched_indices():
    # github issue 4999.
    x = np.array([0., 1., 0.5, 0., 2.])
    y = np.array([0., 0., 0.5*np.sqrt(3.), -1., 1.])
    triangles = np.array([[0, 1, 2], [0, 1, 3], [1, 2, 4]], dtype=np.int32)
    mask = np.array([False, False, True], dtype=bool)
    triang = mtri.Triangulation(x, y, triangles, mask=mask)
    analyser = mtri.TriAnalyzer(triang)
    # numpy >= 1.10 raises a VisibleDeprecationWarning in the following line
    # prior to the fix.
    # The call only needs to complete without warning/raising.
    analyser._get_compressed_triangulation()
def test_tricontourf_decreasing_levels():
    # Regression test for github issue 5477: non-increasing contour levels
    # must raise a ValueError.
    xs, ys, zs = [0.0, 1.0, 1.0], [0.0, 0.0, 1.0], [0.2, 0.4, 0.6]
    plt.figure()
    with pytest.raises(ValueError):
        plt.tricontourf(xs, ys, zs, [1.0, 0.0])
def test_internal_cpp_api() -> None:
    """Exercise argument validation of the private C++ `_tri` classes directly."""
    # Following github issue 8197.
    from matplotlib import _tri  # noqa: F401, ensure lazy-loaded module *is* loaded.

    # C++ Triangulation.
    with pytest.raises(
            TypeError,
            match=r'__init__\(\): incompatible constructor arguments.'):
        mpl._tri.Triangulation()  # type: ignore[call-arg]

    with pytest.raises(
            ValueError, match=r'x and y must be 1D arrays of the same length'):
        mpl._tri.Triangulation(np.array([]), np.array([1]), np.array([[]]), (), (), (),
                               False)

    x = np.array([0, 1, 1], dtype=np.float64)
    y = np.array([0, 0, 1], dtype=np.float64)
    with pytest.raises(
            ValueError,
            match=r'triangles must be a 2D array of shape \(\?,3\)'):
        mpl._tri.Triangulation(x, y, np.array([[0, 1]]), (), (), (), False)

    tris = np.array([[0, 1, 2]], dtype=np.int_)
    with pytest.raises(
            ValueError,
            match=r'mask must be a 1D array with the same length as the '
                  r'triangles array'):
        mpl._tri.Triangulation(x, y, tris, np.array([0, 1]), (), (), False)
    with pytest.raises(
            ValueError, match=r'edges must be a 2D array with shape \(\?,2\)'):
        mpl._tri.Triangulation(x, y, tris, (), np.array([[1]]), (), False)
    with pytest.raises(
            ValueError,
            match=r'neighbors must be a 2D array with the same shape as the '
                  r'triangles array'):
        mpl._tri.Triangulation(x, y, tris, (), (), np.array([[-1]]), False)
    # Valid single-triangle triangulation used by the remaining checks.
    triang = mpl._tri.Triangulation(x, y, tris, (), (), (), False)

    with pytest.raises(
            ValueError,
            match=r'z must be a 1D array with the same length as the '
                  r'triangulation x and y arrays'):
        triang.calculate_plane_coefficients([])

    for mask in ([0, 1], None):
        with pytest.raises(
                ValueError,
                match=r'mask must be a 1D array with the same length as the '
                      r'triangles array'):
            triang.set_mask(mask)  # type: ignore[arg-type]

    # Masking the only triangle leaves no edges.
    triang.set_mask(np.array([True]))
    assert_array_equal(triang.get_edges(), np.empty((0, 2)))

    triang.set_mask(())  # Equivalent to Python Triangulation mask=None
    assert_array_equal(triang.get_edges(), [[1, 0], [2, 0], [2, 1]])

    # C++ TriContourGenerator.
    with pytest.raises(
            TypeError,
            match=r'__init__\(\): incompatible constructor arguments.'):
        mpl._tri.TriContourGenerator()  # type: ignore[call-arg]

    with pytest.raises(
            ValueError,
            match=r'z must be a 1D array with the same length as the x and y arrays'):
        mpl._tri.TriContourGenerator(triang, np.array([1]))

    z = np.array([0, 1, 2])
    tcg = mpl._tri.TriContourGenerator(triang, z)

    with pytest.raises(
            ValueError, match=r'filled contour levels must be increasing'):
        tcg.create_filled_contour(1, 0)

    # C++ TrapezoidMapTriFinder.
    with pytest.raises(
            TypeError,
            match=r'__init__\(\): incompatible constructor arguments.'):
        mpl._tri.TrapezoidMapTriFinder()  # type: ignore[call-arg]

    trifinder = mpl._tri.TrapezoidMapTriFinder(triang)

    with pytest.raises(
            ValueError, match=r'x and y must be array-like with same shape'):
        trifinder.find_many(np.array([0]), np.array([0, 1]))
def test_qhull_large_offset():
    # Regression test for github issue 8682: a huge coordinate offset must
    # not change the Delaunay triangulation produced by qhull.
    pts_x = np.asarray([0, 1, 0, 1, 0.5])
    pts_y = np.asarray([0, 0, 1, 1, 0.5])
    shift = 1e10
    base = mtri.Triangulation(pts_x, pts_y)
    shifted = mtri.Triangulation(pts_x + shift, pts_y + shift)
    assert len(base.triangles) == len(shifted.triangles)
def test_tricontour_non_finite_z():
    """Non-finite or masked z values inside the triangulation must raise."""
    # github issue 10167.
    x = [0, 1, 0, 1]
    y = [0, 0, 1, 1]
    triang = mtri.Triangulation(x, y)
    plt.figure()

    with pytest.raises(ValueError, match='z array must not contain non-finite '
                                         'values within the triangulation'):
        plt.tricontourf(triang, [0, 1, 2, np.inf])

    with pytest.raises(ValueError, match='z array must not contain non-finite '
                                         'values within the triangulation'):
        plt.tricontourf(triang, [0, 1, 2, -np.inf])

    with pytest.raises(ValueError, match='z array must not contain non-finite '
                                         'values within the triangulation'):
        plt.tricontourf(triang, [0, 1, 2, np.nan])

    with pytest.raises(ValueError, match='z must not contain masked points '
                                         'within the triangulation'):
        plt.tricontourf(triang, np.ma.array([0, 1, 2, 3], mask=[1, 0, 0, 0]))
def test_tricontourset_reuse():
    # Passing a TriContourSet as the first argument of a later tricontour(f)
    # call must reuse its underlying C++ contour generator.
    xs = [0.0, 0.5, 1.0]
    ys = [0.0, 1.0, 0.0]
    zs = [1.0, 2.0, 3.0]
    fig, ax = plt.subplots()
    filled = ax.tricontourf(xs, ys, zs)
    lines = ax.tricontour(xs, ys, zs)
    # Independent calls each build their own generator.
    assert lines._contour_generator != filled._contour_generator
    reused = ax.tricontour(filled, zs)
    assert reused._contour_generator == filled._contour_generator
@check_figures_equal()
def test_triplot_with_ls(fig_test, fig_ref):
    # 'ls' must behave as an alias for 'linestyle' in triplot.
    xs = [0, 2, 1]
    ys = [0, 0, 1]
    tris = [[0, 1, 2]]
    fig_test.subplots().triplot(xs, ys, tris, ls='--')
    fig_ref.subplots().triplot(xs, ys, tris, linestyle='--')
def test_triplot_label():
    # The 'label' kwarg must be attached to the line artist only, and show up
    # exactly once in the legend handles.
    xs = [0, 2, 1]
    ys = [0, 0, 1]
    tris = [[0, 1, 2]]
    fig, ax = plt.subplots()
    line_artist, marker_artist = ax.triplot(xs, ys, tris, label='label')
    handles, labels = ax.get_legend_handles_labels()
    assert labels == ['label']
    assert len(handles) == 1
    assert handles[0] is line_artist
def test_tricontour_path():
    """Check vertices/codes of the paths produced by unfilled tricontour."""
    x = [0, 4, 4, 0, 2]
    y = [0, 0, 4, 4, 2]
    triang = mtri.Triangulation(x, y)
    _, ax = plt.subplots()

    # Line strip from boundary to boundary
    cs = ax.tricontour(triang, [1, 0, 0, 0, 0], levels=[0.5])
    paths = cs.get_paths()
    assert len(paths) == 1
    expected_vertices = [[2, 0], [1, 1], [0, 2]]
    assert_array_almost_equal(paths[0].vertices, expected_vertices)
    # Codes: MOVETO followed by LINETOs (open strip, no CLOSEPOLY).
    assert_array_equal(paths[0].codes, [1, 2, 2])
    assert_array_almost_equal(
        paths[0].to_polygons(closed_only=False), [expected_vertices])

    # Closed line loop inside domain
    cs = ax.tricontour(triang, [0, 0, 0, 0, 1], levels=[0.5])
    paths = cs.get_paths()
    assert len(paths) == 1
    expected_vertices = [[3, 1], [3, 3], [1, 3], [1, 1], [3, 1]]
    assert_array_almost_equal(paths[0].vertices, expected_vertices)
    # Codes: MOVETO, LINETOs, CLOSEPOLY (79).
    assert_array_equal(paths[0].codes, [1, 2, 2, 2, 79])
    assert_array_almost_equal(paths[0].to_polygons(), [expected_vertices])
def test_tricontourf_path():
    """Check vertices/codes of the paths produced by filled tricontourf."""
    x = [0, 4, 4, 0, 2]
    y = [0, 0, 4, 4, 2]
    triang = mtri.Triangulation(x, y)
    _, ax = plt.subplots()

    # Polygon inside domain
    cs = ax.tricontourf(triang, [0, 0, 0, 0, 1], levels=[0.5, 1.5])
    paths = cs.get_paths()
    assert len(paths) == 1
    expected_vertices = [[3, 1], [3, 3], [1, 3], [1, 1], [3, 1]]
    assert_array_almost_equal(paths[0].vertices, expected_vertices)
    # Codes: MOVETO, LINETOs, CLOSEPOLY (79).
    assert_array_equal(paths[0].codes, [1, 2, 2, 2, 79])
    assert_array_almost_equal(paths[0].to_polygons(), [expected_vertices])

    # Polygon following boundary and inside domain
    cs = ax.tricontourf(triang, [1, 0, 0, 0, 0], levels=[0.5, 1.5])
    paths = cs.get_paths()
    assert len(paths) == 1
    expected_vertices = [[2, 0], [1, 1], [0, 2], [0, 0], [2, 0]]
    assert_array_almost_equal(paths[0].vertices, expected_vertices)
    assert_array_equal(paths[0].codes, [1, 2, 2, 2, 79])
    assert_array_almost_equal(paths[0].to_polygons(), [expected_vertices])

    # Polygon is outer boundary with hole
    cs = ax.tricontourf(triang, [0, 0, 0, 0, 1], levels=[-0.5, 0.5])
    paths = cs.get_paths()
    assert len(paths) == 1
    # Single compound path: outer ring then inner (hole) ring.
    expected_vertices = [[0, 0], [4, 0], [4, 4], [0, 4], [0, 0],
                         [1, 1], [1, 3], [3, 3], [3, 1], [1, 1]]
    assert_array_almost_equal(paths[0].vertices, expected_vertices)
    assert_array_equal(paths[0].codes, [1, 2, 2, 2, 79, 1, 2, 2, 2, 79])
    assert_array_almost_equal(paths[0].to_polygons(), np.split(expected_vertices, [5]))
| TestTriangulationParams |
python | pytorch__pytorch | torch/_inductor/codegen/cpp.py | {
"start": 62650,
"end": 92023
} | class ____(Kernel):
"""
Base class for C++ kernel code generation in PyTorch Inductor.
This class is responsible for generating C++ code from the intermediate representation.
Args:
args: Kernel arguments used for code generation
num_threads: Number of threads for parallel execution
"""
overrides = CppOverrides # type: ignore[assignment]
sexpr = cexpr
newvar_prefix = "auto "
suffix = ";"
    def __init__(self, args, num_threads):
        """
        Args:
            args: kernel arguments shared for code generation.
            num_threads: number of threads this kernel is specialized for.
        """
        super().__init__(args)
        # Indicate when this kernel is active, for example
        # {x0, {24, 26}} -> this kernel is active when x0 >= 24 and x0 < 26
        self.active_ranges: dict[sympy.Expr, tuple[sympy.Expr, ...]] = {}
        # Indicate this kernel will be moved under the inner for-loop
        # See move_code_under_inner_loop
        self.inner_itervars: list[sympy.Symbol] = []
        # Full iteration space (data ranges + reduction ranges); set in set_ranges.
        self.call_ranges: Optional[tuple[sympy.Expr, ...]] = None
        self.ranges: list[sympy.Expr] = []
        self.itervars: list[sympy.Symbol] = []
        # Index into itervars/ranges where the reduction dimensions begin.
        self.reduction_depth = None
        self.reduction_prefix = IndentedBuffer()
        # We need this because when we run "reduction" nodes here, we lack
        # "loop" information to decide whether we need a scalar init or an array init
        # in the reduction prefix. Meanwhile, we have other information like
        # reduction types and dtype to generate the reduction prefix. We record the information
        # with a callable lambda function, and when we have enough information to finalize
        # the reduction prefix, we can invoke the functions here with additional information.
        self.reduction_prefix_generators: list[Callable] = []  # type: ignore[type-arg]
        self.reduction_suffix = IndentedBuffer()
        self.parallel_reduction_prefix = IndentedBuffer()
        self.parallel_reduction_suffix = IndentedBuffer()
        self.local_reduction_init = IndentedBuffer()
        self.local_reduction_stores = IndentedBuffer()
        self.is_reduction = False
        self.non_parallel_reduction_prefix = IndentedBuffer()
        self.non_parallel_reduction_suffix = IndentedBuffer()
        # CSE caches for reduction accumulators and precision-helper objects.
        self.reduction_cse = CSE(self.newvar_prefix, self.suffix, name_prefix="tmp_acc")
        self.welford_helper_cse = CSE(
            self.newvar_prefix, self.suffix, name_prefix="welford_helper"
        )
        self.cascade_helper_cse = CSE(
            self.newvar_prefix, self.suffix, name_prefix="cascade_helper"
        )
        self.preloads = IndentedBuffer()
        self.poststores = IndentedBuffer()
        self.num_threads = num_threads  # num_threads the kernel specialized for
        self.reduction_omp_dec: dict[tuple[str, str], str] = {}
        self.reduction_var_names: list[str] = []
    def _gen_parallel_reduction_buffers(
        self,
        acc,
        acc_type,
        reduction_type,
        dtype,
        reduction_combine_fn=reduction_combine,
        reduction_init_fn=reduction_init,
    ):
        """
        Emit the per-thread machinery for a parallel reduction on ``acc``:
        a thread-local accumulator, a per-thread results array, and the
        sequential combine loop run after the parallel region.
        """
        if config.cpp.dynamic_threads and not self.parallel_reduction_prefix:
            self.parallel_reduction_prefix.writeline(
                "int max_threads = omp_get_max_threads();"
            )
        acc_local = f"{acc}_local"
        num_threads = (
            "max_threads" if config.cpp.dynamic_threads else parallel_num_threads()
        )
        acc_local_in_array = f"{acc}_arr[tid]"
        # Each thread starts from the reduction identity value.
        self.local_reduction_init.writeline(
            f"{acc_type} {acc_local} = {reduction_init_fn(reduction_type, dtype)};"
        )
        self.parallel_reduction_prefix.splice(
            reduction_prefix_array(
                acc,
                acc_type,
                reduction_type,
                dtype,
                num_threads,
                reduction_init_fn,
            )
        )
        self.local_reduction_stores.writeline(f"{acc_local_in_array} = {acc_local};")
        # After the parallel region, fold every thread's partial result into acc.
        self.parallel_reduction_suffix.writelines(
            [
                f"for (int tid = 0; tid < {num_threads}; tid++)",
                "{",
                f" {acc} = {reduction_combine_fn(reduction_type, acc, acc_local_in_array, src_dtype=dtype)};",
                "}",
            ],
        )
def update_stores_with_parallel_reduction(self):
for var_name in self.reduction_var_names:
replace_acc_name(self.stores, var_name, f"{var_name}_local")
    def gen_body(self, code: Optional[BracesBuffer] = None):
        """
        Assemble the kernel body from the recorded loads/compute/stores
        buffers (plus preloads/poststores when inner loops are generated)
        and, for tail kernels, re-wrap it under explicit inner loops.
        """
        assert code is None
        code = BracesBuffer()
        with contextlib.ExitStack() as stack:
            if hasattr(self, "codegen_inner_loops"):
                code.splice(self.preloads)
                self.codegen_inner_loops(code)
                stack.enter_context(code.indent())
            code.splice(self.loads)
            code.splice(self.compute)
            code.splice(self.stores)
        if hasattr(self, "codegen_inner_loops"):
            code.splice(self.poststores)
        if self.inner_itervars:
            # Tail kernel: move the generated body under inner loops covering
            # only the active (tail) ranges.
            for idx in self.inner_itervars:
                start, end = self.active_ranges[idx]
                code = move_code_under_inner_loop(code, idx, f"{idx}_tail", start, end)
        return code
    @contextlib.contextmanager
    def masked(self, mask):
        """Context manager to add an additional mask to loads and stores."""
        prior = self._load_mask
        if prior:
            # Combine with any mask already in effect.
            mask = ops.and_(mask, prior)
            if isinstance(mask, OpsValue):
                mask = mask.value
                assert isinstance(mask, CppCSEVariable)
                # see NOTE [dtype of CppCSEVariable]
                # mask's dtype should be bool
                mask.dtype = torch.bool

        # pyrefly: ignore [bad-assignment]
        self._load_mask = mask
        try:
            yield mask
        finally:
            # Always restore the previous mask, even if the body raised.
            self._load_mask = prior
def scale_index_with_offset(
self, index: sympy.Expr, scale=1, itervar_idx=-1, offset=0
):
var = self.itervars[itervar_idx]
replacement = {var: var * scale + offset}
new_index = sympy_subs(index, replacement)
return new_index
def index_to_str(self, index: sympy.Expr) -> str:
"""
Convert an index expr to a string that can be used in cpp code.
e.g. a sympy expression "s2" may actually appear as "ks1" in the cpp kernel.
"""
return cexpr(self.rename_indexing(index))
    def index_indirect_depends_on(self, index: sympy.Expr, itervar: sympy.Symbol):
        """
        Check if an index has free symbol CppCSEVariable that depends on `itervar`.
        """
        # Only symbols backed by a CppCSEVariable (i.e. indirect-indexing
        # temporaries) carry dependency information.
        return any(
            self.cse.varname_map[s.name].depends_on(itervar)  # type: ignore[attr-defined]
            for s in index.free_symbols
            if s.name in self.cse.varname_map  # type: ignore[attr-defined]
            and isinstance(self.cse.varname_map[s.name], CppCSEVariable)  # type: ignore[attr-defined]
        )
def index_depends_on(self, index: sympy.Expr, itervar: sympy.Symbol):
return itervar in index.free_symbols or self.index_indirect_depends_on(
index, itervar
)
def var_ranges(self):
return dict(zip(self.itervars, self.ranges))
    def check_bounds(
        self,
        expr: sympy.Expr,
        size: sympy.Expr,
        lower: bool,
        upper: bool,
    ):
        """
        Emit a runtime assertion that ``expr`` lies within [0, size); either
        bound may be disabled.  The assert goes into ``compute`` for indirect
        (TMP-symbol) indexing, into ``loads`` otherwise.
        """
        if not (lower or upper):
            return

        indirect = free_symbol_is_type(expr, SymT.TMP)
        if indirect:
            # indexing in compute
            csevar = ops.index_expr(expr, torch.int64).value
            buffer = V.kernel.compute
        else:
            # indexing in loads
            prior_compute = V.kernel.compute
            try:
                # Temporarily redirect compute so index_expr emits into loads.
                V.kernel.compute = self.loads
                csevar = ops.index_expr(expr, torch.int64).value
            finally:
                V.kernel.compute = prior_compute
            buffer = self.loads

        size_str = V.kernel.sexpr(self.rename_indexing(size)) if upper else None

        line = self.indirect_assert(
            csevar, "0" if lower else None, size_str, self._load_mask
        )
        self.cse.generate(buffer, line, assignment=False)
    def load(self, name: str, index: sympy.Expr):
        """Generate a scalar load from buffer ``name`` at ``index`` and CSE it."""
        var = self.args.input(name)
        index = self.rename_indexing(index)
        line = f"{var}[{cexpr_index(index)}]"
        csevar = self.cse.generate(self.loads, line, dtype=V.graph.get_dtype(name))
        # Record provenance so dependency tracking knows about this load.
        csevar.update_on_args("load", (self, name, index), {})
        return csevar
    def store(self, name, index, value, mode=None):
        """
        Generate a scalar store of ``value`` into buffer ``name`` at ``index``.

        mode=None emits a plain assignment; mode="atomic_add" emits "+=" when
        known single-threaded, otherwise an atomic_add call.  Other modes
        raise NotImplementedError.
        """
        assert "buf" in name
        var = self.args.output(name)
        index = self.rename_indexing(index)
        if mode is None:
            line = f"{var}[{cexpr_index(index)}] = {value};"
        elif mode == "atomic_add":
            if not config.cpp.dynamic_threads and self.num_threads == 1:
                line = f"{var}[{cexpr_index(index)}] += {value};"
            else:
                dtype = V.graph.get_dtype(name)
                # mirroring static_cast<float>(...) in load:
                value = f"static_cast<{DTYPE_TO_CPP[dtype]}>({value})"
                line = f"atomic_add(&{var}[{cexpr_index(index)}], {value});"
        else:
            raise NotImplementedError(f"store mode={mode}")
        self.stores.writeline(DeferredLine(name, line))
    def device_assert_async(self, cond, msg):
        """Emit a C++ runtime check that throws ``msg`` when ``cond`` is false."""
        self.compute.writeline(
            f'({cond} ? 0 : (throw std::runtime_error("{msg}"), 0));'
        )
    def _gen_reduction_prefix(
        self,
        acc: Union[CSEVariable, str],
        acc_type: str,
        rtype: str,
        dtype: torch.dtype,
        init_fn,
    ):
        """
        Return a deferred generator producing the declaration/initialization
        of accumulator ``acc`` (invoked later via reduction_prefix_generators,
        once the loop structure is known).
        """
        # Generate reduction prefix
        # If size is None, we will define and initialize a single reduction variable
        # => float tmp_acc0 = 0;
        # Otherwise, we will define and initialize a reduction array
        # => float tmp_acc0_arr[size];
        # => for (int i = 0; i < size; i++) tmp_acc0_arr[i] = 0;
        def inner(size: Optional[int] = None):
            if size is None:
                return f"{acc_type} {acc} = {init_fn(rtype, dtype)};"
            else:
                return reduction_prefix_array(
                    acc,
                    acc_type,
                    rtype,
                    dtype,
                    size,
                    init_fn,
                )

        return inner
def finalize_reduction_prefix(self, size: Optional[int] = None):
for gen_fn in self.reduction_prefix_generators:
self.reduction_prefix.splice(gen_fn(size))
    def need_use_acc_helper(self, reduction_type, dtype, use_scalar):
        # Check if we need accumulate helper for the reduction operation.
        # using accumulate helper generates the necessary code to improve precision for
        # sum and welford
        # Note: using helper has non-negligible impact on performance
        # NOTE(review): ``use_scalar`` is not referenced in this method body;
        # presumably kept for signature symmetry with callers — confirm.
        if reduction_type == "welford_reduce":
            return True

        # TODO add supports for more data types when needed
        if reduction_type == "sum" and dtype == torch.float:
            assert self.call_ranges is not None
            reduction_size = functools.reduce(
                operator.mul, self.call_ranges[self.reduction_depth :]
            )

            # chunk size to balance accuracy and performance
            chunk_size = 4096

            # use acc helper If cannot get size_hint
            try:
                reduction_size_hint = V.graph.sizevars.size_hint(reduction_size)
            except Exception:
                return True

            if reduction_size_hint > chunk_size:
                # use helper if the reduction size is too large
                V.graph.sizevars.check_lt(chunk_size, reduction_size)
                return True
            else:
                V.graph.sizevars.check_leq(reduction_size, chunk_size)

        return False
    def _acc_helper_init(
        self,
        reduction_type,
        helper_val,
        helper_range,
        dtype,
        num_threads=None,
        use_scalar=False,
    ):
        """
        Build the C++ declaration line for a CascadeSumHelper/WelfordHelper
        named ``helper_val`` covering ``helper_range`` iterations (divided per
        thread when ``num_threads`` is given).  ``use_scalar`` selects the
        scalar element type instead of the vectorized one.
        """
        num_range_thread = (
            CeilDiv(helper_range, num_threads) if num_threads else helper_range
        )
        num_range_thread_expr = cexpr_index(num_range_thread)
        assert reduction_type in ["welford_reduce", "sum"]
        chunk_size = 4096
        num_chunks = CeilDiv(num_range_thread, chunk_size)
        helper_type = (
            "WelfordHelper"
            if reduction_type == "welford_reduce"
            else "CascadeSumHelper"
        )
        if use_scalar:
            h_type = DTYPE_TO_CPP[dtype]
        else:
            # Fall back to the scalar type when the kernel has no vector type.
            h_type = (
                self._get_vec_type(dtype)
                if hasattr(self, "_get_vec_type")
                else DTYPE_TO_CPP[dtype]
            )
        helper_init_line = (
            f"{helper_type}<{h_type}, {chunk_size}> {helper_val}"
            f"("
            f"{num_range_thread_expr}"
            f");"
        )
        if reduction_type == "sum":
            return helper_init_line
        if isinstance(num_chunks, sympy.Integer) and num_chunks <= 1:
            # When the number of chunks <= 1, there is no need to use cascade summation to improve
            # reduction accuracy. We can initialize a static WelfordHelper to improve performance.
            return f"static {helper_init_line}"
        else:
            return helper_init_line
    def _use_acc_helper(
        self, reduction_type, acc, helper_val, helper_range, dtype, use_scalar=False
    ):
        """
        Register ``helper_val`` (cascade-sum/Welford precision helper):
        declarations in the non-parallel prefix and per-thread init, plus the
        matching finalization into the accumulator in the suffixes.
        """
        num_threads = (
            "max_threads" if config.cpp.dynamic_threads else parallel_num_threads()
        )
        self.non_parallel_reduction_prefix.writeline(
            self._acc_helper_init(
                reduction_type, helper_val, helper_range, dtype, None, use_scalar
            )
        )
        self.local_reduction_init.writeline(
            self._acc_helper_init(
                reduction_type, helper_val, helper_range, dtype, num_threads, use_scalar
            )
        )
        # Vectorized kernels accumulate into the "_vec" variant of acc.
        result = acc if use_scalar else f"{acc}_vec"
        if reduction_type == "welford_reduce":
            self.non_parallel_reduction_suffix.writeline(
                f"{result} = welford_combine({result}, &{helper_val});"
            )
            self.local_reduction_stores.writeline(
                f"{result}_local = welford_combine({result}_local, &{helper_val});"
            )
        else:
            self.non_parallel_reduction_suffix.writeline(
                f"{result} = cascade_sum_final(&{helper_val});"
            )
            self.local_reduction_stores.writeline(
                f"{result}_local = cascade_sum_final(&{helper_val});"
            )
    def reduction(self, dtype, src_dtype, reduction_type, value):
        """
        Emit code accumulating ``value`` into a reduction accumulator and
        return the projected result; results are cached per
        (src_dtype, reduction_type, value).
        """
        argmax_or_argmin = reduction_type in ("argmax", "argmin")
        reduction_key = src_dtype, reduction_type, value
        if reduction_key in self.reduction_cse.reduction_cache:
            return self.reduction_cse.reduction_cache[reduction_key]

        acc = self.reduction_cse.generate(
            self.loads, f"reduction {reduction_key}", write=False
        )
        self.reduction_var_names.append(f"{acc}")
        self.is_reduction = True
        # argmax/argmin accumulators are initialized from the source dtype.
        init_dtype = src_dtype if argmax_or_argmin else dtype
        acc_type = reduction_acc_type(reduction_type, init_dtype)
        # Defer prefix generation until loop info is available.
        self.reduction_prefix_generators.append(
            self._gen_reduction_prefix(
                acc, acc_type, reduction_type, init_dtype, reduction_init
            )
        )

        if self.need_use_acc_helper(reduction_type, dtype, True):
            # use cascade_helper for vec kernel
            reduction_size = functools.reduce(
                operator.mul, self.ranges[self.reduction_depth :]
            )
            # use welford_helper/cascade_helper for vec kernel
            if reduction_type == "welford_reduce":
                helper_val = self.welford_helper_cse.generate(
                    self.compute, f"reduction {reduction_key}", write=False
                )
            else:
                helper_val = self.cascade_helper_cse.generate(
                    self.compute, f"reduction {reduction_key}", write=False
                )
            # rename the helper variable to distinguish it from vectorized version
            scalar_helper_val = f"scalar_{helper_val}"
            self._use_acc_helper(
                reduction_type,
                acc,
                scalar_helper_val,
                reduction_size,
                dtype,
                use_scalar=True,
            )
            self.stores.writeline(
                f"{acc} = {reduction_combine(reduction_type, acc, value, scalar_helper_val)};"
            )
        else:
            assert self.reduction_depth is not None
            # Linearized index over the reduction dimensions (used e.g. by
            # argmax/argmin combines).
            index = self.itervars[self.reduction_depth]
            for i in range(self.reduction_depth + 1, len(self.itervars)):
                index = index * self.ranges[i] + self.itervars[i]
            self.stores.writeline(
                f"{acc} = {reduction_combine(reduction_type, acc, value, index=index)};"
            )

        self._gen_parallel_reduction_buffers(acc, acc_type, reduction_type, init_dtype)
        result = reduction_project(reduction_type, acc)
        self.reduction_cse.reduction_cache[reduction_key] = result
        return result
def store_reduction(self, name, index, value):
index = self.rename_indexing(index)
var = self.args.output(name)
self.reduction_suffix.writeline(
DeferredLine(name, f"{var}[{cexpr_index(index)}] = {value};")
)
    def set_ranges(self, lengths, reduction_lengths):
        """
        Record the kernel's iteration space and return
        (data itervars, reduction itervars).  On repeated calls the ranges
        must match what was recorded first.
        """
        if self.call_ranges:
            assert self.call_ranges == tuple(lengths) + tuple(reduction_lengths), (
                f"{self.call_ranges} == {tuple(lengths)} + {tuple(reduction_lengths)}"
            )
            assert self.reduction_depth == len(lengths)
        else:
            self.call_ranges = tuple(lengths) + tuple(reduction_lengths)
            self.ranges = [self.rename_indexing(x) for x in self.call_ranges]
            self.itervars = [
                sympy_index_symbol_with_prefix(SymT.XBLOCK, n)
                for n in range(len(self.ranges))
            ]
            # pyrefly: ignore [bad-assignment]
            self.reduction_depth = len(lengths)
        return (
            self.itervars[: self.reduction_depth],
            self.itervars[self.reduction_depth :],
        )
def size_hint(self):
assert self.call_ranges is not None
return V.graph.sizevars.size_hint(
sympy_product(self.call_ranges), fallback=8192
)
def codegen_loops_impl(self, loop_nest, code, worksharing):
    """Emit the C++ loop nest for this kernel into *code*.

    First decides how many outer loop levels to parallelize (OpenMP,
    managed by *worksharing*), then recursively walks *loop_nest*,
    emitting loop headers, reduction prologues/epilogues, optional
    local-buffer allocations for outer-loop fusion, and finally the
    kernel body.
    """
    assert isinstance(self, CppKernelProxy)
    threads = parallel_num_threads()
    assert self.call_ranges is not None
    # An OuterLoopFusedKernel owns the fused outer loops, so it decides
    # the parallel depth itself; otherwise use this kernel's heuristic.
    if isinstance(loop_nest.kernel, OuterLoopFusedKernel):
        par_depth = loop_nest.kernel.decide_parallel_depth(
            loop_nest.max_parallel_depth(), threads
        )
    else:
        par_depth = self.decide_parallel_depth(
            loop_nest.max_parallel_depth(), threads
        )
    # True when the first parallelizable loop level is itself a reduction.
    is_reduction_loop = (
        loop_nest.loops is not None
        and loop_nest.loops[par_depth.start_depth].is_reduction
    )

    with contextlib.ExitStack() as stack:
        if par_depth.parallel_depth:
            if is_reduction_loop:
                # need to close the worksharing scope to define reduction vars outside it
                worksharing.close()
            else:
                worksharing.parallel(threads)
            loop_nest.mark_parallel(par_depth)
        elif threads > 1:
            # No parallel loop here, but we are inside a parallel region:
            # run this kernel on a single thread.
            if worksharing.single():
                stack.enter_context(code.indent())

        def gen_kernel(_loop_nest: LoopNest):
            # Emit the innermost kernel body once all loop levels are open.
            def is_parallel_reduction():
                assert _loop_nest.loops
                root = _loop_nest.loops[par_depth.start_depth]
                return root.is_reduction and root.parallel

            kernel = _loop_nest.get_kernel()
            if isinstance(kernel, OuterLoopFusedKernel):
                for _loop_nest in kernel.inner:
                    gen_loop_nest(_loop_nest)
            else:
                assert isinstance(kernel, CppKernelProxy)
                if _loop_nest.loops is not None and is_parallel_reduction():
                    kernel.update_stores_with_parallel_reduction()
                with contextlib.ExitStack() as stack:
                    stack.enter_context(code.indent())
                    kernel.gen_body(code)

        def get_reduction_prefix_suffix(kernel, parallel=False, is_suffix=False):
            # Select the reduction prologue/epilogue matching whether the
            # reduction loop runs in parallel.
            if is_suffix:
                suffix = kernel.reduction_suffix
                if parallel:
                    suffix = kernel.parallel_reduction_suffix + suffix
                else:
                    suffix = kernel.non_parallel_reduction_suffix + suffix
                return suffix
            else:
                prefix = kernel.reduction_prefix
                if parallel:
                    prefix = prefix + kernel.parallel_reduction_prefix
                else:
                    prefix = prefix + kernel.non_parallel_reduction_prefix
                return prefix

        def gen_loop_with_reduction(
            _loop_nest: LoopNest, depth: int = 0, in_reduction=False
        ):
            # Emit one loop level, wrapping it with reduction prologue and
            # epilogue when this level starts a reduction.
            kernel = _loop_nest.get_kernel()
            assert _loop_nest.loops
            loop = _loop_nest.loops[depth]
            with contextlib.ExitStack() as stack_outer:
                if loop.is_reduction and not in_reduction:
                    reduction_prefix = get_reduction_prefix_suffix(
                        kernel, loop.parallel, is_suffix=False
                    )
                    if reduction_prefix:
                        stack_outer.enter_context(code.indent())
                    code.splice(reduction_prefix)
                if is_reduction_loop and loop.parallel:
                    # Reduction vars were declared outside the parallel
                    # region; open it now and init per-thread locals.
                    worksharing.parallel(threads)
                    if kernel.local_reduction_init:
                        assert kernel.local_reduction_stores
                        code.splice(kernel.local_reduction_init)

                gen_loop_at(_loop_nest, depth)

                if is_reduction_loop and loop.parallel:
                    if kernel.local_reduction_stores:
                        code.splice(kernel.local_reduction_stores)
                    worksharing.close()
                if loop.is_reduction and not in_reduction:
                    code.splice(
                        get_reduction_prefix_suffix(
                            kernel, loop.parallel, is_suffix=True
                        )
                    )

        def gen_loop_at(_loop_nest: LoopNest, depth: int = 0):
            # Emit the loop header at *depth* and recurse into its body.
            with contextlib.ExitStack() as stack:
                assert _loop_nest.loops
                loop = _loop_nest.loops[depth]
                loop_lines = loop.lines()
                if loop_lines is None:
                    return
                code.writelines(loop_lines)
                stack.enter_context(code.indent())
                gen_loop_nest(_loop_nest, depth + 1, loop.is_reduction)

        def gen_loop_nest(
            _loop_nest: LoopNest,
            depth: int = 0,
            in_reduction: bool = False,
        ):
            # Recursion driver: body once all levels are open, else next loop.
            if _loop_nest.loops is None or depth == len(_loop_nest.loops):  # type: ignore[arg-type]
                gen_kernel(_loop_nest)
            else:
                gen_loop_with_reduction(_loop_nest, depth, in_reduction)

        stack.enter_context(code.indent())
        if (
            isinstance(loop_nest.kernel, OuterLoopFusedKernel)
            and isinstance(V.local_buffer_context, LocalBufferContext)
            and V.local_buffer_context.local_buffers
        ):
            # Allocate local buffer
            local_buffers = V.local_buffer_context.local_buffers
            for local_buffer in local_buffers.values():
                # For dynamic size, rename s to ks
                local_buf_size = sympy_product(
                    [
                        self.rename_indexing(size_val)
                        for size_val in local_buffer.get_layout().size
                    ]
                )
                local_buf_dtype = DTYPE_TO_CPP[local_buffer.get_layout().dtype]
                allocate = f"std::make_unique<{local_buf_dtype} []>({cexpr(local_buf_size)})"
                local_buffer_name = local_buffer.get_name()
                code.splice(
                    f"std::unique_ptr<{local_buf_dtype} []> buf_{local_buffer_name} = {allocate};"
                )
                code.splice(
                    f"{local_buf_dtype}* {local_buffer_name} = buf_{local_buffer_name}.get();"
                )
        gen_loop_nest(loop_nest)
def codegen_loops(self, code, worksharing):
    """Build the loop nest for this kernel and emit it into *code*."""
    self.codegen_loops_impl(LoopNest.build(self), code, worksharing)
@property
def assert_function(self) -> str:
    """C++ check macro to emit: the AOTI flavor under AOT-Inductor mode."""
    return "AOTI_TORCH_CHECK" if V.graph.aot_mode else "TORCH_CHECK"
def decide_parallel_depth(self, max_parallel_depth, threads):
    """Choose how many loop levels (from ``start_depth``) to parallelize.

    Greedily extends the parallel depth while (a) the accumulated parallel
    work ``par`` has not yet reached/passed the thread count and (b) the
    remaining sequential work per thread stays above the configured
    minimum chunk size.
    """
    assert self.call_ranges is not None
    # Only the candidate levels allowed by max_parallel_depth.
    ranges = self.call_ranges[
        max_parallel_depth.start_depth : (
            max_parallel_depth.start_depth + max_parallel_depth.parallel_depth
        )
    ]
    seq = self.size_hint()  # remaining (sequential) work estimate
    par = 1  # work exposed to parallelism so far
    depth = 0
    for expr in ranges:
        hint = V.graph.sizevars.size_hint(expr, fallback=8192)
        # Stop once parallel work matches or safely exceeds thread count.
        if par >= 2 * threads or par == threads:
            break
        if seq // threads < config.cpp.min_chunk_size:
            # not enough work
            break
        depth += 1
        par *= hint
        seq /= hint
    # if we assume thread number is dynamic, make sure we
    # have at least one parallel scope and let OMP runtime
    # to manage the serial vs. parallel.
    if config.cpp.dynamic_threads and depth == 0 and len(ranges) > 0:
        depth = 1
    return ParallelDepth(
        parallel_depth=depth, start_depth=max_parallel_depth.start_depth
    )
@contextlib.contextmanager
def write_to_suffix(self):
    """Redirect code emitted inside the ``with`` block into ``reduction_suffix``.

    Temporarily swaps in fresh load/compute/store buffers and a cloned CSE
    cache, then splices whatever was generated into the reduction suffix
    and restores the originals (restore must mirror the save order).
    """
    prior = (self.loads, self.compute, self.stores, self.cse)
    self.loads = IndentedBuffer()
    self.compute = IndentedBuffer()
    self.stores = IndentedBuffer()
    # Clone so CSE entries created here do not leak into the main body.
    self.cse = self.cse.clone()
    yield
    self.reduction_suffix.splice(self.loads)
    self.reduction_suffix.splice(self.compute)
    self.reduction_suffix.splice(self.stores)
    (self.loads, self.compute, self.stores, self.cse) = prior
def create_cse_var(self, *args, **kwargs):
    """Factory hook: CPP kernels represent CSE values as CppCSEVariable."""
    var = CppCSEVariable(*args, **kwargs)
    return var
def get_to_dtype_expr(self, src, dtype, src_dtype):
    """Render a C++ expression converting *src* to *dtype* via c10::convert."""
    cpp_type = DTYPE_TO_CPP[dtype]
    return f"c10::convert<{cpp_type}>({src})"
def cache_dtype_convert(self, dst, dst_dtype, src, src_dtype):
    """Record *dst* as the cached CSE result of converting *src* to *dst_dtype*."""
    self.cse.put(self.get_to_dtype_expr(src, dst_dtype, src_dtype), dst)
def codegen_conditions(
    self,
    code: BracesBuffer,
    prefix: Optional[str] = None,
    var: Optional[sympy.Symbol] = None,
):
    """Emit an ``if(...)`` guard restricting itervars to their active ranges.

    Returns True when a guard was written (or none is needed because there
    are no active ranges); False when some active range is empty or no
    condition text was produced, i.e. the guarded body would never run.
    """
    if prefix is None:
        prefix = ""
    if not self.active_ranges:
        return True
    conditions = []

    def gen(start, end, var):
        # Append the [start, end) bounds for one itervar; an empty range
        # means the body is dead and the caller should emit nothing.
        if start == end:
            return False
        var_id = None
        for i, _var in enumerate(self.itervars):
            if var == _var:
                var_id = i
                break
        # When the active range covers the itervar's whole extent, collapse
        # the upper bound to 1 so the guard degenerates to a cheap check.
        # NOTE(review): `var_id` is truthy-tested, so itervar index 0 never
        # takes this collapse path — confirm whether `var_id is not None`
        # was intended.
        if (
            type(self) is CppKernel
            and var_id
            and start == 0
            and end == self.ranges[var_id]
        ):
            end = 1
        # pyrefly: ignore [bad-argument-type]
        conditions.append(f"{var} >= {cexpr_index(start)}")
        # pyrefly: ignore [bad-argument-type]
        conditions.append(f"{var} < {cexpr_index(end)}")
        return True

    if var is not None:
        # Guard a single requested itervar.
        assert var in self.active_ranges
        start, end = self.active_ranges[var]
        if not gen(start, end, var):
            return False
    else:
        # Guard every itervar with an active range.
        for _var, _range in self.active_ranges.items():
            start, end = _range
            if not gen(start, end, _var):
                return False
    joined_conditions = " && ".join(conditions)
    if joined_conditions:
        code.writeline(f"if({prefix}({joined_conditions}))")
        return True
    else:
        return False
| CppKernel |
python | ray-project__ray | rllib/examples/algorithms/maml_lr_supervised_learning.py | {
"start": 2404,
"end": 16308
} | class ____ an example of how to override the main `TorchDifferentiableLearner`.
Note, the meta-learner needs a long-enough training (`default_iters`=~70,000) to learn
to adapt quickly to new tasks.
How to run this script
----------------------
`python [script file name].py --iters=70000 --meta-train-batch-size=5 --fine-tune-batch-size=5`
Use the `--meta-train-batch-size` to set the training/testing batch size in meta-learning and
the `--fine-tune-batch-size` to adjust the number of samples used in all updates during
few-shot learning.
To suppress plotting (plotting is the default) use `--no-plot` and for taking a longer
look at the plot increase the seconds for which plotting is paused at the end of the
script by `--pause-plot-secs`.
Results to expect
-----------------
You should expect to see sometimes alternating test losses ("Total Loss") due to new
(unseen) tasks during meta learning. In few-shot learning after the meta-learning the
(few shot) loss should decrease almost monotonically. In the plot you can expect to see
a decent adaption to the new task after fine-tuning updates of the `RLModule` weights.
With `--iters=70_000`, `--meta-train-batch-size=5`, `--fine-tune-batch-size=5`,
`--fine-tune-lr=0.01`, `--fine-tune-iters=10`, `--meta-lr=0.001`, `--noise-std=0.0`,
and no seed defined.
-------------------------
Iteration: 68000
Total loss: 0.013758559711277485
-------------------------
Iteration: 69000
Total loss: 0.7246640920639038
-------------------------
Iteration: 70000
Total loss: 3.091259002685547
Few shot loss: 2.754437208175659
Few shot loss: 2.7399725914001465
Few shot loss: 2.499554395675659
Few shot loss: 2.1763901710510254
Few shot loss: 1.793503999710083
Few shot loss: 1.4362313747406006
Few shot loss: 1.083552598953247
Few shot loss: 0.7845061421394348
Few shot loss: 0.5579453110694885
Few shot loss: 0.4087105393409729
"""
import gymnasium as gym
import matplotlib.pyplot as plt
import numpy as np
from ray.rllib.algorithms.algorithm_config import DifferentiableAlgorithmConfig
from ray.rllib.core import DEFAULT_MODULE_ID
from ray.rllib.core.columns import Columns
from ray.rllib.core.learner.differentiable_learner_config import (
DifferentiableLearnerConfig,
)
from ray.rllib.core.learner.training_data import TrainingData
from ray.rllib.core.rl_module.multi_rl_module import MultiRLModuleSpec
from ray.rllib.core.rl_module.rl_module import RLModuleSpec
from ray.rllib.examples.algorithms.classes.maml_lr_differentiable_learner import (
MAMLTorchDifferentiableLearner,
)
from ray.rllib.examples.algorithms.classes.maml_lr_differentiable_rlm import (
DifferentiableTorchRLModule,
)
from ray.rllib.examples.algorithms.classes.maml_lr_meta_learner import (
MAMLTorchMetaLearner,
)
from ray.rllib.policy.sample_batch import MultiAgentBatch, SampleBatch
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.test_utils import add_rllib_example_script_args
# Import torch.
torch, _ = try_import_torch()
# Implement generation of data from sinusoid curves.
def generate_sinusoid_task(batch_size, noise_std=0.1, return_params=False):
    """Sample one sinusoid regression task.

    Draws a random amplitude in [0.1, 5.0) and phase in [0, pi), samples
    `batch_size` support points x in [-5, 5), and produces labels
    ``amplitude * sin(x - phase)`` plus a linear noise term scaled by
    `noise_std`.

    Args:
        batch_size: Number of (x, y) pairs to generate.
        noise_std: Scale of the additive uniform noise term.
        return_params: If True, also return the sampled amplitude and phase.

    Returns:
        ``(x, y)`` as float32 torch tensors of shape (batch_size, 1), or
        ``(x, y, amplitude, phase)`` when `return_params` is True.
    """
    # Task parameters (one amplitude/phase pair per task).
    amplitude = np.random.uniform(0.1, 5.0)
    phase = np.random.uniform(0.0, np.pi)
    # Support points and noisy labels. The random calls stay in this exact
    # order so seeded runs are reproducible.
    x = np.random.uniform(-5.0, 5.0, (batch_size, 1))
    y = amplitude * np.sin(x - phase) + noise_std * np.random.random((batch_size, 1))
    data = (
        torch.tensor(x, dtype=torch.float32),
        torch.tensor(y, dtype=torch.float32),
    )
    return data + (amplitude, phase) if return_params else data
def sample_task(batch_size=10, noise_std=0.1, training_data=False, return_params=False):
    """Sample one task and split it into meta-learner and task-learner batches.

    Generates ``2 * batch_size`` points from a single sinusoid task; the
    first half becomes the meta-learner batch, the second half the
    differentiable (task) learner batch.

    Args:
        batch_size: Samples per returned batch (meta and task each).
        noise_std: Noise scale forwarded to the task generator.
        training_data: If True, wrap each batch in ``TrainingData``;
            otherwise return plain ``MultiAgentBatch`` objects.
        return_params: If True, append the sampled amplitude and phase.

    Returns:
        ``(meta_batch, task_batch)`` — plus ``amplitude, phase`` when
        `return_params` is True.
    """
    generated = generate_sinusoid_task(
        batch_size * 2, noise_std=noise_std, return_params=return_params
    )
    obs, labels = generated[:2]
    full_batch = {Columns.OBS: obs, "y": labels}

    def as_marl_batch(part):
        # Slice every column identically and wrap in the RLlib batch types.
        return MultiAgentBatch(
            env_steps=batch_size,
            policy_batches={
                DEFAULT_MODULE_ID: SampleBatch(
                    {key: val[part] for key, val in full_batch.items()}
                )
            },
        )

    meta_train_batch = as_marl_batch(slice(None, batch_size))
    task_train_batch = as_marl_batch(slice(batch_size, None))
    if training_data:
        meta_train_batch = TrainingData(batch=meta_train_batch)
        task_train_batch = TrainingData(batch=task_train_batch)
    if return_params:
        return meta_train_batch, task_train_batch, *generated[2:]
    return meta_train_batch, task_train_batch
# Define arguments.
# Fixes vs. original: help-text typo "reproducable" -> "reproducible",
# grammar "should suppressed" -> "should be suppressed", and the seed
# check now uses `is not None` so an explicit seed of 0 is honored.
parser = add_rllib_example_script_args(default_iters=70_000)
parser.add_argument(
    "--meta-train-batch-size",
    type=int,
    default=5,
    help="The number of samples per train and test update (meta-learning).",
)
parser.add_argument(
    "--meta-lr",
    type=float,
    default=0.001,
    help="The learning rate to be used for meta learning (in the `MetaLearner`).",
)
parser.add_argument(
    "--fine-tune-batch-size",
    type=int,
    default=10,
    help="The number of samples for the fine-tuning updates.",
)
parser.add_argument(
    "--noise-std",
    type=float,
    default=0.0,
    help="The standard deviation for noise added to the single tasks.",
)
parser.add_argument(
    "--seed",
    type=int,
    default=None,
    help="An optional random seed. If not set, the experiment is not reproducible.",
)
parser.add_argument(
    "--fine-tune-iters",
    type=int,
    default=10,
    help="The number of updates in fine-tuning.",
)
parser.add_argument(
    "--fine-tune-lr",
    type=float,
    default=0.01,
    help="The learning rate to be used in fine-tuning the model in the test phase.",
)
parser.add_argument(
    "--no-plot",
    action="store_true",
    help=(
        "If plotting should be suppressed. Otherwise user action is needed to close "
        "the plot early."
    ),
)
parser.add_argument(
    "--pause-plot-secs",
    type=int,
    default=1000,
    help=(
        "The number of seconds to keep the plot open. Note the plot can always be "
        "closed by the user when open."
    ),
)
# Parse the arguments.
args = parser.parse_args()

# If a random seed is provided set it for torch and numpy.
# `is not None` (not truthiness) so that `--seed=0` still seeds the RNGs.
if args.seed is not None:
    torch.random.manual_seed(args.seed)
    np.random.seed(args.seed)
if __name__ == "__main__":
# Define the `RLModule`.
module_spec = RLModuleSpec(
module_class=DifferentiableTorchRLModule,
# Note, the spaces are needed by default but are not used.
observation_space=gym.spaces.Box(-np.inf, np.inf, (1,), dtype=np.float32),
action_space=gym.spaces.Box(-np.inf, np.inf, (1,), dtype=np.float32),
)
# `Learner`s work on `MultiRLModule`s.
multi_module_spec = MultiRLModuleSpec(
rl_module_specs={DEFAULT_MODULE_ID: module_spec}
)
# Build the `MultiRLModule`.
module = multi_module_spec.build()
# Configure the `DifferentiableLearner`.
diff_learner_config = DifferentiableLearnerConfig(
learner_class=MAMLTorchDifferentiableLearner,
minibatch_size=args.meta_train_batch_size,
lr=0.01,
)
# Configure the `TorchMetaLearner` via the `DifferentiableAlgorithmConfig`.
config = (
DifferentiableAlgorithmConfig()
.learners(
# Add the `DifferentiableLearnerConfig`s.
differentiable_learner_configs=[diff_learner_config],
num_gpus_per_learner=args.num_gpus_per_learner or 0,
)
.training(
lr=args.meta_lr,
train_batch_size=args.meta_train_batch_size,
# Use the full batch in a single update.
minibatch_size=args.meta_train_batch_size,
)
)
# Initialize the `TorchMetaLearner`.
meta_learner = MAMLTorchMetaLearner(config=config, module_spec=module_spec)
# Build the `TorchMetaLearner`.
meta_learner.build()
for i in range(args.stop_iters):
# Sample the training data.
meta_training_data, task_training_data = sample_task(
args.meta_train_batch_size, noise_std=args.noise_std, training_data=True
)
# Update the module.
outs = meta_learner.update(
training_data=meta_training_data,
num_epochs=1,
others_training_data=[task_training_data],
)
iter = i + 1
if iter % 1000 == 0:
total_loss = outs["default_policy"]["total_loss"].peek()
print("-------------------------\n")
print(f"Iteration: {iter}")
print(f"Total loss: {total_loss}")
# Generate test data.
test_batch, _, amplitude, phase = sample_task(
batch_size=args.fine_tune_batch_size,
noise_std=args.noise_std,
return_params=True,
)
if config.num_gpus_per_learner > 0:
test_batch = meta_learner._convert_batch_type(test_batch)
# Run inference and plot results.
with torch.no_grad():
# Generate a grid for the support.
x_grid = torch.tensor(
np.arange(-5.0, 5.0, 0.02), dtype=torch.float32, device=meta_learner._device
).view(-1, 1)
# Get label prediction from the model trained by MAML.
y_pred = meta_learner.module[DEFAULT_MODULE_ID]({Columns.OBS: x_grid})["y_pred"]
# Plot the results if requested.
if not args.no_plot:
# Sort the data by the support.
x_order = np.argsort(test_batch[DEFAULT_MODULE_ID][Columns.OBS].numpy()[:, 0])
x_sorted = test_batch[DEFAULT_MODULE_ID][Columns.OBS].numpy()[:, 0][x_order]
y_sorted = test_batch[DEFAULT_MODULE_ID]["y"][:, 0][x_order]
# Plot the data.
def sinusoid(t):
return amplitude * np.sin(t - phase)
plt.ion()
plt.figure(figsize=(5, 3))
# Plot the true sinusoid curve.
plt.plot(x_grid, sinusoid(x_grid), "r", label="Ground Truth")
# Add the sampled support values.
plt.plot(x_sorted, y_sorted, "^", color="purple")
# Add the prediction made by the model after MAML training.
plt.plot(x_grid, y_pred, ":", label="Prediction", color="#90EE90")
plt.title(f"MAML Results from {args.fine_tune_iters} fine-tuning steps.")
# Fine-tune with the meta loss for just a few steps.
optim = meta_learner.get_optimizers_for_module(DEFAULT_MODULE_ID)[0][1]
# Set the learning rate to a larger value.
for g in optim.param_groups:
g["lr"] = args.fine_tune_lr
# Now run the fine-tune iterations and update the model via the meta-learner loss.
for i in range(args.fine_tune_iters):
# Forward pass.
fwd_out = {
DEFAULT_MODULE_ID: meta_learner.module[DEFAULT_MODULE_ID](
test_batch[DEFAULT_MODULE_ID]
)
}
# Compute the MSE prediction loss.
loss_per_module = meta_learner.compute_losses(fwd_out=fwd_out, batch=test_batch)
# Optimize parameters.
optim.zero_grad(set_to_none=True)
loss_per_module[DEFAULT_MODULE_ID].backward()
optim.step()
# Show the loss for few-shot learning (fine-tuning).
print(f"Few shot loss: {loss_per_module[DEFAULT_MODULE_ID].item()}")
# Run the model again after fine-tuning.
with torch.no_grad():
y_pred_fine_tuned = meta_learner.module[DEFAULT_MODULE_ID](
{Columns.OBS: x_grid}
)["y_pred"]
if not args.no_plot:
# Plot the predictions of the fine-tuned model.
plt.plot(
x_grid,
y_pred_fine_tuned,
"-.",
label="Tuned Prediction",
color="green",
mfc="gray",
)
plt.legend()
plt.show()
# Pause the plot until the user closes it.
plt.pause(args.pause_plot_secs)
| for |
python | pytorch__pytorch | torch/_inductor/freezing.py | {
"start": 5399,
"end": 11051
class ErasedTensor(torch.Tensor):
    """Meta-device stand-in left behind after freezing discards real params.

    Any eager op touching an ErasedTensor raises, telling the user the
    original parameter data was dropped for memory efficiency.
    """

    @staticmethod
    def __new__(cls, elem, name, owning_mod):
        # Store no real data: materialize on the meta device only.
        return super().__new__(cls, elem.to(device="meta"))

    def __init__(self, elem, name: Optional[str], mod) -> None:
        # Remember where the tensor lived for the error message; weakref so
        # the erased tensor does not keep the module alive.
        self.erased_name = name
        self.owning_mod_ref = weakref.ref(mod)

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):  # type: ignore[override]
        # Find the erased operand(s) to produce a precise error.
        erased_tensors = [
            e
            # pyrefly: ignore [bad-unpacking]
            for e in pytree.arg_tree_leaves(*args, **kwargs)
            if isinstance(e, ErasedTensor)
        ]
        assert len(erased_tensors) > 0
        e = erased_tensors[0]

        raise RuntimeError(
            f"Trying to run Pytorch Eager Module after Dynamo Freezing. "
            "The original parameters have been discarded for memory efficiency. "
            f"Found in op {func} for erased parameter {e.erased_name} of {e.owning_mod_ref()}"
        )
def invalidate_eager_modules():
    """Replace params/buffers of all traced eager modules with ErasedTensor.

    Walks the modules recorded in the current TracingContext and swaps each
    parameter and buffer for a meta-device ErasedTensor, so later eager use
    fails loudly instead of silently using stale data.
    """
    with torch.utils._python_dispatch._disable_current_modes():
        for (
            mod
        ) in torch._guards.TracingContext.get().module_context.nn_modules.values():
            if not isinstance(mod, torch.nn.Module):
                continue

            # Snapshot via list() because we mutate attributes while iterating.
            for attr_name, tensor in list(
                itertools.chain(
                    mod.named_parameters(recurse=False),
                    # pyrefly: ignore [bad-argument-type]
                    mod.named_buffers(recurse=False),
                )
            ):
                # Construct with the python dispatcher off so the subclass
                # __torch_dispatch__ does not fire during creation.
                with torch._dispatch.python.no_python_dispatcher():
                    e_t = ErasedTensor(tensor, attr_name, mod)
                if isinstance(tensor, torch.nn.Parameter):
                    # Keep parameter-like identity on the replacement.
                    e_t.requires_grad_(True)
                    e_t._is_param = True
                setattr(mod, attr_name, e_t)
def discard_traced_gm_params(mod: torch.fx.GraphModule):
    """Swap *mod*'s own params/buffers for ErasedTensor placeholders.

    Same erasure as invalidate_eager_modules, but applied to a single
    traced GraphModule rather than every module in the TracingContext.
    """
    with torch.utils._python_dispatch._disable_current_modes():
        # Snapshot via list() because we mutate attributes while iterating.
        for attr_name, tensor in list(
            itertools.chain(
                mod.named_parameters(recurse=False),
                # pyrefly: ignore [bad-argument-type]
                mod.named_buffers(recurse=False),
            )
        ):
            # Construct with the python dispatcher off so the subclass
            # __torch_dispatch__ does not fire during creation.
            with torch._dispatch.python.no_python_dispatcher():
                e_t = ErasedTensor(tensor, attr_name, mod)
            if isinstance(tensor, torch.nn.Parameter):
                # Keep parameter-like identity on the replacement.
                e_t.requires_grad_(True)
                e_t._is_param = True
            setattr(mod, attr_name, e_t)
def enforce_output_layout(gm: torch.fx.GraphModule):
    """
    Make sure the output node's layout does not change due to compiler optimizations
    by adding aten.as_strided nodes with the expected strides.

    Only used for inference so we can assume all graph outputs are model outputs.
    """
    # The output node is always last in an fx graph.
    *_, output_node = gm.graph.nodes
    out_list = output_node.args[0]
    with gm.graph.inserting_before(output_node):
        for n in out_list:
            # Only dense, non-overlapping tensor outputs can be pinned to a
            # stride order; skip everything else.
            if not isinstance(
                n.meta["val"], torch.Tensor
            ) or not torch._prims_common.is_non_overlapping_and_dense(n.meta["val"]):
                continue

            # add a node to enforce eager layout
            ft = n.meta["val"]
            new_node = gm.graph.call_function(
                prims.inductor_force_stride_order.default, (n, ft.stride())
            )

            # can not call
            # n.replace_all_uses_with(new_node)
            # since it will replace the usage of n in new_node itself.
            output_node.replace_input_with(n, new_node)

    gm.graph.lint()
    gm.recompile()
def enforce_as_strided_input_layout(gm: torch.fx.GraphModule):
    """Pin the stride order of every as_strided input in *gm*.

    as_strided results depend on the exact strides of their input, so a
    force-stride-order node is inserted in front of each as_strided-family
    op to stop compiler optimizations from changing that layout.
    """
    as_strided_ops = (
        torch.ops.aten.as_strided.default,
        torch.ops.aten.as_strided_.default,
        torch.ops.aten.as_strided_scatter.default,
    )
    # Snapshot matching nodes first: we insert into the graph while walking.
    targets = [node for node in gm.graph.nodes if node.target in as_strided_ops]
    for node in targets:
        with gm.graph.inserting_before(node):
            src = node.args[0]
            # Force the input to keep its recorded eager strides.
            pinned = gm.graph.call_function(
                prims.inductor_force_stride_order.default,
                (src, src.meta["val"].stride()),
            )
            node.replace_input_with(src, pinned)
    gm.graph.lint()
    gm.recompile()
def convert_conv_weights_to_channels_last(gm: torch.fx.GraphModule):
    """
    Convert 4d convolution weight tensor to channels last format.

    This pass is performed before freezing so the added nodes can be constant
    folded by freezing.
    """
    with dynamo_timed("convert_conv_weights_to_channels_last"):
        convs = [n for n in gm.graph.nodes if n.target is aten.convolution.default]
        for conv in convs:
            weight_node = conv.args[1]
            if len(weight_node.meta["val"].size()) != 4 or weight_node.meta[
                "val"
            ].is_contiguous(memory_format=torch.channels_last):
                # not a 4d tensor or already channels last, skip
                continue

            with gm.graph.inserting_before(conv):
                # Clone into channels-last; freezing constant-folds this away.
                new_node = gm.graph.call_function(
                    aten.clone.default,
                    (weight_node,),
                    {"memory_format": torch.channels_last},
                )
                conv.replace_input_with(weight_node, new_node)

        # Re-pin layouts that the inserted clones may have disturbed.
        enforce_as_strided_input_layout(gm)
        enforce_output_layout(gm)
| ErasedTensor |
python | has2k1__plotnine | plotnine/geoms/geom_vline.py | {
"start": 659,
"end": 3387
class geom_vline(geom):
    """
    Vertical line

    {usage}

    Parameters
    ----------
    {common_parameters}
    """

    DEFAULT_AES = {
        "color": "black",
        "linetype": "solid",
        "size": 0.5,
        "alpha": 1,
    }
    REQUIRED_AES = {"xintercept"}
    DEFAULT_PARAMS = {
        "stat": "identity",
        "position": "identity",
        "na_rm": False,
        "inherit_aes": False,
    }
    # Reuse segment's legend sizing: a vline is drawn as a segment.
    legend_key_size = staticmethod(geom_segment.legend_key_size)

    def __init__(
        self,
        mapping: aes | None = None,
        data: DataLike | None = None,
        **kwargs: Any,
    ):
        data, mapping = order_as_data_mapping(data, mapping)
        # A literal `xintercept=` argument takes precedence over any
        # aes() mapping; it is turned into its own one-column dataframe.
        xintercept = kwargs.pop("xintercept", None)
        if xintercept is not None:
            if mapping:
                warn(
                    "The 'xintercept' parameter has overridden "
                    "the aes() mapping.",
                    PlotnineWarning,
                )
            # np.repeat(..., 1) coerces scalar or sequence into a 1d array.
            data = pd.DataFrame({"xintercept": np.repeat(xintercept, 1)})
            mapping = aes(xintercept="xintercept")
            kwargs["show_legend"] = False
        geom.__init__(self, mapping, data, **kwargs)

    def draw_panel(
        self,
        data: pd.DataFrame,
        panel_params: panel_view,
        coord: coord,
        ax: Axes,
    ):
        """
        Plot all groups
        """
        # Turn each xintercept into a segment spanning the panel's full
        # (backtransformed) y range, then delegate to geom_segment.
        ranges = coord.backtransform_range(panel_params)
        data["x"] = data["xintercept"]
        data["xend"] = data["xintercept"]
        data["y"] = ranges.y[0]
        data["yend"] = ranges.y[1]
        data = data.drop_duplicates()

        for _, gdata in data.groupby("group"):
            gdata.reset_index(inplace=True)
            geom_segment.draw_group(
                gdata, panel_params, coord, ax, self.params
            )

    @staticmethod
    def draw_legend(
        data: pd.Series[Any], da: DrawingArea, lyr: layer
    ) -> DrawingArea:
        """
        Draw a vertical line in the box

        Parameters
        ----------
        data : Series
            Data Row
        da : DrawingArea
            Canvas
        lyr : layer
            Layer

        Returns
        -------
        out : DrawingArea
        """
        from matplotlib.lines import Line2D

        # Centered vertical line spanning the key's full height.
        x = [0.5 * da.width] * 2
        y = [0, da.height]
        linewidth = data["size"] * SIZE_FACTOR
        color = to_rgba(data["color"], data["alpha"])
        key = Line2D(
            x,
            y,
            linestyle=data["linetype"],
            linewidth=linewidth,
            color=color,
            solid_capstyle="butt",
            antialiased=False,
        )
        da.add_artist(key)
        return da
| geom_vline |
python | numba__numba | numba/tests/test_random.py | {
"start": 47337,
"end": 53194
class TestRandomChoice(BaseTest):
    """
    Test np.random.choice.
    """

    def _check_results(self, pop, res, replace=True):
        """
        Check basic expectations about a batch of samples.
        """
        spop = set(pop)
        sres = set(res)
        # All results are in the population
        self.assertLessEqual(sres, spop)
        # Sorted results are unlikely
        self.assertNotEqual(sorted(res), list(res))
        if replace:
            # Duplicates are likely
            self.assertLess(len(sres), len(res), res)
        else:
            # No duplicates
            self.assertEqual(len(sres), len(res), res)

    def _check_dist(self, pop, samples):
        """
        Check distribution of some samples.
        """
        # Sanity check that we have enough samples
        self.assertGreaterEqual(len(samples), len(pop) * 100)
        # Check equidistribution of samples
        # (loose 0.5x..2x band to keep the test statistically robust)
        expected_frequency = len(samples) / len(pop)
        c = collections.Counter(samples)
        for value in pop:
            n = c[value]
            self.assertGreaterEqual(n, expected_frequency * 0.5)
            self.assertLessEqual(n, expected_frequency * 2.0)

    def _accumulate_array_results(self, func, nresults):
        """
        Accumulate array results produced by *func* until they reach
        *nresults* elements.
        """
        res = []
        while len(res) < nresults:
            res += list(func().flat)
        return res[:nresults]

    def _check_choice_1(self, a, pop):
        """
        Check choice(a) against pop.
        """
        cfunc = jit(nopython=True)(numpy_choice1)
        n = len(pop)
        res = [cfunc(a) for i in range(n)]
        self._check_results(pop, res)
        dist = [cfunc(a) for i in range(n * 100)]
        self._check_dist(pop, dist)

    def test_choice_scalar_1(self):
        """
        Test choice(int)
        """
        n = 50
        pop = list(range(n))
        self._check_choice_1(n, pop)

    def test_choice_array_1(self):
        """
        Test choice(array)
        """
        pop = np.arange(50) * 2 + 100
        self._check_choice_1(pop, pop)

    def _check_array_results(self, func, pop, replace=True):
        """
        Check array results produced by *func* and their distribution.
        """
        n = len(pop)
        res = list(func().flat)
        self._check_results(pop, res, replace)
        dist = self._accumulate_array_results(func, n * 100)
        self._check_dist(pop, dist)

    def _check_choice_2(self, a, pop):
        """
        Check choice(a, size) against pop.
        """
        cfunc = jit(nopython=True)(numpy_choice2)
        n = len(pop)
        # Final sizes should be large enough, so as to stress
        # replacement
        sizes = [n - 10, (3, (n - 1) // 3), n * 10]
        for size in sizes:
            # Check result shape
            res = cfunc(a, size)
            expected_shape = size if isinstance(size, tuple) else (size,)
            self.assertEqual(res.shape, expected_shape)
            # Check results and their distribution
            self._check_array_results(lambda: cfunc(a, size), pop)

    def test_choice_scalar_2(self):
        """
        Test choice(int, size)
        """
        n = 50
        pop = np.arange(n)
        self._check_choice_2(n, pop)

    def test_choice_array_2(self):
        """
        Test choice(array, size)
        """
        pop = np.arange(50) * 2 + 100
        self._check_choice_2(pop, pop)

    def _check_choice_3(self, a, pop):
        """
        Check choice(a, size, replace) against pop.
        """
        cfunc = jit(nopython=True)(numpy_choice3)
        n = len(pop)
        # Final sizes should be close but slightly <= n, so as to stress
        # replacement (or not)
        sizes = [n - 10, (3, (n - 1) // 3)]
        replaces = [True, False]
        # Check result shapes
        for size in sizes:
            for replace in [True, False]:
                res = cfunc(a, size, replace)
                expected_shape = size if isinstance(size, tuple) else (size,)
                self.assertEqual(res.shape, expected_shape)
        # Check results for replace=True
        for size in sizes:
            self._check_array_results(lambda: cfunc(a, size, True), pop)
        # Check results for replace=False
        for size in sizes:
            self._check_array_results(lambda: cfunc(a, size, False), pop, False)
        # Can't ask for more samples than population size with replace=False
        for size in [n + 1, (3, n // 3 + 1)]:
            with self.assertRaises(ValueError):
                cfunc(a, size, False)

    def test_choice_scalar_3(self):
        """
        Test choice(int, size, replace)
        """
        n = 50
        pop = np.arange(n)
        self._check_choice_3(n, pop)

    def test_choice_array_3(self):
        """
        Test choice(array, size, replace)
        """
        pop = np.arange(50) * 2 + 100
        self._check_choice_3(pop, pop)

    def test_choice_follows_seed(self):
        # See issue #3888, np.random.choice must acknowledge the seed
        @jit(nopython=True)
        def numba_rands(n_to_return, choice_array):
            np.random.seed(1337)
            out = np.empty((n_to_return, 2), np.int32)
            for i in range(n_to_return):
                out[i] = np.random.choice(choice_array, 2, False)
            return out

        choice_array = np.random.randint(300, size=1000).astype(np.int32)
        # Compare the pure-Python and jitted versions under the same seed.
        tmp_np = choice_array.copy()
        expected = numba_rands.py_func(5, tmp_np)
        tmp_nb = choice_array.copy()
        got = numba_rands(5, tmp_nb)
        np.testing.assert_allclose(expected, got)
        # check no mutation
        np.testing.assert_allclose(choice_array, tmp_np)
        np.testing.assert_allclose(choice_array, tmp_nb)
| TestRandomChoice |
python | numpy__numpy | numpy/lib/tests/test_function_base.py | {
"start": 78747,
"end": 81855
class TestFilterwindows:
    """Tests for the window functions (hanning, hamming, bartlett, blackman,
    kaiser), sharing a single parametrized check.

    The five original test methods were near-identical copies differing only
    in the window function, the known sum, and its precision; the shared
    logic now lives in ``_check_window``.
    """

    def _check_window(self, window, dtype, M, expected_sum, decimal=4):
        """Run the shared assertions for one window function.

        Parameters
        ----------
        window : callable
            Maps the scalar M to the window array (e.g. ``hanning``).
        dtype : str
            Dtype of the 0-d scalar argument (parametrized, includes "O").
        M : int
            Number of window points (parametrized).
        expected_sum : float
            Known value of ``sum(window(M))`` for the parametrized M.
        decimal : int
            Precision for the known-sum comparison.
        """
        scalar = np.array(M, dtype=dtype)[()]

        w = window(scalar)
        if dtype == "O":
            # An object 0-d array indexes to a plain Python int (no .dtype);
            # the windows then compute in float64.
            ref_dtype = np.float64
        else:
            ref_dtype = np.result_type(scalar.dtype, np.float64)
        assert w.dtype == ref_dtype

        # check symmetry
        assert_equal(w, flipud(w))

        # check known value
        if scalar < 1:
            assert_array_equal(w, np.array([]))
        elif scalar == 1:
            assert_array_equal(w, np.ones(1))
        else:
            assert_almost_equal(np.sum(w, axis=0), expected_sum, decimal)

    def test_hanning(self, dtype: str, M: int) -> None:
        self._check_window(hanning, dtype, M, 4.500)

    def test_hamming(self, dtype: str, M: int) -> None:
        self._check_window(hamming, dtype, M, 4.9400)

    def test_bartlett(self, dtype: str, M: int) -> None:
        self._check_window(bartlett, dtype, M, 4.4444)

    def test_blackman(self, dtype: str, M: int) -> None:
        self._check_window(blackman, dtype, M, 3.7800)

    def test_kaiser(self, dtype: str, M: int) -> None:
        # kaiser takes an extra beta argument; beta=0 is a rectangular
        # window whose sum is exactly M, hence the tight precision.
        self._check_window(lambda m: kaiser(m, 0), dtype, M, 10, decimal=15)
| TestFilterwindows |
python | crytic__slither | slither/slithir/operations/unary.py | {
"start": 640,
"end": 1012
class UnaryType(Enum):
    """Unary operators supported by SlithIR."""

    BANG = "!"
    TILD = "~"

    @staticmethod
    def get_type(operation_type, isprefix):
        """Map a prefix operator token to its UnaryType.

        Raises SlithIRError for non-prefix operators or unknown tokens.
        """
        if isprefix:
            lookup = {"!": UnaryType.BANG, "~": UnaryType.TILD}
            if operation_type in lookup:
                return lookup[operation_type]
        raise SlithIRError(f"get_type: Unknown operation type {operation_type}")
| UnaryType |
python | walkccc__LeetCode | solutions/697. Degree of an Array/697.py | {
"start": 0,
"end": 422
} | class ____:
def findShortestSubArray(self, nums: list[int]) -> int:
ans = 0
degree = 0
debut = {}
count = collections.Counter()
for i, num in enumerate(nums):
debut.setdefault(num, i)
count[num] += 1
if count[num] > degree:
degree = count[num]
ans = i - debut[num] + 1
elif count[num] == degree:
ans = min(ans, i - debut[num] + 1)
return ans
| Solution |
python | doocs__leetcode | solution/0300-0399/0385.Mini Parser/Solution2.py | {
"start": 1328,
"end": 2069
} | class ____:
def deserialize(self, s: str) -> NestedInteger:
if s[0] != '[':
return NestedInteger(int(s))
stk, x, neg = [], 0, False
for i, c in enumerate(s):
if c == '-':
neg = True
elif c.isdigit():
x = x * 10 + int(c)
elif c == '[':
stk.append(NestedInteger())
elif c in ',]':
if s[i - 1].isdigit():
if neg:
x = -x
stk[-1].add(NestedInteger(x))
x, neg = 0, False
if c == ']' and len(stk) > 1:
t = stk.pop()
stk[-1].add(t)
return stk.pop()
| Solution |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/response_model/tutorial001.py | {
"start": 134,
"end": 1091
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=Hero)
def create_hero(hero: Hero):
with Session(engine) as session:
session.add(hero)
session.commit()
session.refresh(hero)
return hero
@app.get("/heroes/", response_model=List[Hero])
def read_heroes():
with Session(engine) as session:
heroes = session.exec(select(Hero)).all()
return heroes
| Hero |
python | openai__openai-python | src/openai/resources/conversations/conversations.py | {
"start": 8666,
"end": 16281
} | class ____(AsyncAPIResource):
@cached_property
def items(self) -> AsyncItems:
return AsyncItems(self._client)
@cached_property
def with_raw_response(self) -> AsyncConversationsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncConversationsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncConversationsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncConversationsWithStreamingResponse(self)
async def create(
self,
*,
items: Optional[Iterable[ResponseInputItemParam]] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Conversation:
"""
Create a conversation.
Args:
items: Initial items to include in the conversation context. You may add up to 20 items
at a time.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return await self._post(
"/conversations",
body=await async_maybe_transform(
{
"items": items,
"metadata": metadata,
},
conversation_create_params.ConversationCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Conversation,
)
async def retrieve(
self,
conversation_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Conversation:
"""
Get a conversation
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return await self._get(
f"/conversations/{conversation_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Conversation,
)
async def update(
self,
conversation_id: str,
*,
metadata: Optional[Metadata],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Conversation:
"""
Update a conversation
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return await self._post(
f"/conversations/{conversation_id}",
body=await async_maybe_transform(
{"metadata": metadata}, conversation_update_params.ConversationUpdateParams
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Conversation,
)
async def delete(
self,
conversation_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ConversationDeletedResource:
"""Delete a conversation.
Items in the conversation will not be deleted.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return await self._delete(
f"/conversations/{conversation_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ConversationDeletedResource,
)
| AsyncConversations |
python | numpy__numpy | numpy/polynomial/tests/test_hermite_e.py | {
"start": 1073,
"end": 3464
} | class ____:
x = np.linspace(-3, 3, 100)
def test_hermeadd(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = herme.hermeadd([0] * i + [1], [0] * j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermesub(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = herme.hermesub([0] * i + [1], [0] * j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermemulx(self):
assert_equal(herme.hermemulx([0]), [0])
assert_equal(herme.hermemulx([1]), [0, 1])
for i in range(1, 5):
ser = [0] * i + [1]
tgt = [0] * (i - 1) + [i, 0, 1]
assert_equal(herme.hermemulx(ser), tgt)
def test_hermemul(self):
# check values of result
for i in range(5):
pol1 = [0] * i + [1]
val1 = herme.hermeval(self.x, pol1)
for j in range(5):
msg = f"At i={i}, j={j}"
pol2 = [0] * j + [1]
val2 = herme.hermeval(self.x, pol2)
pol3 = herme.hermemul(pol1, pol2)
val3 = herme.hermeval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1 * val2, err_msg=msg)
def test_hermediv(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
ci = [0] * i + [1]
cj = [0] * j + [1]
tgt = herme.hermeadd(ci, cj)
quo, rem = herme.hermediv(tgt, ci)
res = herme.hermeadd(herme.hermemul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermepow(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
c = np.arange(i + 1)
tgt = reduce(herme.hermemul, [c] * j, np.array([1]))
res = herme.hermepow(c, j)
assert_equal(trim(res), trim(tgt), err_msg=msg)
| TestArithmetic |
python | spyder-ide__spyder | spyder/app/tests/spyder-boilerplate/spyder_boilerplate/spyder/plugin.py | {
"start": 3651,
"end": 5064
} | class ____(SpyderDockablePlugin):
"""
Spyder Boilerplate plugin.
"""
NAME = "spyder_boilerplate"
REQUIRES = []
OPTIONAL = []
WIDGET_CLASS = SpyderBoilerplateWidget
CONF_SECTION = NAME
CONF_WIDGET_CLASS = SpyderBoilerplateConfigPage
CUSTOM_LAYOUTS = [VerticalSplitLayout2]
CONF_DEFAULTS = [
(CONF_SECTION, {}),
(
"shortcuts",
# Note: These shortcut names are capitalized to check we can
# set/get/reset them correctly.
{f"{NAME}/Change text": "Ctrl+B", "editor/New text": "Ctrl+H"},
),
]
# --- Signals
# --- SpyderDockablePlugin API
# ------------------------------------------------------------------------
@staticmethod
def get_name():
return "Spyder boilerplate plugin"
@staticmethod
def get_description():
return "A boilerplate plugin for testing."
@staticmethod
def get_icon():
return qta.icon('mdi6.alpha-b-box', color=SpyderPalette.ICON_1)
def on_initialize(self):
pass
def check_compatibility(self):
valid = True
message = "" # Note: Remember to use _("") to localize the string
return valid, message
def on_close(self, cancellable=True):
return True
# --- Public API
# ------------------------------------------------------------------------
| SpyderBoilerplate |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 61533,
"end": 61676
} | class ____(_PrintableStructure):
_fields_ = [
('timeStamp', c_ulonglong),
('sampleValue', c_nvmlValue_t),
]
| c_nvmlSample_t |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/plus/deploy/configure/utils.py | {
"start": 693,
"end": 6582
} | class ____:
"""Configuration for deployment scaffolding."""
dg_context: DgContext
cli_config: DgRawCliConfig
plus_config: Optional[DagsterPlusCliConfig]
agent_type: DgPlusAgentType
agent_platform: Optional[DgPlusAgentPlatform]
organization_name: Optional[str]
deployment_name: str
git_root: Optional[Path]
python_version: str
skip_confirmation_prompt: bool
git_provider: Optional[GitProvider]
use_editable_dagster: bool
def get_cli_version_or_main() -> str:
from dagster_dg_cli.version import __version__ as cli_version
return "main" if cli_version.endswith("+dev") else f"v{cli_version}"
def search_for_git_root(path: Path) -> Optional[Path]:
if path.joinpath(".git").exists():
return path
elif path.parent == path:
return None
else:
return search_for_git_root(path.parent)
def get_project_contexts(dg_context: DgContext, cli_config: DgRawCliConfig) -> list[DgContext]:
if dg_context.is_in_workspace:
return [
dg_context.for_project_environment(project.path, cli_config)
for project in dg_context.project_specs
]
else:
return [dg_context]
def get_git_web_url(git_root: Path) -> Optional[str]:
from dagster_cloud_cli.core.pex_builder.code_location import get_local_repo_name
try:
local_repo_name = get_local_repo_name(str(git_root))
return f"https://github.com/{local_repo_name}"
except subprocess.CalledProcessError:
return None
def get_scaffolded_container_context_yaml(agent_platform: DgPlusAgentPlatform) -> Optional[str]:
if agent_platform == DgPlusAgentPlatform.K8S:
return textwrap.dedent(
"""
### Uncomment to add configuration for k8s resources.
# k8s:
# server_k8s_config: # Raw kubernetes config for code servers launched by the agent
# pod_spec_config: # Config for the code server pod spec
# node_selector:
# disktype: standard
# pod_template_spec_metadata: # Metadata for the code server pod
# annotations:
# mykey: myvalue
# container_config: # Config for the main dagster container in the code server pod
# resources:
# limits:
# cpu: 100m
# memory: 128Mi
# run_k8s_config: # Raw kubernetes config for runs launched by the agent
# pod_spec_config: # Config for the run's PodSpec
# node_selector:
# disktype: ssd
# container_config: # Config for the main dagster container in the run pod
# resources:
# limits:
# cpu: 500m
# memory: 1024Mi
# pod_template_spec_metadata: # Metadata for the run pod
# annotations:
# mykey: myvalue
# job_spec_config: # Config for the Kubernetes job for the run
# ttl_seconds_after_finished: 7200
"""
)
elif agent_platform == DgPlusAgentPlatform.ECS:
return textwrap.dedent(
"""
### Uncomment to add configuration for ECS resources.
# ecs:
# env_vars:
# - DATABASE_NAME=staging
# - DATABASE_PASSWORD
# secrets:
# - name: 'MY_API_TOKEN'
# valueFrom: 'arn:aws:secretsmanager:us-east-1:123456789012:secret:FOO-AbCdEf:token::'
# - name: 'MY_PASSWORD'
# valueFrom: 'arn:aws:secretsmanager:us-east-1:123456789012:secret:FOO-AbCdEf:password::'
# server_resources: # Resources for code servers launched by the agent for this location
# cpu: "256"
# memory: "512"
# run_resources: # Resources for runs launched by the agent for this location
# cpu: "4096"
# memory: "16384"
# execution_role_arn: arn:aws:iam::123456789012:role/MyECSExecutionRole
# task_role_arn: arn:aws:iam::123456789012:role/MyECSTaskRole
# mount_points:
# - sourceVolume: myEfsVolume
# containerPath: '/mount/efs'
# readOnly: True
# volumes:
# - name: myEfsVolume
# efsVolumeConfiguration:
# fileSystemId: fs-1234
# rootDirectory: /path/to/my/data
# server_sidecar_containers:
# - name: DatadogAgent
# image: public.ecr.aws/datadog/agent:latest
# environment:
# - name: ECS_FARGATE
# value: true
# run_sidecar_containers:
# - name: DatadogAgent
# image: public.ecr.aws/datadog/agent:latest
# environment:
# - name: ECS_FARGATE
# value: true
"""
)
elif agent_platform == DgPlusAgentPlatform.DOCKER:
return textwrap.dedent(
"""
### Uncomment to add configuration for Docker resources.
# docker:
# env_vars:
# - DATABASE_NAME=staging
# - DATABASE_PASSWORD
"""
)
else:
return None
# Template file paths
TEMPLATES_DIR = Path(__file__).parent.parent.parent.parent.parent / "templates"
SERVERLESS_GITHUB_ACTION_FILE = TEMPLATES_DIR / "serverless-github-action.yaml"
HYBRID_GITHUB_ACTION_FILE = TEMPLATES_DIR / "hybrid-github-action.yaml"
BUILD_LOCATION_FRAGMENT = TEMPLATES_DIR / "build-location-fragment.yaml"
| DeploymentScaffoldConfig |
python | huggingface__transformers | src/transformers/models/xmod/modeling_xmod.py | {
"start": 40513,
"end": 44111
} | class ____(XmodPreTrainedModel):
_tied_weights_keys = {
"lm_head.decoder.weight": "roberta.embeddings.word_embeddings.weight",
"lm_head.decoder.bias": "lm_head.bias",
}
# Copied from transformers.models.roberta.modeling_roberta.RobertaForMaskedLM.__init__ with Roberta->Xmod
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `XmodForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.roberta = XmodModel(config, add_pooling_layer=False)
self.lm_head = XmodLMHead(config)
# Initialize weights and apply final processing
self.post_init()
# Copied from transformers.models.roberta.modeling_roberta.RobertaForMaskedLM.get_output_embeddings
def get_output_embeddings(self):
return self.lm_head.decoder
# Copied from transformers.models.roberta.modeling_roberta.RobertaForMaskedLM.set_output_embeddings
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
lang_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
r"""
lang_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of the language adapters that should be activated for each sample, respectively. Default: the index
that corresponds to `self.config.default_language`.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
outputs = self.roberta(
input_ids,
lang_ids=lang_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
return_dict=True,
**kwargs,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead
| XmodForMaskedLM |
python | FactoryBoy__factory_boy | tests/test_regression.py | {
"start": 294,
"end": 356
} | class ____(T.NamedTuple):
title: str
author: Author
| Book |
python | Textualize__textual | src/textual/events.py | {
"start": 24214,
"end": 24995
} | class ____(Event, bubble=False):
"""Sent to a widget that is capturing [`print`][print].
- [ ] Bubbles
- [ ] Verbose
Args:
text: Text that was printed.
stderr: `True` if the print was to stderr, or `False` for stdout.
Note:
Python's [`print`][print] output can be captured with
[`App.begin_capture_print`][textual.app.App.begin_capture_print].
"""
def __init__(self, text: str, stderr: bool = False) -> None:
super().__init__()
self.text = text
"""The text that was printed."""
self.stderr = stderr
"""`True` if the print was to stderr, or `False` for stdout."""
def __rich_repr__(self) -> rich.repr.Result:
yield self.text
yield self.stderr
@dataclass
| Print |
python | cython__cython | Cython/Compiler/TypeSlots.py | {
"start": 12881,
"end": 13080
} | class ____(FixedSlot):
# Descriptor for a type slot whose value is always 0.
def __init__(self, slot_name, ifdef=None):
FixedSlot.__init__(self, slot_name, "0", ifdef=ifdef)
| EmptySlot |
python | python__mypy | mypy/partially_defined.py | {
"start": 10771,
"end": 25872
} | class ____(ExtendedTraverserVisitor):
"""Detects the following cases:
- A variable that's defined only part of the time.
- If a variable is used before definition
An example of a partial definition:
if foo():
x = 1
print(x) # Error: "x" may be undefined.
Example of a used before definition:
x = y
y: int = 2
Note that this code does not detect variables not defined in any of the branches -- that is
handled by the semantic analyzer.
"""
def __init__(
self,
msg: MessageBuilder,
type_map: dict[Expression, Type],
options: Options,
names: SymbolTable,
) -> None:
self.msg = msg
self.type_map = type_map
self.options = options
self.builtins = SymbolTable()
builtins_mod = names.get("__builtins__", None)
if builtins_mod:
assert isinstance(builtins_mod.node, MypyFile)
self.builtins = builtins_mod.node.names
self.loops: list[Loop] = []
self.try_depth = 0
self.tracker = DefinedVariableTracker()
for name in implicit_module_attrs:
self.tracker.record_definition(name)
def var_used_before_def(self, name: str, context: Context) -> None:
if self.msg.errors.is_error_code_enabled(errorcodes.USED_BEFORE_DEF):
self.msg.var_used_before_def(name, context)
def variable_may_be_undefined(self, name: str, context: Context) -> None:
if self.msg.errors.is_error_code_enabled(errorcodes.POSSIBLY_UNDEFINED):
self.msg.variable_may_be_undefined(name, context)
def process_definition(self, name: str) -> None:
# Was this name previously used? If yes, it's a used-before-definition error.
if not self.tracker.in_scope(ScopeType.Class):
refs = self.tracker.pop_undefined_ref(name)
for ref in refs:
if self.loops:
self.variable_may_be_undefined(name, ref)
else:
self.var_used_before_def(name, ref)
else:
# Errors in class scopes are caught by the semantic analyzer.
pass
self.tracker.record_definition(name)
def visit_global_decl(self, o: GlobalDecl) -> None:
for name in o.names:
self.process_definition(name)
super().visit_global_decl(o)
def visit_nonlocal_decl(self, o: NonlocalDecl) -> None:
for name in o.names:
self.process_definition(name)
super().visit_nonlocal_decl(o)
def process_lvalue(self, lvalue: Lvalue | None) -> None:
if isinstance(lvalue, NameExpr):
self.process_definition(lvalue.name)
elif isinstance(lvalue, StarExpr):
self.process_lvalue(lvalue.expr)
elif isinstance(lvalue, (ListExpr, TupleExpr)):
for item in lvalue.items:
self.process_lvalue(item)
def visit_assignment_stmt(self, o: AssignmentStmt) -> None:
for lvalue in o.lvalues:
self.process_lvalue(lvalue)
super().visit_assignment_stmt(o)
def visit_assignment_expr(self, o: AssignmentExpr) -> None:
o.value.accept(self)
self.process_lvalue(o.target)
def visit_if_stmt(self, o: IfStmt) -> None:
for e in o.expr:
e.accept(self)
self.tracker.start_branch_statement()
for b in o.body:
if b.is_unreachable:
continue
b.accept(self)
self.tracker.next_branch()
if o.else_body:
if not o.else_body.is_unreachable:
o.else_body.accept(self)
else:
self.tracker.skip_branch()
self.tracker.end_branch_statement()
def visit_match_stmt(self, o: MatchStmt) -> None:
o.subject.accept(self)
self.tracker.start_branch_statement()
for i in range(len(o.patterns)):
pattern = o.patterns[i]
pattern.accept(self)
guard = o.guards[i]
if guard is not None:
guard.accept(self)
if not o.bodies[i].is_unreachable:
o.bodies[i].accept(self)
else:
self.tracker.skip_branch()
is_catchall = infer_pattern_value(pattern) == ALWAYS_TRUE
if not is_catchall:
self.tracker.next_branch()
self.tracker.end_branch_statement()
def visit_func_def(self, o: FuncDef) -> None:
self.process_definition(o.name)
super().visit_func_def(o)
def visit_func(self, o: FuncItem) -> None:
if o.is_dynamic() and not self.options.check_untyped_defs:
return
args = o.arguments or []
# Process initializers (defaults) outside the function scope.
for arg in args:
if arg.initializer is not None:
arg.initializer.accept(self)
self.tracker.enter_scope(ScopeType.Func)
for arg in args:
self.process_definition(arg.variable.name)
super().visit_var(arg.variable)
o.body.accept(self)
self.tracker.exit_scope()
def visit_generator_expr(self, o: GeneratorExpr) -> None:
self.tracker.enter_scope(ScopeType.Generator)
for idx in o.indices:
self.process_lvalue(idx)
super().visit_generator_expr(o)
self.tracker.exit_scope()
def visit_dictionary_comprehension(self, o: DictionaryComprehension) -> None:
self.tracker.enter_scope(ScopeType.Generator)
for idx in o.indices:
self.process_lvalue(idx)
super().visit_dictionary_comprehension(o)
self.tracker.exit_scope()
def visit_for_stmt(self, o: ForStmt) -> None:
o.expr.accept(self)
self.process_lvalue(o.index)
o.index.accept(self)
self.tracker.start_branch_statement()
loop = Loop()
self.loops.append(loop)
o.body.accept(self)
self.tracker.next_branch()
self.tracker.end_branch_statement()
if o.else_body is not None:
# If the loop has a `break` inside, `else` is executed conditionally.
# If the loop doesn't have a `break` either the function will return or
# execute the `else`.
has_break = loop.has_break
if has_break:
self.tracker.start_branch_statement()
self.tracker.next_branch()
o.else_body.accept(self)
if has_break:
self.tracker.end_branch_statement()
self.loops.pop()
def visit_return_stmt(self, o: ReturnStmt) -> None:
super().visit_return_stmt(o)
self.tracker.skip_branch()
def visit_lambda_expr(self, o: LambdaExpr) -> None:
self.tracker.enter_scope(ScopeType.Func)
super().visit_lambda_expr(o)
self.tracker.exit_scope()
def visit_assert_stmt(self, o: AssertStmt) -> None:
super().visit_assert_stmt(o)
if checker.is_false_literal(o.expr):
self.tracker.skip_branch()
def visit_raise_stmt(self, o: RaiseStmt) -> None:
super().visit_raise_stmt(o)
self.tracker.skip_branch()
def visit_continue_stmt(self, o: ContinueStmt) -> None:
super().visit_continue_stmt(o)
self.tracker.skip_branch()
def visit_break_stmt(self, o: BreakStmt) -> None:
super().visit_break_stmt(o)
if self.loops:
self.loops[-1].has_break = True
self.tracker.skip_branch()
def visit_expression_stmt(self, o: ExpressionStmt) -> None:
typ = self.type_map.get(o.expr)
if typ is None or isinstance(get_proper_type(typ), UninhabitedType):
self.tracker.skip_branch()
super().visit_expression_stmt(o)
def visit_try_stmt(self, o: TryStmt) -> None:
"""
Note that finding undefined vars in `finally` requires different handling from
the rest of the code. In particular, we want to disallow skipping branches due to jump
statements in except/else clauses for finally but not for other cases. Imagine a case like:
def f() -> int:
try:
x = 1
except:
# This jump statement needs to be handled differently depending on whether or
# not we're trying to process `finally` or not.
return 0
finally:
# `x` may be undefined here.
pass
# `x` is always defined here.
return x
"""
self.try_depth += 1
if o.finally_body is not None:
# In order to find undefined vars in `finally`, we need to
# process try/except with branch skipping disabled. However, for the rest of the code
# after finally, we need to process try/except with branch skipping enabled.
# Therefore, we need to process try/finally twice.
# Because processing is not idempotent, we should make a copy of the tracker.
old_tracker = self.tracker.copy()
self.tracker.disable_branch_skip = True
self.process_try_stmt(o)
self.tracker = old_tracker
self.process_try_stmt(o)
self.try_depth -= 1
def process_try_stmt(self, o: TryStmt) -> None:
"""
Processes try statement decomposing it into the following:
if ...:
body
else_body
elif ...:
except 1
elif ...:
except 2
else:
except n
finally
"""
self.tracker.start_branch_statement()
o.body.accept(self)
if o.else_body is not None:
o.else_body.accept(self)
if len(o.handlers) > 0:
assert len(o.handlers) == len(o.vars) == len(o.types)
for i in range(len(o.handlers)):
self.tracker.next_branch()
exc_type = o.types[i]
if exc_type is not None:
exc_type.accept(self)
var = o.vars[i]
if var is not None:
self.process_definition(var.name)
var.accept(self)
o.handlers[i].accept(self)
if var is not None:
self.tracker.delete_var(var.name)
self.tracker.end_branch_statement()
if o.finally_body is not None:
self.tracker.in_finally = True
o.finally_body.accept(self)
self.tracker.in_finally = False
def visit_while_stmt(self, o: WhileStmt) -> None:
o.expr.accept(self)
self.tracker.start_branch_statement()
loop = Loop()
self.loops.append(loop)
o.body.accept(self)
has_break = loop.has_break
if not checker.is_true_literal(o.expr):
# If this is a loop like `while True`, we can consider the body to be
# a single branch statement (we're guaranteed that the body is executed at least once).
# If not, call next_branch() to make all variables defined there conditional.
self.tracker.next_branch()
self.tracker.end_branch_statement()
if o.else_body is not None:
# If the loop has a `break` inside, `else` is executed conditionally.
# If the loop doesn't have a `break` either the function will return or
# execute the `else`.
if has_break:
self.tracker.start_branch_statement()
self.tracker.next_branch()
if o.else_body:
o.else_body.accept(self)
if has_break:
self.tracker.end_branch_statement()
self.loops.pop()
def visit_as_pattern(self, o: AsPattern) -> None:
if o.name is not None:
self.process_lvalue(o.name)
super().visit_as_pattern(o)
def visit_starred_pattern(self, o: StarredPattern) -> None:
if o.capture is not None:
self.process_lvalue(o.capture)
super().visit_starred_pattern(o)
def visit_name_expr(self, o: NameExpr) -> None:
if o.name in self.builtins and self.tracker.in_scope(ScopeType.Global):
return
if (
self.tracker.is_possibly_undefined(o.name)
and self.tracker.in_finally == self.tracker.disable_branch_skip
):
# A variable is only defined in some branches.
self.variable_may_be_undefined(o.name, o)
# We don't want to report the error on the same variable multiple times.
self.tracker.record_definition(o.name)
elif self.tracker.is_defined_in_different_branch(o.name):
# A variable is defined in one branch but used in a different branch.
if self.loops or self.try_depth > 0:
# If we're in a loop or in a try, we can't be sure that this variable
# is undefined. Report it as "may be undefined".
self.variable_may_be_undefined(o.name, o)
else:
self.var_used_before_def(o.name, o)
elif self.tracker.is_undefined(o.name):
# A variable is undefined. It could be due to two things:
# 1. A variable is just totally undefined
# 2. The variable is defined later in the code.
# Case (1) will be caught by semantic analyzer. Case (2) is a forward ref that should
# be caught by this visitor. Save the ref for later, so that if we see a definition,
# we know it's a used-before-definition scenario.
self.tracker.record_undefined_ref(o)
super().visit_name_expr(o)
def visit_with_stmt(self, o: WithStmt) -> None:
for expr, idx in zip(o.expr, o.target):
expr.accept(self)
self.process_lvalue(idx)
o.body.accept(self)
def visit_class_def(self, o: ClassDef) -> None:
self.process_definition(o.name)
self.tracker.enter_scope(ScopeType.Class)
super().visit_class_def(o)
self.tracker.exit_scope()
def visit_import(self, o: Import) -> None:
for mod, alias in o.ids:
if alias is not None:
self.tracker.record_definition(alias)
else:
# When you do `import x.y`, only `x` becomes defined.
names = mod.split(".")
if names:
# `names` should always be nonempty, but we don't want mypy
# to crash on invalid code.
self.tracker.record_definition(names[0])
super().visit_import(o)
def visit_import_from(self, o: ImportFrom) -> None:
for mod, alias in o.names:
name = alias
if name is None:
name = mod
self.tracker.record_definition(name)
super().visit_import_from(o)
def visit_type_alias_stmt(self, o: TypeAliasStmt) -> None:
# Type alias target may contain forward references
self.tracker.record_definition(o.name.name)
| PossiblyUndefinedVariableVisitor |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 34069,
"end": 34167
} | class ____(Structure):
_fields_ = (("tool", p_uint32), ("version", p_uint32))
| build_tool_version |
python | walkccc__LeetCode | solutions/255. Verify Preorder Sequence in Binary Search Tree/255.py | {
"start": 0,
"end": 388
} | class ____:
def verifyPreorder(self, preorder: list[int]) -> bool:
i = 0
def dfs(min: int, max: int) -> None:
nonlocal i
if i == len(preorder):
return
if preorder[i] < min or preorder[i] > max:
return
val = preorder[i]
i += 1
dfs(min, val)
dfs(val, max)
dfs(-math.inf, math.inf)
return i == len(preorder)
| Solution |
python | plotly__plotly.py | plotly/graph_objs/_choroplethmapbox.py | {
"start": 231,
"end": 66007
} | class ____(_BaseTraceType):
_parent_path_str = ""
_path_str = "choroplethmapbox"
_valid_props = {
"autocolorscale",
"below",
"coloraxis",
"colorbar",
"colorscale",
"customdata",
"customdatasrc",
"featureidkey",
"geojson",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatefallback",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"legend",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"locations",
"locationssrc",
"marker",
"meta",
"metasrc",
"name",
"reversescale",
"selected",
"selectedpoints",
"showlegend",
"showscale",
"stream",
"subplot",
"text",
"textsrc",
"type",
"uid",
"uirevision",
"unselected",
"visible",
"z",
"zauto",
"zmax",
"zmid",
"zmin",
"zsrc",
}
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def below(self):
"""
Determines if the choropleth polygons will be inserted before
the layer with the specified ID. By default, choroplethmapbox
traces are placed above the water layers. If set to '', the
layer will be inserted above every existing layer.
The 'below' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["below"]
@below.setter
def below(self, val):
self["below"] = val
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmapbox.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Returns
-------
plotly.graph_objs.choroplethmapbox.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
@property
def colorscale(self):
"""
Sets the colorscale. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
bounds of the colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,Reds,Viridis,
YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
@property
def featureidkey(self):
"""
Sets the key in GeoJSON features which is used as id to match
the items included in the `locations` array. Support nested
property, for example "properties.name".
The 'featureidkey' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["featureidkey"]
@featureidkey.setter
def featureidkey(self, val):
self["featureidkey"] = val
@property
def geojson(self):
"""
Sets the GeoJSON data associated with this trace. It can be set
as a valid GeoJSON object or as a URL string. Note that we only
accept GeoJSONs of type "FeatureCollection" or "Feature" with
geometries of type "Polygon" or "MultiPolygon".
The 'geojson' property accepts values of any type
Returns
-------
Any
"""
return self["geojson"]
@geojson.setter
def geojson(self, val):
self["geojson"] = val
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['location', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'location+z')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmapbox.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Returns
-------
plotly.graph_objs.choroplethmapbox.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Finally, the template string has access to variable
`properties` Anything contained in tag `<extra>` is displayed
in the secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the secondary box
completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
@property
def hovertemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'hovertemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["hovertemplatefallback"]
@hovertemplatefallback.setter
def hovertemplatefallback(self, val):
self["hovertemplatefallback"] = val
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
@property
def legend(self):
"""
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2", "legend3",
etc. Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces and shapes part of
the same legend group hide/show at the same time when toggling
legend items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmapbox.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.choroplethmapbox.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items. When
having unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and layout.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def locations(self):
"""
Sets which features found in "geojson" to plot using their
feature `id` field.
The 'locations' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["locations"]
@locations.setter
def locations(self, val):
self["locations"] = val
@property
def locationssrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`locations`.
The 'locationssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["locationssrc"]
@locationssrc.setter
def locationssrc(self, val):
self["locationssrc"] = val
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmapbox.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.choroplethmapbox.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def reversescale(self):
"""
Reverses the color mapping if true. If true, `zmin` will
correspond to the last color in the array and `zmax` will
correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
@property
def selected(self):
"""
The 'selected' property is an instance of Selected
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmapbox.Selected`
- A dict of string/value properties that will be passed
to the Selected constructor
Returns
-------
plotly.graph_objs.choroplethmapbox.Selected
"""
return self["selected"]
@selected.setter
def selected(self, val):
self["selected"] = val
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
turned on for all points, whereas, any other non-array values
means no selection all where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self["selectedpoints"]
@selectedpoints.setter
def selectedpoints(self, val):
self["selectedpoints"] = val
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmapbox.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Returns
-------
plotly.graph_objs.choroplethmapbox.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
@property
def subplot(self):
"""
mapbox subplots and traces are deprecated! Please consider
switching to `map` subplots and traces. Learn more at:
https://plotly.com/python/maplibre-migration/ as well as
https://plotly.com/javascript/maplibre-migration/ Sets a
reference between this trace's data coordinates and a mapbox
subplot. If "mapbox" (the default value), the data refer to
`layout.mapbox`. If "mapbox2", the data refer to
`layout.mapbox2`, and so on.
The 'subplot' property is an identifier of a particular
subplot, of type 'mapbox', that may be specified as the string 'mapbox'
optionally followed by an integer >= 1
(e.g. 'mapbox', 'mapbox1', 'mapbox2', 'mapbox3', etc.)
Returns
-------
str
"""
return self["subplot"]
@subplot.setter
def subplot(self, val):
self["subplot"] = val
@property
def text(self):
"""
Sets the text elements associated with each location.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `text`.
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
@property
def uid(self):
"""
Assign an id to this trace, Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def unselected(self):
"""
The 'unselected' property is an instance of Unselected
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmapbox.Unselected`
- A dict of string/value properties that will be passed
to the Unselected constructor
Returns
-------
plotly.graph_objs.choroplethmapbox.Unselected
"""
return self["unselected"]
@unselected.setter
def unselected(self, val):
self["unselected"] = val
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def z(self):
"""
Sets the color values.
The 'z' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
@property
def zauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `z`) or the bounds set in
`zmin` and `zmax` Defaults to `false` when `zmin` and `zmax`
are set by the user.
The 'zauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["zauto"]
@zauto.setter
def zauto(self, val):
self["zauto"] = val
@property
def zmax(self):
"""
Sets the upper bound of the color domain. Value should have the
same units as in `z` and if set, `zmin` must be set as well.
The 'zmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmax"]
@zmax.setter
def zmax(self, val):
self["zmax"] = val
@property
def zmid(self):
"""
Sets the mid-point of the color domain by scaling `zmin` and/or
`zmax` to be equidistant to this point. Value should have the
same units as in `z`. Has no effect when `zauto` is `false`.
The 'zmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmid"]
@zmid.setter
def zmid(self, val):
self["zmid"] = val
@property
def zmin(self):
"""
Sets the lower bound of the color domain. Value should have the
same units as in `z` and if set, `zmax` must be set as well.
The 'zmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmin"]
@zmin.setter
def zmin(self, val):
self["zmin"] = val
@property
def zsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `z`.
The 'zsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["zsrc"]
@zsrc.setter
def zsrc(self, val):
self["zsrc"] = val
@property
def type(self):
return self._props["type"]
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
below
Determines if the choropleth polygons will be inserted
before the layer with the specified ID. By default,
choroplethmapbox traces are placed above the water
layers. If set to '', the layer will be inserted above
every existing layer.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.choroplethmapbox.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
featureidkey
Sets the key in GeoJSON features which is used as id to
match the items included in the `locations` array.
Support nested property, for example "properties.name".
geojson
Sets the GeoJSON data associated with this trace. It
can be set as a valid GeoJSON object or as a URL
string. Note that we only accept GeoJSONs of type
"FeatureCollection" or "Feature" with geometries of
type "Polygon" or "MultiPolygon".
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.choroplethmapbox.Hoverlabe
l` instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variable `properties` Anything contained in tag
`<extra>` is displayed in the secondary box, for
example `<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.choroplethmapbox.Legendgro
uptitle` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
locations
Sets which features found in "geojson" to plot using
their feature `id` field.
locationssrc
Sets the source reference on Chart Studio Cloud for
`locations`.
marker
:class:`plotly.graph_objects.choroplethmapbox.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
reversescale
Reverses the color mapping if true. If true, `zmin`
will correspond to the last color in the array and
`zmax` will correspond to the first color.
selected
:class:`plotly.graph_objects.choroplethmapbox.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.choroplethmapbox.Stream`
instance or dict with compatible properties
subplot
mapbox subplots and traces are deprecated! Please
consider switching to `map` subplots and traces. Learn
more at: https://plotly.com/python/maplibre-migration/
as well as https://plotly.com/javascript/maplibre-
migration/ Sets a reference between this trace's data
coordinates and a mapbox subplot. If "mapbox" (the
default value), the data refer to `layout.mapbox`. If
"mapbox2", the data refer to `layout.mapbox2`, and so
on.
text
Sets the text elements associated with each location.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.choroplethmapbox.Unselecte
d` instance or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
z
Sets the color values.
zauto
Determines whether or not the color domain is computed
with respect to the input data (here in `z`) or the
bounds set in `zmin` and `zmax` Defaults to `false`
when `zmin` and `zmax` are set by the user.
zmax
Sets the upper bound of the color domain. Value should
have the same units as in `z` and if set, `zmin` must
be set as well.
zmid
Sets the mid-point of the color domain by scaling
`zmin` and/or `zmax` to be equidistant to this point.
Value should have the same units as in `z`. Has no
effect when `zauto` is `false`.
zmin
Sets the lower bound of the color domain. Value should
have the same units as in `z` and if set, `zmax` must
be set as well.
zsrc
Sets the source reference on Chart Studio Cloud for
`z`.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
below=None,
coloraxis=None,
colorbar=None,
colorscale=None,
customdata=None,
customdatasrc=None,
featureidkey=None,
geojson=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatefallback=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
locations=None,
locationssrc=None,
marker=None,
meta=None,
metasrc=None,
name=None,
reversescale=None,
selected=None,
selectedpoints=None,
showlegend=None,
showscale=None,
stream=None,
subplot=None,
text=None,
textsrc=None,
uid=None,
uirevision=None,
unselected=None,
visible=None,
z=None,
zauto=None,
zmax=None,
zmid=None,
zmin=None,
zsrc=None,
**kwargs,
):
"""
Construct a new Choroplethmapbox object
"choroplethmapbox" trace is deprecated! Please consider
switching to the "choroplethmap" trace type and `map` subplots.
Learn more at: https://plotly.com/python/maplibre-migration/ as
well as https://plotly.com/javascript/maplibre-migration/
GeoJSON features to be filled are set in `geojson` The data
that describes the choropleth value-to-color mapping is set in
`locations` and `z`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.Choroplethmapbox`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
below
Determines if the choropleth polygons will be inserted
before the layer with the specified ID. By default,
choroplethmapbox traces are placed above the water
layers. If set to '', the layer will be inserted above
every existing layer.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.choroplethmapbox.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
featureidkey
Sets the key in GeoJSON features which is used as id to
match the items included in the `locations` array.
Support nested property, for example "properties.name".
geojson
Sets the GeoJSON data associated with this trace. It
can be set as a valid GeoJSON object or as a URL
string. Note that we only accept GeoJSONs of type
"FeatureCollection" or "Feature" with geometries of
type "Polygon" or "MultiPolygon".
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.choroplethmapbox.Hoverlabe
l` instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variable `properties` Anything contained in tag
`<extra>` is displayed in the secondary box, for
example `<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.choroplethmapbox.Legendgro
uptitle` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
locations
Sets which features found in "geojson" to plot using
their feature `id` field.
locationssrc
Sets the source reference on Chart Studio Cloud for
`locations`.
marker
:class:`plotly.graph_objects.choroplethmapbox.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
reversescale
Reverses the color mapping if true. If true, `zmin`
will correspond to the last color in the array and
`zmax` will correspond to the first color.
selected
:class:`plotly.graph_objects.choroplethmapbox.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.choroplethmapbox.Stream`
instance or dict with compatible properties
subplot
mapbox subplots and traces are deprecated! Please
consider switching to `map` subplots and traces. Learn
more at: https://plotly.com/python/maplibre-migration/
as well as https://plotly.com/javascript/maplibre-
migration/ Sets a reference between this trace's data
coordinates and a mapbox subplot. If "mapbox" (the
default value), the data refer to `layout.mapbox`. If
"mapbox2", the data refer to `layout.mapbox2`, and so
on.
text
Sets the text elements associated with each location.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.choroplethmapbox.Unselecte
d` instance or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
z
Sets the color values.
zauto
Determines whether or not the color domain is computed
with respect to the input data (here in `z`) or the
bounds set in `zmin` and `zmax` Defaults to `false`
when `zmin` and `zmax` are set by the user.
zmax
Sets the upper bound of the color domain. Value should
have the same units as in `z` and if set, `zmin` must
be set as well.
zmid
Sets the mid-point of the color domain by scaling
`zmin` and/or `zmax` to be equidistant to this point.
Value should have the same units as in `z`. Has no
effect when `zauto` is `false`.
zmin
Sets the lower bound of the color domain. Value should
have the same units as in `z` and if set, `zmax` must
be set as well.
zsrc
Sets the source reference on Chart Studio Cloud for
`z`.
Returns
-------
Choroplethmapbox
"""
super().__init__("choroplethmapbox")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.Choroplethmapbox
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Choroplethmapbox`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("autocolorscale", arg, autocolorscale)
self._set_property("below", arg, below)
self._set_property("coloraxis", arg, coloraxis)
self._set_property("colorbar", arg, colorbar)
self._set_property("colorscale", arg, colorscale)
self._set_property("customdata", arg, customdata)
self._set_property("customdatasrc", arg, customdatasrc)
self._set_property("featureidkey", arg, featureidkey)
self._set_property("geojson", arg, geojson)
self._set_property("hoverinfo", arg, hoverinfo)
self._set_property("hoverinfosrc", arg, hoverinfosrc)
self._set_property("hoverlabel", arg, hoverlabel)
self._set_property("hovertemplate", arg, hovertemplate)
self._set_property("hovertemplatefallback", arg, hovertemplatefallback)
self._set_property("hovertemplatesrc", arg, hovertemplatesrc)
self._set_property("hovertext", arg, hovertext)
self._set_property("hovertextsrc", arg, hovertextsrc)
self._set_property("ids", arg, ids)
self._set_property("idssrc", arg, idssrc)
self._set_property("legend", arg, legend)
self._set_property("legendgroup", arg, legendgroup)
self._set_property("legendgrouptitle", arg, legendgrouptitle)
self._set_property("legendrank", arg, legendrank)
self._set_property("legendwidth", arg, legendwidth)
self._set_property("locations", arg, locations)
self._set_property("locationssrc", arg, locationssrc)
self._set_property("marker", arg, marker)
self._set_property("meta", arg, meta)
self._set_property("metasrc", arg, metasrc)
self._set_property("name", arg, name)
self._set_property("reversescale", arg, reversescale)
self._set_property("selected", arg, selected)
self._set_property("selectedpoints", arg, selectedpoints)
self._set_property("showlegend", arg, showlegend)
self._set_property("showscale", arg, showscale)
self._set_property("stream", arg, stream)
self._set_property("subplot", arg, subplot)
self._set_property("text", arg, text)
self._set_property("textsrc", arg, textsrc)
self._set_property("uid", arg, uid)
self._set_property("uirevision", arg, uirevision)
self._set_property("unselected", arg, unselected)
self._set_property("visible", arg, visible)
self._set_property("z", arg, z)
self._set_property("zauto", arg, zauto)
self._set_property("zmax", arg, zmax)
self._set_property("zmid", arg, zmid)
self._set_property("zmin", arg, zmin)
self._set_property("zsrc", arg, zsrc)
self._props["type"] = "choroplethmapbox"
arg.pop("type", None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
warnings.warn(
"*choroplethmapbox* is deprecated!"
+ " Use *choroplethmap* instead."
+ " Learn more at: https://plotly.com/python/mapbox-to-maplibre/",
stacklevel=2,
category=DeprecationWarning,
)
| Choroplethmapbox |
python | django__django | tests/gis_tests/geo3d/models.py | {
"start": 722,
"end": 885
} | class ____(NamedModel):
    # A named geo model storing a 3-dimensional LineString geometry in a
    # projected spatial reference system (srid=32140 — presumably a Texas
    # state-plane CRS given the test-suite context; TODO confirm).
    line = models.LineStringField(dim=3, srid=32140)
    class Meta:
        # Only create/use this model on database backends that can store
        # 3D geometries.
        required_db_features = {"supports_3d_storage"}
| InterstateProj3D |
python | streamlit__streamlit | lib/tests/streamlit/elements/layouts_test.py | {
"start": 37482,
"end": 45074
} | class ____(DeltaGeneratorTestCase):
    """Run unit tests for the non-public delta-generator dialog and also the dialog
    decorator."""
    # Dialog title shared by all tests below.
    title = "Test Dialog"
    def test_dialog_deltagenerator_usage_with_context_manager(self):
        """Test that the delta-generator dialog works as a context manager"""
        dialog = st._main._dialog(DialogTest.title)
        with dialog:
            """No content so that 'get_delta_from_queue' returns the dialog."""
            pass
        dialog_block = self.get_delta_from_queue()
        assert dialog_block.add_block.dialog.title == DialogTest.title
        assert not dialog_block.add_block.dialog.is_open
        assert dialog_block.add_block.dialog.dismissible
        assert not dialog_block.add_block.dialog.id
    @parameterized.expand(
        [
            ("medium", BlockProto.Dialog.DialogWidth.MEDIUM),
            ("large", BlockProto.Dialog.DialogWidth.LARGE),
            ("small", BlockProto.Dialog.DialogWidth.SMALL),
        ]
    )
    def test_dialog_width(
        self, width: str, expected_width: BlockProto.Dialog.DialogWidth.ValueType
    ):
        """Test that the dialog width parameter works correctly for all supported values"""
        dialog = st._main._dialog(DialogTest.title, width=width)
        with dialog:
            # No content so that 'get_delta_from_queue' returns the dialog.
            pass
        dialog_block = self.get_delta_from_queue()
        assert dialog_block.add_block.dialog.width == expected_width
    def test_dialog_deltagenerator_opens_and_closes(self):
        """Test that dialog opens and closes"""
        dialog = st._main._dialog(DialogTest.title)
        assert dialog is not None
        dialog_block = self.get_delta_from_queue()
        assert not dialog_block.add_block.dialog.is_open
        dialog.open()
        dialog_block = self.get_delta_from_queue()
        assert dialog_block.add_block.dialog.is_open
        dialog.close()
        dialog_block = self.get_delta_from_queue()
        assert not dialog_block.add_block.dialog.is_open
    def test_dialog_deltagenerator_only_call_open_once(self):
        """Test that only a single dialog can be opened"""
        dialog = st._main._dialog(DialogTest.title)
        assert dialog is not None
        # Open first time
        dialog.open()
        with pytest.raises(StreamlitAPIException):
            # Cannot call open while the dialog is already open
            dialog.open()
        dialog.close()
        with pytest.raises(StreamlitAPIException):
            # Close does not reset the dialog-flag as this is handled per script-run
            # context
            dialog.open()
    def test_dialog_decorator_with_title_opens(self):
        """Test that the dialog decorator having a title does not throw an error"""
        @st.dialog("example title")
        def dialog():
            return None
        dialog()
    def test_dialog_decorator_title_required(self):
        """Test that the title is required in decorator"""
        # Decoration itself raises TypeError when no title is passed, so the
        # trailing call never runs; the assertion is checked after the block.
        with pytest.raises(TypeError) as e:
            @st.dialog()
            def dialog():
                return None
            dialog()
        assert e.value.args[0].startswith(
            "dialog_decorator() missing 1 required positional argument: 'title'"
        )
        with pytest.raises(TypeError) as e:
            @st.dialog()
            def dialog_with_arguments(a, b):
                return None
            dialog_with_arguments("", "")
        assert e.value.args[0].startswith(
            "dialog_decorator() missing 1 required positional argument: 'title'"
        )
        # An empty title is rejected with a Streamlit-specific exception.
        with pytest.raises(StreamlitAPIException) as e:
            @st.dialog("")
            def dialog():
                return None
            dialog()
        assert e.value.args[0].startswith("A non-empty `title`")
    def test_dialog_decorator_must_be_called_like_a_function_with_a_title(self):
        """Test that the decorator must be called like a function."""
        # Using ``@st.dialog`` without parentheses passes the function itself
        # as the "title", which must raise.
        with pytest.raises(StreamlitAPIException):
            @st.dialog
            def dialog():
                return None
            dialog()
        with pytest.raises(StreamlitAPIException):
            @st.dialog
            def dialog_with_arg(a):
                return None
            dialog_with_arg("a")
        with pytest.raises(StreamlitAPIException):
            @st.dialog
            def dialog_with_args(a, b):
                return None
            dialog_with_args("a", "b")
    def test_nested_dialog_raises_error(self):
        """Test that dialogs cannot be called nested."""
        @st.dialog("Level2 dialog")
        def level2_dialog():
            st.empty()
        @st.dialog("Level1 dialog")
        def level1_dialog():
            level2_dialog()
        with pytest.raises(FragmentHandledException) as e:
            level1_dialog()
        assert str(e.value) == "Dialogs may not be nested inside other dialogs."
    def test_only_one_dialog_can_be_opened_at_same_time(self):
        """Test that opening a second dialog while one is open raises."""
        @st.dialog("Dialog1")
        def dialog1():
            st.empty()
        @st.dialog("Dialog2")
        def dialog2():
            st.empty()
        with pytest.raises(StreamlitAPIException) as e:
            dialog1()
            dialog2()
        assert e.value.args[0].startswith(
            "Only one dialog is allowed to be opened at the same time."
        )
    def test_dialog_deltagenerator_dismissible_false(self):
        """Test that the delta-generator dialog properly handles dismissible=False"""
        dialog = st._main._dialog(DialogTest.title, dismissible=False)
        with dialog:
            """No content so that 'get_delta_from_queue' returns the dialog."""
            pass
        dialog_block = self.get_delta_from_queue()
        assert dialog_block.add_block.dialog.title == DialogTest.title
        assert not dialog_block.add_block.dialog.is_open
        assert dialog_block.add_block.dialog.dismissible is False
    def test_dialog_decorator_invalid_on_dismiss(self):
        """Test dialog decorator with invalid on_dismiss raises error"""
        with pytest.raises(StreamlitAPIException) as exc_info:
            @dialog_decorator("Test Dialog", on_dismiss="invalid")
            def test_dialog():
                pass
            test_dialog()
        assert "You have passed invalid to `on_dismiss`" in str(exc_info.value)
    def test_dialog_on_dismiss_rerun(self):
        """Test that the dialog decorator with on_dismiss='rerun'."""
        # on_dismiss != "ignore" registers a widget so dismissal can trigger
        # a rerun; the dialog proto then carries a non-empty id.
        with patch("streamlit.elements.lib.dialog.register_widget") as mock_register:
            dialog = st._main._dialog(DialogTest.title, on_dismiss="rerun")
            with dialog:
                # No content so that 'get_delta_from_queue' returns the dialog.
                pass
            mock_register.assert_called_once()
        dialog_block = self.get_delta_from_queue()
        assert dialog_block.add_block.dialog.id
    def test_dialog_on_dismiss_callback(self):
        """Test that the dialog decorator with on_dismiss=callback."""
        def callback():
            pass
        with patch("streamlit.elements.lib.dialog.register_widget") as mock_register:
            dialog = st._main._dialog(DialogTest.title, on_dismiss=callback)
            with dialog:
                # No content so that 'get_delta_from_queue' returns the dialog.
                pass
            mock_register.assert_called_once()
        dialog_block = self.get_delta_from_queue()
        assert dialog_block.add_block.dialog.id
| DialogTest |
python | huggingface__transformers | src/transformers/models/got_ocr2/modular_got_ocr2.py | {
"start": 10110,
"end": 11126
} | class ____(nn.Module):
def __init__(self, config: GotOcr2Config):
super().__init__()
vision_output_channels = config.vision_config.output_channels
language_hidden_size = config.text_config.hidden_size
self.conv_upsampler1 = nn.Conv2d(
vision_output_channels, vision_output_channels * 2, kernel_size=3, stride=2, padding=1, bias=False
)
self.conv_upsampler2 = nn.Conv2d(
vision_output_channels * 2, language_hidden_size, kernel_size=3, stride=2, padding=1, bias=False
)
self.multimodal_projector = nn.Linear(language_hidden_size, language_hidden_size)
def forward(self, vision_embeddings: torch.Tensor) -> torch.Tensor:
hidden_state = self.conv_upsampler1(vision_embeddings)
hidden_state = self.conv_upsampler2(hidden_state)
hidden_state = hidden_state.flatten(2).permute(0, 2, 1)
hidden_state = self.multimodal_projector(hidden_state)
return hidden_state
| GotOcr2MultiModalProjector |
python | spyder-ide__spyder | spyder/plugins/ipythonconsole/comms/kernelcomm.py | {
"start": 596,
"end": 9251
} | class ____(CommBase, QObject):
"""
Class with the necessary attributes and methods to handle
communications with a console.
"""
_sig_got_reply = Signal()
sig_exception_occurred = Signal(dict)
sig_comm_ready = Signal()
def __init__(self):
super().__init__()
self.kernel_client = None
# Register handlers
self.register_call_handler('_async_error', self._async_error)
self.register_call_handler('_comm_ready', self._comm_ready)
def is_open(self, comm_id=None):
"""
Check to see if the comm is open and ready to communicate.
"""
id_list = self.get_comm_id_list(comm_id)
if len(id_list) == 0:
return False
return all([self._comms[cid]['status'] == 'ready' for cid in id_list])
@contextmanager
def comm_channel_manager(self, comm_id, queue_message=False):
"""Use control_channel instead of shell_channel."""
if queue_message:
# Send without control_channel
yield
return
id_list = self.get_comm_id_list(comm_id)
for comm_id in id_list:
self._comms[comm_id]['comm']._send_channel = (
self.kernel_client.control_channel)
try:
yield
finally:
id_list = self.get_comm_id_list(comm_id)
for comm_id in id_list:
self._comms[comm_id]['comm']._send_channel = (
self.kernel_client.shell_channel)
def _set_call_return_value(self, call_dict, return_value, is_error=False):
"""Override to use the comm_channel for all replies."""
with self.comm_channel_manager(self.calling_comm_id, False):
if is_error and (get_debug_level() or running_under_pytest()):
# Disable error muting when debugging or testing
call_dict['settings']['display_error'] = True
super()._set_call_return_value(
call_dict, return_value, is_error=is_error
)
def remove(self, comm_id=None, only_closing=False):
"""
Remove the comm without notifying the other side.
Use when the other side is already down.
"""
id_list = self.get_comm_id_list(comm_id)
for comm_id in id_list:
if only_closing and self._comms[comm_id]['status'] != 'closing':
continue
del self._comms[comm_id]
def close(self, comm_id=None):
"""Ask kernel to close comm and send confirmation."""
id_list = self.get_comm_id_list(comm_id)
for comm_id in id_list:
# Send comm_close directly to avoid really closing the comm
self._comms[comm_id]['comm']._send_msg(
'comm_close', {}, None, None, None)
self._comms[comm_id]['status'] = 'closing'
def open_comm(self, kernel_client):
"""Open comm through the kernel client."""
self.kernel_client = kernel_client
try:
logger.debug(
f"Opening kernel comm for "
f"{'<' + repr(kernel_client).split('.')[-1]}"
)
self._register_comm(
# Create new comm and send the highest protocol
kernel_client.comm_manager.new_comm(self._comm_name)
)
except AttributeError:
logger.info(
"Unable to open comm due to unexistent comm manager: " +
"kernel_client.comm_manager=" + str(kernel_client.comm_manager)
)
def remote_call(self, interrupt=False, blocking=False, callback=None,
comm_id=None, timeout=None, display_error=False):
"""Get a handler for remote calls."""
return super().remote_call(
interrupt=interrupt, blocking=blocking, callback=callback,
comm_id=comm_id, timeout=timeout, display_error=display_error)
def on_incoming_call(self, call_dict):
"""A call was received"""
super().on_incoming_call(call_dict)
# Just in case the call was not received
self._comm_ready()
# ---- Private -----
def _comm_ready(self):
"""If this function is called, the comm is ready"""
if self._comms[self.calling_comm_id]['status'] != 'ready':
self._comms[self.calling_comm_id]['status'] = 'ready'
self.sig_comm_ready.emit()
def _send_call(self, call_dict, comm_id, buffers):
"""Send call and interupt the kernel if needed."""
settings = call_dict['settings']
blocking = 'blocking' in settings and settings['blocking']
interrupt = 'interrupt' in settings and settings['interrupt']
queue_message = not interrupt and not blocking
if not self.kernel_client.is_alive():
if blocking:
raise RuntimeError("Kernel is dead")
else:
# The user has other problems
logger.info(
"Dropping message because kernel is dead: %s",
str(call_dict)
)
return
with self.comm_channel_manager(
comm_id, queue_message=queue_message):
return super()._send_call(
call_dict, comm_id, buffers
)
def _get_call_return_value(self, call_dict, comm_id):
"""
Catch exception if call is not blocking.
"""
try:
return super()._get_call_return_value(
call_dict, comm_id)
except RuntimeError as e:
settings = call_dict['settings']
blocking = 'blocking' in settings and settings['blocking']
if blocking:
raise
else:
# The user has other problems
logger.info(
"Dropping message because of exception: ",
str(e),
str(call_dict)
)
return
def _wait_reply(self, comm_id, call_id, call_name, timeout):
"""Wait for the other side reply."""
def got_reply():
return call_id in self._reply_inbox
timeout_msg = "Timeout while waiting for {}".format(
self._reply_waitlist)
self._wait(got_reply, self._sig_got_reply, timeout_msg, timeout)
def _wait(self, condition, signal, timeout_msg, timeout):
"""
Wait until condition() is True by running an event loop.
signal: qt signal that should interrupt the event loop.
timeout_msg: Message to display in case of a timeout.
timeout: time in seconds before a timeout
"""
# Exit if condition is fulfilled or the kernel is dead.
if condition():
return
if not self.kernel_client.is_alive():
raise RuntimeError("Kernel is dead")
# Create event loop to wait with
wait_loop = QEventLoop(None)
wait_timeout = QTimer(self)
wait_timeout.setSingleShot(True)
# Connect signals to stop kernel loop
wait_timeout.timeout.connect(wait_loop.quit)
self.kernel_client.hb_channel.kernel_died.connect(wait_loop.quit)
signal.connect(wait_loop.quit)
# Wait until the kernel returns the value
wait_timeout.start(timeout * 1000)
while not condition():
if not wait_timeout.isActive():
signal.disconnect(wait_loop.quit)
self.kernel_client.hb_channel.kernel_died.disconnect(
wait_loop.quit)
if condition():
return
if not self.kernel_client.is_alive():
raise RuntimeError("Kernel is dead")
raise TimeoutError(timeout_msg)
wait_loop.exec_()
wait_timeout.stop()
signal.disconnect(wait_loop.quit)
self.kernel_client.hb_channel.kernel_died.disconnect(
wait_loop.quit)
def _handle_remote_call_reply(self, *args, **kwargs):
"""
A blocking call received a reply.
"""
super()._handle_remote_call_reply(*args, **kwargs)
self._sig_got_reply.emit()
def _async_error(self, error_wrapper):
"""
Handle an error that was raised on the other side and sent back.
"""
if not isinstance(error_wrapper, CommsErrorWrapper):
error_wrapper = CommsErrorWrapper.from_json(error_wrapper)
for line in error_wrapper.format_error():
self.sig_exception_occurred.emit(
dict(text=line, is_traceback=True)
)
| KernelComm |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 22324,
"end": 23875
} | class ____(NonStrictDataModel):
"""
:param key: Entry key
:type key: str
:param mode: System defined input/output indication
:type mode: ArtifactModeEnum
"""
_schema = {
"properties": {
"key": {"description": "Entry key", "type": "string"},
"mode": {
"$ref": "#/definitions/artifact_mode_enum",
"description": "System defined input/output indication",
},
},
"required": ["key"],
"type": "object",
}
def __init__(self, key: str, mode: Any = None, **kwargs: Any) -> None:
super(ArtifactId, self).__init__(**kwargs)
self.key = key
self.mode = mode
@schema_property("key")
def key(self) -> str:
return self._property_key
@key.setter
def key(self, value: str) -> None:
if value is None:
self._property_key = None
return
self.assert_isinstance(value, "key", six.string_types)
self._property_key = value
@schema_property("mode")
def mode(self) -> Any:
return self._property_mode
@mode.setter
def mode(self, value: Any) -> None:
if value is None:
self._property_mode = None
return
if isinstance(value, six.string_types):
try:
value = ArtifactModeEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "mode", enum.Enum)
self._property_mode = value
| ArtifactId |
python | astropy__astropy | astropy/modeling/bounding_box.py | {
"start": 18488,
"end": 30769
} | class ____(_BoundingDomain):
"""
A model's bounding box.
Parameters
----------
intervals : dict
A dictionary containing all the intervals for each model input
keys -> input index
values -> interval for that index
model : `~astropy.modeling.Model`
The Model this bounding_box is for.
ignored : list
A list containing all the inputs (index) which will not be
checked for whether or not their elements are in/out of an interval.
order : optional, str
The ordering that is assumed for the tuple representation of this
bounding_box. Options: 'C': C/Python order, e.g. z, y, x.
(default), 'F': Fortran/mathematical notation order, e.g. x, y, z.
"""
def __init__(
self,
intervals: dict[int, _Interval],
model,
ignored: list[int] | None = None,
order: str = "C",
):
super().__init__(model, ignored, order)
self._intervals = {}
if intervals != () and intervals != {}:
self._validate(intervals, order=order)
def copy(self, ignored=None):
intervals = {
index: interval.copy() for index, interval in self._intervals.items()
}
if ignored is None:
ignored = self._ignored.copy()
return ModelBoundingBox(
intervals, self._model, ignored=ignored, order=self._order
)
@property
def intervals(self) -> dict[int, _Interval]:
"""Return bounding_box labeled using input positions."""
return self._intervals
@property
def named_intervals(self) -> dict[str, _Interval]:
"""Return bounding_box labeled using input names."""
return {self._get_name(index): bbox for index, bbox in self._intervals.items()}
def __repr__(self):
parts = ["ModelBoundingBox(", " intervals={"]
for name, interval in self.named_intervals.items():
parts.append(f" {name}: {interval}")
parts.append(" }")
if len(self._ignored) > 0:
parts.append(f" ignored={self.ignored_inputs}")
parts.append(
f" model={self._model.__class__.__name__}(inputs={self._model.inputs})"
)
parts.append(f" order='{self._order}'")
parts.append(")")
return "\n".join(parts)
def __len__(self):
return len(self._intervals)
def __contains__(self, key):
try:
return self._get_index(key) in self._intervals or self._ignored
except (IndexError, ValueError):
return False
def has_interval(self, key):
return self._get_index(key) in self._intervals
def __getitem__(self, key):
"""Get bounding_box entries by either input name or input index."""
index = self._get_index(key)
if index in self._ignored:
return _ignored_interval
else:
return self._intervals[self._get_index(key)]
def bounding_box(
self, order: str | None = None
) -> tuple[float, float] | tuple[tuple[float, float], ...]:
"""
Return the old tuple of tuples representation of the bounding_box
order='C' corresponds to the old bounding_box ordering
order='F' corresponds to the gwcs bounding_box ordering.
"""
if len(self._intervals) == 1:
return tuple(next(iter(self._intervals.values())))
else:
order = self._get_order(order)
inputs = self._model.inputs
if order == "C":
inputs = inputs[::-1]
bbox = tuple(tuple(self[input_name]) for input_name in inputs)
if len(bbox) == 1:
bbox = bbox[0]
return bbox
def __eq__(self, value):
"""Note equality can be either with old representation or new one."""
if isinstance(value, tuple):
return self.bounding_box() == value
elif isinstance(value, ModelBoundingBox):
return (self.intervals == value.intervals) and (
self.ignored == value.ignored
)
else:
return False
def __setitem__(self, key, value):
"""Validate and store interval under key (input index or input name)."""
index = self._get_index(key)
if index in self._ignored:
self._ignored.remove(index)
self._intervals[index] = _Interval.validate(value)
def __delitem__(self, key):
"""Delete stored interval."""
index = self._get_index(key)
if index in self._ignored:
raise RuntimeError(f"Cannot delete ignored input: {key}!")
del self._intervals[index]
self._ignored.append(index)
def _validate_dict(self, bounding_box: dict):
"""Validate passing dictionary of intervals and setting them."""
for key, value in bounding_box.items():
self[key] = value
@property
def _available_input_index(self):
model_input_index = [self._get_index(_input) for _input in self._model.inputs]
return [_input for _input in model_input_index if _input not in self._ignored]
def _validate_sequence(self, bounding_box, order: str | None = None):
"""
Validate passing tuple of tuples representation (or related) and setting them.
"""
order = self._get_order(order)
if order == "C":
# If bounding_box is C/python ordered, it needs to be reversed
# to be in Fortran/mathematical/input order.
bounding_box = bounding_box[::-1]
for index, value in enumerate(bounding_box):
self[self._available_input_index[index]] = value
@property
def _n_inputs(self) -> int:
n_inputs = self._model.n_inputs - len(self._ignored)
if n_inputs > 0:
return n_inputs
else:
return 0
def _validate_iterable(self, bounding_box, order: str | None = None):
"""Validate and set any iterable representation."""
if len(bounding_box) != self._n_inputs:
raise ValueError(
f"Found {len(bounding_box)} intervals, "
f"but must have exactly {self._n_inputs}."
)
if isinstance(bounding_box, dict):
self._validate_dict(bounding_box)
else:
self._validate_sequence(bounding_box, order)
def _validate(self, bounding_box, order: str | None = None):
"""Validate and set any representation."""
if self._n_inputs == 1 and not isinstance(bounding_box, dict):
self[self._available_input_index[0]] = bounding_box
else:
self._validate_iterable(bounding_box, order)
@classmethod
def validate(
cls,
model,
bounding_box,
ignored: list | None = None,
order: str = "C",
_preserve_ignore: bool = False,
**kwargs,
) -> Self:
"""
Construct a valid bounding box for a model.
Parameters
----------
model : `~astropy.modeling.Model`
The model for which this will be a bounding_box
bounding_box : dict, tuple
A possible representation of the bounding box
order : optional, str
The order that a tuple representation will be assumed to be
Default: 'C'
"""
if isinstance(bounding_box, ModelBoundingBox):
order = bounding_box.order
if _preserve_ignore:
ignored = bounding_box.ignored
bounding_box = bounding_box.named_intervals
new = cls({}, model, ignored=ignored, order=order)
new._validate(bounding_box)
return new
def fix_inputs(self, model, fixed_inputs: dict, _keep_ignored=False) -> Self:
"""
Fix the bounding_box for a `fix_inputs` compound model.
Parameters
----------
model : `~astropy.modeling.Model`
The new model for which this will be a bounding_box
fixed_inputs : dict
Dictionary of inputs which have been fixed by this bounding box.
keep_ignored : bool
Keep the ignored inputs of the bounding box (internal argument only)
"""
new = self.copy()
for _input in fixed_inputs.keys():
del new[_input]
if _keep_ignored:
ignored = new.ignored
else:
ignored = None
return ModelBoundingBox.validate(
model, new.named_intervals, ignored=ignored, order=new._order
)
@property
def dimension(self):
return len(self)
def domain(self, resolution, order: str | None = None) -> list[np.ndarray]:
inputs = self._model.inputs
order = self._get_order(order)
if order == "C":
inputs = inputs[::-1]
return [self[input_name].domain(resolution) for input_name in inputs]
def _outside(self, input_shape, inputs):
"""
Get all the input positions which are outside the bounding_box,
so that the corresponding outputs can be filled with the fill
value (default NaN).
Parameters
----------
input_shape : tuple
The shape that all inputs have be reshaped/broadcasted into
inputs : list
List of all the model inputs
Returns
-------
outside_index : bool-numpy array
True -> position outside bounding_box
False -> position inside bounding_box
all_out : bool
if all of the inputs are outside the bounding_box
"""
all_out = False
outside_index = np.zeros(input_shape, dtype=bool)
for index, _input in enumerate(inputs):
_input = np.asanyarray(_input)
outside = np.broadcast_to(self[index].outside(_input), input_shape)
outside_index[outside] = True
if outside_index.all():
all_out = True
break
return outside_index, all_out
def _valid_index(self, input_shape, inputs):
"""
Get the indices of all the inputs inside the bounding_box.
Parameters
----------
input_shape : tuple
The shape that all inputs have be reshaped/broadcasted into
inputs : list
List of all the model inputs
Returns
-------
valid_index : numpy array
array of all indices inside the bounding box
all_out : bool
if all of the inputs are outside the bounding_box
"""
outside_index, all_out = self._outside(input_shape, inputs)
valid_index = np.atleast_1d(np.logical_not(outside_index)).nonzero()
if len(valid_index[0]) == 0:
all_out = True
return valid_index, all_out
def prepare_inputs(self, input_shape, inputs) -> tuple[Any, Any, Any]:
"""
Get prepare the inputs with respect to the bounding box.
Parameters
----------
input_shape : tuple
The shape that all inputs have be reshaped/broadcasted into
inputs : list
List of all the model inputs
Returns
-------
valid_inputs : list
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : array_like
array of all indices inside the bounding box
all_out: bool
if all of the inputs are outside the bounding_box
"""
valid_index, all_out = self._valid_index(input_shape, inputs)
valid_inputs = []
if not all_out:
for _input in inputs:
if input_shape:
valid_input = np.broadcast_to(np.atleast_1d(_input), input_shape)[
valid_index
]
if np.isscalar(_input):
valid_input = valid_input.item(0)
valid_inputs.append(valid_input)
else:
valid_inputs.append(_input)
return tuple(valid_inputs), valid_index, all_out
| ModelBoundingBox |
python | Textualize__textual | examples/splash.py | {
"start": 485,
"end": 954
} | class ____(Container):
"""Custom widget that extends Container."""
DEFAULT_CSS = """
Splash {
align: center middle;
}
Static {
width: 40;
padding: 2 4;
}
"""
def on_mount(self) -> None:
self.auto_refresh = 1 / 30
def compose(self) -> ComposeResult:
yield Static("Making a splash with Textual!")
def render(self) -> RenderableType:
return LinearGradient(time() * 90, STOPS)
| Splash |
python | scipy__scipy | scipy/stats/tests/test_mstats_basic.py | {
"start": 6827,
"end": 20539
} | class ____:
def test_pearsonr(self):
# Tests some computations of Pearson's r
x = ma.arange(10)
with warnings.catch_warnings():
# The tests in this context are edge cases, with perfect
# correlation or anticorrelation, or totally masked data.
# None of these should trigger a RuntimeWarning.
warnings.simplefilter("error", RuntimeWarning)
assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0)
assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0)
x = ma.array(x, mask=True)
pr = mstats.pearsonr(x, x)
assert_(pr[0] is masked)
assert_(pr[1] is masked)
x1 = ma.array([-1.0, 0.0, 1.0])
y1 = ma.array([0, 0, 3])
r, p = mstats.pearsonr(x1, y1)
assert_almost_equal(r, np.sqrt(3)/2)
assert_almost_equal(p, 1.0/3)
# (x2, y2) have the same unmasked data as (x1, y1).
mask = [False, False, False, True]
x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask)
y2 = ma.array([0, 0, 3, -1], mask=mask)
r, p = mstats.pearsonr(x2, y2)
assert_almost_equal(r, np.sqrt(3)/2)
assert_almost_equal(p, 1.0/3)
def test_pearsonr_misaligned_mask(self):
mx = np.ma.masked_array([1, 2, 3, 4, 5, 6], mask=[0, 1, 0, 0, 0, 0])
my = np.ma.masked_array([9, 8, 7, 6, 5, 9], mask=[0, 0, 1, 0, 0, 0])
x = np.array([1, 4, 5, 6])
y = np.array([9, 6, 5, 9])
mr, mp = mstats.pearsonr(mx, my)
r, p = stats.pearsonr(x, y)
assert_equal(mr, r)
assert_equal(mp, p)
def test_spearmanr(self):
# Tests some computations of Spearman's rho
(x, y) = ([5.05,6.75,3.21,2.66], [1.65,2.64,2.64,6.95])
assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
(x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan])
(x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan]
y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan]
(x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
# Next test is to make sure calculation uses sufficient precision.
# The denominator's value is ~n^3 and used to be represented as an
# int. 2000**3 > 2**32 so these arrays would cause overflow on
# some machines.
x = list(range(2000))
y = list(range(2000))
y[0], y[9] = y[9], y[0]
y[10], y[434] = y[434], y[10]
y[435], y[1509] = y[1509], y[435]
# rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1))
# = 1 - (1 / 500)
# = 0.998
assert_almost_equal(mstats.spearmanr(x,y)[0], 0.998)
# test for namedtuple attributes
res = mstats.spearmanr(x, y)
attributes = ('correlation', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_spearmanr_alternative(self):
# check against R
# options(digits=16)
# cor.test(c(2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
# 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7),
# c(22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
# 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4),
# alternative='two.sided', method='spearman')
x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
r_exp = 0.6887298747763864 # from cor.test
r, p = mstats.spearmanr(x, y)
assert_allclose(r, r_exp)
assert_allclose(p, 0.004519192910756)
r, p = mstats.spearmanr(x, y, alternative='greater')
assert_allclose(r, r_exp)
assert_allclose(p, 0.002259596455378)
r, p = mstats.spearmanr(x, y, alternative='less')
assert_allclose(r, r_exp)
assert_allclose(p, 0.9977404035446)
# intuitive test (with obvious positive correlation)
rng = np.random.default_rng(9607138567)
n = 100
x = np.linspace(0, 5, n)
y = 0.1*x + rng.random(n) # y is positively correlated w/ x
stat1, p1 = mstats.spearmanr(x, y)
stat2, p2 = mstats.spearmanr(x, y, alternative="greater")
assert_allclose(p2, p1 / 2) # positive correlation -> small p
stat3, p3 = mstats.spearmanr(x, y, alternative="less")
assert_allclose(p3, 1 - p1 / 2) # positive correlation -> large p
assert stat1 == stat2 == stat3
with pytest.raises(ValueError, match="alternative must be 'less'..."):
mstats.spearmanr(x, y, alternative="ekki-ekki")
@pytest.mark.skipif(platform.machine() == 'ppc64le',
reason="fails/crashes on ppc64le")
def test_kendalltau(self):
# check case with maximum disorder and p=1
x = ma.array(np.array([9, 2, 5, 6]))
y = ma.array(np.array([4, 7, 9, 11]))
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = [0.0, 1.0]
assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
# simple case without ties
x = ma.array(np.arange(10))
y = ma.array(np.arange(10))
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = [1.0, 5.511463844797e-07]
assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
# check exception in case of invalid method keyword
assert_raises(ValueError, mstats.kendalltau, x, y, method='banana')
# swap a couple of values
b = y[1]
y[1] = y[2]
y[2] = b
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = [0.9555555555555556, 5.511463844797e-06]
assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
# swap a couple more
b = y[5]
y[5] = y[6]
y[6] = b
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = [0.9111111111111111, 2.976190476190e-05]
assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
# same in opposite direction
x = ma.array(np.arange(10))
y = ma.array(np.arange(10)[::-1])
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = [-1.0, 5.511463844797e-07]
assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
# swap a couple of values
b = y[1]
y[1] = y[2]
y[2] = b
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = [-0.9555555555555556, 5.511463844797e-06]
assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
# swap a couple more
b = y[5]
y[5] = y[6]
y[6] = b
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = [-0.9111111111111111, 2.976190476190e-05]
assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
# Tests some computations of Kendall's tau
x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66, np.nan])
y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan])
z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan])
assert_almost_equal(np.asarray(mstats.kendalltau(x, y)),
[+0.3333333, 0.75])
assert_almost_equal(np.asarray(mstats.kendalltau(x, y, method='asymptotic')),
[+0.3333333, 0.4969059])
assert_almost_equal(np.asarray(mstats.kendalltau(x, z)),
[-0.5477226, 0.2785987])
#
x = ma.fix_invalid([0, 0, 0, 0, 20, 20, 0, 60, 0, 20,
10, 10, 0, 40, 0, 20, 0, 0, 0, 0, 0, np.nan])
y = ma.fix_invalid([0, 80, 80, 80, 10, 33, 60, 0, 67, 27,
25, 80, 80, 80, 80, 80, 80, 0, 10, 45, np.nan, 0])
result = mstats.kendalltau(x, y)
assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009])
# test for namedtuple attributes
attributes = ('correlation', 'pvalue')
check_named_results(result, attributes, ma=True)
@pytest.mark.skipif(platform.machine() == 'ppc64le',
reason="fails/crashes on ppc64le")
@pytest.mark.slow
def test_kendalltau_large(self):
# make sure internal variable use correct precision with
# larger arrays
x = np.arange(2000, dtype=float)
x = ma.masked_greater(x, 1995)
y = np.arange(2000, dtype=float)
y = np.concatenate((y[1000:], y[:1000]))
assert_(np.isfinite(mstats.kendalltau(x, y)[1]))
def test_kendalltau_seasonal(self):
# Tests the seasonal Kendall tau.
x = [[nan, nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
[4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
[3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan],
[nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]]
x = ma.fix_invalid(x).T
output = mstats.kendalltau_seasonal(x)
assert_almost_equal(output['global p-value (indep)'], 0.008, 3)
assert_almost_equal(output['seasonal p-value'].round(2),
[0.18,0.53,0.20,0.04])
@pytest.mark.parametrize("method", ("exact", "asymptotic"))
@pytest.mark.parametrize("alternative", ("two-sided", "greater", "less"))
def test_kendalltau_mstats_vs_stats(self, method, alternative):
# Test that mstats.kendalltau and stats.kendalltau with
# nan_policy='omit' matches behavior of stats.kendalltau
# Accuracy of the alternatives is tested in stats/tests/test_stats.py
rng = np.random.default_rng(8755235945)
n = 50
x = rng.random(n)
y = rng.random(n)
mask = rng.random(n) > 0.5
x_masked = ma.array(x, mask=mask)
y_masked = ma.array(y, mask=mask)
res_masked = mstats.kendalltau(
x_masked, y_masked, method=method, alternative=alternative)
x_compressed = x_masked.compressed()
y_compressed = y_masked.compressed()
res_compressed = stats.kendalltau(
x_compressed, y_compressed, method=method, alternative=alternative)
x[mask] = np.nan
y[mask] = np.nan
res_nan = stats.kendalltau(
x, y, method=method, nan_policy='omit', alternative=alternative)
assert_allclose(res_masked, res_compressed)
assert_allclose(res_nan, res_compressed)
def test_kendall_p_exact_medium(self):
# Test for the exact method with medium samples (some n >= 171)
# expected values generated using SymPy
expectations = {(100, 2393): 0.62822615287956040664,
(101, 2436): 0.60439525773513602669,
(170, 0): 2.755801935583541e-307,
(171, 0): 0.0,
(171, 1): 2.755801935583541e-307,
(172, 1): 0.0,
(200, 9797): 0.74753983745929675209,
(201, 9656): 0.40959218958120363618}
for nc, expected in expectations.items():
res = _mstats_basic._kendall_p_exact(nc[0], nc[1])
assert_almost_equal(res, expected)
@pytest.mark.xslow
def test_kendall_p_exact_large(self):
# Test for the exact method with large samples (n >= 171)
# expected values generated using SymPy
expectations = {(400, 38965): 0.48444283672113314099,
(401, 39516): 0.66363159823474837662,
(800, 156772): 0.42265448483120932055,
(801, 157849): 0.53437553412194416236,
(1600, 637472): 0.84200727400323538419,
(1601, 630304): 0.34465255088058593946}
for nc, expected in expectations.items():
res = _mstats_basic._kendall_p_exact(nc[0], nc[1])
assert_almost_equal(res, expected)
@skip_xp_invalid_arg
# mstats.pointbiserialr returns a NumPy float for the statistic, but converts
# it to a masked array with no masked elements before calling `special.betainc`,
# which won't accept masked arrays when `SCIPY_ARRAY_API=1`.
def test_pointbiserial(self):
x = [1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, -1]
y = [14.8, 13.8, 12.4, 10.1, 7.1, 6.1, 5.8, 4.6, 4.3, 3.5, 3.3, 3.2,
3.0, 2.8, 2.8, 2.5, 2.4, 2.3, 2.1, 1.7, 1.7, 1.5, 1.3, 1.3, 1.2,
1.2, 1.1, 0.8, 0.7, 0.6, 0.5, 0.2, 0.2, 0.1, np.nan]
assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5)
# test for namedtuple attributes
res = mstats.pointbiserialr(x, y)
attributes = ('correlation', 'pvalue')
check_named_results(res, attributes, ma=True)
@skip_xp_invalid_arg
| TestCorr |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-jira/integration_tests/fixtures/data_generator/streams.py | {
"start": 12800,
"end": 13779
} | class ____(ProjectVersions, GeneratorMixin):
"""
https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-project-versions/#api-rest-api-3-version-post
"""
def path(self, **kwargs) -> str:
return "version"
def generate(self):
projects_stream = Projects(authenticator=self._session.auth, domain=self._domain)
for project in projects_stream.read_records(sync_mode=SyncMode.full_refresh):
for index in range(random.randrange(6)):
payload = json.dumps(
{
"archived": False,
"releaseDate": "2010-07-06",
"name": f"New Version {index}",
"description": "An excellent version",
"projectId": project.get("id"),
"released": True,
}
)
self.generate_record(payload)
| ProjectVersionsGenerator |
python | wandb__wandb | wandb/sdk/lib/retry.py | {
"start": 1057,
"end": 7285
} | class ____(Generic[_R]):
"""Create a retryable version of a function.
Calling this will call the passed function, retrying if any exceptions in
retryable_exceptions are caught, with exponential backoff.
"""
MAX_SLEEP_SECONDS = 5 * 60
def __init__(
self,
call_fn: Callable[..., _R],
retry_timedelta: Optional[datetime.timedelta] = None,
retry_cancel_event: Optional[threading.Event] = None,
num_retries: Optional[int] = None,
check_retry_fn: CheckRetryFnType = lambda e: True,
retryable_exceptions: Optional[Tuple[Type[Exception], ...]] = None,
error_prefix: str = "Network error",
retry_callback: Optional[Callable[[int, str], Any]] = None,
) -> None:
self._call_fn = call_fn
self._check_retry_fn = check_retry_fn
self._error_prefix = error_prefix
self._last_print = datetime.datetime.now() - datetime.timedelta(minutes=1)
self._retry_timedelta = retry_timedelta
self._retry_cancel_event = retry_cancel_event
self._num_retries = num_retries
if retryable_exceptions is not None:
self._retryable_exceptions = retryable_exceptions
else:
self._retryable_exceptions = (TransientError,)
self.retry_callback = retry_callback
self._num_iter = 0
def _sleep_check_cancelled(
self, wait_seconds: float, cancel_event: Optional[threading.Event]
) -> bool:
if not cancel_event:
SLEEP_FN(wait_seconds)
return False
cancelled = cancel_event.wait(wait_seconds)
return cancelled
@property
def num_iters(self) -> int:
"""The number of iterations the previous __call__ retried."""
return self._num_iter
def __call__(
self,
*args: Any,
num_retries: Optional[int] = None,
retry_timedelta: Optional[datetime.timedelta] = None,
retry_sleep_base: Optional[float] = None,
retry_cancel_event: Optional[threading.Event] = None,
check_retry_fn: Optional[CheckRetryFnType] = None,
**kwargs: Any,
) -> _R:
"""Call the wrapped function, with retries.
Args:
num_retries: The number of retries after which to give up.
retry_timedelta: An amount of time after which to give up.
retry_sleep_base: Number of seconds to sleep for the first retry.
This is used as the base for exponential backoff.
retry_cancel_event: An event that causes this to raise
a RetryCancelledException on the next attempted retry.
check_retry_fn: A custom check for deciding whether an exception
should be retried. Retrying is prevented if this returns a falsy
value, even if more retries are left. This may also return a
timedelta that represents a shorter timeout: retrying is
prevented if the value is less than the amount of time that has
passed since the last timedelta was returned.
"""
if os.environ.get("WANDB_TEST"):
max_retries = 0
elif num_retries is not None:
max_retries = num_retries
elif self._num_retries is not None:
max_retries = self._num_retries
else:
max_retries = 1000000
if retry_timedelta is not None:
timeout = retry_timedelta
elif self._retry_timedelta is not None:
timeout = self._retry_timedelta
else:
timeout = datetime.timedelta(days=365)
if retry_sleep_base is not None:
initial_sleep = retry_sleep_base
else:
initial_sleep = 1
retry_loop = _RetryLoop(
max_retries=max_retries,
timeout=timeout,
initial_sleep=initial_sleep,
max_sleep=self.MAX_SLEEP_SECONDS,
cancel_event=retry_cancel_event or self._retry_cancel_event,
retry_check=check_retry_fn or self._check_retry_fn,
)
start_time = NOW_FN()
self._num_iter = 0
while True:
try:
result = self._call_fn(*args, **kwargs)
except self._retryable_exceptions as e:
if not retry_loop.should_retry(e):
raise
if self._num_iter == 2:
logger.info("Retry attempt failed:", exc_info=e)
self._print_entered_retry_loop(e)
retry_loop.wait_before_retry()
self._num_iter += 1
else:
if self._num_iter > 2:
self._print_recovered(start_time)
return result
def _print_entered_retry_loop(self, exception: Exception) -> None:
"""Emit a message saying we've begun retrying.
Either calls the retry callback or prints a warning to console.
Args:
exception: The most recent exception we will retry.
"""
from requests import HTTPError
if (
isinstance(exception, HTTPError)
and exception.response is not None
and self.retry_callback is not None
):
self.retry_callback(
exception.response.status_code,
exception.response.text,
)
else:
wandb.termlog(
f"{self._error_prefix}"
+ f" ({exception.__class__.__name__}), entering retry loop."
)
def _print_recovered(self, start_time: datetime.datetime) -> None:
"""Emit a message saying we've recovered after retrying.
Args:
start_time: When we started retrying.
"""
if not self.retry_callback:
return
now = NOW_FN()
if now - self._last_print < datetime.timedelta(minutes=1):
return
self._last_print = now
time_to_recover = now - start_time
self.retry_callback(
200,
(
f"{self._error_prefix} resolved after"
f" {time_to_recover}, resuming normal operation."
),
)
| Retry |
python | sphinx-doc__sphinx | sphinx/util/parallel.py | {
"start": 628,
"end": 1234
} | class ____:
"""Has the same interface as ParallelTasks, but executes tasks directly."""
def __init__(self, nproc: int = 1) -> None:
pass
def add_task(
self,
task_func: Callable[[Any], Any] | Callable[[], Any],
arg: Any = None,
result_func: Callable[[Any], Any] | None = None,
) -> None:
if arg is not None:
res = task_func(arg) # type: ignore[call-arg]
else:
res = task_func() # type: ignore[call-arg]
if result_func:
result_func(res)
def join(self) -> None:
pass
| SerialTasks |
python | allegroai__clearml | clearml/backend_interface/task/models.py | {
"start": 432,
"end": 1026
} | class ____(UserList):
def __init__(self, models_dict: Any) -> None:
# noqa: F821
self._models = models_dict
super(ModelsList, self).__init__(models_dict.values())
def __getitem__(self, item: Any) -> Any:
if isinstance(item, str):
return self._models[item]
return super(ModelsList, self).__getitem__(item)
def get(self, key: Any, default: Any = None) -> Any:
try:
return self[key]
except KeyError:
return default
def keys(self) -> KeysView:
return self._models.keys()
| ModelsList |
python | pypa__pip | src/pip/_internal/network/download.py | {
"start": 4614,
"end": 5250
} | class ____:
"""Stores the state of a single link download."""
link: Link
output_file: BinaryIO
size: int | None
bytes_received: int = 0
reattempts: int = 0
def is_incomplete(self) -> bool:
return bool(self.size is not None and self.bytes_received < self.size)
def write_chunk(self, data: bytes) -> None:
self.bytes_received += len(data)
self.output_file.write(data)
def reset_file(self) -> None:
"""Delete any saved data and reset progress to zero."""
self.output_file.seek(0)
self.output_file.truncate()
self.bytes_received = 0
| _FileDownload |
python | pydantic__pydantic | tests/mypy/modules/plugin_optional_inheritance.py | {
"start": 108,
"end": 155
} | class ____(BaseModel):
foo: Optional[Foo]
| Bar |
python | ipython__ipython | IPython/lib/demo.py | {
"start": 6537,
"end": 19623
} | class ____:
re_stop = re_mark(r'-*\s?stop\s?-*')
re_silent = re_mark('silent')
re_auto = re_mark('auto')
re_auto_all = re_mark('auto_all')
def __init__(self,src,title='',arg_str='',auto_all=None, format_rst=False,
formatter='terminal', style='default'):
"""Make a new demo object. To run the demo, simply call the object.
See the module docstring for full details and an example (you can use
IPython.Demo? in IPython to see it).
Inputs:
- src is either a file, or file-like object, or a
string that can be resolved to a filename.
Optional inputs:
- title: a string to use as the demo name. Of most use when the demo
you are making comes from an object that has no filename, or if you
want an alternate denotation distinct from the filename.
- arg_str(''): a string of arguments, internally converted to a list
just like sys.argv, so the demo script can see a similar
environment.
- auto_all(None): global flag to run all blocks automatically without
confirmation. This attribute overrides the block-level tags and
applies to the whole demo. It is an attribute of the object, and
can be changed at runtime simply by reassigning it to a boolean
value.
- format_rst(False): a bool to enable comments and doc strings
formatting with pygments rst lexer
- formatter('terminal'): a string of pygments formatter name to be
used. Useful values for terminals: terminal, terminal256,
terminal16m
- style('default'): a string of pygments style name to be used.
"""
if hasattr(src, "read"):
# It seems to be a file or a file-like object
self.fname = "from a file-like object"
if title == '':
self.title = "from a file-like object"
else:
self.title = title
else:
# Assume it's a string or something that can be converted to one
self.fname = src
if title == '':
(filepath, filename) = os.path.split(src)
self.title = filename
else:
self.title = title
self.sys_argv = [src] + shlex.split(arg_str)
self.auto_all = auto_all
self.src = src
try:
ip = get_ipython() # this is in builtins whenever IPython is running
self.inside_ipython = True
except NameError:
self.inside_ipython = False
if self.inside_ipython:
# get a few things from ipython. While it's a bit ugly design-wise,
# it ensures that things like color scheme and the like are always in
# sync with the ipython mode being used. This class is only meant to
# be used inside ipython anyways, so it's OK.
self.ip_ns = ip.user_ns
self.ip_colorize = ip.pycolorize
self.ip_showtb = ip.showtraceback
self.ip_run_cell = ip.run_cell
self.shell = ip
self.formatter = pygments.formatters.get_formatter_by_name(formatter,
style=style)
self.python_lexer = pygments.lexers.get_lexer_by_name("py3")
self.format_rst = format_rst
if format_rst:
self.rst_lexer = pygments.lexers.get_lexer_by_name("rst")
# load user data and initialize data structures
self.reload()
def fload(self):
"""Load file object."""
# read data and parse into blocks
if hasattr(self, 'fobj') and self.fobj is not None:
self.fobj.close()
if hasattr(self.src, "read"):
# It seems to be a file or a file-like object
self.fobj = self.src
else:
# Assume it's a string or something that can be converted to one
self.fobj = openpy.open(self.fname)
def reload(self):
"""Reload source from disk and initialize state."""
self.fload()
self.src = "".join(openpy.strip_encoding_cookie(self.fobj))
src_b = [b.strip() for b in self.re_stop.split(self.src) if b]
self._silent = [bool(self.re_silent.findall(b)) for b in src_b]
self._auto = [bool(self.re_auto.findall(b)) for b in src_b]
# if auto_all is not given (def. None), we read it from the file
if self.auto_all is None:
self.auto_all = bool(self.re_auto_all.findall(src_b[0]))
else:
self.auto_all = bool(self.auto_all)
# Clean the sources from all markup so it doesn't get displayed when
# running the demo
src_blocks = []
auto_strip = lambda s: self.re_auto.sub('',s)
for i,b in enumerate(src_b):
if self._auto[i]:
src_blocks.append(auto_strip(b))
else:
src_blocks.append(b)
# remove the auto_all marker
src_blocks[0] = self.re_auto_all.sub('',src_blocks[0])
self.nblocks = len(src_blocks)
self.src_blocks = src_blocks
# also build syntax-highlighted source
self.src_blocks_colored = list(map(self.highlight,self.src_blocks))
# ensure clean namespace and seek offset
self.reset()
def reset(self):
"""Reset the namespace and seek pointer to restart the demo"""
self.user_ns = {}
self.finished = False
self.block_index = 0
def _validate_index(self,index):
if index<0 or index>=self.nblocks:
raise ValueError('invalid block index %s' % index)
def _get_index(self,index):
"""Get the current block index, validating and checking status.
Returns None if the demo is finished"""
if index is None:
if self.finished:
print('Demo finished. Use <demo_name>.reset() if you want to rerun it.')
return None
index = self.block_index
else:
self._validate_index(index)
return index
def seek(self,index):
"""Move the current seek pointer to the given block.
You can use negative indices to seek from the end, with identical
semantics to those of Python lists."""
if index<0:
index = self.nblocks + index
self._validate_index(index)
self.block_index = index
self.finished = False
def back(self,num=1):
"""Move the seek pointer back num blocks (default is 1)."""
self.seek(self.block_index-num)
def jump(self,num=1):
"""Jump a given number of blocks relative to the current one.
The offset can be positive or negative, defaults to 1."""
self.seek(self.block_index+num)
def again(self):
"""Move the seek pointer back one block and re-execute."""
self.back(1)
self()
def edit(self,index=None):
"""Edit a block.
If no number is given, use the last block executed.
This edits the in-memory copy of the demo, it does NOT modify the
original source file. If you want to do that, simply open the file in
an editor and use reload() when you make changes to the file. This
method is meant to let you change a block during a demonstration for
explanatory purposes, without damaging your original script."""
index = self._get_index(index)
if index is None:
return
# decrease the index by one (unless we're at the very beginning), so
# that the default demo.edit() call opens up the sblock we've last run
if index>0:
index -= 1
filename = self.shell.mktempfile(self.src_blocks[index])
self.shell.hooks.editor(filename, 1)
with open(Path(filename), "r", encoding="utf-8") as f:
new_block = f.read()
# update the source and colored block
self.src_blocks[index] = new_block
self.src_blocks_colored[index] = self.highlight(new_block)
self.block_index = index
# call to run with the newly edited index
self()
def show(self,index=None):
"""Show a single block on screen"""
index = self._get_index(index)
if index is None:
return
print(self.marquee('<%s> block # %s (%s remaining)' %
(self.title,index,self.nblocks-index-1)))
print(self.src_blocks_colored[index])
sys.stdout.flush()
def show_all(self):
"""Show entire demo on screen, block by block"""
fname = self.title
title = self.title
nblocks = self.nblocks
silent = self._silent
marquee = self.marquee
for index,block in enumerate(self.src_blocks_colored):
if silent[index]:
print(marquee('<%s> SILENT block # %s (%s remaining)' %
(title,index,nblocks-index-1)))
else:
print(marquee('<%s> block # %s (%s remaining)' %
(title,index,nblocks-index-1)))
print(block, end=' ')
sys.stdout.flush()
def run_cell(self,source):
"""Execute a string with one or more lines of code"""
exec(source, self.user_ns)
def __call__(self,index=None):
"""run a block of the demo.
If index is given, it should be an integer >=1 and <= nblocks. This
means that the calling convention is one off from typical Python
lists. The reason for the inconsistency is that the demo always
prints 'Block n/N, and N is the total, so it would be very odd to use
zero-indexing here."""
index = self._get_index(index)
if index is None:
return
try:
marquee = self.marquee
next_block = self.src_blocks[index]
self.block_index += 1
if self._silent[index]:
print(marquee('Executing silent block # %s (%s remaining)' %
(index,self.nblocks-index-1)))
else:
self.pre_cmd()
self.show(index)
if self.auto_all or self._auto[index]:
print(marquee('output:'))
else:
print(marquee('Press <q> to quit, <Enter> to execute...'), end=' ')
ans = py3compat.input().strip()
if ans:
print(marquee('Block NOT executed'))
return
try:
save_argv = sys.argv
sys.argv = self.sys_argv
self.run_cell(next_block)
self.post_cmd()
finally:
sys.argv = save_argv
except:
if self.inside_ipython:
self.ip_showtb(filename=self.fname)
else:
if self.inside_ipython:
self.ip_ns.update(self.user_ns)
if self.block_index == self.nblocks:
mq1 = self.marquee('END OF DEMO')
if mq1:
# avoid spurious print if empty marquees are used
print()
print(mq1)
print(self.marquee('Use <demo_name>.reset() if you want to rerun it.'))
self.finished = True
# These methods are meant to be overridden by subclasses who may wish to
# customize the behavior of of their demos.
def marquee(self,txt='',width=78,mark='*'):
"""Return the input string centered in a 'marquee'."""
return marquee(txt,width,mark)
def pre_cmd(self):
"""Method called before executing each block."""
pass
def post_cmd(self):
"""Method called after executing each block."""
pass
def highlight(self, block):
"""Method called on each block to highlight it content"""
tokens = pygments.lex(block, self.python_lexer)
if self.format_rst:
from pygments.token import Token
toks = []
for token in tokens:
if token[0] == Token.String.Doc and len(token[1]) > 6:
toks += pygments.lex(token[1][:3], self.python_lexer)
# parse doc string content by rst lexer
toks += pygments.lex(token[1][3:-3], self.rst_lexer)
toks += pygments.lex(token[1][-3:], self.python_lexer)
elif token[0] == Token.Comment.Single:
toks.append((Token.Comment.Single, token[1][0]))
# parse comment content by rst lexer
# remove the extra newline added by rst lexer
toks += list(pygments.lex(token[1][1:], self.rst_lexer))[:-1]
else:
toks.append(token)
tokens = toks
return pygments.format(tokens, self.formatter)
| Demo |
python | langchain-ai__langchain | libs/core/tests/unit_tests/language_models/chat_models/test_base.py | {
"start": 41192,
"end": 42102
} | class ____:
"""Mock response for testing _generate_response_from_error."""
def __init__(
self,
status_code: int = 400,
headers: dict[str, str] | None = None,
json_data: dict[str, Any] | None = None,
json_raises: type[Exception] | None = None,
text_raises: type[Exception] | None = None,
):
self.status_code = status_code
self.headers = headers or {}
self._json_data = json_data
self._json_raises = json_raises
self._text_raises = text_raises
def json(self) -> dict[str, Any]:
if self._json_raises:
msg = "JSON parsing failed"
raise self._json_raises(msg)
return self._json_data or {}
@property
def text(self) -> str:
if self._text_raises:
msg = "Text access failed"
raise self._text_raises(msg)
return ""
| MockResponse |
python | pytorch__pytorch | test/distributed/test_c10d_gloo.py | {
"start": 100823,
"end": 102129
} | class ____(
test_c10d_common.ProcessGroupWithDispatchedCollectivesTests
):
@requires_gloo()
def test_collectives(self):
self._test_collectives(backend="gloo")
@requires_gloo()
def test_allreduce_coalesced(self):
self._test_allreduce_coalesced(backend="gloo")
@requires_gloo()
def test_all_to_all_single(self):
self._test_all_to_all_single(backend="gloo")
@requires_gloo()
def test_allgather_coalesced(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
"gloo",
world_size=self.world_size,
rank=self.rank,
store=store,
)
input_tensor = torch.ones(10, 10, dtype=torch.float32)
output_tensor_list = [torch.zeros_like(input_tensor)]
dist.all_gather_coalesced([output_tensor_list], [input_tensor])
self.assertEqual(output_tensor_list, [input_tensor])
@requires_gloo()
def test_monitored_barrier(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
"gloo",
world_size=self.world_size,
rank=self.rank,
store=store,
)
dist.monitored_barrier()
| GlooProcessGroupWithDispatchedCollectivesTests |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultTypeAlias2.py | {
"start": 2899,
"end": 3547
} | class ____(Generic[T1, T2, *Ts1]): ...
TA_TA = ClassTA[T1, T2, *Ts1]
def func5(
ta1: TA_TA,
ta2: TA_TA[int],
ta3: TA_TA[int, float],
ta4: TA_TA[int, float, *tuple[None, ...]],
):
reveal_type(ta1, expected_text="ClassTA[str, str, str, str]")
reveal_type(ta2, expected_text="ClassTA[int, int, int, int]")
reveal_type(ta3, expected_text="ClassTA[int, float, int, float]")
reveal_type(ta4, expected_text="ClassTA[int, float, *tuple[None, ...]]")
# This should generate an error because Ts1 depends on T2.
# It should also generate a second error because T2 follows a TypeVarTuple.
TA_TB = tuple[T1, *Ts1, T2]
| ClassTA |
python | ray-project__ray | python/ray/dag/tests/experimental/test_compiled_graphs.py | {
"start": 45352,
"end": 48555
} | class ____:
"""
Leaf nodes are not allowed right now because the exception thrown by the leaf
node will not be propagated to the driver and silently ignored, which is undesired.
"""
LEAF_NODE_EXCEPTION_TEMPLATE = (
"Compiled DAG doesn't support leaf nodes, i.e., nodes that don't have "
"downstream nodes and are not output nodes. There are {num_leaf_nodes} "
"leaf nodes in the DAG. Please add the outputs of"
)
def test_leaf_node_one_actor(self, ray_start_regular):
"""
driver -> a.inc
|
-> a.inc -> driver
"""
a = Actor.remote(0)
with InputNode() as i:
input_data = a.read_input.bind(i)
a.inc.bind(input_data) # branch1: leaf node
branch2 = a.inc.bind(input_data)
dag = MultiOutputNode([branch2])
with pytest.raises(
ValueError,
match=TestLeafNode.LEAF_NODE_EXCEPTION_TEMPLATE.format(num_leaf_nodes=1),
):
dag.experimental_compile()
def test_leaf_node_two_actors(self, ray_start_regular):
"""
driver -> b.inc -> a.inc --
| | |
| -> b.inc ----> driver
|
-> a.inc (branch 1)
"""
a = Actor.remote(0)
b = Actor.remote(100)
with InputNode() as i:
a.inc.bind(i) # branch1: leaf node
branch2 = b.inc.bind(i)
dag = MultiOutputNode([a.inc.bind(branch2), b.inc.bind(branch2)])
with pytest.raises(
ValueError,
match=TestLeafNode.LEAF_NODE_EXCEPTION_TEMPLATE.format(num_leaf_nodes=1),
):
dag.experimental_compile()
def test_multi_leaf_nodes(self, ray_start_regular):
"""
driver -> a.inc -> a.inc (branch 1, leaf node)
| |
| -> a.inc -> driver
|
-> a.inc (branch 2, leaf node)
"""
a = Actor.remote(0)
with InputNode() as i:
dag = a.inc.bind(i)
a.inc.bind(dag) # branch1: leaf node
a.inc.bind(i) # branch2: leaf node
dag = MultiOutputNode([a.inc.bind(dag)])
with pytest.raises(
ValueError,
match=TestLeafNode.LEAF_NODE_EXCEPTION_TEMPLATE.format(num_leaf_nodes=2),
):
dag.experimental_compile()
def test_two_returns_first(self, ray_start_regular):
a = Actor.remote(0)
with InputNode() as i:
o1, o2 = a.return_two.bind(i)
dag = o1
with pytest.raises(
ValueError,
match=TestLeafNode.LEAF_NODE_EXCEPTION_TEMPLATE.format(num_leaf_nodes=1),
):
dag.experimental_compile()
def test_two_returns_second(self, ray_start_regular):
a = Actor.remote(0)
with InputNode() as i:
o1, o2 = a.return_two.bind(i)
dag = o2
with pytest.raises(
ValueError,
match=TestLeafNode.LEAF_NODE_EXCEPTION_TEMPLATE.format(num_leaf_nodes=1),
):
dag.experimental_compile()
| TestLeafNode |
python | modin-project__modin | modin/core/dataframe/algebra/default2pandas/datetime.py | {
"start": 893,
"end": 1343
} | class ____(SeriesDefault):
"""Builder for default-to-pandas methods which is executed under datetime accessor."""
@classmethod
def frame_wrapper(cls, df):
"""
Get datetime accessor of the passed frame.
Parameters
----------
df : pandas.DataFrame
Returns
-------
pandas.core.indexes.accessors.DatetimeProperties
"""
return df.squeeze(axis=1).dt
| DateTimeDefault |
python | django__django | django/contrib/redirects/models.py | {
"start": 131,
"end": 1075
} | class ____(models.Model):
site = models.ForeignKey(Site, models.CASCADE, verbose_name=_("site"))
old_path = models.CharField(
_("redirect from"),
max_length=200,
db_index=True,
help_text=_(
"This should be an absolute path, excluding the domain name. Example: "
"“/events/search/”."
),
)
new_path = models.CharField(
_("redirect to"),
max_length=200,
blank=True,
help_text=_(
"This can be either an absolute path (as above) or a full URL "
"starting with a scheme such as “https://”."
),
)
class Meta:
verbose_name = _("redirect")
verbose_name_plural = _("redirects")
db_table = "django_redirect"
unique_together = [["site", "old_path"]]
ordering = ["old_path"]
def __str__(self):
return "%s ---> %s" % (self.old_path, self.new_path)
| Redirect |
python | facebookresearch__faiss | tests/test_fast_scan_ivf.py | {
"start": 13106,
"end": 13164
} | class ____(TestIVFImplem12):
IMPLEM = 11
| TestIVFImplem11 |
python | scrapy__scrapy | tests/test_http2_client_protocol.py | {
"start": 3558,
"end": 3726
} | class ____(LeafResource, PostDataJsonMixin):
def render_POST(self, request: TxRequest):
return self.make_response(request, Data.EXTRA_SMALL)
| PostDataJsonSmall |
python | walkccc__LeetCode | solutions/1008. Construct Binary Search Tree from Preorder Traversal/1008.py | {
"start": 0,
"end": 547
} | class ____:
def bstFromPreorder(self, preorder: list[int]) -> TreeNode | None:
root = TreeNode(preorder[0])
stack = [root]
for i in range(1, len(preorder)):
parent = stack[-1]
child = TreeNode(preorder[i])
# Adjust the parent.
while stack and stack[-1].val < child.val:
parent = stack.pop()
# Create parent-child link according to BST property.
if parent.val > child.val:
parent.left = child
else:
parent.right = child
stack.append(child)
return root
| Solution |
python | django__django | tests/template_tests/syntax_tests/test_regroup.py | {
"start": 143,
"end": 4777
} | class ____(SimpleTestCase):
@setup(
{
"regroup01": ""
"{% regroup data by bar as grouped %}"
"{% for group in grouped %}"
"{{ group.grouper }}:"
"{% for item in group.list %}"
"{{ item.foo }}"
"{% endfor %},"
"{% endfor %}"
}
)
def test_regroup01(self):
output = self.engine.render_to_string(
"regroup01",
{
"data": [
{"foo": "c", "bar": 1},
{"foo": "d", "bar": 1},
{"foo": "a", "bar": 2},
{"foo": "b", "bar": 2},
{"foo": "x", "bar": 3},
],
},
)
self.assertEqual(output, "1:cd,2:ab,3:x,")
@setup(
{
"regroup02": ""
"{% regroup data by bar as grouped %}"
"{% for group in grouped %}"
"{{ group.grouper }}:"
"{% for item in group.list %}"
"{{ item.foo }}"
"{% endfor %}"
"{% endfor %}"
}
)
def test_regroup02(self):
"""
Test for silent failure when target variable isn't found
"""
output = self.engine.render_to_string("regroup02", {})
self.assertEqual(output, "")
@setup(
{
"regroup03": ""
'{% regroup data by at|date:"m" as grouped %}'
"{% for group in grouped %}"
"{{ group.grouper }}:"
"{% for item in group.list %}"
'{{ item.at|date:"d" }}'
"{% endfor %},"
"{% endfor %}"
}
)
def test_regroup03(self):
"""
Regression tests for #17675
The date template filter has expects_localtime = True
"""
output = self.engine.render_to_string(
"regroup03",
{
"data": [
{"at": date(2012, 2, 14)},
{"at": date(2012, 2, 28)},
{"at": date(2012, 7, 4)},
],
},
)
self.assertEqual(output, "02:1428,07:04,")
@setup(
{
"regroup04": ""
'{% regroup data by bar|join:"" as grouped %}'
"{% for group in grouped %}"
"{{ group.grouper }}:"
"{% for item in group.list %}"
"{{ item.foo|first }}"
"{% endfor %},"
"{% endfor %}"
}
)
def test_regroup04(self):
"""
The join template filter has needs_autoescape = True
"""
output = self.engine.render_to_string(
"regroup04",
{
"data": [
{"foo": "x", "bar": ["ab", "c"]},
{"foo": "y", "bar": ["a", "bc"]},
{"foo": "z", "bar": ["a", "d"]},
],
},
)
self.assertEqual(output, "abc:xy,ad:z,")
# Test syntax errors
@setup({"regroup05": "{% regroup data by bar as %}"})
def test_regroup05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("regroup05")
@setup({"regroup06": "{% regroup data by bar thisaintright grouped %}"})
def test_regroup06(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("regroup06")
@setup({"regroup07": "{% regroup data thisaintright bar as grouped %}"})
def test_regroup07(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("regroup07")
@setup({"regroup08": "{% regroup data by bar as grouped toomanyargs %}"})
def test_regroup08(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("regroup08")
@setup(
{
"regroup_unpack": "{% regroup data by bar as grouped %}"
"{% for grouper, group in grouped %}"
"{{ grouper }}:"
"{% for item in group %}"
"{{ item.foo }}"
"{% endfor %},"
"{% endfor %}"
}
)
def test_regroup_unpack(self):
output = self.engine.render_to_string(
"regroup_unpack",
{
"data": [
{"foo": "c", "bar": 1},
{"foo": "d", "bar": 1},
{"foo": "a", "bar": 2},
{"foo": "b", "bar": 2},
{"foo": "x", "bar": 3},
],
},
)
self.assertEqual(output, "1:cd,2:ab,3:x,")
| RegroupTagTests |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/bigquery.py | {
"start": 3167,
"end": 3324
} | class ____(enum.Enum):
"""Action to take if the resource exist."""
IGNORE = "ignore"
LOG = "log"
FAIL = "fail"
SKIP = "skip"
| IfExistAction |
python | aio-libs__aiohttp | tests/test_pytest_plugin.py | {
"start": 7311,
"end": 8014
} | class ____(TestClient):
pass
@pytest.fixture
def aiohttp_client_cls(request):
if request.node.get_closest_marker('rest') is not None:
return RESTfulClient
elif request.node.get_closest_marker('graphql') is not None:
return GraphQLClient
return TestClient
@pytest.mark.rest
async def test_rest(aiohttp_client) -> None:
client = await aiohttp_client(Application())
assert isinstance(client, RESTfulClient)
@pytest.mark.graphql
async def test_graphql(aiohttp_client) -> None:
client = await aiohttp_client(Application())
assert isinstance(client, GraphQLClient)
"""
)
result = testdir.runpytest()
result.assert_outcomes(passed=2)
| GraphQLClient |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 29889,
"end": 30005
} | class ____(Structure):
_fields_ = (("symbol_index", p_uint32), ("module_index", p_uint32))
| dylib_table_of_contents |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/datafusion.py | {
"start": 1263,
"end": 1481
} | class ____(BaseGoogleLink):
"""Helper class for constructing Data Fusion Instance link."""
name = "Data Fusion Instance"
key = "instance_conf"
format_str = DATAFUSION_INSTANCE_LINK
| DataFusionInstanceLink |
python | pydata__xarray | xarray/tests/test_datatree.py | {
"start": 78130,
"end": 80413
} | class ____:
def test_reduce_method(self) -> None:
ds = xr.Dataset({"a": ("x", [False, True, False])})
dt = DataTree.from_dict({"/": ds, "/results": ds})
expected = DataTree.from_dict({"/": ds.any(), "/results": ds.any()})
result = dt.any()
assert_equal(result, expected)
def test_nan_reduce_method(self) -> None:
ds = xr.Dataset({"a": ("x", [1, 2, 3])})
dt = DataTree.from_dict({"/": ds, "/results": ds})
expected = DataTree.from_dict({"/": ds.mean(), "/results": ds.mean()})
result = dt.mean()
assert_equal(result, expected)
def test_cum_method(self) -> None:
ds = xr.Dataset({"a": ("x", [1, 2, 3])})
dt = DataTree.from_dict({"/": ds, "/results": ds})
expected = DataTree.from_dict(
{
"/": ds.cumsum(),
"/results": ds.cumsum(),
}
)
result = dt.cumsum()
assert_equal(result, expected)
def test_dim_argument(self) -> None:
dt = DataTree.from_dict(
{
"/a": xr.Dataset({"A": ("x", [1, 2])}),
"/b": xr.Dataset({"B": ("y", [1, 2])}),
}
)
expected = DataTree.from_dict(
{
"/a": xr.Dataset({"A": 1.5}),
"/b": xr.Dataset({"B": 1.5}),
}
)
actual = dt.mean()
assert_equal(expected, actual)
actual = dt.mean(dim=...)
assert_equal(expected, actual)
expected = DataTree.from_dict(
{
"/a": xr.Dataset({"A": 1.5}),
"/b": xr.Dataset({"B": ("y", [1.0, 2.0])}),
}
)
actual = dt.mean("x")
assert_equal(expected, actual)
with pytest.raises(
ValueError,
match=re.escape("Dimension(s) 'invalid' do not exist."),
):
dt.mean("invalid")
def test_subtree(self) -> None:
tree = DataTree.from_dict(
{
"/child": Dataset({"a": ("x", [1, 2])}),
}
)
expected = DataTree(dataset=Dataset({"a": 1.5}), name="child")
actual = tree.children["child"].mean()
assert_identical(expected, actual)
| TestAggregations |
python | celery__celery | t/unit/app/test_amqp.py | {
"start": 247,
"end": 657
} | class ____:
def test_accept_content(self, app):
with app.pool.acquire(block=True) as con:
app.conf.accept_content = ['application/json']
assert app.amqp.TaskConsumer(con).accept == {
'application/json',
}
assert app.amqp.TaskConsumer(con, accept=['json']).accept == {
'application/json',
}
| test_TaskConsumer |
python | pytorch__pytorch | torch/_library/fake_class_registry.py | {
"start": 269,
"end": 2379
} | class ____:
def __init__(
self, wrapped_obj: Any, script_class_name: str, x: Optional[torch.ScriptObject]
):
# Use object.__setattr__ to bypass our custom __setattr__ during initialization
object.__setattr__(self, "wrapped_obj", wrapped_obj)
object.__setattr__(self, "script_class_name", script_class_name)
try:
with _disable_current_modes():
real_obj = copy.deepcopy(x)
except RuntimeError as e:
log.warning( # noqa: G200
"Unable to deepcopy the custom object %s due to %s. "
"Defaulting to the user given object. This might be "
"dangerous as side effects may be directly applied "
"to the object.",
script_class_name,
str(e),
)
real_obj = x
object.__setattr__(self, "real_obj", real_obj)
def __getattribute__(self, name):
try:
return super().__getattribute__(name)
except AttributeError as e:
raise AttributeError(
f"Tried to call __getattr__ with attr '{name}' on a FakeScriptObject, "
"implying that you are calling this inside of a fake kernel. "
"The fake kernel should not depend on the contents of the "
"OpaqueObject at all, so we're erroring out. If you need this"
"functionality, consider creating a custom TorchBind Object instead"
"(but note that this is more difficult)."
) from e
def __setattr__(self, name, value):
raise AttributeError(
f"Tried to call __setattr__ with attr '{name}' on a FakeScriptObject, "
"implying that you are calling this inside of a fake kernel. "
"The fake kernel should not depend on the contents of the "
"OpaqueObject at all, so we're erroring out. If you need this"
"functionality, consider creating a custom TorchBind Object instead"
"(but note that this is more difficult)."
)
| FakeScriptObject |
python | numba__numba | numba/core/untyped_passes.py | {
"start": 17159,
"end": 20095
} | class ____(FunctionPass):
"""A pass to canonicalize loop header by splitting it from function entry.
This is needed for loop-lifting; esp in py3.8
"""
_name = "canonicalize_loop_entry"
_supported_globals = {range, enumerate, zip}
def __init__(self):
FunctionPass.__init__(self)
def run_pass(self, state):
fir = state.func_ir
cfg = compute_cfg_from_blocks(fir.blocks)
status = False
for loop in cfg.loops().values():
if len(loop.entries) == 1:
[entry_label] = loop.entries
if entry_label == cfg.entry_point():
self._split_entry_block(fir, cfg, loop, entry_label)
status = True
fir._reset_analysis_variables()
vlt = postproc.VariableLifetime(fir.blocks)
fir.variable_lifetime = vlt
return status
def _split_entry_block(self, fir, cfg, loop, entry_label):
# Find iterator inputs into the for-loop header
header_block = fir.blocks[loop.header]
deps = set()
for expr in header_block.find_exprs(op="iternext"):
deps.add(expr.value)
# Find the getiter for each iterator
entry_block = fir.blocks[entry_label]
# Find the start of loop entry statement that needs to be included.
startpt = None
list_of_insts = list(entry_block.find_insts(ir.Assign))
for assign in reversed(list_of_insts):
if assign.target in deps:
rhs = assign.value
if isinstance(rhs, ir.Var):
if rhs.is_temp:
deps.add(rhs)
elif isinstance(rhs, ir.Expr):
expr = rhs
if expr.op == 'getiter':
startpt = assign
if expr.value.is_temp:
deps.add(expr.value)
elif expr.op == 'call':
defn = guard(get_definition, fir, expr.func)
if isinstance(defn, ir.Global):
if expr.func.is_temp:
deps.add(expr.func)
elif (isinstance(rhs, ir.Global)
and rhs.value in self._supported_globals):
startpt = assign
if startpt is None:
return
splitpt = entry_block.body.index(startpt)
new_block = entry_block.copy()
new_block.body = new_block.body[splitpt:]
new_block.loc = new_block.body[0].loc
new_label = find_max_label(fir.blocks) + 1
entry_block.body = entry_block.body[:splitpt]
entry_block.append(ir.Jump(new_label, loc=new_block.loc))
fir.blocks[new_label] = new_block
# Rename all labels
fir.blocks = rename_labels(fir.blocks)
@register_pass(mutates_CFG=False, analysis_only=True)
| CanonicalizeLoopEntry |
python | dateutil__dateutil | tests/test_relativedelta.py | {
"start": 28325,
"end": 29326
} | class ____(unittest.TestCase):
"""Test the weeks setter which makes a "smart" update of the days attribute"""
def test_one_day_set_one_week(self):
rd = relativedelta(days=1)
rd.weeks = 1 # add 7 days
self.assertEqual(rd.days, 8)
self.assertEqual(rd.weeks, 1)
def test_minus_one_day_set_one_week(self):
rd = relativedelta(days=-1)
rd.weeks = 1 # add 7 days
self.assertEqual(rd.days, 6)
self.assertEqual(rd.weeks, 0)
def test_height_days_set_minus_one_week(self):
rd = relativedelta(days=8)
rd.weeks = -1 # change from 1 week, 1 day to -1 week, 1 day
self.assertEqual(rd.days, -6)
self.assertEqual(rd.weeks, 0)
def test_minus_height_days_set_minus_one_week(self):
rd = relativedelta(days=-8)
rd.weeks = -1 # does not change anything
self.assertEqual(rd.days, -8)
self.assertEqual(rd.weeks, -1)
# vim:ts=4:sw=4:et
| RelativeDeltaWeeksPropertySetterTest |
python | run-llama__llama_index | llama-index-core/tests/indices/property_graph/test_property_graph.py | {
"start": 536,
"end": 2278
} | class ____(TransformComponent):
"""A mock knowledge graph extractor that extracts a simple relation from a text."""
def __call__(self, nodes: List[BaseNode], **kwargs: Any) -> List[BaseNode]:
entity1 = EntityNode(name="Logan", label="PERSON")
entity2 = EntityNode(name="Canada", label="LOCATION")
relation = Relation(label="BORN_IN", source_id=entity1.id, target_id=entity2.id)
return [
TextNode(
id_="test",
text="Logan was born in Canada",
metadata={
KG_NODES_KEY: [entity1, entity2],
KG_RELATIONS_KEY: [relation],
},
),
]
def test_construction() -> None:
graph_store = SimplePropertyGraphStore()
vector_store = SimpleVectorStore()
kg_extractor = MockKGExtractor()
# test construction
index = PropertyGraphIndex.from_documents(
[Document.example()],
property_graph_store=graph_store,
vector_store=vector_store,
llm=MockLLM(),
embed_model=MockEmbedding(embed_dim=256),
kg_extractors=[kg_extractor],
)
embeddings = vector_store.get("Logan")
assert len(embeddings) == 256
embeddings = vector_store.get("Canada")
assert len(embeddings) == 256
kg_nodes = graph_store.get(ids=["Logan", "Canada"])
assert kg_nodes is not None
assert len(kg_nodes) == 2
assert kg_nodes[0].embedding is None
assert kg_nodes[0].embedding is None
# test inserting a duplicate node (should not insert)
index._insert_nodes_to_vector_index = MagicMock()
index.insert_nodes(kg_extractor([]))
assert index._insert_nodes_to_vector_index.call_count == 0
| MockKGExtractor |
python | numba__numba | numba/core/typing/mathdecl.py | {
"start": 2254,
"end": 2592
} | class ____(ConcreteTemplate):
cases = [
signature(types.float64, types.int64, types.int64),
signature(types.float64, types.uint64, types.uint64),
signature(types.float32, types.float32, types.float32),
signature(types.float64, types.float64, types.float64),
]
@infer_global(math.nextafter)
| Math_hypot |
python | django-guardian__django-guardian | example_project_custom_group/core/models.py | {
"start": 439,
"end": 532
} | class ____(Group, GuardianGroupMixin):
label = models.CharField(max_length=120)
| CustomGroup |
python | streamlit__streamlit | lib/tests/streamlit/web/server/routes_test.py | {
"start": 9629,
"end": 10184
} | class ____(tornado.testing.AsyncHTTPTestCase):
def get_app(self):
return tornado.web.Application(
[
(
r"/(.*)",
AddSlashHandler,
)
]
)
def test_parse_url_path_301(self):
paths = ["/page1"]
responses = [self.fetch(path, follow_redirects=False) for path in paths]
for idx, r in enumerate(responses):
assert r.code == 301
assert r.headers["Location"] == paths[idx] + "/"
| AddSlashHandlerTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/asyncpg.py | {
"start": 10364,
"end": 10434
} | class ____(sqltypes.Integer):
render_bind_cast = True
| AsyncpgInteger |
python | kamyu104__LeetCode-Solutions | Python/find-resultant-array-after-removing-anagrams.py | {
"start": 67,
"end": 516
} | class ____(object):
def removeAnagrams(self, words):
"""
:type words: List[str]
:rtype: List[str]
"""
result = []
prev = None
for x in words:
cnt = collections.Counter(x)
if prev and prev == cnt:
continue
prev = cnt
result.append(x)
return result
# Time: O(n * llogl)
# Space: O(l)
import collections
# sort
| Solution |
python | django__django | django/test/testcases.py | {
"start": 60574,
"end": 62478
} | class ____(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super().__init__()
def _should_handle(self, path):
"""
Check if the path should be handled. Ignore the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url.path) and not self.base_url.netloc
def file_path(self, url):
"""Return the relative path to the file on disk for the given URL."""
relative_url = url.removeprefix(self.base_url.path)
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super().get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
os_rel_path = posixpath.normpath(unquote(os_rel_path))
# Emulate behavior of django.contrib.staticfiles.views.serve() when it
# invokes staticfiles' finders functionality.
# TODO: Modify if/when that internal API is refactored
final_rel_path = os_rel_path.replace("\\", "/").lstrip("/")
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super().__call__(environ, start_response)
| FSFilesHandler |
python | pytorch__pytorch | torch/optim/lr_scheduler.py | {
"start": 12703,
"end": 17612
} | class ____(LRScheduler):
"""Sets the initial learning rate.
The learning rate of each parameter group is set to the initial lr
times a given function. When last_epoch=-1, sets initial lr as lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
lr_lambda (function or list): A function which computes a multiplicative
factor given an integer parameter epoch, or a list of such
functions, one for each group in optimizer.param_groups.
last_epoch (int): The index of last epoch. Default: -1.
Example:
>>> # xdoctest: +SKIP
>>> # Assuming optimizer has two groups.
>>> num_epochs = 100
>>> lambda1 = lambda epoch: epoch // 30
>>> lambda2 = lambda epoch: 0.95**epoch
>>> scheduler = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2])
>>> for epoch in range(num_epochs):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
>>>
>>> # Alternatively, you can use a single lambda function for all groups.
>>> scheduler = LambdaLR(opt, lr_lambda=lambda epoch: epoch // 30)
>>> for epoch in range(num_epochs):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
.. image:: ../scripts/lr_scheduler_images/LambdaLR.png
"""
def __init__(
self,
optimizer: Optimizer,
lr_lambda: Union[Callable[[int], float], list[Callable[[int], float]]],
last_epoch: int = -1,
) -> None: # noqa: D107
self.optimizer = optimizer
self.lr_lambdas: list[Callable[[int], float]]
if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple):
self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups)
else:
if len(lr_lambda) != len(optimizer.param_groups):
raise ValueError(
f"Expected {len(optimizer.param_groups)} lr_lambdas, but got {len(lr_lambda)}"
)
self.lr_lambdas = list(lr_lambda)
super().__init__(optimizer, last_epoch)
@override
def state_dict(self) -> dict[str, Any]:
"""Return the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in ``self.__dict__`` which is not the optimizer.
The learning rate lambda functions will only be saved if they are callable objects
and not if they are functions or lambdas.
When saving or loading the scheduler, please make sure to also save or load the state of the optimizer.
"""
state_dict = {
key: value
for key, value in self.__dict__.items()
if key not in ("optimizer", "lr_lambdas")
}
state_dict["lr_lambdas"] = [None] * len(self.lr_lambdas)
for idx, fn in enumerate(self.lr_lambdas):
if not isinstance(fn, types.FunctionType):
# pyrefly: ignore [unsupported-operation]
state_dict["lr_lambdas"][idx] = fn.__dict__.copy()
return state_dict
@override
def load_state_dict(self, state_dict: dict[str, Any]) -> None:
"""Load the scheduler's state.
When saving or loading the scheduler, please make sure to also save or load the state of the optimizer.
Args:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
lr_lambdas = state_dict.pop("lr_lambdas")
self.__dict__.update(state_dict)
# Restore state_dict keys in order to prevent side effects
# https://github.com/pytorch/pytorch/issues/32756
state_dict["lr_lambdas"] = lr_lambdas
for idx, fn in enumerate(lr_lambdas):
if fn is not None:
self.lr_lambdas[idx].__dict__.update(fn)
@override
def get_lr(self) -> list[float | Tensor]:
r"""Compute the next learning rate for each of the optimizer's
:attr:`~torch.optim.Optimizer.param_groups`.
Scales the :attr:`base_lrs` by the outputs of the :attr:`lr_lambdas` at
:attr:`last_epoch`.
Returns:
list[float | Tensor]: A :class:`list` of learning rates for each of
the optimizer's :attr:`~torch.optim.Optimizer.param_groups` with the
same types as their current ``group["lr"]``\s.
.. note::
If you're trying to inspect the most recent learning rate, use
:meth:`get_last_lr()` instead.
.. note::
The returned :class:`~torch.Tensor`\s are copies, and never alias
the optimizer's ``group["lr"]``\s.
"""
_warn_get_lr_called_within_step(self)
return [
base_lr * lmbda(self.last_epoch)
for lmbda, base_lr in zip(self.lr_lambdas, self.base_lrs, strict=True)
]
| LambdaLR |
python | celery__celery | celery/worker/components.py | {
"start": 1825,
"end": 2860
} | class ____(bootsteps.StartStopStep):
"""Worker starts the event loop."""
requires = (Timer,)
def __init__(self, w, **kwargs):
w.hub = None
super().__init__(w, **kwargs)
def include_if(self, w):
return w.use_eventloop
def create(self, w):
w.hub = get_event_loop()
if w.hub is None:
required_hub = getattr(w._conninfo, 'requires_hub', None)
w.hub = set_event_loop((
required_hub if required_hub else _Hub)(w.timer))
self._patch_thread_primitives(w)
return self
def start(self, w):
pass
def stop(self, w):
w.hub.close()
def terminate(self, w):
w.hub.close()
def _patch_thread_primitives(self, w):
# make clock use dummy lock
w.app.clock.mutex = DummyLock()
# multiprocessing's ApplyResult uses this lock.
try:
from billiard import pool
except ImportError:
pass
else:
pool.Lock = DummyLock
| Hub |
python | getsentry__sentry | src/sentry/auth/providers/saml2/onelogin/apps.py | {
"start": 36,
"end": 276
} | class ____(AppConfig):
name = "sentry.auth.providers.saml2.onelogin"
def ready(self) -> None:
from sentry.auth import register
from .provider import OneLoginSAML2Provider
register(OneLoginSAML2Provider)
| Config |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard_python3/bundled-services/blobstore/django/main.py | {
"start": 1292,
"end": 1644
} | class ____(blobstore.BlobstoreUploadHandler):
def post(self, environ):
upload = self.get_uploads(environ)[0]
photo_key = upload.key()
user_photo = UserPhoto(blob_key=photo_key)
user_photo.put()
logger.log_text("Photo key: %s" % photo_key)
return redirect("view_photo", key=photo_key)
| PhotoUploadHandler |
python | sphinx-doc__sphinx | tests/test_util/test_util_inspect.py | {
"start": 1047,
"end": 1519
} | class ____(MyInt):
@classmethod
def from_bytes(cls, *a, **kw):
return super().from_bytes(*a, **kw)
def conjugate(self):
return super().conjugate()
def func():
pass
async def coroutinefunc():
pass
async def asyncgenerator(): # NoQA: RUF029
yield
partial_func = functools.partial(func)
partial_coroutinefunc = functools.partial(coroutinefunc)
builtin_func = print
partial_builtin_func = functools.partial(print)
| MyIntOverride |
python | tensorflow__tensorflow | tensorflow/python/framework/test_combinations.py | {
"start": 4360,
"end": 6817
} | class ____:
"""Customizes the behavior of a particular parameter.
Users should override `modified_arguments()` to modify the parameter they
want, eg: change the value of certain parameter or filter it from the params
passed to the test case.
See the sample usage below, it will change any negative parameters to zero
before it gets passed to test case.
```
class NonNegativeParameterModifier(ParameterModifier):
def modified_arguments(self, kwargs, requested_parameters):
updates = {}
for name, value in kwargs.items():
if value < 0:
updates[name] = 0
return updates
```
"""
DO_NOT_PASS_TO_THE_TEST = object()
def __init__(self, parameter_name=None):
"""Construct a parameter modifier that may be specific to a parameter.
Args:
parameter_name: A `ParameterModifier` instance may operate on a class of
parameters or on a parameter with a particular name. Only
`ParameterModifier` instances that are of a unique type or were
initialized with a unique `parameter_name` will be executed.
See `__eq__` and `__hash__`.
"""
self._parameter_name = parameter_name
def modified_arguments(self, kwargs, requested_parameters):
"""Replace user-provided arguments before they are passed to a test.
This makes it possible to adjust user-provided arguments before passing
them to the test method.
Args:
kwargs: The combined arguments for the test.
requested_parameters: The set of parameters that are defined in the
signature of the test method.
Returns:
A dictionary with updates to `kwargs`. Keys with values set to
`ParameterModifier.DO_NOT_PASS_TO_THE_TEST` are going to be deleted and
not passed to the test.
"""
del kwargs, requested_parameters
return {}
def __eq__(self, other):
"""Compare `ParameterModifier` by type and `parameter_name`."""
if self is other:
return True
elif type(self) is type(other):
return self._parameter_name == other._parameter_name
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
"""Compare `ParameterModifier` by type or `parameter_name`."""
if self._parameter_name:
return hash(self._parameter_name)
else:
return id(self.__class__)
@tf_export("__internal__.test.combinations.OptionalParameter", v1=[])
| ParameterModifier |
python | google__jax | jax/experimental/mosaic/gpu/core.py | {
"start": 8354,
"end": 8492
} | class ____(Generic[T]):
members: Sequence[T]
def __iter__(self):
return iter(self.members)
@dataclasses.dataclass(frozen=True)
| Union |
python | keras-team__keras | keras/src/losses/losses_test.py | {
"start": 13456,
"end": 15661
} | class ____(testing.TestCase):
def test_unweighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
# Reduction = "sum_over_batch_size"
hinge_obj = losses.Hinge(reduction="sum_over_batch_size")
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 1.3, 3)
# Reduction = "sum"
hinge_obj = losses.Hinge(reduction="sum")
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 2.6, 3)
# Reduction = None
hinge_obj = losses.Hinge(reduction=None)
loss = hinge_obj(y_true, y_pred)
self.assertAllClose(loss, [1.1, 1.5])
# Bad reduction
with self.assertRaisesRegex(ValueError, "Invalid value for argument"):
losses.Hinge(reduction="abc")
def test_weighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
sample_weight = [1, 0]
# Reduction = "sum_over_batch_size"
hinge_obj = losses.Hinge(reduction="sum_over_batch_size")
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.55, 3)
# Reduction = "sum"
hinge_obj = losses.Hinge(reduction="sum")
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 1.1, 3)
# Reduction = None
hinge_obj = losses.Hinge(reduction=None)
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, [1.1, 0.0])
def test_zero_weighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
sample_weight = 0.0
hinge_obj = losses.Hinge()
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(loss, 0.0)
def test_dtype_arg(self):
hinge_obj = losses.Hinge(dtype="bfloat16")
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
loss = hinge_obj(y_true, y_pred)
self.assertDType(loss, "bfloat16")
| HingeTest |
python | marshmallow-code__marshmallow | tests/test_fields.py | {
"start": 20353,
"end": 23244
} | class ____:
@pytest.mark.parametrize("param", ("only", "exclude", "dump_only", "load_only"))
def test_dict_nested_only_exclude_dump_only_load_only_propagated_to_nested(
self, param
):
class Child(Schema):
name = fields.String()
age = fields.Integer()
class Family(Schema):
children = fields.Dict(values=fields.Nested(Child))
schema = Family(**{param: ["children.name"]}) # type: ignore[arg-type]
children_field = schema.fields["children"]
assert isinstance(children_field, fields.Dict)
assert isinstance(children_field.value_field, fields.Nested)
assert getattr(children_field.value_field.schema, param) == {"name"}
@pytest.mark.parametrize(
("param", "expected"),
(("only", {"name"}), ("exclude", {"name", "surname", "age"})),
)
def test_dict_nested_only_and_exclude_merged_with_nested(self, param, expected):
class Child(Schema):
name = fields.String()
surname = fields.String()
age = fields.Integer()
class Family(Schema):
children = fields.Dict(
values=fields.Nested(Child, **{param: ("name", "surname")}) # type: ignore[arg-type]
)
schema = Family(**{param: ["children.name", "children.age"]}) # type: ignore[arg-type]
children_field = schema.fields["children"]
assert isinstance(children_field, fields.Dict)
assert getattr(children_field.value_field, param) == expected
def test_dict_nested_partial_propagated_to_nested(self):
class Child(Schema):
name = fields.String(required=True)
age = fields.Integer(required=True)
class Family(Schema):
children = fields.Dict(values=fields.Nested(Child))
payload = {"children": {"daughter": {"name": "Lucette"}}}
for val in (True, ("children.age",)):
result = Family(partial=val).load(payload)
assert result["children"]["daughter"]["name"] == "Lucette"
result = Family().load(payload, partial=val)
assert result["children"]["daughter"]["name"] == "Lucette"
for val in (False, ("children.name",)):
with pytest.raises(ValidationError) as excinfo:
result = Family(partial=val).load(payload)
assert excinfo.value.args[0] == {
"children": {
"daughter": {"value": {"age": ["Missing data for required field."]}}
}
}
with pytest.raises(ValidationError) as excinfo:
result = Family().load(payload, partial=val)
assert excinfo.value.args[0] == {
"children": {
"daughter": {"value": {"age": ["Missing data for required field."]}}
}
}
| TestDictNested |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/max_num_bounding_box.py | {
"start": 243,
"end": 3435
} | class ____(BaseImagePreprocessingLayer):
"""Ensure the maximum number of bounding boxes.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Args:
max_number: Desired output number of bounding boxes.
padding_value: The padding value of the `boxes` and `labels` in
`bounding_boxes`. Defaults to `-1`.
"""
def __init__(self, max_number, fill_value=-1, **kwargs):
super().__init__(**kwargs)
self.max_number = int(max_number)
self.fill_value = int(fill_value)
def transform_images(self, images, transformation=None, training=True):
return images
def transform_labels(self, labels, transformation=None, training=True):
return labels
def transform_bounding_boxes(
self, bounding_boxes, transformation, training=True
):
ops = self.backend
boxes = bounding_boxes["boxes"]
labels = bounding_boxes["labels"]
boxes_shape = ops.shape(boxes)
batch_size = boxes_shape[0]
num_boxes = boxes_shape[1]
# Get pad size
pad_size = ops.numpy.maximum(
ops.numpy.subtract(self.max_number, num_boxes), 0
)
boxes = boxes[:, : self.max_number, ...]
boxes = ops.numpy.pad(
boxes,
[[0, 0], [0, pad_size], [0, 0]],
constant_values=self.fill_value,
)
labels = labels[:, : self.max_number]
labels = ops.numpy.pad(
labels, [[0, 0], [0, pad_size]], constant_values=self.fill_value
)
# Ensure shape
boxes = ops.numpy.reshape(boxes, [batch_size, self.max_number, 4])
labels = ops.numpy.reshape(labels, [batch_size, self.max_number])
bounding_boxes = bounding_boxes.copy()
bounding_boxes["boxes"] = boxes
bounding_boxes["labels"] = labels
return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation=None, training=True
):
return self.transform_images(segmentation_masks)
def compute_output_shape(self, input_shape):
if isinstance(input_shape, dict) and "bounding_boxes" in input_shape:
input_keys = set(input_shape["bounding_boxes"].keys())
extra_keys = input_keys - set(("boxes", "labels"))
if extra_keys:
raise KeyError(
"There are unsupported keys in `bounding_boxes`: "
f"{list(extra_keys)}. "
"Only `boxes` and `labels` are supported."
)
boxes_shape = list(input_shape["bounding_boxes"]["boxes"])
boxes_shape[1] = self.max_number
labels_shape = list(input_shape["bounding_boxes"]["labels"])
labels_shape[1] = self.max_number
input_shape["bounding_boxes"]["boxes"] = boxes_shape
input_shape["bounding_boxes"]["labels"] = labels_shape
return input_shape
def get_config(self):
config = super().get_config()
config.update({"max_number": self.max_number})
return config
| MaxNumBoundingBoxes |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/dag.py | {
"start": 9045,
"end": 68401
} | class ____:
"""
A dag is a collection of tasks with directional dependencies.
A dag also has a schedule, a start date and an end date (optional). For each schedule,
(say daily or hourly), the DAG needs to run each individual tasks as their dependencies
are met. Certain tasks have the property of depending on their own past, meaning that
they can't run until their previous schedule (and upstream tasks) are completed.
Dags essentially act as namespaces for tasks. A task_id can only be
added once to a Dag.
Note that if you plan to use time zones all the dates provided should be pendulum
dates. See :ref:`timezone_aware_dags`.
.. versionadded:: 2.4
The *schedule* argument to specify either time-based scheduling logic
(timetable), or dataset-driven triggers.
.. versionchanged:: 3.0
The default value of *schedule* has been changed to *None* (no schedule).
The previous default was ``timedelta(days=1)``.
:param dag_id: The id of the DAG; must consist exclusively of alphanumeric
characters, dashes, dots and underscores (all ASCII)
:param description: The description for the DAG to e.g. be shown on the webserver
:param schedule: If provided, this defines the rules according to which DAG
runs are scheduled. Possible values include a cron expression string,
timedelta object, Timetable, or list of Asset objects.
See also :external:doc:`howto/timetable`.
:param start_date: The timestamp from which the scheduler will
attempt to backfill. If this is not provided, backfilling must be done
manually with an explicit time range.
:param end_date: A date beyond which your DAG won't run, leave to None
for open-ended scheduling.
:param template_searchpath: This list of folders (non-relative)
defines where jinja will look for your templates. Order matters.
Note that jinja/airflow includes the path of your DAG file by
default
:param template_undefined: Template undefined type.
:param user_defined_macros: a dictionary of macros that will be exposed
in your jinja templates. For example, passing ``dict(foo='bar')``
to this argument allows you to ``{{ foo }}`` in all jinja
templates related to this DAG. Note that you can pass any
type of object here.
:param user_defined_filters: a dictionary of filters that will be exposed
in your jinja templates. For example, passing
``dict(hello=lambda name: 'Hello %s' % name)`` to this argument allows
you to ``{{ 'world' | hello }}`` in all jinja templates related to
this DAG.
:param default_args: A dictionary of default parameters to be used
as constructor keyword parameters when initialising operators.
Note that operators have the same hook, and precede those defined
here, meaning that if your dict contains `'depends_on_past': True`
here and `'depends_on_past': False` in the operator's call
`default_args`, the actual value will be `False`.
:param params: a dictionary of DAG level parameters that are made
accessible in templates, namespaced under `params`. These
params can be overridden at the task level.
:param max_active_tasks: the number of task instances allowed to run
concurrently
:param max_active_runs: maximum number of active DAG runs, beyond this
number of DAG runs in a running state, the scheduler won't create
new active DAG runs
:param max_consecutive_failed_dag_runs: (experimental) maximum number of consecutive failed DAG runs,
beyond this the scheduler will disable the DAG
:param dagrun_timeout: Specify the duration a DagRun should be allowed to run before it times out or
fails. Task instances that are running when a DagRun is timed out will be marked as skipped.
:param sla_miss_callback: DEPRECATED - The SLA feature is removed in Airflow 3.0, to be replaced with DeadlineAlerts in 3.1
:param deadline: An optional DeadlineAlert for the Dag.
:param catchup: Perform scheduler catchup (or only run latest)? Defaults to False
:param on_failure_callback: A function or list of functions to be called when a DagRun of this dag fails.
A context dictionary is passed as a single parameter to this function.
:param on_success_callback: Much like the ``on_failure_callback`` except
that it is executed when the dag succeeds.
:param access_control: Specify optional DAG-level actions, e.g.,
"{'role1': {'can_read'}, 'role2': {'can_read', 'can_edit', 'can_delete'}}"
or it can specify the resource name if there is a DAGs Run resource, e.g.,
"{'role1': {'DAG Runs': {'can_create'}}, 'role2': {'DAGs': {'can_read', 'can_edit', 'can_delete'}}"
:param is_paused_upon_creation: Specifies if the dag is paused when created for the first time.
If the dag exists already, this flag will be ignored. If this optional parameter
is not specified, the global config setting will be used.
:param jinja_environment_kwargs: additional configuration options to be passed to Jinja
``Environment`` for template rendering
**Example**: to avoid Jinja from removing a trailing newline from template strings ::
DAG(
dag_id="my-dag",
jinja_environment_kwargs={
"keep_trailing_newline": True,
# some other jinja2 Environment options here
},
)
**See**: `Jinja Environment documentation
<https://jinja.palletsprojects.com/en/2.11.x/api/#jinja2.Environment>`_
:param render_template_as_native_obj: If True, uses a Jinja ``NativeEnvironment``
to render templates as native Python types. If False, a Jinja
``Environment`` is used to render templates as string values.
:param tags: List of tags to help filtering Dags in the UI.
:param owner_links: Dict of owners and their links, that will be clickable on the Dags view UI.
Can be used as an HTTP link (for example the link to your Slack channel), or a mailto link.
e.g: ``{"dag_owner": "https://airflow.apache.org/"}``
:param auto_register: Automatically register this DAG when it is used in a ``with`` block
:param fail_fast: Fails currently running tasks when task in Dag fails.
**Warning**: A fail stop dag can only have tasks with the default trigger rule ("all_success").
An exception will be thrown if any task in a fail stop dag has a non default trigger rule.
:param dag_display_name: The display name of the Dag which appears on the UI.
"""
__serialized_fields: ClassVar[frozenset[str]]
# Note: mypy gets very confused about the use of `@${attr}.default` for attrs without init=False -- and it
# doesn't correctly track/notice that they have default values (it gives errors about `Missing positional
# argument "description" in call to "DAG"`` etc), so for init=True args we use the `default=Factory()`
# style
def __rich_repr__(self):
yield "dag_id", self.dag_id
yield "schedule", self.schedule
yield "#tasks", len(self.tasks)
__rich_repr__.angular = True # type: ignore[attr-defined]
# NOTE: When updating arguments here, please also keep arguments in @dag()
# below in sync. (Search for 'def dag(' in this file.)
dag_id: str = attrs.field(kw_only=False, validator=lambda i, a, v: validate_key(v))
description: str | None = attrs.field(
default=None,
validator=attrs.validators.optional(attrs.validators.instance_of(str)),
)
default_args: dict[str, Any] = attrs.field(
factory=dict, validator=attrs.validators.instance_of(dict), converter=dict_copy
)
start_date: datetime | None = attrs.field(
default=attrs.Factory(_default_start_date, takes_self=True),
)
end_date: datetime | None = None
timezone: FixedTimezone | Timezone = attrs.field(init=False)
schedule: ScheduleArg = attrs.field(default=None, on_setattr=attrs.setters.frozen)
timetable: Timetable = attrs.field(init=False)
template_searchpath: str | Iterable[str] | None = attrs.field(
default=None, converter=_convert_str_to_tuple
)
# TODO: Task-SDK: Work out how to not import jinj2 until we need it! It's expensive
template_undefined: type[jinja2.StrictUndefined] = jinja2.StrictUndefined
user_defined_macros: dict | None = None
user_defined_filters: dict | None = None
max_active_tasks: int = attrs.field(
factory=_config_int_factory("core", "max_active_tasks_per_dag"),
converter=attrs.converters.default_if_none( # type: ignore[misc]
# attrs only supports named callables or lambdas, but partial works
# OK here too. This is a false positive from attrs's Mypy plugin.
factory=_config_int_factory("core", "max_active_tasks_per_dag"),
),
validator=attrs.validators.instance_of(int),
)
max_active_runs: int = attrs.field(
factory=_config_int_factory("core", "max_active_runs_per_dag"),
converter=attrs.converters.default_if_none( # type: ignore[misc]
# attrs only supports named callables or lambdas, but partial works
# OK here too. This is a false positive from attrs's Mypy plugin.
factory=_config_int_factory("core", "max_active_runs_per_dag"),
),
validator=attrs.validators.instance_of(int),
)
max_consecutive_failed_dag_runs: int = attrs.field(
factory=_config_int_factory("core", "max_consecutive_failed_dag_runs_per_dag"),
converter=attrs.converters.default_if_none( # type: ignore[misc]
# attrs only supports named callables or lambdas, but partial works
# OK here too. This is a false positive from attrs's Mypy plugin.
factory=_config_int_factory("core", "max_consecutive_failed_dag_runs_per_dag"),
),
validator=attrs.validators.instance_of(int),
)
dagrun_timeout: timedelta | None = attrs.field(
default=None,
validator=attrs.validators.optional(attrs.validators.instance_of(timedelta)),
)
deadline: list[DeadlineAlert] | DeadlineAlert | None = attrs.field(
default=None,
converter=_convert_deadline,
validator=attrs.validators.optional(
attrs.validators.deep_iterable(
member_validator=attrs.validators.instance_of(DeadlineAlert),
iterable_validator=attrs.validators.instance_of(list),
)
),
)
sla_miss_callback: None = attrs.field(default=None)
catchup: bool = attrs.field(
factory=_config_bool_factory("scheduler", "catchup_by_default"),
)
on_success_callback: None | DagStateChangeCallback | list[DagStateChangeCallback] = None
on_failure_callback: None | DagStateChangeCallback | list[DagStateChangeCallback] = None
doc_md: str | None = attrs.field(default=None, converter=_convert_doc_md)
params: ParamsDict = attrs.field(
# mypy doesn't really like passing the Converter object
default=None,
converter=attrs.Converter(_convert_params, takes_self=True), # type: ignore[misc, call-overload]
)
access_control: dict[str, dict[str, Collection[str]]] | None = attrs.field(
default=None,
converter=attrs.Converter(_convert_access_control), # type: ignore[misc, call-overload]
)
is_paused_upon_creation: bool | None = None
jinja_environment_kwargs: dict | None = None
render_template_as_native_obj: bool = attrs.field(default=False, converter=bool)
tags: MutableSet[str] = attrs.field(factory=set, converter=_convert_tags)
owner_links: dict[str, str] = attrs.field(factory=dict)
auto_register: bool = attrs.field(default=True, converter=bool)
fail_fast: bool = attrs.field(default=False, converter=bool)
dag_display_name: str = attrs.field(
default=attrs.Factory(_default_dag_display_name, takes_self=True),
validator=attrs.validators.instance_of(str),
)
task_dict: dict[str, Operator] = attrs.field(factory=dict, init=False)
task_group: TaskGroup = attrs.field(
on_setattr=attrs.setters.frozen, default=attrs.Factory(_default_task_group, takes_self=True)
)
fileloc: str = attrs.field(init=False, factory=_default_fileloc)
relative_fileloc: str | None = attrs.field(init=False, default=None)
partial: bool = attrs.field(init=False, default=False)
edge_info: dict[str, dict[str, EdgeInfoType]] = attrs.field(init=False, factory=dict)
has_on_success_callback: bool = attrs.field(init=False)
has_on_failure_callback: bool = attrs.field(init=False)
disable_bundle_versioning: bool = attrs.field(
factory=_config_bool_factory("dag_processor", "disable_bundle_versioning")
)
# TODO (GH-52141): This is never used in the sdk dag (it only makes sense
# after this goes through the dag processor), but various parts of the code
# depends on its existence. We should remove this after completely splitting
# DAG classes in the SDK and scheduler.
last_loaded: datetime | None = attrs.field(init=False, default=None)
    def __attrs_post_init__(self):
        """Normalize dates to UTC and run cross-field checks after attrs init."""
        from airflow.sdk import timezone

        # Apply the timezone we settled on to start_date, end_date if it wasn't supplied
        if isinstance(_start_date := self.default_args.get("start_date"), str):
            self.default_args["start_date"] = timezone.parse(_start_date, timezone=self.timezone)
        if isinstance(_end_date := self.default_args.get("end_date"), str):
            self.default_args["end_date"] = timezone.parse(_end_date, timezone=self.timezone)
        # Convert both the Dag-level dates and any dates in default_args to UTC.
        self.start_date = timezone.convert_to_utc(self.start_date)
        self.end_date = timezone.convert_to_utc(self.end_date)
        if start_date := self.default_args.get("start_date", None):
            self.default_args["start_date"] = timezone.convert_to_utc(start_date)
        if end_date := self.default_args.get("end_date", None):
            self.default_args["end_date"] = timezone.convert_to_utc(end_date)
        # access_control relies on the deprecated permissions module; warn on any use.
        if self.access_control is not None:
            warnings.warn(
                "The airflow.security.permissions module is deprecated; please see https://airflow.apache.org/docs/apache-airflow/stable/security/deprecated_permissions.html",
                RemovedInAirflow4Warning,
                stacklevel=2,
            )
        # NOTE(review): this duplicates _validate_max_active_runs with a slightly
        # different message; presumably kept so the check also runs when the
        # validator ordering would miss it — confirm before removing either.
        if (
            active_runs_limit := self.timetable.active_runs_limit
        ) is not None and active_runs_limit < self.max_active_runs:
            raise ValueError(
                f"Invalid max_active_runs: {type(self.timetable)} "
                f"requires max_active_runs <= {active_runs_limit}"
            )
@params.validator
def _validate_params(self, _, params: ParamsDict):
"""
Validate Param values when the Dag has schedule defined.
Raise exception if there are any Params which can not be resolved by their schema definition.
"""
if not self.timetable or not self.timetable.can_be_scheduled:
return
try:
params.validate()
except ParamValidationError as pverr:
raise ValueError(
f"Dag {self.dag_id!r} is not allowed to define a Schedule, "
"as there are required params without default values, or the default values are not valid."
) from pverr
@catchup.validator
def _validate_catchup(self, _, catchup: bool):
requires_automatic_backfilling = self.timetable.can_be_scheduled and catchup
if requires_automatic_backfilling and not ("start_date" in self.default_args or self.start_date):
raise ValueError("start_date is required when catchup=True")
@tags.validator
def _validate_tags(self, _, tags: Collection[str]):
if tags and any(len(tag) > TAG_MAX_LEN for tag in tags):
raise ValueError(f"tag cannot be longer than {TAG_MAX_LEN} characters")
@max_active_runs.validator
def _validate_max_active_runs(self, _, max_active_runs):
if self.timetable.active_runs_limit is not None:
if self.timetable.active_runs_limit < self.max_active_runs:
raise ValueError(
f"Invalid max_active_runs: {type(self.timetable).__name__} "
f"requires max_active_runs <= {self.timetable.active_runs_limit}"
)
@timetable.default
def _default_timetable(instance: DAG):
schedule = instance.schedule
# TODO: Once
# delattr(self, "schedule")
if isinstance(schedule, Timetable):
return schedule
if isinstance(schedule, BaseAsset):
return AssetTriggeredTimetable(schedule)
if isinstance(schedule, Collection) and not isinstance(schedule, str):
if not all(isinstance(x, BaseAsset) for x in schedule):
raise ValueError(
"All elements in 'schedule' should be either assets, asset references, or asset aliases"
)
return AssetTriggeredTimetable(AssetAll(*schedule))
return _create_timetable(schedule, instance.timezone)
@timezone.default
def _extract_tz(instance):
import pendulum
from airflow.sdk import timezone
start_date = instance.start_date or instance.default_args.get("start_date")
if start_date:
if not isinstance(start_date, datetime):
start_date = timezone.parse(start_date)
tzinfo = start_date.tzinfo or settings.TIMEZONE
tz = pendulum.instance(start_date, tz=tzinfo).timezone
else:
tz = settings.TIMEZONE
return tz
    @has_on_success_callback.default
    def _has_on_success_callback(self) -> bool:
        # Serialized Dags only store the flag, not the callable itself.
        return self.on_success_callback is not None
    @has_on_failure_callback.default
    def _has_on_failure_callback(self) -> bool:
        # Serialized Dags only store the flag, not the callable itself.
        return self.on_failure_callback is not None
@sla_miss_callback.validator
def _validate_sla_miss_callback(self, _, value):
if value is not None:
warnings.warn(
"The SLA feature is removed in Airflow 3.0, and replaced with a Deadline Alerts in >=3.1",
stacklevel=2,
)
return value
    def __repr__(self):
        # Keep this short: it appears in logs and error messages constantly.
        return f"<DAG: {self.dag_id}>"
def __eq__(self, other: Self | Any):
# TODO: This subclassing behaviour seems wrong, but it's what Airflow has done for ~ever.
if type(self) is not type(other):
return False
return all(getattr(self, c, None) == getattr(other, c, None) for c in _DAG_HASH_ATTRS)
    def __ne__(self, other: Any) -> bool:
        # Delegate to __eq__ so the two stay consistent.
        return not self == other
    def __lt__(self, other):
        # Dags sort lexicographically by dag_id.
        return self.dag_id < other.dag_id
def __hash__(self):
hash_components: list[Any] = [type(self)]
for c in _DAG_HASH_ATTRS:
# If it is a list, convert to tuple because lists can't be hashed
if isinstance(getattr(self, c, None), list):
val = tuple(getattr(self, c))
else:
val = getattr(self, c, None)
try:
hash(val)
hash_components.append(val)
except TypeError:
hash_components.append(repr(val))
return hash(tuple(hash_components))
    def __enter__(self) -> Self:
        # Register this Dag as "current" so operators created inside the
        # ``with dag:`` block attach to it automatically.
        from airflow.sdk.definitions._internal.contextmanager import DagContext

        DagContext.push(self)
        return self
    def __exit__(self, _type, _value, _tb):
        # Pop this Dag off the context stack; exceptions propagate unchanged.
        from airflow.sdk.definitions._internal.contextmanager import DagContext

        _ = DagContext.pop()
def validate(self):
"""
Validate the Dag has a coherent setup.
This is called by the Dag bag before bagging the Dag.
"""
self.timetable.validate()
self.validate_setup_teardown()
# We validate owner links on set, but since it's a dict it could be mutated without calling the
# setter. Validate again here
self._validate_owner_links(None, self.owner_links)
def validate_setup_teardown(self):
"""
Validate that setup and teardown tasks are configured properly.
:meta private:
"""
for task in self.tasks:
if task.is_setup:
for down_task in task.downstream_list:
if not down_task.is_teardown and down_task.trigger_rule != TriggerRule.ALL_SUCCESS:
# todo: we can relax this to allow out-of-scope tasks to have other trigger rules
# this is required to ensure consistent behavior of dag
# when clearing an indirect setup
raise ValueError("Setup tasks must be followed with trigger rule ALL_SUCCESS.")
def param(self, name: str, default: Any = NOTSET) -> DagParam:
"""
Return a DagParam object for current dag.
:param name: dag parameter name.
:param default: fallback value for dag parameter.
:return: DagParam instance for specified name and current dag.
"""
return DagParam(current_dag=self, name=name, default=default)
@property
def tasks(self) -> list[Operator]:
return list(self.task_dict.values())
    @tasks.setter
    def tasks(self, val):
        # Tasks must go through add_task() so group membership and bookkeeping stay consistent.
        raise AttributeError("DAG.tasks can not be modified. Use dag.add_task() instead.")
@property
def task_ids(self) -> list[str]:
return list(self.task_dict)
@property
def teardowns(self) -> list[Operator]:
return [task for task in self.tasks if getattr(task, "is_teardown", None)]
@property
def tasks_upstream_of_teardowns(self) -> list[Operator]:
upstream_tasks = [t.upstream_list for t in self.teardowns]
return [val for sublist in upstream_tasks for val in sublist if not getattr(val, "is_teardown", None)]
    @property
    def folder(self) -> str:
        """Folder location of where the Dag object is instantiated."""
        return os.path.dirname(self.fileloc)
@property
def owner(self) -> str:
"""
Return list of all owners found in Dag tasks.
:return: Comma separated list of owners in Dag tasks
"""
return ", ".join({t.owner for t in self.tasks})
    @property
    def timetable_summary(self) -> str:
        # Human-readable description of the schedule (e.g. the cron string).
        return self.timetable.summary
def resolve_template_files(self):
for t in self.tasks:
# TODO: TaskSDK: move this on to BaseOperator and remove the check?
if hasattr(t, "resolve_template_files"):
t.resolve_template_files()
def get_template_env(self, *, force_sandboxed: bool = False) -> jinja2.Environment:
"""Build a Jinja2 environment."""
from airflow.sdk.definitions._internal.templater import NativeEnvironment, SandboxedEnvironment
# Collect directories to search for template files
searchpath = [self.folder]
if self.template_searchpath:
searchpath += self.template_searchpath
# Default values (for backward compatibility)
jinja_env_options = {
"loader": jinja2.FileSystemLoader(searchpath),
"undefined": self.template_undefined,
"extensions": ["jinja2.ext.do"],
"cache_size": 0,
}
if self.jinja_environment_kwargs:
jinja_env_options.update(self.jinja_environment_kwargs)
env: jinja2.Environment
if self.render_template_as_native_obj and not force_sandboxed:
env = NativeEnvironment(**jinja_env_options)
else:
env = SandboxedEnvironment(**jinja_env_options)
# Add any user defined items. Safe to edit globals as long as no templates are rendered yet.
# http://jinja.pocoo.org/docs/2.10/api/#jinja2.Environment.globals
if self.user_defined_macros:
env.globals.update(self.user_defined_macros)
if self.user_defined_filters:
env.filters.update(self.user_defined_filters)
return env
def set_dependency(self, upstream_task_id, downstream_task_id):
"""Set dependency between two tasks that already have been added to the Dag using add_task()."""
self.get_task(upstream_task_id).set_downstream(self.get_task(downstream_task_id))
@property
def roots(self) -> list[Operator]:
"""Return nodes with no parents. These are first to execute and are called roots or root nodes."""
return [task for task in self.tasks if not task.upstream_list]
@property
def leaves(self) -> list[Operator]:
"""Return nodes with no children. These are last to execute and are called leaves or leaf nodes."""
return [task for task in self.tasks if not task.downstream_list]
def topological_sort(self):
"""
Sorts tasks in topographical order, such that a task comes after any of its upstream dependencies.
Deprecated in place of ``task_group.topological_sort``
"""
from airflow.sdk.definitions.taskgroup import TaskGroup
# TODO: Remove in RemovedInAirflow3Warning
def nested_topo(group):
for node in group.topological_sort():
if isinstance(node, TaskGroup):
yield from nested_topo(node)
else:
yield node
return tuple(nested_topo(self.task_group))
    def __deepcopy__(self, memo: dict[int, Any]):
        # Switcharoo to go around deepcopying objects coming through the
        # backdoor: user-supplied macros/filters (and the logger) are shared
        # by reference instead of being copied.
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            if k not in ("user_defined_macros", "user_defined_filters", "_log"):
                # object.__setattr__ bypasses attrs' on_setattr hooks (e.g. the
                # frozen task_group setter).
                object.__setattr__(result, k, copy.deepcopy(v, memo))
        result.user_defined_macros = self.user_defined_macros
        result.user_defined_filters = self.user_defined_filters
        if hasattr(self, "_log"):
            result._log = self._log  # type: ignore[attr-defined]
        return result
    def partial_subset(
        self,
        task_ids: str | Iterable[str],
        include_downstream=False,
        include_upstream=True,
        include_direct_upstream=False,
    ):
        """
        Return a subset of the current dag based on regex matching one or more tasks.

        Returns a subset of the current dag as a deep copy of the current dag
        based on a regex that should match one or many tasks, and includes
        upstream and downstream neighbours based on the flag passed.

        :param task_ids: Either a list of task_ids, or a string task_id
        :param include_downstream: Include all downstream tasks of matched
            tasks, in addition to matched tasks.
        :param include_upstream: Include all upstream tasks of matched tasks,
            in addition to matched tasks.
        :param include_direct_upstream: Include all tasks directly upstream of matched
            and downstream (if include_downstream = True) tasks
        """
        from airflow.sdk.definitions.mappedoperator import MappedOperator

        def is_task(obj) -> TypeGuard[Operator]:
            return isinstance(obj, (BaseOperator, MappedOperator))

        # deep-copying self.task_dict and self.task_group takes a long time, and we don't want all
        # the tasks anyway, so we copy the tasks manually later
        memo = {id(self.task_dict): None, id(self.task_group): None}
        dag = copy.deepcopy(self, memo)
        # A string acts as a substring match; an iterable is an exact id set.
        if isinstance(task_ids, str):
            matched_tasks = [t for t in self.tasks if task_ids in t.task_id]
        else:
            matched_tasks = [t for t in self.tasks if t.task_id in task_ids]
        # Collect extra task ids pulled in by the include_* flags.
        also_include_ids: set[str] = set()
        for t in matched_tasks:
            if include_downstream:
                for rel in t.get_flat_relatives(upstream=False):
                    also_include_ids.add(rel.task_id)
                    if rel not in matched_tasks:  # if it's in there, we're already processing it
                        # need to include setups and teardowns for tasks that are in multiple
                        # non-collinear setup/teardown paths
                        if not rel.is_setup and not rel.is_teardown:
                            also_include_ids.update(
                                x.task_id for x in rel.get_upstreams_only_setups_and_teardowns()
                            )
            if include_upstream:
                also_include_ids.update(x.task_id for x in t.get_upstreams_follow_setups())
            else:
                if not t.is_setup and not t.is_teardown:
                    also_include_ids.update(x.task_id for x in t.get_upstreams_only_setups_and_teardowns())
            if t.is_setup and not include_downstream:
                # A setup without its downstream still needs its paired teardowns.
                also_include_ids.update(x.task_id for x in t.downstream_list if x.is_teardown)

        also_include: list[Operator] = [self.task_dict[x] for x in also_include_ids]
        direct_upstreams: list[Operator] = []
        if include_direct_upstream:
            for t in itertools.chain(matched_tasks, also_include):
                upstream = (u for u in t.upstream_list if is_task(u))
                direct_upstreams.extend(upstream)

        # Make sure to not recursively deepcopy the dag or task_group while copying the task.
        # task_group is reset later
        def _deepcopy_task(t) -> Operator:
            memo.setdefault(id(t.task_group), None)
            return copy.deepcopy(t, memo)

        # Compiling the unique list of tasks that made the cut
        dag.task_dict = {
            t.task_id: _deepcopy_task(t)
            for t in itertools.chain(matched_tasks, also_include, direct_upstreams)
        }

        def filter_task_group(group, parent_group):
            """Exclude tasks not included in the partial dag from the given TaskGroup."""
            # We want to deepcopy _most but not all_ attributes of the task group, so we create a shallow copy
            # and then manually deep copy the instances. (memo argument to deepcopy only works for instances
            # of classes, not "native" properties of an instance)
            copied = copy.copy(group)
            memo[id(group.children)] = {}
            if parent_group:
                memo[id(group.parent_group)] = parent_group
            for attr in type(group).__slots__:
                value = getattr(group, attr)
                value = copy.deepcopy(value, memo)
                object.__setattr__(copied, attr, value)

            proxy = weakref.proxy(copied)

            for child in group.children.values():
                if is_task(child):
                    if child.task_id in dag.task_dict:
                        task = copied.children[child.task_id] = dag.task_dict[child.task_id]
                        task.task_group = proxy
                    else:
                        copied.used_group_ids.discard(child.task_id)
                else:
                    filtered_child = filter_task_group(child, proxy)

                    # Only include this child TaskGroup if it is non-empty.
                    if filtered_child.children:
                        copied.children[child.group_id] = filtered_child

            return copied

        object.__setattr__(dag, "task_group", filter_task_group(self.task_group, None))

        # Removing upstream/downstream references to tasks and TaskGroups that did not make
        # the cut.
        groups = dag.task_group.get_task_group_dict()
        for g in groups.values():
            g.upstream_group_ids.intersection_update(groups)
            g.downstream_group_ids.intersection_update(groups)
            g.upstream_task_ids.intersection_update(dag.task_dict)
            g.downstream_task_ids.intersection_update(dag.task_dict)

        for t in dag.tasks:
            # Removing upstream/downstream references to tasks that did not
            # make the cut
            t.upstream_task_ids.intersection_update(dag.task_dict)
            t.downstream_task_ids.intersection_update(dag.task_dict)
        # Mark the copy as partial so consumers know tasks were removed.
        dag.partial = len(dag.tasks) < len(self.tasks)
        return dag
    def has_task(self, task_id: str):
        # True when a task with this exact id has been added to the Dag.
        return task_id in self.task_dict
    def has_task_group(self, task_group_id: str) -> bool:
        # True when a (non-root) TaskGroup with this group_id exists on the Dag.
        return task_group_id in self.task_group_dict
@functools.cached_property
def task_group_dict(self):
return {k: v for k, v in self.task_group.get_task_group_dict().items() if k is not None}
def get_task(self, task_id: str) -> Operator:
if task_id in self.task_dict:
return self.task_dict[task_id]
raise TaskNotFound(f"Task {task_id} not found")
    @property
    def task(self) -> TaskDecoratorCollection:
        # ``@dag_instance.task`` decorator, pre-bound to this Dag.
        from airflow.sdk.definitions.decorators import task

        return cast("TaskDecoratorCollection", functools.partial(task, dag=self))
    def add_task(self, task: Operator) -> None:
        """
        Add a task to the Dag.

        :param task: the task you want to add
        :raises DuplicateTaskIdFound: if the (group-prefixed) task id is already taken.
        """
        # FailStopDagInvalidTriggerRule.check(dag=self, trigger_rule=task.trigger_rule)
        from airflow.sdk.definitions._internal.contextmanager import TaskGroupContext

        # if the task has no start date, assign it the same as the Dag
        if not task.start_date:
            task.start_date = self.start_date
        # otherwise, the task will start on the later of its own start date and
        # the Dag's start date
        elif self.start_date:
            task.start_date = max(task.start_date, self.start_date)

        # if the task has no end date, assign it the same as the dag
        if not task.end_date:
            task.end_date = self.end_date
        # otherwise, the task will end on the earlier of its own end date and
        # the Dag's end date
        elif task.end_date and self.end_date:
            task.end_date = min(task.end_date, self.end_date)

        task_id = task.node_id
        # Tasks created inside a ``with TaskGroup():`` block get the group's
        # id prefix and are registered with that group.
        if not task.task_group:
            task_group = TaskGroupContext.get_current(self)
            if task_group:
                task_id = task_group.child_id(task_id)
                task_group.add(task)

        if (
            task_id in self.task_dict and self.task_dict[task_id] is not task
        ) or task_id in self.task_group.used_group_ids:
            raise DuplicateTaskIdFound(f"Task id '{task_id}' has already been added to the DAG")
        self.task_dict[task_id] = task
        task.dag = self
        # Add task_id to used_group_ids to prevent group_id and task_id collisions.
        self.task_group.used_group_ids.add(task_id)

        FailFastDagInvalidTriggerRule.check(fail_fast=self.fail_fast, trigger_rule=task.trigger_rule)
def add_tasks(self, tasks: Iterable[Operator]) -> None:
"""
Add a list of tasks to the Dag.
:param tasks: a lit of tasks you want to add
"""
for task in tasks:
self.add_task(task)
def _remove_task(self, task_id: str) -> None:
# This is "private" as removing could leave a hole in dependencies if done incorrectly, and this
# doesn't guard against that
task = self.task_dict.pop(task_id)
tg = getattr(task, "task_group", None)
if tg:
tg._remove(task)
    def check_cycle(self) -> None:
        """
        Check to see if there are any cycles in the Dag.

        Uses an iterative depth-first search with three-color marking.

        :raises AirflowDagCycleException: If cycle is found in the Dag.
        """
        # default of int is 0 which corresponds to CYCLE_NEW (white/grey/black marking)
        CYCLE_NEW = 0
        CYCLE_IN_PROGRESS = 1
        CYCLE_DONE = 2

        visited: dict[str, int] = defaultdict(int)
        path_stack: deque[str] = deque()
        task_dict = self.task_dict

        def _check_adjacent_tasks(task_id, current_task):
            """Return first untraversed child task, else None if all tasks traversed."""
            for adjacent_task in current_task.get_direct_relative_ids():
                # Hitting an in-progress node means we walked back into the
                # current DFS path: a cycle.
                if visited[adjacent_task] == CYCLE_IN_PROGRESS:
                    msg = f"Cycle detected in Dag: {self.dag_id}. Faulty task: {task_id}"
                    raise AirflowDagCycleException(msg)
                if visited[adjacent_task] == CYCLE_NEW:
                    return adjacent_task
            return None

        for dag_task_id in self.task_dict.keys():
            if visited[dag_task_id] == CYCLE_DONE:
                continue
            path_stack.append(dag_task_id)
            while path_stack:
                current_task_id = path_stack[-1]
                if visited[current_task_id] == CYCLE_NEW:
                    visited[current_task_id] = CYCLE_IN_PROGRESS
                task = task_dict[current_task_id]
                child_to_check = _check_adjacent_tasks(current_task_id, task)
                if not child_to_check:
                    # Fully explored: mark done and backtrack.
                    visited[current_task_id] = CYCLE_DONE
                    path_stack.pop()
                else:
                    path_stack.append(child_to_check)
def cli(self):
"""Exposes a CLI specific to this Dag."""
self.check_cycle()
from airflow.cli import cli_parser
parser = cli_parser.get_parser(dag_parser=True)
args = parser.parse_args()
args.func(args, self)
    @classmethod
    def get_serialized_fields(cls):
        """Stringified Dags and operators contain exactly these fields."""
        # Name-mangles to ``_DAG__serialized_fields``, which is assigned at
        # module level after the class body (it needs ``attrs.fields(DAG)``,
        # unavailable during class creation).
        return cls.__serialized_fields
def get_edge_info(self, upstream_task_id: str, downstream_task_id: str) -> EdgeInfoType:
"""Return edge information for the given pair of tasks or an empty edge if there is no information."""
empty = cast("EdgeInfoType", {})
if self.edge_info:
return self.edge_info.get(upstream_task_id, {}).get(downstream_task_id, empty)
return empty
def set_edge_info(self, upstream_task_id: str, downstream_task_id: str, info: EdgeInfoType):
"""
Set the given edge information on the Dag.
Note that this will overwrite, rather than merge with, existing info.
"""
self.edge_info.setdefault(upstream_task_id, {})[downstream_task_id] = info
@owner_links.validator
def _validate_owner_links(self, _, owner_links):
wrong_links = {}
for owner, link in owner_links.items():
result = urlsplit(link)
if result.scheme == "mailto":
# netloc is not existing for 'mailto' link, so we are checking that the path is parsed
if not result.path:
wrong_links[result.path] = link
elif not result.scheme or not result.netloc:
wrong_links[owner] = link
if wrong_links:
raise ValueError(
"Wrong link format was used for the owner. Use a valid link \n"
f"Bad formatted links are: {wrong_links}"
)
    def test(
        self,
        run_after: datetime | None = None,
        logical_date: datetime | None | ArgNotSet = NOTSET,
        run_conf: dict[str, Any] | None = None,
        conn_file_path: str | None = None,
        variable_file_path: str | None = None,
        use_executor: bool = False,
        mark_success_pattern: Pattern | str | None = None,
    ):
        """
        Execute one single DagRun for a given Dag and logical date.

        :param run_after: the datetime before which the Dag cannot run.
        :param logical_date: logical date for the Dag run
        :param run_conf: configuration to pass to newly created dagrun
        :param conn_file_path: file path to a connection file in either yaml or json
        :param variable_file_path: file path to a variable file in either yaml or json
        :param use_executor: if set, uses an executor to test the Dag
        :param mark_success_pattern: regex of task_ids to mark as success instead of running
        """
        import re
        import time

        from contextlib import ExitStack
        from unittest.mock import patch

        from airflow import settings
        from airflow.models.dagrun import DagRun, get_or_create_dagrun
        from airflow.sdk import DagRunState, timezone
        from airflow.serialization.serialized_objects import SerializedDAG
        from airflow.utils.types import DagRunTriggeredByType, DagRunType

        exit_stack = ExitStack()
        # Route secrets lookups to the local filesystem backend when connection
        # or variable files are supplied.
        if conn_file_path or variable_file_path:
            backend_kwargs = {}
            if conn_file_path:
                backend_kwargs["connections_file_path"] = conn_file_path
            if variable_file_path:
                backend_kwargs["variables_file_path"] = variable_file_path
            exit_stack.enter_context(
                patch.dict(
                    os.environ,
                    {
                        "AIRFLOW__SECRETS__BACKEND": "airflow.secrets.local_filesystem.LocalFilesystemBackend",
                        "AIRFLOW__SECRETS__BACKEND_KWARGS": json.dumps(backend_kwargs),
                    },
                )
            )
        if settings.Session is None:
            raise RuntimeError("Session not configured. Call configure_orm() first.")
        session = settings.Session()
        with exit_stack:
            self.validate()
            # Allow users to explicitly pass None. If it isn't set, we default to current time.
            logical_date = logical_date if is_arg_set(logical_date) else timezone.utcnow()
            log.debug("Clearing existing task instances for logical date %s", logical_date)
            # TODO: Replace with calling client.dag_run.clear in Execution API at some point
            SerializedDAG.clear_dags(
                dags=[self],
                start_date=logical_date,
                end_date=logical_date,
                dag_run_state=False,
            )
            log.debug("Getting dagrun for dag %s", self.dag_id)
            logical_date = timezone.coerce_datetime(logical_date)
            run_after = timezone.coerce_datetime(run_after) or timezone.coerce_datetime(timezone.utcnow())
            data_interval = (
                self.timetable.infer_manual_data_interval(run_after=logical_date) if logical_date else None
            )
            from airflow.models.dag_version import DagVersion

            # A DagVersion row must exist before a run can be created; parse and
            # sync bundles to the DB if this Dag has never been serialized.
            version = DagVersion.get_version(self.dag_id)
            if not version:
                from airflow.dag_processing.bundles.manager import DagBundlesManager
                from airflow.dag_processing.dagbag import DagBag, sync_bag_to_db
                from airflow.sdk.definitions._internal.dag_parsing_context import (
                    _airflow_parsing_context_manager,
                )

                manager = DagBundlesManager()
                manager.sync_bundles_to_db(session=session)
                session.commit()
                # sync all bundles? or use the dags-folder bundle?
                # What if the test dag is in a different bundle?
                for bundle in manager.get_all_dag_bundles():
                    if not bundle.is_initialized:
                        bundle.initialize()
                    with _airflow_parsing_context_manager(dag_id=self.dag_id):
                        dagbag = DagBag(
                            dag_folder=bundle.path, bundle_path=bundle.path, include_examples=False
                        )
                    sync_bag_to_db(dagbag, bundle.name, bundle.version)
                    version = DagVersion.get_version(self.dag_id)
                    if version:
                        break
            scheduler_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(self))
            # Preserve callback functions from original Dag since they're lost during serialization
            # and yes it is a hack for now! It is a tradeoff for code simplicity.
            # Without it, we need "Scheduler Dag" (Serialized dag) for the scheduler bits
            # -- dep check, scheduling tis
            # and need real dag to get and run callbacks without having to load the dag model
            # Scheduler DAG shouldn't have these attributes, but assigning them
            # here is an easy hack to get this test() thing working.
            scheduler_dag.on_success_callback = self.on_success_callback  # type: ignore[attr-defined, union-attr]
            scheduler_dag.on_failure_callback = self.on_failure_callback  # type: ignore[attr-defined, union-attr]
            dr: DagRun = get_or_create_dagrun(
                dag=scheduler_dag,
                start_date=logical_date or run_after,
                logical_date=logical_date,
                data_interval=data_interval,
                run_after=run_after,
                run_id=DagRun.generate_run_id(
                    run_type=DagRunType.MANUAL,
                    logical_date=logical_date,
                    run_after=run_after,
                ),
                session=session,
                conf=run_conf,
                triggered_by=DagRunTriggeredByType.TEST,
                triggering_user_name="dag_test",
            )
            # Start a mock span so that one is present and not started downstream. We
            # don't care about otel in dag.test and starting the span during dagrun update
            # is not functioning properly in this context anyway.
            dr.start_dr_spans_if_needed(tis=[])
            log.debug("starting dagrun")
            # Instead of starting a scheduler, we run the minimal loop possible to check
            # for task readiness and dependency management.
            # ``Dag.test()`` works in two different modes depending on ``use_executor``:
            # - if ``use_executor`` is False, runs the task locally with no executor using ``_run_task``
            # - if ``use_executor`` is True, sends workloads to the executor with
            #   ``BaseExecutor.queue_workload``
            if use_executor:
                from airflow.executors.base_executor import ExecutorLoader

                executor = ExecutorLoader.get_default_executor()
                executor.start()
            while dr.state == DagRunState.RUNNING:
                session.expire_all()
                schedulable_tis, _ = dr.update_state(session=session)
                for s in schedulable_tis:
                    if s.state != TaskInstanceState.UP_FOR_RESCHEDULE:
                        s.try_number += 1
                    s.state = TaskInstanceState.SCHEDULED
                    s.scheduled_dttm = timezone.utcnow()
                session.commit()
                # triggerer may mark tasks scheduled so we read from DB
                all_tis = set(dr.get_task_instances(session=session))
                scheduled_tis = {x for x in all_tis if x.state == TaskInstanceState.SCHEDULED}
                ids_unrunnable = {x for x in all_tis if x.state not in FINISHED_STATES} - scheduled_tis
                if not scheduled_tis and ids_unrunnable:
                    log.warning("No tasks to run. unrunnable tasks: %s", ids_unrunnable)
                    time.sleep(1)
                for ti in scheduled_tis:
                    task = self.task_dict[ti.task_id]
                    mark_success = (
                        re.compile(mark_success_pattern).fullmatch(ti.task_id) is not None
                        if mark_success_pattern is not None
                        else False
                    )
                    if use_executor:
                        if executor.has_task(ti):
                            continue
                        from pathlib import Path

                        from airflow.executors import workloads
                        from airflow.executors.base_executor import ExecutorLoader
                        from airflow.executors.workloads import BundleInfo

                        workload = workloads.ExecuteTask.make(
                            ti,
                            dag_rel_path=Path(self.fileloc),
                            generator=executor.jwt_generator,
                            sentry_integration=executor.sentry_integration,
                            # For the system test/debug purpose, we use the default bundle which uses
                            # local file system. If it turns out to be a feature people want, we could
                            # plumb the Bundle to use as a parameter to dag.test
                            bundle_info=BundleInfo(name="dags-folder"),
                        )
                        executor.queue_workload(workload, session=session)
                        ti.state = TaskInstanceState.QUEUED
                        session.commit()
                    else:
                        # Run the task locally
                        try:
                            if mark_success:
                                ti.set_state(TaskInstanceState.SUCCESS)
                                log.info("[DAG TEST] Marking success for %s on %s", task, ti.logical_date)
                            else:
                                _run_task(ti=ti, task=task, run_triggerer=True)
                        except Exception:
                            log.exception("Task failed; ti=%s", ti)
                if use_executor:
                    executor.heartbeat()
                    from airflow.jobs.scheduler_job_runner import SchedulerJobRunner
                    from airflow.models.dagbag import DBDagBag

                    SchedulerJobRunner.process_executor_events(
                        executor=executor, job_id=None, scheduler_dag_bag=DBDagBag(), session=session
                    )
            if use_executor:
                executor.end()
        return dr
def _run_task(
    *,
    ti: SchedulerTaskInstance,
    task: Operator,
    run_triggerer: bool = False,
) -> TaskRunResult | None:
    """
    Run a single task instance, and push result to Xcom for downstream tasks.

    Bypasses a lot of extra steps used in `task.run` to keep our local running as fast as
    possible. This function is only meant for the `dag.test` function as a helper function.

    :param ti: scheduler-side task instance to execute.
    :param task: the operator to run.
    :param run_triggerer: if True, run any deferral trigger inline and resume the task.
    :return: the task run result, or None if the task raised.
    """
    from airflow.sdk.module_loading import import_string

    taskrun_result: TaskRunResult | None
    log.info("[DAG TEST] starting task_id=%s map_index=%s", ti.task_id, ti.map_index)
    # Loop so a deferred task can be resumed after its trigger fires inline.
    while True:
        try:
            log.info("[DAG TEST] running task %s", ti)
            from airflow.sdk.api.datamodels._generated import TaskInstance as TaskInstanceSDK
            from airflow.sdk.execution_time.comms import DeferTask
            from airflow.sdk.execution_time.supervisor import run_task_in_process
            from airflow.serialization.serialized_objects import create_scheduler_operator

            # The API Server expects the task instance to be in QUEUED state before
            # it is run.
            ti.set_state(TaskInstanceState.QUEUED)
            task_sdk_ti = TaskInstanceSDK(
                id=UUID(str(ti.id)),
                task_id=ti.task_id,
                dag_id=ti.dag_id,
                run_id=ti.run_id,
                try_number=ti.try_number,
                map_index=ti.map_index,
                dag_version_id=UUID(str(ti.dag_version_id)),
            )
            taskrun_result = run_task_in_process(ti=task_sdk_ti, task=task)
            msg = taskrun_result.msg
            ti.set_state(taskrun_result.ti.state)
            ti.task = create_scheduler_operator(taskrun_result.ti.task)
            if ti.state == TaskInstanceState.DEFERRED and isinstance(msg, DeferTask) and run_triggerer:
                from airflow.utils.session import create_session

                # API Server expects the task instance to be in QUEUED state before
                # resuming from deferral.
                ti.set_state(TaskInstanceState.QUEUED)
                log.info("[DAG TEST] running trigger in line")
                trigger = import_string(msg.classpath)(**msg.trigger_kwargs)
                event = _run_inline_trigger(trigger, task_sdk_ti)
                ti.next_method = msg.next_method
                ti.next_kwargs = {"event": event.payload} if event else msg.next_kwargs
                log.info("[DAG TEST] Trigger completed")
                # Set the state to SCHEDULED so that the task can be resumed.
                with create_session() as session:
                    ti.state = TaskInstanceState.SCHEDULED
                    session.add(ti)
                # Re-enter the loop to run the resumed task.
                continue
            break
        except Exception:
            log.exception("[DAG TEST] Error running task %s", ti)
            if ti.state not in FINISHED_STATES:
                ti.set_state(TaskInstanceState.FAILED)
                taskrun_result = None
                break
            raise
    log.info("[DAG TEST] end task task_id=%s map_index=%s", ti.task_id, ti.map_index)
    return taskrun_result
def _run_inline_trigger(trigger, task_sdk_ti):
    # Run the deferred task's trigger synchronously in this process (used by
    # dag.test) and return whatever event the supervisor reports.
    from airflow.sdk.execution_time.supervisor import InProcessTestSupervisor

    return InProcessTestSupervisor.run_trigger_in_process(trigger=trigger, ti=task_sdk_ti)
# Since we define all the attributes of the class with attrs, we can compute this statically at parse time
# (assigned post-class-body because attrs.fields() needs the finished class;
# read back via DAG.get_serialized_fields()).
DAG._DAG__serialized_fields = frozenset(a.name for a in attrs.fields(DAG)) - {  # type: ignore[attr-defined]
    "schedule_asset_references",
    "schedule_asset_alias_references",
    "task_outlet_asset_references",
    "_old_context_manager_dags",
    "safe_dag_id",
    "last_loaded",
    "user_defined_filters",
    "user_defined_macros",
    "partial",
    "params",
    "_log",
    "task_dict",
    "template_searchpath",
    "sla_miss_callback",
    "on_success_callback",
    "on_failure_callback",
    "template_undefined",
    "jinja_environment_kwargs",
    # has_on_*_callback are only stored if the value is True, as the default is False
    "has_on_success_callback",
    "has_on_failure_callback",
    "auto_register",
    "schedule",
}
if TYPE_CHECKING:
    # Typing-only overloads for the ``@dag`` decorator; the runtime
    # implementation is defined below outside this guard.
    # NOTE: Please keep the list of arguments in sync with DAG.__init__.
    # Only exception: dag_id here should have a default value, but not in DAG.
    @overload
    def dag(
        dag_id: str = "",
        *,
        description: str | None = None,
        schedule: ScheduleArg = None,
        start_date: datetime | None = None,
        end_date: datetime | None = None,
        template_searchpath: str | Iterable[str] | None = None,
        template_undefined: type[jinja2.StrictUndefined] = jinja2.StrictUndefined,
        user_defined_macros: dict | None = None,
        user_defined_filters: dict | None = None,
        default_args: dict[str, Any] | None = None,
        max_active_tasks: int = ...,
        max_active_runs: int = ...,
        max_consecutive_failed_dag_runs: int = ...,
        dagrun_timeout: timedelta | None = None,
        catchup: bool = ...,
        on_success_callback: None | DagStateChangeCallback | list[DagStateChangeCallback] = None,
        on_failure_callback: None | DagStateChangeCallback | list[DagStateChangeCallback] = None,
        deadline: list[DeadlineAlert] | DeadlineAlert | None = None,
        doc_md: str | None = None,
        params: ParamsDict | dict[str, Any] | None = None,
        access_control: dict[str, dict[str, Collection[str]]] | dict[str, Collection[str]] | None = None,
        is_paused_upon_creation: bool | None = None,
        jinja_environment_kwargs: dict | None = None,
        render_template_as_native_obj: bool = False,
        tags: Collection[str] | None = None,
        owner_links: dict[str, str] | None = None,
        auto_register: bool = True,
        fail_fast: bool = False,
        dag_display_name: str | None = None,
        disable_bundle_versioning: bool = False,
    ) -> Callable[[Callable], Callable[..., DAG]]:
        """
        Python dag decorator which wraps a function into an Airflow Dag.

        Accepts the same keyword arguments as :class:`DAG`; they are forwarded
        to the DAG constructor. Can be used to parameterize Dags.
        """

    @overload
    def dag(func: Callable[..., DAG]) -> Callable[..., DAG]:
        """Python dag decorator to use without any arguments."""
def dag(dag_id_or_func=None, __DAG_class=DAG, __warnings_stacklevel_delta=2, **decorator_kwargs):
from airflow.sdk.definitions._internal.decorators import fixup_decorator_warning_stack
# TODO: Task-SDK: remove __DAG_class
# __DAG_class is a temporary hack to allow the dag decorator in airflow.models.dag to continue to
# return SchedulerDag objects
DAG = __DAG_class
def wrapper(f: Callable) -> Callable[..., DAG]:
# Determine dag_id: prioritize keyword arg, then positional string, fallback to function name
if "dag_id" in decorator_kwargs:
dag_id = decorator_kwargs.pop("dag_id", "")
elif isinstance(dag_id_or_func, str) and dag_id_or_func.strip():
dag_id = dag_id_or_func
else:
dag_id = f.__name__
@functools.wraps(f)
def factory(*args, **kwargs):
# Generate signature for decorated function and bind the arguments when called
# we do this to extract parameters, so we can annotate them on the DAG object.
# In addition, this fails if we are missing any args/kwargs with TypeError as expected.
f_sig = signature(f).bind(*args, **kwargs)
# Apply defaults to capture default values if set.
f_sig.apply_defaults()
# Initialize Dag with bound arguments
with DAG(dag_id, **decorator_kwargs) as dag_obj:
# Set Dag documentation from function documentation if it exists and doc_md is not set.
if f.__doc__ and not dag_obj.doc_md:
dag_obj.doc_md = f.__doc__
# Generate DAGParam for each function arg/kwarg and replace it for calling the function.
# All args/kwargs for function will be DAGParam object and replaced on execution time.
f_kwargs = {}
for name, value in f_sig.arguments.items():
f_kwargs[name] = dag_obj.param(name, value)
# set file location to caller source path
back = sys._getframe().f_back
dag_obj.fileloc = back.f_code.co_filename if back else ""
# Invoke function to create operators in the Dag scope.
f(**f_kwargs)
# Return dag object such that it's accessible in Globals.
return dag_obj
# Ensure that warnings from inside DAG() are emitted from the caller, not here
fixup_decorator_warning_stack(factory)
return factory
if callable(dag_id_or_func) and not isinstance(dag_id_or_func, str):
return wrapper(dag_id_or_func)
return wrapper
| DAG |
python | walkccc__LeetCode | solutions/294. Flip Game II/294.py | {
"start": 0,
"end": 465
} | class ____:
@functools.lru_cache(None)
def canWin(self, currentState: str) -> bool:
# If any of currentState[i:i + 2] == "++" and your friend can't win after
# changing currentState[i:i + 2] to "--" (or "-"), then you can win.
return any(True
for i, (a, b) in enumerate(zip(currentState, currentState[1:]))
if a == '+' and b == '+' and
not self.canWin(currentState[:i] + '-' + currentState[i + 2:]))
| Solution |
python | astropy__astropy | astropy/units/format/base.py | {
"start": 7628,
"end": 11818
} | class ____:
"""Provides private methods used in the formats that parse units."""
_deprecated_units: ClassVar[frozenset[str]] = frozenset()
@classmethod
def _do_parse(cls, s: str, debug: bool = False) -> UnitBase:
try:
return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
except ValueError as e:
if str(e):
raise
else:
raise ValueError(f"Syntax error parsing unit '{s}'")
@classmethod
def _get_unit(cls, t: LexToken) -> UnitBase:
try:
return cls._validate_unit(t.value)
except KeyError:
registry = get_current_unit_registry()
if t.value in registry.aliases:
return registry.aliases[t.value]
raise ValueError(
f"At col {t.lexpos}, {cls._invalid_unit_error_message(t.value)}"
) from None
@classmethod
def _fix_deprecated(cls, x: str) -> list[str]:
return [x + " (deprecated)" if x in cls._deprecated_units else x]
@classmethod
def _did_you_mean_units(cls, unit: str) -> str:
"""
A wrapper around `astropy.utils.misc.did_you_mean` that deals with
the display of deprecated units.
Parameters
----------
unit : str
The invalid unit string
Returns
-------
msg : str
A message with alternatives, or the empty string.
"""
return did_you_mean(unit, cls._units, fix=cls._fix_deprecated)
@classmethod
def _validate_unit(
cls, s: str, deprecations: DeprecatedUnitAction = DeprecatedUnitAction.WARN
) -> UnitBase:
if s in cls._deprecated_units:
alternative = (
unit.represents if isinstance(unit := cls._units[s], Unit) else None
)
msg = f"The unit {s!r} has been deprecated in the {cls.__name__} standard."
if alternative:
msg += f" Suggested: {cls.to_string(alternative)}."
match DeprecatedUnitAction(deprecations):
case DeprecatedUnitAction.CONVERT:
if alternative:
return alternative
warnings.warn(
msg + " It cannot be automatically converted.", UnitsWarning
)
case DeprecatedUnitAction.WARN:
warnings.warn(msg, UnitsWarning)
case DeprecatedUnitAction.RAISE:
raise UnitsError(msg)
case DeprecatedUnitAction.SILENT:
pass
case _:
assert_never(deprecations)
return cls._units[s]
@classmethod
def _invalid_unit_error_message(cls, unit: str) -> str:
return (
f"Unit '{unit}' not supported by the {cls.__name__} standard. "
+ cls._did_you_mean_units(unit)
)
@classmethod
def _decompose_to_known_units(
cls,
unit: CompositeUnit | NamedUnit,
deprecations: DeprecatedUnitAction = DeprecatedUnitAction.WARN,
) -> UnitBase:
"""
Partially decomposes a unit so it is only composed of units that
are "known" to a given format.
"""
if isinstance(unit, CompositeUnit):
return CompositeUnit(
unit.scale,
[
cls._decompose_to_known_units(base, deprecations)
for base in unit.bases
],
unit.powers,
_error_check=False,
)
if isinstance(unit, NamedUnit):
name = unit._get_format_name(cls.name)
try:
return cls._validate_unit(name, deprecations=deprecations)
except KeyError:
if isinstance(unit, Unit):
return cls._decompose_to_known_units(unit._represents, deprecations)
raise ValueError(cls._invalid_unit_error_message(name)) from None
raise TypeError(
f"unit argument must be a 'NamedUnit' or 'CompositeUnit', not {type(unit)}"
)
| _ParsingFormatMixin |
python | SmileyChris__easy-thumbnails | easy_thumbnails/tests/test_templatetags.py | {
"start": 9568,
"end": 10687
} | class ____(Base):
restore_settings = ['THUMBNAIL_ALIASES', 'THUMBNAIL_MEDIA_ROOT']
def setUp(self):
super().setUp()
settings.THUMBNAIL_MEDIA_ROOT = self.storage.path('')
settings.THUMBNAIL_ALIASES = {
'': {
'small': {'size': (20, 20), 'crop': True},
},
}
alias.aliases.populate_from_settings()
# Make the temporary storage location the default storage for now.
self._old_default_storage = django_storage.default_storage._wrapped
django_storage.default_storage._wrapped = self.storage
self._old_thumbnail_default_storage = storage.thumbnail_default_storage
storage.thumbnail_default_storage = self.storage
def tearDown(self):
# Put the default storage back how we found it.
storage.thumbnail_default_storage = self._old_thumbnail_default_storage
django_storage.default_storage._wrapped = self._old_default_storage
super().tearDown()
# Repopulate the aliases (setting reverted by super)
alias.aliases.populate_from_settings()
| ThumbnailerBase |
python | Textualize__textual | tests/test_disabled.py | {
"start": 2865,
"end": 4664
} | class ____(App[None]):
"""App for regression test for https://github.com/Textualize/textual/issues/2772."""
def compose(self) -> ComposeResult:
with Vertical():
with Vertical():
yield Button()
yield Checkbox()
yield DataTable()
yield DirectoryTree(".")
yield Input()
with ListView():
yield ListItem(Label("one"))
yield ListItem(Label("two"))
yield ListItem(Label("three"))
yield OptionList("one", "two", "three")
with RadioSet():
yield RadioButton("one")
yield RadioButton("two")
yield RadioButton("three")
yield Select([("one", 1), ("two", 2), ("three", 3)])
yield Switch()
def on_mount(self):
dt = self.query_one(DataTable)
dt.add_columns("one", "two", "three")
dt.add_rows([["a", "b", "c"], ["d", "e", "f"], ["g", "h", "i"]])
@pytest.mark.parametrize(
"widget",
[
Button,
Checkbox,
DataTable,
DirectoryTree,
Input,
ListView,
OptionList,
RadioSet,
Select,
Switch,
],
)
async def test_children_loses_focus_if_container_is_disabled(widget):
"""Regression test for https://github.com/Textualize/textual/issues/2772."""
app = ChildrenNoFocusDisabledContainer()
async with app.run_test() as pilot:
app.query(widget).first().focus()
await pilot.pause()
assert isinstance(app.focused, widget)
app.query(Vertical).first().disabled = True
await pilot.pause()
assert app.focused is None
| ChildrenNoFocusDisabledContainer |
python | coleifer__peewee | playhouse/kv.py | {
"start": 340,
"end": 5608
} | class ____(object):
"""
Persistent dictionary.
:param Field key_field: field to use for key. Defaults to CharField.
:param Field value_field: field to use for value. Defaults to PickleField.
:param bool ordered: data should be returned in key-sorted order.
:param Database database: database where key/value data is stored.
:param str table_name: table name for data.
"""
def __init__(self, key_field=None, value_field=None, ordered=False,
database=None, table_name='keyvalue'):
if key_field is None:
key_field = CharField(max_length=255, primary_key=True)
if not key_field.primary_key:
raise ValueError('key_field must have primary_key=True.')
if value_field is None:
value_field = PickleField()
self._key_field = key_field
self._value_field = value_field
self._ordered = ordered
self._database = database or SqliteExtDatabase(':memory:')
self._table_name = table_name
support_on_conflict = (isinstance(self._database, PostgresqlDatabase) or
(isinstance(self._database, SqliteDatabase) and
self._database.server_version >= (3, 24)))
if support_on_conflict:
self.upsert = self._postgres_upsert
self.update = self._postgres_update
else:
self.upsert = self._upsert
self.update = self._update
self.model = self.create_model()
self.key = self.model.key
self.value = self.model.value
# Ensure table exists.
self.model.create_table()
def create_model(self):
class KeyValue(Model):
key = self._key_field
value = self._value_field
class Meta:
database = self._database
table_name = self._table_name
return KeyValue
def query(self, *select):
query = self.model.select(*select).tuples()
if self._ordered:
query = query.order_by(self.key)
return query
def convert_expression(self, expr):
if not isinstance(expr, Expression):
return (self.key == expr), True
return expr, False
def __contains__(self, key):
expr, _ = self.convert_expression(key)
return self.model.select().where(expr).exists()
def __len__(self):
return len(self.model)
def __getitem__(self, expr):
converted, is_single = self.convert_expression(expr)
query = self.query(self.value).where(converted)
item_getter = operator.itemgetter(0)
result = [item_getter(row) for row in query]
if len(result) == 0 and is_single:
raise KeyError(expr)
elif is_single:
return result[0]
return result
def _upsert(self, key, value):
(self.model
.insert(key=key, value=value)
.on_conflict('replace')
.execute())
def _postgres_upsert(self, key, value):
(self.model
.insert(key=key, value=value)
.on_conflict(conflict_target=[self.key],
preserve=[self.value])
.execute())
def __setitem__(self, expr, value):
if isinstance(expr, Expression):
self.model.update(value=value).where(expr).execute()
else:
self.upsert(expr, value)
def __delitem__(self, expr):
converted, _ = self.convert_expression(expr)
self.model.delete().where(converted).execute()
def __iter__(self):
return iter(self.query().execute())
def keys(self):
return map(operator.itemgetter(0), self.query(self.key))
def values(self):
return map(operator.itemgetter(0), self.query(self.value))
def items(self):
return iter(self.query().execute())
def _update(self, __data=None, **mapping):
if __data is not None:
mapping.update(__data)
return (self.model
.insert_many(list(mapping.items()),
fields=[self.key, self.value])
.on_conflict('replace')
.execute())
def _postgres_update(self, __data=None, **mapping):
if __data is not None:
mapping.update(__data)
return (self.model
.insert_many(list(mapping.items()),
fields=[self.key, self.value])
.on_conflict(conflict_target=[self.key],
preserve=[self.value])
.execute())
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def pop(self, key, default=Sentinel):
with self._database.atomic():
try:
result = self[key]
except KeyError:
if default is Sentinel:
raise
return default
del self[key]
return result
def clear(self):
self.model.delete().execute()
| KeyValue |
python | graphql-python__graphene | graphene/types/tests/test_mountedtype.py | {
"start": 57,
"end": 650
} | class ____(Field):
def __init__(self, *args, **kwargs):
self.metadata = kwargs.pop("metadata", None)
super(CustomField, self).__init__(*args, **kwargs)
def test_mounted_type():
unmounted = String()
mounted = Field.mounted(unmounted)
assert isinstance(mounted, Field)
assert mounted.type == String
def test_mounted_type_custom():
unmounted = String(metadata={"hey": "yo!"})
mounted = CustomField.mounted(unmounted)
assert isinstance(mounted, CustomField)
assert mounted.type == String
assert mounted.metadata == {"hey": "yo!"}
| CustomField |
python | pyqtgraph__pyqtgraph | pyqtgraph/icons/__init__.py | {
"start": 141,
"end": 1603
} | class ____:
"""An icon place holder for lazy loading of QIcons
The icon must reside in the icons folder and the path refers to the full
name including suffix of the icon file, e.g.:
tiny = GraphIcon("tiny.png")
Icons can be later retrieved via the function `getGraphIcon` and providing
the name:
tiny = getGraphIcon("tiny")
"""
def __init__(self, path):
self._path = path
name = path.split('.')[0]
_ICON_REGISTRY[name] = self
self._icon = None
def _build_qicon(self):
icon = QtGui.QIcon(op.join(op.dirname(__file__), self._path))
name = self._path.split('.')[0]
_ICON_REGISTRY[name] = icon
self._icon = icon
@property
def qicon(self):
if self._icon is None:
self._build_qicon()
return self._icon
def getGraphIcon(name):
"""Return a `PyQtGraph` icon from the registry by `name`"""
icon = _ICON_REGISTRY[name]
if isinstance(icon, GraphIcon):
icon = icon.qicon
_ICON_REGISTRY[name] = icon
return icon
def getGraphPixmap(name, size=(20, 20)):
"""Return a `QPixmap` from the registry by `name`"""
icon = getGraphIcon(name)
return icon.pixmap(*size)
# Note: List all graph icons here ...
auto = GraphIcon("auto.png")
ctrl = GraphIcon("ctrl.png")
default = GraphIcon("default.png")
invisibleEye = GraphIcon("invisibleEye.svg")
lock = GraphIcon("lock.png")
| GraphIcon |
python | getsentry__sentry | tests/sentry/incidents/test_logic.py | {
"start": 14866,
"end": 32995
} | class ____(TestCase, BaseIncidentsTest):
def setUp(self) -> None:
super().setUp()
class _DynamicMetricAlertSettings(TypedDict):
name: str
query: str
aggregate: str
time_window: int
threshold_type: AlertRuleThresholdType
threshold_period: int
event_types: list[SnubaQueryEventType.EventType]
detection_type: AlertRuleDetectionType
sensitivity: AlertRuleSensitivity
seasonality: AlertRuleSeasonality
self.dynamic_metric_alert_settings: _DynamicMetricAlertSettings = {
"name": "hello",
"query": "level:error",
"aggregate": "count(*)",
"time_window": 30,
"threshold_type": AlertRuleThresholdType.ABOVE,
"threshold_period": 1,
"event_types": [SnubaQueryEventType.EventType.ERROR],
"detection_type": AlertRuleDetectionType.DYNAMIC,
"sensitivity": AlertRuleSensitivity.LOW,
"seasonality": AlertRuleSeasonality.AUTO,
}
def test_create_alert_rule(self) -> None:
name = "hello"
query = "level:error"
aggregate = "count(*)"
time_window = 10
threshold_type = AlertRuleThresholdType.ABOVE
resolve_threshold = 10
threshold_period = 1
event_types = [SnubaQueryEventType.EventType.ERROR]
alert_rule = create_alert_rule(
self.organization,
[self.project],
name,
query,
aggregate,
time_window,
threshold_type,
threshold_period,
resolve_threshold=resolve_threshold,
event_types=event_types,
)
assert alert_rule.name == name
assert alert_rule.user_id is None
assert alert_rule.team_id is None
assert alert_rule.status == AlertRuleStatus.PENDING.value
if alert_rule.snuba_query.subscriptions.exists():
assert alert_rule.snuba_query.subscriptions.get().project == self.project
assert alert_rule.snuba_query.subscriptions.all().count() == 1
assert alert_rule.snuba_query.type == SnubaQuery.Type.ERROR.value
assert alert_rule.snuba_query.dataset == Dataset.Events.value
assert alert_rule.snuba_query.query == query
assert alert_rule.snuba_query.aggregate == aggregate
assert alert_rule.snuba_query.time_window == time_window * 60
assert alert_rule.snuba_query.resolution == DEFAULT_ALERT_RULE_RESOLUTION * 60
assert set(alert_rule.snuba_query.event_types) == set(event_types)
assert alert_rule.threshold_type == threshold_type.value
assert alert_rule.resolve_threshold == resolve_threshold
assert alert_rule.threshold_period == threshold_period
assert alert_rule.projects.all().count() == 1
def test_ignore(self) -> None:
name = "hello"
query = "status:unresolved"
aggregate = "count(*)"
time_window = 10
threshold_type = AlertRuleThresholdType.ABOVE
resolve_threshold = 10
threshold_period = 1
event_types = [SnubaQueryEventType.EventType.ERROR]
alert_rule = create_alert_rule(
self.organization,
[self.project],
name,
query,
aggregate,
time_window,
threshold_type,
threshold_period,
resolve_threshold=resolve_threshold,
event_types=event_types,
)
assert alert_rule.snuba_query.subscriptions.get().project == self.project
assert alert_rule.name == name
assert alert_rule.user_id is None
assert alert_rule.team_id is None
assert alert_rule.status == AlertRuleStatus.PENDING.value
assert alert_rule.snuba_query.subscriptions.all().count() == 1
assert alert_rule.snuba_query.type == SnubaQuery.Type.ERROR.value
assert alert_rule.snuba_query.dataset == Dataset.Events.value
assert alert_rule.snuba_query.query == query
assert alert_rule.snuba_query.aggregate == aggregate
assert alert_rule.snuba_query.time_window == time_window * 60
assert alert_rule.snuba_query.resolution == DEFAULT_ALERT_RULE_RESOLUTION * 60
assert set(alert_rule.snuba_query.event_types) == set(event_types)
assert alert_rule.threshold_type == threshold_type.value
assert alert_rule.resolve_threshold == resolve_threshold
assert alert_rule.threshold_period == threshold_period
def test_release_version(self) -> None:
name = "hello"
query = "release.version:1.2.3"
aggregate = "count(*)"
time_window = 10
threshold_type = AlertRuleThresholdType.ABOVE
resolve_threshold = 10
threshold_period = 1
event_types = [SnubaQueryEventType.EventType.ERROR]
alert_rule = create_alert_rule(
self.organization,
[self.project],
name,
query,
aggregate,
time_window,
threshold_type,
threshold_period,
resolve_threshold=resolve_threshold,
event_types=event_types,
)
assert alert_rule.snuba_query.subscriptions.get().project == self.project
assert alert_rule.name == name
assert alert_rule.user_id is None
assert alert_rule.team_id is None
assert alert_rule.status == AlertRuleStatus.PENDING.value
assert alert_rule.snuba_query.subscriptions.all().count() == 1
assert alert_rule.snuba_query.type == SnubaQuery.Type.ERROR.value
assert alert_rule.snuba_query.dataset == Dataset.Events.value
assert alert_rule.snuba_query.query == query
assert alert_rule.snuba_query.aggregate == aggregate
assert alert_rule.snuba_query.time_window == time_window * 60
assert alert_rule.snuba_query.resolution == DEFAULT_ALERT_RULE_RESOLUTION * 60
assert set(alert_rule.snuba_query.event_types) == set(event_types)
assert alert_rule.threshold_type == threshold_type.value
assert alert_rule.resolve_threshold == resolve_threshold
assert alert_rule.threshold_period == threshold_period
def test_alert_rule_owner(self) -> None:
alert_rule_1 = create_alert_rule(
self.organization,
[self.project],
"alert rule 1",
"level:error",
"count()",
1,
AlertRuleThresholdType.ABOVE,
1,
owner=Actor.from_identifier(self.user.id),
)
assert alert_rule_1.user_id == self.user.id
assert alert_rule_1.team_id is None
alert_rule_2 = create_alert_rule(
self.organization,
[self.project],
"alert rule 2",
"level:error",
"count()",
1,
AlertRuleThresholdType.ABOVE,
1,
owner=Actor.from_identifier(f"team:{self.team.id}"),
)
assert alert_rule_2.user_id is None
assert alert_rule_2.team_id == self.team.id
def test_comparison_delta(self) -> None:
comparison_delta = 60
alert_rule = create_alert_rule(
self.organization,
[self.project],
"alert rule 1",
"level:error",
"count()",
1,
AlertRuleThresholdType.ABOVE,
1,
comparison_delta=comparison_delta,
detection_type=AlertRuleDetectionType.PERCENT,
)
assert alert_rule.snuba_query.subscriptions.get().project == self.project
assert alert_rule.comparison_delta == comparison_delta * 60
assert (
alert_rule.snuba_query.resolution == DEFAULT_CMP_ALERT_RULE_RESOLUTION_MULTIPLIER * 60
)
def test_performance_metric_alert(self) -> None:
alert_rule = create_alert_rule(
self.organization,
[self.project],
"performance alert",
"",
"count()",
1,
AlertRuleThresholdType.ABOVE,
1,
query_type=SnubaQuery.Type.PERFORMANCE,
dataset=Dataset.PerformanceMetrics,
)
assert alert_rule.snuba_query.type == SnubaQuery.Type.PERFORMANCE.value
assert alert_rule.snuba_query.dataset == Dataset.PerformanceMetrics.value
@patch("sentry.incidents.logic.schedule_update_project_config")
def test_on_demand_metric_alert(self, mocked_schedule_update_project_config: MagicMock) -> None:
alert_rule = create_alert_rule(
self.organization,
[self.project],
"custom metric alert",
"transaction.duration:>=1000",
"count()",
1,
AlertRuleThresholdType.ABOVE,
1,
query_type=SnubaQuery.Type.PERFORMANCE,
dataset=Dataset.Metrics,
)
mocked_schedule_update_project_config.assert_called_once_with(alert_rule, [self.project])
def test_create_alert_resolution_load_shedding(self) -> None:
time_window = 1440
alert_rule = create_alert_rule(
self.organization,
[self.project],
"custom metric alert",
"transaction.duration:>=1000",
"count()",
time_window,
AlertRuleThresholdType.ABOVE,
1440,
query_type=SnubaQuery.Type.PERFORMANCE,
dataset=Dataset.Metrics,
)
assert (
alert_rule.snuba_query.resolution
== DEFAULT_ALERT_RULE_WINDOW_TO_RESOLUTION[time_window] * 60
)
def test_create_alert_load_shedding_comparison(self) -> None:
time_window = 1440
alert_rule = create_alert_rule(
self.organization,
[self.project],
"custom metric alert",
"transaction.duration:>=1000",
"count()",
time_window,
AlertRuleThresholdType.ABOVE,
1440,
query_type=SnubaQuery.Type.PERFORMANCE,
dataset=Dataset.Metrics,
comparison_delta=60,
detection_type=AlertRuleDetectionType.PERCENT,
)
assert (
alert_rule.snuba_query.resolution
== DEFAULT_ALERT_RULE_WINDOW_TO_RESOLUTION[time_window]
* 60
* DEFAULT_CMP_ALERT_RULE_RESOLUTION_MULTIPLIER
)
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_create_alert_rule_anomaly_detection(self, mock_seer_request: MagicMock) -> None:
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
two_weeks_ago = before_now(days=14).replace(hour=10, minute=0, second=0, microsecond=0)
self.create_event(two_weeks_ago + timedelta(minutes=1))
self.create_event(two_weeks_ago + timedelta(days=10))
alert_rule = create_alert_rule(
self.organization,
[self.project],
**self.dynamic_metric_alert_settings,
)
assert mock_seer_request.call_count == 1
call_args_str = mock_seer_request.call_args_list[0].kwargs["body"].decode("utf-8")
assert json.loads(call_args_str)["alert"] == {
"id": alert_rule.id,
"source_id": alert_rule.snuba_query.subscriptions.get().id,
"source_type": 1,
}
assert alert_rule.name == self.dynamic_metric_alert_settings["name"]
assert alert_rule.user_id is None
assert alert_rule.team_id is None
assert alert_rule.status == AlertRuleStatus.PENDING.value
assert alert_rule.sensitivity == self.dynamic_metric_alert_settings["sensitivity"]
assert alert_rule.seasonality == self.dynamic_metric_alert_settings["seasonality"]
assert alert_rule.detection_type == AlertRuleDetectionType.DYNAMIC
assert alert_rule.snuba_query.subscriptions.get().project == self.project
assert alert_rule.snuba_query.subscriptions.all().count() == 1
assert alert_rule.snuba_query.type == SnubaQuery.Type.ERROR.value
assert alert_rule.snuba_query.dataset == Dataset.Events.value
assert alert_rule.snuba_query.query == self.dynamic_metric_alert_settings["query"]
assert alert_rule.snuba_query.aggregate == self.dynamic_metric_alert_settings["aggregate"]
assert (
alert_rule.snuba_query.time_window
== self.dynamic_metric_alert_settings["time_window"] * 60
)
assert (
alert_rule.snuba_query.resolution
== self.dynamic_metric_alert_settings["time_window"] * 60
)
assert set(alert_rule.snuba_query.event_types) == set(
self.dynamic_metric_alert_settings["event_types"]
)
assert (
alert_rule.threshold_type == self.dynamic_metric_alert_settings["threshold_type"].value
)
assert alert_rule.threshold_period == self.dynamic_metric_alert_settings["threshold_period"]
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_create_alert_rule_anomaly_detection_not_enough_data(
self, mock_seer_request: MagicMock
) -> None:
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
two_days_ago = before_now(days=2).replace(hour=10, minute=0, second=0, microsecond=0)
self.create_event(two_days_ago + timedelta(minutes=1))
self.create_event(two_days_ago + timedelta(days=1))
alert_rule = create_alert_rule(
self.organization,
[self.project],
**self.dynamic_metric_alert_settings,
)
assert mock_seer_request.call_count == 1
assert alert_rule.name == self.dynamic_metric_alert_settings["name"]
assert alert_rule.status == AlertRuleStatus.NOT_ENOUGH_DATA.value
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_create_alert_rule_anomaly_detection_no_data(
self, mock_seer_request: MagicMock
) -> None:
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
# no events, so we expect _get_start_and_end to return -1, -1
alert_rule = create_alert_rule(
self.organization,
[self.project],
**self.dynamic_metric_alert_settings,
)
assert mock_seer_request.call_count == 1
assert alert_rule.name == self.dynamic_metric_alert_settings["name"]
assert alert_rule.status == AlertRuleStatus.NOT_ENOUGH_DATA.value
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
@patch("sentry.seer.anomaly_detection.store_data.logger")
def test_create_alert_rule_anomaly_detection_seer_timeout_max_retry(
self, mock_logger, mock_seer_request
):
mock_seer_request.side_effect = TimeoutError
with pytest.raises(TimeoutError):
create_alert_rule(
self.organization,
[self.project],
**self.dynamic_metric_alert_settings,
)
assert not AlertRule.objects.filter(detection_type=AlertRuleDetectionType.DYNAMIC).exists()
assert not SnubaQuery.objects.filter(
aggregate=self.dynamic_metric_alert_settings["aggregate"],
query=self.dynamic_metric_alert_settings["query"],
time_window=self.dynamic_metric_alert_settings["time_window"],
).exists()
assert mock_logger.warning.call_count == 1
assert mock_seer_request.call_count == 1
mock_seer_request.reset_mock()
mock_logger.reset_mock()
mock_seer_request.side_effect = MaxRetryError(
seer_anomaly_detection_connection_pool, SEER_ANOMALY_DETECTION_STORE_DATA_URL
)
with pytest.raises(TimeoutError):
create_alert_rule(
self.organization,
[self.project],
**self.dynamic_metric_alert_settings,
)
assert not AlertRule.objects.filter(detection_type=AlertRuleDetectionType.DYNAMIC).exists()
assert not SnubaQuery.objects.filter(
aggregate=self.dynamic_metric_alert_settings["aggregate"],
query=self.dynamic_metric_alert_settings["query"],
time_window=self.dynamic_metric_alert_settings["time_window"],
).exists()
assert mock_logger.warning.call_count == 1
assert mock_seer_request.call_count == 1
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_create_alert_rule_anomaly_detection_no_feature(
self, mock_seer_request: MagicMock
) -> None:
with pytest.raises(ResourceDoesNotExist):
create_alert_rule(
self.organization,
[self.project],
**self.dynamic_metric_alert_settings,
)
assert not AlertRule.objects.filter(detection_type=AlertRuleDetectionType.DYNAMIC).exists()
assert not SnubaQuery.objects.filter(
aggregate=self.dynamic_metric_alert_settings["aggregate"],
query=self.dynamic_metric_alert_settings["query"],
time_window=self.dynamic_metric_alert_settings["time_window"],
).exists()
assert mock_seer_request.call_count == 0
| CreateAlertRuleTest |
python | wandb__wandb | wandb/vendor/pygments/styles/autumn.py | {
"start": 417,
"end": 2144
} | class ____(Style):
"""
A colorful style, inspired by the terminal highlighting style.
"""
default_style = ""
styles = {
Whitespace: '#bbbbbb',
Comment: 'italic #aaaaaa',
Comment.Preproc: 'noitalic #4c8317',
Comment.Special: 'italic #0000aa',
Keyword: '#0000aa',
Keyword.Type: '#00aaaa',
Operator.Word: '#0000aa',
Name.Builtin: '#00aaaa',
Name.Function: '#00aa00',
Name.Class: 'underline #00aa00',
Name.Namespace: 'underline #00aaaa',
Name.Variable: '#aa0000',
Name.Constant: '#aa0000',
Name.Entity: 'bold #800',
Name.Attribute: '#1e90ff',
Name.Tag: 'bold #1e90ff',
Name.Decorator: '#888888',
String: '#aa5500',
String.Symbol: '#0000aa',
String.Regex: '#009999',
Number: '#009999',
Generic.Heading: 'bold #000080',
Generic.Subheading: 'bold #800080',
Generic.Deleted: '#aa0000',
Generic.Inserted: '#00aa00',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: '#F00 bg:#FAA'
}
| AutumnStyle |
python | fsspec__filesystem_spec | fsspec/tests/test_utils.py | {
"start": 13539,
"end": 14167
} | class ____:
def __init__(self):
self.path = "foo"
@pytest.mark.parametrize(
"path,expected",
[
# coerce to string
("foo", "foo"),
(Path("foo"), "foo"),
(PurePath("foo"), "foo"),
(_HasFspath(), "foo"),
(_HasPathAttr(), "foo"),
# passthrough
(b"bytes", b"bytes"),
(None, None),
(1, 1),
(True, True),
(o := object(), o),
([], []),
((), ()),
(set(), set()),
],
)
def test_stringify_path(path, expected):
path = fsspec.utils.stringify_path(path)
assert path == expected
| _HasPathAttr |
python | ray-project__ray | python/ray/tests/test_runtime_env_packaging.py | {
"start": 3962,
"end": 5100
} | class ____:
def test_invalid_file(self):
with pytest.raises(ValueError):
get_uri_for_file("/does/not/exist.py")
with pytest.raises(ValueError):
get_uri_for_file("does/not/exist.py")
def test_determinism(self, random_file):
# Check that it's deterministic for same data.
uris = {get_uri_for_file(str(random_file)) for _ in range(10)}
assert len(uris) == 1
# Append one line, should be different now.
with open(random_file, "a") as f:
f.write(random_string())
assert {get_uri_for_file(str(random_file))} != uris
def test_relative_paths(self, random_file):
# Check that relative or absolute paths result in the same URI.
p = Path(random_file)
relative_uri = get_uri_for_file(os.path.relpath(p))
absolute_uri = get_uri_for_file(str(p.resolve()))
assert relative_uri == absolute_uri
def test_uri_hash_length(self, random_file):
uri = get_uri_for_file(str(random_file))
hex_hash = uri.split("_")[-1][: -len(".zip")]
assert len(hex_hash) == 16
| TestGetURIForFile |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/rpc_test.py | {
"start": 4248,
"end": 4485
} | class ____:
def __init__(self, t):
self.t = t
def __getstate__(self):
time.sleep(self.t)
return (self.t,)
def __setstate__(self, obj):
self.t = obj[0]
time.sleep(self.t)
| SlowPickleClass |
python | pytorch__pytorch | .github/scripts/generate_pytorch_version.py | {
"start": 1465,
"end": 3607
} | class ____:
def __init__(
self,
gpu_arch_type: str,
gpu_arch_version: str,
no_build_suffix: bool,
) -> None:
self.gpu_arch_type = gpu_arch_type
self.gpu_arch_version = gpu_arch_version
self.no_build_suffix = no_build_suffix
def get_post_build_suffix(self) -> str:
if self.no_build_suffix:
return ""
if self.gpu_arch_type == "cuda":
return f"+cu{self.gpu_arch_version.replace('.', '')}"
return f"+{self.gpu_arch_type}{self.gpu_arch_version}"
def get_release_version(self) -> str:
if not get_tag():
raise NoGitTagException(
"Not on a git tag, are you sure you want a release version?"
)
return f"{get_tag()}{self.get_post_build_suffix()}"
def get_nightly_version(self) -> str:
date_str = datetime.today().strftime("%Y%m%d")
build_suffix = self.get_post_build_suffix()
return f"{get_base_version()}.dev{date_str}{build_suffix}"
def main() -> None:
parser = argparse.ArgumentParser(
description="Generate pytorch version for binary builds"
)
parser.add_argument(
"--no-build-suffix",
action="store_true",
help="Whether or not to add a build suffix typically (+cpu)",
default=strtobool(os.environ.get("NO_BUILD_SUFFIX", "False")),
)
parser.add_argument(
"--gpu-arch-type",
type=str,
help="GPU arch you are building for, typically (cpu, cuda, rocm)",
default=os.environ.get("GPU_ARCH_TYPE", "cpu"),
)
parser.add_argument(
"--gpu-arch-version",
type=str,
help="GPU arch version, typically (10.2, 4.0), leave blank for CPU",
default=os.environ.get("GPU_ARCH_VERSION", ""),
)
args = parser.parse_args()
version_obj = PytorchVersion(
args.gpu_arch_type, args.gpu_arch_version, args.no_build_suffix
)
try:
print(version_obj.get_release_version())
except NoGitTagException:
print(version_obj.get_nightly_version())
if __name__ == "__main__":
main()
| PytorchVersion |
python | facebookresearch__faiss | tests/torch_test_neural_net.py | {
"start": 5287,
"end": 7278
} | class ____(nn.Module):
"""
QINCo quantizer, built from a chain of residual quantization steps
"""
def __init__(self, d, K, L, M, h):
nn.Module.__init__(self)
self.d, self.K, self.L, self.M, self.h = d, K, L, M, h
self.codebook0 = nn.Embedding(K, d)
self.steps = []
for m in range(1, M):
step = QINCoStep(d, K, L, h)
self.add_module(f"step{m}", step)
self.steps.append(step)
def decode(self, codes):
xhat = self.codebook0(codes[:, 0])
for i, step in enumerate(self.steps):
xhat = xhat + step.decode(xhat, codes[:, i + 1])
return xhat
def encode(self, x, code0=None):
"""
Encode a batch of vectors x to codes of length M.
If this function is called from IVF-QINCo, codes are 1 index longer,
due to the first index being the IVF index, and codebook0 is the IVF codebook.
"""
M = len(self.steps) + 1
bs, d = x.shape
codes = torch.zeros(bs, M, dtype=int, device=x.device)
if code0 is None:
# at IVF training time, the code0 is fixed (and precomputed)
code0 = assign_to_codebook(x, self.codebook0.weight)
codes[:, 0] = code0
xhat = self.codebook0.weight[code0]
for i, step in enumerate(self.steps):
codes[:, i + 1], toadd = step.encode(xhat, x)
xhat = xhat + toadd
return codes, xhat
######################################################
# QINCo tests
######################################################
def copy_QINCoStep(step):
step2 = faiss.QINCoStep(step.d, step.K, step.L, step.h)
step2.codebook.from_torch(step.codebook)
step2.MLPconcat.from_torch(step.MLPconcat)
for l in range(step.L):
src = step.residual_blocks[l]
dest = step2.get_residual_block(l)
dest.linear1.from_torch(src[0])
dest.linear2.from_torch(src[2])
return step2
| QINCo |
python | sympy__sympy | sympy/functions/elementary/hyperbolic.py | {
"start": 36525,
"end": 36642
} | class ____(DefinedFunction):
"""Base class for inverse hyperbolic functions."""
pass
| InverseHyperbolicFunction |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/beta/models.py | {
"start": 6013,
"end": 11262
} | class ____(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncModelsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
"""
return AsyncModelsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncModelsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
"""
return AsyncModelsWithStreamingResponse(self)
async def retrieve(
self,
model_id: str,
*,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BetaModelInfo:
"""
Get a specific model.
The Models API response can be used to determine information about a specific
model or resolve a model alias to a model ID.
Args:
model_id: Model identifier or alias.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not model_id:
raise ValueError(f"Expected a non-empty value for `model_id` but received {model_id!r}")
extra_headers = {
**strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
**(extra_headers or {}),
}
return await self._get(
f"/v1/models/{model_id}?beta=true",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=BetaModelInfo,
)
def list(
self,
*,
after_id: str | Omit = omit,
before_id: str | Omit = omit,
limit: int | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[BetaModelInfo, AsyncPage[BetaModelInfo]]:
"""
List available models.
The Models API response can be used to determine which models are available for
use in the API. More recently released models are listed first.
Args:
after_id: ID of the object to use as a cursor for pagination. When provided, returns the
page of results immediately after this object.
before_id: ID of the object to use as a cursor for pagination. When provided, returns the
page of results immediately before this object.
limit: Number of items to return per page.
Defaults to `20`. Ranges from `1` to `1000`.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {
**strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
**(extra_headers or {}),
}
return self._get_api_list(
"/v1/models?beta=true",
page=AsyncPage[BetaModelInfo],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after_id": after_id,
"before_id": before_id,
"limit": limit,
},
model_list_params.ModelListParams,
),
),
model=BetaModelInfo,
)
| AsyncModels |
python | apache__avro | lang/py/avro/tether/tether_task.py | {
"start": 2079,
"end": 3979
} | class ____:
"""
Collector for map and reduce output values
"""
def __init__(self, scheme, outputClient):
"""
Parameters
---------------------------------------------
scheme - The scheme for the datums to output - can be a json string
- or an instance of Schema
outputClient - The output client used to send messages to the parent
"""
if not isinstance(scheme, avro.schema.Schema):
scheme = avro.schema.parse(scheme)
self.scheme = scheme
self.datum_writer = avro.io.DatumWriter(writers_schema=self.scheme)
self.outputClient = outputClient
def collect(self, record, partition=None):
"""Collect a map or reduce output value
Parameters
------------------------------------------------------
record - The record to write
partition - Indicates the partition for a pre-partitioned map output
- currently not supported
"""
# Replace the encoder and buffer every time we collect.
with io.BytesIO() as buff:
self.encoder = avro.io.BinaryEncoder(buff)
self.datum_writer.write(record, self.encoder)
value = buff.getvalue()
datum = {"datum": value}
if partition is not None:
datum["partition"] = partition
self.outputClient.request("output", datum)
def keys_are_equal(rec1, rec2, fkeys):
"""Check if the "keys" in two records are equal. The key fields
are all fields for which order isn't marked ignore.
Parameters
-------------------------------------------------------------------------
rec1 - The first record
rec2 - The second record
fkeys - A list of the fields to compare
"""
for f in fkeys:
if not (rec1[f] == rec2[f]):
return False
return True
| Collector |
python | davidhalter__jedi | jedi/inference/compiled/subprocess/__init__.py | {
"start": 8243,
"end": 13436
} | class ____:
"""
A subprocess which runs inference within a target environment.
This class manages the interface to a single instance of such a process as
well as the lifecycle of the process itself. See `.__main__` and `Listener`
for the implementation of the subprocess and details of the protocol.
A single live instance of this is maintained by `jedi.api.environment.Environment`,
so that typically a single subprocess is used at a time.
"""
is_crashed = False
def __init__(self, executable, env_vars=None):
self._executable = executable
self._env_vars = env_vars
self._inference_state_deletion_queue = collections.deque()
self._cleanup_callable = lambda: None
def __repr__(self):
pid = os.getpid()
return '<%s _executable=%r, is_crashed=%r, pid=%r>' % (
self.__class__.__name__,
self._executable,
self.is_crashed,
pid,
)
@memoize_method
def _get_process(self):
debug.dbg('Start environment subprocess %s', self._executable)
parso_path = sys.modules['parso'].__file__
args = (
self._executable,
_MAIN_PATH,
os.path.dirname(os.path.dirname(parso_path)),
'.'.join(str(x) for x in sys.version_info[:3]),
)
process = _GeneralizedPopen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self._env_vars
)
self._stderr_queue = queue.Queue()
self._stderr_thread = t = Thread(
target=_enqueue_output,
args=(process.stderr, self._stderr_queue)
)
t.daemon = True
t.start()
# Ensure the subprocess is properly cleaned up when the object
# is garbage collected.
self._cleanup_callable = weakref.finalize(self,
_cleanup_process,
process,
t)
return process
def run(self, inference_state_id, function, args=(), kwargs={}):
# Delete old inference_states.
while True:
try:
delete_id = self._inference_state_deletion_queue.pop()
except IndexError:
break
else:
self._send(delete_id, None)
assert callable(function)
return self._send(inference_state_id, function, args, kwargs)
def get_sys_path(self):
return self._send(None, functions.get_sys_path, (), {})
def _kill(self):
self.is_crashed = True
self._cleanup_callable()
def _send(self, inference_state_id, function, args=(), kwargs={}):
if self.is_crashed:
raise InternalError("The subprocess %s has crashed." % self._executable)
data = inference_state_id, function, args, kwargs
try:
pickle_dump(data, self._get_process().stdin, PICKLE_PROTOCOL)
except BrokenPipeError:
self._kill()
raise InternalError("The subprocess %s was killed. Maybe out of memory?"
% self._executable)
try:
is_exception, traceback, result = pickle_load(self._get_process().stdout)
except EOFError as eof_error:
try:
stderr = self._get_process().stderr.read().decode('utf-8', 'replace')
except Exception as exc:
stderr = '<empty/not available (%r)>' % exc
self._kill()
_add_stderr_to_debug(self._stderr_queue)
raise InternalError(
"The subprocess %s has crashed (%r, stderr=%s)." % (
self._executable,
eof_error,
stderr,
))
_add_stderr_to_debug(self._stderr_queue)
if is_exception:
# Replace the attribute error message with a the traceback. It's
# way more informative.
result.args = (traceback,)
raise result
return result
def delete_inference_state(self, inference_state_id):
"""
Indicate that an inference state (in the subprocess) is no longer
needed.
The state corresponding to the given id will become inaccessible and the
id may safely be re-used to refer to a different context.
Note: it is not guaranteed that the corresponding state will actually be
deleted immediately.
"""
# Warning: if changing the semantics of context deletion see the comment
# in `InferenceStateSubprocess.__init__` regarding potential race
# conditions.
# Currently we are not deleting the related state instantly. They only
# get deleted once the subprocess is used again. It would probably a
# better solution to move all of this into a thread. However, the memory
# usage of a single inference_state shouldn't be that high.
self._inference_state_deletion_queue.append(inference_state_id)
| CompiledSubprocess |
python | getsentry__sentry | src/sentry/web/frontend/base.py | {
"start": 29062,
"end": 31527
} | class ____(OrganizationView):
"""
Any view acting on behalf of a project should inherit from this base and the
matching URL pattern must pass 'org_slug' as well as 'project_id_or_slug'.
Three keyword arguments are added to the resulting dispatch:
- organization
- project
"""
def get_context_data(self, request: HttpRequest, organization: Organization, project: Project, **kwargs: Any) -> dict[str, Any]: # type: ignore[override]
from sentry.api.serializers import serialize
context = super().get_context_data(request, organization)
context["project"] = project
context["processing_issues"] = serialize(project).get("processingIssues", 0)
return context
def has_permission(self, request: HttpRequest, organization: Organization, project: Project | None, *args: Any, **kwargs: Any) -> bool: # type: ignore[override]
if project is None:
return False
rv = super().has_permission(request, organization)
if not rv:
return rv
teams = list(project.teams.all())
if self.required_scope:
if not any(request.access.has_team_scope(team, self.required_scope) for team in teams):
logger.info(
"User %s does not have %s permission to access project %s",
request.user,
self.required_scope,
project,
)
return False
elif not any(request.access.has_team_access(team) for team in teams):
logger.info("User %s does not have access to project %s", request.user, project)
return False
return True
def convert_args(self, request: HttpRequest, organization_slug: str, project_id_or_slug: int | str, *args: Any, **kwargs: Any) -> tuple[tuple[Any, ...], dict[str, Any]]: # type: ignore[override]
organization: Organization | None = None
active_project: Project | None = None
if self.active_organization:
organization = self._get_organization()
if organization:
active_project = self.get_active_project(
request=request,
organization=organization,
project_id_or_slug=project_id_or_slug,
)
kwargs["project"] = active_project
kwargs["organization"] = organization
return args, kwargs
| ProjectView |
python | sphinx-doc__sphinx | tests/roots/test-root/autodoc_target.py | {
"start": 234,
"end": 344
} | class ____(Exception):
"""My custom exception."""
def f(self):
"""Exception method."""
| CustomEx |
python | airbytehq__airbyte | airbyte-integrations/bases/base-normalization/normalization/transform_catalog/destination_name_transformer.py | {
"start": 2018,
"end": 15864
} | class ____:
"""
Handles naming conventions in destinations for all kind of sql identifiers:
- schema
- table
- column
"""
def __init__(self, destination_type: DestinationType):
"""
@param destination_type is the destination type of warehouse
"""
self.destination_type: DestinationType = destination_type
# Public methods
def needs_quotes(self, input_name: str) -> bool:
"""
@param input_name to test if it needs to manipulated with quotes or not
"""
if is_reserved_keyword(input_name, self.destination_type):
return True
if self.destination_type.value == DestinationType.BIGQUERY.value:
return False
if self.destination_type.value == DestinationType.ORACLE.value and input_name.startswith("_"):
return True
doesnt_start_with_alphaunderscore = match("[^A-Za-z_]", input_name[0]) is not None
contains_non_alphanumeric = match(".*[^A-Za-z0-9_].*", input_name) is not None
return doesnt_start_with_alphaunderscore or contains_non_alphanumeric
def normalize_schema_name(self, schema_name: str, in_jinja: bool = False, truncate: bool = True) -> str:
"""
@param schema_name is the schema to normalize
@param in_jinja is a boolean to specify if the returned normalized will be used inside a jinja macro or not
@param truncate force ignoring truncate operation on resulting normalized name. For example, if we don't
control how the name would be normalized
"""
if self.destination_type == DestinationType.ORACLE and schema_name.startswith("_"):
schema_name = schema_name[1:]
return self.__normalize_non_column_identifier_name(input_name=schema_name, in_jinja=in_jinja, truncate=truncate)
def normalize_table_name(
self, table_name: str, in_jinja: bool = False, truncate: bool = True, conflict: bool = False, conflict_level: int = 0
) -> str:
"""
@param table_name is the table to normalize
@param in_jinja is a boolean to specify if the returned normalized will be used inside a jinja macro or not
@param truncate force ignoring truncate operation on resulting normalized name. For example, if we don't
control how the name would be normalized
@param conflict if there is a conflict between stream name and fields
@param conflict_level is the json_path level conflict happened
"""
if self.destination_type == DestinationType.ORACLE and table_name.startswith("_"):
table_name = table_name[1:]
return self.__normalize_non_column_identifier_name(
input_name=table_name, in_jinja=in_jinja, truncate=truncate, conflict=conflict, conflict_level=conflict_level
)
def normalize_column_name(
self, column_name: str, in_jinja: bool = False, truncate: bool = True, conflict: bool = False, conflict_level: int = 0
) -> str:
"""
@param column_name is the column to normalize
@param in_jinja is a boolean to specify if the returned normalized will be used inside a jinja macro or not
@param truncate force ignoring truncate operation on resulting normalized name. For example, if we don't
control how the name would be normalized
@param conflict if there is a conflict between stream name and fields
@param conflict_level is the json_path level conflict happened
"""
return self.__normalize_identifier_name(
column_name=column_name, in_jinja=in_jinja, truncate=truncate, conflict=conflict, conflict_level=conflict_level
)
def truncate_identifier_name(self, input_name: str, custom_limit: int = -1, conflict: bool = False, conflict_level: int = 0) -> str:
"""
@param input_name is the identifier name to middle truncate
@param custom_limit uses a custom length as the max instead of the destination max length
@param conflict if there is a conflict between stream name and fields
@param conflict_level is the json_path level conflict happened
"""
limit = custom_limit - 1 if custom_limit > 0 else self.get_name_max_length()
if limit < len(input_name):
middle = round(limit / 2)
# truncate in the middle to preserve prefix/suffix instead
prefix = input_name[: limit - middle - 1]
suffix = input_name[1 - middle :]
# Add extra characters '__', signaling a truncate in identifier
print(f"Truncating {input_name} (#{len(input_name)}) to {prefix}_{suffix} (#{2 + len(prefix) + len(suffix)})")
mid = "__"
if conflict:
mid = f"_{conflict_level}"
input_name = f"{prefix}{mid}{suffix}"
return input_name
def get_name_max_length(self):
if self.destination_type.value in DESTINATION_SIZE_LIMITS:
destination_limit = DESTINATION_SIZE_LIMITS[self.destination_type.value]
return destination_limit - TRUNCATE_DBT_RESERVED_SIZE - TRUNCATE_RESERVED_SIZE
else:
raise KeyError(f"Unknown destination type {self.destination_type}")
# Private methods
def __normalize_non_column_identifier_name(
self, input_name: str, in_jinja: bool = False, truncate: bool = True, conflict: bool = False, conflict_level: int = 0
) -> str:
# We force standard naming for non column names (see issue #1785)
result = transform_standard_naming(input_name)
result = self.__normalize_naming_conventions(result, is_column=False)
if truncate:
result = self.truncate_identifier_name(input_name=result, conflict=conflict, conflict_level=conflict_level)
result = self.__normalize_identifier_case(result, is_quoted=False)
if result[0].isdigit():
if self.destination_type == DestinationType.MSSQL:
result = "_" + result
elif self.destination_type == DestinationType.ORACLE:
result = "ab_" + result
return result
def __normalize_identifier_name(
self, column_name: str, in_jinja: bool = False, truncate: bool = True, conflict: bool = False, conflict_level: int = 0
) -> str:
result = self.__normalize_naming_conventions(column_name, is_column=True)
if truncate:
result = self.truncate_identifier_name(input_name=result, conflict=conflict, conflict_level=conflict_level)
if self.needs_quotes(result):
if self.destination_type.value == DestinationType.CLICKHOUSE.value:
result = result.replace('"', "_")
result = result.replace("`", "_")
result = result.replace("'", "_")
elif (
self.destination_type.value != DestinationType.MYSQL.value
and self.destination_type.value != DestinationType.TIDB.value
and self.destination_type.value != DestinationType.DUCKDB.value
):
result = result.replace('"', '""')
else:
result = result.replace("`", "_")
result = result.replace("'", "\\'")
result = self.__normalize_identifier_case(result, is_quoted=True)
result = self.apply_quote(result)
if not in_jinja:
result = jinja_call(result)
return result
else:
result = self.__normalize_identifier_case(result, is_quoted=False)
if in_jinja:
# to refer to columns while already in jinja context, always quote
return f"'{result}'"
return result
def apply_quote(self, input: str, literal=True) -> str:
if literal:
input = f"'{input}'"
if self.destination_type == DestinationType.ORACLE:
# Oracle dbt lib doesn't implemented adapter quote yet.
return f"quote({input})"
elif self.destination_type == DestinationType.CLICKHOUSE:
return f"quote({input})"
return f"adapter.quote({input})"
def __normalize_naming_conventions(self, input_name: str, is_column: bool = False) -> str:
result = input_name
if self.destination_type.value == DestinationType.ORACLE.value:
return transform_standard_naming(result)
elif self.destination_type.value == DestinationType.BIGQUERY.value:
# Can start with number: datasetId, table
# Can not start with number: column
result = transform_standard_naming(result)
doesnt_start_with_alphaunderscore = match("[^A-Za-z_]", result[0]) is not None
if is_column and doesnt_start_with_alphaunderscore:
result = f"_{result}"
return result
def __normalize_identifier_case(self, input_name: str, is_quoted: bool = False) -> str:
result = input_name
if self.destination_type.value == DestinationType.BIGQUERY.value:
pass
elif self.destination_type.value == DestinationType.REDSHIFT.value:
# all tables (even quoted ones) are coerced to lowercase.
result = input_name.lower()
elif self.destination_type.value == DestinationType.POSTGRES.value:
if not is_quoted and not self.needs_quotes(input_name):
result = input_name.lower()
elif self.destination_type.value == DestinationType.SNOWFLAKE.value:
if not is_quoted and not self.needs_quotes(input_name):
result = input_name.upper()
elif self.destination_type.value == DestinationType.MYSQL.value:
if not is_quoted and not self.needs_quotes(input_name):
result = input_name.lower()
elif self.destination_type.value == DestinationType.MSSQL.value:
if not is_quoted and not self.needs_quotes(input_name):
result = input_name.lower()
elif self.destination_type.value == DestinationType.ORACLE.value:
if not is_quoted and not self.needs_quotes(input_name):
result = input_name.lower()
else:
result = input_name.upper()
elif self.destination_type.value == DestinationType.CLICKHOUSE.value:
pass
elif self.destination_type.value == DestinationType.TIDB.value:
if not is_quoted and not self.needs_quotes(input_name):
result = input_name.lower()
elif self.destination_type.value == DestinationType.DUCKDB.value:
if not is_quoted and not self.needs_quotes(input_name):
result = input_name.lower()
else:
raise KeyError(f"Unknown destination type {self.destination_type}")
return result
def normalize_column_identifier_case_for_lookup(self, input_name: str, is_quoted: bool = False) -> str:
"""
This function adds an additional normalization regarding the column name casing to determine if multiple columns
are in collisions. On certain destinations/settings, case sensitivity matters, in others it does not.
We separate this from standard identifier normalization "__normalize_identifier_case",
so the generated SQL queries are keeping the original casing from the catalog.
But we still need to determine if casing matters or not, thus by using this function.
"""
result = input_name
if self.destination_type.value == DestinationType.BIGQUERY.value:
# Columns are considered identical regardless of casing
result = input_name.lower()
elif self.destination_type.value == DestinationType.REDSHIFT.value:
# Columns are considered identical regardless of casing (even quoted ones)
result = input_name.lower()
elif self.destination_type.value == DestinationType.POSTGRES.value:
if not is_quoted and not self.needs_quotes(input_name):
result = input_name.lower()
elif self.destination_type.value == DestinationType.SNOWFLAKE.value:
if not is_quoted and not self.needs_quotes(input_name):
result = input_name.upper()
elif self.destination_type.value == DestinationType.MYSQL.value:
# Columns are considered identical regardless of casing (even quoted ones)
result = input_name.lower()
elif self.destination_type.value == DestinationType.MSSQL.value:
# Columns are considered identical regardless of casing (even quoted ones)
result = input_name.lower()
elif self.destination_type.value == DestinationType.ORACLE.value:
if not is_quoted and not self.needs_quotes(input_name):
result = input_name.lower()
else:
result = input_name.upper()
elif self.destination_type.value == DestinationType.CLICKHOUSE.value:
pass
elif self.destination_type.value == DestinationType.TIDB.value:
result = input_name.lower()
elif self.destination_type.value == DestinationType.DUCKDB.value:
result = input_name.lower()
else:
raise KeyError(f"Unknown destination type {self.destination_type}")
return result
# Static Functions
def transform_standard_naming(input_name: str) -> str:
result = input_name.strip()
result = strip_accents(result)
result = sub(r"\s+", "_", result)
result = sub(r"[^a-zA-Z0-9_]", "_", result)
return result
def transform_json_naming(input_name: str) -> str:
result = sub(r"['\"`]", "_", input_name)
return result
def strip_accents(input_name: str) -> str:
return "".join(c for c in ud.normalize("NFD", input_name) if ud.category(c) != "Mn")
| DestinationNameTransformer |
python | getsentry__sentry | tests/sentry/replays/endpoints/test_organization_replay_index.py | {
"start": 439,
"end": 103087
} | class ____(APITestCase, ReplaysSnubaTestCase):
endpoint = "sentry-api-0-organization-replay-index"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.url = reverse(self.endpoint, args=(self.organization.slug,))
@property
def features(self) -> dict[str, bool]:
return {"organizations:session-replay": True}
def test_feature_flag_disabled(self) -> None:
"""Test replays can be disabled."""
response = self.client.get(self.url)
assert response.status_code == 404
def test_no_projects(self) -> None:
"""Test replays must be used with a project(s)."""
with self.feature(self.features):
response = self.client.get(self.url)
assert response.status_code == 200
response_data = response.json()
assert "data" in response_data
assert response_data["data"] == []
def test_get_replays(self) -> None:
"""Test replays conform to the interchange format."""
project = self.create_project(teams=[self.team])
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(
mock_replay(
seq1_timestamp,
project.id,
replay1_id,
# NOTE: This is commented out due to a bug in CI. This will not affect
# production use and have been verfied as working as of 08/10/2022.
#
# error_ids=[uuid.uuid4().hex, replay1_id], # duplicate error-id
urls=[
"http://localhost:3000/",
"http://localhost:3000/login",
], # duplicate urls are okay,
tags={"test": "hello", "other": "hello"},
release="test",
)
)
self.store_replays(
mock_replay(
seq2_timestamp,
project.id,
replay1_id,
# error_ids=[uuid.uuid4().hex, replay1_id], # duplicate error-id
urls=["http://localhost:3000/"], # duplicate urls are okay
tags={"test": "world", "other": "hello"},
error_ids=[],
release="",
)
)
self.store_replays(
mock_replay_click(
seq2_timestamp,
project.id,
replay1_id,
node_id=1,
tag="div",
id="myid",
class_=["class1", "class2"],
component_name="SignUpForm",
role="button",
testid="1",
alt="Alt",
aria_label="AriaLabel",
title="MyTitle",
is_dead=1,
is_rage=1,
text="Hello",
release=None,
)
)
self.store_replays(
self.mock_event_links(
seq1_timestamp, project.id, "error", replay1_id, "a3a62ef6ac86415b83c2416fc2f76db1"
)
)
with self.feature(self.features):
response = self.client.get(self.url)
assert response.status_code == 200
response_data = response.json()
assert "data" in response_data
assert len(response_data["data"]) == 1
# Assert the response body matches what was expected.
expected_response = mock_expected_response(
project.id,
replay1_id,
seq1_timestamp,
seq2_timestamp,
urls=[
"http://localhost:3000/",
"http://localhost:3000/login",
"http://localhost:3000/",
],
count_segments=2,
# count_errors=3,
count_errors=1,
tags={"test": ["hello", "world"], "other": ["hello"]},
activity=4,
count_dead_clicks=1,
count_rage_clicks=1,
releases=["test"],
clicks=[
{
"click.alt": "Alt",
"click.classes": ["class1", "class2"],
"click.id": "myid",
"click.component_name": "SignUpForm",
"click.role": "button",
"click.tag": "div",
"click.testid": "1",
"click.text": "Hello",
"click.title": "MyTitle",
"click.label": "AriaLabel",
}
],
)
assert_expected_response(response_data["data"][0], expected_response)
def test_get_replays_viewed(self) -> None:
"""Test replays conform to the interchange format."""
project = self.create_project(teams=[self.team])
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
self.store_replays(
mock_replay_viewed(seq2_timestamp.timestamp(), project.id, replay1_id, self.user.id)
)
replay2_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=20)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, project.id, replay2_id))
self.store_replays(mock_replay(seq2_timestamp, project.id, replay2_id))
with self.feature(self.features):
for query in ["", "?field=id&field=has_viewed"]:
response = self.client.get(self.url + query)
assert response.status_code == 200
response_data = response.json()
assert "data" in response_data, query
assert len(response_data["data"]) == 2, query
# Assert the first replay was viewed and the second replay was not.
assert response_data["data"][0]["has_viewed"] is False, query
assert response_data["data"][0]["id"] == replay2_id, query
assert response_data["data"][1]["has_viewed"] is True, query
assert response_data["data"][1]["id"] == replay1_id, query
    def test_get_replays_browse_screen_fields(self) -> None:
        """Test replay response with fields requested by the index page in production.

        Requests the exact field set the browse screen asks for and asserts every
        field is present in the response row, plus the nested user/geo structure.
        """
        project = self.create_project(teams=[self.team])

        replay1_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)

        # Two segments of the same replay so aggregated fields (urls, tags) have
        # data from more than one row to merge.
        self.store_replays(
            mock_replay(
                seq1_timestamp,
                project.id,
                replay1_id,
                urls=[
                    "http://localhost:3000/",
                    "http://localhost:3000/login",
                ],
                tags={"test": "hello", "other": "hello"},
            )
        )
        self.store_replays(
            mock_replay(
                seq2_timestamp,
                project.id,
                replay1_id,
                urls=["http://localhost:3000/"],
                tags={"test": "world", "other": "hello"},
            )
        )

        with self.feature(self.features):
            # The field list requested by the browse (index) page in production.
            fields = [
                "activity",
                "browser",
                "count_dead_clicks",
                "count_errors",
                "count_infos",
                "count_rage_clicks",
                "count_segments",
                "count_urls",
                "count_warnings",
                "device",
                "dist",
                "duration",
                "environment",
                "error_ids",
                "finished_at",
                "has_viewed",
                "id",
                "info_ids",
                "is_archived",
                "os",
                "platform",
                "project_id",
                "releases",
                "sdk",
                "started_at",
                "tags",
                "trace_ids",
                "urls",
                "user",
                "warning_ids",
            ]
            qstr = "?" + "&".join([f"field={field}" for field in fields])
            response = self.client.get(self.url + qstr)
            assert response.status_code == 200

            response_data = response.json()
            assert "data" in response_data
            assert len(response_data["data"]) == 1
            # Exactly the requested fields come back -- no extras, no omissions.
            assert len(response_data["data"][0]) == len(fields)
            for field in fields:
                assert field in response_data["data"][0], field

            # The user object is fully expanded, including the nested geo block.
            assert len(response_data["data"][0]["user"]) == 6
            assert "id" in response_data["data"][0]["user"]
            assert "username" in response_data["data"][0]["user"]
            assert "email" in response_data["data"][0]["user"]
            assert "ip" in response_data["data"][0]["user"]
            assert "display_name" in response_data["data"][0]["user"]
            assert "geo" in response_data["data"][0]["user"]
            assert "city" in response_data["data"][0]["user"]["geo"]
            assert "country_code" in response_data["data"][0]["user"]["geo"]
            assert "region" in response_data["data"][0]["user"]["geo"]
            assert "subdivision" in response_data["data"][0]["user"]["geo"]
def test_get_replays_tags_field(self) -> None:
"""Test replay response with fields requested in production."""
project = self.create_project(teams=[self.team])
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(
mock_replay(
seq1_timestamp,
project.id,
replay1_id,
urls=[
"http://localhost:3000/",
"http://localhost:3000/login",
],
tags={"test": "hello", "other": "hello"},
)
)
self.store_replays(
mock_replay(
seq2_timestamp,
project.id,
replay1_id,
urls=["http://localhost:3000/"],
tags={"test": "world", "other": "hello"},
)
)
with self.feature(self.features):
response = self.client.get(self.url + "?field=tags")
assert response.status_code == 200
response_data = response.json()
assert "data" in response_data
assert len(response_data["data"]) == 1
assert len(response_data["data"][0]) == 1
assert "tags" in response_data["data"][0]
assert sorted(response_data["data"][0]["tags"]["test"]) == ["hello", "world"]
assert response_data["data"][0]["tags"]["other"] == ["hello"]
    def test_get_replays_minimum_field_set(self) -> None:
        """Test replay response with fields requested in production.

        Requests only the ``id`` field with a filter and a sort; asserts the row
        is filtered correctly and contains nothing beyond the requested field.
        """
        project = self.create_project(teams=[self.team])

        replay1_id = uuid.uuid4().hex
        replay2_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)

        # replay1 matches both halves of the OR query (tag test:hello, user_id 123).
        self.store_replays(
            mock_replay(
                seq2_timestamp,
                project.id,
                replay1_id,
                urls=[
                    "http://localhost:3000/",
                    "http://localhost:3000/login",
                ],
                tags={"test": "hello", "other": "hello"},
                user_id=123,
                replay_start_timestamp=int(seq1_timestamp.timestamp()),
            )
        )
        # replay2 matches neither half and must be excluded.
        self.store_replays(
            mock_replay(
                seq2_timestamp,
                project.id,
                replay2_id,
                urls=["http://localhost:3000/"],
                tags={"test": "world", "other": "hello"},
                replay_start_timestamp=int(seq1_timestamp.timestamp()),
            )
        )

        with self.feature(self.features):
            response = self.client.get(
                self.url + "?field=id&orderBy=count_errors&query=test:hello OR user_id:123"
            )
            assert response.status_code == 200

            response_data = response.json()
            assert "data" in response_data
            assert len(response_data["data"]) == 1
            # Only the single requested field is returned.
            assert len(response_data["data"][0]) == 1
            assert "id" in response_data["data"][0]
def test_get_replays_filter_environment(self) -> None:
"""Test returned replays can not partially fall outside of range."""
project = self.create_project(teams=[self.team])
self.create_environment(name="development", project=self.project)
self.create_environment(name="production", project=self.project)
replay1_id = uuid.uuid4().hex
replay2_id = uuid.uuid4().hex
timestamp0 = datetime.datetime.now() - datetime.timedelta(seconds=20)
timestamp1 = datetime.datetime.now() - datetime.timedelta(seconds=10)
self.store_replays(
mock_replay(timestamp0, project.id, replay1_id, environment="development")
)
self.store_replays(
mock_replay(timestamp1, project.id, replay1_id, environment="development")
)
self.store_replays(
mock_replay(timestamp0, project.id, replay2_id, environment="production")
)
self.store_replays(
mock_replay(timestamp1, project.id, replay2_id, environment="production")
)
with self.feature(self.features):
response = self.client.get(self.url + "?environment=development")
assert response.status_code == 200
response_data = response.json()
assert "data" in response_data
assert response_data["data"][0]["id"] == replay1_id
response = self.client.get(self.url + "?environment=production")
assert response.status_code == 200
response_data = response.json()
assert "data" in response_data
assert response_data["data"][0]["id"] == replay2_id
def test_get_replays_started_at_sorted(self) -> None:
project = self.create_project(teams=[self.team])
replay1_id = uuid.uuid4().hex
replay2_id = uuid.uuid4().hex
replay1_timestamp0 = datetime.datetime.now() - datetime.timedelta(seconds=15)
replay1_timestamp1 = datetime.datetime.now() - datetime.timedelta(seconds=5)
replay2_timestamp0 = datetime.datetime.now() - datetime.timedelta(seconds=10)
replay2_timestamp1 = datetime.datetime.now() - datetime.timedelta(seconds=2)
self.store_replays(mock_replay(replay1_timestamp0, project.id, replay1_id))
self.store_replays(mock_replay(replay1_timestamp1, project.id, replay1_id))
self.store_replays(mock_replay(replay2_timestamp0, project.id, replay2_id))
self.store_replays(mock_replay(replay2_timestamp1, project.id, replay2_id))
with self.feature(self.features):
# Latest first.
response = self.client.get(self.url + "?orderBy=-started_at")
response_data = response.json()
assert response_data["data"][0]["id"] == replay2_id
assert response_data["data"][1]["id"] == replay1_id
# Earlist first.
response = self.client.get(self.url + "?orderBy=started_at")
response_data = response.json()
assert response_data["data"][0]["id"] == replay1_id
assert response_data["data"][1]["id"] == replay2_id
def test_get_replays_finished_at_sorted(self) -> None:
project = self.create_project(teams=[self.team])
replay1_id = uuid.uuid4().hex
replay2_id = uuid.uuid4().hex
replay1_timestamp0 = datetime.datetime.now() - datetime.timedelta(seconds=15)
replay1_timestamp1 = datetime.datetime.now() - datetime.timedelta(seconds=5)
replay2_timestamp0 = datetime.datetime.now() - datetime.timedelta(seconds=10)
replay2_timestamp1 = datetime.datetime.now() - datetime.timedelta(seconds=2)
self.store_replays(mock_replay(replay1_timestamp0, project.id, replay1_id))
self.store_replays(mock_replay(replay1_timestamp1, project.id, replay1_id))
self.store_replays(mock_replay(replay2_timestamp0, project.id, replay2_id))
self.store_replays(mock_replay(replay2_timestamp1, project.id, replay2_id))
with self.feature(self.features):
# Latest first.
response = self.client.get(self.url + "?orderBy=-finished_at")
response_data = response.json()
assert response_data["data"][0]["id"] == replay2_id
assert response_data["data"][1]["id"] == replay1_id
# Earlist first.
response = self.client.get(self.url + "?orderBy=finished_at")
response_data = response.json()
assert response_data["data"][0]["id"] == replay1_id
assert response_data["data"][1]["id"] == replay2_id
def test_get_replays_duration_sorted(self) -> None:
"""Test replays can be sorted by duration."""
project = self.create_project(teams=[self.team])
replay1_id = uuid.uuid4().hex
replay2_id = uuid.uuid4().hex
replay1_timestamp0 = datetime.datetime.now() - datetime.timedelta(seconds=15)
replay1_timestamp1 = datetime.datetime.now() - datetime.timedelta(seconds=10)
replay2_timestamp0 = datetime.datetime.now() - datetime.timedelta(seconds=9)
replay2_timestamp1 = datetime.datetime.now() - datetime.timedelta(seconds=2)
self.store_replays(mock_replay(replay1_timestamp0, project.id, replay1_id))
self.store_replays(mock_replay(replay1_timestamp1, project.id, replay1_id))
self.store_replays(mock_replay(replay2_timestamp0, project.id, replay2_id))
self.store_replays(mock_replay(replay2_timestamp1, project.id, replay2_id))
with self.feature(self.features):
# Smallest duration first.
response = self.client.get(self.url + "?orderBy=duration")
assert response.status_code == 200, response
response_data = response.json()
assert response_data["data"][0]["id"] == replay1_id
assert response_data["data"][1]["id"] == replay2_id
# Largest duration first.
response = self.client.get(self.url + "?orderBy=-duration")
response_data = response.json()
assert response_data["data"][0]["id"] == replay2_id
assert response_data["data"][1]["id"] == replay1_id
def test_get_replays_duration_sorted_tiebreaker(self) -> None:
"""Test that replays with identical durations have deterministic order when ordering by duration."""
project = self.create_project(teams=[self.team])
replay_ids = [uuid.uuid4().hex for _ in range(5)]
timestamp_start = datetime.datetime.now() - datetime.timedelta(seconds=10)
timestamp_end = datetime.datetime.now() - datetime.timedelta(seconds=5)
for replay_id in replay_ids:
self.store_replays(mock_replay(timestamp_start, project.id, replay_id, segment_id=0))
self.store_replays(mock_replay(timestamp_end, project.id, replay_id, segment_id=1))
with self.feature(self.features):
response = self.client.get(self.url + "?orderBy=duration")
assert response.status_code == 200, response
asc_order = [r["id"] for r in response.json()["data"]]
response = self.client.get(self.url + "?orderBy=-duration")
assert response.status_code == 200, response
desc_order = [r["id"] for r in response.json()["data"]]
assert desc_order == list(reversed(asc_order))
assert set(asc_order) == set(replay_ids)
    def test_get_replays_pagination(self) -> None:
        """Test replays can be paginated.

        Walks a per_page=1 cursor across two replays and checks the Link header's
        ``results`` flag on each page, including one page past the end.
        """
        project = self.create_project(teams=[self.team])

        replay1_id = uuid.uuid4().hex
        replay2_id = uuid.uuid4().hex
        replay1_timestamp0 = datetime.datetime.now() - datetime.timedelta(seconds=15)
        replay1_timestamp1 = datetime.datetime.now() - datetime.timedelta(seconds=5)
        replay2_timestamp0 = datetime.datetime.now() - datetime.timedelta(seconds=10)
        replay2_timestamp1 = datetime.datetime.now() - datetime.timedelta(seconds=2)

        # replay2 started more recently, so it sorts first under the default order.
        self.store_replays(mock_replay(replay1_timestamp0, project.id, replay1_id, segment_id=0))
        self.store_replays(mock_replay(replay1_timestamp1, project.id, replay1_id, segment_id=1))
        self.store_replays(mock_replay(replay2_timestamp0, project.id, replay2_id, segment_id=0))
        self.store_replays(mock_replay(replay2_timestamp1, project.id, replay2_id, segment_id=1))

        with self.feature(self.features):
            # First page.
            response = self.get_success_response(
                self.organization.slug,
                cursor=Cursor(0, 0),
                per_page=1,
            )
            response_data = response.json()
            assert "data" in response_data
            assert len(response_data["data"]) == 1
            assert response_data["data"][0]["id"] == replay2_id

            # More results remain, so the next link is marked live.
            link_header = response.headers["Link"]
            assert 'rel="next"; results="true"' in link_header

            # Next page.
            response = self.get_success_response(
                self.organization.slug,
                cursor=Cursor(0, 1),
                per_page=1,
            )
            response_data = response.json()
            assert "data" in response_data
            assert len(response_data["data"]) == 1
            assert response_data["data"][0]["id"] == replay1_id

            link_header = response.headers["Link"]
            assert 'rel="next"; results="false"' in link_header

            # Beyond pages.
            response = self.get_success_response(
                self.organization.slug,
                cursor=Cursor(0, 2),
                per_page=1,
            )

            # Past the end: an empty page with no further results advertised.
            response_data = response.json()
            assert "data" in response_data
            assert len(response_data["data"]) == 0

            link_header = response.headers["Link"]
            assert 'rel="next"; results="false"' in link_header
    def test_get_replays_user_filters(self) -> None:
        """Test replays conform to the interchange format.

        Stores one fully-populated replay plus event links and a viewed event,
        then asserts a large corpus of search queries each matches it (singly and
        combined with AND), and a second corpus of negated/mismatched queries
        each returns an empty result set.
        """
        project = self.create_project(teams=[self.team])

        replay1_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)

        # Segment 0: every queryable attribute populated with a known value.
        self.store_replays(
            mock_replay(
                seq1_timestamp,
                project.id,
                replay1_id,
                platform="javascript",
                dist="abc123",
                user_id="123",
                user_email="username@example.com",
                user_name="username123",
                user_ip_address="127.0.0.1",
                sdk_name="sentry.javascript.react",
                sdk_version="6.18.10",
                ota_updates_channel="stable",
                ota_updates_runtime_version="1.2.3",
                ota_updates_update_id="1234567890",
                os_name="macOS",
                os_version="15",
                browser_name="Firefox",
                browser_version="99",
                device_name="Macbook",
                device_brand="Apple",
                device_family="Macintosh",
                device_model="10",
                tags={"a": "m", "b": "q", "c": "test"},
                urls=["example.com"],
                segment_id=0,
            )
        )
        # Segment 1: the same attributes nulled out, proving aggregation keeps
        # the non-null values from segment 0.
        self.store_replays(
            mock_replay(
                seq2_timestamp,
                project.id,
                replay1_id,
                user_id=None,
                user_name=None,
                user_email=None,
                ipv4=None,
                os_name=None,
                os_version=None,
                browser_name=None,
                browser_version=None,
                device_name=None,
                device_brand=None,
                device_family=None,
                device_model=None,
                ota_updates_channel=None,
                ota_updates_runtime_version=None,
                ota_updates_update_id=None,
                tags={"a": "n", "b": "o"},
                error_ids=[],
                segment_id=1,
            )
        )
        # Event links covering every severity level: 2 fatal, 1 error (with a
        # known id for the error_ids queries), 1 warning, 1 info, 1 debug.
        self.store_replays(
            self.mock_event_links(seq1_timestamp, project.id, "fatal", replay1_id, uuid.uuid4().hex)
        )
        self.store_replays(
            self.mock_event_links(seq1_timestamp, project.id, "fatal", replay1_id, uuid.uuid4().hex)
        )
        self.store_replays(
            self.mock_event_links(
                seq1_timestamp, project.id, "error", replay1_id, "a3a62ef6ac86415b83c2416fc2f76db1"
            )
        )
        self.store_replays(
            self.mock_event_links(
                seq1_timestamp, project.id, "warning", replay1_id, uuid.uuid4().hex
            )
        )
        self.store_replays(
            self.mock_event_links(seq1_timestamp, project.id, "info", replay1_id, uuid.uuid4().hex)
        )
        self.store_replays(
            self.mock_event_links(seq1_timestamp, project.id, "debug", replay1_id, uuid.uuid4().hex)
        )
        # Mark the replay as viewed by the current user for the viewed_by queries.
        self.store_replays(
            mock_replay_viewed(
                seq1_timestamp.timestamp(), project.id, replay1_id, viewed_by_id=self.user.id
            )
        )

        with self.feature(self.features):
            # Run all the queries individually to determine compliance.
            queries = [
                "replay_type:session",
                "error_ids:a3a62ef6ac86415b83c2416fc2f76db1",
                "error_id:a3a62ef6ac86415b83c2416fc2f76db1",
                "trace_ids:4491657243ba4dbebd2f6bd62b733080",
                "trace_id:4491657243ba4dbebd2f6bd62b733080",
                "trace:4491657243ba4dbebd2f6bd62b733080",
                "count_urls:1",
                "count_urls:>0",
                "count_screens:1",
                "count_screens:>0",
                "count_dead_clicks:0",
                "count_rage_clicks:0",
                "count_traces:>0",
                "!count_traces:0",
                "platform:javascript",
                "releases:version@1.3",
                "releases:[a,version@1.3]",
                "release:version@1.3",
                "release:[a,version@1.3]",
                "duration:17s",
                "!duration:16s",
                "duration:>16s",
                "duration:<18s",
                "duration:>=17s",
                "duration:<=17s",
                "duration:17000ms",  # If duration value is not equal to a whole number of seconds, the endpoint fails.
                "duration:<1m",
                "duration:<1min",
                "duration:<1.5min",
                "duration:<2.25h",
                "duration:<2.25hr",
                "duration:<10d",
                "duration:<10day",
                "duration:<3w",
                "duration:<3wk",
                # Though it's discouraged by the frontend search bar, we still support values w/no units, read as ms.
                "duration:17000",
                "duration:>=16000",
                "user.id:123",
                "user.id:1*3",
                "user.id:[4000, 123]",
                "!user.id:[321, 1230]",
                "user:username123",  # user is an alias for user.username
                "user.username:username123",
                "user.username:*3",
                "user.username:[username123, bob456]",
                "!user.username:[bob456, bob123]",
                "user.email:username@example.com",
                "user.email:*@example.com",
                "user.email:[user2@example.com, username@example.com]",
                "!user.email:[user2@example.com]",
                "user.ip:127.0.0.1",
                "user.ip:[127.0.0.1, 10.0.4.4]",
                "!user.ip:[127.1.1.1, 10.0.4.4]",
                'user.geo.city:"San Francisco"',
                "user.geo.country_code:USA",
                'user.geo.region:"United States"',
                "user.geo.subdivision:California",
                'user.geo.city:"San Francisco" AND user.geo.country_code:USA AND user.geo.region:"United States" AND user.geo.subdivision:California',
                "sdk.name:sentry.javascript.react",
                "ota_updates.channel:stable",
                "ota_updates.runtime_version:1.2.3",
                "ota_updates.update_id:1234567890",
                "ota_updates.channel:stable AND ota_updates.runtime_version:1.2.3 AND ota_updates.update_id:1234567890",
                "!ota_updates.channel:unstable",
                "!ota_updates.runtime_version:4.5.6",
                "!ota_updates.update_id:9876543210",
                "os.name:macOS",
                "os.version:15",
                "browser.name:Firefox",
                "browser.version:99",
                "dist:abc123",
                "releases:*3",
                "!releases:*4",
                "release:*3",
                "!release:*4",
                "count_segments:>=2",
                "device.name:Macbook",
                "device.brand:Apple",
                "device.family:Macintosh",
                "device.model:10",
                # Contains operator.
                f"id:[{replay1_id},{uuid.uuid4().hex},{uuid.uuid4().hex}]",
                f"!id:[{uuid.uuid4().hex}]",
                # Or expression.
                f"id:{replay1_id} OR id:{uuid.uuid4().hex} OR id:{uuid.uuid4().hex}",
                # Paren wrapped expression.
                f"((id:{replay1_id} OR duration:0s) AND (duration:>15s OR platform:nothing))",
                # Implicit paren wrapped expression.
                f"(id:{replay1_id} OR duration:0s) AND (duration:>15s OR platform:nothing)",
                # Implicit And.
                f"(id:{replay1_id} OR duration:0s) OR (duration:>15s platform:javascript)",
                # Tag filters.
                "tags[a]:m",
                "a:m",
                "c:*st",
                "!c:*zz",
                "urls:example.com",
                "url:example.com",
                "screens:example.com",
                "screen:example.com",
                "activity:8",
                "activity:>2",
                "count_warnings:1",
                "count_warnings:>0",
                "count_warnings:<2",
                "count_infos:2",
                "count_infos:>1",
                "count_infos:<3",
                f"viewed_by_id:{self.user.id}",
                f"!viewed_by_id:{self.user.id+1}",
                f"viewed_by_id:[{self.user.id+3},{self.user.id}]",
                f"seen_by_id:{self.user.id}",
                f"!seen_by_id:{self.user.id + 1}",
                f"seen_by_id:[{self.user.id + 3},{self.user.id}]",
                "viewed_by_me:true",
                "seen_by_me:true",
                "is_archived:false",
                "!is_archived:true",
                "is_archived:0",
                "!is_archived:1",
            ]
            # Each query on its own must match the stored replay exactly once.
            for query in queries:
                response = self.client.get(self.url + f"?field=id&query={query}")
                assert response.status_code == 200, (query, response.json())
                response_data = response.json()
                assert len(response_data["data"]) == 1, (query, response.json())

            # Test all queries as a single AND expression.
            all_queries = " ".join(queries)

            response = self.client.get(self.url + f"?query={all_queries}")
            assert response.status_code == 200
            response_data = response.json()
            assert len(response_data["data"]) == 1, "all queries"

            missing_uuid = "f8a783a4261a4b559f108c3721fc05cc"

            # Assert returns empty result sets.
            null_queries = [
                "!replay_type:session",
                "!error_ids:a3a62ef6ac86415b83c2416fc2f76db1",
                f"error_ids:{missing_uuid}",
                "!error_id:a3a62ef6ac86415b83c2416fc2f76db1",
                f"error_id:{missing_uuid}",
                "!trace_ids:4491657243ba4dbebd2f6bd62b733080",
                "!trace_id:4491657243ba4dbebd2f6bd62b733080",
                "!trace:4491657243ba4dbebd2f6bd62b733080",
                "count_urls:0",
                "count_screens:0",
                "count_urls:<1",
                "count_screens:<1",
                "count_dead_clicks:>0",
                "count_rage_clicks:>0",
                "count_traces:0",
                f"id:{replay1_id} AND id:{missing_uuid}",
                f"id:{replay1_id} AND duration:>1000s",
                f"id:{missing_uuid} OR duration:>1000s",
                "a:o",
                "a:[o,p]",
                "releases:a",
                "releases:*4",
                "!releases:*3",
                "releases:[a,b]",
                "release:a",
                "release:*4",
                "!release:*3",
                "release:[a,b]",
                "c:*zz",
                "!c:*st",
                "!activity:8",
                "activity:<2",
                f"viewed_by_id:{self.user.id+1}",
                f"seen_by_id:{self.user.id+1}",
                "viewed_by_me:false",
                "seen_by_me:false",
                "user.email:[user2@example.com]",
                "!user.email:[username@example.com, user2@example.com]",
                '!user.geo.city:"San Francisco"',
                "!user.geo.country_code:USA",
                '!user.geo.region:"United States"',
                "!user.geo.subdivision:California",
                'user.geo.city:"San Francisco" AND !user.geo.country_code:USA',
                '!user.geo.subdivision:California OR !user.geo.region:"United States"',
                "!ota_updates.channel:stable",
                "!ota_updates.runtime_version:1.2.3",
                "!ota_updates.update_id:1234567890",
                "ota_updates.channel:unstable",
                "ota_updates.runtime_version:4.5.6",
                "ota_updates.update_id:9876543210",
                "is_archived:true",
                "!is_archived:false",
                "is_archived:1",
                "!is_archived:0",
            ]
            # Each negated/mismatched query must match nothing.
            for query in null_queries:
                response = self.client.get(self.url + f"?field=id&query={query}")
                assert response.status_code == 200, (query, response.json())
                response_data = response.json()
                assert len(response_data["data"]) == 0, (query, response.json())
    def test_get_replays_user_sorts(self) -> None:
        """Test replays conform to the interchange format.

        Stores two replays in different projects -- one with every sortable
        attribute set to "b" (and more errors/event-links), one set to "a" --
        then asserts each supported sort key orders them correctly ascending
        and descending.
        """
        project = self.create_project(teams=[self.team])
        project2 = self.create_project(teams=[self.team])

        # Replay 1: all sortable attributes set to "b" (sorts second ascending).
        replay1_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=15)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
        self.store_replays(
            mock_replay(
                seq1_timestamp,
                project2.id,
                replay1_id,
                error_ids=[uuid.uuid4().hex, uuid.uuid4().hex],
                platform="b",
                dist="b",
                user_id="b",
                user_email="b",
                user_name="b",
                user_ip_address="127.0.0.2",
                sdk_name="b",
                sdk_version="b",
                os_name="b",
                os_version="b",
                browser_name="b",
                browser_version="b",
                device_name="b",
                device_brand="b",
                device_family="b",
                device_model="b",
                segment_id=0,
            )
        )
        self.store_replays(
            mock_replay(
                seq2_timestamp,
                project2.id,
                replay1_id,
                platform="b",
                dist="b",
                user_id="b",
                user_email="b",
                user_name="b",
                user_ip_address="127.0.0.2",
                sdk_name="b",
                sdk_version="b",
                os_name="b",
                os_version="b",
                browser_name="b",
                browser_version="b",
                device_name="b",
                device_brand="b",
                device_family="b",
                device_model="b",
                segment_id=1,
            )
        )

        # Replay 2: all sortable attributes set to "a" (sorts first ascending);
        # shorter duration (15s -> 10s ago) than replay 1 (15s -> 5s ago).
        replay2_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=15)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=10)
        self.store_replays(
            mock_replay(
                seq1_timestamp,
                project.id,
                replay2_id,
                error_ids=[uuid.uuid4().hex],
                platform="a",
                dist="a",
                user_id="a",
                user_email="a",
                user_name="a",
                user_ip_address="127.0.0.1",
                sdk_name="a",
                sdk_version="a",
                os_name="a",
                os_version="a",
                browser_name="a",
                browser_version="a",
                device_name="a",
                device_brand="a",
                device_family="a",
                device_model="a",
                segment_id=0,
            )
        )
        self.store_replays(
            mock_replay(
                seq2_timestamp,
                project.id,
                replay2_id,
                platform="a",
                dist="a",
                user_id="a",
                user_email="a",
                user_name="a",
                user_ip_address="127.0.0.1",
                sdk_name="a",
                sdk_version="a",
                os_name="a",
                os_version="a",
                browser_name="a",
                browser_version="a",
                device_name="a",
                device_brand="a",
                device_family="a",
                device_model="a",
                segment_id=1,
            )
        )

        # Event links (one per severity) only for replay 1, so its
        # count_warnings/count_infos exceed replay 2's.
        self.store_replays(
            self.mock_event_links(
                seq1_timestamp, project2.id, "fatal", replay1_id, uuid.uuid4().hex
            )
        )
        self.store_replays(
            self.mock_event_links(
                seq1_timestamp, project2.id, "error", replay1_id, uuid.uuid4().hex
            )
        )
        self.store_replays(
            self.mock_event_links(
                seq1_timestamp, project2.id, "warning", replay1_id, uuid.uuid4().hex
            )
        )
        self.store_replays(
            self.mock_event_links(seq1_timestamp, project2.id, "info", replay1_id, uuid.uuid4().hex)
        )
        self.store_replays(
            self.mock_event_links(
                seq1_timestamp, project2.id, "debug", replay1_id, uuid.uuid4().hex
            )
        )

        with self.feature(self.features):
            # Run all the queries individually to determine compliance.
            queries = [
                "activity",
                "browser.name",
                "browser.version",
                "device.brand",
                "device.family",
                "device.model",
                "device.name",
                "dist",
                "duration",
                "os.name",
                "os.version",
                "platform",
                "project_id",
                "sdk.name",
                "user.email",
                "user.id",
                "user.username",
                "count_warnings",
                "count_infos",
            ]

            for key in queries:
                # Ascending
                response = self.client.get(self.url + f"?orderBy={key}")
                assert response.status_code == 200, key
                r = response.json()
                assert len(r["data"]) == 2, key
                assert r["data"][0]["id"] == replay2_id, key
                assert r["data"][1]["id"] == replay1_id, key

                # Descending
                response = self.client.get(self.url + f"?orderBy=-{key}")
                assert response.status_code == 200, key
                r = response.json()
                assert len(r["data"]) == 2, key
                assert r["data"][0]["id"] == replay1_id, key
                assert r["data"][1]["id"] == replay2_id, key
def test_get_replays_filter_bad_operator(self) -> None:
self.create_project(teams=[self.team])
queries = [
"transaction.duration:>0s",
"viewed_by_me:<true",
"seen_by_me:>false",
"!viewed_by_me:false",
"!seen_by_me:true",
]
with self.feature(self.features):
for query in queries:
response = self.client.get(self.url + f"?field=id&query={query}")
assert response.status_code == 400, query
def test_get_replays_filter_bad_value(self) -> None:
"""Test replays conform to the interchange format."""
self.create_project(teams=[self.team])
queries = [
"viewed_by_me:potato",
"duration:a",
# TODO: remove once we support ms timestamps
"duration:1004ms",
"duration:7.3s",
"duration:1.33min",
]
with self.feature(self.features):
for query in queries:
response = self.client.get(self.url + f"?query={query}")
assert response.status_code == 400, query
def test_get_replays_filter_bad_duration_error_messages(self) -> None:
# TODO: remove once we support ms timestamps
self.create_project(teams=[self.team])
queries = [
"duration:1004ms",
"duration:7.3s",
"duration:1.33min",
]
with self.feature(self.features):
for query in queries:
response = self.client.get(self.url + f"?query={query}")
assert response.status_code == 400, query
assert (
b"Replays only supports second-resolution timestamps at this time"
in response.content
), query
assert b"duration" in response.content, query
# Note: there's no such thing as a bad field with the tag filtering behavior.
def test_get_replays_unknown_field(self) -> None:
"""Test replays unknown fields raise a 400 error."""
project = self.create_project(teams=[self.team])
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
with self.feature(self.features):
response = self.client.get(self.url + "?field=unknown")
assert response.status_code == 400
def test_get_replays_activity_field(self) -> None:
"""Test replays activity field does not raise 400."""
project = self.create_project(teams=[self.team])
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
with self.feature(self.features):
response = self.client.get(self.url + "?field=activity")
assert response.status_code == 200
    def test_archived_records_are_null_fields(self) -> None:
        """An archived replay returns a stub row with nearly all fields nulled.

        Stores a live segment followed by an archive marker and asserts the
        response contains the archived placeholder body exactly.
        """
        replay1_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=30)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=15)

        self.store_replays(mock_replay(seq1_timestamp, self.project.id, replay1_id))
        # The archive marker supersedes the earlier segment's data.
        self.store_replays(
            mock_replay(seq2_timestamp, self.project.id, replay1_id, is_archived=True)
        )

        with self.feature(self.features):
            response = self.client.get(self.url)
            assert response.status_code == 200
            # Only id/project_id/is_archived carry real values; everything else
            # is nulled or replaced by the "Archived Replay" placeholder.
            assert response.json()["data"] == [
                {
                    "id": replay1_id,
                    "project_id": str(self.project.id),
                    "trace_ids": [],
                    "error_ids": [],
                    "environment": None,
                    "tags": [],
                    "user": {
                        "id": "Archived Replay",
                        "display_name": "Archived Replay",
                        "username": None,
                        "email": None,
                        "ip": None,
                        "geo": {
                            "city": None,
                            "country_code": None,
                            "region": None,
                            "subdivision": None,
                        },
                    },
                    "sdk": {"name": None, "version": None},
                    "os": {"name": None, "version": None},
                    "browser": {"name": None, "version": None},
                    "device": {"name": None, "brand": None, "model": None, "family": None},
                    "ota_updates": {"channel": None, "runtime_version": None, "update_id": None},
                    "urls": None,
                    "started_at": None,
                    "count_errors": None,
                    "count_dead_clicks": None,
                    "count_rage_clicks": None,
                    "activity": None,
                    "finished_at": None,
                    "duration": None,
                    "is_archived": True,
                    "releases": [],
                    "platform": None,
                    "dist": None,
                    "count_segments": None,
                    "count_urls": None,
                    "clicks": [],
                    "warning_ids": [],
                    "info_ids": [],
                    "count_warnings": None,
                    "count_infos": None,
                    "has_viewed": False,
                }
            ]
# commented out until https://github.com/getsentry/snuba/pull/4137 is merged.
# def test_archived_records_out_of_bounds(self) -> None:
# replay1_id = uuid.uuid4().hex
# seq1_timestamp = datetime.datetime.now() - datetime.timedelta(days=10)
# seq2_timestamp = datetime.datetime.now() - datetime.timedelta(days=3)
# self.store_replays(mock_replay(seq1_timestamp, self.project.id, replay1_id))
# self.store_replays(
# mock_replay(
# seq2_timestamp, self.project.id, replay1_id, is_archived=True, segment_id=None
# )
# )
# with self.feature(self.features):
# response = self.client.get(self.url)
# assert response.status_code == 200
# assert response.json()["data"] == [
# {
# "id": replay1_id,
# "project_id": str(self.project.id),
# "trace_ids": [],
# "error_ids": [],
# "environment": None,
# "tags": [],
# "user": {"id": "Archived Replay", "display_name": "Archived Replay"},
# "sdk": {"name": None, "version": None},
# "os": {"name": None, "version": None},
# "browser": {"name": None, "version": None},
# "device": {"name": None, "brand": None, "model": None, "family": None},
# "urls": None,
# "started_at": None,
# "count_errors": None,
# "activity": None,
# "finished_at": None,
# "duration": None,
# "is_archived": True,
# }
# ]
    def test_get_replays_filter_clicks(self) -> None:
        """Filter replays by click-event attributes and CSS-like selectors."""
        project = self.create_project(teams=[self.team])
        replay1_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
        self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
        self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
        # First click row: fully populated attributes, flagged both dead and rage.
        self.store_replays(
            mock_replay_click(
                seq2_timestamp,
                project.id,
                replay1_id,
                node_id=1,
                tag="div",
                id="myid",
                class_=["class1", "class2", "class:hover"],
                component_name="SignUpForm",
                role="button",
                testid="1",
                alt="Alt",
                aria_label="AriaLabel",
                title="MyTitle",
                text="Hello",
                is_dead=1,
                is_rage=1,
            )
        )
        # Second click row: sparse attributes on a different tag.
        self.store_replays(
            mock_replay_click(
                seq2_timestamp,
                project.id,
                replay1_id,
                node_id=2,
                tag="button",
                id="myid",
                class_=["class1", "class3"],
            )
        )
        with self.feature(self.features):
            # Positive cases: each filter must match the single stored replay.
            queries = [
                "click.alt:Alt",
                "click.class:class1",
                "click.class:class2",
                "click.class:class3",
                "click.id:myid",
                "click.label:AriaLabel",
                "click.component_name:SignUpForm",
                "click.role:button",
                "click.tag:div",
                "click.tag:button",
                "click.testid:1",
                "click.textContent:Hello",
                "click.title:MyTitle",
                "click.selector:div#myid",
                "click.selector:div[alt=Alt]",
                "click.selector:div[title=MyTitle]",
                "click.selector:div[data-sentry-component=SignUpForm]",
                "click.selector:div[data-testid='1']",
                "click.selector:div[data-test-id='1']",
                "click.selector:div[role=button]",
                "click.selector:div#myid.class1.class2",
                "dead.selector:div#myid",
                "dead.selector:div#myid.class1.class2[role=button][aria-label='AriaLabel'][data-sentry-component=SignUpForm]",
                "rage.selector:div#myid",
                "rage.selector:div#myid.class1.class2[role=button][aria-label='AriaLabel'][data-sentry-component=SignUpForm]",
                # Assert selectors with special characters in them can be queried.
                "click.selector:div.class%5C:hover",
                # Single quotes around attribute value.
                "click.selector:div[role='button']",
                "click.selector:div#myid.class1.class2[role=button][aria-label='AriaLabel'][data-sentry-component='SignUpForm']",
            ]
            for query in queries:
                response = self.client.get(self.url + f"?field=id&query={query}")
                assert response.status_code == 200, query
                response_data = response.json()
                assert len(response_data["data"]) == 1, query
            # Negative cases: each filter must match no replay.
            queries = [
                "click.alt:NotAlt",
                "click.class:class4",
                "click.id:other",
                "click.label:NotAriaLabel",
                "click.component_name:NotSignUpForm",
                "click.role:form",
                "click.tag:header",
                "click.testid:2",
                "click.textContent:World",
                "click.title:NotMyTitle",
                "!click.selector:div#myid",
                "click.selector:div#notmyid",
                "dead.selector:button#myid",
                "rage.selector:button#myid",
                # Assert all classes must match.
                "click.selector:div#myid.class1.class2.class3",
                # Invalid selectors return no rows.
                "click.selector:$#%^#%",
                # Integer type role values are not allowed and must be wrapped in single quotes.
                "click.selector:div[title=1]",
            ]
            for query in queries:
                response = self.client.get(self.url + f"?query={query}")
                assert response.status_code == 200, query
                response_data = response.json()
                assert len(response_data["data"]) == 0, query
def test_get_replays_filter_taps(self) -> None:
project = self.create_project(teams=[self.team])
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
self.store_replays(
mock_replay_tap(
seq2_timestamp,
project.id,
replay1_id,
message="TappedSignIn",
view_class="UIButton",
view_id="btn_signin",
)
)
self.store_replays(
mock_replay_tap(
seq2_timestamp,
project.id,
replay1_id,
message="TappedCancel",
view_class="UIButtonSecondary",
view_id="btn_cancel",
)
)
with self.feature(self.features):
queries = [
"tap.message:TappedSignIn",
"tap.message:TappedCancel",
"tap.view_class:UIButton",
"tap.view_class:UIButtonSecondary",
"tap.view_id:btn_signin",
"tap.view_id:btn_cancel",
]
for query in queries:
response = self.client.get(self.url + f"?field=id&query={query}")
assert response.status_code == 200, query
response_data = response.json()
assert len(response_data["data"]) == 1, query
negation_queries = [
"!tap.message:TappedSignIn",
"!tap.view_class:UIButton",
"!tap.view_id:btn_signin",
]
for query in negation_queries:
response = self.client.get(self.url + f"?query={query}")
assert response.status_code == 200, query
response_data = response.json()
assert len(response_data["data"]) == 0, query
    def test_get_replays_click_fields(self) -> None:
        """Request the `clicks` response field and assert its exact shape."""
        project = self.create_project(teams=[self.team])
        replay1_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
        self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
        self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
        # Fully populated click row (node 1).
        self.store_replays(
            mock_replay_click(
                seq2_timestamp,
                project.id,
                replay1_id,
                node_id=1,
                tag="div",
                id="myid",
                class_=["class1", "class2"],
                component_name="SignUpForm",
                role="button",
                testid="1",
                alt="Alt",
                aria_label="AriaLabel",
                title="MyTitle",
                text="Hello",
            )
        )
        # Sparse click row (node 2); unset attributes surface as None below.
        self.store_replays(
            mock_replay_click(
                seq2_timestamp,
                project.id,
                replay1_id,
                node_id=2,
                tag="button",
                id="myid",
                class_=["class1", "class3"],
            )
        )
        with self.feature(self.features):
            response = self.client.get(self.url + "?field=clicks")
            assert response.status_code == 200, response.content
            response_data = response.json()
            # NOTE(review): the first entry mixes node 2's tag/classes with
            # node 1's alt/testid/etc. — presumably an artifact of how the
            # aggregation groups click rows; confirm the intended semantics.
            assert response_data["data"] == [
                {
                    "clicks": [
                        {
                            "click.alt": "Alt",
                            "click.classes": ["class1", "class3"],
                            "click.id": "myid",
                            "click.component_name": "SignUpForm",
                            "click.role": "button",
                            "click.tag": "button",
                            "click.testid": "1",
                            "click.text": "Hello",
                            "click.title": "MyTitle",
                            "click.label": "AriaLabel",
                        },
                        {
                            "click.alt": None,
                            "click.classes": ["class1", "class2"],
                            "click.id": "myid",
                            "click.component_name": None,
                            "click.role": None,
                            "click.tag": "div",
                            "click.testid": None,
                            "click.text": None,
                            "click.title": None,
                            "click.label": None,
                        },
                    ]
                }
            ]
def test_get_replays_filter_clicks_nested_selector(self) -> None:
"""Test replays do not support nested selectors."""
project = self.create_project(teams=[self.team])
self.store_replays(mock_replay(datetime.datetime.now(), project.id, uuid.uuid4().hex))
with self.feature(self.features):
queries = [
'click.selector:"div button"',
'click.selector:"div + button"',
'click.selector:"div ~ button"',
'click.selector:"div > button"',
]
for query in queries:
response = self.client.get(self.url + f"?field=id&query={query}")
assert response.status_code == 400
assert response.content == b'{"detail":"Nested selectors are not supported."}'
def test_get_replays_filter_clicks_pseudo_element(self) -> None:
"""Assert replays only supports a subset of selector syntax."""
project = self.create_project(teams=[self.team])
self.store_replays(mock_replay(datetime.datetime.now(), project.id, uuid.uuid4().hex))
with self.feature(self.features):
queries = [
"click.selector:a::visited",
]
for query in queries:
response = self.client.get(self.url + f"?field=id&query={query}")
assert response.status_code == 400, query
assert response.content == b'{"detail":"Pseudo-elements are not supported."}', query
def test_get_replays_filter_clicks_unsupported_selector(self) -> None:
"""Assert replays only supports a subset of selector syntax."""
project = self.create_project(teams=[self.team])
self.store_replays(mock_replay(datetime.datetime.now(), project.id, uuid.uuid4().hex))
with self.feature(self.features):
queries = [
"click.selector:div:is(2)",
"click.selector:p:active",
]
for query in queries:
response = self.client.get(self.url + f"?field=id&query={query}")
assert response.status_code == 400, query
assert (
response.content
== b'{"detail":"Only attribute, class, id, and tag name selectors are supported."}'
), query
def test_get_replays_filter_clicks_unsupported_attribute_selector(self) -> None:
"""Assert replays only supports a subset of selector syntax."""
project = self.create_project(teams=[self.team])
self.store_replays(mock_replay(datetime.datetime.now(), project.id, uuid.uuid4().hex))
with self.feature(self.features):
queries = ["click.selector:div[xyz=test]"]
for query in queries:
response = self.client.get(self.url + f"?field=id&query={query}")
assert response.status_code == 400, query
assert response.content == (
b'{"detail":"Invalid attribute specified. Only alt, aria-label, role, '
b'data-testid, data-test-id, data-sentry-component, and title are supported."}'
), query
def test_get_replays_filter_clicks_unsupported_operators(self) -> None:
"""Assert replays only supports a subset of selector syntax."""
project = self.create_project(teams=[self.team])
self.store_replays(mock_replay(datetime.datetime.now(), project.id, uuid.uuid4().hex))
with self.feature(self.features):
queries = [
'click.selector:"[aria-label~=button]"',
'click.selector:"[aria-label|=button]"',
'click.selector:"[aria-label^=button]"',
'click.selector:"[aria-label$=button]"',
]
for query in queries:
response = self.client.get(self.url + f"?field=id&query={query}")
assert response.status_code == 400, query
assert (
response.content == b'{"detail":"Only the \'=\' operator is supported."}'
), query
def test_get_replays_field_order(self) -> None:
"""Test replay response with fields requested in production."""
project = self.create_project(teams=[self.team])
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
with self.feature(self.features):
# Invalid field-names error regardless of ordering.
response = self.client.get(self.url + "?field=invalid&field=browser")
assert response.status_code == 400
response = self.client.get(self.url + "?field=browser&field=invalid")
assert response.status_code == 400
# Correct field-names never error.
response = self.client.get(self.url + "?field=count_urls&field=browser")
assert response.status_code == 200
response = self.client.get(self.url + "?field=browser&field=count_urls")
assert response.status_code == 200
    def test_get_replays_memory_error(self) -> None:
        """Return a 400 when the replay query exceeds Snuba memory limits."""
        project = self.create_project(teams=[self.team])
        replay1_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
        self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
        self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
        with self.feature(self.features):
            # Force the query layer to raise so the endpoint's translation of
            # QueryMemoryLimitExceeded into a user-facing 400 is exercised.
            with mock.patch(
                "sentry.replays.endpoints.organization_replay_index.query_replays_collection_paginated",
                side_effect=QueryMemoryLimitExceeded("mocked error"),
            ):
                response = self.client.get(self.url)
                assert response.status_code == 400
                assert (
                    response.content
                    == b'{"detail":"Query limits exceeded. Try narrowing your request."}'
                )
    def test_get_replays_filter_clicks_non_click_rows(self) -> None:
        """Click filters must only match genuine click rows (non-empty tag)."""
        project = self.create_project(teams=[self.team])
        replay1_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
        self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
        self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
        # Real click row: tag is set.
        self.store_replays(
            mock_replay_click(
                seq2_timestamp,
                project.id,
                replay1_id,
                node_id=1,
                tag="div",
                id="id1",
                class_=["id1"],
                text="id1",
                role="id1",
                alt="id1",
                testid="id1",
                aria_label="id1",
                title="id1",
            )
        )
        # Non-click row: empty tag; its attribute values must never match.
        self.store_replays(
            mock_replay_click(
                seq2_timestamp,
                project.id,
                replay1_id,
                node_id=2,
                tag="",
                id="id2",
                class_=["id2"],
                text="id2",
                role="id2",
                alt="id2",
                testid="id2",
                aria_label="id2",
                title="id2",
            )
        )
        with self.feature(self.features):
            success_queries = [
                "click.id:id1",
                "click.class:[id1]",
                "click.textContent:id1",
                "click.role:id1",
                "click.alt:id1",
                "click.testid:id1",
                "click.label:id1",
                "click.title:id1",
            ]
            for query in success_queries:
                response = self.client.get(self.url + f"?field=id&query={query}")
                assert response.status_code == 200
                response_data = response.json()
                assert len(response_data["data"]) == 1, query
            # These tests demonstrate what happens when you match a click value on non-click row.
            failure_queries = [
                "click.id:id2",
                "click.class:[id2]",
                "click.textContent:id2",
                "click.role:id2",
                "click.alt:id2",
                "click.testid:id2",
                "click.label:id2",
                "click.title:id2",
            ]
            for query in failure_queries:
                response = self.client.get(self.url + f"?field=id&query={query}")
                assert response.status_code == 200
                response_data = response.json()
                assert len(response_data["data"]) == 0, query
# The following section tests the valid branches of the condition classes.
def test_query_branches_string_conditions(self) -> None:
project = self.create_project(teams=[self.team])
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
with self.feature(self.features):
queries = [
"device.brand:Apple",
"!device.brand:Microsoft",
"device.brand:[Apple,Microsoft]",
"!device.brand:[Oracle,Microsoft]",
"device.brand:App*",
"!device.brand:Micro*",
]
for query in queries:
response = self.client.get(self.url + f"?field=id&query={query}")
assert response.status_code == 200
response_data = response.json()
assert len(response_data["data"]) == 1, query
def test_query_branches_click_scalar_conditions(self) -> None:
project = self.create_project(teams=[self.team])
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
self.store_replays(
mock_replay_click(
seq2_timestamp, project.id, replay1_id, node_id=1, tag="div", id="id1"
)
)
with self.feature(self.features):
queries = [
"click.id:id1",
"!click.id:id2",
"click.id:[id1,id2]",
"!click.id:[id3,id2]",
"click.id:*1",
"!click.id:*2",
]
for query in queries:
response = self.client.get(self.url + f"?field=id&query={query}")
assert response.status_code == 200
response_data = response.json()
assert len(response_data["data"]) == 1, query
    def test_query_branches_click_array_conditions(self) -> None:
        """Array click-field (class list) filters: eq, neq, in, not-in, wildcard."""
        project = self.create_project(teams=[self.team])
        replay1_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
        self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
        self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
        self.store_replays(
            mock_replay_click(
                seq2_timestamp, project.id, replay1_id, node_id=1, tag="div", class_=["class1"]
            )
        )
        with self.feature(self.features):
            queries = [
                "click.class:class1",
                "!click.class:class2",
                "click.class:[class1,class2]",
                "!click.class:[class3,class2]",
                "click.class:*1",
                "!click.class:*2",
            ]
            for query in queries:
                response = self.client.get(self.url + f"?field=id&query={query}")
                assert response.status_code == 200
                response_data = response.json()
                assert len(response_data["data"]) == 1, query
    def test_query_branches_array_of_string_conditions(self) -> None:
        """Array-of-string field (urls) filters: eq, neq, in, not-in, wildcard."""
        project = self.create_project(teams=[self.team])
        replay1_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
        self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id, urls=["Apple"]))
        self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id, urls=[]))
        with self.feature(self.features):
            queries = [
                "urls:Apple",
                "!urls:Microsoft",
                "urls:[Apple,Microsoft]",
                "!urls:[Oracle,Microsoft]",
                "urls:App*",
                "!urls:Micro*",
            ]
            # Re-run every query against "screens" ("urls" -> "screens") —
            # presumably the mobile alias of the urls field; confirm against
            # the search configuration.
            new_queries = [q.replace("url", "screen") for q in queries]
            queries.extend(new_queries)
            for query in queries:
                response = self.client.get(self.url + f"?field=id&query={query}")
                assert response.status_code == 200
                response_data = response.json()
                assert len(response_data["data"]) == 1, query
    def test_query_branches_integer_conditions(self) -> None:
        """Integer field (count_errors) filters: eq, neq, ranges, in, not-in."""
        project = self.create_project(teams=[self.team])
        replay1_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
        self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
        self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id, error_ids=[]))
        # NOTE(review): this event-link row appears to be what drives
        # count_errors to exactly 1 below — confirm against the aggregation.
        self.store_replays(
            self.mock_event_links(
                seq1_timestamp, project.id, "error", replay1_id, "a3a62ef6ac86415b83c2416fc2f76db1"
            )
        )
        with self.feature(self.features):
            queries = [
                "count_errors:1",
                "!count_errors:2",
                "count_errors:>0",
                "count_errors:<2",
                "count_errors:>=1",
                "count_errors:<=1",
                "count_errors:[1,2]",
                "!count_errors:[2,3]",
            ]
            for query in queries:
                response = self.client.get(self.url + f"?field=id&query={query}")
                assert response.status_code == 200
                response_data = response.json()
                assert len(response_data["data"]) == 1, query
def test_query_branches_error_ids_conditions(self) -> None:
project = self.create_project(teams=[self.team])
uid1 = uuid.uuid4().hex
uid2 = uuid.uuid4().hex
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id, error_ids=[uid1]))
self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
with self.feature(self.features):
queries = [
f"error_ids:{uid1}",
f"!error_ids:{uid2}",
f"error_ids:[{uid1},{uid2}]",
f"!error_ids:[{uid2}]",
]
for query in queries:
response = self.client.get(self.url + f"?field=id&query={query}")
assert response.status_code == 200
response_data = response.json()
assert len(response_data["data"]) == 1, query
def test_query_branches_uuid_conditions(self) -> None:
project = self.create_project(teams=[self.team])
uid1 = uuid.uuid4().hex
uid2 = uuid.uuid4().hex
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id, trace_ids=[uid1]))
self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
with self.feature(self.features):
queries = [
f"trace_ids:{uid1}",
f"!trace_ids:{uid2}",
f"trace_ids:[{uid1},{uid2}]",
f"!trace_ids:[{uid2}]",
]
for query in queries:
response = self.client.get(self.url + f"?field=id&query={query}")
assert response.status_code == 200
response_data = response.json()
assert len(response_data["data"]) == 1, query
def test_query_branches_string_uuid_conditions(self) -> None:
project = self.create_project(teams=[self.team])
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
with self.feature(self.features):
uid2 = uuid.uuid4().hex
queries = [
f"id:{replay1_id}",
f"!id:{uid2}",
f"id:[{replay1_id},{uid2}]",
f"!id:[{uid2}]",
]
for query in queries:
response = self.client.get(self.url + f"?field=id&query={query}")
assert response.status_code == 200
response_data = response.json()
assert len(response_data["data"]) == 1, query
    def test_query_branches_ip_address_conditions(self) -> None:
        """IP-address field filters: eq, neq, in, not-in."""
        project = self.create_project(teams=[self.team])
        replay1_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
        self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
        self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
        with self.feature(self.features):
            # 127.0.0.1 matches — presumably the mock_replay fixture default;
            # confirm in the fixture if this ever changes.
            queries = [
                "user.ip_address:127.0.0.1",
                "!user.ip_address:192.168.0.1",
                "user.ip_address:[127.0.0.1,192.168.0.1]",
                "!user.ip_address:[192.168.0.1]",
            ]
            for query in queries:
                response = self.client.get(self.url + f"?field=id&query={query}")
                assert response.status_code == 200
                response_data = response.json()
                assert len(response_data["data"]) == 1, query
    def test_query_invalid_ipv4_addresses(self) -> None:
        """Malformed IPv4 filter values are rejected with a 400."""
        project = self.create_project(teams=[self.team])
        replay1_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
        self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
        self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
        with self.feature(self.features):
            # NOTE(review): the first query uses "user.ip" while the rest use
            # "user.ip_address" — presumably both are accepted aliases; confirm.
            queries = [
                "user.ip:127.256.0.1",
                "!user.ip_address:192.168.z34.1",
                "user.ip_address:bacontest",
                "user.ip_address:[127.0.0.,192.168.0.1]",
            ]
            for query in queries:
                response = self.client.get(self.url + f"?field=id&query={query}")
                assert response.status_code == 400
    def _test_empty_filters(
        self,
        query_key: str,
        field: str,
        null_value: str | int | None,
        nonnull_value: str | int | bool,
    ) -> None:
        """
        Tests filters on a nullable field such as user.email:"", !user.email:"", user.email:["", ...].
        Due to clickhouse aggregations, these queries are handled as a special case which needs testing.
        @param query_key name of field in URL query string, ex `user.email`.
        @param field name of kwarg used for testutils.mock_replay, ex `user_email`.
        @param null_value null value for this field, stored by Snuba processor (ex: null user_email is translated to "").
        @param nonnull_value a non-null value to use for testing.
        """
        project = self.create_project(teams=[self.team])
        replay1_id = uuid.uuid4().hex
        replay2_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
        # replay1's latest segment carries the non-null value; replay2 stays
        # null in every segment.
        self.store_replays(
            mock_replay(seq1_timestamp, project.id, replay1_id, **{field: null_value})
        )
        self.store_replays(
            mock_replay(seq2_timestamp, project.id, replay1_id, **{field: nonnull_value})
        )
        self.store_replays(
            mock_replay(seq1_timestamp, project.id, replay2_id, **{field: null_value})
        )
        self.store_replays(
            mock_replay(seq2_timestamp, project.id, replay2_id, **{field: null_value})
        )
        with self.feature(self.features):
            # Only the replay whose aggregated value is still null matches "".
            null_query = f'{query_key}:""'
            response = self.client.get(self.url + f"?field=id&query={null_query}")
            assert response.status_code == 200
            data = response.json()["data"]
            assert len(data) == 1
            assert data[0]["id"] == replay2_id
            # Negation selects the replay that did receive a value.
            non_null_query = "!" + null_query
            response = self.client.get(self.url + f"?field=id&query={non_null_query}")
            assert response.status_code == 200
            data = response.json()["data"]
            assert len(data) == 1
            assert data[0]["id"] == replay1_id
            # Lists containing "" match both the null and the non-null replay.
            list_queries = [
                f'{query_key}:[{nonnull_value}, ""]',
                f'{query_key}:["{nonnull_value}", ""]',
            ]
            for query in list_queries:
                response = self.client.get(self.url + f"?field=id&query={query}")
                assert response.status_code == 200
                data = response.json()["data"]
                assert len(data) == 2
                assert {item["id"] for item in data} == {replay1_id, replay2_id}
            # Negating those lists excludes every replay.
            for query in ["!" + query for query in list_queries]:
                response = self.client.get(self.url + f"?field=id&query={query}")
                assert response.status_code == 200
                data = response.json()["data"]
                assert len(data) == 0
    def test_query_empty_email(self) -> None:
        """Empty-string filters on the nullable user.email field."""
        self._test_empty_filters("user.email", "user_email", "", "andrew@example.com")
    def test_query_empty_ipv4(self) -> None:
        """Empty-string filters on the nullable user.ip field (null stored as None)."""
        self._test_empty_filters("user.ip", "ipv4", None, "127.0.0.1")
    def test_query_empty_username(self) -> None:
        """Empty-string filters on the nullable user.username field."""
        self._test_empty_filters("user.username", "user_name", "", "andrew1")
    def test_query_empty_user_id(self) -> None:
        """Empty-string filters on the nullable user.id field."""
        self._test_empty_filters("user.id", "user_id", "", "12ef6")
def test_query_branches_computed_activity_conditions(self) -> None:
project = self.create_project(teams=[self.team])
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id, error_ids=[]))
with self.feature(self.features):
queries = [
"activity:1",
"!activity:0",
"activity:>0",
"activity:<2",
"activity:>=1",
"activity:<=1",
"activity:[1,2]",
"!activity:[0,2]",
]
for query in queries:
response = self.client.get(self.url + f"?field=id&query={query}")
assert response.status_code == 200
response_data = response.json()
assert len(response_data["data"]) == 1, query
def test_query_scalar_optimization_multiple_varying(self) -> None:
project = self.create_project(teams=[self.team])
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(
mock_replay(seq1_timestamp, project.id, replay1_id, urls=["apple", "microsoft"])
)
self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id, urls=[]))
with self.feature(self.features):
response = self.client.get(self.url + "?field=id&query=urls:apple urls:microsoft")
assert response.status_code == 200
response_data = response.json()
assert len(response_data["data"]) == 1
    def test_query_scalar_optimization_varying_with_tags(self) -> None:
        """Tag filters use the scalar subquery only for supported operators."""
        project = self.create_project(teams=[self.team])
        replay1_id = uuid.uuid4().hex
        seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
        seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
        self.store_replays(
            mock_replay(seq1_timestamp, project.id, replay1_id, tags={"something": "else"})
        )
        self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
        with self.feature(self.features):
            # The X-Data-Source header reports which subquery strategy served
            # the request.
            # EQ and IN supported.
            response = self.client.get(self.url + "?field=id&query=something:else&statsPeriod=1d")
            assert response.status_code == 200
            assert response.headers["X-Data-Source"] == "scalar-subquery"
            response = self.client.get(
                self.url + "?field=id&query=something:else,other&statsPeriod=1d"
            )
            assert response.status_code == 200
            assert response.headers["X-Data-Source"] == "scalar-subquery"
            # Not operators are not supported.
            response = self.client.get(self.url + "?field=id&query=!something:else&statsPeriod=1d")
            assert response.status_code == 200
            assert response.headers["X-Data-Source"] == "aggregated-subquery"
            response = self.client.get(
                self.url + "?field=id&query=!something:else,other&statsPeriod=1d"
            )
            assert response.status_code == 200
            assert response.headers["X-Data-Source"] == "aggregated-subquery"
            # Match not supported.
            response = self.client.get(self.url + "?field=id&query=something:*else*&statsPeriod=1d")
            assert response.status_code == 200
            assert response.headers["X-Data-Source"] == "aggregated-subquery"
            response = self.client.get(
                self.url + "?field=id&query=!something:*else*&statsPeriod=1d"
            )
            assert response.status_code == 200
            assert response.headers["X-Data-Source"] == "aggregated-subquery"
def test_get_replays_missing_segment_0(self) -> None:
"""Test fetching replays when the 0th segment is missing."""
project = self.create_project(teams=[self.team])
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id, segment_id=2))
self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id, segment_id=1))
with self.feature(self.features):
response = self.client.get(self.url)
assert response.status_code == 200
response_data = response.json()
assert "data" in response_data
assert len(response_data["data"]) == 0
def test_new_errors_column(self) -> None:
project = self.create_project(teams=[self.team])
uid1 = uuid.uuid4().hex
uid2 = uuid.uuid4().hex
uid3 = uuid.uuid4().hex
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(
mock_replay(seq1_timestamp, project.id, replay1_id, error_ids=[uid1, uid2])
)
self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id, error_ids=[]))
self.store_replays(
self.mock_event_links(seq1_timestamp, project.id, "error", replay1_id, uid1)
)
self.store_replays(
self.mock_event_links(seq1_timestamp, project.id, "fatal", replay1_id, uid2)
)
with self.feature(self.features):
queries = [
f"error_id:{uid1}",
f"error_id:{uid2}",
f"error_id:[{uid1}]",
f"!error_id:[{uid3}]",
f"!error_id:{uid3}",
]
for query in queries:
response = self.client.get(self.url + f"?field=id&field=error_ids&query={query}")
assert response.status_code == 200
response_data = response.json()
assert len(response_data["data"]) == 1, query
assert len(response_data["data"][0]["error_ids"]) == 2, query
response = self.client.get(
self.url + f"?field=id&field=error_ids&query=error_id:{uid3}"
)
assert response.status_code == 200
response_data = response.json()
assert len(response_data["data"]) == 0, query
def test_warnings_column(self) -> None:
project = self.create_project(teams=[self.team])
uid1 = uuid.uuid4().hex
uid2 = uuid.uuid4().hex
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
self.store_replays(
self.mock_event_links(seq1_timestamp, project.id, "warning", replay1_id, uid1)
)
with self.feature(self.features):
queries = [
f"warning_id:{uid1}",
f"warning_id:[{uid1}]",
f"!warning_id:[{uid2}]",
f"!warning_id:{uid2}",
]
for query in queries:
response = self.client.get(self.url + f"?field=id&field=warning_ids&query={query}")
assert response.status_code == 200, query
response_data = response.json()
assert len(response_data["data"]) == 1, query
assert len(response_data["data"][0]["warning_ids"]) == 1, query
response = self.client.get(
self.url + f"?field=id&field=warning_ids&query=warning_id:{uid2}"
)
assert response.status_code == 200
response_data = response.json()
assert len(response_data["data"]) == 0, query
def test_infos_column(self) -> None:
project = self.create_project(teams=[self.team])
uid1 = uuid.uuid4().hex
uid2 = uuid.uuid4().hex
uid3 = uuid.uuid4().hex
replay1_id = uuid.uuid4().hex
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))
self.store_replays(
self.mock_event_links(seq1_timestamp, project.id, "info", replay1_id, uid1)
)
self.store_replays(
self.mock_event_links(seq1_timestamp, project.id, "debug", replay1_id, uid2)
)
with self.feature(self.features):
queries = [
f"info_id:{uid1}",
f"info_id:{uid2}",
f"info_id:[{uid1}]",
f"!info_id:[{uid3}]",
f"!info_id:{uid3}",
]
for query in queries:
response = self.client.get(self.url + f"?field=id&field=info_ids&query={query}")
assert response.status_code == 200
response_data = response.json()
assert len(response_data["data"]) == 1, query
assert len(response_data["data"][0]["info_ids"]) == 2, query
response = self.client.get(self.url + f"?field=id&field=info_ids&query=info_id:{uid3}")
assert response.status_code == 200
response_data = response.json()
assert len(response_data["data"]) == 0, query
def test_exp_query_branches_error_ids_conditions(self) -> None:
    """
    Test that the new columns work the same w/ only the previous errors populated
    """
    project = self.create_project(teams=[self.team])
    # uid1 is stored as an error id; uid2 is never stored (negation target).
    uid1 = uuid.uuid4().hex
    uid2 = uuid.uuid4().hex
    replay1_id = uuid.uuid4().hex

    seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
    seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
    # First segment carries the error id via the legacy error_ids column;
    # second segment has none, so the aggregate keeps exactly one id.
    self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id, error_ids=[uid1]))
    self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id, error_ids=[]))

    with self.feature(self.features):
        # Scalar, negated, list, and negated-list forms should all resolve to
        # the single replay with one aggregated error id.
        queries = [
            f"error_ids:{uid1}",
            f"!error_ids:{uid2}",
            f"error_ids:[{uid1},{uid2}]",
            f"!error_ids:[{uid2}]",
        ]
        for query in queries:
            response = self.client.get(self.url + f"?field=id&field=error_ids&query={query}")
            assert response.status_code == 200
            response_data = response.json()
            assert len(response_data["data"]) == 1, query
            assert len(response_data["data"][0]["error_ids"]) == 1, query
def test_event_id_count_columns(self) -> None:
    """Assert count_warnings / count_infos aggregate linked events per level.

    One linked event of each severity level is attached to ``replay1_id``;
    a debug event on an unrelated replay must not leak into its counts.
    """
    project = self.create_project(teams=[self.team])
    replay1_id = uuid.uuid4().hex
    other_replay = uuid.uuid4().hex

    seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
    seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
    self.store_replays(mock_replay(seq1_timestamp, project.id, other_replay))
    self.store_replays(mock_replay(seq2_timestamp, project.id, other_replay))
    self.store_replays(mock_replay(seq1_timestamp, project.id, replay1_id))
    self.store_replays(mock_replay(seq2_timestamp, project.id, replay1_id))

    # One linked event per severity level for replay1_id.
    for level in ("fatal", "error", "warning", "info", "debug"):
        self.store_replays(
            self.mock_event_links(
                seq1_timestamp, project.id, level, replay1_id, uuid.uuid4().hex
            )
        )
    # A debug event on a different replay; must not count toward replay1_id.
    self.store_replays(
        self.mock_event_links(seq1_timestamp, project.id, "debug", other_replay, uuid.uuid4().hex)
    )

    with self.feature(self.features):
        response = self.client.get(
            self.url + f"?field=id&field=count_warnings&field=count_infos&query=id:{replay1_id}"
        )
        assert response.status_code == 200
        response_data = response.json()
        assert response_data["data"][0]["count_warnings"] == 1
        # Previously requested but never asserted. "info" and "debug" both
        # roll up into the infos aggregate (mirrors info_ids behavior in
        # test_infos_column), hence 2 — TODO confirm against column mapping.
        assert response_data["data"][0]["count_infos"] == 2
def test_non_empty_string_scalar(self) -> None:
    """Empty strings in "dist" are ignored by both match and negation queries."""
    project = self.create_project(teams=[self.team])
    replay1_id = uuid.uuid4().hex
    seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)

    # Store the same replay twice: once with an empty dist and once with "1".
    # "dist" is used as a placeholder to exercise the "NonEmptyStringScalar"
    # class: its empty values must be skipped when negating.
    self.store_replays(
        mock_replay(seq1_timestamp, project.id, replay1_id, segment_id=0, dist="")
    )
    self.store_replays(
        mock_replay(seq1_timestamp, project.id, replay1_id, segment_id=0, dist="1")
    )

    cases = [
        # dist should be findable if any of its filled values match the query.
        ("dist:1", 1),
        ("dist:[1]", 1),
        ("dist:*1*", 1),
        # Explicitly negating the filled value must also ignore empty values.
        ("!dist:1", 0),
        ("!dist:[1]", 0),
        ("!dist:*1*", 0),
    ]
    with self.feature(self.features):
        for query, expected_rows in cases:
            response = self.client.get(self.url + f"?field=id&query={query}")
            assert response.status_code == 200, query
            response_data = response.json()
            assert len(response_data["data"]) == expected_rows, query
def test_get_replays_preferred_source(self) -> None:
    """The X-Preferred-Data-Source header selects the backing query strategy."""
    replay1_id = uuid.uuid4().hex
    first_ts = datetime.datetime.now() - datetime.timedelta(seconds=22)
    second_ts = datetime.datetime.now() - datetime.timedelta(seconds=22)
    self.store_replays(mock_replay(first_ts, self.project.id, replay1_id, segment_id=0))
    self.store_replays(mock_replay(second_ts, self.project.id, replay1_id, segment_id=1))

    # Each preferred source maps to its corresponding subquery strategy,
    # echoed back in the X-Data-Source response header.
    expectations = [
        ("scalar", "scalar-subquery"),
        ("aggregated", "aggregated-subquery"),
    ]
    with self.feature(self.features):
        for preferred, resolved in expectations:
            response = self.client.get(
                self.url, headers={"X-Preferred-Data-Source": preferred}
            )
            assert response.status_code == 200
            assert response.headers["X-Data-Source"] == resolved
def test_get_replays_default_data_source(self) -> None:
    """Assert default data source is conditional on flag."""
    replay_id = uuid.uuid4().hex
    ts_a = datetime.datetime.now() - datetime.timedelta(seconds=22)
    ts_b = datetime.datetime.now() - datetime.timedelta(seconds=22)
    self.store_replays(mock_replay(ts_a, self.project.id, replay_id, segment_id=0))
    self.store_replays(mock_replay(ts_b, self.project.id, replay_id, segment_id=1))

    # With no preference header, the scalar strategy is the default.
    with self.feature(self.features.copy()):
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert response.headers["X-Data-Source"] == "scalar-subquery"
def test_viewed_by_denylist(self) -> None:
    """Viewed-by search returns a 400 for projects on the viewed-by denylist.

    Even though a real view is recorded for the replay, every viewed-by /
    seen-by filter must be rejected when the project id is present in the
    "replay.viewed-by.project-denylist" option.
    """
    project = self.create_project(teams=[self.team])
    replay1_id = uuid.uuid4().hex
    seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
    seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
    # First segment: fully populated user / device / SDK / tag fields.
    self.store_replays(
        mock_replay(
            seq1_timestamp,
            project.id,
            replay1_id,
            platform="javascript",
            dist="abc123",
            user_id="123",
            user_email="username@example.com",
            user_name="username123",
            user_ip_address="127.0.0.1",
            sdk_name="sentry.javascript.react",
            sdk_version="6.18.10",
            os_name="macOS",
            os_version="15",
            browser_name="Firefox",
            browser_version="99",
            device_name="Macbook",
            device_brand="Apple",
            device_family="Macintosh",
            device_model="10",
            tags={"a": "m", "b": "q", "c": "test"},
            urls=["example.com"],
            segment_id=0,
        )
    )
    # Second segment: mostly-null fields, exercising aggregation over sparse rows.
    self.store_replays(
        mock_replay(
            seq2_timestamp,
            project.id,
            replay1_id,
            user_id=None,
            user_name=None,
            user_email=None,
            ipv4=None,
            os_name=None,
            os_version=None,
            browser_name=None,
            browser_version=None,
            device_name=None,
            device_brand=None,
            device_family=None,
            device_model=None,
            tags={"a": "n", "b": "o"},
            error_ids=[],
            segment_id=1,
        )
    )
    # Record a legitimate view so the filters below would otherwise match.
    self.store_replays(
        mock_replay_viewed(
            seq1_timestamp.timestamp(), project.id, replay1_id, viewed_by_id=self.user.id
        )
    )

    with self.feature(self.features):
        with self.options({"replay.viewed-by.project-denylist": [project.id]}):
            # Every viewed-by style filter (and its seen-by alias) is rejected.
            for query in [
                f"viewed_by_id:{self.user.id}",
                f"seen_by_id:{self.user.id}",
                "viewed_by_me:true",
                "seen_by_me:true",
            ]:
                response = self.client.get(self.url + "?field=id&query=" + query)
                assert response.status_code == 400
                assert (
                    response.json()["detail"]["message"]
                    == "Viewed by search has been disabled for your project due to a data irregularity."
                )
| OrganizationReplayIndexTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.