| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
python | numpy__numpy | numpy/_core/tests/test_half.py | {
"start": 515,
"end": 25260
} | class ____:
def _create_arrays_all(self):
# An array of all possible float16 values
all_f16 = np.arange(0x10000, dtype=uint16)
all_f16 = all_f16.view(float16)
# NaN value can cause an invalid FP exception if HW is being used
with np.errstate(invalid='ignore'):
all_f32 = np.array(all_f16, dtype=float32)
all_f64 = np.array(all_f16, dtype=float64)
return all_f16, all_f32, all_f64
def _create_arrays_nonan(self):
# An array of all non-NaN float16 values, in sorted order
nonan_f16 = np.concatenate(
(np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
np.arange(0x0000, 0x7c01, 1, dtype=uint16)))
nonan_f16 = nonan_f16.view(float16)
nonan_f32 = np.array(nonan_f16, dtype=float32)
nonan_f64 = np.array(nonan_f16, dtype=float64)
return nonan_f16, nonan_f32, nonan_f64
def _create_arrays_finite(self):
nonan_f16, nonan_f32, nonan_f64 = self._create_arrays_nonan()
finite_f16 = nonan_f16[1:-1]
finite_f32 = nonan_f32[1:-1]
finite_f64 = nonan_f64[1:-1]
return finite_f16, finite_f32, finite_f64
def test_half_conversions(self):
"""Checks that all 16-bit values survive conversion
to/from 32-bit and 64-bit float"""
# Because the underlying routines preserve the NaN bits, every
# value is preserved when converting to/from other floats.
all_f16, all_f32, all_f64 = self._create_arrays_all()
nonan_f16, _, _ = self._create_arrays_nonan()
# Convert from float32 back to float16
with np.errstate(invalid='ignore'):
b = np.array(all_f32, dtype=float16)
# avoid testing NaNs due to differing bit patterns in Q/S NaNs
b_nn = b == b
assert_equal(all_f16[b_nn].view(dtype=uint16),
b[b_nn].view(dtype=uint16))
# Convert from float64 back to float16
with np.errstate(invalid='ignore'):
b = np.array(all_f64, dtype=float16)
b_nn = b == b
assert_equal(all_f16[b_nn].view(dtype=uint16),
b[b_nn].view(dtype=uint16))
# Convert float16 to longdouble and back
# This doesn't necessarily preserve the extra NaN bits,
# so exclude NaNs.
a_ld = np.array(nonan_f16, dtype=np.longdouble)
b = np.array(a_ld, dtype=float16)
assert_equal(nonan_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Check the range for which all integers can be represented
i_int = np.arange(-2048, 2049)
i_f16 = np.array(i_int, dtype=float16)
j = np.array(i_f16, dtype=int)
assert_equal(i_int, j)
@pytest.mark.parametrize("string_dt", ["S", "U"])
def test_half_conversion_to_string(self, string_dt):
# Currently uses S/U32 (which is sufficient for float32)
expected_dt = np.dtype(f"{string_dt}32")
assert np.promote_types(np.float16, string_dt) == expected_dt
assert np.promote_types(string_dt, np.float16) == expected_dt
arr = np.ones(3, dtype=np.float16).astype(string_dt)
assert arr.dtype == expected_dt
@pytest.mark.parametrize("dtype", ["S", "U", object])
def test_to_half_cast_error(self, dtype):
arr = np.array(["3M"], dtype=dtype)
with pytest.raises(ValueError):
arr.astype(np.float16)
arr = np.array(["23490349034"], dtype=dtype)
with np.errstate(all="warn"):
with pytest.warns(RuntimeWarning):
arr.astype(np.float16)
with np.errstate(all="raise"):
with pytest.raises(FloatingPointError):
arr.astype(np.float16)
@pytest.mark.parametrize("string_dt", ["S", "U"])
def test_half_conversion_from_string(self, string_dt):
string = np.array("3.1416", dtype=string_dt)
assert string.astype(np.float16) == np.array(3.1416, dtype=np.float16)
@pytest.mark.parametrize("offset", [None, "up", "down"])
@pytest.mark.parametrize("shift", [None, "up", "down"])
@pytest.mark.parametrize("float_t", [np.float32, np.float64])
def test_half_conversion_rounding(self, float_t, shift, offset):
# Assumes that round to even is used during casting.
max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16)
# Test all (positive) finite numbers; denormals are the most interesting,
# however:
f16s_patterns = np.arange(0, max_pattern + 1, dtype=np.uint16)
f16s_float = f16s_patterns.view(np.float16).astype(float_t)
# Shift the values by half a bit up or down (or do not shift),
if shift == "up":
f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:]
elif shift == "down":
f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1]
else:
f16s_float = f16s_float[1:-1]
# Increase the float by a minimal value:
if offset == "up":
f16s_float = np.nextafter(f16s_float, float_t(np.inf))
elif offset == "down":
f16s_float = np.nextafter(f16s_float, float_t(-np.inf))
# Convert back to float16 and its bit pattern:
res_patterns = f16s_float.astype(np.float16).view(np.uint16)
# The above calculation tries the original values, or the exact
# midpoints between the float16 values. It then further offsets them
# by as little as possible. If no offset occurs, "round to even"
# logic is necessary; an arbitrarily small offset should always cause
# normal up/down rounding.
# Calculate the expected pattern:
cmp_patterns = f16s_patterns[1:-1].copy()
if shift == "down" and offset != "up":
shift_pattern = -1
elif shift == "up" and offset != "down":
shift_pattern = 1
else:
# There cannot be a shift: either shift is None, so all rounding
# goes back to the original, or the shift is cancelled out by the offset.
shift_pattern = 0
# If rounding occurs, is it normal rounding or round to even?
if offset is None:
# Round to even occurs; modify only non-even patterns, viewing as int16 so that adding -1 works
cmp_patterns[0::2].view(np.int16)[...] += shift_pattern
else:
cmp_patterns.view(np.int16)[...] += shift_pattern
assert_equal(res_patterns, cmp_patterns)
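# Editor's illustrative sketch (not part of the original suite): the two
# midpoint cases from test_half_rounding, shown in isolation. The value
# 1.0 + 2**-11 lies exactly halfway between 1.0 (even mantissa) and
# 1.0 + 2**-10, so round-to-even picks 1.0; nudging the midpoint upward
# breaks the tie and rounds up.
def _example_round_to_even(self):
    assert np.float64(1.0 + 2.0**-11).astype(np.float16) == np.float16(1.0)
    assert (np.float64(1.0 + 2.0**-11 + 2.0**-16).astype(np.float16)
            == np.float16(1.0 + 2.0**-10))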
@pytest.mark.parametrize(["float_t", "uint_t", "bits"],
[(np.float32, np.uint32, 23),
(np.float64, np.uint64, 52)])
def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits):
# Test specifically that all bits are considered when deciding
# whether round to even should occur (i.e. no bits are lost at the
# end). Compare also gh-12721. The most bits can get lost for the
# smallest denormal:
smallest_value = np.uint16(1).view(np.float16).astype(float_t)
assert smallest_value == 2**-24
# Will be rounded to zero based on round to even rule:
rounded_to_zero = smallest_value / float_t(2)
assert rounded_to_zero.astype(np.float16) == 0
# The significand will be all 0 for the float_t; test that we do not
# lose the lower bits of these:
for i in range(bits):
# slightly increasing the value should make it round up:
larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i)
larger_value = larger_pattern.view(float_t)
assert larger_value.astype(np.float16) == smallest_value
def test_nans_infs(self):
all_f16, all_f32, _ = self._create_arrays_all()
with np.errstate(all='ignore'):
# Check some of the ufuncs
assert_equal(np.isnan(all_f16), np.isnan(all_f32))
assert_equal(np.isinf(all_f16), np.isinf(all_f32))
assert_equal(np.isfinite(all_f16), np.isfinite(all_f32))
assert_equal(np.signbit(all_f16), np.signbit(all_f32))
assert_equal(np.spacing(float16(65504)), np.inf)
# Check comparisons of all values with NaN
nan = float16(np.nan)
assert_(not (all_f16 == nan).any())
assert_(not (nan == all_f16).any())
assert_((all_f16 != nan).all())
assert_((nan != all_f16).all())
assert_(not (all_f16 < nan).any())
assert_(not (nan < all_f16).any())
assert_(not (all_f16 <= nan).any())
assert_(not (nan <= all_f16).any())
assert_(not (all_f16 > nan).any())
assert_(not (nan > all_f16).any())
assert_(not (all_f16 >= nan).any())
assert_(not (nan >= all_f16).any())
def test_half_values(self):
"""Confirms a small number of known half values"""
a = np.array([1.0, -1.0,
2.0, -2.0,
0.0999755859375, 0.333251953125, # 1/10, 1/3
65504, -65504, # Maximum magnitude
2.0**(-14), -2.0**(-14), # Minimum normal
2.0**(-24), -2.0**(-24), # Minimum subnormal
0, -1 / 1e1000, # Signed zeros
np.inf, -np.inf])
b = np.array([0x3c00, 0xbc00,
0x4000, 0xc000,
0x2e66, 0x3555,
0x7bff, 0xfbff,
0x0400, 0x8400,
0x0001, 0x8001,
0x0000, 0x8000,
0x7c00, 0xfc00], dtype=uint16)
b = b.view(dtype=float16)
assert_equal(a, b)
def test_half_rounding(self):
"""Checks that rounding when converting to half is correct"""
a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal
2.0**-25, # Underflows to zero (nearest even mode)
2.0**-26, # Underflows to zero
1.0 + 2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10)
1.0 + 2.0**-11, # rounds to 1.0 (nearest even mode)
1.0 + 2.0**-12, # rounds to 1.0
65519, # rounds to 65504
65520], # rounds to inf
dtype=float64)
rounded = [2.0**-24,
0.0,
0.0,
1.0 + 2.0**(-10),
1.0,
1.0,
65504,
np.inf]
# Check float64->float16 rounding
with np.errstate(over="ignore"):
b = np.array(a, dtype=float16)
assert_equal(b, rounded)
# Check float32->float16 rounding
a = np.array(a, dtype=float32)
with np.errstate(over="ignore"):
b = np.array(a, dtype=float16)
assert_equal(b, rounded)
def test_half_correctness(self):
"""Take every finite float16, and check the casting functions with
a manual conversion."""
finite_f16, finite_f32, finite_f64 = self._create_arrays_finite()
# Create an array of all finite float16s
a_bits = finite_f16.view(dtype=uint16)
# Convert to 64-bit float manually
a_sgn = (-1.0)**((a_bits & 0x8000) >> 15)
a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15
a_man = (a_bits & 0x03ff) * 2.0**(-10)
# Implicit bit of normalized floats
a_man[a_exp != -15] += 1
# Denormalized exponent is -14
a_exp[a_exp == -15] = -14
a_manual = a_sgn * a_man * 2.0**a_exp
a32_fail = np.nonzero(finite_f32 != a_manual)[0]
if len(a32_fail) != 0:
bad_index = a32_fail[0]
assert_equal(finite_f32, a_manual,
"First non-equal is half value 0x%x -> %g != %g" %
(a_bits[bad_index],
finite_f32[bad_index],
a_manual[bad_index]))
a64_fail = np.nonzero(finite_f64 != a_manual)[0]
if len(a64_fail) != 0:
bad_index = a64_fail[0]
assert_equal(finite_f64, a_manual,
"First non-equal is half value 0x%x -> %g != %g" %
(a_bits[bad_index],
finite_f64[bad_index],
a_manual[bad_index]))
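# Editor's illustrative sketch (not part of the original suite): the same
# bit-level decoding as above applied to one pattern. 0x3555 is the float16
# closest to 1/3 (see test_half_values).
def _example_decode_single_half(self):
    bits = 0x3555
    sgn = (-1.0) ** ((bits & 0x8000) >> 15)   # sign bit
    exp = ((bits & 0x7c00) >> 10) - 15        # biased 5-bit exponent
    man = (bits & 0x03ff) * 2.0 ** (-10)      # 10-bit mantissa
    if exp != -15:
        man += 1.0   # implicit bit of normalized floats
    else:
        exp = -14    # denormalized exponent is -14
    assert sgn * man * 2.0 ** exp == np.uint16(bits).view(np.float16)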
def test_half_ordering(self):
"""Make sure comparisons are working right"""
nonan_f16, _, _ = self._create_arrays_nonan()
# All non-NaN float16 values in reverse order
a = nonan_f16[::-1].copy()
# 32-bit float copy
b = np.array(a, dtype=float32)
# Should sort the same
a.sort()
b.sort()
assert_equal(a, b)
# Comparisons should work
assert_((a[:-1] <= a[1:]).all())
assert_(not (a[:-1] > a[1:]).any())
assert_((a[1:] >= a[:-1]).all())
assert_(not (a[1:] < a[:-1]).any())
# All != except for +/-0
assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size - 2)
assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size - 2)
def test_half_funcs(self):
"""Test the various ArrFuncs"""
# fill
assert_equal(np.arange(10, dtype=float16),
np.arange(10, dtype=float32))
# fillwithscalar
a = np.zeros((5,), dtype=float16)
a.fill(1)
assert_equal(a, np.ones((5,), dtype=float16))
# nonzero and copyswap
a = np.array([0, 0, -1, -1 / 1e20, 0, 2.0**-24, 7.629e-6], dtype=float16)
assert_equal(a.nonzero()[0],
[2, 5, 6])
a = a.byteswap()
a = a.view(a.dtype.newbyteorder())
assert_equal(a.nonzero()[0],
[2, 5, 6])
# dot
a = np.arange(0, 10, 0.5, dtype=float16)
b = np.ones((20,), dtype=float16)
assert_equal(np.dot(a, b),
95)
# argmax
a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
4)
a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
5)
# getitem
a = np.arange(10, dtype=float16)
for i in range(10):
assert_equal(a.item(i), i)
def test_spacing_nextafter(self):
"""Test np.spacing and np.nextafter"""
# All non-negative finite #'s
a = np.arange(0x7c00, dtype=uint16)
hinf = np.array((np.inf,), dtype=float16)
hnan = np.array((np.nan,), dtype=float16)
a_f16 = a.view(dtype=float16)
assert_equal(np.spacing(a_f16[:-1]), a_f16[1:] - a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])
assert_equal(np.nextafter(hinf, a_f16), a_f16[-1])
assert_equal(np.nextafter(-hinf, a_f16), -a_f16[-1])
assert_equal(np.nextafter(hinf, hinf), hinf)
assert_equal(np.nextafter(hinf, -hinf), a_f16[-1])
assert_equal(np.nextafter(-hinf, hinf), -a_f16[-1])
assert_equal(np.nextafter(-hinf, -hinf), -hinf)
assert_equal(np.nextafter(a_f16, hnan), hnan[0])
assert_equal(np.nextafter(hnan, a_f16), hnan[0])
assert_equal(np.nextafter(hnan, hnan), hnan)
assert_equal(np.nextafter(hinf, hnan), hnan)
assert_equal(np.nextafter(hnan, hinf), hnan)
# switch to negatives
a |= 0x8000
assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
assert_equal(np.spacing(a_f16[1:]), a_f16[:-1] - a_f16[1:])
assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
assert_equal(np.nextafter(hinf, a_f16), -a_f16[-1])
assert_equal(np.nextafter(-hinf, a_f16), a_f16[-1])
assert_equal(np.nextafter(a_f16, hnan), hnan[0])
assert_equal(np.nextafter(hnan, a_f16), hnan[0])
def test_half_ufuncs(self):
"""Test the various ufuncs"""
a = np.array([0, 1, 2, 4, 2], dtype=float16)
b = np.array([-2, 5, 1, 4, 3], dtype=float16)
c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)
assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])
assert_equal(np.equal(a, b), [False, False, False, True, False])
assert_equal(np.not_equal(a, b), [True, True, True, False, True])
assert_equal(np.less(a, b), [False, True, False, False, True])
assert_equal(np.less_equal(a, b), [False, True, False, True, True])
assert_equal(np.greater(a, b), [True, False, True, False, False])
assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
assert_equal(np.logical_and(a, b), [False, True, True, True, True])
assert_equal(np.logical_or(a, b), [True, True, True, True, True])
assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
assert_equal(np.logical_not(a), [True, False, False, False, False])
assert_equal(np.isnan(c), [False, False, False, True, False])
assert_equal(np.isinf(c), [False, False, True, False, False])
assert_equal(np.isfinite(c), [True, True, False, False, True])
assert_equal(np.signbit(b), [True, False, False, False, False])
assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])
assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
x = np.maximum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [0, 5, 1, 0, 6])
assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
x = np.minimum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [-2, -1, -np.inf, 0, 3])
assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])
assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
assert_equal(np.square(b), [4, 25, 1, 16, 9])
assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
assert_equal(np.conjugate(b), b)
assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
assert_equal(np.negative(b), [2, -5, -1, -4, -3])
assert_equal(np.positive(b), b)
assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
def test_half_coercion(self):
"""Test that half gets coerced properly with the other types"""
a16 = np.array((1,), dtype=float16)
a32 = np.array((1,), dtype=float32)
b16 = float16(1)
b32 = float32(1)
assert np.power(a16, 2).dtype == float16
assert np.power(a16, 2.0).dtype == float16
assert np.power(a16, b16).dtype == float16
assert np.power(a16, b32).dtype == float32
assert np.power(a16, a16).dtype == float16
assert np.power(a16, a32).dtype == float32
assert np.power(b16, 2).dtype == float16
assert np.power(b16, 2.0).dtype == float16
assert np.power(b16, b16).dtype == float16
assert np.power(b16, b32).dtype == float32
assert np.power(b16, a16).dtype == float16
assert np.power(b16, a32).dtype == float32
assert np.power(a32, a16).dtype == float32
assert np.power(a32, b16).dtype == float32
assert np.power(b32, a16).dtype == float32
assert np.power(b32, b16).dtype == float32
@pytest.mark.skipif(platform.machine() == "armv5tel",
reason="See gh-413.")
@pytest.mark.skipif(IS_WASM,
reason="fp exceptions don't work in wasm.")
def test_half_fpe(self):
with np.errstate(all='raise'):
sx16 = np.array((1e-4,), dtype=float16)
bx16 = np.array((1e4,), dtype=float16)
sy16 = float16(1e-4)
by16 = float16(1e4)
# Underflow errors
assert_raises_fpe('underflow', lambda a, b: a * b, sx16, sx16)
assert_raises_fpe('underflow', lambda a, b: a * b, sx16, sy16)
assert_raises_fpe('underflow', lambda a, b: a * b, sy16, sx16)
assert_raises_fpe('underflow', lambda a, b: a * b, sy16, sy16)
assert_raises_fpe('underflow', lambda a, b: a / b, sx16, bx16)
assert_raises_fpe('underflow', lambda a, b: a / b, sx16, by16)
assert_raises_fpe('underflow', lambda a, b: a / b, sy16, bx16)
assert_raises_fpe('underflow', lambda a, b: a / b, sy16, by16)
assert_raises_fpe('underflow', lambda a, b: a / b,
float16(2.**-14), float16(2**11))
assert_raises_fpe('underflow', lambda a, b: a / b,
float16(-2.**-14), float16(2**11))
assert_raises_fpe('underflow', lambda a, b: a / b,
float16(2.**-14 + 2**-24), float16(2))
assert_raises_fpe('underflow', lambda a, b: a / b,
float16(-2.**-14 - 2**-24), float16(2))
assert_raises_fpe('underflow', lambda a, b: a / b,
float16(2.**-14 + 2**-23), float16(4))
# Overflow errors
assert_raises_fpe('overflow', lambda a, b: a * b, bx16, bx16)
assert_raises_fpe('overflow', lambda a, b: a * b, bx16, by16)
assert_raises_fpe('overflow', lambda a, b: a * b, by16, bx16)
assert_raises_fpe('overflow', lambda a, b: a * b, by16, by16)
assert_raises_fpe('overflow', lambda a, b: a / b, bx16, sx16)
assert_raises_fpe('overflow', lambda a, b: a / b, bx16, sy16)
assert_raises_fpe('overflow', lambda a, b: a / b, by16, sx16)
assert_raises_fpe('overflow', lambda a, b: a / b, by16, sy16)
assert_raises_fpe('overflow', lambda a, b: a + b,
float16(65504), float16(17))
assert_raises_fpe('overflow', lambda a, b: a - b,
float16(-65504), float16(17))
assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf))
assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) # noqa: E501
assert_raises_fpe('overflow', np.spacing, float16(65504))
# Invalid value errors
assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf))
assert_raises_fpe('invalid', np.spacing, float16(np.inf))
assert_raises_fpe('invalid', np.spacing, float16(np.nan))
# These should not raise
float16(65472) + float16(32)
float16(2**-13) / float16(2)
float16(2**-14) / float16(2**10)
np.spacing(float16(-65504))
np.nextafter(float16(65504), float16(-np.inf))
np.nextafter(float16(-65504), float16(np.inf))
np.nextafter(float16(np.inf), float16(0))
np.nextafter(float16(-np.inf), float16(0))
np.nextafter(float16(0), float16(np.nan))
np.nextafter(float16(np.nan), float16(0))
float16(2**-14) / float16(2**10)
float16(-2**-14) / float16(2**10)
float16(2**-14 + 2**-23) / float16(2)
float16(-2**-14 - 2**-23) / float16(2)
def test_half_array_interface(self):
"""Test that half is compatible with __array_interface__"""
class Dummy:
pass
a = np.ones((1,), dtype=float16)
b = Dummy()
b.__array_interface__ = a.__array_interface__
c = np.array(b)
assert_(c.dtype == float16)
assert_equal(a, c)
| TestHalf |
python | weaviate__weaviate-python-client | weaviate/collections/queries/near_object/generate/async_.py | {
"start": 318,
"end": 473
} | class ____(
Generic[Properties, References],
_NearObjectGenerateExecutor[ConnectionAsync, Properties, References],
):
pass
| _NearObjectGenerateAsync |
python | networkx__networkx | networkx/utils/tests/test_mapped_queue.py | {
"start": 1064,
"end": 5344
} | class ____:
def setup_method(self):
pass
def _check_map(self, q):
assert q.position == {elt: pos for pos, elt in enumerate(q.heap)}
def _make_mapped_queue(self, h):
q = MappedQueue()
q.heap = h
q.position = {elt: pos for pos, elt in enumerate(h)}
return q
def test_heapify(self):
h = [5, 4, 3, 2, 1, 0]
q = self._make_mapped_queue(h)
q._heapify()
self._check_map(q)
def test_init(self):
h = [5, 4, 3, 2, 1, 0]
q = MappedQueue(h)
self._check_map(q)
def test_incomparable(self):
h = [5, 4, "a", 2, 1, 0]
pytest.raises(TypeError, MappedQueue, h)
def test_len(self):
h = [5, 4, 3, 2, 1, 0]
q = MappedQueue(h)
self._check_map(q)
assert len(q) == 6
def test_siftup_leaf(self):
h = [2]
h_sifted = [2]
q = self._make_mapped_queue(h)
q._siftup(0)
assert q.heap == h_sifted
self._check_map(q)
def test_siftup_one_child(self):
h = [2, 0]
h_sifted = [0, 2]
q = self._make_mapped_queue(h)
q._siftup(0)
assert q.heap == h_sifted
self._check_map(q)
def test_siftup_left_child(self):
h = [2, 0, 1]
h_sifted = [0, 2, 1]
q = self._make_mapped_queue(h)
q._siftup(0)
assert q.heap == h_sifted
self._check_map(q)
def test_siftup_right_child(self):
h = [2, 1, 0]
h_sifted = [0, 1, 2]
q = self._make_mapped_queue(h)
q._siftup(0)
assert q.heap == h_sifted
self._check_map(q)
def test_siftup_multiple(self):
h = [0, 1, 2, 4, 3, 5, 6]
h_sifted = [0, 1, 2, 4, 3, 5, 6]
q = self._make_mapped_queue(h)
q._siftup(0)
assert q.heap == h_sifted
self._check_map(q)
def test_siftdown_leaf(self):
h = [2]
h_sifted = [2]
q = self._make_mapped_queue(h)
q._siftdown(0, 0)
assert q.heap == h_sifted
self._check_map(q)
def test_siftdown_single(self):
h = [1, 0]
h_sifted = [0, 1]
q = self._make_mapped_queue(h)
q._siftdown(0, len(h) - 1)
assert q.heap == h_sifted
self._check_map(q)
def test_siftdown_multiple(self):
h = [1, 2, 3, 4, 5, 6, 7, 0]
h_sifted = [0, 1, 3, 2, 5, 6, 7, 4]
q = self._make_mapped_queue(h)
q._siftdown(0, len(h) - 1)
assert q.heap == h_sifted
self._check_map(q)
def test_push(self):
to_push = [6, 1, 4, 3, 2, 5, 0]
h_sifted = [0, 2, 1, 6, 3, 5, 4]
q = MappedQueue()
for elt in to_push:
q.push(elt)
assert q.heap == h_sifted
self._check_map(q)
def test_push_duplicate(self):
to_push = [2, 1, 0]
h_sifted = [0, 2, 1]
q = MappedQueue()
for elt in to_push:
inserted = q.push(elt)
assert inserted
assert q.heap == h_sifted
self._check_map(q)
inserted = q.push(1)
assert not inserted
def test_pop(self):
h = [3, 4, 6, 0, 1, 2, 5]
h_sorted = sorted(h)
q = self._make_mapped_queue(h)
q._heapify()
popped = [q.pop() for _ in range(len(h))]
assert popped == h_sorted
self._check_map(q)
def test_remove_leaf(self):
h = [0, 2, 1, 6, 3, 5, 4]
h_removed = [0, 2, 1, 6, 4, 5]
q = self._make_mapped_queue(h)
removed = q.remove(3)
assert q.heap == h_removed
def test_remove_root(self):
h = [0, 2, 1, 6, 3, 5, 4]
h_removed = [1, 2, 4, 6, 3, 5]
q = self._make_mapped_queue(h)
removed = q.remove(0)
assert q.heap == h_removed
def test_update_leaf(self):
h = [0, 20, 10, 60, 30, 50, 40]
h_updated = [0, 15, 10, 60, 20, 50, 40]
q = self._make_mapped_queue(h)
removed = q.update(30, 15)
assert q.heap == h_updated
def test_update_root(self):
h = [0, 20, 10, 60, 30, 50, 40]
h_updated = [10, 20, 35, 60, 30, 50, 40]
q = self._make_mapped_queue(h)
removed = q.update(0, 35)
assert q.heap == h_updated
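# Editor's illustrative sketch (not part of the original suite): a typical
# end-to-end use of the API exercised above -- init, push, update, then
# drain in sorted order.
def _example_round_trip(self):
    q = MappedQueue([5, 1, 3])
    assert q.push(0)
    q.update(5, 2)
    assert [q.pop() for _ in range(len(q))] == [0, 1, 2, 3]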
| TestMappedQueue |
python | sqlalchemy__sqlalchemy | test/orm/test_relationships.py | {
"start": 96128,
"end": 98701
} | class ____(fixtures.MappedTest):
"""'viewonly' mappings with overlapping PK column names."""
@classmethod
def define_tables(cls, metadata):
Table(
"t1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(40)),
)
Table(
"t2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(40)),
Column("t1id", Integer, ForeignKey("t1.id")),
)
Table(
"t3",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(40)),
Column("t2id", Integer, ForeignKey("t2.id")),
)
def test_three_table_view(self):
"""A three table join with overlapping PK names.
A third table is pulled into the primary join condition using
overlapping PK column names and should not produce a 'conflicting column'
error.
"""
t2, t3, t1 = (self.tables.t2, self.tables.t3, self.tables.t1)
class C1(BasicEntity):
pass
class C2(BasicEntity):
pass
class C3(BasicEntity):
pass
self.mapper_registry.map_imperatively(
C1,
t1,
properties={
"t2s": relationship(C2),
"t2_view": relationship(
C2,
viewonly=True,
primaryjoin=sa.and_(
t1.c.id == t2.c.t1id,
t3.c.t2id == t2.c.id,
t3.c.data == t1.c.data,
),
),
},
)
self.mapper_registry.map_imperatively(C2, t2)
self.mapper_registry.map_imperatively(
C3, t3, properties={"t2": relationship(C2)}
)
c1 = C1()
c1.data = "c1data"
c2a = C2()
c1.t2s.append(c2a)
c2b = C2()
c1.t2s.append(c2b)
c3 = C3()
c3.data = "c1data"
c3.t2 = c2b
sess = fixture_session()
sess.add(c1)
sess.add(c3)
sess.flush()
sess.expunge_all()
c1 = sess.get(C1, c1.id)
assert {x.id for x in c1.t2s} == {c2a.id, c2b.id}
assert {x.id for x in c1.t2_view} == {c2b.id}
| ViewOnlyOverlappingNames |
python | django__django | tests/m2o_recursive/tests.py | {
"start": 73,
"end": 676
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.r = Category.objects.create(id=None, name="Root category", parent=None)
cls.c = Category.objects.create(id=None, name="Child category", parent=cls.r)
def test_m2o_recursive(self):
self.assertSequenceEqual(self.r.child_set.all(), [self.c])
self.assertEqual(self.r.child_set.get(name__startswith="Child").id, self.c.id)
self.assertIsNone(self.r.parent)
self.assertSequenceEqual(self.c.child_set.all(), [])
self.assertEqual(self.c.parent.id, self.r.id)
| ManyToOneRecursiveTests |
python | getsentry__sentry | src/sentry/sentry_metrics/querying/visitors/base.py | {
"start": 292,
"end": 1932
} | class ____(ABC, Generic[TVisited]):
"""
Abstract visitor that defines the visiting behavior over a `QueryExpression`.
"""
def visit(self, query_expression: QueryExpression) -> TVisited:
if isinstance(query_expression, Formula):
return self._visit_formula(query_expression)
elif isinstance(query_expression, Timeseries):
return self._visit_timeseries(query_expression)
elif isinstance(query_expression, int):
return self._visit_int(query_expression)
elif isinstance(query_expression, float):
return self._visit_float(query_expression)
elif isinstance(query_expression, str):
return self._visit_string(query_expression)
raise AssertionError(
f"Unhandled query expression {query_expression} of type {type(query_expression)}"
)
def _visit_formula(self, formula: Formula) -> TVisited:
# The default implementation just mutates the parameters of the `Formula`.
parameters = []
for parameter in formula.parameters:
parameters.append(self.visit(parameter))
return formula.set_parameters(parameters)
def _visit_timeseries(self, timeseries: Timeseries) -> TVisited:
return timeseries
def _visit_int(self, int_number: int) -> TVisited:
return int_number # type: ignore[return-value]
def _visit_float(self, float_number: float) -> TVisited:
return float_number # type: ignore[return-value]
def _visit_string(self, string: str) -> TVisited:
return string # type: ignore[return-value]
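# Editor's illustrative sketch (hypothetical subclass of the abstract visitor
# above): a concrete visitor that counts Timeseries leaves, overriding only
# the hooks it needs; Formula nodes recurse through their parameters.
class _ExampleTimeseriesCounter(____[int]):
    def _visit_formula(self, formula: Formula) -> int:
        return sum(self.visit(parameter) for parameter in formula.parameters)
    def _visit_timeseries(self, timeseries: Timeseries) -> int:
        return 1
    def _visit_int(self, int_number: int) -> int:
        return 0
    def _visit_float(self, float_number: float) -> int:
        return 0
    def _visit_string(self, string: str) -> int:
        return 0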
| QueryExpressionVisitor |
python | huggingface__transformers | tests/models/video_llava/test_modeling_video_llava.py | {
"start": 16492,
"end": 22969
} | class ____(unittest.TestCase):
def setUp(self):
self.processor = VideoLlavaProcessor.from_pretrained("LanguageBind/Video-LLaVA-7B-hf")
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@slow
@require_bitsandbytes
def test_small_model_integration_test(self):
# Let' s make sure we test the preprocessing to replace what is used
model = VideoLlavaForConditionalGeneration.from_pretrained(
"LanguageBind/Video-LLaVA-7B-hf", quantization_config=BitsAndBytesConfig(load_in_4bit=True)
)
prompt = "USER: <video>\nWhy is this video funny? ASSISTANT:"
video_file = hf_hub_download(
repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset"
)
video_file = np.load(video_file)
inputs = self.processor(text=prompt, videos=video_file, return_tensors="pt").to(torch_device)
EXPECTED_INPUT_IDS = torch.tensor([1, 3148, 1001, 29901, 29871, 13, 11008, 338, 445, 4863, 2090, 1460, 29973, 319, 1799, 9047, 13566, 29901], device=torch_device) # fmt: skip
non_video_inputs = inputs["input_ids"][inputs["input_ids"] != 32001]
self.assertTrue(torch.equal(non_video_inputs, EXPECTED_INPUT_IDS))
output = model.generate(**inputs, do_sample=False, max_new_tokens=20)
EXPECTED_DECODED_TEXT = "USER: \nWhy is this video funny? ASSISTANT: The video is funny because it shows a baby sitting on a bed and reading a book, which" # fmt: skip
self.assertEqual(
self.processor.decode(output[0], skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
@require_bitsandbytes
def test_small_model_integration_test_mixed_inputs(self):
model = VideoLlavaForConditionalGeneration.from_pretrained(
"LanguageBind/Video-LLaVA-7B-hf", quantization_config=BitsAndBytesConfig(load_in_4bit=True)
)
prompts = [
"USER: <image>\nWhat are the cats in the image doing? ASSISTANT:",
"USER: <video>\nWhy is this video funny? ASSISTANT:",
]
video_file = hf_hub_download(
repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset"
)
video_file = np.load(video_file)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = self.processor(
text=prompts, images=[image], videos=[video_file], padding=True, return_tensors="pt"
).to(torch_device)
output = model.generate(**inputs, do_sample=False, max_new_tokens=20)
EXPECTED_DECODED_TEXT = [
'USER: \nWhat are the cats in the image doing? ASSISTANT: The cats in the image are sleeping or resting on a couch.',
'USER: \nWhy is this video funny? ASSISTANT: The video is funny because it shows a baby sitting on a bed and reading a book, which'
] # fmt: skip
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
@require_bitsandbytes
def test_small_model_integration_test_llama(self):
model = VideoLlavaForConditionalGeneration.from_pretrained(
"LanguageBind/Video-LLaVA-7B-hf", quantization_config=BitsAndBytesConfig(load_in_4bit=True)
)
processor = VideoLlavaProcessor.from_pretrained("LanguageBind/Video-LLaVA-7B-hf")
prompt = "USER: <video>\nDescribe the video in details. ASSISTANT:"
video_file = hf_hub_download(
repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset"
)
video_file = np.load(video_file)
inputs = self.processor(text=prompt, videos=video_file, return_tensors="pt").to(torch_device, torch.float16)
output = model.generate(**inputs, max_new_tokens=900, do_sample=False)
EXPECTED_DECODED_TEXT = "USER: \nDescribe the video in details. ASSISTANT: The video features a young child sitting on a bed, holding a book and reading it. " \
"The child appears to be enjoying the book, as they are fully engaged in the activity. The bed is located in a bedroom, and there is a chair nearby. The " \
"child is wearing a blue shirt and glasses, which suggests that they might have a visual impairment. The room is well-lit, and there is a clock on the wall, " \
"indicating the time. The child's focus on the book indicates that they are interested in the content and are actively participating in the reading process. " \
"Overall, the video captures a heartwarming moment of a child engaging in a simple yet essential activity, which is reading." # fmt: skip
self.assertEqual(
processor.decode(output[0], skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
@require_bitsandbytes
def test_small_model_integration_test_llama_batched(self):
model = VideoLlavaForConditionalGeneration.from_pretrained(
"LanguageBind/Video-LLaVA-7B-hf", quantization_config=BitsAndBytesConfig(load_in_4bit=True)
)
processor = VideoLlavaProcessor.from_pretrained("LanguageBind/Video-LLaVA-7B-hf")
processor.tokenizer.padding_side = "left"
prompts = [
"USER: <video>\nWhat is the baby doing? ASSISTANT:",
"USER: <video>\nWho is sitting next to the woman? ASSISTANT:",
]
video_1 = np.load(
hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset")
)
video_2 = np.load(
hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="video_demo_2.npy", repo_type="dataset")
)
inputs = processor(text=prompts, videos=[video_1, video_2], return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20)
EXPECTED_DECODED_TEXT = [
'USER: \nWhat is the baby doing? ASSISTANT: The baby is sitting on a bed and reading a book.',
'USER: \nWho is sitting next to the woman? ASSISTANT: A small dog is sitting next to the woman.'
] # fmt: skip
self.assertEqual(processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT)
| VideoLlavaForConditionalGenerationIntegrationTest |
python | huggingface__transformers | src/transformers/testing_utils.py | {
"start": 111421,
"end": 149173
} | class ____(UserDict[PackedDeviceProperties, Any]):
def get_expectation(self) -> Any:
"""
Find best matching expectation based on environment device properties. We look at device_type, major and minor
versions of the drivers. Expectations are stored as a dictionary with keys of the form
(device_type, (major, minor)). If the major and minor versions are not provided, we use None.
"""
return self.find_expectation(get_device_properties())
def unpacked(self) -> list[tuple[DeviceProperties, Any]]:
return [(unpack_device_properties(k), v) for k, v in self.data.items()]
@staticmethod
def is_default(expectation_key: PackedDeviceProperties) -> bool:
"""
This function returns True if the expectation_key is the Default expectation (None, None).
When an Expectation dict contains a Default value, it is generally because the test existed before Expectations.
When we modify a test to use Expectations for a specific hardware, we don't want to affect the tests on other
hardware. Thus we set the previous value as the Default expectation with key (None, None) and add a value for
the specific hardware with key (hardware_type, (major, minor)).
"""
return all(p is None for p in expectation_key)
@staticmethod
def score(properties: DeviceProperties, other: DeviceProperties) -> float:
"""
Returns a score indicating how similar two instances of the `DeviceProperties` tuple are.
Rules are as follows:
* Matching `type` adds one point, semi-matching `type` adds 0.1 point (e.g. cuda and rocm).
* If types match, matching `major` adds another point, and then matching `minor` adds another.
* The Default expectation (None, None) is worth 0.5 point, which is better than semi-matching. More on this
in the `is_default` function.
"""
device_type, major, minor = properties
other_device_type, other_major, other_minor = other
score = 0
# Matching device type, maybe major and minor
if device_type is not None and device_type == other_device_type:
score += 1
if major is not None and major == other_major:
score += 1
if minor is not None and minor == other_minor:
score += 1
# Semi-matching device type, which carries less importance than the default expectation
elif device_type in ["cuda", "rocm"] and other_device_type in ["cuda", "rocm"]:
score = 0.1
# Default expectation
if Expectations.is_default(other):
score = 0.5
return score
def find_expectation(self, properties: DeviceProperties = (None, None, None)) -> Any:
"""
Find the best matching expectation based on the provided device properties. We score each expectation, and to
distinguish between expectations with the same score, we use the major and minor version numbers, prioritizing
most recent versions.
"""
(result_key, result) = max(
self.unpacked(),
key=lambda x: (
Expectations.score(properties, x[0]), # x[0] is a device properties tuple (device_type, major, minor)
x[0][1] if x[0][1] is not None else -1, # This key is the major version, -1 if major is None
x[0][2] if x[0][2] is not None else -1, # This key is the minor version, -1 if minor is None
),
)
if Expectations.score(properties, result_key) == 0:
raise ValueError(f"No matching expectation found for {properties}")
return result
def __repr__(self):
return f"{self.data}"
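# Editor's illustrative sketch (hypothetical keys and values): resolving an
# expectation on a CUDA 8.6 machine, using the packed-key form
# (device_type, (major, minor)) described above. The default (None, None)
# entry scores 0.5 while the CUDA entry scores 2 (type + major match), so
# the CUDA value wins.
def _example_expectations_lookup():
    expectations = Expectations(
        {
            (None, None): 1e-3,
            ("cuda", (8, 0)): 1e-4,
        }
    )
    assert expectations.find_expectation(("cuda", 8, 6)) == 1e-4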
def patch_torch_compile_force_graph():
"""
Patch `torch.compile` to always use `fullgraph=True`.
This is useful when some `torch.compile` tests are running with `fullgraph=False` and we want to be able to run
them with `fullgraph=True` in some occasion (without introducing new tests) to make sure there is no graph break.
After PR #40137, `CompileConfig.fullgraph` is `False` by default, this patch is necessary.
"""
force_fullgraph = os.environ.get("TORCH_COMPILE_FORCE_FULLGRAPH", "")
force_fullgraph = force_fullgraph.lower() in ("yes", "true", "on", "t", "y", "1")
if force_fullgraph:
import torch
orig_method = torch.compile
def patched(*args, **kwargs):
# In `torch.compile`, every argument except `model` is keyword-only.
kwargs["fullgraph"] = True
return orig_method(*args, **kwargs)
torch.compile = patched
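# Editor's illustrative sketch (not from the source): opting in at session
# start. The flag must be set before the patcher runs; afterwards every
# torch.compile(...) call in the process receives fullgraph=True.
def _example_force_fullgraph():
    os.environ["TORCH_COMPILE_FORCE_FULLGRAPH"] = "1"
    patch_torch_compile_force_graph()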
def _get_test_info():
"""
Collect some information about the current test.
For example, test full name, line number, stack, traceback, etc.
"""
full_test_name = os.environ.get("PYTEST_CURRENT_TEST", "").split(" ")[0]
test_file, test_class, test_name = full_test_name.split("::")
# from the most recent frame to the top frame
stack_from_inspect = inspect.stack()
# but visit from the top frame to the most recent frame
actual_test_file, _actual_test_class = test_file, test_class
test_frame, test_obj, test_method = None, None, None
for frame in reversed(stack_from_inspect):
# if test_file in str(frame).replace(r"\\", "/"):
# check frame's function + if it has `self` as locals; double check if self has the (function) name
# TODO: Question: How about expanded?
if (
frame.function == test_name
and "self" in frame.frame.f_locals
and hasattr(frame.frame.f_locals["self"], test_name)
):
# if test_name == frame.frame.f_locals["self"]._testMethodName:
test_frame = frame
# The test instance
test_obj = frame.frame.f_locals["self"]
# TODO: Do we get the (relative?) path, or is it just a file name?
# TODO: Does `test_obj` always have a `tearDown` attribute?
actual_test_file = frame.filename
# TODO: check that `test_method` will work at the several places it is used!
test_method = getattr(test_obj, test_name)
break
if test_frame is not None:
line_number = test_frame.lineno
# The frame of `patched` being called (the one and the only one calling `_get_test_info`)
# This is used to get the original method being patched in order to get the context.
frame_of_patched_obj = None
captured_frames = []
to_capture = False
# From the most outer (i.e. python's `runpy.py`) frame to most inner frame (i.e. the frame of this method)
# Between the test method being called and entering `patched`.
for frame in reversed(stack_from_inspect):
if (
frame.function == test_name
and "self" in frame.frame.f_locals
and hasattr(frame.frame.f_locals["self"], test_name)
):
to_capture = True
# TODO: checking simply by the name is not robust.
elif "patched" == frame.frame.f_code.co_name:
frame_of_patched_obj = frame
to_capture = False
break
if to_capture:
captured_frames.append(frame)
tb_next = None
for frame_info in reversed(captured_frames):
tb = types.TracebackType(tb_next, frame_info.frame, frame_info.frame.f_lasti, frame_info.frame.f_lineno)
tb_next = tb
test_traceback = tb
origin_method_being_patched = frame_of_patched_obj.frame.f_locals["orig_method"]
# An iterable of type `traceback.StackSummary` with each element of type `FrameSummary`
stack = traceback.extract_stack()
# The frame which calls `the original method being patched`
caller_frame = None
# From the most inner (i.e. recent) frame to the most outer frame
for frame in reversed(stack):
if origin_method_being_patched.__name__ in frame.line:
caller_frame = frame
caller_path = os.path.relpath(caller_frame.filename)
caller_lineno = caller_frame.lineno
test_lineno = line_number
# Get the code context in the test function/method.
from _pytest._code.source import Source
with open(actual_test_file) as fp:
s = fp.read()
source = Source(s)
test_code_context = "\n".join(source.getstatement(test_lineno - 1).lines)
# Get the code context in the caller (to the patched function/method).
with open(caller_path) as fp:
s = fp.read()
source = Source(s)
caller_code_context = "\n".join(source.getstatement(caller_lineno - 1).lines)
test_info = f"test:\n\n{full_test_name}\n\n{'-' * 80}\n\ntest context: {actual_test_file}:{test_lineno}\n\n{test_code_context}"
test_info = f"{test_info}\n\n{'-' * 80}\n\ncaller context: {caller_path}:{caller_lineno}\n\n{caller_code_context}"
return (
full_test_name,
test_file,
test_lineno,
test_obj,
test_method,
test_frame,
test_traceback,
test_code_context,
caller_path,
caller_lineno,
caller_code_context,
test_info,
)
def _get_call_arguments(code_context):
"""
Analyze the positional and keyword arguments in a call expression.
This will extract the expressions of the positional and keyword arguments, and associate them with the positions and
the keyword argument names.
"""
def get_argument_name(node):
"""Extract the name/expression from an AST node"""
if isinstance(node, ast.Name):
return node.id
elif isinstance(node, ast.Attribute):
return ast.unparse(node)
elif isinstance(node, ast.Constant):
return repr(node.value)
else:
return ast.unparse(node)
indent = len(code_context) - len(code_context.lstrip())
code_context = code_context.replace(" " * indent, "")
try:
# Parse the line
tree = ast.parse(code_context, mode="eval")
assert isinstance(tree.body, ast.Call)
call_node = tree.body
if call_node:
result = {
"positional_args": [],
"keyword_args": {},
"starargs": None, # *args
"kwargs": None, # **kwargs
}
# Extract positional arguments
for arg in call_node.args:
arg_name = get_argument_name(arg)
result["positional_args"].append(arg_name)
# Extract keyword arguments
for keyword in call_node.keywords:
if keyword.arg is None:
# This is **kwargs
result["kwargs"] = get_argument_name(keyword.value)
else:
# Regular keyword argument
arg_name = get_argument_name(keyword.value)
result["keyword_args"][keyword.arg] = arg_name
return result
except (SyntaxError, AttributeError) as e:
print(f"Error parsing: {e}")
return None
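# Editor's illustrative sketch: what the parser above returns for a simple
# assertion line. Note that the values are the literal argument expressions
# as written at the call site, not evaluated objects.
def _example_get_call_arguments():
    parsed = _get_call_arguments("torch.testing.assert_close(out, expected, rtol=tol)")
    assert parsed["positional_args"] == ["out", "expected"]
    assert parsed["keyword_args"] == {"rtol": "tol"}
    assert parsed["starargs"] is None and parsed["kwargs"] is None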
def _prepare_debugging_info(test_info, info):
"""Combine the information about the test and the call information to a patched function/method within it."""
info = f"{test_info}\n\n{info}"
p = os.path.join(os.environ.get("_PATCHED_TESTING_METHODS_OUTPUT_DIR", ""), "captured_info.txt")
# TODO (ydshieh): This is not safe when we use pytest-xdist with more than 1 worker.
with open(p, "a") as fp:
fp.write(f"{info}\n\n{'=' * 120}\n\n")
return info
def _patched_tearDown(self, *args, **kwargs):
"""Used to report a test that has failures captured and handled by patched functions/methods (without re-raise).
The patched functions/methods refer to the `patched` defined in `_patch_with_call_info`, which is applied to
`torch.testing.assert_close` and `unittest.case.TestCase.assertEqual`.
The objective is to avoid a failure being silenced after being processed.
If there is any failure that is not handled by the patched functions/methods, we add a custom error message for it
along with the usual pytest failure report.
"""
# Check for regular failures before clearing:
# when `_patched_tearDown` is called, the current test fails due to an assertion error given by a method being
# patched by `_patch_with_call_info`. The patched method catches such an error and continues running the remaining
# statements within the test. If the test fails with another error not handled by the patched methods, we don't let
# pytest fail and report it; we report the original failure (the first one that was processed) instead.
# We still record those failures not handled by the patched methods, and add custom messages along with the usual
# pytest failure report.
regular_failures_info = []
errors = None
if hasattr(self._outcome, "errors"):
errors = self._outcome.errors
elif hasattr(self._outcome, "result") and hasattr(self._outcome.result, "errors"):
errors = self._outcome.result.errors
if hasattr(self, "_outcome") and errors:
for error_entry in errors:
test_instance, (exc_type, exc_obj, exc_tb) = error_entry
# breakpoint()
regular_failures_info.append(
{
"message": f"{str(exc_obj)}\n\n",
"type": exc_type.__name__,
"file": "test_modeling_vit.py",
"line": 237, # get_deepest_frame_line(exc_tb) # Your helper function
}
)
# Clear the regular failure (i.e. that is not from any of our patched assertion methods) from pytest's records.
if hasattr(self._outcome, "errors"):
self._outcome.errors.clear()
elif hasattr(self._outcome, "result") and hasattr(self._outcome.result, "errors"):
self._outcome.result.errors.clear()
# reset back to the original tearDown method, so `_patched_tearDown` won't be run by the subsequent tests if they
# have only test failures that are not handled by the patched methods (or no test failure at all).
orig_tearDown = _patched_tearDown.orig_tearDown
type(self).tearDown = orig_tearDown
# Call the original tearDown
orig_tearDown(self, *args, **kwargs)
# Get the failure
test_method = getattr(self, self._testMethodName)
captured_failures = test_method.__func__.captured_failures[id(test_method)]
# TODO: How could we show several exceptions in a single test on the terminal? (Maybe not a good idea)
captured_exceptions = captured_failures[0]["exception"]
captured_traceback = captured_failures[0]["traceback"]
# Show the captured information on the terminal.
captured_info = [x["info"] for x in captured_failures]
captured_info_str = f"\n\n{'=' * 80}\n\n".join(captured_info)
# Enhance the exception message if there were suppressed failures
if regular_failures_info:
enhanced_message = f"""{str(captured_exceptions)}
{"=" * 80}
Handled Failures: ({len(captured_info)} handled):
{"-" * 80}\n
{captured_info_str}
{"=" * 80}
Unhandled Failures: ({len(regular_failures_info)} unhandled):
{"-" * 80}\n
{", ".join(f"{info['type']}: {info['message']}{info['file']}:{info['line']}" for info in regular_failures_info)}
{"-" * 80}
Note: This failure occurred after other failures analyzed by the patched assertion methods.
To see the full details, temporarily disable assertion patching.
{"=" * 80}"""
# Create new exception with enhanced message
enhanced_exception = type(captured_exceptions)(enhanced_message)
enhanced_exception.__cause__ = captured_exceptions.__cause__
enhanced_exception.__context__ = captured_exceptions.__context__
# Raise with your existing traceback reconstruction
captured_exceptions = enhanced_exception
# clean up the recorded status
del test_method.__func__.captured_failures
raise captured_exceptions.with_traceback(captured_traceback)
def _patch_with_call_info(module_or_class, attr_name, _parse_call_info_func, target_args):
"""
Patch a callable `attr_name` of a module or class `module_or_class`.
This will allow us to collect the call information, e.g. the argument names and values, as well as the literal
expressions passed as the arguments.
"""
orig_method = getattr(module_or_class, attr_name)
if not callable(orig_method):
return
def patched(*args, **kwargs):
# If the target callable is not called within a test, simply call it without modification.
if not os.environ.get("PYTEST_CURRENT_TEST", ""):
return orig_method(*args, **kwargs)
try:
orig_method(*args, **kwargs)
except AssertionError as e:
captured_exception = e
# captured_traceback = e.__traceback__
(
full_test_name,
test_file,
test_lineno,
test_obj,
test_method,
test_frame,
test_traceback,
test_code_context,
caller_path,
caller_lineno,
caller_code_context,
test_info,
) = _get_test_info()
test_info = f"{test_info}\n\n{'-' * 80}\n\npatched method: {orig_method.__module__}.{orig_method.__name__}"
call_argument_expressions = _get_call_arguments(caller_code_context)
# This step is specific to the patched target; it is delegated to `_parse_call_info_func`.
info = _parse_call_info_func(orig_method, args, kwargs, call_argument_expressions, target_args)
info = _prepare_debugging_info(test_info, info)
# If the test is running in a CI environment (e.g. not a manual run), let's raise and fail the test, so it
# behaves as usual.
# On Github Actions or CircleCI, this is set automatically.
# When running manually, it's up to the user to decide whether to set it.
# This is to avoid the patched function being called inside `with self.assertRaises(AssertionError):` and failing
# because the expected `AssertionError` never propagates.
# TODO (ydshieh): Is there a way to raise only when we are inside such context managers?
# TODO (ydshieh): How not to record the failure if it happens inside `self.assertRaises(AssertionError)`?
if os.getenv("CI") == "true":
raise captured_exception.with_traceback(test_traceback)
# Save this, so we can raise at the end of the current test
captured_failure = {
"result": "failed",
"exception": captured_exception,
"traceback": test_traceback,
"info": info,
}
# Record the failure status and its information, so we can raise it later.
# We are modifying the (unbound) function at class level: not its logic but only adding a new extra
# attribute.
if getattr(test_method.__func__, "captured_failures", None) is None:
test_method.__func__.captured_failures = {}
if id(test_method) not in test_method.__func__.captured_failures:
test_method.__func__.captured_failures[id(test_method)] = []
test_method.__func__.captured_failures[id(test_method)].append(captured_failure)
# This modifies the `tearDown` which will be called after every test, but we reset it back inside
# `_patched_tearDown`.
if not hasattr(type(test_obj).tearDown, "orig_tearDown"):
orig_tearDown = type(test_obj).tearDown
_patched_tearDown.orig_tearDown = orig_tearDown
type(test_obj).tearDown = _patched_tearDown
setattr(module_or_class, attr_name, patched)
def _parse_call_info(func, args, kwargs, call_argument_expressions, target_args):
"""
Prepare a string containing the call info to `func`, e.g. argument names/values/expressions.
"""
signature = inspect.signature(func)
signature_names = [param.name for param_name, param in signature.parameters.items()]
# called as `self.method_name()` or `xxx.method_name()`.
if len(args) == len(call_argument_expressions["positional_args"]) + 1:
# We simply add "self" as the expression even though it might not be the actual argument name.
# (This part is very unlikely to be what a user is interested in knowing.)
call_argument_expressions["positional_args"] = ["self"] + call_argument_expressions["positional_args"]
param_position_mapping = {param_name: idx for idx, param_name in enumerate(signature_names)}
arg_info = {}
for arg_name in target_args:
if arg_name in kwargs:
arg_value = kwargs[arg_name]
arg_expr = call_argument_expressions["keyword_args"][arg_name]
else:
arg_pos = param_position_mapping[arg_name]
arg_value = args[arg_pos]
arg_expr = call_argument_expressions["positional_args"][arg_pos]
arg_value_str = _format_py_obj(arg_value)
arg_info[arg_name] = {"arg_expr": arg_expr, "arg_value_str": arg_value_str}
info = ""
for arg_name in arg_info:
arg_expr, arg_value_str = arg_info[arg_name]["arg_expr"], arg_info[arg_name]["arg_value_str"]
info += f"{'-' * 80}\n\nargument name: `{arg_name}`\nargument expression: `{arg_expr}`\n\nargument value:\n\n{arg_value_str}\n\n"
# remove the trailing \n\n
info = info[:-2]
return info
def patch_testing_methods_to_collect_info():
"""
Patch some methods (`torch.testing.assert_close`, `unittest.case.TestCase.assertEqual`, etc).
This will allow us to collect the call information, e.g. the argument names and values, also the literal expressions
passed as the arguments.
"""
p = os.path.join(os.environ.get("_PATCHED_TESTING_METHODS_OUTPUT_DIR", ""), "captured_info.txt")
Path(p).unlink(missing_ok=True)
if is_torch_available():
import torch
_patch_with_call_info(torch.testing, "assert_close", _parse_call_info, target_args=("actual", "expected"))
_patch_with_call_info(unittest.case.TestCase, "assertEqual", _parse_call_info, target_args=("first", "second"))
_patch_with_call_info(unittest.case.TestCase, "assertListEqual", _parse_call_info, target_args=("list1", "list2"))
_patch_with_call_info(
unittest.case.TestCase, "assertTupleEqual", _parse_call_info, target_args=("tuple1", "tuple2")
)
_patch_with_call_info(unittest.case.TestCase, "assertSetEqual", _parse_call_info, target_args=("set1", "set1"))
_patch_with_call_info(unittest.case.TestCase, "assertDictEqual", _parse_call_info, target_args=("d1", "d2"))
_patch_with_call_info(unittest.case.TestCase, "assertIn", _parse_call_info, target_args=("member", "container"))
_patch_with_call_info(unittest.case.TestCase, "assertNotIn", _parse_call_info, target_args=("member", "container"))
_patch_with_call_info(unittest.case.TestCase, "assertLess", _parse_call_info, target_args=("a", "b"))
_patch_with_call_info(unittest.case.TestCase, "assertLessEqual", _parse_call_info, target_args=("a", "b"))
_patch_with_call_info(unittest.case.TestCase, "assertGreater", _parse_call_info, target_args=("a", "b"))
_patch_with_call_info(unittest.case.TestCase, "assertGreaterEqual", _parse_call_info, target_args=("a", "b"))
def torchrun(script: str, nproc_per_node: int, is_torchrun: bool = True, env: dict | None = None):
"""Run the `script` using `torchrun` command for multi-processing in a subprocess. Captures errors as necessary."""
with tempfile.NamedTemporaryFile(mode="w+", suffix=".py") as tmp:
tmp.write(script)
tmp.flush()
tmp.seek(0)
if is_torchrun:
cmd = (
f"torchrun --nproc_per_node {nproc_per_node} --master_port {get_torch_dist_unique_port()} {tmp.name}"
).split()
else:
cmd = ["python3", tmp.name]
# Note that the subprocess will be waited for here, and raise an error if not successful
try:
_ = subprocess.run(cmd, capture_output=True, env=env, text=True, check=True)
except subprocess.CalledProcessError as e:
raise Exception(f"The following error was captured: {e.stderr}")
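# Editor's illustrative sketch (hypothetical script, not from the source): a
# two-process gloo sanity check run through the helper above.
_EXAMPLE_DIST_SCRIPT = """
import torch.distributed as dist
dist.init_process_group("gloo")
assert dist.get_world_size() == 2
dist.destroy_process_group()
"""
def _example_torchrun():
    torchrun(_EXAMPLE_DIST_SCRIPT, nproc_per_node=2)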
def _format_tensor(t, indent_level=0, sci_mode=None):
"""Format torch's tensor in a pretty way to be shown 👀 in the test report."""
# `torch.testing.assert_close` could accept python int/float numbers.
if not isinstance(t, torch.Tensor):
t = torch.tensor(t)
# Simply make the processing below simpler (not to hande both case)
is_scalar = False
if t.ndim == 0:
t = torch.tensor([t])
is_scalar = True
# For a scalar or one-dimensional tensor, keep it as one line. If there is only one element along every dimension
# except the last one, we also keep it as one line.
if t.ndim <= 1 or set(t.shape[0:-1]) == {1}:
# Use `detach` to remove `grad_fn=<...>`, and use `to("cpu")` to remove `device='...'`
t = t.detach().to("cpu")
# We work directly with the string representation instead of the tensor itself
t_str = str(t)
# remove the `tensor( ... )` wrapper so we keep only the content
t_str = t_str.replace("tensor(", "").replace(")", "")
# Sometimes there are extra spaces between `[` and the first digit of the first value (for alignment).
# For example `[[ 0.06, -0.51], [-0.76, -0.49]]`. It may have multiple consecutive spaces.
# Let's remove such extra spaces.
while "[ " in t_str:
t_str = t_str.replace("[ ", "[")
# Put everything in a single line. We replace `\n` by a space ` ` so we still keep `,\n` as `, `.
t_str = t_str.replace("\n", " ")
# Remove repeated spaces (introduced by the previous step)
while " " in t_str:
t_str = t_str.replace(" ", " ")
# remove the leading `[` and trailing `]` for a scalar tensor
if is_scalar:
t_str = t_str[1:-1]
t_str = " " * 4 * indent_level + t_str
return t_str
# Otherwise, we separate the representations of the elements along the outer dimension by new lines (after a `,`).
# The representation of each element is obtained by calling this function recursively with the correct `indent_level`.
else:
t_str = str(t)
# (so the recursive calls receive this value)
if sci_mode is None:
sci_mode = "e+" in t_str or "e-" in t_str
# Use the original content to determine the scientific mode to use. This is required as the representation of
# t[index] (computed below) may have a different format regarding scientific notation.
torch.set_printoptions(sci_mode=sci_mode)
t_str = " " * 4 * indent_level + "[\n"
# Keep the ending `,` for all outer dimensions whose representations are not put on one line, even if there is
# only one element along that dimension.
t_str += ",\n".join(_format_tensor(x, indent_level=indent_level + 1, sci_mode=sci_mode) for x in t)
t_str += ",\n" + " " * 4 * indent_level + "]"
torch.set_printoptions(sci_mode=None)
return t_str
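# Editor's illustrative sketch: rank <= 1 tensors come back on a single line,
# while higher-rank tensors are expanded into an indented, paste-ready literal.
def _example_format_tensor():
    print(_format_tensor(torch.tensor([0.25, 0.5])))  # one line, e.g. [0.2500, 0.5000]
    print(_format_tensor(torch.tensor([[1.0, 2.0], [3.0, 4.0]])))  # multi-line, one row per line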
def _quote_string(s):
"""Given a string `s`, return a python literal expression that give `s` when it is used in a python source code.
For example, if `s` is the string `abc`, the return value is `"abc"`.
We choice double quotes over single quote despite `str(s)` would give `'abc'` instead of `"abc"`.
"""
has_single_quote = "'" in s
has_double_quote = '"' in s
if has_single_quote and has_double_quote:
# replace any double quote by the raw string r'\"'.
s = s.replace('"', r"\"")
return f'"{s}"'
elif has_single_quote:
return f'"{s}"'
elif has_double_quote:
return f"'{s}'"
else:
return f'"{s}"'
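# A quick sketch of the quoting rules above (examples are illustrative):
#   _quote_string('abc')        returns the text  "abc"
#   _quote_string("it's")       returns the text  "it's"
#   _quote_string('say "hi"')   returns the text  'say "hi"'
#   mixed quotes escape the double ones, e.g.  a "b" isn't  ->  "a \"b\" isn't"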
def _format_py_obj(obj, indent=0, mode="", cache=None, prefix=""):
"""Format python objects of basic built-in type in a pretty way so we could copy-past them to code editor easily.
Currently, this support int, float, str, list, tuple, and dict.
It also works with `torch.Tensor` via calling `format_tesnor`.
"""
if cache is None:
cache = {}
else:
if (id(obj), indent, mode, prefix) in cache:
return cache[(id(obj), indent, mode, prefix)]
# special format method for `torch.Tensor`
if str(obj.__class__) == "<class 'torch.Tensor'>":
return _format_tensor(obj)
elif obj.__class__.__name__ == "str":
quoted_string = _quote_string(obj)
# we don't want the newline to be interpreted
quoted_string = quoted_string.replace("\n", r"\n")
output = quoted_string
elif obj.__class__.__name__ in ["int", "float"]:
# for float like `1/3`, we will get `0.3333333333333333`
output = str(obj)
elif obj.__class__.__name__ in ["list", "tuple", "dict"]:
parenthesis = {
"list": "[]",
"tuple": "()",
"dict": "{}",
}
p1, p2 = parenthesis[obj.__class__.__name__]
elements_without_indent = []
if isinstance(obj, dict):
for idx, (k, v) in enumerate(obj.items()):
last_element = idx == len(obj) - 1
ok = _format_py_obj(k, indent=indent + 1, mode="one-line", cache=cache)
ov = _format_py_obj(
v,
indent=indent + 1,
mode=mode,
cache=cache,
prefix=ok.lstrip() + ": " + "," if not last_element else "",
)
# Each element could be multiple-line, but the indent of its first line is removed
elements_without_indent.append(f"{ok.lstrip()}: {ov.lstrip()}")
else:
for idx, x in enumerate(obj):
last_element = idx == len(obj) - 1
o = _format_py_obj(
x, indent=indent + 1, mode=mode, cache=cache, prefix="," if not last_element else ""
)
# Each element could be multiple-line, but the indent of its first line is removed
elements_without_indent.append(o.lstrip())
groups = []
buf = []
for idx, x in enumerate(elements_without_indent):
buf.append(x)
x_expanded = "\n" in buf[-1]
not_last_element = idx != len(elements_without_indent) - 1
# if `x` should be separated from subsequent elements
should_finalize_x = x_expanded or len(f"{' ' * (4 * (indent + 1))}") + len(
", ".join(buf[-1:])
) > 120 - int(not_last_element)
# if `buf[:-1]` (i.e. without `x`) should be combined together (into one line)
should_finalize_buf = x_expanded
# the recursive call returns a single line, so we can use it to determine if we can fit the width limit
if not should_finalize_buf:
buf_not_fit_into_one_line = len(f"{' ' * (4 * (indent + 1))}") + len(", ".join(buf)) > 120 - int(
not_last_element
)
should_finalize_buf = buf_not_fit_into_one_line
# any element of iterable type needs to be on its own line
if (type(obj[idx]) if type(obj) is not dict else type(list(obj.values())[idx])) in [list, tuple, dict]:
should_finalize_x = True
should_finalize_buf = True
# any type change --> needs to be added after a new line
prev_type = None
current_type = type(obj[idx]) if type(obj) is not dict else type(list(obj.values())[idx])
if len(buf) > 1:
prev_type = type(obj[idx - 1]) if type(obj) is not dict else type(list(obj.values())[idx - 1])
type_changed = current_type != prev_type
if type_changed:
should_finalize_buf = True
# all elements in the buf are strings --> don't finalize the buf by width limit
if prev_type is None or (prev_type is str and current_type is str):
should_finalize_buf = False
# collect as many elements of string type as possible (without width limit).
# These will be examined as a whole (if they don't fit into the width, each element goes on its own line)
if current_type is str:
should_finalize_x = False
# `len(buf) == 1` or `obj[idx-1]` is a string
if prev_type in [None, str]:
should_finalize_buf = False
if should_finalize_buf:
orig_buf_len = len(buf)
if orig_buf_len > 1:
not_fit_into_one_line = None
# all elements in `obj` that give `buf[:-1]` are strings.
if prev_type is str:
# `-1` at the end: because buf[-2] is not the last element
not_fit_into_one_line = len(f"{' ' * (4 * (indent + 1))}") + len(", ".join(buf[:-1])) > 120 - 1
if not_fit_into_one_line:
for x in buf[:-1]:
groups.append([x])
else:
groups.append(buf[:-1])
buf = buf[-1:]
if should_finalize_x:
groups.append(buf)
buf = []
# The last buf
if len(buf) > 0:
not_fit_into_one_line = None
if current_type is str:
# no `-1` at the end: because buf[-1] is the last element
not_fit_into_one_line = len(f"{' ' * (4 * (indent + 1))}") + len(", ".join(buf)) > 120
if not_fit_into_one_line:
for x in buf:
groups.append([x])
else:
groups.append(buf)
output = f"{' ' * 4 * indent}{p1}\n"
element_strings = [f"{' ' * (4 * (indent + 1))}" + ", ".join(buf) for buf in groups]
output += ",\n".join(element_strings)
output += f"\n{' ' * 4 * indent}{p2}"
# if all elements are in one-line
no_new_line_in_elements = all("\n" not in x for x in element_strings)
# if yes, we can form a one-line representation of `obj`
could_use_one_line = no_new_line_in_elements
# if mode == "one-line", this function always returns one-line representation, so `no_new_line_in_elements`
# will be `True`.
if could_use_one_line:
one_line_form = ", ".join([x.lstrip() for x in element_strings])
one_line_form = f"{p1}{one_line_form}{p2}"
if mode == "one-line":
return output
# check with the width limit
could_use_one_line = len(f"{' ' * 4 * indent}") + len(prefix) + len(one_line_form) <= 120
# extra conditions for returning one-line representation
def use_one_line_repr(obj):
# iterable types
if type(obj) in (list, tuple, dict):
# get all types
element_types = []
if type(obj) is dict:
element_types.extend(type(x) for x in obj.values())
elif type(obj) in [list, tuple]:
element_types.extend(type(x) for x in obj)
# At least one element is of iterable type
if any(x in (list, tuple, dict) for x in element_types):
# If `obj` has more than one element and at least one of them is iterable --> no one line repr.
if len(obj) > 1:
return False
# only one element that is iterable, but not the same type as `obj` --> no one line repr.
if type(obj) is not type(obj[0]):
return False
# one-line repr. if possible, without width limit
return no_new_line_in_elements
# all elements are of simple types, but more than one type --> no one line repr.
if len(set(element_types)) > 1:
return False
# all elements are of the same simple type
if element_types[0] in [int, float]:
# one-line repr. without width limit
return no_new_line_in_elements
elif element_types[0] is str:
if len(obj) == 1:
# one single string element --> one-line repr. without width limit
return no_new_line_in_elements
else:
# multiple string elements --> one-line repr. if fit into width limit
return could_use_one_line
# simple types (int, float, string)
return True
# width condition combined with specific mode conditions
if use_one_line_repr(obj):
output = f"{' ' * 4 * indent}{one_line_form}"
cache[(id(obj), indent, mode, prefix)] = output
return output
def write_file(file, content):
with open(file, "w") as f:
f.write(content)
def read_json_file(file):
with open(file, "r") as fh:
return json.load(fh)
| Expectations |
python | getsentry__sentry-python | sentry_sdk/tracing_utils.py | {
"start": 18103,
"end": 40697
} | class ____:
"""
The W3C Baggage header information (see https://www.w3.org/TR/baggage/).
Before mutating a `Baggage` object, calling code must check that `mutable` is `True`.
Mutating a `Baggage` object that has `mutable` set to `False` is not allowed, but
it is the caller's responsibility to enforce this restriction.
"""
__slots__ = ("sentry_items", "third_party_items", "mutable")
SENTRY_PREFIX = "sentry-"
SENTRY_PREFIX_REGEX = re.compile("^sentry-")
def __init__(
self,
sentry_items, # type: Dict[str, str]
third_party_items="", # type: str
mutable=True, # type: bool
):
self.sentry_items = sentry_items
self.third_party_items = third_party_items
self.mutable = mutable
@classmethod
def from_incoming_header(
cls,
header, # type: Optional[str]
*,
_sample_rand=None, # type: Optional[str]
):
# type: (...) -> Baggage
"""
Freeze if the incoming header already has Sentry baggage.
"""
sentry_items = {}
third_party_items = ""
mutable = True
if header:
for item in header.split(","):
if "=" not in item:
continue
with capture_internal_exceptions():
item = item.strip()
key, val = item.split("=")
if Baggage.SENTRY_PREFIX_REGEX.match(key):
baggage_key = unquote(key.split("-")[1])
sentry_items[baggage_key] = unquote(val)
mutable = False
else:
third_party_items += ("," if third_party_items else "") + item
if _sample_rand is not None:
sentry_items["sample_rand"] = str(_sample_rand)
mutable = False
return Baggage(sentry_items, third_party_items, mutable)
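# Illustrative example (header value is hypothetical):
#   Baggage.from_incoming_header(
#       "other-vendor-value-1=foo;bar;baz, sentry-trace_id=771a43a4192642f0b136d5159a501700"
#   )
# yields sentry_items == {"trace_id": "771a43a4192642f0b136d5159a501700"},
# third_party_items == "other-vendor-value-1=foo;bar;baz" and mutable == False
# (the presence of any sentry-* item freezes the baggage).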
@classmethod
def from_options(cls, scope):
# type: (sentry_sdk.scope.Scope) -> Optional[Baggage]
sentry_items = {} # type: Dict[str, str]
third_party_items = ""
mutable = False
client = sentry_sdk.get_client()
if not client.is_active() or scope._propagation_context is None:
return Baggage(sentry_items)
options = client.options
propagation_context = scope._propagation_context
if propagation_context is not None:
sentry_items["trace_id"] = propagation_context.trace_id
if options.get("environment"):
sentry_items["environment"] = options["environment"]
if options.get("release"):
sentry_items["release"] = options["release"]
if client.parsed_dsn:
sentry_items["public_key"] = client.parsed_dsn.public_key
if client.parsed_dsn.org_id:
sentry_items["org_id"] = client.parsed_dsn.org_id
if options.get("traces_sample_rate"):
sentry_items["sample_rate"] = str(options["traces_sample_rate"])
return Baggage(sentry_items, third_party_items, mutable)
@classmethod
def populate_from_transaction(cls, transaction):
# type: (sentry_sdk.tracing.Transaction) -> Baggage
"""
Populate fresh baggage entry with sentry_items and make it immutable
if this is the head SDK which originates traces.
"""
client = sentry_sdk.get_client()
sentry_items = {} # type: Dict[str, str]
if not client.is_active():
return Baggage(sentry_items)
options = client.options or {}
sentry_items["trace_id"] = transaction.trace_id
sentry_items["sample_rand"] = f"{transaction._sample_rand:.6f}" # noqa: E231
if options.get("environment"):
sentry_items["environment"] = options["environment"]
if options.get("release"):
sentry_items["release"] = options["release"]
if client.parsed_dsn:
sentry_items["public_key"] = client.parsed_dsn.public_key
if client.parsed_dsn.org_id:
sentry_items["org_id"] = client.parsed_dsn.org_id
if (
transaction.name
and transaction.source not in LOW_QUALITY_TRANSACTION_SOURCES
):
sentry_items["transaction"] = transaction.name
if transaction.sample_rate is not None:
sentry_items["sample_rate"] = str(transaction.sample_rate)
if transaction.sampled is not None:
sentry_items["sampled"] = "true" if transaction.sampled else "false"
# there's an existing baggage but it was mutable,
# which is why we are creating this new baggage.
# However, if by chance the user put some sentry items in there, give them precedence.
if transaction._baggage and transaction._baggage.sentry_items:
sentry_items.update(transaction._baggage.sentry_items)
return Baggage(sentry_items, mutable=False)
def freeze(self):
# type: () -> None
self.mutable = False
def dynamic_sampling_context(self):
# type: () -> Dict[str, str]
header = {}
for key, item in self.sentry_items.items():
header[key] = item
return header
def serialize(self, include_third_party=False):
# type: (bool) -> str
items = []
for key, val in self.sentry_items.items():
with capture_internal_exceptions():
item = Baggage.SENTRY_PREFIX + quote(key) + "=" + quote(str(val))
items.append(item)
if include_third_party:
items.append(self.third_party_items)
return ",".join(items)
@staticmethod
def strip_sentry_baggage(header):
# type: (str) -> str
"""Remove Sentry baggage from the given header.
Given a Baggage header, return a new Baggage header with all Sentry baggage items removed.
"""
return ",".join(
(
item
for item in header.split(",")
if not Baggage.SENTRY_PREFIX_REGEX.match(item.strip())
)
)
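# Example (header value is hypothetical):
#   Baggage.strip_sentry_baggage("sentry-trace_id=abc,vendor=1") == "vendor=1"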
def _sample_rand(self):
# type: () -> Optional[float]
"""Convenience method to get the sample_rand value from the sentry_items.
We validate the value and parse it as a float before returning it. The value is considered
valid if it is a float in the range [0, 1).
"""
sample_rand = try_convert(float, self.sentry_items.get("sample_rand"))
if sample_rand is not None and 0.0 <= sample_rand < 1.0:
return sample_rand
return None
def __repr__(self):
# type: () -> str
return f'<Baggage "{self.serialize(include_third_party=True)}", mutable={self.mutable}>'
def should_propagate_trace(client, url):
# type: (sentry_sdk.client.BaseClient, str) -> bool
"""
Returns True if url matches trace_propagation_targets configured in the given client. Otherwise, returns False.
"""
trace_propagation_targets = client.options["trace_propagation_targets"]
if is_sentry_url(client, url):
return False
return match_regex_list(url, trace_propagation_targets, substring_matching=True)
def normalize_incoming_data(incoming_data):
# type: (Dict[str, Any]) -> Dict[str, Any]
"""
Normalizes incoming data so the keys are all lowercase with dashes instead of underscores and stripped from known prefixes.
"""
data = {}
for key, value in incoming_data.items():
if key.startswith("HTTP_"):
key = key[5:]
key = key.replace("_", "-").lower()
data[key] = value
return data
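# Example (keys are hypothetical):
#   normalize_incoming_data({"HTTP_X_FORWARDED_FOR": "1.2.3.4"})
#   == {"x-forwarded-for": "1.2.3.4"}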
def create_span_decorator(
op=None, name=None, attributes=None, template=SPANTEMPLATE.DEFAULT
):
# type: (Optional[Union[str, OP]], Optional[str], Optional[dict[str, Any]], SPANTEMPLATE) -> Any
"""
Create a span decorator that can wrap both sync and async functions.
:param op: The operation type for the span.
:type op: str or :py:class:`sentry_sdk.consts.OP` or None
:param name: The name of the span.
:type name: str or None
:param attributes: Additional attributes to set on the span.
:type attributes: dict or None
:param template: The type of span to create. This determines what kind of
span instrumentation and data collection will be applied. Use predefined
constants from :py:class:`sentry_sdk.consts.SPANTEMPLATE`.
The default is `SPANTEMPLATE.DEFAULT` which is the right choice for most
use cases.
:type template: :py:class:`sentry_sdk.consts.SPANTEMPLATE`
"""
from sentry_sdk.scope import should_send_default_pii
def span_decorator(f):
# type: (Any) -> Any
"""
Decorator to create a span for the given function.
"""
@functools.wraps(f)
async def async_wrapper(*args, **kwargs):
# type: (*Any, **Any) -> Any
current_span = get_current_span()
if current_span is None:
logger.debug(
"Cannot create a child span for %s. "
"Please start a Sentry transaction before calling this function.",
qualname_from_function(f),
)
return await f(*args, **kwargs)
span_op = op or _get_span_op(template)
function_name = name or qualname_from_function(f) or ""
span_name = _get_span_name(template, function_name, kwargs)
send_pii = should_send_default_pii()
with current_span.start_child(
op=span_op,
name=span_name,
) as span:
span.update_data(attributes or {})
_set_input_attributes(
span, template, send_pii, function_name, f, args, kwargs
)
result = await f(*args, **kwargs)
_set_output_attributes(span, template, send_pii, result)
return result
try:
async_wrapper.__signature__ = inspect.signature(f) # type: ignore[attr-defined]
except Exception:
pass
@functools.wraps(f)
def sync_wrapper(*args, **kwargs):
# type: (*Any, **Any) -> Any
current_span = get_current_span()
if current_span is None:
logger.debug(
"Cannot create a child span for %s. "
"Please start a Sentry transaction before calling this function.",
qualname_from_function(f),
)
return f(*args, **kwargs)
span_op = op or _get_span_op(template)
function_name = name or qualname_from_function(f) or ""
span_name = _get_span_name(template, function_name, kwargs)
send_pii = should_send_default_pii()
with current_span.start_child(
op=span_op,
name=span_name,
) as span:
span.update_data(attributes or {})
_set_input_attributes(
span, template, send_pii, function_name, f, args, kwargs
)
result = f(*args, **kwargs)
_set_output_attributes(span, template, send_pii, result)
return result
try:
sync_wrapper.__signature__ = inspect.signature(f) # type: ignore[attr-defined]
except Exception:
pass
if inspect.iscoroutinefunction(f):
return async_wrapper
else:
return sync_wrapper
return span_decorator
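# Minimal usage sketch (names are assumptions, not from the original source):
#   @create_span_decorator(op=OP.FUNCTION, name="load_user")
#   def load_user(user_id): ...
# Called inside an active transaction, `load_user` runs inside a child span;
# without one, it runs normally and only a debug message is logged.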
def get_current_span(scope=None):
# type: (Optional[sentry_sdk.Scope]) -> Optional[Span]
"""
Returns the currently active span if there is one running, otherwise `None`
"""
scope = scope or sentry_sdk.get_current_scope()
current_span = scope.span
return current_span
def set_span_errored(span=None):
# type: (Optional[Span]) -> None
"""
Set the status of the current or given span to INTERNAL_ERROR.
Also sets the status of the transaction (root span) to INTERNAL_ERROR.
"""
span = span or get_current_span()
if span is not None:
span.set_status(SPANSTATUS.INTERNAL_ERROR)
if span.containing_transaction is not None:
span.containing_transaction.set_status(SPANSTATUS.INTERNAL_ERROR)
def _generate_sample_rand(
trace_id, # type: Optional[str]
*,
interval=(0.0, 1.0), # type: tuple[float, float]
):
# type: (...) -> float
"""Generate a sample_rand value from a trace ID.
The generated value will be pseudorandomly chosen from the provided
interval. Specifically, given (lower, upper) = interval, the generated
value will be in the range [lower, upper). The value has 6-digit precision,
so when printing with .6f, the value will never be rounded up.
The pseudorandom number generator is seeded with the trace ID.
"""
lower, upper = interval
if not lower < upper: # using `if lower >= upper` would handle NaNs incorrectly
raise ValueError("Invalid interval: lower must be less than upper")
rng = Random(trace_id)
lower_scaled = int(lower * 1_000_000)
upper_scaled = int(upper * 1_000_000)
try:
sample_rand_scaled = rng.randrange(lower_scaled, upper_scaled)
except ValueError:
# In some corner cases it might happen that the range is too small
# In that case, just take the lower bound
sample_rand_scaled = lower_scaled
return sample_rand_scaled / 1_000_000
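# A small sketch of the determinism property (trace id is hypothetical):
# _generate_sample_rand("771a43a4192642f0b136d5159a501700") always returns the
# same value in [0, 1) because the RNG is seeded with the trace id, and with
# interval=(0.0, 0.25) the result is guaranteed to be strictly below 0.25.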
def _sample_rand_range(parent_sampled, sample_rate):
# type: (Optional[bool], Optional[float]) -> tuple[float, float]
"""
Compute the lower (inclusive) and upper (exclusive) bounds of the range of values
that a generated sample_rand value must fall into, given the parent_sampled and
sample_rate values.
"""
if parent_sampled is None or sample_rate is None:
return 0.0, 1.0
elif parent_sampled is True:
return 0.0, sample_rate
else: # parent_sampled is False
return sample_rate, 1.0
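# Worked example: with parent_sampled=True and sample_rate=0.2, sample_rand is
# drawn from [0.0, 0.2), so a later `sample_rand < sample_rate` check also
# samples this trace; with parent_sampled=False the range is [0.2, 1.0) and the
# same check rejects it, keeping the decision consistent with the parent.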
def _get_value(source, key):
# type: (Any, str) -> Optional[Any]
"""
Gets a value from a source object. The source can be a dict or an object.
It is checked for dictionary keys and object attributes.
"""
value = None
if isinstance(source, dict):
value = source.get(key)
else:
if hasattr(source, key):
try:
value = getattr(source, key)
except Exception:
value = None
return value
def _get_span_name(template, name, kwargs=None):
# type: (Union[str, SPANTEMPLATE], str, Optional[dict[str, Any]]) -> str
"""
Get the name of the span based on the template and the name.
"""
span_name = name
if template == SPANTEMPLATE.AI_CHAT:
model = None
if kwargs:
for key in ("model", "model_name"):
if kwargs.get(key) and isinstance(kwargs[key], str):
model = kwargs[key]
break
span_name = f"chat {model}" if model else "chat"
elif template == SPANTEMPLATE.AI_AGENT:
span_name = f"invoke_agent {name}"
elif template == SPANTEMPLATE.AI_TOOL:
span_name = f"execute_tool {name}"
return span_name
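# Illustrative results (arguments are hypothetical):
#   _get_span_name(SPANTEMPLATE.AI_CHAT, "f", {"model": "gpt-4"}) -> "chat gpt-4"
#   _get_span_name(SPANTEMPLATE.AI_TOOL, "search") -> "execute_tool search"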
def _get_span_op(template):
# type: (Union[str, SPANTEMPLATE]) -> str
"""
Get the operation of the span based on the template.
"""
mapping = {
SPANTEMPLATE.AI_CHAT: OP.GEN_AI_CHAT,
SPANTEMPLATE.AI_AGENT: OP.GEN_AI_INVOKE_AGENT,
SPANTEMPLATE.AI_TOOL: OP.GEN_AI_EXECUTE_TOOL,
} # type: dict[Union[str, SPANTEMPLATE], Union[str, OP]]
op = mapping.get(template, OP.FUNCTION)
return str(op)
def _get_input_attributes(template, send_pii, args, kwargs):
# type: (Union[str, SPANTEMPLATE], bool, tuple[Any, ...], dict[str, Any]) -> dict[str, Any]
"""
Get input attributes for the given span template.
"""
attributes = {} # type: dict[str, Any]
if template in [SPANTEMPLATE.AI_AGENT, SPANTEMPLATE.AI_TOOL, SPANTEMPLATE.AI_CHAT]:
mapping = {
"model": (SPANDATA.GEN_AI_REQUEST_MODEL, str),
"model_name": (SPANDATA.GEN_AI_REQUEST_MODEL, str),
"agent": (SPANDATA.GEN_AI_AGENT_NAME, str),
"agent_name": (SPANDATA.GEN_AI_AGENT_NAME, str),
"max_tokens": (SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, int),
"frequency_penalty": (SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY, float),
"presence_penalty": (SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY, float),
"temperature": (SPANDATA.GEN_AI_REQUEST_TEMPERATURE, float),
"top_p": (SPANDATA.GEN_AI_REQUEST_TOP_P, float),
"top_k": (SPANDATA.GEN_AI_REQUEST_TOP_K, int),
}
def _set_from_key(key, value):
# type: (str, Any) -> None
if key in mapping:
(attribute, data_type) = mapping[key]
if value is not None and isinstance(value, data_type):
attributes[attribute] = value
for key, value in list(kwargs.items()):
if key == "prompt" and isinstance(value, str):
attributes.setdefault(SPANDATA.GEN_AI_REQUEST_MESSAGES, []).append(
{"role": "user", "content": value}
)
continue
if key == "system_prompt" and isinstance(value, str):
attributes.setdefault(SPANDATA.GEN_AI_REQUEST_MESSAGES, []).append(
{"role": "system", "content": value}
)
continue
_set_from_key(key, value)
if template == SPANTEMPLATE.AI_TOOL and send_pii:
attributes[SPANDATA.GEN_AI_TOOL_INPUT] = safe_repr(
{"args": args, "kwargs": kwargs}
)
# Coerce to string
if SPANDATA.GEN_AI_REQUEST_MESSAGES in attributes:
attributes[SPANDATA.GEN_AI_REQUEST_MESSAGES] = safe_repr(
attributes[SPANDATA.GEN_AI_REQUEST_MESSAGES]
)
return attributes
def _get_usage_attributes(usage):
# type: (Any) -> dict[str, Any]
"""
Get usage attributes.
"""
attributes = {}
def _set_from_keys(attribute, keys):
# type: (str, tuple[str, ...]) -> None
for key in keys:
value = _get_value(usage, key)
if value is not None and isinstance(value, int):
attributes[attribute] = value
_set_from_keys(
SPANDATA.GEN_AI_USAGE_INPUT_TOKENS,
("prompt_tokens", "input_tokens"),
)
_set_from_keys(
SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS,
("completion_tokens", "output_tokens"),
)
_set_from_keys(
SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS,
("total_tokens",),
)
return attributes
def _get_output_attributes(template, send_pii, result):
# type: (Union[str, SPANTEMPLATE], bool, Any) -> dict[str, Any]
"""
Get output attributes for the given span template.
"""
attributes = {} # type: dict[str, Any]
if template in [SPANTEMPLATE.AI_AGENT, SPANTEMPLATE.AI_TOOL, SPANTEMPLATE.AI_CHAT]:
with capture_internal_exceptions():
# Usage from result, result.usage, and result.metadata.usage
usage_candidates = [result]
usage = _get_value(result, "usage")
usage_candidates.append(usage)
meta = _get_value(result, "metadata")
usage = _get_value(meta, "usage")
usage_candidates.append(usage)
for usage_candidate in usage_candidates:
if usage_candidate is not None:
attributes.update(_get_usage_attributes(usage_candidate))
# Response model
model_name = _get_value(result, "model")
if model_name is not None and isinstance(model_name, str):
attributes[SPANDATA.GEN_AI_RESPONSE_MODEL] = model_name
model_name = _get_value(result, "model_name")
if model_name is not None and isinstance(model_name, str):
attributes[SPANDATA.GEN_AI_RESPONSE_MODEL] = model_name
# Tool output
if template == SPANTEMPLATE.AI_TOOL and send_pii:
attributes[SPANDATA.GEN_AI_TOOL_OUTPUT] = safe_repr(result)
return attributes
def _set_input_attributes(span, template, send_pii, name, f, args, kwargs):
# type: (Span, Union[str, SPANTEMPLATE], bool, str, Any, tuple[Any, ...], dict[str, Any]) -> None
"""
Set span input attributes based on the given span template.
:param span: The span to set attributes on.
:param template: The template to use to set attributes on the span.
:param send_pii: Whether to send PII data.
:param f: The wrapped function.
:param args: The arguments to the wrapped function.
:param kwargs: The keyword arguments to the wrapped function.
"""
attributes = {} # type: dict[str, Any]
if template == SPANTEMPLATE.AI_AGENT:
attributes = {
SPANDATA.GEN_AI_OPERATION_NAME: "invoke_agent",
SPANDATA.GEN_AI_AGENT_NAME: name,
}
elif template == SPANTEMPLATE.AI_CHAT:
attributes = {
SPANDATA.GEN_AI_OPERATION_NAME: "chat",
}
elif template == SPANTEMPLATE.AI_TOOL:
attributes = {
SPANDATA.GEN_AI_OPERATION_NAME: "execute_tool",
SPANDATA.GEN_AI_TOOL_NAME: name,
}
docstring = f.__doc__
if docstring is not None:
attributes[SPANDATA.GEN_AI_TOOL_DESCRIPTION] = docstring
attributes.update(_get_input_attributes(template, send_pii, args, kwargs))
span.update_data(attributes or {})
def _set_output_attributes(span, template, send_pii, result):
# type: (Span, Union[str, SPANTEMPLATE], bool, Any) -> None
"""
Set span output attributes based on the given span template.
:param span: The span to set attributes on.
:param template: The template to use to set attributes on the span.
:param send_pii: Whether to send PII data.
:param result: The result of the wrapped function.
"""
span.update_data(_get_output_attributes(template, send_pii, result) or {})
# Circular imports
from sentry_sdk.tracing import (
BAGGAGE_HEADER_NAME,
LOW_QUALITY_TRANSACTION_SOURCES,
SENTRY_TRACE_HEADER_NAME,
)
if TYPE_CHECKING:
from sentry_sdk.tracing import Span
| Baggage |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/pipelines/config.py | {
"start": 9383,
"end": 9569
} | class ____(graphene.ObjectType):
pipeline_name = graphene.NonNull(graphene.String)
class Meta:
name = "PipelineConfigValidationValid"
| GraphenePipelineConfigValidationValid |
python | arrow-py__arrow | arrow/locales.py | {
"start": 38481,
"end": 40237
} | class ____(SlavicBaseLocale):
names = ["ua", "uk", "uk-ua"]
past = "{0} тому"
future = "за {0}"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "зараз",
"second": "секунда",
"seconds": "{0} кілька секунд",
"minute": "хвилину",
"minutes": {
"singular": "{0} хвилину",
"dual": "{0} хвилини",
"plural": "{0} хвилин",
},
"hour": "годину",
"hours": {
"singular": "{0} годину",
"dual": "{0} години",
"plural": "{0} годин",
},
"day": "день",
"days": {"singular": "{0} день", "dual": "{0} дні", "plural": "{0} днів"},
"month": "місяць",
"months": {
"singular": "{0} місяць",
"dual": "{0} місяці",
"plural": "{0} місяців",
},
"year": "рік",
"years": {"singular": "{0} рік", "dual": "{0} роки", "plural": "{0} років"},
}
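# Plural-form selection (a sketch of SlavicBaseLocale behavior, assumed here):
# counts ending in 1 (but not 11) pick "singular", counts ending in 2-4 (but
# not 12-14) pick "dual", everything else picks "plural" -- e.g. "2 хвилини"
# but "5 хвилин".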
month_names = [
"",
"січня",
"лютого",
"березня",
"квітня",
"травня",
"червня",
"липня",
"серпня",
"вересня",
"жовтня",
"листопада",
"грудня",
]
month_abbreviations = [
"",
"січ",
"лют",
"бер",
"квіт",
"трав",
"черв",
"лип",
"серп",
"вер",
"жовт",
"лист",
"груд",
]
day_names = [
"",
"понеділок",
"вівторок",
"середа",
"четвер",
"п’ятниця",
"субота",
"неділя",
]
day_abbreviations = ["", "пн", "вт", "ср", "чт", "пт", "сб", "нд"]
| UkrainianLocale |
python | sqlalchemy__sqlalchemy | examples/space_invaders/space_invaders.py | {
"start": 6740,
"end": 7030
} | class ____(EnemyGlyph):
"""Describe an enemy that's part of the "army"."""
__mapper_args__ = {"polymorphic_identity": "army"}
def glyph_for_state(self, coord, state):
if state["flip"]:
return self.alt_data
else:
return self.data
| ArmyGlyph |
python | ray-project__ray | python/ray/data/_internal/execution/operators/hash_aggregate.py | {
"start": 3769,
"end": 8579
} | class ____(HashShufflingOperatorBase):
def __init__(
self,
data_context: DataContext,
input_op: PhysicalOperator,
key_columns: Tuple[str],
aggregation_fns: Tuple[AggregateFn],
*,
num_partitions: Optional[int] = None,
aggregator_ray_remote_args_override: Optional[Dict[str, Any]] = None,
):
super().__init__(
name_factory=(
lambda num_partitions: f"HashAggregate(key_columns={key_columns}, "
f"num_partitions={num_partitions})"
),
input_ops=[input_op],
data_context=data_context,
key_columns=[key_columns],
num_partitions=(
# NOTE: In case of global aggregations (i.e. with no key columns specified),
# we override number of partitions to 1, since the whole dataset
# will be reduced to just a single row
num_partitions
if len(key_columns) > 0
else 1
),
partition_aggregation_factory=(
lambda aggregator_id, target_partition_ids: ReducingShuffleAggregation(
aggregator_id,
key_columns,
aggregation_fns,
)
),
input_block_transformer=_create_aggregating_transformer(
key_columns, aggregation_fns
),
aggregator_ray_remote_args_override=aggregator_ray_remote_args_override,
shuffle_progress_bar_name="Shuffle",
finalize_progress_bar_name="Aggregation",
)
def _get_operator_num_cpus_override(self) -> float:
return self.data_context.hash_aggregate_operator_actor_num_cpus_override
@classmethod
def _estimate_aggregator_memory_allocation(
cls,
*,
num_aggregators: int,
num_partitions: int,
estimated_dataset_bytes: int,
) -> int:
partition_byte_size_estimate = math.ceil(
estimated_dataset_bytes / num_partitions
)
# Estimate of object store memory required to accommodate all partitions
# handled by a single aggregator
aggregator_shuffle_object_store_memory_required: int = math.ceil(
estimated_dataset_bytes / num_aggregators
)
# Estimate of memory required to accommodate single partition as an output
# (inside Object Store)
output_object_store_memory_required: int = partition_byte_size_estimate
aggregator_total_memory_required: int = (
# Inputs (object store)
aggregator_shuffle_object_store_memory_required
+
# Output (object store)
output_object_store_memory_required
)
logger.info(
f"Estimated memory requirement for aggregating aggregator "
f"(partitions={num_partitions}, "
f"aggregators={num_aggregators}, "
f"dataset (estimate)={estimated_dataset_bytes / GiB:.1f}GiB): "
f"shuffle={aggregator_shuffle_object_store_memory_required / MiB:.1f}MiB, "
f"output={output_object_store_memory_required / MiB:.1f}MiB, "
f"total={aggregator_total_memory_required / MiB:.1f}MiB, "
)
return aggregator_total_memory_required
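# Worked example with hypothetical numbers: a 10 GiB dataset split into 100
# partitions and handled by 10 aggregators gives ~0.1 GiB per partition,
# ~1 GiB of shuffle input per aggregator, plus ~0.1 GiB for one output
# partition, i.e. roughly 1.1 GiB of memory required per aggregator.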
def _create_aggregating_transformer(
key_columns: Tuple[str], aggregation_fns: Tuple[AggregateFn]
) -> BlockTransformer:
"""Method creates input block transformer performing partial aggregation of
the block applied prior to block being shuffled (to reduce amount of bytes shuffled)"""
sort_key = ReducingShuffleAggregation._get_sort_key(key_columns)
def _aggregate(block: Block) -> Block:
from ray.data._internal.planner.exchange.aggregate_task_spec import (
SortAggregateTaskSpec,
)
# TODO: unify block schemas to avoid validating every block
# Validate block's schema compatible with aggregations
for agg_fn in aggregation_fns:
agg_fn._validate(BlockAccessor.for_block(block).schema())
# Project block to only carry columns used in aggregation
pruned_block = SortAggregateTaskSpec._prune_unused_columns(
block,
sort_key,
aggregation_fns,
)
# NOTE: If columns to aggregate on have been provided,
# sort the block on these before aggregation
if sort_key.get_columns():
target_block = BlockAccessor.for_block(pruned_block).sort(sort_key)
else:
target_block = pruned_block
return BlockAccessor.for_block(target_block)._aggregate(
sort_key, aggregation_fns
)
return _aggregate
| HashAggregateOperator |
python | huggingface__transformers | src/transformers/models/sam2/modular_sam2.py | {
"start": 2527,
"end": 11625
} | class ____(SamImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"height": 1024, "width": 1024}
mask_size = {"height": 256, "width": 256}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
valid_kwargs = Sam2FastImageProcessorKwargs
# modular artefacts
do_pad = None
pad_size = None
mask_pad_size = None
def __init__(self, **kwargs: Unpack[Sam2FastImageProcessorKwargs]):
BaseImageProcessorFast.__init__(self, **kwargs)
def _preprocess(
self,
images: list["torch.Tensor"],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> "torch.Tensor":
return BaseImageProcessorFast._preprocess(self, images, return_tensors=return_tensors, **kwargs).pixel_values
@auto_docstring
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
**kwargs: Unpack[Sam2FastImageProcessorKwargs],
) -> BatchFeature:
r"""
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps to preprocess.
"""
return super().preprocess(images, segmentation_maps, **kwargs)
def _preprocess_image_like_inputs(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput],
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Optional[Union[str, "torch.device"]] = None,
**kwargs: Unpack[Sam2FastImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
"""
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
original_sizes = [image.shape[-2:] for image in images]
images_kwargs = kwargs.copy()
pixel_values = self._preprocess(images, **images_kwargs)
data = {
"pixel_values": pixel_values,
"original_sizes": original_sizes,
}
if segmentation_maps is not None:
processed_segmentation_maps = self._prepare_image_like_inputs(
images=segmentation_maps,
expected_ndims=2,
do_convert_rgb=False,
input_data_format=ChannelDimension.FIRST,
)
segmentation_maps_kwargs = kwargs.copy()
segmentation_maps_kwargs.update(
{
"do_normalize": False,
"do_rescale": False,
"interpolation": pil_torch_interpolation_mapping[PILImageResampling.NEAREST],
"size": segmentation_maps_kwargs.pop("mask_size"),
}
)
processed_segmentation_maps = self._preprocess(
images=processed_segmentation_maps, **segmentation_maps_kwargs
)
data["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64)
return BatchFeature(data=data, tensor_type=kwargs["return_tensors"])
def _further_process_kwargs(
self,
size: Optional[SizeDict] = None,
mask_size: Optional[SizeDict] = None,
default_to_square: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Optional[ChannelDimension] = None,
**kwargs,
) -> dict:
"""
Update kwargs that need further processing before being validated.
Can be overridden by subclasses to customize the processing of kwargs.
"""
if kwargs is None:
kwargs = {}
if size is not None:
size = SizeDict(**get_size_dict(size=size, default_to_square=default_to_square))
if mask_size is not None:
mask_size = SizeDict(**get_size_dict(mask_size, param_name="mask_size"))
if isinstance(image_mean, list):
image_mean = tuple(image_mean)
if isinstance(image_std, list):
image_std = tuple(image_std)
if data_format is None:
data_format = ChannelDimension.FIRST
kwargs["size"] = size
kwargs["mask_size"] = mask_size
kwargs["image_mean"] = image_mean
kwargs["image_std"] = image_std
kwargs["data_format"] = data_format
# torch resize uses interpolation instead of resample
# Check if resample is an int before checking if it's an instance of PILImageResampling
# because if pillow < 9.1.0, resample is an int and PILImageResampling is a module.
# Checking PILImageResampling will fail with error `TypeError: isinstance() arg 2 must be a type or tuple of types`.
resample = kwargs.pop("resample")
kwargs["interpolation"] = (
pil_torch_interpolation_mapping[resample] if isinstance(resample, (PILImageResampling, int)) else resample
)
return kwargs
def _apply_non_overlapping_constraints(self, pred_masks: torch.Tensor) -> torch.Tensor:
"""
Apply non-overlapping constraints to the object scores in pred_masks. Here we
keep only the highest scoring object at each spatial location in pred_masks.
"""
batch_size = pred_masks.size(0)
if batch_size == 1:
return pred_masks
device = pred_masks.device
# "max_obj_inds": object index of the object with the highest score at each location
max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
# "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
keep = max_obj_inds == batch_obj_inds
# suppress overlapping regions' scores below -10.0 so that the foreground regions
# don't overlap (here sigmoid(-10.0)=4.5398e-05)
pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
return pred_masks
def post_process_masks(
self,
masks,
original_sizes,
mask_threshold=0.0,
binarize=True,
max_hole_area=0.0,
max_sprinkle_area=0.0,
apply_non_overlapping_constraints=False,
**kwargs,
):
"""
Remove padding and upscale masks to the original image size.
Args:
masks (`Union[torch.Tensor, List[torch.Tensor], np.ndarray, List[np.ndarray]]`):
Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
The original sizes of each image before it was resized to the model's expected input shape, in (height,
width) format.
mask_threshold (`float`, *optional*, defaults to 0.0):
Threshold for binarization and post-processing operations.
binarize (`bool`, *optional*, defaults to `True`):
Whether to binarize the masks.
max_hole_area (`float`, *optional*, defaults to 0.0):
The maximum area of a hole to fill.
max_sprinkle_area (`float`, *optional*, defaults to 0.0):
The maximum area of a sprinkle to fill.
apply_non_overlapping_constraints (`bool`, *optional*, defaults to `False`):
Whether to apply non-overlapping constraints to the masks.
Returns:
(`torch.Tensor`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width)
is given by original_size.
"""
if isinstance(original_sizes, (torch.Tensor, np.ndarray)):
original_sizes = original_sizes.tolist()
# TODO: add connected components kernel for postprocessing
output_masks = []
for i, original_size in enumerate(original_sizes):
if isinstance(masks[i], np.ndarray):
masks[i] = torch.from_numpy(masks[i])
elif not isinstance(masks[i], torch.Tensor):
raise TypeError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
interpolated_mask = F.interpolate(masks[i], original_size, mode="bilinear", align_corners=False)
if apply_non_overlapping_constraints:
interpolated_mask = self._apply_non_overlapping_constraints(interpolated_mask)
if binarize:
interpolated_mask = interpolated_mask > mask_threshold
output_masks.append(interpolated_mask)
return output_masks
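# Hedged usage sketch (shapes are assumptions): low-resolution mask logits of
# shape (num_objects, 1, 256, 256) are bilinearly resized back to each image's
# original (height, width) and, when binarize=True, thresholded at
# mask_threshold into boolean masks.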
def _get_preprocess_shape(self):
raise NotImplementedError("No _get_preprocess_shape for SAM 2.")
def resize(self):
raise NotImplementedError("No need to override resize for SAM 2.")
@dataclass
@auto_docstring(custom_intro="Base class for the vision encoder's outputs.")
| Sam2ImageProcessorFast |
python | run-llama__llama_index | llama-index-integrations/node_parser/llama-index-node-parser-topic/llama_index/node_parser/topic/base.py | {
"start": 4010,
"end": 12422
} | class ____(NodeParser):
"""Topic Based node parser."""
max_chunk_size: int = Field(
default=1000,
description="The maximum number of tokens in a chunk.",
)
window_size: int = Field(
default=5,
description="Paragraph sliding window size",
)
llm: LLM = Field(
description="The LLM model to use for parsing.",
)
similarity_method: str = Field(
default="llm",
description="The method to use for determining if a new proposition belongs to the same topic. Choose 'llm' or 'embedding'.",
)
embed_model: SerializeAsAny[BaseEmbedding] = Field(
description="The embedding model to use for determining similarity between propositions.",
)
similarity_threshold: float = Field(
default=0.8,
description="The threshold for determining similarity between propositions.",
)
tokenizer: Callable = Field(
description="The tokenizer to use for tokenizing text.",
)
@classmethod
def class_name(cls) -> str:
return "TopicNodeParser"
@classmethod
def from_defaults(
cls,
callback_manager: Optional[CallbackManager] = None,
id_func: Optional[Callable[[int, Document], str]] = None,
tokenizer: Optional[Callable] = None,
max_chunk_size: int = 1000,
window_size: int = 5,
llm: Optional[LLM] = None,
embed_model: Optional[BaseEmbedding] = None,
similarity_method: str = "llm",
similarity_threshold: float = 0.8,
) -> "TopicNodeParser":
"""Initialize with parameters."""
from llama_index.core import Settings
callback_manager = callback_manager or CallbackManager([])
id_func = id_func or default_id_func
tokenizer = tokenizer or get_tokenizer()
llm = llm or Settings.llm
embed_model = embed_model or Settings.embed_model
return cls(
callback_manager=callback_manager,
id_func=id_func,
tokenizer=tokenizer,
max_chunk_size=max_chunk_size,
window_size=window_size,
llm=llm,
embed_model=embed_model,
similarity_threshold=similarity_threshold,
similarity_method=similarity_method,
)
def _parse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> List[BaseNode]:
"""Parse document into nodes."""
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
nodes = self.build_topic_based_nodes_from_documents([node])
all_nodes.extend(nodes)
return all_nodes
def split_into_paragraphs(self, text: str) -> List[str]:
"""Split the document into paragraphs based on line breaks."""
return re.split(r"\n\s*\n", text)
def proposition_transfer(self, paragraph: str) -> List[str]:
"""
Convert a paragraph into a list of self-sustaining statements using LLM.
"""
messages = [
ChatMessage(role="system", content=PROPOSITION_SYSTEM_PROMPT),
ChatMessage(role="user", content=paragraph),
]
response = str(self.llm.chat(messages))
json_start = response.find("[")
json_end = response.rfind("]") + 1
if json_start != -1 and json_end != -1:
json_content = response[json_start:json_end]
# Parse the JSON response
try:
return json.loads(json_content)
except json.JSONDecodeError:
print(f"Failed to parse JSON: {json_content}")
return []
else:
print(f"No valid JSON found in the response: {response}")
return []
def is_same_topic_llm(self, current_chunk: List[str], new_proposition: str) -> bool:
"""
Use zero-shot classification with LLM to determine if the new proposition belongs to the same topic.
"""
current_text = " ".join(current_chunk)
messages = [
ChatMessage(role="system", content=TOPIC_CLASSIFICATION_SYSTEM_PROMPT),
ChatMessage(
role="user",
content=f"Text 1: {current_text}\n\nText 2: {new_proposition}",
),
]
response = self.llm.chat(messages)
return "same topic" in str(response).lower()
def is_same_topic_embedding(
self, current_chunk: List[str], new_proposition: str
) -> bool:
"""
Use embedding-based similarity to determine if the new proposition belongs to the same topic.
"""
current_text = " ".join(current_chunk)
current_text_embedding = self.embed_model.get_text_embedding(current_text)
new_proposition_embedding = self.embed_model.get_text_embedding(new_proposition)
similarity_score = similarity(current_text_embedding, new_proposition_embedding)
return similarity_score > self.similarity_threshold
def semantic_chunking(self, paragraphs: List[str]) -> List[str]:
"""
Perform semantic chunking on the given paragraphs.
max_chunk_size: a hard threshold on chunk length, measured in tokens.
As per the paper, the hard threshold ensures that the longest chunk cannot exceed the context length limitation of the LLM.
Here we use 1000 tokens as the threshold.
"""
chunks: List[str] = []
current_chunk: List[str] = []
current_chunk_size: int = 0
half_window = self.window_size // 2
# Cache for storing propositions
proposition_cache: Dict[int, List[str]] = {}
for i in range(len(paragraphs)):
# Define the window range
start_idx = max(0, i - half_window)
end_idx = min(len(paragraphs), i + half_window + 1)
# Generate and cache propositions for paragraphs in the window
window_propositions = []
for j in range(start_idx, end_idx):
if j not in proposition_cache:
proposition_cache[j] = self.proposition_transfer(paragraphs[j])
window_propositions.extend(proposition_cache[j])
for prop in window_propositions:
if current_chunk:
if self.similarity_method == "llm":
is_same_topic = self.is_same_topic_llm(current_chunk, prop)
elif self.similarity_method == "embedding":
is_same_topic = self.is_same_topic_embedding(
current_chunk, prop
)
else:
raise ValueError(
"Invalid similarity method. Choose 'llm' or 'embedding'."
)
else:
is_same_topic = True
if not current_chunk or (
is_same_topic
and current_chunk_size + len(self.tokenizer(prop))
<= self.max_chunk_size
):
current_chunk.append(prop)
current_chunk_size += len(self.tokenizer(prop))
else:
chunks.append(" ".join(current_chunk))
current_chunk = [prop]
current_chunk_size = len(self.tokenizer(prop))
# If we've reached the max chunk size, start a new chunk
if current_chunk_size >= self.max_chunk_size:
chunks.append(" ".join(current_chunk))
current_chunk = []
current_chunk_size = 0
if current_chunk:
chunks.append(" ".join(current_chunk))
return chunks
def build_topic_based_nodes_from_documents(
self, documents: Sequence[Document]
) -> List[BaseNode]:
"""Build topic based nodes from documents."""
all_nodes: List[BaseNode] = []
for doc in documents:
paragraphs = self.split_into_paragraphs(doc.text)
chunks = self.semantic_chunking(paragraphs)
nodes = build_nodes_from_splits(
chunks,
doc,
id_func=self.id_func,
)
all_nodes.extend(nodes)
return all_nodes
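# Minimal usage sketch (the llm/embed_model objects are assumptions):
#   parser = TopicNodeParser.from_defaults(
#       llm=my_llm, embed_model=my_embed_model, similarity_method="embedding"
#   )
#   nodes = parser.get_nodes_from_documents(documents)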
| TopicNodeParser |
python | doocs__leetcode | solution/1100-1199/1157.Online Majority Element In Subarray/Solution.py | {
"start": 136,
"end": 1716
} | class ____:
def __init__(self, nums):
self.nums = nums
n = len(nums)
self.tr = [Node() for _ in range(n << 2)]
self.build(1, 1, n)
def build(self, u, l, r):
self.tr[u].l, self.tr[u].r = l, r
if l == r:
self.tr[u].x = self.nums[l - 1]
self.tr[u].cnt = 1
return
mid = (l + r) >> 1
self.build(u << 1, l, mid)
self.build(u << 1 | 1, mid + 1, r)
self.pushup(u)
def query(self, u, l, r):
if self.tr[u].l >= l and self.tr[u].r <= r:
return self.tr[u].x, self.tr[u].cnt
mid = (self.tr[u].l + self.tr[u].r) >> 1
if r <= mid:
return self.query(u << 1, l, r)
if l > mid:
return self.query(u << 1 | 1, l, r)
x1, cnt1 = self.query(u << 1, l, r)
x2, cnt2 = self.query(u << 1 | 1, l, r)
if x1 == x2:
return x1, cnt1 + cnt2
if cnt1 >= cnt2:
return x1, cnt1 - cnt2
else:
return x2, cnt2 - cnt1
def pushup(self, u):
if self.tr[u << 1].x == self.tr[u << 1 | 1].x:
self.tr[u].x = self.tr[u << 1].x
self.tr[u].cnt = self.tr[u << 1].cnt + self.tr[u << 1 | 1].cnt
elif self.tr[u << 1].cnt >= self.tr[u << 1 | 1].cnt:
self.tr[u].x = self.tr[u << 1].x
self.tr[u].cnt = self.tr[u << 1].cnt - self.tr[u << 1 | 1].cnt
else:
self.tr[u].x = self.tr[u << 1 | 1].x
self.tr[u].cnt = self.tr[u << 1 | 1].cnt - self.tr[u << 1].cnt
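# The (x, cnt) pair kept per node follows the Boyer-Moore majority-vote idea:
# merging two candidates keeps the one with the larger surviving count, so a
# query returns a *candidate* majority element and the caller must still
# verify its true frequency in the queried range.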
| SegmentTree |
python | kamyu104__LeetCode-Solutions | Python/hash-divided-string.py | {
"start": 38,
"end": 357
} | class ____(object):
def stringHash(self, s, k):
"""
:type s: str
:type k: int
:rtype: str
"""
result = (chr(ord('a')+reduce(lambda accu, x: (accu+x)%26, (ord(s[i+j])-ord('a') for j in xrange(k)), 0)) for i in xrange(0, len(s), k))
return "".join(result)
| Solution |
python | pandas-dev__pandas | pandas/tests/indexes/interval/test_astype.py | {
"start": 336,
"end": 2457
} | class ____:
"""Tests common to IntervalIndex with any subtype"""
def test_astype_idempotent(self, index):
result = index.astype("interval")
tm.assert_index_equal(result, index)
result = index.astype(index.dtype)
tm.assert_index_equal(result, index)
def test_astype_object(self, index):
result = index.astype(object)
expected = Index(index.values, dtype="object")
tm.assert_index_equal(result, expected)
assert not result.equals(index)
def test_astype_category(self, index):
result = index.astype("category")
expected = CategoricalIndex(index.values)
tm.assert_index_equal(result, expected)
result = index.astype(CategoricalDtype())
tm.assert_index_equal(result, expected)
# non-default params
categories = index.dropna().unique().values[:-1]
dtype = CategoricalDtype(categories=categories, ordered=True)
msg = "Constructing a Categorical with a dtype and values containing"
with tm.assert_produces_warning(Pandas4Warning, match=msg):
result = index.astype(dtype)
with tm.assert_produces_warning(Pandas4Warning, match=msg):
expected = CategoricalIndex(
index.values, categories=categories, ordered=True
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[
"int64",
"uint64",
"float64",
"complex128",
"period[M]",
"timedelta64",
"timedelta64[ns]",
"datetime64",
"datetime64[ns]",
"datetime64[ns, US/Eastern]",
],
)
def test_astype_cannot_cast(self, index, dtype):
msg = "Cannot cast IntervalIndex to dtype"
with pytest.raises(TypeError, match=msg):
index.astype(dtype)
def test_astype_invalid_dtype(self, index):
msg = "data type [\"']fake_dtype[\"'] not understood"
with pytest.raises(TypeError, match=msg):
index.astype("fake_dtype")
| AstypeTests |
python | doocs__leetcode | solution/0500-0599/0506.Relative Ranks/Solution.py | {
"start": 0,
"end": 370
} | class ____:
def findRelativeRanks(self, score: List[int]) -> List[str]:
n = len(score)
idx = list(range(n))
idx.sort(key=lambda x: -score[x])
top3 = ["Gold Medal", "Silver Medal", "Bronze Medal"]
ans = [None] * n
for i, j in enumerate(idx):
ans[j] = top3[i] if i < 3 else str(i + 1)
return ans
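# Illustrative example: score = [10, 3, 8, 9, 4] yields
# ["Gold Medal", "5", "Bronze Medal", "Silver Medal", "4"], since ranks follow
# descending score.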
| Solution |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B024.py | {
"start": 2341,
"end": 2508
} | class ____(ABC): # error (not an abstract attribute)
foo: int = 2
# this doesn't actually declare a class variable, it's just an expression
| abc_set_class_variable_3 |
python | Farama-Foundation__Gymnasium | gymnasium/envs/mujoco/half_cheetah_v5.py | {
"start": 233,
"end": 16005
} | class ____(MujocoEnv, utils.EzPickle):
r"""
## Description
This environment is based on the work of P. Wawrzyński in ["A Cat-Like Robot Real-Time Learning to Run"](http://staff.elka.pw.edu.pl/~pwawrzyn/pub-s/0812_LSCLRR.pdf).
The HalfCheetah is a 2-dimensional robot consisting of 9 body parts and 8 joints connecting them (including two paws).
The goal is to apply torque to the joints to make the cheetah run forward (right) as fast as possible, with a positive reward based on the distance moved forward and a negative reward for moving backward.
The cheetah's torso and head are fixed, and torque can only be applied to the other 6 joints over the front and back thighs (which connect to the torso), the shins (which connect to the thighs), and the feet (which connect to the shins).
## Action Space
```{figure} action_space_figures/half_cheetah.png
:name: half_cheetah
```
The action space is a `Box(-1, 1, (6,), float32)`. An action represents the torques applied at the hinge joints.
| Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Type (Unit) |
| --- | --------------------------------------- | ----------- | ----------- | -------------------------------- | ----- | ------------ |
| 0 | Torque applied on the back thigh rotor | -1 | 1 | bthigh | hinge | torque (N m) |
| 1 | Torque applied on the back shin rotor | -1 | 1 | bshin | hinge | torque (N m) |
| 2 | Torque applied on the back foot rotor | -1 | 1 | bfoot | hinge | torque (N m) |
| 3 | Torque applied on the front thigh rotor | -1 | 1 | fthigh | hinge | torque (N m) |
| 4 | Torque applied on the front shin rotor | -1 | 1 | fshin | hinge | torque (N m) |
| 5 | Torque applied on the front foot rotor | -1 | 1 | ffoot | hinge | torque (N m) |
## Observation Space
The observation space consists of the following parts (in order):
- *qpos (8 elements by default):* Position values of the robot's body parts.
- *qvel (9 elements):* The velocities of these individual body parts (their derivatives).
By default, the observation does not include the robot's x-coordinate (`rootx`).
This can be included by passing `exclude_current_positions_from_observation=False` during construction.
In this case, the observation space will be a `Box(-Inf, Inf, (18,), float64)`, where the first observation element is the x-coordinate of the robot.
Regardless of whether `exclude_current_positions_from_observation` is set to `True` or `False`, the x- and y-coordinates are returned in `info` with the keys `"x_position"` and `"y_position"`, respectively.
By default, however, the observation space is a `Box(-Inf, Inf, (17,), float64)` where the elements are as follows:
| Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Type (Unit) |
| --- | ------------------------------------------- | ---- | --- | -------------------------------- | ----- | ------------------------ |
| 0 | z-coordinate of the front tip | -Inf | Inf | rootz | slide | position (m) |
| 1 | angle of the front tip | -Inf | Inf | rooty | hinge | angle (rad) |
| 2 | angle of the back thigh | -Inf | Inf | bthigh | hinge | angle (rad) |
| 3 | angle of the back shin | -Inf | Inf | bshin | hinge | angle (rad) |
| 4 | angle of the back foot | -Inf | Inf | bfoot | hinge | angle (rad) |
| 5 | angle of the front thigh | -Inf | Inf | fthigh | hinge | angle (rad) |
| 6 | angle of the front shin | -Inf | Inf | fshin | hinge | angle (rad) |
| 7 | angle of the front foot | -Inf | Inf | ffoot | hinge | angle (rad) |
| 8 | velocity of the x-coordinate of front tip | -Inf | Inf | rootx | slide | velocity (m/s) |
| 9 | velocity of the z-coordinate of front tip | -Inf | Inf | rootz | slide | velocity (m/s) |
| 10 | angular velocity of the front tip | -Inf | Inf | rooty | hinge | angular velocity (rad/s) |
| 11 | angular velocity of the back thigh | -Inf | Inf | bthigh | hinge | angular velocity (rad/s) |
| 12 | angular velocity of the back shin | -Inf | Inf | bshin | hinge | angular velocity (rad/s) |
| 13 | angular velocity of the back foot | -Inf | Inf | bfoot | hinge | angular velocity (rad/s) |
| 14 | angular velocity of the front thigh | -Inf | Inf | fthigh | hinge | angular velocity (rad/s) |
| 15 | angular velocity of the front shin | -Inf | Inf | fshin | hinge | angular velocity (rad/s) |
| 16 | angular velocity of the front foot | -Inf | Inf | ffoot | hinge | angular velocity (rad/s) |
| excluded | x-coordinate of the front tip | -Inf | Inf | rootx | slide | position (m) |
## Rewards
The total reward is: ***reward*** *=* *forward_reward - ctrl_cost*.
- *forward_reward*:
A reward for moving forward,
this reward would be positive if the Half Cheetah moves forward (in the positive $x$ direction / in the right direction).
$w_{forward} \times \frac{dx}{dt}$, where
$dx$ is the displacement of the "tip" ($x_{after-action} - x_{before-action}$),
$dt$ is the time between actions, which depends on the `frame_skip` parameter (default is $5$),
and `frametime` which is $0.01$ - so the default is $dt = 5 \times 0.01 = 0.05$,
$w_{forward}$ is the `forward_reward_weight` (default is $1$).
- *ctrl_cost*:
A negative reward to penalize the Half Cheetah for taking actions that are too large.
$w_{control} \times \|action\|_2^2$,
where $w_{control}$ is `ctrl_cost_weight` (default is $0.1$).
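As an illustrative example, with the default weights, an average forward velocity of $2$ m/s and an action with $\|action\|_2^2 = 0.5$ give $reward = 1 \times 2 - 0.1 \times 0.5 = 1.95$ for that step.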
`info` contains the individual reward terms.
## Starting State
The initial position state is $\mathcal{U}_{[-reset\_noise\_scale \times I_{9}, reset\_noise\_scale \times I_{9}]}$.
The initial velocity state is $\mathcal{N}(0_{9}, reset\_noise\_scale^2 \times I_{9})$.
where $\mathcal{N}$ is the multivariate normal distribution and $\mathcal{U}$ is the multivariate uniform continuous distribution.
## Episode End
### Termination
The Half Cheetah never terminates.
### Truncation
The default duration of an episode is 1000 timesteps.
## Arguments
HalfCheetah provides a range of parameters to modify the observation space, reward function, initial state, and termination condition.
These parameters can be applied during `gymnasium.make` in the following way:
```python
import gymnasium as gym
env = gym.make('HalfCheetah-v5', ctrl_cost_weight=0.1, ....)
```
| Parameter | Type | Default | Description |
| -------------------------------------------- | --------- | -------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `xml_file` | **str** | `"half_cheetah.xml"` | Path to a MuJoCo model |
| `forward_reward_weight` | **float** | `1` | Weight for _forward_reward_ term (see `Rewards` section) |
| `ctrl_cost_weight` | **float** | `0.1` | Weight for _ctrl_cost_ weight (see `Rewards` section) |
| `reset_noise_scale` | **float** | `0.1` | Scale of random perturbations of initial position and velocity (see `Starting State` section) |
| `exclude_current_positions_from_observation` | **bool** | `True` | Whether or not to omit the x-coordinate from observations. Excluding the position can serve as an inductive bias to induce position-agnostic behavior in policies (see `Observation State` section) |
## Version History
* v5:
- Minimum `mujoco` version is now 2.3.3.
- Added support for fully custom/third party `mujoco` models using the `xml_file` argument (previously only a few changes could be made to the existing models).
- Added `default_camera_config` argument, a dictionary for setting the `mj_camera` properties, mainly useful for custom environments.
    - Added `env.observation_structure`, a dictionary for specifying the observation space composition (e.g. `qpos`, `qvel`), useful for building tooling and wrappers for the MuJoCo environments.
    - Return a non-empty `info` from `reset()`; previously an empty dictionary was returned. The new keys carry the same state information as `step()`.
    - Added `frame_skip` argument, used to configure the `dt` (duration of `step()`); the default varies by environment, so check the environment documentation pages.
- Restored the `xml_file` argument (was removed in `v4`).
- Renamed `info["reward_run"]` to `info["reward_forward"]` to be consistent with the other environments.
* v4: All MuJoCo environments now use the MuJoCo bindings in mujoco >= 2.1.3.
* v3: Support for `gymnasium.make` kwargs such as `xml_file`, `ctrl_cost_weight`, `reset_noise_scale`, etc. RGB rendering comes from the tracking camera (so the agent does not run off screen). Moved to the [gymnasium-robotics repo](https://github.com/Farama-Foundation/gymnasium-robotics).
* v2: All continuous control environments now use mujoco-py >= 1.50. Moved to the [gymnasium-robotics repo](https://github.com/Farama-Foundation/gymnasium-robotics).
* v1: max_time_steps raised to 1000 for robot based tasks. Added reward_threshold to environments.
* v0: Initial versions release.
"""
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
"rgbd_tuple",
],
}
def __init__(
self,
xml_file: str = "half_cheetah.xml",
frame_skip: int = 5,
default_camera_config: dict[str, float | int] = DEFAULT_CAMERA_CONFIG,
forward_reward_weight: float = 1.0,
ctrl_cost_weight: float = 0.1,
reset_noise_scale: float = 0.1,
exclude_current_positions_from_observation: bool = True,
**kwargs,
):
utils.EzPickle.__init__(
self,
xml_file,
frame_skip,
default_camera_config,
forward_reward_weight,
ctrl_cost_weight,
reset_noise_scale,
exclude_current_positions_from_observation,
**kwargs,
)
self._forward_reward_weight = forward_reward_weight
self._ctrl_cost_weight = ctrl_cost_weight
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation
)
MujocoEnv.__init__(
self,
xml_file,
frame_skip,
observation_space=None,
default_camera_config=default_camera_config,
**kwargs,
)
self.metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
"rgbd_tuple",
],
"render_fps": int(np.round(1.0 / self.dt)),
}
obs_size = (
self.data.qpos.size
+ self.data.qvel.size
- exclude_current_positions_from_observation
)
self.observation_space = Box(
low=-np.inf, high=np.inf, shape=(obs_size,), dtype=np.float64
)
self.observation_structure = {
"skipped_qpos": 1 * exclude_current_positions_from_observation,
"qpos": self.data.qpos.size
- 1 * exclude_current_positions_from_observation,
"qvel": self.data.qvel.size,
}
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
x_position_before = self.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.data.qpos[0]
x_velocity = (x_position_after - x_position_before) / self.dt
observation = self._get_obs()
reward, reward_info = self._get_rew(x_velocity, action)
info = {"x_position": x_position_after, "x_velocity": x_velocity, **reward_info}
if self.render_mode == "human":
self.render()
# truncation=False as the time limit is handled by the `TimeLimit` wrapper added during `make`
return observation, reward, False, False, info
def _get_rew(self, x_velocity: float, action):
forward_reward = self._forward_reward_weight * x_velocity
ctrl_cost = self.control_cost(action)
reward = forward_reward - ctrl_cost
reward_info = {
"reward_forward": forward_reward,
"reward_ctrl": -ctrl_cost,
}
return reward, reward_info
def _get_obs(self):
position = self.data.qpos.flatten()
velocity = self.data.qvel.flatten()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq
)
qvel = (
self.init_qvel
+ self._reset_noise_scale * self.np_random.standard_normal(self.model.nv)
)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def _get_reset_info(self):
return {
"x_position": self.data.qpos[0],
}
| HalfCheetahEnv |
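For reference, a short random-rollout sketch against the public API of the class above (assumes `gymnasium` with the MuJoCo extras installed):
```python
import gymnasium as gym

env = gym.make("HalfCheetah-v5")
obs, info = env.reset(seed=0)
for _ in range(10):
    obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
print(info["reward_forward"], info["reward_ctrl"])
env.close()
```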
python | django__django | tests/admin_views/models.py | {
"start": 10611,
"end": 10711
} | class ____(Title):
the_recommender = models.ForeignKey(Recommender, models.CASCADE)
| Recommendation |
python | pytorch__pytorch | test/distributed/_composable/test_composability/test_2d_composability.py | {
"start": 18279,
"end": 21246
} | class ____(DTensorTestBase):
def init_model(self, device_type, model_parallel_size=2):
torch.manual_seed(0)
model = MLPModule(device_type)
torch.manual_seed(0)
twod_model = MLPModule(device_type)
model = DDP(model)
# 2-D mesh is [dp, tp]
world_size = dist.get_world_size()
mesh_2d = init_device_mesh(
device_type,
(world_size // model_parallel_size, model_parallel_size),
mesh_dim_names=("dp", "tp"),
)
dp_pg = mesh_2d.get_group(mesh_dim=0)
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
twod_model = parallelize_module(twod_model, mesh_2d["tp"], parallelize_plan)
_pre_dp_module_transform(twod_model)
# TODO: Add tests when using gradient_as_bucket_view and static_graph for DDP.
twod_model = DDP(twod_model, process_group=dp_pg)
return model, twod_model, dp_pg
def _check_module(self, m1, m2, check_grad=False):
named_parameters = dict(m1.named_parameters())
for name, param_m2 in m2.named_parameters():
if name not in named_parameters:
print(name, named_parameters.keys())
self.assertTrue(name in named_parameters)
param_m1 = named_parameters[name]
if check_grad:
param_m2 = param_m2.grad
param_m1 = param_m1.grad
if isinstance(param_m2, DTensor):
replicate = [Replicate()]
param_m2 = param_m2.redistribute(
device_mesh=param_m2.device_mesh, placements=replicate
).to_local()
self.assertEqual(param_m2, param_m1)
@with_comms
@skip_if_lt_x_gpu(4)
def test_2d_ddp_integration_functionality(self) -> None:
model, twod_model, dp_pg = self.init_model(self.device_type)
optim = torch.optim.Adam(model.parameters(), lr=3e-5)
twod_optim = torch.optim.Adam(twod_model.parameters(), lr=3e-5)
# Create Input
input_seed = dist.get_rank(dp_pg)
torch.manual_seed(input_seed + 1)
input = torch.rand(4, 10, device=self.device_type)
output = model(input)
twod_output = twod_model(input)
self.assertEqual(output, twod_output)
output.sum().backward()
twod_output.sum().backward()
self._check_module(model, twod_model, check_grad=True)
optim.step()
twod_optim.step()
self._check_module(model, twod_model)
torch.manual_seed(input_seed + 1004)
input = torch.rand(16, 10, device=self.device_type)
output = model(input)
twod_output = twod_model(input)
self.assertEqual(output, twod_output)
# TODO: Add save/load of 2D verification.
# TODO: add additional tests for multi_param_group, optim_in_backward,
# and fsdp_nested.
| Test2dFSDP1ParallelIntegration |
python | pypa__warehouse | tests/common/db/classifiers.py | {
"start": 129,
"end": 215
} | class ____(WarehouseFactory):
class Meta:
model = Classifier
| ClassifierFactory |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/partition_sets.py | {
"start": 4437,
"end": 4648
} | class ____(graphene.ObjectType):
runStatus = graphene.NonNull(GrapheneRunStatus)
count = graphene.NonNull(graphene.Int)
class Meta:
name = "PartitionStatusCounts"
| GraphenePartitionStatusCounts |
python | huggingface__transformers | src/transformers/models/glm46v/video_processing_glm46v.py | {
"start": 2491,
"end": 11541
} | class ____(BaseVideoProcessor):
resample = PILImageResampling.BICUBIC
size = {"shortest_edge": 112 * 112, "longest_edge": 28 * 28 * 2 * 30000}
max_image_size = {"longest_edge": 28 * 28 * 2 * 30000}
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
do_sample_frames = True
patch_size = 14
temporal_patch_size = 2
max_duration = 300
merge_size = 2
valid_kwargs = Glm46VVideoProcessorInitKwargs
num_frames = 16
fps = 2
model_input_names = ["pixel_values_videos", "video_grid_thw"]
def __init__(self, **kwargs: Unpack[Glm46VVideoProcessorInitKwargs]):
super().__init__(**kwargs)
if self.size is not None and (
self.size.get("shortest_edge", None) is None or self.size.get("longest_edge", None) is None
):
raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
def _further_process_kwargs(
self,
size: Optional[SizeDict] = None,
**kwargs,
) -> dict:
"""
Update kwargs that need further processing before being validated
Can be overridden by subclasses to customize the processing of kwargs.
"""
if size is not None and ("shortest_edge" not in size or "longest_edge" not in size):
raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
return super()._further_process_kwargs(size=size, **kwargs)
def sample_frames(
self,
metadata: VideoMetadata,
fps: Optional[Union[int, float]] = None,
**kwargs,
):
"""
Args:
metadata (`VideoMetadata`):
Metadata of the video containing information about total duration, fps and total number of frames.
fps (`int` or `float`, *optional*):
Target frames to sample per second. Defaults to `self.fps`.
Returns:
np.ndarray:
Indices to sample video frames.
"""
if metadata is None or getattr(metadata, "fps", None) is None:
raise ValueError(
"Asked to sample frames per second but no video metadata was provided which is required when sampling in Glm46V. "
"Please pass in `VideoMetadata` object or set `do_sample_frames=False`"
)
total_frames = metadata.total_num_frames
max_frame_idx = total_frames - 1
duration = metadata.duration or round(max_frame_idx / metadata.fps) + 1
DYNAMIC_FPS_THRES = {30: 3, 300: 1, 2400: 0.5}
MAX_FRAME_COUNT_DYNAMIC = 640
MAX_DURATION = 2400
effective_duration = min(duration, MAX_DURATION)
if effective_duration <= 30:
target_fps = DYNAMIC_FPS_THRES[30]
elif effective_duration <= 300:
target_fps = DYNAMIC_FPS_THRES[300]
else:
target_fps = DYNAMIC_FPS_THRES[2400]
extract_t = int(effective_duration * target_fps * self.temporal_patch_size)
extract_t = min(extract_t, MAX_FRAME_COUNT_DYNAMIC)
duration_per_frame = 1 / metadata.fps
timestamps = [i * duration_per_frame for i in range(total_frames)]
max_second = int(duration)
if total_frames < extract_t:
frame_indices = np.linspace(0, total_frames - 1, extract_t, dtype=int).tolist()
else:
frame_indices = []
current_second = 0
inv_fps = 1 / (self.temporal_patch_size * target_fps)
for frame_index in range(total_frames):
if timestamps[frame_index] >= current_second:
current_second += inv_fps
frame_indices.append(frame_index)
if current_second >= max_second:
break
if len(frame_indices) < extract_t:
if len(frame_indices) == 0:
start, end = 0, max(total_frames - 1, 0)
else:
start, end = frame_indices[0], frame_indices[-1]
frame_indices = np.linspace(start, end, extract_t, dtype=int).tolist()
elif len(frame_indices) > extract_t:
frame_indices = np.linspace(0, total_frames - 1, extract_t, dtype=int).tolist()
seen, uniq = set(), []
for idx in frame_indices:
if idx not in seen:
seen.add(idx)
uniq.append(idx)
if len(uniq) & 1:
uniq.append(uniq[-1])
return np.array(uniq)
def _preprocess(
self,
videos: list[torch.Tensor],
do_convert_rgb: bool = True,
do_resize: bool = True,
size: Optional[SizeDict] = None,
interpolation: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: float = 1 / 255.0,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
patch_size: Optional[int] = None,
temporal_patch_size: Optional[int] = None,
merge_size: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
):
grouped_videos, grouped_videos_index = group_videos_by_shape(videos)
resized_videos_grouped = {}
for shape, stacked_videos in grouped_videos.items():
B, T, C, H, W = stacked_videos.shape
num_frames, height, width = T, H, W
if do_resize:
resized_height, resized_width = smart_resize(
num_frames=num_frames,
height=height,
width=width,
temporal_factor=temporal_patch_size,
factor=patch_size * merge_size,
min_pixels=size.shortest_edge,
max_pixels=size.longest_edge,
)
stacked_videos = stacked_videos.view(B * T, C, H, W)
stacked_videos = self.resize(
stacked_videos,
size=SizeDict(height=resized_height, width=resized_width),
interpolation=interpolation,
)
stacked_videos = stacked_videos.view(B, T, C, resized_height, resized_width)
resized_videos_grouped[shape] = stacked_videos
resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)
# Group videos by size for further processing
# Needed in case do_resize is False, or resize returns videos with different sizes
grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
processed_videos_grouped = {}
processed_grids = {}
for shape, stacked_videos in grouped_videos.items():
resized_height, resized_width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST)
# Fused rescale and normalize
stacked_videos = self.rescale_and_normalize(
stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
patches = stacked_videos
# Check that videos have `num_frames` divisible by `temporal_patch_size`
if patches.shape[1] % temporal_patch_size != 0:
repeats = patches[:, -1:].repeat(1, temporal_patch_size - 1, 1, 1, 1)
patches = torch.cat([patches, repeats], dim=1)
batch_size, grid_t, channel = patches.shape[:3]
grid_t = grid_t // temporal_patch_size
grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
patches = patches.view(
batch_size,
grid_t,
temporal_patch_size,
channel,
grid_h // merge_size,
merge_size,
patch_size,
grid_w // merge_size,
merge_size,
patch_size,
)
patches = patches.permute(0, 1, 4, 7, 5, 8, 3, 2, 6, 9)
flatten_patches = patches.reshape(
batch_size,
grid_t * grid_h * grid_w,
channel * temporal_patch_size * patch_size * patch_size,
)
processed_videos_grouped[shape] = flatten_patches
processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size
processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)
processed_grids = reorder_videos(processed_grids, grouped_videos_index)
pixel_values_videos = torch.cat(processed_videos, dim=0)
video_grid_thw = torch.tensor(processed_grids)
data = {
"pixel_values_videos": pixel_values_videos,
"video_grid_thw": video_grid_thw,
}
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["Glm46VVideoProcessor"]
| Glm46VVideoProcessor |
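The dynamic frame-rate selection inside `sample_frames` above reduces to a small lookup; a hedged standalone re-statement (not part of the processor's API):
```python
def target_fps_for(duration_s: float) -> float:
    # <= 30 s -> 3 fps, <= 300 s -> 1 fps, otherwise 0.5 fps (duration capped at 2400 s)
    effective = min(duration_s, 2400)
    if effective <= 30:
        return 3
    if effective <= 300:
        return 1
    return 0.5

print(target_fps_for(12), target_fps_for(120), target_fps_for(4000))  # 3 1 0.5
```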
python | doocs__leetcode | lcof2/剑指 Offer II 065. 最短的单词编码/Solution2.py | {
"start": 0,
"end": 391
} | class ____:
def __init__(self):
self.children = [None] * 26
def insert(self, w):
node = self
pref = True
for c in w:
idx = ord(c) - ord("a")
if node.children[idx] is None:
node.children[idx] = Trie()
pref = False
node = node.children[idx]
return 0 if pref else len(w) + 1
| Trie |
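The driving `Solution` wrapper falls outside this record's span; a plausible sketch of how this Trie is used for the word-encoding problem is to insert each distinct word reversed, longest first, so that a word that is a suffix of another finds its whole path already present and contributes 0:
```python
def minimum_length_encoding(words):
    root = Trie()
    # longest-first guarantees suffixes of already-inserted words return 0
    return sum(root.insert(w[::-1])
               for w in sorted(set(words), key=len, reverse=True))

print(minimum_length_encoding(["time", "me", "bell"]))  # 10 ("time#bell#")
```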
python | pytorch__pytorch | torch/testing/_internal/common_device_type.py | {
"start": 51283,
"end": 54875
} | class ____(skipIf):
def __init__(self, dep, reason):
device_type = torch._C._get_privateuse1_backend_name()
super().__init__(dep, reason, device_type=device_type)
def _has_sufficient_memory(device, size):
device_ = torch.device(device)
device_type = device_.type
if device_type in ["cuda", "xpu"]:
acc = torch.accelerator.current_accelerator()
# Case 1: no accelerator found
if not acc:
return False
# Case 2: accelerator found but not matching device type
if acc.type != device_type:
return True
# Case 3: accelerator found and matching device type but not available
if not torch.accelerator.is_available():
return False
# Case 4: accelerator found and matching device type and available
gc.collect()
torch.accelerator.empty_cache()
if device_.index is None:
device_ = torch.device(device_type, 0)
if device_type == "cuda":
return (
torch.cuda.memory.mem_get_info(device_)[0]
* torch.cuda.memory.get_per_process_memory_fraction(device_)
) >= size
if device_type == "xpu":
return torch.xpu.memory.mem_get_info(device_)[0] >= size
if device_type == "xla":
raise unittest.SkipTest("TODO: Memory availability checks for XLA?")
if device_type != "cpu":
raise unittest.SkipTest("Unknown device type")
# CPU
if not HAS_PSUTIL:
raise unittest.SkipTest("Need psutil to determine if memory is sufficient")
# The sanitizers have significant memory overheads
if TEST_WITH_ASAN or TEST_WITH_TSAN or TEST_WITH_UBSAN:
effective_size = size * 10
else:
effective_size = size
# don't try using all RAM on s390x, leave some for service processes
if IS_S390X:
effective_size = effective_size * 2
if psutil.virtual_memory().available < effective_size:
gc.collect()
return psutil.virtual_memory().available >= effective_size
def largeTensorTest(size, device=None, inductor=TEST_WITH_TORCHINDUCTOR):
"""Skip test if the device has insufficient memory to run the test
size may be a number of bytes, a string of the form "N GB", or a callable
If the test is a device generic test, available memory on the primary device will be checked.
It can also be overridden by the optional `device=` argument.
In other tests, the `device=` argument needs to be specified.
"""
if isinstance(size, str):
assert size.endswith(("GB", "gb")), "only bytes or GB supported"
size = 1024**3 * int(size[:-2])
def inner(fn):
@wraps(fn)
def dep_fn(self, *args, **kwargs):
size_bytes: int = size(self, *args, **kwargs) if callable(size) else size
_device = device
if _device is None:
if hasattr(self, "get_primary_device"):
_device = self.get_primary_device()
else:
_device = self.device
# If this is running with GPU cpp_wrapper, the autotuning step will generate
# an additional array of the same size as the input.
if inductor and torch._inductor.config.cpp_wrapper and _device != "cpu":
size_bytes *= 2
if not _has_sufficient_memory(_device, size_bytes):
raise unittest.SkipTest(f"Insufficient {_device} memory")
return fn(self, *args, **kwargs)
return dep_fn
return inner
| skipPRIVATEUSE1If |
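A hedged usage sketch of the `largeTensorTest` decorator above (the test method is hypothetical; sizes may be given in bytes or as an `"N GB"` string, per the docstring):
```python
import torch

# hypothetical method on a device-generic test class
@largeTensorTest("4GB", device="cuda")
def test_big_empty(self, device):
    # 2**15 x 2**15 float32 ~= 4 GiB; the test is skipped when memory is short
    a = torch.empty(2**15, 2**15, device=device)
    self.assertEqual(a.numel(), 2**30)
```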
python | Farama-Foundation__Gymnasium | gymnasium/envs/box2d/bipedal_walker.py | {
"start": 27828,
"end": 28274
} | class ____:
def __init__(self):
raise error.Error(
"Error initializing BipedalWalkerHardcore Environment.\n"
"Currently, we do not support initializing this mode of environment by calling the class directly.\n"
"To use this environment, instead create it by specifying the hardcore keyword in gym.make, i.e.\n"
'gym.make("BipedalWalker-v3", hardcore=True)'
)
| BipedalWalkerHardcore |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 96764,
"end": 97109
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("ref_id", "client_mutation_id")
ref_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="refId")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| DeleteRefInput |
python | pytorch__pytorch | torch/_dynamo/testing.py | {
"start": 8018,
"end": 8982
} | class ____:
def __init__(self, backend: str) -> None:
self.frame_count: Union[int, CompileCounterInt] = 0
self.backend = backend
self.graphs: list[torch.fx.GraphModule] = []
self.clear()
def __call__(
self, gm: torch.fx.GraphModule, example_inputs: list[torch.Tensor]
) -> Callable[..., Any]:
from .backends.registry import lookup_backend
self.frame_count += 1
for node in gm.graph.nodes:
if "call" in node.op:
self.op_count += 1
self.graphs.append(gm)
return lookup_backend(self.backend)(gm, example_inputs)
def clear(self) -> None:
if config.debug_disable_compile_counter:
self.frame_count = CompileCounterInt(0)
else:
self.frame_count = 0
self.op_count = 0
self.graphs = []
# Equivalent to backend="eager", but also records graphs that
# we can assert on
| CompileCounterWithBackend |
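A small usage sketch (this mirrors how Dynamo tests count compiled frames; the toy function is hypothetical):
```python
import torch
from torch._dynamo.testing import CompileCounterWithBackend

cnt = CompileCounterWithBackend("eager")

@torch.compile(backend=cnt)
def f(x):
    return torch.sin(x) + 1

f(torch.randn(4))
print(cnt.frame_count, len(cnt.graphs))  # 1 1
```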
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF033.py | {
"start": 1203,
"end": 1298
} | class ____:
def __post_init__(self, bar: int = (x := 1)) -> None:
pass
@dataclass
| Foo |
python | django__django | tests/staticfiles_tests/test_finders.py | {
"start": 288,
"end": 967
} | class ____:
"""
Base finder test mixin.
On Windows, sometimes the case of the path we ask the finders for and the
path(s) they find can differ. Compare them using os.path.normcase() to
avoid false negatives.
"""
def test_find_first(self):
src, dst = self.find_first
found = self.finder.find(src)
self.assertEqual(os.path.normcase(found), os.path.normcase(dst))
def test_find_all(self):
src, dst = self.find_all
found = self.finder.find(src, find_all=True)
found = [os.path.normcase(f) for f in found]
dst = [os.path.normcase(d) for d in dst]
self.assertEqual(found, dst)
| TestFinders |
python | ansible__ansible | lib/ansible/utils/unsafe_proxy.py | {
"start": 937,
"end": 2166
} | class ____(str):
def __new__(cls, value):
return TrustedAsTemplate.untag(value)
def _wrap_dict(v):
return dict((wrap_var(k), wrap_var(item)) for k, item in v.items())
def _wrap_sequence(v):
"""Wraps a sequence with unsafe, not meant for strings, primarily
``tuple`` and ``list``
"""
v_type = type(v)
return v_type(wrap_var(item) for item in v)
def _wrap_set(v):
return set(wrap_var(item) for item in v)
def wrap_var(v):
# maintain backward compat by recursively *un* marking TrustedAsTemplate
if v is None or isinstance(v, AnsibleUnsafe):
return v
if isinstance(v, Mapping):
v = _wrap_dict(v)
elif isinstance(v, Set):
v = _wrap_set(v)
elif is_sequence(v):
v = _wrap_sequence(v)
elif isinstance(v, bytes):
v = AnsibleUnsafeBytes(v)
elif isinstance(v, str):
v = AnsibleUnsafeText(v)
return v
def to_unsafe_bytes(*args, **kwargs):
return wrap_var(to_bytes(*args, **kwargs))
def to_unsafe_text(*args, **kwargs):
return wrap_var(to_text(*args, **kwargs))
def __getattr__(importable_name):
return _no_six.deprecate(importable_name, __name__, "binary_type", "text_type")
| NativeJinjaUnsafeText |
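A hedged interactive sketch of `wrap_var` (an internal API, shown only to illustrate the recursive wrapping above; both keys and values are wrapped):
```python
data = wrap_var({"greeting": "hello {{ 'world' }}"})
print(type(data["greeting"]).__name__)  # AnsibleUnsafeText -- never templated
```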
python | apache__airflow | airflow-e2e-tests/tests/airflow_e2e_tests/remote_log_tests/test_remote_logging.py | {
"start": 971,
"end": 3955
} | class ____:
airflow_client = AirflowClient()
dag_id = "example_xcom_test"
task_count = 6
retry_interval_in_seconds = 1
max_retries = 5
def test_dag_unpause(self):
self.airflow_client.un_pause_dag(
TestRemoteLogging.dag_id,
)
def test_remote_logging_s3(self):
"""Test that a DAG using remote logging to S3 completes successfully."""
self.airflow_client.un_pause_dag(TestRemoteLogging.dag_id)
resp = self.airflow_client.trigger_dag(
TestRemoteLogging.dag_id, json={"logical_date": datetime.now(timezone.utc).isoformat()}
)
state = self.airflow_client.wait_for_dag_run(
dag_id=TestRemoteLogging.dag_id,
run_id=resp["dag_run_id"],
)
assert state == "success", (
f"DAG {TestRemoteLogging.dag_id} did not complete successfully. Final state: {state}"
)
        # This bucket will be created as part of the docker-compose setup
bucket_name = "test-airflow-logs"
s3_client = boto3.client(
"s3",
endpoint_url="http://localhost:4566",
aws_access_key_id="test",
aws_secret_access_key="test",
region_name="us-east-1",
)
# Wait for logs to be available in S3 before we call `get_task_logs`
for _ in range(self.max_retries):
response = s3_client.list_objects_v2(Bucket=bucket_name)
contents = response.get("Contents", [])
if len(contents) >= self.task_count:
break
print(f"Expected at least {self.task_count} log files, found {len(contents)}. Retrying...")
time.sleep(self.retry_interval_in_seconds)
if len(contents) < self.task_count:
pytest.fail(
f"Expected at least {self.task_count} log files in S3 bucket {bucket_name}, "
f"but found {len(contents)} objects: {[obj.get('Key') for obj in contents]}. \n"
f"List Objects Response: {response}"
)
task_logs = self.airflow_client.get_task_logs(
dag_id=TestRemoteLogging.dag_id,
task_id="bash_pull",
run_id=resp["dag_run_id"],
)
task_log_sources = [
source for content in task_logs.get("content", [{}]) for source in content.get("sources", [])
]
response = s3_client.list_objects_v2(Bucket=bucket_name)
if "Contents" not in response:
pytest.fail("No objects found in S3 bucket %s", bucket_name)
# s3 key format: dag_id=example_xcom/run_id=manual__2025-09-29T23:32:09.457215+00:00/task_id=bash_pull/attempt=1.log
log_files = [f"s3://{bucket_name}/{obj['Key']}" for obj in response["Contents"]]
assert any(source in log_files for source in task_log_sources), (
f"None of the log sources {task_log_sources} were found in S3 bucket logs {log_files}"
)
| TestRemoteLogging |
python | django__django | tests/sitemaps_tests/urls/http.py | {
"start": 2612,
"end": 2980
} | class ____(Sitemap):
"""All items have `lastmod`."""
location = "/location/"
def items(self):
o1 = TestModel()
o1.lastmod = datetime(2013, 3, 13, 10, 0, 0)
o2 = TestModel()
o2.lastmod = datetime(2014, 3, 13, 10, 0, 0)
return [o1, o2]
def lastmod(self, obj):
return obj.lastmod
| CallableLastmodFullSitemap |
python | huggingface__transformers | src/transformers/models/gemma/modular_gemma.py | {
"start": 10253,
"end": 10592
} | class ____(LlamaPreTrainedModel):
@torch.no_grad()
def _init_weights(self, module):
PreTrainedModel._init_weights(self, module)
# We initialize with 0s to be 1 centered as the RMSNorm here does (1 + weight)
if "RMSNorm" in module.__class__.__name__:
init.zeros_(module.weight)
| GemmaPreTrainedModel |
python | automl__auto-sklearn | test/test_pipeline/test_create_searchspace_util_classification.py | {
"start": 821,
"end": 6534
} | class ____(unittest.TestCase):
_multiprocess_can_split_ = True
def test_get_match_array_sparse_and_dense(self):
# preproc is empty
preprocessors = OrderedDict()
preprocessors["pca"] = PCA
classifiers = OrderedDict()
classifiers["lda"] = LDA
# Sparse + dense
class Preprocessors(object):
@classmethod
def get_available_components(self, *args, **kwargs):
return preprocessors
class Classifiers(object):
@classmethod
def get_available_components(self, *args, **kwargs):
return classifiers
# Dense
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, PCA), (1, LDA)), dataset_properties={"sparse": True}
)
self.assertEqual(numpy.sum(m), 0)
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, PCA), (1, LDA)), dataset_properties={"sparse": False}
)
self.assertEqual(m, [[1]])
# Sparse
preprocessors["tSVD"] = TruncatedSVD
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, Preprocessors), (1, LDA)), dataset_properties={"sparse": True}
)
self.assertEqual(m[0], [0]) # pca
self.assertEqual(m[1], [1]) # svd
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, Preprocessors), (1, LDA)),
dataset_properties={"sparse": False},
)
self.assertEqual(m[0], [1]) # pca
self.assertEqual(m[1], [0]) # svd
preprocessors["none"] = NoPreprocessing
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, Preprocessors), (1, LDA)), dataset_properties={"sparse": True}
)
self.assertEqual(m[0, :], [0]) # pca
self.assertEqual(m[1, :], [1]) # tsvd
self.assertEqual(m[2, :], [0]) # none
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, Preprocessors), (1, LDA)),
dataset_properties={"sparse": False},
)
self.assertEqual(m[0, :], [1]) # pca
self.assertEqual(m[1, :], [0]) # tsvd
self.assertEqual(m[2, :], [1]) # none
classifiers["libsvm"] = LibLinear_SVC
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, Preprocessors), (1, Classifiers)),
dataset_properties={"sparse": False},
)
self.assertListEqual(list(m[0, :]), [1, 1]) # pca
self.assertListEqual(list(m[1, :]), [0, 0]) # tsvd
self.assertListEqual(list(m[2, :]), [1, 1]) # none
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, Preprocessors), (1, Classifiers)),
dataset_properties={"sparse": True},
)
self.assertListEqual(list(m[0, :]), [0, 0]) # pca
self.assertListEqual(list(m[1, :]), [1, 1]) # tsvd
self.assertListEqual(list(m[2, :]), [0, 1]) # none
# Do fancy 3d stuff
preprocessors["random_trees"] = RandomTreesEmbedding
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, Preprocessors), (1, Preprocessors), (2, Classifiers)),
dataset_properties={"sparse": False},
)
# PCA followed by truncated SVD is forbidden
self.assertEqual(list(m[0].flatten()), [1, 1, 0, 0, 1, 1, 0, 1])
# Truncated SVD is forbidden
self.assertEqual(list(m[1].flatten()), [0, 0, 0, 0, 0, 0, 0, 0])
# Truncated SVD is forbidden after no_preprocessing
self.assertEqual(list(m[2].flatten()), [1, 1, 0, 0, 1, 1, 0, 1])
# PCA is forbidden, truncatedSVD allowed after random trees embedding
# lda only allowed after truncatedSVD
self.assertEqual(list(m[3].flatten()), [0, 0, 1, 1, 0, 1, 0, 1])
def test_get_match_array_signed_unsigned_and_binary(self):
pass
@unittest.skip("Not currently working.")
def test_add_forbidden(self):
m = numpy.ones([2, 3])
preprocessors_list = ["pa", "pb"]
classifier_list = ["ca", "cb", "cc"]
cs = ConfigurationSpace()
preprocessor = CategoricalHyperparameter(
name="feature_preprocessor", choices=preprocessors_list
)
classifier = CategoricalHyperparameter(
name="classifier", choices=classifier_list
)
cs.add_hyperparameter(preprocessor)
cs.add_hyperparameter(classifier)
new_cs = autosklearn.pipeline.create_searchspace_util.add_forbidden(
conf_space=cs,
node_0_list=preprocessors_list,
node_1_list=classifier_list,
matches=m,
node_0_name="feature_preprocessor",
node_1_name="classifier",
)
self.assertEqual(len(new_cs.forbidden_clauses), 0)
self.assertIsInstance(new_cs, ConfigurationSpace)
m[1, 1] = 0
new_cs = autosklearn.pipeline.create_searchspace_util.add_forbidden(
conf_space=cs,
node_0_list=preprocessors_list,
node_1_list=classifier_list,
matches=m,
node_0_name="feature_preprocessor",
node_1_name="classifier",
)
self.assertEqual(len(new_cs.forbidden_clauses), 1)
self.assertEqual(new_cs.forbidden_clauses[0].components[0].value, "cb")
self.assertEqual(new_cs.forbidden_clauses[0].components[1].value, "pb")
self.assertIsInstance(new_cs, ConfigurationSpace)
| TestCreateClassificationSearchspace |
python | google__pytype | pytype/tests/test_recovery1.py | {
"start": 79,
"end": 3777
} | class ____(test_base.BaseTest):
"""Tests for recovering after errors.
The type inferencer can warn about bad code, but it should never blow up.
These tests check that we don't faceplant when we encounter difficult code.
"""
def test_bad_subtract(self):
ty = self.Infer(
"""
def f():
t = 0.0
return t - ("bla" - t)
""",
report_errors=False,
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
def f() -> Any: ...
""",
)
def test_inherit_from_instance(self):
ty = self.Infer(
"""
class Foo(3):
pass
""",
report_errors=False,
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
class Foo(Any):
pass
""",
)
def test_name_error(self):
ty = self.Infer(
"""
x = foobar
class A(x):
pass
pow(A(), 2)
""",
report_errors=False,
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
x = ... # type: Any
class A(Any):
pass
""",
)
def test_object_attr(self):
self.assertNoCrash(
self.Check,
"""
object.bla(int)
""",
)
def test_attr_error(self):
ty = self.Infer(
"""
class A:
pass
x = A.x
class B:
pass
y = "foo".foo()
object.bar(int)
class C:
pass
""",
report_errors=False,
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
class A:
pass
x = ... # type: Any
class B:
pass
y = ... # type: Any
class C:
pass
""",
)
def test_wrong_call(self):
ty = self.Infer(
"""
def f():
pass
f("foo")
x = 3
""",
report_errors=False,
)
self.assertTypesMatchPytd(
ty,
"""
def f() -> None: ...
x = ... # type: int
""",
)
def test_duplicate_identifier(self):
ty = self.Infer("""
class A:
def __init__(self):
self.foo = 3
def foo(self):
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
class A:
foo = ... # type: Any
def __init__(self) -> None: ...
""",
)
def test_method_with_unknown_decorator(self):
self.InferWithErrors("""
from nowhere import decorator # import-error
class Foo:
@decorator
def f():
name_error # name-error
""")
def test_assert_in_constructor(self):
self.Check("""
class Foo:
def __init__(self):
self._bar = "foo"
assert False
def __str__(self):
return self._bar
""")
@test_base.skip("Line 7, in __str__: No attribute '_bar' on Foo'")
def test_constructor_infinite_loop(self):
self.Check("""
class Foo:
def __init__(self):
self._bar = "foo"
while True: pass
def __str__(self):
return self._bar
""")
def test_attribute_access_in_impossible_path(self):
self.InferWithErrors("""
x = 3.14 if __random__ else 42
if isinstance(x, int):
if isinstance(x, float):
x.upper # not reported
3 in x # unsupported-operands
""")
def test_binary_operator_on_impossible_path(self):
self.InferWithErrors("""
x = "" if __random__ else []
if isinstance(x, list):
if isinstance(x, str):
x / x # unsupported-operands
""")
if __name__ == "__main__":
test_base.main()
| RecoveryTests |
python | celery__celery | t/integration/test_loader.py | {
"start": 104,
"end": 1352
} | class ____:
def test_autodiscovery__when_packages_exist(self, manager):
# Arrange
expected_package_name, _, module_name = __name__.rpartition('.')
unexpected_package_name = 'datetime.datetime'
# Act
manager.app.autodiscover_tasks([expected_package_name, unexpected_package_name], module_name, force=True)
# Assert
assert f'{expected_package_name}.{module_name}.dummy_task' in manager.app.tasks
assert not any(
task.startswith(unexpected_package_name) for task in manager.app.tasks
), 'Expected datetime.datetime to neither have test_loader module nor define a Celery task.'
def test_autodiscovery__when_packages_do_not_exist(self, manager):
# Arrange
existent_package_name, _, module_name = __name__.rpartition('.')
nonexistent_package_name = 'nonexistent.package.name'
# Act
with pytest.raises(ModuleNotFoundError) as exc:
manager.app.autodiscover_tasks(
[existent_package_name, nonexistent_package_name], module_name, force=True
)
# Assert
assert nonexistent_package_name.startswith(exc.value.name), 'Expected to fail on importing "nonexistent"'
| test_loader |
python | sympy__sympy | sympy/functions/special/error_functions.py | {
"start": 42483,
"end": 47256
} | class ____(DefinedFunction):
r"""
The classical logarithmic integral.
Explanation
===========
For use in SymPy, this function is defined as
.. math:: \operatorname{li}(x) = \int_0^x \frac{1}{\log(t)} \mathrm{d}t \,.
Examples
========
>>> from sympy import I, oo, li
>>> from sympy.abc import z
Several special values are known:
>>> li(0)
0
>>> li(1)
-oo
>>> li(oo)
oo
Differentiation with respect to $z$ is supported:
>>> from sympy import diff
>>> diff(li(z), z)
1/log(z)
Defining the ``li`` function via an integral:
>>> from sympy import integrate
>>> integrate(li(z))
z*li(z) - Ei(2*log(z))
>>> integrate(li(z),z)
z*li(z) - Ei(2*log(z))
The logarithmic integral can also be defined in terms of ``Ei``:
>>> from sympy import Ei
>>> li(z).rewrite(Ei)
Ei(log(z))
>>> diff(li(z).rewrite(Ei), z)
1/log(z)
We can numerically evaluate the logarithmic integral to arbitrary precision
on the whole complex plane (except the singular points):
>>> li(2).evalf(30)
1.04516378011749278484458888919
>>> li(2*I).evalf(30)
1.0652795784357498247001125598 + 3.08346052231061726610939702133*I
    We can even compute Soldner's constant with the help of mpmath:
>>> from mpmath import findroot
>>> findroot(li, 2)
1.45136923488338
Further transformations include rewriting ``li`` in terms of
the trigonometric integrals ``Si``, ``Ci``, ``Shi`` and ``Chi``:
>>> from sympy import Si, Ci, Shi, Chi
>>> li(z).rewrite(Si)
-log(I*log(z)) - log(1/log(z))/2 + log(log(z))/2 + Ci(I*log(z)) + Shi(log(z))
>>> li(z).rewrite(Ci)
-log(I*log(z)) - log(1/log(z))/2 + log(log(z))/2 + Ci(I*log(z)) + Shi(log(z))
>>> li(z).rewrite(Shi)
-log(1/log(z))/2 + log(log(z))/2 + Chi(log(z)) - Shi(log(z))
>>> li(z).rewrite(Chi)
-log(1/log(z))/2 + log(log(z))/2 + Chi(log(z)) - Shi(log(z))
See Also
========
Li: Offset logarithmic integral.
Ei: Exponential integral.
expint: Generalised exponential integral.
E1: Special case of the generalised exponential integral.
Si: Sine integral.
Ci: Cosine integral.
Shi: Hyperbolic sine integral.
Chi: Hyperbolic cosine integral.
References
==========
.. [1] https://en.wikipedia.org/wiki/Logarithmic_integral
.. [2] https://mathworld.wolfram.com/LogarithmicIntegral.html
.. [3] https://dlmf.nist.gov/6
.. [4] https://mathworld.wolfram.com/SoldnersConstant.html
"""
@classmethod
def eval(cls, z):
if z.is_zero:
return S.Zero
elif z is S.One:
return S.NegativeInfinity
elif z is S.Infinity:
return S.Infinity
def fdiff(self, argindex=1):
arg = self.args[0]
if argindex == 1:
return S.One / log(arg)
else:
raise ArgumentIndexError(self, argindex)
def _eval_conjugate(self):
z = self.args[0]
# Exclude values on the branch cut (-oo, 0)
if not z.is_extended_negative:
return self.func(z.conjugate())
def _eval_rewrite_as_Li(self, z, **kwargs):
return Li(z) + li(2)
def _eval_rewrite_as_Ei(self, z, **kwargs):
return Ei(log(z))
def _eval_rewrite_as_uppergamma(self, z, **kwargs):
from sympy.functions.special.gamma_functions import uppergamma
return (-uppergamma(0, -log(z)) +
S.Half*(log(log(z)) - log(S.One/log(z))) - log(-log(z)))
def _eval_rewrite_as_Si(self, z, **kwargs):
return (Ci(I*log(z)) - I*Si(I*log(z)) -
S.Half*(log(S.One/log(z)) - log(log(z))) - log(I*log(z)))
_eval_rewrite_as_Ci = _eval_rewrite_as_Si
def _eval_rewrite_as_Shi(self, z, **kwargs):
return (Chi(log(z)) - Shi(log(z)) - S.Half*(log(S.One/log(z)) - log(log(z))))
_eval_rewrite_as_Chi = _eval_rewrite_as_Shi
def _eval_rewrite_as_hyper(self, z, **kwargs):
return (log(z)*hyper((1, 1), (2, 2), log(z)) +
S.Half*(log(log(z)) - log(S.One/log(z))) + EulerGamma)
def _eval_rewrite_as_meijerg(self, z, **kwargs):
return (-log(-log(z)) - S.Half*(log(S.One/log(z)) - log(log(z)))
- meijerg(((), (1,)), ((0, 0), ()), -log(z)))
def _eval_rewrite_as_tractable(self, z, limitvar=None, **kwargs):
return z * _eis(log(z))
def _eval_nseries(self, x, n, logx, cdir=0):
z = self.args[0]
s = [(log(z))**k / (factorial(k) * k) for k in range(1, n)]
return EulerGamma + log(log(z)) + Add(*s)
def _eval_is_zero(self):
z = self.args[0]
if z.is_zero:
return True
| li |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/super1.py | {
"start": 1482,
"end": 1555
} | class ____(Generic[T]):
def __init__(self, val: T):
pass
| ClassF |
python | facebookresearch__faiss | tests/test_index_composite.py | {
"start": 2010,
"end": 7214
} | class ____(unittest.TestCase):
def do_merge_then_remove(self, ondisk):
d = 10
nb = 1000
nq = 200
nt = 200
xt, xb, xq = get_dataset_2(d, nt, nb, nq)
quantizer = faiss.IndexFlatL2(d)
index1 = faiss.IndexIVFFlat(quantizer, d, 20)
index1.train(xt)
filename = None
if ondisk:
filename = tempfile.mkstemp()[1]
invlists = faiss.OnDiskInvertedLists(
index1.nlist, index1.code_size,
filename)
index1.replace_invlists(invlists)
index1.add(xb[:int(nb / 2)])
index2 = faiss.IndexIVFFlat(quantizer, d, 20)
assert index2.is_trained
index2.add(xb[int(nb / 2):])
Dref, Iref = index1.search(xq, 10)
index1.merge_from(index2, int(nb / 2))
assert index1.ntotal == nb
index1.remove_ids(faiss.IDSelectorRange(int(nb / 2), nb))
assert index1.ntotal == int(nb / 2)
Dnew, Inew = index1.search(xq, 10)
assert np.all(Dnew == Dref)
assert np.all(Inew == Iref)
if filename is not None:
os.unlink(filename)
def test_remove_regular(self):
self.do_merge_then_remove(False)
@unittest.skipIf(platform.system() == 'Windows',
'OnDiskInvertedLists is unsupported on Windows.')
def test_remove_ondisk(self):
self.do_merge_then_remove(True)
def test_remove(self):
# only tests the python interface
index = faiss.IndexFlat(5)
xb = np.zeros((10, 5), dtype='float32')
xb[:, 0] = np.arange(10, dtype='int64') + 1000
index.add(xb)
index.remove_ids(np.arange(5, dtype='int64') * 2)
xb2 = faiss.vector_float_to_array(index.codes)
xb2 = xb2.view("float32").reshape(5, 5)
assert np.all(xb2[:, 0] == xb[np.arange(5) * 2 + 1, 0])
def test_remove_id_map(self):
sub_index = faiss.IndexFlat(5)
xb = np.zeros((10, 5), dtype='float32')
xb[:, 0] = np.arange(10) + 1000
index = faiss.IndexIDMap2(sub_index)
index.add_with_ids(xb, np.arange(10, dtype='int64') + 100)
assert index.reconstruct(104)[0] == 1004
index.remove_ids(np.array([103], dtype='int64'))
assert index.reconstruct(104)[0] == 1004
try:
index.reconstruct(103)
except RuntimeError:
pass
else:
assert False, 'should have raised an exception'
def test_factory_idmap2_suffix(self):
xb = np.zeros((10, 5), dtype='float32')
xb[:, 0] = np.arange(10) + 1000
index = faiss.index_factory(5, "Flat,IDMap2")
ids = np.arange(10, dtype='int64') + 100
index.add_with_ids(xb, ids)
assert index.reconstruct(104)[0] == 1004
index.remove_ids(np.array([103], dtype='int64'))
assert index.reconstruct(104)[0] == 1004
def test_factory_idmap2_prefix(self):
xb = np.zeros((10, 5), dtype='float32')
xb[:, 0] = np.arange(10) + 1000
index = faiss.index_factory(5, "IDMap2,Flat")
ids = np.arange(10, dtype='int64') + 100
index.add_with_ids(xb, ids)
assert index.reconstruct(109)[0] == 1009
index.remove_ids(np.array([100], dtype='int64'))
assert index.reconstruct(109)[0] == 1009
def test_remove_id_map_2(self):
# from https://github.com/facebookresearch/faiss/issues/255
rs = np.random.RandomState(1234)
X = rs.randn(10, 10).astype(np.float32)
idx = np.array([0, 10, 20, 30, 40, 5, 15, 25, 35, 45], np.int64)
remove_set = np.array([10, 30], dtype=np.int64)
index = faiss.index_factory(10, 'IDMap,Flat')
index.add_with_ids(X[:5, :], idx[:5])
index.remove_ids(remove_set)
index.add_with_ids(X[5:, :], idx[5:])
for i in range(10):
_, searchres = index.search(X[i:i + 1, :], 1)
if idx[i] in remove_set:
assert searchres[0] != idx[i]
else:
assert searchres[0] == idx[i]
def test_remove_id_map_binary(self):
sub_index = faiss.IndexBinaryFlat(40)
xb = np.zeros((10, 5), dtype='uint8')
xb[:, 0] = np.arange(10) + 100
index = faiss.IndexBinaryIDMap2(sub_index)
index.add_with_ids(xb, np.arange(10, dtype='int64') + 1000)
assert index.reconstruct(1004)[0] == 104
index.remove_ids(np.array([1003], dtype='int64'))
assert index.reconstruct(1004)[0] == 104
try:
index.reconstruct(1003)
except RuntimeError:
pass
else:
assert False, 'should have raised an exception'
# while we are there, let's test I/O as well...
fd, tmpnam = tempfile.mkstemp()
os.close(fd)
try:
faiss.write_index_binary(index, tmpnam)
index = faiss.read_index_binary(tmpnam)
finally:
os.remove(tmpnam)
assert index.reconstruct(1004)[0] == 104
try:
index.reconstruct(1003)
except RuntimeError:
pass
else:
assert False, 'should have raised an exception'
| TestRemove |
python | pennersr__django-allauth | allauth/mfa/base/forms.py | {
"start": 1273,
"end": 1428
} | class ____(BaseAuthenticateForm):
def save(self):
post_authentication(context.request, self.authenticator, reauthenticated=True)
| ReauthenticateForm |
python | tensorflow__tensorflow | tensorflow/python/distribute/tpu_values.py | {
"start": 14386,
"end": 16682
} | class ____(TPUVariableMixin, values.SyncOnReadVariable):
"""Holds a map from replica to variables whose values are reduced on save."""
def assign_sub(self, *args, **kwargs):
if tpu_util.enclosing_tpu_context() is None:
return values.SyncOnReadVariable.assign_sub(self, *args, **kwargs)
else:
return tpu_util.make_raw_assign_fn(
gen_resource_variable_ops.assign_sub_variable_op)(self, *args,
**kwargs)
def assign_add(self, *args, **kwargs):
if tpu_util.enclosing_tpu_context() is None:
return values.SyncOnReadVariable.assign_add(self, *args, **kwargs)
else:
return tpu_util.make_raw_assign_fn(
gen_resource_variable_ops.assign_add_variable_op)(self, *args,
**kwargs)
def assign(self, *args, **kwargs):
if tpu_util.enclosing_tpu_context() is None:
return values.SyncOnReadVariable.assign(self, *args, **kwargs)
else:
return tpu_util.make_raw_assign_fn(
gen_resource_variable_ops.assign_variable_op)(self, *args, **kwargs)
# Common method between OnWrite and Mirrored variables.
def assign_sub(var, value, use_locking=False, name=None, read_value=True):
assign_sub_fn = tpu_util.make_raw_assign_fn(
gen_resource_variable_ops.assign_sub_variable_op)
return var._update( # pylint: disable=protected-access
update_fn=assign_sub_fn,
value=value,
use_locking=use_locking,
name=name,
read_value=read_value)
def assign_add(var, value, use_locking=False, name=None, read_value=True):
assign_add_fn = tpu_util.make_raw_assign_fn(
gen_resource_variable_ops.assign_add_variable_op)
return var._update( # pylint: disable=protected-access
update_fn=assign_add_fn,
value=value,
use_locking=use_locking,
name=name,
read_value=read_value)
def assign(var, value, use_locking=False, name=None, read_value=True):
assign_fn = tpu_util.make_raw_assign_fn(
gen_resource_variable_ops.assign_variable_op)
return var._update( # pylint: disable=protected-access
update_fn=assign_fn,
value=value,
use_locking=use_locking,
name=name,
read_value=read_value)
| TPUSyncOnReadVariable |
python | docker__docker-py | tests/integration/api_config_test.py | {
"start": 165,
"end": 2818
} | class ____(BaseAPIIntegrationTest):
@classmethod
def setup_class(cls):
client = cls.get_client_instance()
force_leave_swarm(client)
cls._init_swarm(client)
@classmethod
def teardown_class(cls):
client = cls.get_client_instance()
force_leave_swarm(client)
def test_create_config(self):
config_id = self.client.create_config(
'favorite_character', 'sakuya izayoi'
)
self.tmp_configs.append(config_id)
assert 'ID' in config_id
data = self.client.inspect_config(config_id)
assert data['Spec']['Name'] == 'favorite_character'
def test_create_config_unicode_data(self):
config_id = self.client.create_config(
'favorite_character', 'いざよいさくや'
)
self.tmp_configs.append(config_id)
assert 'ID' in config_id
data = self.client.inspect_config(config_id)
assert data['Spec']['Name'] == 'favorite_character'
def test_inspect_config(self):
config_name = 'favorite_character'
config_id = self.client.create_config(
config_name, 'sakuya izayoi'
)
self.tmp_configs.append(config_id)
data = self.client.inspect_config(config_id)
assert data['Spec']['Name'] == config_name
assert 'ID' in data
assert 'Version' in data
def test_remove_config(self):
config_name = 'favorite_character'
config_id = self.client.create_config(
config_name, 'sakuya izayoi'
)
self.tmp_configs.append(config_id)
assert self.client.remove_config(config_id)
with pytest.raises(docker.errors.NotFound):
self.client.inspect_config(config_id)
def test_list_configs(self):
config_name = 'favorite_character'
config_id = self.client.create_config(
config_name, 'sakuya izayoi'
)
self.tmp_configs.append(config_id)
data = self.client.configs(filters={'name': ['favorite_character']})
assert len(data) == 1
assert data[0]['ID'] == config_id['ID']
@requires_api_version('1.37')
def test_create_config_with_templating(self):
config_id = self.client.create_config(
'favorite_character', 'sakuya izayoi',
templating={'name': 'golang'}
)
self.tmp_configs.append(config_id)
assert 'ID' in config_id
data = self.client.inspect_config(config_id)
assert data['Spec']['Name'] == 'favorite_character'
assert 'Templating' in data['Spec']
assert data['Spec']['Templating']['Name'] == 'golang'
| ConfigAPITest |
python | doocs__leetcode | solution/1800-1899/1857.Largest Color Value in a Directed Graph/Solution.py | {
"start": 0,
"end": 929
} | class ____:
def largestPathValue(self, colors: str, edges: List[List[int]]) -> int:
n = len(colors)
indeg = [0] * n
g = defaultdict(list)
for a, b in edges:
g[a].append(b)
indeg[b] += 1
q = deque()
dp = [[0] * 26 for _ in range(n)]
for i, v in enumerate(indeg):
if v == 0:
q.append(i)
c = ord(colors[i]) - ord('a')
dp[i][c] += 1
cnt = 0
ans = 1
while q:
i = q.popleft()
cnt += 1
for j in g[i]:
indeg[j] -= 1
if indeg[j] == 0:
q.append(j)
c = ord(colors[j]) - ord('a')
for k in range(26):
dp[j][k] = max(dp[j][k], dp[i][k] + (c == k))
ans = max(ans, dp[j][k])
return -1 if cnt < n else ans
| Solution |
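Sanity check on the problem's first sample (assuming the record's implicit imports: `List` from `typing`, `defaultdict` and `deque` from `collections`); the best path 0 → 2 → 3 → 4 carries three `'a'` nodes:
```python
s = Solution()
print(s.largestPathValue("abaca", [[0, 1], [0, 2], [2, 3], [3, 4]]))  # 3
print(s.largestPathValue("a", [[0, 0]]))  # -1 (self-loop, i.e. a cycle)
```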
python | python-jsonschema__jsonschema | jsonschema/_utils.py | {
"start": 125,
"end": 923
} | class ____(MutableMapping):
"""
Dictionary which uses normalized URIs as keys.
"""
def normalize(self, uri):
return urlsplit(uri).geturl()
def __init__(self, *args, **kwargs):
self.store = dict()
self.store.update(*args, **kwargs)
def __getitem__(self, uri):
return self.store[self.normalize(uri)]
def __setitem__(self, uri, value):
self.store[self.normalize(uri)] = value
def __delitem__(self, uri):
del self.store[self.normalize(uri)]
def __iter__(self):
return iter(self.store)
def __len__(self): # pragma: no cover -- untested, but to be removed
return len(self.store)
def __repr__(self): # pragma: no cover -- untested, but to be removed
return repr(self.store)
| URIDict |
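A quick usage sketch (assuming the record's imports, `urlsplit` and `MutableMapping`); `urlsplit(...).geturl()` lower-cases the scheme, so differently-cased spellings collide on one key:
```python
d = URIDict()
d["HTTP://example.com/schema"] = {"type": "object"}
print(d["http://example.com/schema"])  # {'type': 'object'}
print(len(d))  # 1
```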
python | cython__cython | Cython/Compiler/UFuncs.py | {
"start": 807,
"end": 1328
} | class ____:
"""
Everything related to defining an input/output argument for a ufunc
type - PyrexType
type_constant - str such as "NPY_INT8" representing numpy dtype constants
injected_typename - str representing a name that can be used to look up the type
in Cython code
"""
def __init__(self, type, type_constant, injected_typename):
self.type = type
self.type_constant = type_constant
self.injected_typename = injected_typename
| _ArgumentInfo |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 71446,
"end": 71717
} | class ____(_ConfigCreateModel):
name: str
@field_validator("name")
def check_name(cls, v: str) -> str:
if v in ["id", "vector"]:
raise ValueError(f"Property name '{v}' is reserved and cannot be used")
return v
| _ReferencePropertyBase |
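The validator above rejects the two reserved names; a hedged sketch (the base model is pydantic-backed, so the `ValueError` surfaces as a `ValidationError`):
```python
import pydantic

try:
    _ReferencePropertyBase(name="id")
except pydantic.ValidationError as e:
    print("rejected:", e.errors()[0]["msg"])

ok = _ReferencePropertyBase(name="wrote")  # any non-reserved name passes
```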
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/class_definition.py | {
"start": 985,
"end": 1046
} | class ____[T]: # trailing comment
pass
| TestTrailingComment3 |
python | PyCQA__isort | tests/unit/profiles/test_django.py | {
"start": 3687,
"end": 3741
} | class ____(OSError):
pass"""
)
| UnreadablePostError |
python | mozilla__bleach | tests/test_parse_shim.py | {
"start": 117,
"end": 4149
} | class ____:
scheme: str = ""
netloc: str = ""
path: str = ""
params: str = ""
query: str = ""
fragment: str = ""
# Tests from
# https://github.com/web-platform-tests/wpt/blob/master/url/resources/urltestdata.json
# commit ee566de4c5c65d7e8af8b2500f9b85a646ffeaa5
@pytest.mark.parametrize(
"uri, expected",
[
("", ParseResult()),
("http://example\t.\norg", ParseResult(scheme="http", netloc="example.org")),
(
"http://user:pass@foo:21/bar;par?b#c",
ParseResult(
scheme="http",
netloc="user:pass@foo:21",
path="/bar",
params="par",
query="b",
fragment="c",
),
),
("https://test:@test", ParseResult(scheme="https", netloc="test:@test")),
("https://:@test", ParseResult(scheme="https", netloc=":@test")),
(
"non-special://test:@test/x",
ParseResult(scheme="non-special", netloc="test:@test", path="/x"),
),
(
"non-special://:@test/x",
ParseResult(scheme="non-special", netloc=":@test", path="/x"),
),
("http:foo.com", ParseResult(scheme="http", path="foo.com")),
        # NOTE(willkg): The wpt tests set the scheme to http because that's what
# the base url is. Since our parser is not using a baseurl, it sets the
# scheme to "". Further, our parser includes spaces at the beginning,
# but I don't see that as being problematic.
("\t :foo.com \n", ParseResult(path=" :foo.com ")),
# NOTE(willkg): The wpt tests set the path to "/foo/foo.com" because
# the base url is at "/foo"
(" foo.com ", ParseResult(path=" foo.com ")),
("a:\t foo.com", ParseResult(scheme="a", path=" foo.com")),
(
"http://f:21/ b ? d # e ",
ParseResult(
scheme="http", netloc="f:21", path="/ b ", query=" d ", fragment=" e "
),
),
(
"lolscheme:x x#x x",
ParseResult(scheme="lolscheme", path="x x", fragment="x x"),
),
("http://f:/c", ParseResult(scheme="http", netloc="f:", path="/c")),
("http://f:0/c", ParseResult(scheme="http", netloc="f:0", path="/c")),
# NOTE(willkg): The wpt tests normalize the 0000000000000 to 0 so the
# netloc should be "f:0".
(
"http://f:00000000000000/c",
ParseResult(scheme="http", netloc="f:00000000000000", path="/c"),
),
# NOTE(willkg): The wpt tests drop the 0000000000000000000 altogether
# so the netloc should be "f".
(
"http://f:00000000000000000000080/c",
ParseResult(scheme="http", netloc="f:00000000000000000000080", path="/c"),
),
# This is an invalid ipv6 url
("http://2001::1]", ValueError),
# NOTE(willkg): The wpt tests show this as a parse error, but our
# parser "parses" it.
("http://f:b/c", ParseResult(scheme="http", netloc="f:b", path="/c")),
# NOTE(willkg): The wpt tests show this as a parse error, but our
# parser "parses" it.
("http://f: /c", ParseResult(scheme="http", netloc="f: ", path="/c")),
# NOTE(willkg): The wpt tests show this as a parse error, but our
# parser "parses" it.
("http://f:999999/c", ParseResult(scheme="http", netloc="f:999999", path="/c")),
],
)
def test_urlparse(uri, expected):
if inspect.isclass(expected) and issubclass(expected, BaseException):
with pytest.raises(expected):
urlparse(uri)
else:
parsed = urlparse(uri)
print(parsed)
assert parsed.scheme == expected.scheme
assert parsed.netloc == expected.netloc
assert parsed.path == expected.path
assert parsed.params == expected.params
assert parsed.query == expected.query
assert parsed.fragment == expected.fragment
| ParseResult |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 55555,
"end": 56865
} | class ____(fixtures.TestBase):
__only_on__ = "postgresql"
__sparse_driver_backend__ = True
@testing.fixture()
def scalar(self, connection):
def go(expression):
return connection.scalar(select(expression))
return go
def test_cast_name(self, scalar):
eq_(scalar(cast("pg_class", postgresql.REGCLASS)), "pg_class")
def test_cast_path(self, scalar):
eq_(
scalar(cast("pg_catalog.pg_class", postgresql.REGCLASS)),
"pg_class",
)
def test_cast_oid(self, scalar):
regclass = cast("pg_class", postgresql.REGCLASS)
oid = scalar(cast(regclass, postgresql.OID))
assert isinstance(oid, int)
eq_(
scalar(
cast(type_coerce(oid, postgresql.OID), postgresql.REGCLASS)
),
"pg_class",
)
def test_cast_whereclause(self, connection):
pga = Table(
"pg_attribute",
MetaData(),
Column("attrelid", postgresql.OID),
Column("attname", String(64)),
)
oid = connection.scalar(
select(pga.c.attrelid).where(
pga.c.attrelid == cast("pg_class", postgresql.REGCLASS)
)
)
assert isinstance(oid, int)
| RegClassTest |
python | doocs__leetcode | solution/3200-3299/3201.Find the Maximum Length of Valid Subsequence I/Solution.py | {
"start": 0,
"end": 345
} | class ____:
def maximumLength(self, nums: List[int]) -> int:
k = 2
f = [[0] * k for _ in range(k)]
ans = 0
for x in nums:
x %= k
for j in range(k):
y = (j - x + k) % k
f[x][y] = f[y][x] + 1
ans = max(ans, f[x][y])
return ans
| Solution |
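Sanity check (assuming `List` is imported from `typing`); alternating parities admit the whole array, while two odd values form a valid pair:
```python
s = Solution()
print(s.maximumLength([1, 2, 3, 4]))  # 4
print(s.maximumLength([1, 3]))        # 2
```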
python | numba__numba | numba/core/rewrites/static_getitem.py | {
"start": 5349,
"end": 6624
} | class ____(Rewrite):
"""
Rewrite IR statements of the kind `setitem(target=arr, index=$constXX, ...)`
where `$constXX` is a known constant as
`static_setitem(target=arr, index=<constant value>, ...)`.
"""
def match(self, func_ir, block, typemap, calltypes):
self.setitems = setitems = {}
self.block = block
# Detect all setitem statements and find which ones can be
# rewritten
for inst in block.find_insts(ir.SetItem):
try:
const = func_ir.infer_constant(inst.index)
except errors.ConstantInferenceError:
continue
setitems[inst] = const
return len(setitems) > 0
def apply(self):
"""
Rewrite all matching setitems as static_setitems.
"""
new_block = self.block.copy()
new_block.clear()
for inst in self.block.body:
if inst in self.setitems:
const = self.setitems[inst]
new_inst = ir.StaticSetItem(inst.target, const,
inst.index, inst.value, inst.loc)
new_block.append(new_inst)
else:
new_block.append(inst)
return new_block
| RewriteConstSetitems |
python | pallets__click | src/click/testing.py | {
"start": 2050,
"end": 2799
} | class ____:
"""Mixes `<stdout>` and `<stderr>` streams.
The result is available in the ``output`` attribute.
.. versionadded:: 8.2
"""
def __init__(self) -> None:
self.output: io.BytesIO = io.BytesIO()
self.stdout: io.BytesIO = BytesIOCopy(copy_to=self.output)
self.stderr: io.BytesIO = BytesIOCopy(copy_to=self.output)
def __del__(self) -> None:
"""
Guarantee that embedded file-like objects are closed in a
predictable order, protecting against races between
self.output being closed and other streams being flushed on close
.. versionadded:: 8.2.2
"""
self.stderr.close()
self.stdout.close()
self.output.close()
| StreamMixer |
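A usage sketch (relies on `BytesIOCopy`, defined just above this span in the source, which mirrors each write into its `copy_to` buffer):
```python
mixer = StreamMixer()
mixer.stdout.write(b"out\n")
mixer.stderr.write(b"err\n")
print(mixer.output.getvalue())  # b'out\nerr\n' -- interleaved in write order
```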
python | pytorch__pytorch | torch/testing/_internal/jit_utils.py | {
"start": 2304,
"end": 3084
} | class ____:
"""
A context manager that is useful for checking that error messages highlight
the correct part of the source code.
"""
def __init__(self, test_case, exception, regex, highlight):
self.test_case = test_case
self.exception_type = exception
self.regex = regex
self.highlight = highlight
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
with self.test_case.assertRaisesRegex(self.exception_type, self.regex):
if type:
raise value
if self.highlight:
FileCheck().check_source_highlighted(self.highlight).run(str(value))
return True
FUSION_GROUP = "prim::TensorExprGroup"
| _AssertRaisesRegexWithHighlightContext |
python | tiangolo__fastapi | docs_src/sql_databases/tutorial002_an_py39.py | {
"start": 504,
"end": 2604
} | class ____(HeroBase):
name: Union[str, None] = None
age: Union[int, None] = None
secret_name: Union[str, None] = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def get_session():
with Session(engine) as session:
yield session
SessionDep = Annotated[Session, Depends(get_session)]
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroPublic)
def create_hero(hero: HeroCreate, session: SessionDep):
db_hero = Hero.model_validate(hero)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=list[HeroPublic])
def read_heroes(
session: SessionDep,
offset: int = 0,
limit: Annotated[int, Query(le=100)] = 100,
):
heroes = session.exec(select(Hero).offset(offset).limit(limit)).all()
return heroes
@app.get("/heroes/{hero_id}", response_model=HeroPublic)
def read_hero(hero_id: int, session: SessionDep):
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
return hero
@app.patch("/heroes/{hero_id}", response_model=HeroPublic)
def update_hero(hero_id: int, hero: HeroUpdate, session: SessionDep):
hero_db = session.get(Hero, hero_id)
if not hero_db:
raise HTTPException(status_code=404, detail="Hero not found")
hero_data = hero.model_dump(exclude_unset=True)
hero_db.sqlmodel_update(hero_data)
session.add(hero_db)
session.commit()
session.refresh(hero_db)
return hero_db
@app.delete("/heroes/{hero_id}")
def delete_hero(hero_id: int, session: SessionDep):
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
session.delete(hero)
session.commit()
return {"ok": True}
| HeroUpdate |
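A usage sketch exercising the endpoints above with FastAPI's TestClient (the Hero* models come from the elided top of the tutorial file):
from fastapi.testclient import TestClient

with TestClient(app) as client:  # the context manager fires the startup event that creates tables
    created = client.post(
        "/heroes/", json={"name": "Deadpond", "secret_name": "Dive Wilson"}
    ).json()
    hero_id = created["id"]
    client.patch(f"/heroes/{hero_id}", json={"age": 48})
    assert client.get(f"/heroes/{hero_id}").json()["age"] == 48
    assert client.delete(f"/heroes/{hero_id}").json() == {"ok": True}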
python | ansible__ansible | lib/ansible/module_utils/facts/system/service_mgr.py | {
"start": 1375,
"end": 6666
} | class ____(BaseFactCollector):
name = 'service_mgr'
_fact_ids = set() # type: t.Set[str]
required_facts = set(['platform', 'distribution'])
@staticmethod
def is_systemd_managed(module):
# tools must be installed
if module.get_bin_path('systemctl'):
# this should show if systemd is the boot init system, in case checking init failed to identify systemd
# these mirror systemd's own sd_boot test http://www.freedesktop.org/software/systemd/man/sd_booted.html
for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
if os.path.exists(canary):
return True
return False
@staticmethod
def is_systemd_managed_offline(module):
# tools must be installed
if module.get_bin_path('systemctl'):
# check if /sbin/init is a symlink to systemd
# on SUSE, /sbin/init may be missing if systemd-sysvinit package is not installed.
if os.path.islink('/sbin/init') and os.path.basename(os.readlink('/sbin/init')) == 'systemd':
return True
return False
def collect(self, module=None, collected_facts=None):
facts_dict = {}
if not module:
return facts_dict
collected_facts = collected_facts or {}
service_mgr_name = None
# TODO: detect more custom init setups like bootscripts, dmd, s6, Epoch, etc
# also other OSs other than linux might need to check across several possible candidates
# Mapping of proc_1 values to more useful names
proc_1_map = {
'procd': 'openwrt_init',
'runit-init': 'runit',
'svscan': 'svc',
'openrc-init': 'openrc',
}
# try various forms of querying pid 1
proc_1 = get_file_content('/proc/1/comm')
if proc_1 is None:
rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True)
# if command fails, or stdout is empty string or the output of the command starts with what looks like a PID,
# then the 'ps' command probably didn't work the way we wanted, probably because it's busybox
if rc != 0 or not proc_1.strip() or re.match(r' *[0-9]+ ', proc_1):
proc_1 = None
# The ps command above may return "COMMAND" if the user cannot read /proc, e.g. with grsecurity
if proc_1 == "COMMAND\n":
proc_1 = None
if proc_1 is None and os.path.islink('/sbin/init'):
proc_1 = os.readlink('/sbin/init')
if proc_1 is not None:
proc_1 = os.path.basename(proc_1)
proc_1 = to_native(proc_1)
proc_1 = proc_1.strip()
if proc_1 is not None and (proc_1 == 'init' or proc_1.endswith('sh')):
# many systems return init, so this cannot be trusted, if it ends in 'sh' it probably is a shell in a container
proc_1 = None
# if not init/None it should be an identifiable or custom init, so we are done!
if proc_1 is not None:
# Lookup proc_1 value in map and use proc_1 value itself if no match
service_mgr_name = proc_1_map.get(proc_1, proc_1)
# start with the easy ones
elif collected_facts.get('ansible_distribution', None) == 'MacOSX':
# FIXME: find way to query executable, version matching is not ideal
if LooseVersion(platform.mac_ver()[0]) >= LooseVersion('10.4'):
service_mgr_name = 'launchd'
else:
service_mgr_name = 'systemstarter'
elif 'BSD' in collected_facts.get('ansible_system', '') or collected_facts.get('ansible_system') in ['Bitrig', 'DragonFly']:
# FIXME: we might want to break out to individual BSDs or 'rc'
service_mgr_name = 'bsdinit'
elif collected_facts.get('ansible_system') == 'AIX':
service_mgr_name = 'src'
elif collected_facts.get('ansible_system') == 'SunOS':
service_mgr_name = 'smf'
elif collected_facts.get('ansible_distribution') == 'OpenWrt':
service_mgr_name = 'openwrt_init'
elif collected_facts.get('ansible_distribution') == 'SMGL':
service_mgr_name = 'simpleinit_msb'
elif collected_facts.get('ansible_system') == 'Linux':
# FIXME: mv is_systemd_managed
if self.is_systemd_managed(module=module):
service_mgr_name = 'systemd'
elif module.get_bin_path('initctl') and os.path.exists("/etc/init/"):
service_mgr_name = 'upstart'
elif os.path.exists('/sbin/openrc'):
service_mgr_name = 'openrc'
elif self.is_systemd_managed_offline(module=module):
service_mgr_name = 'systemd'
elif os.path.exists('/etc/init.d/'):
service_mgr_name = 'sysvinit'
elif os.path.exists('/etc/dinit.d/'):
service_mgr_name = 'dinit'
if not service_mgr_name:
# if we cannot detect, fallback to generic 'service'
service_mgr_name = 'service'
facts_dict['service_mgr'] = service_mgr_name
return facts_dict
| ServiceMgrFactCollector |
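The systemd canary test is easy to run standalone; a sketch without the Ansible module plumbing:
import os

def systemd_booted() -> bool:
    # same canaries as above, mirroring systemd's own sd_booted() check
    return any(
        os.path.exists(p)
        for p in ("/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/")
    )

print("systemd" if systemd_booted() else "something else")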
python | django__django | django/utils/safestring.py | {
"start": 653,
"end": 2178
} | class ____(str, SafeData):
"""
A str subclass that has been specifically marked as "safe" for HTML output
purposes.
"""
__slots__ = ()
def __add__(self, rhs):
"""
Concatenating a safe string with another safe bytestring or
safe string is safe. Otherwise, the result is no longer safe.
"""
if isinstance(rhs, str):
t = super().__add__(rhs)
if isinstance(rhs, SafeData):
t = SafeString(t)
return t
# Give the rhs object a chance to handle the addition, for example if
# the rhs object's class implements `__radd__`. More details:
# https://docs.python.org/3/reference/datamodel.html#object.__radd__
return NotImplemented
def __str__(self):
return self
SafeText = SafeString # For backwards compatibility since Django 2.0.
def _safety_decorator(safety_marker, func):
@wraps(func)
def wrapper(*args, **kwargs):
return safety_marker(func(*args, **kwargs))
return wrapper
@keep_lazy(SafeString)
def mark_safe(s):
"""
Explicitly mark a string as safe for (HTML) output purposes. The returned
object can be used everywhere a string is appropriate.
If used on a method as a decorator, mark the returned data as safe.
Can be called multiple times on a single string.
"""
if hasattr(s, "__html__"):
return s
if callable(s):
return _safety_decorator(mark_safe, s)
return SafeString(s)
| SafeString |
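A quick sketch of the ``__add__`` semantics above:
from django.utils.safestring import SafeString, mark_safe

s = mark_safe("<b>bold</b>")
assert isinstance(s, SafeString)
assert isinstance(s + mark_safe("!"), SafeString)  # safe + safe stays safe
assert not isinstance(s + "<i>", SafeString)       # safe + plain str loses safety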
python | PrefectHQ__prefect | src/prefect/server/events/schemas/automations.py | {
"start": 21199,
"end": 21849
} | class ____(ORMBaseModel, AutomationCore, extra="ignore"):
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
self.trigger._set_parent(self)
@classmethod
def model_validate(
cls: type[Self],
obj: Any,
*,
strict: bool | None = None,
from_attributes: bool | None = None,
context: dict[str, Any] | None = None,
) -> Self:
automation = super().model_validate(
obj, strict=strict, from_attributes=from_attributes, context=context
)
automation.trigger._set_parent(automation)
return automation
| Automation |
python | getsentry__sentry | tests/sentry/api/endpoints/test_rule_snooze.py | {
"start": 426,
"end": 914
} | class ____(APITestCase):
def setUp(self) -> None:
self.issue_alert_rule = Rule.objects.create(
label="test rule",
project=self.project,
owner_team_id=self.team.id,
)
self.metric_alert_rule = self.create_alert_rule(
organization=self.project.organization, projects=[self.project]
)
self.until = datetime.now(timezone.utc) + timedelta(days=10)
self.login_as(user=self.user)
| BaseRuleSnoozeTest |
python | great-expectations__great_expectations | tests/integration/sql_session_manager.py | {
"start": 1329,
"end": 4264
} | class ____:
POOL_CONFIG = PoolConfig(
poolclass=QueuePool,
pool_size=2,
max_overflow=3,
pool_recycle=5400, # 1.5 hours
pool_timeout=30, # 30 seconds
pool_pre_ping=True,
)
def __init__(self):
# It's ok to use ConnectionDetails as the key since that contains all the unique
# information needed to create an engine. If we allowed POOL_CONFIG to be configurable
# we'd need to incorporate that into the key.
self._engine_cache: dict[ConnectionDetails, sa.engine.Engine] = {}
def get_engine(
self, connection_details: ConnectionDetails, connect_args: ConnectArgs | None = None
) -> sa.engine.Engine:
if connect_args is None:
connect_args = {}
cache_key = connection_details
if cache_key not in self._engine_cache:
logger.info(f"Cache miss for engine: {cache_key}. Creating new engine.")
engine_kwargs = asdict(self.POOL_CONFIG)
logger.info(
f"Creating engine for {connection_details.dialect} with settings: {engine_kwargs}"
)
self._engine_cache[cache_key] = sa.create_engine(
connection_details.connection_string, **engine_kwargs, connect_args=connect_args
)
else:
logger.info(f"Cache hit for engine: {cache_key}")
return self._engine_cache[cache_key]
def dispose_all_engines(self):
logger.info("Disposing all cached SQLAlchemy engines.")
for key, engine in self._engine_cache.items():
logger.info(f"Disposing engine: {key}")
try:
engine.dispose()
except Exception:
logger.exception(f"Error disposing engine '{key}'")
self._engine_cache.clear()
def get_all_pool_statistics(
self,
) -> dict[ConnectionDetails, dict[str, Any]]:
stats: dict[ConnectionDetails, dict[str, Any]] = {}
for key, engine in self._engine_cache.items():
try:
pool = engine.pool
if isinstance(pool, QueuePool):
stats[key] = {
"size": pool.size(),
"checked_in": pool.checkedin(),
"overflow": pool.overflow(),
"checked_out": pool.checkedout(),
}
else:
logger.warning(
f"Pool for engine {key} is not a QueuePool. It is a {type(pool)}."
)
stats[key] = {
"type": f"{type(pool)}",
"status": f"{pool.status()}",
}
except Exception as e:
logger.exception(f"Error getting pool status for engine '{key}'")
stats[key] = {"error": str(e)}
return stats
| SessionSQLEngineManager |
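A hypothetical usage sketch; ``ConnectionDetails`` is assumed to be a hashable dataclass with ``dialect`` and ``connection_string`` fields, as the cache-key comment implies:
details = ConnectionDetails(dialect="sqlite", connection_string="sqlite://")  # hypothetical fields
manager = SessionSQLEngineManager()
engine = manager.get_engine(details)          # cache miss: engine built with POOL_CONFIG
assert manager.get_engine(details) is engine  # cache hit: same engine object
print(manager.get_all_pool_statistics())
manager.dispose_all_engines()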
python | allegroai__clearml | clearml/automation/optimization.py | {
"start": 11290,
"end": 39893
} | class ____(object):
"""
The base search strategy class. Inherit this class to implement your custom strategy.
"""
_tag = "optimization"
_job_class: ClearmlJob = ClearmlJob
def __init__(
self,
base_task_id: str,
hyper_parameters: Sequence[Parameter],
objective_metric: Objective,
execution_queue: str,
num_concurrent_workers: int,
pool_period_min: float = 2.0,
time_limit_per_job: Optional[float] = None,
compute_time_limit: Optional[float] = None,
min_iteration_per_job: Optional[int] = None,
max_iteration_per_job: Optional[int] = None,
total_max_jobs: Optional[int] = None,
**_: Any
) -> ():
"""
Initialize a search strategy optimizer.
:param str base_task_id: The Task ID (str)
:param list hyper_parameters: The list of parameter objects to optimize over.
:param Objective objective_metric: The Objective metric to maximize / minimize.
:param str execution_queue: The execution queue to use for launching Tasks (experiments).
:param int num_concurrent_workers: The maximum number of concurrent running machines.
:param float pool_period_min: The time between two consecutive pools (minutes).
:param float time_limit_per_job: The maximum execution time per single job in minutes. When time limit is
exceeded, the job is aborted. (Optional)
:param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
all jobs aborted. (Optional)
:param int min_iteration_per_job: The minimum iterations (of the Objective metric) per single job (Optional)
:param int max_iteration_per_job: The maximum iterations (of the Objective metric) per single job.
When maximum iterations is exceeded, the job is aborted. (Optional)
:param int total_max_jobs: The total maximum jobs for the optimization process. The default value is ``None``,
for unlimited.
"""
super(SearchStrategy, self).__init__()
self._base_task_id = base_task_id
self._hyper_parameters = hyper_parameters
self._objective_metric = objective_metric
self._execution_queue = execution_queue
self._num_concurrent_workers = num_concurrent_workers
self.pool_period_minutes = pool_period_min
self.time_limit_per_job = time_limit_per_job
self.compute_time_limit = compute_time_limit
self.max_iteration_per_job = max_iteration_per_job
self.min_iteration_per_job = min_iteration_per_job
self.total_max_jobs = total_max_jobs
self._stop_event = Event()
self._current_jobs = []
self._pending_jobs = []
self._num_jobs = 0
self._job_parent_id = None
self._job_project_id = None
self._created_jobs_ids = {}
self._naming_function = None
self._job_project = {}
self.budget = Budget(
jobs_limit=self.total_max_jobs,
compute_time_limit=self.compute_time_limit if self.compute_time_limit else None,
iterations_limit=self.total_max_jobs * self.max_iteration_per_job
if self.max_iteration_per_job and self.total_max_jobs
else None,
)
self._validate_base_task()
self._optimizer_task = None
def start(self) -> ():
"""
Start the Optimizer controller function loop(). If the calling process is stopped, the controller will stop
as well.
.. important::
This function returns only after the optimization is completed or :meth:`stop` was called.
"""
counter = 0
while True:
logger.debug("optimization loop #{}".format(counter))
if not self.process_step():
break
if self._stop_event.wait(timeout=self.pool_period_minutes * 60.0):
break
counter += 1
def stop(self) -> ():
"""
Stop the current running optimization loop. Called from a different thread than the :meth:`start`.
"""
self._stop_event.set()
def process_step(self) -> bool:
"""
Helper function; overriding it is optional. Used by the default implementation of :meth:`start`.
Main optimization loop, called from the daemon thread created by :meth:`start`.
- Call monitor job on every ``ClearmlJob`` in jobs:
- Check the performance or elapsed time, and then decide whether to kill the jobs.
- Call create_job:
- Check if spare job slots exist, and if they do call create a new job based on previous tested experiments.
:return: True, if continue the optimization. False, if immediately stop.
"""
updated_jobs = []
for job in self._current_jobs:
if self.monitor_job(job):
updated_jobs.append(job)
self._current_jobs = updated_jobs
pending_jobs = []
for job in self._pending_jobs:
if job.is_pending():
pending_jobs.append(job)
else:
self.budget.jobs.update(job.task_id(), 1)
self._pending_jobs = pending_jobs
free_workers = self._num_concurrent_workers - len(self._current_jobs)
# do not create more jobs if we hit the limit
if self.total_max_jobs and self._num_jobs >= self.total_max_jobs:
return bool(self._current_jobs)
# see how many free slots we have and create job
for i in range(max(0, free_workers)):
new_job = self.create_job()
if not new_job:
break
if not new_job.launch(self._execution_queue):
# error enqueuing Job, something wrong here
continue
self._num_jobs += 1
self._current_jobs.append(new_job)
self._pending_jobs.append(new_job)
return bool(self._current_jobs)
def create_job(self) -> Optional[ClearmlJob]:
"""
Helper function; overriding it is optional. Used by the default implementation of :meth:`process_step`.
Create a new job if needed. return the newly created job. If no job needs to be created, return ``None``.
:return: A Newly created ClearmlJob object, or None if no ClearmlJob created.
"""
return None
def monitor_job(self, job: ClearmlJob) -> bool:
"""
Helper function; overriding it is optional. Used by the default implementation of :meth:`process_step`.
Check if the job needs to be aborted or already completed.
If returns ``False``, the job was aborted / completed, and should be taken off the current job list.
If there is a budget limitation, this call should update
``self.budget.compute_time.update`` / ``self.budget.iterations.update``
:param ClearmlJob job: A ``ClearmlJob`` object to monitor.
:return: False, if the job is no longer relevant.
"""
abort_job = self.update_budget_per_job(job)
if abort_job:
job.abort()
return False
return not job.is_stopped()
def update_budget_per_job(self, job: ClearmlJob) -> bool:
abort_job = False
if self.time_limit_per_job:
elapsed = job.elapsed() / 60.0
if elapsed > 0:
self.budget.compute_time.update(job.task_id(), elapsed)
if elapsed > self.time_limit_per_job:
abort_job = True
if self.compute_time_limit:
if not self.time_limit_per_job:
elapsed = job.elapsed() / 60.0
if elapsed > 0:
self.budget.compute_time.update(job.task_id(), elapsed)
if self.max_iteration_per_job:
iterations = self._get_job_iterations(job)
if iterations and iterations > 0:
self.budget.iterations.update(job.task_id(), iterations)
if iterations > self.max_iteration_per_job:
abort_job = True
return abort_job
def get_running_jobs(self) -> Sequence[ClearmlJob]:
"""
Return the current running ClearmlJob.
:return: List of ClearmlJob objects.
"""
return self._current_jobs
def get_created_jobs_ids(self) -> Mapping[str, dict]:
"""
Return a Task IDs dict created by this optimizer until now, including completed and running jobs.
The values of the returned dict are the parameters used in the specific job
:return: dict of task IDs (str) as keys, and their parameters dict as values.
"""
return {job_id: job_val[1] for job_id, job_val in self._created_jobs_ids.items()}
def get_created_jobs_tasks(self) -> Mapping[str, dict]:
"""
Return a Task IDs dict created by this optimizer until now.
The values of the returned dict are the ClearmlJob.
:return: dict of task IDs (str) as keys, and their ClearmlJob as values.
"""
return {job_id: job_val[0] for job_id, job_val in self._created_jobs_ids.items()}
def get_top_experiments(self, top_k: int) -> Sequence[Task]:
"""
Return a list of Tasks of the top performing experiments, based on the controller ``Objective`` object.
:param int top_k: The number of Tasks (experiments) to return.
:return: A list of Task objects, ordered by performance, where index 0 is the best performing Task.
"""
return self._objective_metric.get_top_tasks(
top_k=top_k, optimizer_task_id=self._job_parent_id or self._base_task_id
)
def get_top_experiments_id_metrics_pair(
self,
top_k: int,
all_metrics: bool = False,
only_completed: bool = False,
) -> Sequence[Union[str, dict]]:
"""
Return a list of pairs (Task ID, scalar metric dict) of the top performing experiments.
Order is based on the controller ``Objective`` object.
:param int top_k: The number of Tasks (experiments) to return.
:param all_metrics: Default False, only return the objective metric on the metrics dictionary.
If True, return all scalar metrics of the experiment
:param only_completed: return only completed Tasks. Default False.
:return: A list of pairs (Task ID, metric values dict), ordered by performance,
where index 0 is the best performing Task.
Example w/ all_metrics=False:
.. code-block:: py
[
('0593b76dc7234c65a13a301f731958fa',
{
'accuracy per class/cat': {
'metric': 'accuracy per class',
'variant': 'cat',
'value': 0.119,
'min_value': 0.119,
'max_value': 0.782
},
}
),
]
Example w/ all_metrics=True:
.. code-block:: py
[
('0593b76dc7234c65a13a301f731958fa',
{
'accuracy per class/cat': {
'metric': 'accuracy per class',
'variant': 'cat',
'value': 0.119,
'min_value': 0.119,
'max_value': 0.782
},
'accuracy per class/deer': {
'metric': 'accuracy per class',
'variant': 'deer',
'value': 0.219,
'min_value': 0.219,
'max_value': 0.282
},
}
),
]
"""
additional_filters = dict(page_size=int(top_k), page=0)
if only_completed:
additional_filters["status"] = ["completed"]
# noinspection PyProtectedMember
top_tasks_ids_metric = self._get_child_tasks_ids(
parent_task_id=self._job_parent_id or self._base_task_id,
order_by=self._objective_metric._get_last_metrics_encode_field()[0],
additional_filters=additional_filters,
additional_fields=["last_metrics"],
)
title_series = self._objective_metric.get_objective_metric() if not all_metrics else [(None, None)]
titles = [ts[0] for ts in title_series]
series = [ts[1] for ts in title_series]
return [
(
i,
{
"{}/{}".format(v["metric"], v["variant"]): v
for variant in metric.values()
for v in variant.values()
if all_metrics or (v["metric"] in titles and v["variant"] in series)
},
)
for i, metric in top_tasks_ids_metric
]
def get_top_experiments_details(
self,
top_k: int,
all_metrics: bool = False,
all_hyper_parameters: bool = False,
only_completed: bool = False,
) -> Sequence[Union[str, dict]]:
"""
Return a list of dictionaries of the top performing experiments.
Example: ``[{'task_id': Task-ID, 'metrics': scalar-metric-dict, 'hyper_parameters': Hyper-Parameters},]``
Order is based on the controller ``Objective`` object.
:param int top_k: The number of Tasks (experiments) to return.
:param all_metrics: Default False, only return the objective metric on the metrics dictionary.
If True, return all scalar metrics of the experiment
:param all_hyper_parameters: Default False. If True, return all the hyperparameters from all the sections.
:param only_completed: return only completed Tasks. Default False.
:return: A list of dictionaries ``({task_id: '', hyper_parameters: {}, metrics: {}})``, ordered by performance,
where index 0 is the best performing Task.
Example w/ all_metrics=False:
.. code-block:: py
[
{
task_id: '0593b76dc7234c65a13a301f731958fa',
hyper_parameters: {'General/lr': '0.03', 'General/batch_size': '32'},
metrics: {
'accuracy per class/cat': {
'metric': 'accuracy per class',
'variant': 'cat',
'value': 0.119,
'min_value': 0.119,
'max_value': 0.782
},
}
},
]
Example w/ all_metrics=True:
.. code-block:: py
[
{
task_id: '0593b76dc7234c65a13a301f731958fa',
hyper_parameters: {'General/lr': '0.03', 'General/batch_size': '32'},
metrics: {
'accuracy per class/cat': {
'metric': 'accuracy per class',
'variant': 'cat',
'value': 0.119,
'min_value': 0.119,
'max_value': 0.782
},
'accuracy per class/deer': {
'metric': 'accuracy per class',
'variant': 'deer',
'value': 0.219,
'min_value': 0.219,
'max_value': 0.282
},
}
},
]
"""
additional_filters = dict(page_size=int(top_k), page=0)
if only_completed:
additional_filters["status"] = ["completed"]
# noinspection PyProtectedMember
top_tasks_ids_metric_params = self._get_child_tasks_ids(
parent_task_id=self._job_parent_id or self._base_task_id,
order_by=self._objective_metric._get_last_metrics_encode_field()[0]
if self._objective_metric.len == 1
else None,
additional_filters=additional_filters,
additional_fields=["last_metrics", "hyperparams"],
)
if self._objective_metric.len != 1:
top_tasks_ids_metric_params_dict = {}
for task in top_tasks_ids_metric_params:
objective = self._objective_metric.get_objective(task[0])
if objective is None or any(o is None for o in objective):
continue
top_tasks_ids_metric_params_dict[task[0]] = (objective, task)
# noinspection PyProtectedMember
sorted_ids = self._objective_metric._sort_jobs_by_domination(top_tasks_ids_metric_params_dict)
top_tasks_ids_metric_params = [top_tasks_ids_metric_params_dict[s][1] for s in sorted_ids]
# get hp_parameters:
hp_params = set(p.name for p in self._hyper_parameters)
title_series = self._objective_metric.get_objective_metric() if not all_metrics else [(None, None)]
titles = [ts[0] for ts in title_series]
series = [ts[1] for ts in title_series]
return [
{
"task_id": tid,
"hyper_parameters": {
"{}/{}".format(p.section, p.name): p.value
for params in (param_sections or {}).values()
for p in (params or {}).values()
if all_hyper_parameters or "{}/{}".format(p.section, p.name) in hp_params
},
"metrics": {
"{}/{}".format(v["metric"], v["variant"]): v
for variant in metric.values()
for v in variant.values()
if all_metrics or v["metric"] in titles and v["variant"] in series
},
}
for tid, metric, param_sections in top_tasks_ids_metric_params
]
def get_objective_metric(
self,
) -> Union[Tuple[str, str], List[Tuple[str, str]]]:
"""
Return the metric title, series pair of the objective.
:return: (title, series)
"""
objective = self._objective_metric.get_objective_metric()
return objective[0] if self._objective_metric.len == 1 else objective
def helper_create_job(
self,
base_task_id: str,
parameter_override: Optional[Mapping[str, str]] = None,
task_overrides: Optional[Mapping[str, str]] = None,
tags: Optional[Sequence[str]] = None,
parent: Optional[str] = None,
**kwargs: Any
) -> ClearmlJob:
"""
Create a Job using the specified arguments, ``ClearmlJob`` for details.
:return: A newly created Job instance.
"""
if parameter_override:
param_str = ["{}={}".format(k, parameter_override[k]) for k in sorted(parameter_override.keys())]
if self._naming_function:
name = self._naming_function(self._base_task_name, parameter_override)
elif self._naming_function is False:
name = None
else:
name = "{}: {}".format(self._base_task_name, " ".join(param_str))
comment = "\n".join(param_str)
else:
name = None
comment = None
tags = (tags or []) + [
self._tag,
"opt" + (": {}".format(self._job_parent_id) if self._job_parent_id else ""),
]
new_job = self._job_class(
base_task_id=base_task_id,
parameter_override=parameter_override,
task_overrides=task_overrides,
tags=tags,
parent=parent or self._job_parent_id,
name=name,
comment=comment,
project=self._job_project_id or self._get_task_project(parent or self._job_parent_id),
**kwargs
)
self._created_jobs_ids[new_job.task_id()] = (new_job, parameter_override)
logger.info("Creating new Task: {}".format(parameter_override))
return new_job
def set_job_class(self, job_class: ClearmlJob) -> ():
"""
Set the class to use for the :meth:`helper_create_job` function.
:param ClearmlJob job_class: The Job Class type.
"""
self._job_class = job_class
def set_job_default_parent(
self,
job_parent_task_id: Optional[str],
project_name: Optional[str] = None,
) -> ():
"""
Set the default parent for all Jobs created by the :meth:`helper_create_job` method.
:param str job_parent_task_id: The parent Task ID.
:param str project_name: If specified, create the jobs in the specified project
"""
self._job_parent_id = job_parent_task_id
# noinspection PyProtectedMember
self._job_project_id = (
get_or_create_project(
session=Task._get_default_session(),
project_name=project_name,
description="HPO process spawned Tasks",
)
if project_name
else None
)
def set_job_naming_scheme(self, naming_function: Optional[Callable[[str, dict], str]]) -> ():
"""
Set the function used to name a newly created job.
:param callable naming_function: Callable function for naming a newly created job.
Use the following format:
.. code-block:: py
naming_functor(base_task_name, argument_dict) -> str
"""
self._naming_function = naming_function
def set_optimizer_task(self, task: Task) -> ():
"""
Set the optimizer task object to be used to store/generate reports on the optimization process.
Usually this is the current task of this process.
:param Task task: The optimizer`s current Task.
"""
self._optimizer_task = task
def _validate_base_task(self) -> ():
"""
Check the base task exists and contains the requested Objective metric and hyperparameters.
"""
# check if the task exists
try:
task = Task.get_task(task_id=self._base_task_id)
self._base_task_name = task.name
except ValueError:
raise ValueError("Could not find base task id {}".format(self._base_task_id))
# check if the hyper-parameters exist:
task_parameters = task.get_parameters(backwards_compatibility=False)
missing_params = [h.name for h in self._hyper_parameters if h.name not in task_parameters]
if missing_params:
logger.warning(
"Could not find requested hyper-parameters {} on base task {}".format(
missing_params, self._base_task_id
)
)
# check if the objective metric exists (i.e. no typos etc)
if self._objective_metric.get_objective(self._base_task_id) is None:
logger.warning(
"Could not find requested metric {} report on base task {}".format(
self._objective_metric.get_objective_metric(), self._base_task_id
)
)
def _get_task_project(self, parent_task_id: str) -> Optional[str]:
if not parent_task_id:
return
if parent_task_id not in self._job_project:
task = Task.get_task(task_id=parent_task_id)
self._job_project[parent_task_id] = task.project
return self._job_project.get(parent_task_id)
def _get_job_iterations(self, job: Union[ClearmlJob, Task]) -> int:
iteration_value = self._objective_metric.get_current_raw_objective(job)
if iteration_value is not None and any(iv is not None and iv[0] is not None for iv in iteration_value):
return max(iv[0] for iv in iteration_value if iv is not None)
return -1
@classmethod
def _get_child_tasks_ids(
cls,
parent_task_id: str,
status: Optional[Union[Task.TaskStatusEnum, Sequence[Task.TaskStatusEnum]]] = None,
order_by: Optional[str] = None,
additional_fields: Optional[Sequence[str]] = None,
additional_filters: Optional[dict] = None,
) -> Union[Sequence[str], Sequence[List]]:
"""
Helper function. Return a list of task IDs tagged automl, with specific ``status``, ordered by ``sort_field``.
:param str parent_task_id: The base Task ID (parent).
:param status: The current status of requested tasks (for example, ``in_progress`` and ``completed``).
:param str order_by: The field name to sort results.
Examples:
.. code-block:: py
"-last_metrics.title.series.min"
"last_metrics.title.series.max"
"last_metrics.title.series.last"
"execution.parameters.name"
"updated"
:param additional_fields: Optional, list of fields (str) to return next to the Task ID,
this implies return value is a list of pairs
:param dict additional_filters: The additional task filters.
:return: A list of Task IDs (str)
"""
task_filter = {
"parent": parent_task_id,
# 'tags': [cls._tag],
# since we have auto archive we do not want to filter out archived tasks
# 'system_tags': ['-archived'],
}
task_filter.update(additional_filters or {})
if status:
task_filter["status"] = status if isinstance(status, (tuple, list)) else [status]
if order_by and (order_by.startswith("last_metrics") or order_by.startswith("-last_metrics")):
parts = order_by.split(".")
if parts[-1] in ("min", "max", "last"):
title = hashlib.md5(str(parts[1]).encode("utf-8")).hexdigest()
series = hashlib.md5(str(parts[2]).encode("utf-8")).hexdigest()
minmax = "min_value" if "min" in parts[3] else ("max_value" if "max" in parts[3] else "value")
# e.g. "-last_metrics.<md5(title)>.<md5(series)>.max_value"
order_by = "{}last_metrics.{}.{}.{}".format(
"-" if order_by and order_by[0] == "-" else "",
title,
series,
minmax,
)
if order_by:
task_filter["order_by"] = [order_by]
if additional_fields:
task_filter["only_fields"] = list(set(list(additional_fields) + ["id"]))
# noinspection PyProtectedMember
task_objects = Task._query_tasks(**task_filter)
if not additional_fields:
return [t.id for t in task_objects]
return [[t.id] + [getattr(t, f, None) for f in additional_fields] for t in task_objects]
@classmethod
def _get_child_tasks(
cls,
parent_task_id: str,
status: Optional[Union[Task.TaskStatusEnum, Sequence[Task.TaskStatusEnum]]] = None,
order_by: Optional[str] = None,
additional_filters: Optional[dict] = None,
) -> Sequence[Task]:
"""
Helper function. Return a list of tasks tagged automl, with specific ``status``, ordered by ``sort_field``.
:param str parent_task_id: The base Task ID (parent).
:param status: The current status of requested tasks (for example, ``in_progress`` and ``completed``).
:param str order_by: The field name to sort results.
Examples:
.. code-block:: py
"-last_metrics.title.series.min"
"last_metrics.title.series.max"
"last_metrics.title.series.last"
"execution.parameters.name"
"updated"
:param dict additional_filters: The additional task filters.
:return: A list of Task objects
"""
return [
Task.get_task(task_id=t_id)
for t_id in cls._get_child_tasks_ids(
parent_task_id=parent_task_id,
status=status,
order_by=order_by,
additional_filters=additional_filters,
)
]
| SearchStrategy |
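A minimal hypothetical subclass: implementing only ``create_job`` yields a random-search strategy. It assumes each ``Parameter`` exposes a ``get_value()`` returning a ``{name: sampled_value}`` dict; this is a sketch, not the library's own implementation:
class SimpleRandomSearch(SearchStrategy):
    def create_job(self):
        # process_step() also guards the budget, but stop early here as well
        if self.total_max_jobs and self._num_jobs >= self.total_max_jobs:
            return None
        parameter_override = {}
        for p in self._hyper_parameters:
            parameter_override.update(p.get_value())  # assumed {name: value} sample
        return self.helper_create_job(
            base_task_id=self._base_task_id,
            parameter_override=parameter_override,
        )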
python | PrefectHQ__prefect | src/prefect/client/schemas/responses.py | {
"start": 16508,
"end": 16678
} | class ____(PrefectBaseModel):
model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore")
id: UUID
name: str
limit: int
| MinimalConcurrencyLimitResponse |
python | huggingface__transformers | src/transformers/models/blip_2/modeling_blip_2.py | {
"start": 3338,
"end": 5399
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Contrastive loss for image-text similarity.
logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
similarity scores.
logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
similarity scores.
text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The text embeddings obtained by applying the projection layer to the pooled output.
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The image embeddings obtained by applying the projection layer to the pooled output.
text_model_output (`BaseModelOutputWithPooling`):
The output of the [`Blip2QFormerModel`].
vision_model_output (`BaseModelOutputWithPooling`):
The output of the [`Blip2VisionModel`].
"""
loss: Optional[torch.FloatTensor] = None
logits_per_image: Optional[torch.FloatTensor] = None
logits_per_text: Optional[torch.FloatTensor] = None
text_embeds: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
text_model_output: BaseModelOutputWithPooling = None
vision_model_output: BaseModelOutputWithPooling = None
def to_tuple(self) -> tuple[Any]:
return tuple(
self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
for k in self.keys()
)
@dataclass
@auto_docstring(
custom_intro="""
Base class for text model's outputs that also contains a pooling of the last hidden states.
"""
)
# Copied from transformers.models.clip.modeling_clip.CLIPTextModelOutput with CLIP->Blip2
| Blip2ImageTextMatchingModelOutput |
python | pytorch__pytorch | torch/ao/nn/intrinsic/qat/modules/conv_fused.py | {
"start": 18484,
"end": 20301
} | class ____(nnqat.Conv1d, nni._FusedModule):
r"""A ConvReLU1d module is a fused module of Conv1d and ReLU, attached with
FakeQuantize modules for weight for
quantization aware training.
We combined the interface of :class:`~torch.nn.Conv1d` and
:class:`~torch.nn.BatchNorm1d`.
Attributes:
weight_fake_quant: fake quant module for weight
"""
_FLOAT_MODULE: ClassVar[type[nni.ConvReLU1d]] = nni.ConvReLU1d # type: ignore[assignment]
_FLOAT_CONV_MODULE: ClassVar[type[nn.Conv1d]] = nn.Conv1d
_FLOAT_BN_MODULE: ClassVar[type[nn.Module] | None] = None
_FLOAT_RELU_MODULE: ClassVar[type[nn.Module] | None] = nn.ReLU
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
qconfig=None,
):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
# pyrefly: ignore [bad-argument-type]
padding_mode=padding_mode,
qconfig=qconfig,
)
assert qconfig, "qconfig must be provided for QAT module"
self.qconfig = qconfig
self.weight_fake_quant = self.qconfig.weight()
def forward(self, input):
return F.relu(
self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
)
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False): # type: ignore[override]
return super().from_float(
mod, use_precomputed_fake_quant=use_precomputed_fake_quant
)
| ConvReLU1d |
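A short usage sketch; the fused module behaves like Conv1d followed by ReLU, with the weight routed through the fake-quant observer:
import torch
from torch.ao.nn.intrinsic.qat import ConvReLU1d
from torch.ao.quantization import get_default_qat_qconfig

qconfig = get_default_qat_qconfig("fbgemm")
m = ConvReLU1d(3, 8, kernel_size=3, qconfig=qconfig)
y = m(torch.randn(2, 3, 16))                     # (batch, channels, length)
assert y.shape == (2, 8, 14) and (y >= 0).all()  # fused ReLU clamps at zero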
python | pytorch__pytorch | test/distributed/tensor/debug/test_comm_mode_features.py | {
"start": 731,
"end": 12093
} | class ____(DTensorTestBase):
# checks if parameter / sharding info is the same as ground truth
def check_same_set_of_keys(self, dict1, dict2):
"""
Used to ensure the comm_mode parameter/sharding dictionaries contain the same information produced by the
ground truth
"""
dict1_keys = []
dict2_keys = []
for key in dict1:
for nested_key in dict1[key]:
dict1_keys.append((key, nested_key))
for key in dict2:
for nested_key in dict2[key]:
dict2_keys.append((key, nested_key))
self.assertEqual(len(dict1_keys), len(dict2_keys))
for i in range(len(dict1_keys)):
self.assertEqual(dict1_keys[i], dict2_keys[i])
# generates the ground truth parameter and sharding info
def ground_truth(self, model):
"""
Used to generate the ground-truth parameter and sharding info for a given distributed model to
verify comm_mode correctness
"""
module_parameters_dict: dict[str, Any] = {}
module_sharding_dict: dict[str, Any] = {}
for name, parameters in model.named_parameters():
# splits name into module name to create FQN and parameter name
module_name = model.__class__.__name__ + "." + name.rsplit(".", 1)[0]
parameter_name = name.rsplit(".", 1)[1]
if module_name not in module_parameters_dict:
module_parameters_dict[module_name] = {}
module_parameters_dict[module_name][parameter_name] = parameters.data
if isinstance(parameters.data, DTensor):
key_name = module_name + "." + parameter_name
module_sharding_dict[key_name] = parameters.data.placements
return module_parameters_dict, module_sharding_dict
@with_comms
def test_MLP_distributed_sharding_display(self):
"""
tests parameters and sharding on a module level
"""
device_mesh = DeviceMesh(
self.device_type,
torch.arange(0, NUM_DEVICES),
)
inp_size = [8, 10]
torch.manual_seed(0)
inp = torch.rand(*inp_size, device=self.device_type)
model = MLPModule(self.device_type)
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
model = parallelize_module(model, device_mesh, parallelize_plan)
comm_mode = CommDebugMode()
with comm_mode:
output_tp = model(inp)
output_tp.sum().backward()
module_parameters_dict, module_sharding_dict = self.ground_truth(model)
# checks if parameter / sharding info is the same as ground truth
self.check_same_set_of_keys(
module_parameters_dict, comm_mode.get_parameter_info()
)
self.check_same_set_of_keys(module_sharding_dict, comm_mode.get_sharding_info())
@skipIfHpu
@with_comms
def test_MLPStacked_distributed_sharding_display(self):
"""
tests model with nested modules and makes sure comm_mode correctly resets parameter and sharding information
"""
device_mesh = DeviceMesh(
self.device_type,
torch.arange(0, NUM_DEVICES),
)
inp_size = [8, 10]
torch.manual_seed(0)
inp = torch.rand(*inp_size, device=self.device_type)
model = MLPModule(self.device_type)
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
model = parallelize_module(model, device_mesh, parallelize_plan)
comm_mode = CommDebugMode()
with comm_mode:
output_tp = model(inp)
output_tp.sum().backward()
model2 = MLPStacked(self.device_type)
parallelize_plan = {
"layers.0.net1": ColwiseParallel(),
"layers.0.net2": RowwiseParallel(),
"layers.1.net1": ColwiseParallel(),
"layers.1.net2": RowwiseParallel(),
}
model2 = parallelize_module(model2, device_mesh, parallelize_plan)
with comm_mode:
# ensures that comm_mode is resetting properly
self.assertEqual(comm_mode.get_parameter_info(), {})
self.assertEqual(comm_mode.get_sharding_info(), {})
output_tp = model2(inp)
module_parameters_dict, module_sharding_dict = self.ground_truth(model2)
self.check_same_set_of_keys(
module_parameters_dict, comm_mode.get_parameter_info()
)
self.check_same_set_of_keys(module_sharding_dict, comm_mode.get_sharding_info())
self.assertEqual(len(comm_mode.get_sharding_info()), 8)
@with_comms
def test_MLP_module_tracing(self):
"""
tests module-level tracing for MLP module
"""
device_mesh = DeviceMesh(
self.device_type,
torch.arange(0, NUM_DEVICES),
)
inp_size = [8, 10]
torch.manual_seed(0)
inp = torch.rand(*inp_size, device=self.device_type)
model = MLPModule(self.device_type)
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
model = parallelize_module(model, device_mesh, parallelize_plan)
comm_mode = CommDebugMode()
with comm_mode:
output_tp = model(inp)
output_tp.sum().backward()
# checks to see if all sub-modules make it into the module_depth_dictionary
self.assertEqual(len(comm_mode.advanced_module_tracker.module_helper_dict), 5)
# checks to see if all collectives were correctly traced at the module-level
self.assertEqual(
comm_mode.comm_module_counts["Global"]["forward"][
c10d_functional.all_reduce
],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["MLPModule"]["forward"][
c10d_functional.all_reduce
],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["MLPModule.net2"]["forward"][
c10d_functional.all_reduce
],
1,
)
@skipIfHpu
@skip_unless_torch_gpu
@xfailIf(TEST_XPU) # https://github.com/intel/torch-xpu-ops/issues/1555
@with_comms
def test_transformer_module_tracing(self, is_seq_parallel=False):
"""
tests module-level tracing for more complicated transformer module and
ensures that comm_module depth and tracing dictionaries correctly reset
"""
device_mesh = DeviceMesh(
self.device_type,
torch.arange(0, NUM_DEVICES),
)
inp_size = [8, 10]
torch.manual_seed(0)
inp = torch.rand(*inp_size, device=self.device_type)
model = MLPModule(self.device_type)
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
model = parallelize_module(model, device_mesh, parallelize_plan)
comm_mode = CommDebugMode()
with comm_mode:
self.assertEqual(
len(comm_mode.advanced_module_tracker.module_helper_dict), 1
)
self.assertEqual(
comm_mode.comm_module_counts,
{"Global": {"forward": {}, "backward": {}}},
)
model(inp)
model_args = ModelArgs(dropout_p=0.0)
model2 = Transformer(model_args).to(device=self.device_type)
model2 = Transformer.parallelize(model2, device_mesh, is_seq_parallel)
inp_size = [8, 8]
inp = torch.randint(model_args.vocab_size, inp_size, device=self.device_type)
inp = distribute_tensor(inp, device_mesh=device_mesh)
comm_mode = CommDebugMode()
with comm_mode:
model2(inp)
# checks to see if all collectives were correctly traced at the module-level
self.assertEqual(
comm_mode.comm_module_counts["Global"]["forward"][
c10d_functional.all_reduce
],
6,
)
self.assertEqual(
comm_mode.comm_module_counts["Global"]["forward"][
c10d_functional.all_gather_into_tensor
],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer"]["forward"][
c10d_functional.all_reduce
],
6,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer"]["forward"][
c10d_functional.all_gather_into_tensor
],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.tok_embeddings"]["forward"][
c10d_functional.all_reduce
],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.pos_embeddings"]["forward"][
c10d_functional.all_reduce
],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.0"]["forward"][
c10d_functional.all_reduce
],
2,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.0.attention"]["forward"][
c10d_functional.all_reduce
],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.0.attention.wo"][
"forward"
][c10d_functional.all_reduce],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.0.feed_forward"][
"forward"
][c10d_functional.all_reduce],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.0.feed_forward.w2"][
"forward"
][c10d_functional.all_reduce],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.1"]["forward"][
c10d_functional.all_reduce
],
2,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.1.attention"]["forward"][
c10d_functional.all_reduce
],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.1.attention.wo"][
"forward"
][c10d_functional.all_reduce],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.1.feed_forward"][
"forward"
][c10d_functional.all_reduce],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.1.feed_forward.w2"][
"forward"
][c10d_functional.all_reduce],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.output"]["forward"][
c10d_functional.all_gather_into_tensor
],
1,
)
if __name__ == "__main__":
run_tests()
| TestCommModeFeatures |
python | coleifer__peewee | tests/models.py | {
"start": 148041,
"end": 150942
} | class ____(PGOnConflictTests, ModelTestCase):
@requires_postgresql
@requires_models(UKV)
def test_conflict_target_constraint(self):
u1 = UKV.create(key='k1', value='v1')
u2 = UKV.create(key='k2', value='v2')
ret = (UKV.insert(key='k1', value='v1', extra='e1')
.on_conflict(conflict_target=(UKV.key, UKV.value),
preserve=(UKV.extra,))
.execute())
self.assertEqual(ret, u1.id)
# Changes were saved successfully.
u1_db = UKV.get(UKV.key == 'k1')
self.assertEqual(u1_db.key, 'k1')
self.assertEqual(u1_db.value, 'v1')
self.assertEqual(u1_db.extra, 'e1')
self.assertEqual(UKV.select().count(), 2)
ret = (UKV.insert(key='k2', value='v2', extra='e2')
.on_conflict(conflict_constraint='ukv_key_value',
preserve=(UKV.extra,))
.execute())
self.assertEqual(ret, u2.id)
# Changes were saved successfully.
u2_db = UKV.get(UKV.key == 'k2')
self.assertEqual(u2_db.key, 'k2')
self.assertEqual(u2_db.value, 'v2')
self.assertEqual(u2_db.extra, 'e2')
self.assertEqual(UKV.select().count(), 2)
ret = (UKV.insert(key='k3', value='v3', extra='e3')
.on_conflict(conflict_target=[UKV.key, UKV.value],
preserve=[UKV.extra])
.execute())
self.assertTrue(ret > u2_db.id)
self.assertEqual(UKV.select().count(), 3)
@requires_models(UKV, UKVRel)
def test_conflict_ambiguous_column(self):
# k1/v1/e1, k2/v2/e0, k3/v3/e1
for i in [1, 2, 3]:
UKV.create(key='k%s' % i, value='v%s' % i, extra='e%s' % (i % 2))
UKVRel.create(key='k1', value='v1', extra='x1')
UKVRel.create(key='k2', value='v2', extra='x2')
subq = UKV.select(UKV.key, UKV.value, UKV.extra)
query = (UKVRel
.insert_from(subq, [UKVRel.key, UKVRel.value, UKVRel.extra])
.on_conflict(conflict_target=[UKVRel.key, UKVRel.value],
preserve=[UKVRel.extra],
where=(UKVRel.key != 'k2')))
self.assertSQL(query, (
'INSERT INTO "ukv_rel" ("key", "value", "extra") '
'SELECT "t1"."key", "t1"."value", "t1"."extra" FROM "ukv" AS "t1" '
'ON CONFLICT ("key", "value") DO UPDATE '
'SET "extra" = EXCLUDED."extra" '
'WHERE ("ukv_rel"."key" != ?) RETURNING "ukv_rel"."id"'), ['k2'])
query.execute()
query = (UKVRel
.select(UKVRel.key, UKVRel.value, UKVRel.extra)
.order_by(UKVRel.key))
self.assertEqual(list(query.tuples()), [
('k1', 'v1', 'e1'),
('k2', 'v2', 'x2'),
('k3', 'v3', 'e1')])
| TestUpsertPostgresql |
python | sympy__sympy | sympy/functions/combinatorial/numbers.py | {
"start": 70493,
"end": 71753
} | class ____(DefinedFunction):
r"""
Calculate the Euler totient function phi(n)
``totient(n)`` or `\phi(n)` is the number of positive integers `\leq` n
that are relatively prime to n.
Examples
========
>>> from sympy.functions.combinatorial.numbers import totient
>>> totient(1)
1
>>> totient(25)
20
>>> totient(45) == totient(5)*totient(9)
True
See Also
========
sympy.ntheory.factor_.divisor_count
References
==========
.. [1] https://en.wikipedia.org/wiki/Euler%27s_totient_function
.. [2] https://mathworld.wolfram.com/TotientFunction.html
.. [3] https://oeis.org/A000010
"""
is_integer = True
is_positive = True
@classmethod
def eval(cls, n):
if n.is_integer is False:
raise TypeError("n should be an integer")
if n.is_positive is False:
raise ValueError("n should be a positive integer")
if n is S.One:
return S.One
if n.is_prime is True:
return n - 1
if isinstance(n, Dict):
return S(prod(p**(k-1)*(p-1) for p, k in n.items()))
if n.is_Integer is True:
return S(prod(p**(k-1)*(p-1) for p, k in factorint(n).items()))
| totient |
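The integer branch of ``eval`` is the standard prime-factorisation form of the totient; for n = \prod_i p_i^{k_i}:
\[
\phi(n) = \prod_i p_i^{k_i - 1}(p_i - 1) = n \prod_{p \mid n}\left(1 - \frac{1}{p}\right)
\]
e.g. \phi(45) = 3^{1}(3 - 1) \cdot 5^{0}(5 - 1) = 6 \cdot 4 = 24.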
python | huggingface__transformers | src/transformers/models/flava/modeling_flava.py | {
"start": 31794,
"end": 32379
} | class ____(nn.Module):
def __init__(self, config: FlavaPossibleConfigs):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
@auto_docstring
| FlavaPooler |
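A standalone sketch of the same CLS-style pooling, with made-up shapes:
import torch
import torch.nn as nn

hidden_states = torch.randn(2, 7, 16)     # (batch, seq_len, hidden_size)
dense, act = nn.Linear(16, 16), nn.Tanh()
pooled = act(dense(hidden_states[:, 0]))  # "pool" = project + tanh the first token
assert pooled.shape == (2, 16)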
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-lindorm/llama_index/vector_stores/lindorm/base.py | {
"start": 909,
"end": 27744
} | class ____:
"""
Object encapsulating an Lindorm index that has vector search enabled.
If the index does not yet exist, it is created during init.
Therefore, the underlying index is assumed to either:
1) not exist yet or 2) be created due to previous usage of this class.
Two index types are available: IVFPQ & HNSW. Default: IVFPQ.
Detailed info for these arguments can be found here:
https://help.aliyun.com/document_detail/2773371.html
Args:
host (str): Elasticsearch-compatible host of the Lindorm search engine.
port (int): Port of your Lindorm instance.
username (str): Username of your lindorm instance.
password (str): Password of your lindorm instance.
index (str): Name of the index.
dimension (int): Dimension of the vector.
How to obtain a Lindorm instance:
https://alibabacloud.com/help/en/lindorm/latest/create-an-instance
How to access your Lindorm instance:
https://www.alibabacloud.com/help/en/lindorm/latest/view-endpoints
Run curl commands to connect to and use Lindorm Search:
https://www.alibabacloud.com/help/en/lindorm/latest/connect-and-use-the-search-engine-with-the-curl-command
Optional Args:
text_field(str): Document field the text of the document is stored in. Defaults to "content".
max_chunk_bytes(int): Maximum size of a chunk in bytes; default: 1 * 1024 * 1024.
os_client(OSClient): opensearch client; default: None.
Optional Keyword Args to construct method of mapping:
method_name(str): "ivfpq", "hnsw"; default: "ivfpq".
engine(str): "lvector"; default: "lvector".
space_type(str): "l2", "cosinesimil", "innerproduct"; default: "l2".
vector_field(str): Document field embeddings are stored in. default: "vector_field".
Optional Keyword Args for lindorm search extension setting:
filter_type (str): filter type for lindorm search, pre_filter or post_filter; default: post_filter.
nprobe (str): number of cluster units to query; between 1 and method.parameters.nlist.
No default value.
reorder_factor (str): reorder_factor for lindorm search; between 1 and 200; default: 10.
Optional Keyword Args for IVFPQ:
m(int): Number of subspaces. Between 2 and 32768; default: 16.
nlist(int): Number of cluster centers. Between 2 and 1000000; default: 10000.
centroids_use_hnsw(bool): Whether to use the HNSW algorithm when searching for cluster centers; default: True.
centroids_hnsw_m: Between 1 and 100; default: 16.
centroids_hnsw_ef_search(int): Size of the dynamic list used during k-NN searches. Higher values
lead to more accurate but slower searches; default: 100.
centroids_hnsw_ef_construct(int): Size of the dynamic list used during k-NN graph creation.
Higher values lead to more accurate graph but slower indexing speed; default: 100.
Optional Keyword Args for HNSW:
m(int): maximum number of outgoing edges in each layer of the graph. Between 1 and 100; default: 16.
ef_construction(int): Length of the dynamic list when the index is built. Between 1 and 1000; default: 100.
"""
def __init__(
self,
host: str,
port: int,
username: str,
password: str,
index: str,
dimension: int,
text_field: str = "content",
max_chunk_bytes: int = 1 * 1024 * 1024,
os_client: Optional[OSClient] = None,
**kwargs: Any,
):
"""Init params."""
method_name = kwargs.get("method_name", "ivfpq")
engine = kwargs.get("engine", "lvector")
space_type = kwargs.get("space_type", "l2")
vector_field = kwargs.get("vector_field", "vector_field")
filter_type = kwargs.get("filter_type", "post_filter")
nprobe = kwargs.get("nprobe", "1")
reorder_factor = kwargs.get("reorder_factor", "10")
if filter_type not in ["post_filter", "pre_filter"]:
raise ValueError(
f"Unsupported filter type: {filter_type}, only post_filter and pre_filter are suopported now."
)
# initialize parameters
if method_name == "ivfpq":
m = kwargs.get("m", dimension)
nlist = kwargs.get("nlist", 10000)
centroids_use_hnsw = kwargs.get("centroids_use_hnsw", True)
centroids_hnsw_m = kwargs.get("centroids_hnsw_m", 16)
centroids_hnsw_ef_construct = kwargs.get("centroids_hnsw_ef_construct", 100)
centroids_hnsw_ef_search = kwargs.get("centroids_hnsw_ef_search", 100)
parameters = {
"m": m,
"nlist": nlist,
"centroids_use_hnsw": centroids_use_hnsw,
"centroids_hnsw_m": centroids_hnsw_m,
"centroids_hnsw_ef_construct": centroids_hnsw_ef_construct,
"centroids_hnsw_ef_search": centroids_hnsw_ef_search,
}
elif method_name == "hnsw":
m = kwargs.get("m", 16)
ef_construction = kwargs.get("ef_construction", 100)
parameters = {"m": m, "ef_construction": ef_construction}
else:
raise RuntimeError(f"unexpected method_name: {method_name}")
self._vector_field = vector_field
self._filter_type = filter_type
self._nprobe = nprobe
self._reorder_factor = reorder_factor
self._host = host
self._port = port
self._username = username
self._password = password
self._dimension = dimension
self._index = index
self._text_field = text_field
self._max_chunk_bytes = max_chunk_bytes
# initialize mapping
mapping = {
"settings": {"index": {"number_of_shards": 4, "knn": True}},
"mappings": {
"_source": {"excludes": [vector_field]},
"properties": {
vector_field: {
"type": "knn_vector",
"dimension": dimension,
"data_type": "float",
"method": {
"engine": engine,
"name": method_name,
"space_type": space_type,
"parameters": parameters,
},
},
},
},
}
self._os_client = os_client or self._get_async_lindorm_search_client(
self._host, self._port, self._username, self._password, **kwargs
)
not_found_error = self._import_not_found_error()
event_loop = asyncio.get_event_loop()
try:
event_loop.run_until_complete(
self._os_client.indices.get(index=self._index)
)
except not_found_error:
event_loop.run_until_complete(
self._os_client.indices.create(index=self._index, body=mapping)
)
event_loop.run_until_complete(
self._os_client.indices.refresh(index=self._index)
)
def _import_async_opensearch(self) -> Any:
"""Import OpenSearch Python SDK if available, otherwise raise error."""
try:
from opensearchpy import AsyncOpenSearch
except ImportError:
raise ImportError(IMPORT_OPENSEARCH_PY_ERROR)
return AsyncOpenSearch
def _import_async_bulk(self) -> Any:
"""Import bulk if available, otherwise raise error."""
try:
from opensearchpy.helpers import async_bulk
except ImportError:
raise ImportError(IMPORT_OPENSEARCH_PY_ERROR)
return async_bulk
def _import_not_found_error(self) -> Any:
"""Import not found error if available, otherwise raise error."""
try:
from opensearchpy.exceptions import NotFoundError
except ImportError:
raise ImportError(IMPORT_OPENSEARCH_PY_ERROR)
return NotFoundError
def _get_async_lindorm_search_client(
self,
host: str,
port: int,
username: str,
password: str,
time_out: Optional[int] = 100,
**kwargs: Any,
) -> Any:
"""Get lindorm search client through `opensearchpy` base on the lindorm_search_instance, otherwise raise error."""
try:
opensearch = self._import_async_opensearch()
auth = (username, password)
client = opensearch(
hosts=[{"host": host, "port": port}],
http_auth=auth,
time_out=time_out,
**kwargs,
)
except ValueError as e:
raise ValueError(
f"Async Lindorm Search Client string provided is not in proper format. "
f"Got error: {e} "
)
return client
def _flatten_request(self, request) -> Dict:
"""Flatten metadata in request."""
if "metadata" in request:
for key, value in request["metadata"].items():
request[key] = value
del request["metadata"]
return request
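# Illustrative example, not part of the original client (request values and the
# `client` name assumed): _flatten_request promotes keys from the "metadata"
# sub-dict to the top level of a bulk request so filters can address them directly.
#
#   >>> req = {"_id": "1", "content": "hi", "metadata": {"doc_id": "d1"}}
#   >>> client._flatten_request(req)
#   {'_id': '1', 'content': 'hi', 'doc_id': 'd1'}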
async def _bulk_ingest_embeddings(
self,
client: Any,
index_name: str,
embeddings: List[List[float]],
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
vector_field: str = "vector_field",
text_field: str = "content",
mapping: Optional[Dict] = None,
max_chunk_bytes: Optional[int] = 1 * 1024 * 1024,
) -> List[str]:
"""Async Bulk Ingest Embeddings into given index."""
if not mapping:
mapping = {}
async_bulk = self._import_async_bulk()
not_found_error = self._import_not_found_error()
requests = []
return_ids = []
try:
await client.indices.get(index=index_name)
except not_found_error:
await client.indices.create(index=index_name, body=mapping)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": index_name,
vector_field: embeddings[i],
text_field: text,
"metadata": metadata,
"_id": _id,
}
# Flatten metadata in request
request = self._flatten_request(request)
requests.append(request)
return_ids.append(_id)
await async_bulk(client, requests, max_chunk_bytes=max_chunk_bytes)
await client.indices.refresh(index=index_name)
return return_ids
def _default_approximate_search_query(
self,
query_vector: List[float],
k: int,
nprobe: str,
reorder_factor: str,
vector_field: str = "vector_field",
) -> Dict:
"""
For Approximate k-NN Search, this is the default query.
Args:
query_vector(List[float]): Vector embedding to query.
k(int): Maximum number of results. default: 4.
nprobe (str): number of cluster units to query; between 1 and method.parameters.nlist.
No default value.
reorder_factor (str): reorder_factor for lindorm search; between 1 and 200; default: 10.
Optional Args:
vector_field(str): Document field embeddings are stored in. default: "vector_field".
Return:
A dictionary representing the query.
"""
return {
"size": k,
"query": {"knn": {vector_field: {"vector": query_vector, "k": k}}},
"ext": {"lvector": {"nprobe": nprobe, "reorder_factor": reorder_factor}},
}
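# Sketch of the query body this helper produces (argument values and the
# `client` name assumed):
#
#   >>> client._default_approximate_search_query([0.1, 0.2], k=4,
#   ...                                          nprobe="16", reorder_factor="10")
#   {'size': 4,
#    'query': {'knn': {'vector_field': {'vector': [0.1, 0.2], 'k': 4}}},
#    'ext': {'lvector': {'nprobe': '16', 'reorder_factor': '10'}}}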
def _search_query_with_filter(
self,
query_vector: List[float],
k: int,
filter_type: str,
nprobe: str,
reorder_factor: str,
vector_field: str = "vector_field",
filter: Union[Dict, List, None] = None,
) -> Dict:
"""
Construct search query with pre-filter or post-filter.
Args:
query_vector(List[float]): Vector embedding to query.
k(int): Maximum number of results. default: 4.
filter_type(str): filter_type for lindorm search, pre_filter and post_filter are supported;
default: "post_filter".
nprobe (str): number of cluster units to query; between 1 and method.parameters.nlist.
No default value.
reorder_factor (str): reorder_factor for lindorm search; between 1 and 200; default: 10.
vector_field(str): Document field embeddings are stored in. default: "vector_field".
filter(Union[Dict, List, None]): filter for lindorm search. default: None.
Returns:
A dictionary representing the query.
"""
if not filter:
filter = MATCH_ALL_QUERY
return {
"size": k,
"query": {
"knn": {
vector_field: {"vector": query_vector, "filter": filter, "k": k}
}
},
"ext": {
"lvector": {
"filter_type": filter_type,
"nprobe": nprobe,
"reorder_factor": reorder_factor,
}
},
}
def _metadatafilter_to_dict(self, filter: MetadataFilter) -> Dict:
"""
Parse MetadataFilter into a dictionary.
Args:
filter (MetadataFilter): A MetadataFilter object.
Returns:
dict: A dictionary representing the filter.
"""
operator = filter.operator
range_operators = {
FilterOperator.GTE: "gte",
FilterOperator.LTE: "lte",
FilterOperator.GT: "gt",
FilterOperator.LT: "lt",
}
if operator in range_operators:
filter_dict = {
"range": {filter.key: {range_operators[operator]: filter.value}}
}
elif operator == FilterOperator.EQ:
filter_dict = {"term": {filter.key: filter.value}}
else:
raise ValueError(f"Unsupported filter operator: {operator}")
return filter_dict
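# Illustrative translations (MetadataFilter values assumed):
#
#   MetadataFilter(key="year", value=2020, operator=FilterOperator.GT)
#       -> {"range": {"year": {"gt": 2020}}}
#   MetadataFilter(key="author", value="bob", operator=FilterOperator.EQ)
#       -> {"term": {"author": "bob"}}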
def _parse_filters(self, filters: Optional[MetadataFilters]) -> Any:
"""
Parse MetadataFilters into a list of dictionaries.
Args:
filters (Optional[MetadataFilters]): An optional MetadataFilters object.
Returns:
list: A list of dictionaries. If no filters are provided, an empty list is returned.
"""
filter_list = []
if filters is not None:
for filter in filters.filters:
filter_list.append(self._metadatafilter_to_dict(filter))
return filter_list
def _knn_search_query(
self,
vector_field: str,
query_embedding: List[float],
k: int,
filter_type: str,
nprobe: str,
reorder_factor: str,
filters: Optional[MetadataFilters] = None,
) -> Dict:
"""
Do knn search.
If there are no filters, do an approximate knn search.
If there are filters, do an exhaustive exact knn search using the filters.
Note that approximate knn search does not support metadata filtering.
Args:
query_embedding(List[float]): Vector embedding to query.
k(int): Maximum number of results.
filter_type(str): filter_type for lindorm search, pre_filter and post_filter are supported;
default: "post_filter".
nprobe (str): number of cluster units to query; between 1 and method.parameters.nlist.
No default value.
reorder_factor (str): reorder_factor for lindorm search; between 1 and 200; default: 10.
Optional Args:
filters(Optional[MetadataFilters]): Optional filters to apply before the search.
Supports filter-context queries documented at
https://opensearch.org/docs/latest/query-dsl/query-filter-context/
Returns:
Up to k targets closest to query_embedding.
"""
filter_list = self._parse_filters(filters)
if not filters:
search_query = self._default_approximate_search_query(
query_vector=query_embedding,
k=k,
vector_field=vector_field,
nprobe=nprobe,
reorder_factor=reorder_factor,
)
else:
if filters.condition == FilterCondition.AND:
filter = {"bool": {"must": filter_list}}
elif filters.condition == FilterCondition.OR:
filter = {"bool": {"should": filter_list}}
else:
# TODO: FilterCondition can also be 'NOT', but llama_index does not support it yet.
# https://opensearch.org/docs/latest/query-dsl/compound/bool/
# post_filter = {"bool": {"must_not": filter_list}}
raise ValueError(f"Unsupported filter condition: {filters.condition}")
search_query = self._search_query_with_filter(
query_vector=query_embedding,
vector_field=vector_field,
k=k,
filter=filter,
nprobe=nprobe,
reorder_factor=reorder_factor,
filter_type=filter_type,
)
return search_query
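# For example (filters assumed): two parsed filters combined under
# FilterCondition.AND become {"bool": {"must": [f1, f2]}}, while
# FilterCondition.OR becomes {"bool": {"should": [f1, f2]}}; the resulting
# bool query is passed through as the knn "filter" above.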
def _hybrid_search_query(
self,
text_field: str,
query_str: str,
vector_field: str,
query_embedding: List[float],
k: int,
filter_type: str,
nprobe: str,
reorder_factor: str,
filters: Optional[MetadataFilters] = None,
) -> Dict:
"""
Do hybrid search.
Args:
text_field(str): Document field to query.
query_str(str): Query string.
vector_field(str): Document field embeddings are stored in.
query_embedding(List[float]): Vector embedding to query.
k(int): Maximum number of results.
filter_type(str): filter_type for lindorm search, pre_filter and post_filter are supported;
default: "post_filter".
nprobe (str): number of cluster units to query; between 1 and method.parameters.nlist.
No default value.
reorder_factor (str): reorder_factor for lindorm search; between 1 and 200; default: 10.
Optional Args:
filters(Optional[MetadataFilters]): Optional filters to apply before the search.
Supports filter-context queries documented at
https://opensearch.org/docs/latest/query-dsl/query-filter-context/
Returns:
Up to k targets closest to query_embedding
"""
knn_query = self._knn_search_query(
vector_field=vector_field,
filter_type=filter_type,
nprobe=nprobe,
reorder_factor=reorder_factor,
query_embedding=query_embedding,
k=k,
filters=filters,
)
lexical_query = self._lexical_search_query(text_field, query_str, k, filters)
# Combine knn and lexical search query
knn_field_query = knn_query["query"]["knn"][vector_field]
if "filter" not in knn_field_query:
knn_field_query["filter"] = {"bool": {"must": []}}
elif "bool" not in knn_field_query["filter"]:
knn_field_query["filter"]["bool"] = {"must": []}
elif "must" not in knn_field_query["filter"]["bool"]:
knn_field_query["filter"]["bool"]["must"] = []
knn_query["query"]["knn"][vector_field]["filter"]["bool"]["must"].append(
lexical_query["query"]["bool"]["must"]
)
return {
"size": k,
"query": knn_query["query"],
"ext": {
"lvector": {
"filter_type": filter_type,
"nprobe": nprobe,
"reorder_factor": reorder_factor,
}
},
}
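# Sketch of the combination above (field names assumed): the lexical
# must-clause is appended into the knn filter's bool.must list, so the text
# match is evaluated alongside any metadata filters, e.g.
#
#   query.knn.<vector_field>.filter.bool.must ->
#       [..., {"match": {"content": {"query": "..."}}}]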
def _lexical_search_query(
self,
text_field: str,
query_str: str,
k: int,
filters: Optional[MetadataFilters] = None,
) -> Dict:
"""
Do lexical search.
Args:
text_field(str): Document field to query.
query_str(str): Query string.
k(int): Maximum number of results.
Optional Args:
filters(Optional[MetadataFilters]): Optional filters to apply before the search.
Supports filter-context queries documented at
https://opensearch.org/docs/latest/query-dsl/query-filter-context/
Returns:
Up to k targets closest to query_embedding.
"""
lexical_query = {
"bool": {"must": {"match": {text_field: {"query": query_str}}}}
}
parsed_filters = self._parse_filters(filters)
if len(parsed_filters) > 0:
lexical_query["bool"]["filter"] = parsed_filters
return {
"size": k,
"query": lexical_query,
}
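# Example lexical query with one parsed filter (values assumed):
#
#   {"size": 4,
#    "query": {"bool": {"must": {"match": {"content": {"query": "hello"}}},
#                       "filter": [{"term": {"author": "bob"}}]}}}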
async def index_results(self, nodes: List[BaseNode], **kwargs: Any) -> List[str]:
"""
Store results in the index.
Args:
nodes (List[BaseNode]): A list of BaseNode objects.
Returns:
List[str]: A list of node_ids
"""
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
return await self._bulk_ingest_embeddings(
self._os_client,
self._index,
embeddings,
texts,
metadatas=metadatas,
ids=ids,
vector_field=self._vector_field,
text_field=self._text_field,
mapping=None,
max_chunk_bytes=self._max_chunk_bytes,
)
async def delete_by_doc_id(self, doc_id: str) -> None:
"""
Deletes nodes corresponding to the given LlamaIndex `Document` ID.
Args:
doc_id (str): a LlamaIndex `Document` id.
"""
search_query = {"query": {"term": {"doc_id.keyword": {"value": doc_id}}}}
await self._os_client.delete_by_query(index=self._index, body=search_query)
async def aquery(
self,
query_mode: VectorStoreQueryMode,
query_str: Optional[str],
query_embedding: List[float],
k: int,
filters: Optional[MetadataFilters] = None,
) -> VectorStoreQueryResult:
"""
Do vector search.
Args:
query_mode (VectorStoreQueryMode): Query mode.
query_str (Optional[str]): Query string.
query_embedding (List[float]): Query embedding.
k (int): Maximum number of results.
Optional Args:
filters(Optional[MetadataFilters]): Optional filters to apply before the search.
Supports filter-context queries documented at
https://opensearch.org/docs/latest/query-dsl/query-filter-context/
Returns:
VectorStoreQueryResult.
"""
if query_mode == VectorStoreQueryMode.HYBRID:
if query_str is None:
raise ValueError(INVALID_HYBRID_QUERY_ERROR)
search_query = self._hybrid_search_query(
text_field=self._text_field,
query_str=query_str,
vector_field=self._vector_field,
query_embedding=query_embedding,
k=k,
filters=filters,
filter_type=self._filter_type,
nprobe=self._nprobe,
reorder_factor=self._reorder_factor,
)
params = None
elif query_mode == VectorStoreQueryMode.TEXT_SEARCH:
search_query = self._lexical_search_query(
self._text_field, query_str, k, filters=filters
)
params = None
else:
search_query = self._knn_search_query(
vector_field=self._vector_field,
query_embedding=query_embedding,
k=k,
filters=filters,
filter_type=self._filter_type,
nprobe=self._nprobe,
reorder_factor=self._reorder_factor,
)
params = None
res = await self._os_client.search(
index=self._index, body=search_query, _source=True, params=params
)
return self._to_query_result(res)
def _to_query_result(self, res) -> VectorStoreQueryResult:
"""
Convert Lindorm search result to VectorStoreQueryResult.
Args:
res(Dict): Lindorm search result.
Returns:
VectorStoreQueryResult.
"""
nodes = []
ids = []
scores = []
for hit in res["hits"]["hits"]:
source = hit["_source"]
node_id = hit["_id"]
text = source[self._text_field]
metadata = source.get("metadata", None)
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old nodes
node_info = source.get("node_info")
relationships = source.get("relationships") or {}
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
ids.append(node_id)
nodes.append(node)
scores.append(hit["_score"])
return VectorStoreQueryResult(nodes=nodes, ids=ids, similarities=scores)
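# Illustrative hit consumed by the loop above (field values assumed):
#
#   {"_id": "node-1", "_score": 0.87,
#    "_source": {"content": "some text", "metadata": {...}}}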
| LindormVectorClient |
python | dask__dask | dask/tests/test_utils.py | {
"start": 20540,
"end": 24958
} | class ____:
pass
def test_typename_on_instances():
instance = MyType()
assert typename(instance) == typename(MyType)
def test_cached_cumsum():
a = (1, 2, 3, 4)
x = cached_cumsum(a)
y = cached_cumsum(a, initial_zero=True)
assert x == (1, 3, 6, 10)
assert y == (0, 1, 3, 6, 10)
def test_cached_cumsum_nan():
np = pytest.importorskip("numpy")
a = (1, np.nan, 3)
x = cached_cumsum(a)
y = cached_cumsum(a, initial_zero=True)
np.testing.assert_equal(x, (1, np.nan, np.nan))
np.testing.assert_equal(y, (0, 1, np.nan, np.nan))
def test_cached_cumsum_non_tuple():
a = [1, 2, 3]
assert cached_cumsum(a) == (1, 3, 6)
a[1] = 4
assert cached_cumsum(a) == (1, 5, 8)
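# Note (assumption, not stated in the original test): for non-tuple inputs the
# cache is keyed by value, so mutating `a` above yields freshly computed sums
# rather than a stale cached result.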
def test_tmpfile_naming():
with tmpfile() as fn:
# Do not end file or directory name with a period.
# This causes issues on Windows.
assert fn[-1] != "."
with tmpfile(extension="jpg") as fn:
assert fn[-4:] == ".jpg"
with tmpfile(extension=".jpg") as fn:
assert fn[-4:] == ".jpg"
assert fn[-5] != "."
def test_get_meta_library():
np = pytest.importorskip("numpy")
pd = pytest.importorskip("pandas")
da = pytest.importorskip("dask.array")
dd = pytest.importorskip("dask.dataframe")
assert get_meta_library(pd.DataFrame()) == pd
assert get_meta_library(np.array([])) == np
assert get_meta_library(pd.DataFrame()) == get_meta_library(pd.DataFrame)
assert get_meta_library(np.ndarray([])) == get_meta_library(np.ndarray)
assert get_meta_library(pd.DataFrame()) == get_meta_library(
dd.from_dict({}, npartitions=1)
)
assert get_meta_library(np.ndarray([])) == get_meta_library(da.from_array([]))
def test_get_meta_library_gpu():
cp = pytest.importorskip("cupy")
cudf = pytest.importorskip("cudf")
da = pytest.importorskip("dask.array")
dd = pytest.importorskip("dask.dataframe")
assert get_meta_library(cudf.DataFrame()) == cudf
assert get_meta_library(cp.array([])) == cp
assert get_meta_library(cudf.DataFrame()) == get_meta_library(cudf.DataFrame)
assert get_meta_library(cp.ndarray([])) == get_meta_library(cp.ndarray)
assert get_meta_library(cudf.DataFrame()) == get_meta_library(
dd.from_dict({}, npartitions=1).to_backend("cudf")
)
assert get_meta_library(cp.ndarray([])) == get_meta_library(
da.from_array([]).to_backend("cupy")
)
def test_is_empty_list_and_tuple():
assert is_empty([]) is True
assert is_empty([1]) is False
assert is_empty(()) is True
assert is_empty((1,)) is False
def test_is_empty_numpy_array():
np = pytest.importorskip("numpy")
assert is_empty(np.array([])) is True
assert is_empty(np.array([1, 2])) is False
# len() == 3 → not empty even though one dimension is 0
assert is_empty(np.empty((3, 0))) is False
def test_is_empty_fake_sparse_like_object():
"""Simulate sparse arrays via fake .nnz and .shape attributes."""
class FakeSparse:
def __init__(self, nnz, shape):
self.nnz = nnz
self.shape = shape
assert is_empty(FakeSparse(0, (10, 10))) is True # nnz == 0 → True
assert is_empty(FakeSparse(5, (10, 10))) is False # nnz != 0 → False
assert (
is_empty(FakeSparse(10, (0, 5))) is False
) # nnz != 0 → False (never checks shape)
def test_is_empty_object_with_nnz_only():
class FakeSparse:
def __init__(self, nnz):
self.nnz = nnz
assert is_empty(FakeSparse(0)) is True
assert is_empty(FakeSparse(3)) is False
def test_is_empty_object_with_shape_only():
class FakeShape:
def __init__(self, shape):
self.shape = shape
assert is_empty(FakeShape((0, 5))) is True
assert is_empty(FakeShape((3, 5))) is False
def test_is_empty_fallback_object():
class Dummy:
pass
assert is_empty(Dummy()) is False
def test_is_empty_typeerror_in_len():
"""Force TypeError in len() to trigger nnz/shape logic."""
class FakeObj:
def __len__(self):
raise TypeError
nnz = 0
assert is_empty(FakeObj()) is True
def test_is_empty_handles_invalid_nnz_and_shape():
class Weird:
nnz = "not comparable"
shape = "not iterable"
# The function should handle these gracefully and return False
assert is_empty(Weird()) is False
| MyType |
python | anthropics__anthropic-sdk-python | tests/lib/streaming/test_partial_json.py | {
"start": 427,
"end": 6284
} | class ____:
def test_trailing_strings_mode_header(self) -> None:
"""Test behavior differences with and without the beta header for JSON parsing."""
message = ParsedBetaMessage(
id="msg_123",
type="message",
role="assistant",
content=[
BetaToolUseBlock(
type="tool_use",
input={},
id="tool_123",
name="test_tool",
caller=BetaDirectCaller(type="direct"),
)
],
model="claude-sonnet-4-5",
stop_reason=None,
stop_sequence=None,
usage=BetaUsage(input_tokens=10, output_tokens=10),
)
# Test case 1: Complete JSON
complete_json = '{"key": "value"}'
event_complete = BetaRawContentBlockDeltaEvent(
type="content_block_delta",
index=0,
delta=BetaInputJSONDelta(type="input_json_delta", partial_json=complete_json),
)
# Both modes should handle complete JSON the same way
message1 = accumulate_event(
event=event_complete,
current_snapshot=copy.deepcopy(message),
request_headers=httpx.Headers({"some-header": "value"}),
)
message2 = accumulate_event(
event=event_complete,
current_snapshot=copy.deepcopy(message),
request_headers=httpx.Headers({"anthropic-beta": "fine-grained-tool-streaming-2025-05-14"}),
)
# Both should parse complete JSON correctly
assert cast(ToolUseBlock, message1.content[0]).input == {"key": "value"}
assert cast(ToolUseBlock, message2.content[0]).input == {"key": "value"}
# Test case 2: Incomplete JSON with trailing string that will be treated differently
# Here we want to create a situation where regular mode and trailing strings mode behave differently
incomplete_json = '{"items": ["item1", "item2"], "unfinished_field": "incomplete value'
event_incomplete = BetaRawContentBlockDeltaEvent(
type="content_block_delta",
index=0,
delta=BetaInputJSONDelta(type="input_json_delta", partial_json=incomplete_json),
)
# Without beta header (standard mode)
message_standard = accumulate_event(
event=event_incomplete,
current_snapshot=copy.deepcopy(message),
request_headers=httpx.Headers({"some-header": "value"}),
)
# With beta header (trailing strings mode)
message_trailing = accumulate_event(
event=event_incomplete,
current_snapshot=copy.deepcopy(message),
request_headers=httpx.Headers({"anthropic-beta": "fine-grained-tool-streaming-2025-05-14"}),
)
# Get the tool use blocks
standard_tool = cast(ToolUseBlock, message_standard.content[0])
trailing_tool = cast(ToolUseBlock, message_trailing.content[0])
# Both should have the valid complete part of the JSON
assert isinstance(standard_tool.input, dict)
assert isinstance(trailing_tool.input, dict)
standard_input = standard_tool.input # type: ignore
trailing_input = trailing_tool.input # type: ignore
# The input should have the items array in both cases
items_standard = cast(List[str], standard_input["items"])
items_trailing = cast(List[str], trailing_input["items"])
assert items_standard == ["item1", "item2"]
assert items_trailing == ["item1", "item2"]
# The key difference is how they handle the incomplete field:
# Standard mode should not include the incomplete field
assert "unfinished_field" not in standard_input
# Trailing strings mode should include the incomplete field
assert "unfinished_field" in trailing_input
assert trailing_input["unfinished_field"] == "incomplete value"
# test that with invalid JSON we throw the correct error
def test_partial_json_with_invalid_json(self) -> None:
"""Test that invalid JSON raises an error."""
message = ParsedBetaMessage(
id="msg_123",
type="message",
role="assistant",
content=[
BetaToolUseBlock(
type="tool_use",
input={},
id="tool_123",
name="test_tool",
caller=BetaDirectCaller(type="direct"),
)
],
model="claude-sonnet-4-5",
stop_reason=None,
stop_sequence=None,
usage=BetaUsage(input_tokens=10, output_tokens=10),
)
# Invalid JSON input
invalid_json = '{"key": "value", "incomplete_field": bad_value'
event_invalid = BetaRawContentBlockDeltaEvent(
type="content_block_delta",
index=0,
delta=BetaInputJSONDelta(type="input_json_delta", partial_json=invalid_json),
)
# Expect an error when trying to accumulate the invalid JSON
try:
accumulate_event(
event=event_invalid,
current_snapshot=copy.deepcopy(message),
request_headers=httpx.Headers({"anthropic-beta": "fine-grained-tool-streaming-2025-05-14"}),
)
raise AssertionError("Expected ValueError for invalid JSON, but no error was raised.")
except ValueError as e:
assert str(e).startswith(
"Unable to parse tool parameter JSON from model. Please retry your request or adjust your prompt."
)
except Exception as e:
raise AssertionError(f"Unexpected error type: {type(e).__name__} with message: {str(e)}") from e
| TestPartialJson |
python | PyCQA__pylint | tests/functional/t/too/too_many_ancestors_ignored_parents.py | {
"start": 585,
"end": 621
} | class ____(D, E):
"""3 parents"""
| B |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/asyncpg.py | {
"start": 9015,
"end": 9083
} | class ____(sqltypes.String):
render_bind_cast = True
| AsyncpgString |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 58751,
"end": 59528
} | class ____:
def test_exp_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f', 'd', 'g']:
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt) * log2_
assert_almost_equal(np.exp(yf), xf)
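# Identity exercised above: with log2_ = ln(2), exp(y * ln(2)) == 2**y,
# so yf maps back onto the powers of two stored in xf.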
def test_exp_strides(self):
np.random.seed(42)
strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4])
sizes = np.arange(2, 100)
for ii in sizes:
x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1, size=ii))
y_true = np.exp(x_f64)
for jj in strides:
assert_array_almost_equal_nulp(np.exp(x_f64[::jj]), y_true[::jj], nulp=2)
| TestExp |
python | pytorch__pytorch | torch/_inductor/codegen/cutedsl/cutedsl_template.py | {
"start": 654,
"end": 4933
} | class ____(KernelTemplate):
"""Template for generating CuteDSL (CUTLASS Python DSL) kernels."""
kernel_type: type[Any] = CuteDSLTemplateKernel
index_counter = itertools.count()
all_templates: dict[str, "CuteDSLTemplate"] = {}
def __init__(
self,
name: str,
source: str,
subgraph_fn: Optional[Any] = None,
mask_fn: Optional[Any] = None,
) -> None:
super().__init__(name)
self.source = source
self.subgraph_fn = subgraph_fn
self.mask_fn = mask_fn
self.template = CuteDSLTemplate._template_from_string(source)
assert name not in self.all_templates, f"duplicate template name, {name}"
CuteDSLTemplate.all_templates[name] = self
@staticmethod
@functools.lru_cache(None)
# pyrefly: ignore [bad-override]
def _template_from_string(source: str) -> Any:
return KernelTemplate._template_from_string(source)
def maybe_append_choice(
self, choices: list[Any], **kwargs: Any
) -> Optional[NotImplementedError]:
"""
Maybe generates a new ChoiceCaller and appends it into existing choices.
Returns None if success, otherwise returns the error.
"""
try:
choices.append(self.generate(**kwargs))
return None
except NotImplementedError as e:
log.debug("CuteDSL template choice generation failed: %s", e) # noqa: G200
return e
except Exception as e:
log.debug("CuteDSL template choice generation error: %s", e) # noqa: G200
return NotImplementedError(f"CuteDSL template failed: {e}")
def generate(self, **kwargs: Any) -> ChoiceCaller:
"""Generate the CuteDSL kernel caller."""
input_nodes = kwargs.pop("input_nodes")
layout = kwargs.pop("layout")
mutated_inputs = kwargs.pop("mutated_inputs", None)
subgraphs = kwargs.pop("subgraphs", None)
kernel_name = f"cutedsl_{self.name}_{next(self.index_counter)}"
if self.template is None:
raise RuntimeError("Template compilation failed (Jinja2 required)")
self.output_node: Buffer = Buffer(name="buf_out", layout=layout)
# Patch V.graph.get_dtype to handle the fake buf_out buffer
with patch.object(
V.graph, "get_dtype", KernelTemplate._fake_get_dtype(self.output_node)
):
kernel = self.kernel_type(
kernel_name=kernel_name,
input_nodes=input_nodes,
output_node=self.output_node,
subgraphs=subgraphs,
)
code = kernel.render(self.template, **kwargs)
log.debug("Generated CuteDSL Code:\n%s", code)
bmreq = CuteDSLBenchmarkRequest(
kernel_name=kernel_name,
input_tensor_meta=TensorMeta.from_irnodes(input_nodes),
output_tensor_meta=TensorMeta.from_irnodes(self.output_node),
extra_args=tuple(),
source_code=code,
)
def make_kernel_render(out_node, hint_override: Optional[int] = None):
"""
Factory function that creates a kernel renderer for the final output.
This closure captures the current template and parameters, but allows
the output node to be specified later. This is used during the final
kernel selection phase when the actual output buffer is available.
"""
render_kernel = self.kernel_type(
kernel_name=str(Placeholder.KERNEL_NAME),
input_nodes=input_nodes,
output_node=out_node,
subgraphs=subgraphs,
)
def render():
return render_kernel.render(self.template, **kwargs)
return render_kernel, render
return CuteDSLTemplateCaller(
name=kernel_name,
input_nodes=input_nodes,
layout=layout,
make_kernel_render=make_kernel_render,
bmreq=bmreq,
template=self,
mutated_inputs=mutated_inputs,
)
| CuteDSLTemplate |
python | pytorch__pytorch | torch/_dynamo/variables/misc.py | {
"start": 74656,
"end": 74741
} | class ____(ConstantLikeVariable):
_error_prefix = "re.Pattern"
| RegexPatternVariable |
python | huggingface__transformers | src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | {
"start": 24605,
"end": 31230
} | class ____(nn.Module):
"""Pretraining head.
Args:
config (`PatchTSMixerConfig`):
Configuration.
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.dropout_layer = nn.Dropout(config.head_dropout)
self.base_pt_block = nn.Linear(config.d_model, config.patch_length)
def forward(self, hidden_features):
"""
Args:
hidden_features (`torch.Tensor` of shape `(batch_size x num_patch x d_model)` in `flatten` mode
or `(batch_size x n_vars x num_patch x d_model)` in `common_channel`/`mix_channel` mode.): Input hidden
features.
Returns:
`torch.Tensor` of shape `(batch_size x n_vars x num_patch x patch_length)`.
"""
hidden_features = self.dropout_layer(hidden_features)
forecast = self.base_pt_block(hidden_features) # [batch_size x n_vars x num_patch x patch_length]
return forecast
# Copied from transformers.models.patchtst.modeling_patchtst.random_masking
def random_masking(
inputs: torch.Tensor,
mask_ratio: float,
unmasked_channel_indices: Optional[list] = None,
channel_consistent_masking: bool = False,
mask_value: int = 0,
):
"""random_masking: Mask the input considering the control variables.
Args:
inputs (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, num_features)`):
The input tensor to mask.
mask_ratio (`float`):
Masking ratio applied to mask the input data during random pretraining. It is a number between 0 and 1.
unmasked_channel_indices (list, *optional*):
Indices of channels that will not be masked.
channel_consistent_masking (bool, *optional*, defaults to `False`):
When true, masking will be the same across all channels of a timeseries. Otherwise, masking positions will vary
across channels.
mask_value (int, *optional*, defaults to 0):
Define the value of masked patches for pretraining.
Returns:
`tuple(torch.Tensor)`: inputs_mask, masked input, same shape as input Tensor and mask tensor of shape [bs x c x
n]
"""
if mask_ratio < 0 or mask_ratio >= 1:
raise ValueError(f"Mask ratio {mask_ratio} has to be between 0 and 1.")
batch_size, num_channels, sequence_length, num_features = inputs.shape
device = inputs.device
len_keep = int(sequence_length * (1 - mask_ratio))
if channel_consistent_masking:
noise = torch.rand(batch_size, 1, sequence_length, device=device) # noise in [0, 1], bs x 1 x L
noise = noise.repeat(1, num_channels, 1) # bs x num_channels x time
else:
# noise in [0, 1], bs x num_channels x L
noise = torch.rand(batch_size, num_channels, sequence_length, device=device)
# mask: [bs x num_channels x num_patch]
mask = torch.ones(batch_size, num_channels, sequence_length, device=device)
mask[:, :, :len_keep] = 0
# sort noise for each sample
ids_shuffle = torch.argsort(noise, dim=-1) # ascend: small is keep, large is remove
ids_restore = torch.argsort(ids_shuffle, dim=-1) # ids_restore: [bs x num_channels x L]
mask = torch.gather(mask, dim=-1, index=ids_restore)
mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) # mask: [bs x num_channels x num_patches x patch_length]
if unmasked_channel_indices is not None:
mask[:, unmasked_channel_indices, :, :] = 0
inputs_mask = inputs.masked_fill(mask.bool(), mask_value)
return inputs_mask, mask[..., 0]
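# Minimal usage sketch (shapes assumed, not from the original file):
#
#   >>> x = torch.rand(2, 3, 8, 16)  # (bs, num_channels, num_patch, patch_len)
#   >>> masked, mask = random_masking(x, mask_ratio=0.5)
#   >>> masked.shape, mask.shape
#   (torch.Size([2, 3, 8, 16]), torch.Size([2, 3, 8]))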
# Copied from transformers.models.patchtst.modeling_patchtst.forecast_masking
def forecast_masking(
inputs: torch.Tensor,
num_forecast_mask_patches: Union[list, int],
unmasked_channel_indices: Optional[list] = None,
mask_value: int = 0,
):
"""Forecast masking that masks the last K patches where K is from the num_forecast_mask_patches.
If num_forecast_mask_patches is a list, samples in the batch will be randomly masked by numbers defined in the list.
Parameters:
inputs (`torch.Tensor`):
Input of shape `(bs, num_channels, num_patch, patch_length)`
num_forecast_mask_patches (`list`):
Number of patches to be masked at the end of each batch sample. e.g. 4 or [3, 5].
unmasked_channel_indices (`list`, *optional*):
Indices of channels that are not masked.
mask_value (`int`, *optional*, defaults to 0):
Values in the masked patches will be filled by `mask_value`.
Returns:
`tuple(torch.Tensor)`: inputs_mask, masked input, same shape as inputs Tensor and Mask tensor of shape `(bs,
num_channels , num_patch)` or `(bs, tsg1, tsg2, num_channels, num_patch)`
"""
if isinstance(num_forecast_mask_patches, int):
num_forecast_mask_patches = [num_forecast_mask_patches]
forecast_mask_ratios = [1 for _ in num_forecast_mask_patches]
batch_size, num_channels, sequence_length, num_features = inputs.shape
mask = torch.zeros(batch_size, num_channels, sequence_length, device=inputs.device)
t_list = []
total_length = 0
total_ratio = sum(forecast_mask_ratios)
for patch_length, ratio in zip(num_forecast_mask_patches, forecast_mask_ratios):
if patch_length <= 0 or patch_length >= sequence_length:
raise ValueError(
f"num_forecast_mask_patches {patch_length} should be greater than 0 and less than total patches."
)
temp_len = int(batch_size * ratio / total_ratio)
t_list.append([patch_length, ratio, temp_len])
total_length += temp_len
t_list = sorted(t_list, key=lambda x: x[2])
if total_length < batch_size:
t_list[0][2] = t_list[0][2] + (batch_size - total_length)
elif total_length > batch_size:
t_list[-1][2] = t_list[-1][2] + (total_length - batch_size)
batch1 = 0
for patch_len, _, temp_len in t_list:
batch2 = batch1 + temp_len
mask[batch1:batch2, :, -patch_len:] = 1
batch1 = batch2
perm = torch.randperm(mask.shape[0])
mask = mask[perm]
mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) # mask: [bs x num_channels x num_patch x patch_len]
if unmasked_channel_indices is not None:
mask[:, unmasked_channel_indices, :, :] = 0
inputs_mask = inputs.masked_fill(mask.bool(), mask_value)
return inputs_mask, mask[..., 0]
# Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTPatchify with PatchTST->PatchTSMixer
| PatchTSMixerPretrainHead |
python | gevent__gevent | src/gevent/tests/test__socket_dns.py | {
"start": 19029,
"end": 20294
} | class ____(TestCase):
NORMALIZE_GHBA_IGNORE_ALIAS = True
def __normalize_name(self, result):
if (RESOLVER_ARES or RESOLVER_DNSPYTHON) and isinstance(result, tuple):
# The system resolver can return the FQDN, in the first result,
# when given certain configurations. But c-ares and dnspython
# do not.
name = result[0]
name = name.split('.', 1)[0]
result = (name,) + result[1:]
return result
def _normalize_result_gethostbyaddr(self, result):
result = TestCase._normalize_result_gethostbyaddr(self, result)
return self.__normalize_name(result)
def _normalize_result_getnameinfo(self, result):
result = TestCase._normalize_result_getnameinfo(self, result)
if PY2:
# Not sure why we only saw this on Python 2
result = self.__normalize_name(result)
return result
add(
TestHostname,
socket.gethostname,
skip=greentest.RUNNING_ON_TRAVIS and greentest.RESOLVER_NOT_SYSTEM,
skip_reason=("Sometimes get a different result for getaddrinfo "
"with dnspython; c-ares produces different results for "
"localhost on Travis beginning Sept 2019")
)
| TestHostname |
python | joke2k__faker | tests/providers/test_person.py | {
"start": 61553,
"end": 62068
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("ta_IN")
Faker.seed(0)
def test_gender_first_names(self):
"""simple test to verify that we are pulling gender specific names"""
name = self.fake.first_name_female()
assert name in TaINProvider.first_names_female
name = self.fake.first_name_male()
assert name in TaINProvider.first_names_male
name = self.fake.first_name()
assert name in TaINProvider.first_names
| TestTaIN |
python | Netflix__metaflow | metaflow/exception.py | {
"start": 1275,
"end": 1841
} | class ____(Exception):
headline = "Flow failed"
def __init__(self, msg="", lineno=None, source_file=None):
self.message = msg
self.line_no = lineno
self.source_file = source_file
super(MetaflowException, self).__init__()
def __str__(self):
prefix = ""
if self.source_file:
prefix = "%s:" % self.source_file
if self.line_no:
prefix = "line %d:" % self.line_no
prefix = "%s: " % prefix if prefix else ""
return "%s%s" % (prefix, self.message)
| MetaflowException |
python | getsentry__sentry | src/sentry/dynamic_sampling/tasks/boost_low_volume_transactions.py | {
"start": 11970,
"end": 24271
} | class ____:
"""
Fetch transactions for all orgs and all projects, with pagination, using the count-per-root-project metric.
org_ids: the orgs for which the projects & transactions should be returned
large_transactions: if True, return the transactions with the largest counts;
if False, return the transactions with the smallest counts
max_transactions: maximum number of transactions to return
"""
def __init__(
self,
orgs: list[int],
large_transactions: bool,
max_transactions: int,
):
self.large_transactions = large_transactions
self.max_transactions = max_transactions
self.org_ids = orgs
self.offset = 0
transaction_string_id = indexer.resolve_shared_org("transaction")
self.transaction_tag = f"tags_raw[{transaction_string_id}]"
self.metric_id = indexer.resolve_shared_org(
str(TransactionMRI.COUNT_PER_ROOT_PROJECT.value)
)
self.has_more_results = True
self.cache: list[ProjectTransactions] = []
if self.large_transactions:
self.transaction_ordering = Direction.DESC
else:
self.transaction_ordering = Direction.ASC
def __iter__(self) -> FetchProjectTransactionVolumes:
return self
def __next__(self) -> ProjectTransactions:
if self.max_transactions == 0:
# the user is not interested in transactions of this type, return nothing.
raise StopIteration()
if not self._cache_empty():
# data in cache no need to go to the db
return self._get_from_cache()
granularity = Granularity(60)
if self.has_more_results:
# still data in the db, load cache
query = (
Query(
match=Entity(EntityKey.GenericOrgMetricsCounters.value),
select=[
Function("sum", [Column("value")], "num_transactions"),
Column("org_id"),
Column("project_id"),
AliasedExpression(Column(self.transaction_tag), "transaction_name"),
],
groupby=[
Column("org_id"),
Column("project_id"),
AliasedExpression(Column(self.transaction_tag), "transaction_name"),
],
where=[
Condition(
Column("timestamp"),
Op.GTE,
datetime.utcnow() - BOOST_LOW_VOLUME_TRANSACTIONS_QUERY_INTERVAL,
),
Condition(Column("timestamp"), Op.LT, datetime.utcnow()),
Condition(Column("metric_id"), Op.EQ, self.metric_id),
Condition(Column("org_id"), Op.IN, self.org_ids),
],
granularity=granularity,
orderby=[
OrderBy(Column("org_id"), Direction.ASC),
OrderBy(Column("project_id"), Direction.ASC),
OrderBy(Column("num_transactions"), self.transaction_ordering),
],
)
.set_limitby(
LimitBy(
columns=[Column("org_id"), Column("project_id")],
count=self.max_transactions,
)
)
.set_limit(CHUNK_SIZE + 1)
.set_offset(self.offset)
)
request = Request(
dataset=Dataset.PerformanceMetrics.value,
app_id="dynamic_sampling",
query=query,
tenant_ids={"use_case_id": UseCaseID.TRANSACTIONS.value, "cross_org_query": 1},
)
data = raw_snql_query(
request,
referrer=Referrer.DYNAMIC_SAMPLING_COUNTERS_FETCH_PROJECTS_WITH_COUNT_PER_TRANSACTION.value,
)["data"]
count = len(data)
self.has_more_results = count > CHUNK_SIZE
self.offset += CHUNK_SIZE
if self.has_more_results:
data = data[:-1]
self._add_results_to_cache(data)
# return from cache if empty stops iteration
return self._get_from_cache()
def _add_results_to_cache(self, data: list[dict[str, int | float | str]]) -> None:
transaction_counts: list[tuple[str, float]] = []
current_org_id: int | None = None
current_proj_id: int | None = None
for row in data:
proj_id = int(row["project_id"])
org_id = int(row["org_id"])
transaction_name = str(row["transaction_name"])
num_transactions = float(row["num_transactions"])
if current_proj_id != proj_id or current_org_id != org_id:
if (
transaction_counts
and current_proj_id is not None
and current_org_id is not None
):
self.cache.append(
{
"project_id": current_proj_id,
"org_id": current_org_id,
"transaction_counts": transaction_counts,
"total_num_transactions": None,
"total_num_classes": None,
}
)
transaction_counts = []
current_org_id = org_id
current_proj_id = proj_id
transaction_counts.append((transaction_name, num_transactions))
# collect the last project data
if transaction_counts:
# since we accumulated some transactions we must have set the org and proj
assert current_proj_id is not None
assert current_org_id is not None
self.cache.append(
{
"project_id": current_proj_id,
"org_id": current_org_id,
"transaction_counts": transaction_counts,
"total_num_transactions": None,
"total_num_classes": None,
}
)
def _cache_empty(self) -> bool:
return not self.cache
def _get_from_cache(self) -> ProjectTransactions:
if self._cache_empty():
raise StopIteration()
return self.cache.pop(0)
def merge_transactions(
left: ProjectTransactions | None,
right: ProjectTransactions | None,
totals: ProjectTransactionsTotals | None,
) -> ProjectTransactions:
if right is None and left is None:
raise ValueError(
"no transactions passed to merge",
)
if left is not None and right is not None and not is_same_project(left, right):
raise ValueError(
"mismatched project transactions",
(left["org_id"], left["project_id"]),
(right["org_id"], right["project_id"]),
)
if totals is not None and not is_same_project(left, totals):
left_tuple = (left["org_id"], left["project_id"]) if left is not None else None
totals_tuple = (totals["org_id"], totals["project_id"]) if totals is not None else None
raise ValueError(
"mismatched projectTransaction and projectTransactionTotals",
left_tuple,
totals_tuple,
)
assert left is not None
if right is None:
merged_transactions = left["transaction_counts"]
else:
# we have both left and right we need to merge
names = set()
merged_transactions = [*left["transaction_counts"]]
for transaction_name, _ in merged_transactions:
names.add(transaction_name)
for transaction_name, count in right["transaction_counts"]:
if transaction_name not in names:
# not already in left, add it
merged_transactions.append((transaction_name, count))
total_num_classes = totals.get("total_num_classes") if totals is not None else None
return {
"org_id": left["org_id"],
"project_id": left["project_id"],
"transaction_counts": merged_transactions,
"total_num_transactions": (
totals.get("total_num_transactions") if totals is not None else None
),
"total_num_classes": int(total_num_classes) if total_num_classes is not None else None,
}
def next_totals(
totals: Iterator[ProjectTransactionsTotals],
) -> Callable[[ProjectIdentity], ProjectTransactionsTotals | None]:
"""
Advances the total iterator until it reaches the required identity
Given a match the iterator returns None if it cannot find it ( i.e. it is
already past it) or it is at the end (it never terminates, DO NOT use it
in a for loop). If it finds the match it will return the total for the match.
"""
current: list[ProjectTransactionsTotals | None] = [None]
# protection for the case when the caller passes a list instead of an iterator
totals = iter(totals)
def inner(match: ProjectIdentity) -> ProjectTransactionsTotals | None:
if is_same_project(current[0], match):
temp = current[0]
current[0] = None
return temp
if current[0] is not None and is_project_identity_before(match, current[0]):
# still haven't reach current no point looking further
return None
for total in totals:
if is_same_project(total, match):
# found it
return total
if is_project_identity_before(match, total):
# we passed after match, remember were we are no need to go further
current[0] = total
return None
return None
return inner
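# Illustrative use (values assumed): `totals` must be sorted in the same
# (org_id, project_id) order as the project streams fed to transactions_zip.
#
#   get_next_total = next_totals(iter(totals_sorted))
#   total = get_next_total({"org_id": 1, "project_id": 10})  # totals dict or None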
def transactions_zip(
totals: Iterator[ProjectTransactionsTotals],
left: Iterator[ProjectTransactions],
right: Iterator[ProjectTransactions],
) -> Iterator[ProjectTransactions]:
"""
Returns a generator that zips left and right when they match and re-aligns the
sequences when they do not; when a matching totals entry is found, the result is
consolidated with that totals information as well.
"""
more_right = True
more_left = True
left_elm = None
right_elm = None
get_next_total = next_totals(totals)
while more_left or more_right:
if more_right and right_elm is None:
try:
right_elm = next(right)
except StopIteration:
more_right = False
right_elm = None
if more_left and left_elm is None:
try:
left_elm = next(left)
except StopIteration:
more_left = False
left_elm = None
if left_elm is None and right_elm is None:
return
if right_elm is not None and left_elm is not None:
# we have both right and left try to merge them if they point to the same entity
if is_same_project(left_elm, right_elm):
yield merge_transactions(left_elm, right_elm, get_next_total(left_elm))
left_elm = None
right_elm = None
elif is_project_identity_before(left_elm, right_elm):
# left is before right (return left keep right for next iteration)
yield merge_transactions(left_elm, None, get_next_total(left_elm))
left_elm = None
else: # project_before(right_elm, left_elm):
# right before left ( return right keep left for next iteration)
yield merge_transactions(right_elm, None, get_next_total(right_elm))
right_elm = None
else:
# only one is not None
if left_elm is not None:
yield merge_transactions(left_elm, None, get_next_total(left_elm))
left_elm = None
elif right_elm is not None:
yield merge_transactions(right_elm, None, get_next_total(right_elm))
right_elm = None
| FetchProjectTransactionVolumes |
python | pikepdf__pikepdf | src/pikepdf/form.py | {
"start": 25864,
"end": 27311
} | class ____(AppearanceStreamGenerator):
"""Basic appearance stream generator using QPDF's default algorithm.
It is thus subject to all the same
`limitations <https://qpdf.readthedocs.io/en/stable/cli.html#option-generate-appearances>`_.
Briefly summarized, these limitations are:
* Cannot generate appearance streams using encodings other than ASCII, WinAnsi, or
MacRoman
* No support for multiline text
* No support for auto-sized text
* Does not respect quadding
Using this class will produce the same results as the following code:
.. code-block:: python
form = Form(pdf, generate_appearances = None)
...
pdf.generate_appearances()
However, unlike the above, appearances will be generated on the fly as the form is
filled out, rather than all at once at the end.
You may extend this class to customize appearance streams or add support for
features you need.
"""
def generate_text(self, field: AcroFormField):
"""Generate the appearance stream for a text field."""
for annot in self.form.get_annotations_for_field(field):
field.generate_appearance(annot)
def generate_choice(self, field: AcroFormField):
"""Generate the appearance stream for a choice field."""
for annot in self.form.get_annotations_for_field(field):
field.generate_appearance(annot)
| DefaultAppearanceStreamGenerator |
python | openai__openai-python | src/openai/types/responses/file_search_tool_param.py | {
"start": 817,
"end": 1404
} | class ____(TypedDict, total=False):
hybrid_search: RankingOptionsHybridSearch
"""
Weights that control how reciprocal rank fusion balances semantic embedding
matches versus sparse keyword matches when hybrid search is enabled.
"""
ranker: Literal["auto", "default-2024-11-15"]
"""The ranker to use for the file search."""
score_threshold: float
"""The score threshold for the file search, a number between 0 and 1.
Numbers closer to 1 will attempt to return only the most relevant results, but
may return fewer results.
"""
| RankingOptions |
python | matplotlib__matplotlib | lib/matplotlib/scale.py | {
"start": 6152,
"end": 7348
} | class ____(ScaleBase):
"""
The default linear scale.
"""
name = 'linear'
@_make_axis_parameter_optional
def __init__(self, axis):
# This method is present only to prevent inheritance of the base class'
# constructor docstring, which would otherwise end up interpolated into
# the docstring of Axis.set_scale.
"""
""" # noqa: D419
def set_default_locators_and_formatters(self, axis):
# docstring inherited
axis.set_major_locator(AutoLocator())
axis.set_major_formatter(ScalarFormatter())
axis.set_minor_formatter(NullFormatter())
# update the minor locator for x and y axis based on rcParams
if (axis.axis_name == 'x' and mpl.rcParams['xtick.minor.visible'] or
axis.axis_name == 'y' and mpl.rcParams['ytick.minor.visible']):
axis.set_minor_locator(AutoMinorLocator())
else:
axis.set_minor_locator(NullLocator())
def get_transform(self):
"""
Return the transform for linear scaling, which is just the
`~matplotlib.transforms.IdentityTransform`.
"""
return IdentityTransform()
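# Usage note: this scale is registered under the name 'linear', so e.g.
# ax.set_xscale('linear') selects this class through the standard
# scale-factory machinery.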
| LinearScale |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/input/vt100_parser.py | {
"start": 1077,
"end": 1927
} | class ____(Dict[str, bool]):
"""
Dictionary that maps an input prefix to a boolean indicating whether any
known key sequence starts with these characters.
"""
def __missing__(self, prefix: str) -> bool:
# (hard coded) If this could be a prefix of a CPR response, return
# True.
if _cpr_response_prefix_re.match(prefix) or _mouse_event_prefix_re.match(
prefix
):
result = True
else:
# If this could be a prefix of anything else, also return True.
result = any(
v
for k, v in ANSI_SEQUENCES.items()
if k.startswith(prefix) and k != prefix
)
self[prefix] = result
return result
_IS_PREFIX_OF_LONGER_MATCH_CACHE = _IsPrefixOfLongerMatchCache()
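# Example lookup (behavior follows from __missing__ above): "\x1b[" prefixes
# many CSI sequences in ANSI_SEQUENCES, so the first lookup computes and
# caches True:
#
#   >>> _IS_PREFIX_OF_LONGER_MATCH_CACHE["\x1b["]
#   True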
| _IsPrefixOfLongerMatchCache |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 435552,
"end": 437502
} | class ____(ValueChannelMixin, core.OrderValueDef):
"""
OrderValue schema wrapper.
Parameters
----------
value : dict, float, :class:`ExprRef`
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
condition : dict, :class:`ConditionalValueDefnumber`, :class:`ConditionalParameterValueDefnumber`, :class:`ConditionalPredicateValueDefnumber`, Sequence[dict, :class:`ConditionalValueDefnumber`, :class:`ConditionalParameterValueDefnumber`, :class:`ConditionalPredicateValueDefnumber`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "order"
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[float] = Undefined,
) -> OrderValue: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[float] = Undefined,
) -> OrderValue: ...
@overload
def condition(self, _: list[core.ConditionalValueDefnumber], /) -> OrderValue: ...
def __init__(
self,
value,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
**kwds,
):
super().__init__(value=value, condition=condition, **kwds)
@with_property_setters
| OrderValue |
python | joke2k__faker | tests/providers/test_person.py | {
"start": 6536,
"end": 8420
} | class ____(unittest.TestCase):
"""Tests for az_AZ locale person provider"""
def setUp(self):
self.fake = Faker("az")
Faker.seed(0)
def test_first_name(self):
# General first name
name = self.fake.first_name()
assert name
self.assertIsInstance(name, str)
assert name in AzAzProvider.first_names
# Females first name
name = self.fake.first_name_female()
assert name
self.assertIsInstance(name, str)
assert name in AzAzProvider.first_names
assert name in AzAzProvider.first_names_female
# Male first name
name = self.fake.first_name_male()
assert name
self.assertIsInstance(name, str)
assert name in AzAzProvider.first_names
assert name in AzAzProvider.first_names_male
def test_last_name(self):
# General last name.
name = self.fake.last_name()
assert name
self.assertIsInstance(name, str)
assert name in AzAzProvider.last_names
# Females last name.
name = self.fake.last_name_female()
assert name
self.assertIsInstance(name, str)
assert name in AzAzProvider.last_names_female + AzAzProvider.last_names_unisex
# Females only last name.
name = self.fake.last_name_unique_to_female()
assert name
self.assertIsInstance(name, str)
assert name in AzAzProvider.last_names_female
# Male last name.
name = self.fake.last_name_male()
assert name
self.assertIsInstance(name, str)
assert name in AzAzProvider.last_names_male + AzAzProvider.last_names_unisex
# Male only last name.
name = self.fake.last_name_unique_to_male()
assert name
self.assertIsInstance(name, str)
assert name in AzAzProvider.last_names_male
| TestAzAz |
python | paramiko__paramiko | paramiko/_winapi.py | {
"start": 1454,
"end": 3634
} | class ____(builtins.WindowsError):
"""more info about errors at
http://msdn.microsoft.com/en-us/library/ms681381(VS.85).aspx"""
def __init__(self, value=None):
if value is None:
value = ctypes.windll.kernel32.GetLastError()
strerror = format_system_message(value)
args = 0, strerror, None, value
super().__init__(*args)
@property
def message(self):
return self.strerror
@property
def code(self):
return self.winerror
def __str__(self):
return self.message
def __repr__(self):
return "{self.__class__.__name__}({self.winerror})".format(**vars())
def handle_nonzero_success(result):
if result == 0:
raise WindowsError()
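# Typical pattern (sketch; the Win32 call is assumed): many kernel32 functions
# report failure with a zero/NULL return plus GetLastError(), which this helper
# converts into a Python exception:
#
#   result = some_win32_call(...)    # 0 / NULL on failure
#   handle_nonzero_success(result)   # raises WindowsError(GetLastError())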
###########################
# jaraco.windows.api.memory
GMEM_MOVEABLE = 0x2
GlobalAlloc = ctypes.windll.kernel32.GlobalAlloc
GlobalAlloc.argtypes = ctypes.wintypes.UINT, ctypes.c_size_t
GlobalAlloc.restype = ctypes.wintypes.HANDLE
GlobalLock = ctypes.windll.kernel32.GlobalLock
GlobalLock.argtypes = (ctypes.wintypes.HGLOBAL,)
GlobalLock.restype = ctypes.wintypes.LPVOID
GlobalUnlock = ctypes.windll.kernel32.GlobalUnlock
GlobalUnlock.argtypes = (ctypes.wintypes.HGLOBAL,)
GlobalUnlock.restype = ctypes.wintypes.BOOL
GlobalSize = ctypes.windll.kernel32.GlobalSize
GlobalSize.argtypes = (ctypes.wintypes.HGLOBAL,)
GlobalSize.restype = ctypes.c_size_t
CreateFileMapping = ctypes.windll.kernel32.CreateFileMappingW
CreateFileMapping.argtypes = [
ctypes.wintypes.HANDLE,
ctypes.c_void_p,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
ctypes.wintypes.LPWSTR,
]
CreateFileMapping.restype = ctypes.wintypes.HANDLE
MapViewOfFile = ctypes.windll.kernel32.MapViewOfFile
MapViewOfFile.restype = ctypes.wintypes.HANDLE
UnmapViewOfFile = ctypes.windll.kernel32.UnmapViewOfFile
UnmapViewOfFile.argtypes = (ctypes.wintypes.HANDLE,)
RtlMoveMemory = ctypes.windll.kernel32.RtlMoveMemory
RtlMoveMemory.argtypes = (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)
ctypes.windll.kernel32.LocalFree.argtypes = (ctypes.wintypes.HLOCAL,)
#####################
# jaraco.windows.mmap
| WindowsError |
python | python-markdown__markdown | tests/test_meta.py | {
"start": 74,
"end": 921
} | class ____(unittest.TestCase):
def test_get_version(self):
"""Test that _get_version formats __version_info__ as required by PEP 440."""
self.assertEqual(_get_version((1, 1, 2, 'dev', 0)), "1.1.2.dev0")
self.assertEqual(_get_version((1, 1, 2, 'alpha', 1)), "1.1.2a1")
self.assertEqual(_get_version((1, 2, 0, 'beta', 2)), "1.2b2")
self.assertEqual(_get_version((1, 2, 0, 'rc', 4)), "1.2rc4")
self.assertEqual(_get_version((1, 2, 0, 'final', 0)), "1.2")
def test__version__IsValid(self):
"""Test that __version__ is valid and normalized."""
try:
import packaging.version
except ImportError:
self.skipTest('packaging does not appear to be installed')
self.assertEqual(__version__, str(packaging.version.Version(__version__)))
| TestVersion |
python | scrapy__scrapy | tests/test_spidermiddleware_referer.py | {
"start": 23876,
"end": 24003
} | class ____(MixinDefault, TestRefererMiddleware):
req_meta = {"referrer_policy": POLICY_SCRAPY_DEFAULT}
| TestRequestMetaDefault |
python | jazzband__django-simple-history | simple_history/tests/tests/test_models.py | {
"start": 101795,
"end": 102432
} | class ____(TestCase):
def setUp(self):
self.model = ModelWithSingleNoDBIndexUnique
self.history_model = self.model.history.model
def test_unique_field_index(self):
# Ending up with deferred fields (dont know why), using work around
self.assertTrue(self.model._meta.get_field("name").db_index)
self.assertFalse(self.history_model._meta.get_field("name").db_index)
# keeps index
self.assertTrue(self.model._meta.get_field("name_keeps_index").db_index)
self.assertTrue(self.history_model._meta.get_field("name_keeps_index").db_index)
| ModelWithSingleNoDBIndexUniqueTest |
python | sqlalchemy__sqlalchemy | test/orm/declarative/test_tm_future_annotations.py | {
"start": 5162,
"end": 12829
} | class ____(_MappedColumnTest):
def test_fully_qualified_mapped_name(self, decl_base):
"""test #8853, regression caused by #8759 ;)
See same test in test_abs_import_only
"""
class Foo(decl_base):
__tablename__ = "foo"
id: sqlalchemy.orm.Mapped[int] = mapped_column(primary_key=True)
data: sqlalchemy.orm.Mapped[int] = mapped_column()
data2: sqlalchemy.orm.Mapped[int]
self.assert_compile(
select(Foo), "SELECT foo.id, foo.data, foo.data2 FROM foo"
)
def test_indirect_mapped_name_module_level(self, decl_base):
"""test #8759
Note that M by definition has to be at the module level to be
valid, and not locally declared here, this is in accordance with
mypy::
def make_class() -> None:
ll = list
x: ll[int] = [1, 2, 3]
Will return::
$ mypy test3.py
test3.py:4: error: Variable "ll" is not valid as a type [valid-type]
test3.py:4: note: See https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases
Found 1 error in 1 file (checked 1 source file)
Whereas the correct form is::
ll = list
def make_class() -> None:
x: ll[int] = [1, 2, 3]
""" # noqa: E501
class Foo(decl_base):
__tablename__ = "foo"
id: M[int] = mapped_column(primary_key=True)
data: M[int] = mapped_column()
data2: M[int]
self.assert_compile(
select(Foo), "SELECT foo.id, foo.data, foo.data2 FROM foo"
)
def test_type_favors_outer(self, decl_base):
"""test #10899, that we maintain favoring outer names vs. inner.
this is for backwards compatibility as well as what people
usually expect regarding the names of attributes in the class.
"""
class User(decl_base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
uuid: Mapped[uuid.UUID] = mapped_column()
is_true(isinstance(User.__table__.c.uuid.type, sqltypes.Uuid))
def test_type_inline_cls_qualified(self, decl_base):
"""test #10899, where we test that we can refer to the class name
directly to refer to class-bound elements.
"""
class User(decl_base):
__tablename__ = "user"
class Role(enum.Enum):
admin = "admin"
user = "user"
id: Mapped[int] = mapped_column(primary_key=True)
role: Mapped[User.Role]
is_true(isinstance(User.__table__.c.role.type, sqltypes.Enum))
eq_(User.__table__.c.role.type.length, 5)
is_(User.__table__.c.role.type.enum_class, User.Role)
def test_type_inline_disambiguate(self, decl_base):
"""test #10899, where we test that we can refer to an inner name
that's not in conflict directly without qualification.
"""
class User(decl_base):
__tablename__ = "user"
class Role(enum.Enum):
admin = "admin"
user = "user"
id: Mapped[int] = mapped_column(primary_key=True)
role: Mapped[Role]
is_true(isinstance(User.__table__.c.role.type, sqltypes.Enum))
eq_(User.__table__.c.role.type.length, 5)
is_(User.__table__.c.role.type.enum_class, User.Role)
eq_(User.__table__.c.role.type.name, "role") # and not 'enum'
def test_type_inner_can_be_qualified(self, decl_base):
"""test #10899, same test as that of Role, using it to qualify against
a global variable with the same name.
"""
global SomeGlobalName
SomeGlobalName = None
class User(decl_base):
__tablename__ = "user"
class SomeGlobalName(enum.Enum):
admin = "admin"
user = "user"
id: Mapped[int] = mapped_column(primary_key=True)
role: Mapped[User.SomeGlobalName]
is_true(isinstance(User.__table__.c.role.type, sqltypes.Enum))
eq_(User.__table__.c.role.type.length, 5)
is_(User.__table__.c.role.type.enum_class, User.SomeGlobalName)
def test_indirect_mapped_name_local_level(self, decl_base):
"""test #8759.
this should raise an error.
"""
M2 = Mapped
with expect_raises_message(
exc.ArgumentError,
r"Could not interpret annotation M2\[int\]. Check that it "
"uses names that are correctly imported at the module level.",
):
class Foo(decl_base):
__tablename__ = "foo"
id: M2[int] = mapped_column(primary_key=True)
data2: M2[int]
def test_indirect_mapped_name_itswrong(self, decl_base):
"""test #8759.
this should raise an error.
"""
with expect_annotation_syntax_error("Foo.id"):
class Foo(decl_base):
__tablename__ = "foo"
id: M3[int] = mapped_column(primary_key=True)
data2: M3[int]
def test_typ_not_in_cls_namespace(self, decl_base):
"""test #8742.
This tests that when types are resolved, they use the ``__module__``
of the class they are used within, not the mapped class.
"""
class Mixin:
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[uuid.UUID]
class MyClass(Mixin, decl_base):
# basically no type will be resolvable here
__module__ = "some.module"
__tablename__ = "mytable"
is_(MyClass.id.expression.type._type_affinity, Integer)
is_(MyClass.data.expression.type._type_affinity, Uuid)
def test_dont_ignore_unresolvable(self, decl_base):
"""test #8888"""
with expect_raises_message(
exc.ArgumentError,
r"Could not resolve all types within mapped annotation: "
r"\"Mapped\[fake\]\". Ensure all types are written correctly and "
r"are imported within the module in use.",
):
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[fake] # noqa
@testing.variation(
"reference_type",
[
"plain",
"plain_optional",
"container_w_local_mapped",
"container_w_remote_mapped",
],
)
def test_i_have_a_classvar_on_my_class(self, decl_base, reference_type):
if reference_type.container_w_remote_mapped:
class MyOtherClass(decl_base):
__tablename__ = "myothertable"
id: Mapped[int] = mapped_column(primary_key=True)
class MyClass(decl_base):
__tablename__ = "mytable"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = mapped_column(default="some default")
if reference_type.container_w_remote_mapped:
status: ClassVar[Dict[str, MyOtherClass]]
elif reference_type.container_w_local_mapped:
status: ClassVar[Dict[str, MyClass]]
elif reference_type.plain_optional:
status: ClassVar[Optional[int]]
elif reference_type.plain:
status: ClassVar[int]
m1 = MyClass(id=1, data=5)
assert "status" not in inspect(m1).mapper.attrs
| MappedColumnTest |
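The module-level alias these tests depend on can be reproduced with a short sketch; under PEP 563 (from __future__ import annotations), only names resolvable at module level can back a Mapped annotation, which is why the local alias M2 above raises:

from __future__ import annotations

from sqlalchemy import select
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column

M = Mapped  # module-level alias: resolvable in annotations

class Base(DeclarativeBase):
    pass

class Foo(Base):
    __tablename__ = "foo"
    id: M[int] = mapped_column(primary_key=True)
    data: M[int]

print(select(Foo))  # SELECT foo.id, foo.data FROM foo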
python | sqlalchemy__sqlalchemy | test/engine/test_logging.py | {
"start": 20235,
"end": 23395
} | class ____(fixtures.TestBase):
def setup_test(self):
self.existing_level = logging.getLogger("sqlalchemy.pool").level
self.buf = logging.handlers.BufferingHandler(100)
for log in [logging.getLogger("sqlalchemy.pool")]:
log.addHandler(self.buf)
def teardown_test(self):
for log in [logging.getLogger("sqlalchemy.pool")]:
log.removeHandler(self.buf)
logging.getLogger("sqlalchemy.pool").setLevel(self.existing_level)
def _queuepool_echo_fixture(self):
return tsa.pool.QueuePool(creator=mock.Mock(), echo="debug")
def _queuepool_logging_fixture(self):
logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG)
return tsa.pool.QueuePool(creator=mock.Mock())
def _stpool_echo_fixture(self):
return tsa.pool.SingletonThreadPool(creator=mock.Mock(), echo="debug")
def _stpool_logging_fixture(self):
logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG)
return tsa.pool.SingletonThreadPool(creator=mock.Mock())
def _test_queuepool(self, q, dispose=True):
conn = q.connect()
conn.close()
conn = None
conn = q.connect()
conn.close()
conn = None
conn = q.connect()
conn._close_special(transaction_reset=True)
conn = None
conn = q.connect()
conn._close_special(transaction_reset=False)
conn = None
conn = q.connect()
conn = None
del conn
lazy_gc()
q.dispose()
eq_(
[buf.msg for buf in self.buf.buffer],
[
"Created new connection %r",
"Connection %r checked out from pool",
"Connection %r being returned to pool",
"Connection %s rollback-on-return",
"Connection %r checked out from pool",
"Connection %r being returned to pool",
"Connection %s rollback-on-return",
"Connection %r checked out from pool",
"Connection %r being returned to pool",
"Connection %s reset, transaction already reset",
"Connection %r checked out from pool",
"Connection %r being returned to pool",
"Connection %s rollback-on-return",
"Connection %r checked out from pool",
"Connection %r being returned to pool",
"Connection %s rollback-on-return",
"%s connection %r",
]
+ (["Pool disposed. %s"] if dispose else []),
)
def test_stpool_echo(self):
q = self._stpool_echo_fixture()
self._test_queuepool(q, False)
def test_stpool_logging(self):
q = self._stpool_logging_fixture()
self._test_queuepool(q, False)
@testing.requires.predictable_gc
def test_queuepool_echo(self):
q = self._queuepool_echo_fixture()
self._test_queuepool(q)
@testing.requires.predictable_gc
def test_queuepool_logging(self):
q = self._queuepool_logging_fixture()
self._test_queuepool(q)
| PoolLoggingTest |
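Outside the test harness, the same debug-level pool messages can be enabled in either of two ways (a sketch; the connection URL is a placeholder):

import logging

from sqlalchemy import create_engine

# 1) Per engine, via the echo_pool flag ("debug" also logs checkouts/checkins):
engine = create_engine("sqlite://", echo_pool="debug")

# 2) Globally, via the stdlib logger the test above manipulates directly:
logging.basicConfig()
logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG)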
python | graphql-python__graphene | graphene/types/tests/test_scalar.py | {
"start": 8439,
"end": 10292
} | class ____:
def test_query(self):
"""
Test that a normal query works.
"""
result = schema.execute('{ optional { string(input: "something something") } }')
assert not result.errors
assert result.data == {"optional": {"string": "something something"}}
result = schema.execute('{ optional { string(input: "True") } }')
assert not result.errors
assert result.data == {"optional": {"string": "True"}}
result = schema.execute('{ optional { string(input: "0") } }')
assert not result.errors
assert result.data == {"optional": {"string": "0"}}
def test_optional_input(self):
"""
Test that we can provide a null value to an optional input
"""
result = schema.execute("{ optional { string(input: null) } }")
assert not result.errors
assert result.data == {"optional": {"string": None}}
def test_invalid_input(self):
"""
Test that if an invalid type is provided we get an error
"""
result = schema.execute("{ optional { string(input: 1) } }")
assert result.errors
assert len(result.errors) == 1
assert (
result.errors[0].message == "String cannot represent a non string value: 1"
)
result = schema.execute("{ optional { string(input: 3.2) } }")
assert result.errors
assert len(result.errors) == 1
assert (
result.errors[0].message
== "String cannot represent a non string value: 3.2"
)
result = schema.execute("{ optional { string(input: true) } }")
assert result.errors
assert len(result.errors) == 1
assert (
result.errors[0].message
== "String cannot represent a non string value: true"
)
| TestString |
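A hedged sketch of the schema these tests appear to execute against: an optional field whose string resolver simply echoes back an optional String argument named input.

import graphene

class Optional(graphene.ObjectType):
    string = graphene.String(input=graphene.String())

    def resolve_string(root, info, input=None):
        return input

class Query(graphene.ObjectType):
    optional = graphene.Field(Optional)

    def resolve_optional(root, info):
        return Optional()

schema = graphene.Schema(query=Query)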
python | pallets__click | src/click/core.py | {
"start": 101764,
"end": 128873
} | class ____(Parameter):
"""Options are usually optional values on the command line and
have some extra features that arguments don't have.
All other parameters are passed onwards to the parameter constructor.
:param show_default: Show the default value for this option in its
help text. Values are not shown by default, unless
:attr:`Context.show_default` is ``True``. If this value is a
string, it shows that string in parentheses instead of the
actual value. This is particularly useful for dynamic options.
For single option boolean flags, the default remains hidden if
its value is ``False``.
:param show_envvar: Controls if an environment variable should be
shown on the help page and in error messages.
Normally, environment variables are not shown.
:param prompt: If set to ``True`` or a non-empty string then the
user will be prompted for input. If set to ``True`` the prompt
will be the option name capitalized. A deprecated option cannot be
prompted.
:param confirmation_prompt: Prompt a second time to confirm the
value if it was prompted for. Can be set to a string instead of
``True`` to customize the message.
:param prompt_required: If set to ``False``, the user will be
prompted for input only when the option was specified as a flag
without a value.
:param hide_input: If this is ``True`` then the input on the prompt
will be hidden from the user. This is useful for password input.
:param is_flag: forces this option to act as a flag. The default is
auto-detection.
:param flag_value: which value should be used for this flag if it's
enabled. This is set to a boolean automatically if
the option string contains a slash to mark two options.
:param multiple: if this is set to `True` then the argument is accepted
multiple times and recorded. This is similar to ``nargs``
in how it works but supports an arbitrary number of
arguments.
:param count: this flag makes an option increment an integer.
:param allow_from_autoenv: if this is enabled then the value of this
parameter will be pulled from an environment
variable in case a prefix is defined on the
context.
:param help: the help string.
:param hidden: hide this option from help outputs.
:param attrs: Other command arguments described in :class:`Parameter`.
.. versionchanged:: 8.2
``envvar`` used with ``flag_value`` will always use the ``flag_value``;
previously it would use the value of the environment variable.
.. versionchanged:: 8.1
Help text indentation is cleaned here instead of only in the
``@option`` decorator.
.. versionchanged:: 8.1
The ``show_default`` parameter overrides
``Context.show_default``.
.. versionchanged:: 8.1
The default of a single option boolean flag is not shown if the
default value is ``False``.
.. versionchanged:: 8.0.1
``type`` is detected from ``flag_value`` if given.
"""
param_type_name = "option"
def __init__(
self,
param_decls: cabc.Sequence[str] | None = None,
show_default: bool | str | None = None,
prompt: bool | str = False,
confirmation_prompt: bool | str = False,
prompt_required: bool = True,
hide_input: bool = False,
is_flag: bool | None = None,
flag_value: t.Any = UNSET,
multiple: bool = False,
count: bool = False,
allow_from_autoenv: bool = True,
type: types.ParamType | t.Any | None = None,
help: str | None = None,
hidden: bool = False,
show_choices: bool = True,
show_envvar: bool = False,
deprecated: bool | str = False,
**attrs: t.Any,
) -> None:
if help:
help = inspect.cleandoc(help)
super().__init__(
param_decls, type=type, multiple=multiple, deprecated=deprecated, **attrs
)
if prompt is True:
if self.name is None:
raise TypeError("'name' is required with 'prompt=True'.")
prompt_text: str | None = self.name.replace("_", " ").capitalize()
elif prompt is False:
prompt_text = None
else:
prompt_text = prompt
if deprecated:
deprecated_message = (
f"(DEPRECATED: {deprecated})"
if isinstance(deprecated, str)
else "(DEPRECATED)"
)
help = help + deprecated_message if help is not None else deprecated_message
self.prompt = prompt_text
self.confirmation_prompt = confirmation_prompt
self.prompt_required = prompt_required
self.hide_input = hide_input
self.hidden = hidden
# The _flag_needs_value property tells the parser that this option is a flag
# that cannot be used standalone and needs a value. With this information, the
# parser can determine whether to consider the next user-provided argument in
# the CLI as a value for this flag or as a new option.
# If prompt is enabled but not required, then it opens the possibility for the
# option to get its value from the user.
self._flag_needs_value = self.prompt is not None and not self.prompt_required
# Auto-detect if this is a flag or not.
if is_flag is None:
# Implicitly a flag because flag_value was set.
if flag_value is not UNSET:
is_flag = True
# Not a flag, but when used as a flag it shows a prompt.
elif self._flag_needs_value:
is_flag = False
# Implicitly a flag because secondary options names were given.
elif self.secondary_opts:
is_flag = True
# The option is explicitly not a flag, but to determine whether or not it needs
# value, we need to check if `flag_value` or `default` was set. Either one is
# sufficient.
# Ref: https://github.com/pallets/click/issues/3084
elif is_flag is False and not self._flag_needs_value:
self._flag_needs_value = flag_value is not UNSET or self.default is UNSET
if is_flag:
# Set missing default for flags if not explicitly required or prompted.
if self.default is UNSET and not self.required and not self.prompt:
if multiple:
self.default = ()
# Auto-detect the type of the flag based on the flag_value.
if type is None:
# A flag without a flag_value is a boolean flag.
if flag_value is UNSET:
self.type: types.ParamType = types.BoolParamType()
# If the flag value is a boolean, use BoolParamType.
elif isinstance(flag_value, bool):
self.type = types.BoolParamType()
# Otherwise, guess the type from the flag value.
else:
self.type = types.convert_type(None, flag_value)
self.is_flag: bool = bool(is_flag)
self.is_bool_flag: bool = bool(
is_flag and isinstance(self.type, types.BoolParamType)
)
self.flag_value: t.Any = flag_value
# Set boolean flag default to False if unset and not required.
if self.is_bool_flag:
if self.default is UNSET and not self.required:
self.default = False
# Support the special case of aligning the default value with the flag_value
# for flags whose default is explicitly set to True. Note that as long as we
# have this condition, there is no way a flag can have a default set to True,
# and a flag_value set to something else. Refs:
# https://github.com/pallets/click/issues/3024#issuecomment-3146199461
# https://github.com/pallets/click/pull/3030/commits/06847da
if self.default is True and self.flag_value is not UNSET:
self.default = self.flag_value
# Set the default flag_value if it is not set.
if self.flag_value is UNSET:
if self.is_flag:
self.flag_value = True
else:
self.flag_value = None
# Counting.
self.count = count
if count:
if type is None:
self.type = types.IntRange(min=0)
if self.default is UNSET:
self.default = 0
self.allow_from_autoenv = allow_from_autoenv
self.help = help
self.show_default = show_default
self.show_choices = show_choices
self.show_envvar = show_envvar
if __debug__:
if deprecated and prompt:
raise ValueError("`deprecated` options cannot use `prompt`.")
if self.nargs == -1:
raise TypeError("nargs=-1 is not supported for options.")
if not self.is_bool_flag and self.secondary_opts:
raise TypeError("Secondary flag is not valid for non-boolean flag.")
if self.is_bool_flag and self.hide_input and self.prompt is not None:
raise TypeError(
"'prompt' with 'hide_input' is not valid for boolean flag."
)
if self.count:
if self.multiple:
raise TypeError("'count' is not valid with 'multiple'.")
if self.is_flag:
raise TypeError("'count' is not valid with 'is_flag'.")
def to_info_dict(self) -> dict[str, t.Any]:
"""
.. versionchanged:: 8.3.0
Returns ``None`` for the :attr:`flag_value` if it was not set.
"""
info_dict = super().to_info_dict()
info_dict.update(
help=self.help,
prompt=self.prompt,
is_flag=self.is_flag,
# We explicitly hide the :attr:`UNSET` value from the user, as we choose to
# make it an implementation detail. And because ``to_info_dict`` has been
# designed for documentation purposes, we return ``None`` instead.
flag_value=self.flag_value if self.flag_value is not UNSET else None,
count=self.count,
hidden=self.hidden,
)
return info_dict
def get_error_hint(self, ctx: Context) -> str:
result = super().get_error_hint(ctx)
if self.show_envvar and self.envvar is not None:
result += f" (env var: '{self.envvar}')"
return result
def _parse_decls(
self, decls: cabc.Sequence[str], expose_value: bool
) -> tuple[str | None, list[str], list[str]]:
opts = []
secondary_opts = []
name = None
possible_names = []
for decl in decls:
if decl.isidentifier():
if name is not None:
raise TypeError(f"Name '{name}' defined twice")
name = decl
else:
split_char = ";" if decl[:1] == "/" else "/"
if split_char in decl:
first, second = decl.split(split_char, 1)
first = first.rstrip()
if first:
possible_names.append(_split_opt(first))
opts.append(first)
second = second.lstrip()
if second:
secondary_opts.append(second.lstrip())
if first == second:
raise ValueError(
f"Boolean option {decl!r} cannot use the"
" same flag for true/false."
)
else:
possible_names.append(_split_opt(decl))
opts.append(decl)
if name is None and possible_names:
possible_names.sort(key=lambda x: -len(x[0])) # group long options first
name = possible_names[0][1].replace("-", "_").lower()
if not name.isidentifier():
name = None
if name is None:
if not expose_value:
return None, opts, secondary_opts
raise TypeError(
f"Could not determine name for option with declarations {decls!r}"
)
if not opts and not secondary_opts:
raise TypeError(
f"No options defined but a name was passed ({name})."
" Did you mean to declare an argument instead? Did"
f" you mean to pass '--{name}'?"
)
return name, opts, secondary_opts
def add_to_parser(self, parser: _OptionParser, ctx: Context) -> None:
if self.multiple:
action = "append"
elif self.count:
action = "count"
else:
action = "store"
if self.is_flag:
action = f"{action}_const"
if self.is_bool_flag and self.secondary_opts:
parser.add_option(
obj=self, opts=self.opts, dest=self.name, action=action, const=True
)
parser.add_option(
obj=self,
opts=self.secondary_opts,
dest=self.name,
action=action,
const=False,
)
else:
parser.add_option(
obj=self,
opts=self.opts,
dest=self.name,
action=action,
const=self.flag_value,
)
else:
parser.add_option(
obj=self,
opts=self.opts,
dest=self.name,
action=action,
nargs=self.nargs,
)
def get_help_record(self, ctx: Context) -> tuple[str, str] | None:
if self.hidden:
return None
any_prefix_is_slash = False
def _write_opts(opts: cabc.Sequence[str]) -> str:
nonlocal any_prefix_is_slash
rv, any_slashes = join_options(opts)
if any_slashes:
any_prefix_is_slash = True
if not self.is_flag and not self.count:
rv += f" {self.make_metavar(ctx=ctx)}"
return rv
rv = [_write_opts(self.opts)]
if self.secondary_opts:
rv.append(_write_opts(self.secondary_opts))
help = self.help or ""
extra = self.get_help_extra(ctx)
extra_items = []
if "envvars" in extra:
extra_items.append(
_("env var: {var}").format(var=", ".join(extra["envvars"]))
)
if "default" in extra:
extra_items.append(_("default: {default}").format(default=extra["default"]))
if "range" in extra:
extra_items.append(extra["range"])
if "required" in extra:
extra_items.append(_(extra["required"]))
if extra_items:
extra_str = "; ".join(extra_items)
help = f"{help} [{extra_str}]" if help else f"[{extra_str}]"
return ("; " if any_prefix_is_slash else " / ").join(rv), help
def get_help_extra(self, ctx: Context) -> types.OptionHelpExtra:
extra: types.OptionHelpExtra = {}
if self.show_envvar:
envvar = self.envvar
if envvar is None:
if (
self.allow_from_autoenv
and ctx.auto_envvar_prefix is not None
and self.name is not None
):
envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}"
if envvar is not None:
if isinstance(envvar, str):
extra["envvars"] = (envvar,)
else:
extra["envvars"] = tuple(str(d) for d in envvar)
# Temporarily enable resilient parsing to avoid type casting
# failing for the default. Might be possible to extend this to
# help formatting in general.
resilient = ctx.resilient_parsing
ctx.resilient_parsing = True
try:
default_value = self.get_default(ctx, call=False)
finally:
ctx.resilient_parsing = resilient
show_default = False
show_default_is_str = False
if self.show_default is not None:
if isinstance(self.show_default, str):
show_default_is_str = show_default = True
else:
show_default = self.show_default
elif ctx.show_default is not None:
show_default = ctx.show_default
if show_default_is_str or (
show_default and (default_value not in (None, UNSET))
):
if show_default_is_str:
default_string = f"({self.show_default})"
elif isinstance(default_value, (list, tuple)):
default_string = ", ".join(str(d) for d in default_value)
elif isinstance(default_value, enum.Enum):
default_string = default_value.name
elif inspect.isfunction(default_value):
default_string = _("(dynamic)")
elif self.is_bool_flag and self.secondary_opts:
# For boolean flags that have distinct True/False opts,
# use the opt without prefix instead of the value.
default_string = _split_opt(
(self.opts if default_value else self.secondary_opts)[0]
)[1]
elif self.is_bool_flag and not self.secondary_opts and not default_value:
default_string = ""
elif default_value == "":
default_string = '""'
else:
default_string = str(default_value)
if default_string:
extra["default"] = default_string
if (
isinstance(self.type, types._NumberRangeBase)
# skip count with default range type
and not (self.count and self.type.min == 0 and self.type.max is None)
):
range_str = self.type._describe_range()
if range_str:
extra["range"] = range_str
if self.required:
extra["required"] = "required"
return extra
def prompt_for_value(self, ctx: Context) -> t.Any:
"""This is an alternative flow that can be activated in the full
value processing if a value does not exist. It will prompt the
user until a valid value exists and then returns the processed
value as result.
"""
assert self.prompt is not None
# Calculate the default before prompting anything to lock in the value before
# attempting any user interaction.
default = self.get_default(ctx)
# A boolean flag can use a simplified [y/n] confirmation prompt.
if self.is_bool_flag:
# If we have no boolean default, we force the user to explicitly provide
# one.
if default in (UNSET, None):
default = None
# Nothing prevents you from declaring an option that is simultaneously:
# 1) auto-detected as a boolean flag,
# 2) allowed to prompt, and
# 3) still declare a non-boolean default.
# This forced casting into a boolean is necessary to align any non-boolean
# default to the prompt, which is going to be a [y/n]-style confirmation
# because the option is still a boolean flag. That way, instead of [y/n],
# we get [Y/n] or [y/N] depending on the truthy value of the default.
# Refs: https://github.com/pallets/click/pull/3030#discussion_r2289180249
else:
default = bool(default)
return confirm(self.prompt, default)
# If show_default is set to True/False, provide this to `prompt` as well. For
# non-bool values of `show_default`, we use `prompt`'s default behavior
prompt_kwargs: t.Any = {}
if isinstance(self.show_default, bool):
prompt_kwargs["show_default"] = self.show_default
return prompt(
self.prompt,
# Use ``None`` to inform the prompt() function to reiterate until a valid
# value is provided by the user if we have no default.
default=None if default is UNSET else default,
type=self.type,
hide_input=self.hide_input,
show_choices=self.show_choices,
confirmation_prompt=self.confirmation_prompt,
value_proc=lambda x: self.process_value(ctx, x),
**prompt_kwargs,
)
def resolve_envvar_value(self, ctx: Context) -> str | None:
""":class:`Option` resolves its environment variable the same way as
:func:`Parameter.resolve_envvar_value`, but it also supports
:attr:`Context.auto_envvar_prefix`. If we could not find an environment
variable from the :attr:`envvar` property, we fall back on
:attr:`Context.auto_envvar_prefix` to dynamically build the environment
variable name using the
:python:`{ctx.auto_envvar_prefix}_{self.name.upper()}` template.
:meta private:
"""
rv = super().resolve_envvar_value(ctx)
if rv is not None:
return rv
if (
self.allow_from_autoenv
and ctx.auto_envvar_prefix is not None
and self.name is not None
):
envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}"
rv = os.environ.get(envvar)
if rv:
return rv
return None
def value_from_envvar(self, ctx: Context) -> t.Any:
"""For :class:`Option`, this method processes the raw environment variable
string the same way as :func:`Parameter.value_from_envvar` does.
But in the case of non-boolean flags, the value is analyzed to determine if the
flag is activated or not, and the method returns a boolean indicating its
activation, or the :attr:`flag_value` if the latter is set.
This method also takes care of repeated options (i.e. options with
:attr:`multiple` set to ``True``).
:meta private:
"""
rv = self.resolve_envvar_value(ctx)
# An absent environment variable or an empty string is interpreted as unset.
if rv is None:
return None
# Non-boolean flags are more liberal in what they accept. But a flag being a
# flag, its envvar value still needs to be analyzed to determine if the flag is
# activated or not.
if self.is_flag and not self.is_bool_flag:
# If the flag_value is set and matches the envvar value, return it
# directly.
if self.flag_value is not UNSET and rv == self.flag_value:
return self.flag_value
# Analyze the envvar value as a boolean to know if the flag is
# activated or not.
return types.BoolParamType.str_to_bool(rv)
# Split the envvar value if it is allowed to be repeated.
value_depth = (self.nargs != 1) + bool(self.multiple)
if value_depth > 0:
multi_rv = self.type.split_envvar_value(rv)
if self.multiple and self.nargs != 1:
multi_rv = batch(multi_rv, self.nargs) # type: ignore[assignment]
return multi_rv
return rv
def consume_value(
self, ctx: Context, opts: cabc.Mapping[str, Parameter]
) -> tuple[t.Any, ParameterSource]:
"""For :class:`Option`, the value can be collected from an interactive prompt
if the option is a flag that needs a value (and the :attr:`prompt` property is
set).
Additionally, this method handles flag options that are activated without a
value, in which case the :attr:`flag_value` is returned.
:meta private:
"""
value, source = super().consume_value(ctx, opts)
# The parser will emit a sentinel value if the option is allowed to be used
# as a flag without a value.
if value is FLAG_NEEDS_VALUE:
# If the option allows for a prompt, we start an interaction with the user.
if self.prompt is not None and not ctx.resilient_parsing:
value = self.prompt_for_value(ctx)
source = ParameterSource.PROMPT
# Else the flag takes its flag_value as value.
else:
value = self.flag_value
source = ParameterSource.COMMANDLINE
# A flag which is activated always returns the flag value, unless the value
# comes from an explicitly set default.
elif (
self.is_flag
and value is True
and not self.is_bool_flag
and source not in (ParameterSource.DEFAULT, ParameterSource.DEFAULT_MAP)
):
value = self.flag_value
# Re-interpret a multiple option which has been sent as-is by the parser.
# Here we replace each occurrence of value-less flags (marked by the
# FLAG_NEEDS_VALUE sentinel) with the flag_value.
elif (
self.multiple
and value is not UNSET
and source not in (ParameterSource.DEFAULT, ParameterSource.DEFAULT_MAP)
and any(v is FLAG_NEEDS_VALUE for v in value)
):
value = [self.flag_value if v is FLAG_NEEDS_VALUE else v for v in value]
source = ParameterSource.COMMANDLINE
# The value wasn't set, or came from the param's default; prompt the user
# for one if prompting is enabled.
elif (
(
value is UNSET
or source in (ParameterSource.DEFAULT, ParameterSource.DEFAULT_MAP)
)
and self.prompt is not None
and (self.required or self.prompt_required)
and not ctx.resilient_parsing
):
value = self.prompt_for_value(ctx)
source = ParameterSource.PROMPT
return value, source
def process_value(self, ctx: Context, value: t.Any) -> t.Any:
# process_value has to be overridden on Options in order to capture
# `value == UNSET` cases before `type_cast_value()` gets called.
#
# Refs:
# https://github.com/pallets/click/issues/3069
if self.is_flag and not self.required and self.is_bool_flag and value is UNSET:
value = False
if self.callback is not None:
value = self.callback(ctx, self, value)
return value
# in the normal case, rely on Parameter.process_value
return super().process_value(ctx, value)
| Option |
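A short usage sketch tying the documented behaviors together: prompting, a paired boolean flag with a visible default, and a counting option.

import click

@click.command()
@click.option("--name", prompt=True, help="Prompted for if not provided.")
@click.option("--shout/--no-shout", default=False, show_default=True)
@click.option("-v", "--verbose", count=True, help="Repeat to increase verbosity.")
def hello(name, shout, verbose):
    msg = f"Hello, {name}!"
    click.echo(msg.upper() if shout else msg)

if __name__ == "__main__":
    hello()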
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/eventbridge.py | {
"start": 6717,
"end": 8567
} | class ____(AwsBaseOperator[EventBridgeHook]):
"""
Enable an EventBridge Rule.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EventBridgeEnableRuleOperator`
:param name: the name of the rule to enable
:param event_bus_name: the name or ARN of the event bus associated with the rule (default if omitted)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = EventBridgeHook
template_fields: Sequence[str] = aws_template_fields("name", "event_bus_name")
def __init__(self, *, name: str, event_bus_name: str | None = None, **kwargs):
super().__init__(**kwargs)
self.name = name
self.event_bus_name = event_bus_name
def execute(self, context: Context):
self.hook.conn.enable_rule(
**prune_dict(
{
"Name": self.name,
"EventBusName": self.event_bus_name,
}
)
)
self.log.info('Enabled rule "%s"', self.name)
| EventBridgeEnableRuleOperator |
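A hedged usage sketch of this operator inside a DAG (rule and bus names are placeholders):

from airflow.providers.amazon.aws.operators.eventbridge import (
    EventBridgeEnableRuleOperator,
)

enable_rule = EventBridgeEnableRuleOperator(
    task_id="enable_rule",
    name="my_rule",
    event_bus_name="my-event-bus",
)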