language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 113313,
"end": 117271
} | class ____:
def setup_method(self):
self.rng = np.random.default_rng(2775995570)
def test_rvs(self):
vals = stats.pearson3.rvs(0.1, size=(2, 50), random_state=self.rng)
assert np.shape(vals) == (2, 50)
assert vals.dtype.char in typecodes['AllFloat']
val = stats.pearson3.rvs(0.5, random_state=self.rng)
assert isinstance(val, float)
val = stats.pearson3(0.5).rvs(3, random_state=self.rng)
assert isinstance(val, np.ndarray)
assert val.dtype.char in typecodes['AllFloat']
assert len(val) == 3
def test_pdf(self):
vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
atol=1e-6)
vals = stats.pearson3.pdf(-3, 0.1)
assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)
assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
0.39885918, 0.23413173]), atol=1e-6)
def test_cdf(self):
vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
atol=1e-6)
vals = stats.pearson3.cdf(-3, 0.1)
assert_allclose(vals, [0.00082256], atol=1e-6)
vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)
assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
5.06649130e-01, 8.41442111e-01], atol=1e-6)
def test_negative_cdf_bug_11186(self):
# incorrect CDFs for negative skews in gh-11186; fixed in gh-12640
# Also check vectorization w/ negative, zero, and positive skews
skews = [-3, -1, 0, 0.5]
x_eval = 0.5
neg_inf = -30 # avoid RuntimeWarning caused by np.log(0)
cdfs = stats.pearson3.cdf(x_eval, skews)
int_pdfs = [quad(stats.pearson3(skew).pdf, neg_inf, x_eval)[0]
for skew in skews]
assert_allclose(cdfs, int_pdfs)
def test_return_array_bug_11746(self):
# pearson3.moment was returning size 0 or 1 array instead of float
# The first moment is equal to the loc, which defaults to zero
moment = stats.pearson3.moment(1, 2)
assert_equal(moment, 0)
assert isinstance(moment, np.number)
moment = stats.pearson3.moment(1, 0.000001)
assert_equal(moment, 0)
assert isinstance(moment, np.number)
def test_ppf_bug_17050(self):
# incorrect PPF for negative skews were reported in gh-17050
# Check that this is fixed (even in the array case)
skews = [-3, -1, 0, 0.5]
x_eval = 0.5
res = stats.pearson3.ppf(stats.pearson3.cdf(x_eval, skews), skews)
assert_allclose(res, x_eval)
# Negation of the skew flips the distribution about the origin, so
# the following should hold
skew = np.array([[-0.5], [1.5]])
x = np.linspace(-2, 2)
assert_allclose(stats.pearson3.pdf(x, skew),
stats.pearson3.pdf(-x, -skew))
assert_allclose(stats.pearson3.cdf(x, skew),
stats.pearson3.sf(-x, -skew))
assert_allclose(stats.pearson3.ppf(x, skew),
-stats.pearson3.isf(x, -skew))
def test_sf(self):
# reference values were computed via the reference distribution, e.g.
# mp.dps = 50; Pearson3(skew=skew).sf(x). Check positive, negative,
# and zero skew due to branching.
skew = [0.1, 0.5, 1.0, -0.1]
x = [5.0, 10.0, 50.0, 8.0]
ref = [1.64721926440872e-06, 8.271911573556123e-11,
1.3149506021756343e-40, 2.763057937820296e-21]
assert_allclose(stats.pearson3.sf(x, skew), ref, rtol=2e-14)
assert_allclose(stats.pearson3.sf(x, 0), stats.norm.sf(x), rtol=2e-14)
| TestPearson3 |
python | django-extensions__django-extensions | tests/testapp/models.py | {
"start": 10969,
"end": 11203
} | class ____(models.Model):
random_char_field = RandomCharField(length=8, uppercase=True)
class Meta:
app_label = "django_extensions"
verbose_name = "uppercase alpha digits"
| RandomCharTestModelUppercaseAlphaDigits |
python | pytorch__pytorch | test/inductor/test_cpu_select_algorithm.py | {
"start": 115215,
"end": 123657
} | class ____(_DynamicShapesTestBase):
common = check_model
test_linear_dynamic_shapes = TestSelectAlgorithm.test_linear_static_shapes
test_linear_with_pointwise_dynamic_shapes = (
TestSelectAlgorithm.test_linear_with_pointwise
)
test_linear_with_transpose_dynamic_shapes = (
TestSelectAlgorithm.test_linear_with_transpose
)
test_linear_with_unary_binary_dynamic_shapes = (
TestSelectAlgorithm.test_linear_with_unary_binary
)
test_linear_amx_dynamic_shapes = TestSelectAlgorithm.test_linear_amx
test_linear_with_embedding_dynamic_shapes = (
TestSelectAlgorithm.test_linear_with_embedding
)
test_quantized_linear_with_pointwise_dynamic_shapes = (
TestSelectAlgorithm.test_quantized_linear_with_pointwise
)
test_quantized_linear_with_pointwise_binary_dynamic_shapes = (
TestSelectAlgorithm.test_quantized_linear_with_pointwise_binary
)
test_quantized_linear_amx_dynamic_shapes = (
TestSelectAlgorithm.test_quantized_linear_amx
)
test_grouped_linear_dynamic_shapes = TestSelectAlgorithm.test_grouped_linear
test_grouped_linear_epilogue_dynamic_shapes = (
TestSelectAlgorithm.test_grouped_linear_epilogue
)
test_linear_k_slicing_dynamic_shapes = TestSelectAlgorithm.test_linear_k_slicing
test_linear_cache_blocking_dynamic_shapes = (
TestSelectAlgorithm.test_linear_cache_blocking
)
test_linear_thread_factors_dynamic_shapes = (
TestSelectAlgorithm.test_linear_thread_factors
)
@patches
@torch.no_grad
@unittest.skipIf(not TEST_MKL, "Test requires MKL")
@parametrize("bs", (5,))
@parametrize("Mdim", (384,))
@parametrize("Kdim", (96,))
@parametrize("Ndim", (64, 65))
@dtypes(torch.float, torch.bfloat16, torch.half)
def test_bmm_with_pointwise_dynamic_shapes(self, bs, Mdim, Kdim, Ndim, dtype):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.epilogue = torch.nn.ReLU()
def forward(self, x, other):
return self.epilogue(x @ other)
counters.clear()
u = torch.randn(bs, Mdim, Kdim).to(dtype=dtype)
v = torch.randn(bs, Kdim, Ndim).to(dtype=dtype)
torch._dynamo.mark_dynamic(u, 0)
torch._dynamo.mark_dynamic(u, 1)
torch._dynamo.mark_static(u, 2)
torch._dynamo.mark_static(v, 2)
mod = M().to(dtype=dtype).eval()
with verify(dtype) as (atol, rtol):
self.common(mod, (u, v), atol=atol, rtol=rtol)
self.assertEqual(counters["inductor"]["cpp_templated_kernel_counter"], 1)
self.assertEqual(counters["inductor"]["cpp_epilogue_fusion_counter"], 1)
@patches
@torch.no_grad
@unittest.skipIf(not TEST_MKL, "Test requires MKL")
@parametrize("bs", (5,))
@parametrize("Mdim", (384,))
@parametrize("Kdim", (96,))
@parametrize("Ndim", (64, 65))
@dtypes(torch.float, torch.bfloat16, torch.half)
def test_bmm_with_pointwise_with_reshape_dynamic_shapes(
self, bs, Mdim, Kdim, Ndim, dtype
):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.epilogue = torch.nn.ReLU()
def forward(self, x, other, noise):
result = x.reshape(-1, Mdim, Kdim) @ other.reshape(-1, Kdim, Ndim)
return self.epilogue(result) + noise
counters.clear()
u = torch.randn(bs, 8, Mdim, Kdim).to(dtype=dtype)
v = torch.randn(bs, 8, Kdim, Ndim).to(dtype=dtype)
noise = torch.randn(bs * 8, Mdim, Ndim).to(dtype=dtype)
torch._dynamo.mark_dynamic(u, 0)
torch._dynamo.mark_dynamic(u, 1)
torch._dynamo.mark_static(u, 2)
torch._dynamo.mark_static(u, 3)
torch._dynamo.mark_static(v, 2)
torch._dynamo.mark_static(v, 3)
mod = M().to(dtype=dtype).eval()
with verify(dtype) as (atol, rtol):
self.common(mod, (u, v, noise), atol=atol, rtol=rtol)
self.assertEqual(counters["inductor"]["cpp_templated_kernel_counter"], 1)
self.assertEqual(counters["inductor"]["cpp_epilogue_fusion_counter"], 1)
@patches
@torch.no_grad
@unittest.skipIf(not TEST_MKL, "Test requires MKL")
@dtypes(torch.float, torch.bfloat16)
def test_bmm_epilogue_dynamic_reshape(self, dtype):
bs = 5
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.epilogue = torch.nn.ReLU()
def forward(self, x, w, arg5_1):
arg131_1 = x.shape[0]
mul_91 = arg131_1 * 8
view_422 = torch.ops.aten.reshape.default(x, [mul_91, 512, 64])
view_423 = torch.ops.aten.reshape.default(w, [mul_91, 64, 512])
bmm_36 = torch.ops.aten.bmm.default(view_422, view_423)
view_424 = torch.ops.aten.reshape.default(
bmm_36, [arg131_1, 8, 512, 512]
)
abs_2 = torch.ones(512, 512, dtype=torch.int64)
lt_562 = torch.ops.aten.lt.Scalar(abs_2, 8)
add_5084 = torch.ones(512, 512, dtype=torch.int64)
add_5085 = torch.ones(512, 512, dtype=torch.int64)
full_default_1 = torch.ops.aten.full.default(
[512, 512], 15, dtype=torch.int64, layout=torch.strided
)
minimum_3 = torch.ops.aten.minimum.default(add_5085, full_default_1)
where_2 = torch.ops.aten.where.self(lt_562, abs_2, minimum_3)
add_5086 = torch.ops.aten.add.Tensor(add_5084, where_2)
embedding_5 = torch.ops.aten.embedding.default(arg5_1, add_5086)
permute_196 = torch.ops.aten.permute.default(embedding_5, [2, 0, 1])
unsqueeze_21 = torch.ops.aten.unsqueeze.default(permute_196, 0)
full_default = torch.ops.aten.full.default(
[arg131_1, 1, 1, 512],
-0.0,
dtype=torch.float32,
layout=torch.strided,
)
add_5087 = torch.ops.aten.add.Tensor(unsqueeze_21, full_default)
add_5103 = torch.ops.aten.add.Tensor(view_424, add_5087)
return add_5103
counters.clear()
u = torch.randn(bs, 8, 512, 64).to(dtype=dtype)
v = torch.randn(bs, 8, 64, 512).to(dtype=dtype)
arg5 = torch.randn(32, 8)
torch._dynamo.mark_dynamic(u, 0)
torch._dynamo.mark_static(u, 1)
torch._dynamo.mark_static(u, 2)
torch._dynamo.mark_static(u, 3)
torch._dynamo.mark_static(v, 2)
torch._dynamo.mark_static(v, 3)
mod = M().to(dtype=dtype).eval()
with verify(dtype) as (atol, rtol):
self.common(mod, (u, v, arg5), atol=atol, rtol=rtol)
self.assertEqual(counters["inductor"]["cpp_templated_kernel_counter"], 1)
self.assertEqual(counters["inductor"]["cpp_epilogue_fusion_counter"], 1)
@patches
@torch.no_grad
@unittest.skipIf(not TEST_MKL, "Test requires MKL")
def test_bmm_dynamic_bm_stride(self):
bs = 8
Mdim = 256
Kdim = 64
dtype = torch.float
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, weight):
return x @ weight.permute(2, 0, 1)
counters.clear()
u = torch.randn(bs, Mdim, Kdim).to(dtype=dtype)
v = torch.randn(Kdim, Mdim, bs).to(dtype=dtype)
torch._dynamo.mark_dynamic(u, 0)
torch._dynamo.mark_dynamic(u, 1)
torch._dynamo.mark_static(u, 2)
torch._dynamo.mark_static(v, 0)
torch._dynamo.mark_static(v, 1)
mod = M().to(dtype=dtype).eval()
with verify(dtype) as (atol, rtol):
self.common(mod, (u, v), atol=atol, rtol=rtol)
self.assertEqual(counters["inductor"]["cpp_templated_kernel_counter"], 1)
instantiate_device_type_tests(TestSelectAlgorithm, globals(), only_for="cpu")
instantiate_device_type_tests(
TestSelectAlgorithmDynamicShapes, globals(), only_for="cpu"
)
if __name__ == "__main__":
from torch.testing._internal.inductor_utils import HAS_CPU
if HAS_CPU and not (IS_MACOS or IS_WINDOWS):
run_tests()
| TestSelectAlgorithmDynamicShapes |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/critical_section_test.py | {
"start": 1745,
"end": 15813
} | class ____(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def testCreateCriticalSection(self):
cs = critical_section_ops.CriticalSection(shared_name="cs")
v = resource_variable_ops.ResourceVariable(0.0, name="v")
def fn(a, b):
c = v.value()
with ops.control_dependencies([c]):
nv = v.assign_add(a * b)
with ops.control_dependencies([nv]):
return array_ops.identity(c)
num_concurrent = 100
r = [cs.execute(lambda: fn(1.0, 2.0)) for _ in range(num_concurrent)]
self.evaluate(v.initializer)
r_value = self.evaluate(r)
self.assertAllClose([2.0 * i for i in range(num_concurrent)],
sorted(r_value))
@parameterized.named_parameters(
("Inner%sOuter%s" % (inner, outer), inner, outer)
for (inner, outer) in itertools.product(*([(False, True)] * 2)))
@test_util.run_in_graph_and_eager_modes
@test_util.xla_allow_fallback("b/128495870")
def testCriticalSectionWithControlFlow(self, outer_cond, inner_cond):
if (not context.executing_eagerly() and
control_flow_v2_toggles.control_flow_v2_enabled()):
self.skipTest("b/135070612")
cs = critical_section_ops.CriticalSection(shared_name="cs")
v = resource_variable_ops.ResourceVariable(0.0, name="v")
num_concurrent = 100
# pylint: disable=cell-var-from-loop
def fn(a, b):
c = v.read_value()
def true_fn():
with ops.control_dependencies([c]):
nv = v.assign_add(a * b)
with ops.control_dependencies([nv]):
return array_ops.identity(c)
return cond.cond(
array_ops.identity(inner_cond), true_fn, lambda: c)
def execute():
return cs.execute(lambda: fn(1.0, 2.0))
r = [
cond.cond(array_ops.identity(outer_cond),
execute,
v.read_value)
for _ in range(num_concurrent)
]
# pylint: enable=cell-var-from-loop
self.evaluate(v.initializer)
r_value = self.evaluate(r)
if inner_cond and outer_cond:
self.assertAllClose([2.0 * i for i in range(num_concurrent)],
sorted(r_value))
else:
self.assertAllClose([0] * num_concurrent, r_value)
@test_util.run_v1_only("b/123990562 Sees CancelledError on some calls")
def testCriticalSectionInParallelDoesntDeadlockOnError(self):
# No eager mode execution of this test because eager does not
# run fn() in parallel, which is where the deadlock could
# potentially occur (in graph mode).
cs = critical_section_ops.CriticalSection(shared_name="cs")
v = resource_variable_ops.ResourceVariable(0.0, name="v")
def fn(i):
error = control_flow_assert.Assert((i % 2) == 1, ["Error"])
with ops.control_dependencies([error]):
return v.read_value()
num_concurrent = 2
@def_function.function(autograph=False)
def run_concurrently():
return [cs.execute(lambda: fn(i)) for i in range(num_concurrent)]
if not context.executing_eagerly():
run_concurrently = run_concurrently()
self.evaluate(v.initializer)
for _ in range(100):
with self.assertRaisesOpError("Error"):
if context.executing_eagerly():
run_concurrently()
else:
self.evaluate(run_concurrently)
@test_util.run_in_graph_and_eager_modes
def testCreateCriticalSectionFnReturnsOp(self):
cs = critical_section_ops.CriticalSection(shared_name="cs")
v = resource_variable_ops.ResourceVariable(0.0, name="v")
def fn_return_op(a, b):
c = v.read_value()
with ops.control_dependencies([c]):
nv = v.assign_add(a * b)
with ops.control_dependencies([nv]):
return control_flow_ops.no_op()
num_concurrent = 100
r = [cs.execute(lambda: fn_return_op(1.0, 2.0))
for _ in range(num_concurrent)]
self.evaluate(v.initializer)
self.evaluate(r)
final_v = self.evaluate(v)
self.assertAllClose(2.0 * num_concurrent, final_v)
@test_util.run_v1_only("Collections don't exist in TF2")
def testCollection(self):
cs = critical_section_ops.CriticalSection(shared_name="cs")
self.assertIn(
cs, ops.get_collection(critical_section_ops.CRITICAL_SECTIONS))
add = lambda x: x + 1
execute = cs.execute(lambda: add(1.0), name="my_execute")
execute_op = [
x for x in execute.graph.get_operations()
if "my_execute" in x.name and "MutexLock" in x.type
][0]
self.assertIn(
execute_op,
[signature.op for signature in
ops.get_collection(critical_section_ops.CRITICAL_SECTION_EXECUTIONS)])
def testRecursiveCriticalSectionAccessIsIllegal(self):
# This does not work properly in eager mode. Eager users will
# just hit a deadlock if they do this. But at least it'll be easier
# to debug.
cs = critical_section_ops.CriticalSection()
add = lambda y: y + 1
def fn(x):
return cs.execute(lambda: add(x))
with self.assertRaisesRegex(
ValueError, r"Attempting to lock a CriticalSection .* in which we are"):
cs.execute(lambda: fn(1.0))
def testRecursiveCriticalSectionAccessViaCapturedTensorIsProtected(self):
# This one is subtle; and we're being overly cautious here. The
# deadlock we are ensuring we catch is:
#
# to_capture = CS[lambda x: x + 1](1.0)
# deadlocked = CS[lambda x: x + to_capture](1.0)
#
# This would have caused a deadlock because executing `deadlocked` will
# lock the mutex on CS; but then due to dependencies, will attempt
# to compute `to_capture`. This computation requires locking CS,
# but that is not possible now because CS is already locked by
# `deadlocked`.
#
# We check that CriticalSection.execute properly inserts new
# control dependencies to its lock to ensure all captured
# operations are finished before anything runs within the critical section.
cs = critical_section_ops.CriticalSection(shared_name="cs")
fn = array_ops.identity
to_capture = cs.execute(lambda: fn(1.0))
fn_captures = lambda x: x + to_capture
to_capture_too = array_ops.identity(to_capture)
ex_0 = cs.execute(lambda: fn_captures(1.0))
with ops.control_dependencies([to_capture]):
# This is OK because to_capture will execute before this next call
ex_1 = cs.execute(lambda: fn_captures(1.0))
dependency = array_ops.identity(to_capture)
fn_captures_dependency = lambda x: x + dependency
ex_2 = cs.execute(lambda: fn_captures_dependency(1.0))
with ops.control_dependencies([to_capture_too]):
ex_3 = cs.execute(lambda: fn_captures_dependency(1.0))
# Ensure there's no actual deadlock on to_execute.
self.assertEqual(2.0, self.evaluate(ex_0))
self.assertEqual(2.0, self.evaluate(ex_1))
self.assertEqual(2.0, self.evaluate(ex_2))
self.assertEqual(2.0, self.evaluate(ex_3))
def testRecursiveCriticalSectionAccessWithinLoopIsProtected(self):
cs = critical_section_ops.CriticalSection(shared_name="cs")
def body_implicit_capture(i, j):
# This would have caused a deadlock if not for logic in execute
# that inserts additional control dependencies onto the lock op:
# * Loop body argument j is captured by fn()
# * i is running in parallel to move forward the execution
# * j is not being checked by the predicate function
# * output of cs.execute() is returned as next j.
fn = lambda: j + 1
return (i + 1, cs.execute(fn))
(i_n, j_n) = while_loop.while_loop(
lambda i, _: i < 1000,
body_implicit_capture, [0, 0],
parallel_iterations=25)
# For consistency between eager and graph mode.
i_n = array_ops.identity(i_n)
logging.warn(
"\n==============\nRunning "
"'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
"body_implicit_capture'\n"
"==============\n")
self.assertEqual((1000, 1000), self.evaluate((i_n, j_n)))
logging.warn(
"\n==============\nSuccessfully finished running "
"'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
"body_implicit_capture'\n"
"==============\n")
def body_implicit_capture_protected(i, j):
# This version is ok because we manually add a control
# dependency on j, which is an argument to the while_loop body
# and captured by fn.
fn = lambda: j + 1
with ops.control_dependencies([j]):
return (i + 1, cs.execute(fn))
(i_n, j_n) = while_loop.while_loop(
lambda i, _: i < 1000,
body_implicit_capture_protected, [0, 0],
parallel_iterations=25)
# For consistency between eager and graph mode.
i_n = array_ops.identity(i_n)
logging.warn(
"\n==============\nRunning "
"'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
"body_implicit_capture_protected'\n"
"==============\n")
self.assertEqual((1000, 1000), self.evaluate((i_n, j_n)))
logging.warn(
"\n==============\nSuccessfully finished running "
"'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
"body_implicit_capture_protected'\n"
"==============\n")
def body_args_capture(i, j):
# This version is ok because j is an argument to fn and we can
# ensure there's a control dependency on j.
fn = lambda x: x + 1
return (i + 1, cs.execute(lambda: fn(j)))
(i_n, j_n) = while_loop.while_loop(
lambda i, _: i < 1000,
body_args_capture, [0, 0],
parallel_iterations=25)
# For consistency between eager and graph mode.
i_n = array_ops.identity(i_n)
logging.warn(
"\n==============\nRunning "
"'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
"body_args_capture'\n"
"==============\n")
self.assertEqual((1000, 1000), self.evaluate((i_n, j_n)))
logging.warn(
"\n==============\nSuccessfully finished running "
"'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
"body_args_capture'\n"
"==============\n")
def testRecursiveCriticalSectionAccessIsIllegalSameSharedName(self):
# This does not work properly in eager mode. Eager users will
# just hit a deadlock if they do this. But at least it'll be easier
# to debug.
cs = critical_section_ops.CriticalSection(shared_name="cs")
cs_same = critical_section_ops.CriticalSection(shared_name="cs")
add = lambda x: x + 1
def fn(x):
return cs_same.execute(lambda: add(x))
with self.assertRaisesRegex(
ValueError, r"Attempting to lock a CriticalSection .* in which we are"):
cs.execute(lambda: fn(1.0))
@test_util.run_v1_only(
"b/123955885 Can't identify consumed resources in eager mode")
def testMultipleCSExecutionsRequestSameResource(self):
cs0 = critical_section_ops.CriticalSection()
cs1 = critical_section_ops.CriticalSection()
v = resource_variable_ops.ResourceVariable(0.0, name="v")
cs0.execute(lambda: v + 1)
# It's OK for the same CriticalSection to access this resource.
cs0.execute(lambda: v - 1)
# It's *not* OK for a different CriticalSection to access it by
# default.
with self.assertRaisesRegex(ValueError,
"requested exclusive resource access"):
cs1.execute(lambda: v + 1)
# It's not even OK if the second call doesn't request exclusive access.
with self.assertRaisesRegex(ValueError,
"requested exclusive resource access"):
cs1.execute(lambda: v + 1, exclusive_resource_access=False)
v2 = resource_variable_ops.ResourceVariable(0.0, name="v2")
cs0.execute(lambda: v2 + 1, exclusive_resource_access=False)
# It's OK if neither requests exclusive resource access.
cs1.execute(lambda: v2 + 1, exclusive_resource_access=False)
# It's not OK if the second request requires exclusive resource
# access.
with self.assertRaisesRegex(ValueError,
"requested exclusive resource access"):
cs1.execute(lambda: v2 + 1)
def testControlDependencyFromOutsideWhileLoopMixedWithInsideLoop(self):
cs = critical_section_ops.CriticalSection()
v = resource_variable_ops.ResourceVariable(0, name="v")
# Make sure that the control dependencies on v do not cause issues
# in the lock_op's automatic control dependency adder.
#
# Note, here v must be a resource variable (or something similar),
# otherwise it gets hoisted into the while_loop by the time we add
# control dependencies to the lock_op.
def body(i):
add_j = lambda j: v + j + 1
return cs.execute(lambda: add_j(i))
out = while_loop.while_loop(lambda i: i < 10, body, [0])
self.evaluate(v.initializer)
self.assertEqual(10, self.evaluate(out))
@test_util.run_in_graph_and_eager_modes
def testInsideFunction(self):
if test_util.is_gpu_available():
self.skipTest(
"b/123899495: Colocation errors for critical sections in map on GPU")
cs = critical_section_ops.CriticalSection()
with ops.device("/gpu:0" if test_util.is_gpu_available() else "/cpu:0"):
v = resource_variable_ops.ResourceVariable(1)
def fn():
return v.read_value()
# map() creates a TensorFlow function.
ds = dataset_ops.Dataset.range(1)
if test_util.is_gpu_available():
ds = (ds.apply(prefetching_ops.copy_to_device("/gpu:0"))
.apply(prefetching_ops.map_on_gpu(lambda _: cs.execute(fn))))
else:
ds = ds.map(lambda _: cs.execute(fn))
def get_first():
if context.executing_eagerly():
return self.evaluate(dataset_ops.make_one_shot_iterator(ds).get_next())
itr = dataset_ops.make_initializable_iterator(ds)
self.evaluate([v.initializer, itr.initializer])
return self.evaluate(itr.get_next())
self.assertEqual(1, get_first())
if __name__ == "__main__":
test.main()
| CriticalSectionTest |
python | pytorch__pytorch | torch/distributed/nn/functional.py | {
"start": 8887,
"end": 9549
} | class ____(Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx, src, group, *tensors):
ctx.src = src
ctx.group = group
assert all(t.size() == tensors[0].size() for t in tensors)
output = torch.zeros_like(tensors[0])
if dist.get_rank(group=group) == src:
dist.scatter(output, list(tensors), src, group=group)
else:
dist.scatter(output, None, src, group=group)
return output
@staticmethod
# pyrefly: ignore [bad-override]
def backward(ctx, grad_output):
return (None, None) + _Gather.apply(ctx.src, ctx.group, grad_output)
| _Scatter |
python | sqlalchemy__sqlalchemy | test/sql/test_compare.py | {
"start": 55583,
"end": 64048
} | class ____(fixtures.TestBase):
custom_traverse = {
"AnnotatedFunctionAsBinary": {
"sql_function",
"left_index",
"right_index",
"modifiers",
"_annotations",
},
"Annotatednext_value": {"sequence", "_annotations"},
"FunctionAsBinary": {
"sql_function",
"left_index",
"right_index",
"modifiers",
},
"next_value": {"sequence"},
"array": ({"type", "clauses"}),
}
ignore_keys = {
"AnnotatedColumn": {"dialect_options"},
"SelectStatementGrouping": {
"_independent_ctes",
"_independent_ctes_opts",
},
}
@testing.combinations(*all_hascachekey_subclasses())
def test_traverse_internals(self, cls: type):
super_traverse = {}
# ignore_super = self.ignore_super.get(cls.__name__, set())
for s in cls.mro()[1:]:
for attr in s.__dict__:
if not attr.endswith("_traverse_internals"):
continue
for k, v in s.__dict__[attr]:
if k not in super_traverse:
super_traverse[k] = v
traverse_dict = dict(cls.__dict__["_traverse_internals"])
eq_(len(cls.__dict__["_traverse_internals"]), len(traverse_dict))
if cls.__name__ in self.custom_traverse:
eq_(traverse_dict.keys(), self.custom_traverse[cls.__name__])
else:
ignore = self.ignore_keys.get(cls.__name__, set())
left_keys = traverse_dict.keys() | ignore
is_true(
left_keys >= super_traverse.keys(),
f"{left_keys} >= {super_traverse.keys()} - missing: "
f"{super_traverse.keys() - left_keys} - ignored {ignore}",
)
subset = {
k: v for k, v in traverse_dict.items() if k in super_traverse
}
eq_(
subset,
{k: v for k, v in super_traverse.items() if k not in ignore},
)
# name -> (traverse names, init args)
custom_init = {
"BinaryExpression": (
{"right", "operator", "type", "negate", "modifiers", "left"},
{"right", "operator", "type_", "negate", "modifiers", "left"},
),
"BindParameter": (
{"literal_execute", "type", "callable", "value", "key"},
{"required", "isoutparam", "literal_execute", "type_", "callable_"}
| {"unique", "expanding", "quote", "value", "key"},
),
"Cast": ({"type", "clause"}, {"type_", "expression"}),
"ClauseList": (
{"clauses", "operator"},
{"group_contents", "group", "operator", "clauses"},
),
"ColumnClause": (
{"is_literal", "type", "table", "name"},
{"type_", "is_literal", "text"},
),
"ExpressionClauseList": (
{"clauses", "operator"},
{"type_", "operator", "clauses"},
),
"FromStatement": (
{"_raw_columns", "_with_options", "element"}
| {"_propagate_attrs", "_compile_state_funcs"},
{"element", "entities"},
),
"FunctionAsBinary": (
{"modifiers", "sql_function", "right_index", "left_index"},
{"right_index", "left_index", "fn"},
),
"FunctionElement": (
{"clause_expr", "_table_value_type", "_with_ordinality"},
{"clauses"},
),
"Function": (
{"_table_value_type", "clause_expr", "_with_ordinality"}
| {"packagenames", "type", "name"},
{"type_", "packagenames", "name", "clauses"},
),
"Label": ({"_element", "type", "name"}, {"type_", "element", "name"}),
"LambdaElement": (
{"_resolved"},
{"role", "opts", "apply_propagate_attrs", "fn"},
),
"Load": (
{"propagate_to_loaders", "additional_source_entities"}
| {"path", "context"},
{"entity"},
),
"LoaderCriteriaOption": (
{"where_criteria", "entity", "propagate_to_loaders"}
| {"root_entity", "include_aliases"},
{"where_criteria", "include_aliases", "propagate_to_loaders"}
| {"entity_or_base", "loader_only", "track_closure_variables"},
),
"NullLambdaStatement": ({"_resolved"}, {"statement"}),
"ScalarFunctionColumn": (
{"type", "fn", "name"},
{"type_", "name", "fn"},
),
"ScalarValues": (
{"_data", "_column_args", "literal_binds"},
{"columns", "data", "literal_binds"},
),
"Select": (
{
"_having_criteria",
"_distinct",
"_group_by_clauses",
"_fetch_clause",
"_limit_clause",
"_label_style",
"_order_by_clauses",
"_raw_columns",
"_correlate_except",
"_statement_hints",
"_hints",
"_independent_ctes",
"_distinct_on",
"_compile_state_funcs",
"_setup_joins",
"_suffixes",
"_memoized_select_entities",
"_for_update_arg",
"_prefixes",
"_propagate_attrs",
"_with_options",
"_independent_ctes_opts",
"_offset_clause",
"_correlate",
"_where_criteria",
"_annotations",
"_fetch_clause_options",
"_from_obj",
"_post_select_clause",
"_post_body_clause",
"_post_criteria_clause",
"_pre_columns_clause",
},
{"entities"},
),
"TableValuedColumn": (
{"scalar_alias", "type", "name"},
{"type_", "scalar_alias"},
),
"TableValueType": ({"_elements"}, {"elements"}),
"TextualSelect": (
{"column_args", "_annotations", "_independent_ctes"}
| {"element", "_independent_ctes_opts"},
{"positional", "columns", "text"},
),
"Tuple": ({"clauses", "operator"}, {"clauses", "types"}),
"TypeClause": ({"type"}, {"type_"}),
"TypeCoerce": ({"type", "clause"}, {"type_", "expression"}),
"UnaryExpression": (
{"modifier", "element", "operator"},
{"operator", "wraps_column_expression"}
| {"type_", "modifier", "element"},
),
"Values": (
{
"_column_args",
"literal_binds",
"name",
"_data",
"_independent_ctes",
"_independent_ctes_opts",
},
{"columns", "name", "literal_binds"},
),
"FrameClause": (
{"upper_bind", "upper_type", "lower_type", "lower_bind"},
{"start", "end", "start_frame_type", "end_frame_type"},
),
"_MemoizedSelectEntities": (
{"_with_options", "_raw_columns", "_setup_joins"},
{"args"},
),
"array": ({"type", "clauses"}, {"clauses", "type_"}),
"next_value": ({"sequence"}, {"seq"}),
}
@testing.combinations(
*all_hascachekey_subclasses(
ignore_subclasses=[
Annotated,
NoInit,
SingletonConstant,
SyntaxExtension,
DialectKWArgs,
Executable,
]
)
)
def test_init_args_in_traversal(self, cls: type):
sig = signature(cls.__init__)
init_args = set()
for p in sig.parameters.values():
if (
p.name == "self"
or p.name.startswith("_")
or p.kind in (p.VAR_KEYWORD,)
):
continue
init_args.add(p.name)
names = {n for n, _ in cls.__dict__["_traverse_internals"]}
if cls.__name__ in self.custom_init:
traverse, inits = self.custom_init[cls.__name__]
eq_(names, traverse)
eq_(init_args, inits)
else:
is_true(names.issuperset(init_args), f"{names} : {init_args}")
| HasCacheKeySubclass |
python | spack__spack | lib/spack/spack/vendor/jinja2/exceptions.py | {
"start": 77,
"end": 364
} | class ____(Exception):
"""Baseclass for all template errors."""
def __init__(self, message: t.Optional[str] = None) -> None:
super().__init__(message)
@property
def message(self) -> t.Optional[str]:
return self.args[0] if self.args else None
| TemplateError |
python | huggingface__transformers | src/transformers/models/owlvit/modeling_owlvit.py | {
"start": 31120,
"end": 34733
} | class ____(nn.Module):
def __init__(self, config: OwlViTTextConfig):
super().__init__()
self.config = config
embed_dim = config.hidden_size
self.embeddings = OwlViTTextEmbeddings(config)
self.encoder = OwlViTEncoder(config)
self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
@auto_docstring
def forward(
self,
input_ids: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPooling]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
# num_samples, seq_len = input_shape where num_samples = batch_size * num_max_text_queries
# OWLVIT's text model uses causal mask, prepare it here.
# https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
causal_attention_mask = _create_4d_causal_attention_mask(
input_shape, hidden_states.dtype, device=hidden_states.device
)
# expand attention_mask
if attention_mask is not None:
# [num_samples, seq_len] -> [num_samples, 1, tgt_seq_len, src_seq_len]
attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
encoder_outputs = self.encoder(
inputs_embeds=hidden_states,
attention_mask=attention_mask,
causal_attention_mask=causal_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = encoder_outputs[0]
last_hidden_state = self.final_layer_norm(last_hidden_state)
# take features from the end of tokens embedding (end of token is the highest number in each sequence)
# casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14
pooled_output = last_hidden_state[
torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
input_ids.to(torch.int).argmax(dim=-1).to(last_hidden_state.device),
]
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
| OwlViTTextTransformer |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofworkv2.py | {
"start": 69159,
"end": 73902
} | class ____(UOWTest):
"""Test that events which occur within a flush()
get the same attribute loading behavior as on the outside
of the flush, and that the unit of work itself uses the
"committed" version of primary/foreign key attributes
when loading a collection for historical purposes (this typically
has importance for when primary key values change).
"""
def _mapper_setup(self, passive_updates=True):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address,
order_by=addresses.c.email_address,
passive_updates=passive_updates,
backref="user",
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
return fixture_session(expire_on_commit=False)
def test_before_update_m2o(self):
"""Expect normal many to one attribute load behavior
(should not get committed value)
from within public 'before_update' event"""
sess = self._mapper_setup()
Address, User = self.classes.Address, self.classes.User
def before_update(mapper, connection, target):
# if get committed is used to find target.user, then
# it will be still be u1 instead of u2
assert target.user.id == target.user_id == u2.id
from sqlalchemy import event
event.listen(Address, "before_update", before_update)
a1 = Address(email_address="a1")
u1 = User(name="u1", addresses=[a1])
sess.add(u1)
u2 = User(name="u2")
sess.add(u2)
sess.commit()
sess.expunge_all()
# lookup an address and move it to the other user
a1 = sess.get(Address, a1.id)
# move address to another user's fk
assert a1.user_id == u1.id
a1.user_id = u2.id
sess.flush()
def test_before_update_o2m_passive(self):
"""Expect normal one to many attribute load behavior
(should not get committed value)
from within public 'before_update' event"""
self._test_before_update_o2m(True)
def test_before_update_o2m_notpassive(self):
"""Expect normal one to many attribute load behavior
(should not get committed value)
from within public 'before_update' event with
passive_updates=False
"""
self._test_before_update_o2m(False)
def _test_before_update_o2m(self, passive_updates):
sess = self._mapper_setup(passive_updates=passive_updates)
Address, User = self.classes.Address, self.classes.User
class AvoidReferencialError(Exception):
"""the test here would require ON UPDATE CASCADE on FKs
for the flush to fully succeed; this exception is used
to cancel the flush before we get that far.
"""
def before_update(mapper, connection, target):
if passive_updates:
# we shouldn't be using committed value.
# so, having switched target's primary key,
# we expect no related items in the collection
# since we are using passive_updates
# this is a behavior change since #2350
assert "addresses" not in target.__dict__
eq_(target.addresses, [])
else:
# in contrast with passive_updates=True,
# here we expect the orm to have looked up the addresses
# with the committed value (it needs to in order to
# update the foreign keys). So we expect addresses
# collection to move with the user,
# (just like they will be after the update)
# collection is already loaded
assert "addresses" in target.__dict__
eq_([a.id for a in target.addresses], [a.id for a in [a1, a2]])
raise AvoidReferencialError()
from sqlalchemy import event
event.listen(User, "before_update", before_update)
a1 = Address(email_address="jack1")
a2 = Address(email_address="jack2")
u1 = User(id=1, name="jack", addresses=[a1, a2])
sess.add(u1)
sess.commit()
sess.expunge_all()
u1 = sess.get(User, u1.id)
u1.id = 2
try:
sess.flush()
except AvoidReferencialError:
pass
| LoadersUsingCommittedTest |
python | pypa__warehouse | tests/unit/test_config.py | {
"start": 26755,
"end": 27652
} | class ____:
def test_allowed_domains_parsing(self):
"""Test that allowed domains are parsed correctly."""
# Test the lambda function used in maybe_set
def parser(s):
return [d.strip() for d in s.split(",") if d.strip()]
# Test normal case
assert parser("pypi.org, test.pypi.org, example.com") == [
"pypi.org",
"test.pypi.org",
"example.com",
]
# Test with empty strings
assert parser("pypi.org,,, test.pypi.org, ") == ["pypi.org", "test.pypi.org"]
# Test with only commas
assert parser(",,,") == []
# Test single domain
assert parser("pypi.org") == ["pypi.org"]
# Test with extra spaces
assert parser(" pypi.org , test.pypi.org ") == [
"pypi.org",
"test.pypi.org",
]
| TestWarehouseAllowedDomains |
python | PrefectHQ__prefect | tests/utilities/test_hashing.py | {
"start": 1229,
"end": 2011
} | class ____:
def test_file_hash_returns_string(self):
assert isinstance(file_hash(__file__), str)
def test_file_hash_requires_path(self):
with pytest.raises(TypeError, match="path"):
file_hash()
def test_file_hash_raises_if_path_doesnt_exist(self, tmp_path):
fake_path = tmp_path.joinpath("foobar.txt")
with pytest.raises(FileNotFoundError):
file_hash(path=fake_path)
def test_file_hash_hashes(self, tmp_path):
with open(tmp_path.joinpath("test.py"), "w") as f:
f.write("0")
val = file_hash(tmp_path.joinpath("test.py"))
assert val == hashlib.md5(b"0").hexdigest()
# Check if the hash is stable
assert val == "cfcd208495d565ef66e7dff9f98764da"
| TestFileHash |
python | pypa__pipenv | pipenv/patched/pip/_internal/distributions/base.py | {
"start": 292,
"end": 1828
} | class ____(metaclass=abc.ABCMeta):
"""A base class for handling installable artifacts.
The requirements for anything installable are as follows:
- we must be able to determine the requirement name
(or we can't correctly handle the non-upgrade case).
- for packages with setup requirements, we must also be able
to determine their requirements without installing additional
packages (for the same reason as run-time dependencies)
- we must be able to create a Distribution object exposing the
above metadata.
- if we need to do work in the build tracker, we must be able to generate a unique
string to identify the requirement in the build tracker.
"""
def __init__(self, req: InstallRequirement) -> None:
super().__init__()
self.req = req
@abc.abstractproperty
def build_tracker_id(self) -> Optional[str]:
"""A string that uniquely identifies this requirement to the build tracker.
If None, then this dist has no work to do in the build tracker, and
``.prepare_distribution_metadata()`` will not be called."""
raise NotImplementedError()
@abc.abstractmethod
def get_metadata_distribution(self) -> BaseDistribution:
raise NotImplementedError()
@abc.abstractmethod
def prepare_distribution_metadata(
self,
finder: "PackageFinder",
build_isolation: bool,
check_build_deps: bool,
) -> None:
raise NotImplementedError()
| AbstractDistribution |
python | facebookresearch__faiss | faiss/gpu/test/test_multi_gpu.py | {
"start": 4452,
"end": 8207
} | class ____(unittest.TestCase):
def get_dataset(self, small_one=False):
if not small_one:
d = 128
nb = 100000
nt = 15000
nq = 2000
else:
d = 32
nb = 10000
nt = 1000
nq = 200
np.random.seed(123)
# generate points in a low-dim subspace to make the resutls
# look better :-)
d1 = 16
q, r = np.linalg.qr(np.random.randn(d, d))
qc = q[:d1, :]
def make_mat(n):
return np.dot(
np.random.random(size=(nb, d1)), qc).astype('float32')
return (make_mat(nt), make_mat(nb), make_mat(nq))
def test_mm(self):
# trouble with MKL+fbmake that appears only at runtime. Check it here
x = np.random.random(size=(100, 20)).astype('float32')
mat = faiss.PCAMatrix(20, 10)
mat.train(x)
mat.apply_py(x)
def do_cpu_to_gpu(self, index_key):
ts = []
ts.append(time.time())
(xt, xb, xq) = self.get_dataset(small_one=True)
nb, d = xb.shape
index = faiss.index_factory(d, index_key)
if index.__class__ == faiss.IndexIVFPQ:
# speed up test
index.pq.cp.niter = 2
index.do_polysemous_training = False
ts.append(time.time())
index.train(xt)
ts.append(time.time())
# adding some ids because there was a bug in this case;
# those need to be cast to idx_t(= int64_t), because
# on windows the numpy int default is int32
ids = (np.arange(nb) * 3 + 12345).astype('int64')
index.add_with_ids(xb, ids)
ts.append(time.time())
index.nprobe = 4
Dref, Iref = index.search(xq, 10)
ts.append(time.time())
res = faiss.StandardGpuResources()
co = faiss.GpuClonerOptions()
co.use_cuvs = False
gpu_index = faiss.index_cpu_to_gpu(res, 0, index, co)
ts.append(time.time())
# Validate the layout of the memory info
mem_info = res.getMemoryInfo()
assert isinstance(mem_info, dict)
assert isinstance(mem_info[0]['FlatData'], tuple)
assert isinstance(mem_info[0]['FlatData'][0], int)
assert isinstance(mem_info[0]['FlatData'][1], int)
gpu_index.nprobe = 4
Dnew, Inew = gpu_index.search(xq, 10)
ts.append(time.time())
print('times:', [t - ts[0] for t in ts])
# Give us some margin of error
self.assertGreaterEqual((Iref == Inew).sum(), Iref.size - 50)
if faiss.get_num_gpus() == 1:
return
for shard in False, True:
# test on just 2 GPUs
res = [faiss.StandardGpuResources() for i in range(2)]
co = faiss.GpuMultipleClonerOptions()
co.shard = shard
co.use_cuvs = False
gpu_index = faiss.index_cpu_to_gpu_multiple_py(res, index, co)
faiss.GpuParameterSpace().set_index_parameter(
gpu_index, 'nprobe', 4)
Dnew, Inew = gpu_index.search(xq, 10)
# 0.99: allow some tolerance in results otherwise test
# fails occasionally (not reproducible)
self.assertGreaterEqual((Iref == Inew).sum(), Iref.size * 0.99)
def test_cpu_to_gpu_IVFPQ(self):
self.do_cpu_to_gpu('IVF128,PQ4')
def test_cpu_to_gpu_IVFFlat(self):
self.do_cpu_to_gpu('IVF128,Flat')
def test_set_gpu_param(self):
index = faiss.index_factory(12, "PCAR8,IVF10,PQ4")
res = faiss.StandardGpuResources()
gpu_index = faiss.index_cpu_to_gpu(res, 0, index)
faiss.GpuParameterSpace().set_index_parameter(gpu_index, "nprobe", 3)
| EvalIVFPQAccuracy |
python | numpy__numpy | numpy/_core/tests/test_strings.py | {
"start": 7255,
"end": 42553
} | class ____:
@pytest.mark.parametrize("in1,in2,out", [
("", "", ""),
("abc", "abc", "abcabc"),
("12345", "12345", "1234512345"),
("MixedCase", "MixedCase", "MixedCaseMixedCase"),
("12345 \0 ", "12345 \0 ", "12345 \0 12345 \0 "),
("UPPER", "UPPER", "UPPERUPPER"),
(["abc", "def"], ["hello", "world"], ["abchello", "defworld"]),
])
def test_add(self, in1, in2, out, dt):
in1 = np.array(in1, dtype=dt)
in2 = np.array(in2, dtype=dt)
out = np.array(out, dtype=dt)
assert_array_equal(np.strings.add(in1, in2), out)
@pytest.mark.parametrize("in1,in2,out", [
("abc", 3, "abcabcabc"),
("abc", 0, ""),
("abc", -1, ""),
(["abc", "def"], [1, 4], ["abc", "defdefdefdef"]),
])
def test_multiply(self, in1, in2, out, dt):
in1 = np.array(in1, dtype=dt)
out = np.array(out, dtype=dt)
assert_array_equal(np.strings.multiply(in1, in2), out)
def test_multiply_raises(self, dt):
with pytest.raises(TypeError, match="unsupported type"):
np.strings.multiply(np.array("abc", dtype=dt), 3.14)
with pytest.raises(OverflowError):
np.strings.multiply(np.array("abc", dtype=dt), sys.maxsize)
def test_inplace_multiply(self, dt):
arr = np.array(['foo ', 'bar'], dtype=dt)
arr *= 2
if dt != "T":
assert_array_equal(arr, np.array(['foo ', 'barb'], dtype=dt))
else:
assert_array_equal(arr, ['foo foo ', 'barbar'])
with pytest.raises(OverflowError):
arr *= sys.maxsize
@pytest.mark.parametrize("i_dt", [np.int8, np.int16, np.int32,
np.int64, np.int_])
def test_multiply_integer_dtypes(self, i_dt, dt):
a = np.array("abc", dtype=dt)
i = np.array(3, dtype=i_dt)
res = np.array("abcabcabc", dtype=dt)
assert_array_equal(np.strings.multiply(a, i), res)
@pytest.mark.parametrize("in_,out", [
("", False),
("a", True),
("A", True),
("\n", False),
("abc", True),
("aBc123", False),
("abc\n", False),
(["abc", "aBc123"], [True, False]),
])
def test_isalpha(self, in_, out, dt):
in_ = np.array(in_, dtype=dt)
assert_array_equal(np.strings.isalpha(in_), out)
@pytest.mark.parametrize("in_,out", [
('', False),
('a', True),
('A', True),
('\n', False),
('123abc456', True),
('a1b3c', True),
('aBc000 ', False),
('abc\n', False),
])
def test_isalnum(self, in_, out, dt):
in_ = np.array(in_, dtype=dt)
assert_array_equal(np.strings.isalnum(in_), out)
@pytest.mark.parametrize("in_,out", [
("", False),
("a", False),
("0", True),
("012345", True),
("012345a", False),
(["a", "012345"], [False, True]),
])
def test_isdigit(self, in_, out, dt):
in_ = np.array(in_, dtype=dt)
assert_array_equal(np.strings.isdigit(in_), out)
@pytest.mark.parametrize("in_,out", [
("", False),
("a", False),
("1", False),
(" ", True),
("\t", True),
("\r", True),
("\n", True),
(" \t\r \n", True),
(" \t\r\na", False),
(["\t1", " \t\r \n"], [False, True])
])
def test_isspace(self, in_, out, dt):
in_ = np.array(in_, dtype=dt)
assert_array_equal(np.strings.isspace(in_), out)
@pytest.mark.parametrize("in_,out", [
('', False),
('a', True),
('A', False),
('\n', False),
('abc', True),
('aBc', False),
('abc\n', True),
])
def test_islower(self, in_, out, dt):
in_ = np.array(in_, dtype=dt)
assert_array_equal(np.strings.islower(in_), out)
@pytest.mark.parametrize("in_,out", [
('', False),
('a', False),
('A', True),
('\n', False),
('ABC', True),
('AbC', False),
('ABC\n', True),
])
def test_isupper(self, in_, out, dt):
in_ = np.array(in_, dtype=dt)
assert_array_equal(np.strings.isupper(in_), out)
@pytest.mark.parametrize("in_,out", [
('', False),
('a', False),
('A', True),
('\n', False),
('A Titlecased Line', True),
('A\nTitlecased Line', True),
('A Titlecased, Line', True),
('Not a capitalized String', False),
('Not\ta Titlecase String', False),
('Not--a Titlecase String', False),
('NOT', False),
])
def test_istitle(self, in_, out, dt):
in_ = np.array(in_, dtype=dt)
assert_array_equal(np.strings.istitle(in_), out)
@pytest.mark.parametrize("in_,out", [
("", 0),
("abc", 3),
("12345", 5),
("MixedCase", 9),
("12345 \x00 ", 8),
("UPPER", 5),
(["abc", "12345 \x00 "], [3, 8]),
])
def test_str_len(self, in_, out, dt):
in_ = np.array(in_, dtype=dt)
assert_array_equal(np.strings.str_len(in_), out)
@pytest.mark.parametrize("a,sub,start,end,out", [
("abcdefghiabc", "abc", 0, None, 0),
("abcdefghiabc", "abc", 1, None, 9),
("abcdefghiabc", "def", 4, None, -1),
("abc", "", 0, None, 0),
("abc", "", 3, None, 3),
("abc", "", 4, None, -1),
("rrarrrrrrrrra", "a", 0, None, 2),
("rrarrrrrrrrra", "a", 4, None, 12),
("rrarrrrrrrrra", "a", 4, 6, -1),
("", "", 0, None, 0),
("", "", 1, 1, -1),
("", "", MAX, 0, -1),
("", "xx", 0, None, -1),
("", "xx", 1, 1, -1),
("", "xx", MAX, 0, -1),
pytest.param(99 * "a" + "b", "b", 0, None, 99,
id="99*a+b-b-0-None-99"),
pytest.param(98 * "a" + "ba", "ba", 0, None, 98,
id="98*a+ba-ba-0-None-98"),
pytest.param(100 * "a", "b", 0, None, -1,
id="100*a-b-0-None--1"),
pytest.param(30000 * "a" + 100 * "b", 100 * "b", 0, None, 30000,
id="30000*a+100*b-100*b-0-None-30000"),
pytest.param(30000 * "a", 100 * "b", 0, None, -1,
id="30000*a-100*b-0-None--1"),
pytest.param(15000 * "a" + 15000 * "b", 15000 * "b", 0, None, 15000,
id="15000*a+15000*b-15000*b-0-None-15000"),
pytest.param(15000 * "a" + 15000 * "b", 15000 * "c", 0, None, -1,
id="15000*a+15000*b-15000*c-0-None--1"),
(["abcdefghiabc", "rrarrrrrrrrra"], ["def", "arr"], [0, 3],
None, [3, -1]),
("Ae¢☃€ 😊" * 2, "😊", 0, None, 6),
("Ae¢☃€ 😊" * 2, "😊", 7, None, 13),
pytest.param("A" * (2 ** 17), r"[\w]+\Z", 0, None, -1,
id=r"A*2**17-[\w]+\Z-0-None--1"),
])
def test_find(self, a, sub, start, end, out, dt):
if "😊" in a and dt == "S":
pytest.skip("Bytes dtype does not support non-ascii input")
a = np.array(a, dtype=dt)
sub = np.array(sub, dtype=dt)
assert_array_equal(np.strings.find(a, sub, start, end), out)
@pytest.mark.parametrize("a,sub,start,end,out", [
("abcdefghiabc", "abc", 0, None, 9),
("abcdefghiabc", "", 0, None, 12),
("abcdefghiabc", "abcd", 0, None, 0),
("abcdefghiabc", "abcz", 0, None, -1),
("abc", "", 0, None, 3),
("abc", "", 3, None, 3),
("abc", "", 4, None, -1),
("rrarrrrrrrrra", "a", 0, None, 12),
("rrarrrrrrrrra", "a", 4, None, 12),
("rrarrrrrrrrra", "a", 4, 6, -1),
(["abcdefghiabc", "rrarrrrrrrrra"], ["abc", "a"], [0, 0],
None, [9, 12]),
("Ae¢☃€ 😊" * 2, "😊", 0, None, 13),
("Ae¢☃€ 😊" * 2, "😊", 0, 7, 6),
])
def test_rfind(self, a, sub, start, end, out, dt):
if "😊" in a and dt == "S":
pytest.skip("Bytes dtype does not support non-ascii input")
a = np.array(a, dtype=dt)
sub = np.array(sub, dtype=dt)
assert_array_equal(np.strings.rfind(a, sub, start, end), out)
@pytest.mark.parametrize("a,sub,start,end,out", [
("aaa", "a", 0, None, 3),
("aaa", "b", 0, None, 0),
("aaa", "a", 1, None, 2),
("aaa", "a", 10, None, 0),
("aaa", "a", -1, None, 1),
("aaa", "a", -10, None, 3),
("aaa", "a", 0, 1, 1),
("aaa", "a", 0, 10, 3),
("aaa", "a", 0, -1, 2),
("aaa", "a", 0, -10, 0),
("aaa", "", 1, None, 3),
("aaa", "", 3, None, 1),
("aaa", "", 10, None, 0),
("aaa", "", -1, None, 2),
("aaa", "", -10, None, 4),
("aaa", "aaaa", 0, None, 0),
pytest.param(98 * "a" + "ba", "ba", 0, None, 1,
id="98*a+ba-ba-0-None-1"),
pytest.param(30000 * "a" + 100 * "b", 100 * "b", 0, None, 1,
id="30000*a+100*b-100*b-0-None-1"),
pytest.param(30000 * "a", 100 * "b", 0, None, 0,
id="30000*a-100*b-0-None-0"),
pytest.param(30000 * "a" + 100 * "ab", "ab", 0, None, 100,
id="30000*a+100*ab-ab-0-None-100"),
pytest.param(15000 * "a" + 15000 * "b", 15000 * "b", 0, None, 1,
id="15000*a+15000*b-15000*b-0-None-1"),
pytest.param(15000 * "a" + 15000 * "b", 15000 * "c", 0, None, 0,
id="15000*a+15000*b-15000*c-0-None-0"),
("", "", 0, None, 1),
("", "", 1, 1, 0),
("", "", MAX, 0, 0),
("", "xx", 0, None, 0),
("", "xx", 1, 1, 0),
("", "xx", MAX, 0, 0),
(["aaa", ""], ["a", ""], [0, 0], None, [3, 1]),
("Ae¢☃€ 😊" * 100, "😊", 0, None, 100),
])
def test_count(self, a, sub, start, end, out, dt):
if "😊" in a and dt == "S":
pytest.skip("Bytes dtype does not support non-ascii input")
a = np.array(a, dtype=dt)
sub = np.array(sub, dtype=dt)
assert_array_equal(np.strings.count(a, sub, start, end), out)
@pytest.mark.parametrize("a,prefix,start,end,out", [
("hello", "he", 0, None, True),
("hello", "hello", 0, None, True),
("hello", "hello world", 0, None, False),
("hello", "", 0, None, True),
("hello", "ello", 0, None, False),
("hello", "ello", 1, None, True),
("hello", "o", 4, None, True),
("hello", "o", 5, None, False),
("hello", "", 5, None, True),
("hello", "lo", 6, None, False),
("helloworld", "lowo", 3, None, True),
("helloworld", "lowo", 3, 7, True),
("helloworld", "lowo", 3, 6, False),
("", "", 0, 1, True),
("", "", 0, 0, True),
("", "", 1, 0, False),
("hello", "he", 0, -1, True),
("hello", "he", -53, -1, True),
("hello", "hello", 0, -1, False),
("hello", "hello world", -1, -10, False),
("hello", "ello", -5, None, False),
("hello", "ello", -4, None, True),
("hello", "o", -2, None, False),
("hello", "o", -1, None, True),
("hello", "", -3, -3, True),
("hello", "lo", -9, None, False),
(["hello", ""], ["he", ""], [0, 0], None, [True, True]),
])
def test_startswith(self, a, prefix, start, end, out, dt):
a = np.array(a, dtype=dt)
prefix = np.array(prefix, dtype=dt)
assert_array_equal(np.strings.startswith(a, prefix, start, end), out)
@pytest.mark.parametrize("a,suffix,start,end,out", [
("hello", "lo", 0, None, True),
("hello", "he", 0, None, False),
("hello", "", 0, None, True),
("hello", "hello world", 0, None, False),
("helloworld", "worl", 0, None, False),
("helloworld", "worl", 3, 9, True),
("helloworld", "world", 3, 12, True),
("helloworld", "lowo", 1, 7, True),
("helloworld", "lowo", 2, 7, True),
("helloworld", "lowo", 3, 7, True),
("helloworld", "lowo", 4, 7, False),
("helloworld", "lowo", 3, 8, False),
("ab", "ab", 0, 1, False),
("ab", "ab", 0, 0, False),
("", "", 0, 1, True),
("", "", 0, 0, True),
("", "", 1, 0, False),
("hello", "lo", -2, None, True),
("hello", "he", -2, None, False),
("hello", "", -3, -3, True),
("hello", "hello world", -10, -2, False),
("helloworld", "worl", -6, None, False),
("helloworld", "worl", -5, -1, True),
("helloworld", "worl", -5, 9, True),
("helloworld", "world", -7, 12, True),
("helloworld", "lowo", -99, -3, True),
("helloworld", "lowo", -8, -3, True),
("helloworld", "lowo", -7, -3, True),
("helloworld", "lowo", 3, -4, False),
("helloworld", "lowo", -8, -2, False),
(["hello", "helloworld"], ["lo", "worl"], [0, -6], None,
[True, False]),
])
def test_endswith(self, a, suffix, start, end, out, dt):
a = np.array(a, dtype=dt)
suffix = np.array(suffix, dtype=dt)
assert_array_equal(np.strings.endswith(a, suffix, start, end), out)
@pytest.mark.parametrize("a,chars,out", [
("", None, ""),
(" hello ", None, "hello "),
("hello", None, "hello"),
(" \t\n\r\f\vabc \t\n\r\f\v", None, "abc \t\n\r\f\v"),
([" hello ", "hello"], None, ["hello ", "hello"]),
("", "", ""),
("", "xyz", ""),
("hello", "", "hello"),
("xyzzyhelloxyzzy", "xyz", "helloxyzzy"),
("hello", "xyz", "hello"),
("xyxz", "xyxz", ""),
("xyxzx", "x", "yxzx"),
(["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"],
["helloxyzzy", "hello"]),
(["ba", "ac", "baa", "bba"], "b", ["a", "ac", "aa", "a"]),
])
def test_lstrip(self, a, chars, out, dt):
a = np.array(a, dtype=dt)
out = np.array(out, dtype=dt)
if chars is not None:
chars = np.array(chars, dtype=dt)
assert_array_equal(np.strings.lstrip(a, chars), out)
else:
assert_array_equal(np.strings.lstrip(a), out)
@pytest.mark.parametrize("a,chars,out", [
("", None, ""),
(" hello ", None, " hello"),
("hello", None, "hello"),
(" \t\n\r\f\vabc \t\n\r\f\v", None, " \t\n\r\f\vabc"),
([" hello ", "hello"], None, [" hello", "hello"]),
("", "", ""),
("", "xyz", ""),
("hello", "", "hello"),
(["hello ", "abcdefghijklmnop"], None,
["hello", "abcdefghijklmnop"]),
("xyzzyhelloxyzzy", "xyz", "xyzzyhello"),
("hello", "xyz", "hello"),
("xyxz", "xyxz", ""),
(" ", None, ""),
("xyxzx", "x", "xyxz"),
(["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"],
["xyzzyhello", "hello"]),
(["ab", "ac", "aab", "abb"], "b", ["a", "ac", "aa", "a"]),
])
def test_rstrip(self, a, chars, out, dt):
a = np.array(a, dtype=dt)
out = np.array(out, dtype=dt)
if chars is not None:
chars = np.array(chars, dtype=dt)
assert_array_equal(np.strings.rstrip(a, chars), out)
else:
assert_array_equal(np.strings.rstrip(a), out)
@pytest.mark.parametrize("a,chars,out", [
("", None, ""),
(" hello ", None, "hello"),
("hello", None, "hello"),
(" \t\n\r\f\vabc \t\n\r\f\v", None, "abc"),
([" hello ", "hello"], None, ["hello", "hello"]),
("", "", ""),
("", "xyz", ""),
("hello", "", "hello"),
("xyzzyhelloxyzzy", "xyz", "hello"),
("hello", "xyz", "hello"),
("xyxz", "xyxz", ""),
("xyxzx", "x", "yxz"),
(["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"],
["hello", "hello"]),
(["bab", "ac", "baab", "bbabb"], "b", ["a", "ac", "aa", "a"]),
])
def test_strip(self, a, chars, out, dt):
a = np.array(a, dtype=dt)
if chars is not None:
chars = np.array(chars, dtype=dt)
out = np.array(out, dtype=dt)
assert_array_equal(np.strings.strip(a, chars), out)
@pytest.mark.parametrize("buf,old,new,count,res", [
("", "", "", -1, ""),
("", "", "A", -1, "A"),
("", "A", "", -1, ""),
("", "A", "A", -1, ""),
("", "", "", 100, ""),
("", "", "A", 100, "A"),
("A", "", "", -1, "A"),
("A", "", "*", -1, "*A*"),
("A", "", "*1", -1, "*1A*1"),
("A", "", "*-#", -1, "*-#A*-#"),
("AA", "", "*-", -1, "*-A*-A*-"),
("AA", "", "*-", -1, "*-A*-A*-"),
("AA", "", "*-", 4, "*-A*-A*-"),
("AA", "", "*-", 3, "*-A*-A*-"),
("AA", "", "*-", 2, "*-A*-A"),
("AA", "", "*-", 1, "*-AA"),
("AA", "", "*-", 0, "AA"),
("A", "A", "", -1, ""),
("AAA", "A", "", -1, ""),
("AAA", "A", "", -1, ""),
("AAA", "A", "", 4, ""),
("AAA", "A", "", 3, ""),
("AAA", "A", "", 2, "A"),
("AAA", "A", "", 1, "AA"),
("AAA", "A", "", 0, "AAA"),
("AAAAAAAAAA", "A", "", -1, ""),
("ABACADA", "A", "", -1, "BCD"),
("ABACADA", "A", "", -1, "BCD"),
("ABACADA", "A", "", 5, "BCD"),
("ABACADA", "A", "", 4, "BCD"),
("ABACADA", "A", "", 3, "BCDA"),
("ABACADA", "A", "", 2, "BCADA"),
("ABACADA", "A", "", 1, "BACADA"),
("ABACADA", "A", "", 0, "ABACADA"),
("ABCAD", "A", "", -1, "BCD"),
("ABCADAA", "A", "", -1, "BCD"),
("BCD", "A", "", -1, "BCD"),
("*************", "A", "", -1, "*************"),
("^" + "A" * 1000 + "^", "A", "", 999, "^A^"),
("the", "the", "", -1, ""),
("theater", "the", "", -1, "ater"),
("thethe", "the", "", -1, ""),
("thethethethe", "the", "", -1, ""),
("theatheatheathea", "the", "", -1, "aaaa"),
("that", "the", "", -1, "that"),
("thaet", "the", "", -1, "thaet"),
("here and there", "the", "", -1, "here and re"),
("here and there and there", "the", "", -1, "here and re and re"),
("here and there and there", "the", "", 3, "here and re and re"),
("here and there and there", "the", "", 2, "here and re and re"),
("here and there and there", "the", "", 1, "here and re and there"),
("here and there and there", "the", "", 0, "here and there and there"),
("here and there and there", "the", "", -1, "here and re and re"),
("abc", "the", "", -1, "abc"),
("abcdefg", "the", "", -1, "abcdefg"),
("bbobob", "bob", "", -1, "bob"),
("bbobobXbbobob", "bob", "", -1, "bobXbob"),
("aaaaaaabob", "bob", "", -1, "aaaaaaa"),
("aaaaaaa", "bob", "", -1, "aaaaaaa"),
("Who goes there?", "o", "o", -1, "Who goes there?"),
("Who goes there?", "o", "O", -1, "WhO gOes there?"),
("Who goes there?", "o", "O", -1, "WhO gOes there?"),
("Who goes there?", "o", "O", 3, "WhO gOes there?"),
("Who goes there?", "o", "O", 2, "WhO gOes there?"),
("Who goes there?", "o", "O", 1, "WhO goes there?"),
("Who goes there?", "o", "O", 0, "Who goes there?"),
("Who goes there?", "a", "q", -1, "Who goes there?"),
("Who goes there?", "W", "w", -1, "who goes there?"),
("WWho goes there?WW", "W", "w", -1, "wwho goes there?ww"),
("Who goes there?", "?", "!", -1, "Who goes there!"),
("Who goes there??", "?", "!", -1, "Who goes there!!"),
("Who goes there?", ".", "!", -1, "Who goes there?"),
("This is a tissue", "is", "**", -1, "Th** ** a t**sue"),
("This is a tissue", "is", "**", -1, "Th** ** a t**sue"),
("This is a tissue", "is", "**", 4, "Th** ** a t**sue"),
("This is a tissue", "is", "**", 3, "Th** ** a t**sue"),
("This is a tissue", "is", "**", 2, "Th** ** a tissue"),
("This is a tissue", "is", "**", 1, "Th** is a tissue"),
("This is a tissue", "is", "**", 0, "This is a tissue"),
("bobob", "bob", "cob", -1, "cobob"),
("bobobXbobobob", "bob", "cob", -1, "cobobXcobocob"),
("bobob", "bot", "bot", -1, "bobob"),
("Reykjavik", "k", "KK", -1, "ReyKKjaviKK"),
("Reykjavik", "k", "KK", -1, "ReyKKjaviKK"),
("Reykjavik", "k", "KK", 2, "ReyKKjaviKK"),
("Reykjavik", "k", "KK", 1, "ReyKKjavik"),
("Reykjavik", "k", "KK", 0, "Reykjavik"),
("A.B.C.", ".", "----", -1, "A----B----C----"),
("Reykjavik", "q", "KK", -1, "Reykjavik"),
("spam, spam, eggs and spam", "spam", "ham", -1,
"ham, ham, eggs and ham"),
("spam, spam, eggs and spam", "spam", "ham", -1,
"ham, ham, eggs and ham"),
("spam, spam, eggs and spam", "spam", "ham", 4,
"ham, ham, eggs and ham"),
("spam, spam, eggs and spam", "spam", "ham", 3,
"ham, ham, eggs and ham"),
("spam, spam, eggs and spam", "spam", "ham", 2,
"ham, ham, eggs and spam"),
("spam, spam, eggs and spam", "spam", "ham", 1,
"ham, spam, eggs and spam"),
("spam, spam, eggs and spam", "spam", "ham", 0,
"spam, spam, eggs and spam"),
("bobobob", "bobob", "bob", -1, "bobob"),
("bobobobXbobobob", "bobob", "bob", -1, "bobobXbobob"),
("BOBOBOB", "bob", "bobby", -1, "BOBOBOB"),
("one!two!three!", "!", "@", 1, "one@two!three!"),
("one!two!three!", "!", "", -1, "onetwothree"),
("one!two!three!", "!", "@", 2, "one@two@three!"),
("one!two!three!", "!", "@", 3, "one@two@three@"),
("one!two!three!", "!", "@", 4, "one@two@three@"),
("one!two!three!", "!", "@", 0, "one!two!three!"),
("one!two!three!", "!", "@", -1, "one@two@three@"),
("one!two!three!", "x", "@", -1, "one!two!three!"),
("one!two!three!", "x", "@", 2, "one!two!three!"),
("abc", "", "-", -1, "-a-b-c-"),
("abc", "", "-", 3, "-a-b-c"),
("abc", "", "-", 0, "abc"),
("abc", "ab", "--", 0, "abc"),
("abc", "xy", "--", -1, "abc"),
(["abbc", "abbd"], "b", "z", [1, 2], ["azbc", "azzd"]),
])
def test_replace(self, buf, old, new, count, res, dt):
if "😊" in buf and dt == "S":
pytest.skip("Bytes dtype does not support non-ascii input")
buf = np.array(buf, dtype=dt)
old = np.array(old, dtype=dt)
new = np.array(new, dtype=dt)
res = np.array(res, dtype=dt)
assert_array_equal(np.strings.replace(buf, old, new, count), res)
@pytest.mark.parametrize("buf,sub,start,end,res", [
("abcdefghiabc", "", 0, None, 0),
("abcdefghiabc", "def", 0, None, 3),
("abcdefghiabc", "abc", 0, None, 0),
("abcdefghiabc", "abc", 1, None, 9),
])
def test_index(self, buf, sub, start, end, res, dt):
buf = np.array(buf, dtype=dt)
sub = np.array(sub, dtype=dt)
assert_array_equal(np.strings.index(buf, sub, start, end), res)
@pytest.mark.parametrize("buf,sub,start,end", [
("abcdefghiabc", "hib", 0, None),
("abcdefghiab", "abc", 1, None),
("abcdefghi", "ghi", 8, None),
("abcdefghi", "ghi", -1, None),
("rrarrrrrrrrra", "a", 4, 6),
])
def test_index_raises(self, buf, sub, start, end, dt):
buf = np.array(buf, dtype=dt)
sub = np.array(sub, dtype=dt)
with pytest.raises(ValueError, match="substring not found"):
np.strings.index(buf, sub, start, end)
@pytest.mark.parametrize("buf,sub,start,end,res", [
("abcdefghiabc", "", 0, None, 12),
("abcdefghiabc", "def", 0, None, 3),
("abcdefghiabc", "abc", 0, None, 9),
("abcdefghiabc", "abc", 0, -1, 0),
])
def test_rindex(self, buf, sub, start, end, res, dt):
buf = np.array(buf, dtype=dt)
sub = np.array(sub, dtype=dt)
assert_array_equal(np.strings.rindex(buf, sub, start, end), res)
@pytest.mark.parametrize("buf,sub,start,end", [
("abcdefghiabc", "hib", 0, None),
("defghiabc", "def", 1, None),
("defghiabc", "abc", 0, -1),
("abcdefghi", "ghi", 0, 8),
("abcdefghi", "ghi", 0, -1),
("rrarrrrrrrrra", "a", 4, 6),
])
def test_rindex_raises(self, buf, sub, start, end, dt):
buf = np.array(buf, dtype=dt)
sub = np.array(sub, dtype=dt)
with pytest.raises(ValueError, match="substring not found"):
np.strings.rindex(buf, sub, start, end)
@pytest.mark.parametrize("buf,tabsize,res", [
("abc\rab\tdef\ng\thi", 8, "abc\rab def\ng hi"),
("abc\rab\tdef\ng\thi", 4, "abc\rab def\ng hi"),
("abc\r\nab\tdef\ng\thi", 8, "abc\r\nab def\ng hi"),
("abc\r\nab\tdef\ng\thi", 4, "abc\r\nab def\ng hi"),
("abc\r\nab\r\ndef\ng\r\nhi", 4, "abc\r\nab\r\ndef\ng\r\nhi"),
(" \ta\n\tb", 1, " a\n b"),
])
def test_expandtabs(self, buf, tabsize, res, dt):
buf = np.array(buf, dtype=dt)
res = np.array(res, dtype=dt)
assert_array_equal(np.strings.expandtabs(buf, tabsize), res)
def test_expandtabs_raises_overflow(self, dt):
with pytest.raises(OverflowError, match="new string is too long"):
np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), sys.maxsize)
np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), 2**61)
def test_expandtabs_length_not_cause_segfault(self, dt):
# see gh-28829
with pytest.raises(
_UFuncNoLoopError,
match="did not contain a loop with signature matching types",
):
np._core.strings._expandtabs_length.reduce(np.zeros(200))
with pytest.raises(
_UFuncNoLoopError,
match="did not contain a loop with signature matching types",
):
np.strings.expandtabs(np.zeros(200))
FILL_ERROR = "The fill character must be exactly one character long"
def test_center_raises_multiple_character_fill(self, dt):
buf = np.array("abc", dtype=dt)
fill = np.array("**", dtype=dt)
with pytest.raises(TypeError, match=self.FILL_ERROR):
np.strings.center(buf, 10, fill)
def test_ljust_raises_multiple_character_fill(self, dt):
buf = np.array("abc", dtype=dt)
fill = np.array("**", dtype=dt)
with pytest.raises(TypeError, match=self.FILL_ERROR):
np.strings.ljust(buf, 10, fill)
def test_rjust_raises_multiple_character_fill(self, dt):
buf = np.array("abc", dtype=dt)
fill = np.array("**", dtype=dt)
with pytest.raises(TypeError, match=self.FILL_ERROR):
np.strings.rjust(buf, 10, fill)
@pytest.mark.parametrize("buf,width,fillchar,res", [
('abc', 10, ' ', ' abc '),
('abc', 6, ' ', ' abc '),
('abc', 3, ' ', 'abc'),
('abc', 2, ' ', 'abc'),
('abc', -2, ' ', 'abc'),
('abc', 10, '*', '***abc****'),
])
def test_center(self, buf, width, fillchar, res, dt):
buf = np.array(buf, dtype=dt)
fillchar = np.array(fillchar, dtype=dt)
res = np.array(res, dtype=dt)
assert_array_equal(np.strings.center(buf, width, fillchar), res)
@pytest.mark.parametrize("buf,width,fillchar,res", [
('abc', 10, ' ', 'abc '),
('abc', 6, ' ', 'abc '),
('abc', 3, ' ', 'abc'),
('abc', 2, ' ', 'abc'),
('abc', -2, ' ', 'abc'),
('abc', 10, '*', 'abc*******'),
])
def test_ljust(self, buf, width, fillchar, res, dt):
buf = np.array(buf, dtype=dt)
fillchar = np.array(fillchar, dtype=dt)
res = np.array(res, dtype=dt)
assert_array_equal(np.strings.ljust(buf, width, fillchar), res)
@pytest.mark.parametrize("buf,width,fillchar,res", [
('abc', 10, ' ', ' abc'),
('abc', 6, ' ', ' abc'),
('abc', 3, ' ', 'abc'),
('abc', 2, ' ', 'abc'),
('abc', -2, ' ', 'abc'),
('abc', 10, '*', '*******abc'),
])
def test_rjust(self, buf, width, fillchar, res, dt):
buf = np.array(buf, dtype=dt)
fillchar = np.array(fillchar, dtype=dt)
res = np.array(res, dtype=dt)
assert_array_equal(np.strings.rjust(buf, width, fillchar), res)
@pytest.mark.parametrize("buf,width,res", [
('123', 2, '123'),
('123', 3, '123'),
('0123', 4, '0123'),
('+123', 3, '+123'),
('+123', 4, '+123'),
('+123', 5, '+0123'),
('+0123', 5, '+0123'),
('-123', 3, '-123'),
('-123', 4, '-123'),
('-0123', 5, '-0123'),
('000', 3, '000'),
('34', 1, '34'),
('34', -1, '34'),
('0034', 4, '0034'),
])
def test_zfill(self, buf, width, res, dt):
buf = np.array(buf, dtype=dt)
res = np.array(res, dtype=dt)
assert_array_equal(np.strings.zfill(buf, width), res)
@pytest.mark.parametrize("buf,sep,res1,res2,res3", [
("this is the partition method", "ti", "this is the par",
"ti", "tion method"),
("http://www.python.org", "://", "http", "://", "www.python.org"),
("http://www.python.org", "?", "http://www.python.org", "", ""),
("http://www.python.org", "http://", "", "http://", "www.python.org"),
("http://www.python.org", "org", "http://www.python.", "org", ""),
("http://www.python.org", ["://", "?", "http://", "org"],
["http", "http://www.python.org", "", "http://www.python."],
["://", "", "http://", "org"],
["www.python.org", "", "www.python.org", ""]),
("mississippi", "ss", "mi", "ss", "issippi"),
("mississippi", "i", "m", "i", "ssissippi"),
("mississippi", "w", "mississippi", "", ""),
])
def test_partition(self, buf, sep, res1, res2, res3, dt):
buf = np.array(buf, dtype=dt)
sep = np.array(sep, dtype=dt)
res1 = np.array(res1, dtype=dt)
res2 = np.array(res2, dtype=dt)
res3 = np.array(res3, dtype=dt)
act1, act2, act3 = np.strings.partition(buf, sep)
assert_array_equal(act1, res1)
assert_array_equal(act2, res2)
assert_array_equal(act3, res3)
assert_array_equal(act1 + act2 + act3, buf)
@pytest.mark.parametrize("buf,sep,res1,res2,res3", [
("this is the partition method", "ti", "this is the parti",
"ti", "on method"),
("http://www.python.org", "://", "http", "://", "www.python.org"),
("http://www.python.org", "?", "", "", "http://www.python.org"),
("http://www.python.org", "http://", "", "http://", "www.python.org"),
("http://www.python.org", "org", "http://www.python.", "org", ""),
("http://www.python.org", ["://", "?", "http://", "org"],
["http", "", "", "http://www.python."],
["://", "", "http://", "org"],
["www.python.org", "http://www.python.org", "www.python.org", ""]),
("mississippi", "ss", "missi", "ss", "ippi"),
("mississippi", "i", "mississipp", "i", ""),
("mississippi", "w", "", "", "mississippi"),
])
def test_rpartition(self, buf, sep, res1, res2, res3, dt):
buf = np.array(buf, dtype=dt)
sep = np.array(sep, dtype=dt)
res1 = np.array(res1, dtype=dt)
res2 = np.array(res2, dtype=dt)
res3 = np.array(res3, dtype=dt)
act1, act2, act3 = np.strings.rpartition(buf, sep)
assert_array_equal(act1, res1)
assert_array_equal(act2, res2)
assert_array_equal(act3, res3)
assert_array_equal(act1 + act2 + act3, buf)
@pytest.mark.parametrize("args", [
(None,),
(None, None),
(None, None, -1),
(0,),
(0, None),
(0, None, -1),
(1,),
(1, None),
(1, None, -1),
(3,),
(3, None),
(5,),
(5, None),
(5, 5),
(5, 5, -1),
(6,), # test index past the end
(6, None),
(6, None, -1),
(6, 7), # test start and stop index past the end
(4, 3), # test start > stop index
(-1,),
(-1, None),
(-1, None, -1),
(-3,),
(-3, None),
([3, 4],),
([3, 4], None),
([2, 4],),
([-3, 5],),
([-3, 5], None),
([-3, 5], None, -1),
([0, -5],),
([0, -5], None),
([0, -5], None, -1),
(1, 4),
(-3, 5),
(None, -1),
(0, [4, 2]),
([1, 2], [-1, -2]),
(1, 5, 2),
(None, None, -1),
([0, 6], [-1, 0], [2, -1]),
])
@pytest.mark.parametrize("buf", [
["hello", "world"],
['hello world', 'γεια σου κόσμε', '你好世界', '👋 🌍'],
])
def test_slice(self, args, buf, dt):
if dt == "S" and "你好世界" in buf:
pytest.skip("Bytes dtype does not support non-ascii input")
if len(buf) == 4:
args = tuple(s * 2 if isinstance(s, list) else s for s in args)
buf = np.array(buf, dtype=dt)
act = np.strings.slice(buf, *args)
bcast_args = tuple(np.broadcast_to(arg, buf.shape) for arg in args)
res = np.array([s[slice(*arg)]
for s, arg in zip(buf, zip(*bcast_args))],
dtype=dt)
assert_array_equal(act, res)
def test_slice_unsupported(self, dt):
with pytest.raises(TypeError, match="did not contain a loop"):
np.strings.slice(np.array([1, 2, 3]), 4)
regexp = (r"Cannot cast ufunc '_slice' input .* "
r"from .* to dtype\('int(64|32)'\)")
with pytest.raises(TypeError, match=regexp):
np.strings.slice(np.array(['foo', 'bar'], dtype=dt),
np.array(['foo', 'bar'], dtype=dt))
@pytest.mark.parametrize("int_dt", [np.int8, np.int16, np.int32,
np.int64, np.uint8, np.uint16,
np.uint32, np.uint64])
def test_slice_int_type_promotion(self, int_dt, dt):
buf = np.array(["hello", "world"], dtype=dt)
np_slice = np.strings.slice
assert_array_equal(np_slice(buf, int_dt(4)),
np.array(["hell", "worl"], dtype=dt))
assert_array_equal(np_slice(buf, np.array([4, 4], dtype=int_dt)),
np.array(["hell", "worl"], dtype=dt))
assert_array_equal(np_slice(buf, int_dt(2), int_dt(4)),
np.array(["ll", "rl"], dtype=dt))
assert_array_equal(np_slice(buf, np.array([2, 2], dtype=int_dt),
np.array([4, 4], dtype=int_dt)),
np.array(["ll", "rl"], dtype=dt))
assert_array_equal(np_slice(buf, int_dt(0), int_dt(4), int_dt(2)),
np.array(["hl", "wr"], dtype=dt))
assert_array_equal(np_slice(buf,
np.array([0, 0], dtype=int_dt),
np.array([4, 4], dtype=int_dt),
np.array([2, 2], dtype=int_dt)),
np.array(["hl", "wr"], dtype=dt))
@pytest.mark.parametrize("dt", ["U", "T"])
| TestMethods |
python | doocs__leetcode | lcof/面试题24. 反转链表/Solution2.py | {
"start": 136,
"end": 393
} | class ____:
def reverseList(self, head: ListNode) -> ListNode:
if head is None or head.next is None:
return head
ans = self.reverseList(head.next)
head.next.next = head
head.next = None
return ans
| Solution |
python | Textualize__textual | docs/examples/guide/input/key03.py | {
"start": 211,
"end": 507
} | class ____(App):
"""App to display key events."""
CSS_PATH = "key03.tcss"
def compose(self) -> ComposeResult:
yield KeyLogger()
yield KeyLogger()
yield KeyLogger()
yield KeyLogger()
if __name__ == "__main__":
app = InputApp()
app.run()
| InputApp |
python | Netflix__metaflow | metaflow/_vendor/v3_7/typeguard/_transformer.py | {
"start": 2061,
"end": 8405
} | class ____:
node: Module | ClassDef | FunctionDef | AsyncFunctionDef | None
parent: TransformMemo | None
path: tuple[str, ...]
joined_path: Constant = field(init=False)
return_annotation: expr | None = None
yield_annotation: expr | None = None
send_annotation: expr | None = None
is_async: bool = False
local_names: set[str] = field(init=False, default_factory=set)
imported_names: dict[str, str] = field(init=False, default_factory=dict)
ignored_names: set[str] = field(init=False, default_factory=set)
load_names: defaultdict[str, dict[str, Name]] = field(
init=False, default_factory=lambda: defaultdict(dict)
)
has_yield_expressions: bool = field(init=False, default=False)
has_return_expressions: bool = field(init=False, default=False)
memo_var_name: Name | None = field(init=False, default=None)
should_instrument: bool = field(init=False, default=True)
variable_annotations: dict[str, expr] = field(init=False, default_factory=dict)
configuration_overrides: dict[str, Any] = field(init=False, default_factory=dict)
code_inject_index: int = field(init=False, default=0)
def __post_init__(self) -> None:
elements: list[str] = []
memo = self
while isinstance(memo.node, (ClassDef, FunctionDef, AsyncFunctionDef)):
elements.insert(0, memo.node.name)
if not memo.parent:
break
memo = memo.parent
if isinstance(memo.node, (FunctionDef, AsyncFunctionDef)):
elements.insert(0, "<locals>")
self.joined_path = Constant(".".join(elements))
# Figure out where to insert instrumentation code
if self.node:
for index, child in enumerate(self.node.body):
if isinstance(child, ImportFrom) and child.module == "__future__":
# (module only) __future__ imports must come first
continue
elif isinstance(child, Expr):
if isinstance(child.value, Constant) and isinstance(
child.value.value, str
):
continue # docstring
elif sys.version_info < (3, 8) and isinstance(child.value, Str):
continue # docstring
self.code_inject_index = index
break
def get_unused_name(self, name: str) -> str:
memo: TransformMemo | None = self
while memo is not None:
if name in memo.local_names:
memo = self
name += "_"
else:
memo = memo.parent
self.local_names.add(name)
return name
def is_ignored_name(self, expression: expr | Expr | None) -> bool:
top_expression = (
expression.value if isinstance(expression, Expr) else expression
)
if isinstance(top_expression, Attribute) and isinstance(
top_expression.value, Name
):
name = top_expression.value.id
elif isinstance(top_expression, Name):
name = top_expression.id
else:
return False
memo: TransformMemo | None = self
while memo is not None:
if name in memo.ignored_names:
return True
memo = memo.parent
return False
def get_memo_name(self) -> Name:
if not self.memo_var_name:
self.memo_var_name = Name(id="memo", ctx=Load())
return self.memo_var_name
def get_import(self, module: str, name: str) -> Name:
if module in self.load_names and name in self.load_names[module]:
return self.load_names[module][name]
qualified_name = f"{module}.{name}"
if name in self.imported_names and self.imported_names[name] == qualified_name:
return Name(id=name, ctx=Load())
alias = self.get_unused_name(name)
node = self.load_names[module][name] = Name(id=alias, ctx=Load())
self.imported_names[name] = qualified_name
return node
def insert_imports(self, node: Module | FunctionDef | AsyncFunctionDef) -> None:
"""Insert imports needed by injected code."""
if not self.load_names:
return
# Insert imports after any "from __future__ ..." imports and any docstring
for modulename, names in self.load_names.items():
aliases = [
alias(orig_name, new_name.id if orig_name != new_name.id else None)
for orig_name, new_name in sorted(names.items())
]
node.body.insert(self.code_inject_index, ImportFrom(modulename, aliases, 0))
def name_matches(self, expression: expr | Expr | None, *names: str) -> bool:
if expression is None:
return False
path: list[str] = []
top_expression = (
expression.value if isinstance(expression, Expr) else expression
)
if isinstance(top_expression, Subscript):
top_expression = top_expression.value
elif isinstance(top_expression, Call):
top_expression = top_expression.func
while isinstance(top_expression, Attribute):
path.insert(0, top_expression.attr)
top_expression = top_expression.value
if not isinstance(top_expression, Name):
return False
if top_expression.id in self.imported_names:
translated = self.imported_names[top_expression.id]
elif hasattr(builtins, top_expression.id):
translated = "builtins." + top_expression.id
else:
translated = top_expression.id
path.insert(0, translated)
joined_path = ".".join(path)
if joined_path in names:
return True
elif self.parent:
return self.parent.name_matches(expression, *names)
else:
return False
def get_config_keywords(self) -> list[keyword]:
if self.parent and isinstance(self.parent.node, ClassDef):
overrides = self.parent.configuration_overrides.copy()
else:
overrides = {}
overrides.update(self.configuration_overrides)
return [keyword(key, value) for key, value in overrides.items()]
| TransformMemo |
python | networkx__networkx | networkx/algorithms/approximation/tests/test_dominating_set.py | {
"start": 151,
"end": 2686
} | class ____:
def test_min_weighted_dominating_set(self):
graph = nx.Graph()
graph.add_edge(1, 2)
graph.add_edge(1, 5)
graph.add_edge(2, 3)
graph.add_edge(2, 5)
graph.add_edge(3, 4)
graph.add_edge(3, 6)
graph.add_edge(5, 6)
vertices = {1, 2, 3, 4, 5, 6}
# due to ties, this might be hard to test tight bounds
dom_set = min_weighted_dominating_set(graph)
for vertex in vertices - dom_set:
neighbors = set(graph.neighbors(vertex))
assert len(neighbors & dom_set) > 0, "Non dominating set found!"
def test_star_graph(self):
"""Tests that an approximate dominating set for the star graph,
even when the center node does not have the smallest integer
label, gives just the center node.
For more information, see #1527.
"""
# Create a star graph in which the center node has the highest
# label instead of the lowest.
G = nx.star_graph(10)
G = nx.relabel_nodes(G, {0: 9, 9: 0})
assert min_weighted_dominating_set(G) == {9}
def test_null_graph(self):
"""Tests that the unique dominating set for the null graph is an empty set"""
G = nx.Graph()
assert min_weighted_dominating_set(G) == set()
def test_min_edge_dominating_set(self):
graph = nx.path_graph(5)
dom_set = min_edge_dominating_set(graph)
# this is a crappy way to test, but good enough for now.
for edge in graph.edges():
if edge in dom_set:
continue
else:
u, v = edge
found = False
for dom_edge in dom_set:
found |= u == dom_edge[0] or u == dom_edge[1]
assert found, "Non adjacent edge found!"
graph = nx.complete_graph(10)
dom_set = min_edge_dominating_set(graph)
# this is a crappy way to test, but good enough for now.
for edge in graph.edges():
if edge in dom_set:
continue
else:
u, v = edge
found = False
for dom_edge in dom_set:
found |= u == dom_edge[0] or u == dom_edge[1]
assert found, "Non adjacent edge found!"
graph = nx.Graph() # empty Networkx graph
with pytest.raises(ValueError, match="Expected non-empty NetworkX graph!"):
min_edge_dominating_set(graph)
| TestMinWeightDominatingSet |
python | tensorflow__tensorflow | tensorflow/python/framework/python_memory_checker.py | {
"start": 1862,
"end": 5269
} | class ____(object):
"""Python memory leak detection class."""
def __init__(self):
self._snapshots = []
# cache the function used by mark_stack_trace_and_call to avoid
# contaminating the leak measurement.
def _record_snapshot():
self._snapshots.append(_create_python_object_snapshot())
self._record_snapshot = _record_snapshot
# We do not enable trace_wrapper on this function to avoid contaminating
# the snapshot.
def record_snapshot(self):
# Function called using `mark_stack_trace_and_call` will have
# "_python_memory_checker_helper" string in the C++ stack trace. This will
# be used to filter out C++ memory allocations caused by this function,
# because we are not interested in detecting memory growth caused by memory
# checker itself.
_python_memory_checker_helper.mark_stack_trace_and_call(
self._record_snapshot)
@trace.trace_wrapper
def report(self):
# TODO(kkb): Implement.
pass
@trace.trace_wrapper
def assert_no_leak_if_all_possibly_except_one(self):
"""Raises an exception if a leak is detected.
This algorithm classifies a series of allocations as a leak if it's the same
type at every snapshot, but possibly except one snapshot.
"""
snapshot_diffs = []
for i in range(0, len(self._snapshots) - 1):
snapshot_diffs.append(self._snapshot_diff(i, i + 1))
allocation_counter = collections.Counter()
for diff in snapshot_diffs:
for name, count in diff.items():
if count > 0:
allocation_counter[name] += 1
leaking_object_names = {
name for name, count in allocation_counter.items()
if count >= len(snapshot_diffs) - 1
}
if leaking_object_names:
object_list_to_print = '\n'.join(
[' - ' + name for name in leaking_object_names])
raise AssertionError(
'These Python objects were allocated in every snapshot possibly '
f'except one.\n\n{object_list_to_print}')
@trace.trace_wrapper
def assert_no_new_objects(self, threshold=None):
"""Assert no new Python objects."""
if not threshold:
threshold = {}
count_diff = self._snapshot_diff(0, -1)
original_count_diff = copy.deepcopy(count_diff)
count_diff.subtract(collections.Counter(threshold))
if max(count_diff.values() or [0]) > 0:
raise AssertionError('New Python objects created exceeded the threshold.'
'\nPython object threshold:\n'
f'{threshold}\n\nNew Python objects:\n'
f'{original_count_diff.most_common()}')
elif min(count_diff.values(), default=0) < 0:
logging.warning('New Python objects created were less than the threshold.'
'\nPython object threshold:\n'
f'{threshold}\n\nNew Python objects:\n'
f'{original_count_diff.most_common()}')
@trace.trace_wrapper
def _snapshot_diff(self, old_index, new_index):
return _snapshot_diff(self._snapshots[old_index],
self._snapshots[new_index],
self._get_internal_object_ids())
@trace.trace_wrapper
def _get_internal_object_ids(self):
ids = set()
for snapshot in self._snapshots:
ids.add(id(snapshot))
for v in snapshot.values():
ids.add(id(v))
return ids
| _PythonMemoryChecker |
python | celery__celery | celery/contrib/testing/mocks.py | {
"start": 3470,
"end": 4182
} | class ____(Mock):
"""Dummy class implementing __enter__ and __exit__.
The :keyword:`with` statement requires these to be implemented
in the class, not just the instance.
"""
def __enter__(self):
return self
def __exit__(self, *exc_info):
pass
def ContextMock(*args, **kwargs):
"""Mock that mocks :keyword:`with` statement contexts."""
obj = _ContextMock(*args, **kwargs)
obj.attach_mock(_ContextMock(), '__enter__')
obj.attach_mock(_ContextMock(), '__exit__')
obj.__enter__.return_value = obj
# if __exit__ return a value the exception is ignored,
# so it must return None here.
obj.__exit__.return_value = None
return obj
| _ContextMock |
python | huggingface__transformers | tests/models/albert/test_modeling_albert.py | {
"start": 12730,
"end": 15057
} | class ____(unittest.TestCase):
@slow
def test_inference_no_head_absolute_embedding(self):
model = AlbertModel.from_pretrained("albert/albert-base-v2")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
output = model(input_ids, attention_mask=attention_mask)[0]
expected_shape = torch.Size((1, 11, 768))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
)
torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4)
@slow
@pytest.mark.torch_export_test
def test_export(self):
if version.parse(torch.__version__) < version.parse("2.4.0"):
self.skipTest(reason="This test requires torch >= 2.4 to run.")
distilbert_model = "albert/albert-base-v2"
device = "cpu"
attn_implementation = "sdpa"
max_length = 64
tokenizer = AutoTokenizer.from_pretrained(distilbert_model)
inputs = tokenizer(
f"Paris is the {tokenizer.mask_token} of France.",
return_tensors="pt",
padding="max_length",
max_length=max_length,
)
model = AlbertForMaskedLM.from_pretrained(
distilbert_model,
device_map=device,
attn_implementation=attn_implementation,
)
logits = model(**inputs).logits
eg_predicted_mask = tokenizer.decode(logits[0, 4].topk(5).indices)
self.assertEqual(
eg_predicted_mask.split(),
["capital", "capitol", "comune", "arrondissement", "bastille"],
)
exported_program = torch.export.export(
model,
args=(inputs["input_ids"],),
kwargs={"attention_mask": inputs["attention_mask"]},
strict=True,
)
result = exported_program.module().forward(inputs["input_ids"], inputs["attention_mask"])
ep_predicted_mask = tokenizer.decode(result.logits[0, 4].topk(5).indices)
self.assertEqual(eg_predicted_mask, ep_predicted_mask)
| AlbertModelIntegrationTest |
python | pypa__pipenv | pipenv/vendor/click/types.py | {
"start": 17075,
"end": 17832
} | class ____(_NumberRangeBase, IntParamType):
"""Restrict an :data:`click.INT` value to a range of accepted
values. See :ref:`ranges`.
If ``min`` or ``max`` are not passed, any value is accepted in that
direction. If ``min_open`` or ``max_open`` are enabled, the
corresponding boundary is not included in the range.
If ``clamp`` is enabled, a value outside the range is clamped to the
boundary instead of failing.
.. versionchanged:: 8.0
Added the ``min_open`` and ``max_open`` parameters.
"""
name = "integer range"
def _clamp( # type: ignore
self, bound: int, dir: "te.Literal[1, -1]", open: bool
) -> int:
if not open:
return bound
return bound + dir
| IntRange |
python | pydata__xarray | xarray/tests/test_plot.py | {
"start": 114554,
"end": 115273
} | class ____(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self) -> None:
"""
Create a DataArray with a time-axis that contains cftime.datetime
objects.
"""
month = np.arange(1, 13, 1)
data = np.sin(2 * np.pi * month / 12.0)
darray = DataArray(data, dims=["time"])
darray.coords["time"] = xr.date_range(
start="2017", periods=12, freq="1ME", calendar="noleap", use_cftime=True
)
self.darray = darray
def test_ncaxis_notinstalled_line_plot(self) -> None:
with pytest.raises(ImportError, match=r"optional `nc-time-axis`"):
self.darray.plot.line()
@requires_matplotlib
| TestNcAxisNotInstalled |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_embed_image12.py | {
"start": 315,
"end": 959
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("embed_image12.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({"bg_color": "#FFFF00"})
worksheet.embed_image(
0, 0, self.image_dir + "red.png", {"cell_format": format1}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | tests/m2m_recursive/tests.py | {
"start": 80,
"end": 2413
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.a, cls.b, cls.c, cls.d = [
Person.objects.create(name=name)
for name in ["Anne", "Bill", "Chuck", "David"]
]
cls.a.friends.add(cls.b, cls.c)
# Add m2m for Anne and Chuck in reverse direction.
cls.d.friends.add(cls.a, cls.c)
def test_recursive_m2m_all(self):
for person, friends in (
(self.a, [self.b, self.c, self.d]),
(self.b, [self.a]),
(self.c, [self.a, self.d]),
(self.d, [self.a, self.c]),
):
with self.subTest(person=person):
self.assertSequenceEqual(person.friends.all(), friends)
def test_recursive_m2m_reverse_add(self):
# Add m2m for Anne in reverse direction.
self.b.friends.add(self.a)
self.assertSequenceEqual(self.a.friends.all(), [self.b, self.c, self.d])
self.assertSequenceEqual(self.b.friends.all(), [self.a])
def test_recursive_m2m_remove(self):
self.b.friends.remove(self.a)
self.assertSequenceEqual(self.a.friends.all(), [self.c, self.d])
self.assertSequenceEqual(self.b.friends.all(), [])
def test_recursive_m2m_clear(self):
# Clear m2m for Anne.
self.a.friends.clear()
self.assertSequenceEqual(self.a.friends.all(), [])
# Reverse m2m relationships should be removed.
self.assertSequenceEqual(self.c.friends.all(), [self.d])
self.assertSequenceEqual(self.d.friends.all(), [self.c])
def test_recursive_m2m_add_via_related_name(self):
# Add m2m with custom related name for Anne in reverse direction.
self.d.stalkers.add(self.a)
self.assertSequenceEqual(self.a.idols.all(), [self.d])
self.assertSequenceEqual(self.a.stalkers.all(), [])
def test_recursive_m2m_add_in_both_directions(self):
# Adding the same relation twice results in a single relation.
self.a.idols.add(self.d)
self.d.stalkers.add(self.a)
self.assertSequenceEqual(self.a.idols.all(), [self.d])
def test_recursive_m2m_related_to_self(self):
self.a.idols.add(self.a)
self.assertSequenceEqual(self.a.idols.all(), [self.a])
self.assertSequenceEqual(self.a.stalkers.all(), [self.a])
| RecursiveM2MTests |
python | django__django | tests/delete/models.py | {
"start": 630,
"end": 658
} | class ____(R):
pass
| RChild |
python | python-openxml__python-docx | src/docx/oxml/table.py | {
"start": 27628,
"end": 30780
} | class ____(BaseOxmlElement):
"""``<w:tcPr>`` element, defining table cell properties."""
get_or_add_gridSpan: Callable[[], CT_DecimalNumber]
get_or_add_tcW: Callable[[], CT_TblWidth]
get_or_add_vAlign: Callable[[], CT_VerticalJc]
_add_vMerge: Callable[[], CT_VMerge]
_remove_gridSpan: Callable[[], None]
_remove_vAlign: Callable[[], None]
_remove_vMerge: Callable[[], None]
_tag_seq = (
"w:cnfStyle",
"w:tcW",
"w:gridSpan",
"w:hMerge",
"w:vMerge",
"w:tcBorders",
"w:shd",
"w:noWrap",
"w:tcMar",
"w:textDirection",
"w:tcFitText",
"w:vAlign",
"w:hideMark",
"w:headers",
"w:cellIns",
"w:cellDel",
"w:cellMerge",
"w:tcPrChange",
)
tcW: CT_TblWidth | None = ZeroOrOne( # pyright: ignore[reportAssignmentType]
"w:tcW", successors=_tag_seq[2:]
)
gridSpan: CT_DecimalNumber | None = ZeroOrOne( # pyright: ignore[reportAssignmentType]
"w:gridSpan", successors=_tag_seq[3:]
)
vMerge: CT_VMerge | None = ZeroOrOne( # pyright: ignore[reportAssignmentType]
"w:vMerge", successors=_tag_seq[5:]
)
vAlign: CT_VerticalJc | None = ZeroOrOne( # pyright: ignore[reportAssignmentType]
"w:vAlign", successors=_tag_seq[12:]
)
del _tag_seq
@property
def grid_span(self) -> int:
"""The integer number of columns this cell spans.
Determined by ./w:gridSpan/@val, it defaults to 1.
"""
gridSpan = self.gridSpan
return 1 if gridSpan is None else gridSpan.val
@grid_span.setter
def grid_span(self, value: int):
self._remove_gridSpan()
if value > 1:
self.get_or_add_gridSpan().val = value
@property
def vAlign_val(self):
"""Value of `w:val` attribute on `w:vAlign` child.
Value is |None| if `w:vAlign` child is not present. The `w:val` attribute on
`w:vAlign` is required.
"""
vAlign = self.vAlign
if vAlign is None:
return None
return vAlign.val
@vAlign_val.setter
def vAlign_val(self, value: WD_CELL_VERTICAL_ALIGNMENT | None):
if value is None:
self._remove_vAlign()
return
self.get_or_add_vAlign().val = value
@property
def vMerge_val(self):
"""The value of the ./w:vMerge/@val attribute, or |None| if the w:vMerge element
is not present."""
vMerge = self.vMerge
if vMerge is None:
return None
return vMerge.val
@vMerge_val.setter
def vMerge_val(self, value: str | None):
self._remove_vMerge()
if value is not None:
self._add_vMerge().val = value
@property
def width(self) -> Length | None:
"""EMU length in `./w:tcW` or |None| if not present or its type is not 'dxa'."""
tcW = self.tcW
if tcW is None:
return None
return tcW.width
@width.setter
def width(self, value: Length):
tcW = self.get_or_add_tcW()
tcW.width = value
| CT_TcPr |
python | Pylons__pyramid | src/pyramid/path.py | {
"start": 15457,
"end": 15908
} | class ____:
def __init__(self, path):
self.path = os.path.abspath(path)
def absspec(self):
raise NotImplementedError
def abspath(self):
return self.path
def stream(self):
return open(self.path, 'rb')
def isdir(self):
return os.path.isdir(self.path)
def listdir(self):
return os.listdir(self.path)
def exists(self):
return os.path.exists(self.path)
| FSAssetDescriptor |
python | google__jax | jax/_src/stages.py | {
"start": 2451,
"end": 8040
} | class ____:
def xla_extension_executable(self) -> xc.LoadedExecutable:
raise NotImplementedError(
"compiled executable carries no loaded XLA executable. It may be "
f"that {type(self)} defines an incomplete implementation.")
def call(self, *args_flat) -> Sequence[Any]:
"""Execute on the flat list of arguments, returning flat outputs."""
raise NotImplementedError("compiled executable does not support invocation")
def create_cpp_call(self, params: CompiledCallParams) -> Any:
"""Optionally constructs a fast c++ dispatcher."""
return None
def input_shardings(self) -> Sequence[sharding_lib.Sharding]:
"""Flat sequence of input shardings.
May raise ``NotImplementedError`` if unavailable, e.g. based on backend,
compiler, or runtime.
"""
raise NotImplementedError(
"compiled executable carries no input sharding information")
def output_shardings(self) -> Sequence[sharding_lib.Sharding]:
"""Flat sequence of output shardings.
May raise ``NotImplementedError`` if unavailable, e.g. based on backend,
compiler, or runtime.
"""
raise NotImplementedError(
"compiled executable carries no output sharding information")
def input_formats(self):
raise NotImplementedError(
"compiled executable carries no input layout information")
def output_formats(self):
raise NotImplementedError(
"compiled executable carries no output layout information")
def as_text(self) -> str:
"""A human-readable text representation of this executable.
Intended for visualization and debugging purposes. This need not be a valid
nor reliable serialization. It is relayed directly to external callers.
May raise ``NotImplementedError`` if unavailable, e.g. based on backend,
compiler, or runtime.
"""
xla_ext_exe = self.xla_extension_executable()
err_msg = ("text view unsupported on current XLA backend: "
f"{type(xla_ext_exe)}")
if hasattr(xla_ext_exe, "get_hlo_text"):
try:
return xla_ext_exe.get_hlo_text()
except _jax.JaxRuntimeError as e:
msg, *_ = e.args
if type(msg) is str and msg.startswith("UNIMPLEMENTED"):
raise NotImplementedError(err_msg) from e
else:
raise
else:
if not hasattr(xla_ext_exe, "hlo_modules"):
raise NotImplementedError(err_msg)
try:
return "\n\n".join([m.to_string() for m in xla_ext_exe.hlo_modules()])
except _jax.JaxRuntimeError as e:
msg, *_ = e.args
if type(msg) is str and msg.startswith("UNIMPLEMENTED"):
raise NotImplementedError(err_msg) from e
else:
raise
def cost_analysis(self) -> Any:
"""A summary of execution cost estimates.
Intended for visualization and debugging purposes. The object output by
this is some simple data structure that can easily be printed or serialized
(e.g. nested dicts, lists, and tuples with numeric leaves). However, its
structure can be arbitrary: it need not be consistent across versions of JAX
and jaxlib, or even across invocations. It is relayed directly to external
callers.
May raise ``NotImplementedError`` if unavailable, e.g. based on backend,
compiler, or runtime.
"""
xla_ext_exe = self.xla_extension_executable()
if hasattr(xla_ext_exe, "cost_analysis"):
try:
return xla_ext_exe.cost_analysis()
except _jax.JaxRuntimeError as e:
msg, *_ = e.args
if not (type(msg) is str and msg.startswith("UNIMPLEMENTED")):
raise
if (
xla_ext_exe is None
and hasattr(self, "unsafe_call")
and hasattr(self.unsafe_call, "compiled")
and hasattr(self.unsafe_call.compiled, "cost_analysis")
):
return self.unsafe_call.compiled.cost_analysis()
raise NotImplementedError(
f"cost analysis unsupported on current XLA backend: {type(xla_ext_exe)}"
)
def memory_analysis(self) -> Any:
"""A summary of estimated memory requirements.
Intended for visualization and debugging purposes. The object output by
this is some simple data structure that can easily be printed or serialized
(e.g. nested dicts, lists, and tuples with numeric leaves). However, its
structure can be arbitrary: it need not be consistent across versions of JAX
and jaxlib, or even across invocations. It is relayed directly to external
callers.
May raise ``NotImplementedError`` if unavailable, e.g. based on backend,
compiler, or runtime.
"""
xla_ext_exe = self.xla_extension_executable()
err_msg = ("memory analysis unsupported on current XLA backend: "
f"{type(xla_ext_exe)}")
if not hasattr(xla_ext_exe, "get_compiled_memory_stats"):
raise NotImplementedError(err_msg)
try:
return xla_ext_exe.get_compiled_memory_stats()
except _jax.JaxRuntimeError as e:
msg, *_ = e.args
if type(msg) is str and msg.startswith("UNIMPLEMENTED"):
raise NotImplementedError(err_msg) from e
else:
raise
def runtime_executable(self) -> Any:
"""An arbitrary object representation of this executable.
Intended for debugging purposes. This need not be a valid nor reliable
serialization. It is relayed directly to external callers, with no
guarantee on type, structure, or consistency across invocations.
May raise ``NotImplementedError`` if unavailable, e.g. based on backend or
compiler.
"""
return self.xla_extension_executable()
| Executable |
python | sqlalchemy__sqlalchemy | test/orm/test_deferred.py | {
"start": 52570,
"end": 54029
} | class ____(testing.fixtures.DeclarativeMappedTest):
"""test for [ticket:3822]"""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Node(Base):
__tablename__ = "node"
id = sa.Column(sa.Integer, primary_key=True)
parent_id = sa.Column(sa.ForeignKey("node.id"))
parent = relationship("Node", remote_side=[id])
name = sa.Column(sa.String(10))
@classmethod
def insert_data(cls, connection):
Node = cls.classes.Node
session = Session(connection)
session.add_all(
[
Node(id=1, name="name"),
Node(id=2, parent_id=1, name="name"),
Node(id=3, parent_id=1, name="name"),
]
)
session.commit()
def test_present_overrides_deferred(self):
Node = self.classes.Node
session = fixture_session()
q = session.query(Node).options(
joinedload(Node.parent).load_only(Node.id, Node.parent_id)
)
# Node #1 will appear first as Node.parent and have
# deferred applied to Node.name. it will then appear
# as Node in the last row and "name" should be populated.
nodes = q.order_by(Node.id.desc()).all()
def go():
for node in nodes:
eq_(node.name, "name")
self.assert_sql_count(testing.db, go, 0)
| SelfReferentialMultiPathTest |
python | huggingface__transformers | src/transformers/generation/configuration_utils.py | {
"start": 70322,
"end": 74064
} | class ____(BaseWatermarkingConfig):
"""
Class that holds arguments for watermark generation and should be passed into `GenerationConfig` during `generate`.
See [this paper](https://www.nature.com/articles/s41586-024-08025-4) for more details on the arguments.
Args:
ngram_len (`int`):
Ngram length.
keys (`list[int]`):
A sequence of watermarking keys, one for each depth.
context_history_size (`int`, *optional*, defaults to 1024):
Size of the tensor to keep track of seen contexts.
sampling_table_seed (`int`, *optional*, defaults to 0):
Random seed to generate the sampling table.
sampling_table_size (`int`, *optional*, defaults to 65536):
Size of the sampling table.
skip_first_ngram_calls (`bool`, *optional*, defaults to `False`):
Whether to skip first ngram calls.
debug_mode (`bool`, optional, *optional*, defaults to `False`):
Logits are modified to uniform one got before watermarking modification is applied. This is to test the
implementation.
Examples:
```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer, SynthIDTextWatermarkingConfig
>>> tokenizer = AutoTokenizer.from_pretrained('google/gemma-2-2b', padding_side="left")
>>> model = AutoModelForCausalLM.from_pretrained('google/gemma-2-2b')
>>> # SynthID Text configuration
>>> watermarking_config = SynthIDTextWatermarkingConfig(
... keys=[654, 400, 836, 123, 340, 443, 597, 160, 57],
... ngram_len=5,
... )
>>> # Generation with watermarking
>>> tokenized_prompts = tokenizer(["Once upon a time, "], return_tensors="pt", padding=True)
>>> output_sequences = model.generate(
... **tokenized_prompts, watermarking_config=watermarking_config, do_sample=True, max_new_tokens=10
... )
>>> watermarked_text = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
```
"""
def __init__(
self,
ngram_len: int,
keys: list[int],
context_history_size: int = 1024,
sampling_table_seed: int = 0,
sampling_table_size: int = 2**16,
skip_first_ngram_calls: bool = False,
debug_mode: bool = False,
):
self.ngram_len = ngram_len
self.keys = keys
self.sampling_table_size = sampling_table_size
self.sampling_table_seed = sampling_table_seed
self.context_history_size = context_history_size
self.skip_first_ngram_calls = skip_first_ngram_calls
self.debug_mode = debug_mode
def validate(self):
watermark_missing_arg_msg = (
"Some of the keys in `watermarking_config` are defined incorrectly. `{key}` should be {correct_value}` "
"but found {found_value}"
)
if self.sampling_table_size > 2**24:
raise ValueError(
watermark_missing_arg_msg.format(
key="sampling_table_size",
correct_value="< 2**24",
found_value=self.sampling_table_size,
),
)
def construct_processor(self, vocab_size: int, device) -> "WatermarkLogitsProcessor":
return SynthIDTextWatermarkLogitsProcessor(
ngram_len=self.ngram_len,
keys=self.keys,
sampling_table_size=self.sampling_table_size,
sampling_table_seed=self.sampling_table_seed,
context_history_size=self.context_history_size,
device=device,
skip_first_ngram_calls=self.skip_first_ngram_calls,
debug_mode=self.debug_mode,
)
@dataclass
| SynthIDTextWatermarkingConfig |
python | dagster-io__dagster | python_modules/automation/automation/dagster_docs/docstring_rules/base.py | {
"start": 766,
"end": 3206
} | class ____:
"""Results from validating a docstring."""
symbol_path: str
errors: list[str]
warnings: list[str]
parsing_successful: bool
@staticmethod
def create(symbol_path: str) -> "ValidationResult":
"""Create a new ValidationResult."""
return ValidationResult(
symbol_path=symbol_path,
errors=[],
warnings=[],
parsing_successful=True,
)
def with_error(self, message: str, line_number: Optional[int] = None) -> "ValidationResult":
"""Return a new ValidationResult with an additional error message."""
location = f" (line {line_number})" if line_number else ""
full_message = f"{message}{location}"
# Avoid duplicates
if full_message not in self.errors:
return ValidationResult(
symbol_path=self.symbol_path,
errors=self.errors + [full_message],
warnings=self.warnings,
parsing_successful=self.parsing_successful,
)
return self
def with_warning(self, message: str, line_number: Optional[int] = None) -> "ValidationResult":
"""Return a new ValidationResult with an additional warning message."""
location = f" (line {line_number})" if line_number else ""
full_message = f"{message}{location}"
# Avoid duplicates
if full_message not in self.warnings:
return ValidationResult(
symbol_path=self.symbol_path,
errors=self.errors,
warnings=self.warnings + [full_message],
parsing_successful=self.parsing_successful,
)
return self
def with_parsing_failed(self) -> "ValidationResult":
"""Return a new ValidationResult with parsing marked as failed."""
return ValidationResult(
symbol_path=self.symbol_path,
errors=self.errors,
warnings=self.warnings,
parsing_successful=False,
)
def has_errors(self) -> bool:
"""Check if there are any errors."""
return len(self.errors) > 0
def has_warnings(self) -> bool:
"""Check if there are any warnings."""
return len(self.warnings) > 0
def is_valid(self) -> bool:
"""Check if the docstring is valid (no errors)."""
return not self.has_errors() and self.parsing_successful
| ValidationResult |
python | django__django | django/forms/widgets.py | {
"start": 34449,
"end": 34990
} | class ____(SplitDateTimeWidget):
"""
A widget that splits datetime input into two <input type="hidden"> inputs.
"""
template_name = "django/forms/widgets/splithiddendatetime.html"
def __init__(
self,
attrs=None,
date_format=None,
time_format=None,
date_attrs=None,
time_attrs=None,
):
super().__init__(attrs, date_format, time_format, date_attrs, time_attrs)
for widget in self.widgets:
widget.input_type = "hidden"
| SplitHiddenDateTimeWidget |
python | cython__cython | Cython/Tests/TestCythonUtils.py | {
"start": 471,
"end": 6691
} | class ____(unittest.TestCase):
def test_build_hex_version(self):
self.assertEqual('0x001D00A1', build_hex_version('0.29a1'))
self.assertEqual('0x001D03C4', build_hex_version('0.29.3rc4'))
self.assertEqual('0x001D00F0', build_hex_version('0.29'))
self.assertEqual('0x040000F0', build_hex_version('4.0'))
############################## Cached Methods ##############################
def test_cache_method_name(self):
method_name = "foo"
cache_name = _build_cache_name(method_name)
match = _CACHE_NAME_PATTERN.match(cache_name)
self.assertIsNot(match, None)
self.assertEqual(match.group(1), method_name)
def test_requirements_for_Cached(self):
obj = Cached()
self.assertFalse(hasattr(obj, CACHE_NAME))
self.assertTrue(hasattr(obj, METHOD_NAME))
self.set_of_names_equal(obj, set())
def set_of_names_equal(self, obj, value):
self.assertEqual(set(_find_cache_attributes(obj)), value)
def test_find_cache_attributes(self):
obj = Cached()
method_name = "bar"
cache_name = _build_cache_name(method_name)
setattr(obj, CACHE_NAME, {})
setattr(obj, cache_name, {})
self.assertFalse(hasattr(obj, method_name))
self.set_of_names_equal(obj, {NAMES, (cache_name, method_name)})
def test_cached_method(self):
obj = Cached()
value = iter(range(3))
cache = {(value,): 0}
# cache args
self.assertEqual(obj.cached_next(value), 0)
self.set_of_names_equal(obj, {NAMES})
self.assertEqual(getattr(obj, CACHE_NAME), cache)
# use cache
self.assertEqual(obj.cached_next(value), 0)
self.set_of_names_equal(obj, {NAMES})
self.assertEqual(getattr(obj, CACHE_NAME), cache)
def test_clear_method_caches(self):
obj = Cached()
value = iter(range(3))
cache = {(value,): 1}
obj.cached_next(value) # cache args
clear_method_caches(obj)
self.set_of_names_equal(obj, set())
self.assertEqual(obj.cached_next(value), 1)
self.set_of_names_equal(obj, {NAMES})
self.assertEqual(getattr(obj, CACHE_NAME), cache)
def test_clear_method_caches_with_missing_method(self):
obj = Cached()
method_name = "bar"
cache_name = _build_cache_name(method_name)
names = cache_name, method_name
setattr(obj, cache_name, object())
self.assertFalse(hasattr(obj, method_name))
self.set_of_names_equal(obj, {names})
clear_method_caches(obj)
self.set_of_names_equal(obj, {names})
def test_try_finally_contextmanager(self):
states = []
@try_finally_contextmanager
def gen(*args, **kwargs):
states.append("enter")
yield (args, kwargs)
states.append("exit")
with gen(1, 2, 3, x=4) as call_args:
assert states == ["enter"]
self.assertEqual(call_args, ((1, 2, 3), {'x': 4}))
assert states == ["enter", "exit"]
class MyException(RuntimeError):
pass
del states[:]
with self.assertRaises(MyException):
with gen(1, 2, y=4) as call_args:
assert states == ["enter"]
self.assertEqual(call_args, ((1, 2), {'y': 4}))
raise MyException("FAIL INSIDE")
assert states == ["enter", "exit"]
del states[:]
with self.assertRaises(StopIteration):
with gen(1, 2, y=4) as call_args:
assert states == ["enter"]
self.assertEqual(call_args, ((1, 2), {'y': 4}))
raise StopIteration("STOP")
assert states == ["enter", "exit"]
def test_print_version(self):
orig_stderr = sys.stderr
orig_stdout = sys.stdout
stderr = sys.stderr = StringIO()
stdout = sys.stdout = StringIO()
try:
print_version()
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
stdout = stdout.getvalue()
stderr = stderr.getvalue()
from .. import __version__ as version
self.assertIn(version, stdout)
if stderr: # Depends on os.fstat(1/2).
self.assertIn(version, stderr)
def test_print_version_stdouterr(self):
orig_stderr = sys.stderr
orig_stdout = sys.stdout
stdout = sys.stdout = sys.stderr = StringIO() # same!
try:
print_version()
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
stdout = stdout.getvalue()
from .. import __version__ as version
self.assertIn(version, stdout)
self.assertEqual(stdout.count(version), 1)
def test_normalise_float_repr(self):
examples = [
('.0', '.0'),
('.000000', '.0'),
('.1', '.1'),
('1.', '1.'),
('1.0', '1.'),
('1.000000000000000000000', '1.'),
('00000000000000000000001.000000000000000000000', '1.'),
('12345.0025', '12345.0025'),
('1E5', '100000.'),
('.1E-5', '.000001'),
('1.1E-5', '.000011'),
('12.3E-5', '.000123'),
('.1E10', '1000000000.'),
('1.1E10', '11000000000.'),
('123.4E10', '1234000000000.'),
('123.456E0', '123.456'),
('123.456E-1', '12.3456'),
('123.456E-2', '1.23456'),
('123.456E1', '1234.56'),
('123.456E2', '12345.6'),
('2.1E80', '210000000000000000000000000000000000000000000000000000000000000000000000000000000.'),
]
for float_str, norm_str in examples:
self.assertEqual(float(float_str), float(norm_str)) # safety check for test data
result = normalise_float_repr(float_str)
self.assertEqual(float(float_str), float(result))
self.assertEqual(
result, norm_str,
"normalise_float_repr(%r) == %r != %r (%.330f)" % (float_str, result, norm_str, float(float_str))
)
| TestCythonUtils |
python | langchain-ai__langchain | libs/core/langchain_core/callbacks/base.py | {
"start": 593,
"end": 1670
} | class ____:
"""Mixin for Retriever callbacks."""
def on_retriever_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> Any:
"""Run when Retriever errors.
Args:
error: The error that occurred.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
"""
def on_retriever_end(
self,
documents: Sequence[Document],
*,
run_id: UUID,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> Any:
"""Run when Retriever ends running.
Args:
documents: The documents retrieved.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
"""
| RetrieverManagerMixin |
python | pandas-dev__pandas | pandas/core/window/ewm.py | {
"start": 29117,
"end": 30266
} | class ____(BaseWindowGroupby, ExponentialMovingWindow):
"""
Provide an exponential moving window groupby implementation.
"""
_attributes = ExponentialMovingWindow._attributes + BaseWindowGroupby._attributes
def __init__(self, obj, *args, _grouper=None, **kwargs) -> None:
super().__init__(obj, *args, _grouper=_grouper, **kwargs)
if not obj.empty and self.times is not None:
# sort the times and recalculate the deltas according to the groups
groupby_order = np.concatenate(list(self._grouper.indices.values()))
self._deltas = _calculate_deltas(
self.times.take(groupby_order),
self.halflife,
)
def _get_window_indexer(self) -> GroupbyIndexer:
"""
Return an indexer class that will compute the window start and end bounds
Returns
-------
GroupbyIndexer
"""
window_indexer = GroupbyIndexer(
groupby_indices=self._grouper.indices,
window_indexer=ExponentialMovingWindowIndexer,
)
return window_indexer
| ExponentialMovingWindowGroupby |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 936062,
"end": 936567
} | class ____(sgqlc.types.Type):
"""A repository contact link."""
__schema__ = github_schema
__field_names__ = ("about", "name", "url")
about = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="about")
"""The contact link purpose."""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The contact link name."""
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
"""The contact link URL."""
| RepositoryContactLink |
python | modin-project__modin | modin/core/dataframe/algebra/fold.py | {
"start": 1083,
"end": 3497
} | class ____(Operator):
"""Builder class for Fold functions."""
@classmethod
def register(
cls, fold_function: Callable[..., pandas.DataFrame], shape_preserved=False
) -> Callable[..., PandasQueryCompiler]:
"""
Build Fold operator that will be performed across rows/columns.
Parameters
----------
fold_function : callable(pandas.DataFrame, *args, **kwargs) -> pandas.DataFrame
Function to apply across rows/columns.
shape_preserved : bool, default: False
Whether the shape of the dataframe is preserved or not
after applying a function.
Returns
-------
callable
Function that takes query compiler and executes Fold function.
"""
def caller(
query_compiler: PandasQueryCompiler,
fold_axis: Optional[int] = None,
*args: tuple,
new_index=None,
new_columns=None,
**kwargs: dict,
) -> PandasQueryCompiler:
"""
Execute Fold function against passed query compiler.
Parameters
----------
query_compiler : PandasQueryCompiler
The query compiler to execute the function on.
fold_axis : int, optional
0 or None means apply across full column partitions. 1 means
apply across full row partitions.
*args : tuple
Additional arguments passed to `fold_function`.
new_index : list-like, optional
The index of the result.
new_columns : list-like, optional
The columns of the result.
**kwargs: dict
Additional keyword arguments passed to `fold_function`.
Returns
-------
PandasQueryCompiler
A new query compiler representing the result of executing the
function.
"""
return query_compiler.__constructor__(
query_compiler._modin_frame.fold(
cls.validate_axis(fold_axis),
lambda x: fold_function(x, *args, **kwargs),
new_index=new_index,
new_columns=new_columns,
shape_preserved=shape_preserved,
)
)
return caller
| Fold |
python | encode__starlette | tests/test_routing.py | {
"start": 29474,
"end": 30580
} | class ____:
async def my_method(self, request: Request) -> None: ... # pragma: no cover
@classmethod
async def my_classmethod(cls, request: Request) -> None: ... # pragma: no cover
@staticmethod
async def my_staticmethod(request: Request) -> None: ... # pragma: no cover
def __call__(self, request: Request) -> None: ... # pragma: no cover
@pytest.mark.parametrize(
"endpoint, expected_name",
[
pytest.param(func_homepage, "func_homepage", id="function"),
pytest.param(Endpoint().my_method, "my_method", id="method"),
pytest.param(Endpoint.my_classmethod, "my_classmethod", id="classmethod"),
pytest.param(
Endpoint.my_staticmethod,
"my_staticmethod",
id="staticmethod",
),
pytest.param(Endpoint(), "Endpoint", id="object"),
pytest.param(lambda request: ..., "<lambda>", id="lambda"), # pragma: no branch
],
)
def test_route_name(endpoint: Callable[..., Response], expected_name: str) -> None:
assert Route(path="/", endpoint=endpoint).name == expected_name
| Endpoint |
python | mlflow__mlflow | tests/utils/test_request_utils.py | {
"start": 779,
"end": 5089
} | class ____:
def __init__(self):
self.headers = {"Content-Length": "100"}
raw = mock.MagicMock()
raw.tell.return_value = 50
self.raw = raw
def __enter__(self):
return self
def __exit__(self, *args):
pass
def test_download_chunk_incomplete_read(tmp_path):
with mock.patch.object(
request_utils, "cloud_storage_http_request", return_value=IncompleteResponse()
):
download_path = tmp_path / "chunk"
download_path.touch()
with pytest.raises(IOError, match="Incomplete read"):
request_utils.download_chunk(
range_start=0,
range_end=999,
headers={},
download_path=download_path,
http_uri="https://example.com",
)
@pytest.mark.parametrize("env_value", ["0", "false", "False", "FALSE"])
def test_redirects_disabled_if_env_var_set(monkeypatch, env_value):
monkeypatch.setenv("MLFLOW_ALLOW_HTTP_REDIRECTS", env_value)
with mock.patch("requests.Session.request") as mock_request:
mock_request.return_value.status_code = 302
mock_request.return_value.text = "mock response"
response = request_utils.cloud_storage_http_request("GET", "http://localhost:5000")
assert response.text == "mock response"
mock_request.assert_called_once_with(
"GET",
"http://localhost:5000",
allow_redirects=False,
timeout=None,
)
@pytest.mark.parametrize("env_value", ["1", "true", "True", "TRUE"])
def test_redirects_enabled_if_env_var_set(monkeypatch, env_value):
monkeypatch.setenv("MLFLOW_ALLOW_HTTP_REDIRECTS", env_value)
with mock.patch("requests.Session.request") as mock_request:
mock_request.return_value.status_code = 302
mock_request.return_value.text = "mock response"
response = request_utils.cloud_storage_http_request(
"GET",
"http://localhost:5000",
)
assert response.text == "mock response"
mock_request.assert_called_once_with(
"GET",
"http://localhost:5000",
allow_redirects=True,
timeout=None,
)
@pytest.mark.parametrize("env_value", ["0", "false", "False", "FALSE"])
def test_redirect_kwarg_overrides_env_value_false(monkeypatch, env_value):
monkeypatch.setenv("MLFLOW_ALLOW_HTTP_REDIRECTS", env_value)
with mock.patch("requests.Session.request") as mock_request:
mock_request.return_value.status_code = 302
mock_request.return_value.text = "mock response"
response = request_utils.cloud_storage_http_request(
"GET", "http://localhost:5000", allow_redirects=True
)
assert response.text == "mock response"
mock_request.assert_called_once_with(
"GET",
"http://localhost:5000",
allow_redirects=True,
timeout=None,
)
@pytest.mark.parametrize("env_value", ["1", "true", "True", "TRUE"])
def test_redirect_kwarg_overrides_env_value_true(monkeypatch, env_value):
monkeypatch.setenv("MLFLOW_ALLOW_HTTP_REDIRECTS", env_value)
with mock.patch("requests.Session.request") as mock_request:
mock_request.return_value.status_code = 302
mock_request.return_value.text = "mock response"
response = request_utils.cloud_storage_http_request(
"GET", "http://localhost:5000", allow_redirects=False
)
assert response.text == "mock response"
mock_request.assert_called_once_with(
"GET",
"http://localhost:5000",
allow_redirects=False,
timeout=None,
)
def test_redirects_enabled_by_default():
with mock.patch("requests.Session.request") as mock_request:
mock_request.return_value.status_code = 302
mock_request.return_value.text = "mock response"
response = request_utils.cloud_storage_http_request(
"GET",
"http://localhost:5000",
)
assert response.text == "mock response"
mock_request.assert_called_once_with(
"GET",
"http://localhost:5000",
allow_redirects=True,
timeout=None,
)
| IncompleteResponse |
python | apache__airflow | airflow-core/tests/unit/utils/test_db.py | {
"start": 2736,
"end": 15251
} | class ____:
def test_database_schema_and_sqlalchemy_model_are_in_sync(self, initialized_db):
import airflow.models
# Ensure we have a fresh connection for schema comparison
settings.Session.remove() # Clear any existing sessions
airflow.models.import_all_models()
all_meta_data = MetaData()
# Airflow DB
for table_name, table in airflow_base.metadata.tables.items():
all_meta_data._add_table(table_name, table.schema, table)
# External DB Managers
external_db_managers = RunDBManager()
for dbmanager in external_db_managers._managers:
for table_name, table in dbmanager.metadata.tables.items():
all_meta_data._add_table(table_name, table.schema, table)
skip_fab = PY313
if not skip_fab:
# FAB DB Manager
from airflow.providers.fab.auth_manager.models.db import FABDBManager
# test FAB models
for table_name, table in FABDBManager.metadata.tables.items():
all_meta_data._add_table(table_name, table.schema, table)
else:
print("Ignoring FAB models in Python 3.13+ as FAB is not compatible with 3.13+ yet.")
# create diff between database schema and SQLAlchemy model
mctx = MigrationContext.configure(
settings.engine.connect(),
opts={"compare_type": compare_type, "compare_server_default": compare_server_default},
)
diff = compare_metadata(mctx, all_meta_data)
# known diffs to ignore
ignores = [
# ignore tables created by celery
lambda t: (t[0] == "remove_table" and t[1].name == "celery_taskmeta"),
lambda t: (t[0] == "remove_table" and t[1].name == "celery_tasksetmeta"),
# ignore indices created by celery
lambda t: (t[0] == "remove_index" and t[1].name == "task_id"),
lambda t: (t[0] == "remove_index" and t[1].name == "taskset_id"),
# from test_security unit test
lambda t: (t[0] == "remove_table" and t[1].name == "some_model"),
# Ignore flask-session table/index
lambda t: (t[0] == "remove_table" and t[1].name == "session"),
lambda t: (t[0] == "remove_index" and t[1].name == "session_id"),
lambda t: (t[0] == "remove_index" and t[1].name == "session_session_id_uq"),
# sqlite sequence is used for autoincrementing columns created with `sqlite_autoincrement` option
lambda t: (t[0] == "remove_table" and t[1].name == "sqlite_sequence"),
# fab version table
lambda t: (t[0] == "remove_table" and t[1].name == "alembic_version_fab"),
# Ignore _xcom_archive table
lambda t: (t[0] == "remove_table" and t[1].name == "_xcom_archive"),
]
if skip_fab:
# Check structure first
ignores.append(lambda t: len(t) > 1 and hasattr(t[1], "name") and t[1].name.startswith("ab_"))
ignores.append(
lambda t: (
len(t) > 1
and t[0] == "remove_index"
and hasattr(t[1], "columns")
and len(t[1].columns) > 0
and hasattr(t[1].columns[0], "table")
and t[1].columns[0].table.name.startswith("ab_")
)
)
for ignore in ignores:
diff = [d for d in diff if not ignore(d)]
# Filter out modify_default diffs - handle the list-wrapped format
final_diff = []
for d in diff:
# Check if it's a list containing a tuple with 'modify_default' as first element
if isinstance(d, list) and len(d) > 0 and isinstance(d[0], tuple) and d[0][0] == "modify_default":
continue # Skip modify_default diffs
# Also check direct tuple format just in case
if isinstance(d, tuple) and len(d) > 0 and d[0] == "modify_default":
continue # Skip modify_default diffs
final_diff.append(d)
if final_diff:
print("Database schema and SQLAlchemy model are not in sync: ")
for single_diff in final_diff:
print(f"Diff: {single_diff}")
pytest.fail("Database schema and SQLAlchemy model are not in sync")
def test_only_single_head_revision_in_migrations(self):
config = Config()
config.set_main_option("script_location", "airflow:migrations")
script = ScriptDirectory.from_config(config)
from airflow.settings import engine
with EnvironmentContext(
config,
script,
as_sql=True,
) as env:
env.configure(dialect_name=engine.dialect.name)
# This will raise if there are multiple heads
# To resolve, use the command `alembic merge`
script.get_current_head()
def test_default_connections_sort(self):
pattern = re.compile("conn_id=[\"|'](.*?)[\"|']", re.DOTALL)
source = inspect.getsource(create_default_connections)
src = pattern.findall(source)
assert sorted(src) == src
@pytest.mark.usefixtures("initialized_db")
def test_check_migrations(self):
# Should run without error. Can't easily test the behaviour, but we can check it works
check_migrations(0)
check_migrations(1)
@pytest.mark.parametrize(
("auth", "expected"),
[
(
{
(
"core",
"auth_manager",
): "airflow.api_fastapi.auth.managers.simple.simple_auth_manager.SimpleAuthManager"
},
1,
),
(
{
(
"core",
"auth_manager",
): "airflow.providers.fab.auth_manager.fab_auth_manager.FabAuthManager"
},
2,
),
],
)
def test_upgradedb(self, auth, expected, mocker):
if PY313 and "airflow.providers.fab.auth_manager.fab_auth_manager.FabAuthManager" in str(auth):
pytest.skip(
"Skipping test for FAB Auth Manager on Python 3.13+ as FAB is not compatible with 3.13+ yet."
)
mock_upgrade = mocker.patch("alembic.command.upgrade")
with conf_vars(auth):
upgradedb()
# Verify the mock was called correctly
assert mock_upgrade.call_count >= expected, (
f"Expected at least {expected} calls, got {mock_upgrade.call_count}"
)
# Check that it was called with 'heads' at least once
# Handle different call structures more safely
heads_called = False
for call in mock_upgrade.call_args_list:
# Check positional args
if len(call.args) > 1 and call.args[1] == "heads":
heads_called = True
break
# Check keyword args
if "revision" in call.kwargs and call.kwargs["revision"] == "heads":
heads_called = True
break
assert heads_called, (
f"upgrade should be called with revision='heads', got calls: {mock_upgrade.call_args_list}"
)
@pytest.mark.parametrize(
("from_revision", "to_revision"),
[("be2bfac3da23", "e959f08ac86c"), ("ccde3e26fe78", "2e42bb497a22")],
)
def test_offline_upgrade_wrong_order(self, from_revision, to_revision, mocker):
mocker.patch("airflow.utils.db.settings.engine.dialect")
mocker.patch("alembic.command.upgrade")
with pytest.raises(ValueError, match="Error while checking history for revision range *:*"):
upgradedb(from_revision=from_revision, to_revision=to_revision, show_sql_only=True)
@pytest.mark.parametrize(
("to_revision", "from_revision"),
[
("e959f08ac86c", "e959f08ac86c"),
],
)
def test_offline_upgrade_revision_nothing(self, from_revision, to_revision, mocker):
mocker.patch("airflow.utils.db.settings.engine.dialect")
mocker.patch("alembic.command.upgrade")
with redirect_stdout(StringIO()) as temp_stdout:
upgradedb(to_revision=to_revision, from_revision=from_revision, show_sql_only=True)
stdout = temp_stdout.getvalue()
assert "nothing to do" in stdout
def test_offline_upgrade_no_versions(self, mocker):
"""Offline upgrade should work with no version / revision options."""
mock_om = mocker.patch("airflow.utils.db._offline_migration")
mocker.patch("airflow.utils.db._get_current_revision", return_value="22ed7efa9da2")
mocker.patch("airflow.utils.db.settings.engine.dialect").name = "postgresql"
upgradedb(from_revision=None, to_revision=None, show_sql_only=True)
actual = mock_om.call_args.args[2]
assert re.match(r"22ed7efa9da2:[a-z0-9]+", actual) is not None
def test_sqlite_offline_upgrade_raises_with_revision(self, mocker):
mocker.patch("airflow.utils.db._get_current_revision")
mocker.patch("airflow.utils.db.settings.engine.dialect").name = "sqlite"
with pytest.raises(SystemExit, match="Offline migration not supported for SQLite"):
upgradedb(from_revision=None, to_revision=None, show_sql_only=True)
@pytest.mark.usefixtures("initialized_db")
def test_downgrade_sql_no_from(self, mocker):
mock_om = mocker.patch("airflow.utils.db._offline_migration")
downgrade(to_revision="abc", show_sql_only=True, from_revision=None)
# The actual revision might be 'None:abc' due to engine state
# Be more flexible in what we accept
actual = mock_om.call_args.kwargs["revision"]
# Accept either format since the workaround might affect this
assert re.match(r"([a-z0-9]+|None):abc", actual) is not None, (
f"Expected revision to match pattern, got: {actual}"
)
def test_downgrade_sql_with_from(self, mocker):
mock_om = mocker.patch("airflow.utils.db._offline_migration")
downgrade(to_revision="abc", show_sql_only=True, from_revision="123")
actual = mock_om.call_args.kwargs["revision"]
assert actual == "123:abc"
def test_downgrade_invalid_combo(self, mocker):
"""Can't combine `sql=False` and `from_revision`"""
mocker.patch("alembic.command.downgrade")
with pytest.raises(ValueError, match="can't be combined"):
downgrade(to_revision="abc", from_revision="123")
def test_downgrade_with_from(self, mocker):
mock_om = mocker.patch("alembic.command.downgrade")
downgrade(to_revision="abc")
actual = mock_om.call_args.kwargs["revision"]
assert actual == "abc"
def test_resetdb_logging_level(self):
unset_logging_level = logging.root.level
logging.root.setLevel(logging.DEBUG)
set_logging_level = logging.root.level
resetdb()
assert logging.root.level == set_logging_level
assert logging.root.level != unset_logging_level
def test_alembic_configuration(self, mocker):
# Test with custom path
mocker.patch.dict(os.environ, {"AIRFLOW__DATABASE__ALEMBIC_INI_FILE_PATH": "/tmp/alembic.ini"})
config = _get_alembic_config()
assert config.config_file_name == "/tmp/alembic.ini"
# Test default behaviour - need to clear the env var
mocker.patch.dict(os.environ, {}, clear=True) # Clear all env vars
# Or more safely, just remove the specific key
if "AIRFLOW__DATABASE__ALEMBIC_INI_FILE_PATH" in os.environ:
del os.environ["AIRFLOW__DATABASE__ALEMBIC_INI_FILE_PATH"]
config = _get_alembic_config()
import airflow
assert config.config_file_name == os.path.join(os.path.dirname(airflow.__file__), "alembic.ini")
def test_bool_lazy_select_sequence(self):
class MockSession:
def __init__(self):
pass
def scalar(self, stmt):
return None
t = Table("t", MetaData(), Column("id", Integer, primary_key=True))
lss = LazySelectSequence.from_select(select(t.c.id), order_by=[], session=MockSession())
assert bool(lss) is False
| TestDb |
python | sanic-org__sanic | sanic/blueprints.py | {
"start": 1720,
"end": 20676
} | class ____(BaseSanic):
"""A logical collection of URLs that consist of a similar logical domain.
A Blueprint object is the main tool for grouping functionality and similar endpoints. It allows the developer to
organize routes, exception handlers, middleware, and other web functionalities into separate, modular groups.
See [Blueprints](/en/guide/best-practices/blueprints) for more information.
Args:
name (str): The name of the blueprint.
url_prefix (Optional[str]): The URL prefix for all routes defined on this blueprint.
host (Optional[Union[List[str], str]]): Host or list of hosts that this blueprint should respond to.
version (Optional[Union[int, str, float]]): Version number of the API implemented by this blueprint.
strict_slashes (Optional[bool]): Whether or not the URL should end with a slash.
version_prefix (str): Prefix for the version. Default is "/v".
""" # noqa: E501
__slots__ = (
"_apps",
"_future_commands",
"_future_routes",
"_future_statics",
"_future_middleware",
"_future_listeners",
"_future_exceptions",
"_future_signals",
"_allow_route_overwrite",
"copied_from",
"ctx",
"exceptions",
"host",
"listeners",
"middlewares",
"routes",
"statics",
"strict_slashes",
"url_prefix",
"version",
"version_prefix",
"websocket_routes",
)
def __init__(
self,
name: str,
url_prefix: Optional[str] = None,
host: Optional[Union[list[str], str]] = None,
version: Optional[Union[int, str, float]] = None,
strict_slashes: Optional[bool] = None,
version_prefix: str = "/v",
):
super().__init__(name=name)
self.reset()
self._allow_route_overwrite = False
self.copied_from = ""
self.ctx = SimpleNamespace()
self.host = host
self.strict_slashes = strict_slashes
self.url_prefix = (
url_prefix[:-1]
if url_prefix and url_prefix.endswith("/")
else url_prefix
)
self.version = version
self.version_prefix = version_prefix
def __repr__(self) -> str:
args = ", ".join(
[
f'{attr}="{getattr(self, attr)}"'
if isinstance(getattr(self, attr), str)
else f"{attr}={getattr(self, attr)}"
for attr in (
"name",
"url_prefix",
"host",
"version",
"strict_slashes",
)
]
)
return f"Blueprint({args})"
@property
def apps(self) -> set[Sanic]:
"""Get the set of apps that this blueprint is registered to.
Returns:
Set[Sanic]: Set of apps that this blueprint is registered to.
Raises:
SanicException: If the blueprint has not yet been registered to
an app.
"""
if not self._apps:
raise SanicException(
f"{self} has not yet been registered to an app"
)
return self._apps
@property
def registered(self) -> bool:
"""Check if the blueprint has been registered to an app.
Returns:
bool: `True` if the blueprint has been registered to an app,
`False` otherwise.
"""
return bool(self._apps)
exception = lazy(BaseSanic.exception)
listener = lazy(BaseSanic.listener)
middleware = lazy(BaseSanic.middleware)
route = lazy(BaseSanic.route)
signal = lazy(BaseSanic.signal)
static = lazy(BaseSanic.static, as_decorator=False)
def reset(self) -> None:
"""Reset the blueprint to its initial state."""
self._apps: set[Sanic] = set()
self._allow_route_overwrite = False
self.exceptions: list[RouteHandler] = []
self.listeners: dict[str, list[ListenerType[Any]]] = {}
self.middlewares: list[MiddlewareType] = []
self.routes: list[Route] = []
self.statics: list[RouteHandler] = []
self.websocket_routes: list[Route] = []
def copy(
self,
name: str,
url_prefix: Optional[Union[str, Default]] = _default,
version: Optional[Union[int, str, float, Default]] = _default,
version_prefix: Union[str, Default] = _default,
allow_route_overwrite: Union[bool, Default] = _default,
strict_slashes: Optional[Union[bool, Default]] = _default,
with_registration: bool = True,
with_ctx: bool = False,
):
"""Copy a blueprint instance with some optional parameters to override the values of attributes in the old instance.
Args:
name (str): Unique name of the blueprint.
url_prefix (Optional[Union[str, Default]]): URL to be prefixed before all route URLs.
version (Optional[Union[int, str, float, Default]]): Blueprint version.
version_prefix (Union[str, Default]): The prefix of the version number shown in the URL.
allow_route_overwrite (Union[bool, Default]): Whether to allow route overwrite or not.
strict_slashes (Optional[Union[bool, Default]]): Enforce the API URLs are requested with a trailing "/*".
with_registration (bool): Whether to register the new blueprint instance with Sanic apps that were registered with the old instance or not. Default is `True`.
with_ctx (bool): Whether the ``ctx`` will be copied or not. Default is `False`.
Returns:
Blueprint: A new Blueprint instance with the specified attributes.
""" # noqa: E501
attrs_backup = {
"_apps": self._apps,
"routes": self.routes,
"websocket_routes": self.websocket_routes,
"middlewares": self.middlewares,
"exceptions": self.exceptions,
"listeners": self.listeners,
"statics": self.statics,
}
self.reset()
new_bp = deepcopy(self)
new_bp.name = name
new_bp.copied_from = self.name
if not isinstance(url_prefix, Default):
new_bp.url_prefix = url_prefix
if not isinstance(version, Default):
new_bp.version = version
if not isinstance(strict_slashes, Default):
new_bp.strict_slashes = strict_slashes
if not isinstance(version_prefix, Default):
new_bp.version_prefix = version_prefix
if not isinstance(allow_route_overwrite, Default):
new_bp._allow_route_overwrite = allow_route_overwrite
for key, value in attrs_backup.items():
setattr(self, key, value)
if with_registration and self._apps:
if new_bp._future_statics:
raise SanicException(
"Static routes registered with the old blueprint instance,"
" cannot be registered again."
)
for app in self._apps:
app.blueprint(new_bp)
if not with_ctx:
new_bp.ctx = SimpleNamespace()
return new_bp
@staticmethod
def group(
*blueprints: Union[Blueprint, BlueprintGroup],
url_prefix: Optional[str] = None,
version: Optional[Union[int, str, float]] = None,
strict_slashes: Optional[bool] = None,
version_prefix: str = "/v",
name_prefix: Optional[str] = "",
) -> BlueprintGroup:
"""Group multiple blueprints (or other blueprint groups) together.
Gropuping blueprings is a method for modularizing and organizing
your application's code. This can be a powerful tool for creating
reusable components, logically structuring your application code,
and easily maintaining route definitions in bulk.
This is the preferred way to group multiple blueprints together.
Args:
blueprints (Union[Blueprint, BlueprintGroup]): Blueprints to be
registered as a group.
url_prefix (Optional[str]): URL route to be prepended to all
sub-prefixes. Default is `None`.
version (Optional[Union[int, str, float]]): API Version to be
used for Blueprint group. Default is `None`.
strict_slashes (Optional[bool]): Indicate strict slash
termination behavior for URL. Default is `None`.
version_prefix (str): Prefix to be used for the version in the
URL. Default is "/v".
name_prefix (Optional[str]): Prefix to be used for the name of
the blueprints in the group. Default is an empty string.
Returns:
BlueprintGroup: A group of blueprints.
Example:
The resulting group will have the URL prefixes
`'/v2/bp1'` and `'/v2/bp2'` for bp1 and bp2, respectively.
```python
bp1 = Blueprint('bp1', url_prefix='/bp1')
bp2 = Blueprint('bp2', url_prefix='/bp2')
group = group(bp1, bp2, version=2)
```
"""
def chain(nested) -> Iterable[Blueprint]:
"""Iterate through nested blueprints"""
for i in nested:
if isinstance(i, (list, tuple)):
yield from chain(i)
else:
yield i
bps = BlueprintGroup(
url_prefix=url_prefix,
version=version,
strict_slashes=strict_slashes,
version_prefix=version_prefix,
name_prefix=name_prefix,
)
for bp in chain(blueprints):
bps.append(bp)
return bps
def register(self, app, options):
"""Register the blueprint to the sanic app.
Args:
app (Sanic): Sanic app to register the blueprint to.
options (dict): Options to be passed to the blueprint.
"""
self._apps.add(app)
url_prefix = options.get("url_prefix", self.url_prefix)
opt_version = options.get("version", None)
opt_strict_slashes = options.get("strict_slashes", None)
opt_version_prefix = options.get("version_prefix", self.version_prefix)
opt_name_prefix = options.get("name_prefix", None)
error_format = options.get(
"error_format", app.config.FALLBACK_ERROR_FORMAT
)
routes = []
middleware = []
exception_handlers = []
listeners = defaultdict(list)
registered = set()
# Routes
for future in self._future_routes:
# Prepend the blueprint URI prefix if available
uri = self._setup_uri(future.uri, url_prefix)
route_error_format = (
future.error_format if future.error_format else error_format
)
version_prefix = self.version_prefix
for prefix in (
future.version_prefix,
opt_version_prefix,
):
if prefix and prefix != "/v":
version_prefix = prefix
break
version = self._extract_value(
future.version, opt_version, self.version
)
strict_slashes = self._extract_value(
future.strict_slashes, opt_strict_slashes, self.strict_slashes
)
name = future.name
if opt_name_prefix:
name = f"{opt_name_prefix}_{future.name}"
name = app.generate_name(name)
host = future.host or self.host
if isinstance(host, list):
host = tuple(host)
apply_route = FutureRoute(
future.handler,
uri,
future.methods,
host,
strict_slashes,
future.stream,
version,
name,
future.ignore_body,
future.websocket,
future.subprotocols,
future.unquote,
future.static,
version_prefix,
route_error_format,
future.route_context,
)
if (self, apply_route) in app._future_registry:
continue
registered.add(apply_route)
route = app._apply_route(
apply_route, overwrite=self._allow_route_overwrite
)
# If it is a copied BP, then make sure all of the names of routes
# matchup with the new BP name
if self.copied_from:
for r in route:
r.name = r.name.replace(self.copied_from, self.name)
r.extra.ident = r.extra.ident.replace(
self.copied_from, self.name
)
operation = (
routes.extend if isinstance(route, list) else routes.append
)
operation(route)
# Static Files
for future in self._future_statics:
# Prepend the blueprint URI prefix if available
uri = self._setup_uri(future.uri, url_prefix)
apply_route = FutureStatic(uri, *future[1:])
if (self, apply_route) in app._future_registry:
continue
registered.add(apply_route)
route = app._apply_static(apply_route)
routes.append(route)
route_names = [route.name for route in routes if route]
if route_names:
# Middleware
for future in self._future_middleware:
if (self, future) in app._future_registry:
continue
middleware.append(app._apply_middleware(future, route_names))
# Exceptions
for future in self._future_exceptions:
if (self, future) in app._future_registry:
continue
exception_handlers.append(
app._apply_exception_handler(future, route_names)
)
# Event listeners
for future in self._future_listeners:
if (self, future) in app._future_registry:
continue
listeners[future.event].append(app._apply_listener(future))
# Signals
for future in self._future_signals:
if (self, future) in app._future_registry:
continue
future.condition.update({"__blueprint__": self.name})
# Force exclusive to be False
app._apply_signal(
FutureSignal(
future.handler,
future.event,
future.condition,
False,
future.priority,
)
)
self.routes += [route for route in routes if isinstance(route, Route)]
self.websocket_routes += [
route for route in self.routes if route.extra.websocket
]
self.middlewares += middleware
self.exceptions += exception_handlers
self.listeners.update(dict(listeners))
if self.registered:
self.register_futures(
self.apps,
self,
chain(
registered,
self._future_middleware,
self._future_exceptions,
self._future_listeners,
self._future_signals,
),
)
if self._future_commands:
raise SanicException(
"Registering commands with blueprints is not supported."
)
async def dispatch(self, *args, **kwargs):
"""Dispatch a signal event
Args:
*args: Arguments to be passed to the signal event.
**kwargs: Keyword arguments to be passed to the signal event.
"""
condition = kwargs.pop("condition", {})
condition.update({"__blueprint__": self.name})
kwargs["condition"] = condition
return await asyncio.gather(
*[app.dispatch(*args, **kwargs) for app in self.apps]
)
def event(
self,
event: str,
timeout: Optional[Union[int, float]] = None,
*,
condition: Optional[dict[str, Any]] = None,
):
"""Wait for a signal event to be dispatched.
Args:
event (str): Name of the signal event.
timeout (Optional[Union[int, float]]): Timeout for the event to be
dispatched.
condition: If provided, method will only return when the signal
is dispatched with the given condition.
Returns:
Awaitable: Awaitable for the event to be dispatched.
"""
if condition is None:
condition = {}
condition.update({"__blueprint__": self.name})
waiters = []
for app in self.apps:
waiter = app.signal_router.get_waiter(
event, condition, exclusive=False
)
if not waiter:
raise NotFound("Could not find signal %s" % event)
waiters.append(waiter)
return self._event(waiters, timeout)
async def _event(self, waiters, timeout):
done, pending = await asyncio.wait(
[asyncio.create_task(waiter.wait()) for waiter in waiters],
return_when=asyncio.FIRST_COMPLETED,
timeout=timeout,
)
for task in pending:
task.cancel()
if not done:
raise TimeoutError()
(finished_task,) = done
return finished_task.result()
@staticmethod
def _extract_value(*values):
value = values[-1]
for v in values:
if v is not None:
value = v
break
return value
@staticmethod
def _setup_uri(base: str, prefix: Optional[str]):
uri = base
if prefix:
uri = prefix
if base.startswith("/") and prefix.endswith("/"):
uri += base[1:]
else:
uri += base
return uri[1:] if uri.startswith("//") else uri
@staticmethod
def register_futures(
apps: set[Sanic], bp: Blueprint, futures: Sequence[tuple[Any, ...]]
):
"""Register futures to the apps.
Args:
apps (Set[Sanic]): Set of apps to register the futures to.
bp (Blueprint): Blueprint that the futures belong to.
futures (Sequence[Tuple[Any, ...]]): Sequence of futures to be
registered.
"""
for app in apps:
app._future_registry.update({(bp, item) for item in futures})
bpg_base = MutableSequence[Blueprint]
| Blueprint |
python | catalyst-team__catalyst | catalyst/contrib/layers/lama.py | {
"start": 403,
"end": 886
} | class ____(nn.Module):
"""@TODO: Docs. Contribution is welcome."""
def forward(self, x: torch.Tensor, mask: torch.Tensor = None) -> torch.Tensor:
"""Forward call."""
if mask is None:
x_out = x.mean(1, keepdim=True)
else:
x_masked = torch.sum(x * mask.float(), dim=1, keepdim=True)
mask_sum = torch.sum(mask.float(), dim=1, keepdim=True)
x_out = x_masked / mask_sum
return x_out
| TemporalAvgPooling |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/flat_map_test.py | {
"start": 2080,
"end": 12002
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
# pylint: disable=g-long-lambda
@combinations.generate(test_base.default_test_combinations())
def testFlatMapDataset(self):
repeats = [1, 2, 3, 4, 5, 0, 1]
components = np.array(repeats, dtype=np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(components).flat_map(
lambda x: dataset_ops.Dataset.from_tensors([x]).repeat(x)
)
expected_output = []
for i in repeats:
expected_output.extend([[i]] * i)
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testNestedFlatMapDataset(self):
repeats = [[1, 2], [3, 4], [5, 0], [1, 7]]
components = np.array(repeats, dtype=np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(components).flat_map(
lambda x: dataset_ops.Dataset.from_tensor_slices(x).flat_map(
lambda y: dataset_ops.Dataset.from_tensors(y).repeat(y)
)
)
expected_output = []
for row in repeats:
for i in row:
expected_output.extend([i] * i)
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(test_base.graph_only_combinations())
def testSharedResourceNestedFlatMapDataset(self):
repeats = [[1, 2], [3, 4], [5, 0], [1, 7]]
components = np.array(repeats, dtype=np.int64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensor_slices(components).flat_map(
lambda x: dataset_ops.Dataset.from_tensor_slices(x).flat_map(
lambda y: dataset_ops.Dataset.from_tensors(y).repeat(y)
)
),
shared_name="shared_flat_map_iterator",
)
init_op = iterator.initializer
get_next = iterator.get_next()
# Create two concurrent sessions that share the same iterator
# resource on the same server, and verify that a random
# interleaving of `Session.run(get_next)` calls on the two
# sessions yields the expected result.
server = server_lib.Server.create_local_server()
with session.Session(server.target) as sess1:
with session.Session(server.target) as sess2:
for _ in range(3):
sess = random.choice([sess1, sess2])
sess.run(init_op)
for row in repeats:
for i in row:
for _ in range(i):
sess = random.choice([sess1, sess2])
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess = random.choice([sess1, sess2])
sess.run(get_next)
@combinations.generate(test_base.default_test_combinations())
def testMapDict(self):
dataset = (
dataset_ops.Dataset.range(10)
.map(lambda x: {"foo": x * 2, "bar": x**2})
.flat_map(
lambda d: dataset_ops.Dataset.from_tensors(d["foo"]).repeat(
d["bar"]
)
)
)
get_next = self.getNext(dataset)
for i in range(10):
for _ in range(i**2):
self.assertEqual(i * 2, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testSparse(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2]
)
def _flat_map_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values)
)
dataset = dataset_ops.Dataset.range(10).map(_map_fn).flat_map(_flat_map_fn)
expected_output = []
for i in range(10):
for j in range(2):
expected_output.append([i, 0] if j % 2 == 0 else [0, -i])
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testTensorArray(self):
def _map_fn(i):
i = math_ops.cast(i, dtypes.int32)
return tensor_array_ops.TensorArray(
dtype=dtypes.int32, element_shape=(), size=i
).unstack(math_ops.range(i))
def _flat_map_fn(x):
self.assertIsInstance(x, tensor_array_ops.TensorArray)
return dataset_ops.Dataset.from_tensor_slices(x.stack())
dataset = dataset_ops.Dataset.range(10).map(_map_fn).flat_map(_flat_map_fn)
expected_output = []
for i in range(10):
for j in range(i):
expected_output.append(j)
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testRagged(self):
def _map_fn(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1], [-1]])
def _flat_map_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
ragged_conversion_ops.to_tensor(x)
)
dataset = dataset_ops.Dataset.range(10).map(_map_fn).flat_map(_flat_map_fn)
expected_output = []
for i in range(10):
expected_output.append([i])
expected_output.append([-i])
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testName(self):
def fn(x):
return dataset_ops.Dataset.from_tensors(x)
dataset = dataset_ops.Dataset.from_tensors(42).flat_map(fn, name="flat_map")
self.assertDatasetProduces(dataset, [42])
@combinations.generate(test_base.default_test_combinations())
def testCardinality(self):
dataset = dataset_ops.Dataset.from_tensor_slices(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
dataset = dataset.flat_map(dataset_ops.Dataset.from_tensor_slices)
options = dataset_options_pb2.CardinalityOptions(
compute_level="CARDINALITY_COMPUTE_MODERATE")
cardinality = dataset_ops.gen_dataset_ops.dataset_cardinality(
dataset._variant_tensor, options.SerializeToString())
self.assertEqual(self.evaluate(cardinality), 9)
@combinations.generate(test_base.default_test_combinations())
def testInfiniteCardinality(self):
dataset = dataset_ops.Dataset.from_tensor_slices(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
dataset = dataset.flat_map(lambda _: dataset_ops.Dataset.range(1).repeat())
options = dataset_options_pb2.CardinalityOptions(
compute_level="CARDINALITY_COMPUTE_MODERATE")
cardinality = dataset_ops.gen_dataset_ops.dataset_cardinality(
dataset._variant_tensor, options.SerializeToString())
self.assertEqual(self.evaluate(cardinality), dataset_ops.INFINITE)
@combinations.generate(test_base.default_test_combinations())
def testUnknownCardinality(self):
dataset = dataset_ops.Dataset.from_tensor_slices(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
dataset = dataset.flat_map(
lambda _: dataset_ops.Dataset.range(10).filter(lambda x: x % 2 == 1))
options = dataset_options_pb2.CardinalityOptions(
compute_level="CARDINALITY_COMPUTE_MODERATE")
cardinality = dataset_ops.gen_dataset_ops.dataset_cardinality(
dataset._variant_tensor, options.SerializeToString())
self.assertEqual(self.evaluate(cardinality), dataset_ops.UNKNOWN)
@combinations.generate(test_base.default_test_combinations())
def testEmptyCardinality(self):
dataset = dataset_ops.Dataset.range(0)
dataset = dataset.flat_map(dataset_ops.Dataset.range)
options = dataset_options_pb2.CardinalityOptions(
compute_level="CARDINALITY_COMPUTE_MODERATE")
cardinality = dataset_ops.gen_dataset_ops.dataset_cardinality(
dataset._variant_tensor, options.SerializeToString())
self.assertEqual(self.evaluate(cardinality), 0)
@combinations.generate(test_base.default_test_combinations())
def testCardinalityLowEffort(self):
dataset = dataset_ops.Dataset.from_tensor_slices(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
dataset = dataset.flat_map(dataset_ops.Dataset.from_tensor_slices)
options = dataset_options_pb2.CardinalityOptions(
compute_level="CARDINALITY_COMPUTE_LOW")
cardinality = dataset_ops.gen_dataset_ops.dataset_cardinality(
dataset._variant_tensor, options.SerializeToString())
self.assertEqual(self.evaluate(cardinality), dataset_ops.UNKNOWN)
@combinations.generate(test_base.default_test_combinations())
def testMapFuncFailWithErrorContext(self):
def fn(x):
return dataset_ops.Dataset.from_tensors(x // 0)
dataset = dataset_ops.Dataset.from_tensors(42).flat_map(fn, name="flat_map")
get_next = self.getNext(dataset)
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r".*Error in user-defined function passed to .* transformation with "
r"iterator: Iterator::Root::.*"):
self.evaluate(get_next())
@combinations.generate(test_base.v2_eager_only_combinations())
def testSymbolicCheckpointSize(self):
examples_per_flat_map = 100
example_len = 10_000
def flat_map_fn(_):
data = []
for _ in range(examples_per_flat_map):
data.append(
stateless_random_ops.stateless_random_uniform(
[example_len], seed=(42, 42)
)
)
return dataset_ops.Dataset.from_tensor_slices(data)
ds = dataset_ops.Dataset.range(10)
# Inputs to flat_map are >1MB
ds = ds.map(
lambda x: stateless_random_ops.stateless_random_uniform(
[1_000_000], seed=(42, 42)
)
)
ds = ds.flat_map(flat_map_fn)
options = options_lib.Options()
options.experimental_symbolic_checkpoint = True
ds = ds.with_options(options)
it = ds.as_numpy_iterator()
for _ in range(30):
next(it)
ckpt = it._save()
# Make sure the checkpoint is smaller than the element sizes, i.e. no
# elements are being stored in the checkpoint.
self.assertLess(len(ckpt.numpy()), 10_000)
| FlatMapTest |
python | Lightning-AI__lightning | tests/tests_fabric/strategies/test_model_parallel_integration.py | {
"start": 1264,
"end": 29471
} | class ____(nn.Module):
def __init__(self):
super().__init__()
self.w1 = nn.Linear(32, 64)
self.w2 = nn.Linear(32, 64)
self.w3 = nn.Linear(64, 32)
def forward(self, x):
return self.w3(F.silu(self.w1(x)) * self.w2(x))
def _parallelize_feed_forward_tp(model, device_mesh):
from torch.distributed.tensor.parallel import ColwiseParallel, RowwiseParallel, parallelize_module
tp_mesh = device_mesh["tensor_parallel"]
tp_plan = {
"w1": ColwiseParallel(),
"w2": ColwiseParallel(),
"w3": RowwiseParallel(),
}
parallelize_module(model, tp_mesh, tp_plan)
return model
def _parallelize_feed_forward_fsdp2(model, device_mesh):
from torch.distributed._composable.fsdp.fully_shard import fully_shard
dp_mesh = device_mesh["data_parallel"]
assert dp_mesh.ndim == 1 # Hybrid-sharding not supported
# Fully-shard each layer
fully_shard(model.w1, mesh=dp_mesh)
fully_shard(model.w2, mesh=dp_mesh)
fully_shard(model.w3, mesh=dp_mesh)
# TODO: Re-enable activation checkpointing
# Currently, state dict keys get prefixed with '_checkpoint_wrapper' in the keys
# which leads to mismatches when loading weights into a checkpoint-wrapped module.
# PyTorch should handle this automatically.
# model = checkpoint_wrapper(model)
return model
def _parallelize_feed_forward_fsdp2_tp(model, device_mesh):
model = _parallelize_feed_forward_tp(model, device_mesh)
return _parallelize_feed_forward_fsdp2(model, device_mesh)
@RunIf(min_torch="2.4", standalone=True, min_cuda_gpus=4)
def test_setup_device_mesh(distributed):
from torch.distributed.device_mesh import DeviceMesh
for dp_size, tp_size in ((1, 4), (4, 1), (2, 2)):
strategy = ModelParallelStrategy(
parallelize_fn=(lambda m, _: m),
data_parallel_size=dp_size,
tensor_parallel_size=tp_size,
)
fabric = Fabric(accelerator="auto", devices=4, strategy=strategy)
fabric.launch()
device_mesh = fabric.strategy.device_mesh
assert isinstance(device_mesh, DeviceMesh)
assert device_mesh.device_type == fabric.device.type
assert device_mesh.mesh_dim_names == ("data_parallel", "tensor_parallel")
assert device_mesh.size(0) == dp_size
assert device_mesh.size(1) == tp_size
assert device_mesh.ndim == 2
fabric.barrier()
# Passing "auto" will select internode and intranode dimensions automatically
strategy = ModelParallelStrategy(
parallelize_fn=(lambda m, _: m),
data_parallel_size="auto",
tensor_parallel_size="auto",
)
fabric = Fabric(accelerator="auto", devices=4, num_nodes=1, strategy=strategy)
fabric.launch()
assert fabric.strategy.device_mesh.mesh_dim_names == ("data_parallel", "tensor_parallel")
assert fabric.strategy.device_mesh.size(0) == 1
assert fabric.strategy.device_mesh.size(1) == 4
def _parallelize_with_compile(parallelize):
def fn(model, device_mesh):
model = parallelize(model, device_mesh)
return torch.compile(model)
return fn
@RunIf(min_torch="2.4", standalone=True, min_cuda_gpus=2)
@pytest.mark.parametrize("compile", [True, False])
@pytest.mark.xfail(
raises=AssertionError,
reason="Test left zombie thread",
strict=False,
run=True,
condition=lambda e: isinstance(e, AssertionError) and str(e).startswith("Test left zombie thread"),
)
def test_tensor_parallel(distributed, compile: bool):
from torch.distributed._tensor import DTensor
parallelize = _parallelize_feed_forward_tp
if compile:
parallelize = _parallelize_with_compile(parallelize)
strategy = ModelParallelStrategy(parallelize_fn=parallelize)
fabric = Fabric(accelerator="auto", devices=2, strategy=strategy)
fabric.launch()
fabric.seed_everything(0)
with fabric.init_module(empty_init=True):
model = FeedForward()
model = fabric.setup(model)
optimizer = torch.optim.AdamW(model.parameters())
optimizer = fabric.setup_optimizers(optimizer)
device_mesh = fabric.strategy.device_mesh
assert all(tensor.device_mesh == device_mesh["tensor_parallel"] for tensor in optimizer.param_groups[0]["params"])
assert all(isinstance(weight, DTensor) for weight in model.parameters())
assert model.w1.weight.device_mesh == device_mesh["tensor_parallel"]
dataset_size = 6
dataset = RandomDataset(32, dataset_size)
dataloader = DataLoader(dataset, batch_size=2)
dataloader = fabric.setup_dataloaders(dataloader)
# No data sharding, all GPUs get the same input inside a TP group
assert len(dataloader) == dataset_size // dataloader.batch_size
assert isinstance(dataloader.sampler, DistributedSampler)
for _, batch in enumerate(dataloader):
# All batches must be identical across TP group
batches = fabric.all_gather(batch)
assert all(torch.equal(batches[0], batches[i]) for i in range(1, len(batches)))
output = model(batch)
fabric.backward(output.sum())
assert isinstance(model.w1.weight.grad, DTensor)
assert model.w1.weight.grad.device_mesh == device_mesh["tensor_parallel"]
optimizer.step()
optimizer.zero_grad()
@RunIf(min_torch="2.4", standalone=True, min_cuda_gpus=4)
@pytest.mark.parametrize("compile", [True, False])
def test_fsdp2_tensor_parallel(distributed, compile):
from torch.distributed._tensor import DTensor
parallelize = _parallelize_feed_forward_fsdp2_tp
if compile:
parallelize = _parallelize_with_compile(parallelize)
strategy = ModelParallelStrategy(
parallelize_fn=_parallelize_feed_forward_fsdp2_tp,
data_parallel_size=2,
tensor_parallel_size=2,
)
fabric = Fabric(accelerator="auto", devices=4, strategy=strategy)
fabric.launch()
fabric.seed_everything(0)
with fabric.init_module(empty_init=True):
model = FeedForward()
model = fabric.setup(model)
optimizer = torch.optim.AdamW(model.parameters())
optimizer = fabric.setup_optimizers(optimizer)
assert all(isinstance(weight, DTensor) for weight in model.parameters())
assert all(isinstance(tensor, DTensor) for tensor in optimizer.param_groups[0]["params"])
assert model.w1.weight.device_mesh.ndim == 2
assert model.w1.weight.device_mesh.size(0) == 2
assert model.w1.weight.device_mesh.size(1) == 2
assert all(weight.device.type != "meta" for weight in model.parameters())
assert all(tensor.device_mesh.ndim == 2 for tensor in optimizer.param_groups[0]["params"])
assert all(tensor.device.type != "meta" for tensor in optimizer.param_groups[0]["params"])
dataset_size = 8
dataset = RandomDataset(32, dataset_size)
dataloader = DataLoader(dataset, batch_size=2)
dataloader = fabric.setup_dataloaders(dataloader)
# No data sharding across TP dimension, sharding across data-parallel dimension only
device_mesh = fabric.strategy.device_mesh
dp_mesh = device_mesh["data_parallel"]
tp_mesh = device_mesh["tensor_parallel"]
assert len(dataloader) == dataset_size // dataloader.batch_size // dp_mesh.size()
assert isinstance(dataloader.sampler, DistributedSampler)
for _, batch in enumerate(dataloader):
batches = fabric.all_gather(batch)
# Batches across the TP dimension must be identical
batches_tp = batches[tp_mesh.mesh]
assert all(torch.equal(batches_tp[0], batches_tp[i]) for i in range(1, len(batches_tp)))
# Batches across the DP dimension must be different
batches_dp = batches[dp_mesh.mesh]
assert all(not torch.equal(batches_dp[0], batches_dp[i]) for i in range(1, len(batches_dp)))
output = model(batch)
fabric.backward(output.sum())
assert isinstance(model.w1.weight.grad, DTensor)
assert model.w1.weight.grad.device_mesh == device_mesh
optimizer.step()
optimizer.zero_grad()
def _train(fabric, model=None, optimizer=None):
fabric.seed_everything(0)
if model is None:
with fabric.init_module(empty_init=True):
model = FeedForward()
model = fabric.setup(model)
if optimizer is None:
optimizer = torch.optim.AdamW(model.parameters())
optimizer = fabric.setup_optimizers(optimizer)
output = model(torch.rand(2, 32, device=fabric.device))
fabric.backward(output.sum())
optimizer.step()
optimizer.zero_grad()
return model, optimizer
@RunIf(min_torch="2.4", min_cuda_gpus=4, standalone=True)
@pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.parametrize(
"precision",
[
pytest.param("32-true"),
pytest.param("bf16-mixed", marks=RunIf(bf16_cuda=True)),
],
)
def test_train_save_load(distributed, precision, tmp_path):
"""Test 2D-parallel training, saving and loading precision settings."""
strategy = ModelParallelStrategy(
_parallelize_feed_forward_fsdp2_tp,
data_parallel_size=2,
tensor_parallel_size=2,
)
fabric = Fabric(accelerator="cuda", devices=4, strategy=strategy, precision=precision)
fabric.launch()
model, optimizer = _train(fabric)
checkpoint_path = fabric.broadcast(str(tmp_path / "dist-checkpoint"))
params_before = [p.full_tensor().clone() for p in model.parameters()]
state = {"model": model, "optimizer": optimizer, "steps": 1}
fabric.save(checkpoint_path, state)
assert set(os.listdir(checkpoint_path)) == {
".metadata",
"__0_0.distcp",
"__1_0.distcp",
"__2_0.distcp",
"__3_0.distcp",
"meta.pt",
}
# re-init all objects and resume
strategy = ModelParallelStrategy(
_parallelize_feed_forward_fsdp2_tp,
data_parallel_size=2,
tensor_parallel_size=2,
)
fabric = Fabric(accelerator="cuda", devices=4, strategy=strategy, precision=precision)
fabric.launch()
model, optimizer = _train(fabric)
# check correctness with loaded state
state = {"model": model, "optimizer": optimizer, "steps": 0}
metadata = fabric.load(checkpoint_path, state)
for p0, p1 in zip(params_before, (p.full_tensor() for p in model.parameters())):
torch.testing.assert_close(p0, p1, atol=0, rtol=0, equal_nan=True)
# check user data in state reloaded
assert state["steps"] == 1
assert not metadata
# attempt to load a key not in the metadata checkpoint
state = {"model": model, "coconut": 11}
with pytest.raises(KeyError, match="The requested state contains a key 'coconut' that does not exist"):
fabric.load(checkpoint_path, state)
# `strict=False` ignores the missing key
state = {"model": model, "coconut": 11}
fabric.load(checkpoint_path, state, strict=False)
assert state["coconut"] == 11
@pytest.mark.filterwarnings("ignore::FutureWarning")
@RunIf(min_torch="2.4", min_cuda_gpus=2, standalone=True)
def test_save_full_state_dict(distributed, tmp_path):
"""Test that ModelParallelStrategy saves the full state into a single file with
`save_distributed_checkpoint=False`."""
from torch.distributed.checkpoint.state_dict import get_optimizer_state_dict
strategy = ModelParallelStrategy(
_parallelize_feed_forward_fsdp2,
data_parallel_size=2,
tensor_parallel_size=1,
save_distributed_checkpoint=False,
)
fabric = Fabric(accelerator="cuda", strategy=strategy, devices=2)
fabric.launch()
model, optimizer = _train(fabric)
checkpoint_path = Path(fabric.broadcast(str(tmp_path / "fsdp-checkpoint.pt")))
state = {"model": model, "optimizer": optimizer, "steps": 1}
fabric.save(checkpoint_path, state)
checkpoint = torch.load(checkpoint_path, weights_only=True)
assert checkpoint["steps"] == 1
loaded_state_dict = checkpoint["model"]
# assert the correct state model was saved
state_dict = model.state_dict()
assert set(loaded_state_dict.keys()) == set(state_dict.keys())
for param_name in state_dict:
assert torch.equal(loaded_state_dict[param_name], state_dict[param_name].full_tensor().cpu())
params_before = [p.full_tensor().cpu() for p in model.parameters()]
# assert the correct optimizer state was saved
optimizer_state_before = get_optimizer_state_dict(model, optimizer)
assert set(checkpoint["optimizer"].keys()) == set(optimizer_state_before.keys()) == {"state", "param_groups"}
# 1. verify the FSDP state can be loaded back into a FSDP model/strategy directly
strategy = ModelParallelStrategy(_parallelize_feed_forward_fsdp2, data_parallel_size=2, tensor_parallel_size=1)
fabric = Fabric(accelerator="cuda", strategy=strategy, devices=2)
fabric.launch()
model, optimizer = _train(fabric)
metadata = fabric.load(checkpoint_path, {"model": model, "optimizer": optimizer})
assert metadata == {"steps": 1}
params_after = [p.full_tensor() for p in model.parameters()]
assert all(torch.equal(p0.cpu(), p1.cpu()) for p0, p1 in zip(params_before, params_after))
optimizer_state_after = get_optimizer_state_dict(model, optimizer)
optimizer_state_after["param_groups"][0]["betas"] = tuple(optimizer_state_after["param_groups"][0]["betas"])
assert set(optimizer_state_after.keys()) == set(optimizer_state_before.keys()) == {"state", "param_groups"}
torch.testing.assert_close(optimizer_state_after["state"], optimizer_state_before["state"], atol=0, rtol=0)
assert optimizer_state_after["param_groups"] == optimizer_state_before["param_groups"]
# run a step to verify the optimizer state is correct
_train(fabric, model, optimizer)
# 2. verify the FSDP state can be loaded back into a single-device model/strategy
fabric = Fabric(accelerator="cpu", devices=1)
model, optimizer = _train(fabric)
metadata = fabric.load(checkpoint_path, {"model": model, "optimizer": optimizer})
assert metadata == {"steps": 1}
params_after = list(model.parameters())
assert all(torch.equal(p0, p1) for p0, p1 in zip(params_before, params_after))
# get optimizer state after loading
normal_checkpoint_path = Path(fabric.broadcast(str(tmp_path / "normal-checkpoint.pt")))
fabric.save(normal_checkpoint_path, {"model": model, "optimizer": optimizer, "steps": 2})
optimizer_state_after = torch.load(normal_checkpoint_path, weights_only=True)["optimizer"]
assert set(optimizer_state_after.keys()) == set(optimizer_state_before.keys()) == {"state", "param_groups"}
assert torch.equal(
optimizer_state_after["state"][0]["exp_avg"],
optimizer_state_before["state"]["_forward_module.w1.weight"]["exp_avg"].full_tensor().cpu(),
)
# run a step to verify the optimizer state is correct
_train(fabric, model, optimizer)
# 3. verify that a single-device model/strategy states can be loaded into a FSDP model/strategy
strategy = ModelParallelStrategy(_parallelize_feed_forward_fsdp2, data_parallel_size=2, tensor_parallel_size=1)
fabric = Fabric(accelerator="cuda", strategy=strategy, devices=2)
fabric.launch()
model, optimizer = _train(fabric)
metadata = fabric.load(normal_checkpoint_path, {"model": model, "optimizer": optimizer})
assert metadata == {"steps": 2}
params_after = [p.full_tensor() for p in model.parameters()]
assert all(torch.equal(p0.cpu(), p1.cpu()) for p0, p1 in zip(params_before, params_after))
optimizer_state_after = get_optimizer_state_dict(model, optimizer)
optimizer_state_after["param_groups"][0]["betas"] = tuple(optimizer_state_after["param_groups"][0]["betas"])
assert set(optimizer_state_after.keys()) == set(optimizer_state_before.keys()) == {"state", "param_groups"}
torch.testing.assert_close(optimizer_state_after["state"], optimizer_state_before["state"], atol=0, rtol=0)
assert optimizer_state_after["param_groups"] == optimizer_state_before["param_groups"]
# run a step to verify the optimizer state is correct
_train(fabric, model, optimizer)
@pytest.mark.filterwarnings("ignore::FutureWarning")
@RunIf(min_torch="2.4", min_cuda_gpus=2, standalone=True)
def test_load_full_state_dict_into_sharded_model(distributed, tmp_path):
"""Test that the strategy can load a full-state checkpoint into a distributed model."""
fabric = Fabric(accelerator="cuda", devices=1)
fabric.seed_everything(0)
model, optimizer = _train(fabric)
# Save a full-state-dict checkpoint
checkpoint_path = Path(fabric.broadcast(str(tmp_path / "full-checkpoint.pt")))
state = {"model": model, "optimizer": optimizer, "steps": 1}
fabric.save(checkpoint_path, state)
# Gather all weights and store a copy manually
params_before = torch.cat([p.cpu().view(-1) for p in model.parameters()])
# Create a FSDP sharded model
strategy = ModelParallelStrategy(_parallelize_feed_forward_fsdp2, data_parallel_size=2, tensor_parallel_size=1)
fabric = Fabric(accelerator="cuda", strategy=strategy, devices=2)
fabric.launch()
model, optimizer = _train(fabric)
state = {"model": model, "optimizer": optimizer, "steps": 44}
fabric.load(checkpoint_path, state)
assert state["steps"] == 1
# Gather all weights and compare
params_after = torch.cat([p.full_tensor().cpu().view(-1) for p in model.parameters()])
assert torch.equal(params_before, params_after)
# Create a raw state-dict checkpoint to test `Fabric.load_raw` too
raw_checkpoint_path = checkpoint_path.with_name("model-state-dict")
if fabric.global_rank == 0:
checkpoint = torch.load(checkpoint_path, weights_only=True)
torch.save(checkpoint["model"], raw_checkpoint_path)
fabric.barrier()
_train(fabric, model, optimizer)
fabric.load_raw(raw_checkpoint_path, model)
# Gather all weights and compare
params_after = torch.cat([p.full_tensor().cpu().view(-1) for p in model.parameters()])
assert torch.equal(params_before, params_after)
@RunIf(min_torch="2.4", min_cuda_gpus=2, skip_windows=True, standalone=True)
@pytest.mark.parametrize("move_to_device", [True, False])
@mock.patch("lightning.fabric.wrappers._FabricModule")
def test_setup_module_move_to_device(fabric_module_mock, move_to_device, distributed):
"""Test that `move_to_device` does nothing, ModelParallel decides which device parameters get moved to which device
(sharding)."""
from torch.distributed._tensor import DTensor
strategy = ModelParallelStrategy(parallelize_fn=_parallelize_feed_forward_fsdp2)
fabric = Fabric(accelerator="cuda", devices=2, strategy=strategy)
fabric.launch()
model = FeedForward()
fabric_model = fabric.setup_module(model, move_to_device=move_to_device)
fabric_module_mock.assert_not_called()
# the linear layer got sharded and each part is on the expected device
assert fabric_model.w1.weight.device == torch.device("cuda", fabric.local_rank)
assert isinstance(fabric_model.w1.weight, DTensor)
# The _DeviceDtypeModuleMixin currently can't represent the device in a meaningful way for models with pieces on
# different devices
assert fabric_model.device == torch.device("cuda", fabric.local_rank)
assert fabric.device == torch.device("cuda", fabric.local_rank)
@RunIf(min_torch="2.4", min_cuda_gpus=2, skip_windows=True, standalone=True)
@pytest.mark.parametrize(
("precision", "expected_dtype"),
[
("32-true", torch.float32),
("16-true", torch.float16),
pytest.param("bf16-true", torch.bfloat16, marks=RunIf(bf16_cuda=True)),
],
)
def test_module_init_context(distributed, precision, expected_dtype):
"""Test that the module under the init-context gets moved to the right device and dtype."""
strategy = ModelParallelStrategy(parallelize_fn=_parallelize_feed_forward_fsdp2)
fabric = Fabric(accelerator="cuda", devices=2, strategy=strategy, precision=precision)
fabric.launch()
def _run_setup_assertions(empty_init, expected_device):
with fabric.init_module(empty_init=empty_init):
model = FeedForward()
# The model is on the CPU/meta-device until after `.setup()``
assert all(weight.device == expected_device for weight in model.parameters())
assert all(weight.dtype == expected_dtype for weight in model.parameters())
model = fabric.setup(model)
# Parameters get sharded in `.setup()` and moved to the target device
assert all(weight.device == torch.device("cuda", fabric.local_rank) for weight in model.parameters())
assert all(weight.dtype == expected_dtype for weight in model.parameters())
_run_setup_assertions(empty_init=False, expected_device=torch.device("cpu"))
_run_setup_assertions(empty_init=True, expected_device=torch.device("meta"))
@RunIf(min_torch="2.4", min_cuda_gpus=2, standalone=True)
def test_save_filter(distributed, tmp_path):
strategy = ModelParallelStrategy(
parallelize_fn=_parallelize_feed_forward_fsdp2,
save_distributed_checkpoint=False,
)
fabric = Fabric(accelerator="cuda", strategy=strategy, devices=2)
fabric.launch()
model = FeedForward()
model = fabric.setup_module(model)
tmp_path = Path(fabric.broadcast(str(tmp_path)))
state = {"model": model}
filter = {"model": lambda k, v: "bias" in k}
checkpoint_path = tmp_path / "full.pth"
fabric.save(checkpoint_path, state, filter=filter)
checkpoint = torch.load(checkpoint_path, weights_only=True)["model"]
assert set(checkpoint) == {"w1.bias", "w2.bias", "w3.bias"}
assert type(checkpoint["w1.bias"]) is torch.Tensor
fabric.strategy._save_distributed_checkpoint = True
checkpoint_path = tmp_path / "distributed"
with pytest.raises(NotImplementedError, match="doesn't support loading distributed filtered"):
fabric.save(checkpoint_path, state, filter=filter)
def _parallelize_single_linear_tp_fsdp2(model, device_mesh):
from torch.distributed._composable.fsdp.fully_shard import fully_shard
from torch.distributed.tensor.parallel import ColwiseParallel, parallelize_module
dp_mesh = device_mesh["data_parallel"]
tp_mesh = device_mesh["tensor_parallel"]
parallelize_module(model, tp_mesh, ColwiseParallel())
fully_shard(model, mesh=dp_mesh)
return model
@RunIf(min_torch="2.4", min_cuda_gpus=2, standalone=True)
@pytest.mark.parametrize(
"precision",
[
"32-true",
pytest.param("bf16-mixed", marks=RunIf(bf16_cuda=True)),
],
)
@pytest.mark.parametrize(
"clip_type",
[
pytest.param("norm", marks=pytest.mark.skip("Gradient clipping by norm is not correct.")),
"val",
],
)
def test_clip_gradients(distributed, clip_type, precision):
strategy = ModelParallelStrategy(_parallelize_single_linear_tp_fsdp2)
fabric = Fabric(accelerator="auto", devices=2, precision=precision, strategy=strategy)
fabric.launch()
in_features, out_features = 32, 2
model = torch.nn.Linear(in_features, out_features, bias=False)
model.weight.data.fill_(0.01)
model = fabric.setup(model)
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
optimizer = fabric.setup_optimizers(optimizer)
batch = torch.full((1, in_features), 0.1, device=fabric.device)
loss = model(batch).sum()
# The example is constructed such that the gradients are all the same
fabric.backward(loss)
if clip_type == "norm":
norm = torch.linalg.vector_norm(model.weight.grad.full_tensor().detach().cpu(), 2, dtype=torch.float32).item()
new_norm = norm / 10
fabric.clip_gradients(model, optimizer, max_norm=new_norm * 10)
assert torch.allclose(
torch.linalg.vector_norm(model.weight.grad.full_tensor().detach().cpu(), 2, dtype=torch.float32),
torch.tensor(new_norm),
)
elif clip_type == "val":
val = model.weight.grad.full_tensor()[0, 0].item()
new_val = val / 2.0
fabric.clip_gradients(model, optimizer, clip_val=new_val)
assert torch.allclose(
model.weight.grad.full_tensor(), torch.full_like(model.weight.grad.full_tensor(), new_val)
)
else:
raise AssertionError(f"Unknown clip type: {clip_type}")
optimizer.step()
optimizer.zero_grad()
@RunIf(min_torch="2.4", min_cuda_gpus=4, standalone=True)
def test_save_sharded_and_consolidate_and_load(distributed, tmp_path):
"""Test the consolidation of a distributed (DTensor) checkpoint into a single file."""
strategy = ModelParallelStrategy(
_parallelize_feed_forward_fsdp2_tp,
data_parallel_size=2,
tensor_parallel_size=2,
)
fabric = Fabric(accelerator="cuda", devices=4, strategy=strategy)
fabric.launch()
model = FeedForward()
model = fabric.setup(model)
optimizer = torch.optim.Adam(model.parameters())
optimizer = fabric.setup_optimizers(optimizer)
state = {"model": model, "optimizer": optimizer, "steps": 1}
# run one iteration to init the state of the optimizer
loss = model(torch.rand(1, 32, device=fabric.device)).sum()
fabric.backward(loss)
optimizer.step()
checkpoint_path_sharded = fabric.broadcast(str(tmp_path / "checkpoint_sharded"))
fabric.save(checkpoint_path_sharded, state)
assert set(os.listdir(checkpoint_path_sharded)) == {
".metadata",
"__0_0.distcp",
"__1_0.distcp",
"__2_0.distcp",
"__3_0.distcp",
"meta.pt",
}
# consolidate the checkpoint to a single file
checkpoint_path_full = fabric.broadcast(str(tmp_path / "checkpoint_full.pt"))
if fabric.global_rank == 0:
checkpoint = _load_distributed_checkpoint(Path(checkpoint_path_sharded))
torch.save(checkpoint, checkpoint_path_full)
fabric.barrier()
# re-init and load from full checkpoint
strategy = ModelParallelStrategy(
_parallelize_feed_forward_fsdp2_tp,
data_parallel_size=2,
tensor_parallel_size=2,
)
fabric = Fabric(accelerator="cuda", devices=4, strategy=strategy)
fabric.launch()
model = FeedForward()
model = fabric.setup(model)
optimizer = torch.optim.Adam(model.parameters())
optimizer = fabric.setup_optimizers(optimizer)
state = {"model": model, "optimizer": optimizer, "steps": 1}
fabric.load(checkpoint_path_full, state)
@RunIf(min_torch="2.4", min_cuda_gpus=2, standalone=True)
def test_load_raw_module_state(distributed):
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import ColwiseParallel, parallelize_module
class CustomModel(nn.Module):
def __init__(self):
super().__init__()
self.parameter = nn.Parameter(torch.rand(2, 2))
self.layer1 = nn.Linear(4, 4)
self.layer2 = nn.Linear(4, 4)
self.register_buffer("persistent_buffer", torch.rand(2), persistent=True)
self.register_buffer("non_persistent_buffer", torch.rand(2), persistent=False)
fabric = Fabric(accelerator="cuda", devices=2)
fabric.launch()
fabric.seed_everything(0)
with fabric.init_module():
model = CustomModel()
state_dict = deepcopy(model.state_dict())
with fabric.init_module():
model = CustomModel()
device_mesh = init_device_mesh("cuda", mesh_shape=(2,), mesh_dim_names=("tp",))
plan = {"layer1": ColwiseParallel()}
parallelize_module(model, device_mesh, plan)
_load_raw_module_state(state_dict, model, strict=True)
assert torch.equal(model.parameter, state_dict["parameter"])
assert torch.equal(model.layer1.weight.full_tensor(), state_dict["layer1.weight"])
assert torch.equal(model.layer2.weight, state_dict["layer2.weight"])
assert torch.equal(model.persistent_buffer, state_dict["persistent_buffer"])
state_dict.pop("parameter")
with pytest.raises(KeyError, match="The model contains a key 'parameter' that does not exist"):
_load_raw_module_state(state_dict, model, strict=True)
_load_raw_module_state(state_dict, model, strict=False)
| FeedForward |
python | pytorch__pytorch | torch/nn/attention/flex_attention.py | {
"start": 17101,
"end": 66195
} | class ____:
r"""
BlockMask is our format for representing a block-sparse attention mask.
It is somewhat of a cross in-between BCSR and a non-sparse format.
**Basics**
A block-sparse mask means that instead of representing the sparsity of
individual elements in the mask, a KV_BLOCK_SIZE x Q_BLOCK_SIZE block is
considered sparse only if every element within that block is sparse.
This aligns well with hardware, which generally expects to perform
contiguous loads and computation.
This format is primarily optimized for 1. simplicity, and 2. kernel
efficiency. Notably, it is *not* optimized for size, as this mask is always
reduced by a factor of KV_BLOCK_SIZE * Q_BLOCK_SIZE. If the size is a
concern, the tensors can be reduced in size by increasing the block size.
The essentials of our format are:
num_blocks_in_row: Tensor[ROWS]:
Describes the number of blocks present in each row.
col_indices: Tensor[ROWS, MAX_BLOCKS_IN_COL]:
`col_indices[i]` is the sequence of block positions for row i. The values of
this row after `col_indices[i][num_blocks_in_row[i]]` are undefined.
For example, to reconstruct the original tensor from this format:
.. code-block:: python
dense_mask = torch.zeros(ROWS, COLS)
for row in range(ROWS):
for block_idx in range(num_blocks_in_row[row]):
dense_mask[row, col_indices[row, block_idx]] = 1
Notably, this format makes it easier to implement a reduction along the
*rows* of the mask.
**Details**
The basics of our format require only kv_num_blocks and kv_indices. But, we
have up to 8 tensors on this object. This represents 4 pairs:
1. (kv_num_blocks, kv_indices): Used for the forwards pass of attention, as
we reduce along the KV dimension.
2. [OPTIONAL] (full_kv_num_blocks, full_kv_indices): This is optional and
purely an optimization. As it turns out, applying masking to every block
is quite expensive! If we specifically know which blocks are "full" and
don't require masking at all, then we can skip applying mask_mod to these
blocks. This requires the user to split out a separate mask_mod from the
score_mod. For causal masks, this is about a 15% speedup.
3. [GENERATED] (q_num_blocks, q_indices): Required for the backwards pass,
as computing dKV requires iterating along the mask along the Q dimension. These are autogenerated from 1.
4. [GENERATED] (full_q_num_blocks, full_q_indices): Same as above, but for
the backwards pass. These are autogenerated from 2.
"""
seq_lengths: tuple[int, int]
kv_num_blocks: Tensor
kv_indices: Tensor
full_kv_num_blocks: Tensor | None
full_kv_indices: Tensor | None
q_num_blocks: Tensor | None
q_indices: Tensor | None
full_q_num_blocks: Tensor | None
full_q_indices: Tensor | None
BLOCK_SIZE: tuple[int, int]
mask_mod: _mask_mod_signature
# Attribute lists for pytree flatten/unflatten
_TENSOR_ATTRS = [
"kv_num_blocks",
"kv_indices",
"full_kv_num_blocks",
"full_kv_indices",
"q_num_blocks",
"q_indices",
"full_q_num_blocks",
"full_q_indices",
]
_CONTEXT_ATTRS = [
"seq_lengths",
"BLOCK_SIZE",
"mask_mod",
]
def __init__(
self,
seq_lengths: tuple[int, int],
kv_num_blocks: Tensor,
kv_indices: Tensor,
full_kv_num_blocks: Tensor | None,
full_kv_indices: Tensor | None,
q_num_blocks: Tensor | None,
q_indices: Tensor | None,
full_q_num_blocks: Tensor | None,
full_q_indices: Tensor | None,
BLOCK_SIZE: tuple[int, int],
mask_mod: _mask_mod_signature,
) -> None:
if kv_indices.dim() < 2:
raise RuntimeError("BlockMask must have at least 2 dimensions")
assert kv_num_blocks is not None, "kv_num_blocks must be provided"
assert kv_indices is not None, "kv_indices must be provided"
assert (full_kv_num_blocks is None) == (full_kv_indices is None), (
"full_kv_num_blocks and full_kv_indices must be both provided or omitted"
)
assert (full_q_num_blocks is None) == (full_q_indices is None), (
"full_q_num_blocks and full_q_indices must be both provided or omitted"
)
self.seq_lengths = seq_lengths
self.kv_num_blocks = kv_num_blocks
self.kv_indices = kv_indices
self.full_kv_num_blocks = full_kv_num_blocks
self.full_kv_indices = full_kv_indices
self.q_num_blocks = q_num_blocks
self.q_indices = q_indices
self.full_q_num_blocks = full_q_num_blocks
self.full_q_indices = full_q_indices
self.BLOCK_SIZE = BLOCK_SIZE
self.mask_mod = mask_mod
@classmethod
def from_kv_blocks(
cls,
kv_num_blocks: Tensor,
kv_indices: Tensor,
full_kv_num_blocks: Tensor | None = None,
full_kv_indices: Tensor | None = None,
BLOCK_SIZE: int | tuple[int, int] = _DEFAULT_SPARSE_BLOCK_SIZE,
mask_mod: _mask_mod_signature | None = None,
seq_lengths: tuple[int, int] | None = None,
compute_q_blocks: bool = True,
):
"""
Creates a BlockMask instance from key-value block information.
Args:
kv_num_blocks (Tensor): Number of kv_blocks in each Q_BLOCK_SIZE row tile.
kv_indices (Tensor): Indices of key-value blocks in each Q_BLOCK_SIZE row tile.
full_kv_num_blocks (Optional[Tensor]): Number of full kv_blocks in each Q_BLOCK_SIZE row tile.
full_kv_indices (Optional[Tensor]): Indices of full key-value blocks in each Q_BLOCK_SIZE row tile.
BLOCK_SIZE (Union[int, tuple[int, int]]): Size of KV_BLOCK_SIZE x Q_BLOCK_SIZE tiles.
mask_mod (Optional[Callable]): Function to modify the mask.
Returns:
BlockMask: Instance with full Q information generated via _transposed_ordered
Raises:
RuntimeError: If kv_indices has < 2 dimensions.
AssertionError: If only one of full_kv_* args is provided.
"""
if kv_indices.dim() < 2:
raise RuntimeError("BlockMask must have at least 2 dimensions")
assert (full_kv_num_blocks is None) == (full_kv_indices is None), (
"full_kv_num_blocks and full_kv_indices must be both provided or omitted"
)
# Generate q_num_blocks and q_indices
if compute_q_blocks:
q_num_blocks, q_indices = _transpose_ordered(kv_num_blocks, kv_indices)
if full_kv_num_blocks is not None:
assert full_kv_indices is not None
full_q_num_blocks, full_q_indices = _transpose_ordered(
full_kv_num_blocks, full_kv_indices
)
else:
full_q_num_blocks, full_q_indices = None, None
else:
q_num_blocks, q_indices = None, None
full_q_num_blocks, full_q_indices = None, None
if isinstance(BLOCK_SIZE, int):
BLOCK_SIZE = (BLOCK_SIZE, BLOCK_SIZE)
mask_mod = mask_mod if mask_mod is not None else noop_mask
if seq_lengths is None:
q_length = kv_indices.shape[-2] * BLOCK_SIZE[0]
kv_length = kv_indices.shape[-1] * BLOCK_SIZE[1]
seq_lengths = (q_length, kv_length)
return cls(
seq_lengths=seq_lengths,
kv_num_blocks=kv_num_blocks,
kv_indices=kv_indices,
full_kv_num_blocks=full_kv_num_blocks,
full_kv_indices=full_kv_indices,
q_num_blocks=q_num_blocks,
q_indices=q_indices,
full_q_num_blocks=full_q_num_blocks,
full_q_indices=full_q_indices,
BLOCK_SIZE=BLOCK_SIZE,
mask_mod=mask_mod,
)
def as_tuple(self, flatten: bool = True):
"""
Returns a tuple of the attributes of the BlockMask.
Args:
flatten (bool): If True, it will flatten the tuple of (KV_BLOCK_SIZE, Q_BLOCK_SIZE)
"""
if flatten:
block_size = (self.BLOCK_SIZE[0], self.BLOCK_SIZE[1]) # type: ignore[assignment]
seq_lengths = (self.seq_lengths[0], self.seq_lengths[1]) # type: ignore[assignment]
else:
block_size = (self.BLOCK_SIZE,) # type: ignore[assignment]
seq_lengths = (self.seq_lengths,) # type: ignore[assignment]
# pyrefly: ignore [not-iterable]
return (
*seq_lengths,
self.kv_num_blocks,
self.kv_indices,
self.full_kv_num_blocks,
self.full_kv_indices,
self.q_num_blocks,
self.q_indices,
self.full_q_num_blocks,
self.full_q_indices,
*block_size,
self.mask_mod,
)
@property
def shape(self):
*batch_dims, _, _ = self.kv_indices.shape
return tuple(batch_dims) + self.seq_lengths
def __str__(self) -> str:
s = f"BlockMask(shape={self.shape}, sparsity={self.sparsity():.2f}%, \n"
mask_str = self.to_string().strip()
s += mask_str
s += "\n)"
return s
def __getitem__(self, index) -> "BlockMask":
"""
Returns a new BlockMask instance by getting the mask for the given index position.
Args:
index: Index to apply to all attributes.
Example Usage:
.. code-block:: python
def causal_mask(b, h, q_idx, kv_idx):
return q_idx >= kv_idx
block_mask = create_block_mask(
causal_mask, 4, 2, 512, 512, device="cuda"
)
assert block_mask.kv_num_blocks.shape == (4, 2, 4)
assert block_mask.kv_indices.shape == (4, 2, 4, 4)
# Index on batch dimension
new_block_mask = block_mask[0]
assert new_block_mask.kv_num_blocks.shape == (2, 4)
assert new_block_mask.kv_indices.shape == (2, 4, 4)
# Index on batch and head dimension
new_block_mask = block_mask[0, 1]
assert new_block_mask.kv_num_blocks.shape == (4,)
assert new_block_mask.kv_indices.shape == (4, 4)
# slicing on batch and head dimension
new_block_mask = block_mask[0:2, 1:2]
assert new_block_mask.kv_num_blocks.shape == (2, 1, 4)
assert new_block_mask.kv_indices.shape == (2, 1, 4, 4)
# slicing on batch, head, and query dimension
new_block_mask = block_mask[
0:2, 1:2, torch.tensor([1], dtype=torch.int32)
]
assert new_block_mask.kv_num_blocks.shape == (2, 1, 1)
assert new_block_mask.kv_indices.shape == (2, 1, 1, 4)
"""
index = (index,) if not isinstance(index, tuple) else index
padded = (*index, slice(None), slice(None), slice(None))[:3]
sizes = self.kv_num_blocks.shape[:3]
index = tuple(
(slice(i + n, i + n + 1) if -n <= i < 0 else slice(i, i + 1))
if isinstance(i, int)
else i
for i, n in zip(padded, sizes, strict=True)
)
new_kv_num_blocks = self.kv_num_blocks[index]
new_kv_indices = self.kv_indices[index]
if self.full_kv_num_blocks is not None:
assert self.full_kv_indices is not None
new_full_kv_num_blocks = self.full_kv_num_blocks[index]
new_full_kv_indices = self.full_kv_indices[index]
else:
new_full_kv_num_blocks = None
new_full_kv_indices = None
return BlockMask.from_kv_blocks(
new_kv_num_blocks,
new_kv_indices,
new_full_kv_num_blocks,
new_full_kv_indices,
BLOCK_SIZE=self.BLOCK_SIZE,
mask_mod=_sliced_mask_mod_error,
seq_lengths=self.seq_lengths,
compute_q_blocks=self.q_indices is not None,
)
def __repr__(self) -> str:
def shape_or_none(x: torch.Tensor | None):
return x.shape if x is not None else None
return (
f"BlockMask(\n"
f" kv_num_blocks={self.kv_num_blocks.shape},\n"
f" kv_indices={self.kv_indices.shape},\n"
f" full_kv_num_blocks={shape_or_none(self.full_kv_num_blocks)},\n"
f" full_kv_indices={shape_or_none(self.full_kv_indices)},\n"
f" q_num_blocks={shape_or_none(self.q_num_blocks)},\n"
f" q_indices={shape_or_none(self.q_indices)},\n"
f" full_q_num_blocks={shape_or_none(self.full_q_num_blocks)},\n"
f" full_q_indices={shape_or_none(self.full_q_indices)},\n"
f" BLOCK_SIZE={self.BLOCK_SIZE},\n"
f" shape={self.shape},\n"
f" sparsity={self.sparsity():.2f}%,\n"
f" mask_mod={self.mask_mod.__name__ if hasattr(self.mask_mod, '__name__') else self.mask_mod}\n"
f")"
)
def _adjust(self, new_q_len: int, new_kv_len: int):
new_num_rows = (new_q_len + self.BLOCK_SIZE[0] - 1) // self.BLOCK_SIZE[0]
new_num_cols = (new_kv_len + self.BLOCK_SIZE[1] - 1) // self.BLOCK_SIZE[1]
new_kv_num_blocks, new_kv_indices = _adjust_num_blocks_and_indices(
self.kv_num_blocks, self.kv_indices, new_num_rows, new_num_cols
)
if self.full_kv_num_blocks is not None:
assert self.full_kv_indices is not None
(
new_full_kv_num_blocks,
new_full_kv_indices,
) = _adjust_num_blocks_and_indices(
self.full_kv_num_blocks,
self.full_kv_indices,
new_num_rows,
new_num_cols,
)
else:
new_full_kv_num_blocks = None
new_full_kv_indices = None
return self.from_kv_blocks(
new_kv_num_blocks,
new_kv_indices,
new_full_kv_num_blocks,
new_full_kv_indices,
self.BLOCK_SIZE,
self.mask_mod,
)
def numel(self):
"""Returns the number of elements (not accounting for sparsity) in the mask."""
shape = self.shape
def _prod(xs):
return functools.reduce(operator.mul, xs, 1)
return _prod(shape)
def sparsity(self) -> float:
"""Computes the percentage of blocks that are sparse (i.e. not computed)"""
total_size = self.numel()
computed_blocks = self.kv_num_blocks.sum()
if self.full_kv_num_blocks is not None:
computed_blocks += self.full_kv_num_blocks.sum()
computed_size = computed_blocks.item() * self.BLOCK_SIZE[0] * self.BLOCK_SIZE[1]
dense_ratio = computed_size / total_size
return 100 * (1 - dense_ratio)
def to_dense(self) -> Tensor:
"""Returns a dense block that is equivalent to the block mask."""
partial_dense = _ordered_to_dense(self.kv_num_blocks, self.kv_indices)
if self.full_kv_num_blocks is not None:
assert self.full_kv_indices is not None
# pyrefly: ignore [bad-return]
return partial_dense | _ordered_to_dense(
self.full_kv_num_blocks, self.full_kv_indices
)
return partial_dense
def to_string(self, grid_size=(20, 20), limit=4):
"""Returns a string representation of the block mask. Quite nifty.
If grid_size is -1, prints out an uncompressed version. Warning, it can be quite big!
"""
dense_mask = self.to_dense()
*batch_dims, num_rows, num_cols = dense_mask.shape
if isinstance(grid_size, int):
max_rows = grid_size
max_cols = grid_size
elif grid_size == -1:
max_rows = num_rows
max_cols = num_cols
else:
max_rows, max_cols = grid_size
def create_block_vis(*batch_idx):
descriptors = []
descriptors.append(f"{batch_idx}")
vis = ", ".join(reversed(descriptors)) + "\n"
def summarize_section(section) -> str:
percentage = section.float().mean().item()
if percentage == 1:
return "█"
elif percentage == 0:
return " "
else:
return "░"
def cdiv(a, b):
return (a + (b - 1)) // b
row_step = max(1, cdiv(num_rows, max_rows))
col_step = max(1, cdiv(num_cols, max_cols))
for r in range(0, num_rows, row_step):
for c in range(0, num_cols, col_step):
cur_mask = dense_mask
for idx in batch_idx:
cur_mask = cur_mask[idx]
char = summarize_section(
cur_mask[r : r + row_step, c : c + col_step]
)
vis += char * 2
vis += "\n"
return vis
total_vis = []
for idx, batch_idx in enumerate(
itertools.product(*[range(i) for i in batch_dims])
):
if idx == limit:
total_vis.append("...")
total_vis.append("To print out more, set BlockMask.to_string(limit=N)")
total_vis.append(
"You can also index (BlockMask[batch, head]) to choose a specific batch or head"
)
break
block_vis = create_block_vis(*batch_idx)
total_vis.append(block_vis)
return "\n".join(total_vis)
def to(self, device: torch.device | str) -> "BlockMask":
    """Moves the BlockMask to the specified device.

    Args:
        device (torch.device or str): The target device to move the BlockMask to.
            Can be a torch.device object or a string (e.g., 'cpu', 'cuda:0').

    Returns:
        BlockMask: A new BlockMask instance with all tensor components moved
        to the specified device.

    Note:
        This method does not modify the original BlockMask in-place.
        Instead, it returns a new BlockMask instance where individual tensor attributes
        may or may not be moved to the specified device, depending on their
        current device placement.
    """

    def _move(t: torch.Tensor) -> torch.Tensor:
        return t.to(device)

    # Only tensor leaves are remapped; non-tensor context passes through untouched.
    moved = tree_map_only(torch.Tensor, _move, self.as_tuple(flatten=False))
    return BlockMask(*moved)
def _flatten(self):
"""Flatten BlockMask into a list of tensors and context."""
tensors = tuple(getattr(self, attr) for attr in self._TENSOR_ATTRS)
context = tuple(getattr(self, attr) for attr in self._CONTEXT_ATTRS)
return tensors, context
@classmethod
def _unflatten(cls, tensors, context):
    """Unflatten tensors and context back into a BlockMask."""
    # Context first, then tensors; the attr name sets are disjoint so the
    # merge order only matters defensively.
    kwargs = dict(zip(cls._CONTEXT_ATTRS, context))
    kwargs.update(zip(cls._TENSOR_ATTRS, tensors))
    # pyrefly: ignore [bad-argument-type]
    return cls(**kwargs)
def _flatten_with_keys(self):
    """Flatten BlockMask with keys for better tracing."""

    def keyed(attr_names):
        # Pair each leaf with a GetAttrKey so pytree paths show attribute names.
        return tuple((GetAttrKey(name), getattr(self, name)) for name in attr_names)

    return keyed(self._TENSOR_ATTRS), keyed(self._CONTEXT_ATTRS)
def _broadcast_to_dim(x, dim):
while x.dim() < dim:
x = x.unsqueeze(0)
return x
def _round_up_to_multiple(x, multiple):
return (x + multiple - 1) // multiple * multiple
def _convert_mask_to_block_mask(
    mask: Tensor,
    Q_BLOCK_SIZE=_DEFAULT_SPARSE_BLOCK_SIZE,
    KV_BLOCK_SIZE=_DEFAULT_SPARSE_BLOCK_SIZE,
    separate_full_blocks: bool = False,
) -> tuple[Tensor, Tensor | None]:
    """Coarsen a dense boolean attention mask into per-block occupancy maps.

    Args:
        mask: boolean mask; broadcast to 4D [B, H, Q, KV] and right/bottom
            padded (with False) so Q and KV divide evenly into blocks.
        Q_BLOCK_SIZE / KV_BLOCK_SIZE: tile extents along the Q and KV axes.
        separate_full_blocks: when True, returns (partial_blocks, full_blocks)
            with fully-True tiles split out from partially-True ones;
            otherwise returns (nonzero_blocks, None).

    Returns:
        int8 tensors of shape [B, H, Q // Q_BLOCK_SIZE, KV // KV_BLOCK_SIZE].
    """
    assert mask.dtype == torch.bool
    mask = _broadcast_to_dim(mask, 4)

    def padding_needed_for_multiple(x, multiple):
        # How many trailing cells are needed to reach the next block boundary.
        return _round_up_to_multiple(x, multiple) - x

    # Pad KV (last dim) and Q (second-to-last dim) up to block multiples;
    # F.pad's default fill of 0 == False, so padding never enables attention.
    mask = torch.nn.functional.pad(
        mask,
        (
            0,
            padding_needed_for_multiple(mask.shape[-1], KV_BLOCK_SIZE),
            0,
            padding_needed_for_multiple(mask.shape[-2], Q_BLOCK_SIZE),
        ),
    )
    B, H, Q, KV = mask.shape
    assert Q % Q_BLOCK_SIZE == 0
    assert KV % KV_BLOCK_SIZE == 0
    # Split each axis into (block index, intra-block offset) ...
    mask = mask.view(
        B, H, Q // Q_BLOCK_SIZE, Q_BLOCK_SIZE, KV // KV_BLOCK_SIZE, KV_BLOCK_SIZE
    )  # [B, H, Q//Q_BLOCK_SIZE, Q_BLOCK_SIZE, KV//KV_BLOCK_SIZE, KV_BLOCK_SIZE]
    # ... then group the two intra-block offsets last so they can be reduced.
    mask = mask.permute(
        0, 1, 2, 4, 3, 5
    )  # [B, H, Q//Q_BLOCK_SIZE, KV//KV_BLOCK_SIZE, Q_BLOCK_SIZE, KV_BLOCK_SIZE]
    # Count True cells per block.
    mask_block_sum = mask.sum(
        dim=[-2, -1]
    )  # [B, H, Q//Q_BLOCK_SIZE, KV//KV_BLOCK_SIZE]
    if separate_full_blocks:
        full_block_sum = Q_BLOCK_SIZE * KV_BLOCK_SIZE
        full_blocks = mask_block_sum == full_block_sum
        partial_blocks = (mask_block_sum > 0) & (mask_block_sum < full_block_sum)
        partial_blocks = partial_blocks.to(dtype=torch.int8)
        full_blocks = full_blocks.to(dtype=torch.int8)
        return partial_blocks, full_blocks
    else:
        partial_blocks = mask_block_sum > 0
        partial_blocks = partial_blocks.to(dtype=torch.int8)
        return partial_blocks, None
def or_masks(*mask_mods: _mask_mod_signature) -> _mask_mod_signature:
    """Returns a mask_mod that's the union of provided mask_mods"""
    if any(not callable(m) for m in mask_mods):
        raise RuntimeError(f"All inputs should be callable mask_mods: {mask_mods}")

    def or_mask(b, h, q_idx, kv_idx):
        # Start from an all-False scalar on b's device and OR each mod in.
        combined = b.new_zeros((), dtype=torch.bool)
        for mod in mask_mods:
            combined = combined | mod(b, h, q_idx, kv_idx)
        return combined

    return or_mask
def and_masks(*mask_mods: _mask_mod_signature) -> _mask_mod_signature:
    """Returns a mask_mod that's the intersection of provided mask_mods"""
    if any(not callable(m) for m in mask_mods):
        raise RuntimeError(f"All inputs should be callable mask_mods: {mask_mods}")

    def and_mask(b, h, q_idx, kv_idx):
        # Start from an all-True scalar on b's device and AND each mod in.
        combined = b.new_ones((), dtype=torch.bool)
        for mod in mask_mods:
            combined = combined & mod(b, h, q_idx, kv_idx)
        return combined

    return and_mask
def _convert_block_mask_to_mask(
    block_mask,
    KV_BLOCK_SIZE=_DEFAULT_SPARSE_BLOCK_SIZE,
    Q_BLOCK_SIZE=_DEFAULT_SPARSE_BLOCK_SIZE,
) -> Tensor:
    """Expand a [B, H, Q_blocks, KV_blocks] block map back to a dense
    [B, H, Q_blocks * Q_BLOCK_SIZE, KV_blocks * KV_BLOCK_SIZE] mask by
    repeating every block entry over its block's full extent.
    """
    assert block_mask.dim() == 4
    B, H, Q, KV = block_mask.shape
    # Prepend the two intra-block extents as broadcast dims ...
    block_mask = block_mask.expand(Q_BLOCK_SIZE, KV_BLOCK_SIZE, *block_mask.shape)
    # ... then interleave each extent next to its block-index dim and flatten,
    # so every block value is tiled Q_BLOCK_SIZE x KV_BLOCK_SIZE times.
    block_mask = block_mask.permute(2, 3, 4, 0, 5, 1).reshape(
        B, H, Q * Q_BLOCK_SIZE, KV * KV_BLOCK_SIZE
    )
    return block_mask
def _create_sparse_block_from_block_mask(
    block_mask: tuple[Tensor, Tensor | None],
    mask_mod: Callable | None,
    seq_lengths: tuple[int, int],
    Q_BLOCK_SIZE: int = _DEFAULT_SPARSE_BLOCK_SIZE,
    KV_BLOCK_SIZE: int = _DEFAULT_SPARSE_BLOCK_SIZE,
) -> BlockMask:
    """Build a BlockMask from dense (partial, full) block occupancy maps.

    Args:
        block_mask: pair of dense block maps; the second element may be None
            when full blocks were not tracked separately.
        mask_mod: mask function stored on the resulting BlockMask (the partial
            blocks still need it applied at runtime).
        seq_lengths: original (Q_LEN, KV_LEN) before block padding.
        Q_BLOCK_SIZE / KV_BLOCK_SIZE: block extents used to build the maps.
    """
    partial_blocks, full_blocks = block_mask

    # Convert each dense block map into the (num_blocks, indices) pair
    # expected by BlockMask.from_kv_blocks.
    partial_bm = _dense_to_ordered(partial_blocks)
    if full_blocks is not None:
        full_bm: tuple[Tensor | None, Tensor | None] = _dense_to_ordered(full_blocks)
    else:
        full_bm = (None, None)

    return BlockMask.from_kv_blocks(
        partial_bm[0],
        partial_bm[1],
        full_bm[0],
        full_bm[1],
        BLOCK_SIZE=(Q_BLOCK_SIZE, KV_BLOCK_SIZE),
        mask_mod=mask_mod,
        seq_lengths=seq_lengths,
    )
def create_mask(
    mod_fn: _score_mod_signature | _mask_mod_signature,
    B: int | None,
    H: int | None,
    Q_LEN: int,
    KV_LEN: int,
    device: DeviceLikeType | None = None,
) -> Tensor:
    r"""This function creates a mask tensor from a mod_fn function.

    Args:
        mod_fn (Union[_score_mod_signature, _mask_mod_signature]): Function to modify attention scores.
        B (int): Batch size. Defaults to 1 when None.
        H (int): Number of query heads. Defaults to 1 when None.
        Q_LEN (int): Sequence length of query.
        KV_LEN (int): Sequence length of key/value.
        device (str): Device to run the mask creation on. Defaults to the
            current accelerator (or "cpu") when None.

    Returns:
        mask (Tensor): A mask tensor with shape (B, H, M, N).
    """
    if device is None:
        device = torch.accelerator.current_accelerator() or "cpu"
    if B is None:
        B = 1
    if H is None:
        H = 1
    # Index grids covering every (batch, head, query, kv) coordinate; the mod
    # function is vmapped over all four of them below.
    b = torch.arange(0, B, device=device)
    h = torch.arange(0, H, device=device)
    m = torch.arange(0, Q_LEN, device=device)
    n = torch.arange(0, KV_LEN, device=device)
    mod_type = _get_mod_type(mod_fn)

    # Local import — NOTE(review): presumably deferred to avoid an
    # import-time dependency on dynamo internals; confirm before hoisting.
    from torch._dynamo._trace_wrapped_higher_order_op import TransformGetItemToIndex

    with TransformGetItemToIndex():
        if mod_type == _ModificationType.SCORE_MOD:
            score_mod = mod_fn
            score_mod = _vmap_for_bhqkv(score_mod, prefix=(0,))  # first input is score
            # A score_mod implies masking exactly where it maps scores to -inf:
            # run it on zero scores and mark -inf positions as masked (False).
            out = score_mod(torch.zeros(B, H, Q_LEN, KV_LEN, device=device), b, h, m, n)
            mask = torch.where(torch.isneginf(out), False, True)
            return mask
        elif mod_type == _ModificationType.MASK_MOD:
            mask_mod = mod_fn
            mask_mod = _vmap_for_bhqkv(mask_mod, prefix=())
            mask = mask_mod(b, h, m, n)
            return mask
        else:
            raise AssertionError
def create_block_mask(
    mask_mod: _mask_mod_signature,
    B: int | None,
    H: int | None,
    Q_LEN: int,
    KV_LEN: int,
    device: DeviceLikeType | None = None,
    BLOCK_SIZE: int | tuple[int, int] = _DEFAULT_SPARSE_BLOCK_SIZE,
    _compile=False,
) -> BlockMask:
    r"""This function creates a block mask tuple from a mask_mod function.

    Args:
        mask_mod (Callable): mask_mod function. This is a callable that defines the
            masking pattern for the attention mechanism. It takes four arguments:
            b (batch size), h (number of heads), q_idx (query index), and kv_idx (key/value index).
            It should return a boolean tensor indicating which attention connections are allowed (True)
            or masked out (False).
        B (int): Batch size. Defaults to 1 when None.
        H (int): Number of query heads. Defaults to 1 when None.
        Q_LEN (int): Sequence length of query.
        KV_LEN (int): Sequence length of key/value.
        device (str): Device to run the mask creation on.
        BLOCK_SIZE (int or tuple[int, int]): Block size for the block mask. If a single int is provided it is used for both query and key/value.
        _compile (bool): Deprecated. Wrap this function with ``torch.compile`` instead.

    Returns:
        BlockMask: A BlockMask object that contains the block mask information.

    Example Usage:
        .. code-block:: python

            def causal_mask(b, h, q_idx, kv_idx):
                return q_idx >= kv_idx


            block_mask = create_block_mask(causal_mask, 1, 1, 8192, 8192, device="cuda")
            query = torch.randn(1, 1, 8192, 64, device="cuda", dtype=torch.float16)
            key = torch.randn(1, 1, 8192, 64, device="cuda", dtype=torch.float16)
            value = torch.randn(1, 1, 8192, 64, device="cuda", dtype=torch.float16)
            output = flex_attention(query, key, value, block_mask=block_mask)
    """
    if device is None:
        device = torch.accelerator.current_accelerator() or "cpu"
    mod_type = _get_mod_type(mask_mod)
    # Error text fixed: the public API is `create_block_mask`, not `create-block_mask`.
    assert mod_type == _ModificationType.MASK_MOD, (
        f"create_block_mask requires a mask_mod function! Got {mask_mod}"
    )
    if B is None:
        B = 1
    if H is None:
        H = 1
    if isinstance(BLOCK_SIZE, int):
        Q_BLOCK_SIZE = BLOCK_SIZE
        KV_BLOCK_SIZE = BLOCK_SIZE
    else:
        Q_BLOCK_SIZE, KV_BLOCK_SIZE = BLOCK_SIZE

    if _compile:
        warnings.warn(
            "_compile flag on create_block_mask was originally added to work around a torch.compile limitation. That limitation has since been addressed. So, to compile create_block_mask, we suggest doing torch.compile(create_block_mask). This still works for now, but will be removed in the future.",
            DeprecationWarning,
            stacklevel=2,
        )
        return torch.compile(create_block_mask)(
            mask_mod, B, H, Q_LEN, KV_LEN, device, BLOCK_SIZE
        )

    # Materialize the dense mask, coarsen it into (partial, full) block maps,
    # then pack those into the sparse BlockMask representation.
    mask_tensor = create_mask(mask_mod, B, H, Q_LEN, KV_LEN, device)
    partial_block_mask, full_block_mask = _convert_mask_to_block_mask(
        mask_tensor,
        Q_BLOCK_SIZE=Q_BLOCK_SIZE,
        KV_BLOCK_SIZE=KV_BLOCK_SIZE,
        separate_full_blocks=True,
    )
    block_mask = _create_sparse_block_from_block_mask(
        (partial_block_mask, full_block_mask),
        mask_mod,
        (Q_LEN, KV_LEN),
        Q_BLOCK_SIZE,
        KV_BLOCK_SIZE,
    )
    return block_mask
def _create_empty_block_mask(query: Tensor, key: Tensor) -> BlockMask:
    r"""Default block mask for flex attention.

    If users don't specify any block sparse mask info, we create this
    empty block sparse mask. Which creates a BlockMask with 1 block that is the full length
    of the query and key tensors.
    """
    dev = query.device
    # A single always-on block; _LARGE_SPARSE_BLOCK_SIZE is what flex_attention
    # later uses to recognize this as a "no-op" mask.
    single_block_count = torch.ones([1, 1, 1], dtype=torch.int32, device=dev)
    single_block_index = torch.zeros([1, 1, 1, 1], dtype=torch.int32, device=dev)
    return BlockMask.from_kv_blocks(
        kv_num_blocks=single_block_count,
        kv_indices=single_block_index,
        BLOCK_SIZE=_LARGE_SPARSE_BLOCK_SIZE,
        seq_lengths=(1, 1),
    )
def _apply_kernel_options(
query: Tensor,
key: Tensor,
value: Tensor,
return_lse: bool,
kernel_options,
return_aux: AuxRequest | None = None,
):
kernel_options = {} if kernel_options is None else dict(kernel_options)
if "BACKEND" in kernel_options and kernel_options.get(
"FORCE_USE_FLEX_ATTENTION", False
):
# TODO: remove FORCE_USE_FLEX_ATTENTION once BACKEND is fully adopted.
raise RuntimeError(
"BACKEND cannot be combined with legacy FORCE_USE_FLEX_ATTENTION. "
"BACKEND supersedes the legacy knob; please drop FORCE_USE_FLEX_ATTENTION "
"and only specify the desired BACKEND."
)
if "BACKEND" in kernel_options:
valid_backends = typing.get_args(_Backend)
if kernel_options["BACKEND"] not in valid_backends:
raise ValueError(
f"Invalid BACKEND value '{kernel_options['BACKEND']}'. "
f"Must be one of {valid_backends}"
)
kernel_options.setdefault("BACKEND", "AUTO")
kernel_options.setdefault("PRESCALE_QK", False)
kernel_options.setdefault("ROWS_GUARANTEED_SAFE", False)
kernel_options.setdefault("BLOCKS_ARE_CONTIGUOUS", False)
# This forces all biases grad scatters to be done in the DQ iteration loop of the backwards
kernel_options.setdefault("WRITE_DQ", True)
any_inputs_on_cpu_device = (
query.device.type == "cpu"
or key.device.type == "cpu"
or value.device.type == "cpu"
)
# Determine what auxiliary outputs are needed
output_lse = return_lse
output_max = False
if return_aux is not None:
# New API takes precedence over legacy parameters
output_lse = return_aux.lse
output_max = return_aux.max_scores
# If forward kernel needs to return logsumexp is decided by this rule internally.
assert "OUTPUT_LOGSUMEXP" not in kernel_options
kernel_options["OUTPUT_LOGSUMEXP"] = True
if not output_lse:
# We used to check if q,k,v required grads but since captured buffers can require grad
# we always write unless in no_grad
kernel_options["OUTPUT_LOGSUMEXP"] = torch.is_grad_enabled()
if any_inputs_on_cpu_device:
# CPU with torch.compile now supports inference, and will not return lse
# TODO: support CPU for training and return lse
kernel_options["OUTPUT_LOGSUMEXP"] = False
# If forward kernel needs to return max is decided by this rule internally.
assert "OUTPUT_MAX" not in kernel_options
kernel_options["OUTPUT_MAX"] = output_max
if any_inputs_on_cpu_device and output_max:
# CPU doesn't support returning max yet
# TODO: support CPU for returning max
raise NotImplementedError("Returning max scores is not supported on CPU.")
kernel_options["OUTPUT_MAX"] = False
return kernel_options
def _validate_embed_dim(query: Tensor, key: Tensor, value: Tensor) -> None:
if query.size(-1) != key.size(-1):
raise ValueError(
f"Expect query and key/value to have the same embedding dimension "
f"but got E={query.size(-1)} and E={key.size(-1)}."
)
def _validate_device(query: Tensor, key: Tensor, value: Tensor) -> None:
"""TODO: Remove once non cuda/cpu devices support is added
We only need to check query since we have already that q,k,v are on the same device
"""
supported_devices = {"cuda", "cpu", "xpu", "hpu"}
if query.device.type not in supported_devices:
raise ValueError(
"FlexAttention is only supported on CUDA, CPU or HPU devices. "
f"Found input tensors on {query.device.type} device."
)
def _enforce_mem_layouts(
query: Tensor, key: Tensor, value: Tensor
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Enforce memory layouts for query, key, and value tensors.
For non-FP8 dtypes, no action is taken.
For FP8 dtypes, we enforce the following memory layouts:
- Query tensor must be in row-major memory layout, as it will be the left-operand in the FP8 GEMM `q @ k.T`.
- Key tensor must be in row-major memory layout, as it will be transposed when used as the right-operand
in the FP8 GEMM `q @ k.T`, meaning it will correctly be in column-major memory layout for the GEMM.
- Value tensor must be in column-major memory layout, as it will be the right-operand in the FP8 GEMM `softmax_scores @ v`.
Returns the query, key, and value tensors with the enforced memory layouts.
"""
def is_row_major(tensor: Tensor) -> bool:
return tensor.stride()[-1] == 1
def is_col_major(tensor: Tensor) -> bool:
return tensor.stride()[-2] == 1
# These memory layout constraint are only for FP8 GEMMs on NVIDIA GPU architectures >= SM89 and < SM100.
# This is because GPU arch < SM89 does not not support FP8 GEMMs, and
# SM100 has support for TN, NT, TT, NN layouts for FP8 GEMMs
# (i.e., left and right operands can be in row or column major layouts)
# so this check is only needed for older architectures.
# See: https://github.com/NVIDIA/cutlass/blob/main/media/docs/cpp/blackwell_functionality.md
fp8_dtypes = (
torch.float8_e4m3fn,
torch.float8_e5m2,
)
gemm_precision = query.dtype
should_enforce_mem_layout = (
gemm_precision in fp8_dtypes
and torch.version.cuda is not None
and torch.cuda.get_device_capability("cuda") >= (8, 9)
and torch.cuda.get_device_capability("cuda") < (10, 0)
)
if not should_enforce_mem_layout:
return query, key, value
# Query must be in row-major memory layout as the left-operand in the FP8 GEMM `q @ k.T`
if not is_row_major(query):
query = query.contiguous()
# Key must be in row-major memory layout as it will be transposed when used as the right-operand
# in the FP8 GEMM `q @ k.T`, meaning it will correctly be in column-major memory layout for the GEMM.
if not is_row_major(key):
key = key.contiguous()
# Value must be in column-major memory layout as the right-operand in the FP8 GEMM `softmax_scores @ v`
if not is_col_major(value):
value = value.transpose(-2, -1).contiguous().transpose(-2, -1)
return query, key, value
def flex_attention(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    score_mod: _score_mod_signature | None = None,
    block_mask: BlockMask | None = None,
    scale: float | None = None,
    enable_gqa: bool = False,
    return_lse: bool = False,
    kernel_options: FlexKernelOptions | None = None,
    *,
    return_aux: AuxRequest | None = None,
) -> Tensor | tuple[Tensor, Tensor] | tuple[Tensor, AuxOutput]:
    r"""This function implements scaled dot product attention with an arbitrary attention score modification function.

    This function computes the scaled dot product attention between query, key, and value tensors with a user-defined
    attention score modification function. The attention score modification function will be applied after the attention
    scores have been calculated between the query and key tensors. The attention scores are calculated as follows:

    The ``score_mod`` function should have the following signature:

    .. code-block:: python

        def score_mod(
            score: Tensor,
            batch: Tensor,
            head: Tensor,
            q_idx: Tensor,
            k_idx: Tensor
        ) -> Tensor:

    Where:
        - ``score``: A scalar tensor representing the attention score,
          with the same data type and device as the query, key, and value tensors.
        - ``batch``, ``head``, ``q_idx``, ``k_idx``: Scalar tensors indicating
          the batch index, query head index, query index, and key/value index, respectively.
          These should have the ``torch.int`` data type and be located on the same device as the score tensor.

    Args:
        query (Tensor): Query tensor; shape :math:`(B, Hq, L, E)`. For FP8 dtypes, should be in row-major memory layout for optimal performance.
        key (Tensor): Key tensor; shape :math:`(B, Hkv, S, E)`. For FP8 dtypes, should be in row-major memory layout for optimal performance.
        value (Tensor): Value tensor; shape :math:`(B, Hkv, S, Ev)`. For FP8 dtypes, should be in column-major memory layout for optimal performance.
        score_mod (Optional[Callable]): Function to modify attention scores. By default no score_mod is applied.
        block_mask (Optional[BlockMask]): BlockMask object that controls the blocksparsity pattern of the attention.
        scale (Optional[float]): Scaling factor applied prior to softmax. If none, the default value is set to :math:`\frac{1}{\sqrt{E}}`.
        enable_gqa (bool): If set to True, enables Grouped Query Attention (GQA) and broadcasts key/value heads to query heads.
        return_lse (bool): Whether to return the logsumexp of the attention scores. Default is False. **Deprecated**: Use ``return_aux=AuxRequest(lse=True)`` instead.
        kernel_options (Optional[FlexKernelOptions]):
            Options to control the behavior of the underlying Triton kernels.
            See :class:`FlexKernelOptions` for available options and usage examples.
        return_aux (Optional[AuxRequest]): Specifies which auxiliary outputs to compute and return.
            If None, only the attention output is returned. Use ``AuxRequest(lse=True, max_scores=True)``
            to request both auxiliary outputs.

    Returns:
        output (Tensor): Attention output; shape :math:`(B, Hq, L, Ev)`.

        When ``return_aux`` is not None:
            aux (AuxOutput): Auxiliary outputs with requested fields populated.

        When ``return_aux`` is None (deprecated paths):
            lse (Tensor): Log-sum-exp of attention scores; shape :math:`(B, Hq, L)`. Only returned if ``return_lse=True``.

    Shape legend:
        - :math:`N: \text{Batch size} ... : \text{Any number of other batch dimensions (optional)}`
        - :math:`S: \text{Source sequence length}`
        - :math:`L: \text{Target sequence length}`
        - :math:`E: \text{Embedding dimension of the query and key}`
        - :math:`Ev: \text{Embedding dimension of the value}`

    .. warning::
        `torch.nn.attention.flex_attention` is a prototype feature in PyTorch.
        Please look forward to a more stable implementation in a future version of PyTorch.
        Read more about feature classification at: https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype
    """
    # Some basic input validation
    _validate_sdpa_input(query, key, value)
    _validate_embed_dim(query, key, value)
    _validate_device(query, key, value)
    query, key, value = _enforce_mem_layouts(query, key, value)
    if query.dim() != 4 or key.dim() != 4 or value.dim() != 4:
        raise NotImplementedError("NYI: query, key, and value must be 4D tensors")
    # Without GQA, query and key/value head counts must match exactly.
    if (not enable_gqa) and query.size(-3) != key.size(-3):
        raise ValueError(
            f"Expect query and key/value to have the same number of heads "
            f"but got Hq={query.size(-3)} and Hkv={key.size(-3)}. "
            f"Try setting enable_gqa=True for GQA."
        )
    if enable_gqa:
        # GQA requires the query head count to be an exact multiple of the
        # kv head count so kv heads can be broadcast to query heads.
        Hq = query.size(1)
        Hkv = key.size(1)
        if Hq % Hkv != 0:
            raise ValueError(
                f"Expect number of query heads to be a multiple of kv heads for GQA "
                f"but got Hq={Hq} and Hkv={Hkv}."
            )
    if query.size(0) != key.size(0):
        # Mismatched batch sizes are only allowed when an explicit block_mask
        # with a matching query batch dimension is supplied.
        if block_mask is None:
            raise ValueError(
                f"Expect query and key/value to have the same batch size, "
                f"or non-none block_mask, "
                f"but got block_mask=None, Bq={query.size(0)}, and Bkv={key.size(0)}."
            )

        if block_mask.kv_num_blocks.size(0) != query.size(0):
            raise ValueError(
                f"Expect query and key/value to have the same batch size, "
                f"or block_mask and query to have the same batch size, "
                f"but got Bq={query.size(0)}, Bkv={key.size(0)}, B_block_mask={block_mask.kv_num_blocks.size(0)}."
            )

    # Fill in no-op defaults for the optional score_mod / block_mask.
    if score_mod is None:
        score_mod = _identity
    if block_mask is None:
        block_mask = _create_empty_block_mask(query, key)

    # If BlockMask was sliced, its mask_mod is intentionally replaced with an error-raising stub.
    # This guard ensures we surface the intended error message before any shape-based checks.
    if getattr(block_mask, "mask_mod", None) is _sliced_mask_mod_error:
        raise RuntimeError("Cannot use mask_mod from a sliced BlockMask")

    if (
        block_mask.BLOCK_SIZE[0] == _LARGE_SPARSE_BLOCK_SIZE
        and block_mask.BLOCK_SIZE[1] == _LARGE_SPARSE_BLOCK_SIZE
    ):
        # This corresponds to the case where we essentially have a "no-op" block mask.
        pass
    else:
        # Validate that the block mask was built for these sequence lengths:
        # too small is always an error; too large is only acceptable when it
        # exactly matches (equality asserted below after the two raises).
        block_mask_q_len = block_mask.shape[-2]
        block_mask_kv_len = block_mask.shape[-1]
        if query.size(-2) > block_mask_q_len or key.size(-2) > block_mask_kv_len:
            raise ValueError(
                f"block_mask was created for block_mask.shape={block_mask.shape} but got q_len={query.size(-2)} and kv_len={key.size(-2)}. "
                "As the block mask was created for a smaller length than you're using it for, you likely need to create a new block mask."
            )
        elif (
            query.size(-2) < block_mask_q_len and key.size(-2) <= block_mask_kv_len
        ) or (query.size(-2) <= block_mask_q_len and key.size(-2) < block_mask_kv_len):
            raise ValueError(
                f"block_mask was created for block_mask.shape={block_mask.shape} but got q_len={query.size(-2)} and kv_len={key.size(-2)}. "
                "As the block mask was created for a larger length than you're using it for, you can either 1. create a new block mask with the correct length, or 2. 'adjust' the existing block mask to the correct length by calling block_mask._adjust(q_len, kv_len). This essentially 'crops' the block mask to the upper left corner, which does not work for all mask_mods!"
            )
        assert query.size(-2) == block_mask_q_len
        assert key.size(-2) == block_mask_kv_len

    # Default softmax scaling: 1/sqrt(head_dim).
    if scale is None:
        scale = 1.0 / math.sqrt(query.size(-1))

    if query.device != block_mask.kv_num_blocks.device:  # type: ignore[union-attr]
        raise RuntimeError(
            f"Expect q/k/v and block_mask to be on the same device "
            f"but got {query.device} and {block_mask.kv_num_blocks.device}."  # type: ignore[union-attr]
        )

    # Handle deprecation warnings for old parameters
    if return_lse and return_aux is not None:
        raise ValueError(
            "Cannot specify both return_lse and return_aux. "
            "return_lse is deprecated, please use return_aux=AuxRequest(lse=True) instead."
        )
    elif return_lse and return_aux is None:
        _warn_once(
            "deprecated_return_lse",
            "return_lse is deprecated and will be removed in v2.10. "
            "Please use return_aux=AuxRequest(lse=True) instead.",
            category=FutureWarning,
        )

    kernel_options = _apply_kernel_options(
        query,
        key,
        value,
        return_lse,
        kernel_options,
        return_aux,
    )

    def _finalize_outputs(
        out,
        lse,
        max_scores,
        *,
        return_aux: AuxRequest | None,
        return_lse: bool,
    ):
        """Normalize stats and build return value (aux-aware, legacy-compatible)."""
        # The kernel computes stats in base-2; rescale by ln(2) to natural log.
        ln2 = math.log(2.0)
        return_lse = return_lse or return_aux is not None and return_aux.lse
        return_max = return_aux is not None and return_aux.max_scores
        lse_scaled = lse * ln2 if (return_lse and lse.numel() > 0) else None
        max_scaled = (
            max_scores * ln2 if (return_max and max_scores.numel() > 0) else None
        )
        if return_aux is not None:
            return out, AuxOutput(
                lse=lse_scaled,
                max_scores=max_scaled,
            )
        if return_lse:
            return out, lse_scaled
        return out

    # Fast path: already tracing under dynamo — call the HOP directly.
    if torch.compiler.is_dynamo_compiling():
        # mark head_dim and number of heads to be static
        for x in [query, key, value]:
            torch._dynamo.mark_static(x, -3)
            torch._dynamo.mark_static(x, -1)
        out, lse, max_scores = flex_attention_hop(
            query,
            key,
            value,
            score_mod,
            block_mask.as_tuple(),
            scale,
            kernel_options,  # type: ignore[union-attr]
        )
        return _finalize_outputs(
            out, lse, max_scores, return_aux=return_aux, return_lse=return_lse
        )

    if not _FLEX_ATTENTION_DISABLE_COMPILE_DEBUG:
        _warn_once(
            warning_id="flex_attention_performance",
            message=(
                "flex_attention called without torch.compile() - this will use an unfused implementation that materializes the full scores matrix instead of generating a fused kernel.\n\n"
                "SOLUTION: Use torch.compile(flex_attention)(...)\n\n"
                "If you want to debug your score_mod/mask_mod, you can set:\n"
                "torch.nn.attention.flex_attention._FLEX_ATTENTION_DISABLE_COMPILE_DEBUG = True\n\n"
                "This will allow you to use print statements or breakpoints. Note: This doesn't work with the backwards pass and may produce incorrect results."
            ),
        )
    if not torch._dynamo.is_dynamo_supported():
        raise RuntimeError("flex_attention requires dynamo support")

    from torch._dynamo.backends.debugging import (
        make_eager_backend_with_torch_function_mode,
    )

    # Dynamo is expecting a callable with "__code__" attribute.
    # We cannot directly pass hop to it. So we wrap it in a dummy function.
    def _flex_attention_hop_wrapper(*args, **kwargs):
        return flex_attention_hop(*args, **kwargs)

    with _set_compilation_env():
        with torch._dynamo.utils.disable_cache_limit():
            with _temp_remove_pre_dispatch_torch_function_mode():
                with _temp_remove_metadata_torch_function_mode() as metadata_mode:
                    if metadata_mode:
                        backend: str | Callable[..., Any] = (
                            make_eager_backend_with_torch_function_mode(metadata_mode)
                        )
                    else:
                        backend = "eager"
                    # Debug mode runs the wrapper eagerly so user score_mod /
                    # mask_mod code can use prints and breakpoints.
                    if _FLEX_ATTENTION_DISABLE_COMPILE_DEBUG:
                        flex_fn = _flex_attention_hop_wrapper
                    else:
                        flex_fn = torch.compile(
                            _flex_attention_hop_wrapper, backend=backend, fullgraph=True
                        )
                    out, lse, max_scores = flex_fn(
                        query,
                        key,
                        value,
                        score_mod,
                        block_mask.as_tuple(),  # type: ignore[union-attr]
                        scale,
                        kernel_options,
                    )
                    return _finalize_outputs(
                        out, lse, max_scores, return_aux=return_aux, return_lse=return_lse
                    )
| BlockMask |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/serdes/objects/package_entry.py | {
"start": 1913,
"end": 2133
} | class ____(EnvRegistryObjectFeatureData):
schema: Optional[dict[str, Any]]
@property
def feature(self) -> EnvRegistryObjectFeature:
return "component"
@whitelist_for_serdes
@record
| ComponentFeatureData |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/common/utils.py | {
"start": 2923,
"end": 7217
} | class ____(Exception):
pass
@contextlib.contextmanager
def validate_deprecation():
if settings.get_current_profile_name() == "threading":
import pytest
if sys.version_info[:2] < (3, 14):
pytest.skip("warnings module is not thread-safe before 3.14")
import warnings
try:
warnings.simplefilter("always", HypothesisDeprecationWarning)
with warnings.catch_warnings(record=True) as w:
yield
finally:
warnings.simplefilter("error", HypothesisDeprecationWarning)
if not any(e.category == HypothesisDeprecationWarning for e in w):
raise NotDeprecated(
f"Expected a deprecation warning but got {[e.category for e in w]!r}"
)
def checks_deprecated_behaviour(func):
"""A decorator for testing deprecated behaviour."""
@proxies(func)
def _inner(*args, **kwargs):
with validate_deprecation():
return func(*args, **kwargs)
return _inner
def all_values(db):
return {v for vs in db.data.values() for v in vs}
def non_covering_examples(database):
return {
v for k, vs in database.data.items() if not k.endswith(b".pareto") for v in vs
}
def counts_calls(func):
"""A decorator that counts how many times a function was called, and
stores that value in a ``.calls`` attribute.
"""
assert not hasattr(func, "calls")
@proxies(func)
def _inner(*args, **kwargs):
_inner.calls += 1
return func(*args, **kwargs)
_inner.calls = 0
return _inner
def assert_output_contains_failure(output, test, **kwargs):
assert test.__name__ + "(" in output
for k, v in kwargs.items():
assert f"{k}={v!r}" in output, (f"{k}={v!r}", output)
def assert_falsifying_output(
test, example_type="Falsifying", expected_exception=AssertionError, **kwargs
):
with capture_out() as out:
if expected_exception is None:
# Some tests want to check the output of non-failing runs.
test()
msg = ""
else:
with pytest.raises(expected_exception) as exc_info:
test()
notes = "\n".join(getattr(exc_info.value, "__notes__", []))
msg = str(exc_info.value) + "\n" + notes
output = out.getvalue() + msg
assert f"{example_type} example:" in output
assert_output_contains_failure(output, test, **kwargs)
temp_registered_lock = RLock()
@contextlib.contextmanager
def temp_registered(type_, strat_or_factory):
"""Register and un-register a type for st.from_type().
This is not too hard, but there's a subtlety in restoring the
previously-registered strategy which we got wrong in a few places.
"""
with temp_registered_lock:
prev = _global_type_lookup.get(type_)
register_type_strategy(type_, strat_or_factory)
try:
yield
finally:
del _global_type_lookup[type_]
from_type.__clear_cache()
if prev is not None:
register_type_strategy(type_, prev)
@contextlib.contextmanager
def raises_warning(expected_warning, match=None):
"""Use instead of pytest.warns to check that the raised warning is handled properly"""
with pytest.raises(expected_warning, match=match) as r, warnings.catch_warnings():
warnings.simplefilter("error", category=expected_warning)
yield r
@contextlib.contextmanager
def capture_observations(*, choices=None):
ls: list[Observation] = []
add_observability_callback(ls.append)
if choices is not None:
old_choices = observability.OBSERVABILITY_CHOICES
observability.OBSERVABILITY_CHOICES = choices
try:
yield ls
finally:
remove_observability_callback(ls.append)
if choices is not None:
observability.OBSERVABILITY_CHOICES = old_choices
# Specifies whether we can represent subnormal floating point numbers.
# IEE-754 requires subnormal support, but it's often disabled anyway by unsafe
# compiler options like `-ffast-math`. On most hardware that's even a global
# config option, so *linking against* something built this way can break us.
# Everything is terrible
PYTHON_FTZ = next_down(sys.float_info.min) == 0.0
| NotDeprecated |
python | chroma-core__chroma | chromadb/api/configuration.py | {
"start": 937,
"end": 1429
} | class ____:
"""Represents the definition of a configuration."""
name: str
validator: ParameterValidator
is_static: bool
default_value: ParameterValue
def __init__(
self,
name: str,
validator: ParameterValidator,
is_static: bool,
default_value: ParameterValue,
):
self.name = name
self.validator = validator
self.is_static = is_static
self.default_value = default_value
| ConfigurationDefinition |
python | kamyu104__LeetCode-Solutions | Python/correct-a-binary-tree.py | {
"start": 159,
"end": 808
} | class ____(object):
def correctBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
q = {root:None}
while q:
new_q = {}
for node, parent in q.iteritems():
if node.right in q:
if parent.left == node:
parent.left = None
else:
parent.right = None
return root
if node.left:
new_q[node.left] = node
if node.right:
new_q[node.right] = node
q = new_q
| Solution |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/virtual_with_versions/package.py | {
"start": 227,
"end": 929
} | class ____(AutotoolsPackage):
"""Uses version-test-pkg, as a build dependency"""
homepage = "http://www.spack.org"
url = "http://www.spack.org/downloads/aml-1.0.tar.gz"
version("17.0.1", md5="0123456789abcdef0123456789abcdef")
version("16.0.1", md5="0123456789abcdef0123456789abcdef")
version("11.0.1", md5="0123456789abcdef0123456789abcdef")
version("1.8.0", md5="0123456789abcdef0123456789abcdef")
provides("java@17", when="@17.0:17.9")
provides("java@16", when="@16.0:16.9")
provides("java@11", when="@11.0:11.9")
provides("java@10", when="@10.0:10.9")
provides("java@9", when="@9.0:9.9")
provides("java@8", when="@1.8.0:1.8.9")
| VirtualWithVersions |
python | huggingface__transformers | src/transformers/models/aria/modular_aria.py | {
"start": 3787,
"end": 9775
} | class ____(LlamaConfig):
r"""
This class handles the configuration for the text component of the Aria model.
Instantiating a configuration with the defaults will yield a similar configuration to that of the model of the Aria
[rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) architecture.
This class extends the LlamaConfig to include additional parameters specific to the Mixture of Experts (MoE) architecture.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LlamaModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 4096):
The size of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
Llama 2 up to 4096, CodeLlama up to 16384.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 2):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_heads
moe_num_experts (`int`, *optional*, defaults to 8):
The number of experts in the MoE layer.
moe_topk (`int`, *optional*, defaults to 2):
The number of top experts to route to for each token.
moe_num_shared_experts (`int`, *optional*, defaults to 2):
The number of shared experts.
"""
model_type = "aria_text"
base_config_key = "text_config"
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.shared_experts.gate_proj": "colwise",
"layers.*.mlp.shared_experts.up_proj": "colwise",
"layers.*.mlp.shared_experts.down_proj": "rowwise",
}
def __init__(
self,
intermediate_size: int = 4096,
moe_num_experts: int = 8,
moe_topk: int = 2,
moe_num_shared_experts: int = 2,
pad_token_id=2,
**super_kwargs,
):
self.intermediate_size = intermediate_size
self.moe_num_experts = moe_num_experts
self.moe_topk = moe_topk
self.moe_num_shared_experts = moe_num_shared_experts
super().__init__(pad_token_id=pad_token_id, **super_kwargs)
| AriaTextConfig |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-fauna/unit_tests/test_util.py | {
"start": 565,
"end": 938
} | class ____:
def __init__(self, mode: str, column=""):
self.mode = mode
self.column = column
@staticmethod
def ignore() -> "DeletionsConfig":
return DeletionsConfig(mode="ignore")
@staticmethod
def deleted_field(column: str) -> "DeletionsConfig":
return DeletionsConfig(mode="deleted_field", column=column)
| DeletionsConfig |
python | pytest-dev__pytest | testing/example_scripts/fixtures/custom_item/conftest.py | {
"start": 79,
"end": 148
} | class ____(pytest.Item):
def runtest(self):
pass
| CustomItem |
python | pennersr__django-allauth | allauth/account/app_settings.py | {
"start": 81,
"end": 18932
} | class ____:
class AuthenticationMethod(str, Enum):
USERNAME = "username"
EMAIL = "email"
USERNAME_EMAIL = "username_email"
class LoginMethod(str, Enum):
USERNAME = "username"
EMAIL = "email"
PHONE = "phone"
class EmailVerificationMethod(str, Enum):
# After signing up, keep the user account inactive until the email
# address is verified
MANDATORY = "mandatory"
# Allow login with unverified email (email verification is
# still sent)
OPTIONAL = "optional"
# Don't send email verification mails during signup
NONE = "none"
def __init__(self, prefix):
self.prefix = prefix
def _setting(self, name, dflt):
from allauth.utils import get_setting
return get_setting(self.prefix + name, dflt)
@property
def PREVENT_ENUMERATION(self):
return self._setting("PREVENT_ENUMERATION", True)
@property
def DEFAULT_HTTP_PROTOCOL(self):
return self._setting("DEFAULT_HTTP_PROTOCOL", "http").lower()
@property
def EMAIL_CONFIRMATION_EXPIRE_DAYS(self):
"""
Determines the expiration date of email confirmation mails (#
of days)
"""
from django.conf import settings
return self._setting(
"EMAIL_CONFIRMATION_EXPIRE_DAYS",
getattr(settings, "EMAIL_CONFIRMATION_DAYS", 3),
)
@property
def EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL(self):
"""
The URL to redirect to after a successful email confirmation, in
case of an authenticated user
"""
return self._setting("EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL", None)
@property
def EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL(self):
"""
The URL to redirect to after a successful email confirmation, in
case no user is logged in
"""
from django.conf import settings
return self._setting(
"EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL", settings.LOGIN_URL
)
@property
def EMAIL_REQUIRED(self):
"""
The user is required to hand over an email address when signing up
"""
warnings.warn(
"app_settings.EMAIL_REQUIRED is deprecated, use: app_settings.SIGNUP_FIELDS['email']['required']",
stacklevel=3,
)
email = self.SIGNUP_FIELDS.get("email")
return email and email["required"]
@property
def EMAIL_VERIFICATION(self):
"""
See email verification method
"""
ret = self._setting("EMAIL_VERIFICATION", self.EmailVerificationMethod.OPTIONAL)
# Deal with legacy (boolean based) setting
if ret is True:
ret = self.EmailVerificationMethod.MANDATORY
elif ret is False:
ret = self.EmailVerificationMethod.OPTIONAL
return self.EmailVerificationMethod(ret)
@property
def EMAIL_VERIFICATION_BY_CODE_ENABLED(self):
return self._setting("EMAIL_VERIFICATION_BY_CODE_ENABLED", False)
@property
def EMAIL_VERIFICATION_BY_CODE_MAX_ATTEMPTS(self):
return self._setting("EMAIL_VERIFICATION_BY_CODE_MAX_ATTEMPTS", 3)
@property
def EMAIL_VERIFICATION_BY_CODE_TIMEOUT(self):
return self._setting("EMAIL_VERIFICATION_BY_CODE_TIMEOUT", 15 * 60)
@property
def EMAIL_VERIFICATION_MAX_CHANGE_COUNT(self) -> int:
"""
The maximum number of times the email can be changed after signup at
the email veriication stage.
"""
v = self._setting("EMAIL_VERIFICATION_SUPPORTS_CHANGE", False)
if isinstance(v, bool):
v = 2 if v else 0
return v
@property
def EMAIL_VERIFICATION_MAX_RESEND_COUNT(self) -> int:
"""
The maximum number of times the user can request a new email verification code.
"""
v = self._setting("EMAIL_VERIFICATION_SUPPORTS_RESEND", False)
if isinstance(v, bool):
v = 2 if v else 0
return v
@property
def MAX_EMAIL_ADDRESSES(self):
return self._setting("MAX_EMAIL_ADDRESSES", None)
@property
def CHANGE_EMAIL(self):
return self._setting("CHANGE_EMAIL", False)
@property
def AUTHENTICATION_METHOD(self):
warnings.warn(
"app_settings.AUTHENTICATION_METHOD is deprecated, use: app_settings.LOGIN_METHODS",
stacklevel=3,
)
methods = self.LOGIN_METHODS
if self.LoginMethod.EMAIL in methods and self.LoginMethod.USERNAME in methods:
return "username_email"
elif self.LoginMethod.EMAIL in methods:
return "email"
elif self.LoginMethod.USERNAME in methods:
return "username"
else:
raise NotADirectoryError
@property
def LOGIN_METHODS(self) -> FrozenSet[LoginMethod]:
methods = self._setting("LOGIN_METHODS", None)
if methods is None:
auth_method = self._setting(
"AUTHENTICATION_METHOD", self.AuthenticationMethod.USERNAME
)
if auth_method == self.AuthenticationMethod.USERNAME_EMAIL:
methods = {self.LoginMethod.EMAIL, self.LoginMethod.USERNAME}
else:
methods = {self.LoginMethod(auth_method)}
return frozenset([self.LoginMethod(m) for m in methods])
@property
def EMAIL_MAX_LENGTH(self):
"""
Adjust max_length of email addresses
"""
return self._setting("EMAIL_MAX_LENGTH", 254)
@property
def PHONE_VERIFICATION_ENABLED(self):
return self._setting("PHONE_VERIFICATION_ENABLED", True)
@property
def PHONE_VERIFICATION_MAX_ATTEMPTS(self):
return self._setting("PHONE_VERIFICATION_MAX_ATTEMPTS", 3)
@property
def PHONE_VERIFICATION_MAX_CHANGE_COUNT(self) -> int:
"""
The maximum number of times the phone number can be changed after
signup at the phone number verification stage.
"""
v = self._setting("PHONE_VERIFICATION_SUPPORTS_CHANGE", False)
if isinstance(v, bool):
v = 2 if v else 0
return v
@property
def PHONE_VERIFICATION_MAX_RESEND_COUNT(self) -> int:
"""
The maximum number of times the user can request a new phone number
verification code.
"""
v = self._setting("PHONE_VERIFICATION_SUPPORTS_RESEND", False)
if isinstance(v, bool):
v = 2 if v else 0
return v
@property
def PHONE_VERIFICATION_TIMEOUT(self):
return self._setting("PHONE_VERIFICATION_TIMEOUT", 15 * 60)
@property
def UNIQUE_EMAIL(self):
"""
Enforce uniqueness of email addresses
"""
return self._setting("UNIQUE_EMAIL", True)
@property
def SIGNUP_EMAIL_ENTER_TWICE(self):
"""
Signup email verification
"""
warnings.warn(
"app_settings.SIGNUP_EMAIL_ENTER_TWICE is deprecated, use: 'email2' in app_settings.SIGNUP_FIELDS",
stacklevel=3,
)
return "email2" in self.SIGNUP_FIELDS
@property
def SIGNUP_PASSWORD_ENTER_TWICE(self):
"""
Signup password verification
"""
warnings.warn(
"app_settings.SIGNUP_PASSWORD_ENTER_TWICE is deprecated, use: 'password2' in app_settings.SIGNUP_FIELDS",
stacklevel=3,
)
return "password2" in self.SIGNUP_FIELDS
@property
def SIGNUP_REDIRECT_URL(self):
from django.conf import settings
return self._setting("SIGNUP_REDIRECT_URL", settings.LOGIN_REDIRECT_URL)
@property
def PASSWORD_MIN_LENGTH(self):
"""
Minimum password Length
"""
from django.conf import settings
ret = None
if not settings.AUTH_PASSWORD_VALIDATORS:
ret = self._setting("PASSWORD_MIN_LENGTH", 6)
return ret
@property
def RATE_LIMITS(self):
rls = self._setting("RATE_LIMITS", {})
if rls is False:
return {}
attempts_amount = self._setting("LOGIN_ATTEMPTS_LIMIT", 5)
attempts_timeout = self._setting("LOGIN_ATTEMPTS_TIMEOUT", 60 * 5)
login_failed_rl = None
if attempts_amount and attempts_timeout:
login_failed_rl = f"10/m/ip,{attempts_amount}/{attempts_timeout}s/key"
if self.EMAIL_VERIFICATION_BY_CODE_ENABLED:
confirm_email_rl = "1/10s/key"
else:
cooldown = self._setting("EMAIL_CONFIRMATION_COOLDOWN", 3 * 60)
confirm_email_rl = None
if cooldown:
confirm_email_rl = f"1/{cooldown}s/key"
ret = {
# Change password view (for users already logged in)
"change_password": "5/m/user",
# Change phone number
"change_phone": "1/m/user",
# Email management (e.g. add, remove, change primary)
"manage_email": "10/m/user",
# Request a password reset, global rate limit per IP
"reset_password": "20/m/ip,5/m/key",
# Reauthentication for users already logged in
"reauthenticate": "10/m/user",
# Password reset (the view the password reset email links to).
"reset_password_from_key": "20/m/ip",
# Signups.
"signup": "20/m/ip",
# Logins.
"login": "30/m/ip",
# Request a login code: key is the email.
"request_login_code": "20/m/ip,3/m/key",
# Logins.
"login_failed": login_failed_rl,
# Verify email (to be renamed to verify_email)
"confirm_email": confirm_email_rl,
# Verify phone
"verify_phone": "1/30s/key,3/m/ip",
}
ret.update(rls)
return ret
@property
def EMAIL_SUBJECT_PREFIX(self):
"""
Subject-line prefix to use for email messages sent
"""
return self._setting("EMAIL_SUBJECT_PREFIX", None)
@property
def SIGNUP_FORM_CLASS(self):
"""
Signup form
"""
return self._setting("SIGNUP_FORM_CLASS", None)
@property
def SIGNUP_FORM_HONEYPOT_FIELD(self):
"""
Honeypot field name. Empty string or ``None`` will disable honeypot behavior.
"""
return self._setting("SIGNUP_FORM_HONEYPOT_FIELD", None)
@property
def SIGNUP_FIELDS(self) -> dict:
fields = self._setting("SIGNUP_FIELDS", None)
if not fields:
fields = []
username = self._setting("USERNAME_REQUIRED", True)
email = self._setting("EMAIL_REQUIRED", False)
email2 = self._setting("SIGNUP_EMAIL_ENTER_TWICE", False)
password2 = self._setting(
"SIGNUP_PASSWORD_ENTER_TWICE",
self._setting("SIGNUP_PASSWORD_VERIFICATION", True),
)
if email:
fields.append("email*")
else:
fields.append("email")
if email2:
fields.append("email2*" if email else "email2")
if username:
fields.append("username*")
fields.append("password1*")
if password2:
fields.append("password2*")
ret = {}
for field in fields:
f, req, _ = field.partition("*")
ret[f] = {"required": bool(req)}
return ret
@property
def USERNAME_REQUIRED(self):
"""
The user is required to enter a username when signing up
"""
warnings.warn(
"app_settings.USERNAME_REQUIRED is deprecated, use: app_settings.SIGNUP_FIELDS['username']['required']",
stacklevel=3,
)
username = self.SIGNUP_FIELDS.get("username")
return username and username["required"]
@property
def USERNAME_MIN_LENGTH(self):
"""
Minimum username Length
"""
return self._setting("USERNAME_MIN_LENGTH", 1)
@property
def USERNAME_BLACKLIST(self):
"""
List of usernames that are not allowed
"""
return self._setting("USERNAME_BLACKLIST", [])
@property
def PASSWORD_INPUT_RENDER_VALUE(self):
"""
render_value parameter as passed to PasswordInput fields
"""
return self._setting("PASSWORD_INPUT_RENDER_VALUE", False)
@property
def ADAPTER(self):
return self._setting("ADAPTER", "allauth.account.adapter.DefaultAccountAdapter")
@property
def CONFIRM_EMAIL_ON_GET(self):
return self._setting("CONFIRM_EMAIL_ON_GET", False)
@property
def AUTHENTICATED_LOGIN_REDIRECTS(self):
return self._setting("AUTHENTICATED_LOGIN_REDIRECTS", True)
@property
def LOGIN_ON_EMAIL_CONFIRMATION(self):
"""
Automatically log the user in once they confirmed their email address
"""
return self._setting("LOGIN_ON_EMAIL_CONFIRMATION", False)
@property
def LOGIN_ON_PASSWORD_RESET(self):
"""
Automatically log the user in immediately after resetting
their password.
"""
return self._setting("LOGIN_ON_PASSWORD_RESET", False)
@property
def LOGOUT_REDIRECT_URL(self):
from django.conf import settings
return self._setting("LOGOUT_REDIRECT_URL", settings.LOGOUT_REDIRECT_URL or "/")
@property
def LOGOUT_ON_GET(self):
return self._setting("LOGOUT_ON_GET", False)
@property
def LOGOUT_ON_PASSWORD_CHANGE(self):
return self._setting("LOGOUT_ON_PASSWORD_CHANGE", False)
@property
def USER_MODEL_USERNAME_FIELD(self):
return self._setting("USER_MODEL_USERNAME_FIELD", "username")
@property
def USER_MODEL_EMAIL_FIELD(self):
return self._setting("USER_MODEL_EMAIL_FIELD", "email")
@property
def SESSION_COOKIE_AGE(self):
"""
Deprecated -- use Django's settings.SESSION_COOKIE_AGE instead
"""
from django.conf import settings
return self._setting("SESSION_COOKIE_AGE", settings.SESSION_COOKIE_AGE)
@property
def SESSION_REMEMBER(self):
"""
Controls the life time of the session. Set to `None` to ask the user
("Remember me?"), `False` to not remember, and `True` to always
remember.
"""
return self._setting("SESSION_REMEMBER", None)
@property
def TEMPLATE_EXTENSION(self):
"""
A string defining the template extension to use, defaults to `html`.
"""
return self._setting("TEMPLATE_EXTENSION", "html")
@property
def FORMS(self):
return self._setting("FORMS", {})
@property
def EMAIL_CONFIRMATION_HMAC(self):
return self._setting("EMAIL_CONFIRMATION_HMAC", True)
@property
def SALT(self):
return self._setting("SALT", "account")
@property
def PRESERVE_USERNAME_CASING(self):
return self._setting("PRESERVE_USERNAME_CASING", True)
@property
def USERNAME_VALIDATORS(self):
from django.contrib.auth import get_user_model
from django.core.exceptions import ImproperlyConfigured
from allauth.utils import import_attribute
path = self._setting("USERNAME_VALIDATORS", None)
if path:
ret = import_attribute(path)
if not isinstance(ret, list):
raise ImproperlyConfigured(
"ACCOUNT_USERNAME_VALIDATORS is expected to be a list"
)
else:
if self.USER_MODEL_USERNAME_FIELD is not None:
ret = (
get_user_model()
._meta.get_field(self.USER_MODEL_USERNAME_FIELD)
.validators
)
else:
ret = []
return ret
@property
def PASSWORD_RESET_BY_CODE_ENABLED(self):
return self._setting("PASSWORD_RESET_BY_CODE_ENABLED", False)
@property
def PASSWORD_RESET_BY_CODE_MAX_ATTEMPTS(self):
return self._setting("PASSWORD_RESET_BY_CODE_MAX_ATTEMPTS", 3)
@property
def PASSWORD_RESET_BY_CODE_TIMEOUT(self):
return self._setting("PASSWORD_RESET_BY_CODE_TIMEOUT", 3 * 60)
@property
def PASSWORD_RESET_TOKEN_GENERATOR(self):
from allauth.account.forms import EmailAwarePasswordResetTokenGenerator
from allauth.utils import import_attribute
token_generator_path = self._setting("PASSWORD_RESET_TOKEN_GENERATOR", None)
if token_generator_path is not None:
token_generator = import_attribute(token_generator_path)
else:
token_generator = EmailAwarePasswordResetTokenGenerator
return token_generator
@property
def EMAIL_UNKNOWN_ACCOUNTS(self):
return self._setting("EMAIL_UNKNOWN_ACCOUNTS", True)
@property
def REAUTHENTICATION_TIMEOUT(self):
return self._setting("REAUTHENTICATION_TIMEOUT", 300)
@property
def EMAIL_NOTIFICATIONS(self):
return self._setting("EMAIL_NOTIFICATIONS", False)
@property
def REAUTHENTICATION_REQUIRED(self):
return self._setting("REAUTHENTICATION_REQUIRED", False)
@property
def LOGIN_BY_CODE_ENABLED(self):
return self._setting("LOGIN_BY_CODE_ENABLED", False)
@property
def LOGIN_BY_CODE_TRUST_ENABLED(self):
return self._setting("LOGIN_BY_CODE_TRUST_ENABLED", False)
@property
def LOGIN_BY_CODE_MAX_ATTEMPTS(self):
return self._setting("LOGIN_BY_CODE_MAX_ATTEMPTS", 3)
@property
def LOGIN_BY_CODE_TIMEOUT(self):
return self._setting("LOGIN_BY_CODE_TIMEOUT", 3 * 60)
@property
def LOGIN_TIMEOUT(self):
"""
The maximum allowed time (in seconds) for a login to go through the
various login stages. This limits, for example, the time span that the
2FA stage remains available.
"""
return self._setting("LOGIN_TIMEOUT", 15 * 60)
@property
def LOGIN_BY_CODE_REQUIRED(self) -> Union[bool, Set[str]]:
"""
When enabled (in case of ``True``), every user logging in is
required to input a login confirmation code sent by email.
Alternatively, you can specify a set of authentication methods
(``"password"``, ``"mfa"``, or ``"socialaccount"``) for which login
codes are required.
"""
value = self._setting("LOGIN_BY_CODE_REQUIRED", False)
if isinstance(value, bool):
return value
return set(value)
_app_settings = AppSettings("ACCOUNT_")
def __getattr__(name):
# See https://peps.python.org/pep-0562/
return getattr(_app_settings, name)
| AppSettings |
python | scrapy__scrapy | tests/test_spidermiddleware.py | {
"start": 16814,
"end": 18572
} | class ____(TestBaseAsyncSpiderMiddleware):
ITEM_TYPE = dict
MW_SIMPLE = ProcessSpiderOutputSimpleMiddleware
MW_ASYNCGEN = ProcessSpiderOutputAsyncGenMiddleware
MW_UNIVERSAL = ProcessSpiderOutputUniversalMiddleware
async def _get_middleware_result(
self, *mw_classes: type[Any], start_index: int | None = None
) -> Any:
setting = self._construct_mw_setting(*mw_classes, start_index=start_index)
self.crawler = get_crawler(Spider, {"SPIDER_MIDDLEWARES": setting})
self.crawler.spider = self.crawler._create_spider("foo")
self.mwman = SpiderMiddlewareManager.from_crawler(self.crawler)
return await self.mwman.scrape_response_async(
self._scrape_func, self.response, self.request
)
@deferred_f_from_coro_f
async def test_just_builtin(self):
await self._test_simple_base()
@deferred_f_from_coro_f
async def test_builtin_simple(self):
await self._test_simple_base(self.MW_SIMPLE, start_index=1000)
@deferred_f_from_coro_f
async def test_builtin_async(self):
"""Upgrade"""
await self._test_asyncgen_base(self.MW_ASYNCGEN, start_index=1000)
@deferred_f_from_coro_f
async def test_builtin_universal(self):
await self._test_simple_base(self.MW_UNIVERSAL, start_index=1000)
@deferred_f_from_coro_f
async def test_simple_builtin(self):
await self._test_simple_base(self.MW_SIMPLE)
@deferred_f_from_coro_f
async def test_async_builtin(self):
"""Upgrade"""
await self._test_asyncgen_base(self.MW_ASYNCGEN)
@deferred_f_from_coro_f
async def test_universal_builtin(self):
await self._test_simple_base(self.MW_UNIVERSAL)
| TestBuiltinMiddlewareSimple |
python | astropy__astropy | astropy/io/votable/tree.py | {
"start": 68545,
"end": 73250
} | class ____(
Element,
_IDProperty,
_NameProperty,
_UtypeProperty,
_UcdProperty,
_DescriptionProperty,
):
"""
GROUP_ element: groups FIELD_ and PARAM_ elements.
This information is currently ignored by the vo package---that is
the columns in the recarray are always flat---but the grouping
information is stored so that it can be written out again to the
XML file.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(
self,
table,
ID=None,
name=None,
ref=None,
ucd=None,
utype=None,
id=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._table = table
self.ID = resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)
self.name = name
self.ref = ref
self.ucd = ucd
self.utype = utype
self.description = None
self._entries = HomogeneousList((FieldRef, ParamRef, Group, Param))
warn_unknown_attrs("GROUP", extra.keys(), config, pos)
def __repr__(self):
return f"<GROUP>... {len(self._entries)} entries ...</GROUP>"
@property
def ref(self):
"""
Currently ignored, as it's not clear from the spec how this is
meant to work.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def entries(self):
"""
[read-only] A list of members of the GROUP_. This list may
only contain objects of type :class:`Param`, :class:`Group`,
:class:`ParamRef` and :class:`FieldRef`.
"""
return self._entries
def _add_fieldref(self, iterator, tag, data, config, pos):
fieldref = FieldRef(self._table, config=config, pos=pos, **data)
self.entries.append(fieldref)
def _add_paramref(self, iterator, tag, data, config, pos):
paramref = ParamRef(self._table, config=config, pos=pos, **data)
self.entries.append(paramref)
def _add_param(self, iterator, tag, data, config, pos):
if isinstance(self._table, VOTableFile):
votable = self._table
else:
votable = self._table._votable
param = Param(votable, config=config, pos=pos, **data)
self.entries.append(param)
param.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self._table, config=config, pos=pos, **data)
self.entries.append(group)
group.parse(iterator, config)
def parse(self, iterator, config):
tag_mapping = {
"FIELDref": self._add_fieldref,
"PARAMref": self._add_paramref,
"PARAM": self._add_param,
"GROUP": self._add_group,
"DESCRIPTION": self._ignore_add,
}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos
)
else:
if tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, "GROUP", config, pos)
self.description = data or None
elif tag == "GROUP":
break
return self
def to_xml(self, w, **kwargs):
with w.tag(
"GROUP", attrib=w.object_attrs(self, ["ID", "name", "ref", "ucd", "utype"])
):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for entry in self.entries:
entry.to_xml(w, **kwargs)
def iter_fields_and_params(self):
"""
Recursively iterate over all :class:`Param` elements in this
:class:`Group`.
"""
for entry in self.entries:
if isinstance(entry, Param):
yield entry
elif isinstance(entry, Group):
yield from entry.iter_fields_and_params()
def iter_groups(self):
"""
Recursively iterate over all sub-:class:`Group` instances in
this :class:`Group`.
"""
for entry in self.entries:
if isinstance(entry, Group):
yield entry
yield from entry.iter_groups()
| Group |
python | great-expectations__great_expectations | docs/docusaurus/docs/reference/learn/data_quality_use_cases/integrity_resources/integrity_workflow.py | {
"start": 1788,
"end": 3406
} | class ____(gxe.UnexpectedRowsExpectation):
"""Expectation to validate that transfers are sent (`sent_ts`) and received (`received_ts`) within 45 seconds."""
description = "Transfers arrive within 45 seconds"
unexpected_rows_query: str = """
select *
from {batch}
where extract(epoch from (age(received_ts, sent_ts))) > 45
"""
validation_result_custom_sql_expectation = batch_transfer_txn.validate(
ExpectTransfersToArriveWithin45Seconds()
)
# </snippet>
assert validation_result_built_in_expectation["success"] is True
assert validation_result_custom_sql_expectation["success"] is False
# <snippet name="docs/docusaurus/docs/reference/learn/data_quality_use_cases/integrity_resources/integrity_workflow.py cross-table workflow">
import great_expectations as gx
import great_expectations.expectations as gxe
# Create Data Context.
context = gx.get_context()
# Connect to data and create Data Source.
CONNECTION_STRING = """
postgresql+psycopg2://try_gx:try_gx@postgres.workshops.greatexpectations.io/gx_learn_data_quality
"""
data_source = context.data_sources.add_postgres(
"postgres", connection_string=CONNECTION_STRING
)
# Create Data Asset, Batch Definition, and Batch.
data_asset_transfers = data_source.add_table_asset(
name="transfers", table_name="integrity_transfers"
)
batch_def_transfers = data_asset_transfers.add_batch_definition_whole_table(
"transfers batch definition"
)
batch_transfers = batch_def_transfers.get_batch()
# Create custom SQL Expectation by subclassing gxe.UnexpectedRowsExpectation.
| ExpectTransfersToArriveWithin45Seconds |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 3127,
"end": 26590
} | class ____(Qwen2_5OmniPreTrainedModel):
input_modalities = ("image", "video", "audio", "text")
def _prepare_4d_causal_attention_mask_with_cache_position(
self,
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
device: torch.device,
min_dtype: float,
cache_position: torch.Tensor,
batch_size: int,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
device (`torch.device`):
The device to place the 4D attention mask on.
min_dtype (`float`):
The minimum value representable with the dtype `dtype`.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
causal_mask = torch.full(
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
padding_mask, min_dtype
)
return causal_mask
def get_llm_pos_ids_for_vision(
self,
start_idx: int,
vision_idx: int,
spatial_merge_size: int,
t_index: list[int],
grid_hs: list[int],
grid_ws: list[int],
):
llm_pos_ids_list = []
llm_grid_h = grid_hs[vision_idx] // spatial_merge_size
llm_grid_w = grid_ws[vision_idx] // spatial_merge_size
h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(len(t_index), -1, llm_grid_w).flatten()
w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(len(t_index), llm_grid_h, -1).flatten()
t_index = torch.Tensor(t_index).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten().long()
_llm_pos_ids = torch.stack([t_index, h_index, w_index])
llm_pos_ids_list.append(_llm_pos_ids + start_idx) # + 1 ) # 12.09 by malinhan
llm_pos_ids = torch.cat(llm_pos_ids_list, dim=1)
return llm_pos_ids
def get_chunked_index(
self, token_indices: torch.Tensor, tokens_per_chunk: int, remove_index: int
) -> list[tuple[int, int]]:
"""
Splits token index list into chunks based on token value ranges.
Given a list of token indices, returns a list of (start, end) index tuples representing
slices of the list where the token values fall within successive ranges of `t_ntoken_per_chunk`.
For example, if `t_ntoken_per_chunk` is 1000, the function will create chunks such that:
- the first chunk contains token values < 1000,
- the second chunk contains values >= 1000 and < 2000, and so on.
Parameters:
token_indices (`torch.Tensor` of shape `(seq_len, )`): A monotonically increasing list of
token index values.
t_ntoken_per_chunk (`int`): Number of tokens per chunk (used as the chunk size threshold).
remove_index (`int`) An index id to subtract from `token_indices` before chunking
Returns:
`list[tuple[int, int]]`: A list of tuples, each representing the start (inclusive)
and end (exclusive) indices of a chunk in `token_indices`.
"""
def _iter():
i, start_idx = 0, 0 # skip bos token
current_chunk = 1
while i < len(token_indices): # skip eos token
if token_indices[i] - remove_index >= current_chunk * tokens_per_chunk:
yield (start_idx, i)
start_idx = i
current_chunk += 1
i += 1
yield (start_idx, len(token_indices))
return list(_iter())
def get_rope_index(
self,
input_ids: Optional[torch.LongTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
use_audio_in_video: bool = False,
audio_seqlens: Optional[torch.LongTensor] = None,
second_per_grids: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Calculate the 3D rope index based on image and video's temporal, height and width in LLM.
Explanation:
Each embedding sequence contains vision embedding and text embedding or just contains text embedding.
For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs.
Examples:
input_ids: [T T T T T], here T is for text.
temporal position_ids: [0, 1, 2, 3, 4]
height position_ids: [0, 1, 2, 3, 4]
width position_ids: [0, 1, 2, 3, 4]
For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part
and 1D rotary position embedding for text part.
Examples:
Temporal (Time): 3 patches, representing different segments of the video in time.
Height: 2 patches, dividing each frame vertically.
Width: 2 patches, dividing each frame horizontally.
We also have some important parameters:
fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second.
tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per second. So each second of the video will be represented with 25 separate time points. It essentially defines the temporal granularity.
temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames.
interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will be have a difference of 50 in the temporal position IDs.
input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]
vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
text temporal position_ids: [101, 102, 103, 104, 105]
text height position_ids: [101, 102, 103, 104, 105]
text width position_ids: [101, 102, 103, 104, 105]
Here we calculate the text start position_ids as the max vision position_ids plus 1.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
use_audio_in_video (`bool`, *optional*):
If set to `True`, use the audio in video.
audio_seqlens (`torch.LongTensor` of shape `(num_audios)`, *optional*):
The length of feature shape of each audio in LLM.
second_per_grids (`torch.LongTensor` of shape `(num_videos)`, *optional*):
The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs.
Returns:
position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`)
mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`)
"""
spatial_merge_size = self.spatial_merge_size
image_token_id = self.config.image_token_id
video_token_id = self.config.video_token_id
audio_token_id = self.config.audio_token_id
vision_start_token_id = self.config.vision_start_token_id
audio_start_token_id = self.config.audio_start_token_id
position_id_per_seconds = self.config.position_id_per_seconds
seconds_per_chunk = self.config.seconds_per_chunk
mrope_position_deltas = []
if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
total_input_ids = input_ids
if attention_mask is not None:
attention_mask = attention_mask == 1
position_ids = torch.ones(
3,
input_ids.shape[0],
input_ids.shape[1],
dtype=input_ids.dtype,
device=input_ids.device,
)
image_idx, video_idx, audio_idx = 0, 0, 0
for i, input_ids in enumerate(total_input_ids):
if attention_mask is not None:
input_ids = input_ids[attention_mask[i]]
image_nums, video_nums, audio_nums = 0, 0, 0
vision_start_indices = torch.argwhere(input_ids == vision_start_token_id).squeeze(1)
vision_tokens = input_ids[vision_start_indices + 1]
audio_nums = torch.sum(input_ids == audio_start_token_id)
image_nums = (vision_tokens == image_token_id).sum()
video_nums = (
(vision_tokens == audio_start_token_id).sum()
if use_audio_in_video
else (vision_tokens == video_token_id).sum()
)
input_tokens = input_ids.tolist()
llm_pos_ids_list: list = []
st = 0
remain_images, remain_videos, remain_audios = image_nums, video_nums, audio_nums
multimodal_nums = (
image_nums + audio_nums if use_audio_in_video else image_nums + video_nums + audio_nums
)
for _ in range(multimodal_nums):
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
if image_token_id in input_tokens and remain_images > 0:
ed_image = input_tokens.index(image_token_id, st)
else:
ed_image = len(input_tokens) + 1
if video_token_id in input_tokens and remain_videos > 0:
ed_video = input_tokens.index(video_token_id, st)
else:
ed_video = len(input_tokens) + 1
if audio_token_id in input_tokens and remain_audios > 0:
ed_audio = input_tokens.index(audio_token_id, st)
else:
ed_audio = len(input_tokens) + 1
min_ed = min(ed_image, ed_video, ed_audio)
if min_ed == ed_audio:
text_len = min_ed - st - 1
if text_len != 0:
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
bos_len = 1
llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx)
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
audio_len = ((audio_seqlens[audio_idx] - 1) // 2 + 1 - 2) // 2 + 1
llm_pos_ids = torch.arange(audio_len).view(1, -1).expand(3, -1) + st_idx
llm_pos_ids_list.append(llm_pos_ids)
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
eos_len = 1
llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx)
st += text_len + bos_len + audio_len + eos_len
audio_idx += 1
remain_audios -= 1
elif min_ed == ed_image:
text_len = min_ed - st - 1
if text_len != 0:
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
bos_len = 1
llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx)
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
grid_t = image_grid_thw[image_idx][0]
grid_hs = image_grid_thw[:, 1]
grid_ws = image_grid_thw[:, 2]
t_index = (torch.arange(grid_t) * 1 * position_id_per_seconds).long()
llm_pos_ids = self.get_llm_pos_ids_for_vision(
st_idx, image_idx, spatial_merge_size, t_index, grid_hs, grid_ws
)
image_len = image_grid_thw[image_idx].prod() // (spatial_merge_size**2)
llm_pos_ids_list.append(llm_pos_ids)
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
eos_len = 1
llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx)
st += text_len + bos_len + image_len + eos_len
image_idx += 1
remain_images -= 1
elif min_ed == ed_video and not use_audio_in_video:
text_len = min_ed - st - 1
if text_len != 0:
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
bos_len = 1
llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx)
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
grid_t = video_grid_thw[video_idx][0]
grid_hs = video_grid_thw[:, 1]
grid_ws = video_grid_thw[:, 2]
t_index = (
torch.arange(grid_t) * second_per_grids[video_idx].cpu().float() * position_id_per_seconds
).long()
llm_pos_ids = self.get_llm_pos_ids_for_vision(
st_idx, video_idx, spatial_merge_size, t_index, grid_hs, grid_ws
)
video_len = video_grid_thw[video_idx].prod() // (spatial_merge_size**2)
llm_pos_ids_list.append(llm_pos_ids)
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
eos_len = 1
llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx)
st += text_len + bos_len + video_len + eos_len
video_idx += 1
remain_videos -= 1
elif min_ed == ed_video and use_audio_in_video:
text_len = min_ed - st - 2
if text_len != 0:
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
bos_len = 1
llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx)
llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx)
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
audio_len = ((audio_seqlens[audio_idx] - 1) // 2 + 1 - 2) // 2 + 1
audio_llm_pos_ids = torch.arange(audio_len).view(1, -1).expand(3, -1) + st_idx
grid_t = video_grid_thw[video_idx][0]
grid_hs = video_grid_thw[:, 1]
grid_ws = video_grid_thw[:, 2]
t_index = (
torch.arange(grid_t) * second_per_grids[video_idx].cpu().float() * position_id_per_seconds
).long()
video_llm_pos_ids = self.get_llm_pos_ids_for_vision(
st_idx, video_idx, spatial_merge_size, t_index, grid_hs, grid_ws
)
t_ntoken_per_chunk = int(position_id_per_seconds * seconds_per_chunk)
video_chunk_indexes = self.get_chunked_index(video_llm_pos_ids[0], t_ntoken_per_chunk, st_idx)
audio_chunk_indexes = self.get_chunked_index(audio_llm_pos_ids[0], t_ntoken_per_chunk, st_idx)
sub_len = 0
for j in range(max(len(video_chunk_indexes), len(audio_chunk_indexes))):
video_chunk_index = video_chunk_indexes[j] if j < len(video_chunk_indexes) else None
audio_chunk_index = audio_chunk_indexes[j] if j < len(audio_chunk_indexes) else None
if video_chunk_index is not None:
sub_len += video_chunk_index[1] - video_chunk_index[0]
llm_pos_ids_list.append(
video_llm_pos_ids[:, video_chunk_index[0] : video_chunk_index[1]]
)
if audio_chunk_index is not None:
sub_len += audio_chunk_index[1] - audio_chunk_index[0]
llm_pos_ids_list.append(
audio_llm_pos_ids[:, audio_chunk_index[0] : audio_chunk_index[1]]
)
video_len = video_grid_thw[video_idx].prod() // (spatial_merge_size**2)
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
eos_len = 1
llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx)
llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx)
st += text_len + bos_len * 2 + audio_len + video_len + eos_len * 2
audio_idx += 1
video_idx += 1
remain_videos -= 1
remain_audios -= 1
if st < len(input_tokens):
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
text_len = len(input_tokens) - st
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
if attention_mask is not None:
position_ids[..., i, attention_mask[i]] = llm_positions.to(position_ids.device)
else:
position_ids[..., i, :] = llm_positions.to(position_ids.device)
mrope_position_deltas.append(llm_positions.max() + 1 - len(input_ids))
mrope_position_deltas = torch.tensor(mrope_position_deltas).unsqueeze(1).to(device=input_ids.device)
return position_ids, mrope_position_deltas
else:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device)
max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0]
mrope_position_deltas = max_position_ids + 1 - torch.sum(attention_mask, dim=-1, keepdim=True)
return position_ids, mrope_position_deltas
############################
# Start Thinker #
############################
@dataclass
@auto_docstring(
custom_intro="""
Base class for Qwen2.5OmniThinker causal language model (or autoregressive) outputs.
"""
)
| Qwen2_5OmniPreTrainedModelForConditionalGeneration |
python | pytorch__pytorch | torch/_dynamo/variables/functions.py | {
"start": 13431,
"end": 28545
} | class ____(BaseUserFunctionVariable):
"""Some unsupported user-defined global function"""
_nonvar_fields = {
"fn",
"is_constant",
*BaseUserFunctionVariable._nonvar_fields,
}
_TREE_MAP_MODULES = frozenset(
{
"optree",
"optree.ops",
"torch.utils._pytree",
"torch.utils._cxx_pytree",
}
)
@classmethod
def create_with_source(cls, value: Any, source: Any) -> "UserFunctionVariable":
install_guard(source.make_guard(GuardBuilder.CLOSURE_MATCH))
return cls(value, source=source)
def __init__(
self,
fn: types.FunctionType | torch.jit.ScriptFunction, # type: ignore[type-arg]
is_constant: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if getattr(fn, "_dynamo_marked_constant", False):
# This method should be treated as a constant for the purposes of compilation
self.is_constant = True
else:
self.is_constant = False
# TODO putting this here to avoid duplication, because we could hit this
# from several paths (e.g., SuperVariable or `var_getattr`s).
if not isinstance(fn, (types.FunctionType, torch.jit.ScriptFunction)):
unimplemented(
gb_type="can't handle functions not implemented in python ",
context=f"{fn}",
explanation="Dynamo can only handle functions defined in python",
hints=[
"Move usage of this function out of `torch.compile` region",
*graph_break_hints.INFERENCE_MODE,
],
)
# TODO(anijain2305) - Replace directly calling UserFunctionVariable with
# VariableBuilder, which handles the wrapping of _torchdynamo_inline.
# unpack @torch._dynamo.optimize()(fn) wrapped function
fn = inspect.getattr_static(fn, "_torchdynamo_inline", fn)
self.fn = fn
def as_python_constant(self) -> Any:
if istype(self, UserFunctionVariable):
return self.fn
# subclasses (such as methods) usually aren't a constant
return super().as_python_constant()
def self_args(self) -> list[VariableTracker]:
return []
def get_function(self) -> types.FunctionType:
return self.fn
def get_code(self) -> types.CodeType:
return self.fn.__code__
def python_type(self) -> type:
return types.FunctionType
def has_self(self) -> bool:
return getattr(self.fn, "__self__", None) is not None
def get_globals(self) -> dict[str, Any]:
return self.fn.__globals__
def get_source(self) -> Source:
source = self.source
if source and isinstance(self, variables.UserMethodVariable):
source = self.source_fn # type: ignore[assignment]
return source # type: ignore[return-value]
def bind_args(
self,
parent: "InstructionTranslator",
args: Sequence[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> dict[str, VariableTracker]:
"""
Assume `args` and `kwargs` are VariableTracker arguments for a call to
this function, create new bindings for initial locals.
"""
assert not self.is_constant
fn: types.FunctionType = self.fn
if not isinstance(fn, FunctionType):
raise TypeError("Only supports regular Python functions.")
root_tx = parent.output.root_tx
source = self.get_source()
result = bind_args_cached(fn, root_tx, source, args, kwargs) # type: ignore[arg-type]
init_cellvars(parent, result, fn.__code__)
closure = self.fn.__closure__ or ()
assert len(closure) == len(self.fn.__code__.co_freevars)
for idx, name, cell in zip(
itertools.count(), self.fn.__code__.co_freevars, closure
):
# TODO refactor these 3 branches.
side_effects = parent.output.side_effects
if cell in side_effects:
cell_var = side_effects[cell]
elif source:
closure_cell = GetItemSource(ClosureSource(source), idx)
closure_cell_contents = AttrSource(closure_cell, "cell_contents")
try:
contents_var = VariableTracker.build(
parent, cell.cell_contents, closure_cell_contents
)
except ValueError:
# Cell has not yet been assigned
contents_var = variables.DeletedVariable()
cell_var = side_effects.track_cell_existing(
closure_cell, cell, contents_var
)
else:
# TODO figure out why source isn't available here, and whether
# we can fix that and remove this branch.
try:
contents_var = VariableTracker.build(parent, cell.cell_contents)
except ValueError:
# Cell has not yet been assigned
contents_var = variables.DeletedVariable()
cell_var = side_effects.track_cell_existing(None, cell, contents_var)
result[name] = cell_var
return result
def var_getattr(self, tx: "InstructionTranslator", name: str) -> VariableTracker:
if name in cmp_name_to_op_mapping:
return variables.GetAttrVariable(self, name)
source = self.get_source()
return fn_var_getattr(tx, self.fn, source, name)
def call_obj_hasattr(
self, tx: "InstructionTranslator", name: str
) -> ConstantVariable:
result = hasattr(self.fn, name)
return variables.ConstantVariable.create(result)
def call_function(
self,
tx: "InstructionTranslator",
args: Sequence[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
# Handle patch_dynamo_config call
if self.fn is torch._dynamo.patch_dynamo_config:
try:
args_const = [arg.as_python_constant() for arg in args]
kwargs_const = {
key: val.as_python_constant() for key, val in kwargs.items()
}
changes = torch._dynamo.patch_dynamo_config(
*args_const, **kwargs_const
).changes
return variables.DynamoConfigPatchVariable(changes)
except AsPythonConstantNotImplementedError as e:
raise RuntimeError(
"Cannot convert patch_dynamo_config args/kwargs to constants. "
"Please fix your call to patch_dynamo_config by using simpler inputs. "
f"args: {args}, kwargs: {kwargs}"
) from e
elif self.fn is torch._dynamo.error_on_graph_break:
try:
bound = inspect.signature(self.fn).bind(*args, **kwargs)
error_on_graph_break = bound.arguments[
"error_on_graph_break"
].as_python_constant()
assert isinstance(error_on_graph_break, bool)
return variables.ErrorOnGraphBreakVariable(error_on_graph_break)
except Exception as e:
raise RuntimeError(
"Improper error_on_graph_break() call. Please fix your call to error_on_graph_break(). "
f"args: {args}, kwargs: {kwargs}"
) from e
# Handle a `nonstrict_trace(fn)` call
elif self.fn is torch._dynamo.nonstrict_trace:
bound = inspect.signature(self.fn).bind(*args, **kwargs)
fn_var = bound.args[0]
if not isinstance(fn_var, BaseUserFunctionVariable):
typ = fn_var.python_type()
msg = f"`nonstrict_trace` expects a callable, but got value of type <{typ.__name__}>"
unimplemented(
gb_type="TypeError from user code",
context=f"call_function({self.value}, {args}, {kwargs})", # type: ignore[attr-defined]
explanation=msg,
hints=[
*graph_break_hints.USER_ERROR,
],
)
if not isinstance(fn_var, UserFunctionVariable):
fn_name = fn_var.get_name()
msg = f"Applying `nonstrict_trace` to function <{fn_name}>; however, `nonstrict_trace` currently requires the function to be defined outside `torch.compile` region." # noqa: B950
unimplemented(
gb_type="Limitation of `nonstrict_trace",
context=f"{self}",
explanation=msg,
hints=[
f"make sure definition of {fn_name} is outside ",
"`torch.compile` region",
],
)
# pyrefly: ignore[missing-attribute]
fn = fn_var.fn
return variables.TorchInGraphFunctionVariable(fn, nonstrict_traceable=True)
if self.is_constant:
return invoke_and_store_as_constant(
tx, self.fn, self.get_name(), args, kwargs
)
if (
not tx.output.current_tracer.unsafe_allow_externally_visible_side_effects
and self.fn
is torch._dynamo.utils._disable_side_effect_safety_checks_for_current_subtracer
):
with torch._dynamo.side_effects.allow_externally_visible_side_effects_in_subtracer(
tx
):
return super().call_function(tx, args, kwargs)
if (
tx.output.current_tracer.under_activation_checkpoint
and not tx.output.current_tracer.allow_side_effects_under_checkpoint
):
try:
from torch.distributed.fsdp._fully_shard._fsdp_state import FSDPState
except Exception:
FSDPState = None # type: ignore[assignment, misc]
if FSDPState is not None and self.fn in [
FSDPState._pre_forward,
FSDPState._post_forward,
]:
with torch._dynamo.side_effects.allow_side_effects_under_checkpoint(tx):
return super().call_function(tx, args, kwargs)
tree_map_result = self._maybe_call_tree_map_fastpath(tx, args, kwargs)
if tree_map_result is not None:
return tree_map_result
return super().call_function(tx, args, kwargs)
def _maybe_call_tree_map_fastpath(
self,
tx: "InstructionTranslator",
args: Sequence[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> Optional[VariableTracker]:
rewrite = self._rewrite_tree_map_only_call(tx, args, kwargs)
if rewrite is not None:
tree_map_fn, tree_map_args, tree_map_kwargs = rewrite
else:
tree_map_fn = self
tree_map_args = args
tree_map_kwargs = kwargs
if not (
isinstance(tree_map_fn, UserFunctionVariable)
and tree_map_fn._is_tree_map_function()
and not ({*tree_map_kwargs} - _SUPPORTED_TREE_MAP_KWARGS)
and len(tree_map_args) >= 2
):
return None
map_fn = tree_map_args[0]
first_tree = tree_map_args[1]
rest = tree_map_args[2:]
return first_tree.call_tree_map(
tx,
tree_map_fn,
map_fn,
rest,
tree_map_kwargs,
)
def _is_tree_map_function(self) -> bool:
return (
getattr(self.fn, "__name__", None) == "tree_map"
and getattr(self.fn, "__module__", None) in self._TREE_MAP_MODULES
)
def _is_tree_map_only_function(self) -> bool:
return (
getattr(self.fn, "__name__", None) == "tree_map_only"
and getattr(self.fn, "__module__", None) in self._TREE_MAP_MODULES
)
def _rewrite_tree_map_only_call(
self,
tx: "InstructionTranslator",
args: Sequence[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> Optional[
tuple[
"UserFunctionVariable",
Sequence[VariableTracker],
dict[str, VariableTracker],
]
]:
if not self._is_tree_map_only_function():
return None
if len(args) != 3:
return None
if {*kwargs} - _TREE_MAP_ONLY_SUPPORTED_KWARGS:
return None
type_selector, map_fn, tree_arg = args
allowed_types = self._extract_tree_map_only_types(type_selector)
if allowed_types is None:
return None
tree_map_callable = self._lookup_tree_map_function()
if tree_map_callable is None:
return None
wrapped_map_fn = TreeMapOnlyFunctionVariable(
allowed_types,
map_fn,
source=getattr(map_fn, "source", None),
)
tree_map_variable = variables.UserFunctionVariable(tree_map_callable)
return tree_map_variable, [wrapped_map_fn, tree_arg], dict(kwargs)
def _lookup_tree_map_function(self) -> Optional[types.FunctionType]:
module_name = getattr(self.fn, "__module__", None)
if not module_name:
return None
module = sys.modules.get(module_name)
if module is None:
return None
tree_map = getattr(module, "tree_map", None)
if isinstance(tree_map, types.FunctionType):
return tree_map
return None
def _extract_tree_map_only_types(
self, selector: VariableTracker
) -> Optional[tuple[type, ...]]:
if not selector.is_python_constant():
return None
try:
raw_value = selector.as_python_constant()
except NotImplementedError:
return None
flattened = self._flatten_type_spec(raw_value)
if not flattened:
return None
if not all(isinstance(typ, type) for typ in flattened):
return None
return tuple(dict.fromkeys(flattened))
def _flatten_type_spec(self, value: Any) -> Optional[list[type]]:
if isinstance(value, type):
return [value]
if isinstance(value, tuple):
collected: list[type] = []
for entry in value:
flat = self._flatten_type_spec(entry)
if flat is None:
return None
collected.extend(flat)
return collected
union_type = getattr(types, "UnionType", None)
if union_type is not None and isinstance(value, union_type):
collected = []
for entry in value.__args__:
flat = self._flatten_type_spec(entry)
if flat is None:
return None
collected.extend(flat)
return collected
return None
| UserFunctionVariable |
python | lxml__lxml | src/lxml/sax.py | {
"start": 944,
"end": 5162
} | class ____(ContentHandler):
"""Build an lxml ElementTree from SAX events.
"""
def __init__(self, makeelement=None):
ContentHandler.__init__(self)
self._root = None
self._root_siblings = []
self._element_stack = []
self._default_ns = None
self._ns_mapping = { None : [None] }
self._new_mappings = {}
if makeelement is None:
makeelement = etree.Element
self._makeelement = makeelement
def _get_etree(self):
"Contains the generated ElementTree after parsing is finished."
return ElementTree(self._root)
etree = property(_get_etree, doc=_get_etree.__doc__)
def setDocumentLocator(self, locator):
pass
def startDocument(self):
pass
def endDocument(self):
pass
def startPrefixMapping(self, prefix, uri):
self._new_mappings[prefix] = uri
try:
self._ns_mapping[prefix].append(uri)
except KeyError:
self._ns_mapping[prefix] = [uri]
if prefix is None:
self._default_ns = uri
def endPrefixMapping(self, prefix):
ns_uri_list = self._ns_mapping[prefix]
ns_uri_list.pop()
if prefix is None:
self._default_ns = ns_uri_list[-1]
def _buildTag(self, ns_name_tuple):
ns_uri, local_name = ns_name_tuple
if ns_uri:
el_tag = "{%s}%s" % ns_name_tuple
elif self._default_ns:
el_tag = "{%s}%s" % (self._default_ns, local_name)
else:
el_tag = local_name
return el_tag
def startElementNS(self, ns_name, qname, attributes=None):
el_name = self._buildTag(ns_name)
if attributes:
attrs = {}
try:
iter_attributes = attributes.iteritems()
except AttributeError:
iter_attributes = attributes.items()
for name_tuple, value in iter_attributes:
if name_tuple[0]:
attr_name = "{%s}%s" % name_tuple
else:
attr_name = name_tuple[1]
attrs[attr_name] = value
else:
attrs = None
element_stack = self._element_stack
if self._root is None:
element = self._root = \
self._makeelement(el_name, attrs, self._new_mappings)
if self._root_siblings and hasattr(element, 'addprevious'):
for sibling in self._root_siblings:
element.addprevious(sibling)
del self._root_siblings[:]
else:
element = SubElement(element_stack[-1], el_name,
attrs, self._new_mappings)
element_stack.append(element)
self._new_mappings.clear()
def processingInstruction(self, target, data):
pi = ProcessingInstruction(target, data)
if self._root is None:
self._root_siblings.append(pi)
else:
self._element_stack[-1].append(pi)
def endElementNS(self, ns_name, qname):
element = self._element_stack.pop()
el_tag = self._buildTag(ns_name)
if el_tag != element.tag:
raise SaxError("Unexpected element closed: " + el_tag)
def startElement(self, name, attributes=None):
if attributes:
attributes = {(None, k): v for k, v in attributes.items()}
self.startElementNS((None, name), name, attributes)
def endElement(self, name):
self.endElementNS((None, name), name)
def characters(self, data):
last_element = self._element_stack[-1]
try:
# if there already is a child element, we must append to its tail
last_element = last_element[-1]
except IndexError:
# otherwise: append to the text
last_element.text = (last_element.text or '') + data
else:
last_element.tail = (last_element.tail or '') + data
ignorableWhitespace = characters
# Allow subscripting sax.ElementTreeContentHandler in type annotions (PEP 560)
def __class_getitem__(cls, item):
return _GenericAlias(cls, item)
| ElementTreeContentHandler |
python | dagster-io__dagster | python_modules/dagster-test/dagster_test/toys/user_computed_data_versions/external_system.py | {
"start": 856,
"end": 912
} | class ____(TypedDict):
data_version: str
| ObserveResult |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/python_venv/package.py | {
"start": 218,
"end": 475
} | class ____(Package):
"""A Spack managed Python virtual environment"""
homepage = "https://docs.python.org/3/library/venv.html"
has_code = False
version("1.0")
extends("python")
def install(self, spec, prefix):
pass
| PythonVenv |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_group_event_details.py | {
"start": 362,
"end": 1898
} | class ____(APITestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.project_1 = self.create_project()
self.release_version = uuid4().hex
release = Release.objects.create(
organization_id=self.project_1.organization_id, version=self.release_version
)
release.add_project(self.project_1)
self.event_a = self.store_event(
data={
"event_id": "a" * 32,
"environment": "development",
"timestamp": before_now(days=1).isoformat(),
"fingerprint": ["group-1"],
"release": self.release_version,
},
project_id=self.project_1.id,
)
self.event_b = self.store_event(
data={
"event_id": "b" * 32,
"environment": "production",
"timestamp": before_now(minutes=5).isoformat(),
"fingerprint": ["group-1"],
"release": self.release_version,
},
project_id=self.project_1.id,
)
self.event_c = self.store_event(
data={
"event_id": "c" * 32,
"environment": "staging",
"timestamp": before_now(minutes=1).isoformat(),
"fingerprint": ["group-1"],
"release": self.release_version,
},
project_id=self.project_1.id,
)
| GroupEventDetailsEndpointTestBase |
python | wireservice__csvkit | tests/test_utilities/test_csvstack.py | {
"start": 3849,
"end": 6283
} | class ____(TestCSVStack):
def test_no_header_row_basic(self):
self.assertRows(['--no-header-row', 'examples/no_header_row.csv', 'examples/no_header_row2.csv'], [
['a', 'b', 'c'],
['1', '2', '3'],
['4', '5', '6'],
])
def test_no_header_row_basic_stdin(self):
with open('examples/no_header_row.csv', 'rb') as f, stdin_as_string(f):
self.assertRows(['--no-header-row', '-', 'examples/no_header_row2.csv'], [
['a', 'b', 'c'],
['1', '2', '3'],
['4', '5', '6'],
])
with open('examples/no_header_row.csv', 'rb') as f, stdin_as_string(f):
self.assertRows(['--no-header-row', 'examples/no_header_row2.csv', '-'], [
['a', 'b', 'c'],
['4', '5', '6'],
['1', '2', '3'],
])
def test_grouped_manual_and_named_column(self):
self.assertRows(
[
"--no-header-row",
"--groups",
"foo,bar",
"-n",
"hey",
"examples/dummy.csv",
"examples/dummy3.csv",
],
[
["hey", "a", "b", "c"],
["foo", "a", "b", "c"],
["foo", "1", "2", "3"],
["bar", "a", "b", "c"],
["bar", "1", "2", "3"],
["bar", "1", "4", "5"],
],
)
def test_grouped_filenames(self):
self.assertRows(
[
"-H",
"--filenames",
"examples/no_header_row.csv",
"examples/no_header_row2.csv",
],
[
["group", "a", "b", "c"],
["no_header_row.csv", "1", "2", "3"],
["no_header_row2.csv", "4", "5", "6"],
],
)
def test_grouped_filenames_and_named_column(self):
self.assertRows(
[
"-H",
"--filenames",
"-n",
"hello",
"examples/no_header_row.csv",
"examples/no_header_row2.csv",
],
[
["hello", "a", "b", "c"],
["no_header_row.csv", "1", "2", "3"],
["no_header_row2.csv", "4", "5", "6"],
],
)
| TestNoHeaderRow |
python | django__django | tests/basic/models.py | {
"start": 1098,
"end": 1209
} | class ____(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4)
| PrimaryKeyWithDefault |
python | kamyu104__LeetCode-Solutions | Python/minimum-genetic-mutation.py | {
"start": 115,
"end": 929
} | class ____(object):
def minMutation(self, start, end, bank):
"""
:type start: str
:type end: str
:type bank: List[str]
:rtype: int
"""
lookup = {}
for b in bank:
lookup[b] = False
q = deque([(start, 0)])
while q:
cur, level = q.popleft()
if cur == end:
return level
for i in xrange(len(cur)):
for c in ['A', 'T', 'C', 'G']:
if cur[i] == c:
continue
next_str = cur[:i] + c + cur[i+1:]
if next_str in lookup and lookup[next_str] == False:
q.append((next_str, level+1))
lookup[next_str] = True
return -1
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 930965,
"end": 931344
} | class ____(
sgqlc.types.Type,
Node,
AuditEntry,
OrganizationAuditEntryData,
RepositoryAuditEntryData,
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("visibility",)
visibility = sgqlc.types.Field(
RepoAddMemberAuditEntryVisibility, graphql_name="visibility"
)
| RepoAddMemberAuditEntry |
python | pypa__pip | src/pip/_vendor/urllib3/packages/six.py | {
"start": 2657,
"end": 3105
} | class ____(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
| _LazyDescr |
python | h5py__h5py | h5py/tests/test_vds/test_highlevel_vds.py | {
"start": 3475,
"end": 6527
} | class ____(ut.TestCase):
def create_excalibur_fem_stripe_datafile(self, fname, nframes, excalibur_data,scale):
shape = (nframes,) + excalibur_data.fem_stripe_dimensions
max_shape = shape#(None,) + excalibur_data.fem_stripe_dimensions
chunk = (1,) + excalibur_data.fem_stripe_dimensions
with h5.File(fname, 'w', libver='latest') as f:
dset = f.create_dataset('data', shape=shape, maxshape=max_shape, chunks=chunk, dtype='uint16')
for data_value_index in np.arange(nframes):
dset[data_value_index] = excalibur_data.generate_fem_stripe_image(data_value_index*scale)
def setUp(self):
self.working_dir = tempfile.mkdtemp()
self.fname = ["stripe_%d.h5" % stripe for stripe in range(1,7)]
self.fname = [osp.join(self.working_dir, f) for f in self.fname]
nframes = 5
self.edata = ExcaliburData()
for k, raw_file in enumerate(self.fname):
self.create_excalibur_fem_stripe_datafile(raw_file, nframes, self.edata,k)
def test_excalibur_high_level(self):
outfile = osp.join(self.working_dir, make_name('excalibur{}.h5'))
f = h5.File(outfile,'w',libver='latest') # create an output file.
in_key = 'data' # where is the data at the input?
in_sh = h5.File(self.fname[0],'r')[in_key].shape # get the input shape
dtype = h5.File(self.fname[0],'r')[in_key].dtype # get the datatype
# now generate the output shape
vertical_gap = 10 # pixels spacing in the vertical
nfiles = len(self.fname)
nframes = in_sh[0]
width = in_sh[2]
height = (in_sh[1]*nfiles) + (vertical_gap*(nfiles-1))
out_sh = (nframes, height, width)
# Virtual layout is a representation of the output dataset
layout = h5.VirtualLayout(shape=out_sh, dtype=dtype)
offset = 0 # initial offset
for i, filename in enumerate(self.fname):
# A representation of the input dataset
vsource = h5.VirtualSource(filename, in_key, shape=in_sh)
layout[:, offset:(offset + in_sh[1]), :] = vsource # map them with indexing
offset += in_sh[1] + vertical_gap # increment the offset
# pass the fill value and list of maps
f.create_virtual_dataset('data', layout, fillvalue=0x1)
f.close()
f = h5.File(outfile,'r')['data']
self.assertEqual(f[3,100,0], 0.0)
self.assertEqual(f[3,260,0], 1.0)
self.assertEqual(f[3,350,0], 3.0)
self.assertEqual(f[3,650,0], 6.0)
self.assertEqual(f[3,900,0], 9.0)
self.assertEqual(f[3,1150,0], 12.0)
self.assertEqual(f[3,1450,0], 15.0)
f.file.close()
def tearDown(self):
shutil.rmtree(self.working_dir)
'''
Unit test for the high level vds interface for percival
https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf
'''
@ut.skipUnless(vds_support,
'VDS requires HDF5 >= 1.9.233')
| TestExcaliburHighLevel |
python | dask__distributed | distributed/shuffle/tests/test_shuffle.py | {
"start": 98815,
"end": 101070
} | class ____(ShuffleWorkerPlugin):
def __init__(
self,
failures: dict[str, tuple[int, type]] | None = None,
):
self.failures = failures or {}
super().__init__()
async def shuffle_inputs_done(self, comm: Comm, *args: Any, **kwargs: Any) -> None: # type: ignore
if self.worker.address in self.failures:
nfailures, exc_type = self.failures[self.worker.address]
if nfailures > 0:
nfailures -= 1
self.failures[self.worker.address] = nfailures, exc_type
if issubclass(exc_type, OSError):
# Aborting the Comm object triggers a different path in
# error handling that resembles a genuine connection failure
# like a timeout while an exception that is being raised by
# the handler will be serialized and sent to the scheduler
comm.abort()
raise exc_type # type: ignore
return await super().shuffle_inputs_done(*args, **kwargs)
@pytest.mark.parametrize(
"failures, expected_exc",
[
({}, None),
({0: (1, OSError)}, None),
({0: (1, RuntimeError)}, RuntimeError),
({0: (1, OSError), 1: (1, OSError)}, None),
({0: (1, OSError), 1: (1, RuntimeError)}, RuntimeError),
({0: (5, OSError)}, RuntimeError),
({0: (5, OSError), 1: (1, OSError)}, RuntimeError),
],
)
@pytest.mark.slow
@gen_cluster(client=True)
async def test_flaky_broadcast(c, s, a, b, failures, expected_exc):
names_to_address = {w.name: w.address for w in [a, b]}
failures = {names_to_address[name]: failures for name, failures in failures.items()}
plugin = BarrierInputsDoneOSErrorPlugin(failures)
await c.register_plugin(plugin, name="shuffle")
if expected_exc:
ctx = pytest.raises(expected_exc)
else:
ctx = contextlib.nullcontext()
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
ddf = dd.from_pandas(pdf, npartitions=2)
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
shuffled = ddf.shuffle("x", force=True)
res = c.compute(shuffled)
with ctx:
await c.gather(res)
| BarrierInputsDoneOSErrorPlugin |
python | tensorflow__tensorflow | tensorflow/python/framework/type_spec_test.py | {
"start": 3742,
"end": 4138
} | class ____:
"""A simple value type to test TypeSpec.
Contains two composite tensorstensors (x, y) and a string (color).
"""
def __init__(self, x, y, color="red"):
assert isinstance(color, str)
self.x = ops.convert_to_tensor_or_composite(x)
self.y = ops.convert_to_tensor_or_composite(y)
self.color = color
@type_spec_registry.register("tf.TwoCompositesSpec")
| TwoComposites |
python | kamyu104__LeetCode-Solutions | Python/final-array-state-after-k-multiplication-operations-i.py | {
"start": 4548,
"end": 4950
} | class ____(object):
def getFinalState(self, nums, k, multiplier):
"""
:type nums: List[int]
:type k: int
:type multiplier: int
:rtype: List[int]
"""
if multiplier == 1:
return nums
for _ in xrange(k):
i = min(xrange(len(nums)), key=lambda i: nums[i])
nums[i] *= multiplier
return nums
| Solution5 |
python | sphinx-doc__sphinx | sphinx/search/__init__.py | {
"start": 1179,
"end": 5212
} | class ____:
"""This class is the base class for search natural language preprocessors. If
you want to add support for a new language, you should override the methods
of this class.
You should override `lang` class property too (e.g. 'en', 'fr' and so on).
.. attribute:: stopwords
This is a set of stop words of the target language. Default `stopwords`
is empty. This word is used for building index and embedded in JS.
.. attribute:: js_splitter_code
Return splitter function of JavaScript version. The function should be
named as ``splitQuery``. And it should take a string and return list of
strings.
.. versionadded:: 3.0
.. attribute:: js_stemmer_code
Return stemmer class of JavaScript version. This class' name should be
``Stemmer`` and this class must have ``stemWord`` method. This string is
embedded as-is in searchtools.js.
This class is used to preprocess search word which Sphinx HTML readers
type, before searching index. Default implementation does nothing.
"""
lang: str = ''
language_name: str = ''
stopwords: Set[str] = frozenset()
js_splitter_code: str = ''
js_stemmer_rawcode: str = ''
js_stemmer_code = """
/**
* Dummy stemmer for languages without stemming rules.
*/
var Stemmer = function () {
this.stemWord = function (w) {
return w;
};
};
"""
_word_re = re.compile(r'\w+')
def __init__(self, options: dict[str, str]) -> None:
"""Initialize the class with the options the user has given."""
self.options = options
def split(self, input: str) -> list[str]:
"""This method splits a sentence into words. Default splitter splits input
at white spaces, which should be enough for most languages except CJK
languages.
"""
return self._word_re.findall(input)
def stem(self, word: str) -> str:
"""This method implements stemming algorithm of the Python version.
Default implementation does nothing. You should implement this if the
language has any stemming rules.
This class is used to preprocess search words before registering them in
the search index. The stemming of the Python version and the JS version
(given in the js_stemmer_code attribute) must be compatible.
"""
return word
def word_filter(self, word: str) -> bool:
"""Return true if the target word should be registered in the search index.
This method is called after stemming.
"""
return not word.isdigit() and word not in self.stopwords
# SearchEnglish imported after SearchLanguage is defined due to circular import
from sphinx.search.en import SearchEnglish # NoQA: E402
def parse_stop_word(source: str) -> set[str]:
"""Collect the stopwords from a snowball style word list:
.. code:: text
list of space separated stop words | optional comment
"""
result: set[str] = set()
for line in source.splitlines():
line = line.split('|')[0] # remove comment
result.update(line.split())
return result
# maps language name to module.class or directly a class
languages: dict[str, str | type[SearchLanguage]] = {
'da': 'sphinx.search.da.SearchDanish',
'de': 'sphinx.search.de.SearchGerman',
'en': SearchEnglish,
'es': 'sphinx.search.es.SearchSpanish',
'fi': 'sphinx.search.fi.SearchFinnish',
'fr': 'sphinx.search.fr.SearchFrench',
'hu': 'sphinx.search.hu.SearchHungarian',
'it': 'sphinx.search.it.SearchItalian',
'ja': 'sphinx.search.ja.SearchJapanese',
'nl': 'sphinx.search.nl.SearchDutch',
'no': 'sphinx.search.no.SearchNorwegian',
'pt': 'sphinx.search.pt.SearchPortuguese',
'ro': 'sphinx.search.ro.SearchRomanian',
'ru': 'sphinx.search.ru.SearchRussian',
'sv': 'sphinx.search.sv.SearchSwedish',
'tr': 'sphinx.search.tr.SearchTurkish',
'zh': 'sphinx.search.zh.SearchChinese',
}
| SearchLanguage |
python | numba__numba | numba/_version.py | {
"start": 1160,
"end": 1645
} | class ____:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "numba-"
cfg.versionfile_source = "numba/_version.py"
cfg.verbose = False
return cfg
| VersioneerConfig |
python | plotly__plotly.py | plotly/graph_objs/choroplethmap/colorbar/_tickformatstop.py | {
"start": 233,
"end": 8544
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "choroplethmap.colorbar"
_path_str = "choroplethmap.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.choroplethmap.
colorbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.choroplethmap.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choroplethmap.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickformatstop |
python | matplotlib__matplotlib | lib/mpl_toolkits/axes_grid1/inset_locator.py | {
"start": 3813,
"end": 6311
} | class ____(Patch):
@staticmethod
def get_bbox_edge_pos(bbox, loc):
"""
Return the ``(x, y)`` coordinates of corner *loc* of *bbox*; parameters
behave as documented for the `.BboxConnector` constructor.
"""
x0, y0, x1, y1 = bbox.extents
if loc == 1:
return x1, y1
elif loc == 2:
return x0, y1
elif loc == 3:
return x0, y0
elif loc == 4:
return x1, y0
@staticmethod
def connect_bbox(bbox1, bbox2, loc1, loc2=None):
"""
Construct a `.Path` connecting corner *loc1* of *bbox1* to corner
*loc2* of *bbox2*, where parameters behave as documented as for the
`.BboxConnector` constructor.
"""
if isinstance(bbox1, Rectangle):
bbox1 = TransformedBbox(Bbox.unit(), bbox1.get_transform())
if isinstance(bbox2, Rectangle):
bbox2 = TransformedBbox(Bbox.unit(), bbox2.get_transform())
if loc2 is None:
loc2 = loc1
x1, y1 = BboxConnector.get_bbox_edge_pos(bbox1, loc1)
x2, y2 = BboxConnector.get_bbox_edge_pos(bbox2, loc2)
return Path([[x1, y1], [x2, y2]])
@_docstring.interpd
def __init__(self, bbox1, bbox2, loc1, loc2=None, **kwargs):
"""
Connect two bboxes with a straight line.
Parameters
----------
bbox1, bbox2 : `~matplotlib.transforms.Bbox`
Bounding boxes to connect.
loc1, loc2 : {1, 2, 3, 4}
Corner of *bbox1* and *bbox2* to draw the line. Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
*loc2* is optional and defaults to *loc1*.
**kwargs
Patch properties for the line drawn. Valid arguments include:
%(Patch:kwdoc)s
"""
if "transform" in kwargs:
raise ValueError("transform should not be set")
kwargs["transform"] = IdentityTransform()
kwargs.setdefault(
"fill", bool({'fc', 'facecolor', 'color'}.intersection(kwargs)))
super().__init__(**kwargs)
self.bbox1 = bbox1
self.bbox2 = bbox2
self.loc1 = loc1
self.loc2 = loc2
def get_path(self):
# docstring inherited
return self.connect_bbox(self.bbox1, self.bbox2,
self.loc1, self.loc2)
| BboxConnector |
python | kamyu104__LeetCode-Solutions | Python/find-missing-and-repeated-values.py | {
"start": 50,
"end": 813
} | class ____(object):
def findMissingAndRepeatedValues(self, grid):
"""
:type grid: List[List[int]]
:rtype: List[int]
"""
n = len(grid)
a_xor_b = 0
for i in xrange(n**2):
r, c = divmod(i, n)
a_xor_b ^= grid[r][c]^(i+1)
base = a_xor_b&-a_xor_b
result = [0]*2
for i in xrange(n**2):
r, c = divmod(i, len(grid[0]))
result[1 if (i+1)&base != 0 else 0] ^= i+1
result[1 if grid[r][c]&base != 0 else 0] ^= grid[r][c]
if any(x == result[1] for row in grid for x in row):
result[0], result[1] = result[1], result[0]
return result
# Time: O(n^2)
# Space: O(n^2)
import collections
# freq table
| Solution |
python | django__django | tests/test_runner_apps/simple/tests.py | {
"start": 123,
"end": 231
} | class ____(DjangoTestCase):
def test_1(self):
pass
def test_2(self):
pass
| DjangoCase1 |
python | apache__airflow | providers/apache/cassandra/tests/integration/apache/cassandra/hooks/test_cassandra.py | {
"start": 1207,
"end": 9360
} | class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id="cassandra_test",
conn_type="cassandra",
host="host-1,host-2",
port=9042,
schema="test_keyspace",
extra='{"load_balancing_policy":"TokenAwarePolicy","protocol_version":4}',
)
)
create_connection_without_db(
Connection(
conn_id="cassandra_default_with_schema",
conn_type="cassandra",
host="cassandra",
port=9042,
schema="s",
)
)
hook = CassandraHook("cassandra_default")
session = hook.get_conn()
cqls = [
"DROP SCHEMA IF EXISTS s",
"""
CREATE SCHEMA s WITH REPLICATION =
{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }
""",
]
for cql in cqls:
session.execute(cql)
session.shutdown()
hook.shutdown_cluster()
def test_get_conn(self):
with mock.patch.object(Cluster, "__init__") as mock_cluster_ctor:
mock_cluster_ctor.return_value = None
CassandraHook(cassandra_conn_id="cassandra_test")
mock_cluster_ctor.assert_called_once_with(
contact_points=["host-1", "host-2"],
port=9042,
protocol_version=4,
load_balancing_policy=mock.ANY,
)
assert isinstance(mock_cluster_ctor.call_args.kwargs["load_balancing_policy"], TokenAwarePolicy)
def test_get_lb_policy_with_no_args(self):
# test LB policies with no args
self._assert_get_lb_policy("RoundRobinPolicy", {}, RoundRobinPolicy)
self._assert_get_lb_policy("DCAwareRoundRobinPolicy", {}, DCAwareRoundRobinPolicy)
self._assert_get_lb_policy(
"TokenAwarePolicy", {}, TokenAwarePolicy, expected_child_policy_type=RoundRobinPolicy
)
def test_get_lb_policy_with_args(self):
# test DCAwareRoundRobinPolicy with args
self._assert_get_lb_policy(
"DCAwareRoundRobinPolicy",
{"local_dc": "foo", "used_hosts_per_remote_dc": "3"},
DCAwareRoundRobinPolicy,
)
# test WhiteListRoundRobinPolicy with args
fake_addr_info = [
["family", "sockettype", "proto", "canonname", ("2606:2800:220:1:248:1893:25c8:1946", 80, 0, 0)]
]
with mock.patch("socket.getaddrinfo", return_value=fake_addr_info):
self._assert_get_lb_policy(
"WhiteListRoundRobinPolicy", {"hosts": ["host1", "host2"]}, WhiteListRoundRobinPolicy
)
# test TokenAwarePolicy with args
with mock.patch("socket.getaddrinfo", return_value=fake_addr_info):
self._assert_get_lb_policy(
"TokenAwarePolicy",
{
"child_load_balancing_policy": "WhiteListRoundRobinPolicy",
"child_load_balancing_policy_args": {"hosts": ["host-1", "host-2"]},
},
TokenAwarePolicy,
expected_child_policy_type=WhiteListRoundRobinPolicy,
)
def test_get_lb_policy_invalid_policy(self):
# test invalid policy name should default to RoundRobinPolicy
self._assert_get_lb_policy("DoesNotExistPolicy", {}, RoundRobinPolicy)
# test invalid child policy name should default child policy to RoundRobinPolicy
self._assert_get_lb_policy(
"TokenAwarePolicy", {}, TokenAwarePolicy, expected_child_policy_type=RoundRobinPolicy
)
self._assert_get_lb_policy(
"TokenAwarePolicy",
{"child_load_balancing_policy": "DoesNotExistPolicy"},
TokenAwarePolicy,
expected_child_policy_type=RoundRobinPolicy,
)
def test_get_lb_policy_no_host_for_allow_list(self):
# test host not specified for WhiteListRoundRobinPolicy should throw exception
self._assert_get_lb_policy(
"WhiteListRoundRobinPolicy", {}, WhiteListRoundRobinPolicy, should_throw=True
)
self._assert_get_lb_policy(
"TokenAwarePolicy",
{"child_load_balancing_policy": "WhiteListRoundRobinPolicy"},
TokenAwarePolicy,
expected_child_policy_type=RoundRobinPolicy,
should_throw=True,
)
def _assert_get_lb_policy(
self,
policy_name,
policy_args,
expected_policy_type,
expected_child_policy_type=None,
should_throw=False,
):
thrown = False
try:
policy = CassandraHook.get_lb_policy(policy_name, policy_args)
assert isinstance(policy, expected_policy_type)
if expected_child_policy_type:
assert isinstance(policy._child_policy, expected_child_policy_type)
except Exception:
thrown = True
assert should_throw == thrown
def test_record_exists_with_keyspace_from_cql(self):
hook = CassandraHook("cassandra_default")
session = hook.get_conn()
cqls = [
"DROP TABLE IF EXISTS s.t",
"CREATE TABLE s.t (pk1 text, pk2 text, c text, PRIMARY KEY (pk1, pk2))",
"INSERT INTO s.t (pk1, pk2, c) VALUES ('foo', 'bar', 'baz')",
]
for cql in cqls:
session.execute(cql)
assert hook.record_exists("s.t", {"pk1": "foo", "pk2": "bar"})
assert not hook.record_exists("s.t", {"pk1": "foo", "pk2": "baz"})
session.shutdown()
hook.shutdown_cluster()
def test_record_exists_with_keyspace_from_session(self):
hook = CassandraHook("cassandra_default_with_schema")
session = hook.get_conn()
cqls = [
"DROP TABLE IF EXISTS t",
"CREATE TABLE t (pk1 text, pk2 text, c text, PRIMARY KEY (pk1, pk2))",
"INSERT INTO t (pk1, pk2, c) VALUES ('foo', 'bar', 'baz')",
]
for cql in cqls:
session.execute(cql)
assert hook.record_exists("t", {"pk1": "foo", "pk2": "bar"})
assert not hook.record_exists("t", {"pk1": "foo", "pk2": "baz"})
session.shutdown()
hook.shutdown_cluster()
def test_table_exists_with_keyspace_from_cql(self):
hook = CassandraHook("cassandra_default")
session = hook.get_conn()
cqls = [
"DROP TABLE IF EXISTS s.t",
"CREATE TABLE s.t (pk1 text PRIMARY KEY)",
]
for cql in cqls:
session.execute(cql)
assert hook.table_exists("s.t")
assert not hook.table_exists("s.u")
session.shutdown()
hook.shutdown_cluster()
def test_table_exists_with_keyspace_from_session(self):
hook = CassandraHook("cassandra_default_with_schema")
session = hook.get_conn()
cqls = [
"DROP TABLE IF EXISTS t",
"CREATE TABLE t (pk1 text PRIMARY KEY)",
]
for cql in cqls:
session.execute(cql)
assert hook.table_exists("t")
assert not hook.table_exists("u")
session.shutdown()
hook.shutdown_cluster()
def test_possible_sql_injection(self):
hook = CassandraHook("cassandra_default_with_schema")
session = hook.get_conn()
cqls = [
"DROP TABLE IF EXISTS t",
"CREATE TABLE t (pk1 text, pk2 text, c text, PRIMARY KEY (pk1, pk2))",
"INSERT INTO t (pk1, pk2, c) VALUES ('foo', 'bar', 'baz')",
]
for cql in cqls:
session.execute(cql)
assert hook.record_exists("t", {"pk1": "foo", "pk2": "bar"})
assert not hook.record_exists("tt", {"pk1": "foo", "pk2": "bar"})
with pytest.raises(ValueError, match=re.escape("Invalid input: t; DROP TABLE t; SELECT * FROM t")):
hook.record_exists("t; DROP TABLE t; SELECT * FROM t", {"pk1": "foo", "pk2": "baz"})
| TestCassandraHook |
python | tensorflow__tensorflow | tensorflow/python/saved_model/tracing_utils_test.py | {
"start": 1448,
"end": 2516
} | class ____(test.TestCase):
def test_trace_save_and_restore(self):
t = MyTrackable()
save_fn, restore_fn = tracing_utils.trace_save_and_restore(t)
self.assertDictEqual({"a": 0, "b": 1}, self.evaluate(save_fn()))
restore_fn({"a": constant_op.constant(2), "b": constant_op.constant(3)})
self.assertDictEqual({"a": 2, "b": 3}, self.evaluate(save_fn()))
def test_trace_save_and_restore_concrete(self):
t = MyTrackable()
t._serialize_to_tensors = (def_function.function(t._serialize_to_tensors)
.get_concrete_function())
restored_tensor_spec = t._serialize_to_tensors.structured_outputs
# The wrapped tf.function doesn't matter.
t._restore_from_tensors = (def_function.function(lambda x: x)
.get_concrete_function(restored_tensor_spec))
save_fn, restore_fn = tracing_utils.trace_save_and_restore(t)
self.assertIs(t._serialize_to_tensors, save_fn)
self.assertIs(t._restore_from_tensors, restore_fn)
if __name__ == "__main__":
test.main()
| TracingUtilsTest |
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_fork.py | {
"start": 1208,
"end": 30735
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-fork"
method = "POST"
def setUp(self) -> None:
super().setUp()
self.superuser = self.create_user(is_superuser=True)
self.staff_user = self.create_user(is_staff=True)
self.existing_org_owner = self.create_user(
email="existing_org_owner@example.com",
is_superuser=False,
is_staff=False,
is_active=True,
)
self.requested_org_slug = "testing"
self.existing_org = self.create_organization(
name=self.requested_org_slug,
owner=self.existing_org_owner,
region=EXPORTING_TEST_REGION,
)
@override_options({"relocation.enabled": True, "relocation.daily-limit.small": 1})
@assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
def test_good_simple(
self,
uploading_start_mock: Mock,
analytics_record_mock: Mock,
):
self.login_as(user=self.superuser, superuser=True)
relocation_count = Relocation.objects.count()
relocation_file_count = RelocationFile.objects.count()
response = self.get_success_response(self.existing_org.slug)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.UPLOADING.name
assert response.data["provenance"] == Relocation.Provenance.SAAS_TO_SAAS.name
assert response.data["scheduledPauseAtStep"] is None
assert response.data["creator"]["id"] == str(self.superuser.id)
assert response.data["creator"]["email"] == str(self.superuser.email)
assert response.data["creator"]["username"] == str(self.superuser.username)
assert response.data["owner"]["id"] == str(self.existing_org_owner.id)
assert response.data["owner"]["email"] == str(self.existing_org_owner.email)
assert response.data["owner"]["username"] == str(self.existing_org_owner.username)
relocation: Relocation = Relocation.objects.get(owner_id=self.existing_org_owner.id)
assert str(relocation.uuid) == response.data["uuid"]
assert relocation.want_org_slugs == [self.requested_org_slug]
assert Relocation.objects.count() == relocation_count + 1
assert RelocationFile.objects.count() == relocation_file_count
assert uploading_start_mock.call_count == 1
uploading_start_mock.assert_called_with(
args=[UUID(response.data["uuid"]), EXPORTING_TEST_REGION, self.requested_org_slug]
)
assert analytics_record_mock.call_count == 1
assert_last_analytics_event(
analytics_record_mock,
RelocationForkedEvent(
creator_id=int(response.data["creator"]["id"]),
owner_id=int(response.data["owner"]["id"]),
uuid=response.data["uuid"],
from_org_slug=self.requested_org_slug,
requesting_region_name=REQUESTING_TEST_REGION,
replying_region_name=EXPORTING_TEST_REGION,
),
)
@override_options({"relocation.enabled": True, "relocation.daily-limit.small": 1})
@assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
def test_good_simple_using_organization_id(
self,
uploading_start_mock: Mock,
analytics_record_mock: Mock,
):
self.login_as(user=self.superuser, superuser=True)
relocation_count = Relocation.objects.count()
relocation_file_count = RelocationFile.objects.count()
response = self.get_success_response(self.existing_org.id)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.UPLOADING.name
assert response.data["provenance"] == Relocation.Provenance.SAAS_TO_SAAS.name
relocation: Relocation = Relocation.objects.get(owner_id=self.existing_org_owner.id)
assert str(relocation.uuid) == response.data["uuid"]
assert relocation.want_org_slugs == [self.requested_org_slug]
assert Relocation.objects.count() == relocation_count + 1
assert RelocationFile.objects.count() == relocation_file_count
assert uploading_start_mock.call_count == 1
uploading_start_mock.assert_called_with(
args=[UUID(response.data["uuid"]), EXPORTING_TEST_REGION, self.requested_org_slug]
)
assert analytics_record_mock.call_count == 1
assert_last_analytics_event(
analytics_record_mock,
RelocationForkedEvent(
creator_id=int(response.data["creator"]["id"]),
owner_id=int(response.data["owner"]["id"]),
uuid=response.data["uuid"],
from_org_slug=self.requested_org_slug,
requesting_region_name=REQUESTING_TEST_REGION,
replying_region_name=EXPORTING_TEST_REGION,
),
)
@override_options(
{
"relocation.enabled": True,
"relocation.daily-limit.small": 1,
"relocation.autopause.saas-to-saas": "IMPORTING",
}
)
@assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
def test_good_with_valid_autopause_option(
self,
uploading_start_mock: Mock,
analytics_record_mock: Mock,
):
self.login_as(user=self.superuser, superuser=True)
response = self.get_success_response(self.existing_org.slug)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.UPLOADING.name
assert response.data["provenance"] == Relocation.Provenance.SAAS_TO_SAAS.name
assert response.data["scheduledPauseAtStep"] == Relocation.Step.IMPORTING.name
assert uploading_start_mock.call_count == 1
uploading_start_mock.assert_called_with(
args=[UUID(response.data["uuid"]), EXPORTING_TEST_REGION, self.requested_org_slug]
)
assert analytics_record_mock.call_count == 1
assert_last_analytics_event(
analytics_record_mock,
RelocationForkedEvent(
creator_id=int(response.data["creator"]["id"]),
owner_id=int(response.data["owner"]["id"]),
uuid=response.data["uuid"],
from_org_slug=self.requested_org_slug,
requesting_region_name=REQUESTING_TEST_REGION,
replying_region_name=EXPORTING_TEST_REGION,
),
)
@override_options(
{
"relocation.enabled": True,
"relocation.daily-limit.small": 1,
"relocation.autopause.self-hosted": "IMPORTING",
}
)
@assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
def test_good_with_untriggered_autopause_option(
self,
uploading_start_mock: Mock,
analytics_record_mock: Mock,
):
self.login_as(user=self.superuser, superuser=True)
response = self.get_success_response(self.existing_org.slug)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.UPLOADING.name
assert response.data["provenance"] == Relocation.Provenance.SAAS_TO_SAAS.name
assert response.data["scheduledPauseAtStep"] is None
assert uploading_start_mock.call_count == 1
uploading_start_mock.assert_called_with(
args=[UUID(response.data["uuid"]), EXPORTING_TEST_REGION, self.requested_org_slug]
)
assert analytics_record_mock.call_count == 1
assert_last_analytics_event(
analytics_record_mock,
RelocationForkedEvent(
creator_id=int(response.data["creator"]["id"]),
owner_id=int(response.data["owner"]["id"]),
uuid=response.data["uuid"],
from_org_slug=self.requested_org_slug,
requesting_region_name=REQUESTING_TEST_REGION,
replying_region_name=EXPORTING_TEST_REGION,
),
)
@override_options(
{"relocation.enabled": False, "relocation.daily-limit.small": 1, "staff.ga-rollout": True}
)
@assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
def test_good_staff_when_feature_disabled(
self,
uploading_start_mock: Mock,
analytics_record_mock: Mock,
):
self.login_as(user=self.staff_user, staff=True)
relocation_count = Relocation.objects.count()
relocation_file_count = RelocationFile.objects.count()
response = self.get_success_response(self.existing_org.slug)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.UPLOADING.name
assert response.data["provenance"] == Relocation.Provenance.SAAS_TO_SAAS.name
assert response.data["scheduledPauseAtStep"] is None
assert response.data["creator"]["id"] == str(self.staff_user.id)
assert response.data["creator"]["email"] == str(self.staff_user.email)
assert response.data["creator"]["username"] == str(self.staff_user.username)
assert response.data["owner"]["id"] == str(self.existing_org_owner.id)
assert response.data["owner"]["email"] == str(self.existing_org_owner.email)
assert response.data["owner"]["username"] == str(self.existing_org_owner.username)
relocation: Relocation = Relocation.objects.get(owner_id=self.existing_org_owner.id)
assert str(relocation.uuid) == response.data["uuid"]
assert relocation.want_org_slugs == [self.requested_org_slug]
assert Relocation.objects.count() == relocation_count + 1
assert RelocationFile.objects.count() == relocation_file_count
assert uploading_start_mock.call_count == 1
uploading_start_mock.assert_called_with(
args=[UUID(response.data["uuid"]), EXPORTING_TEST_REGION, self.requested_org_slug]
)
assert analytics_record_mock.call_count == 1
assert_last_analytics_event(
analytics_record_mock,
RelocationForkedEvent(
creator_id=int(response.data["creator"]["id"]),
owner_id=int(response.data["owner"]["id"]),
uuid=response.data["uuid"],
from_org_slug=self.requested_org_slug,
requesting_region_name=REQUESTING_TEST_REGION,
replying_region_name=EXPORTING_TEST_REGION,
),
)
@override_options({"relocation.enabled": False, "relocation.daily-limit.small": 1})
@assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
def test_good_superuser_when_feature_disabled(
self,
uploading_start_mock: Mock,
analytics_record_mock: Mock,
):
self.login_as(user=self.superuser, superuser=True)
relocation_count = Relocation.objects.count()
relocation_file_count = RelocationFile.objects.count()
response = self.get_success_response(self.existing_org.slug)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.UPLOADING.name
assert response.data["provenance"] == Relocation.Provenance.SAAS_TO_SAAS.name
assert response.data["scheduledPauseAtStep"] is None
assert response.data["creator"]["id"] == str(self.superuser.id)
assert response.data["creator"]["email"] == str(self.superuser.email)
assert response.data["creator"]["username"] == str(self.superuser.username)
assert response.data["owner"]["id"] == str(self.existing_org_owner.id)
assert response.data["owner"]["email"] == str(self.existing_org_owner.email)
assert response.data["owner"]["username"] == str(self.existing_org_owner.username)
relocation: Relocation = Relocation.objects.get(owner_id=self.existing_org_owner.id)
assert str(relocation.uuid) == response.data["uuid"]
assert relocation.want_org_slugs == [self.requested_org_slug]
assert Relocation.objects.count() == relocation_count + 1
assert RelocationFile.objects.count() == relocation_file_count
assert uploading_start_mock.call_count == 1
uploading_start_mock.assert_called_with(
args=[UUID(response.data["uuid"]), EXPORTING_TEST_REGION, self.requested_org_slug]
)
assert analytics_record_mock.call_count == 1
assert_last_analytics_event(
analytics_record_mock,
RelocationForkedEvent(
creator_id=int(response.data["creator"]["id"]),
owner_id=int(response.data["owner"]["id"]),
uuid=response.data["uuid"],
from_org_slug=self.requested_org_slug,
requesting_region_name=REQUESTING_TEST_REGION,
replying_region_name=EXPORTING_TEST_REGION,
),
)
@override_options({"relocation.enabled": True, "relocation.daily-limit.small": 1})
@assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
def test_bad_organization_not_found(
self,
uploading_start_mock: Mock,
analytics_record_mock: Mock,
):
self.login_as(user=self.superuser, superuser=True)
relocation_count = Relocation.objects.count()
relocation_file_count = RelocationFile.objects.count()
response = self.get_error_response("does-not-exist", status_code=404)
assert response.data.get("detail") == ERR_ORGANIZATION_NOT_FOUND.substitute(
pointer="does-not-exist"
)
assert uploading_start_mock.call_count == 0
assert analytics_record_mock.call_count == 0
assert Relocation.objects.count() == relocation_count
assert RelocationFile.objects.count() == relocation_file_count
    @override_options({"relocation.enabled": True, "relocation.daily-limit.small": 1})
    @assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
    def test_bad_organization_mapping_not_found(
        self,
        uploading_start_mock: Mock,
        analytics_record_mock: Mock,
    ):
        """A 404 is returned when the org has no control-silo mapping, with no
        side effects (no new rows, no upload task, no analytics event)."""
        self.login_as(user=self.superuser, superuser=True)
        relocation_count = Relocation.objects.count()
        relocation_file_count = RelocationFile.objects.count()

        # Remove the control-silo mapping so slug resolution fails.
        with assume_test_silo_mode(SiloMode.CONTROL):
            OrganizationMapping.objects.filter(slug=self.existing_org.slug).delete()

        response = self.get_error_response(self.existing_org.slug, status_code=404)

        assert response.data.get("detail") == ERR_ORGANIZATION_NOT_FOUND.substitute(
            pointer=self.existing_org.slug
        )
        assert uploading_start_mock.call_count == 0
        assert analytics_record_mock.call_count == 0
        # Table sizes are unchanged from before the request.
        assert Relocation.objects.count() == relocation_count
        assert RelocationFile.objects.count() == relocation_file_count
    @override_options({"relocation.enabled": True, "relocation.daily-limit.small": 1})
    @assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
    def test_bad_cannot_fork_deleted_organization(
        self,
        uploading_start_mock: Mock,
        analytics_record_mock: Mock,
    ):
        """Forking an org whose deletion is in progress fails with 400 and has
        no side effects (no new rows, no upload task, no analytics event)."""
        self.login_as(user=self.superuser, superuser=True)

        # Mark the org as being deleted in the region silo...
        self.existing_org.status = OrganizationStatus.DELETION_IN_PROGRESS
        self.existing_org.save()

        # ...and mirror that status on its control-silo mapping.
        with assume_test_silo_mode(SiloMode.CONTROL):
            org_mapping = OrganizationMapping.objects.get(slug=self.existing_org.slug)
            org_mapping.status = OrganizationStatus.DELETION_IN_PROGRESS
            org_mapping.save()

        relocation_count = Relocation.objects.count()
        relocation_file_count = RelocationFile.objects.count()
        response = self.get_error_response(self.existing_org.slug, status_code=400)
        assert response.data.get("detail") is not None
        assert response.data.get("detail") == ERR_ORGANIZATION_INACTIVE.substitute(
            slug=self.existing_org.slug,
            status="DELETION_IN_PROGRESS",
        )
        assert uploading_start_mock.call_count == 0
        assert analytics_record_mock.call_count == 0
        # Table sizes are unchanged from before the request.
        assert Relocation.objects.count() == relocation_count
        assert RelocationFile.objects.count() == relocation_file_count
@override_options({"relocation.enabled": True, "relocation.daily-limit.small": 1})
@assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
@patch(
"sentry.api.endpoints.organization_fork.CANNOT_FORK_FROM_REGION", {EXPORTING_TEST_REGION}
)
def test_bad_organization_in_forbidden_region(
self,
uploading_start_mock: Mock,
analytics_record_mock: Mock,
):
self.login_as(user=self.superuser, superuser=True)
relocation_count = Relocation.objects.count()
relocation_file_count = RelocationFile.objects.count()
response = self.get_error_response(self.existing_org.slug, status_code=403)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_CANNOT_FORK_FROM_REGION.substitute(
region=EXPORTING_TEST_REGION,
)
assert uploading_start_mock.call_count == 0
assert analytics_record_mock.call_count == 0
assert Relocation.objects.count() == relocation_count
assert RelocationFile.objects.count() == relocation_file_count
@override_options({"relocation.enabled": True, "relocation.daily-limit.small": 1})
# Note that for this test we've changed this to `EXPORTING_TEST_REGION`
@assume_test_silo_mode(SiloMode.REGION, region_name=EXPORTING_TEST_REGION)
def test_bad_organization_already_in_region(
self,
uploading_start_mock: Mock,
analytics_record_mock: Mock,
):
self.login_as(user=self.superuser, superuser=True)
relocation_count = Relocation.objects.count()
relocation_file_count = RelocationFile.objects.count()
response = self.get_error_response(self.existing_org.slug, status_code=400)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_CANNOT_FORK_INTO_SAME_REGION.substitute(
region=EXPORTING_TEST_REGION,
)
assert uploading_start_mock.call_count == 0
assert analytics_record_mock.call_count == 0
assert Relocation.objects.count() == relocation_count
assert RelocationFile.objects.count() == relocation_file_count
for stat in [
Relocation.Status.SUCCESS,
Relocation.Status.FAILURE,
]:
@override_options({"relocation.enabled": True, "relocation.daily-limit.small": 1})
@assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
def test_good_completed_relocation_for_same_organization(
self,
uploading_start_mock: Mock,
analytics_record_mock: Mock,
stat=stat,
):
self.login_as(user=self.superuser, superuser=True)
Relocation.objects.create(
creator_id=self.superuser.id,
owner_id=self.existing_org_owner.id,
want_org_slugs=[self.existing_org.slug],
status=stat.value,
step=Relocation.Step.COMPLETED.value,
provenance=Relocation.Provenance.SAAS_TO_SAAS.value,
)
relocation_count = Relocation.objects.count()
relocation_file_count = RelocationFile.objects.count()
response = self.get_success_response(self.existing_org.slug)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.UPLOADING.name
assert response.data["provenance"] == Relocation.Provenance.SAAS_TO_SAAS.name
relocation: Relocation = Relocation.objects.get(
owner_id=self.existing_org_owner.id, status=Relocation.Status.IN_PROGRESS.value
)
assert str(relocation.uuid) == response.data["uuid"]
assert relocation.want_org_slugs == [self.requested_org_slug]
assert Relocation.objects.count() == relocation_count + 1
assert RelocationFile.objects.count() == relocation_file_count
assert uploading_start_mock.call_count == 1
uploading_start_mock.assert_called_with(
args=[UUID(response.data["uuid"]), EXPORTING_TEST_REGION, self.requested_org_slug]
)
assert analytics_record_mock.call_count == 1
assert_last_analytics_event(
analytics_record_mock,
RelocationForkedEvent(
creator_id=int(response.data["creator"]["id"]),
owner_id=int(response.data["owner"]["id"]),
uuid=response.data["uuid"],
from_org_slug=self.requested_org_slug,
requesting_region_name=REQUESTING_TEST_REGION,
replying_region_name=EXPORTING_TEST_REGION,
),
)
for stat in [
Relocation.Status.IN_PROGRESS,
Relocation.Status.PAUSE,
]:
@override_options({"relocation.enabled": True, "relocation.daily-limit.small": 1})
@assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
def test_bad_active_relocation_for_same_organization(
self,
uploading_start_mock: Mock,
analytics_record_mock: Mock,
stat=stat,
):
self.login_as(user=self.superuser, superuser=True)
existing_relocation = Relocation.objects.create(
creator_id=self.superuser.id,
owner_id=self.existing_org_owner.id,
want_org_slugs=[self.existing_org.slug],
status=stat.value,
step=Relocation.Step.UPLOADING.value,
provenance=Relocation.Provenance.SAAS_TO_SAAS.value,
)
response = self.get_error_response(self.existing_org.slug, status_code=409)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_DUPLICATE_ORGANIZATION_FORK.substitute(
uuid=str(existing_relocation.uuid)
)
assert uploading_start_mock.call_count == 0
assert analytics_record_mock.call_count == 0
@override_options(
{"relocation.enabled": True, "relocation.daily-limit.small": 1, "staff.ga-rollout": True}
)
@assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
def test_good_no_throttle_for_staff(
self,
uploading_start_mock: Mock,
analytics_record_mock: Mock,
):
self.login_as(user=self.staff_user, staff=True)
Relocation.objects.create(
creator_id=self.superuser.id,
owner_id=self.existing_org_owner.id,
want_org_slugs=["some-other-org"],
status=Relocation.Status.SUCCESS.value,
step=Relocation.Step.COMPLETED.value,
provenance=Relocation.Provenance.SAAS_TO_SAAS.value,
)
relocation_count = Relocation.objects.count()
relocation_file_count = RelocationFile.objects.count()
response = self.get_success_response(self.existing_org.slug)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.UPLOADING.name
assert response.data["provenance"] == Relocation.Provenance.SAAS_TO_SAAS.name
relocation: Relocation = Relocation.objects.get(
owner_id=self.existing_org_owner.id, status=Relocation.Status.IN_PROGRESS.value
)
assert str(relocation.uuid) == response.data["uuid"]
assert relocation.want_org_slugs == [self.requested_org_slug]
assert Relocation.objects.count() == relocation_count + 1
assert RelocationFile.objects.count() == relocation_file_count
assert uploading_start_mock.call_count == 1
uploading_start_mock.assert_called_with(
args=[UUID(response.data["uuid"]), EXPORTING_TEST_REGION, self.requested_org_slug]
)
assert analytics_record_mock.call_count == 1
assert_last_analytics_event(
analytics_record_mock,
RelocationForkedEvent(
creator_id=int(response.data["creator"]["id"]),
owner_id=int(response.data["owner"]["id"]),
uuid=response.data["uuid"],
from_org_slug=self.requested_org_slug,
requesting_region_name=REQUESTING_TEST_REGION,
replying_region_name=EXPORTING_TEST_REGION,
),
)
@override_options({"relocation.enabled": True, "relocation.daily-limit.small": 1})
@assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
def test_good_no_throttle_for_superuser(
self,
uploading_start_mock: Mock,
analytics_record_mock: Mock,
):
self.login_as(user=self.superuser, superuser=True)
Relocation.objects.create(
creator_id=self.superuser.id,
owner_id=self.existing_org_owner.id,
want_org_slugs=["some-other-org"],
status=Relocation.Status.SUCCESS.value,
step=Relocation.Step.COMPLETED.value,
provenance=Relocation.Provenance.SAAS_TO_SAAS.value,
)
relocation_count = Relocation.objects.count()
relocation_file_count = RelocationFile.objects.count()
response = self.get_success_response(self.existing_org.slug)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.UPLOADING.name
assert response.data["provenance"] == Relocation.Provenance.SAAS_TO_SAAS.name
relocation: Relocation = Relocation.objects.get(
owner_id=self.existing_org_owner.id, status=Relocation.Status.IN_PROGRESS.value
)
assert str(relocation.uuid) == response.data["uuid"]
assert relocation.want_org_slugs == [self.requested_org_slug]
assert Relocation.objects.count() == relocation_count + 1
assert RelocationFile.objects.count() == relocation_file_count
assert uploading_start_mock.call_count == 1
uploading_start_mock.assert_called_with(
args=[UUID(response.data["uuid"]), EXPORTING_TEST_REGION, self.requested_org_slug]
)
assert analytics_record_mock.call_count == 1
assert_last_analytics_event(
analytics_record_mock,
RelocationForkedEvent(
creator_id=int(response.data["creator"]["id"]),
owner_id=int(response.data["owner"]["id"]),
uuid=response.data["uuid"],
from_org_slug=self.requested_org_slug,
requesting_region_name=REQUESTING_TEST_REGION,
replying_region_name=EXPORTING_TEST_REGION,
),
)
@assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
def test_bad_without_superuser_or_staff(
self,
uploading_start_mock: Mock,
analytics_record_mock: Mock,
):
self.login_as(user=self.existing_org_owner, superuser=False, staff=False)
relocation_count = Relocation.objects.count()
relocation_file_count = RelocationFile.objects.count()
self.get_error_response(self.existing_org.slug, status_code=403)
assert uploading_start_mock.call_count == 0
assert analytics_record_mock.call_count == 0
assert Relocation.objects.count() == relocation_count
assert RelocationFile.objects.count() == relocation_file_count
@assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
def test_bad_superuser_not_active(
self,
uploading_start_mock: Mock,
analytics_record_mock: Mock,
):
self.login_as(user=self.superuser, superuser=False)
relocation_count = Relocation.objects.count()
relocation_file_count = RelocationFile.objects.count()
self.get_error_response(self.existing_org.slug, status_code=403)
assert uploading_start_mock.call_count == 0
assert analytics_record_mock.call_count == 0
assert Relocation.objects.count() == relocation_count
assert RelocationFile.objects.count() == relocation_file_count
@override_options({"relocation.enabled": True, "relocation.daily-limit.small": 1})
@assume_test_silo_mode(SiloMode.REGION, region_name=REQUESTING_TEST_REGION)
def test_bad_no_auth(
self,
uploading_start_mock: Mock,
analytics_record_mock: Mock,
):
relocation_count = Relocation.objects.count()
relocation_file_count = RelocationFile.objects.count()
self.get_error_response(self.existing_org.slug, status_code=401)
assert uploading_start_mock.call_count == 0
assert analytics_record_mock.call_count == 0
assert Relocation.objects.count() == relocation_count
assert RelocationFile.objects.count() == relocation_file_count
| OrganizationForkTest |
python | jina-ai__jina | jina/excepts.py | {
"start": 1879,
"end": 2004
} | class ____(Exception, BaseJinaException):
"""Exception when YAML config specifies a wrong version number."""
| BadYAMLVersion |
python | streamlit__streamlit | e2e_playwright/st_write_objects.py | {
"start": 1216,
"end": 1356
} | class ____(NamedTuple):
x: int
y: int
st.write(Point(1, 2))
st.subheader("st.write(help)")
st.write(st.dataframe)
@dataclass
| Point |
python | readthedocs__readthedocs.org | readthedocs/organizations/views/private.py | {
"start": 6862,
"end": 7436
} | class ____(PrivateViewMixin, OrganizationTeamMemberView, FormView):
template_name = "organizations/team_member_create.html"
# No success message here, since it's set in the form.
def form_valid(self, form):
# Manually calling to save, since this isn't a ModelFormView.
result = form.save()
if isinstance(result, Invitation):
messages.success(self.request, _("Invitation sent"))
else:
messages.success(self.request, _("Member added to team"))
return super().form_valid(form)
| AddOrganizationTeamMember |
python | conda__conda | conda/auxlib/entity.py | {
"start": 17869,
"end": 18230
} | class ____(Field):
_type = datetime
def box(self, instance, instance_type, val):
try:
return isoparse(val) if isinstance(val, str) else val
except ValueError as e:
raise ValidationError(val, msg=e)
def dump(self, instance, instance_type, val):
return None if val is None else val.isoformat()
| DateField |
python | great-expectations__great_expectations | great_expectations/expectations/expectation_configuration.py | {
"start": 2604,
"end": 2936
} | class ____(SerializableDictDot):
def __init__(self, description: Optional[str] = None) -> None:
self._description = description
@property
def description(self):
return self._description
@description.setter
def description(self, value) -> None:
self._description = value
| ExpectationContext |
python | spack__spack | lib/spack/spack/vendor/jinja2/environment.py | {
"start": 4268,
"end": 41778
} | class ____:
r"""The core component of Jinja is the `Environment`. It contains
important shared variables like configuration, filters, tests,
globals and others. Instances of this class may be modified if
they are not shared and if no template was loaded so far.
Modifications on environments after the first template was loaded
will lead to surprising effects and undefined behavior.
Here are the possible initialization parameters:
`block_start_string`
The string marking the beginning of a block. Defaults to ``'{%'``.
`block_end_string`
The string marking the end of a block. Defaults to ``'%}'``.
`variable_start_string`
The string marking the beginning of a print statement.
Defaults to ``'{{'``.
`variable_end_string`
The string marking the end of a print statement. Defaults to
``'}}'``.
`comment_start_string`
The string marking the beginning of a comment. Defaults to ``'{#'``.
`comment_end_string`
The string marking the end of a comment. Defaults to ``'#}'``.
`line_statement_prefix`
If given and a string, this will be used as prefix for line based
statements. See also :ref:`line-statements`.
`line_comment_prefix`
If given and a string, this will be used as prefix for line based
comments. See also :ref:`line-statements`.
.. versionadded:: 2.2
`trim_blocks`
If this is set to ``True`` the first newline after a block is
removed (block, not variable tag!). Defaults to `False`.
`lstrip_blocks`
If this is set to ``True`` leading spaces and tabs are stripped
from the start of a line to a block. Defaults to `False`.
`newline_sequence`
The sequence that starts a newline. Must be one of ``'\r'``,
``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
useful default for Linux and OS X systems as well as web
applications.
`keep_trailing_newline`
Preserve the trailing newline when rendering templates.
The default is ``False``, which causes a single newline,
if present, to be stripped from the end of the template.
.. versionadded:: 2.7
`extensions`
List of Jinja extensions to use. This can either be import paths
as strings or extension classes. For more information have a
look at :ref:`the extensions documentation <jinja-extensions>`.
`optimized`
should the optimizer be enabled? Default is ``True``.
`undefined`
:class:`Undefined` or a subclass of it that is used to represent
undefined values in the template.
`finalize`
A callable that can be used to process the result of a variable
expression before it is output. For example one can convert
``None`` implicitly into an empty string here.
`autoescape`
If set to ``True`` the XML/HTML autoescaping feature is enabled by
default. For more details about autoescaping see
:class:`~spack.vendor.markupsafe.Markup`. As of Jinja 2.4 this can also
be a callable that is passed the template name and has to
return ``True`` or ``False`` depending on autoescape should be
enabled by default.
.. versionchanged:: 2.4
`autoescape` can now be a function
`loader`
The template loader for this environment.
`cache_size`
The size of the cache. Per default this is ``400`` which means
that if more than 400 templates are loaded the loader will clean
out the least recently used template. If the cache size is set to
``0`` templates are recompiled all the time, if the cache size is
``-1`` the cache will not be cleaned.
.. versionchanged:: 2.8
The cache size was increased to 400 from a low 50.
`auto_reload`
Some loaders load templates from locations where the template
sources may change (ie: file system or database). If
``auto_reload`` is set to ``True`` (default) every time a template is
requested the loader checks if the source changed and if yes, it
will reload the template. For higher performance it's possible to
disable that.
`bytecode_cache`
If set to a bytecode cache object, this object will provide a
cache for the internal Jinja bytecode so that templates don't
have to be parsed if they were not changed.
See :ref:`bytecode-cache` for more information.
`enable_async`
If set to true this enables async template execution which
allows using async functions and generators.
"""
#: if this environment is sandboxed. Modifying this variable won't make
#: the environment sandboxed though. For a real sandboxed environment
#: have a look at spack.vendor.jinja2.sandbox. This flag alone controls the code
#: generation by the compiler.
sandboxed = False
#: True if the environment is just an overlay
overlayed = False
#: the environment this environment is linked to if it is an overlay
linked_to: t.Optional["Environment"] = None
#: shared environments have this set to `True`. A shared environment
#: must not be modified
shared = False
#: the class that is used for code generation. See
#: :class:`~spack.vendor.jinja2.compiler.CodeGenerator` for more information.
code_generator_class: t.Type["CodeGenerator"] = CodeGenerator
#: the context class that is used for templates. See
#: :class:`~spack.vendor.jinja2.runtime.Context` for more information.
context_class: t.Type[Context] = Context
template_class: t.Type["Template"]
def __init__(
self,
block_start_string: str = BLOCK_START_STRING,
block_end_string: str = BLOCK_END_STRING,
variable_start_string: str = VARIABLE_START_STRING,
variable_end_string: str = VARIABLE_END_STRING,
comment_start_string: str = COMMENT_START_STRING,
comment_end_string: str = COMMENT_END_STRING,
line_statement_prefix: t.Optional[str] = LINE_STATEMENT_PREFIX,
line_comment_prefix: t.Optional[str] = LINE_COMMENT_PREFIX,
trim_blocks: bool = TRIM_BLOCKS,
lstrip_blocks: bool = LSTRIP_BLOCKS,
newline_sequence: "te.Literal['\\n', '\\r\\n', '\\r']" = NEWLINE_SEQUENCE,
keep_trailing_newline: bool = KEEP_TRAILING_NEWLINE,
extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = (),
optimized: bool = True,
undefined: t.Type[Undefined] = Undefined,
finalize: t.Optional[t.Callable[..., t.Any]] = None,
autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = False,
loader: t.Optional["BaseLoader"] = None,
cache_size: int = 400,
auto_reload: bool = True,
bytecode_cache: t.Optional["BytecodeCache"] = None,
enable_async: bool = False,
):
# !!Important notice!!
# The constructor accepts quite a few arguments that should be
# passed by keyword rather than position. However it's important to
# not change the order of arguments because it's used at least
# internally in those cases:
# - spontaneous environments (i18n extension and Template)
# - unittests
# If parameter changes are required only add parameters at the end
# and don't change the arguments (or the defaults!) of the arguments
# existing already.
# lexer / parser information
self.block_start_string = block_start_string
self.block_end_string = block_end_string
self.variable_start_string = variable_start_string
self.variable_end_string = variable_end_string
self.comment_start_string = comment_start_string
self.comment_end_string = comment_end_string
self.line_statement_prefix = line_statement_prefix
self.line_comment_prefix = line_comment_prefix
self.trim_blocks = trim_blocks
self.lstrip_blocks = lstrip_blocks
self.newline_sequence = newline_sequence
self.keep_trailing_newline = keep_trailing_newline
# runtime information
self.undefined: t.Type[Undefined] = undefined
self.optimized = optimized
self.finalize = finalize
self.autoescape = autoescape
# defaults
self.filters = DEFAULT_FILTERS.copy()
self.tests = DEFAULT_TESTS.copy()
self.globals = DEFAULT_NAMESPACE.copy()
# set the loader provided
self.loader = loader
self.cache = create_cache(cache_size)
self.bytecode_cache = bytecode_cache
self.auto_reload = auto_reload
# configurable policies
self.policies = DEFAULT_POLICIES.copy()
# load extensions
self.extensions = load_extensions(self, extensions)
self.is_async = enable_async
_environment_config_check(self)
def add_extension(self, extension: t.Union[str, t.Type["Extension"]]) -> None:
"""Adds an extension after the environment was created.
.. versionadded:: 2.5
"""
self.extensions.update(load_extensions(self, [extension]))
def extend(self, **attributes: t.Any) -> None:
"""Add the items to the instance of the environment if they do not exist
yet. This is used by :ref:`extensions <writing-extensions>` to register
callbacks and configuration values without breaking inheritance.
"""
for key, value in attributes.items():
if not hasattr(self, key):
setattr(self, key, value)
def overlay(
    self,
    block_start_string: str = missing,
    block_end_string: str = missing,
    variable_start_string: str = missing,
    variable_end_string: str = missing,
    comment_start_string: str = missing,
    comment_end_string: str = missing,
    line_statement_prefix: t.Optional[str] = missing,
    line_comment_prefix: t.Optional[str] = missing,
    trim_blocks: bool = missing,
    lstrip_blocks: bool = missing,
    extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = missing,
    optimized: bool = missing,
    undefined: t.Type[Undefined] = missing,
    finalize: t.Optional[t.Callable[..., t.Any]] = missing,
    autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = missing,
    loader: t.Optional["BaseLoader"] = missing,
    cache_size: int = missing,
    auto_reload: bool = missing,
    bytecode_cache: t.Optional["BytecodeCache"] = missing,
) -> "Environment":
    """Create a new overlay environment that shares all the data with the
    current environment except for cache and the overridden attributes.
    Extensions cannot be removed for an overlayed environment.  An overlayed
    environment automatically gets all the extensions of the environment it
    is linked to plus optional extra extensions.

    Creating overlays should happen after the initial environment was set
    up completely.  Not all attributes are truly linked, some are just
    copied over so modifications on the original environment may not shine
    through.
    """
    # ``locals()`` captures every overlay parameter by name.  This relies on
    # the parameter names matching the Environment attribute names exactly.
    args = dict(locals())
    # ``self`` is not an attribute; ``cache_size`` and ``extensions`` get
    # special handling below instead of plain setattr.
    del args["self"], args["cache_size"], args["extensions"]
    # Clone without calling __init__ so the copied __dict__ carries over
    # all configuration from the linked environment.
    rv = object.__new__(self.__class__)
    rv.__dict__.update(self.__dict__)
    rv.overlayed = True
    rv.linked_to = self
    # Only parameters explicitly passed (not the ``missing`` sentinel)
    # override the inherited values.
    for key, value in args.items():
        if value is not missing:
            setattr(rv, key, value)
    if cache_size is not missing:
        rv.cache = create_cache(cache_size)
    else:
        # The overlay never shares a cache with its parent.
        rv.cache = copy_cache(self.cache)
    # Re-bind every inherited extension to the overlay, then load extras.
    rv.extensions = {}
    for key, value in self.extensions.items():
        rv.extensions[key] = value.bind(rv)
    if extensions is not missing:
        rv.extensions.update(load_extensions(rv, extensions))
    return _environment_config_check(rv)
@property
def lexer(self) -> Lexer:
    """The lexer for this environment."""
    # Delegates to get_lexer so the lexer always reflects the
    # environment's current delimiter settings.
    return get_lexer(self)
def iter_extensions(self) -> t.Iterator["Extension"]:
    """Iterate over the registered extensions, lowest priority first."""
    ordered = sorted(self.extensions.values(), key=lambda ext: ext.priority)
    return iter(ordered)
def getitem(
    self, obj: t.Any, argument: t.Union[str, t.Any]
) -> t.Union[t.Any, Undefined]:
    """Subscript *obj* with *argument*, preferring item access.

    Falls back to attribute access for string arguments, and finally to
    the environment's configured undefined object.
    """
    try:
        return obj[argument]
    except (AttributeError, TypeError, LookupError):
        pass
    if isinstance(argument, str):
        # ``str()`` normally cannot fail here, but mirror the defensive
        # handling for exotic str subclasses.
        try:
            attr = str(argument)
        except Exception:
            attr = None
        if attr is not None:
            try:
                return getattr(obj, attr)
            except AttributeError:
                pass
    # Neither item nor attribute access worked.
    return self.undefined(obj=obj, name=argument)
def getattr(self, obj: t.Any, attribute: str) -> t.Any:
    """Get an item or attribute of an object but prefer the attribute.
    Unlike :meth:`getitem` the attribute *must* be a string.
    """
    # NOTE: this method only shadows the builtin ``getattr`` as a class
    # attribute; the call below still resolves to the builtin.
    try:
        return getattr(obj, attribute)
    except AttributeError:
        pass
    try:
        return obj[attribute]
    except (TypeError, LookupError, AttributeError):
        # Neither attribute nor item exists: hand back the configured
        # undefined object instead of raising.
        return self.undefined(obj=obj, name=attribute)
def _filter_test_common(
    self,
    name: t.Union[str, Undefined],
    value: t.Any,
    args: t.Optional[t.Sequence[t.Any]],
    kwargs: t.Optional[t.Mapping[str, t.Any]],
    context: t.Optional[Context],
    eval_ctx: t.Optional[EvalContext],
    is_filter: bool,
) -> t.Any:
    """Shared implementation behind :meth:`call_filter` and :meth:`call_test`.

    Looks up *name* in the filter or test registry (selected by
    *is_filter*), prepends the implicit argument requested by the
    callable's ``@pass_*`` decorator, and invokes it with *value* as the
    first positional argument.
    """
    if is_filter:
        env_map = self.filters
        type_name = "filter"
    else:
        env_map = self.tests
        type_name = "test"
    func = env_map.get(name)  # type: ignore
    if func is None:
        msg = f"No {type_name} named {name!r}."
        if isinstance(name, Undefined):
            # An undefined name usually means the user forgot to quote the
            # callable name; enrich the message with the undefined error.
            try:
                name._fail_with_undefined_error()
            except Exception as e:
                msg = f"{msg} ({e}; did you forget to quote the callable name?)"
        raise TemplateRuntimeError(msg)
    # ``value`` is always the first positional argument.
    args = [value, *(args if args is not None else ())]
    kwargs = kwargs if kwargs is not None else {}
    # Inject the context / eval context / environment requested via
    # @pass_context, @pass_eval_context or @pass_environment.
    pass_arg = _PassArg.from_obj(func)
    if pass_arg is _PassArg.context:
        if context is None:
            raise TemplateRuntimeError(
                f"Attempted to invoke a context {type_name} without context."
            )
        args.insert(0, context)
    elif pass_arg is _PassArg.eval_context:
        if eval_ctx is None:
            # Derive the eval context from the template context when
            # available, otherwise build a fresh one for the environment.
            if context is not None:
                eval_ctx = context.eval_ctx
            else:
                eval_ctx = EvalContext(self)
        args.insert(0, eval_ctx)
    elif pass_arg is _PassArg.environment:
        args.insert(0, self)
    return func(*args, **kwargs)
def call_filter(
    self,
    name: str,
    value: t.Any,
    args: t.Optional[t.Sequence[t.Any]] = None,
    kwargs: t.Optional[t.Mapping[str, t.Any]] = None,
    context: t.Optional[Context] = None,
    eval_ctx: t.Optional[EvalContext] = None,
) -> t.Any:
    """Invoke the filter *name* on *value* exactly as compiled templates do.

    May return a coroutine when the environment is in async mode and the
    filter supports async execution; awaiting it is the caller's
    responsibility.

    .. versionadded:: 2.7
    """
    # The trailing flag selects filter (True) semantics in the shared
    # filter/test dispatcher.
    return self._filter_test_common(
        name,
        value,
        args,
        kwargs,
        context,
        eval_ctx,
        True,
    )
def call_test(
    self,
    name: str,
    value: t.Any,
    args: t.Optional[t.Sequence[t.Any]] = None,
    kwargs: t.Optional[t.Mapping[str, t.Any]] = None,
    context: t.Optional[Context] = None,
    eval_ctx: t.Optional[EvalContext] = None,
) -> t.Any:
    """Invoke the test *name* on *value* exactly as compiled templates do.

    May return a coroutine when the environment is in async mode and the
    test supports async execution; awaiting it is the caller's
    responsibility.

    .. versionchanged:: 3.0
        Tests support ``@pass_context``, etc. decorators.  Added the
        ``context`` and ``eval_ctx`` parameters.

    .. versionadded:: 2.7
    """
    # The trailing flag selects test (False) semantics in the shared
    # filter/test dispatcher.
    return self._filter_test_common(
        name,
        value,
        args,
        kwargs,
        context,
        eval_ctx,
        False,
    )
@internalcode
def parse(
    self,
    source: str,
    name: t.Optional[str] = None,
    filename: t.Optional[str] = None,
) -> nodes.Template:
    """Parse the sourcecode and return the abstract syntax tree.  This
    tree of nodes is used by the compiler to convert the template into
    executable source- or bytecode.  This is useful for debugging or to
    extract information from templates.

    If you are :ref:`developing Jinja extensions <writing-extensions>`
    this gives you a good overview of the node tree generated.
    """
    try:
        return self._parse(source, name, filename)
    except TemplateSyntaxError:
        # handle_exception always raises (it rewrites the traceback), so
        # this branch never falls through to an implicit ``None``.
        self.handle_exception(source=source)
def _parse(
    self, source: str, name: t.Optional[str], filename: t.Optional[str]
) -> nodes.Template:
    """Internal parsing function used by `parse` and `compile`."""
    parser = Parser(self, source, name, filename)
    return parser.parse()
def lex(
    self,
    source: str,
    name: t.Optional[str] = None,
    filename: t.Optional[str] = None,
) -> t.Iterator[t.Tuple[int, str, str]]:
    """Tokenize *source*, yielding ``(lineno, token_type, value)`` tuples.

    Useful for :ref:`extension development <writing-extensions>` and for
    debugging templates.  No preprocessing is applied; run the source
    through :meth:`preprocess` first if extension preprocessing matters.
    """
    text = str(source)
    try:
        return self.lexer.tokeniter(text, name, filename)
    except TemplateSyntaxError:
        self.handle_exception(source=text)
def preprocess(
    self,
    source: str,
    name: t.Optional[str] = None,
    filename: t.Optional[str] = None,
) -> str:
    """Run *source* through every extension's ``preprocess`` hook.

    Called automatically by all parsing and compiling methods, but *not*
    by :meth:`lex`, which tokenizes the raw source.
    """
    result = str(source)
    # Extensions are applied in priority order, each receiving the
    # previous extension's output.
    for extension in self.iter_extensions():
        result = extension.preprocess(result, name, filename)
    return result
def _tokenize(
    self,
    source: str,
    name: t.Optional[str],
    filename: t.Optional[str] = None,
    state: t.Optional[str] = None,
) -> TokenStream:
    """Called by the parser to do the preprocessing and filtering
    for all the extensions.  Returns a :class:`~spack.vendor.jinja2.lexer.TokenStream`.
    """
    source = self.preprocess(source, name, filename)
    stream = self.lexer.tokenize(source, name, filename, state)
    for ext in self.iter_extensions():
        stream = ext.filter_stream(stream)  # type: ignore
        # An extension may return a plain iterable of tokens; normalize
        # it back into a TokenStream for the next consumer.
        if not isinstance(stream, TokenStream):
            stream = TokenStream(stream, name, filename)  # type: ignore
    return stream
def _generate(
    self,
    source: nodes.Template,
    name: t.Optional[str],
    filename: t.Optional[str],
    defer_init: bool = False,
) -> str:
    """Internal hook that can be overridden to hook a different generate
    method in.

    .. versionadded:: 2.5
    """
    # Delegates to the code generator; ``optimized`` mirrors the
    # environment setting chosen at construction time.
    return generate(  # type: ignore
        source,
        self,
        name,
        filename,
        defer_init=defer_init,
        optimized=self.optimized,
    )
def _compile(self, source: str, filename: str) -> CodeType:
"""Internal hook that can be overridden to hook a different compile
method in.
.. versionadded:: 2.5
"""
return compile(source, filename, "exec") # type: ignore
@typing.overload
def compile(  # type: ignore
    self,
    source: t.Union[str, nodes.Template],
    name: t.Optional[str] = None,
    filename: t.Optional[str] = None,
    raw: "te.Literal[False]" = False,
    defer_init: bool = False,
) -> CodeType:
    ...

@typing.overload
def compile(
    self,
    source: t.Union[str, nodes.Template],
    name: t.Optional[str] = None,
    filename: t.Optional[str] = None,
    raw: "te.Literal[True]" = ...,
    defer_init: bool = False,
) -> str:
    ...

@internalcode
def compile(
    self,
    source: t.Union[str, nodes.Template],
    name: t.Optional[str] = None,
    filename: t.Optional[str] = None,
    raw: bool = False,
    defer_init: bool = False,
) -> t.Union[str, CodeType]:
    """Compile a node or template source code.  The `name` parameter is
    the load name of the template after it was joined using
    :meth:`join_path` if necessary, not the filename on the file system.
    the `filename` parameter is the estimated filename of the template on
    the file system.  If the template came from a database or memory this
    can be omitted.

    The return value of this method is a python code object.  If the `raw`
    parameter is `True` the return value will be a string with python
    code equivalent to the bytecode returned otherwise.  This method is
    mainly used internally.

    `defer_init` is use internally to aid the module code generator.  This
    causes the generated code to be able to import without the global
    environment variable to be set.

    .. versionadded:: 2.4
        `defer_init` parameter added.
    """
    source_hint = None
    try:
        if isinstance(source, str):
            # Keep the raw text around so a syntax error can display the
            # offending template source.
            source_hint = source
            source = self._parse(source, name, filename)
        # ``source`` is now an AST; generate Python source from it.
        source = self._generate(source, name, filename, defer_init=defer_init)
        if raw:
            return source
        if filename is None:
            filename = "<template>"
        return self._compile(source, filename)
    except TemplateSyntaxError:
        # Always raises; rewrites the traceback to point at the template.
        self.handle_exception(source=source_hint)
def compile_expression(
    self, source: str, undefined_to_none: bool = True
) -> "TemplateExpression":
    """A handy helper method that returns a callable that accepts keyword
    arguments that appear as variables in the expression.  If called it
    returns the result of the expression.

    This is useful if applications want to use the same rules as Jinja
    in template "configuration files" or similar situations.

    Example usage:

    >>> env = Environment()
    >>> expr = env.compile_expression('foo == 42')
    >>> expr(foo=23)
    False
    >>> expr(foo=42)
    True

    Per default the return value is converted to `None` if the
    expression returns an undefined value.  This can be changed
    by setting `undefined_to_none` to `False`.

    >>> env.compile_expression('var')() is None
    True
    >>> env.compile_expression('var', undefined_to_none=False)()
    Undefined

    .. versionadded:: 2.1
    """
    # Parse in "variable" state so ``source`` is read as a bare
    # expression rather than a full template.
    parser = Parser(self, source, state="variable")
    try:
        expr = parser.parse_expression()
        # Anything left after the expression is a syntax error.
        if not parser.stream.eos:
            raise TemplateSyntaxError(
                "chunk after expression", parser.stream.current.lineno, None, None
            )
        expr.set_environment(self)
    except TemplateSyntaxError:
        self.handle_exception(source=source)
    # Wrap the expression as ``result = <expr>`` and compile it as a
    # one-statement template the TemplateExpression can render.
    body = [nodes.Assign(nodes.Name("result", "store"), expr, lineno=1)]
    template = self.from_string(nodes.Template(body, lineno=1))
    return TemplateExpression(template, undefined_to_none)
def compile_templates(
    self,
    target: t.Union[str, os.PathLike],
    extensions: t.Optional[t.Collection[str]] = None,
    filter_func: t.Optional[t.Callable[[str], bool]] = None,
    zip: t.Optional[str] = "deflated",
    log_function: t.Optional[t.Callable[[str], None]] = None,
    ignore_errors: bool = True,
) -> None:
    """Finds all the templates the loader can find, compiles them
    and stores them in `target`.  If `zip` is `None`, instead of in a
    zipfile, the templates will be stored in a directory.
    By default a deflate zip algorithm is used.  To switch to
    the stored algorithm, `zip` can be set to ``'stored'``.

    `extensions` and `filter_func` are passed to :meth:`list_templates`.
    Each template returned will be compiled to the target folder or
    zipfile.

    By default template compilation errors are ignored.  In case a
    log function is provided, errors are logged.  If you want template
    syntax errors to abort the compilation you can set `ignore_errors`
    to `False` and you will get an exception on syntax errors.

    .. versionadded:: 2.4
    """
    from .loaders import ModuleLoader

    if log_function is None:

        def log_function(x: str) -> None:
            pass

    assert log_function is not None
    assert self.loader is not None, "No loader configured."

    # NOTE: ``zip`` intentionally shadows the builtin; the parameter name
    # is public API and must stay.
    def write_file(filename: str, data: str) -> None:
        if zip:
            info = ZipInfo(filename)
            # Mark entries executable/readable when extracted on POSIX.
            info.external_attr = 0o755 << 16
            zip_file.writestr(info, data)
        else:
            with open(os.path.join(target, filename), "wb") as f:
                f.write(data.encode("utf8"))

    if zip is not None:
        from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED

        zip_file = ZipFile(
            target, "w", dict(deflated=ZIP_DEFLATED, stored=ZIP_STORED)[zip]
        )
        log_function(f"Compiling into Zip archive {target!r}")
    else:
        if not os.path.isdir(target):
            os.makedirs(target)
        log_function(f"Compiling into folder {target!r}")

    try:
        for name in self.list_templates(extensions, filter_func):
            source, filename, _ = self.loader.get_source(self, name)
            try:
                code = self.compile(source, name, filename, True, True)
            except TemplateSyntaxError as e:
                if not ignore_errors:
                    raise
                log_function(f'Could not compile "{name}": {e}')
                continue

            filename = ModuleLoader.get_module_filename(name)
            write_file(filename, code)
            # FIX: the message previously interpolated a scrubbed literal
            # "(unknown)"; log the actual module filename that was written.
            log_function(f'Compiled "{name}" as {filename}')
    finally:
        if zip:
            zip_file.close()

    log_function("Finished compiling templates")
def list_templates(
    self,
    extensions: t.Optional[t.Collection[str]] = None,
    filter_func: t.Optional[t.Callable[[str], bool]] = None,
) -> t.List[str]:
    """Return the loader's template names, optionally filtered.

    ``extensions`` keeps only names whose file extension appears in the
    collection; ``filter_func`` is a predicate on the template name.
    Passing both is a :exc:`TypeError`.  Requires a loader whose
    :meth:`~BaseLoader.list_templates` is implemented.

    .. versionadded:: 2.4
    """
    assert self.loader is not None, "No loader configured."
    names = self.loader.list_templates()

    if extensions is not None:
        if filter_func is not None:
            raise TypeError(
                "either extensions or filter_func can be passed, but not both"
            )

        def filter_func(x: str) -> bool:
            # Equivalent to: "." in x and x.rsplit(".", 1)[1] in extensions
            root_and_ext = x.rsplit(".", 1)
            return len(root_and_ext) == 2 and root_and_ext[1] in extensions

    if filter_func is None:
        return names
    return [name for name in names if filter_func(name)]
def handle_exception(self, source: t.Optional[str] = None) -> "te.NoReturn":
    """Exception handling helper.  This is used internally to either raise
    rewritten exceptions or return a rendered traceback for the template.
    """
    # Lazy import: only needed on the error path.
    from .debug import rewrite_traceback_stack

    raise rewrite_traceback_stack(source=source)
def join_path(self, template: str, parent: str) -> str:
    """Resolve *template* relative to *parent*.

    All default lookups are relative to the loader root, so the base
    implementation simply echoes *template* back unchanged.  Subclasses
    may override this to implement parent-relative template paths.
    """
    # ``parent`` is intentionally ignored in the default implementation.
    return template
@internalcode
def _load_template(
    self, name: str, globals: t.Optional[t.Mapping[str, t.Any]]
) -> "Template":
    # Load ``name`` through the loader, consulting and populating the
    # template cache when one is configured.
    if self.loader is None:
        raise TypeError("no loader for this environment specified")
    # The cache key includes the loader (as a weakref) so overlays with
    # different loaders never share cache entries.
    cache_key = (weakref.ref(self.loader), name)
    if self.cache is not None:
        template = self.cache.get(cache_key)
        # Only reuse a cached template when auto_reload is off or the
        # template reports itself as up to date.
        if template is not None and (
            not self.auto_reload or template.is_up_to_date
        ):
            # template.globals is a ChainMap, modifying it will only
            # affect the template, not the environment globals.
            if globals:
                template.globals.update(globals)
            return template
    template = self.loader.load(self, name, self.make_globals(globals))
    if self.cache is not None:
        self.cache[cache_key] = template
    return template
@internalcode
def get_template(
    self,
    name: t.Union[str, "Template"],
    parent: t.Optional[str] = None,
    globals: t.Optional[t.Mapping[str, t.Any]] = None,
) -> "Template":
    """Load the template *name* via :attr:`loader` and return it.

    Raises :exc:`TemplateNotFound` if the template does not exist.

    :param name: Name of the template to load.  A :class:`Template`
        object is returned unchanged (2.4+).
    :param parent: Name of the parent template importing this one;
        :meth:`join_path` can use it for name transformations.
    :param globals: Extra variables layered over the environment
        :attr:`globals` for all renders of this template.  If the
        template was already cached, its globals are updated with any
        new items (3.0+).
    """
    if isinstance(name, Template):
        return name
    template_name = name if parent is None else self.join_path(name, parent)
    return self._load_template(template_name, globals)
@internalcode
def select_template(
    self,
    names: t.Iterable[t.Union[str, "Template"]],
    parent: t.Optional[str] = None,
    globals: t.Optional[t.Mapping[str, t.Any]] = None,
) -> "Template":
    """Like :meth:`get_template`, but tries loading multiple names.
    If none of the names can be loaded a :exc:`TemplatesNotFound`
    exception is raised.

    :param names: List of template names to try loading in order.
    :param parent: The name of the parent template importing this
        template.  :meth:`join_path` can be used to implement name
        transformations with this.
    :param globals: Extend the environment :attr:`globals` with
        these extra variables available for all renders of this
        template.  If the template has already been loaded and
        cached, its globals are updated with any new items.

    .. versionchanged:: 3.0
        If a template is loaded from cache, ``globals`` will update
        the template's globals instead of ignoring the new values.

    .. versionchanged:: 2.11
        If ``names`` is :class:`Undefined`, an :exc:`UndefinedError`
        is raised instead.  If no templates were found and ``names``
        contains :class:`Undefined`, the message is more helpful.

    .. versionchanged:: 2.4
        If ``names`` contains a :class:`Template` object it is
        returned unchanged.

    .. versionadded:: 2.3
    """
    if isinstance(names, Undefined):
        # Raise the undefined error with its more helpful message.
        names._fail_with_undefined_error()
    if not names:
        raise TemplatesNotFound(
            message="Tried to select from an empty list of templates."
        )
    for name in names:
        if isinstance(name, Template):
            return name
        if parent is not None:
            name = self.join_path(name, parent)
        try:
            return self._load_template(name, globals)
        except (TemplateNotFound, UndefinedError):
            # Keep trying the remaining candidates.
            pass
    raise TemplatesNotFound(names)  # type: ignore
@internalcode
def get_or_select_template(
    self,
    template_name_or_list: t.Union[
        str, "Template", t.List[t.Union[str, "Template"]]
    ],
    parent: t.Optional[str] = None,
    globals: t.Optional[t.Mapping[str, t.Any]] = None,
) -> "Template":
    """Dispatch to :meth:`select_template` for an iterable of names or
    :meth:`get_template` for a single name.

    .. versionadded:: 2.3
    """
    candidate = template_name_or_list
    # Undefined goes through get_template so the undefined error is raised.
    if isinstance(candidate, (str, Undefined)):
        return self.get_template(candidate, parent, globals)
    if isinstance(candidate, Template):
        return candidate
    return self.select_template(candidate, parent, globals)
def from_string(
    self,
    source: t.Union[str, nodes.Template],
    globals: t.Optional[t.Mapping[str, t.Any]] = None,
    template_class: t.Optional[t.Type["Template"]] = None,
) -> "Template":
    """Compile *source* into a template without involving :attr:`loader`.

    :param source: Jinja source (or parsed node tree) to compile.
    :param globals: Extra variables layered over the environment
        :attr:`globals` for all renders of this template.
    :param template_class: Return an instance of this
        :class:`Template` class instead of the environment default.
    """
    template_globals = self.make_globals(globals)
    chosen_class = template_class or self.template_class
    compiled = self.compile(source)
    return chosen_class.from_code(self, compiled, template_globals, None)
def make_globals(
    self, d: t.Optional[t.Mapping[str, t.Any]]
) -> t.MutableMapping[str, t.Any]:
    """Layer the template-specific globals *d* over the environment globals.

    Returns a :class:`collections.ChainMap`: writes land in the
    template's own mapping, while reads still see live changes to the
    environment's globals.  Avoid modifying any globals after a template
    is loaded.

    :param d: Dict of template-specific globals.

    .. versionchanged:: 3.0
        Use :class:`collections.ChainMap` to always prevent mutating
        environment globals.
    """
    template_globals = {} if d is None else d
    return ChainMap(template_globals, self.globals)
| Environment |
python | pytorch__pytorch | test/test_utils_config_module.py | {
"start": 537,
"end": 14451
class TestConfigModule(TestCase):
    """Tests for torch's config-module machinery via the fake_config_module
    fixtures (``config``, ``config2``, ``config3``).

    NOTE(review): the class name had been masked as ``____`` and the body's
    indentation flattened in this source; both are restored here.  The name
    is taken from this record's target field; ``run_tests()`` discovers any
    ``TestCase`` subclass, so the rename is safe.
    """

    def tearDown(self):
        # Config changes get persisted between test cases
        for k in config._config:
            config._config[k].user_override = _UNSET_SENTINEL

    def test_base_value_loading(self):
        self.assertTrue(config.e_bool)
        self.assertTrue(config.nested.e_bool)
        self.assertTrue(config.e_optional)
        self.assertEqual(config.e_int, 1)
        self.assertEqual(config.e_float, 1.0)
        self.assertEqual(config.e_string, "string")
        self.assertEqual(config.e_list, [1])
        self.assertEqual(config.e_set, {1})
        self.assertEqual(config.e_tuple, (1,))
        self.assertEqual(config.e_dict, {1: 2})
        self.assertEqual(config.e_none, None)
        with self.assertRaises(
            AttributeError, msg="fake_config_module.does_not_exist does not exist"
        ):
            config.does_not_exist

    def test_type_loading(self):
        self.assertEqual(config.get_type("e_optional"), Optional[bool])
        self.assertEqual(config.get_type("e_none"), Optional[bool])

    def test_overrides(self):
        config.e_bool = False
        self.assertFalse(config.e_bool)
        config.nested.e_bool = False
        self.assertFalse(config.nested.e_bool)
        config.e_int = 2
        self.assertEqual(config.e_int, 2)
        config.e_float = 2.0
        self.assertEqual(config.e_float, 2.0)
        config.e_string = "string2"
        self.assertEqual(config.e_string, "string2")
        config.e_list = [2]
        self.assertEqual(config.e_list, [2])
        config.e_set = {2}
        self.assertEqual(config.e_set, {2})
        config.e_tuple = (2,)
        self.assertEqual(config.e_tuple, (2,))
        config.e_dict = {2: 3}
        self.assertEqual(config.e_dict, {2: 3})
        config.e_none = "not none"
        self.assertEqual(config.e_none, "not none")
        config.e_none = None
        self.assertEqual(config.e_none, None)
        config.e_optional = None
        self.assertEqual(config.e_optional, None)
        config.e_optional = False
        self.assertEqual(config.e_optional, False)
        with self.assertRaises(
            AttributeError, msg="fake_config_module.does_not_exist does not exist"
        ):
            config.does_not_exist = 0

    def test_none_override_semantics(self):
        config.e_bool = None
        self.assertIsNone(config.e_bool)
        for k in config._config:
            config._config[k].user_override = _UNSET_SENTINEL

    def test_reference_semantics(self):
        # In-place mutation of the default containers is visible through
        # the config attribute.
        config.e_list.append(2)
        self.assertEqual(config.e_list, [1, 2])
        config.e_set.add(2)
        self.assertEqual(config.e_set, {1, 2})
        config.e_dict[2] = 3
        self.assertEqual(config.e_dict, {1: 2, 2: 3})

    def test_env_name_semantics(self):
        self.assertTrue(config.e_env_default)
        self.assertFalse(config.e_env_default_FALSE)
        self.assertTrue(config.e_env_force)
        config.e_env_default = False
        self.assertFalse(config.e_env_default)
        # A forced env value wins over user overrides.
        config.e_env_force = False
        self.assertTrue(config.e_env_force)

    def test_env_name_string_semantics(self):
        self.assertEqual(config.e_env_default_str, "1234")
        self.assertEqual(config.e_env_default_str_empty, "")
        config.e_env_default_str = "override"
        self.assertEqual(config.e_env_default_str, "override")

    def test_multi_env(self):
        self.assertTrue(config2.e_env_default_multi)
        self.assertTrue(config2.e_env_force_multi)

    def test_save_config(self):
        p = config.save_config()
        self.assertDictEqual(
            pickle.loads(p),
            {
                "_cache_config_ignore_prefix": ["magic_cache_config"],
                "e_bool": True,
                "e_dict": {1: 2},
                "e_float": 1.0,
                "e_int": 1,
                "e_list": [1],
                "e_none": None,
                "e_set": {1},
                "e_string": "string",
                "e_tuple": (1,),
                "nested.e_bool": True,
                "_e_ignored": True,
                "e_compile_ignored": True,
                "magic_cache_config_ignored": True,
                "_save_config_ignore": ["e_ignored"],
                "e_config": True,
                "e_jk": True,
                "e_jk_false": False,
                "e_env_default": True,
                "e_env_default_FALSE": False,
                "e_env_default_str": "1234",
                "e_env_default_str_empty": "",
                "e_env_force": True,
                "e_optional": True,
            },
        )
        config.e_bool = False
        config.e_ignored = False
        config.load_config(p)
        # e_bool is restored; e_ignored is in _save_config_ignore.
        self.assertTrue(config.e_bool)
        self.assertFalse(config.e_ignored)

    def test_save_config_portable(self):
        p = config.save_config_portable()
        self.assertDictEqual(
            p,
            {
                "e_bool": True,
                "e_dict": {1: 2},
                "e_float": 1.0,
                "e_int": 1,
                "e_list": [1],
                "e_none": None,
                "e_set": {1},
                "e_string": "string",
                "e_tuple": (1,),
                "nested.e_bool": True,
                "e_ignored": True,
                "e_compile_ignored": True,
                "e_config": True,
                "e_jk": True,
                "e_jk_false": False,
                "e_env_default": True,
                "e_env_default_FALSE": False,
                "e_env_default_str": "1234",
                "e_env_default_str_empty": "",
                "e_env_force": True,
                "e_optional": True,
            },
        )
        config.e_bool = False
        config._e_ignored = False
        config.load_config(p)
        self.assertTrue(config.e_bool)
        self.assertFalse(config._e_ignored)

    def test_codegen_config(self):
        config.e_bool = False
        config.e_ignored = False
        code = config.codegen_config()
        self.assertEqual(
            code,
            """torch.testing._internal.fake_config_module.e_bool = False
torch.testing._internal.fake_config_module.e_env_default = True
torch.testing._internal.fake_config_module.e_env_default_FALSE = False
torch.testing._internal.fake_config_module.e_env_default_str = '1234'
torch.testing._internal.fake_config_module.e_env_default_str_empty = ''
torch.testing._internal.fake_config_module.e_env_force = True""",
        )

    def test_codegen_config_function(self):
        import logging
        import warnings

        config3.e_list = [print, warnings.warn, logging.warn]
        config3.e_set = {print}
        config3.e_func = warnings.warn
        code = config3.codegen_config()
        self.assertIn("import _warnings", code)
        self.assertIn("import logging", code)
        self.assertIn(
            """torch.testing._internal.fake_config_module3.e_list = ['print', '_warnings.warn', 'logging.warn']
torch.testing._internal.fake_config_module3.e_set = { print }
torch.testing._internal.fake_config_module3.e_func = _warnings.warn""",
            code,
        )

    def test_get_hash(self):
        hash_value = b"\x87\xf7\xc6\x1di\x7f\x96-\x85\xdc\x04\xd5\xd0\xf6\x1c\x87"
        self.assertEqual(
            config.get_hash(),
            hash_value,
        )
        # Test cached value
        self.assertEqual(
            config.get_hash(),
            hash_value,
        )
        self.assertEqual(
            config.get_hash(),
            hash_value,
        )
        config._hash_digest = "fake"
        self.assertEqual(config.get_hash(), "fake")
        config.e_bool = False
        self.assertNotEqual(
            config.get_hash(),
            hash_value,
        )
        config.e_bool = True
        # Test ignored values
        config.e_compile_ignored = False
        self.assertEqual(
            config.get_hash(),
            hash_value,
        )

    def test_dict_copy_semantics(self):
        p = config.shallow_copy_dict()
        self.assertDictEqual(
            p,
            {
                "e_bool": True,
                "e_dict": {1: 2},
                "e_float": 1.0,
                "e_int": 1,
                "e_list": [1],
                "e_none": None,
                "e_set": {1},
                "e_string": "string",
                "e_tuple": (1,),
                "nested.e_bool": True,
                "e_ignored": True,
                "_e_ignored": True,
                "e_compile_ignored": True,
                "_cache_config_ignore_prefix": ["magic_cache_config"],
                "_save_config_ignore": ["e_ignored"],
                "magic_cache_config_ignored": True,
                "e_config": True,
                "e_jk": True,
                "e_jk_false": False,
                "e_env_default": True,
                "e_env_default_FALSE": False,
                "e_env_default_str": "1234",
                "e_env_default_str_empty": "",
                "e_env_force": True,
                "e_optional": True,
            },
        )
        p2 = config.to_dict()
        self.assertEqual(
            p2,
            {
                "e_bool": True,
                "e_dict": {1: 2},
                "e_float": 1.0,
                "e_int": 1,
                "e_list": [1],
                "e_none": None,
                "e_set": {1},
                "e_string": "string",
                "e_tuple": (1,),
                "nested.e_bool": True,
                "e_ignored": True,
                "_e_ignored": True,
                "e_compile_ignored": True,
                "_cache_config_ignore_prefix": ["magic_cache_config"],
                "_save_config_ignore": ["e_ignored"],
                "magic_cache_config_ignored": True,
                "e_config": True,
                "e_jk": True,
                "e_jk_false": False,
                "e_env_default": True,
                "e_env_default_FALSE": False,
                "e_env_default_str": "1234",
                "e_env_default_str_empty": "",
                "e_env_force": True,
                "e_optional": True,
            },
        )
        p3 = config.get_config_copy()
        self.assertEqual(
            p3,
            {
                "e_bool": True,
                "e_dict": {1: 2},
                "e_float": 1.0,
                "e_int": 1,
                "e_list": [1],
                "e_none": None,
                "e_set": {1},
                "e_string": "string",
                "e_tuple": (1,),
                "nested.e_bool": True,
                "e_ignored": True,
                "_e_ignored": True,
                "e_compile_ignored": True,
                "_cache_config_ignore_prefix": ["magic_cache_config"],
                "_save_config_ignore": ["e_ignored"],
                "magic_cache_config_ignored": True,
                "e_config": True,
                "e_jk": True,
                "e_jk_false": False,
                "e_env_default": True,
                "e_env_default_FALSE": False,
                "e_env_default_str": "1234",
                "e_env_default_str_empty": "",
                "e_env_force": True,
                "e_optional": True,
            },
        )
        # Shallow + deep copy semantics
        config.e_dict[2] = 3
        self.assertEqual(p["e_dict"], {1: 2})
        self.assertEqual(p2["e_dict"], {1: 2})
        self.assertEqual(p3["e_dict"], {1: 2})

    def test_patch(self):
        self.assertTrue(config.e_bool)
        with config.patch("e_bool", False):
            self.assertFalse(config.e_bool)
        self.assertTrue(config.e_bool)
        with config.patch(e_bool=False):
            self.assertFalse(config.e_bool)
        self.assertTrue(config.e_bool)
        with self.assertRaises(AssertionError):
            with config.patch("does_not_exist"):
                pass

    def test_make_closur_patcher(self):
        revert = config._make_closure_patcher(e_bool=False)()
        self.assertFalse(config.e_bool)
        revert()
        self.assertTrue(config.e_bool)

    def test_unittest_patch(self):
        with patch("torch.testing._internal.fake_config_module.e_bool", False):
            with patch("torch.testing._internal.fake_config_module.e_bool", False):
                self.assertFalse(config.e_bool)
            # unittest.mock has some very weird semantics around deletion of attributes when undoing patches
            self.assertFalse(config.e_bool)
        self.assertTrue(config.e_bool)

    def test_bad_jk_type(self):
        with self.assertRaises(
            AssertionError,
            msg="AssertionError: justknobs only support booleans, thisisnotvalid is not a boolean",
        ):
            _ConfigEntry(Config(default="bad", justknob="fake_knob"))

    def test_alias(self):
        self.assertFalse(config2.e_aliasing_bool)
        self.assertFalse(config.e_aliased_bool)
        with config2.patch(e_aliasing_bool=True):
            self.assertTrue(config2.e_aliasing_bool)
            self.assertTrue(config.e_aliased_bool)
        with config.patch(e_aliased_bool=True):
            self.assertTrue(config2.e_aliasing_bool)

    def test_reference_is_default(self):
        t = config.e_dict
        self.assertTrue(config._is_default("e_dict"))
        t["a"] = "b"
        self.assertFalse(config._is_default("e_dict"))

    def test_invalid_config_int(self):
        with self.assertRaises(AssertionError):
            _ConfigEntry(
                Config(default=2, env_name_default="FAKE_DISABLE", value_type=int)
            )

    def test_invalid_config_float(self):
        with self.assertRaises(AssertionError):
            _ConfigEntry(
                Config(default=2, env_name_force="FAKE_DISABLE", value_type=float)
            )
# Standard PyTorch test-suite entry point.
if __name__ == "__main__":
    run_tests()
| TestConfigModule |
python | mlflow__mlflow | mlflow/gateway/provider_registry.py | {
"start": 180,
"end": 3059
class ProviderRegistry:
    """Registry mapping gateway provider names to ``BaseProvider`` classes.

    NOTE(review): the class name had been masked as ``____`` in this source;
    ``ProviderRegistry`` is restored to match the ``registry: ProviderRegistry``
    annotation used by ``_register_default_providers`` below.
    """

    def __init__(self):
        # Keyed by Provider enum member (built-ins) or plain string (plugins).
        self._providers: dict[str | Provider, type[BaseProvider]] = {}

    def register(self, name: str, provider: type[BaseProvider]):
        """Register *provider* under *name*; duplicate names are an error."""
        if name in self._providers:
            raise MlflowException.invalid_parameter_value(
                f"Provider {name} is already registered: {self._providers[name]}"
            )
        self._providers[name] = provider

    def get(self, name: str) -> type[BaseProvider]:
        """Return the provider class registered under *name*, or raise."""
        if name not in self._providers:
            raise MlflowException.invalid_parameter_value(f"Provider {name} not found")
        return self._providers[name]

    def keys(self):
        """Return the registered provider names as a list."""
        return list(self._providers.keys())
def _register_default_providers(registry: ProviderRegistry):
from mlflow.gateway.providers.ai21labs import AI21LabsProvider
from mlflow.gateway.providers.anthropic import AnthropicProvider
from mlflow.gateway.providers.bedrock import AmazonBedrockProvider
from mlflow.gateway.providers.cohere import CohereProvider
from mlflow.gateway.providers.gemini import GeminiProvider
from mlflow.gateway.providers.huggingface import HFTextGenerationInferenceServerProvider
from mlflow.gateway.providers.mistral import MistralProvider
from mlflow.gateway.providers.mlflow import MlflowModelServingProvider
from mlflow.gateway.providers.mosaicml import MosaicMLProvider
from mlflow.gateway.providers.openai import OpenAIProvider
from mlflow.gateway.providers.palm import PaLMProvider
from mlflow.gateway.providers.togetherai import TogetherAIProvider
registry.register(Provider.OPENAI, OpenAIProvider)
registry.register(Provider.ANTHROPIC, AnthropicProvider)
registry.register(Provider.COHERE, CohereProvider)
registry.register(Provider.AI21LABS, AI21LabsProvider)
registry.register(Provider.MOSAICML, MosaicMLProvider)
registry.register(Provider.PALM, PaLMProvider)
registry.register(Provider.GEMINI, GeminiProvider)
registry.register(Provider.MLFLOW_MODEL_SERVING, MlflowModelServingProvider)
registry.register(Provider.BEDROCK, AmazonBedrockProvider)
registry.register(Provider.AMAZON_BEDROCK, AmazonBedrockProvider)
registry.register(
Provider.HUGGINGFACE_TEXT_GENERATION_INFERENCE, HFTextGenerationInferenceServerProvider
)
registry.register(Provider.MISTRAL, MistralProvider)
registry.register(Provider.TOGETHERAI, TogetherAIProvider)
def _register_plugin_providers(registry: ProviderRegistry):
providers = get_entry_points("mlflow.gateway.providers")
for p in providers:
cls = p.load()
registry.register(p.name, cls)
def is_supported_provider(name: str) -> bool:
return name in provider_registry.keys()
provider_registry = ProviderRegistry()
_register_default_providers(provider_registry)
_register_plugin_providers(provider_registry)
| ProviderRegistry |
python | google__pytype | pytype/tests/test_anystr2.py | {
"start": 105,
"end": 1756
} | class ____(test_base.BaseTest):
"""Tests for issues related to AnyStr."""
def test_callable(self):
"""Tests Callable + AnyStr."""
self.Check("""
from typing import AnyStr, Callable
def f1(f: Callable[[AnyStr], AnyStr]):
f2(f)
def f2(f: Callable[[AnyStr], AnyStr]):
pass
""")
def test_unknown_against_multiple_anystr(self):
self.Check("""
from typing import Any, Dict, Tuple, AnyStr
def foo(x: Dict[Tuple[AnyStr], AnyStr]): ...
foo(__any_object__)
""")
def test_multiple_unknown_against_multiple_anystr(self):
self.Check("""
from typing import AnyStr, List
def foo(x: List[AnyStr], y: List[AnyStr]): ...
foo(__any_object__, [__any_object__])
""")
def test_anystr_in_closure(self):
self.assertNoCrash(
self.Check,
"""
from typing import AnyStr, Dict, Optional
def foo(d: Dict[unicode, Optional[AnyStr]] = None):
def bar() -> Optional[AnyStr]:
return __any_object__
d[__any_object__] = bar()
""",
)
def test_missing_import(self):
self.CheckWithErrors("""
def f(x: AnyStr): # name-error
pass
""")
def test_generic_inheritance(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import AnyStr, Generic
class Foo(Generic[AnyStr]):
@property
def name(self) -> AnyStr | None: ...
def dofoo() -> Foo[str]: ...
""",
)]):
self.Check("""
import foo
assert_type(foo.dofoo().name, 'Optional[str]')
assert_type(foo.dofoo().name, str | None)
""")
| AnyStrTest |
python | dagster-io__dagster | python_modules/dagster-pipes/dagster_pipes/__init__.py | {
"start": 36239,
"end": 36975
} | class ____(PipesParamsLoader):
"""Params loader that extracts params from a Mapping provided at init time."""
def __init__(self, mapping: Mapping[str, str]):
self._mapping = mapping
def is_dagster_pipes_process(self) -> bool:
# use the presence of DAGSTER_PIPES_CONTEXT to discern if we are in a pipes process
return DAGSTER_PIPES_CONTEXT_ENV_VAR in self._mapping
def load_context_params(self) -> PipesParams:
raw_value = self._mapping[DAGSTER_PIPES_CONTEXT_ENV_VAR]
return decode_param(raw_value)
def load_messages_params(self) -> PipesParams:
raw_value = self._mapping[DAGSTER_PIPES_MESSAGES_ENV_VAR]
return decode_param(raw_value)
| PipesMappingParamsLoader |
python | spack__spack | lib/spack/spack/directives_meta.py | {
"start": 541,
"end": 9520
} | class ____(type):
"""Flushes the directives that were temporarily stored in the staging
area into the package.
"""
# Set of all known directives
_directive_dict_names: Set[str] = set()
_directives_to_be_executed: List[Callable] = []
_when_constraints_from_context: List[spack.spec.Spec] = []
_default_args: List[dict] = []
def __new__(
cls: Type["DirectiveMeta"], name: str, bases: tuple, attr_dict: dict
) -> "DirectiveMeta":
# Initialize the attribute containing the list of directives
# to be executed. Here we go reversed because we want to execute
# commands:
# 1. in the order they were defined
# 2. following the MRO
attr_dict["_directives_to_be_executed"] = []
for base in reversed(bases):
try:
directive_from_base = base._directives_to_be_executed
attr_dict["_directives_to_be_executed"].extend(directive_from_base)
except AttributeError:
# The base class didn't have the required attribute.
# Continue searching
pass
# De-duplicates directives from base classes
attr_dict["_directives_to_be_executed"] = [
x for x in spack.llnl.util.lang.dedupe(attr_dict["_directives_to_be_executed"])
]
# Move things to be executed from module scope (where they
# are collected first) to class scope
if DirectiveMeta._directives_to_be_executed:
attr_dict["_directives_to_be_executed"].extend(
DirectiveMeta._directives_to_be_executed
)
DirectiveMeta._directives_to_be_executed = []
return super(DirectiveMeta, cls).__new__(cls, name, bases, attr_dict)
def __init__(cls: "DirectiveMeta", name: str, bases: tuple, attr_dict: dict):
# The instance is being initialized: if it is a package we must ensure
# that the directives are called to set it up.
if spack.repo.is_package_module(cls.__module__):
# Ensure the presence of the dictionaries associated with the directives.
# All dictionaries are defaultdicts that create lists for missing keys.
for d in DirectiveMeta._directive_dict_names:
setattr(cls, d, {})
# Lazily execute directives
for directive in cls._directives_to_be_executed:
directive(cls)
# Ignore any directives executed *within* top-level
# directives by clearing out the queue they're appended to
DirectiveMeta._directives_to_be_executed = []
super(DirectiveMeta, cls).__init__(name, bases, attr_dict)
@staticmethod
def push_to_context(when_spec: spack.spec.Spec) -> None:
"""Add a spec to the context constraints."""
DirectiveMeta._when_constraints_from_context.append(when_spec)
@staticmethod
def pop_from_context() -> spack.spec.Spec:
"""Pop the last constraint from the context"""
return DirectiveMeta._when_constraints_from_context.pop()
@staticmethod
def push_default_args(default_args: Dict[str, Any]) -> None:
"""Push default arguments"""
DirectiveMeta._default_args.append(default_args)
@staticmethod
def pop_default_args() -> dict:
"""Pop default arguments"""
return DirectiveMeta._default_args.pop()
@staticmethod
def directive(dicts: Optional[Union[Sequence[str], str]] = None) -> Callable:
"""Decorator for Spack directives.
Spack directives allow you to modify a package while it is being
defined, e.g. to add version or dependency information. Directives
are one of the key pieces of Spack's package "language", which is
embedded in python.
Here's an example directive:
.. code-block:: python
@directive(dicts="versions")
def version(pkg, ...):
...
This directive allows you write:
.. code-block:: python
class Foo(Package):
version(...)
The ``@directive`` decorator handles a couple things for you:
1. Adds the class scope (pkg) as an initial parameter when
called, like a class method would. This allows you to modify
a package from within a directive, while the package is still
being defined.
2. It automatically adds a dictionary called ``versions`` to the
package so that you can refer to pkg.versions.
The ``(dicts="versions")`` part ensures that ALL packages in Spack
will have a ``versions`` attribute after they're constructed, and
that if no directive actually modified it, it will just be an
empty dict.
This is just a modular way to add storage attributes to the
Package class, and it's how Spack gets information from the
packages to the core.
"""
if isinstance(dicts, str):
dicts = (dicts,)
if not isinstance(dicts, collections.abc.Sequence):
message = "dicts arg must be list, tuple, or string. Found {0}"
raise TypeError(message.format(type(dicts)))
# Add the dictionary names if not already there
DirectiveMeta._directive_dict_names |= set(dicts)
# This decorator just returns the directive functions
def _decorator(decorated_function: Callable) -> Callable:
directive_names.append(decorated_function.__name__)
@functools.wraps(decorated_function)
def _wrapper(*args, **_kwargs):
# First merge default args with kwargs
kwargs = dict()
for default_args in DirectiveMeta._default_args:
kwargs.update(default_args)
kwargs.update(_kwargs)
# Inject when arguments from the context
if DirectiveMeta._when_constraints_from_context:
# Check that directives not yet supporting the when= argument
# are not used inside the context manager
if decorated_function.__name__ == "version":
msg = (
'directive "{0}" cannot be used within a "when"'
' context since it does not support a "when=" '
"argument"
)
msg = msg.format(decorated_function.__name__)
raise DirectiveError(msg)
when_constraints = [
spack.spec.Spec(x) for x in DirectiveMeta._when_constraints_from_context
]
if kwargs.get("when"):
when_constraints.append(spack.spec.Spec(kwargs["when"]))
when_spec = spack.spec.Spec()
for current in when_constraints:
when_spec._constrain_symbolically(current, deps=True)
kwargs["when"] = when_spec
# If any of the arguments are executors returned by a
# directive passed as an argument, don't execute them
# lazily. Instead, let the called directive handle them.
# This allows nested directive calls in packages. The
# caller can return the directive if it should be queued.
def remove_directives(arg):
directives = DirectiveMeta._directives_to_be_executed
if isinstance(arg, (list, tuple)):
# Descend into args that are lists or tuples
for a in arg:
remove_directives(a)
else:
# Remove directives args from the exec queue
remove = next((d for d in directives if d is arg), None)
if remove is not None:
directives.remove(remove)
# Nasty, but it's the best way I can think of to avoid
# side effects if directive results are passed as args
remove_directives(args)
remove_directives(list(kwargs.values()))
# A directive returns either something that is callable on a
# package or a sequence of them
result = decorated_function(*args, **kwargs)
# ...so if it is not a sequence make it so
values = result
if not isinstance(values, collections.abc.Sequence):
values = (values,)
DirectiveMeta._directives_to_be_executed.extend(values)
# wrapped function returns same result as original so
# that we can nest directives
return result
return _wrapper
return _decorator
| DirectiveMeta |
python | PyCQA__pylint | pylint/checkers/match_statements_checker.py | {
"start": 1008,
"end": 8372
} | class ____(BaseChecker):
name = "match_statements"
msgs = {
"E1901": (
"The name capture `case %s` makes the remaining patterns unreachable. "
"Use a dotted name (for example an enum) to fix this.",
"bare-name-capture-pattern",
"Emitted when a name capture pattern is used in a match statement "
"and there are case statements below it.",
),
"E1902": (
"`__match_args__` must be a tuple of strings.",
"invalid-match-args-definition",
"Emitted if `__match_args__` isn't a tuple of strings required for match.",
),
"E1903": (
"%s expects %d positional sub-patterns (given %d)",
"too-many-positional-sub-patterns",
"Emitted when the number of allowed positional sub-patterns exceeds the "
"number of allowed sub-patterns specified in `__match_args__`.",
),
"E1904": (
"Multiple sub-patterns for attribute %s",
"multiple-class-sub-patterns",
"Emitted when there is more than one sub-pattern for a specific "
"attribute in a class pattern.",
),
"R1905": (
"Use '%s() as %s' instead",
"match-class-bind-self",
"Match class patterns are faster if the name binding happens "
"for the whole pattern and any lookup for `__match_args__` "
"can be avoided.",
),
"R1906": (
"Use keyword attributes instead of positional ones (%s)",
"match-class-positional-attributes",
"Keyword attributes are more explicit and slightly faster "
"since CPython can skip the `__match_args__` lookup.",
),
}
@only_required_for_messages("invalid-match-args-definition")
def visit_assignname(self, node: nodes.AssignName) -> None:
if (
node.name == "__match_args__"
and isinstance(node.frame(), nodes.ClassDef)
and isinstance(node.parent, nodes.Assign)
and not (
isinstance(node.parent.value, nodes.Tuple)
and all(
isinstance(el, nodes.Const) and isinstance(el.value, str)
for el in node.parent.value.elts
)
)
):
self.add_message(
"invalid-match-args-definition",
node=node.parent.value,
args=(),
confidence=HIGH,
)
@only_required_for_messages("bare-name-capture-pattern")
def visit_match(self, node: nodes.Match) -> None:
"""Check if a name capture pattern prevents the other cases from being
reached.
"""
for idx, case in enumerate(node.cases):
match case:
case nodes.MatchCase(
pattern=nodes.MatchAs(
pattern=None, name=nodes.AssignName(name=name)
),
guard=None,
) if (
idx < len(node.cases) - 1
):
self.add_message(
"bare-name-capture-pattern",
node=case.pattern,
args=(name,),
confidence=HIGH,
)
@only_required_for_messages("match-class-bind-self")
def visit_matchas(self, node: nodes.MatchAs) -> None:
match node:
case nodes.MatchAs(
parent=nodes.MatchClass(cls=nodes.Name() as cls_name, patterns=[_]),
name=nodes.AssignName(name=name),
pattern=None,
):
inferred = safe_infer(cls_name)
if (
isinstance(inferred, nodes.ClassDef)
and inferred.qname() in MATCH_CLASS_SELF_NAMES
):
self.add_message(
"match-class-bind-self",
node=node,
args=(cls_name.name, name),
confidence=HIGH,
)
@staticmethod
def get_match_args_for_class(node: nodes.NodeNG) -> list[str] | None:
"""Infer __match_args__ from class name."""
inferred = safe_infer(node)
if not isinstance(inferred, nodes.ClassDef):
return None
try:
match_args = inferred.getattr("__match_args__")
except astroid.exceptions.NotFoundError:
if inferred.qname() in MATCH_CLASS_SELF_NAMES:
return ["<self>"]
return None
match match_args:
case [
nodes.AssignName(parent=nodes.Assign(value=nodes.Tuple(elts=elts))),
*_,
] if all(
isinstance(el, nodes.Const) and isinstance(el.value, str) for el in elts
):
return [el.value for el in elts]
case _:
return None
def check_duplicate_sub_patterns(
self, name: str, node: nodes.NodeNG, *, attrs: set[str], dups: set[str]
) -> None:
"""Track attribute names and emit error if name is given more than once."""
if name in attrs and name not in dups:
dups.add(name)
self.add_message(
"multiple-class-sub-patterns",
node=node,
args=(name,),
confidence=INFERENCE,
)
else:
attrs.add(name)
@only_required_for_messages(
"match-class-positional-attributes",
"multiple-class-sub-patterns",
"too-many-positional-sub-patterns",
)
def visit_matchclass(self, node: nodes.MatchClass) -> None:
attrs: set[str] = set()
dups: set[str] = set()
if (
node.patterns
and (match_args := self.get_match_args_for_class(node.cls)) is not None
):
if len(node.patterns) > len(match_args):
self.add_message(
"too-many-positional-sub-patterns",
node=node,
args=(node.cls.as_string(), len(match_args), len(node.patterns)),
confidence=INFERENCE,
)
return
inferred = safe_infer(node.cls)
if not (
isinstance(inferred, nodes.ClassDef)
and (
inferred.qname() in MATCH_CLASS_SELF_NAMES
or "tuple" in inferred.basenames
)
):
attributes = [f"'{attr}'" for attr in match_args[: len(node.patterns)]]
self.add_message(
"match-class-positional-attributes",
node=node,
args=(", ".join(attributes),),
confidence=INFERENCE,
)
for i in range(len(node.patterns)):
name = match_args[i]
self.check_duplicate_sub_patterns(name, node, attrs=attrs, dups=dups)
for kw_name in node.kwd_attrs:
self.check_duplicate_sub_patterns(kw_name, node, attrs=attrs, dups=dups)
def register(linter: PyLinter) -> None:
linter.register_checker(MatchStatementChecker(linter))
| MatchStatementChecker |
python | sphinx-doc__sphinx | sphinx/domains/std/__init__.py | {
"start": 12317,
"end": 12856
} | class ____(SphinxDirective):
"""Directive to name the program for which options are documented."""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec: ClassVar[OptionSpec] = {}
def run(self) -> list[Node]:
program = ws_re.sub('-', self.arguments[0].strip())
if program == 'None':
self.env.ref_context.pop('std:program', None)
else:
self.env.ref_context['std:program'] = program
return []
| Program |
python | tensorflow__tensorflow | tensorflow/python/types/distribute.py | {
"start": 8014,
"end": 12650
} | class ____(Iterator):
"""An iterator over `tf.distribute.DistributedDataset`.
`tf.distribute.DistributedIterator` is the primary mechanism for enumerating
elements of a `tf.distribute.DistributedDataset`. It supports the Python
Iterator protocol, which means it can be iterated over using a for-loop or by
fetching individual elements explicitly via `get_next()`.
You can create a `tf.distribute.DistributedIterator` by calling `iter` on
a `tf.distribute.DistributedDataset` or creating a python loop over a
`tf.distribute.DistributedDataset`.
Visit the [tutorial](https://www.tensorflow.org/tutorials/distribute/input)
on distributed input for more examples and caveats.
"""
def get_next(self):
"""Returns the next input from the iterator for all replicas.
Example use:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.range(100).batch(2)
>>> dist_dataset = strategy.experimental_distribute_dataset(dataset)
>>> dist_dataset_iterator = iter(dist_dataset)
>>> @tf.function
... def one_step(input):
... return input
>>> step_num = 5
>>> for _ in range(step_num):
... strategy.run(one_step, args=(dist_dataset_iterator.get_next(),))
>>> strategy.experimental_local_results(dist_dataset_iterator.get_next())
(<tf.Tensor: shape=(1,), dtype=int64, numpy=array([10])>,
<tf.Tensor: shape=(1,), dtype=int64, numpy=array([11])>)
Returns:
A single `tf.Tensor` or a `tf.distribute.DistributedValues` which contains
the next input for all replicas.
Raises:
`tf.errors.OutOfRangeError`: If the end of the iterator has been reached.
"""
raise NotImplementedError(
"DistributedIterator.get_next() must be implemented in descendants.")
@property
def element_spec(self):
# pylint: disable=line-too-long
"""The type specification of an element of `tf.distribute.DistributedIterator`.
Example usage:
>>> global_batch_size = 16
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([1.],[2])).repeat(100).batch(global_batch_size)
>>> distributed_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> distributed_iterator.element_spec
(PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.float32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.float32, name=None)),
PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.int32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.int32, name=None)))
Returns:
A nested structure of `tf.TypeSpec` objects matching the structure of an
element of this `tf.distribute.DistributedIterator`. This returned value
is typically a `tf.distribute.DistributedValues` object and specifies the
`tf.TensorSpec` of individual components.
"""
raise NotImplementedError(
"DistributedIterator.element_spec() must be implemented in descendants")
def get_next_as_optional(self):
# pylint: disable=line-too-long
"""Returns a `tf.experimental.Optional` that contains the next value for all replicas.
If the `tf.distribute.DistributedIterator` has reached the end of the
sequence, the returned `tf.experimental.Optional` will have no value.
Example usage:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> global_batch_size = 2
>>> steps_per_loop = 2
>>> dataset = tf.data.Dataset.range(10).batch(global_batch_size)
>>> distributed_iterator = iter(
... strategy.experimental_distribute_dataset(dataset))
>>> def step_fn(x):
... # train the model with inputs
... return x
>>> @tf.function
... def train_fn(distributed_iterator):
... for _ in tf.range(steps_per_loop):
... optional_data = distributed_iterator.get_next_as_optional()
... if not optional_data.has_value():
... break
... per_replica_results = strategy.run(step_fn, args=(optional_data.get_value(),))
... tf.print(strategy.experimental_local_results(per_replica_results))
>>> train_fn(distributed_iterator)
... # ([0 1], [2 3])
... # ([4], [])
Returns:
An `tf.experimental.Optional` object representing the next value from the
`tf.distribute.DistributedIterator` (if it has one) or no value.
"""
# pylint: enable=line-too-long
raise NotImplementedError(
"get_next_as_optional() not implemented in descendants")
@tf_export("distribute.DistributedDataset", v1=[])
| DistributedIteratorInterface |
python | Textualize__textual | src/textual/events.py | {
"start": 1650,
"end": 1943
} | class ____(Event, bubble=False):
"""
Sent when the App is running but *before* the terminal is in application mode.
Use this event to run any setup that doesn't require any visuals such as loading
configuration and binding keys.
- [ ] Bubbles
- [ ] Verbose
"""
| Load |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mssql/pyodbc.py | {
"start": 20047,
"end": 20167
} | class ____(_MSJsonIndexType):
def get_dbapi_type(self, dbapi):
return dbapi.SQL_WVARCHAR
| _JSONIndexType_pyodbc |
python | apache__airflow | airflow-core/tests/unit/core/test_settings.py | {
"start": 2446,
"end": 7823
} | class ____:
# Make sure that the configure_logging is not cached
def setup_method(self):
self.old_modules = dict(sys.modules)
def teardown_method(self):
# Remove any new modules imported during the test run. This lets us
# import the same source files for more than one test.
for mod in [m for m in sys.modules if m not in self.old_modules]:
del sys.modules[mod]
@mock.patch("airflow.settings.prepare_syspath_for_config_and_plugins")
@mock.patch("airflow.settings.import_local_settings")
def test_initialize_order(self, mock_import_local_settings, mock_prepare_syspath_for_config_and_plugins):
"""
Tests that import_local_settings is called after prepare_syspath_for_config_and_plugins
"""
mock_local_settings = mock.Mock()
mock_local_settings.attach_mock(
mock_prepare_syspath_for_config_and_plugins, "prepare_syspath_for_config_and_plugins"
)
mock_local_settings.attach_mock(mock_import_local_settings, "import_local_settings")
import airflow.settings
airflow.settings.initialize()
expected_calls = [
call.prepare_syspath_for_config_and_plugins(),
call.import_local_settings(),
]
mock_local_settings.assert_has_calls(expected_calls)
assert mock_local_settings.mock_calls == expected_calls
def test_import_with_dunder_all_not_specified(self):
"""
Tests that if __all__ is specified in airflow_local_settings,
only module attributes specified within are imported.
"""
with SettingsContext(SETTINGS_FILE_POLICY_WITH_DUNDER_ALL, "airflow_local_settings"):
from airflow import settings
settings.import_local_settings()
with pytest.raises(AttributeError):
settings.not_policy()
def test_import_with_dunder_all(self):
"""
Tests that if __all__ is specified in airflow_local_settings,
only module attributes specified within are imported.
"""
with SettingsContext(SETTINGS_FILE_POLICY_WITH_DUNDER_ALL, "airflow_local_settings"):
from airflow import settings
settings.import_local_settings()
task_instance = MagicMock()
settings.test_policy(task_instance)
assert task_instance.run_as_user == "myself"
@mock.patch("airflow.settings.log.debug")
def test_import_local_settings_without_syspath(self, log_mock):
"""
Tests that an ImportError is raised in import_local_settings
if there is no airflow_local_settings module on the syspath.
"""
from airflow import settings
settings.import_local_settings()
log_mock.assert_called_once_with("No airflow_local_settings to import.", exc_info=True)
def test_policy_function(self):
"""
Tests that task instances are mutated by the policy
function in airflow_local_settings.
"""
with SettingsContext(SETTINGS_FILE_POLICY, "airflow_local_settings"):
from airflow import settings
settings.import_local_settings()
task_instance = MagicMock()
settings.test_policy(task_instance)
assert task_instance.run_as_user == "myself"
def test_pod_mutation_hook(self):
"""
Tests that pods are mutated by the pod_mutation_hook
function in airflow_local_settings.
"""
with SettingsContext(SETTINGS_FILE_POD_MUTATION_HOOK, "airflow_local_settings"):
from airflow import settings
settings.import_local_settings()
pod = MagicMock()
settings.pod_mutation_hook(pod)
assert pod.namespace == "airflow-tests"
def test_custom_policy(self):
with SettingsContext(SETTINGS_FILE_CUSTOM_POLICY, "airflow_local_settings"):
from airflow import settings
settings.import_local_settings()
task_instance = MagicMock()
task_instance.owner = "airflow"
with pytest.raises(AirflowClusterPolicyViolation):
settings.task_must_have_owners(task_instance)
_local_db_path_error = pytest.raises(AirflowConfigException, match=r"Cannot use relative path:")
@pytest.mark.parametrize(
("value", "expectation"),
[
("sqlite:///./relative_path.db", _local_db_path_error),
("sqlite:///relative/path.db", _local_db_path_error),
pytest.param(
"sqlite:///C:/path/to/db",
_local_db_path_error,
marks=pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows"),
),
pytest.param(
r"sqlite:///C:\path\to\db",
_local_db_path_error,
marks=pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows"),
),
("sqlite://", contextlib.nullcontext()),
],
)
def test_sqlite_relative_path(value, expectation):
from airflow import settings
with (
patch("os.environ", {"_AIRFLOW_SKIP_DB_TESTS": "true"}),
patch("airflow.settings.SQL_ALCHEMY_CONN", value),
patch("airflow.settings.Session"),
patch("airflow.settings.engine"),
):
with expectation:
settings.configure_orm()
| TestLocalSettings |
python | imageio__imageio | imageio/plugins/pillow_legacy.py | {
"start": 8165,
"end": 14505
} | class ____(Format):
"""
Base format class for Pillow formats.
"""
_pillow_imported = False
_Image = None
_modes = "i"
_description = ""
def __init__(self, *args, plugin_id: str = None, **kwargs):
super(PillowFormat, self).__init__(*args, **kwargs)
# Used to synchronize _init_pillow(), see #244
self._lock = threading.RLock()
self._plugin_id = plugin_id
@property
def plugin_id(self):
"""The PIL plugin id."""
return self._plugin_id # Set when format is created
def _init_pillow(self):
with self._lock:
if not self._pillow_imported:
self._pillow_imported = True # more like tried to import
import PIL
if not hasattr(PIL, "__version__"): # pragma: no cover
raise ImportError(
"Imageio Pillow plugin requires " "Pillow, not PIL!"
)
from PIL import Image
self._Image = Image
elif self._Image is None: # pragma: no cover
raise RuntimeError("Imageio Pillow plugin requires " "Pillow lib.")
Image = self._Image
if self.plugin_id in ("PNG", "JPEG", "BMP", "GIF", "PPM"):
Image.preinit()
else:
Image.init()
return Image
def _can_read(self, request):
Image = self._init_pillow()
if self.plugin_id in Image.OPEN:
factory, accept = Image.OPEN[self.plugin_id]
if accept:
if request.firstbytes and accept(request.firstbytes):
return True
def _can_write(self, request):
Image = self._init_pillow()
if request.extension in self.extensions or request._uri_type in [
URI_FILE,
URI_BYTES,
]:
if self.plugin_id in Image.SAVE:
return True
class Reader(Format.Reader):
def _open(self, pilmode=None, as_gray=False):
Image = self.format._init_pillow()
try:
factory, accept = Image.OPEN[self.format.plugin_id]
except KeyError:
raise RuntimeError("Format %s cannot read images." % self.format.name)
self._fp = self._get_file()
self._im = factory(self._fp, "")
if hasattr(Image, "_decompression_bomb_check"):
Image._decompression_bomb_check(self._im.size)
# Save the raw mode used by the palette for a BMP because it may not be the number of channels
# When the data is read, imageio hands the palette to PIL to handle and clears the rawmode argument
# However, there is a bug in PIL with handling animated GIFs with a different color palette on each frame.
# This issue is resolved by using the raw palette data but the rawmode information is now lost. So we
# store the raw mode for later use
if self._im.palette and self._im.palette.dirty:
self._im.palette.rawmode_saved = self._im.palette.rawmode
pil_try_read(self._im)
# Store args
self._kwargs = dict(
as_gray=as_gray, is_gray=_palette_is_grayscale(self._im)
)
# setting mode=None is not the same as just not providing it
if pilmode is not None:
self._kwargs["mode"] = pilmode
# Set length
self._length = 1
if hasattr(self._im, "n_frames"):
self._length = self._im.n_frames
def _get_file(self):
self._we_own_fp = False
return self.request.get_file()
def _close(self):
save_pillow_close(self._im)
if self._we_own_fp:
self._fp.close()
# else: request object handles closing the _fp
def _get_length(self):
return self._length
def _seek(self, index):
try:
self._im.seek(index)
except EOFError:
raise IndexError("Could not seek to index %i" % index)
def _get_data(self, index):
if index >= self._length:
raise IndexError("Image index %i > %i" % (index, self._length))
i = self._im.tell()
if i > index:
self._seek(index) # just try
else:
while i < index: # some formats need to be read in sequence
i += 1
self._seek(i)
if self._im.palette and self._im.palette.dirty:
self._im.palette.rawmode_saved = self._im.palette.rawmode
self._im.getdata()[0]
im = pil_get_frame(self._im, **self._kwargs)
return im, self._im.info
def _get_meta_data(self, index):
if not (index is None or index == 0):
raise IndexError()
return self._im.info
class Writer(Format.Writer):
def _open(self):
Image = self.format._init_pillow()
try:
self._save_func = Image.SAVE[self.format.plugin_id]
except KeyError:
raise RuntimeError("Format %s cannot write images." % self.format.name)
self._fp = self.request.get_file()
self._meta = {}
self._written = False
def _close(self):
pass # request object handled closing _fp
def _append_data(self, im, meta):
if self._written:
raise RuntimeError(
"Format %s only supports single images." % self.format.name
)
# Pop unit dimension for grayscale images
if im.ndim == 3 and im.shape[-1] == 1:
im = im[:, :, 0]
self._written = True
self._meta.update(meta)
img = ndarray_to_pil(
im, self.format.plugin_id, self._meta.pop("prefer_uint8", True)
)
if "bits" in self._meta:
img = img.quantize() # Make it a P image, so bits arg is used
img.save(self._fp, format=self.format.plugin_id, **self._meta)
save_pillow_close(img)
def set_meta_data(self, meta):
self._meta.update(meta)
| PillowFormat |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.