language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ansible__ansible | test/units/mock/custom_types.py | {
"start": 1026,
"end": 1058
} | class ____(float): ...
| CustomFloat |
python | ray-project__ray | rllib/algorithms/ppo/torch/default_ppo_torch_rl_module.py | {
"start": 722,
"end": 3126
} | class ____(TorchRLModule, DefaultPPORLModule):
def __init__(self, *args, **kwargs):
catalog_class = kwargs.pop("catalog_class", None)
if catalog_class is None:
catalog_class = PPOCatalog
super().__init__(*args, **kwargs, catalog_class=catalog_class)
@override(RLModule)
def _forward(self, batch: Dict[str, Any], **kwargs) -> Dict[str, Any]:
"""Default forward pass (used for inference and exploration)."""
output = {}
# Encoder forward pass.
encoder_outs = self.encoder(batch)
# Stateful encoder?
if Columns.STATE_OUT in encoder_outs:
output[Columns.STATE_OUT] = encoder_outs[Columns.STATE_OUT]
# Pi head.
output[Columns.ACTION_DIST_INPUTS] = self.pi(encoder_outs[ENCODER_OUT][ACTOR])
return output
@override(RLModule)
def _forward_train(self, batch: Dict[str, Any], **kwargs) -> Dict[str, Any]:
"""Train forward pass (keep embeddings for possible shared value func. call)."""
output = {}
encoder_outs = self.encoder(batch)
output[Columns.EMBEDDINGS] = encoder_outs[ENCODER_OUT][CRITIC]
if Columns.STATE_OUT in encoder_outs:
output[Columns.STATE_OUT] = encoder_outs[Columns.STATE_OUT]
output[Columns.ACTION_DIST_INPUTS] = self.pi(encoder_outs[ENCODER_OUT][ACTOR])
return output
@override(ValueFunctionAPI)
def compute_values(
self,
batch: Dict[str, Any],
embeddings: Optional[Any] = None,
) -> TensorType:
if embeddings is None:
# Separate vf-encoder.
if hasattr(self.encoder, "critic_encoder"):
batch_ = batch
if self.is_stateful():
# The recurrent encoders expect a `(state_in, h)` key in the
# input dict while the key returned is `(state_in, critic, h)`.
batch_ = batch.copy()
batch_[Columns.STATE_IN] = batch[Columns.STATE_IN][CRITIC]
embeddings = self.encoder.critic_encoder(batch_)[ENCODER_OUT]
# Shared encoder.
else:
embeddings = self.encoder(batch)[ENCODER_OUT][CRITIC]
# Value head.
vf_out = self.vf(embeddings)
# Squeeze out last dimension (single node value head).
return vf_out.squeeze(-1)
| DefaultPPOTorchRLModule |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_magiclink.py | {
"start": 86,
"end": 2372
} | class ____(util.MdCase):
"""Test cases for repo link shortening."""
extension = [
'pymdownx.magiclink',
]
extension_configs = {
'pymdownx.magiclink': {
'repo_url_shortener': True
}
}
def test_user(self):
"""Test user shortening."""
# Test #! original syntax
self.check_markdown(
r'https://github.com/facelessuser',
r'<p><a class="magiclink magiclink-github magiclink-mention" href="https://github.com/facelessuser" title="GitHub User: facelessuser">@facelessuser</a></p>' # noqa: E501
)
def test_repo(self):
"""Test repository shortening."""
# Test #! original syntax
self.check_markdown(
r'https://github.com/facelessuser/pymdown-extensions',
r'<p><a class="magiclink magiclink-github magiclink-repository" href="https://github.com/facelessuser/pymdown-extensions" title="GitHub Repository: facelessuser/pymdown-extensions">facelessuser/pymdown-extensions</a></p>' # noqa: E501
)
def test_no_social(self):
"""Test that social shortening does not happen."""
self.check_markdown(
r'https://x.com/someuser',
r'<p><a href="https://x.com/someuser">https://x.com/someuser</a></p>'
)
def test_excluded_user(self):
"""Test excluded."""
self.check_markdown(
r'https://github.com/support',
r'<p><a href="https://github.com/support">https://github.com/support</a></p>'
)
def test_excluded_user_repo(self):
"""Test excluded."""
self.check_markdown(
r'https://github.com/support/repo',
r'<p><a href="https://github.com/support/repo">https://github.com/support/repo</a></p>'
)
def test_discuss(self):
"""Test discuss."""
self.check_markdown(
r'https://github.com/facelessuser/pymdown-extensions/discussions/1173',
r'<p><a class="magiclink magiclink-github magiclink-discussion" href="https://github.com/facelessuser/pymdown-extensions/discussions/1173" title="GitHub Discussion: facelessuser/pymdown-extensions #1173">facelessuser/pymdown-extensions?1173</a></p>' # noqa: E501
)
| TestMagicLinkShortner |
python | google__jax | tests/pjit_test.py | {
"start": 44770,
"end": 52170
} | class ____(jtu.JaxTestCase):
@parameterized.named_parameters(
('2d_array', (4, 2), (4, 2), ('x', 'y')),
# TODO(b/226977360): Support 3D mesh shape for example (2, 2, 2).
('3d_array', (1, 4, 2), (2, 4, 8, 4), ('x', 'y', 'z')),
('1d_array', (8,), (8, 2), ('x')),
)
def test_pjit_arr_auto_sharding_array(self, mesh_shape, global_input_shape,
mesh_axis_names):
if config.use_shardy_partitioner.value:
self.skipTest('Must register auto partitioner for Shardy')
global_mesh = jtu.create_mesh(mesh_shape, mesh_axis_names)
input_data = np.arange(
math.prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
f = jax.jit(lambda x: x, in_shardings=AUTO(global_mesh),
out_shardings=AUTO(global_mesh))
inp = core.ShapedArray(input_data.shape, input_data.dtype)
compiled = f.lower(inp).compile()
inputs = [create_array(global_input_shape, global_mesh, ip, input_data)[0]
for ip in compiled.input_shardings[0]]
out = compiled(*inputs)
self.assertIsInstance(out, array.ArrayImpl)
self.assertArraysEqual(out._value, input_data)
def test_xla_arr_sharding_mismatch(self):
if config.use_shardy_partitioner.value:
self.skipTest('Must register auto partitioner for Shardy')
global_mesh = jtu.create_mesh((2, 2), ('x', 'y'))
global_input_shape = (6, 2)
input_data = np.arange(
math.prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
with global_mesh:
f = pjit(lambda x: x, in_shardings=AUTO(global_mesh),
out_shardings=AUTO(global_mesh))
inp = core.ShapedArray(input_data.shape, input_data.dtype)
compiled = f.lower(inp).compile()
different_pspec = (
P('y', 'x')
if compiled.input_shardings[0][0].is_equivalent_to(
NamedSharding(global_mesh, P('x', 'y')), len(global_input_shape)
)
else P('x', 'y')
)
arr, _ = create_array(global_input_shape, global_mesh, different_pspec,
input_data)
with self.assertRaisesRegex(
ValueError,
r"Compiled object called with input sharding\(s\) does not match the "
r"sharding\(s\) the computation was compiled with.*\n.*for arg x"):
compiled(arr)
def test_gda_auto_shardings_len(self):
if config.use_shardy_partitioner.value:
self.skipTest('Must register auto partitioner for Shardy')
global_mesh = jtu.create_mesh((2, 2), ('x', 'y'))
global_input_shape = (4, 2)
input_data = np.arange(
math.prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
with global_mesh:
f = pjit(lambda x, y, z: (x, y, z), in_shardings=AUTO(global_mesh),
out_shardings=AUTO(global_mesh))
inp = core.ShapedArray(input_data.shape, input_data.dtype)
compiled = f.lower(inp, inp, inp).compile()
self.assertLen(compiled.output_shardings, 3)
self.assertLen(compiled.input_shardings[0], 3)
@parameterized.named_parameters(
('3d_array', (1, 1, 2), ('x', 'y', 'z'), P(('x', 'y', 'z'))),
('2d_array', (4, 2), ('x', 'y'), P('y', 'x')),
('1d_array', (8,), ('x'), P('x')),
)
def test_jit_arr_partial_auto_sharding_array(
self, mesh_shape, mesh_axis_names, pspec):
if config.use_shardy_partitioner.value:
self.skipTest('Must register auto partitioner for Shardy')
mesh = jtu.create_mesh(mesh_shape, mesh_axis_names)
global_input_shape = (8, 4)
input_data = np.arange(
math.prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
inp_s = NamedSharding(mesh, pspec)
f = jax.jit(
lambda x, y: (x, y),
in_shardings=(inp_s, AUTO(mesh)),
out_shardings=AUTO(mesh))
inp = core.ShapedArray(input_data.shape, input_data.dtype)
compiled = f.lower(inp, inp).compile()
inputs = [create_array(global_input_shape, mesh, ip, input_data)[0]
for ip in compiled.input_shardings[0]]
self.assertEqual(compiled.input_shardings[0][0], inp_s)
out1, out2 = compiled(*inputs)
for o in [out1, out2]:
self.assertIsInstance(o, array.ArrayImpl)
self.assertArraysEqual(o._value, input_data)
def test_jit_different_mesh_in_auto(self):
mesh1 = jtu.create_mesh((4,), ('x',))
dev = jax.devices()
mesh2 = jax.sharding.Mesh([dev[0], dev[3], dev[2], dev[1]], 'x')
f = jax.jit(lambda x, y: (x, y),
in_shardings=(NamedSharding(mesh2, P('x')), AUTO(mesh1)))
inp = jax.ShapeDtypeStruct((8, 2), np.float32)
with self.assertRaisesRegex(
ValueError,
"Received incompatible devices for jitted computation"):
f.lower(inp, inp).compile()
@parameterized.named_parameters(
('2d_array', (4, 2), ('x', 'y')),
('1d_array', (8,), ('x')),
)
def test_jit_auto_sharding_partial_tuple_input_shardings(
self, mesh_shape, mesh_axis_names):
if not jtu.test_device_matches(["tpu"]):
self.skipTest('Parameters are tupled only on TPU if >2000 parameters')
if config.use_shardy_partitioner.value:
self.skipTest('Must register auto partitioner for Shardy')
mesh = jtu.create_mesh(mesh_shape, mesh_axis_names)
global_input_shape = (8, 4)
input_data = np.arange(
math.prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
input_sharding = NamedSharding(mesh, P(mesh_axis_names)) # sharded
input_sharding_annotations = [AUTO(mesh)] * 2001
output_sharding = NamedSharding(mesh, P()) # replicated
output_sharding_annotations = [AUTO(mesh)] * 2001
for i in range(1000):
input_sharding_annotations[2*i] = input_sharding
output_sharding_annotations[2*i] = output_sharding
jit_tuple_identity_fn = jax.jit(
lambda *x: x,
in_shardings=input_sharding_annotations,
out_shardings=tuple(output_sharding_annotations))
inp = core.ShapedArray(input_data.shape, input_data.dtype)
compiled = jit_tuple_identity_fn.lower(*([inp] * 2001)).compile()
# Check sharding preservation for even numbered inputs.
for i in range(1000):
self.assertEqual(compiled.input_shardings[0][2*i], input_sharding)
self.assertEqual(compiled.output_shardings[2*i], output_sharding)
@unittest.skip('The error is not raised yet. Enable this back once we raise '
'the error in pjit again.')
def test_pjit_array_error(self):
global_mesh = jtu.create_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
input_data = np.arange(
math.prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
with global_mesh:
f = pjit(lambda x: x, in_shardings=AUTO(global_mesh),
out_shardings=AUTO(global_mesh))
inp = core.ShapedArray(input_data.shape, input_data.dtype)
compiled = f.lower(inp).compile()
inputs = [create_array(global_input_shape, global_mesh, ip, input_data)[0]
for ip in compiled.input_shardings[0]]
with self.assertRaisesRegex(
ValueError,
('Passing sharding on pjit and on args while using the '
'auto spmd partitioner is not allowed. Please call the '
'compiled object on the inputs.')):
f(*inputs)
@jtu.pytest_mark_if_available('multiaccelerator')
| AutoShardingPjitTest |
python | pytorch__pytorch | test/test_cuda.py | {
"start": 227582,
"end": 242125
} | class ____(TestCase):
# These tests will be instantiate with instantiate_device_type_tests
# to apply the new OptimizerInfo structure.
@onlyCUDA
@unittest.skipIf(
not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >=5.3 required for graphs"
)
@optims(
[optim for optim in optim_db if optim.has_capturable_arg],
dtypes=[torch.float32],
)
def test_graph_optims(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable",)
)
steps_warmup = 3
steps_train = 2
for optim_input in all_optim_inputs:
kwargs = optim_input.kwargs
# lr and betas as a Tensor is not supported when capturable=False and foreach=True for torch.optim.adam
# and torch.optim.adamw
kwargs["lr"] = 0.1
if optim_cls in (torch.optim.Adam, torch.optim.AdamW):
kwargs["betas"] = (0.9, 0.99)
for actually_do_graphs in (True, False):
params = [
torch.randn((i + 5, i + 5), device=device) for i in range(2)
] + [torch.randn((), device=device)]
params_control = [p.clone().requires_grad_() for p in params]
params_graphed = [p.clone().requires_grad_() for p in params]
grads = [
[torch.randn_like(p) for p in params]
for _ in range(steps_warmup + steps_train)
]
# Control (capturable=False)
kwargs["capturable"] = False
opt = optim_cls(params_control, **kwargs)
for i in range(steps_warmup + steps_train):
for j, p in enumerate(params_control):
p.grad = grads[i][j]
opt.step()
# capturable=True
kwargs["capturable"] = True
opt = optim_cls(params_graphed, **kwargs)
for i in range(steps_warmup):
for j, p in enumerate(params_graphed):
p.grad = grads[i][j]
opt.step()
if actually_do_graphs:
g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
opt.step()
for i in range(steps_train):
if actually_do_graphs:
for j, p in enumerate(params_graphed):
p.grad.copy_(grads[i + steps_warmup][j])
g.replay()
else:
# Passing capturable=True to the constructor and running without graphs should still be
# numerically correct, even if it's not ideal for performance.
for j, p in enumerate(params_graphed):
p.grad = grads[i + steps_warmup][j]
opt.step()
for p_control, p_graphed in zip(params_control, params_graphed):
self.assertEqual(p_control, p_graphed)
@onlyCUDA
@unittest.skipIf(
not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
@optims(
[
optim
for optim in optim_db
if "fused" in optim.supported_impls and "cuda" in optim.supports_fused_on
],
dtypes=[torch.float32],
)
def test_graph_scaling_fused_optimizers(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
steps_warmup = 3
steps_train = 2
optim_inputs = optim_info.optim_inputs_func(device=device)
for optim_input in optim_inputs:
kwargs = optim_input.kwargs
kwargs["fused"] = True
for actually_do_graphs in (
(True, False) if optim_info.has_capturable_arg else (True,)
):
params = [torch.randn((i + 5, i + 5), device=device) for i in range(2)]
params_control = [p.clone().requires_grad_() for p in params]
params_graphed = [p.clone().requires_grad_() for p in params]
# `GradScaler` in-place updates gradients thus it's necessary to duplicate gradients.
grads = [
[torch.randn_like(p) for p in params]
for _ in range(steps_warmup + steps_train)
]
with torch.no_grad():
grads_control = [[g.clone() for g in gs] for gs in grads]
grads_graphed = [[g.clone() for g in gs] for gs in grads]
# Gradient Scaler
scaler_for_control = torch.cuda.amp.GradScaler(init_scale=128.0)
with torch.no_grad():
scaler_for_control._lazy_init_scale_growth_tracker(device)
scaler_for_graphed = torch.cuda.amp.GradScaler()
scaler_for_graphed.load_state_dict(scaler_for_control.state_dict())
with torch.no_grad():
scaler_for_graphed._lazy_init_scale_growth_tracker(device)
# Control (capturable=False)
if optim_info.has_capturable_arg:
kwargs["capturable"] = False
opt = optim_cls(params_control, **kwargs)
for i in range(steps_warmup + steps_train):
for j, p in enumerate(params_control):
p.grad = grads_control[i][j]
scaler_for_control.step(opt)
scaler_for_control.update()
# capturable=True
if optim_info.has_capturable_arg:
kwargs["capturable"] = True
opt = optim_cls(params_graphed, **kwargs)
for i in range(steps_warmup):
for j, p in enumerate(params_graphed):
p.grad = grads_graphed[i][j]
scaler_for_graphed.step(opt)
scaler_for_graphed.update()
if actually_do_graphs:
g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
scaler_for_graphed.step(opt)
scaler_for_graphed.update()
for i in range(steps_train):
if actually_do_graphs:
for j, p in enumerate(params_graphed):
p.grad.copy_(grads_graphed[i + steps_warmup][j])
g.replay()
else:
# Passing capturable=True to the constructor and running without graphs should still be
# numerically correct, even if it's not ideal for performance.
for j, p in enumerate(params_graphed):
p.grad = grads_graphed[i + steps_warmup][j]
scaler_for_graphed.step(opt)
scaler_for_graphed.update()
for p_control, p_graphed in zip(params_control, params_graphed):
self.assertEqual(p_control, p_graphed)
@onlyNativeDeviceTypes
@optims(
[optim for optim in optim_db if "fused" in optim.supported_impls],
dtypes=[torch.float32],
)
def test_grad_scaling_autocast_fused_optimizers(self, device, dtype, optim_info):
device = device.split(":")[0]
if device not in optim_info.supports_fused_on:
self.skipTest(
f"{device} is not supported for fused on {optim_info.optim_cls.__name__}"
)
optim_inputs = optim_info.optim_inputs_func(device=device)
optim_cls = optim_info.optim_cls
for optim_input in optim_inputs:
for _separate_unscale in (True, False):
kwargs = optim_input.kwargs
kwargs["fused"] = True
torch.manual_seed(20)
(
mod_control,
mod_scaling,
opt_control,
opt_scaling,
data,
loss_fn,
_,
) = _create_scaling_case(
optimizer_ctor=optim_cls, optimizer_kwargs=kwargs, device=device
)
optimizer_kwargs = deepcopy(kwargs)
optimizer_kwargs["fused"] = False
if "lr" not in kwargs:
# _create_scaling_case will set lr = 1.0 if optimizer_kwargs do not set lr
optimizer_kwargs["lr"] = 1.0
opt_control = optim_cls(mod_control.parameters(), **optimizer_kwargs)
scaler_scaling = torch.amp.GradScaler(device, init_scale=128.0)
scaler_control = torch.amp.GradScaler(device, init_scale=128.0)
tracker = TensorTracker()
for input, target in data:
opt_control.zero_grad()
with torch.autocast(device_type=device, dtype=torch.half):
output_control = mod_control(input)
loss_control = loss_fn(output_control, target)
scaler_control.scale(loss_control).backward()
scaler_control.step(opt_control)
scaler_control.update()
opt_scaling.zero_grad()
with torch.autocast(device_type=device, dtype=torch.half):
output_scaling = mod_scaling(input)
loss_scaling = loss_fn(output_scaling, target)
scaler_scaling.scale(loss_scaling).backward()
if _separate_unscale:
scaler_scaling.unscale_(opt_scaling)
scaler_scaling.step(opt_scaling)
scaler_scaling.update()
tracker.add(loss_control)
tracker.pop_check_set(loss_scaling, self)
for param_control, param_scaling in zip(
mod_control.parameters(), mod_scaling.parameters()
):
tracker.add(param_control.grad)
tracker.pop_check_set(param_scaling.grad, self)
tracker.add(param_control)
tracker.pop_check_set(param_scaling, self)
state_control, state_scaling = (
opt_control.state[param_control],
opt_scaling.state[param_scaling],
)
for k in state_control:
actual = state_scaling[k]
if k == "step":
actual = actual.squeeze()
tracker.add(state_control[k])
tracker.pop_check_set(actual, self)
@onlyCUDA
@parametrize("in_place_unscale", [False, True])
@optims(
[optim for optim in optim_db if "cuda" in optim.supports_fused_on],
dtypes=[torch.float32],
)
def test_grad_scaler_with_preset_grad_scale(
self, device, dtype, optim_info, in_place_unscale
):
weight = torch.ones((5, 5), device="cuda", requires_grad=True)
weight.grad = torch.full_like(weight, fill_value=15)
opt = optim_info.optim_cls([weight], lr=0.1, fused=True)
scaler = torch.amp.GradScaler(init_scale=5)
# simulate scaling a loss
scaler.scale(torch.ones(5))
if in_place_unscale:
scaler.unscale_(opt)
# the gradient should have been divided in-place
self.assertEqual(weight.grad, torch.full_like(weight, fill_value=3))
# the user sets a `grad_scale` value which should be fused with the optimizer step
opt.grad_scale = torch.Tensor([3]).cuda()
scaler.step(opt)
# check that the user's grad_scale was respected (i.e. the gradient was divided by 5 * 3)
self.assertEqual(weight.grad, torch.full_like(weight, fill_value=1))
@onlyCUDA
@unittest.skipIf(
not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
@parametrize("foreach, fused", [(False, False), (True, False), (False, True)])
@optims(
[
optim
for optim in optim_db
if "foreach" in optim.supported_impls and "cuda" in optim.supports_fused_on
],
dtypes=[torch.float32],
)
def test_graph_grad_scaling(self, device, dtype, optim_info, foreach, fused):
torch.cuda.empty_cache()
scaler = torch.amp.GradScaler(device="cuda", init_scale=4.0)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
weight = torch.ones((100,), device="cuda", requires_grad=True)
opt = optim_info.optim_cls([weight], lr=0.1, foreach=foreach, fused=fused)
static_input = torch.ones_like(weight)
static_grad = torch.ones_like(weight)
# warmup
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
torch.cuda.current_stream().wait_stream(s)
opt.zero_grad(set_to_none=True)
# capture
with torch.cuda.stream(s):
g.capture_begin()
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
g.capture_end()
input_vals = [5, 20000, 5, 40000]
# If the scale gets updated properly, these are the scale, growth tracker,
# and grad values we expect.
expected_scales = [4, 2, 2, 1]
expected_growth_trackers = [1, 0, 1, 0]
expected_grad_vals = [5 * 4, float("inf"), 5 * 2, float("inf")]
for data, scale, growth_tracker, grad_val in zip(
input_vals, expected_scales, expected_growth_trackers, expected_grad_vals
):
static_input.fill_(data)
g.replay()
self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))
scaler.step(opt)
scaler.update()
self.assertEqual(scaler._scale, scale)
self.assertEqual(scaler._growth_tracker, growth_tracker)
@unittest.skipIf(not TEST_CUDA, "CUDA not available, skipping tests")
| TestCudaOptims |
python | scipy__scipy | scipy/stats/_multivariate.py | {
"start": 211953,
"end": 214427
} | class ____(multi_rv_frozen):
__class_getitem__ = None
def __init__(self, row, col, *, seed=None):
self._dist = random_table_gen(seed)
self._params = self._dist._process_parameters(row, col)
# monkey patch self._dist
def _process_parameters(r, c):
return self._params
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, None, None)
def pmf(self, x):
return self._dist.pmf(x, None, None)
def mean(self):
return self._dist.mean(None, None)
def rvs(self, size=None, method=None, random_state=None):
# optimisations are possible here
return self._dist.rvs(None, None, size=size, method=method,
random_state=random_state)
_ctab_doc_row_col = """\
row : array_like
Sum of table entries in each row.
col : array_like
Sum of table entries in each column."""
_ctab_doc_x = """\
x : array-like
Two-dimensional table of non-negative integers, or a
multi-dimensional array with the last two dimensions
corresponding with the tables."""
_ctab_doc_row_col_note = """\
The row and column vectors must be one-dimensional, not empty,
and each sum up to the same value. They cannot contain negative
or noninteger entries."""
_ctab_doc_mean_params = f"""
Parameters
----------
{_ctab_doc_row_col}"""
_ctab_doc_row_col_note_frozen = """\
See class definition for a detailed description of parameters."""
_ctab_docdict = {
"_doc_random_state": _doc_random_state,
"_doc_row_col": _ctab_doc_row_col,
"_doc_x": _ctab_doc_x,
"_doc_mean_params": _ctab_doc_mean_params,
"_doc_row_col_note": _ctab_doc_row_col_note,
}
_ctab_docdict_frozen = _ctab_docdict.copy()
_ctab_docdict_frozen.update({
"_doc_row_col": "",
"_doc_mean_params": "",
"_doc_row_col_note": _ctab_doc_row_col_note_frozen,
})
def _docfill(obj, docdict, template=None):
obj.__doc__ = doccer.docformat(template or obj.__doc__, docdict)
# Set frozen generator docstrings from corresponding docstrings in
# random_table and fill in default strings in class docstrings
_docfill(random_table_gen, _ctab_docdict)
for name in ['logpmf', 'pmf', 'mean', 'rvs']:
method = random_table_gen.__dict__[name]
method_frozen = random_table_frozen.__dict__[name]
_docfill(method_frozen, _ctab_docdict_frozen, method.__doc__)
_docfill(method, _ctab_docdict)
| random_table_frozen |
python | huggingface__transformers | src/transformers/models/depth_anything/modeling_depth_anything.py | {
"start": 7950,
"end": 9802
} | class ____(nn.Module):
"""
DepthAnythingNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as
input and produces another list of tensors as output. For DepthAnything, it includes 2 stages:
* DepthAnythingReassembleStage
* DepthAnythingFeatureFusionStage.
Args:
config (dict): config dict.
"""
def __init__(self, config):
super().__init__()
self.config = config
self.reassemble_stage = DepthAnythingReassembleStage(config)
self.convs = nn.ModuleList()
for channel in config.neck_hidden_sizes:
self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False))
# fusion
self.fusion_stage = DepthAnythingFeatureFusionStage(config)
def forward(self, hidden_states: list[torch.Tensor], patch_height=None, patch_width=None) -> list[torch.Tensor]:
"""
Args:
hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):
List of hidden states from the backbone.
"""
if not isinstance(hidden_states, (tuple, list)):
raise TypeError("hidden_states should be a tuple or list of tensors")
if len(hidden_states) != len(self.config.neck_hidden_sizes):
raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.")
# postprocess hidden states
hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)
features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]
# fusion blocks
output = self.fusion_stage(features)
return output
| DepthAnythingNeck |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/serdes/objects/package_entry.py | {
"start": 2419,
"end": 4119
} | class ____:
key: EnvRegistryKey
aliases: Sequence[EnvRegistryKey]
summary: Optional[str]
description: Optional[str]
owners: Optional[Sequence[str]]
tags: Optional[Sequence[str]]
feature_data: Sequence[EnvRegistryObjectFeatureData]
@property
def features(self) -> Sequence[EnvRegistryObjectFeature]:
return [type_data.feature for type_data in self.feature_data]
@overload
def get_feature_data(self, feature: Literal["component"]) -> Optional[ComponentFeatureData]: ...
@overload
def get_feature_data(
self, feature: Literal["scaffold-target"]
) -> Optional[ScaffoldTargetTypeData]: ...
def get_feature_data(
self, feature: EnvRegistryObjectFeature
) -> Optional[EnvRegistryObjectFeatureData]:
for feature_data in self.feature_data:
if feature_data.feature == feature:
return feature_data
return None
@property
def scaffolder_schema(self) -> Optional[dict[str, Any]]:
scaffolder_data = self.get_feature_data("scaffold-target")
return scaffolder_data.schema if scaffolder_data else None
@property
def component_schema(self) -> Optional[dict[str, Any]]:
component_data = self.get_feature_data("component")
return component_data.schema if component_data else None
@property
def is_component(self) -> bool:
return self.get_feature_data("component") is not None
@property
def all_keys(self) -> Sequence[EnvRegistryKey]:
"""Return all keys associated with this plugin object, including aliases."""
return [self.key, *self.aliases]
@whitelist_for_serdes
@record
| EnvRegistryObjectSnap |
python | huggingface__transformers | tests/models/smolvlm/test_video_processing_smolvlm.py | {
"start": 1054,
"end": 3114
} | class ____:
def __init__(
self,
parent,
batch_size=5,
num_frames=8,
num_channels=3,
min_resolution=30,
max_resolution=80,
do_resize=True,
size=None,
do_normalize=True,
image_mean=IMAGENET_STANDARD_MEAN,
image_std=IMAGENET_STANDARD_STD,
do_convert_rgb=True,
):
size = size if size is not None else {"longest_edge": 20}
self.parent = parent
self.batch_size = batch_size
self.num_frames = num_frames
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.max_image_size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_video_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
"max_image_size": self.max_image_size,
}
def expected_output_video_shape(self, videos):
return [
self.num_frames,
self.num_channels,
self.max_image_size["longest_edge"],
self.max_image_size["longest_edge"],
]
def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
videos = prepare_video_inputs(
batch_size=self.batch_size,
num_frames=self.num_frames,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
return_tensors=return_tensors,
)
return videos
@require_torch
@require_vision
| SmolVLMVideoProcessingTester |
python | pytorch__pytorch | test/distributed/test_c10d_spawn.py | {
"start": 3033,
"end": 9229
} | class ____(MultiProcessTestCase):
def setUp(self):
super().setUp()
self._spawn_processes()
def tearDown(self):
super().tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 2
def _test_broadcast(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
# This is required because these functions calls directly to the .dist and needs
# the world to be initialized
c10d.init_process_group(
store=store, rank=self.rank, world_size=self.world_size, backend=backend
)
device = torch.device(f"cuda:{self.rank}")
x = torch.ones(5, 5, device=device) + self.rank
x.requires_grad = True
y = torch.distributed.nn.broadcast(x, 1)
self.assertEqual(y, 1 + torch.ones(5, 5))
z = y.sin().sum()
z.backward()
# We can't check the gradient of communications numerically so we have to do some calculations
if self.rank == 1:
self.assertEqual(x.grad, 2 * torch.cos(x))
elif self.rank == 0:
self.assertEqual(x.grad, torch.zeros(5, 5, device=device))
def _test_reduce(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
# This is required because these functions calls directly to the .dist and needs
# the world to be initialized
c10d.init_process_group(
store=store, rank=self.rank, world_size=self.world_size, backend=backend
)
device = torch.device(f"cuda:{self.rank}")
x = torch.ones(5, 5, device=device) + self.rank
x.requires_grad = True
y = torch.distributed.nn.reduce(x, 1, op=c10d.ReduceOp.SUM)
if self.rank == 1:
self.assertEqual(y, 3 * torch.ones(5, 5, device=device))
z = y.sin().sum()
z.backward()
# Gradients are broadcasted to both ranks
x_g = (3 * torch.ones(5, 5, device=device)).cos()
self.assertEqual(x.grad, x_g)
def _test_allreduce(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
# This is required because these functions calls directly to the .dist and needs
# the world to be initialized
c10d.init_process_group(
store=store, rank=self.rank, world_size=self.world_size, backend=backend
)
device = torch.device(f"cuda:{self.rank}")
x = torch.ones(5, 5, device=device) + self.rank
x.requires_grad = True
y = torch.distributed.nn.all_reduce(x, op=c10d.ReduceOp.SUM)
self.assertEqual(y, 3 * torch.ones(5, 5, device=device))
z = y.sin().sum()
z.backward()
x_g = 2 * (3 * torch.ones(5, 5, device=device)).cos()
self.assertEqual(x.grad, x_g)
def _test_all_gather(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
# This is required because these functions calls directly to the .dist and needs
# the world to be initialized
c10d.init_process_group(
store=store, rank=self.rank, world_size=self.world_size, backend=backend
)
device = torch.device(f"cuda:{self.rank}")
x = torch.ones(5, 5, device=device) + self.rank
x.requires_grad = True
tensors = torch.distributed.nn.all_gather(x)
for i, t in enumerate(tensors):
self.assertEqual(t, torch.ones(5, 5, device=device) + i)
y = torch.sum(torch.stack(tensors), axis=0)
z = y.sin().sum()
z.backward()
x_s = 2 * (3 * torch.ones(5, 5, device=device)).cos()
self.assertEqual(x.grad, x_s)
    def _test_all_to_all(self, backend):
        """Verify torch.distributed.nn.all_to_all exchanges tensor lists and backprops.

        NOTE(review): expected values assume world_size == 2 — confirm against
        the test harness configuration.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        # torch.distributed.nn calls into torch.distributed directly, so the
        # default process group must be initialized first.
        c10d.init_process_group(
            store=store, rank=self.rank, world_size=self.world_size, backend=backend
        )
        device = torch.device(f"cuda:{self.rank}")
        # Two per-rank inputs, both all (1 + 2*rank); one is sent to each peer.
        x0 = torch.ones(5, 5, device=device) + 2 * self.rank
        x1 = torch.ones(5, 5, device=device) + 2 * self.rank
        x0.requires_grad = True
        x1.requires_grad = True
        # Preallocated output buffers, filled in place by the collective.
        y0 = torch.empty_like(x0)
        y1 = torch.empty_like(x1)
        tensors = torch.distributed.nn.all_to_all([y0, y1], [x0, x1])
        # After the exchange, output i holds rank i's contribution: all (1 + 2*i).
        for i, t in enumerate(tensors):
            self.assertEqual(t, torch.ones(5, 5, device=device) + 2 * i)
        y = torch.sum(torch.stack(tensors), axis=0)
        z = y.sin().sum()
        z.backward()
        # Each input tensor contributes to exactly one peer's sum of 4s, so
        # both gradients are cos(4).
        x_s = (4 * torch.ones(5, 5, device=device)).cos()
        self.assertEqual(x0.grad, x_s)
        self.assertEqual(x1.grad, x_s)
    def _test_all_to_all_single(self, backend):
        """Verify torch.distributed.nn.all_to_all_single with uneven splits backprops."""
        store = c10d.FileStore(self.file_name, self.world_size)
        # torch.distributed.nn calls into torch.distributed directly, so the
        # default process group must be initialized first.
        c10d.init_process_group(
            store=store, rank=self.rank, world_size=self.world_size, backend=backend
        )
        device = torch.device(f"cuda:{self.rank}")
        # Total rows on this rank: sum of the per-peer split sizes below,
        # i.e. (rank+1) * (1 + 2 + ... + world_size).
        row = self.world_size * (self.rank + 1) * (self.world_size + 1) / 2
        x = torch.ones(int(row), 5, device=device) * (self.rank + 1)
        x.requires_grad = True
        y = torch.empty_like(x)
        # Rank r sends (i+1)*(r+1) rows to peer i; the matching receive splits
        # happen to coincide because peer i sends (r+1)*(i+1) rows back.
        split_sizes = [(i + 1) * (self.rank + 1) for i in range(self.world_size)]
        y = torch.distributed.nn.all_to_all_single(
            y, x, output_split_sizes=split_sizes, input_split_sizes=split_sizes
        )
        # Chunk idx of the output comes from rank idx, whose rows are all (idx+1).
        expected = []
        for idx, tensor in enumerate(torch.split(x, split_sizes)):
            expected.append(torch.full_like(tensor, (idx + 1)))
        expected = torch.cat(expected)
        self.assertEqual(y, expected)
        z = y.sin().sum()
        z.backward()
        # Backward routes gradients back to the sender, so x.grad is cos(x)
        # elementwise: cos(rank + 1) everywhere.
        x_s = ((self.rank + 1) * torch.ones(int(row), 5, device=device)).cos()
        self.assertEqual(x.grad, x_s)
# Standard PyTorch test entry point: run_tests() discovers and executes the
# TestCase classes defined in this module.
if __name__ == "__main__":
    run_tests()
| TestDistributedNNFunctions |
python | pennersr__django-allauth | allauth/usersessions/forms.py | {
"start": 76,
"end": 341
} | class ____(forms.Form):
def __init__(self, *args, **kwargs):
self.request = kwargs.pop("request")
super().__init__(*args, **kwargs)
def save(self, request):
flows.sessions.end_other_sessions(request, request.user)
| ManageUserSessionsForm |
python | celery__celery | t/unit/worker/test_strategy.py | {
"start": 10389,
"end": 10524
} | class ____(test_default_strategy_proto2):
def get_message_class(self):
return self.TaskMessage1
| test_default_strategy_proto1 |
python | ray-project__ray | rllib/policy/torch_mixins.py | {
"start": 3416,
"end": 4693
} | class ____:
"""Assigns the `update_kl()` method to a TorchPolicy.
This is used by Algorithms to update the KL coefficient
after each learning step based on `config.kl_target` and
the measured KL value (from the train_batch).
"""
def __init__(self, config):
# The current KL value (as python float).
self.kl_coeff = config["kl_coeff"]
# Constant target value.
self.kl_target = config["kl_target"]
def update_kl(self, sampled_kl):
# Update the current KL value based on the recently measured value.
if sampled_kl > 2.0 * self.kl_target:
self.kl_coeff *= 1.5
elif sampled_kl < 0.5 * self.kl_target:
self.kl_coeff *= 0.5
# Return the current KL value.
return self.kl_coeff
def get_state(self) -> PolicyState:
state = super().get_state()
# Add current kl-coeff value.
state["current_kl_coeff"] = self.kl_coeff
return state
def set_state(self, state: PolicyState) -> None:
# Set current kl-coeff value first.
self.kl_coeff = state.pop("current_kl_coeff", self.config["kl_coeff"])
# Call super's set_state with rest of the state dict.
super().set_state(state)
@OldAPIStack
| KLCoeffMixin |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_crypto_ticker.py | {
"start": 1687,
"end": 3950
} | class ____(ColumnMapExpectation):
"""Expect column values to be valid cryptocurrency tickers."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"well_formed_crypto_ticker": [
"BTC",
"ETH",
"USDT",
"BNB",
],
"malformed_crypto_ticker": [
"",
"btc",
"ABCD",
"This is not a cryptocurrency.",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "well_formed_crypto_ticker"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "malformed_crypto_ticker"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_crypto_ticker"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "hackathon", "typed-entities"],
"contributors": [
"@voidforall",
],
"requirements": ["cryptocompare"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidCryptoTicker().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidCryptoTicker |
python | numba__numba | numba/cuda/tests/cudapy/test_compiler.py | {
"start": 9967,
"end": 10821
} | class ____(unittest.TestCase):
'''For tests where we can only check correctness by examining the compiler
output rather than observing the effects of execution.'''
def test_nanosleep(self):
def use_nanosleep(x):
# Sleep for a constant time
cuda.nanosleep(32)
# Sleep for a variable time
cuda.nanosleep(x)
ptx, resty = compile_ptx(use_nanosleep, (uint32,), cc=(7, 0))
nanosleep_count = 0
for line in ptx.split('\n'):
if 'nanosleep.u32' in line:
nanosleep_count += 1
expected = 2
self.assertEqual(expected, nanosleep_count,
(f'Got {nanosleep_count} nanosleep instructions, '
f'expected {expected}'))
if __name__ == '__main__':
unittest.main()
| TestCompileOnlyTests |
python | huggingface__transformers | src/transformers/models/regnet/modeling_regnet.py | {
"start": 12189,
"end": 14120
} | class ____(RegNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.regnet = RegNetModel(config)
# classification head
self.classifier = nn.Sequential(
nn.Flatten(),
nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
)
# initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> ImageClassifierOutputWithNoAttention:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
__all__ = ["RegNetForImageClassification", "RegNetModel", "RegNetPreTrainedModel"]
| RegNetForImageClassification |
python | dask__distributed | distributed/tests/test_scheduler.py | {
"start": 99474,
"end": 168658
} | class ____(ConnectionPool):
def __init__(self, *args, failing_connections=0, **kwargs):
self.cnn_count = 0
self.failing_connections = failing_connections
super().__init__(*args, **kwargs)
async def connect(self, *args, **kwargs):
self.cnn_count += 1
if self.cnn_count > self.failing_connections:
return await super().connect(*args, **kwargs)
else:
return BrokenComm()
@gen_cluster(client=True)
async def test_gather_failing_can_recover(c, s, a, b):
x = await c.scatter({"x": 1}, workers=a.address)
rpc = await FlakyConnectionPool(failing_connections=1)
with (
mock.patch.object(s, "rpc", rpc),
dask.config.set({"distributed.comm.retry.count": 1}),
captured_handler(
logging.getLogger("distributed").handlers[0]
) as distributed_log,
):
res = await s.gather(keys=["x"])
assert re.match(
r"\A\d+-\d+-\d+ \d+:\d+:\d+,\d+ - distributed.utils_comm - INFO - "
r"Retrying get_data_from_worker after exception in attempt 0/1: \n\Z",
distributed_log.getvalue(),
)
assert res["status"] == "OK"
@gen_cluster(client=True)
async def test_gather_failing_cnn_error(c, s, a, b):
x = await c.scatter({"x": 1}, workers=a.address)
rpc = await FlakyConnectionPool(failing_connections=10)
with mock.patch.object(s, "rpc", rpc):
res = await s.gather(keys=["x"])
assert res["status"] == "error"
assert list(res["keys"]) == ["x"]
@gen_cluster(client=True)
async def test_gather_no_workers(c, s, a, b):
await asyncio.sleep(1)
x = await c.scatter({"x": 1}, workers=a.address)
await a.close()
await b.close()
res = await s.gather(keys=["x"])
assert res["status"] == "error"
assert list(res["keys"]) == ["x"]
@pytest.mark.parametrize("direct", [False, True])
@gen_cluster(
client=True,
nthreads=[("", 1)],
# This behaviour is independent of retries.
# Disable them to reduce the complexity of this test.
config={"distributed.comm.retry.count": 0},
)
async def test_gather_bad_worker(c, s, a, direct):
"""Upon connection failure, gather() tries again indefinitely and transparently,
for as long as the batched comms channel is active.
"""
x = c.submit(inc, 1, key="x")
c.rpc = await FlakyConnectionPool(failing_connections=3)
s.rpc = await FlakyConnectionPool(failing_connections=1)
with captured_logger("distributed.scheduler") as sched_logger:
with captured_logger("distributed.client") as client_logger:
assert await c.gather(x, direct=direct) == 2
assert "Couldn't gather keys: {'x': 'memory'}" in sched_logger.getvalue()
assert "Couldn't gather 1 keys, rescheduling ('x',)" in client_logger.getvalue()
if direct:
# 1. try direct=True; fail
# 2. fall back to direct=False; fail
# 3. try direct=True again; fail
# 4. fall back to direct=False again; success
assert c.rpc.cnn_count == 2
assert s.rpc.cnn_count == 2
else:
# 1. try direct=False; fail
# 2. try again direct=False; success
assert c.rpc.cnn_count == 0
assert s.rpc.cnn_count == 2
@gen_cluster(client=True)
async def test_too_many_groups(c, s, a, b):
x = dask.delayed(inc)(1)
y = dask.delayed(dec)(2)
z = dask.delayed(operator.add)(x, y)
await c.compute(z)
while s.tasks:
await asyncio.sleep(0.01)
assert len(s.task_groups) < 3
@gen_test()
@pytest.mark.parametrize(
"dashboard_link_template,expected_dashboard_link",
(
("{scheme}://{host}:{port}/status", r"dashboard at:\s*http://"),
("{ENV_VAR_MISSING}", r"dashboard at:\s*:\d*"),
),
)
async def test_multiple_listeners(dashboard_link_template, expected_dashboard_link):
with dask.config.set({"distributed.dashboard.link": dashboard_link_template}):
with captured_logger("distributed.scheduler") as log:
async with Scheduler(
dashboard_address=":0", protocol=["inproc", "tcp"]
) as s:
async with Worker(s.listeners[0].contact_address) as a:
async with Worker(s.listeners[1].contact_address) as b:
assert a.address.startswith("inproc")
assert a.scheduler.address.startswith("inproc")
assert b.address.startswith("tcp")
assert b.scheduler.address.startswith("tcp")
async with Client(s.address, asynchronous=True) as c:
futures = c.map(inc, range(20))
await wait(futures)
# Force inter-worker communication both ways
await c.submit(sum, futures, workers=[a.address])
await c.submit(len, futures, workers=[b.address])
log = log.getvalue()
assert re.search(r"Scheduler at:\s*tcp://", log)
assert re.search(r"Scheduler at:\s*inproc://", log)
# Dashboard link formatting can fail if template contains env vars which aren't
# present. Don't kill scheduler, but revert to outputting the port and helpful msg
assert re.search(expected_dashboard_link, log)
if "ENV_VAR_MISSING" in dashboard_link_template:
msg = r"Failed to format dashboard link, unknown value: 'ENV_VAR_MISSING'"
assert re.search(msg, log)
@gen_cluster(nthreads=[("127.0.0.1", 1)])
async def test_worker_name_collision(s, a):
# test that a name collision for workers produces the expected response
# and leaves the data structures of Scheduler in a good state
# is not updated by the second worker
with captured_logger("distributed.scheduler") as log:
with raises_with_cause(
RuntimeError, None, ValueError, f"name taken, {a.name!r}"
):
await Worker(s.address, name=a.name, host="127.0.0.1")
s.validate_state()
assert set(s.workers) == {a.address}
assert s.aliases == {a.name: a.address}
log = log.getvalue()
assert "duplicate" in log
assert str(a.name) in log
@gen_cluster(client=True, config={"distributed.scheduler.unknown-task-duration": "1h"})
async def test_unknown_task_duration_config(client, s, a, b):
future = client.submit(slowinc, 1)
while not s.tasks:
await asyncio.sleep(0.001)
assert sum(s._get_prefix_duration(ts.prefix) for ts in s.tasks.values()) == 3600
extension = s.extensions["stealing"]
assert len(extension.unknown_durations) == 1
await wait(future)
assert len(extension.unknown_durations) == 0
@gen_cluster()
async def test_unknown_task_duration_config_2(s, a, b):
assert s.idle_since == s.time_started
@gen_cluster(client=True)
async def test_retire_state_change(c, s, a, b):
np = pytest.importorskip("numpy")
y = c.map(lambda x: x**2, range(10))
await c.scatter(y)
coros = []
for _ in range(2):
v = c.map(lambda i: i * np.random.randint(1000), y)
k = c.map(lambda i: i * np.random.randint(1000), v)
foo = c.map(lambda j: j * 6, k)
step = c.compute(foo)
coros.append(c.gather(step))
await c.retire_workers(workers=[a.address])
await asyncio.gather(*coros)
@gen_cluster()
async def test_get_worker_monitor_info(s, a, b):
res = await s.get_worker_monitor_info()
ms = ["cpu", "time", "host_net_io.read_bps", "host_net_io.write_bps"]
if not WINDOWS:
ms += ["num_fds"]
for w in (a, b):
assert all(res[w.address]["range_query"][m] is not None for m in ms)
assert res[w.address]["count"] is not None
assert res[w.address]["last_time"] is not None
@gen_cluster(client=True)
async def test_quiet_cluster_round_robin(c, s, a, b):
await c.submit(inc, 1)
await c.submit(inc, 2)
await c.submit(inc, 3)
assert a.state.log and b.state.log
def test_memorystate():
m = MemoryState(
process=100,
unmanaged_old=15,
managed=68,
spilled=12,
)
assert m.process == 100
assert m.managed_total == 80
assert m.managed == 68
assert m.spilled == 12
assert m.unmanaged == 32
assert m.unmanaged_old == 15
assert m.unmanaged_recent == 17
assert m.optimistic == 83
with pytest.warns(FutureWarning):
assert m.managed_spilled == m.spilled
with pytest.warns(FutureWarning):
assert m.managed_in_memory == m.managed
assert (
repr(m)
== dedent(
"""
Process memory (RSS) : 100 B
- managed by Dask : 68 B
- unmanaged (old) : 15 B
- unmanaged (recent): 17 B
Spilled to disk : 12 B
"""
).lstrip()
)
def test_memorystate_sum():
m1 = MemoryState(
process=100,
unmanaged_old=15,
managed=68,
spilled=12,
)
m2 = MemoryState(
process=80,
unmanaged_old=10,
managed=58,
spilled=2,
)
m3 = MemoryState.sum(m1, m2)
assert m3.process == 180
assert m3.unmanaged_old == 25
assert m3.managed_total == 140
assert m3.spilled == 14
@pytest.mark.parametrize(
"process,unmanaged_old,managed,spilled",
list(product(*[[0, 1, 2, 3]] * 4)),
)
def test_memorystate_adds_up(process, unmanaged_old, managed, spilled):
"""Input data is massaged by __init__ so that everything adds up by construction"""
m = MemoryState(
process=process,
unmanaged_old=unmanaged_old,
managed=managed,
spilled=spilled,
)
assert m.managed + m.unmanaged == m.process
assert m.managed + m.spilled == m.managed_total
assert m.unmanaged_old + m.unmanaged_recent == m.unmanaged
assert m.optimistic + m.unmanaged_recent == m.process
def test_memorystate__to_dict():
m = MemoryState(process=11, unmanaged_old=2, managed=3, spilled=1)
assert m._to_dict() == {
"managed": 3,
"managed_total": 4,
"optimistic": 5,
"process": 11,
"spilled": 1,
"unmanaged": 8,
"unmanaged_old": 2,
"unmanaged_recent": 6,
}
_test_leak = []
def leaking(out_mib, leak_mib, sleep_time):
out = "x" * (out_mib * 2**20)
_test_leak.append("x" * (leak_mib * 2**20))
sleep(sleep_time)
return out
def clear_leak():
_test_leak.clear()
async def assert_memory(
scheduler_or_workerstate: Scheduler | WorkerState,
attr: str,
/,
min_mib: float,
max_mib: float,
*,
timeout: float = 10,
) -> None:
t0 = time()
while True:
minfo = scheduler_or_workerstate.memory
nmib = getattr(minfo, attr) / 2**20
if min_mib <= nmib <= max_mib:
return
if time() - t0 > timeout:
raise AssertionError(
f"Expected {min_mib} MiB <= {attr} <= {max_mib} MiB; got:\n{minfo!r}"
)
await asyncio.sleep(0.01)
@pytest.mark.slow
@gen_cluster(
client=True,
Worker=Nanny,
config={
"distributed.worker.memory.recent-to-old-time": "4s",
"distributed.worker.memory.spill": 0.7,
"distributed.worker.memory.spill-compression": "zlib",
},
worker_kwargs={
"heartbeat_interval": "20ms",
"memory_limit": "700 MiB",
},
)
async def test_memory(c, s, *nannies):
# WorkerState objects, as opposed to the Nanny objects passed by gen_cluster
a, b = s.workers.values()
def print_memory_info(msg: str) -> None:
print(f"==== {msg} ====")
print(f"---- a ----\n{a.memory}")
print(f"---- b ----\n{b.memory}")
print(f"---- s ----\n{s.memory}")
s_m0 = s.memory
assert s_m0.process == a.memory.process + b.memory.process
assert s_m0.managed == 0
assert a.memory.managed == 0
assert b.memory.managed == 0
# Trigger potential imports inside WorkerPlugin.transition
await c.submit(inc, 0, workers=[a.address])
await c.submit(inc, 1, workers=[b.address])
# Wait for the memory readings to stabilize after workers go online
await asyncio.sleep(2)
await asyncio.gather(
assert_memory(a, "unmanaged_recent", 0, 5, timeout=10),
assert_memory(b, "unmanaged_recent", 0, 5, timeout=10),
assert_memory(s, "unmanaged_recent", 0, 10, timeout=10.1),
)
print()
print_memory_info("Starting memory")
# 50 MiB heap + 100 MiB leak
# Note that runtime=2s is less than recent-to-old-time=4s
f1 = c.submit(leaking, 50, 100, 2, key="f1", workers=[a.name])
f2 = c.submit(leaking, 50, 100, 2, key="f2", workers=[b.name])
await asyncio.gather(
assert_memory(a, "unmanaged_recent", 150, 170, timeout=1.8),
assert_memory(b, "unmanaged_recent", 150, 170, timeout=1.8),
assert_memory(s, "unmanaged_recent", 300, 340, timeout=1.9),
)
await wait([f1, f2])
# On each worker, we now have 50 MiB managed + 100 MiB fresh leak
await asyncio.gather(
assert_memory(a, "managed", 50, 51, timeout=0),
assert_memory(b, "managed", 50, 51, timeout=0),
assert_memory(s, "managed", 100, 101, timeout=0),
assert_memory(a, "unmanaged_recent", 100, 120, timeout=0),
assert_memory(b, "unmanaged_recent", 100, 120, timeout=0),
assert_memory(s, "unmanaged_recent", 200, 240, timeout=0),
)
# Force the output of f1 and f2 to spill to disk
print_memory_info("Before spill")
a_leak = round(700 * 0.7 - a.memory.process / 2**20)
b_leak = round(700 * 0.7 - b.memory.process / 2**20)
assert a_leak > 50 and b_leak > 50
a_leak += 10
b_leak += 10
print(f"Leaking additional memory: {a_leak=}; {b_leak=}")
await wait(
[
c.submit(leaking, 0, a_leak, 0, pure=False, workers=[a.name]),
c.submit(leaking, 0, b_leak, 0, pure=False, workers=[b.name]),
]
)
# dask serialization compresses ("x" * 50 * 2**20) from 50 MiB to ~50 kiB.
# Test that spilled reports the actual size on disk and not the output of
# sizeof().
await asyncio.gather(
assert_memory(a, "spilled", 0.04, 0.08, timeout=3),
assert_memory(b, "spilled", 0.04, 0.08, timeout=3),
assert_memory(s, "spilled", 0.08, 0.16, timeout=3.1),
)
# FIXME on Windows and MacOS we occasionally observe managed = 49 bytes
await asyncio.gather(
assert_memory(a, "managed", 0, 0.1, timeout=0),
assert_memory(b, "managed", 0, 0.1, timeout=0),
assert_memory(s, "managed", 0, 0.1, timeout=0),
)
print_memory_info("After spill")
# Delete spilled keys
del f1
del f2
await asyncio.gather(
assert_memory(a, "spilled", 0, 0, timeout=3),
assert_memory(b, "spilled", 0, 0, timeout=3),
assert_memory(s, "spilled", 0, 0, timeout=3.1),
)
print_memory_info("After clearing spilled keys")
# Wait until 4s have passed since the spill to observe unmanaged_recent
# transition into unmanaged_old
await asyncio.gather(
assert_memory(a, "unmanaged_recent", 0, 5, timeout=4.5),
assert_memory(b, "unmanaged_recent", 0, 5, timeout=4.5),
assert_memory(s, "unmanaged_recent", 0, 10, timeout=4.6),
)
# When the leaked memory is cleared, unmanaged and unmanaged_old drop.
# On MacOS and Windows, the process memory of the Python interpreter does not shrink
# as fast as on Linux. Note that this behaviour is heavily impacted by OS tweaks,
# meaning that what you observe on your local host may behave differently on CI.
if not LINUX:
return
print_memory_info("Before clearing memory leak")
prev_unmanaged_a = a.memory.unmanaged / 2**20
prev_unmanaged_b = b.memory.unmanaged / 2**20
await c.run(clear_leak)
await asyncio.gather(
assert_memory(a, "unmanaged", 0, prev_unmanaged_a - 50, timeout=10),
assert_memory(b, "unmanaged", 0, prev_unmanaged_b - 50, timeout=10),
)
await asyncio.gather(
assert_memory(a, "unmanaged_recent", 0, 5, timeout=0),
assert_memory(b, "unmanaged_recent", 0, 5, timeout=0),
)
@gen_cluster(client=True, worker_kwargs={"memory_limit": 0})
async def test_memory_no_zict(c, s, a, b):
"""When Worker.data is not a SpillBuffer, test that querying spilled
defaults to 0 and doesn't raise KeyError
"""
await c.wait_for_workers(2)
assert isinstance(a.data, dict)
assert isinstance(b.data, dict)
f = c.submit(leaking, 10, 0, 0)
await f
assert 10 * 2**20 < s.memory.managed < 11 * 2**20
assert s.memory.spilled == 0
@gen_cluster(nthreads=[])
async def test_memory_no_workers(s):
assert s.memory.process == 0
assert s.memory.managed == 0
@gen_cluster(config={"distributed.admin.system-monitor.interval": "999s"})
async def test_infrequent_sysmon(s, a, b):
"""It doesn't matter how infrequently SystemMonitor.update() is called; there's
always one invocation before the first heartbeat.
"""
assert s.memory.process > 0
@gen_cluster()
async def test_close_scheduler__close_workers_Worker(s, a, b):
with captured_logger("distributed.comm", level=logging.DEBUG) as log:
await s.close()
while not a.status == Status.closed:
await asyncio.sleep(0.05)
log = log.getvalue()
assert "retry" not in log
@pytest.mark.slow
@gen_cluster(Worker=Nanny)
async def test_close_scheduler__close_workers_Nanny(s, a, b):
with captured_logger("distributed.comm", level=logging.DEBUG) as log:
await s.close()
while not a.status == Status.closed:
await asyncio.sleep(0.05)
log = log.getvalue()
assert "retry" not in log
async def assert_ndata(client, by_addr, total=None):
"""Test that the number of elements in Worker.data is as expected.
To be used when the worker is wrapped by a nanny.
by_addr: dict of either exact numbers or (min, max) tuples
total: optional exact match on the total number of keys (with duplicates) across all
workers
"""
out = await client.run(lambda dask_worker: len(dask_worker.data))
try:
for k, v in by_addr.items():
if isinstance(v, tuple):
assert v[0] <= out[k] <= v[1]
else:
assert out[k] == v
if total is not None:
assert sum(out.values()) == total
except AssertionError:
raise AssertionError(f"Expected {by_addr}; {total=}; got {out}")
@pytest.mark.slow
@gen_cluster(
client=True,
Worker=Nanny,
worker_kwargs={"memory_limit": "1 GiB"},
config=merge(NO_AMM, {"distributed.worker.memory.rebalance.sender-min": 0.3}),
)
async def test_rebalance(c, s, a, b):
# We used nannies to have separate processes for each worker
# Generate 500 buffers worth 512 MiB total on worker a. This sends its memory
# utilisation slightly above 50% (after counting unmanaged) which is above the
# distributed.worker.memory.rebalance.sender-min threshold.
futures = c.map(
lambda _: "x" * (2**29 // 500), range(500), workers=[a.worker_address]
)
await wait(futures)
# Wait for heartbeats
await assert_memory(s, "process", 512, 1024)
await assert_ndata(c, {a.worker_address: 500, b.worker_address: 0})
await s.rebalance()
# Allow for some uncertainty as the unmanaged memory is not stable
await assert_ndata(
c, {a.worker_address: (50, 450), b.worker_address: (50, 450)}, total=500
)
# rebalance() when there is nothing to do
await s.rebalance()
await assert_ndata(
c, {a.worker_address: (50, 450), b.worker_address: (50, 450)}, total=500
)
# Set rebalance() to work predictably on small amounts of managed memory. By default, it
# uses optimistic memory, which would only be possible to test by allocating very large
# amounts of managed memory, so that they would hide variations in unmanaged memory.
REBALANCE_MANAGED_CONFIG = merge(
NO_AMM,
{
"distributed.worker.memory.rebalance.measure": "managed",
"distributed.worker.memory.rebalance.sender-min": 0,
"distributed.worker.memory.rebalance.sender-recipient-gap": 0,
},
)
@gen_cluster(client=True, config=REBALANCE_MANAGED_CONFIG)
async def test_rebalance_managed_memory(c, s, a, b):
futures = await c.scatter(range(100), workers=[a.address])
assert len(a.data) == 100
assert len(b.data) == 0
await s.rebalance()
assert len(a.data) == 50
assert len(b.data) == 50
@gen_cluster(nthreads=[("", 1)] * 3, client=True, config=REBALANCE_MANAGED_CONFIG)
async def test_rebalance_workers_and_keys(client, s, a, b, c):
futures = await client.scatter(range(100), workers=[a.address])
assert (len(a.data), len(b.data), len(c.data)) == (100, 0, 0)
# Passing empty iterables is not the same as omitting the arguments
await s.rebalance(keys=[])
await s.rebalance(workers=[])
assert (len(a.data), len(b.data), len(c.data)) == (100, 0, 0)
# Limit rebalancing to two arbitrary keys and two arbitrary workers.
await s.rebalance(
keys=[futures[3].key, futures[7].key], workers=[a.address, b.address]
)
assert (len(a.data), len(b.data), len(c.data)) == (98, 2, 0)
with pytest.raises(KeyError):
await s.rebalance(workers=["notexist"])
@gen_cluster(config=NO_AMM)
async def test_rebalance_missing_data1(s, a, b):
"""key never existed"""
out = await s.rebalance(keys=["notexist"])
assert out == {"status": "partial-fail", "keys": ["notexist"]}
@gen_cluster(client=True, config=NO_AMM)
async def test_rebalance_missing_data2(c, s, a, b):
"""keys exist but belong to unfinished futures. Unlike Client.rebalance(),
Scheduler.rebalance() does not wait for unfinished futures.
"""
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
await asyncio.sleep(0.1)
out = await s.rebalance(keys=[f.key for f in futures])
assert out["status"] == "partial-fail"
assert 8 <= len(out["keys"]) <= 10
@pytest.mark.parametrize("explicit", [False, True])
@gen_cluster(client=True, config=REBALANCE_MANAGED_CONFIG)
async def test_rebalance_raises_missing_data3(c, s, a, b, explicit):
"""keys exist when the sync part of rebalance runs, but are gone by the time the
actual data movement runs.
There is an error message only if the keys are explicitly listed in the API call.
"""
futures = await c.scatter(range(100), workers=[a.address])
if explicit:
pytest.xfail(
reason="""Freeing keys and gathering data is using different
channels (stream vs explicit RPC). Therefore, the
partial-fail is very timing sensitive and subject to a race
condition. This test assumes that the data is freed before
the rebalance get_data requests come in but merely deleting
the futures is not sufficient to guarantee this"""
)
keys = [f.key for f in futures]
del futures
out = await s.rebalance(keys=keys)
assert out["status"] == "partial-fail"
assert 1 <= len(out["keys"]) <= 100
else:
del futures
out = await s.rebalance()
assert out == {"status": "OK"}
@gen_cluster(nthreads=[])
async def test_rebalance_no_workers(s):
await s.rebalance()
@gen_cluster(
client=True,
worker_kwargs={"memory_limit": 0},
config=merge(NO_AMM, {"distributed.worker.memory.rebalance.measure": "managed"}),
)
async def test_rebalance_no_limit(c, s, a, b):
futures = await c.scatter(range(100), workers=[a.address])
assert len(a.data) == 100
assert len(b.data) == 0
await s.rebalance()
# Disabling memory_limit made us ignore all % thresholds set in the config
assert len(a.data) == 50
assert len(b.data) == 50
@pytest.mark.slow
@gen_cluster(
client=True,
Worker=Nanny,
worker_kwargs={"memory_limit": "1000 MiB"},
config=merge(
NO_AMM,
{
"distributed.worker.memory.rebalance.measure": "managed",
"distributed.worker.memory.rebalance.sender-min": 0.2,
"distributed.worker.memory.rebalance.recipient-max": 0.1,
},
),
)
async def test_rebalance_no_recipients(c, s, a, b):
"""There are sender workers, but no recipient workers"""
# Fill 25% of the memory of a and 10% of the memory of b
fut_a = c.map(lambda _: "x" * (2**20), range(250), workers=[a.worker_address])
fut_b = c.map(lambda _: "x" * (2**20), range(100), workers=[b.worker_address])
await wait(fut_a + fut_b)
await assert_memory(s, "managed", 350, 351)
await assert_ndata(c, {a.worker_address: 250, b.worker_address: 100})
await s.rebalance()
await assert_ndata(c, {a.worker_address: 250, b.worker_address: 100})
@gen_cluster(
nthreads=[("", 1)] * 3,
client=True,
worker_kwargs={"memory_limit": 0},
config=merge(NO_AMM, {"distributed.worker.memory.rebalance.measure": "managed"}),
)
async def test_rebalance_skip_recipient(client, s, a, b, c):
"""A recipient is skipped because it already holds a copy of the key to be sent"""
futures = await client.scatter(range(10), workers=[a.address])
await client.replicate(futures[0:2], workers=[a.address, b.address])
await client.replicate(futures[2:4], workers=[a.address, c.address])
assert (len(a.data), len(b.data), len(c.data)) == (10, 2, 2)
await client.rebalance(futures[:2])
assert (len(a.data), len(b.data), len(c.data)) == (8, 2, 4)
@gen_cluster(
client=True,
worker_kwargs={"memory_limit": 0},
config=merge(NO_AMM, {"distributed.worker.memory.rebalance.measure": "managed"}),
)
async def test_rebalance_skip_all_recipients(c, s, a, b):
"""All recipients are skipped because they already hold copies"""
futures = await c.scatter(range(10), workers=[a.address])
await wait(futures)
await c.replicate([futures[0]])
assert (len(a.data), len(b.data)) == (10, 1)
await c.rebalance(futures[:2])
assert (len(a.data), len(b.data)) == (9, 2)
@pytest.mark.slow
@gen_cluster(
client=True,
Worker=Nanny,
worker_kwargs={"memory_limit": "1000 MiB"},
config=merge(NO_AMM, {"distributed.worker.memory.rebalance.measure": "managed"}),
)
async def test_rebalance_sender_below_mean(c, s, *_):
"""A task remains on the sender because moving it would send it below the mean"""
a, b = s.workers
f1 = c.submit(lambda: "x" * (400 * 2**20), workers=[a])
await wait([f1])
f2 = c.submit(lambda: "x" * (10 * 2**20), workers=[a])
await wait([f2])
await assert_memory(s, "managed", 410, 411)
await assert_ndata(c, {a: 2, b: 0})
await s.rebalance()
assert await c.has_what() == {a: (f1.key,), b: (f2.key,)}
@pytest.mark.slow
@gen_cluster(
    client=True,
    Worker=Nanny,
    worker_kwargs={"memory_limit": "1000 MiB"},
    config=merge(
        NO_AMM,
        {
            "distributed.worker.memory.rebalance.measure": "managed",
            "distributed.worker.memory.rebalance.sender-min": 0.3,
        },
    ),
)
async def test_rebalance_least_recently_inserted_sender_min(c, s, *_):
    """
    1. keys are picked using a least recently inserted policy
    2. workers below sender-min are never senders
    """
    a, b = s.workers
    small_futures = c.map(lambda _: "x", range(10), workers=[a])
    await wait(small_futures)
    await assert_ndata(c, {a: 10, b: 0})
    # a holds only tiny keys: it's below sender-min, so nothing is moved
    await s.rebalance()
    await assert_ndata(c, {a: 10, b: 0})
    large_future = c.submit(lambda: "x" * (300 * 2**20), workers=[a])
    await wait([large_future])
    await assert_memory(s, "managed", 300, 301)
    await assert_ndata(c, {a: 11, b: 0})
    # Now a is above sender-min; the oldest (small) keys are moved off first
    await s.rebalance()
    await assert_ndata(c, {a: 1, b: 10})
    has_what = await c.has_what()
    assert has_what[a] == (large_future.key,)
    assert sorted(has_what[b]) == sorted(f.key for f in small_futures)
@gen_cluster(client=True)
async def test_gather_on_worker(c, s, a, b):
    """gather_on_worker copies a key onto the recipient and updates
    scheduler-side bookkeeping (nbytes, has_what, who_has)"""
    x = await c.scatter("x", workers=[a.address])
    x_ts = s.tasks[x.key]
    a_ws = s.workers[a.address]
    b_ws = s.workers[b.address]
    assert a_ws.nbytes > 0
    assert b_ws.nbytes == 0
    assert x_ts in a_ws.has_what
    assert x_ts not in b_ws.has_what
    assert x_ts.who_has == {a_ws}
    # Empty return set means no keys failed to be gathered
    out = await s.gather_on_worker(b.address, {x.key: [a.address]})
    assert out == set()
    assert a.data[x.key] == "x"
    assert b.data[x.key] == "x"
    assert b_ws.nbytes == a_ws.nbytes
    assert x_ts in b_ws.has_what
    assert x_ts.who_has == {a_ws, b_ws}
@gen_cluster(client=True, scheduler_kwargs={"timeout": "100ms"})
async def test_gather_on_worker_bad_recipient(c, s, a, b):
    """The recipient is missing"""
    x = await c.scatter("x")
    await b.close()
    await async_poll_for(lambda: s.workers.keys() == {a.address}, timeout=5)
    # The failed key is reported back rather than raising
    out = await s.gather_on_worker(b.address, {x.key: [a.address]})
    assert out == {x.key}
@gen_cluster(client=True, worker_kwargs={"timeout": "100ms"})
async def test_gather_on_worker_bad_sender(c, s, a, b):
    """The only sender for a key is missing"""
    # tcp://127.0.0.1:12345 is an address nothing listens on
    out = await s.gather_on_worker(a.address, {"x": ["tcp://127.0.0.1:12345"]})
    assert out == {"x"}
@pytest.mark.parametrize("missing_first", [False, True])
@gen_cluster(client=True, worker_kwargs={"timeout": "100ms"})
async def test_gather_on_worker_bad_sender_replicated(c, s, a, b, missing_first):
    """One of the senders for a key is missing, but the key is available somewhere else"""
    x = await c.scatter("x", workers=[a.address])
    bad_addr = "tcp://127.0.0.1:12345"
    # Order matters; test both
    addrs = [bad_addr, a.address] if missing_first else [a.address, bad_addr]
    out = await s.gather_on_worker(b.address, {x.key: addrs})
    # The gather succeeds via the healthy replica
    assert out == set()
    assert a.data[x.key] == "x"
    assert b.data[x.key] == "x"
@gen_cluster(client=True)
async def test_gather_on_worker_key_not_on_sender(c, s, a, b):
    """The only sender for a key does not actually hold it"""
    # "x" was never scattered; the gather reports it as failed
    out = await s.gather_on_worker(a.address, {"x": [b.address]})
    assert out == {"x"}
@pytest.mark.parametrize("missing_first", [False, True])
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_gather_on_worker_key_not_on_sender_replicated(
    client, s, a, b, missing_first
):
    """One of the senders for a key does not actually hold it, but the key is available
    somewhere else
    """
    x = await client.scatter("x", workers=[a.address])
    # Order matters; test both
    addrs = [b.address, a.address] if missing_first else [a.address, b.address]
    out = await s.gather_on_worker(c.address, {x.key: addrs})
    # The gather falls back to worker a, which actually holds the key
    assert out == set()
    assert a.data[x.key] == "x"
    assert c.data[x.key] == "x"
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3, config=NO_AMM)
async def test_gather_on_worker_duplicate_task(client, s, a, b, c):
    """Race condition where the recipient worker receives the same task twice.
    Test that the task nbytes are not double-counted on the recipient.
    """
    x = await client.scatter("x", workers=[a.address, b.address], broadcast=True)
    assert a.data[x.key] == "x"
    assert b.data[x.key] == "x"
    assert x.key not in c.data
    # Two concurrent gathers of the same key onto c
    out = await asyncio.gather(
        s.gather_on_worker(c.address, {x.key: [a.address]}),
        s.gather_on_worker(c.address, {x.key: [b.address]}),
    )
    assert out == [set(), set()]
    assert c.data[x.key] == "x"
    a_ws = s.workers[a.address]
    b_ws = s.workers[b.address]
    c_ws = s.workers[c.address]
    assert a_ws.nbytes > 0
    # nbytes on c must match the single-copy size, not twice that
    assert c_ws.nbytes == b_ws.nbytes == a_ws.nbytes
@gen_cluster(
    client=True,
    nthreads=[("127.0.0.1", 1)] * 3,
    scheduler_kwargs={"timeout": "100ms"},
    config=NO_AMM,
)
async def test_rebalance_dead_recipient(client, s, a, b, c):
    """A key fails to be rebalanced due to recipient failure.
    The key is not deleted from the sender.
    Unrelated, successful keys are deleted from the senders.
    """
    x, y = await client.scatter(["x", "y"], workers=[a.address])
    a_ws = s.workers[a.address]
    b_ws = s.workers[b.address]
    c_ws = s.workers[c.address]
    x_ts = s.tasks[x.key]
    y_ts = s.tasks[y.key]
    # Kill worker c while the scheduler still references its WorkerState
    await c.close()
    assert s.workers.keys() == {a.address, b.address}
    out = await s._rebalance_move_data(
        [(a_ws, b_ws, x_ts), (a_ws, c_ws, y_ts)], stimulus_id="test"
    )
    # x moved to b successfully; y failed and stayed on a
    assert out == {"status": "partial-fail", "keys": [y.key]}
    assert a.data == {y.key: "y"}
    assert b.data == {x.key: "x"}
    assert await client.has_what() == {a.address: (y.key,), b.address: (x.key,)}
@gen_cluster(client=True, config=NO_AMM)
async def test_delete_worker_data(c, s, a, b):
    """delete_worker_data drops keys from one worker and forgets tasks that
    lost their last replica"""
    # delete only copy of x
    # delete one of the copies of y
    # don't touch z
    x, y, z = await c.scatter(["x", "y", "z"], workers=[a.address])
    await c.replicate(y)
    assert a.data == {x.key: "x", y.key: "y", z.key: "z"}
    assert b.data == {y.key: "y"}
    assert s.tasks.keys() == {x.key, y.key, z.key}
    await s.delete_worker_data(a.address, [x.key, y.key], stimulus_id="test")
    assert a.data == {z.key: "z"}
    assert b.data == {y.key: "y"}
    # x had no other replica, so its TaskState is gone; y survives via b
    assert s.tasks.keys() == {y.key, z.key}
    assert s.workers[a.address].nbytes == s.tasks[z.key].nbytes
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_delete_worker_data_double_delete(c, s, a):
    """_delete_worker_data race condition where the same key is deleted twice.
    WorkerState.nbytes is not double-decreased.
    """
    x, y = await c.scatter(["x", "y"])
    # Two concurrent deletions of the same key
    await asyncio.gather(
        s.delete_worker_data(a.address, [x.key], stimulus_id="test"),
        s.delete_worker_data(a.address, [x.key], stimulus_id="test"),
    )
    assert a.data == {y.key: "y"}
    a_ws = s.workers[a.address]
    y_ts = s.tasks[y.key]
    # nbytes accounts exactly for the one remaining key
    assert a_ws.nbytes == y_ts.nbytes
@gen_cluster(scheduler_kwargs={"timeout": "100ms"})
async def test_delete_worker_data_bad_worker(s, a, b):
    """_delete_worker_data gracefully handles a non-existing worker;
    e.g. a sender died in the middle of rebalance()
    """
    await a.close()
    assert s.workers.keys() == {b.address}
    # Must not raise even though a is gone
    await s.delete_worker_data(a.address, ["x"], stimulus_id="test")
@pytest.mark.parametrize("bad_first", [False, True])
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_delete_worker_data_bad_task(c, s, a, bad_first):
    """_delete_worker_data gracefully handles a non-existing key;
    e.g. a task was stolen by work stealing in the middle of a rebalance().
    Other tasks on the same worker are deleted.
    """
    x, y = await c.scatter(["x", "y"])
    assert a.data == {x.key: "x", y.key: "y"}
    assert s.tasks.keys() == {x.key, y.key}
    # Order matters; test the bogus key both before and after the real one
    keys = ["notexist", x.key] if bad_first else [x.key, "notexist"]
    await s.delete_worker_data(a.address, keys, stimulus_id="test")
    assert a.data == {y.key: "y"}
    assert s.tasks.keys() == {y.key}
    assert s.workers[a.address].nbytes == s.tasks[y.key].nbytes
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_transition_counter(c, s, a):
    """Both scheduler and worker count state transitions of processed tasks"""
    assert s.transition_counter == 0
    assert a.state.transition_counter == 0
    await c.submit(inc, 1)
    assert s.transition_counter > 1
    assert a.state.transition_counter > 1
@gen_cluster(client=True)
async def test_transition_counter_max_scheduler(c, s, a, b):
    """Exceeding transition_counter_max on the scheduler raises and is logged"""
    # This is set by @gen_cluster; it's False in production
    assert s.transition_counter_max > 0
    s.transition_counter_max = 1
    with captured_logger("distributed.scheduler") as logger:
        with pytest.raises(AssertionError):
            await c.submit(inc, 2)
        assert s.transition_counter == 1
        with pytest.raises(AssertionError):
            s.validate_state()
        assert "transition_counter_max" in logger.getvalue()
    # Scheduler state is corrupted. Avoid test failure on gen_cluster teardown.
    s.validate = False
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_transition_counter_max_worker(c, s, a):
    """Exceeding transition_counter_max on the worker is logged"""
    # This is set by @gen_cluster; it's False in production
    assert s.transition_counter_max > 0
    a.state.transition_counter_max = 1
    fut = c.submit(inc, 2)
    with captured_logger("distributed.worker") as logger:
        await async_poll_for(lambda: a.state.transition_counter > 0, timeout=5)
    assert "TransitionCounterMaxExceeded" in logger.getvalue()
    # Worker state is corrupted. Avoid test failure on gen_cluster teardown.
    a.state.validate = False
@gen_cluster(
    client=True,
    nthreads=[("", 1)],
    scheduler_kwargs={"transition_counter_max": False},
    worker_kwargs={"transition_counter_max": False},
)
async def test_disable_transition_counter_max(c, s, a, b):
    """Test that the cluster can run indefinitely if transition_counter_max is disabled.
    This is the default outside of @gen_cluster.
    """
    assert s.transition_counter_max is False
    assert a.state.transition_counter_max is False
    assert await c.submit(inc, 1) == 2
    assert s.transition_counter > 1
    assert a.state.transition_counter > 1
    # Validation must still pass with the counters disabled
    s.validate_state()
    a.validate_state()
@gen_cluster(
    client=True,
    nthreads=[("127.0.0.1", 1) for _ in range(10)],
)
async def test_worker_heartbeat_after_cancel(c, s, *workers):
    """This test is intended to ensure that after cancellation of a graph, the
    worker heartbeat is always successful. The heartbeat may not be successful if
    the worker and scheduler state drift and the scheduler doesn't handle
    unknown information gracefully. One example would be a released/cancelled
    computation where the worker returns metrics about duration, type, etc. and
    the scheduler doesn't handle the forgotten task gracefully.

    See also https://github.com/dask/distributed/issues/4587
    """
    # Heartbeats are triggered manually below, so stop the periodic ones
    for w in workers:
        w.periodic_callbacks["heartbeat"].stop()
    futs = c.map(slowinc, range(100), delay=0.1)
    while sum(w.state.executing_count for w in workers) < len(workers):
        await asyncio.sleep(0.001)
    await c.cancel(futs)
    # Heartbeats must keep succeeding while the workers forget the cancelled tasks
    while any(w.state.tasks for w in workers):
        await asyncio.gather(*(w.heartbeat() for w in workers))
@gen_cluster(client=True, nthreads=[("", 1)] * 2)
async def test_set_restrictions(c, s, a, b):
    """set_restrictions replaces the worker restriction of an existing task"""
    f = c.submit(inc, 1, key="f", workers=[b.address])
    await f
    s.set_restrictions(worker={f.key: a.address})
    assert s.tasks[f.key].worker_restrictions == {a.address}
    # After b dies, f is recomputed on a, honoring the new restriction
    await b.close()
    await f
@gen_cluster(
    client=True,
    nthreads=[("", 1)] * 3,
    config={"distributed.worker.memory.pause": False},
)
async def test_avoid_paused_workers(c, s, w1, w2, w3):
    """Tasks are not scheduled on a paused worker"""
    w2.status = Status.paused
    # Wait until the scheduler has observed the status change
    while s.workers[w2.address].status != Status.paused:
        await asyncio.sleep(0.01)
    futures = c.map(slowinc, range(8), delay=0.1)
    await wait(futures)
    # All results landed on the two running workers; the paused one got nothing
    assert w1.data
    assert not w2.data
    assert w3.data
    assert len(w1.data) + len(w3.data) == 8
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_Scheduler__to_dict(c, s, a):
    """Scheduler._to_dict() exposes the expected top-level sections and
    serializes cross-referenced state objects as short reprs"""
    futs = c.map(inc, range(2))

    await c.gather(futs)
    d = s._to_dict()
    assert d.keys() == {
        "type",
        "id",
        "address",
        "extensions",
        "services",
        "started",
        "n_workers",
        "total_threads",
        "total_memory",
        "workers",
        "status",
        "thread_id",
        "transition_log",
        "transition_counter",
        "memory",
        "tasks",
        "task_groups",
        "events",
        "clients",
    }
    # TaskStates are serialized as dicts under tasks and as strings under
    # workers.*.has_what and under clients.*.wants_what
    # WorkerStates are serialized s dicts under workers and as
    # strings under tasks.*.who_has
    assert d["tasks"][futs[0].key]["who_has"] == [
        f"<WorkerState '{a.address}', "
        "name: 0, status: running, memory: 2, processing: 0>"
    ]
    assert sorted(d["workers"][a.address]["has_what"]) == sorted(
        [
            f"<TaskState '{futs[0].key}' memory>",
            f"<TaskState '{futs[1].key}' memory>",
        ]
    )
    assert sorted(d["clients"][c.id]["wants_what"]) == sorted(
        [
            f"<TaskState '{futs[0].key}' memory>",
            f"<TaskState '{futs[1].key}' memory>",
        ]
    )
    # TaskGroups are serialized as dicts under task_groups and as strings under
    # tasks.*.group
    assert d["tasks"][futs[0].key]["group"] == "<inc: memory: 2>"
    assert d["task_groups"]["inc"]["prefix"] == "<inc: memory: 2>"
    # ClientStates are serialized as dicts under clients and as strings under
    # tasks.*.who_wants
    assert d["clients"][c.id]["client_key"] == c.id
    assert d["tasks"][futs[0].key]["who_wants"] == [f"<Client '{c.id}'>"]
    # Test MemoryState dump
    assert isinstance(d["memory"]["process"], int)
    assert isinstance(d["workers"][a.address]["memory"]["process"], int)
@gen_cluster(
    client=True, nthreads=[], config={"distributed.scheduler.worker-saturation": 1.0}
)
async def test_TaskState__to_dict(c, s):
    """tasks that are listed as dependencies of other tasks are dumped as a short repr
    and always appear in full under Scheduler.tasks
    """
    x = c.submit(inc, 1, key="x")
    y = c.submit(inc, x, key="y")
    z = c.submit(inc, 2, key="z")
    while len(s.tasks) < 3:
        await asyncio.sleep(0.01)
    tasks = s._to_dict()["tasks"]
    # Every task appears in full as a dict...
    assert isinstance(tasks["x"], dict)
    assert isinstance(tasks["y"], dict)
    assert isinstance(tasks["z"], dict)
    # ...while cross-references are collapsed to short reprs
    assert tasks["x"]["dependents"] == ["<TaskState 'y' waiting>"]
    assert tasks["y"]["dependencies"] == ["<TaskState 'x' queued>"]
def _verify_cluster_state(
state: dict,
workers: Collection[Worker],
allow_missing: bool = False,
) -> None:
addrs = {w.address for w in workers}
assert state.keys() == {"scheduler", "workers", "versions"}
assert state["workers"].keys() == addrs
if allow_missing:
assert state["versions"]["workers"].keys() <= addrs
else:
assert state["versions"]["workers"].keys() == addrs
@gen_cluster(nthreads=[("", 1)] * 2)
async def test_get_cluster_state(s, *workers):
    """get_cluster_state reflects the current worker set, including none"""
    state = await s.get_cluster_state([])
    _verify_cluster_state(state, workers)
    await asyncio.gather(*(w.close() for w in workers))
    while s.workers:
        await asyncio.sleep(0.01)
    state_no_workers = await s.get_cluster_state([])
    _verify_cluster_state(state_no_workers, [])
@gen_cluster(
    nthreads=[("", 1)] * 2,
    config={"distributed.comm.timeouts.connect": "200ms"},
)
async def test_get_cluster_state_worker_error(s, a, b):
    """An unreachable worker is reported as an error string, not a dict,
    and its version info is omitted"""
    a.stop()
    state = await s.get_cluster_state([])
    _verify_cluster_state(state, [a, b], allow_missing=True)
    assert state["workers"][a.address] == (
        f"OSError('Timed out trying to connect to {a.address} after 0.2 s')"
    )
    assert isinstance(state["workers"][b.address], dict)
    assert state["versions"]["workers"].keys() == {b.address}
def _verify_cluster_dump(url: str, format: str, workers: Collection[Worker]) -> dict:
    """Load a cluster-state dump from *url* and validate its layout.

    ``format`` selects both the file suffix appended to *url* and the
    deserializer: ``"msgpack"`` reads a gzipped msgpack file, anything else a
    YAML file.  Returns the deserialized state dict after running it through
    :func:`_verify_cluster_state`.
    """
    import fsspec

    # Pick suffix and deserializer for the requested dump format
    if format == "msgpack":
        import msgpack

        url += ".msgpack.gz"
        loader = msgpack.unpack
    else:
        import yaml

        url += ".yaml"
        loader = yaml.safe_load

    with fsspec.open(url, mode="rb", compression="infer") as f:
        state = loader(f)

    _verify_cluster_state(state, workers)
    return state
@pytest.mark.parametrize("format", ["msgpack", "yaml"])
@gen_cluster(nthreads=[("", 1)] * 2)
async def test_dump_cluster_state(s, *workers, format):
    """dump_cluster_state_to_url writes a loadable dump, with and without workers"""
    fsspec = pytest.importorskip("fsspec")
    try:
        await s.dump_cluster_state_to_url(
            "memory://state-dumps/two-workers", [], format
        )
        _verify_cluster_dump("memory://state-dumps/two-workers", format, workers)
        await asyncio.gather(*(w.close() for w in workers))
        while s.workers:
            await asyncio.sleep(0.01)
        await s.dump_cluster_state_to_url("memory://state-dumps/no-workers", [], format)
        _verify_cluster_dump("memory://state-dumps/no-workers", format, [])
    finally:
        # Clean up the in-memory filesystem shared across tests
        fs = fsspec.filesystem("memory")
        fs.rm("state-dumps", recursive=True)
@gen_cluster(nthreads=[("", 1)])
async def test_repr(s, a):
    """repr() of Scheduler, Worker and WorkerState, with and without a name"""
    async with Worker(s.address, nthreads=2) as b:  # name = address by default
        ws_a = s.workers[a.address]
        ws_b = s.workers[b.address]
        while ws_b.status != Status.running:
            await asyncio.sleep(0.01)
        assert repr(s) == f"<Scheduler {s.address!r}, workers: 2, cores: 3, tasks: 0>"
        assert (
            repr(a)
            == f"<Worker {a.address!r}, name: 0, status: running, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>"
        )
        assert (
            repr(b)
            == f"<Worker {b.address!r}, status: running, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>"
        )
        assert (
            repr(ws_a)
            == f"<WorkerState {a.address!r}, name: 0, status: running, memory: 0, processing: 0>"
        )
        assert (
            repr(ws_b)
            == f"<WorkerState {b.address!r}, status: running, memory: 0, processing: 0>"
        )
@gen_cluster(client=True, config={"distributed.comm.timeouts.connect": "2s"})
async def test_ensure_events_dont_include_taskstate_objects(c, s, a, b):
    """Scheduler events must not leak TaskState objects into their payloads"""
    event = Event()

    def block(x, event):
        event.wait()
        return x

    futs = c.map(block, range(100), event=event)
    while not a.state.tasks:
        await asyncio.sleep(0.1)
    # Kill a mid-computation to generate worker-removal events
    await a.close(executor_wait=False)
    await event.set()
    await c.gather(futs)
    assert not any("TaskState" in str(event) for event in s.get_events())
@gen_cluster(nthreads=[("", 1)])
async def test_worker_state_unique_regardless_of_address(s, w):
    """A new worker on the same host:port gets a distinct, non-equal
    WorkerState with a different hash"""
    ws1 = s.workers[w.address]
    host, port = parse_host_port(ws1.address)
    await w.close()
    while s.workers:
        await asyncio.sleep(0.1)
    # Restart a worker on the exact same address
    async with Worker(s.address, port=port, host=host) as w2:
        ws2 = s.workers[w2.address]
        assert ws1 is not ws2
        assert ws1 != ws2
        # Bug fix: the original asserted `hash(ws1) != ws2`, comparing an int
        # to a WorkerState object — trivially true and therefore meaningless.
        # Compare the two hashes instead.
        assert hash(ws1) != hash(ws2)
def test_runspec_regression_sync(loop):
    """A broken map_overlap graph must raise its own error, not KilledWorker"""
    # https://github.com/dask/distributed/issues/6624
    np = pytest.importorskip("numpy")
    da = pytest.importorskip("dask.array")
    with Client(loop=loop, dashboard_address=":0"):
        v = da.random.random((20, 20), chunks=(5, 5))
        overlapped = da.map_overlap(np.sum, v, depth=2, boundary="reflect")
        # This computation is somehow broken but we want to avoid catching any
        # serialization errors that result in KilledWorker
        with pytest.raises(IndexError):
            overlapped.compute()
@gen_cluster(config={"distributed.scheduler.allowed-failures": 666})
async def test_KilledWorker_informative_message(s, a, b):
    """The KilledWorker message names the task, the retry count, the last
    worker and points at the troubleshooting docs"""
    ws = s.workers[a.address].clean()
    ex = KilledWorker("foo-bar", ws, s.allowed_failures)
    with pytest.raises(KilledWorker) as excinfo:
        raise ex
    msg = str(excinfo.value)
    # allowed-failures 666 -> 667 attempts in total
    assert "Attempted to run task 'foo-bar' on 667 different workers" in msg
    assert a.address in msg
    assert "worker logs" in msg
    assert "https://distributed.dask.org/en/stable/killed.html" in msg
@gen_cluster(client=True)
async def test_count_task_prefix(c, s, a, b):
    """TaskPrefix.state_counts accumulates across separate submissions"""
    futures = c.map(inc, range(10))
    await c.gather(futures)
    assert s.task_prefixes["inc"].state_counts["memory"] == 10
    assert s.task_prefixes["inc"].state_counts["erred"] == 0
    futures = c.map(inc, range(10, 20))
    await c.gather(futures)
    # Counts are cumulative, not reset per submission
    assert s.task_prefixes["inc"].state_counts["memory"] == 20
    assert s.task_prefixes["inc"].state_counts["erred"] == 0
@gen_cluster(client=True)
async def test_transition_waiting_memory(c, s, a, b):
    """Test race condition where a task transitions to memory while its state on the
    scheduler is waiting:

    1. worker a finishes x
    2. y transitions to processing and is assigned to worker b
    3. b fetches x and sends an add_keys message to the scheduler
    4. In the meantime, a dies and causes x to be scheduled back to released/waiting.
    5. Scheduler queues up a free-keys intended for b to cancel both x and y
    6. Before free-keys arrives to b, the worker runs and completes y, sending a
       finished-task message to the scheduler
    7. {op: add-keys, keys=[x]} from b finally arrives to the scheduler. This triggers
       a {op: remove-replicas, keys=[x]} message from the scheduler to worker b, because
       add-keys when the task state is not memory triggers a cleanup of redundant
       replicas (see Scheduler.add_keys) - in this, add-keys differs from task-finished!
    8. {op: task-finished, key=y} from b arrives to the scheduler and it is ignored.
    """
    x = c.submit(inc, 1, key="x", workers=[a.address])
    y = c.submit(inc, x, key="y", workers=[b.address])
    await wait_for_state("x", "memory", b, interval=0)
    # Note interval=0 above. It means that x has just landed on b this instant and the
    # scheduler doesn't know yet.
    assert b.state.tasks["y"].state == "executing"
    assert s.tasks["x"].who_has == {s.workers[a.address]}
    # Freeze both directions of the b<->scheduler stream to control message order
    with freeze_batched_send(b.batched_stream):
        with freeze_batched_send(s.stream_comms[b.address]):
            await s.remove_worker(a.address, stimulus_id="remove_a")
            assert s.tasks["x"].state == "no-worker"
            assert s.tasks["y"].state == "waiting"
            await wait_for_state("y", "memory", b)
    await async_poll_for(lambda: not b.state.tasks, timeout=5)
    # The late task-finished for y must be ignored; y never left waiting
    assert s.tasks["x"].state == "no-worker"
    assert s.tasks["y"].state == "waiting"
    assert_story(s.story("y"), [("y", "waiting", "waiting", {})])
@pytest.mark.parametrize(
    "rootish",
    [
        pytest.param(
            True,
            marks=pytest.mark.skipif(
                not QUEUING_ON_BY_DEFAULT,
                reason="Nothing will be classified as root-ish",
            ),
        ),
        False,
    ],
)
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_deadlock_resubmit_queued_tasks_fast(c, s, a, rootish):
    """Resubmitting the same keys immediately after release must not deadlock.

    See https://github.com/dask/distributed/issues/7200
    """
    # See https://github.com/dask/distributed/issues/7200
    block = Event()
    block2 = Event()
    executing = Event()
    executing2 = Event()

    def block_on_event(*args, block, executing):
        executing.set()
        block.wait()

    # With rootish=True, enough tasks to be classified root-ish; otherwise one
    if rootish:
        ntasks = s.total_nthreads * 2 + 1
    else:
        ntasks = 1
    keys = [f"fut-{i}" for i in range(ntasks)]

    def submit_tasks():
        # Use case would be a client rescheduling the same or a similar graph
        # multiple times, closely followed
        # df.head()
        # df.size.compute()
        # We're emulating this by submitting the sames *keys*
        return c.map(
            block_on_event, range(len(keys)), block=block, executing=executing, key=keys
        )

    def assert_rootish():
        # Just to verify our assumptions in case the definition changes. This is
        # currently a bit brittle
        if rootish:
            assert all(s.is_rootish(s.tasks[k]) for k in keys)
        else:
            assert not any(s.is_rootish(s.tasks[k]) for k in keys)

    f1 = submit_tasks()
    # Make sure that the worker is properly saturated
    nblocking_tasks = 5
    # This set of tasks is there to guarantee that the worker is saturated after
    # releasing the first set of tasks s.t. a subsequent submission would run
    # into queuing
    fut2 = c.map(
        block_on_event, range(nblocking_tasks), block=block2, executing=executing2
    )
    # Once the task is on the threadpool, the client/scheduler may start its
    # release chain
    await executing.wait()
    assert len(a.state.tasks)
    # To trigger this condition, the scheduler needs to receive the
    # `task-finished` message after it performed the client release transitions
    # Therefore, the worker must not receive the `free-keys`` signal before it
    # can finish the task since otherwise the worker would recognize it as
    # cancelled and would forget about it. We emulate this behavior by blocking
    # the outgoing scheduler stream until that happens, i.e. this introduces
    # artificial latency
    with freeze_batched_send(s.stream_comms[a.address]):
        del f1
        while any(k in s.tasks for k in keys):
            await asyncio.sleep(0.005)
        assert len(s.tasks) == nblocking_tasks
        fut3 = submit_tasks()
    while len(s.tasks) == nblocking_tasks:
        await asyncio.sleep(0.005)
    assert_rootish()
    if rootish:
        assert all(s.tasks[k] in s.queued for k in keys), [s.tasks[k] for k in keys]
    await block.set()
    # At this point we need/want to wait for the task-finished message to
    # arrive on the scheduler. There is no proper hook to wait, therefore we
    # sleep
    await asyncio.sleep(0.2)
    # Everything should finish properly after this
    await block2.set()
    await c.gather(fut2)
    await c.gather(fut3)
@gen_test()
async def test_transition_failure_triggers_log_event():
    """A failed scheduler transition emits a 'scheduler-transition-failed' event"""

    def block_on_event(input, block, executing):
        executing.set()
        block.wait()
        return input

    # Manually spin up cluster to avoid state validation on cluster shutdown in gen_cluster
    async with (
        Scheduler(dashboard_address=":0") as s,
        Worker(s.address) as w,
        Client(s.address, asynchronous=True) as c,
    ):
        block = Event()
        executing = Event()
        fut = c.submit(block_on_event, 0, block, executing)
        await executing.wait()
        # Manually corrupt the state of the processing task
        s.tasks[fut.key].processing_on = None
        await block.set()
        await async_poll_for(
            lambda: sum(
                event["action"] == "scheduler-transition-failed"
                for _, event in s.get_events("transitions")
            )
            == 1,
            timeout=5,
        )
@pytest.mark.skipif(
    not QUEUING_ON_BY_DEFAULT,
    reason="The situation handled in this test requires queueing.",
)
@pytest.mark.parametrize("validate", [True, False])
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_deadlock_dependency_of_queued_released_when_worker_replaced(
    c, s, a, validate
):
    """A dependency of queued tasks must survive its worker being replaced
    by a fresh one at the same scheduler"""

    @delayed
    def inc(input):
        return input + 1

    @delayed
    def block_on_event(input, block, executing):
        executing.set()
        block.wait()
        return input

    block = Event()
    executing = Event()

    dep = inc(0)
    # More root-ish tasks than threads, so some of them are queued
    futs = [
        block_on_event(dep, block, executing, dask_key_name=("rootish", i))
        for i in range(s.total_nthreads * 2 + 1)
    ]
    del dep
    futs = c.compute(futs)
    await executing.wait()
    assert s.queued
    await s.remove_worker(address=a.address, stimulus_id="test")
    if validate:
        s.validate_state()
    await block.set()
    await executing.clear()
    # The replacement worker must pick up the queued tasks and their dependency
    async with Worker(s.address) as b:
        if validate:
            s.validate_state()
        await c.gather(futs)
        if validate:
            s.validate_state()
@pytest.mark.skipif(
    not QUEUING_ON_BY_DEFAULT,
    reason="The situation handled in this test requires queueing.",
)
@pytest.mark.parametrize("validate", [True, False])
@gen_cluster(client=True)
async def test_deadlock_dependency_of_queued_released_when_worker_removed(
    c, s, a, b, validate
):
    """A dependency of queued tasks must survive the removal of the worker
    it was originally computed on"""

    @delayed
    def inc(input):
        return input + 1

    @delayed
    def block_on_event(input, block):
        block.wait()
        return input

    block = Event()
    # Pin dep to a, but allow it to be recomputed elsewhere after a is removed
    with dask.annotate(workers=a.address, allow_other_workers=True):
        dep = inc(0)
    futs = [
        block_on_event(dep, block, dask_key_name=("rootish", i))
        for i in range(s.total_nthreads * 2 + 1)
    ]
    dep.release()
    futs = c.compute(futs)
    # Freeze b's outgoing stream so the scheduler doesn't learn b holds dep
    with freeze_batched_send(b.batched_stream):
        await async_poll_for(
            lambda: b.state.tasks.get(dep.key) is not None
            and b.state.tasks.get(dep.key).state == "memory",
            timeout=5,
        )
        assert s.queued
        await s.remove_worker(address=a.address, stimulus_id="test")
        if validate:
            s.validate_state()
        await block.set()
        if validate:
            s.validate_state()
    await c.gather(futs)
    if validate:
        s.validate_state()
@gen_cluster(client=True)
async def test_submit_dependency_of_erred_task(c, s, a, b):
    """A task depending on an already-erred future raises the original error"""
    x = c.submit(lambda: 1 / 0, key="x")
    await wait(x)
    y = c.submit(inc, x, key="y")
    with pytest.raises(ZeroDivisionError):
        await y
@pytest.mark.skipif(
    sys.version_info < (3, 11),
    reason="asyncio.wait_for is unreliable on 3.10 and below",
)
@gen_cluster(
    client=True,
    config={
        # In this test we want to make sure that the connections are severed
        # before the timeout hits. Therefore, the connection timeout should be
        # higher than the test timeout.
        # At the time of writing, the test timeout was 30s
        "distributed.comm.timeouts.connect": "120s"
    },
)
async def test_tell_workers_when_peers_have_left(c, s, a, b):
    """A worker learns that a peer left and fails over to another replica
    well before the (very long) connect timeout would fire"""
    f = (await c.scatter({"f": 1}, workers=[a.address, b.address], broadcast=True))["f"]

    workers = {a.address: a, b.address: b}
    connect_timeout = parse_timedelta(
        dask.config.get("distributed.comm.timeouts.connect"), default="seconds"
    )

    class BrokenGatherDep(Worker):
        # Sabotage the first gather_dep: kill the chosen sender's listener and
        # abort its scheduler stream before attempting the transfer
        async def gather_dep(self, worker, *args, **kwargs):
            w = workers.pop(worker, None)
            if w is not None and workers:
                w.listener.stop()
                s.stream_comms[worker].abort()

            return await super().gather_dep(worker, *args, **kwargs)

    async with BrokenGatherDep(s.address, nthreads=1) as w3:
        start = time()
        g = await c.submit(inc, f, key="g", workers=[w3.address])
        # fails over to the second worker in less than the connect timeout
        assert time() < start + connect_timeout
@gen_cluster(client=True)
async def test_scatter_creates_ts(c, s, a, b):
    """A TaskState object is created by scatter, and only later becomes runnable

    See also
    --------
    test_scheduler.py::test_client_desires_keys_creates_ts
    test_spans.py::test_scatter_creates_ts
    """
    x1 = (await c.scatter({"x": 1}, workers=[a.address]))["x"]
    await wait_for_state("x", "memory", s)
    # Scattered tasks have no run_spec: they can't be recomputed yet
    assert s.tasks["x"].run_spec is None
    async with Client(s.address, asynchronous=True) as c2:
        x2 = c2.submit(inc, 1, key="x")
        # The scattered value wins while a is alive
        assert await x2 == 1
        await a.close()
        # After losing the replica, the submitted run_spec recomputes x
        assert await x2 == 2
        assert s.tasks["x"].run_spec is not None
@pytest.mark.parametrize("finalize", [False, True])
@gen_cluster(
    client=True,
    nthreads=[("", 1)] * 4,
    worker_kwargs={"memory_limit": "100 kB"},
    config={
        "distributed.worker.memory.target": False,
        "distributed.worker.memory.spill": False,
        "distributed.worker.memory.pause": False,
    },
)
async def test_refuse_to_schedule_huge_task(c, s, *workers, finalize):
    """If the total size of a task's input grossly exceed the memory available on the
    worker, the scheduler must refuse to compute it
    """
    # 4 x 30 kB partitions vs a 100 kB memory limit per worker
    bg = bag.from_sequence(
        [random.randbytes(30_000) for _ in range(4)],
        npartitions=4,
    )
    match = r"worth of input dependencies, but worker .* has memory_limit set to"
    if finalize:
        fut = c.compute(bg)
        match += r".* you called client.compute()"
    else:
        bg = c.persist(bg.repartition(npartitions=1))
        fut = list(c.futures_of(bg))[0]

    with pytest.raises(MemoryError, match=match):
        await fut

    # The task never reached the workers
    for w in workers:
        for ev in w.state.log:
            assert fut.key not in ev
@gen_cluster(client=True)
async def test_html_repr(c, s, a, b):
    """_repr_html_ of Scheduler, WorkerState and TaskState works while
    the cluster is actively computing"""
    futs = c.map(slowinc, range(10), delay=0.1, key=[("slowinc", i) for i in range(10)])
    f = c.submit(sum, futs)

    while not f.done():
        assert isinstance(s._repr_html_(), str)
        assert isinstance(s.workers[a.address]._repr_html_(), str)
        assert isinstance(s.workers[b.address]._repr_html_(), str)
        for ts in s.tasks.values():
            assert isinstance(ts._repr_html_(), str)
        await asyncio.sleep(0.01)

    await f
@pytest.mark.parametrize("add_deps", [False, True])
@gen_cluster(client=True, nthreads=[])
async def test_resubmit_nondeterministic_task_different_deps(c, s, add_deps):
    """Some run_specs can't be tokenized deterministically. Silently skip comparison on
    the run_spec in those cases. However, fail anyway if dependencies have changed.
    """
    # `object()` tokenizes differently on every call, making 'y' nondeterministic
    o = object()
    x1 = c.submit(inc, 1, key="x1") if not add_deps else 2
    x2 = c.submit(inc, 2, key="x2")
    y1 = c.persist(delayed(lambda i, j: i)(x1, o, dask_key_name="y"))
    y2 = delayed(lambda i, j: i)(x2, o, dask_key_name="y")
    z = delayed(inc)(y2, dask_key_name="z")
    with captured_logger("distributed.scheduler", level=logging.WARNING) as log:
        fut = c.compute(z)
        await wait_for_state("z", "waiting", s)
    assert "Detected different `run_spec` for key 'y'" in log.getvalue()

    async with Worker(s.address):
        assert await fut == 3
def block(x, in_event, block_event):
    """Signal that execution started, then block until released.

    Sets *in_event* as soon as the task begins running, waits for
    *block_event* to be set by the test body, and finally returns *x*
    unchanged.
    """
    in_event.set()
    block_event.wait()
    result = x
    return result
@gen_cluster(
    client=True,
    nthreads=[("", 1, {"resources": {"a": 1}})],
    config={"distributed.scheduler.allowed-failures": 0},
)
async def test_fan_out_pattern_deadlock(c, s, a):
    """Regression test for https://github.com/dask/distributed/issues/8548

    This test heavily uses resources to force scheduling decisions.
    """
    in_f, block_f = Event(), Event()
    in_ha, block_ha = Event(), Event()
    in_hb, block_hb = Event(), Event()

    # Input task to 'g' that we can fail
    with dask.annotate(resources={"b": 1}):
        f = delayed(block)(1, in_f, block_f, dask_key_name="f")
    g = delayed(inc)(f, dask_key_name="g")
    # Fan-out from 'g' and run h1 and h2 on different workers
    hb = delayed(block)(g, in_hb, block_hb, dask_key_name="hb")
    with dask.annotate(resources={"a": 1}):
        ha = delayed(block)(g, in_ha, block_ha, dask_key_name="ha")

    f, ha, hb = c.compute([f, ha, hb])
    with captured_logger("distributed.scheduler", level=logging.ERROR) as logger:
        async with Worker(s.address, nthreads=1, resources={"b": 1}) as b:
            await block_f.set()
            await in_ha.wait()
            await in_hb.wait()
            await in_f.clear()
            # Make sure that the scheduler knows that both workers hold 'g' in memory
            await async_poll_for(lambda: len(s.tasks["g"].who_has) == 2, timeout=5)
            # Remove worker 'b' while it's processing h1
            await s.remove_worker(b.address, stimulus_id="remove_b1")
            await block_hb.set()
        await block_f.clear()

        # Remove the new instance of the 'b' worker while it processes 'f'
        # to trigger an transition for 'f' to 'erred'
        async with Worker(s.address, nthreads=1, resources={"b": 1}) as b:
            await in_f.wait()
            await in_f.clear()
            await s.remove_worker(b.address, stimulus_id="remove_b2")
            await block_f.set()
        await block_f.clear()

        # Worker 'a' may now be stuck on h2 ('ha'); release it
        await block_ha.set()
        await ha

        with pytest.raises(KilledWorker, match="Attempted to run task 'hb'"):
            await hb

        del ha, hb
        # Make sure that h2 gets forgotten on worker 'a'
        await async_poll_for(lambda: not a.state.tasks, timeout=5)
    # Ensure that no other errors including transition failures were logged
    assert (
        logger.getvalue()
        == "Task hb marked as failed because 1 workers died while trying to run it\nTask f marked as failed because 1 workers died while trying to run it\n"
    )
@gen_cluster(
client=True,
nthreads=[("", 1, {"resources": {"a": 1}})],
config={"distributed.scheduler.allowed-failures": 0},
)
async def test_stimulus_from_erred_task(c, s, a):
"""This test heavily uses resources to force scheduling decisions."""
in_f, block_f = Event(), Event()
in_g, block_g = Event(), Event()
with dask.annotate(resources={"b": 1}):
f = delayed(block)(1, in_f, block_f, dask_key_name="f")
with dask.annotate(resources={"a": 1}):
g = delayed(block)(f, in_g, block_g, dask_key_name="g")
f, g = c.compute([f, g])
with captured_logger("distributed.scheduler", level=logging.ERROR) as logger:
frozen_stream_from_a_ctx = freeze_batched_send(a.batched_stream)
frozen_stream_from_a_ctx.__enter__()
async with Worker(s.address, nthreads=1, resources={"b": 1}) as b1:
await block_f.set()
await in_g.wait()
await in_f.clear()
frozen_stream_to_a_ctx = freeze_batched_send(s.stream_comms[a.address])
frozen_stream_to_a_ctx.__enter__()
await s.remove_worker(b1.address, stimulus_id="remove_b1")
await block_f.clear()
# Remove the new instance of the 'b' worker while it processes 'f'
# to trigger a transition for 'f' to 'erred'
async with Worker(s.address, nthreads=1, resources={"b": 1}) as b2:
await in_f.wait()
await in_f.clear()
await s.remove_worker(b2.address, stimulus_id="remove_b2")
await block_f.set()
with pytest.raises(KilledWorker, match="Attempted to run task 'f'"):
await f
# g has already been transitioned to 'erred' because 'f' failed
with pytest.raises(KilledWorker, match="Attempted to run task 'f'"):
await g
# Finish 'g' and let the scheduler know so it can trigger cleanup
await block_g.set()
with mock.patch.object(
s, "stimulus_task_finished", wraps=s.stimulus_task_finished
) as wrapped_stimulus:
frozen_stream_from_a_ctx.__exit__(None, None, None)
# Make sure the `stimulus_task_finished` gets processed
await async_poll_for(lambda: wrapped_stimulus.call_count == 1, timeout=5)
# Allow the scheduler to talk to the worker again
frozen_stream_to_a_ctx.__exit__(None, None, None)
# Make sure all data gets forgotten on worker 'a'
await async_poll_for(lambda: not a.state.tasks, timeout=5)
# Ensure that no other errors including transition failures were logged
assert (
logger.getvalue()
== "Task f marked as failed because 1 workers died while trying to run it\n"
)
@gen_cluster(client=True)
async def test_concurrent_close_requests(c, s, *workers):
class BeforeCloseCounterPlugin(SchedulerPlugin):
async def start(self, scheduler):
self.call_count = 0
async def before_close(self):
self.call_count += 1
await c.register_plugin(BeforeCloseCounterPlugin(), name="before_close")
with captured_logger("distributed.scheduler", level=logging.INFO) as caplog:
await asyncio.gather(*[s.close(reason="test-reason") for _ in range(5)])
assert s.plugins["before_close"].call_count == 1
lines = caplog.getvalue().split("\n")
assert sum("Closing scheduler" in line for line in lines) == 1
@gen_cluster(
client=True,
config={
"distributed.scheduler.rootish-taskgroup": 10,
"distributed.scheduler.rootish-taskgroup-dependencies": 15,
},
)
async def test_rootish_taskgroup_configuration(c, s, *workers):
assert s.rootish_tg_threshold == 10
assert s.rootish_tg_dependencies_threshold == 15
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_alias_resolving_break_queuing(c, s, a):
pytest.importorskip("numpy")
import dask.array as da
arr = da.random.random((90, 100), chunks=(10, 50))
result = arr.rechunk(((10, 7, 7, 6) * 3, (50, 50)))
result = result.sum(split_every=1000)
x = c.persist(result)
while not s.tasks:
await asyncio.sleep(0.01)
assert sum([s.is_rootish(v) for v in s.tasks.values()]) == 18
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_data_producers(c, s, a):
from dask._task_spec import DataNode, Task, TaskRef
def func(*args):
return 100
class MyArray(DaskMethodsMixin):
__dask_optimize__ = None
def __dask_graph__(self):
return {
"a": DataNode("a", 10),
"b": Task("b", func, TaskRef("a"), _data_producer=True),
"c": Task("c", func, TaskRef("b")),
"d": Task("d", func, TaskRef("c")),
}
def __dask_keys__(self):
return ["d"]
def __dask_postcompute__(self):
return func, ()
arr = MyArray()
x = c.compute(arr)
await async_poll_for(lambda: s.tasks, 5)
assert (
sum([s.is_rootish(v) and v.run_spec.data_producer for v in s.tasks.values()])
== 2
)
| FlakyConnectionPool |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/util/langhelpers.py | {
"start": 33341,
"end": 34608
} | class ____(Generic[_T_co]):
"""Descriptor which proxies a function when the attribute is not
present in dict
This superclass is organized in a particular way with "memoized" and
"non-memoized" implementation classes that are hidden from type checkers,
as Mypy seems to not be able to handle seeing multiple kinds of descriptor
classes used for the same attribute.
"""
fget: Callable[..., _T_co]
__doc__: Optional[str]
__name__: str
def __init__(self, fget: Callable[..., _T_co], doc: Optional[str] = None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
@overload
def __get__(self: _GFD, obj: None, cls: Any) -> _GFD: ...
@overload
def __get__(self, obj: object, cls: Any) -> _T_co: ...
def __get__(self: _GFD, obj: Any, cls: Any) -> Union[_GFD, _T_co]:
raise NotImplementedError()
if TYPE_CHECKING:
def __set__(self, instance: Any, value: Any) -> None: ...
def __delete__(self, instance: Any) -> None: ...
def _reset(self, obj: Any) -> None:
raise NotImplementedError()
@classmethod
def reset(cls, obj: Any, name: str) -> None:
raise NotImplementedError()
| generic_fn_descriptor |
python | spack__spack | lib/spack/spack/audit.py | {
"start": 1831,
"end": 2431
} | class ____:
"""Information on an error reported in a test."""
def __init__(self, summary, details):
self.summary = summary
self.details = tuple(details)
def __str__(self):
if self.details:
return f"{self.summary}\n" + "\n".join(f" {detail}" for detail in self.details)
return self.summary
def __eq__(self, other):
if self.summary != other.summary or self.details != other.details:
return False
return True
def __hash__(self):
value = (self.summary, self.details)
return hash(value)
| Error |
python | huggingface__transformers | src/transformers/models/granite_speech/modeling_granite_speech.py | {
"start": 4127,
"end": 5032
} | class ____(nn.Module):
"""Feedforward module for conformer encoder blocks."""
def __init__(self, config: GraniteSpeechEncoderConfig):
super().__init__()
self.pre_norm = nn.LayerNorm(config.hidden_dim)
self.up_proj = nn.Linear(config.hidden_dim, config.hidden_dim * config.feedforward_mult)
self.silu = nn.SiLU()
self.dropout = nn.Dropout(config.dropout)
self.down_proj = nn.Linear(config.hidden_dim * config.feedforward_mult, config.hidden_dim)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.pre_norm(hidden_states)
hidden_states = self.up_proj(hidden_states)
hidden_states = self.dropout(self.silu(hidden_states))
hidden_states = self.down_proj(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
| GraniteSpeechConformerFeedForward |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_hash.py | {
"start": 664,
"end": 737
} | class ____:
def __hash__(self):
x = 7741
return x
| Hash2 |
python | django__django | django/contrib/gis/gdal/field.py | {
"start": 4552,
"end": 4735
} | class ____(Field):
@property
def value(self):
"Return a float contained in this field."
return self.as_double()
# String & Binary fields, just subclasses
| OFTReal |
python | langchain-ai__langchain | libs/core/langchain_core/utils/function_calling.py | {
"start": 1100,
"end": 1383
} | class ____(TypedDict):
"""Representation of a callable function to send to an LLM."""
name: str
"""The name of the function."""
description: str
"""A description of the function."""
parameters: dict
"""The parameters of the function."""
| FunctionDescription |
python | huggingface__transformers | src/transformers/models/yoso/modeling_yoso.py | {
"start": 30694,
"end": 31572
} | class ____(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
self.config = config
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = ACT2FN[self.config.hidden_act](x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@auto_docstring(
custom_intro="""
YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks.
"""
)
| YosoClassificationHead |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 159043,
"end": 159502
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("comment_id", "body", "client_mutation_id")
comment_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="commentId")
body = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="body")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| UpdateDiscussionCommentInput |
python | langchain-ai__langchain | libs/langchain/langchain_classic/agents/agent.py | {
"start": 32886,
"end": 62129
} | class ____(Chain):
"""Agent that is using tools."""
agent: BaseSingleActionAgent | BaseMultiActionAgent | Runnable
"""The agent to run for creating a plan and determining actions
to take at each step of the execution loop."""
tools: Sequence[BaseTool]
"""The valid tools the agent can call."""
return_intermediate_steps: bool = False
"""Whether to return the agent's trajectory of intermediate steps
at the end in addition to the final output."""
max_iterations: int | None = 15
"""The maximum number of steps to take before ending the execution
loop.
Setting to 'None' could lead to an infinite loop."""
max_execution_time: float | None = None
"""The maximum amount of wall clock time to spend in the execution
loop.
"""
early_stopping_method: str = "force"
"""The method to use for early stopping if the agent never
returns `AgentFinish`. Either 'force' or 'generate'.
`"force"` returns a string saying that it stopped because it met a
time or iteration limit.
`"generate"` calls the agent's LLM Chain one final time to generate
a final answer based on the previous steps.
"""
handle_parsing_errors: bool | str | Callable[[OutputParserException], str] = False
"""How to handle errors raised by the agent's output parser.
Defaults to `False`, which raises the error.
If `true`, the error will be sent back to the LLM as an observation.
If a string, the string itself will be sent to the LLM as an observation.
If a callable function, the function will be called with the exception as an
argument, and the result of that function will be passed to the agent as an
observation.
"""
trim_intermediate_steps: (
int | Callable[[list[tuple[AgentAction, str]]], list[tuple[AgentAction, str]]]
) = -1
"""How to trim the intermediate steps before returning them.
Defaults to -1, which means no trimming.
"""
@classmethod
def from_agent_and_tools(
cls,
agent: BaseSingleActionAgent | BaseMultiActionAgent | Runnable,
tools: Sequence[BaseTool],
callbacks: Callbacks = None,
**kwargs: Any,
) -> AgentExecutor:
"""Create from agent and tools.
Args:
agent: Agent to use.
tools: Tools to use.
callbacks: Callbacks to use.
kwargs: Additional arguments.
Returns:
Agent executor object.
"""
return cls(
agent=agent,
tools=tools,
callbacks=callbacks,
**kwargs,
)
@model_validator(mode="after")
def validate_tools(self) -> Self:
"""Validate that tools are compatible with agent.
Args:
values: Values to validate.
Returns:
Validated values.
Raises:
ValueError: If allowed tools are different than provided tools.
"""
agent = self.agent
tools = self.tools
allowed_tools = agent.get_allowed_tools() # type: ignore[union-attr]
if allowed_tools is not None and set(allowed_tools) != {
tool.name for tool in tools
}:
msg = (
f"Allowed tools ({allowed_tools}) different than "
f"provided tools ({[tool.name for tool in tools]})"
)
raise ValueError(msg)
return self
@model_validator(mode="before")
@classmethod
def validate_runnable_agent(cls, values: dict) -> Any:
"""Convert runnable to agent if passed in.
Args:
values: Values to validate.
Returns:
Validated values.
"""
agent = values.get("agent")
if agent and isinstance(agent, Runnable):
try:
output_type = agent.OutputType
except TypeError:
multi_action = False
except Exception:
logger.exception("Unexpected error getting OutputType from agent")
multi_action = False
else:
multi_action = output_type == list[AgentAction] | AgentFinish
stream_runnable = values.pop("stream_runnable", True)
if multi_action:
values["agent"] = RunnableMultiActionAgent(
runnable=agent,
stream_runnable=stream_runnable,
)
else:
values["agent"] = RunnableAgent(
runnable=agent,
stream_runnable=stream_runnable,
)
return values
@property
def _action_agent(self) -> BaseSingleActionAgent | BaseMultiActionAgent:
"""Type cast self.agent.
If the `agent` attribute is a Runnable, it will be converted one of
RunnableAgentType in the validate_runnable_agent root_validator.
To support instantiating with a Runnable, here we explicitly cast the type
to reflect the changes made in the root_validator.
"""
if isinstance(self.agent, Runnable):
return cast("RunnableAgentType", self.agent)
return self.agent
@override
def save(self, file_path: Path | str) -> None:
"""Raise error - saving not supported for Agent Executors.
Args:
file_path: Path to save to.
Raises:
ValueError: Saving not supported for agent executors.
"""
msg = (
"Saving not supported for agent executors. "
"If you are trying to save the agent, please use the "
"`.save_agent(...)`"
)
raise ValueError(msg)
def save_agent(self, file_path: Path | str) -> None:
"""Save the underlying agent.
Args:
file_path: Path to save to.
"""
return self._action_agent.save(file_path)
def iter(
self,
inputs: Any,
callbacks: Callbacks = None,
*,
include_run_info: bool = False,
async_: bool = False, # noqa: ARG002 arg kept for backwards compat, but ignored
) -> AgentExecutorIterator:
"""Enables iteration over steps taken to reach final output.
Args:
inputs: Inputs to the agent.
callbacks: Callbacks to run.
include_run_info: Whether to include run info.
async_: Whether to run async. (Ignored)
Returns:
Agent executor iterator object.
"""
return AgentExecutorIterator(
self,
inputs,
callbacks,
tags=self.tags,
include_run_info=include_run_info,
)
@property
def input_keys(self) -> list[str]:
"""Return the input keys."""
return self._action_agent.input_keys
@property
def output_keys(self) -> list[str]:
"""Return the singular output key."""
if self.return_intermediate_steps:
return [*self._action_agent.return_values, "intermediate_steps"]
return self._action_agent.return_values
def lookup_tool(self, name: str) -> BaseTool:
"""Lookup tool by name.
Args:
name: Name of tool.
Returns:
Tool object.
"""
return {tool.name: tool for tool in self.tools}[name]
def _should_continue(self, iterations: int, time_elapsed: float) -> bool:
if self.max_iterations is not None and iterations >= self.max_iterations:
return False
return self.max_execution_time is None or time_elapsed < self.max_execution_time
def _return(
self,
output: AgentFinish,
intermediate_steps: list,
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
if run_manager:
run_manager.on_agent_finish(output, color="green", verbose=self.verbose)
final_output = output.return_values
if self.return_intermediate_steps:
final_output["intermediate_steps"] = intermediate_steps
return final_output
async def _areturn(
self,
output: AgentFinish,
intermediate_steps: list,
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
if run_manager:
await run_manager.on_agent_finish(
output,
color="green",
verbose=self.verbose,
)
final_output = output.return_values
if self.return_intermediate_steps:
final_output["intermediate_steps"] = intermediate_steps
return final_output
def _consume_next_step(
self,
values: NextStepOutput,
) -> AgentFinish | list[tuple[AgentAction, str]]:
if isinstance(values[-1], AgentFinish):
if len(values) != 1:
msg = "Expected a single AgentFinish output, but got multiple values."
raise ValueError(msg)
return values[-1]
return [(a.action, a.observation) for a in values if isinstance(a, AgentStep)]
def _take_next_step(
self,
name_to_tool_map: dict[str, BaseTool],
color_mapping: dict[str, str],
inputs: dict[str, str],
intermediate_steps: list[tuple[AgentAction, str]],
run_manager: CallbackManagerForChainRun | None = None,
) -> AgentFinish | list[tuple[AgentAction, str]]:
return self._consume_next_step(
list(
self._iter_next_step(
name_to_tool_map,
color_mapping,
inputs,
intermediate_steps,
run_manager,
),
),
)
def _iter_next_step(
self,
name_to_tool_map: dict[str, BaseTool],
color_mapping: dict[str, str],
inputs: dict[str, str],
intermediate_steps: list[tuple[AgentAction, str]],
run_manager: CallbackManagerForChainRun | None = None,
) -> Iterator[AgentFinish | AgentAction | AgentStep]:
"""Take a single step in the thought-action-observation loop.
Override this to take control of how the agent makes and acts on choices.
"""
try:
intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
# Call the LLM to see what to do.
output = self._action_agent.plan(
intermediate_steps,
callbacks=run_manager.get_child() if run_manager else None,
**inputs,
)
except OutputParserException as e:
if isinstance(self.handle_parsing_errors, bool):
raise_error = not self.handle_parsing_errors
else:
raise_error = False
if raise_error:
msg = (
"An output parsing error occurred. "
"In order to pass this error back to the agent and have it try "
"again, pass `handle_parsing_errors=True` to the AgentExecutor. "
f"This is the error: {e!s}"
)
raise ValueError(msg) from e
text = str(e)
if isinstance(self.handle_parsing_errors, bool):
if e.send_to_llm:
observation = str(e.observation)
text = str(e.llm_output)
else:
observation = "Invalid or incomplete response"
elif isinstance(self.handle_parsing_errors, str):
observation = self.handle_parsing_errors
elif callable(self.handle_parsing_errors):
observation = self.handle_parsing_errors(e)
else:
msg = "Got unexpected type of `handle_parsing_errors`" # type: ignore[unreachable]
raise ValueError(msg) from e # noqa: TRY004
output = AgentAction("_Exception", observation, text)
if run_manager:
run_manager.on_agent_action(output, color="green")
tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()
observation = ExceptionTool().run(
output.tool_input,
verbose=self.verbose,
color=None,
callbacks=run_manager.get_child() if run_manager else None,
**tool_run_kwargs,
)
yield AgentStep(action=output, observation=observation)
return
# If the tool chosen is the finishing tool, then we end and return.
if isinstance(output, AgentFinish):
yield output
return
actions: list[AgentAction]
actions = [output] if isinstance(output, AgentAction) else output
for agent_action in actions:
yield agent_action
for agent_action in actions:
yield self._perform_agent_action(
name_to_tool_map,
color_mapping,
agent_action,
run_manager,
)
def _perform_agent_action(
self,
name_to_tool_map: dict[str, BaseTool],
color_mapping: dict[str, str],
agent_action: AgentAction,
run_manager: CallbackManagerForChainRun | None = None,
) -> AgentStep:
if run_manager:
run_manager.on_agent_action(agent_action, color="green")
# Otherwise we lookup the tool
if agent_action.tool in name_to_tool_map:
tool = name_to_tool_map[agent_action.tool]
return_direct = tool.return_direct
color = color_mapping[agent_action.tool]
tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()
if return_direct:
tool_run_kwargs["llm_prefix"] = ""
# We then call the tool on the tool input to get an observation
observation = tool.run(
agent_action.tool_input,
verbose=self.verbose,
color=color,
callbacks=run_manager.get_child() if run_manager else None,
**tool_run_kwargs,
)
else:
tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()
observation = InvalidTool().run(
{
"requested_tool_name": agent_action.tool,
"available_tool_names": list(name_to_tool_map.keys()),
},
verbose=self.verbose,
color=None,
callbacks=run_manager.get_child() if run_manager else None,
**tool_run_kwargs,
)
return AgentStep(action=agent_action, observation=observation)
async def _atake_next_step(
self,
name_to_tool_map: dict[str, BaseTool],
color_mapping: dict[str, str],
inputs: dict[str, str],
intermediate_steps: list[tuple[AgentAction, str]],
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> AgentFinish | list[tuple[AgentAction, str]]:
return self._consume_next_step(
[
a
async for a in self._aiter_next_step(
name_to_tool_map,
color_mapping,
inputs,
intermediate_steps,
run_manager,
)
],
)
async def _aiter_next_step(
self,
name_to_tool_map: dict[str, BaseTool],
color_mapping: dict[str, str],
inputs: dict[str, str],
intermediate_steps: list[tuple[AgentAction, str]],
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> AsyncIterator[AgentFinish | AgentAction | AgentStep]:
"""Take a single step in the thought-action-observation loop.
Override this to take control of how the agent makes and acts on choices.
"""
try:
intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
# Call the LLM to see what to do.
output = await self._action_agent.aplan(
intermediate_steps,
callbacks=run_manager.get_child() if run_manager else None,
**inputs,
)
except OutputParserException as e:
if isinstance(self.handle_parsing_errors, bool):
raise_error = not self.handle_parsing_errors
else:
raise_error = False
if raise_error:
msg = (
"An output parsing error occurred. "
"In order to pass this error back to the agent and have it try "
"again, pass `handle_parsing_errors=True` to the AgentExecutor. "
f"This is the error: {e!s}"
)
raise ValueError(msg) from e
text = str(e)
if isinstance(self.handle_parsing_errors, bool):
if e.send_to_llm:
observation = str(e.observation)
text = str(e.llm_output)
else:
observation = "Invalid or incomplete response"
elif isinstance(self.handle_parsing_errors, str):
observation = self.handle_parsing_errors
elif callable(self.handle_parsing_errors):
observation = self.handle_parsing_errors(e)
else:
msg = "Got unexpected type of `handle_parsing_errors`" # type: ignore[unreachable]
raise ValueError(msg) from e # noqa: TRY004
output = AgentAction("_Exception", observation, text)
tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()
observation = await ExceptionTool().arun(
output.tool_input,
verbose=self.verbose,
color=None,
callbacks=run_manager.get_child() if run_manager else None,
**tool_run_kwargs,
)
yield AgentStep(action=output, observation=observation)
return
# If the tool chosen is the finishing tool, then we end and return.
if isinstance(output, AgentFinish):
yield output
return
actions: list[AgentAction]
actions = [output] if isinstance(output, AgentAction) else output
for agent_action in actions:
yield agent_action
# Use asyncio.gather to run multiple tool.arun() calls concurrently
result = await asyncio.gather(
*[
self._aperform_agent_action(
name_to_tool_map,
color_mapping,
agent_action,
run_manager,
)
for agent_action in actions
],
)
# TODO: This could yield each result as it becomes available
for chunk in result:
yield chunk
async def _aperform_agent_action(
self,
name_to_tool_map: dict[str, BaseTool],
color_mapping: dict[str, str],
agent_action: AgentAction,
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> AgentStep:
if run_manager:
await run_manager.on_agent_action(
agent_action,
verbose=self.verbose,
color="green",
)
# Otherwise we lookup the tool
if agent_action.tool in name_to_tool_map:
tool = name_to_tool_map[agent_action.tool]
return_direct = tool.return_direct
color = color_mapping[agent_action.tool]
tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()
if return_direct:
tool_run_kwargs["llm_prefix"] = ""
# We then call the tool on the tool input to get an observation
observation = await tool.arun(
agent_action.tool_input,
verbose=self.verbose,
color=color,
callbacks=run_manager.get_child() if run_manager else None,
**tool_run_kwargs,
)
else:
tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()
observation = await InvalidTool().arun(
{
"requested_tool_name": agent_action.tool,
"available_tool_names": list(name_to_tool_map.keys()),
},
verbose=self.verbose,
color=None,
callbacks=run_manager.get_child() if run_manager else None,
**tool_run_kwargs,
)
return AgentStep(action=agent_action, observation=observation)
def _call(
self,
inputs: dict[str, str],
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
"""Run text through and get agent response."""
# Construct a mapping of tool name to tool for easy lookup
name_to_tool_map = {tool.name: tool for tool in self.tools}
# We construct a mapping from each tool to a color, used for logging.
color_mapping = get_color_mapping(
[tool.name for tool in self.tools],
excluded_colors=["green", "red"],
)
intermediate_steps: list[tuple[AgentAction, str]] = []
# Let's start tracking the number of iterations and time elapsed
iterations = 0
time_elapsed = 0.0
start_time = time.time()
# We now enter the agent loop (until it returns something).
while self._should_continue(iterations, time_elapsed):
next_step_output = self._take_next_step(
name_to_tool_map,
color_mapping,
inputs,
intermediate_steps,
run_manager=run_manager,
)
if isinstance(next_step_output, AgentFinish):
return self._return(
next_step_output,
intermediate_steps,
run_manager=run_manager,
)
intermediate_steps.extend(next_step_output)
if len(next_step_output) == 1:
next_step_action = next_step_output[0]
# See if tool should return directly
tool_return = self._get_tool_return(next_step_action)
if tool_return is not None:
return self._return(
tool_return,
intermediate_steps,
run_manager=run_manager,
)
iterations += 1
time_elapsed = time.time() - start_time
output = self._action_agent.return_stopped_response(
self.early_stopping_method,
intermediate_steps,
**inputs,
)
return self._return(output, intermediate_steps, run_manager=run_manager)
async def _acall(
self,
inputs: dict[str, str],
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> dict[str, str]:
"""Async run text through and get agent response."""
# Construct a mapping of tool name to tool for easy lookup
name_to_tool_map = {tool.name: tool for tool in self.tools}
# We construct a mapping from each tool to a color, used for logging.
color_mapping = get_color_mapping(
[tool.name for tool in self.tools],
excluded_colors=["green"],
)
intermediate_steps: list[tuple[AgentAction, str]] = []
# Let's start tracking the number of iterations and time elapsed
iterations = 0
time_elapsed = 0.0
start_time = time.time()
# We now enter the agent loop (until it returns something).
try:
async with asyncio_timeout(self.max_execution_time):
while self._should_continue(iterations, time_elapsed):
next_step_output = await self._atake_next_step(
name_to_tool_map,
color_mapping,
inputs,
intermediate_steps,
run_manager=run_manager,
)
if isinstance(next_step_output, AgentFinish):
return await self._areturn(
next_step_output,
intermediate_steps,
run_manager=run_manager,
)
intermediate_steps.extend(next_step_output)
if len(next_step_output) == 1:
next_step_action = next_step_output[0]
# See if tool should return directly
tool_return = self._get_tool_return(next_step_action)
if tool_return is not None:
return await self._areturn(
tool_return,
intermediate_steps,
run_manager=run_manager,
)
iterations += 1
time_elapsed = time.time() - start_time
output = self._action_agent.return_stopped_response(
self.early_stopping_method,
intermediate_steps,
**inputs,
)
return await self._areturn(
output,
intermediate_steps,
run_manager=run_manager,
)
except (TimeoutError, asyncio.TimeoutError):
# stop early when interrupted by the async timeout
output = self._action_agent.return_stopped_response(
self.early_stopping_method,
intermediate_steps,
**inputs,
)
return await self._areturn(
output,
intermediate_steps,
run_manager=run_manager,
)
def _get_tool_return(
self,
next_step_output: tuple[AgentAction, str],
) -> AgentFinish | None:
"""Check if the tool is a returning tool."""
agent_action, observation = next_step_output
name_to_tool_map = {tool.name: tool for tool in self.tools}
return_value_key = "output"
if len(self._action_agent.return_values) > 0:
return_value_key = self._action_agent.return_values[0]
# Invalid tools won't be in the map, so we return False.
if (
agent_action.tool in name_to_tool_map
and name_to_tool_map[agent_action.tool].return_direct
):
return AgentFinish(
{return_value_key: observation},
"",
)
return None
def _prepare_intermediate_steps(
self,
intermediate_steps: list[tuple[AgentAction, str]],
) -> list[tuple[AgentAction, str]]:
if (
isinstance(self.trim_intermediate_steps, int)
and self.trim_intermediate_steps > 0
):
return intermediate_steps[-self.trim_intermediate_steps :]
if callable(self.trim_intermediate_steps):
return self.trim_intermediate_steps(intermediate_steps)
return intermediate_steps
@override
def stream(
self,
input: dict[str, Any] | Any,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> Iterator[AddableDict]:
"""Enables streaming over steps taken to reach final output.
Args:
input: Input to the agent.
config: Config to use.
kwargs: Additional arguments.
Yields:
Addable dictionary.
"""
config = ensure_config(config)
iterator = AgentExecutorIterator(
self,
input,
config.get("callbacks"),
tags=config.get("tags"),
metadata=config.get("metadata"),
run_name=config.get("run_name"),
run_id=config.get("run_id"),
yield_actions=True,
**kwargs,
)
yield from iterator
@override
async def astream(
self,
input: dict[str, Any] | Any,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> AsyncIterator[AddableDict]:
"""Async enables streaming over steps taken to reach final output.
Args:
input: Input to the agent.
config: Config to use.
kwargs: Additional arguments.
Yields:
Addable dictionary.
"""
config = ensure_config(config)
iterator = AgentExecutorIterator(
self,
input,
config.get("callbacks"),
tags=config.get("tags"),
metadata=config.get("metadata"),
run_name=config.get("run_name"),
run_id=config.get("run_id"),
yield_actions=True,
**kwargs,
)
async for step in iterator:
yield step
| AgentExecutor |
python | Textualize__textual | src/textual/worker.py | {
"start": 794,
"end": 860
} | class ____(Exception):
"""A worker related error."""
| WorkerError |
python | pytorch__pytorch | test/distributed/test_c10d_common.py | {
"start": 40142,
"end": 51631
} | class ____:
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 2
@property
def device(self):
self.fail("test subclass didn't override device")
def _verify_sequence_number_across_pg(self, pg, verify_pg):
seq_num = pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
# We use a separate pg to verify the sequence numbers, otherwise these
# collectives will themselves increment the sequence number.
dist.all_gather_object(obj_list, seq_num, group=verify_pg)
self.assertEqual(len(set(obj_list)), 1)
return obj_list[0]
def _test_sequence_num_incremented(self, process_group, ranks):
# verify initial sequence numbers. Use a distinct process group for
# verification to keep counts as expected with respect to process_group.
verify_pg = dist.new_group(
ranks=ranks,
backend="gloo",
)
assert dist.get_world_size(process_group) == dist.get_world_size(verify_pg)
initial_num = (
self._verify_sequence_number_across_pg(
pg=process_group, verify_pg=verify_pg
)
if not c10d._rank_not_in_group(process_group)
else -1
)
# Verify sequence numbers are appropriately incremented
for i in range(10):
t = torch.ones(1, device=device_type)
dist.all_reduce(t, group=process_group)
if not c10d._rank_not_in_group(process_group):
seq_num = self._verify_sequence_number_across_pg(
pg=process_group,
verify_pg=verify_pg,
)
self.assertEqual(initial_num + i + 1, seq_num)
if dist.get_world_size(process_group) > 2:
# Test when certain ranks don't call collectives
if dist.get_rank(process_group) not in [0, 2]:
dist.all_reduce(t, group=process_group, async_op=True)
# Now ranks 0 and 2 should be lagging by 1.
if not c10d._rank_not_in_group(process_group):
seq_num = process_group._get_sequence_number_for_group()
rank = dist.get_rank(process_group)
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
dist.all_gather_object(obj_list, (rank, seq_num), group=verify_pg)
rank_to_seq_num = dict(obj_list)
self.assertEqual(len(set(rank_to_seq_num.values())), 2)
self.assertEqual(rank_to_seq_num[0], rank_to_seq_num[2])
expected_same = {
rank_to_seq_num[i] for i in rank_to_seq_num if i not in [0, 2]
}
self.assertEqual(len(expected_same), 1)
self.assertEqual(rank_to_seq_num[0] + 1, rank_to_seq_num[1])
def _test_sequence_num_incremented_default_group(self, backend_name):
torch.accelerator.set_device_index(self.rank)
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend_name,
world_size=self.world_size,
rank=self.rank,
store=store,
)
self._test_sequence_num_incremented(
c10d._get_default_group(),
ranks=list(range(dist.get_world_size())),
)
def _test_sequence_num_incremented_subgroup(self, backend_name):
torch.accelerator.set_device_index(self.rank)
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend_name,
world_size=self.world_size,
rank=self.rank,
store=store,
)
subgroup_ranks = [0, 1, 2]
subgroup = dist.new_group(subgroup_ranks)
self._test_sequence_num_incremented(subgroup, subgroup_ranks)
def _test_sequence_num_set_default_pg(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
default_pg = c10d._get_default_group()
seq_num = default_pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(obj_list, seq_num)
self.assertEqual(len(set(obj_list)), 1)
def _test_sequence_num_set_new_group(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
subgroup = dist.new_group([0, 1])
if not c10d._rank_not_in_group(subgroup):
subgroup_seq = subgroup._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(subgroup))]
dist.all_gather_object(obj_list, subgroup_seq, group=subgroup)
self.assertEqual(len(set(obj_list)), 1)
def _test_warn_not_in_group(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
in_group_ranks = list(filter(lambda x: x % 2 == 0, range(self.world_size)))
group = dist.new_group(in_group_ranks)
x = torch.zeros(2, 2).to(self.rank)
xs = [torch.zeros(2, 2).to(self.rank) for _ in range(len(in_group_ranks))]
if self.rank not in in_group_ranks:
msg = ".*{}.*does not belong to.*"
with self.assertWarnsOnceRegex(UserWarning, msg.format("all_gather")):
dist.all_gather(xs, x, group=group)
with self.assertWarnsOnceRegex(UserWarning, msg.format("all_reduce")):
dist.all_reduce(x, group=group)
with self.assertWarnsOnceRegex(UserWarning, msg.format("barrier")):
dist.barrier(group=group)
with self.assertWarnsOnceRegex(UserWarning, msg.format("broadcast")):
dist.broadcast(x, src=0, group=group)
else:
dist.all_gather(xs, x, group=group)
dist.all_reduce(x, group=group)
dist.barrier(group=group)
dist.broadcast(x, src=0, group=group)
def _test_rank_membership(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
self.assertTrue(self.world_size > 1)
group = dist.new_group(ranks=[1])
self.assertEqual(dist.get_group_rank(group, 1), 0)
with self.assertRaisesRegex(ValueError, "not part of group"):
dist.get_group_rank(group, 0)
with self.assertRaisesRegex(ValueError, "not registered"):
dist.get_group_rank(DummyProcessGroup(self.rank, self.world_size), 0)
self.assertEqual(dist.get_global_rank(group, 0), 1)
with self.assertRaisesRegex(ValueError, "not part of group"):
dist.get_global_rank(group, 1)
with self.assertRaisesRegex(ValueError, "not registered"):
dist.get_global_rank(DummyProcessGroup(self.rank, self.world_size), 0)
self.assertEqual(dist.get_process_group_ranks(group), [1])
def _test_tensor_dtype_mismatch(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
tensor = torch.ones(2, 2, device=self.device) * 7
tensor_h = tensor.half()
tensor_list = [
torch.zeros(2, 2, device=self.device) for _ in range(self.world_size)
]
tensor_list_h = list(tensor_list)
tensor_list_h[1] = tensor_list_h[1].half()
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.all_gather(tensor_list_h, tensor)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.all_gather(tensor_list, tensor_h)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.all_gather_coalesced([tensor_list_h], tensor_list)
dist.all_gather_coalesced([tensor_list], tensor_list_h)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.all_reduce_coalesced(tensor_list_h)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.reduce_scatter(tensor, tensor_list_h)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.reduce_scatter(tensor_h, tensor_list)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.all_to_all_single(tensor_h, tensor)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.all_to_all(tensor_list_h, tensor_list)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.all_to_all(tensor_list, tensor_list_h)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.scatter(tensor, tensor_list_h)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.gather(tensor_h, tensor_list)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.gather(tensor, tensor_list_h)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.scatter(tensor_h, tensor_list)
def _test_tensor_dtype_complex(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
tensor = torch.rand(2, device=self.device)
tensor_c = torch.view_as_complex(tensor)
tensor_list = [
torch.rand(2, device=self.device) for _ in range(self.world_size)
]
tensor_list_c = list(tensor_list)
tensor_list_c[1] = torch.view_as_complex(tensor_list_c[1])
dist.all_gather(tensor_list, tensor)
dist.all_gather(tensor_list, tensor_c)
dist.all_gather(tensor_list_c, tensor)
dist.all_gather(tensor_list_c, tensor_c)
def _test_bool_tensors(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
device = "cuda" if backend == "nccl" else "xpu" if backend == "xccl" else "cpu"
# test alltoall_base
tensor = torch.tensor([1, 0, 0, 1], dtype=torch.bool, device=device)
zeros = torch.tensor([0, 0, 0, 0], dtype=torch.bool, device=device)
outensor = zeros if self.rank > 0 else tensor
dist.broadcast(outensor, src=0)
self.assertEqual(outensor, tensor)
# Variant of AbstractCommTest that expects world size of 4
| AbstractCommTest |
python | django-extensions__django-extensions | tests/management/commands/test_create_jobs.py | {
"start": 1109,
"end": 2730
} | class ____(CreateJobsTestsMixin, TestCase):
def test_should_create_jobs_directory_structure_silently(self):
call_command("create_jobs", "testapp_with_no_models_file")
self.assertTrue(os.path.exists(JOBS_DIR))
@patch("sys.stdout", new_callable=StringIO)
def test_should_create_jobs_directory_structure_and_print_SUCCESS_message(
self, m_stdout
):
call_command("create_jobs", "testapp_with_no_models_file", verbosity=2)
self.assertTrue(os.path.exists(JOBS_DIR))
for time_period in TIME_PERIODS:
self.assertIn(
"testapp_with_no_models_file/jobs/{}/__init__.py".format(time_period),
m_stdout.getvalue(),
)
@patch("sys.stdout", new_callable=StringIO)
def test_should_not_override_already_created_jobs_directory_structure_and_print_that_already_exists(
self, m_stdout
):
call_command("create_jobs", "testapp_with_no_models_file")
sample_file_path = os.path.join(JOBS_DIR, "sample.py")
TEST_COMMENT = "# test"
with open(sample_file_path, "a") as f:
f.write(TEST_COMMENT)
call_command("create_jobs", "testapp_with_no_models_file", verbosity=2)
self.assertTrue(os.path.exists(JOBS_DIR))
self.assertIn(TEST_COMMENT, open(sample_file_path).read())
for time_period in TIME_PERIODS:
self.assertIn(
"testapp_with_no_models_file/jobs/{}/__init__.py already exists".format(
time_period
),
m_stdout.getvalue(),
)
| CreateJobsTests |
python | huggingface__transformers | src/transformers/models/whisper/english_normalizer.py | {
"start": 2078,
"end": 2783
} | class ____:
def __init__(self, remove_diacritics: bool = False, split_letters: bool = False):
self.clean = remove_symbols_and_diacritics if remove_diacritics else remove_symbols
self.split_letters = split_letters
def __call__(self, s: str):
s = s.lower()
s = re.sub(r"[<\[][^>\]]*[>\]]", "", s) # remove words between brackets
s = re.sub(r"\(([^)]+?)\)", "", s) # remove words between parenthesis
s = self.clean(s).lower()
if self.split_letters:
s = " ".join(regex.findall(r"\X", s, regex.U))
s = re.sub(r"\s+", " ", s) # replace any successive whitespace characters with a space
return s
| BasicTextNormalizer |
python | huggingface__transformers | src/transformers/models/seamless_m4t/modeling_seamless_m4t.py | {
"start": 92030,
"end": 94162
} | class ____(nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1):
super().__init__()
self.leaky_relu_slope = leaky_relu_slope
self.convs1 = nn.ModuleList(
[
nn.Conv1d(
channels,
channels,
kernel_size,
stride=1,
dilation=dilation[i],
padding=self.get_padding(kernel_size, dilation[i]),
)
for i in range(len(dilation))
]
)
self.convs2 = nn.ModuleList(
[
nn.Conv1d(
channels,
channels,
kernel_size,
stride=1,
dilation=1,
padding=self.get_padding(kernel_size, 1),
)
for _ in range(len(dilation))
]
)
def get_padding(self, kernel_size, dilation=1):
return (kernel_size * dilation - dilation) // 2
def apply_weight_norm(self):
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
weight_norm = nn.utils.parametrizations.weight_norm
for layer in self.convs1:
weight_norm(layer)
for layer in self.convs2:
weight_norm(layer)
def remove_weight_norm(self):
for layer in self.convs1:
nn.utils.remove_weight_norm(layer)
for layer in self.convs2:
nn.utils.remove_weight_norm(layer)
def forward(self, hidden_states):
for conv1, conv2 in zip(self.convs1, self.convs2):
residual = hidden_states
hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
hidden_states = conv1(hidden_states)
hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
hidden_states = conv2(hidden_states)
hidden_states = hidden_states + residual
return hidden_states
| HifiGanResidualBlock |
python | pandas-dev__pandas | pandas/tests/util/test_assert_produces_warning.py | {
"start": 8296,
"end": 10123
} | class ____:
def test_raise_on_warning(self, false_or_none):
msg = r"Caused unexpected warning\(s\)"
with pytest.raises(AssertionError, match=msg):
with tm.assert_produces_warning(false_or_none):
f()
def test_no_raise_without_warning(self, false_or_none):
with tm.assert_produces_warning(false_or_none):
pass
def test_no_raise_with_false_raise_on_extra(self, false_or_none):
with tm.assert_produces_warning(false_or_none, raise_on_extra_warnings=False):
f()
def test_raises_during_exception():
msg = "Did not see expected warning of class 'UserWarning'"
with pytest.raises(AssertionError, match=msg):
with tm.assert_produces_warning(UserWarning):
raise ValueError
with pytest.raises(AssertionError, match=msg):
with tm.assert_produces_warning(UserWarning):
warnings.warn(
"FutureWarning", FutureWarning
) # pdlint: ignore[warning_class]
raise IndexError
msg = "Caused unexpected warning"
with pytest.raises(AssertionError, match=msg):
with tm.assert_produces_warning(None):
warnings.warn(
"FutureWarning", FutureWarning
) # pdlint: ignore[warning_class]
raise SystemError
def test_passes_during_exception():
with pytest.raises(SyntaxError, match="Error"):
with tm.assert_produces_warning(None):
raise SyntaxError("Error")
with pytest.raises(ValueError, match="Error"):
with tm.assert_produces_warning(FutureWarning, match="FutureWarning"):
warnings.warn(
"FutureWarning", FutureWarning
) # pdlint: ignore[warning_class]
raise ValueError("Error")
| TestFalseOrNoneExpectedWarning |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_retry_execution.py | {
"start": 2078,
"end": 3389
} | class ____(ReadonlyGraphQLContextTestMatrix):
def test_retry_execution_permission_failure(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(graphql_context, "eventually_successful")
code_location = graphql_context.get_code_location(main_repo_location_name())
repository = code_location.get_repository("test_repo")
remote_job_origin = repository.get_full_job("eventually_successful").get_remote_origin()
run_id = create_run_for_test(
graphql_context.instance,
"eventually_successful",
remote_job_origin=remote_job_origin,
).run_id
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"runConfigData": {},
"executionMetadata": {
"rootRunId": run_id,
"parentRunId": run_id,
"tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
},
}
},
)
assert result.data["launchPipelineReexecution"]["__typename"] == "UnauthorizedError"
| TestRetryExecutionReadonly |
python | huggingface__transformers | examples/modular-transformers/modeling_test_detr.py | {
"start": 17053,
"end": 21800
} | class ____(nn.Module):
"""
Multiscale deformable attention as proposed in Deformable DETR.
"""
def __init__(self, config: TestDetrConfig, num_heads: int, n_points: int):
super().__init__()
self.attn = MultiScaleDeformableAttention()
if config.d_model % num_heads != 0:
raise ValueError(
f"embed_dim (d_model) must be divisible by num_heads, but got {config.d_model} and {num_heads}"
)
dim_per_head = config.d_model // num_heads
# check if dim_per_head is power of 2
if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0):
warnings.warn(
"You'd better set embed_dim (d_model) in TestDetrMultiscaleDeformableAttention to make the"
" dimension of each attention head a power of 2 which is more efficient in the authors' CUDA"
" implementation."
)
self.im2col_step = 64
self.d_model = config.d_model
self.n_levels = config.num_feature_levels
self.n_heads = num_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(config.d_model, num_heads * self.n_levels * n_points * 2)
self.attention_weights = nn.Linear(config.d_model, num_heads * self.n_levels * n_points)
self.value_proj = nn.Linear(config.d_model, config.d_model)
self.output_proj = nn.Linear(config.d_model, config.d_model)
self.disable_custom_kernels = config.disable_custom_kernels
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
return tensor if position_embeddings is None else tensor + position_embeddings
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states=None,
encoder_attention_mask=None,
position_embeddings: Optional[torch.Tensor] = None,
reference_points=None,
spatial_shapes=None,
spatial_shapes_list=None,
level_start_index=None,
output_attentions: bool = False,
):
# add position embeddings to the hidden states before projecting to queries and keys
if position_embeddings is not None:
hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
batch_size, num_queries, _ = hidden_states.shape
batch_size, sequence_length, _ = encoder_hidden_states.shape
total_elements = sum(height * width for height, width in spatial_shapes_list)
if total_elements != sequence_length:
raise ValueError(
"Make sure to align the spatial shapes with the sequence length of the encoder hidden states"
)
value = self.value_proj(encoder_hidden_states)
if attention_mask is not None:
# we invert the attention_mask
value = value.masked_fill(~attention_mask[..., None], float(0))
value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(hidden_states).view(
batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2
)
attention_weights = self.attention_weights(hidden_states).view(
batch_size, num_queries, self.n_heads, self.n_levels * self.n_points
)
attention_weights = F.softmax(attention_weights, -1).view(
batch_size, num_queries, self.n_heads, self.n_levels, self.n_points
)
# batch_size, num_queries, n_heads, n_levels, n_points, 2
num_coordinates = reference_points.shape[-1]
if num_coordinates == 2:
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
sampling_locations = (
reference_points[:, :, None, :, None, :]
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
)
elif num_coordinates == 4:
sampling_locations = (
reference_points[:, :, None, :, None, :2]
+ sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
)
else:
raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}")
output = self.attn(
value,
spatial_shapes,
spatial_shapes_list,
level_start_index,
sampling_locations,
attention_weights,
self.im2col_step,
)
output = self.output_proj(output)
return output, attention_weights
| TestDetrMultiscaleDeformableAttention |
python | milvus-io__pymilvus | pymilvus/client/asynch.py | {
"start": 5179,
"end": 5331
} | class ____(Future):
def on_response(self, response: Any):
check_status(response.status)
return MutationResult(response)
| MutationFuture |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 66064,
"end": 66505
} | class ____(sgqlc.types.Enum):
"""Properties by which project workflows can be ordered.
Enumeration Choices:
* `CREATED_AT`: The workflows' date and time of creation
* `NAME`: The workflows' name
* `NUMBER`: The workflows' number
* `UPDATED_AT`: The workflows' date and time of update
"""
__schema__ = github_schema
__choices__ = ("CREATED_AT", "NAME", "NUMBER", "UPDATED_AT")
| ProjectV2WorkflowsOrderField |
python | google__jax | tests/state_test.py | {
"start": 24661,
"end": 37070
} | class ____(jtu.JaxTestCase):
def test_discharge_get(self):
def f(a_ref):
a = ref_get(a_ref, ())
return [a + 1]
in_avals = [shaped_array_ref((), jnp.dtype('float32'))]
stateful_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrap_init(f, 1),
in_avals)
# Discharging should just turn this into a jaxpr that just adds 1.
discharged_jaxpr, _ = discharge_state(stateful_jaxpr, consts)
self.assertLen(discharged_jaxpr.invars, 1)
self.assertLen(discharged_jaxpr.outvars, 2)
self.assertEqual(discharged_jaxpr.eqns[0].primitive, lax.add_p)
# Should be able to evaluate this jaxpr
self.assertListEqual(core.eval_jaxpr(discharged_jaxpr, (),
jnp.float32(1.)), [2., 1.])
def test_discharge_get_with_slice(self):
def f(a_ref):
a = ref_get(a_ref, (0, 1))
return [a + 1]
in_avals = [shaped_array_ref((4, 3, 2), jnp.dtype('float32'))]
stateful_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrap_init(f, 1),
in_avals)
# Discharging should just turn this into a jaxpr that just adds 1.
discharged_jaxpr, () = discharge_state(stateful_jaxpr, consts)
self.assertLen(discharged_jaxpr.invars, 1)
self.assertLen(discharged_jaxpr.outvars, 2)
self.assertIn(lax.dynamic_slice_p,
{eqn.primitive for eqn in discharged_jaxpr.eqns})
# Should be able to evaluate this jaxpr
inval = jnp.arange(24., dtype=jnp.float32).reshape((4, 3, 2))
outval, refval = core.eval_jaxpr(discharged_jaxpr, (), inval)
self.assertTrue((outval == inval[0, 1] + 1).all())
self.assertTrue((refval == inval).all())
def test_discharge_get_with_gather(self):
def f(a_ref):
a = a_ref[jnp.array([0, 1])]
return [a + 1]
in_avals = [shaped_array_ref((4, 3), jnp.dtype('float32'))]
stateful_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(
wrap_init(f, 1), in_avals)
discharged_jaxpr, discharged_consts = discharge_state(
stateful_jaxpr, consts)
inval = jnp.arange(4 * 3, dtype=jnp.float32).reshape((4, 3))
outval, refval = core.eval_jaxpr(discharged_jaxpr, discharged_consts, inval)
self.assertTrue((outval == inval[jnp.array([0, 1])] + 1).all())
self.assertTrue((refval == inval).all())
def test_discharge_set(self):
def f(a_ref, b):
ref_set(a_ref, (), b + 1)
return []
in_avals = [shaped_array_ref((), jnp.dtype('float32')),
core.ShapedArray((), jnp.dtype('float32'))]
stateful_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrap_init(f, 2),
in_avals)
# Discharging should just turn this into a jaxpr that ignores the first
# value and returns second value plus 1.
discharged_jaxpr, _ = discharge_state(stateful_jaxpr, consts)
self.assertLen(discharged_jaxpr.invars, 2)
self.assertLen(discharged_jaxpr.outvars, 1)
self.assertEqual(core.eval_jaxpr(discharged_jaxpr, (), jnp.float32(0.),
jnp.float32(1.))[0], 2.)
self.assertEqual(core.eval_jaxpr(discharged_jaxpr, (), jnp.float32(2.),
jnp.float32(1.))[0], 2.)
def test_discharge_set_with_slice(self):
def f(a_ref):
ref_set(a_ref, (0, 1), jnp.ones(2, dtype=jnp.dtype('float32')))
return []
in_avals = [shaped_array_ref((4, 3, 2), jnp.dtype('float32'))]
stateful_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrap_init(f, 1), in_avals)
# Discharging should just turn this into a jaxpr that just adds 1.
discharged_jaxpr, () = discharge_state(stateful_jaxpr, consts)
self.assertLen(discharged_jaxpr.invars, 1)
self.assertLen(discharged_jaxpr.outvars, 1)
self.assertIn(lax.dynamic_update_slice_p,
{eqn.primitive for eqn in discharged_jaxpr.eqns})
self.assertIn(lax.dynamic_slice_p,
{eqn.primitive for eqn in discharged_jaxpr.eqns})
# Should be able to evaluate this jaxpr
inval = jnp.arange(24., dtype=jnp.float32).reshape((4, 3, 2))
refval, = core.eval_jaxpr(discharged_jaxpr, (), inval)
self.assertTrue((refval == inval.at[0, 1].set(1.)).all())
def test_discharge_set_with_gather(self):
def f(a_ref):
a_ref[jnp.array([0, 1])] = jnp.ones((2, 3), 'float32')
return []
in_avals = [shaped_array_ref((4, 3), jnp.dtype('float32'))]
stateful_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrap_init(f, 1),
in_avals)
discharged_jaxpr, discharged_consts = discharge_state(
stateful_jaxpr, consts)
inval = jnp.arange(4 * 3, dtype=jnp.float32).reshape((4, 3))
refval, = core.eval_jaxpr(discharged_jaxpr, discharged_consts, inval)
self.assertTrue((refval == inval.at[jnp.array([0, 1])].set(1.)).all())
def test_discharge_swap(self):
def f(a_ref):
a = ref_swap(
a_ref.at[0:4, 0:3, 0:2].at[1:3, :, 0],
(slice(None), slice(1, 3)),
jnp.zeros((2, 2), jnp.float32))
return [a + 1]
in_avals = [shaped_array_ref((4, 3, 2), jnp.float32)]
stateful_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(
wrap_init(f, 1), in_avals)
discharged_jaxpr, () = discharge_state(stateful_jaxpr, consts)
self.assertLen(discharged_jaxpr.invars, 1)
self.assertLen(discharged_jaxpr.outvars, 2)
inval = jnp.arange(24., dtype=jnp.float32).reshape((4, 3, 2))
outval, refval = core.eval_jaxpr(discharged_jaxpr, (), inval)
self.assertArraysEqual(outval, inval[1:3, 1:3, 0] + 1)
self.assertArraysEqual(refval, inval.at[1:3, 1:3, 0].set(0))
def test_discharge_addupdate(self):
def f(a_ref, b):
ref_addupdate(a_ref, (), b + 1)
return []
in_avals = [shaped_array_ref((), jnp.dtype('float32')),
core.ShapedArray((), jnp.dtype('float32'))]
stateful_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrap_init(f, 2),
in_avals)
# Discharging should just turn this into a jaxpr that adds the first value,
# second value, and 1.
discharged_jaxpr, _ = discharge_state(stateful_jaxpr, consts)
self.assertLen(discharged_jaxpr.invars, 2)
self.assertLen(discharged_jaxpr.outvars, 1)
self.assertEqual(core.eval_jaxpr(discharged_jaxpr, (), jnp.float32(0.),
jnp.float32(1.))[0], 2.)
self.assertEqual(core.eval_jaxpr(discharged_jaxpr, (), jnp.float32(2.),
jnp.float32(1.))[0], 4.)
def test_discharge_addupdate_with_slice(self):
def f(a_ref):
ref_addupdate(a_ref, (0, 1),
jnp.ones(2, dtype=jnp.dtype('float32')))
return []
in_avals = [shaped_array_ref((4, 3, 2), jnp.dtype('float32'))]
stateful_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrap_init(f, 1),
in_avals)
discharged_jaxpr, _ = discharge_state(stateful_jaxpr, consts)
self.assertLen(discharged_jaxpr.invars, 1)
self.assertLen(discharged_jaxpr.outvars, 1)
self.assertIn(lax.dynamic_update_slice_p,
{eqn.primitive for eqn in discharged_jaxpr.eqns})
self.assertIn(lax.add_p,
{eqn.primitive for eqn in discharged_jaxpr.eqns})
self.assertIn(lax.dynamic_slice_p,
{eqn.primitive for eqn in discharged_jaxpr.eqns})
inval = jnp.arange(24., dtype=jnp.float32).reshape((4, 3, 2))
refval, = core.eval_jaxpr(discharged_jaxpr, (), inval)
self.assertTrue((refval == inval.at[0, 1].add(1.)).all())
def test_discharge_addupdate_with_gather(self):
def f(a_ref):
ref_addupdate(a_ref, (jnp.array([0, 1]),),
jnp.ones((2, 3), 'float32'))
return []
in_avals = [shaped_array_ref((4, 3), jnp.dtype('float32'))]
stateful_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrap_init(f, 1),
in_avals)
discharged_jaxpr, discharged_consts = discharge_state(
stateful_jaxpr, consts)
inval = jnp.arange(4 * 3, dtype=jnp.float32).reshape((4, 3))
refval, = core.eval_jaxpr(discharged_jaxpr, discharged_consts, inval)
self.assertTrue((refval == inval.at[jnp.array([0, 1])].add(1.)).all())
def test_discharge_jaxpr_with_multiple_outputs(self):
def f(a_ref):
a = ref_get(a_ref, ())
b = a + 1
return [a, b]
in_avals = [shaped_array_ref((4,), jnp.dtype('float32'))]
stateful_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrap_init(f, 1),
in_avals)
discharged_jaxpr, _ = discharge_state(stateful_jaxpr, consts)
self.assertLen(discharged_jaxpr.invars, 1)
self.assertLen(discharged_jaxpr.outvars, 3)
inval = jnp.arange(4., dtype=jnp.float32)
a, b, refval = core.eval_jaxpr(discharged_jaxpr, (), inval)
self.assertTrue((a == inval).all())
self.assertTrue((b == inval + 1).all())
self.assertTrue((refval == inval).all())
def test_partially_discharging_jaxpr_keeps_refs(self):
def f(a_ref, b_ref):
ref_set(a_ref, (), jnp.ones(4, jnp.float32))
ref_set(b_ref, (), jnp.ones(4, jnp.float32))
return []
in_avals = [
shaped_array_ref((4,), jnp.dtype('float32')),
shaped_array_ref((4,), jnp.dtype('float32'))
]
stateful_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrap_init(f, 2),
in_avals)
discharged_jaxpr, _ = discharge_state(
stateful_jaxpr, consts, should_discharge=[False, True])
self.assertLen(discharged_jaxpr.invars, 2)
self.assertLen(discharged_jaxpr.outvars, 1)
self.assertIsInstance(discharged_jaxpr.invars[0].aval, AbstractRef)
self.assertIsInstance(discharged_jaxpr.invars[1].aval, core.ShapedArray)
self.assertEqual(discharged_jaxpr.effects,
{WriteEffect(len(discharged_jaxpr.constvars))})
def test_ellipsis_index(self):
def f(ref):
ref_set(ref, ..., jnp.array(0., dtype=jnp.float32))
ref_get(ref, ...)
ref[...] = jnp.array(0., dtype=jnp.float32)
ref[...]
return []
in_avals = [shaped_array_ref((), jnp.float32)]
pe.trace_to_jaxpr_dynamic(wrap_init(f, 1), in_avals)
def test_partial_discharge(self):
def f(a_ref, b_ref):
a_ref[...] = jnp.array(0., dtype=jnp.float32)
b_ref[...] = jnp.array(1., dtype=jnp.float32)
return a_ref[...], b_ref[...]
scalar_ref_1 = shaped_array_ref((), jnp.float32)
scalar_ref_2 = shaped_array_ref((), jnp.float32)
jaxpr, _, _ = pe.trace_to_jaxpr_dynamic(
wrap_init(f, 2), [scalar_ref_1, scalar_ref_2])
discharged_jaxpr, _ = discharge_state(jaxpr, (), should_discharge=[False, True])
prim_count = lambda p, jaxpr: sum(eqn.primitive == p for eqn in jaxpr.eqns)
self.assertEqual(prim_count(swap_p, jaxpr) // 2, prim_count(swap_p, discharged_jaxpr))
self.assertEqual(prim_count(get_p, jaxpr) // 2, prim_count(get_p, discharged_jaxpr))
def test_partial_fori_discharge(self):
def f(a_ref, b_ref):
def body(i, st):
a_ref[...] += 2 * i
b_ref[...] += i
return ()
lax.fori_loop(0, 5, body, init_val=())
return a_ref[...], b_ref[...]
ref = lambda x: AbstractRef(core.get_aval(x))
f_jaxpr = jax.make_jaxpr(f)(ref(1.), ref(2.))
jaxpr, _ = discharge_state(f_jaxpr.jaxpr, (), should_discharge=[False, True])
# Effects on y_ref were discharged away but not the effects on x_ref
self.assertEqual(f_jaxpr.effects, {ReadEffect(0), WriteEffect(0), ReadEffect(1), WriteEffect(1)})
self.assertEqual(jaxpr.effects, {ReadEffect(0), WriteEffect(0)})
# x_ref arg is still a reference but y_ref is discharged
self.assertNotIsInstance(jaxpr.invars[1].aval, AbstractRef)
self.assertIsInstance(jaxpr.invars[0].aval, AbstractRef)
# x_ref value is returned as part of the discharged refs set.
self.assertLen(f_jaxpr.out_avals, 2)
self.assertLen(jaxpr.outvars, 3)
def index_arrays(size, idx_shape):
valid_idx = hps.integers(min_value=-size, max_value=size - 1)
return hnp.arrays(np.int32, idx_shape, elements=valid_idx)
Shape = tuple[int, ...]
| StateDischargeTest |
python | python__mypy | mypy/nodes.py | {
"start": 72999,
"end": 73332
} | class ____(Expression):
__slots__ = ("expr",)
__match_args__ = ("expr",)
expr: Expression | None
def __init__(self, expr: Expression | None) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_yield_expr(self)
| YieldExpr |
python | Farama-Foundation__Gymnasium | gymnasium/spaces/tuple.py | {
"start": 262,
"end": 7598
} | class ____(Space[tuple[Any, ...]], typing.Sequence[Any]):
"""A tuple (more precisely: the cartesian product) of :class:`Space` instances.
Elements of this space are tuples of elements of the constituent spaces.
Example:
>>> from gymnasium.spaces import Tuple, Box, Discrete
>>> observation_space = Tuple((Discrete(2), Box(-1, 1, shape=(2,))), seed=42)
>>> observation_space.sample()
(np.int64(0), array([-0.3991573 , 0.21649833], dtype=float32))
"""
def __init__(
self,
spaces: Iterable[Space[Any]],
seed: int | typing.Sequence[int] | np.random.Generator | None = None,
):
r"""Constructor of :class:`Tuple` space.
The generated instance will represent the cartesian product :math:`\text{spaces}[0] \times ... \times \text{spaces}[-1]`.
Args:
spaces (Iterable[Space]): The spaces that are involved in the cartesian product.
seed: Optionally, you can use this argument to seed the RNGs of the ``spaces`` to ensure reproducible sampling.
"""
self.spaces = tuple(spaces)
for space in self.spaces:
assert isinstance(
space, Space
), f"{space} does not inherit from `gymnasium.Space`. Actual Type: {type(space)}"
super().__init__(None, None, seed) # type: ignore
@property
def is_np_flattenable(self):
"""Checks whether this space can be flattened to a :class:`spaces.Box`."""
return all(space.is_np_flattenable for space in self.spaces)
def seed(self, seed: int | typing.Sequence[int] | None = None) -> tuple[int, ...]:
"""Seed the PRNG of this space and all subspaces.
Depending on the type of seed, the subspaces will be seeded differently
* ``None`` - All the subspaces will use a random initial seed
* ``Int`` - The integer is used to seed the :class:`Tuple` space that is used to generate seed values for each of the subspaces. Warning, this does not guarantee unique seeds for all the subspaces.
* ``List`` / ``Tuple`` - Values used to seed the subspaces. This allows the seeding of multiple composite subspaces ``[42, 54, ...]``.
Args:
seed: An optional list of ints or int to seed the (sub-)spaces.
Returns:
A tuple of the seed values for all subspaces
"""
if seed is None:
return tuple(space.seed(None) for space in self.spaces)
elif isinstance(seed, int):
super().seed(seed)
subseeds = self.np_random.integers(
np.iinfo(np.int32).max, size=len(self.spaces)
)
return tuple(
subspace.seed(int(subseed))
for subspace, subseed in zip(self.spaces, subseeds)
)
elif isinstance(seed, (tuple, list)):
if len(seed) != len(self.spaces):
raise ValueError(
f"Expects that the subspaces of seeds equals the number of subspaces. Actual length of seeds: {len(seed)}, length of subspaces: {len(self.spaces)}"
)
return tuple(
space.seed(subseed) for subseed, space in zip(seed, self.spaces)
)
else:
raise TypeError(
f"Expected seed type: list, tuple, int or None, actual type: {type(seed)}"
)
def sample(
self,
mask: tuple[Any | None, ...] | None = None,
probability: tuple[Any | None, ...] | None = None,
) -> tuple[Any, ...]:
"""Generates a single random sample inside this space.
This method draws independent samples from the subspaces.
Args:
mask: An optional tuple of optional masks for each of the subspace's samples,
expects the same number of masks as spaces
probability: An optional tuple of optional probability masks for each of the subspace's samples,
expects the same number of probability masks as spaces
Returns:
Tuple of the subspace's samples
"""
if mask is not None and probability is not None:
raise ValueError(
f"Only one of `mask` or `probability` can be provided, actual values: mask={mask}, probability={probability}"
)
elif mask is not None:
assert isinstance(
mask, tuple
), f"Expected type of `mask` to be tuple, actual type: {type(mask)}"
assert len(mask) == len(
self.spaces
), f"Expected length of `mask` to be {len(self.spaces)}, actual length: {len(mask)}"
return tuple(
space.sample(mask=space_mask)
for space, space_mask in zip(self.spaces, mask)
)
elif probability is not None:
assert isinstance(
probability, tuple
), f"Expected type of `probability` to be tuple, actual type: {type(probability)}"
assert len(probability) == len(
self.spaces
), f"Expected length of `probability` to be {len(self.spaces)}, actual length: {len(probability)}"
return tuple(
space.sample(probability=space_probability)
for space, space_probability in zip(self.spaces, probability)
)
else:
return tuple(space.sample() for space in self.spaces)
def contains(self, x: Any) -> bool:
"""Return boolean specifying if x is a valid member of this space."""
if isinstance(x, (list, np.ndarray)):
x = tuple(x) # Promote list and ndarray to tuple for contains check
return (
isinstance(x, tuple)
and len(x) == len(self.spaces)
and all(space.contains(part) for (space, part) in zip(self.spaces, x))
)
def __repr__(self) -> str:
"""Gives a string representation of this space."""
return "Tuple(" + ", ".join([str(s) for s in self.spaces]) + ")"
def to_jsonable(
self, sample_n: typing.Sequence[tuple[Any, ...]]
) -> list[list[Any]]:
"""Convert a batch of samples from this space to a JSONable data type."""
# serialize as list-repr of tuple of vectors
return [
space.to_jsonable([sample[i] for sample in sample_n])
for i, space in enumerate(self.spaces)
]
def from_jsonable(self, sample_n: list[list[Any]]) -> list[tuple[Any, ...]]:
"""Convert a JSONable data type to a batch of samples from this space."""
return [
sample
for sample in zip(
*[
space.from_jsonable(sample_n[i])
for i, space in enumerate(self.spaces)
]
)
]
def __getitem__(self, index: int) -> Space[Any]:
"""Get the subspace at specific `index`."""
return self.spaces[index]
def __len__(self) -> int:
"""Get the number of subspaces that are involved in the cartesian product."""
return len(self.spaces)
def __eq__(self, other: Any) -> bool:
"""Check whether ``other`` is equivalent to this instance."""
return isinstance(other, Tuple) and self.spaces == other.spaces
| Tuple |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/property17.py | {
"start": 477,
"end": 602
} | class ____(Generic[T]):
@property
def prop(self: RootProto[T]) -> T:
return self.root.prop
@dataclass
| RootMixin |
python | doocs__leetcode | solution/0800-0899/0875.Koko Eating Bananas/Solution.py | {
"start": 0,
"end": 250
} | class ____:
def minEatingSpeed(self, piles: List[int], h: int) -> int:
def check(k: int) -> bool:
return sum((x + k - 1) // k for x in piles) <= h
return 1 + bisect_left(range(1, max(piles) + 1), True, key=check)
| Solution |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/errors.py | {
"start": 1700,
"end": 2603
} | class ____(IHaveNew):
config_type_snap: ConfigTypeSnap
incoming_fields: Sequence[str]
def __new__(
cls,
*,
config_type_snap: ConfigTypeSnap,
incoming_fields: Sequence[str],
):
check.inst_param(config_type_snap, "config_type_snap", ConfigTypeSnap)
check.param_invariant(config_type_snap.kind == ConfigTypeKind.SELECTOR, "config_type")
return super().__new__(
cls,
config_type_snap=config_type_snap,
incoming_fields=incoming_fields,
)
ERROR_DATA_UNION = Union[
FieldNotDefinedErrorData,
FieldsNotDefinedErrorData,
MissingFieldErrorData,
MissingFieldsErrorData,
RuntimeMismatchErrorData,
SelectorTypeErrorData,
SerializableErrorInfo,
FieldAliasCollisionErrorData,
]
ERROR_DATA_TYPES = ERROR_DATA_UNION.__args__ # type: ignore
@record
| SelectorTypeErrorData |
python | lepture__authlib | authlib/oauth1/rfc5849/wrapper.py | {
"start": 481,
"end": 4073
} | class ____:
def __init__(self, method, uri, body=None, headers=None):
InsecureTransportError.check(uri)
self.method = method
self.uri = uri
self.body = body
self.headers = headers or {}
# states namespaces
self.client = None
self.credential = None
self.user = None
self.query = urlparse.urlparse(uri).query
self.query_params = url_decode(self.query)
self.body_params = extract_params(body) or []
self.auth_params, self.realm = _parse_authorization_header(headers)
self.signature_type, self.oauth_params = _parse_oauth_params(
self.query_params, self.body_params, self.auth_params
)
params = []
params.extend(self.query_params)
params.extend(self.body_params)
params.extend(self.auth_params)
self.params = params
@property
def client_id(self):
return self.oauth_params.get("oauth_consumer_key")
@property
def client_secret(self):
if self.client:
return self.client.get_client_secret()
@property
def rsa_public_key(self):
if self.client:
return self.client.get_rsa_public_key()
@property
def timestamp(self):
return self.oauth_params.get("oauth_timestamp")
@property
def redirect_uri(self):
return self.oauth_params.get("oauth_callback")
@property
def signature(self):
return self.oauth_params.get("oauth_signature")
@property
def signature_method(self):
return self.oauth_params.get("oauth_signature_method")
@property
def token(self):
return self.oauth_params.get("oauth_token")
@property
def token_secret(self):
if self.credential:
return self.credential.get_oauth_token_secret()
def _filter_oauth(params):
for k, v in params:
if k.startswith("oauth_"):
yield (k, v)
def _parse_authorization_header(headers):
"""Parse an OAuth authorization header into a list of 2-tuples."""
authorization_header = headers.get("Authorization")
if not authorization_header:
return [], None
auth_scheme = "oauth "
if authorization_header.lower().startswith(auth_scheme):
items = parse_http_list(authorization_header[len(auth_scheme) :])
try:
items = parse_keqv_list(items).items()
auth_params = [(unescape(k), unescape(v)) for k, v in items]
realm = dict(auth_params).get("realm")
return auth_params, realm
except (IndexError, ValueError):
pass
raise ValueError("Malformed authorization header")
def _parse_oauth_params(query_params, body_params, auth_params):
oauth_params_set = [
(SIGNATURE_TYPE_QUERY, list(_filter_oauth(query_params))),
(SIGNATURE_TYPE_BODY, list(_filter_oauth(body_params))),
(SIGNATURE_TYPE_HEADER, list(_filter_oauth(auth_params))),
]
oauth_params_set = [params for params in oauth_params_set if params[1]]
if len(oauth_params_set) > 1:
found_types = [p[0] for p in oauth_params_set]
raise DuplicatedOAuthProtocolParameterError(
'"oauth_" params must come from only 1 signature type '
"but were found in {}".format(",".join(found_types))
)
if oauth_params_set:
signature_type = oauth_params_set[0][0]
oauth_params = dict(oauth_params_set[0][1])
else:
signature_type = None
oauth_params = {}
return signature_type, oauth_params
| OAuth1Request |
python | prabhupant__python-ds | data_structures/graphs/all_paths_between_two_vertices.py | {
"start": 208,
"end": 1148
} | class ____:
def __init__(self, vertices):
self.vertices = vertices
self.graph = [[] for i in range(vertices)]
def add_edge(self, u, v):
self.graph[u].append(v)
def count_paths_util(self, u, v, visited, counter):
visited[u] = True
# If the destination vertex is found
if u == v:
counter[0] += 1
else:
for i in range(len(self.graph[u])):
if not visited[self.graph[u][i]]:
self.count_paths_util(self.graph[u][i], v, visited, counter)
visited[u] = False
def count_paths(self, u, v):
visited = [False] * self.vertices
counter = [0]
self.count_paths_util(u, v, visited, counter)
return counter[0]
g = Graph(4)
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(0, 3)
g.add_edge(2, 0)
g.add_edge(2, 1)
g.add_edge(1, 3)
print(g.count_paths(2, 3))
| Graph |
python | pypa__setuptools | setuptools/_vendor/backports/tarfile/__init__.py | {
"start": 9951,
"end": 10033
} | class ____(TarError):
"""Base exception for header errors."""
pass
| HeaderError |
python | huggingface__transformers | src/transformers/models/voxtral/modular_voxtral.py | {
"start": 1459,
"end": 1876
} | class ____(Qwen2AudioPreTrainedModel):
_supports_flex_attn = True
_supports_cache_class = True
_supports_attention_backend = True
_can_compile_fullgraph = True
_no_split_modules = None
# TODO: @eustlb, I would really prefer to use WhisperEncoder but it's messing with modular
@auto_docstring(
custom_intro="""
The Voxtral encoder, which is a Whisper encoder.
"""
)
| VoxtralPreTrainedModel |
python | huggingface__transformers | src/transformers/models/marian/modeling_marian.py | {
"start": 56810,
"end": 61482
} | class ____(MarianPreTrainedModel, GenerationMixin):
_tied_weights_keys = {
"lm_head.weight": "model.decoder.embed_tokens.weight",
}
def __init__(self, config):
config.is_decoder = True
config.is_encoder_decoder = False
super().__init__(config)
self.model = MarianDecoderWrapper(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, MarianForCausalLM
>>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-fr-en")
>>> model = MarianForCausalLM.from_pretrained("Helsinki-NLP/opus-mt-fr-en", add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
>>> list(logits.shape) == expected_shape
True
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = outputs[0]
# Only compute necessary logits
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
labels = labels.to(logits.device)
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
__all__ = ["MarianForCausalLM", "MarianModel", "MarianMTModel", "MarianPreTrainedModel"]
| MarianForCausalLM |
python | networkx__networkx | networkx/exception.py | {
"start": 1495,
"end": 1658
} | class ____(NetworkXAlgorithmError):
"""Exception raised by algorithms trying to solve a problem
instance that has no feasible solution."""
| NetworkXUnfeasible |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/newType1.py | {
"start": 1982,
"end": 2066
} | class ____(ABC):
@abstractmethod
def method1(self, /) -> int: ...
| AbstractBase |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/relationship.py | {
"start": 5220,
"end": 5565
} | class ____:
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
b_id: Mapped[int] = mapped_column(ForeignKey("b.id"))
number: Mapped[int] = mapped_column(primary_key=True)
number2: Mapped[int] = mapped_column(primary_key=True)
if TYPE_CHECKING:
__table__: ClassVar[Table]
@mapper_registry.mapped
| A |
python | pypa__hatch | backend/src/hatchling/bridge/app.py | {
"start": 82,
"end": 3242
} | class ____:
"""
The way output is displayed can be [configured](../config/hatch.md#terminal) by users.
!!! important
Never import this directly; Hatch judiciously decides if a type of plugin requires
the capabilities herein and will grant access via an attribute.
"""
def __init__(self) -> None:
self.__verbosity = int(os.environ.get("HATCH_VERBOSE", "0")) - int(os.environ.get("HATCH_QUIET", "0"))
@property
def verbosity(self) -> int:
"""
The verbosity level of the application, with 0 as the default.
"""
return self.__verbosity
@staticmethod
def display(message: str = "", **kwargs: Any) -> None: # noqa: ARG004
# Do not document
_display(message, always=True)
def display_info(self, message: str = "", **kwargs: Any) -> None: # noqa: ARG002
"""
Meant to be used for messages conveying basic information.
"""
if self.__verbosity >= 0:
_display(message)
def display_waiting(self, message: str = "", **kwargs: Any) -> None: # noqa: ARG002
"""
Meant to be used for messages shown before potentially time consuming operations.
"""
if self.__verbosity >= 0:
_display(message)
def display_success(self, message: str = "", **kwargs: Any) -> None: # noqa: ARG002
"""
Meant to be used for messages indicating some positive outcome.
"""
if self.__verbosity >= 0:
_display(message)
def display_warning(self, message: str = "", **kwargs: Any) -> None: # noqa: ARG002
"""
Meant to be used for messages conveying important information.
"""
if self.__verbosity >= -1:
_display(message)
def display_error(self, message: str = "", **kwargs: Any) -> None: # noqa: ARG002
"""
Meant to be used for messages indicating some unrecoverable error.
"""
if self.__verbosity >= -2: # noqa: PLR2004
_display(message)
def display_debug(self, message: str = "", level: int = 1, **kwargs: Any) -> None: # noqa: ARG002
"""
Meant to be used for messages that are not useful for most user experiences.
The `level` option must be between 1 and 3 (inclusive).
"""
if not 1 <= level <= 3: # noqa: PLR2004
error_message = "Debug output can only have verbosity levels between 1 and 3 (inclusive)"
raise ValueError(error_message)
if self.__verbosity >= level:
_display(message)
def display_mini_header(self, message: str = "", **kwargs: Any) -> None: # noqa: ARG002
if self.__verbosity >= 0:
_display(f"[{message}]")
def abort(self, message: str = "", code: int = 1, **kwargs: Any) -> None: # noqa: ARG002
"""
Terminate the program with the given return code.
"""
if message and self.__verbosity >= -2: # noqa: PLR2004
_display(message)
sys.exit(code)
def get_safe_application(self) -> SafeApplication:
return SafeApplication(self)
| Application |
python | jazzband__django-formtools | tests/wizard/namedwizardtests/forms.py | {
"start": 1485,
"end": 1599
} | class ____(ContactWizard):
storage_name = 'formtools.wizard.storage.session.SessionStorage'
| SessionContactWizard |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 211458,
"end": 212920
} | class ____(Operation):
def call(self, xs):
return backend.numpy.vstack(xs)
def compute_output_spec(self, xs):
first_shape = xs[0].shape
total_size_on_axis = 0
dtypes_to_resolve = []
for x in xs:
if not shape_equal(x.shape, first_shape, axis=[0], allow_none=True):
raise ValueError(
"Every value in `xs` must have the same shape except on "
f"the `axis` dim. But found element of shape {x.shape}, "
f"which is different from the first element's "
f"shape {first_shape}."
)
if total_size_on_axis is None or x.shape[0] is None:
total_size_on_axis = None
else:
total_size_on_axis += x.shape[0]
dtypes_to_resolve.append(getattr(x, "dtype", type(x)))
output_shape = list(first_shape)
output_shape[0] = total_size_on_axis
output_dtype = dtypes.result_type(*dtypes_to_resolve)
return KerasTensor(output_shape, output_dtype)
@keras_export(["keras.ops.vstack", "keras.ops.numpy.vstack"])
def vstack(xs):
"""Stack tensors in sequence vertically (row wise).
Args:
xs: Sequence of tensors.
Returns:
Tensor formed by stacking the given tensors.
"""
if any_symbolic_tensors((xs,)):
return Vstack().symbolic_call(xs)
return backend.numpy.vstack(xs)
| Vstack |
python | getsentry__sentry | tests/sentry/db/test_router.py | {
"start": 343,
"end": 5206
} | class ____(TestCase):
"""Simulated mode can resolve both silos to separate connections"""
@override_settings(SILO_MODE=None)
def test_simulated_no_silo(self) -> None:
# Simulated silo mode should work the same as with a silo mode defined..
router = SiloRouter()
router.use_simulated(True)
assert "default" == router.db_for_read(Organization)
assert "default" == router.db_for_write(Organization)
assert router.allow_migrate("default", "sentry", Organization)
assert not router.allow_migrate("control", "sentry", Organization)
assert "control" == router.db_for_write(Permission)
assert "control" == router.db_for_read(User)
assert "control" == router.db_for_write(User)
assert router.allow_migrate("control", "sentry", User)
assert not router.allow_migrate("default", "sentry", User)
assert not router.allow_migrate("default", "django.contrib.auth", Permission)
assert router.allow_migrate("control", "django.contrib.auth", Permission)
assert not router.allow_migrate(
"default", "sentry", model=None, tables=["jira_ac_tenant"]
), "Removed tables should not error and not route"
@override_settings(SILO_MODE="CONTROL")
def test_for_control(self) -> None:
router = SiloRouter()
router.use_simulated(True)
assert "default" == router.db_for_read(Organization)
assert "default" == router.db_for_write(Organization)
assert router.allow_migrate("default", "sentry", Organization)
assert not router.allow_migrate("control", "sentry", Organization)
assert "control" == router.db_for_read(User)
assert "control" == router.db_for_write(User)
assert router.allow_migrate("control", "sentry", User)
assert not router.allow_migrate("default", "sentry", User)
assert not router.allow_migrate(
"default", "sentry", model=None, tables=["jira_ac_tenant"]
), "Removed tables should not error and not route"
@override_settings(SILO_MODE="REGION")
def test_for_region(self) -> None:
router = SiloRouter()
router.use_simulated(True)
assert "default" == router.db_for_read(Organization)
assert "default" == router.db_for_write(Organization)
assert router.allow_migrate("default", "sentry", Organization)
assert not router.allow_migrate("control", "sentry", Organization)
assert "control" == router.db_for_read(User)
assert "control" == router.db_for_write(User)
assert router.allow_migrate("control", "sentry", User)
assert not router.allow_migrate("default", "sentry", User)
@override_settings(SILO_MODE="MONOLITH")
def test_for_monolith_simulated(self) -> None:
router = SiloRouter()
router.use_simulated(True)
assert "default" == router.db_for_read(Organization)
assert "control" == router.db_for_read(User)
assert "default" == router.db_for_write(Organization)
assert "control" == router.db_for_write(User)
assert router.allow_migrate("default", "sentry", Organization)
assert not router.allow_migrate("control", "sentry", Organization)
assert router.allow_migrate("control", "sentry", User)
assert not router.allow_migrate("default", "sentry", User)
@pytest.mark.skipif(use_split_dbs(), reason="requires single db mode")
@override_settings(SILO_MODE="MONOLITH")
def test_for_monolith_single(self) -> None:
router = SiloRouter()
assert "default" == router.db_for_read(Organization)
assert "default" == router.db_for_read(User)
assert "default" == router.db_for_write(Organization)
assert "default" == router.db_for_write(User)
assert router.allow_migrate("default", "sentry", Organization)
assert router.allow_migrate("default", "sentry", User)
@pytest.mark.skipif(not use_split_dbs(), reason="requires split db mode")
@override_settings(SILO_MODE="MONOLITH")
def test_for_monolith_split(self) -> None:
router = SiloRouter()
assert "default" == router.db_for_read(Organization)
assert "control" == router.db_for_read(User)
assert "default" == router.db_for_write(Organization)
assert "control" == router.db_for_write(User)
assert router.allow_migrate("default", "sentry", Organization)
assert router.allow_migrate("control", "sentry", User)
@pytest.mark.skipif(not use_split_dbs(), reason="requires split db mode")
@override_settings(SILO_MODE="REGION")
def test_removed_region_model(self) -> None:
router = SiloRouter()
assert router.allow_migrate(
"default", "sentry", hints={"tables": ["sentry_pagerdutyservice"]}
)
| SiloRouterSimulatedTest |
python | pallets__werkzeug | examples/coolmagic/application.py | {
"start": 619,
"end": 2468
} | class ____:
"""
The application class. It's passed a directory with configuration values.
"""
def __init__(self, config):
self.config = config
for fn in listdir(path.join(path.dirname(__file__), "views")):
if fn.endswith(".py") and fn != "__init__.py":
__import__(f"coolmagic.views.{fn[:-3]}")
from coolmagic.utils import exported_views
rules = [
# url for shared data. this will always be unmatched
# because either the middleware or the webserver
# handles that request first.
Rule("/public/<path:file>", endpoint="shared_data")
]
self.views = {}
for endpoint, (func, rule, extra) in exported_views.items():
if rule is not None:
rules.append(Rule(rule, endpoint=endpoint, **extra))
self.views[endpoint] = func
self.url_map = Map(rules)
def __call__(self, environ, start_response):
urls = self.url_map.bind_to_environ(environ)
req = Request(environ, urls)
try:
endpoint, args = urls.match(req.path)
resp = self.views[endpoint](**args)
except NotFound:
resp = self.views["static.not_found"]()
except (HTTPException, RequestRedirect) as e:
resp = e
return resp(environ, start_response)
def make_app(config=None):
"""
Factory function that creates a new `CoolmagicApplication`
object. Optional WSGI middlewares should be applied here.
"""
config = config or {}
app = CoolMagicApplication(config)
# static stuff
app = SharedDataMiddleware(
app, {"/public": path.join(path.dirname(__file__), "public")}
)
# clean up locals
app = local_manager.make_middleware(app)
return app
| CoolMagicApplication |
python | walkccc__LeetCode | solutions/3424. Minimum Cost to Make Arrays Identical/3424.py | {
"start": 0,
"end": 259
} | class ____:
def minCost(self, arr: list[int], brr: list[int], k: int) -> int:
def cost(arr: list[int], brr: list[int]) -> int:
return sum(abs(a - b) for a, b in zip(arr, brr))
return min(cost(arr, brr), cost(sorted(arr), sorted(brr)) + k)
| Solution |
python | tensorflow__tensorflow | tensorflow/python/ops/nn_grad_test.py | {
"start": 4720,
"end": 6750
} | class ____(test.TestCase):
def run_test(self, x, y):
with self.test_session():
error = gradient_checker.compute_gradient_error(x,
x.get_shape().as_list(),
y,
y.get_shape().as_list())
self.assertLess(error, 1e-3)
@test_util.run_deprecated_v1
def testDepthwiseConv2dGradWRTInput(self):
x = array_ops.placeholder(
dtype=dtypes.float32, shape=[1, 4, 4, 3], name='input')
f = constant_op.constant([0.5],
dtype=dtypes.float32,
shape=[2, 2, 3, 2],
name='filter')
strides = [1, 1, 1, 1]
padding = 'SAME'
y = nn_impl.depthwise_conv2d(x, f, strides, padding)
self.run_test(x, y)
@test_util.run_deprecated_v1
def testDepthwiseConv2dGradWRTFilter(self):
x = constant_op.constant([0.5],
dtype=dtypes.float32,
shape=[1, 4, 4, 3],
name='input')
f = array_ops.placeholder(
dtype=dtypes.float32, shape=[2, 2, 3, 2], name='filter')
strides = [1, 1, 1, 1]
padding = 'SAME'
y = nn_impl.depthwise_conv2d(x, f, strides, padding)
self.run_test(f, y)
@test_util.run_deprecated_v1
def testDepthwiseConv2dBackpropFilterGrad(self):
x = array_ops.placeholder(
dtype=dtypes.float32, shape=[1, 4, 4, 3], name='input')
f = constant_op.constant([0.5],
dtype=dtypes.float32,
shape=[2, 2, 3, 2],
name='filter')
strides = [1, 1, 1, 1]
padding = 'SAME'
out = nn_impl.depthwise_conv2d(x, f, strides, padding)
grad_wrt_input = gradients_impl.gradients(out, x)[0]
self.run_test(f, grad_wrt_input)
grad_wrt_filter = gradients_impl.gradients(out, f)[0]
self.run_test(x, grad_wrt_filter)
| DepthwiseConv2dTest |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_trace_item_attributes.py | {
"start": 28577,
"end": 33662
} | class ____(
OrganizationTraceItemAttributesEndpointTestBase, TraceMetricsTestCase
):
feature_flags = {"organizations:tracemetrics-enabled": True}
item_type = SupportedTraceItemType.TRACEMETRICS
def test_no_feature(self) -> None:
response = self.do_request(features={})
assert response.status_code == 404, response.content
def test_invalid_item_type(self) -> None:
response = self.do_request(query={"itemType": "invalid"})
assert response.status_code == 400, response.content
assert response.data == {
"itemType": [
ErrorDetail(string='"invalid" is not a valid choice.', code="invalid_choice")
],
}
def test_trace_metrics_string_attributes(self) -> None:
"""Test that we can successfully retrieve string attributes from trace metrics"""
metrics = [
self.create_trace_metric(
metric_name="http.request.duration",
metric_value=123.45,
metric_type="distribution",
organization=self.organization,
project=self.project,
attributes={
"http.method": "GET",
"http.status_code": "200",
"environment": "production",
},
),
self.create_trace_metric(
metric_name="http.request.duration",
metric_value=234.56,
metric_type="distribution",
organization=self.organization,
project=self.project,
attributes={
"http.method": "POST",
"http.status_code": "201",
"environment": "staging",
},
),
]
self.store_trace_metrics(metrics)
response = self.do_request(query={"attributeType": "string"})
assert response.status_code == 200, response.content
data = response.data
assert len(data) > 0
# Verify that our custom attributes are returned
attribute_keys = {item["key"] for item in data}
assert "http.method" in attribute_keys
assert "http.status_code" in attribute_keys
# Environment may be stored as tags[environment,string]
assert "environment" in attribute_keys or "tags[environment,string]" in attribute_keys
def test_trace_metrics_filter_by_metric_name(self) -> None:
"""Test that we can filter trace metrics attributes by metric name using query parameter"""
metrics = [
self.create_trace_metric(
metric_name="http.request.duration",
metric_value=100.0,
metric_type="distribution",
organization=self.organization,
project=self.project,
attributes={
"http.method": "GET",
"http.route": "/api/users",
},
),
self.create_trace_metric(
metric_name="database.query.duration",
metric_value=50.0,
metric_type="distribution",
organization=self.organization,
project=self.project,
attributes={
"db.system": {"string_value": "postgresql"},
"db.operation": {"string_value": "SELECT"},
},
),
]
self.store_trace_metrics(metrics)
# Query for http metric attributes
response = self.do_request(
query={
"attributeType": "string",
"query": 'metric.name:"http.request.duration"',
}
)
assert response.status_code == 200, response.content
data = response.data
attribute_keys = {item["key"] for item in data}
# Should include HTTP attributes
assert "http.method" in attribute_keys or "http.route" in attribute_keys
def test_trace_metrics_number_attributes(self) -> None:
"""Test that we can retrieve number attributes from trace metrics"""
metrics = [
self.create_trace_metric(
metric_name="custom.metric",
metric_value=100.0,
metric_type="distribution",
organization=self.organization,
project=self.project,
attributes={
"request.size": {"int_value": 1024},
"response.time": {"double_value": 42.5},
},
),
]
self.store_trace_metrics(metrics)
response = self.do_request(query={"attributeType": "number"})
assert response.status_code == 200, response.content
data = response.data
# Verify number attributes are returned
# Note: The exact keys depend on how the backend processes numeric attributes
assert len(data) >= 0 # May be 0 if number attributes are handled differently
| OrganizationTraceItemAttributesEndpointTraceMetricsTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 30138,
"end": 30514
} | class ____(sgqlc.types.Enum):
"""Properties by which issue connections can be ordered.
Enumeration Choices:
* `COMMENTS`: Order issues by comment count
* `CREATED_AT`: Order issues by creation time
* `UPDATED_AT`: Order issues by update time
"""
__schema__ = github_schema
__choices__ = ("COMMENTS", "CREATED_AT", "UPDATED_AT")
| IssueOrderField |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 5728,
"end": 5787
} | class ____(PolymorphicModel):
pass
| ParentModelWithManager |
python | spack__spack | lib/spack/spack/multimethod.py | {
"start": 11655,
"end": 12039
} | class ____(spack.error.SpackError):
"""Raised when we can't find a version of a multi-method."""
def __init__(self, cls, method_name, spec, possible_specs):
super().__init__(
"Package %s does not support %s called with %s. Options are: %s"
% (cls.__name__, method_name, spec, ", ".join(str(s) for s in possible_specs))
)
| NoSuchMethodError |
python | huggingface__transformers | src/transformers/models/mra/modeling_mra.py | {
"start": 9088,
"end": 18228
} | class ____:
@staticmethod
def operator_call(sparse_query, indices, query_num_block, key_num_block):
batch_size, num_block, block_size, _ = sparse_query.size()
if len(sparse_query.size()) != 4:
raise ValueError("sparse_query must be a 4-dimensional tensor.")
if len(indices.size()) != 2:
raise ValueError("indices must be a 2-dimensional tensor.")
_, _, block_size, _ = sparse_query.size()
batch_size, num_block = indices.size()
sparse_query = sparse_query.sum(dim=2).reshape(batch_size * num_block, block_size)
batch_idx = torch.arange(indices.size(0), dtype=torch.long, device=indices.device)
global_idxes = (
torch.div(indices, key_num_block, rounding_mode="floor").long() + batch_idx[:, None] * query_num_block
).reshape(batch_size * num_block)
temp = torch.zeros(
(batch_size * query_num_block, block_size), dtype=sparse_query.dtype, device=sparse_query.device
)
output = temp.index_add(0, global_idxes, sparse_query).reshape(batch_size, query_num_block, block_size)
output = output.reshape(batch_size, query_num_block * block_size)
return output
def get_low_resolution_logit(query, key, block_size, mask=None, value=None):
"""
Compute low resolution approximation.
"""
batch_size, seq_len, head_dim = query.size()
num_block_per_row = seq_len // block_size
value_hat = None
if mask is not None:
token_count = mask.reshape(batch_size, num_block_per_row, block_size).sum(dim=-1)
query_hat = query.reshape(batch_size, num_block_per_row, block_size, head_dim).sum(dim=-2) / (
token_count[:, :, None] + 1e-6
)
key_hat = key.reshape(batch_size, num_block_per_row, block_size, head_dim).sum(dim=-2) / (
token_count[:, :, None] + 1e-6
)
if value is not None:
value_hat = value.reshape(batch_size, num_block_per_row, block_size, head_dim).sum(dim=-2) / (
token_count[:, :, None] + 1e-6
)
else:
token_count = block_size * torch.ones(batch_size, num_block_per_row, dtype=torch.float, device=query.device)
query_hat = query.reshape(batch_size, num_block_per_row, block_size, head_dim).mean(dim=-2)
key_hat = key.reshape(batch_size, num_block_per_row, block_size, head_dim).mean(dim=-2)
if value is not None:
value_hat = value.reshape(batch_size, num_block_per_row, block_size, head_dim).mean(dim=-2)
low_resolution_logit = torch.matmul(query_hat, key_hat.transpose(-1, -2)) / math.sqrt(head_dim)
low_resolution_logit_row_max = low_resolution_logit.max(dim=-1, keepdims=True).values
if mask is not None:
low_resolution_logit = (
low_resolution_logit - 1e4 * ((token_count[:, None, :] * token_count[:, :, None]) < 0.5).float()
)
return low_resolution_logit, token_count, low_resolution_logit_row_max, value_hat
def get_block_idxes(
low_resolution_logit, num_blocks, approx_mode, initial_prior_first_n_blocks, initial_prior_diagonal_n_blocks
):
"""
Compute the indices of the subset of components to be used in the approximation.
"""
batch_size, total_blocks_per_row, _ = low_resolution_logit.shape
if initial_prior_diagonal_n_blocks > 0:
offset = initial_prior_diagonal_n_blocks // 2
temp_mask = torch.ones(total_blocks_per_row, total_blocks_per_row, device=low_resolution_logit.device)
diagonal_mask = torch.tril(torch.triu(temp_mask, diagonal=-offset), diagonal=offset)
low_resolution_logit = low_resolution_logit + diagonal_mask[None, :, :] * 5e3
if initial_prior_first_n_blocks > 0:
low_resolution_logit[:, :initial_prior_first_n_blocks, :] = (
low_resolution_logit[:, :initial_prior_first_n_blocks, :] + 5e3
)
low_resolution_logit[:, :, :initial_prior_first_n_blocks] = (
low_resolution_logit[:, :, :initial_prior_first_n_blocks] + 5e3
)
top_k_vals = torch.topk(
low_resolution_logit.reshape(batch_size, -1), num_blocks, dim=-1, largest=True, sorted=False
)
indices = top_k_vals.indices
if approx_mode == "full":
threshold = top_k_vals.values.min(dim=-1).values
high_resolution_mask = (low_resolution_logit >= threshold[:, None, None]).float()
elif approx_mode == "sparse":
high_resolution_mask = None
else:
raise ValueError(f"{approx_mode} is not a valid approx_model value.")
return indices, high_resolution_mask
def mra2_attention(
query,
key,
value,
mask,
num_blocks,
approx_mode,
block_size=32,
initial_prior_first_n_blocks=0,
initial_prior_diagonal_n_blocks=0,
):
"""
Use Mra to approximate self-attention.
"""
if mra_cuda_kernel is None:
return torch.zeros_like(query).requires_grad_()
batch_size, num_head, seq_len, head_dim = query.size()
meta_batch = batch_size * num_head
if seq_len % block_size != 0:
raise ValueError("sequence length must be divisible by the block_size.")
num_block_per_row = seq_len // block_size
query = query.reshape(meta_batch, seq_len, head_dim)
key = key.reshape(meta_batch, seq_len, head_dim)
value = value.reshape(meta_batch, seq_len, head_dim)
if mask is not None:
query = query * mask[:, :, None]
key = key * mask[:, :, None]
value = value * mask[:, :, None]
if approx_mode == "full":
low_resolution_logit, token_count, low_resolution_logit_row_max, value_hat = get_low_resolution_logit(
query, key, block_size, mask, value
)
elif approx_mode == "sparse":
with torch.no_grad():
low_resolution_logit, token_count, low_resolution_logit_row_max, _ = get_low_resolution_logit(
query, key, block_size, mask
)
else:
raise Exception('approx_mode must be "full" or "sparse"')
with torch.no_grad():
low_resolution_logit_normalized = low_resolution_logit - low_resolution_logit_row_max
indices, high_resolution_mask = get_block_idxes(
low_resolution_logit_normalized,
num_blocks,
approx_mode,
initial_prior_first_n_blocks,
initial_prior_diagonal_n_blocks,
)
high_resolution_logit = MraSampledDenseMatMul.operator_call(
query, key, indices, block_size=block_size
) / math.sqrt(head_dim)
max_vals, max_vals_scatter = sparse_max(high_resolution_logit, indices, num_block_per_row, num_block_per_row)
high_resolution_logit = high_resolution_logit - max_vals_scatter
if mask is not None:
high_resolution_logit = high_resolution_logit - 1e4 * (1 - sparse_mask(mask, indices)[:, :, :, None])
high_resolution_attn = torch.exp(high_resolution_logit)
high_resolution_attn_out = MraSparseDenseMatMul.operator_call(
high_resolution_attn, indices, value, num_block_per_row
)
high_resolution_normalizer = MraReduceSum.operator_call(
high_resolution_attn, indices, num_block_per_row, num_block_per_row
)
if approx_mode == "full":
low_resolution_attn = (
torch.exp(low_resolution_logit - low_resolution_logit_row_max - 1e4 * high_resolution_mask)
* token_count[:, None, :]
)
low_resolution_attn_out = (
torch.matmul(low_resolution_attn, value_hat)[:, :, None, :]
.repeat(1, 1, block_size, 1)
.reshape(meta_batch, seq_len, head_dim)
)
low_resolution_normalizer = (
low_resolution_attn.sum(dim=-1)[:, :, None].repeat(1, 1, block_size).reshape(meta_batch, seq_len)
)
log_correction = low_resolution_logit_row_max.repeat(1, 1, block_size).reshape(meta_batch, seq_len) - max_vals
if mask is not None:
log_correction = log_correction * mask
low_resolution_corr = torch.exp(log_correction * (log_correction <= 0).float())
low_resolution_attn_out = low_resolution_attn_out * low_resolution_corr[:, :, None]
low_resolution_normalizer = low_resolution_normalizer * low_resolution_corr
high_resolution_corr = torch.exp(-log_correction * (log_correction > 0).float())
high_resolution_attn_out = high_resolution_attn_out * high_resolution_corr[:, :, None]
high_resolution_normalizer = high_resolution_normalizer * high_resolution_corr
context_layer = (high_resolution_attn_out + low_resolution_attn_out) / (
high_resolution_normalizer[:, :, None] + low_resolution_normalizer[:, :, None] + 1e-6
)
elif approx_mode == "sparse":
context_layer = high_resolution_attn_out / (high_resolution_normalizer[:, :, None] + 1e-6)
else:
raise Exception('config.approx_mode must be "full" or "sparse"')
if mask is not None:
context_layer = context_layer * mask[:, :, None]
context_layer = context_layer.reshape(batch_size, num_head, seq_len, head_dim)
return context_layer
| MraReduceSum |
python | ray-project__ray | python/ray/data/_internal/datasource/json_datasource.py | {
"start": 6390,
"end": 9730
} | class ____(FileBasedDatasource):
# Buffer size in bytes for reading files. Default is 1MB.
#
# pandas reads data in small chunks (~8 KiB), which leads to many costly
# small read requests when accessing cloud storage. To reduce overhead and
# improve performance, we wrap the file in a larger buffered reader that
# reads bigger blocks at once.
_BUFFER_SIZE = 1024**2
# In the case of zipped json files, we cannot infer the chunk_size.
_DEFAULT_CHUNK_SIZE = 10000
def __init__(
self,
paths: Union[str, List[str]],
target_output_size_bytes: int,
**file_based_datasource_kwargs,
):
super().__init__(paths, **file_based_datasource_kwargs)
self._target_output_size_bytes = target_output_size_bytes
def _read_stream(self, f: "pyarrow.NativeFile", path: str):
chunksize = self._estimate_chunksize(f)
stream = StrictBufferedReader(f, buffer_size=self._BUFFER_SIZE)
if chunksize is None:
# When chunksize=None, pandas returns DataFrame directly (no context manager)
df = pd.read_json(stream, chunksize=chunksize, lines=True)
yield _cast_range_index_to_string(df)
else:
# When chunksize is a number, pandas returns JsonReader (supports context manager)
with pd.read_json(stream, chunksize=chunksize, lines=True) as reader:
for df in reader:
yield _cast_range_index_to_string(df)
def _estimate_chunksize(self, f: "pyarrow.NativeFile") -> Optional[int]:
"""Estimate the chunksize by sampling the first row.
This is necessary to avoid OOMs while reading the file.
"""
if not f.seekable():
return self._DEFAULT_CHUNK_SIZE
assert f.tell() == 0, "File pointer must be at the beginning"
if self._target_output_size_bytes is None:
return None
stream = StrictBufferedReader(f, buffer_size=self._BUFFER_SIZE)
with pd.read_json(stream, chunksize=1, lines=True) as reader:
try:
df = _cast_range_index_to_string(next(reader))
except StopIteration:
return 1
block_accessor = PandasBlockAccessor.for_block(df)
if block_accessor.num_rows() == 0:
chunksize = 1
else:
bytes_per_row = block_accessor.size_bytes() / block_accessor.num_rows()
chunksize = max(round(self._target_output_size_bytes / bytes_per_row), 1)
# Reset file pointer to the beginning.
f.seek(0)
return chunksize
def _open_input_source(
self,
filesystem: "pyarrow.fs.FileSystem",
path: str,
**open_args,
) -> "pyarrow.NativeFile":
compression = self.resolve_compression(path, open_args)
if compression is None:
# We use a seekable file to estimate chunksize.
return filesystem.open_input_file(path)
return super()._open_input_source(filesystem, path, **open_args)
def _cast_range_index_to_string(df: pd.DataFrame):
# NOTE: PandasBlockAccessor doesn't support RangeIndex, so we need to convert
# to string.
if isinstance(df.columns, pd.RangeIndex):
df.columns = df.columns.astype(str)
return df
| PandasJSONDatasource |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/attributes.py | {
"start": 338,
"end": 373
} | class ____:
token: str = ""
| Token |
python | sanic-org__sanic | sanic/request/types.py | {
"start": 2405,
"end": 37161
} | class ____(Generic[sanic_type, ctx_type]):
"""State of HTTP request.
Args:
url_bytes (bytes): Raw URL bytes.
headers (Header): Request headers.
version (str): HTTP version.
method (str): HTTP method.
transport (TransportProtocol): Transport protocol.
app (Sanic): Sanic instance.
head (bytes, optional): Request head. Defaults to `b""`.
stream_id (int, optional): HTTP/3 stream ID. Defaults to `0`.
"""
_current: ContextVar[Request] = ContextVar("request")
_loads = json_loads
__slots__ = (
"__weakref__",
"_cookies",
"_ctx",
"_id",
"_ip",
"_parsed_url",
"_port",
"_protocol",
"_remote_addr",
"_request_middleware_started",
"_response_middleware_started",
"_scheme",
"_socket",
"_stream_id",
"_match_info",
"_name",
"app",
"body",
"conn_info",
"head",
"headers",
"method",
"parsed_accept",
"parsed_args",
"parsed_cookies",
"parsed_credentials",
"parsed_files",
"parsed_form",
"parsed_forwarded",
"parsed_json",
"parsed_not_grouped_args",
"parsed_token",
"raw_url",
"responded",
"route",
"stream",
"transport",
"version",
)
def __init__(
self,
url_bytes: bytes,
headers: Header,
version: str,
method: str,
transport: TransportProtocol,
app: sanic_type,
head: bytes = b"",
stream_id: int = 0,
):
self.raw_url = url_bytes
try:
self._parsed_url = parse_url(url_bytes)
except HttpParserInvalidURLError:
url = url_bytes.decode(errors="backslashreplace")
raise BadURL(f"Bad URL: {url}")
self._id: Optional[Union[uuid.UUID, str, int]] = None
self._name: Optional[str] = None
self._stream_id = stream_id
self.app = app
self.headers = Header(headers)
self.version = version
self.method = method
self.transport = transport
self.head = head
# Init but do not inhale
self.body = b""
self.conn_info: Optional[ConnInfo] = None
self._ctx: Optional[ctx_type] = None
self.parsed_accept: Optional[AcceptList] = None
self.parsed_args: DefaultDict[
tuple[bool, bool, str, str], RequestParameters
] = defaultdict(RequestParameters)
self.parsed_cookies: Optional[RequestParameters] = None
self.parsed_credentials: Optional[Credentials] = None
self.parsed_files: Optional[RequestParameters] = None
self.parsed_form: Optional[RequestParameters] = None
self.parsed_forwarded: Optional[Options] = None
self.parsed_json = None
self.parsed_not_grouped_args: DefaultDict[
tuple[bool, bool, str, str], list[tuple[str, str]]
] = defaultdict(list)
self.parsed_token: Optional[str] = None
self._request_middleware_started = False
self._response_middleware_started = False
self.responded: bool = False
self.route: Optional[Route] = None
self.stream: Optional[Stream] = None
self._match_info: dict[str, Any] = {}
self._protocol: Optional[BaseProtocol] = None
def __repr__(self):
class_name = self.__class__.__name__
return f"<{class_name}: {self.method} {self.path}>"
@staticmethod
def make_context() -> ctx_type:
"""Create a new context object.
This method is called when a new request context is pushed. It is
a great candidate for overriding in a subclass if you want to
control the type of context object that is created.
By default, it returns a `types.SimpleNamespace` instance.
Returns:
ctx_type: A new context object.
"""
return cast(ctx_type, SimpleNamespace())
@classmethod
def get_current(cls) -> Request:
"""Retrieve the current request object
This implements [Context Variables](https://docs.python.org/3/library/contextvars.html)
to allow for accessing the current request from anywhere.
A typical usecase is when you want to access the current request
from a function that is not a handler, such as a logging function:
```python
import logging
class LoggingFormater(logging.Formatter):
def format(self, record):
request = Request.get_current()
record.url = request.url
record.ip = request.ip
return super().format(record)
```
Returns:
Request: The current request object
Raises:
sanic.exceptions.ServerError: If it is outside of a request
lifecycle.
""" # noqa: E501
request = cls._current.get(None)
if not request:
raise ServerError("No current request")
return request
@classmethod
def generate_id(*_) -> Union[uuid.UUID, str, int]:
"""Generate a unique ID for the request.
This method is called to generate a unique ID for each request.
By default, it returns a `uuid.UUID` instance.
Returns:
Union[uuid.UUID, str, int]: A unique ID for the request.
"""
return uuid.uuid4()
@property
def ctx(self) -> ctx_type:
"""The current request context.
This is a context object for the current request. It is created
by `Request.make_context` and is a great place to store data
that you want to be accessible during the request lifecycle.
Returns:
ctx_type: The current request context.
"""
if not self._ctx:
self._ctx = self.make_context()
return self._ctx
@property
def stream_id(self) -> int:
"""Access the HTTP/3 stream ID.
Raises:
sanic.exceptions.ServerError: If the request is not HTTP/3.
Returns:
int: The HTTP/3 stream ID.
"""
if self.protocol.version is not HTTP.VERSION_3:
raise ServerError(
"Stream ID is only a property of a HTTP/3 request"
)
return self._stream_id
def reset_response(self) -> None:
"""Reset the response object.
This clears much of the state of the object. It should
generally not be called directly, but is called automatically as
part of the request lifecycle.
Raises:
sanic.exceptions.ServerError: If the response has already been
sent.
"""
try:
if (
self.stream is not None
and self.stream.stage is not Stage.HANDLER
):
raise ServerError(
"Cannot reset response because previous response was sent."
)
self.stream.response.stream = None # type: ignore
self.stream.response = None # type: ignore
self.responded = False
except AttributeError:
pass
async def respond(
self,
response: Optional[BaseHTTPResponse] = None,
*,
status: int = 200,
headers: Optional[Union[Header, dict[str, str]]] = None,
content_type: Optional[str] = None,
):
"""Respond to the request without returning.
This method can only be called once, as you can only respond once.
If no ``response`` argument is passed, one will be created from the
``status``, ``headers`` and ``content_type`` arguments.
**The first typical usecase** is if you wish to respond to the
request without returning from the handler:
```python
@app.get("/")
async def handler(request: Request):
data = ... # Process something
json_response = json({"data": data})
await request.respond(json_response)
@app.on_response
async def add_header(_, response: HTTPResponse):
# Middlewares still get executed as expected
response.headers["one"] = "two"
```
**The second possible usecase** is for when you want to directly
respond to the request:
```python
response = await request.respond(content_type="text/csv")
await response.send("foo,")
await response.send("bar")
# You can control the completion of the response by calling
# the 'eof()' method:
await response.eof()
```
Args:
response (ResponseType): Response instance to send.
status (int): Status code to return in the response.
headers (Optional[Dict[str, str]]): Headers to return in the response, defaults to None.
content_type (Optional[str]): Content-Type header of the response, defaults to None.
Returns:
FinalResponseType: Final response being sent (may be different from the
"response" parameter because of middlewares), which can be
used to manually send data.
""" # noqa: E501
try:
if self.stream is not None and self.stream.response:
raise ServerError("Second respond call is not allowed.")
except AttributeError:
pass
# This logic of determining which response to use is subject to change
if response is None:
response = HTTPResponse(
status=status,
headers=headers,
content_type=content_type,
)
# Connect the response
if isinstance(response, BaseHTTPResponse) and self.stream:
response = self.stream.respond(response)
if isawaitable(response):
response = await response # type: ignore
# Run response middleware
try:
middleware = (
self.route and self.route.extra.response_middleware
) or self.app.response_middleware
if middleware and not self._response_middleware_started:
self._response_middleware_started = True
response = await self.app._run_response_middleware(
self, response, middleware
)
except CancelledErrors:
raise
except Exception:
error_logger.exception(
"Exception occurred in one of response middleware handlers"
)
self.responded = True
return response
async def receive_body(self):
"""Receive request.body, if not already received.
Streaming handlers may call this to receive the full body. Sanic calls
this function before running any handlers of non-streaming routes.
Custom request classes can override this for custom handling of both
streaming and non-streaming routes.
"""
if not self.body:
self.body = b"".join([data async for data in self.stream])
@property
def name(self) -> Optional[str]:
"""The route name
In the following pattern:
```
<AppName>.[<BlueprintName>.]<HandlerName>
```
Returns:
Optional[str]: The route name
"""
if self._name:
return self._name
elif self.route:
return self.route.name
return None
@property
def endpoint(self) -> Optional[str]:
"""Alias of `sanic.request.Request.name`
Returns:
Optional[str]: The route name
"""
return self.name
@property
def uri_template(self) -> Optional[str]:
"""The defined URI template
Returns:
Optional[str]: The defined URI template
"""
if self.route:
return f"/{self.route.path}"
return None
@property
def protocol(self) -> TransportProtocol:
"""The HTTP protocol instance
Returns:
Protocol: The HTTP protocol instance
"""
if not self._protocol:
self._protocol = self.transport.get_protocol()
return self._protocol # type: ignore
@property
def raw_headers(self) -> bytes:
"""The unparsed HTTP headers
Returns:
bytes: The unparsed HTTP headers
"""
_, headers = self.head.split(b"\r\n", 1)
return bytes(headers)
@property
def request_line(self) -> bytes:
"""The first line of a HTTP request
Returns:
bytes: The first line of a HTTP request
"""
reqline, _ = self.head.split(b"\r\n", 1)
return bytes(reqline)
@property
def id(self) -> Optional[Union[uuid.UUID, str, int]]:
"""A request ID passed from the client, or generated from the backend.
By default, this will look in a request header defined at:
`self.app.config.REQUEST_ID_HEADER`. It defaults to
`X-Request-ID`. Sanic will try to cast the ID into a `UUID` or an
`int`.
If there is not a UUID from the client, then Sanic will try
to generate an ID by calling `Request.generate_id()`. The default
behavior is to generate a `UUID`. You can customize this behavior
by subclassing `Request` and overwriting that method.
```python
from sanic import Request, Sanic
from itertools import count
class IntRequest(Request):
counter = count()
def generate_id(self):
return next(self.counter)
app = Sanic("MyApp", request_class=IntRequest)
```
Returns:
Optional[Union[uuid.UUID, str, int]]: A request ID passed from the
client, or generated from the backend.
"""
if not self._id:
self._id = self.headers.getone(
self.app.config.REQUEST_ID_HEADER,
self.__class__.generate_id(self), # type: ignore
)
# Try casting to a UUID or an integer
if isinstance(self._id, str):
try:
self._id = uuid.UUID(self._id)
except ValueError:
try:
self._id = int(self._id) # type: ignore
except ValueError:
...
return self._id # type: ignore
@property
def json(self) -> Any:
"""The request body parsed as JSON
Returns:
Any: The request body parsed as JSON
"""
if self.parsed_json is None:
self.load_json()
return self.parsed_json
def load_json(self, loads=None) -> Any:
"""Load the request body as JSON
Args:
loads (Callable, optional): A custom JSON loader. Defaults to None.
Raises:
BadRequest: If the request body cannot be parsed as JSON
Returns:
Any: The request body parsed as JSON
"""
try:
if not loads:
loads = self.__class__._loads
self.parsed_json = loads(self.body)
except Exception:
if not self.body:
return None
raise BadRequest("Failed when parsing body as json")
return self.parsed_json
@property
def accept(self) -> AcceptList:
"""Accepted response content types.
A convenience handler for easier RFC-compliant matching of MIME types,
parsed as a list that can match wildcards and includes */* by default.
Returns:
AcceptList: Accepted response content types
"""
if self.parsed_accept is None:
self.parsed_accept = parse_accept(self.headers.get("accept"))
return self.parsed_accept
@property
def token(self) -> Optional[str]:
"""Attempt to return the auth header token.
Returns:
Optional[str]: The auth header token
"""
if self.parsed_token is None:
prefixes = ("Bearer", "Token")
_, token = parse_credentials(
self.headers.getone("authorization", None), prefixes
)
self.parsed_token = token
return self.parsed_token
@property
def credentials(self) -> Optional[Credentials]:
"""Attempt to return the auth header value.
Covers NoAuth, Basic Auth, Bearer Token, Api Token authentication
schemas.
Returns:
Optional[Credentials]: A Credentials object with token, or username
and password related to the request
"""
if self.parsed_credentials is None:
try:
prefix, credentials = parse_credentials(
self.headers.getone("authorization", None)
)
if credentials:
self.parsed_credentials = Credentials(
auth_type=prefix, token=credentials
)
except ValueError:
pass
return self.parsed_credentials
def get_form(
self, keep_blank_values: bool = False
) -> Optional[RequestParameters]:
"""Method to extract and parse the form data from a request.
Args:
keep_blank_values (bool): Whether to discard blank values from the form data.
Returns:
Optional[RequestParameters]: The parsed form data.
""" # noqa: E501
self.parsed_form = RequestParameters()
self.parsed_files = RequestParameters()
content_type = self.headers.getone(
"content-type", DEFAULT_HTTP_CONTENT_TYPE
)
content_type, parameters = parse_content_header(content_type)
try:
if content_type == "application/x-www-form-urlencoded":
self.parsed_form = RequestParameters(
parse_qs(
self.body.decode("utf-8"),
keep_blank_values=keep_blank_values,
)
)
elif content_type == "multipart/form-data":
# TODO: Stream this instead of reading to/from memory
boundary = parameters["boundary"].encode( # type: ignore
"utf-8"
) # type: ignore
self.parsed_form, self.parsed_files = parse_multipart_form(
self.body, boundary
)
except Exception:
error_logger.exception("Failed when parsing form")
return self.parsed_form
@property
def form(self) -> Optional[RequestParameters]:
"""The request body parsed as form data
Returns:
Optional[RequestParameters]: The request body parsed as form data
"""
if self.parsed_form is None:
self.get_form()
return self.parsed_form
@property
def files(self) -> Optional[RequestParameters]:
"""The request body parsed as uploaded files
Returns:
Optional[RequestParameters]: The request body parsed as uploaded files
""" # noqa: E501
if self.parsed_files is None:
self.form # compute form to get files
return self.parsed_files
def get_args(
self,
keep_blank_values: bool = False,
strict_parsing: bool = False,
encoding: str = "utf-8",
errors: str = "replace",
) -> RequestParameters:
"""Parse `query_string` using `urllib.parse.parse_qs`.
This methods is used by the `args` property, but it also
can be used directly if you need to change default parameters.
Args:
keep_blank_values (bool): Flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A `True` value indicates that blanks should be retained as
blank strings. The default `False` value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing (bool): Flag indicating what to do with parsing
errors. If `False` (the default), errors are silently ignored.
If `True`, errors raise a `ValueError` exception.
encoding (str): Specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the
`bytes.decode()` method.
errors (str): Specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the
`bytes.decode()` method.
Returns:
RequestParameters: A dictionary containing the parsed arguments.
"""
if (
keep_blank_values,
strict_parsing,
encoding,
errors,
) not in self.parsed_args:
if self.query_string:
self.parsed_args[
(keep_blank_values, strict_parsing, encoding, errors)
] = RequestParameters(
parse_qs(
qs=self.query_string,
keep_blank_values=keep_blank_values,
strict_parsing=strict_parsing,
encoding=encoding,
errors=errors,
)
)
return self.parsed_args[
(keep_blank_values, strict_parsing, encoding, errors)
]
args = property(get_args)
"""Convenience property to access `Request.get_args` with default values.
"""
def get_query_args(
self,
keep_blank_values: bool = False,
strict_parsing: bool = False,
encoding: str = "utf-8",
errors: str = "replace",
) -> list:
"""Parse `query_string` using `urllib.parse.parse_qsl`.
This methods is used by `query_args` propertyn but can be used
directly if you need to change default parameters.
Args:
keep_blank_values (bool): Flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A `True` value indicates that blanks should be retained as
blank strings. The default `False` value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing (bool): Flag indicating what to do with
parsing errors. If `False` (the default), errors are
silently ignored. If `True`, errors raise a
`ValueError` exception.
encoding (str): Specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the
`bytes.decode()` method.
errors (str): Specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the
`bytes.decode()` method.
Returns:
list: A list of tuples containing the parsed arguments.
"""
if (
keep_blank_values,
strict_parsing,
encoding,
errors,
) not in self.parsed_not_grouped_args:
if self.query_string:
self.parsed_not_grouped_args[
(keep_blank_values, strict_parsing, encoding, errors)
] = parse_qsl(
qs=self.query_string,
keep_blank_values=keep_blank_values,
strict_parsing=strict_parsing,
encoding=encoding,
errors=errors,
)
return self.parsed_not_grouped_args[
(keep_blank_values, strict_parsing, encoding, errors)
]
query_args = property(get_query_args)
"""Convenience property to access `Request.get_query_args` with default values.
""" # noqa: E501
def get_cookies(self) -> RequestParameters:
cookie = self.headers.getone("cookie", "")
self.parsed_cookies = CookieRequestParameters(parse_cookie(cookie))
return self.parsed_cookies
@property
def cookies(self) -> RequestParameters:
"""Incoming cookies on the request
Returns:
RequestParameters: Incoming cookies on the request
"""
if self.parsed_cookies is None:
self.get_cookies()
return cast(CookieRequestParameters, self.parsed_cookies)
@property
def content_type(self) -> str:
"""Content-Type header form the request
Returns:
str: Content-Type header form the request
"""
return self.headers.getone("content-type", DEFAULT_HTTP_CONTENT_TYPE)
@property
def match_info(self) -> dict[str, Any]:
"""Matched path parameters after resolving route
Returns:
Dict[str, Any]: Matched path parameters after resolving route
"""
return self._match_info
@match_info.setter
def match_info(self, value):
self._match_info = value
@property
def ip(self) -> str:
"""Peer ip of the socket
Returns:
str: Peer ip of the socket
"""
return self.conn_info.client_ip if self.conn_info else ""
@property
def port(self) -> int:
"""Peer port of the socket
Returns:
int: Peer port of the socket
"""
return self.conn_info.client_port if self.conn_info else 0
@property
def socket(self) -> Union[tuple[str, int], tuple[None, None]]:
"""Information about the connected socket if available
Returns:
Tuple[Optional[str], Optional[int]]: Information about the
connected socket if available, in the form of a tuple of
(ip, port)
"""
return (
self.conn_info.peername
if self.conn_info and self.conn_info.peername
else (None, None)
)
@property
def path(self) -> str:
"""Path of the local HTTP request
Returns:
str: Path of the local HTTP request
"""
return self._parsed_url.path.decode("utf-8")
@property
def network_paths(self) -> Optional[list[Any]]:
"""Access the network paths if available
Returns:
Optional[List[Any]]: Access the network paths if available
"""
if self.conn_info is None:
return None
return self.conn_info.network_paths
# Proxy properties (using SERVER_NAME/forwarded/request/transport info)
@property
def forwarded(self) -> Options:
"""Active proxy information obtained from request headers, as specified in Sanic configuration.
Field names by, for, proto, host, port and path are normalized.
- for and by IPv6 addresses are bracketed
- port (int) is only set by port headers, not from host.
- path is url-unencoded
Additional values may be available from new style Forwarded headers.
Returns:
Options: proxy information from request headers
""" # noqa: E501
if self.parsed_forwarded is None:
self.parsed_forwarded = (
parse_forwarded(self.headers, self.app.config)
or parse_xforwarded(self.headers, self.app.config)
or {}
)
return self.parsed_forwarded
@property
def remote_addr(self) -> str:
"""Client IP address, if available from proxy.
Returns:
str: IPv4, bracketed IPv6, UNIX socket name or arbitrary string
"""
if not hasattr(self, "_remote_addr"):
self._remote_addr = str(self.forwarded.get("for", ""))
return self._remote_addr
@property
def client_ip(self) -> str:
"""
Client IP address.
1. proxied remote address `self.forwarded['for']`
2. local peer address `self.ip`
New in Sanic 23.6. Prefer this over `remote_addr` for determining the
client address regardless of whether the service runs behind a proxy
or not (proxy deployment needs separate configuration).
Returns:
str: IPv4, bracketed IPv6, UNIX socket name or arbitrary string
"""
return self.remote_addr or self.ip
@property
def scheme(self) -> str:
"""Determine request scheme.
1. `config.SERVER_NAME` if in full URL format
2. proxied proto/scheme
3. local connection protocol
Returns:
str: http|https|ws|wss or arbitrary value given by the headers.
"""
if not hasattr(self, "_scheme"):
if (
self.app.websocket_enabled
and self.headers.upgrade.lower() == "websocket"
):
scheme = "ws"
else:
scheme = "http"
proto = None
sp = self.app.config.get("SERVER_NAME", "").split("://", 1)
if len(sp) == 2:
proto = sp[0]
elif "proto" in self.forwarded:
proto = str(self.forwarded["proto"])
if proto:
# Give ws/wss if websocket, otherwise keep the same
scheme = proto.replace("http", scheme)
elif self.conn_info and self.conn_info.ssl:
scheme += "s"
self._scheme = scheme
return self._scheme
@property
def host(self) -> str:
"""The currently effective server 'host' (hostname or hostname:port).
1. `config.SERVER_NAME` overrides any client headers
2. proxied host of original request
3. request host header
hostname and port may be separated by
`sanic.headers.parse_host(request.host)`.
Returns:
str: the first matching host found, or empty string
"""
server_name = self.app.config.get("SERVER_NAME")
if server_name:
return server_name.split("//", 1)[-1].split("/", 1)[0]
return str(
self.forwarded.get("host") or self.headers.getone("host", "")
)
@property
def server_name(self) -> str:
"""hostname the client connected to, by `request.host`
Returns:
str: hostname the client connected to, by `request.host`
"""
return parse_host(self.host)[0] or ""
@property
def server_port(self) -> int:
"""The port the client connected to, by forwarded `port` or `request.host`.
Default port is returned as 80 and 443 based on `request.scheme`.
Returns:
int: The port the client connected to, by forwarded `port` or `request.host`.
""" # noqa: E501
port = self.forwarded.get("port") or parse_host(self.host)[1]
return int(port or (80 if self.scheme in ("http", "ws") else 443))
@property
def server_path(self) -> str:
"""Full path of current URL; uses proxied or local path
Returns:
str: Full path of current URL; uses proxied or local path
"""
return str(self.forwarded.get("path") or self.path)
@property
def query_string(self) -> str:
"""Representation of the requested query
Returns:
str: Representation of the requested query
"""
if self._parsed_url.query:
return self._parsed_url.query.decode("utf-8")
else:
return ""
@property
def url(self) -> str:
"""The URL
Returns:
str: The URL
"""
return urlunparse(
(self.scheme, self.host, self.path, None, self.query_string, None)
)
def url_for(self, view_name: str, **kwargs) -> str:
"""Retrieve a URL for a given view name.
Same as `sanic.Sanic.url_for`, but automatically determine `scheme`
and `netloc` base on the request. Since this method is aiming
to generate correct schema & netloc, `_external` is implied.
Args:
view_name (str): The view name to generate URL for.
**kwargs: Arbitrary keyword arguments to build URL query string.
Returns:
str: The generated URL.
"""
# Full URL SERVER_NAME can only be handled in app.url_for
try:
sp = self.app.config.get("SERVER_NAME", "").split("://", 1)
if len(sp) == 2:
return self.app.url_for(view_name, _external=True, **kwargs)
except AttributeError:
pass
scheme = self.scheme
host = self.server_name
port = self.server_port
if (scheme.lower() in ("http", "ws") and port == 80) or (
scheme.lower() in ("https", "wss") and port == 443
):
netloc = host
else:
netloc = f"{host}:{port}"
return self.app.url_for(
view_name, _external=True, _scheme=scheme, _server=netloc, **kwargs
)
@property
def scope(self) -> ASGIScope:
"""The ASGI scope of the request.
Returns:
ASGIScope: The ASGI scope of the request.
Raises:
NotImplementedError: If the app isn't an ASGI app.
"""
if not self.app.asgi:
raise NotImplementedError(
"App isn't running in ASGI mode. "
"Scope is only available for ASGI apps."
)
return self.transport.scope
@property
def is_safe(self) -> bool:
"""Whether the HTTP method is safe.
See https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.1
Returns:
bool: Whether the HTTP method is safe.
"""
return self.method in SAFE_HTTP_METHODS
@property
def is_idempotent(self) -> bool:
"""Whether the HTTP method is iempotent.
See https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.2
Returns:
bool: Whether the HTTP method is iempotent.
"""
return self.method in IDEMPOTENT_HTTP_METHODS
@property
def is_cacheable(self) -> bool:
"""Whether the HTTP method is cacheable.
See https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.3
Returns:
bool: Whether the HTTP method is cacheable.
"""
return self.method in CACHEABLE_HTTP_METHODS
| Request |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/conditional_constrained_dependencies/package.py | {
"start": 216,
"end": 664
} | class ____(Package):
"""Package that has a variant which adds a dependency forced to
use non default values.
"""
homepage = "https://dev.null"
version("1.0")
# This variant is on by default and attaches a dependency
# with a lot of variants set at their non-default values
variant("dep", default=True, description="nope")
depends_on("dep-with-variants+foo+bar+baz", when="+dep")
| ConditionalConstrainedDependencies |
python | pytorch__pytorch | test/distributed/_composable/test_composability/test_2d_composability.py | {
"start": 21246,
"end": 25607
} | class ____(DTensorTestBase):
def _compare_params(self, m1, m2):
with FSDP.summon_full_params(m1):
with FSDP.summon_full_params(m2):
for n_p1, n_p2 in zip(m1.named_parameters(), m2.named_parameters()):
p1 = n_p1[1]
p2 = n_p2[1]
if n_p1[0] != n_p2[0]:
self.assertTrue(n_p1[0] in n_p2[0])
name = n_p1[0]
if name == "net2.bias" and self.rank != 0:
continue
if type(p2) is DTensor:
p2 = p2.redistribute(p2.device_mesh, [Replicate()]).to_local()
self.assertTrue(torch.allclose(p1, p2), f"{p1} vs {p2}")
@with_comms
@skip_if_lt_x_gpu(4)
def test_2d_fsdp_state_enable_extension(self):
mesh_2d = init_device_mesh(
self.device_type, (2, self.world_size // 2), mesh_dim_names=("dp", "tp")
)
model = FSDP(
SimpleModel().to(device_type),
device_mesh=mesh_2d["dp"],
)
fsdp_state = _get_module_fsdp_state(model)
self.assertTrue(isinstance(fsdp_state._fsdp_extension, DTensorExtensions))
def _test_2d_e2e_training(
self,
use_orig_params=False,
recompute_activation=False,
) -> None:
torch.manual_seed(0)
model = SimpleModel().to(f"{device_type}:{self.rank}")
model = FSDP(model, use_orig_params=use_orig_params)
optim = torch.optim.Adam(model.parameters(), lr=0.01)
torch.manual_seed(0)
mesh_2d = init_device_mesh(
self.device_type, (2, self.world_size // 2), mesh_dim_names=("dp", "tp")
)
tp_mesh = mesh_2d["tp"]
dp_mesh = mesh_2d["dp"]
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
model_2d = parallelize_module(
SimpleModel().to(device_type), tp_mesh, parallelize_plan
)
model_2d = FSDP(
model_2d,
device_mesh=dp_mesh,
use_orig_params=use_orig_params,
)
optim_2d = torch.optim.Adam(model_2d.parameters(), lr=0.01)
if recompute_activation:
model_2d = input_reshard(model_2d, mesh_2d["tp"], 0)
# Check named parameters are returning the same name at least.
param_names_2d = [
clean_tensor_name(name) for name, _ in model_2d.named_parameters()
]
for name, _ in model.named_parameters():
name = clean_tensor_name(name)
if name not in param_names_2d:
print(name, param_names_2d)
self.assertTrue(name in param_names_2d)
self._compare_params(model, model_2d)
# TODO: add additional tests for multi_param_group and optim_in_backward.
for i in range(5):
# Ensure all input across TP ranks are same.
# TODO: add a get_group_rank() to DeviceMesh.
torch.manual_seed(i + dist.get_rank(dp_mesh.get_group(mesh_dim=0)))
input = torch.rand(4, 5).to(f"{device_type}:{self.rank}")
output = model(input)
output_2d = model_2d(input)
self.assertEqual(output, output_2d)
output.sum().backward()
output_2d.sum().backward()
optim.step()
optim_2d.step()
self.assertEqual(model(input), model_2d(input))
# Ensure all params are still the same after optimizer update.
self._compare_params(model, model_2d)
@with_comms
@skip_if_lt_x_gpu(4)
def test_2d_e2e_training_default(self):
self._test_2d_e2e_training()
@with_comms
@skip_if_lt_x_gpu(4)
def test_2d_e2e_training_use_orig_params(self):
self._test_2d_e2e_training(use_orig_params=True)
@with_comms
@skip_if_lt_x_gpu(4)
def test_2d_e2e_training_not_use_orig_params(self):
# TODO: need to revisit input_reshard API about why it failed multi-gpu tests.
# self._test_2d_e2e_training(recompute_activation=True)
self._test_2d_e2e_training(recompute_activation=False)
# TODO: update all state dict unit tests to use distributed.checkpoint.state_dict,
# and consolidate all the state_dict test in test.distributed.checkpoint.
| TestNew2dParallelTraining |
python | pydantic__pydantic | tests/mypy/modules/plugin_fail.py | {
"start": 1387,
"end": 1455
} | class ____(BaseModel, from_attributes=list):
pass
| KwargsBadConfig2 |
python | PyCQA__pylint | tests/functional/p/property_affectation_py26.py | {
"start": 104,
"end": 447
} | class ____:
"""Smallest test case for reported issue."""
def __init__(self):
self._thing = None
@property
def myattr(self):
"""Getter for myattr"""
return self._thing
@myattr.setter
def myattr(self, value):
"""Setter for myattr."""
self._thing = value
Test().myattr = 'grou'
| Test |
python | ray-project__ray | python/ray/tune/tests/test_function_api.py | {
"start": 695,
"end": 3602
} | class ____(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.TemporaryDirectory()
self.logger_creator = creator_generator(
os.path.join(self.tmpdir.name, "logdir")
)
def create_trainable(self, train_fn):
return wrap_function(train_fn)(
logger_creator=self.logger_creator, storage=mock_storage_context()
)
def tearDown(self):
self.tmpdir.cleanup()
def testCheckpointReuse(self):
"""Test that repeated save/restore never reuses same checkpoint dir."""
def train_fn(config):
checkpoint = ray.tune.get_checkpoint()
if checkpoint:
with checkpoint.as_directory() as checkpoint_dir:
count = sum(
"checkpoint-" in path for path in os.listdir(checkpoint_dir)
)
assert count == 1, os.listdir(checkpoint_dir)
for step in range(20):
with tempfile.TemporaryDirectory() as temp_checkpoint_dir:
path = os.path.join(
temp_checkpoint_dir, "checkpoint-{}".format(step)
)
open(path, "a").close()
ray.tune.report(
dict(test=step),
checkpoint=Checkpoint.from_directory(temp_checkpoint_dir),
)
checkpoint = None
for i in range(5):
new_trainable = self.create_trainable(train_fn)
if checkpoint:
new_trainable.restore(checkpoint)
for i in range(2):
result = new_trainable.train()
checkpoint = new_trainable.save()
new_trainable.stop()
assert result[TRAINING_ITERATION] == 10
def testFunctionRecurringSave(self):
"""This tests that save and restore are commutative."""
def train_fn(config):
for step in range(10):
with tempfile.TemporaryDirectory() as temp_checkpoint_dir:
if step % 3 == 0:
path = os.path.join(temp_checkpoint_dir, "checkpoint.json")
with open(path, "w") as f:
json.dump({"step": step}, f)
ray.tune.report(
dict(test=step),
checkpoint=Checkpoint.from_directory(temp_checkpoint_dir),
)
new_trainable = self.create_trainable(train_fn)
new_trainable.train()
checkpoint_obj = new_trainable.save()
new_trainable.restore(checkpoint_obj)
checkpoint = new_trainable.save()
new_trainable.stop()
new_trainable2 = self.create_trainable(train_fn)
new_trainable2.restore(checkpoint)
new_trainable2.train()
new_trainable2.stop()
| FunctionCheckpointingTest |
python | huggingface__transformers | src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py | {
"start": 14781,
"end": 17069
} | class ____(nn.Module):
def __init__(self, config, layer_number):
super().__init__()
self.layer_number = layer_number
self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
# activate bias only last layer
self.attention = GPTNeoXJapaneseAttention(
config=config, use_bias=layer_number == config.num_hidden_layers - 1, layer_idx=layer_number
)
self.mlp = GPTNeoXJapaneseMLP(config)
self.hidden_dropout = config.hidden_dropout
def forward(
self,
hidden_states: Optional[torch.FloatTensor],
attention_mask: Optional[torch.FloatTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = False,
layer_past: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
):
residual = hidden_states
ln_out = self.input_layernorm(hidden_states)
attn_output, attn_weights, attn_bias = self.attention(
ln_out,
attention_mask=attention_mask,
layer_past=layer_past,
use_cache=use_cache,
output_attentions=output_attentions,
position_ids=position_ids,
cache_position=cache_position,
position_embeddings=position_embeddings,
)
# attn_output = (atten_output + bias) + residual
attn_output = bias_dropout_add(
attn_output,
bias=attn_bias.expand_as(residual) if attn_bias is not None else attn_bias,
residual=residual,
prob=self.hidden_dropout,
training=self.training,
)
mlp_output = self.mlp(self.post_attention_layernorm(attn_output))
# attn_output = (mlp_output + mlp_bias) + atten_output
attn_output = bias_dropout_add(
mlp_output, bias=None, residual=attn_output, prob=self.hidden_dropout, training=self.training
)
return attn_output, attn_weights
@auto_docstring
| GPTNeoXJapaneseLayer |
python | pydata__xarray | xarray/tests/test_combine.py | {
"start": 12246,
"end": 12882
} | class ____:
def test_check_depths(self):
ds = create_test_data(0)
combined_tile_ids = {(0,): ds, (0, 1): ds}
with pytest.raises(
ValueError, match=r"sub-lists do not have consistent depths"
):
_check_shape_tile_ids(combined_tile_ids)
def test_check_lengths(self):
ds = create_test_data(0)
combined_tile_ids = {(0, 0): ds, (0, 1): ds, (0, 2): ds, (1, 0): ds, (1, 1): ds}
with pytest.raises(
ValueError, match=r"sub-lists do not have consistent lengths"
):
_check_shape_tile_ids(combined_tile_ids)
| TestCheckShapeTileIDs |
python | modin-project__modin | modin/tests/pandas/native_df_interoperability/test_compiler_caster.py | {
"start": 4165,
"end": 4354
} | class ____(CloudQC):
def get_backend(self):
return "Cloud_High_Self"
def stay_cost(self, api_cls_name, op, arguments):
return QCCoercionCost.COST_HIGH
| CloudQCHighSelf |
python | pytest-dev__pytest-xdist | testing/test_remote.py | {
"start": 791,
"end": 2502
} | class ____:
def __init__(
self, request: pytest.FixtureRequest, pytester: pytest.Pytester
) -> None:
self.request = request
self.pytester = pytester
self.use_callback = False
self.events = Queue() # type: ignore[var-annotated]
def setup(self) -> None:
self.pytester.chdir()
# import os ; os.environ['EXECNET_DEBUG'] = "2"
self.gateway = execnet.makegateway("execmodel=main_thread_only//popen")
self.config = config = self.pytester.parseconfigure()
putevent = self.events.put if self.use_callback else None
class DummyMananger:
testrunuid = uuid.uuid4().hex
specs = [0, 1]
nodemanager = cast(NodeManager, DummyMananger)
self.slp = WorkerController(
nodemanager=nodemanager,
gateway=self.gateway,
config=config,
putevent=putevent, # type: ignore[arg-type]
)
self.request.addfinalizer(self.slp.ensure_teardown)
self.slp.setup()
def popevent(self, name: str | None = None) -> EventCall:
while 1:
if self.use_callback:
data = self.events.get(timeout=WAIT_TIMEOUT)
else:
data = self.slp.channel.receive(timeout=WAIT_TIMEOUT)
ev = EventCall(data)
if name is None or ev.name == name:
return ev
print(f"skipping {ev}")
def sendcommand(self, name: str, **kwargs: Any) -> None:
self.slp.sendcommand(name, **kwargs)
@pytest.fixture
def worker(request: pytest.FixtureRequest, pytester: pytest.Pytester) -> WorkerSetup:
return WorkerSetup(request, pytester)
| WorkerSetup |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/errors.py | {
"start": 296,
"end": 3933
} | class ____(Exception):
error = None
status_code = 400
description = ''
def __init__(self, description=None, uri=None, state=None,
status_code=None, request=None):
"""
:param description: A human-readable ASCII [USASCII] text providing
additional information, used to assist the client
developer in understanding the error that occurred.
Values for the "error_description" parameter
MUST NOT include characters outside the set
x20-21 / x23-5B / x5D-7E.
:param uri: A URI identifying a human-readable web page with information
about the error, used to provide the client developer with
additional information about the error. Values for the
"error_uri" parameter MUST conform to the URI- Reference
syntax, and thus MUST NOT include characters outside the set
x21 / x23-5B / x5D-7E.
:param state: A CSRF protection value received from the client.
:param status_code:
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
if description is not None:
self.description = description
message = '({}) {}'.format(self.error, self.description)
if request:
message += ' ' + repr(request)
super().__init__(message)
self.uri = uri
self.state = state
if status_code:
self.status_code = status_code
if request:
self.redirect_uri = request.redirect_uri
self.client_id = request.client_id
self.scopes = request.scopes
self.response_type = request.response_type
self.response_mode = request.response_mode
self.grant_type = request.grant_type
if state is None:
self.state = request.state
else:
self.redirect_uri = None
self.client_id = None
self.scopes = None
self.response_type = None
self.response_mode = None
self.grant_type = None
def in_uri(self, uri):
fragment = self.response_mode == "fragment"
return add_params_to_uri(uri, self.twotuples, fragment)
@property
def twotuples(self):
error = [('error', self.error)]
if self.description:
error.append(('error_description', self.description))
if self.uri:
error.append(('error_uri', self.uri))
if self.state:
error.append(('state', self.state))
return error
@property
def urlencoded(self):
return urlencode(self.twotuples)
@property
def json(self):
return json.dumps(dict(self.twotuples))
@property
def headers(self):
if self.status_code == 401:
"""
https://tools.ietf.org/html/rfc6750#section-3
All challenges defined by this specification MUST use the auth-scheme
value "Bearer". This scheme MUST be followed by one or more
auth-param values.
"""
authvalues = ['error="{}"'.format(self.error)]
if self.description:
authvalues.append('error_description="{}"'.format(self.description))
if self.uri:
authvalues.append('error_uri="{}"'.format(self.uri))
return {"WWW-Authenticate": "Bearer " + ", ".join(authvalues)}
return {}
| OAuth2Error |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-vertex/tests/test_embeddings_vertex.py | {
"start": 4500,
"end": 6620
} | class ____(unittest.IsolatedAsyncioTestCase):
@patch("vertexai.init")
@patch("vertexai.language_models.TextEmbeddingModel.from_pretrained")
async def test_get_embedding_retrieval(
self, model_mock: AsyncMock, init_mock: AsyncMock
):
model = MagicMock()
model.get_embeddings_async = (
AsyncMock()
) # Ensure get_embeddings is an AsyncMock for async calls
model_mock.return_value = model
mock_cred = Mock(return_value="mock_credentials_instance")
embedding = VertexTextEmbedding(
project="test-project",
location="us-test-location",
embed_mode=VertexEmbeddingMode.RETRIEVAL_MODE,
additional_kwargs={"auto_truncate": True},
credentials=mock_cred,
)
model.get_embeddings_async.return_value = [
TextEmbedding(values=[0.1, 0.2, 0.3])
]
result = await embedding.aget_text_embedding("some text")
model.get_embeddings_async.assert_called_once()
positional_args, keyword_args = model.get_embeddings_async.call_args
model.get_embeddings_async.reset_mock()
self.assertEqual(len(positional_args[0]), 1)
self.assertEqual(positional_args[0][0].text, "some text")
self.assertEqual(positional_args[0][0].task_type, "RETRIEVAL_DOCUMENT")
self.assertEqual(result, [0.1, 0.2, 0.3])
self.assertTrue(keyword_args["auto_truncate"])
model.get_embeddings_async.return_value = [
TextEmbedding(values=[0.1, 0.2, 0.3])
]
result = await embedding.aget_query_embedding("some query text")
model.get_embeddings_async.assert_called_once()
positional_args, keyword_args = model.get_embeddings_async.call_args
self.assertEqual(len(positional_args[0]), 1)
self.assertEqual(positional_args[0][0].text, "some query text")
self.assertEqual(positional_args[0][0].task_type, "RETRIEVAL_QUERY")
self.assertEqual(result, [0.1, 0.2, 0.3])
self.assertTrue(keyword_args["auto_truncate"])
| VertexTextEmbeddingTestAsync |
python | PyCQA__pydocstyle | src/pydocstyle/parser.py | {
"start": 6838,
"end": 6943
} | class ____(Function):
"""A Python source code nested function."""
is_public = False
| NestedFunction |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/graphql.py | {
"start": 10984,
"end": 11625
} | class ____:
def __init__(self, typenames):
self.typename_to_prio = {o: prio for prio, o in enumerate(reversed(typenames))}
self.count = itertools.count()
self.storage = []
def add_cursor(self, typename, cursor, total_count, parent_id=None):
priority = self.typename_to_prio[typename]
heapq.heappush(self.storage, (priority, next(self.count), (typename, cursor, total_count, parent_id)))
def get_cursor(self):
if self.storage:
_, _, c = heapq.heappop(self.storage)
return {"typename": c[0], "cursor": c[1], "total_count": c[2], "parent_id": c[3]}
| CursorStorage |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 93603,
"end": 94101
} | class ____(_PrintableStructure):
_fields_ = [
('version', c_uint), # input
('engineId', c_uint), # input. One of NVML_ENGINE_TYPE*
('schedulerPolicy', c_uint), # output
('arrMode', c_uint), # output
('schedulerParams', c_nvmlVgpuSchedulerParams_t), # output
]
nvmlVgpuSchedulerStateInfo_v1 = 0x1000018
| c_nvmlVgpuSchedulerStateInfo_v1_t |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 9967,
"end": 12565
} | class ____(VOTableChangeWarning):
"""Implicitly generating an ID from a name.
The VOTable 1.1 spec says the following about ``name`` vs. ``ID``
on ``FIELD`` and ``VALUE`` elements:
``ID`` and ``name`` attributes have a different role in
VOTable: the ``ID`` is meant as a *unique identifier* of an
element seen as a VOTable component, while the ``name`` is
meant for presentation purposes, and need not to be unique
throughout the VOTable document. The ``ID`` attribute is
therefore required in the elements which have to be
referenced, but in principle any element may have an ``ID``
attribute. ... In summary, the ``ID`` is different from the
``name`` attribute in that (a) the ``ID`` attribute is made
from a restricted character set, and must be unique throughout
a VOTable document whereas names are standard XML attributes
and need not be unique; and (b) there should be support in the
parsing software to look up references and extract the
relevant element with matching ``ID``.
It is further recommended in the VOTable 1.2 spec:
While the ``ID`` attribute has to be unique in a VOTable
document, the ``name`` attribute need not. It is however
recommended, as a good practice, to assign unique names within
a ``TABLE`` element. This recommendation means that, between a
``TABLE`` and its corresponding closing ``TABLE`` tag,
``name`` attributes of ``FIELD``, ``PARAM`` and optional
``GROUP`` elements should be all different.
Since ``astropy.io.votable`` requires a unique identifier for each of its
columns, ``ID`` is used for the column name when present.
However, when ``ID`` is not present, (since it is not required by
the specification) ``name`` is used instead. However, ``name``
must be cleansed by replacing invalid characters (such as
whitespace) with underscores.
.. note::
This warning does not indicate that the input file is invalid
with respect to the VOTable specification, only that the
column names in the record array may not match exactly the
``name`` attributes specified in the file.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Implicitly generating an ID from a name '{}' -> '{}'"
default_args = ("x", "y")
| W03 |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_slugs.py | {
"start": 1666,
"end": 2280
} | class ____(util.MdCase):
"""Test Unicode cased, encoded slugs."""
extension = ['markdown.extensions.toc']
extension_configs = {
'markdown.extensions.toc': {
"slugify": slugs.slugify(percent_encode=True)
}
}
def test_slug(self):
"""Test the slug output."""
self.check_markdown(
r'# Testing cased unicode-slugs_headers ±♠Ωℑ with encoding',
r'<h1 id="Testing-cased-unicode-slugs_headers-%CE%A9%E2%84%91-with-encoding">'
'Testing cased unicode-slugs_headers ±♠Ωℑ with encoding</h1>'
)
| TestUslugifyCasedEncoded |
python | Lightning-AI__lightning | src/lightning/fabric/utilities/data.py | {
"start": 1292,
"end": 21615
} | class ____(LightningEnum):
SET = "set"
DEL = "del"
def __call__(self, *args: Any) -> None:
fn: Union[Callable[[object, str], None], Callable[[object, str, Any], None]]
fn = setattr if self == self.SET else delattr
return fn(*args)
def has_iterable_dataset(dataloader: object) -> bool:
return hasattr(dataloader, "dataset") and isinstance(dataloader.dataset, IterableDataset)
def sized_len(dataloader: object) -> Optional[int]:
"""Try to get the length of an object, return ``None`` otherwise."""
try:
# try getting the length
length = len(dataloader) # type: ignore [arg-type]
except (TypeError, NotImplementedError):
length = None
return length
def has_len(dataloader: object) -> TypeGuard[Sized]:
"""Checks if a given object has ``__len__`` method implemented."""
length = sized_len(dataloader)
if length == 0:
rank_zero_warn(
f"`{dataloader.__class__.__name__}` returned 0 length. Please make sure this was your intention."
)
if length is not None and has_iterable_dataset(dataloader):
rank_zero_warn(
"Your `IterableDataset` has `__len__` defined."
" In combination with multi-process data loading (when num_workers > 1),"
" `__len__` could be inaccurate if each worker is not configured independently"
" to avoid having duplicate data."
)
return length is not None
def _update_dataloader(dataloader: DataLoader, sampler: Union[Sampler, Iterable]) -> DataLoader:
dl_args, dl_kwargs = _get_dataloader_init_args_and_kwargs(dataloader, sampler)
return _reinstantiate_wrapped_cls(dataloader, *dl_args, **dl_kwargs)
def _get_dataloader_init_args_and_kwargs(
dataloader: DataLoader,
sampler: Union[Sampler, Iterable],
) -> tuple[tuple[Any], dict[str, Any]]:
if not isinstance(dataloader, DataLoader):
raise ValueError(f"The dataloader {dataloader} needs to subclass `torch.utils.data.DataLoader`")
was_wrapped = hasattr(dataloader, "__pl_saved_args")
if was_wrapped:
dl_args = dataloader.__pl_saved_args
dl_kwargs = dataloader.__pl_saved_kwargs
arg_names = dataloader.__pl_saved_arg_names
original_dataset = dataloader.__dataset # we have this saved from _wrap_init
else:
# get the dataloader instance attributes
attrs = {k: v for k, v in vars(dataloader).items() if not k.startswith("_")}
# We cannot be 100% sure the class sets dataset argument. Let's set it to None to be safe
# and hope we can get it from the instance attributes
original_dataset = None
# not part of `vars`
attrs["multiprocessing_context"] = dataloader.multiprocessing_context
arg_names = ()
# get the dataloader instance `__init__` parameters
params = dict(inspect.signature(dataloader.__init__).parameters) # type: ignore[misc]
has_variadic_kwargs = any(p.kind is p.VAR_KEYWORD for p in params.values())
if has_variadic_kwargs:
# if the signature takes **kwargs, assume they will be passed down with `super().__init__(**kwargs)`
if was_wrapped:
# if the dataloader was wrapped in a hook, only take arguments with default values
# and assume user passes their kwargs correctly
params.update({
k: v for k, v in inspect.signature(DataLoader.__init__).parameters.items() if v.default is not v.empty
})
else:
params.update(inspect.signature(DataLoader.__init__).parameters)
params.pop("self", None)
if not was_wrapped:
# keep only the params whose default is different to the current attr value
non_defaults = {name for name, p in params.items() if name in attrs and p.default is not attrs[name]}
# add `dataset` as it might have been replaced with `*args`
non_defaults.add("dataset")
# kwargs to re-construct the dataloader
dl_kwargs = {k: v for k, v in attrs.items() if k in non_defaults}
dl_args = ()
dataset = dl_kwargs.get("dataset", original_dataset)
if isinstance(dataset, IterableDataset):
dl_kwargs["batch_sampler"] = None
dl_kwargs["sampler"] = None
else:
dl_kwargs.update(_dataloader_init_kwargs_resolve_sampler(dataloader, sampler))
required_args = {
p.name
for p in params.values()
if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
and p.default is p.empty
and p.name not in dl_kwargs
and p.name not in arg_names
}
# the dataloader has required args which we could not extract from the existing attributes
if required_args:
sorted_required_args = sorted(required_args)
dataloader_cls_name = dataloader.__class__.__name__
missing_args_message = ", ".join(f"`self.{arg_name}`" for arg_name in sorted_required_args)
raise MisconfigurationException(
f"Trying to inject custom `Sampler` into the `{dataloader_cls_name}` instance. "
"This would fail as some of the `__init__` arguments are not available as instance attributes. "
f"The missing attributes are {sorted_required_args}. If you instantiate your `{dataloader_cls_name}` "
"inside a `*_dataloader` hook of your module, we will do this for you."
f" Otherwise, define {missing_args_message} inside your `__init__`."
)
if not has_variadic_kwargs:
# the dataloader signature does not allow keyword arguments that need to be passed
missing_kwargs = (set(dl_kwargs) | set(arg_names)) - params.keys()
if missing_kwargs:
sorted_missing_kwargs = sorted(missing_kwargs)
dataloader_cls_name = dataloader.__class__.__name__
raise TypeError(
f"Trying to inject parameters into the `{dataloader_cls_name}` instance. "
"This would fail as it doesn't expose all its attributes in the `__init__` signature. "
f"The missing arguments are {sorted_missing_kwargs}. HINT: If you wrote the `{dataloader_cls_name}` "
"class, add the `__init__` arguments or allow passing `**kwargs`"
)
return dl_args, dl_kwargs
def _dataloader_init_kwargs_resolve_sampler(
dataloader: DataLoader,
sampler: Union[Sampler, Iterable],
) -> dict[str, Any]:
"""This function is used to handle the sampler, batch_sampler arguments associated within a DataLoader for its re-
instantiation."""
batch_sampler = getattr(dataloader, "batch_sampler")
if batch_sampler is not None and type(batch_sampler) is not BatchSampler:
batch_sampler_cls = type(batch_sampler)
if hasattr(batch_sampler, "__pl_saved_args"):
# This is a PyTorch `BatchSampler` subclass for which we captured the init args
args = batch_sampler.__pl_saved_args
kwargs = batch_sampler.__pl_saved_kwargs
default_kwargs = batch_sampler.__pl_saved_default_kwargs
arg_names = batch_sampler.__pl_saved_arg_names
success, args, kwargs = _replace_value_in_saved_args(
"sampler", sampler, args, kwargs, default_kwargs, arg_names
)
if not success:
raise TypeError(
"Trying to inject a modified sampler into the batch sampler; however, it seems the class "
f"`{batch_sampler_cls.__qualname__}` does not have an argument called `sampler.` To mitigate "
"this, expose an argument `sampler` in the `__init__` method of your custom class."
)
batch_sampler = _reinstantiate_wrapped_cls(batch_sampler, *args, **kwargs)
elif hasattr(batch_sampler, "batch_size") and hasattr(batch_sampler, "drop_last"):
# This is a sampler for which we could not capture the init args, but it kinda looks like a batch sampler
# even if it does not inherit from PyTorch's interface.
try:
batch_sampler = batch_sampler_cls(
sampler,
batch_size=batch_sampler.batch_size,
drop_last=batch_sampler.drop_last,
)
except TypeError as ex:
import re
match = re.match(r".*__init__\(\) (got multiple values)|(missing \d required)", str(ex))
if not match:
# an unexpected `TypeError`, continue failure
raise
# There could either be too few or too many arguments. Customizing the message based on this doesn't
# make much sense since our MisconfigurationException is going to be raised from the original one.
raise TypeError(
" Lightning can't inject a (distributed) sampler into your batch sampler, because it doesn't"
" subclass PyTorch's `BatchSampler`. To mitigate this, either follow the API of `BatchSampler`"
" or set`.setup_dataloaders(..., use_distributed_sampler=False)`. If you choose the latter, you"
" will be responsible for handling the distributed sampling within your batch sampler."
) from ex
else:
# The sampler is not a PyTorch `BatchSampler`, we don't know how to inject a custom sampler
raise TypeError(
" Lightning can't inject a (distributed) sampler into your batch sampler, because it doesn't"
" subclass PyTorch's `BatchSampler`. To mitigate this, either follow the API of `BatchSampler`"
" or set`.setup_dataloaders(..., use_distributed_sampler=False)`. If you choose the latter, you"
" will be responsible for handling the distributed sampling within your batch sampler."
)
return {
"sampler": None,
"shuffle": False,
"batch_sampler": batch_sampler,
"batch_size": 1,
"drop_last": False,
}
return {"sampler": sampler, "shuffle": False, "batch_sampler": None}
def _auto_add_worker_init_fn(dataloader: object, rank: int) -> None:
if not hasattr(dataloader, "worker_init_fn"):
return
if int(os.environ.get("PL_SEED_WORKERS", 0)) and dataloader.worker_init_fn is None:
dataloader.worker_init_fn = partial(pl_worker_init_function, rank=rank)
def _reinstantiate_wrapped_cls(orig_object: Any, *args: Any, explicit_cls: Optional[type] = None, **kwargs: Any) -> Any:
constructor = type(orig_object) if explicit_cls is None else explicit_cls
try:
result = constructor(*args, **kwargs)
except TypeError as ex:
# improve exception message due to an incorrect implementation of the `DataLoader` where multiple subclass
# `__init__` arguments map to one `DataLoader.__init__` argument
import re
match = re.match(r".*__init__\(\) got multiple values .* '(\w+)'", str(ex))
if not match:
# an unexpected `TypeError`, continue failure
raise
argument = match.groups()[0]
message = (
f"The {constructor.__name__} implementation has an error where more than one `__init__` argument"
f" can be passed to its parent's `{argument}=...` `__init__` argument. This is likely caused by allowing"
f" passing both a custom argument that will map to the `{argument}` argument as well as `**kwargs`."
f" `kwargs` should be filtered to make sure they don't contain the `{argument}` key."
" This argument was automatically passed to your object by PyTorch Lightning."
)
raise MisconfigurationException(message) from ex
attrs_record = getattr(orig_object, "__pl_attrs_record", [])
for args, fn in attrs_record:
fn(result, *args)
return result
def _wrap_init_method(init: Callable, store_explicit_arg: Optional[str] = None) -> Callable:
"""Wraps the ``__init__`` method of classes (currently :class:`~torch.utils.data.DataLoader` and
:class:`~torch.utils.data.BatchSampler`) in order to enable re-instantiation of custom subclasses."""
@functools.wraps(init)
def wrapper(obj: Any, *args: Any, **kwargs: Any) -> None:
# We need to inspect `init`, as inspecting `obj.__init__`
# can lead to inspecting the wrong function with multiple inheritance
old_inside_init = getattr(obj, "__pl_inside_init", False)
object.__setattr__(obj, "__pl_inside_init", True)
params = inspect.signature(init).parameters
parameters_defaults = OrderedDict(
(param.name, param.default)
for param in params.values()
if param.name != "self" and param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)
)
param_names = tuple(parameters_defaults)[: len(args)]
default_kwargs = {
name: value
for name, value in parameters_defaults.items()
if name not in kwargs and name not in param_names and value != inspect.Parameter.empty
}
if not hasattr(obj, "__pl_saved_args"):
object.__setattr__(obj, "__pl_saved_args", args)
object.__setattr__(obj, "__pl_saved_kwargs", kwargs)
object.__setattr__(obj, "__pl_saved_arg_names", param_names)
object.__setattr__(obj, "__pl_saved_default_kwargs", default_kwargs)
# We want to use the latest possible value for explicit argument (i.e. ideally what gets passed to base class)
# so that we can be sure, that it will not get changed anymore.
# That is why we are setting this in every `__init__`
if store_explicit_arg is not None:
if store_explicit_arg in param_names:
object.__setattr__(obj, f"__{store_explicit_arg}", args[param_names.index(store_explicit_arg)])
elif store_explicit_arg in kwargs:
object.__setattr__(obj, f"__{store_explicit_arg}", kwargs[store_explicit_arg])
init(obj, *args, **kwargs)
object.__setattr__(obj, "__pl_inside_init", old_inside_init)
return wrapper
def _wrap_attr_method(method: Callable, tag: _WrapAttrTag) -> Callable:
"""Wraps the ``__setattr__`` or ``__delattr__`` method of classes (currently :class:`~torch.utils.data.DataLoader`
and :class:`~torch.utils.data.BatchSampler`) in order to enable re- instantiation of custom subclasses."""
@functools.wraps(method)
def wrapper(obj: Any, *args: Any) -> None:
# First, let's find out if we're the first in inheritance chain calling the patched method.
name, *_ = args
prev_call_name, prev_call_method = getattr(obj, "__pl_current_call", (None, "method"))
first_call = not (prev_call_name == name and prev_call_method == tag)
# Then mark the current called method
object.__setattr__(obj, "__pl_current_call", (name, tag))
# call original method
method(obj, *args)
if first_call and not getattr(obj, "__pl_inside_init", True):
# and save the value it was called with to the internal list,
# if we're outside of __init__ and the original call did not fail and we're the first call
attrs_record = getattr(obj, "__pl_attrs_record", [])
attrs_record.append((args, tag))
object.__setattr__(obj, "__pl_attrs_record", attrs_record)
object.__setattr__(obj, "__pl_current_call", (prev_call_name, prev_call_method))
return wrapper
@contextmanager
def _replace_dunder_methods(base_cls: type, store_explicit_arg: Optional[str] = None) -> Generator[None, None, None]:
"""This context manager is used to add support for re-instantiation of custom (subclasses) of `base_cls`.
It patches the ``__init__``, ``__setattr__`` and ``__delattr__`` methods.
"""
classes = get_all_subclasses(base_cls) | {base_cls}
for cls in classes:
# Check that __init__ belongs to the class
# https://stackoverflow.com/a/5253424
if "__init__" in cls.__dict__:
cls.__old__init__ = cls.__init__ # type: ignore[misc]
cls.__init__ = _wrap_init_method(cls.__init__, store_explicit_arg) # type: ignore[misc]
# we want at least one setattr/delattr in the chain to be patched and it can happen, that none of the subclasses
# implement `__setattr__`/`__delattr__`. Therefore, we are always patching the `base_cls`
for patch_fn_name, tag in (("__setattr__", _WrapAttrTag.SET), ("__delattr__", _WrapAttrTag.DEL)):
if patch_fn_name in cls.__dict__ or cls is base_cls:
saved_name = f"__old{patch_fn_name}"
setattr(cls, saved_name, getattr(cls, patch_fn_name))
setattr(cls, patch_fn_name, _wrap_attr_method(getattr(cls, patch_fn_name), tag))
yield
for cls in classes:
for patched_name in ("__setattr__", "__delattr__", "__init__"):
# Check that __old__{init,setattr,delattr} belongs to the class
# https://stackoverflow.com/a/5253424
if f"__old{patched_name}" in cls.__dict__:
setattr(cls, patched_name, getattr(cls, f"__old{patched_name}"))
delattr(cls, f"__old{patched_name}")
def _replace_value_in_saved_args(
replace_key: str,
replace_value: Any,
args: tuple[Any, ...],
kwargs: dict[str, Any],
default_kwargs: dict[str, Any],
arg_names: tuple[str, ...],
) -> tuple[bool, tuple[Any, ...], dict[str, Any]]:
"""Tries to replace an argument value in a saved list of args and kwargs.
Returns a tuple indicating success of the operation and modified saved args and kwargs
"""
if replace_key in arg_names:
replace_index = arg_names.index(replace_key)
args = args[:replace_index] + (replace_value,) + args[replace_index + 1 :]
return True, args, kwargs
if replace_key in kwargs or replace_key in default_kwargs:
kwargs[replace_key] = replace_value
return True, args, kwargs
return False, args, kwargs
def _set_sampler_epoch(dataloader: object, epoch: int) -> None:
"""Calls the ``set_epoch`` method on either the sampler of the given dataloader.
Every PyTorch dataloader has either a sampler or a batch sampler. If the sampler is wrapped by a
:class:`~torch.utils.data.distributed.DistributedSampler`, ``set_epoch`` must be called at the beginning
of every epoch to ensure shuffling applies a new ordering. This has no effect if shuffling is off.
"""
# cannot use a set because samplers might be unhashable: use a dict based on the id to drop duplicates
objects: dict[int, Any] = {}
# check dataloader.sampler
if (sampler := getattr(dataloader, "sampler", None)) is not None:
objects[id(sampler)] = sampler
# check dataloader.batch_sampler.sampler
if (batch_sampler := getattr(dataloader, "batch_sampler", None)) is not None and (
sampler := getattr(batch_sampler, "sampler", None)
) is not None:
objects[id(sampler)] = sampler
for obj in objects.values():
set_epoch = getattr(obj, "set_epoch", None)
if callable(set_epoch):
set_epoch(epoch)
def suggested_max_num_workers(local_world_size: int) -> int:
"""Suggests an upper bound of ``num_workers`` to use in a PyTorch :class:`~torch.utils.data.DataLoader` based on
the number of CPU cores available on the system and the number of distributed processes in the current machine.
Args:
local_world_size: The number of distributed processes running on the current machine. Set this to the number
of devices configured in Fabric/Trainer.
"""
if local_world_size < 1:
raise ValueError(f"`local_world_size` should be >= 1, got {local_world_size}.")
cpu_count = _num_cpus_available()
return max(1, cpu_count // local_world_size - 1) # -1 to leave some resources for main process
def _num_cpus_available() -> int:
if hasattr(os, "sched_getaffinity"):
return len(os.sched_getaffinity(0))
cpu_count = os.cpu_count()
return 1 if cpu_count is None else cpu_count
| _WrapAttrTag |
python | wandb__wandb | tests/system_tests/test_core/test_torch_full.py | {
"start": 1493,
"end": 1825
} | class ____(nn.Module):
def __init__(self, num_outputs=2):
super().__init__()
self.linear1 = nn.Linear(1, 10)
self.linear2 = nn.Linear(10, num_outputs)
self.dist = Discrete()
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
return self.dist(x)
| DiscreteModel |
python | coleifer__peewee | peewee.py | {
"start": 12927,
"end": 13673
} | class ____(Proxy):
"""
Proxy implementation specifically for proxying `Database` objects.
"""
__slots__ = ('obj', '_callbacks', '_Model')
def connection_context(self):
return ConnectionContext(self)
def atomic(self, *args, **kwargs):
return _atomic(self, *args, **kwargs)
def manual_commit(self):
return _manual(self)
def transaction(self, *args, **kwargs):
return _transaction(self, *args, **kwargs)
def savepoint(self):
return _savepoint(self)
@property
def Model(self):
if not hasattr(self, '_Model'):
class Meta: database = self
self._Model = type('BaseModel', (Model,), {'Meta': Meta})
return self._Model
| DatabaseProxy |
python | getsentry__sentry | tests/sentry/workflow_engine/handlers/condition/test_first_seen_event_handler.py | {
"start": 427,
"end": 2974
} | class ____(ConditionTestCase):
condition = Condition.FIRST_SEEN_EVENT
payload = {"id": FirstSeenEventCondition.id}
def setUp(self) -> None:
super().setUp()
self.event_data = WorkflowEventData(
event=self.group_event,
group=self.group_event.group,
group_state=GroupState(
{
"id": 1,
"is_regression": True,
"is_new": True,
"is_new_group_environment": True,
}
),
workflow_env=None,
)
self.dc = self.create_data_condition(
type=self.condition,
comparison=True,
condition_result=True,
)
def test_dual_write(self) -> None:
dcg = self.create_data_condition_group()
dc = self.translate_to_data_condition(self.payload, dcg)
assert dc.type == self.condition
assert dc.comparison is True
assert dc.condition_result is True
assert dc.condition_group == dcg
def test_json_schema(self) -> None:
dc = self.create_data_condition(
type=self.condition,
comparison=True,
condition_result=True,
)
dc.comparison = False
dc.save()
dc.comparison = {"time": "asdf"}
with pytest.raises(ValidationError):
dc.save()
dc.comparison = "hello"
with pytest.raises(ValidationError):
dc.save()
def test(self) -> None:
self.assert_passes(self.dc, self.event_data)
assert self.event_data.group_state
self.event_data.group_state["is_new"] = False
self.assert_does_not_pass(self.dc, self.event_data)
def test_with_environment(self) -> None:
self.event_data = replace(self.event_data, workflow_env=self.environment)
assert self.event_data.group_state
self.assert_passes(self.dc, self.event_data)
self.event_data.group_state["is_new"] = False
self.event_data.group_state["is_new_group_environment"] = True
self.assert_passes(self.dc, self.event_data)
self.event_data.group_state["is_new"] = True
self.event_data.group_state["is_new_group_environment"] = False
self.assert_does_not_pass(self.dc, self.event_data)
self.event_data.group_state["is_new"] = False
self.event_data.group_state["is_new_group_environment"] = False
self.assert_does_not_pass(self.dc, self.event_data)
| TestFirstSeenEventCondition |
python | apache__airflow | airflow-core/src/airflow/utils/db.py | {
"start": 44587,
"end": 52121
} | class ____(enum.IntEnum):
"""
Cross-db Identifiers for advisory global database locks.
Postgres uses int64 lock ids so we use the integer value, MySQL uses names, so we
call ``str()`, which is implemented using the ``_name_`` field.
"""
MIGRATIONS = enum.auto()
SCHEDULER_CRITICAL_SECTION = enum.auto()
def __str__(self):
return f"airflow_{self._name_}"
@contextlib.contextmanager
def create_global_lock(
session: Session,
lock: DBLocks,
lock_timeout: int = 1800,
) -> Generator[None, None, None]:
"""Contextmanager that will create and teardown a global db lock."""
bind = session.get_bind()
if hasattr(bind, "connect"):
conn = bind.connect()
else:
conn = bind
dialect_name = get_dialect_name(session)
try:
if dialect_name == "postgresql":
if USE_PSYCOPG3:
# psycopg3 doesn't support parameters for `SET`. Use `set_config` instead.
# The timeout value must be passed as a string of milliseconds.
conn.execute(
text("SELECT set_config('lock_timeout', :timeout, false)"),
{"timeout": str(lock_timeout)},
)
conn.execute(text("SELECT pg_advisory_lock(:id)"), {"id": lock.value})
else:
conn.execute(text("SET LOCK_TIMEOUT to :timeout"), {"timeout": lock_timeout})
conn.execute(text("SELECT pg_advisory_lock(:id)"), {"id": lock.value})
elif (
dialect_name == "mysql"
and conn.dialect.server_version_info
and conn.dialect.server_version_info >= (5, 6)
):
conn.execute(text("SELECT GET_LOCK(:id, :timeout)"), {"id": str(lock), "timeout": lock_timeout})
yield
finally:
if dialect_name == "postgresql":
if USE_PSYCOPG3:
# Use set_config() to reset the timeout to its default (0 = off/wait forever).
conn.execute(text("SELECT set_config('lock_timeout', '0', false)"))
else:
conn.execute(text("SET LOCK_TIMEOUT TO DEFAULT"))
result = conn.execute(text("SELECT pg_advisory_unlock(:id)"), {"id": lock.value}).fetchone()
if result is None:
raise RuntimeError("Error releasing DB lock!")
(unlocked,) = result
if not unlocked:
raise RuntimeError("Error releasing DB lock!")
elif (
dialect_name == "mysql"
and conn.dialect.server_version_info
and conn.dialect.server_version_info >= (5, 6)
):
conn.execute(text("select RELEASE_LOCK(:id)"), {"id": str(lock)})
def compare_type(context, inspected_column, metadata_column, inspected_type, metadata_type):
"""
Compare types between ORM and DB .
return False if the metadata_type is the same as the inspected_type
or None to allow the default implementation to compare these
types. a return value of True means the two types do not
match and should result in a type change operation.
"""
if context.dialect.name == "mysql":
from sqlalchemy import String
from sqlalchemy.dialects import mysql
if isinstance(inspected_type, mysql.VARCHAR) and isinstance(metadata_type, String):
# This is a hack to get around MySQL VARCHAR collation
# not being possible to change from utf8_bin to utf8mb3_bin.
# We only make sure lengths are the same
if inspected_type.length != metadata_type.length:
return True
return False
return None
def compare_server_default(
context, inspected_column, metadata_column, inspected_default, metadata_default, rendered_metadata_default
):
"""
Compare server defaults between ORM and DB .
return True if the defaults are different, False if not, or None to allow the default implementation
to compare these defaults
In SQLite: task_instance.map_index & task_reschedule.map_index
are not comparing accurately. Sometimes they are equal, sometimes they are not.
Alembic warned that this feature has varied accuracy depending on backends.
See: (https://alembic.sqlalchemy.org/en/latest/api/runtime.html#alembic.runtime.environment.EnvironmentContext.configure.params.compare_server_default)
"""
dialect_name = context.connection.dialect.name
if dialect_name in ["sqlite"]:
return False
if (
dialect_name == "mysql"
and metadata_column.name == "pool_slots"
and metadata_column.table.name == "task_instance"
):
# We removed server_default value in ORM to avoid expensive migration
# (it was removed in postgres DB in migration head 7b2661a43ba3 ).
# As a side note, server default value here was only actually needed for the migration
# where we added the column in the first place -- now that it exists and all
# existing rows are populated with a value this server default is never used.
return False
return None
def get_sqla_model_classes():
"""
Get all SQLAlchemy class mappers.
SQLAlchemy < 1.4 does not support registry.mappers so we use
try/except to handle it.
"""
from airflow.models.base import Base
try:
return [mapper.class_ for mapper in Base.registry.mappers]
except AttributeError:
return Base._decl_class_registry.values()
def get_query_count(query_stmt: Select, *, session: Session) -> int:
"""
Get count of a query.
A SELECT COUNT() FROM is issued against the subquery built from the
given statement. The ORDER BY clause is stripped from the statement
since it's unnecessary for COUNT, and can impact query planning and
degrade performance.
:meta private:
"""
count_stmt = select(func.count()).select_from(query_stmt.order_by(None).subquery())
result = session.scalar(count_stmt)
return result or 0
async def get_query_count_async(statement: Select, *, session: AsyncSession) -> int:
"""
Get count of a query.
A SELECT COUNT() FROM is issued against the subquery built from the
given statement. The ORDER BY clause is stripped from the statement
since it's unnecessary for COUNT, and can impact query planning and
degrade performance.
:meta private:
"""
count_stmt = select(func.count()).select_from(statement.order_by(None).subquery())
result = await session.scalar(count_stmt)
return result or 0
def check_query_exists(query_stmt: Select, *, session: Session) -> bool:
"""
Check whether there is at least one row matching a query.
A SELECT 1 FROM is issued against the subquery built from the given
statement. The ORDER BY clause is stripped from the statement since it's
unnecessary, and can impact query planning and degrade performance.
:meta private:
"""
count_stmt = select(literal(True)).select_from(query_stmt.order_by(None).subquery())
# we must cast to bool because scalar() can return None
return bool(session.scalar(count_stmt))
def exists_query(*where: ColumnElement[bool], session: Session) -> bool:
"""
Check whether there is at least one row matching given clauses.
This does a SELECT 1 WHERE ... LIMIT 1 and check the result.
:meta private:
"""
stmt = select(literal(True)).where(*where).limit(1)
return session.scalar(stmt) is not None
@attrs.define(slots=True)
| DBLocks |
python | scrapy__scrapy | scrapy/spidermiddlewares/referer.py | {
"start": 5974,
"end": 6944
} | class ____(ReferrerPolicy):
"""
https://www.w3.org/TR/referrer-policy/#referrer-policy-strict-origin
The "strict-origin" policy sends the ASCII serialization
of the origin of the request client when making requests:
- from a TLS-protected environment settings object to a potentially trustworthy URL, and
- from non-TLS-protected environment settings objects to any origin.
Requests from TLS-protected request clients to non- potentially trustworthy URLs,
on the other hand, will contain no referrer information.
A Referer HTTP header will not be sent.
"""
name: str = POLICY_STRICT_ORIGIN
def referrer(self, response_url: str, request_url: str) -> str | None:
if (
self.tls_protected(response_url)
and self.potentially_trustworthy(request_url)
) or not self.tls_protected(response_url):
return self.origin_referrer(response_url)
return None
| StrictOriginPolicy |
python | allegroai__clearml | clearml/backend_api/services/v2_23/projects.py | {
"start": 125693,
"end": 129515
} | class ____(Request):
"""
Get user and system tags used for the tasks under the specified projects
:param include_system: If set to 'true' then the list of the system tags is
also returned. The default value is 'false'
:type include_system: bool
:param projects: The list of projects under which the tags are searched. If not
passed or empty then all the projects are searched
:type projects: Sequence[str]
:param filter: Filter on entities to collect tags from
:type filter: dict
"""
_service = "projects"
_action = "get_task_tags"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"filter": {
"description": "Filter on entities to collect tags from",
"properties": {
"system_tags": {
"description": "The list of system tag values to filter by. Use 'null' value to specify empty system tags. Use '__Snot' value to specify that the following value should be excluded",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "The list of tag values to filter by. Use 'null' value to specify empty tags. Use '__Snot' value to specify that the following value should be excluded",
"items": {"type": "string"},
"type": "array",
},
},
"type": ["object", "null"],
},
"include_system": {
"default": False,
"description": "If set to 'true' then the list of the system tags is also returned. The default value is 'false'",
"type": ["boolean", "null"],
},
"projects": {
"description": "The list of projects under which the tags are searched. If not passed or empty then all the projects are searched",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self,
include_system: Optional[bool] = False,
projects: Optional[List[str]] = None,
filter: Optional[dict] = None,
**kwargs: Any
) -> None:
super(GetTaskTagsRequest, self).__init__(**kwargs)
self.include_system = include_system
self.projects = projects
self.filter = filter
@schema_property("include_system")
def include_system(self) -> Optional[bool]:
return self._property_include_system
@include_system.setter
def include_system(self, value: Optional[bool]) -> None:
if value is None:
self._property_include_system = None
return
self.assert_isinstance(value, "include_system", (bool,))
self._property_include_system = value
@schema_property("projects")
def projects(self) -> Optional[List[str]]:
return self._property_projects
@projects.setter
def projects(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_projects = None
return
self.assert_isinstance(value, "projects", (list, tuple))
self.assert_isinstance(value, "projects", six.string_types, is_array=True)
self._property_projects = value
@schema_property("filter")
def filter(self) -> Optional[dict]:
return self._property_filter
@filter.setter
def filter(self, value: Optional[dict]) -> None:
if value is None:
self._property_filter = None
return
self.assert_isinstance(value, "filter", (dict,))
self._property_filter = value
| GetTaskTagsRequest |
python | encode__django-rest-framework | tests/test_filters.py | {
"start": 2415,
"end": 2544
} | class ____(models.Model):
title = models.CharField(max_length=20)
text = models.CharField(max_length=100)
| SearchFilterModel |
python | ApeWorX__ape | src/ape/plugins/config.py | {
"start": 146,
"end": 1075
} | class ____(PluginType):
"""
A registered config item. Plugins register config implementations
when they allow additional user-configuration, set in the ``ape-config.yaml``.
See the :class:`~ape.managers.config.ConfigManager` documentation for more
information on the ``ape-config.yaml``.
"""
@hookspec
def config_class(self) -> type["PluginConfig"]: # type: ignore[empty-body]
"""
A hook that returns a :class:`~ape.api.config.PluginConfig` parser class that can be
used to deconstruct the user config options for this plugins.
**NOTE**: If none are specified, all injected :class:`ape.api.config.PluginConfig`'s
are empty.
Usage example::
@plugins.register(plugins.Config)
def config_class():
return MyPluginConfig
Returns:
type[:class:`~ape.api.config.PluginConfig`]
"""
| Config |
python | altair-viz__altair | tools/datasets/models.py | {
"start": 1141,
"end": 1252
} | class ____(TypedDict, total=False):
title: str
path: Required[str]
email: str
version: str
| Source |
python | crytic__slither | slither/detectors/attributes/constant_pragma.py | {
"start": 431,
"end": 2179
} | class ____(AbstractDetector):
"""
Check that the same pragma is used in all the files
"""
ARGUMENT = "pragma"
HELP = "If different pragma directives are used"
IMPACT = DetectorClassification.INFORMATIONAL
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#different-pragma-directives-are-used"
WIKI_TITLE = "Different pragma directives are used"
WIKI_DESCRIPTION = "Detect whether different Solidity versions are used."
WIKI_RECOMMENDATION = "Use one Solidity version."
def _detect(self) -> List[Output]:
results = []
pragma_directives_by_version = OrderedDict()
for pragma in self.compilation_unit.pragma_directives:
if pragma.is_solidity_version:
if pragma.version not in pragma_directives_by_version:
pragma_directives_by_version[pragma.version] = [pragma]
else:
pragma_directives_by_version[pragma.version].append(pragma)
versions = list(pragma_directives_by_version.keys())
if len(versions) > 1:
info: DETECTOR_INFO = [f"{len(versions)} different versions of Solidity are used:\n"]
for version in versions:
pragmas = pragma_directives_by_version[version]
info += [f"\t- Version constraint {version} is used by:\n"]
for pragma in pragmas:
info += ["\t\t-", pragma, "\n"]
res = self.generate_result(info)
results.append(res)
return results
@staticmethod
def _format(slither: SlitherCompilationUnit, result: Dict) -> None:
custom_format(slither, result)
| ConstantPragma |
python | django-extensions__django-extensions | django_extensions/management/commands/dumpscript.py | {
"start": 28992,
"end": 29125
} | class ____:
def __init__(self, string):
self.repr = string
def __repr__(self):
return self.repr
| StrToCodeChanger |
python | pytorch__pytorch | torch/_inductor/codegen/common.py | {
"start": 65643,
"end": 72535
} | class ____(Generic[CSEVariableType, AugmentedKeyT]):
"""Common subexpression elimination"""
def __init__(
self,
prefix: str = "",
suffix: str = "",
name_prefix: str = "tmp",
iter_buffers: Optional[itertools.count[int]] = None,
store_cache: Optional[MutableMapping[str, CSEVariableType]] = None,
reduction_cache: Optional[
MutableMapping[ReductionCacheKey, CSEVariableType]
] = None,
varname_map: Optional[dict[str, CSEVariableType]] = None,
):
self.prefix = prefix
self.suffix = suffix
self._cache: MutableMapping[AugmentedKeyT, CSEVariableType] = {}
self.name_prefix = name_prefix
self.store_cache: MutableMapping[str, CSEVariableType] = store_cache or {}
self.reduction_cache: MutableMapping[ReductionCacheKey, CSEVariableType] = (
reduction_cache or {}
)
self.iter_buffer_ids: itertools.count[int] = iter_buffers or itertools.count()
self.invalidated_stores: OrderedSet[str] = OrderedSet()
self.varname_map: dict[str, CSEVariableType] = varname_map or {}
def invalidate(self, keep_vars: OrderedSet[CSEVariable]) -> None:
for name, tmp in [*self.store_cache.items()]:
if tmp not in keep_vars:
del self.store_cache[name]
self.invalidated_stores.add(name)
if keep_vars:
self._cache = {k: v for k, v in self._cache.items() if v in keep_vars}
else:
self._cache = {}
def clone(self) -> Self:
return type(self)(
prefix=self.prefix,
suffix=self.suffix,
name_prefix=self.name_prefix,
iter_buffers=self.iter_buffer_ids,
store_cache=self.store_cache,
varname_map=self.varname_map,
reduction_cache=self.reduction_cache,
)
def scoped_copy(self) -> Self:
"""Return a copy of using ScopedDict so changes to *_cache aren't visible in self"""
new_cse = self.clone()
new_cse._cache = ScopedDict(self._cache)
new_cse.reduction_cache = ScopedDict(self.reduction_cache)
new_cse.store_cache = ScopedDict(self.store_cache)
return new_cse
def augment_key(self, cache_key: str) -> AugmentedKeyT:
"Override this method to augment cache key with backend specifics"
return cast(AugmentedKeyT, cache_key)
def put(self, cache_key: str, val: CSEVariableType) -> None:
self._cache[self.augment_key(cache_key)] = val
def contains(self, cache_key: str) -> bool:
return self.augment_key(cache_key) in self._cache
def try_get(self, cache_key: str) -> Optional[CSEVariableType]:
return self._cache.get(self.augment_key(cache_key), None)
def get(self, cache_key: str) -> CSEVariableType:
return self._cache[self.augment_key(cache_key)]
def generate(
self,
buffer: IndentedBuffer,
expr: Union[str, CSEVariable, OpsValue, IndentedBuffer, DeferredLineBase],
*,
bounds: ValueRanges[Any] = ValueRanges.unknown(),
write: bool = True,
assignment: bool = True,
dtype: Optional[torch.dtype] = None,
shape: BlockShapeType = None,
) -> CSEVariableType:
if isinstance(expr, OpsValue):
expr = expr.value
assert write or assignment
if isinstance(expr, CSEVariable):
# If the expressions were always created with all the information, we could
# assert expr.bounds == bounds, but sometimes the expression is created
# with the loose ValueRanges.unknown(), so we need to tighten the bounds
expr.bounds = expr.bounds.tighten(bounds)
expr.use_count += 1
return cast(CSEVariableType, expr)
elif isinstance(expr, IndentedBuffer):
cache_key = expr.getvalue()
elif isinstance(expr, DeferredLineBase):
cache_key = expr.line
else:
assert isinstance(expr, str)
cache_key = expr
var = self.try_get(cache_key)
if shape is None and not assignment:
# since there's no assignment to a variable, use any shape here
# other than None to avoid the unknown shape failures
shape = ()
if not var:
var = self.newvar(bounds, dtype, shape)
self.put(cache_key, var)
if write:
if V.kernel.current_node:
V.kernel.current_node.codegen_originating_info(
buffer, only_once=True
)
if isinstance(expr, IndentedBuffer):
if assignment:
buffer.writeline(f"{self.prefix}{var} =")
buffer.splice(expr)
buffer.writeline(self.suffix)
elif isinstance(expr, DeferredLineBase):
assert assignment
buffer.writeline(
expr._new_line(f"{self.prefix}{var} = {expr.line}{self.suffix}")
)
else:
if assignment:
line = f"{self.prefix}{var} = {expr}{self.suffix}"
else:
line = f"{expr}{self.suffix}"
buffer.writeline(line)
# cpp backend cannot determine is_vec at this point
if (
assignment
and (
config.test_configs.runtime_triton_dtype_assert
or config.test_configs.static_cpp_dtype_assert
)
and dtype is not None
and get_current_backend() != "cpp"
):
check_dtype(buffer, var, dtype)
else:
var.bounds = var.bounds.tighten(bounds)
var.use_count += 1
return var
def newvar(
self,
bounds: ValueRanges[Any] = ValueRanges.unknown(),
dtype: Optional[torch.dtype] = None,
shape: BlockShapeType = None,
) -> CSEVariableType:
var_name = f"{self.name_prefix}{next(self.iter_buffer_ids)}"
var = V.kernel.create_cse_var(var_name, bounds, dtype, shape)
self.varname_map[var_name] = var
return var
def namedvar(
self,
name: str,
bounds: ValueRanges[Any] = ValueRanges.unknown(),
dtype: Optional[torch.dtype] = None,
shape: BlockShapeType = None,
) -> CSEVariableType:
torch._check_value(
name not in self.varname_map, lambda: f"duplicate name: {name}"
)
var = V.kernel.create_cse_var(name, bounds, dtype, shape)
self.varname_map[name] = var
return var
| CSE |
python | huggingface__transformers | src/transformers/models/zamba2/modeling_zamba2.py | {
"start": 25309,
"end": 47701
} | class ____(nn.Module):
"""
Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
and is why Mamba is called **selective** state spaces)
"""
def __init__(self, config: Zamba2Config, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.ssm_state_size = config.mamba_d_state
self.conv_kernel_size = config.mamba_d_conv
self.intermediate_size = int(config.mamba_expand * self.hidden_size)
self.layer_idx = layer_idx
self.use_conv_bias = config.use_conv_bias
self.activation = "silu"
self.act = nn.SiLU()
self.use_mem_eff_path = config.use_mem_eff_path
self.n_groups = config.mamba_ngroups
self.head_dim = config.mamba_headdim
self.num_heads = self.config.n_mamba_heads
self.chunk_size = config.chunk_size
self.time_step_limit = config.time_step_limit
self.time_step_min = config.time_step_min
self.time_step_max = config.time_step_max
self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
self.conv1d = nn.Conv1d(
in_channels=self.conv_dim,
out_channels=self.conv_dim,
bias=True,
kernel_size=config.mamba_d_conv,
groups=self.conv_dim,
padding=config.mamba_d_conv - 1,
)
# projection of the input hidden states
projection_size = self.intermediate_size + self.conv_dim + self.num_heads
self.in_proj = nn.Linear(
self.hidden_size,
projection_size,
bias=config.add_bias_linear,
)
# selective projection used to make dt, B and C input dependent
# time step projection (discretization)
# instantiate once and copy inv_dt in init_weights of PretrainedModel
self.dt_bias = nn.Parameter(torch.ones(self.num_heads))
# S4D real initialization. These are not discretized!
# The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
A = torch.arange(1, self.num_heads + 1)
self.A_log = nn.Parameter(torch.log(A))
self.norm = Zamba2RMSNormGated(
self.intermediate_size, group_size=self.intermediate_size // self.n_groups, eps=1e-5
)
self.D = nn.Parameter(torch.ones(self.num_heads))
self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.add_bias_linear)
if not is_fast_path_available:
logger.warning_once(
"The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`"
" is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and"
" https://github.com/Dao-AILab/causal-conv1d"
)
def cuda_kernels_forward(
self,
hidden_states: torch.Tensor,
cache_params: Optional[Zamba2HybridDynamicCache] = None,
attention_mask: Optional[torch.Tensor] = None,
):
# set up dimensions for reshapes later
batch_size, seq_len, _ = hidden_states.shape
groups_time_state_size = self.n_groups * self.ssm_state_size
d_to_remove = 2 * self.intermediate_size + 2 * self.n_groups * self.ssm_state_size + self.num_heads
# getting projected states from cache if it exists
if cache_params is not None and cache_params.has_previous_state:
in_projected_states = self.in_proj(hidden_states.squeeze(1)) # (B 2D)
d_mlp = (in_projected_states.shape[-1] - d_to_remove) // 2
split_projection_dim = [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads]
_, _, gate, hidden_states_B_C, dt = torch.split(in_projected_states, split_projection_dim, dim=-1)
hidden_states_B_C = causal_conv1d_update(
hidden_states_B_C,
cache_params.conv_states[self.layer_idx],
self.conv1d.weight.squeeze(1),
self.conv1d.bias,
self.activation,
)
hidden_states, B, C = torch.split(
hidden_states_B_C,
[self.intermediate_size, groups_time_state_size, groups_time_state_size],
dim=-1,
)
A = -torch.exp(self.A_log.float()) # (nheads,)
A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
dt = dt[:, :, None].expand(-1, -1, self.head_dim)
dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
D = self.D[:, None, ...].expand(-1, self.head_dim)
B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
hidden_states = selective_state_update(
cache_params.ssm_states[self.layer_idx],
hidden_states_reshaped,
dt,
A,
B,
C,
D,
z=None,
dt_bias=dt_bias,
dt_softplus=True,
)
hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
hidden_states = self.norm(hidden_states, gate)
out = self.out_proj(hidden_states)[:, None, ...]
# if no cache is found, calling the kernel
else:
if attention_mask is not None and not torch.all(attention_mask == 1):
# tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
dtype = hidden_states.dtype
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
# 1. Gated MLP's linear projection
projected_states = self.in_proj(hidden_states)
A = -torch.exp(self.A_log.float()) # (num_heads) or (intermediate_size, state_size)
dt_limit_kwargs = {} if self.time_step_limit is None else {"dt_limit": self.time_step_limit}
if attention_mask is not None:
input_not_masked = torch.all(attention_mask == 1)
else:
input_not_masked = True
if self.use_mem_eff_path and self.training and cache_params is None and input_not_masked:
out, ssm_state = mamba_split_conv1d_scan_combined(
projected_states,
self.conv1d.weight.squeeze(1),
self.conv1d.bias,
self.dt_bias,
A,
D=self.D,
chunk_size=self.chunk_size,
seq_idx=None,
activation=self.activation,
rmsnorm_weight=self.norm.weight,
rmsnorm_eps=self.norm.variance_epsilon,
outproj_weight=self.out_proj.weight,
outproj_bias=self.out_proj.bias,
headdim=self.head_dim,
ngroups=self.n_groups,
norm_before_gate=False,
return_final_states=True,
**dt_limit_kwargs,
)
else:
gate, hidden_states_B_C, time_step = torch.split(
projected_states,
[self.intermediate_size, self.conv_dim, self.num_heads],
dim=-1,
)
# 1D Convolution
if cache_params is not None:
hidden_states_B_C_t = hidden_states_B_C.transpose(1, 2)
conv_state = nn.functional.pad(
hidden_states_B_C_t, (self.conv_kernel_size - hidden_states_B_C_t.shape[-1], 0)
)
cache_params.conv_states[self.layer_idx].copy_(conv_state)
if causal_conv1d_fn is None or self.activation not in ["silu", "swish"]:
hidden_states_B_C = self.act(
self.conv1d(hidden_states_B_C.transpose(1, 2)).transpose(1, 2)[:, :seq_len]
) # (B, L, self.d_inner + 2 * ngroups * d_state)
else:
hidden_states_B_C = causal_conv1d_fn(
x=hidden_states_B_C.transpose(1, 2),
weight=self.conv1d.weight.squeeze(1),
bias=self.conv1d.bias,
activation=self.activation,
).transpose(1, 2)[:, :seq_len]
hidden_states, B, C = torch.split(
hidden_states_B_C,
[self.intermediate_size, groups_time_state_size, groups_time_state_size],
dim=-1,
)
if attention_mask is not None and not torch.all(attention_mask == 1):
# tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
dtype = hidden_states.dtype
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
scan_output, ssm_state = mamba_chunk_scan_combined(
hidden_states.view(batch_size, seq_len, -1, self.head_dim),
time_step,
A,
B.view(batch_size, seq_len, self.n_groups, -1),
C.view(batch_size, seq_len, self.n_groups, -1),
chunk_size=self.chunk_size,
D=self.D,
z=None,
seq_idx=None,
return_final_states=True,
dt_bias=self.dt_bias,
dt_softplus=True,
**dt_limit_kwargs,
)
if ssm_state is not None and cache_params is not None:
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
scan_output = scan_output.view(batch_size, seq_len, -1)
# Multiply "gate" branch and apply extra normalization layer
scan_output = self.norm(scan_output, gate)
out = self.out_proj(scan_output)
return out
# fmt: off
def torch_forward(self, input_states, cache_params: Optional[Zamba2HybridDynamicCache]=None, attention_mask: Optional[torch.Tensor]=None):
batch_size, seq_len, _ = input_states.shape
dtype = input_states.dtype
# Gated MLP's linear projection
if cache_params is not None and cache_params.has_previous_state:
projected_states = self.in_proj(input_states.squeeze(1))
else:
if attention_mask is not None and not torch.all(attention_mask==1):
# tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
input_states = (input_states * attention_mask[:, :, None]).to(dtype)
projected_states = self.in_proj(input_states)
d_mlp = (projected_states.shape[-1] - 2 * self.intermediate_size - 2 * self.n_groups * self.ssm_state_size- self.num_heads) // 2
_, _, gate, hidden_states, dt = projected_states.split(
[d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
)
# Convolution sequence transformation
if cache_params is not None:
ssm_state = cache_params.ssm_states[self.layer_idx].clone()
ssm_state = ssm_state.to(hidden_states.device)
if cache_params.has_previous_state:
gate = gate.unsqueeze(1)
conv_state = cache_params.conv_states[self.layer_idx] # [batch, intermediate_size, conv_kernel_size]
conv_state = torch.roll(conv_state, shifts=-1, dims=-1)
# handle batched generation - states are copied through
conv_state[:, :, -1] = hidden_states[:, 0, :] if hidden_states.ndim == 3 else hidden_states
cache_params.conv_states[self.layer_idx].copy_(conv_state)
hidden_states = torch.sum(conv_state.to(projected_states.device) * self.conv1d.weight[:, 0, :], dim=-1)
if self.use_conv_bias:
hidden_states += self.conv1d.bias
hidden_states = self.act(hidden_states).to(dtype)[:, None, ...] # [batch, 1, intermediate_size] : decoding
else:
hidden_states = hidden_states.transpose(1,2)
conv_state = nn.functional.pad(
hidden_states,
(self.conv_kernel_size - hidden_states.shape[-1], 0)
)
cache_params.conv_states[self.layer_idx].copy_(conv_state)
hidden_states = self.act(self.conv1d(hidden_states).transpose(1,2))[:, :seq_len, :] # [batch, intermediate_size, seq_len]
if attention_mask is not None and not torch.all(attention_mask==1):
dtype = hidden_states.dtype
# tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
else:
ssm_state = torch.zeros(
(batch_size, self.num_heads, self.head_dim, self.ssm_state_size),
device=hidden_states.device, dtype=dtype
)
hidden_states = self.act(self.conv1d(hidden_states.transpose(1, 2))[..., :seq_len].transpose(1, 2))
hidden_states, B, C = torch.split(hidden_states, [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size], dim=-1)
A = -torch.exp(self.A_log.float()) # [num_heads]
if cache_params is not None and cache_params.has_previous_state:
# Note: there is no need to pad parameter matrices here, as there is just one new token
# for batched generation
dt = dt[:, None, ...] if dt.ndim == 2 else dt[:, 0, :][:, None, ...]
dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
# [num_heads] -> [num_heads, head_dim]
dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)
dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
dt = torch.clamp(dt, self.time_step_min) #, self.time_step_max)
A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
# [bsz, num_heads, head_dim, state_size]
dA = torch.exp(dt[..., None] * A)
# Discretize B
# [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] ->
# -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size]
B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
B = B.reshape(batch_size, -1, B.shape[-1])
# [bsz, num_heads, head_dim, state_size]
dB = dt[..., None] * B[..., None, :]
# Discretize x into dB
# [bsz, intermediate_size] -> [bsz, num_heads, head_dim]
hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
dBx = dB * hidden_states[..., None]
# State calculation
cache_params.ssm_states[self.layer_idx].copy_(
cache_params.ssm_states[self.layer_idx] * dA + dBx
)
# Subsequent output
# [bsz, n_groups * state_size] -> [bsz, num_heads, state_size]
C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
C = C.reshape(batch_size, -1, C.shape[-1])
# [bsz, num_heads, head_dim]
ssm_states = cache_params.ssm_states[self.layer_idx].to(C.dtype) # Shape: [b, h, d, n]
# Reshape ssm_states to merge the first two dimensions
ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size) # Shape: [b*h, d, n]
C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1) # Shape: [b*h, n, 1]
y = torch.bmm(ssm_states_reshaped, C_reshaped)
y = y.view(batch_size, self.num_heads, self.head_dim)
# D skip connection
# [num_heads] -> [num_heads, head_dim]
D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
y = (y + hidden_states * D).to(y.dtype)
# [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size]
y = y.reshape(batch_size, -1)[:, None, ...]
else:
# begin ssd naive implementation without einsums
dt = nn.functional.softplus(dt + self.dt_bias)
dt = torch.clamp(dt, self.time_step_min)
hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
B = B.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
C = C.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size
D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)
# Discretize x and A
hidden_states = hidden_states * dt[..., None]
A = A.to(hidden_states.dtype) * dt
# Rearrange into blocks/chunks
hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]
# [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size]
A = A.permute(0, 3, 1, 2)
A_cumsum = torch.cumsum(A, dim=-1)
# 1. Compute the output for each intra-chunk (diagonal blocks)
# This is the analog of a causal mask
L = torch.exp(segment_sum(A))
# First, contraction of C and B to get G (attention-weights like)
G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, : ,:] # shape: (b, c, l, s, h, n)
G = G_intermediate.sum(dim=-1) # shape: (b, c, l, s, h)
# Step 2: Compute M, equivalent to applying attention mask to weights
M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
M = M_intermediate.sum(dim=-1)
# Step 3: Compute Y_diag (apply to values)
Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(3)
# (right term of low-rank factorization of off-diagonal blocks; B terms)
decay_states = torch.exp(A_cumsum[:, :, :, -1:] - A_cumsum)
B_decay_contraction = B * decay_states.permute(0, 2, 3, 1)[..., None]
# permute back B * decay states
states = (B_decay_contraction.permute(0, 1, 3, 2, 4)[..., None] * hidden_states.permute(0, 1, 3, 2, 4)[..., None, :]).sum(dim=3).permute(0, 1, 2, 4, 3)
if cache_params is not None and cache_params.has_previous_state:
previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...]
else:
previous_states = torch.zeros_like(states[:, :1])
states = torch.cat([previous_states, states], dim=1)
decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
states_permuted = states.permute(0, 2, 1, 3, 4)
result = (decay_chunk[..., None, None] * states_permuted[:, :, None, ...]).sum(dim=2)
new_states = result.permute(0, 2, 1, 3, 4)
states, ssm_state = new_states[:, :-1], new_states[:, -1]
# Compute state -> output conversion per chunk
# (left term of low-rank factorization of off-diagonal blocks; C terms)
state_decay_out = torch.exp(A_cumsum)
# compute Yoff
C_times_states = (C[..., None, :] * states[:, :, None, ...])
state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None])
# Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks)
y = Y_diag + Y_off
# [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim]
y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)
y = y + D_residual
# Cutting off padded chunks
if pad_size > 0:
y = y[:, :seq_len, :, :]
y = y.reshape(batch_size, seq_len, -1)
if ssm_state is not None and cache_params is not None:
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
scan_output = self.norm(y, gate)
# end ssd naive
# 4. Final linear projection
contextualized_states = self.out_proj(scan_output.to(dtype)) # [batch, seq_len, hidden_size]
return contextualized_states
# fmt: on
def forward(
self,
hidden_states,
cache_params: Optional[Zamba2HybridDynamicCache] = None,
attention_mask: Optional[torch.Tensor] = None,
):
if is_fast_path_available and "cuda" in self.in_proj.weight.device.type:
return self.cuda_kernels_forward(hidden_states, cache_params, attention_mask)
return self.torch_forward(hidden_states, cache_params, attention_mask)
| Zamba2MambaMixer |
python | ansible__ansible | lib/ansible/utils/encrypt.py | {
"start": 2865,
"end": 3478
} | class ____(object):
algorithms = {
'md5_crypt': _Algo(crypt_id='1', salt_size=8),
'bcrypt': _Algo(crypt_id='2b', salt_size=22, implicit_rounds=12, salt_exact=True, implicit_ident='2b', rounds_format='cost'),
'sha256_crypt': _Algo(crypt_id='5', salt_size=16, implicit_rounds=535000, rounds_format='rounds'),
'sha512_crypt': _Algo(crypt_id='6', salt_size=16, implicit_rounds=656000, rounds_format='rounds'),
}
def __init__(self, algorithm):
self.algorithm = algorithm
display.vv(f"Using {self.__class__.__name__} to hash input with {algorithm!r}")
| BaseHash |
python | scipy__scipy | benchmarks/benchmarks/spatial.py | {
"start": 10497,
"end": 11021
} | class ____(Benchmark):
params = [10, 100, 1000, 5000, 10000]
param_names = ['num_points']
def setup(self, num_points):
self.points = generate_spherical_points(num_points)
self.sv = SphericalVoronoi(self.points, radius=1,
center=np.zeros(3))
def time_spherical_polygon_vertex_sorting(self, num_points):
"""Time the vertex sorting operation in the Spherical Voronoi
code.
"""
self.sv.sort_vertices_of_regions()
| SphericalVorSort |
python | PyCQA__pylint | tests/functional/ext/private_import/private_import.py | {
"start": 4483,
"end": 4721
} | class ____:
def save(self):
return self
# Treat relative imports as internal
from .other_file import _private
from ..parent import _private
from _private_module_x import some_name # [import-private-name]
VAR = some_name
| Example |
python | scipy__scipy | scipy/sparse/linalg/_dsolve/tests/test_linsolve.py | {
"start": 16639,
"end": 26645
} | class ____:
def setup_method(self):
use_solver(useUmfpack=False)
n = 40
d = arange(n) + 1
self.n = n
self.A = dia_array(((d, 2*d, d[::-1]), (-3, 0, 5)), shape=(n, n)).tocsc()
def _smoketest(self, spxlu, check, dtype, idx_dtype):
if np.issubdtype(dtype, np.complexfloating):
A = self.A + 1j*self.A.T
else:
A = self.A
A = A.astype(dtype)
A.indices = A.indices.astype(idx_dtype, copy=False)
A.indptr = A.indptr.astype(idx_dtype, copy=False)
lu = spxlu(A)
rng = np.random.RandomState(1234)
# Input shapes
for k in [None, 1, 2, self.n, self.n+2]:
msg = f"k={k!r}"
if k is None:
b = rng.rand(self.n)
else:
b = rng.rand(self.n, k)
if np.issubdtype(dtype, np.complexfloating):
b = b + 1j*rng.rand(*b.shape)
b = b.astype(dtype)
x = lu.solve(b)
check(A, b, x, msg)
x = lu.solve(b, 'T')
check(A.T, b, x, msg)
x = lu.solve(b, 'H')
check(A.T.conj(), b, x, msg)
def test_splu_smoketest(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", SparseEfficiencyWarning)
self._internal_test_splu_smoketest()
def _internal_test_splu_smoketest(self):
# Check that splu works at all
def check(A, b, x, msg=""):
eps = np.finfo(A.dtype).eps
r = A @ x
assert_(abs(r - b).max() < 1e3*eps, msg)
for dtype in [np.float32, np.float64, np.complex64, np.complex128]:
for idx_dtype in [np.int32, np.int64]:
self._smoketest(splu, check, dtype, idx_dtype)
def test_spilu_smoketest(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", SparseEfficiencyWarning)
self._internal_test_spilu_smoketest()
def _internal_test_spilu_smoketest(self):
errors = []
def check(A, b, x, msg=""):
r = A @ x
err = abs(r - b).max()
assert_(err < 1e-2, msg)
if b.dtype in (np.float64, np.complex128):
errors.append(err)
for dtype in [np.float32, np.float64, np.complex64, np.complex128]:
for idx_dtype in [np.int32, np.int64]:
self._smoketest(spilu, check, dtype, idx_dtype)
assert_(max(errors) > 1e-5)
def test_spilu_drop_rule(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", SparseEfficiencyWarning)
# Test passing in the drop_rule argument to spilu.
A = eye_array(2)
rules = [
b'basic,area'.decode('ascii'), # unicode
b'basic,area', # ascii
[b'basic', b'area'.decode('ascii')]
]
for rule in rules:
# Argument should be accepted
assert_(isinstance(spilu(A, drop_rule=rule), SuperLU))
def test_splu_nnz0(self):
A = csc_array((5,5), dtype='d')
assert_raises(RuntimeError, splu, A)
def test_spilu_nnz0(self):
A = csc_array((5,5), dtype='d')
assert_raises(RuntimeError, spilu, A)
def test_splu_basic(self):
# Test basic splu functionality.
n = 30
rng = np.random.RandomState(12)
a = rng.rand(n, n)
a[a < 0.95] = 0
# First test with a singular matrix
a[:, 0] = 0
a_ = csc_array(a)
# Matrix is exactly singular
assert_raises(RuntimeError, splu, a_)
# Make a diagonal dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_array(a)
lu = splu(a_)
b = ones(n)
x = lu.solve(b)
assert_almost_equal(dot(a, x), b)
def test_splu_perm(self):
# Test the permutation vectors exposed by splu.
n = 30
rng = np.random.default_rng(1342354)
a = rng.random((n, n))
a[a < 0.95] = 0
# Make a diagonal dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_array(a)
lu = splu(a_)
# Check that the permutation indices do belong to [0, n-1].
for perm in (lu.perm_r, lu.perm_c):
assert_(all(perm > -1))
assert_(all(perm < n))
assert_equal(len(unique(perm)), len(perm))
# Now make a symmetric, and test that the two permutation vectors are
# the same
# Note: a += a.T relies on undefined behavior.
a = a + a.T
a_ = csc_array(a)
lu = splu(a_)
assert_array_equal(lu.perm_r, lu.perm_c)
@pytest.mark.parametrize("splu_fun, rtol", [(splu, 1e-7), (spilu, 1e-1)])
def test_natural_permc(self, splu_fun, rtol):
# Test that the "NATURAL" permc_spec does not permute the matrix
rng = np.random.RandomState(42)
n = 500
p = 0.01
A = scipy.sparse.random(n, n, p, random_state=rng)
x = rng.rand(n)
# Make A diagonal dominant to make sure it is not singular
A += (n+1)*scipy.sparse.eye_array(n)
A_ = csc_array(A)
b = A_ @ x
# without permc_spec, permutation is not identity
lu = splu_fun(A_)
assert_(np.any(lu.perm_c != np.arange(n)))
# with permc_spec="NATURAL", permutation is identity
lu = splu_fun(A_, permc_spec="NATURAL")
assert_array_equal(lu.perm_c, np.arange(n))
# Also, lu decomposition is valid
x2 = lu.solve(b)
assert_allclose(x, x2, rtol=rtol)
@pytest.mark.skipif(not hasattr(sys, 'getrefcount'), reason="no sys.getrefcount")
def test_lu_refcount(self):
# Test that we are keeping track of the reference count with splu.
n = 30
rng = np.random.default_rng(1342354)
a = rng.random((n, n))
a[a < 0.95] = 0
# Make a diagonal dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_array(a)
lu = splu(a_)
# And now test that we don't have a refcount bug
rc = sys.getrefcount(lu)
for attr in ('perm_r', 'perm_c'):
perm = getattr(lu, attr)
assert_equal(sys.getrefcount(lu), rc + 1)
del perm
assert_equal(sys.getrefcount(lu), rc)
def test_bad_inputs(self):
A = self.A.tocsc()
rng = np.random.default_rng(235634)
assert_raises(ValueError, splu, A[:,:4])
assert_raises(ValueError, spilu, A[:,:4])
for lu in [splu(A), spilu(A)]:
b = rng.random(42)
B = rng.random((42, 3))
BB = rng.random((self.n, 3, 9))
assert_raises(ValueError, lu.solve, b)
assert_raises(ValueError, lu.solve, B)
assert_raises(ValueError, lu.solve, BB)
assert_raises(TypeError, lu.solve,
b.astype(np.complex64))
assert_raises(TypeError, lu.solve,
b.astype(np.complex128))
def test_superlu_dlamch_i386_nan(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", SparseEfficiencyWarning)
# SuperLU 4.3 calls some functions returning floats without
# declaring them. On i386@linux call convention, this fails to
# clear floating point registers after call. As a result, NaN
# can appear in the next floating point operation made.
#
# Here's a test case that triggered the issue.
n = 8
d = np.arange(n) + 1
A = dia_array(((d, 2*d, d[::-1]), (-3, 0, 5)), shape=(n, n))
A = A.astype(np.float32)
spilu(A)
A = A + 1j*A
B = A.toarray()
assert_(not np.isnan(B).any())
def test_lu_attr(self):
def check(dtype, complex_2=False):
with warnings.catch_warnings():
warnings.simplefilter("ignore", SparseEfficiencyWarning)
A = self.A.astype(dtype)
if complex_2:
A = A + 1j*A.T
n = A.shape[0]
lu = splu(A)
# Check that the decomposition is as advertised
Pc = np.zeros((n, n))
Pc[np.arange(n), lu.perm_c] = 1
Pr = np.zeros((n, n))
Pr[lu.perm_r, np.arange(n)] = 1
Ad = A.toarray()
lhs = Pr.dot(Ad).dot(Pc)
rhs = (lu.L @ lu.U).toarray()
eps = np.finfo(dtype).eps
assert_allclose(lhs, rhs, atol=100*eps)
check(np.float32)
check(np.float64)
check(np.complex64)
check(np.complex128)
check(np.complex64, True)
check(np.complex128, True)
@pytest.mark.slow
def test_threads_parallel(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", SparseEfficiencyWarning)
oks = []
def worker():
try:
self.test_splu_basic()
self._internal_test_splu_smoketest()
self._internal_test_spilu_smoketest()
oks.append(True)
except Exception:
pass
threads = [threading.Thread(target=worker)
for k in range(20)]
for t in threads:
t.start()
for t in threads:
t.join()
assert_equal(len(oks), 20)
def test_singular_matrix(self):
# Test that SuperLU does not print to stdout when a singular matrix is
# passed. See gh-20993.
A = eye_array(10, format='csr')
A[-1, -1] = 0
b = np.zeros(10)
with pytest.warns(MatrixRankWarning):
res = spsolve(A, b)
assert np.isnan(res).all()
| TestSplu |
python | Lightning-AI__lightning | src/lightning/pytorch/accelerators/cpu.py | {
"start": 1061,
"end": 3313
} | class ____(Accelerator):
"""Accelerator for CPU devices."""
@override
def setup_device(self, device: torch.device) -> None:
"""
Raises:
MisconfigurationException:
If the selected device is not CPU.
"""
if device.type != "cpu":
raise MisconfigurationException(f"Device should be CPU, got {device} instead.")
@override
def get_device_stats(self, device: _DEVICE) -> dict[str, Any]:
"""Get CPU stats from ``psutil`` package."""
return get_cpu_stats()
@override
def teardown(self) -> None:
pass
@staticmethod
@override
def parse_devices(devices: Union[int, str]) -> int:
"""Accelerator device parsing logic."""
return _parse_cpu_cores(devices)
@staticmethod
@override
def get_parallel_devices(devices: Union[int, str]) -> list[torch.device]:
"""Gets parallel devices for the Accelerator."""
devices = _parse_cpu_cores(devices)
return [torch.device("cpu")] * devices
@staticmethod
@override
def auto_device_count() -> int:
"""Get the devices when set to auto."""
return 1
@staticmethod
@override
def is_available() -> bool:
"""CPU is always available for execution."""
return True
@staticmethod
@override
def name() -> str:
return "cpu"
@classmethod
@override
def register_accelerators(cls, accelerator_registry: _AcceleratorRegistry) -> None:
accelerator_registry.register(
cls.name(),
cls,
description=cls.__name__,
)
# CPU device metrics
_CPU_VM_PERCENT = "cpu_vm_percent"
_CPU_PERCENT = "cpu_percent"
_CPU_SWAP_PERCENT = "cpu_swap_percent"
_PSUTIL_AVAILABLE = RequirementCache("psutil")
def get_cpu_stats() -> dict[str, float]:
if not _PSUTIL_AVAILABLE:
raise ModuleNotFoundError(
f"Fetching CPU device stats requires `psutil` to be installed. {str(_PSUTIL_AVAILABLE)}"
)
import psutil
return {
_CPU_VM_PERCENT: psutil.virtual_memory().percent,
_CPU_PERCENT: psutil.cpu_percent(),
_CPU_SWAP_PERCENT: psutil.swap_memory().percent,
}
| CPUAccelerator |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.