language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | test/export/test_export.py | {
"start": 640391,
"end": 663898
} | class ____(TestCase):
def test_scaled_dot_product_attention_cpu(self):
"""
This test makes sure we are always getting the same decomposition result for SDPA.
As of now _scaled_dot_product_flash_attention_for_cpu is expected to show up in
export() result. Some downstream backend then further decompose it into core ATen
ops in torch/_decomp/decompositions.py (search for
_scaled_dot_product_flash_attention_for_cpu).
Export is decomposing based on the CompositeImplicitAutograd kernel implementation
of SDPA. If this test fails, it means the kernel is being modified. In this case
we strongly encourage you to change the decomposition rule under
torch/_decomp/decompositions.py along with the kernel changes, so all of the
downstream backends are not being affected.
"""
class ScaledDotProductAttention(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, q, k, v):
attn_output = F.scaled_dot_product_attention(
q, k, v, None, dropout_p=0.0, is_causal=True
)
return attn_output
q = torch.randn(1, 1, 8, 8, device="cpu")
k = torch.randn(1, 1, 8, 8, device="cpu")
v = torch.randn(1, 1, 8, 8, device="cpu")
from torch.nn.attention import SDPBackend
with torch.nn.attention.sdpa_kernel([SDPBackend.MATH]):
ep = torch.export.export(ScaledDotProductAttention(), (q, k, v))
ep.run_decompositions()
@skipIfCrossRef
@unittest.skipIf(
not PLATFORM_SUPPORTS_FLASH_ATTENTION,
"Can't run fused SDPA on this platform",
)
def test_scaled_dot_product_attention_cuda(self):
"""
This test makes sure we are always getting the same decomposition result for SDPA.
As of now _scaled_dot_product_flash_attention is expected to show up in
export() result (GPU tensors are given). Currently there's no downstream
backend relies on this export result so if this test fails, feel free to
change it to the latest export() result.
"""
class ScaledDotProductAttention(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, q, k, v):
attn_output = F.scaled_dot_product_attention(
q, k, v, None, dropout_p=0.0, is_causal=True
)
return attn_output
q = torch.randn(1, 16, 16, 64, dtype=torch.bfloat16, device="cuda")
k = torch.randn(1, 16, 16, 64, dtype=torch.bfloat16, device="cuda")
v = torch.randn(1, 16, 16, 64, dtype=torch.bfloat16, device="cuda")
ep = torch.export.export(
ScaledDotProductAttention(), (q, k, v)
).run_decompositions()
code_str = """\
def forward(self, q, k, v):
_scaled_dot_product_flash_attention = torch.ops.aten._scaled_dot_product_flash_attention.default(q, k, v, 0.0, True, scale = 0.125); q = k = v = None
getitem = _scaled_dot_product_flash_attention[0]; _scaled_dot_product_flash_attention = None
return (getitem,)"""
try:
self.assertExpectedInline(
ep.graph_module.code.strip(),
code_str,
)
except AssertionError:
code_str = """\
def forward(self, q, k, v):
_scaled_dot_product_cudnn_attention = torch.ops.aten._scaled_dot_product_cudnn_attention.default(q, k, v, None, False, 0.0, True); q = k = v = None
getitem = _scaled_dot_product_cudnn_attention[0]; _scaled_dot_product_cudnn_attention = None
return (getitem,)"""
self.assertExpectedInline(
ep.graph_module.code.strip(),
code_str,
)
def test_int_list_output(self):
class M(torch.nn.Module):
def forward(self, x):
return [((1, 3), [x + x, x * x])]
ep = torch.export.export(M(), (torch.ones(2, 3),))
res = ep.module()(torch.ones(2, 3))
self.assertEqual(res[0][0], (1, 3))
def test_primitive_constant_output(self):
class Z(torch.nn.Module):
def forward(self, x, y):
with torch.no_grad():
return y * x, "moo"
ep = torch.export.export(Z(), (torch.tensor(3), 5))
res = ep.module()(torch.tensor(4), 5)
self.assertEqual(res[0], torch.tensor(20))
self.assertEqual(res[1], "moo")
class B(torch.nn.Module):
def forward(self, x, y):
return y * x, y
ep = torch.export.export(B(), (torch.tensor(3), 5))
res = ep.module()(torch.tensor(4), 5)
self.assertEqual(res[0], torch.tensor(20))
self.assertEqual(res[1], 5)
with self.assertRaisesRegex(
AssertionError,
escape("Guard failed: y == 5"),
):
# expected 5, but got 20
res = ep.module()(torch.tensor(4), 20)
class F(torch.nn.Module):
def forward(self, x):
# return a constant of primitive type
y = 5
return y * x, y
ep = torch.export.export(F(), (torch.tensor(3),))
res = ep.module()(torch.tensor(4))
self.assertEqual(res[0], torch.tensor(20))
self.assertEqual(res[1], 5)
class Q(torch.nn.Module):
def forward(self, x, y):
return y * x, y - 1
ep = torch.export.export(Q(), (torch.tensor(3), 5))
res = ep.module()(torch.tensor(4), 5)
self.assertEqual(res[0], torch.tensor(20))
self.assertEqual(res[1], 4)
def test_unbacked_sdpa(self):
import torch
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.functional import scaled_dot_product_attention
class Module(torch.nn.Module):
def forward(
self, query: torch.Tensor, cache: torch.Tensor, start_pos: torch.Tensor
) -> torch.Tensor:
# x.sizes(): 1, 128, 16, 128
sp = start_pos.item()
# Checks needed for slicing.
torch._check(sp >= 0)
torch._check(sp <= 126)
key = cache[:, : sp + 1, :, :] # 1, sp+1, 16, 128
value = cache[:, : sp + 1, :, :] # 1, sp+1, 16, 128
query = query.transpose(1, 2) # (bs, n_local_heads, seqlen, head_dim)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
# https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/native/transformers/attention.cpp#L732
return scaled_dot_product_attention(query, key, value)
cache = torch.randn(1, 128, 16, 128, dtype=torch.float16)
query = torch.randn(1, 1, 16, 128, dtype=torch.float16)
start_pos = torch.tensor([0])
with sdpa_kernel(SDPBackend.MATH), torch.no_grad():
ep = torch.export.export(Module(), (query, cache, start_pos))
args = (query, cache, start_pos)
self.assertEqual(ep.module()(*args), Module()(*args))
args = (query, cache, torch.tensor([3]))
self.assertEqual(ep.module()(*args), Module()(*args))
args = (query, cache, torch.tensor([126]))
self.assertEqual(ep.module()(*args), Module()(*args))
def test_none_input_output(self):
class Z(torch.nn.Module):
def forward(self, x, y):
return x * x
ep = torch.export.export(Z(), (torch.tensor(3), None))
res = ep.module()(torch.tensor(4), None)
self.assertEqual(res, torch.tensor(16))
class B(torch.nn.Module):
def forward(self, x, y):
return x * x, y
ep = torch.export.export(B(), (torch.tensor(3), None))
res = ep.module()(torch.tensor(4), None)
self.assertEqual(res[0], torch.tensor(16))
self.assertEqual(res[1], None)
decomp = ep.run_decompositions()
gm = decomp.module()
res = gm(torch.tensor(4), None)
self.assertEqual(res[0], torch.tensor(16))
self.assertEqual(res[1], None)
def test_print(self):
class M(torch.nn.Module):
def forward(self, x):
print("start")
x1 = x + x
print(x1)
x2 = x1 * x1
print(1, 2, 3)
x3 = x2 + x2
return (x1, x3)
gm = export(M(), (torch.randn(3, 3),)).graph_module
self.assertExpectedInline(
gm.code.strip(),
"""\
def forward(self, x):
add = torch.ops.aten.add.Tensor(x, x); x = None
mul = torch.ops.aten.mul.Tensor(add, add)
add_1 = torch.ops.aten.add.Tensor(mul, mul); mul = None
return (add, add_1)""",
)
def test_print_graph_signature(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.buf = torch.nn.Buffer(torch.ones(3))
def forward(self, x):
x.add_(1)
self.buf.add_(2)
return self.buf + x
ep = export(M(), (torch.ones(3),))
self.assertExpectedInline(
str(ep.graph_signature).strip(),
"""\
# inputs
b_buf: BUFFER target='buf' persistent=True
x: USER_INPUT
# outputs
add: USER_OUTPUT""",
)
ep = ep.run_decompositions({})
self.assertExpectedInline(
str(ep.graph_signature).strip(),
"""\
# inputs
b_buf: BUFFER target='buf' persistent=True
x: USER_INPUT
# outputs
add_1: BUFFER_MUTATION target='buf'
add: USER_INPUT_MUTATION target='x'
add_2: USER_OUTPUT""",
)
@unittest.skipIf(not TEST_TRANSFORMERS, "No transformers")
def test_hf_logging_logger(self):
import transformers
logger = transformers.utils.logging.get_logger(__name__)
class M(torch.nn.Module):
def forward(self, x):
logger.warning_once("start")
x1 = x + x
x2 = x1 * x1
x3 = x2 + x2
return (x1, x3)
gm = export(M(), (torch.randn(3, 3),)).graph_module
self.assertExpectedInline(
gm.code.strip(),
"""\
def forward(self, x):
add = torch.ops.aten.add.Tensor(x, x); x = None
mul = torch.ops.aten.mul.Tensor(add, add)
add_1 = torch.ops.aten.add.Tensor(mul, mul); mul = None
return (add, add_1)""",
)
def test_warning(self):
class M(torch.nn.Module):
def forward(self, x):
warnings.warn("moo")
res = x + x
warnings.warn(f"{res}")
return res
gm = export(M(), (torch.randn(3, 3),)).graph_module
self.assertExpectedInline(
gm.code.strip(),
"""\
def forward(self, x):
add = torch.ops.aten.add.Tensor(x, x); x = None
return (add,)""",
)
def test_logging_logger(self):
strict = True
logger = logging.getLogger(__name__)
class M(torch.nn.Module):
def forward(self, x):
logger.log("start")
x1 = x + x
logger.debug(x1)
x2 = x1 * x1
logger.info(1, 2, 3)
x3 = x2 + x2
return (x1, x3)
gm = export(M(), (torch.randn(3, 3),), strict=strict).graph_module
self.assertExpectedInline(
gm.code.strip(),
"""\
def forward(self, x):
add = torch.ops.aten.add.Tensor(x, x); x = None
mul = torch.ops.aten.mul.Tensor(add, add)
add_1 = torch.ops.aten.add.Tensor(mul, mul); mul = None
return (add, add_1)""",
)
def test_constant_fqn(self):
class Nested(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.constant = torch.rand(2, 3)
self.parameter = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return x + self.constant
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.nested = Nested()
def forward(self, x):
return self.nested(x) + self.nested.constant + self.nested.parameter
m = Mod()
ep = export(m, (torch.rand(2, 3),), strict=True)
self.assertEqual(ep.constants["nested.constant"], m.nested.constant)
self.assertEqual(ep.module()(torch.ones(2, 3)), m(torch.ones(2, 3)))
def test_constant_name(self):
class Nested(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.constant = torch.rand(2, 3)
self.parameter = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return x + self.constant
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.nested_1 = Nested()
self.nested_2 = Nested()
def forward(self, x):
return (
self.nested_1(x)
+ self.nested_2(x)
+ self.nested_1.constant
+ self.nested_2.constant
+ self.nested_1.parameter
+ self.nested_2.parameter
)
m = Mod()
ep = export(m, (torch.rand(2, 3),), strict=False)
self.assertEqual(ep.module()(torch.ones(2, 3)), m(torch.ones(2, 3)))
# check constant fqn when there are multiple instances of the same class
self.assertEqual(ep.constants["nested_1.constant"], m.nested_1.constant)
self.assertEqual(ep.constants["nested_2.constant"], m.nested_2.constant)
# check constant_name in the graph
placeholders = [
node for node in ep.graph_module.graph.nodes if node.op == "placeholder"
]
self.assertEqual(len(placeholders), 5)
self.assertTrue(all(ph.name == ph.target for ph in placeholders))
# suffix should be added to duplicated constant_name
self.assertEqual(placeholders[2].name, "c_nested_1_constant")
self.assertEqual(placeholders[3].name, "c_nested_2_constant")
def test_nested_retrace(self):
class Nested(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.param = torch.nn.Parameter(torch.randn(3))
def forward(self, x):
return x + self.param
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.nested = Nested()
def forward(self, x):
return x + self.nested(x)
# first export
foo = Foo().to("meta")
inputs = (torch.ones(3, device="meta"),)
foo(*inputs)
ep = torch.export.export(foo, inputs, strict=False)
# second export
foo_1 = ep.module()
ep_1 = torch.export.export(foo_1, inputs, strict=False)
for node1, node2 in zip(ep.graph.nodes, ep_1.graph.nodes):
nn_module_stack_1 = node1.meta.get("nn_module_stack", None)
nn_module_stack_2 = node2.meta.get("nn_module_stack", None)
if nn_module_stack_1 is None:
self.assertTrue(nn_module_stack_2 is None)
else:
for v1, v2 in zip(
nn_module_stack_1.values(), nn_module_stack_2.values()
):
self.assertEqual(v1, v2)
def test_duplicated_getitem(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.topk(x, 2)
foo = Foo()
inputs = (torch.randn(3),)
ep = torch.export.export(foo, inputs, strict=False)
graph_module = copy.deepcopy(ep.graph_module)
call_function_node = None
num_getitems = 0
for node in graph_module.graph.nodes:
if (
node.op == "call_function"
and node.target == torch.ops.aten.topk.default
):
call_function_node = node
elif node.op == "call_function" and node.target == operator.getitem:
self.assertIs(node.args[0], call_function_node)
num_getitems += 1
self.assertIsNotNone(call_function_node)
self.assertEqual(num_getitems, 2)
output_node = list(graph_module.graph.nodes)[-1]
nodes = []
with graph_module.graph.inserting_before(output_node):
nodes.append(
graph_module.graph.call_function(
operator.getitem, (call_function_node, 1)
)
)
nodes.append(
graph_module.graph.call_function(
operator.getitem, (call_function_node, 0)
)
)
nodes.append(
graph_module.graph.call_function(
operator.getitem, (call_function_node, 0)
)
)
nodes.append(
graph_module.graph.call_function(
operator.getitem, (call_function_node, 1)
)
)
signature = ExportGraphSignature(
input_specs=ep.graph_signature.input_specs,
output_specs=ep.graph_signature.output_specs
+ [
OutputSpec(
kind=OutputKind.USER_OUTPUT,
arg=TensorArgument(name=node.name),
target=None,
)
for node in nodes
],
)
output_node.args = (output_node.args[0] + tuple(nodes),)
graph_module.recompile()
new_ep = ep._update(graph_module, signature)
new_num_getitems = 0
for node in new_ep.graph.nodes:
if (
node.op == "call_function"
and node.target == torch.ops.aten.topk.default
):
call_function_node = node
elif node.op == "call_function" and node.target == operator.getitem:
self.assertIs(node.args[0], call_function_node)
new_num_getitems += 1
self.assertEqual(num_getitems, new_num_getitems)
self.assertEqual(
len(list(new_ep.graph.nodes)[-1].args[0]), len(signature.output_specs)
)
@requires_cuda_and_triton
def test_assert_tensor_metadata_device_index(self):
class N(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
x = x.float()
y = y.float()
return x + y
inp = (torch.randn(3, device="cuda"), torch.randn(3, device="cuda"))
ep = export(N(), inp)
ep = move_to_device_pass(ep, {"cuda:0": "cuda"})
ep.module()(torch.randn(3, device="cuda:0"), torch.randn(3, device="cuda:0"))
@unittest.skipIf(not HAS_TORCHREC, "only run when there is torchrec imported")
def test_torchrec_jagged_tensor(self):
class Foo(torch.nn.Module):
def forward(self, jt) -> torch.Tensor:
vals = jt.lengths().view(-1).long()
return vals + 4
foo = Foo()
jt = JaggedTensor(
values=torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
lengths=torch.IntTensor([0, 2, 0, 1, 1, 1, 0, 3]),
offsets=torch.IntTensor([0, 0, 2, 2, 3, 4, 5, 5, 8]),
)
# TODO tmanlaibaatar
# because we call unflatten in the flat tracer, it creates a new JaggedTensor
# and it gets pruned as it is not reachable. Not sure what the right way to fix
# is but since it is just warning, probably ok to xfail it for now.
with (
self.assertWarnsRegex(
UserWarning,
"While exporting, we found certain side effects happened in the model.forward. "
"Here are the list of potential sources you can double check: \[\"L\['jt'\]\"\]",
),
torch._export.config.patch(use_new_tracer_experimental=False),
):
_ = torch.export.export(foo, (jt,), strict=True)
def test_input_output_no_stacktrace(self):
class M(torch.nn.Module):
def forward(self, x):
return x + x
pyt_model = M()
example_inputs = (torch.ones(3, 3),)
class Wrapper:
def __init__(self, model, example_inputs):
self.model = model
self.example_inputs = example_inputs
def compile(self):
self.exp_program = torch.export.export(
self.model, args=self.example_inputs
)
self.exp_program = self.exp_program.run_decompositions(
get_decompositions([torch.ops.aten.new_full])
)
def forward(self, *args, **kwargs):
self.compile()
wrapper = Wrapper(pyt_model, example_inputs)
wrapper.forward()
def test_export_with_dict_input_nested_in_args(self):
"""Test export with dictionary input nested in args."""
class MyModel(torch.nn.Module):
def __init__(self):
super(MyModel, self).__init__()
self.linear = torch.nn.Linear(10, 1)
def forward(self, data_batch):
h1 = self.linear(data_batch["a1"])
h2 = self.linear(data_batch["a2"])
return h1 + h2
# Create model and example inputs
model = MyModel()
a1 = torch.randn(10)
a2 = torch.randn(10)
original_input = {"a1": a1, "a2": a2}
example_args_forward = (original_input,)
# Export the model
exported_model = export(model, example_args_forward)
# Run both models and compare results
reordered_input = {"a2": a2, "a1": a1}
original_output = exported_model.module()(reordered_input)
loaded_output = model(original_input)
# Verify outputs are close (allowing for floating point differences)
torch.testing.assert_close(original_output, loaded_output)
def test_strict_export_with_shared_parameters(self):
"""Test that parameter names are preserved when there are shared parameters with the same name."""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.n1 = torch.nn.Parameter(torch.ones(3))
self.n2 = self.n1
def forward(self, x):
res1 = x * self.n1
res2 = x * self.n2
return res1 + res2
m = M()
ep = torch.export.export(m, (torch.ones(3),), strict=True)
gm = ep.module()
# Check that named_parameters are preserved
original_param_names = [name for name, _ in m.named_parameters()]
exported_param_names = [name for name, _ in gm.named_parameters()]
self.assertEqual(original_param_names, exported_param_names)
@unittest.skipIf(not torchdynamo.is_dynamo_supported(), "dynamo doesn't support")
| TestOneOffModelExportResult |
python | spulec__freezegun | tests/test_class_decorator.py | {
"start": 174,
"end": 1848
} | class ____:
@pytest.fixture
def ff(self) -> datetime:
return datetime.now()
@pytest.fixture
def yield_ff(self) -> Iterator[datetime]:
yield datetime.now()
@pytest.fixture
def func(self) -> Iterator[datetime]:
yield datetime.now()
def test_with_fixture(self, ff: datetime) -> None:
assert ff == FakeDatetime(2022, 10, 1, 0, 0)
assert datetime.now() == FakeDatetime(2022, 10, 1, 0, 0)
def test_without_fixture(self) -> None:
assert datetime.now() == FakeDatetime(2022, 10, 1, 0, 0)
def test_with_yield_fixture(self, yield_ff: datetime) -> None:
assert yield_ff == FakeDatetime(2022, 10, 1, 0, 0)
assert datetime.now() == FakeDatetime(2022, 10, 1, 0, 0)
def test_with_func_fixture(self, func: datetime) -> None:
assert func == FakeDatetime(2022, 10, 1, 0, 0)
assert datetime.now() == FakeDatetime(2022, 10, 1, 0, 0)
@pytest.mark.parametrize("func", ("a", "b"))
@freeze_time(datetime.now())
def test_decorator_with_argument_named_func(func: str) -> None:
"""Verify that we can pass an argument called 'func'"""
assert func in ("a", "b")
@pytest.mark.parametrize("arg", ("a", "b"))
@freeze_time(datetime.now())
def test_freezegun_with_argument_named_arg(arg: str) -> None:
"""Verify that we can pass an argument called 'arg'"""
assert arg in ("a", "b")
@freeze_time(datetime.now())
@pytest.mark.parametrize("func", ("a", "b"))
def test_freezegun_decorator_first_parametrize_second(func: str) -> None:
"""Verify that we can pass the parametrized function into freezegun"""
assert func in ("a", "b")
| TestClassDecoratorWithFixture |
python | tensorflow__tensorflow | tensorflow/python/distribute/failure_handling/failure_handling.py | {
"start": 8940,
"end": 9450
} | class ____(TerminationConfig):
"""Configurations for GCP CPU VM."""
def __init__( # pylint: disable=super-init-not-called
self,
termination_watcher_fn=None,
exit_fn=None,
grace_period=None,
save_fn=None):
self.termination_watcher_fn = termination_watcher_fn or failure_handling_util.termination_watcher_function_gce
self.exit_fn = exit_fn or failure_handling_util.gce_exit_fn
self.grace_period = grace_period or 0
self.save_fn = save_fn
| GcpCpuTerminationConfig |
python | getsentry__sentry | tests/sentry/lang/native/test_sources.py | {
"start": 1399,
"end": 5296
} | class ____:
@override_settings(SENTRY_BUILTIN_SOURCES=SENTRY_BUILTIN_SOURCES_TEST)
@patch("sentry.lang.native.sources.get_gcp_token")
@django_db_all
def test_sources_gcp_bearer_authentication(self, mock_get_gcp_token, default_project) -> None:
mock_get_gcp_token.return_value = "ya29.TOKEN"
features = {
"organizations:symbol-sources": True,
}
default_project.update_option("sentry:builtin_symbol_sources", ["aaa", "bbb"])
with Feature(features):
sources = get_sources_for_project(default_project)
for i in (1, 2):
assert "client_email" not in sources[i]
assert "private_key" not in sources[i]
assert sources[i]["bearer_token"] == "ya29.TOKEN"
@override_settings(SENTRY_BUILTIN_SOURCES=SENTRY_BUILTIN_SOURCES_TEST)
@patch("sentry.lang.native.sources.get_gcp_token")
@django_db_all
def test_source_alias(self, mock_get_gcp_token, default_project) -> None:
mock_get_gcp_token.return_value = "ya29.TOKEN"
features = {
"organizations:symbol-sources": True,
}
default_project.update_option("sentry:builtin_symbol_sources", ["ccc"])
builtin_sources_before = copy.deepcopy(settings.SENTRY_BUILTIN_SOURCES)
with Feature(features):
sources = get_sources_for_project(default_project)
assert builtin_sources_before == copy.deepcopy(settings.SENTRY_BUILTIN_SOURCES)
# Make sure that we expanded successfully here
# Source 1 will be sentry, the following 2 will be the expanded gcs sources
assert len(sources) == 3
assert sources[0]["type"] == "sentry"
assert sources[1]["type"] == "gcs"
assert sources[1]["name"] == "aaa"
assert sources[2]["type"] == "gcs"
assert sources[2]["name"] == "bbb"
for i in (1, 2):
assert "client_email" not in sources[i]
assert "private_key" not in sources[i]
assert sources[i]["bearer_token"] == "ya29.TOKEN"
@override_settings(SENTRY_BUILTIN_SOURCES=SENTRY_BUILTIN_SOURCES_TEST)
@patch("sentry.lang.native.sources.get_gcp_token")
@django_db_all
def test_source_with_private_key(self, mock_get_gcp_token, default_project) -> None:
mock_get_gcp_token.return_value = "ya29.TOKEN"
features = {
"organizations:symbol-sources": True,
}
default_project.update_option("sentry:builtin_symbol_sources", ["ddd"])
with Feature(features):
sources = get_sources_for_project(default_project)
assert sources[1]["name"] == "ddd"
assert sources[1]["client_email"] == "application@project-id.iam.gserviceaccount.com"
assert sources[1]["private_key"] == "FAKE_PRIVATE_KEY_STRING"
@override_settings(SENTRY_BUILTIN_SOURCES=SENTRY_BUILTIN_SOURCES_TEST)
@patch("sentry.lang.native.sources.get_gcp_token")
@django_db_all
def test_mixed_sources(self, mock_get_gcp_token, default_project) -> None:
"""
Tests the combination of sources where one uses credentials for authentication and the other one
uses pre-fetched token.
"""
mock_get_gcp_token.return_value = "ya29.TOKEN"
features = {
"organizations:symbol-sources": True,
}
default_project.update_option("sentry:builtin_symbol_sources", ["eee"])
with Feature(features):
sources = get_sources_for_project(default_project)
assert sources[1]["name"] == "ddd"
assert "token" not in sources[1]
assert sources[1]["client_email"] == "application@project-id.iam.gserviceaccount.com"
assert sources[1]["private_key"] == "FAKE_PRIVATE_KEY_STRING"
assert sources[2]["name"] == "aaa"
assert sources[2]["bearer_token"] == "ya29.TOKEN"
| TestGcpBearerAuthentication |
python | apache__avro | lang/py/avro/test/test_protocol.py | {
"start": 12930,
"end": 13749
} | class ____(unittest.TestCase):
def test_inner_namespace_set(self):
print("")
print("TEST INNER NAMESPACE")
print("===================")
print("")
proto = HELLO_WORLD.parse()
self.assertEqual(proto.namespace, "com.acme")
self.assertEqual(proto.fullname, "com.acme.HelloWorld")
greeting_type = proto.types_dict["Greeting"]
self.assertEqual(greeting_type.namespace, "com.acme")
def test_inner_namespace_not_rendered(self):
proto = HELLO_WORLD.parse()
self.assertEqual("com.acme.Greeting", proto.types[0].fullname)
self.assertEqual("Greeting", proto.types[0].name)
# but there shouldn't be 'namespace' rendered to json on the inner type
self.assertNotIn("namespace", proto.to_json()["types"][0])
| TestMisc |
python | ray-project__ray | rllib/algorithms/algorithm_config.py | {
"start": 310942,
"end": 319329
} | class ____(AlgorithmConfig):
"""An RLlib DifferentiableAlgorithmConfig builds a Meta algorithm from a given
configuration
.. testcode::
from ray.rllib.algorithm.algorithm_config import DifferentiableAlgorithmConfig
# Construct a generic config for an algorithm that needs differentiable Learners.
config = (
DifferentiableAlgorithmConfig()
.training(lr=3e-4)
.environment(env="CartPole-v1")
.learners(
differentiable_learner_configs=[
DifferentiableLearnerConfig(
DifferentiableTorchLearner,
lr=1e-4,
)
]
)
)
# Similar to `AlgorithmConfig` the config using differentiable Learners can be
# used to build a respective `Algorithm`.
algo = config.build()
"""
# A list of `DifferentiableLearnerConfig` instances that define differentiable
# `Learner`'s. Note, each of them needs to implement the `DifferentiableLearner`
# API.
differentiable_learner_configs: List[DifferentiableLearnerConfig]
def __init__(self, algo_class=None):
"""Initializes the DifferentiableLearnerConfig instance.
Args:
algo_class: An optional Algorithm class that this config class belongs to.
Used (if provided) to build a respective Algorithm instance from this
config.
"""
# Initialize the `AlgorithmConfig` first.
super().__init__(algo_class=algo_class)
# Initialize the list of differentiable learner configs to an empty list, which
# defines the default, i.e. the `MetaLearner` will have no nested updates.
self.differentiable_learner_configs: List[DifferentiableLearnerConfig] = []
def learners(
self,
*,
learner_class: Optional[Type["Learner"]] = NotProvided,
learner_connector: Optional[
Callable[["RLModule"], Union["ConnectorV2", List["ConnectorV2"]]]
] = NotProvided,
add_default_connectors_to_learner_pipeline: Optional[bool] = NotProvided,
learner_config_dict: Optional[Dict[str, Any]] = NotProvided,
differentiable_learner_configs: List[DifferentiableLearnerConfig] = NotProvided,
**kwargs,
) -> "DifferentiableAlgorithmConfig":
"""Sets the configurations for differentiable learners.
Args:
learner_class: The `Learner` class to use for (distributed) updating of the
RLModule. Only used when `enable_rl_module_and_learner=True`.
learner_connector: A callable taking an env observation space and an env
action space as inputs and returning a learner ConnectorV2 (might be
a pipeline) object.
add_default_connectors_to_learner_pipeline: If True (default), RLlib's
Learners automatically add the default Learner ConnectorV2
pieces to the LearnerPipeline. These automatically perform:
a) adding observations from episodes to the train batch, if this has not
already been done by a user-provided connector piece
b) if RLModule is stateful, add a time rank to the train batch, zero-pad
the data, and add the correct state inputs, if this has not already been
done by a user-provided connector piece.
c) add all other information (actions, rewards, terminateds, etc..) to
the train batch, if this has not already been done by a user-provided
connector piece.
Only if you know exactly what you are doing, you
should set this setting to False.
Note that this setting is only relevant if the new API stack is used
(including the new EnvRunner classes).
learner_config_dict: A dict to insert any settings accessible from within
the Learner instance. This should only be used in connection with custom
Learner subclasses and in case the user doesn't want to write an extra
`AlgorithmConfig` subclass just to add a few settings to the base Algo's
own config class.
differentiable_learner_configs: A list of `DifferentiableLearnerConfig` instances
defining the `DifferentiableLearner` classes used for the nested updates in
`Algorithm`'s learner.
"""
super().learners(**kwargs)
if learner_class is not NotProvided:
self._learner_class = learner_class
if learner_connector is not NotProvided:
self._learner_connector = learner_connector
if add_default_connectors_to_learner_pipeline is not NotProvided:
self.add_default_connectors_to_learner_pipeline = (
add_default_connectors_to_learner_pipeline
)
if learner_config_dict is not NotProvided:
self.learner_config_dict.update(learner_config_dict)
if differentiable_learner_configs is not NotProvided:
self.differentiable_learner_configs = differentiable_learner_configs
return self
def validate(self):
"""Validates all values in this config."""
# First, call the `validate` method of super.
super().validate()
# TODO (simon): Maybe moving this to a private method?
# Ensure that the default learner class is derived from `TorchMetaLearner`.
from ray.rllib.core.learner.torch.torch_meta_learner import TorchMetaLearner
if not issubclass(self.get_default_learner_class(), TorchMetaLearner):
self._value_error(
"`get_default_learner_class` must return a `MetaLearner` class "
f"or sublass but got {self.get_default_learner_class()}."
)
# Make sure that the differentiable learner configs are contained in a list.
if not isinstance(self.differentiable_learner_configs, list):
self._value_error(
"`differentiable_learner_configs` must be a list of "
"`DifferentiableLearnerConfig` instances, but is "
f"{type(self.differentiable_learner_configs)}."
)
# In addition, check, if all configurations are wrapped in a
# `DifferentiableLearnerConfig`.
elif not all(
isinstance(learner_cfg, DifferentiableLearnerConfig)
for learner_cfg in self.differentiable_learner_configs
):
self._value_error(
"`differentiable_learner_configs` must be a list of "
"`DifferentiableLearnerConfig` instances, but at least "
"one instance is not a `DifferentiableLearnerConfig`."
)
def get_default_learner_class(self) -> Union[Type["TorchMetaLearner"], str]:
"""Returns the `MetaLearner` class to use for this algorithm.
Override this method in the sub-class to return the `MetaLearner`.
Returns:
The `MetaLearner` class to use for this algorithm either as a class
type or as a string. (e.g. "ray.rllib.core.learner.torch.torch_meta_learner.TorchMetaLearner")
"""
return NotImplemented
def get_differentiable_learner_classes(
self,
) -> List[Union[Type["DifferentiableLearner"], str]]:
"""Returns the `DifferentiableLearner` classes to use for this algorithm.
Override this method in the sub-class to return the `DifferentiableLearner`.
Returns:
The `DifferentiableLearner` class to use for this algorithm either as a class
type or as a string. (e.g.
"ray.rllib.core.learner.torch.torch_meta_learner.TorchDifferentiableLearner").
"""
return NotImplemented
def get_differentiable_learner_configs(self) -> List[DifferentiableLearnerConfig]:
"""Returns the `DifferentiableLearnerConfigs` for all `DifferentiableLearner`s.
Override this method in the sub-class to return the `DifferentiableLearnerConfig`s.
Returns:
The `DifferentiableLearnerConfig` instances to use for this algorithm.
"""
return self.differentiable_learner_configs
| DifferentiableAlgorithmConfig |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/text_line_dataset_test.py | {
"start": 1446,
"end": 2765
} | class ____(test_base.DatasetTestBase):
"""Base class for setting up and testing TextLineDataset."""
def _lineText(self, f, l):
return compat.as_bytes("%d: %d" % (f, l))
def _createFiles(self,
num_files,
num_lines,
crlf=False,
compression_type=None):
filenames = []
for i in range(num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
contents = []
for j in range(num_lines):
contents.append(self._lineText(i, j))
# Always include a newline after the record unless it is
# at the end of the file, in which case we include it
if j + 1 != num_lines or i == 0:
contents.append(b"\r\n" if crlf else b"\n")
contents = b"".join(contents)
if not compression_type:
with open(fn, "wb") as f:
f.write(contents)
elif compression_type == "GZIP":
with gzip.GzipFile(fn, "wb") as f:
f.write(contents)
elif compression_type == "ZLIB":
contents = zlib.compress(contents)
with open(fn, "wb") as f:
f.write(contents)
else:
raise ValueError("Unsupported compression_type", compression_type)
return filenames
| TextLineDatasetTestBase |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 118309,
"end": 118437
} | class ____:
xlTabPositionFirst = 0 # from enum XlTabPosition
xlTabPositionLast = 1 # from enum XlTabPosition
| TabPosition |
python | pytorch__pytorch | torch/fx/_graph_pickler.py | {
"start": 8151,
"end": 10708
} | class ____:
metadata: MetaTensorDesc[FakeTensor]
@classmethod
def reduce_helper(
cls, pickler: GraphPickler, obj: FakeTensor
) -> tuple[
Callable[[Self, _UnpickleState], FakeTensor], tuple[Self, _UnpickleStateToken]
]:
return cls.unpickle, (
cls(pickler._meta_tensor_describer, obj),
pickler._unpickle_state,
)
def __init__(self, describer: MetaTensorDescriber, t: Tensor) -> None:
# THINGS TO WORRY ABOUT:
# 1. Need to make sure that two tensors with the same id end up with the
# same id on the other side of the wire.
metadata = describer.describe_tensor(t)
# view_func is fine if it's either None or a _FakeTensorViewFunc. A
# custom one (which is basically a lambda) can't be serialized.
assert not metadata.view_func or isinstance(
metadata.view_func, torch._subclasses.meta_utils._FakeTensorViewFunc
)
self.metadata = dataclasses.replace(metadata, fake_mode=None)
# Some debugging/verification
for k in MetaTensorDesc._UNSERIALIZABLE:
if k in ("fake_mode", "view_func"):
continue
assert getattr(self.metadata, k) is None, (
f"not None: {k}: {getattr(self.metadata, k)}"
)
def unpickle(self, unpickle_state: _UnpickleState) -> FakeTensor:
# TODO: make common w/ _output_from_cache_entry() in fake_tensor.py?
metadata = dataclasses.replace(
self.metadata,
fake_mode=unpickle_state.fake_mode,
)
# also need to set the fake_mode on the base of a tensor if it's a view
if metadata.is_view and metadata.base is not None:
new_base = dataclasses.replace(
metadata.base,
fake_mode=unpickle_state.fake_mode,
)
metadata = dataclasses.replace(metadata, base=new_base)
def with_fake(
make_meta_t: Callable[[], torch.Tensor], device: Union[torch.device, str]
) -> FakeTensor:
with no_dispatch():
return FakeTensor(
unpickle_state.fake_mode,
make_meta_t(),
# pyrefly: ignore [bad-argument-type]
device,
)
return unpickle_state.meta_converter.meta_tensor(
metadata,
unpickle_state.fake_mode.shape_env,
with_fake,
None,
None,
)
| _TensorPickleData |
python | getsentry__sentry | tests/sentry/db/postgres/schema/safe_migrations/integration/test_migrations.py | {
"start": 14355,
"end": 14676
} | class ____:
app = ""
def col_exists(self, col_name):
with connection.cursor() as cursor:
table_name = f"{self.app}_testtable"
columns = connection.introspection.get_table_description(cursor, table_name)
return any(c for c in columns if c.name == col_name)
| ColExistsMixin |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/orchestrator/orchestrator/assets/connector_metrics.py | {
"start": 313,
"end": 2282
} | class ____(json.JSONDecoder):
"""A JSON decoder that converts "null" strings to None."""
def __init__(self, *args, **kwargs):
super().__init__(object_hook=self.object_hook, *args, **kwargs)
def object_hook(self, obj):
return {k: (None if v == "null" else v) for k, v in obj.items()}
@sentry_sdk.trace
def _safe_read_gcs_file(gcs_blob: storage.Blob) -> Optional[str]:
"""Read the connector metrics jsonl blob.
Args:
gcs_blob (storage.Blob): The blob.
Returns:
dict: The metrics.
"""
if not gcs_blob.exists():
return None
return gcs_blob.download_as_string().decode("utf-8")
def _convert_json_to_metrics_dict(jsonl_string: str) -> dict:
"""Convert the jsonl string to a metrics dict."""
metrics_dict = defaultdict(dict)
jsonl_lines = jsonl_string.splitlines()
for line in jsonl_lines:
data = json.loads(line, cls=StringNullJsonDecoder)
connector_data = data["_airbyte_data"]
connector_definition_id = connector_data["connector_definition_id"]
airbyte_platform = connector_data["airbyte_platform"]
metrics_dict[connector_definition_id][airbyte_platform] = connector_data
return metrics_dict
# ASSETS
@asset(required_resource_keys={"latest_metrics_gcs_blob"}, group_name=GROUP_NAME)
@sentry.instrument_asset_op
def latest_connector_metrics(context: OpExecutionContext) -> dict:
latest_metrics_gcs_blob = context.resources.latest_metrics_gcs_blob
latest_metrics_jsonl = _safe_read_gcs_file(latest_metrics_gcs_blob)
if latest_metrics_jsonl is None:
context.log.warn(f"No metrics found for {latest_metrics_gcs_blob.name}")
return {}
try:
latest_metrics_dict = _convert_json_to_metrics_dict(latest_metrics_jsonl)
except Exception as e:
context.log.error(f"Error converting json to metrics dict: {str(e)}")
return {}
return latest_metrics_dict
| StringNullJsonDecoder |
python | readthedocs__readthedocs.org | readthedocs/organizations/tests/test_access.py | {
"start": 377,
"end": 6993
} | class ____:
url_responses = {}
def login(self):
raise NotImplementedError
def is_admin(self):
raise NotImplementedError
def assertResponse(self, path, method=None, data=None, **kwargs):
self.login()
if method is None:
method = self.client.get
if data is None:
data = {}
response = method(path, data=data)
response_attrs = {
"status_code": kwargs.pop("status_code", 200),
}
response_attrs.update(kwargs)
response_attrs.update(self.url_responses.get(path, {}))
for key, val in list(response_attrs.items()):
self.assertEqual(getattr(response, key), val)
return response
def setUp(self):
# Previous Fixtures
self.eric = create_user(username="eric", password="test")
self.test = create_user(username="test", password="test")
self.tester = create_user(username="tester", password="test")
self.project = fixture.get(Project, slug="pip")
self.organization = fixture.get(
Organization,
name="Mozilla",
slug="mozilla",
projects=[self.project],
)
self.team = fixture.get(
Team,
name="Foobar",
slug="foobar",
organization=self.organization,
members=[self.test],
)
self.owner = fixture.get(
OrganizationOwner,
owner=self.eric,
organization=self.organization,
)
def test_organization_list(self):
self.assertResponse("/organizations/", status_code=200)
def test_organization_details(self):
self.assertResponse("/organizations/mozilla/", status_code=200)
self.assertResponse("/organizations/mozilla/edit/", status_code=200)
def test_organization_owners_regression(self):
"""Regression test for paths that have been moved."""
self.assertEqual(self.organization.owners.count(), 1)
self.assertResponse("/organizations/mozilla/owners/", status_code=200)
self.assertResponse(
"/organizations/mozilla/owners/add/",
method=self.client.post,
data={"username_or_email": "tester"},
status_code=302,
)
if self.is_admin():
invitation = Invitation.objects.for_object(self.organization).first()
invitation.redeem()
self.assertEqual(self.organization.owners.count(), 2)
else:
self.assertFalse(Invitation.objects.for_object(self.organization).exists())
self.assertEqual(self.organization.owners.count(), 1)
self.assertResponse(
"/organizations/mozilla/owners/delete/",
method=self.client.post,
data={"user": "tester"},
status_code=404,
)
if self.is_admin():
self.assertEqual(self.organization.owners.count(), 2)
else:
self.assertEqual(self.organization.owners.count(), 1)
def test_organization_owners(self):
self.assertEqual(self.organization.owners.count(), 1)
self.assertResponse("/organizations/mozilla/owners/", status_code=200)
self.assertResponse(
"/organizations/mozilla/owners/add/",
method=self.client.post,
data={"username_or_email": "tester"},
status_code=302,
)
if self.is_admin():
invitation = Invitation.objects.for_object(self.organization).first()
invitation.redeem()
self.assertEqual(self.organization.owners.count(), 2)
owner = OrganizationOwner.objects.get(
organization=self.organization,
owner__username="tester",
)
self.assertResponse(
"/organizations/mozilla/owners/{}/delete/".format(owner.pk),
method=self.client.post,
data={"user": "tester"},
status_code=302,
)
self.assertEqual(self.organization.owners.count(), 1)
else:
self.assertFalse(
OrganizationOwner.objects.filter(
organization=self.organization,
owner__username="tester",
).exists(),
)
self.assertEqual(self.organization.owners.count(), 1)
def test_organization_members_regression(self):
"""Tests for regression against old member functionality."""
self.assertEqual(self.organization.members.count(), 2)
self.assertResponse(
"/organizations/mozilla/members/",
status_code=200,
)
self.assertResponse(
"/organizations/mozilla/members/add/",
method=self.client.post,
data={"user": "tester"},
status_code=404,
)
if self.is_admin():
self.assertEqual(self.organization.members.count(), 2)
else:
self.assertEqual(self.organization.members.count(), 2)
self.assertResponse(
"/organizations/mozilla/members/delete/",
method=self.client.post,
data={"user": "tester"},
status_code=404,
)
self.assertEqual(self.organization.members.count(), 2)
def test_organization_teams(self):
self.assertEqual(self.organization.teams.count(), 1)
self.assertResponse("/organizations/mozilla/teams/", status_code=200)
user = User.objects.get(username="test")
project = Project.objects.get(slug="pip")
self.assertResponse(
"/organizations/mozilla/teams/add/",
method=self.client.post,
data={
"name": "more-foobar",
"access": "readonly",
},
status_code=302,
)
if self.is_admin():
self.assertEqual(self.organization.teams.count(), 2)
self.assertEqual(self.organization.members.count(), 2)
self.assertResponse(
"/organizations/mozilla/teams/more-foobar/delete/",
method=self.client.post,
status_code=302,
)
else:
self.assertEqual(self.organization.teams.count(), 1)
self.assertFalse(
self.organization.teams.filter(name="foobar").exists(),
)
self.assertEqual(self.organization.members.count(), 2)
self.assertEqual(self.organization.teams.count(), 1)
@override_settings(RTD_ALLOW_ORGANIZATIONS=True)
| OrganizationAccessMixin |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-milvus/llama_index/readers/milvus/base.py | {
"start": 191,
"end": 4555
} | class ____(BaseReader):
"""Milvus reader."""
def __init__(
self,
host: str = "localhost",
port: int = 19530,
user: str = "",
password: str = "",
use_secure: bool = False,
):
"""Initialize with parameters."""
import_err_msg = (
"`pymilvus` package not found, please run `pip install pymilvus`"
)
try:
import pymilvus # noqa
except ImportError:
raise ImportError(import_err_msg)
from pymilvus import MilvusException
self.host = host
self.port = port
self.user = user
self.password = password
self.use_secure = use_secure
self.collection = None
self.default_search_params = {
"IVF_FLAT": {"metric_type": "IP", "params": {"nprobe": 10}},
"IVF_SQ8": {"metric_type": "IP", "params": {"nprobe": 10}},
"IVF_PQ": {"metric_type": "IP", "params": {"nprobe": 10}},
"HNSW": {"metric_type": "IP", "params": {"ef": 10}},
"RHNSW_FLAT": {"metric_type": "IP", "params": {"ef": 10}},
"RHNSW_SQ": {"metric_type": "IP", "params": {"ef": 10}},
"RHNSW_PQ": {"metric_type": "IP", "params": {"ef": 10}},
"IVF_HNSW": {"metric_type": "IP", "params": {"nprobe": 10, "ef": 10}},
"ANNOY": {"metric_type": "IP", "params": {"search_k": 10}},
"AUTOINDEX": {"metric_type": "IP", "params": {}},
}
try:
self._create_connection_alias()
except MilvusException:
raise
def load_data(
self,
query_vector: List[float],
collection_name: str,
expr: Any = None,
search_params: Optional[dict] = None,
limit: int = 10,
) -> List[Document]:
"""
Load data from Milvus.
Args:
collection_name (str): Name of the Milvus collection.
query_vector (List[float]): Query vector.
limit (int): Number of results to return.
Returns:
List[Document]: A list of documents.
"""
from pymilvus import Collection, MilvusException
try:
self.collection = Collection(collection_name, using=self.alias)
except MilvusException:
raise
assert self.collection is not None
try:
self.collection.load()
except MilvusException:
raise
if search_params is None:
search_params = self._create_search_params()
res = self.collection.search(
[query_vector],
"embedding",
param=search_params,
expr=expr,
output_fields=["doc_id", "text"],
limit=limit,
)
documents = []
# TODO: In future append embedding when more efficient
for hit in res[0]:
document = Document(
id_=hit.entity.get("doc_id"),
text=hit.entity.get("text"),
)
documents.append(document)
return documents
def _create_connection_alias(self) -> None:
from pymilvus import connections
self.alias = None
# Attempt to reuse an open connection
for x in connections.list_connections():
addr = connections.get_connection_addr(x[0])
if (
x[1]
and ("address" in addr)
and (addr["address"] == f"{self.host}:{self.port}")
):
self.alias = x[0]
break
# Connect to the Milvus instance using the passed in Environment variables
if self.alias is None:
self.alias = uuid4().hex
connections.connect(
alias=self.alias,
host=self.host,
port=self.port,
user=self.user, # type: ignore
password=self.password, # type: ignore
secure=self.use_secure,
)
def _create_search_params(self) -> Dict[str, Any]:
assert self.collection is not None
index = self.collection.indexes[0]._index_params
search_params = self.default_search_params[index["index_type"]]
search_params["metric_type"] = index["metric_type"]
return search_params
| MilvusReader |
python | tiangolo__fastapi | docs_src/dependencies/tutorial003_an.py | {
"start": 212,
"end": 697
} | class ____:
def __init__(self, q: Union[str, None] = None, skip: int = 0, limit: int = 100):
self.q = q
self.skip = skip
self.limit = limit
@app.get("/items/")
async def read_items(commons: Annotated[Any, Depends(CommonQueryParams)]):
response = {}
if commons.q:
response.update({"q": commons.q})
items = fake_items_db[commons.skip : commons.skip + commons.limit]
response.update({"items": items})
return response
| CommonQueryParams |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/async/guestbook.py | {
"start": 790,
"end": 977
} | class ____(ndb.Model):
email = ndb.StringProperty()
nickname = ndb.StringProperty()
def nick(self):
return self.nickname or self.email # Whichever is non-empty
| Account |
python | huggingface__transformers | src/transformers/models/blip_2/modeling_blip_2.py | {
"start": 13923,
"end": 14584
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
# Copied from transformers.models.blip.modeling_blip.BlipEncoderLayer with Blip->Blip2
| Blip2MLP |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_validation.py | {
"start": 14744,
"end": 20345
} | class ____:
async def test_missing_block_document_default_value(
self,
flow,
session,
missing_block_doc_ref_template,
):
work_pool = await create_work_pool(
session=session,
base_job_template=missing_block_doc_ref_template,
)
deployment = await create_deployment_with_work_pool(
session=session,
flow=flow,
work_pool=work_pool,
)
with pytest.raises(HTTPException, match="404: Block not found."):
await validate_job_variables_for_deployment_flow_run(
session=session,
deployment=deployment,
flow_run=DeploymentFlowRunCreate(
state=schemas.states.Scheduled().to_state_create()
),
)
async def test_ignores_block_document_reference_incorrect_type_in_default_value(
self,
flow,
session,
incorrect_type_block_ref_template,
):
work_pool = await create_work_pool(
session=session,
base_job_template=incorrect_type_block_ref_template,
)
deployment = await create_deployment_with_work_pool(
session=session,
flow=flow,
work_pool=work_pool,
)
await validate_job_variables_for_deployment_flow_run(
session=session,
deployment=deployment,
flow_run=DeploymentFlowRunCreate(
state=schemas.states.Scheduled().to_state_create()
),
)
async def test_valid_block_document_reference(
self,
flow,
block_ref_template,
session,
):
work_pool = await create_work_pool(
session=session,
base_job_template=block_ref_template,
)
deployment = await create_deployment_with_work_pool(
session=session,
flow=flow,
work_pool=work_pool,
)
await validate_job_variables_for_deployment_flow_run(
session=session,
deployment=deployment,
flow_run=DeploymentFlowRunCreate(
state=schemas.states.Scheduled().to_state_create()
),
)
async def test_allows_missing_required_variable_with_default(
self,
flow,
template_required_field_with_default,
session,
):
work_pool = await create_work_pool(
session=session,
base_job_template=template_required_field_with_default,
)
deployment = await create_deployment_with_work_pool(
session=session,
flow=flow,
work_pool=work_pool,
)
await validate_job_variables_for_deployment_flow_run(
session=session,
deployment=deployment,
flow_run=DeploymentFlowRunCreate(
state=schemas.states.Scheduled().to_state_create()
),
)
async def test_allows_missing_optional_variable_with_default(
self,
flow,
template_optional_field_with_default,
session,
):
work_pool = await create_work_pool(
session=session,
base_job_template=template_optional_field_with_default,
)
deployment = await create_deployment_with_work_pool(
session=session,
flow=flow,
work_pool=work_pool,
)
await validate_job_variables_for_deployment_flow_run(
session=session,
deployment=deployment,
flow_run=DeploymentFlowRunCreate(
state=schemas.states.Scheduled().to_state_create()
),
)
async def test_allows_none_for_optional_variable_with_default(
self,
flow,
template_optional_field_with_default,
session,
):
"""
Multiple Infrastructure block fields default to None, so we should allow None as a
valid value for optional fields even when those fields have default values (and
thus we can't know, because of Pydantic v1 limitations, whether the field
supports None as a value).
"""
work_pool = await create_work_pool(
session=session,
base_job_template=template_optional_field_with_default,
)
deployment = await create_deployment_with_work_pool(
session=session,
flow=flow,
work_pool=work_pool,
job_variables={"laptop_choice": None},
)
await validate_job_variables_for_deployment_flow_run(
session=session,
deployment=deployment,
flow_run=DeploymentFlowRunCreate(
state=schemas.states.Scheduled().to_state_create()
),
)
async def test_allows_missing_optional_variable_with_no_default(
self,
flow,
template_optional_field,
session,
):
work_pool = await create_work_pool(
session=session,
base_job_template=template_optional_field,
)
deployment = await create_deployment_with_work_pool(
session=session,
flow=flow,
work_pool=work_pool,
)
await validate_job_variables_for_deployment_flow_run(
session=session,
deployment=deployment,
flow_run=DeploymentFlowRunCreate(
state=schemas.states.Scheduled().to_state_create()
),
)
| TestDeploymentFlowRunJobVariablesValidation |
python | pypa__packaging | src/packaging/pylock.py | {
"start": 9812,
"end": 10607
} | class ____:
path: str
editable: bool | None = None
subdirectory: str | None = None
def __init__(
self,
*,
path: str,
editable: bool | None = None,
subdirectory: str | None = None,
) -> None:
# In Python 3.10+ make dataclass kw_only=True and remove __init__
object.__setattr__(self, "path", path)
object.__setattr__(self, "editable", editable)
object.__setattr__(self, "subdirectory", subdirectory)
@classmethod
def _from_dict(cls, d: Mapping[str, Any]) -> Self:
return cls(
path=_get_required(d, str, "path"),
editable=_get(d, bool, "editable"),
subdirectory=_get(d, str, "subdirectory"),
)
@dataclass(frozen=True, init=False)
| PackageDirectory |
python | redis__redis-py | redis/backoff.py | {
"start": 589,
"end": 1119
} | class ____(AbstractBackoff):
"""Constant backoff upon failure"""
def __init__(self, backoff: float) -> None:
"""`backoff`: backoff time in seconds"""
self._backoff = backoff
def __hash__(self) -> int:
return hash((self._backoff,))
def __eq__(self, other) -> bool:
if not isinstance(other, ConstantBackoff):
return NotImplemented
return self._backoff == other._backoff
def compute(self, failures: int) -> float:
return self._backoff
| ConstantBackoff |
python | apache__airflow | airflow-core/tests/unit/utils/test_entry_points.py | {
"start": 961,
"end": 1155
} | class ____:
def __init__(self, name: str, entry_points: Iterable[metadata.EntryPoint]) -> None:
self.metadata = {"Name": name}
self.entry_points = entry_points
| MockDistribution |
python | pytorch__pytorch | torchgen/selective_build/selector.py | {
"start": 735,
"end": 12666
} | class ____:
# If true, then the build is not selective, and includes all
# operators.
include_all_operators: bool
# Debug Information at the selective/custom build level.
_debug_info: tuple[str, ...] | None
# A dictionary of operator -> operator metadata.
operators: dict[str, SelectiveBuildOperator]
# A dictionary of selected kernel tags and dtypes. Typically a
# PyTorch Operator Kernel (function) may have many code paths
# that are specialized for many many Tensor dtypes, so it's not
# one per kernel function, but there could be many per kernel
# function. The tag isn't a kernel function name, but some fragment
# of the kernel function implementation itself.
kernel_metadata: dict[str, list[str]]
# ExecuTorch only. A dictionary of kernel tag -> list of (list of input
# dtypes for tensor-like input args).
# This is from selective.yaml
et_kernel_metadata: dict[str, list[str]]
# A set of all the custom torch bind classes used by the selected models
# Stored as a set internally to remove duplicates proactively, but written
# as a list to yamls
custom_classes: set[str]
# A set of all the build features used by the selected models
# Stored as a set internally to remove duplicates proactively, but written
# as a list to yamls
build_features: set[str]
# If true, then fragments for all dtypes for all kernel functions
# are included as well as all custom classes. This is typically set when any one of the
# operator lists is generated from a mechanism other than
# tracing based selective build.
include_all_non_op_selectives: bool
@staticmethod
def get_nop_selector() -> SelectiveBuilder:
return SelectiveBuilder.from_yaml_dict({"include_all_operators": True})
@staticmethod
def from_yaml_dict(data: dict[str, object]) -> SelectiveBuilder:
valid_top_level_keys = {
"include_all_non_op_selectives",
"include_all_operators",
"debug_info",
"operators",
"kernel_metadata",
"et_kernel_metadata",
"custom_classes",
"build_features",
}
top_level_keys = set(data.keys())
if len(top_level_keys - valid_top_level_keys) > 0:
raise Exception( # noqa: TRY002
"Got unexpected top level keys: {}".format(
",".join(top_level_keys - valid_top_level_keys),
)
)
include_all_operators = data.get("include_all_operators", False)
assert isinstance(include_all_operators, bool)
debug_info = None
if "debug_info" in data:
di_list = data["debug_info"]
assert isinstance(di_list, list)
debug_info = tuple(str(x) for x in di_list)
operators = {}
operators_dict = data.get("operators", {})
assert isinstance(operators_dict, dict)
for k, v in operators_dict.items():
operators[k] = SelectiveBuildOperator.from_yaml_dict(k, v)
kernel_metadata = {}
kernel_metadata_dict = data.get("kernel_metadata", {})
assert isinstance(kernel_metadata_dict, dict)
for k, v in kernel_metadata_dict.items():
kernel_metadata[str(k)] = [str(dtype) for dtype in v]
et_kernel_metadata = data.get("et_kernel_metadata", {})
assert isinstance(et_kernel_metadata, dict)
custom_classes = data.get("custom_classes", [])
assert isinstance(custom_classes, Iterable)
custom_classes = set(custom_classes)
build_features = data.get("build_features", [])
assert isinstance(build_features, Iterable)
build_features = set(build_features)
include_all_non_op_selectives = data.get("include_all_non_op_selectives", False)
assert isinstance(include_all_non_op_selectives, bool)
return SelectiveBuilder(
include_all_operators,
debug_info,
operators,
kernel_metadata,
et_kernel_metadata,
custom_classes, # type: ignore[arg-type]
build_features, # type: ignore[arg-type]
include_all_non_op_selectives,
)
@staticmethod
def from_yaml_str(config_contents: str) -> SelectiveBuilder:
contents = yaml.safe_load(config_contents)
return SelectiveBuilder.from_yaml_dict(contents)
@staticmethod
def from_yaml_path(config_path: str) -> SelectiveBuilder:
with open(config_path) as f:
contents = yaml.safe_load(f)
return SelectiveBuilder.from_yaml_dict(contents)
@staticmethod
def from_legacy_op_registration_allow_list(
allow_list: set[str], is_root_operator: bool, is_used_for_training: bool
) -> SelectiveBuilder:
operators = {}
for op in allow_list:
operators[op] = {
"name": op,
"is_root_operator": is_root_operator,
"is_used_for_training": is_used_for_training,
"include_all_overloads": True,
}
return SelectiveBuilder.from_yaml_dict(
{
"operators": operators,
"include_all_non_op_selectives": True,
}
)
def is_operator_selected(self, name: str) -> bool:
if self.include_all_operators:
return True
if name in self.operators:
return True
name = strip_operator_overload_name(name)
return name in self.operators and self.operators[name].include_all_overloads
def is_native_function_selected(self, func: NativeFunction) -> bool:
op_name = op_name_from_native_function(func)
return self.is_operator_selected(op_name)
def is_operator_selected_for_training(self, name: str) -> bool:
if not self.is_operator_selected(name):
return False
if self.include_all_operators:
return True
not_training_op = SelectiveBuildOperator(
name="",
is_root_operator=False,
is_used_for_training=False,
include_all_overloads=False,
_debug_info=None,
)
op = not_training_op
if name in self.operators:
op = self.operators[name]
name = strip_operator_overload_name(name)
base_op = not_training_op
if name in self.operators:
base_op = self.operators[name]
return op.is_used_for_training or (
base_op.include_all_overloads and base_op.is_used_for_training
)
def is_native_function_selected_for_training(self, func: NativeFunction) -> bool:
op_name = op_name_from_native_function(func)
return self.is_operator_selected_for_training(op_name)
def is_root_operator(self, name: str) -> bool:
if not self.is_operator_selected(name):
return False
if self.include_all_operators:
return True
if name in self.operators:
op: SelectiveBuildOperator = self.operators[name]
return op.is_root_operator
name = strip_operator_overload_name(name)
if name not in self.operators:
return False
base_op: SelectiveBuildOperator = self.operators[name]
return base_op.include_all_overloads and base_op.is_root_operator
def is_kernel_dtype_selected(self, kernel_tag: str, dtype: str) -> bool:
if self.include_all_operators or self.include_all_non_op_selectives:
return True
return (
kernel_tag in self.kernel_metadata
and dtype in self.kernel_metadata[kernel_tag]
)
def et_get_selected_kernels(self, op_name: str, kernel_key: list[str]) -> list[str]:
"""
Return a list of kernel keys that cover the used ops
"""
# If no kernel metadata, either it's implied by include_all_operators=True or the op is not used.
if op_name not in self.et_kernel_metadata:
return kernel_key if self.include_all_operators else []
# Otherwise, only return the specific kernel keys.
result_set = set()
for model_kernel_keys in self.et_kernel_metadata[op_name]:
key_found = False
for key in kernel_key:
# Don't compare the version for now
if (
key != "default"
and key.split("/")[1] == model_kernel_keys.split("/")[1]
):
result_set.add(key)
key_found = True
break
if not key_found:
if "default" not in kernel_key:
raise Exception("Missing kernel for the model") # noqa: TRY002
else:
result_set.add("default")
return list(result_set)
def to_dict(self) -> dict[str, object]:
ret: dict[str, object] = {
"include_all_non_op_selectives": self.include_all_non_op_selectives,
"include_all_operators": self.include_all_operators,
}
operators = {}
for op_name, op in self.operators.items():
operators[op_name] = op.to_dict()
ret["operators"] = operators
if self._debug_info is not None:
ret["debug_info"] = sorted(self._debug_info)
ret["kernel_metadata"] = {
k: sorted(v) for (k, v) in self.kernel_metadata.items()
}
ret["et_kernel_metadata"] = self.et_kernel_metadata
ret["custom_classes"] = sorted(self.custom_classes)
ret["build_features"] = sorted(self.build_features)
return ret
def merge_kernel_metadata(
lhs: dict[str, list[str]],
rhs: dict[str, list[str]],
) -> dict[str, list[str]]:
kernel_metadata: dict[str, list[str]] = {}
for tag_name, dtypes in list(lhs.items()) + list(rhs.items()):
dtypes_copy = set(dtypes)
if tag_name in kernel_metadata:
dtypes_copy |= set(kernel_metadata[tag_name])
kernel_metadata[tag_name] = list(dtypes_copy)
return kernel_metadata
def merge_et_kernel_metadata(
lhs: dict[str, list[str]],
rhs: dict[str, list[str]],
) -> dict[str, list[str]]:
merge_et_kernel_metadata: dict[str, set[str]] = defaultdict(set)
for op in list(lhs.keys()) + list(rhs.keys()):
merge_et_kernel_metadata[op].update(lhs.get(op, []))
merge_et_kernel_metadata[op].update(rhs.get(op, []))
return {op: sorted(val) for op, val in merge_et_kernel_metadata.items()}
def combine_selective_builders(
lhs: SelectiveBuilder, rhs: SelectiveBuilder
) -> SelectiveBuilder:
include_all_operators = lhs.include_all_operators or rhs.include_all_operators
debug_info = merge_debug_info(lhs._debug_info, rhs._debug_info)
operators = merge_operator_dicts(lhs.operators, rhs.operators)
kernel_metadata = merge_kernel_metadata(lhs.kernel_metadata, rhs.kernel_metadata)
et_kernel_metadata = merge_et_kernel_metadata(
lhs.et_kernel_metadata, rhs.et_kernel_metadata
)
include_all_non_op_selectives = (
lhs.include_all_non_op_selectives or rhs.include_all_non_op_selectives
)
custom_classes = lhs.custom_classes.union(rhs.custom_classes)
build_features = lhs.build_features.union(rhs.build_features)
return SelectiveBuilder(
include_all_operators,
debug_info,
operators,
kernel_metadata,
et_kernel_metadata,
custom_classes,
build_features,
include_all_non_op_selectives,
)
def op_name_from_native_function(f: NativeFunction) -> str:
# This was originally read from the 'operator_name_with_overload' field in the
# declaration dict, which was the part before the first '(' in 'schema_string'.
return f"{f.namespace}::{f.func.name}"
| SelectiveBuilder |
python | davidhalter__jedi | jedi/inference/base_value.py | {
"start": 11990,
"end": 12212
} | class ____(_ValueWrapperBase):
def __init__(self, wrapped_value):
self._wrapped_value = wrapped_value
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._wrapped_value)
| ValueWrapper |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-events-that-can-be-attended-ii.py | {
"start": 61,
"end": 688
} | class ____(object):
def maxValue(self, events, k):
"""
:type events: List[List[int]]
:type k: int
:rtype: int
"""
events.sort(key=lambda x: x[1])
sorted_ends = [x[1] for x in events]
dp = [[0]*(k+1) for _ in xrange(len(events)+1)]
for i in xrange(1, len(events)+1):
prev_i_m_1 = bisect.bisect_left(sorted_ends, events[i-1][0])-1
for j in xrange(1, k+1):
dp[i][j] = max(dp[i-1][j], dp[prev_i_m_1+1][j-1]+events[i-1][2])
return dp[-1][-1]
# Time: O(nlogn + n * k)
# Space: O(n * k)
import bisect
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 153866,
"end": 154885
} | class ____(sgqlc.types.Input):
"""Descriptive details about the check run."""
__schema__ = github_schema
__field_names__ = ("title", "summary", "text", "annotations", "images")
title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title")
"""A title to provide for this check run."""
summary = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="summary")
"""The summary of the check run (supports Commonmark)."""
text = sgqlc.types.Field(String, graphql_name="text")
"""The details of the check run (supports Commonmark)."""
annotations = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(CheckAnnotationData)), graphql_name="annotations")
"""The annotations that are made as part of the check run."""
images = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null("CheckRunOutputImage")), graphql_name="images")
"""Images attached to the check run output displayed in the GitHub
pull request UI.
"""
| CheckRunOutput |
python | davidhalter__jedi | jedi/api/environment.py | {
"start": 4243,
"end": 4489
} | class ____:
def __init__(self):
self._start_executable = self.executable = sys.executable
self.path = sys.prefix
self.version_info = _VersionInfo(*sys.version_info[:3])
self._env_vars = None
| _SameEnvironmentMixin |
python | cherrypy__cherrypy | cherrypy/test/helper.py | {
"start": 592,
"end": 1017
} | class ____(object):
"""Base class for modeling and controlling servers during testing."""
def __init__(self, **kwargs):
"""Initialize a supervisor."""
for k, v in kwargs.items():
if k == 'port':
setattr(self, k, int(v))
setattr(self, k, v)
def log_to_stderr(msg, level):
"""Log to Standard Error."""
return sys.stderr.write(msg + os.linesep)
| Supervisor |
python | vyperlang__vyper | vyper/exceptions.py | {
"start": 10187,
"end": 10304
} | class ____(VyperException):
"""Second argument to a division or modulo operation was zero."""
| ZeroDivisionException |
python | tensorflow__tensorflow | tensorflow/python/ops/init_ops_v2_test.py | {
"start": 6993,
"end": 8158
} | class ____(InitializersTest):
@test_util.run_in_graph_and_eager_modes
def testRangeInitializer(self):
shape = (20, 6, 7)
self._range_test(
init_ops_v2.RandomUniform(minval=-1, maxval=1, seed=124),
shape,
target_mean=0.,
target_max=1,
target_min=-1)
@test_util.run_in_graph_and_eager_modes
def testInitializerIdentical(self):
self.skipTest("Doesn't work without the graphs")
init1 = init_ops_v2.RandomUniform(0, 7, seed=1)
init2 = init_ops_v2.RandomUniform(0, 7, seed=1)
self._identical_test(init1, init2, True)
@test_util.run_in_graph_and_eager_modes
def testInitializerDifferent(self):
init1 = init_ops_v2.RandomUniform(0, 7, seed=1)
init2 = init_ops_v2.RandomUniform(0, 7, seed=2)
self._identical_test(init1, init2, False)
@test_util.run_in_graph_and_eager_modes
def testDuplicatedInitializer(self):
init = init_ops_v2.RandomUniform(0.0, 1.0)
self._duplicated_test(init)
@test_util.run_in_graph_and_eager_modes
def testInitializePartition(self):
init = init_ops_v2.RandomUniform(0, 7, seed=1)
self._partition_test(init)
| RandomUniformInitializerTest |
python | kamyu104__LeetCode-Solutions | Python/clone-binary-tree-with-random-pointer.py | {
"start": 52,
"end": 272
} | class ____(object):
def __init__(self, val=0, left=None, right=None, random=None):
self.val = val
self.left = left
self.right = right
self.random = random
# Definition for NodeCopy.
| Node |
python | sanic-org__sanic | sanic/mixins/exceptions.py | {
"start": 130,
"end": 3888
} | class ____(metaclass=SanicMeta):
def __init__(self, *args, **kwargs) -> None:
self._future_exceptions: set[FutureException] = set()
def _apply_exception_handler(self, handler: FutureException):
raise NotImplementedError # noqa
def exception(
self,
*exceptions: Union[type[Exception], list[type[Exception]]],
apply: bool = True,
) -> Callable:
"""Decorator used to register an exception handler for the current application or blueprint instance.
This method allows you to define a handler for specific exceptions that
may be raised within the routes of this blueprint. You can specify one
or more exception types to catch, and the handler will be applied to
those exceptions.
When used on a Blueprint, the handler will only be applied to routes
registered under that blueprint. That means they only apply to
requests that have been matched, and the exception is raised within
the handler function (or middleware) for that route.
A general exception like `NotFound` should only be registered on the
application instance, not on a blueprint.
See [Exceptions](/en/guide/best-practices/exceptions.html) for more information.
Args:
exceptions (Union[Type[Exception], List[Type[Exception]]]): List of
Python exceptions to be caught by the handler.
apply (bool, optional): Whether the exception handler should be
applied. Defaults to True.
Returns:
Callable: A decorated method to handle global exceptions for any route
registered under this blueprint.
Example:
```python
from sanic import Blueprint, text
bp = Blueprint('my_blueprint')
@bp.exception(Exception)
def handle_exception(request, exception):
return text("Oops, something went wrong!", status=500)
```
```python
from sanic import Sanic, NotFound, text
app = Sanic('MyApp')
@app.exception(NotFound)
def ignore_404s(request, exception):
return text(f"Yep, I totally found the page: {request.url}")
""" # noqa: E501
def decorator(handler):
nonlocal apply
nonlocal exceptions
if isinstance(exceptions[0], list):
exceptions = tuple(*exceptions)
future_exception = FutureException(handler, exceptions)
self._future_exceptions.add(future_exception)
if apply:
self._apply_exception_handler(future_exception)
return handler
return decorator
def all_exceptions(
self, handler: Callable[..., Any]
) -> Callable[..., Any]:
"""Enables the process of creating a global exception handler as a convenience.
This following two examples are equivalent:
```python
@app.exception(Exception)
async def handler(request: Request, exception: Exception) -> HTTPResponse:
return text(f"Exception raised: {exception}")
```
```python
@app.all_exceptions
async def handler(request: Request, exception: Exception) -> HTTPResponse:
return text(f"Exception raised: {exception}")
```
Args:
handler (Callable[..., Any]): A coroutine function to handle exceptions.
Returns:
Callable[..., Any]: A decorated method to handle global exceptions for
any route registered under this blueprint.
""" # noqa: E501
return self.exception(Exception)(handler)
| ExceptionMixin |
python | scikit-learn__scikit-learn | sklearn/externals/_numpydoc/docscrape.py | {
"start": 19092,
"end": 19307
} | class ____(NumpyDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
if config is None:
config = {}
NumpyDocString.__init__(self, doc, config=config)
| ObjDoc |
python | django__django | django/db/models/expressions.py | {
"start": 35827,
"end": 39334
} | class ____(SQLiteNumericMixin, Expression):
"""An SQL function call."""
function = None
template = "%(function)s(%(expressions)s)"
arg_joiner = ", "
arity = None # The number of arguments the function accepts.
def __init__(self, *expressions, output_field=None, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)"
% (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
super().__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = {**self.extra, **self._get_repr_options()}
if extra:
extra = ", ".join(
str(key) + "=" + str(val) for key, val in sorted(extra.items())
)
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def _get_repr_options(self):
"""Return a dict of extra __init__() options to include in the repr."""
return {}
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def as_sql(
self,
compiler,
connection,
function=None,
template=None,
arg_joiner=None,
**extra_context,
):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
try:
arg_sql, arg_params = compiler.compile(arg)
except EmptyResultSet:
empty_result_set_value = getattr(
arg, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
raise
arg_sql, arg_params = compiler.compile(Value(empty_result_set_value))
except FullResultSet:
arg_sql, arg_params = compiler.compile(Value(True))
sql_parts.append(arg_sql)
params.extend(arg_params)
data = {**self.extra, **extra_context}
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data["function"] = function
else:
data.setdefault("function", self.function)
template = template or data.get("template", self.template)
arg_joiner = arg_joiner or data.get("arg_joiner", self.arg_joiner)
data["expressions"] = data["field"] = arg_joiner.join(sql_parts)
return template % data, tuple(params)
def copy(self):
copy = super().copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
@cached_property
def allowed_default(self):
return all(expression.allowed_default for expression in self.source_expressions)
@deconstructible(path="django.db.models.Value")
| Func |
python | django__django | django/db/migrations/migration.py | {
"start": 165,
"end": 9310
} | class ____:
"""
The base class for all migrations.
Migration files will import this from django.db.migrations.Migration
and subclass it as a class called Migration. It will have one or more
of the following attributes:
- operations: A list of Operation instances, probably from
django.db.migrations.operations
- dependencies: A list of tuples of (app_path, migration_name)
- run_before: A list of tuples of (app_path, migration_name)
- replaces: A list of migration_names
Note that all migrations come out of migrations and into the Loader or
Graph as instances, having been initialized with their app label and name.
"""
# Operations to apply during this migration, in order.
operations = []
# Other migrations that should be run before this migration.
# Should be a list of (app, migration_name).
dependencies = []
# Other migrations that should be run after this one (i.e. have
# this migration added to their dependencies). Useful to make third-party
# apps' migrations run after your AUTH_USER replacement, for example.
run_before = []
# Migration names in this app that this migration replaces. If this is
# non-empty, this migration will only be applied if all these migrations
# are not applied.
replaces = []
# Is this an initial migration? Initial migrations are skipped on
# --fake-initial if the table or fields already exist. If None, check if
# the migration has any dependencies to determine if there are dependencies
# to tell if db introspection needs to be done. If True, always perform
# introspection. If False, never perform introspection.
initial = None
# Whether to wrap the whole migration in a transaction. Only has an effect
# on database backends which support transactional DDL.
atomic = True
def __init__(self, name, app_label):
self.name = name
self.app_label = app_label
# Copy dependencies & other attrs as we might mutate them at runtime
self.operations = list(self.__class__.operations)
self.dependencies = list(self.__class__.dependencies)
self.run_before = list(self.__class__.run_before)
self.replaces = list(self.__class__.replaces)
def __eq__(self, other):
return (
isinstance(other, Migration)
and self.name == other.name
and self.app_label == other.app_label
)
def __repr__(self):
return "<Migration %s.%s>" % (self.app_label, self.name)
def __str__(self):
return "%s.%s" % (self.app_label, self.name)
def __hash__(self):
return hash("%s.%s" % (self.app_label, self.name))
def mutate_state(self, project_state, preserve=True):
"""
Take a ProjectState and return a new one with the migration's
operations applied to it. Preserve the original object state by
default and return a mutated state from a copy.
"""
new_state = project_state
if preserve:
new_state = project_state.clone()
for operation in self.operations:
operation.state_forwards(self.app_label, new_state)
return new_state
def apply(self, project_state, schema_editor, collect_sql=False):
"""
Take a project_state representing all migrations prior to this one
and a schema_editor for a live database and apply the migration
in a forwards order.
Return the resulting project state for efficient reuse by following
Migrations.
"""
for operation in self.operations:
# If this operation cannot be represented as SQL, place a comment
# there instead
if collect_sql:
schema_editor.collected_sql.append("--")
schema_editor.collected_sql.append("-- %s" % operation.describe())
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
schema_editor.collected_sql.append(
"-- THIS OPERATION CANNOT BE WRITTEN AS SQL"
)
continue
collected_sql_before = len(schema_editor.collected_sql)
# Save the state before the operation has run
old_state = project_state.clone()
operation.state_forwards(self.app_label, project_state)
# Run the operation
atomic_operation = operation.atomic or (
self.atomic and operation.atomic is not False
)
if not schema_editor.atomic_migration and atomic_operation:
# Force a transaction on a non-transactional-DDL backend or an
# atomic operation inside a non-atomic migration.
with atomic(schema_editor.connection.alias):
operation.database_forwards(
self.app_label, schema_editor, old_state, project_state
)
else:
# Normal behavior
operation.database_forwards(
self.app_label, schema_editor, old_state, project_state
)
if collect_sql and collected_sql_before == len(schema_editor.collected_sql):
schema_editor.collected_sql.append("-- (no-op)")
return project_state
def unapply(self, project_state, schema_editor, collect_sql=False):
"""
Take a project_state representing all migrations prior to this one
and a schema_editor for a live database and apply the migration
in a reverse order.
The backwards migration process consists of two phases:
1. The intermediate states from right before the first until right
after the last operation inside this migration are preserved.
2. The operations are applied in reverse order using the states
recorded in step 1.
"""
# Construct all the intermediate states we need for a reverse migration
to_run = []
new_state = project_state
# Phase 1
for operation in self.operations:
# If it's irreversible, error out
if not operation.reversible:
raise IrreversibleError(
"Operation %s in %s is not reversible" % (operation, self)
)
# Preserve new state from previous run to not tamper the same state
# over all operations
new_state = new_state.clone()
old_state = new_state.clone()
operation.state_forwards(self.app_label, new_state)
to_run.insert(0, (operation, old_state, new_state))
# Phase 2
for operation, to_state, from_state in to_run:
if collect_sql:
schema_editor.collected_sql.append("--")
schema_editor.collected_sql.append("-- %s" % operation.describe())
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
schema_editor.collected_sql.append(
"-- THIS OPERATION CANNOT BE WRITTEN AS SQL"
)
continue
collected_sql_before = len(schema_editor.collected_sql)
atomic_operation = operation.atomic or (
self.atomic and operation.atomic is not False
)
if not schema_editor.atomic_migration and atomic_operation:
# Force a transaction on a non-transactional-DDL backend or an
# atomic operation inside a non-atomic migration.
with atomic(schema_editor.connection.alias):
operation.database_backwards(
self.app_label, schema_editor, from_state, to_state
)
else:
# Normal behavior
operation.database_backwards(
self.app_label, schema_editor, from_state, to_state
)
if collect_sql and collected_sql_before == len(schema_editor.collected_sql):
schema_editor.collected_sql.append("-- (no-op)")
return project_state
def suggest_name(self):
"""
Suggest a name for the operations this migration might represent. Names
are not guaranteed to be unique, but put some effort into the fallback
name to avoid VCS conflicts if possible.
"""
if self.initial:
return "initial"
raw_fragments = [op.migration_name_fragment for op in self.operations]
fragments = [re.sub(r"\W+", "_", name) for name in raw_fragments if name]
if not fragments or len(fragments) != len(self.operations):
return "auto_%s" % get_migration_name_timestamp()
name = fragments[0]
for fragment in fragments[1:]:
new_name = f"{name}_{fragment}"
if len(new_name) > 52:
name = f"{name}_and_more"
break
name = new_name
return name
| Migration |
python | getsentry__sentry | tests/sentry/api/endpoints/test_auth_config.py | {
"start": 412,
"end": 3064
} | class ____(APITestCase):
path = "/api/0/auth/config/"
def test_logged_in(self) -> None:
user = self.create_user("foo@example.com")
self.login_as(user)
response = self.client.get(self.path)
assert response.status_code == 200
assert response.data["nextUri"] == "/organizations/new/"
def test_logged_in_active_org(self) -> None:
user = self.create_user("foo@example.com")
self.create_organization(owner=user, slug="ricks-org")
self.login_as(user)
response = self.client.get(self.path)
assert response.status_code == 200
assert response.data["nextUri"] == "/organizations/ricks-org/issues/"
@override_settings(SENTRY_SINGLE_ORGANIZATION=True)
@assume_test_silo_mode(SiloMode.MONOLITH) # Single org IS monolith mode
def test_single_org(self) -> None:
create_default_projects()
response = self.client.get(self.path)
assert response.status_code == 200
assert response.data["nextUri"] == "/auth/login/sentry/"
def test_superuser_is_not_redirected(self) -> None:
user = self.create_user("foo@example.com", is_superuser=True)
self.login_as(user)
response = self.client.get(self.path)
assert response.status_code == 200
assert response.data["nextUri"] == "/organizations/new/"
def test_unauthenticated(self) -> None:
response = self.client.get(self.path)
assert response.status_code == 200
assert not response.data["canRegister"]
assert not response.data["hasNewsletter"]
assert response.data["serverHostname"] == "testserver"
@pytest.mark.skipif(
settings.SENTRY_NEWSLETTER != "sentry.newsletter.dummy.DummyNewsletter",
reason="Requires DummyNewsletter.",
)
def test_has_newsletter(self) -> None:
with newsletter.backend.test_only__downcast_to(DummyNewsletter).enable():
response = self.client.get(self.path)
assert response.status_code == 200
assert response.data["hasNewsletter"]
def test_can_register(self) -> None:
with self.options({"auth.allow-registration": True}):
with self.feature("auth:register"):
response = self.client.get(self.path)
assert response.status_code == 200
assert response.data["canRegister"]
def test_session_expired(self) -> None:
self.client.cookies["session_expired"] = "1"
response = self.client.get(self.path)
assert response.status_code == 200
assert response.data["warning"] == "Your session has expired."
| AuthConfigEndpointTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType8.py | {
"start": 146,
"end": 225
} | class ____:
pass
T_A = TypeVar("T_A", bound=ClassA)
T = TypeVar("T")
| ClassA |
python | ZoranPandovski__al-go-rithms | data_structures/Graphs/graph/Python/DijkstraShortestPath.py | {
"start": 151,
"end": 2750
} | class ____:
def __init__(self, directed=False):
self.graph = defaultdict(list)
self.directed = directed
def addEdge(self, frm, to, weight):
self.graph[frm].append([to, weight])
if self.directed is False:
self.graph[to].append([frm, weight])
else:
self.graph[to] = self.graph[to]
def find_min(self, dist, visited):
minimum = float('inf')
index = -1
for v in self.graph.keys():
if visited[v] is False and dist[v] < minimum:
minimum = dist[v]
index = v
return index
def dijkstra(self, src):
visited = {i: False for i in self.graph}
dist = {i: float('inf') for i in self.graph}
parent = {i: None for i in self.graph}
# set distance of src vertex from itself 0
dist[src] = 0
# find shortest path for all vertices
for i in range(len(self.graph)-1):
# find minimum distance vertex from source
# initially src itself as dist[src] = 0
u = self.find_min(dist, visited)
# mark the node as visited
visited[u] = True
# check if the distance through current edge is less than previously known distance to v
for v, w in self.graph[u]:
if visited[v] is False and dist[u] + w < dist[v]:
dist[v] = dist[u] + w
parent[v] = u
# return parent list and distance to each node from source
return parent, dist
def printPath(self, parent, v):
if parent[v] is None:
return
self.printPath(parent, parent[v])
print(v, end=' ')
def printSolution(self, dist, parent, src):
print('{}\t{}\t{}'.format('Vertex', 'Distance', 'Path'))
for i in self.graph.keys():
if i == src:
continue
print('{} -> {}\t\t{}\t\t{}'.format(src, i, dist[i], src), end=' ')
self.printPath(parent, i)
print()
if __name__ == '__main__':
# make an undirected graph
graph = Graph()
graph.addEdge(0, 1, 4)
graph.addEdge(0, 7, 8)
graph.addEdge(1, 2, 8)
graph.addEdge(1, 7, 11)
graph.addEdge(7, 6, 1)
graph.addEdge(7, 8, 7)
graph.addEdge(6, 8, 6)
graph.addEdge(6, 5, 2)
graph.addEdge(8, 2, 2)
graph.addEdge(2, 3, 7)
graph.addEdge(2, 5, 4)
graph.addEdge(3, 4, 9)
graph.addEdge(3, 5, 14)
graph.addEdge(5, 4, 10)
parent, dist = graph.dijkstra(0)
graph.printSolution(dist, parent, 0)
| Graph |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-qdrant/llama_index/vector_stores/qdrant/base.py | {
"start": 1971,
"end": 65550
} | class ____(BasePydanticVectorStore):
"""
Qdrant Vector Store.
In this vector store, embeddings and docs are stored within a
Qdrant collection.
During query time, the index uses Qdrant to query for the top
k most similar nodes.
Args:
collection_name: (str): name of the Qdrant collection
client (Optional[QdrantClient]): QdrantClient instance from `qdrant-client` package
aclient (Optional[AsyncQdrantClient]): AsyncQdrantClient instance from `qdrant-client` package
url (Optional[str]): url of the Qdrant instance
api_key (Optional[str]): API key for authenticating with Qdrant
batch_size (int): number of points to upload in a single request to Qdrant. Defaults to 64
parallel (int): number of parallel processes to use during upload. Defaults to 1
max_retries (int): maximum number of retries in case of a failure. Defaults to 3
client_kwargs (Optional[dict]): additional kwargs for QdrantClient and AsyncQdrantClient
enable_hybrid (bool): whether to enable hybrid search using dense and sparse vectors
fastembed_sparse_model (Optional[str]): name of the FastEmbed sparse model to use, if any
sparse_doc_fn (Optional[SparseEncoderCallable]): function to encode sparse vectors
sparse_query_fn (Optional[SparseEncoderCallable]): function to encode sparse queries
hybrid_fusion_fn (Optional[HybridFusionCallable]): function to fuse hybrid search results
index_doc_id (bool): whether to create a payload index for the document ID. Defaults to True
text_key (str): Name of the field holding the text information, Defaults to 'text'
dense_vector_name (Optional[str]): Custom name for the dense vector field. Defaults to 'text-dense'
sparse_vector_name (Optional[str]): Custom name for the sparse vector field. Defaults to 'text-sparse-new'
shard_number (Optional[int]): Shard number for sharding the collection
sharding_method (Optional[rest.ShardingMethod]): Sharding method for the collection
replication_factor (Optional[int]): Replication factor for the collection
write_consistency_factor (Optional[int]): Write consistency factor for the collection
shard_key_selector_fn (Optional[Callable[..., rest.ShardKeySelector]]): Function to select shard keys
shard_keys (Optional[list[rest.ShardKey]]): List of shard keys
payload_indexes: Optional[list[dict[str, rest.PayloadSchemaType]]]: List of payload field indexes
Notes:
For backward compatibility, the vector store will automatically detect the vector format
of existing collections and adapt accordingly:
- For collections created with older versions using unnamed vectors (empty string ""),
the vector store will use the legacy format for queries.
- For collections with named vectors, it will use the existing vector names.
- For new collections, it will use the vector names provided or the defaults.
Examples:
`pip install llama-index-vector-stores-qdrant`
```python
import qdrant_client
from llama_index.vector_stores.qdrant import QdrantVectorStore
client = qdrant_client.QdrantClient()
vector_store = QdrantVectorStore(
collection_name="example_collection", client=client
)
```
"""
stores_text: bool = True
flat_metadata: bool = False
collection_name: str
url: Optional[str]
api_key: Optional[str]
batch_size: int
parallel: int
max_retries: int
client_kwargs: dict = Field(default_factory=dict)
enable_hybrid: bool
index_doc_id: bool
fastembed_sparse_model: Optional[str]
text_key: Optional[str]
dense_vector_name: str
sparse_vector_name: str
_client: QdrantClient = PrivateAttr()
_aclient: AsyncQdrantClient = PrivateAttr()
_collection_initialized: bool = PrivateAttr()
_sparse_doc_fn: Optional[SparseEncoderCallable] = PrivateAttr()
_sparse_query_fn: Optional[SparseEncoderCallable] = PrivateAttr()
_hybrid_fusion_fn: Optional[HybridFusionCallable] = PrivateAttr()
_dense_config: Optional[rest.VectorParams] = PrivateAttr()
_sparse_config: Optional[rest.SparseVectorParams] = PrivateAttr()
_quantization_config: Optional[QuantizationConfig] = PrivateAttr()
_legacy_vector_format: Optional[bool] = PrivateAttr()
_shard_key_selector_fn: Optional[Callable[..., rest.ShardKeySelector]] = (
PrivateAttr()
)
_shard_keys: Optional[list[rest.ShardKey]] = PrivateAttr()
_shard_number: Optional[int] = PrivateAttr()
_sharding_method: Optional[rest.ShardingMethod] = PrivateAttr()
_replication_factor: Optional[int] = PrivateAttr()
_write_consistency_factor: Optional[int] = PrivateAttr()
_payload_indexes: Optional[list[dict[str, rest.PayloadSchemaType]]] = PrivateAttr()
def __init__(
self,
collection_name: str,
client: Optional[QdrantClient] = None,
aclient: Optional[AsyncQdrantClient] = None,
url: Optional[str] = None,
api_key: Optional[str] = None,
batch_size: int = 64,
parallel: int = 1,
max_retries: int = 3,
client_kwargs: Optional[dict] = None,
dense_config: Optional[rest.VectorParams] = None,
sparse_config: Optional[rest.SparseVectorParams] = None,
quantization_config: Optional[QuantizationConfig] = None,
enable_hybrid: bool = False,
fastembed_sparse_model: Optional[str] = None,
sparse_doc_fn: Optional[SparseEncoderCallable] = None,
sparse_query_fn: Optional[SparseEncoderCallable] = None,
hybrid_fusion_fn: Optional[HybridFusionCallable] = None,
index_doc_id: bool = True,
text_key: Optional[str] = "text",
dense_vector_name: Optional[str] = None,
sparse_vector_name: Optional[str] = None,
shard_number: Optional[int] = None,
sharding_method: Optional[rest.ShardingMethod] = None,
shard_key_selector_fn: Optional[Callable[..., rest.ShardKeySelector]] = None,
shard_keys: Optional[list[rest.ShardKey]] = None,
replication_factor: Optional[int] = None,
write_consistency_factor: Optional[int] = None,
payload_indexes: Optional[list[dict[str, rest.PayloadSchemaType]]] = None,
**kwargs: Any,
) -> None:
"""Init params."""
# Set default vector names if not provided
dense_vector_name = dense_vector_name or DEFAULT_DENSE_VECTOR_NAME
sparse_vector_name = sparse_vector_name or DEFAULT_SPARSE_VECTOR_NAME
super().__init__(
collection_name=collection_name,
url=url,
api_key=api_key,
batch_size=batch_size,
parallel=parallel,
max_retries=max_retries,
client_kwargs=client_kwargs or {},
enable_hybrid=enable_hybrid,
index_doc_id=index_doc_id,
fastembed_sparse_model=fastembed_sparse_model,
text_key=text_key,
dense_vector_name=dense_vector_name,
sparse_vector_name=sparse_vector_name,
)
# Track if the user provided their own sparse functions. This is to prevent
# them from being overwritten by the lazy-init correction for async clients.
self._user_provided_sparse_doc_fn = sparse_doc_fn is not None
self._user_provided_sparse_query_fn = sparse_query_fn is not None
if (
client is None
and aclient is None
and (url is None or api_key is None or collection_name is None)
):
raise ValueError(
"Must provide either a QdrantClient instance or a url and api_key."
)
if client is None and aclient is None:
client_kwargs = client_kwargs or {}
self._client = qdrant_client.QdrantClient(
url=url, api_key=api_key, **client_kwargs
)
self._aclient = qdrant_client.AsyncQdrantClient(
url=url, api_key=api_key, **client_kwargs
)
else:
if client is not None and aclient is not None:
possible_local_clients = [
getattr(client, "_client", None),
getattr(aclient, "_client", None),
]
if any(
isinstance(client, QdrantLocal) for client in possible_local_clients
):
logger.warning(
"Both client and aclient are provided. If using `:memory:` "
"mode, the data between clients is not synced."
)
self._client = client
self._aclient = aclient
self._payload_indexes = payload_indexes
# Check if collection exists and detect vector format
self._legacy_vector_format = None
if self._client is not None:
self._collection_initialized = self._collection_exists(collection_name)
if self._collection_initialized:
self._detect_vector_format(collection_name)
if self._payload_indexes:
self._create_payload_indexes()
else:
# Need to do lazy init for async clients
self._collection_initialized = False
# Setup hybrid search if enabled
if enable_hybrid or fastembed_sparse_model is not None:
enable_hybrid = True
self._sparse_doc_fn = sparse_doc_fn or self.get_default_sparse_doc_encoder(
collection_name,
fastembed_sparse_model=fastembed_sparse_model,
)
self._sparse_query_fn = (
sparse_query_fn
or self.get_default_sparse_query_encoder(
collection_name,
fastembed_sparse_model=fastembed_sparse_model,
)
)
self._hybrid_fusion_fn = hybrid_fusion_fn or cast(
HybridFusionCallable, relative_score_fusion
)
self._sparse_config = sparse_config
self._dense_config = dense_config
self._quantization_config = quantization_config
self._shard_number = shard_number
self._sharding_method = sharding_method
self._shard_key_selector_fn = shard_key_selector_fn
self._shard_keys = shard_keys
self._replication_factor = replication_factor
self._write_consistency_factor = write_consistency_factor
if self._sharding_method == rest.ShardingMethod.CUSTOM:
self._validate_custom_sharding()
@classmethod
def class_name(cls) -> str:
return "QdrantVectorStore"
def set_query_functions(
self,
sparse_doc_fn: Optional[SparseEncoderCallable] = None,
sparse_query_fn: Optional[SparseEncoderCallable] = None,
hybrid_fusion_fn: Optional[HybridFusionCallable] = None,
):
self._sparse_doc_fn = sparse_doc_fn
self._sparse_query_fn = sparse_query_fn
self._hybrid_fusion_fn = hybrid_fusion_fn
def _build_points(
self, nodes: List[BaseNode], sparse_vector_name: str
) -> Tuple[List[Any], List[str]]:
ids = []
points = []
for node_batch in iter_batch(nodes, self.batch_size):
node_ids = []
vectors: List[Any] = []
sparse_vectors: List[List[float]] = []
sparse_indices: List[List[int]] = []
payloads = []
if self.enable_hybrid and self._sparse_doc_fn is not None:
sparse_indices, sparse_vectors = self._sparse_doc_fn(
[
node.get_content(metadata_mode=MetadataMode.EMBED)
for node in node_batch
],
)
for i, node in enumerate(node_batch):
assert isinstance(node, BaseNode)
node_ids.append(node.node_id)
if self.enable_hybrid:
if (
len(sparse_vectors) > 0
and len(sparse_indices) > 0
and len(sparse_vectors) == len(sparse_indices)
):
vectors.append(
{
# Dynamically switch between the old and new sparse vector name
sparse_vector_name: rest.SparseVector(
indices=sparse_indices[i],
values=sparse_vectors[i],
),
self.dense_vector_name: node.get_embedding(),
}
)
else:
vectors.append(
{
self.dense_vector_name: node.get_embedding(),
}
)
else:
vectors.append({self.dense_vector_name: node.get_embedding()})
metadata = node_to_metadata_dict(
node, remove_text=False, flat_metadata=self.flat_metadata
)
payloads.append(metadata)
points.extend(
[
rest.PointStruct(id=node_id, payload=payload, vector=vector)
for node_id, payload, vector in zip(node_ids, payloads, vectors)
]
)
ids.extend(node_ids)
return points, ids
def _ensure_async_client(self) -> None:
if self._aclient is None:
raise ValueError(
"Async client is not initialized!\nPlease pass in `aclient` to the constructor: "
"`QdrantVectorStore(..., aclient=AsyncQdrantClient(...))`"
)
    def get_nodes(
        self,
        node_ids: Optional[List[str]] = None,
        filters: Optional[MetadataFilters] = None,
        limit: Optional[int] = None,
        shard_identifier: Optional[Any] = None,
    ) -> List[BaseNode]:
        """
        Get nodes from the index.

        Args:
            node_ids (Optional[List[str]]): List of node IDs to retrieve.
            filters (Optional[MetadataFilters]): Metadata filters to apply.
            limit (Optional[int]): Maximum number of nodes to retrieve.
            shard_identifier (Optional[Any]): Shard identifier for the query.

        Returns:
            List[BaseNode]: List of nodes retrieved from the index.
        """
        should = []
        if node_ids is not None:
            should = [
                HasIdCondition(
                    has_id=node_ids,
                )
            ]
            # If we pass a node_ids list,
            # we can limit the search to only those nodes
            # or less if limit is provided
            limit = len(node_ids) if limit is None else min(len(node_ids), limit)
        if filters is not None:
            # Merge the id condition into the filter derived from metadata.
            filter = self._build_subfilter(filters)
            if filter.should is None:
                filter.should = should
            else:
                filter.should.extend(should)
        else:
            filter = Filter(should=should)
        # If we pass an empty list, Qdrant will not return any results,
        # so normalize empty clause lists to None before scrolling.
        filter.must = filter.must if filter.must and len(filter.must) > 0 else None
        filter.should = (
            filter.should if filter.should and len(filter.should) > 0 else None
        )
        filter.must_not = (
            filter.must_not if filter.must_not and len(filter.must_not) > 0 else None
        )
        shard_key_selector = (
            self._generate_shard_key_selector(shard_identifier)
            if shard_identifier is not None
            else None
        )
        response = self._client.scroll(
            collection_name=self.collection_name,
            limit=limit or 9999,
            scroll_filter=filter,
            with_vectors=True,
            shard_key_selector=shard_key_selector,
        )
        # scroll() returns (points, next_page_offset); only the points matter here.
        return self.parse_to_query_result(response[0]).nodes
async def aget_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
limit: Optional[int] = None,
shard_identifier: Optional[Any] = None,
) -> List[BaseNode]:
"""
Asynchronous method to get nodes from the index.
Args:
node_ids (Optional[List[str]]): List of node IDs to retrieve.
filters (Optional[MetadataFilters]): Metadata filters to apply.
limit (Optional[int]): Maximum number of nodes to retrieve.
shard_identifier (Optional[Any]): Shard identifier for the query.
Returns:
List[BaseNode]: List of nodes retrieved from the index.
"""
self._ensure_async_client()
should = []
if node_ids is not None:
should = [
HasIdCondition(
has_id=node_ids,
)
]
# If we pass a node_ids list,
# we can limit the search to only those nodes
# or less if limit is provided
limit = len(node_ids) if limit is None else min(len(node_ids), limit)
if filters is not None:
filter = self._build_subfilter(filters)
if filter.should is None:
filter.should = should
else:
filter.should.extend(should)
else:
filter = Filter(should=should)
shard_key_selector = (
self._generate_shard_key_selector(shard_identifier)
if shard_identifier is not None
else None
)
response = await self._aclient.scroll(
collection_name=self.collection_name,
limit=limit or 9999,
scroll_filter=filter,
with_vectors=True,
shard_key_selector=shard_key_selector,
)
return self.parse_to_query_result(response[0]).nodes
    def add(
        self,
        nodes: List[BaseNode],
        shard_identifier: Optional[Any] = None,
        **add_kwargs: Any,
    ) -> List[str]:
        """
        Add nodes to index.

        Creates the collection on first use (sized from the first node's
        embedding) and uploads the points synchronously with retries.

        Args:
            nodes: List[BaseNode]: list of nodes with embeddings
            shard_identifier (Optional[Any]): Shard identifier for the nodes

        Returns:
            List[str]: IDs of the nodes that were added.
        """
        if len(nodes) > 0 and not self._collection_initialized:
            self._create_collection(
                collection_name=self.collection_name,
                vector_size=len(nodes[0].get_embedding()),
            )
        if self._collection_initialized and self._legacy_vector_format is None:
            # Existing collection whose vector naming scheme we haven't
            # inspected yet; detection may adjust the vector names used below.
            self._detect_vector_format(self.collection_name)
        points, ids = self._build_points(nodes, self.sparse_vector_name)
        shard_key_selector = (
            self._generate_shard_key_selector(shard_identifier)
            if shard_identifier is not None
            else None
        )
        self._client.upload_points(
            collection_name=self.collection_name,
            points=points,
            batch_size=self.batch_size,
            parallel=self.parallel,
            max_retries=self.max_retries,
            wait=True,
            shard_key_selector=shard_key_selector,
        )
        return ids
    async def async_add(
        self,
        nodes: List[BaseNode],
        shard_identifier: Optional[Any] = None,
        **kwargs: Any,
    ) -> List[str]:
        """
        Asynchronous method to add nodes to Qdrant index.

        Args:
            nodes: List[BaseNode]: List of nodes with embeddings.
            shard_identifier: Optional[Any]: Shard identifier for the nodes.

        Returns:
            List of node IDs that were added to the index.

        Raises:
            ValueError: If trying to using async methods without aclient
        """
        self._ensure_async_client()
        collection_initialized = await self._acollection_exists(self.collection_name)
        if len(nodes) > 0 and not collection_initialized:
            await self._acreate_collection(
                collection_name=self.collection_name,
                vector_size=len(nodes[0].get_embedding()),
            )
            collection_initialized = True
        if collection_initialized and self._legacy_vector_format is None:
            # If collection exists but we haven't detected the vector format yet
            await self._adetect_vector_format(self.collection_name)
        points, ids = self._build_points(nodes, self.sparse_vector_name)
        shard_key_selector = (
            self._generate_shard_key_selector(shard_identifier)
            if shard_identifier is not None
            else None
        )
        # Upsert batch by batch, retrying each batch up to max_retries times
        # on transport-level failures before giving up.
        for batch in iter_batch(points, self.batch_size):
            retries = 0
            while retries < self.max_retries:
                try:
                    await self._aclient.upsert(
                        collection_name=self.collection_name,
                        points=batch,
                        shard_key_selector=shard_key_selector,
                    )
                    break
                except (RpcError, UnexpectedResponse) as exc:
                    retries += 1
                    if retries >= self.max_retries:
                        raise exc  # noqa: TRY201
        return ids
def delete(
self,
ref_doc_id: str,
shard_identifier: Optional[Any] = None,
**delete_kwargs: Any,
) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
shard_identifier (Optional[Any]): Shard identifier for the nodes.
"""
shard_key_selector = (
self._generate_shard_key_selector(shard_identifier)
if shard_identifier is not None
else None
)
self._client.delete(
collection_name=self.collection_name,
points_selector=rest.Filter(
must=[
rest.FieldCondition(
key=DOCUMENT_ID_KEY,
match=rest.MatchValue(value=ref_doc_id),
)
]
),
shard_key_selector=shard_key_selector,
)
async def adelete(
self,
ref_doc_id: str,
shard_identifier: Optional[Any] = None,
**delete_kwargs: Any,
) -> None:
"""
Asynchronous method to delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
shard_identifier (Optional[Any]): Shard identifier for the nodes.
"""
self._ensure_async_client()
shard_key_selector = (
self._generate_shard_key_selector(shard_identifier)
if shard_identifier is not None
else None
)
await self._aclient.delete(
collection_name=self.collection_name,
points_selector=rest.Filter(
must=[
rest.FieldCondition(
key=DOCUMENT_ID_KEY,
match=rest.MatchValue(value=ref_doc_id),
)
]
),
shard_key_selector=shard_key_selector,
)
def delete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
shard_identifier: Optional[Any] = None,
**delete_kwargs: Any,
) -> None:
"""
Delete nodes using with node_ids.
Args:
node_ids (Optional[List[str]): List of node IDs to delete.
filters (Optional[MetadataFilters]): Metadata filters to apply.
shard_identifier (Optional[Any]): Shard identifier for the nodes.
"""
should = []
if node_ids is not None:
should = [
HasIdCondition(
has_id=node_ids,
)
]
if filters is not None:
filter = self._build_subfilter(filters)
if filter.should is None:
filter.should = should
else:
filter.should.extend(should)
else:
filter = Filter(should=should)
shard_key_selector = (
self._generate_shard_key_selector(shard_identifier)
if shard_identifier is not None
else None
)
self._client.delete(
collection_name=self.collection_name,
points_selector=filter,
shard_key_selector=shard_key_selector,
)
async def adelete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
shard_identifier: Optional[Any] = None,
**delete_kwargs: Any,
) -> None:
"""
Asynchronous method to delete nodes using with node_ids.
Args:
node_ids (Optional[List[str]): List of node IDs to delete.
filters (Optional[MetadataFilters]): Metadata filters to apply.
shard_identifier (Optional[Any]): Shard identifier for the nodes.
"""
self._ensure_async_client()
should = []
if node_ids is not None:
should = [
HasIdCondition(
has_id=node_ids,
)
]
if filters is not None:
filter = self._build_subfilter(filters)
if filter.should is None:
filter.should = should
else:
filter.should.extend(should)
else:
filter = Filter(should=should)
shard_key_selector = (
self._generate_shard_key_selector(shard_identifier)
if shard_identifier is not None
else None
)
await self._aclient.delete(
collection_name=self.collection_name,
points_selector=filter,
shard_key_selector=shard_key_selector,
)
def clear(self) -> None:
"""
Clear the index.
"""
self._client.delete_collection(collection_name=self.collection_name)
self._collection_initialized = False
async def aclear(self) -> None:
"""
Asynchronous method to clear the index.
"""
self._ensure_async_client()
await self._aclient.delete_collection(collection_name=self.collection_name)
self._collection_initialized = False
@property
def client(self) -> Any:
"""Return the Qdrant client."""
return self._client
    def _create_collection(self, collection_name: str, vector_size: int) -> None:
        """
        Create a Qdrant collection.

        Builds dense (and, when hybrid is enabled, sparse) vector configs,
        creates the collection with the configured sharding/replication
        settings, then sets up shard keys and payload indexes. If the
        collection already exists the creation error is swallowed but shard
        keys and payload indexes are still ensured.

        Args:
            collection_name (str): Name of the collection to create.
            vector_size (int): Dimensionality of the dense vectors.
        """
        dense_config = self._dense_config or rest.VectorParams(
            size=vector_size,
            distance=rest.Distance.COSINE,
        )
        sparse_config = self._sparse_config or rest.SparseVectorParams(
            index=rest.SparseIndexParams(),
            modifier=(
                # IDF modifier is required by some fastembed sparse models.
                rest.Modifier.IDF
                if self.fastembed_sparse_model in IDF_EMBEDDING_MODELS
                else None
            ),
        )
        try:
            if self.enable_hybrid:
                self._client.create_collection(
                    collection_name=collection_name,
                    vectors_config={
                        self.dense_vector_name: dense_config,
                    },
                    # Newly created collection will have the new sparse vector name
                    sparse_vectors_config={self.sparse_vector_name: sparse_config},
                    quantization_config=self._quantization_config,
                    shard_number=self._shard_number,
                    replication_factor=self._replication_factor,
                    sharding_method=self._sharding_method,
                    write_consistency_factor=self._write_consistency_factor,
                )
            else:
                self._client.create_collection(
                    collection_name=collection_name,
                    vectors_config=dense_config,
                    quantization_config=self._quantization_config,
                    shard_number=self._shard_number,
                    replication_factor=self._replication_factor,
                    sharding_method=self._sharding_method,
                    write_consistency_factor=self._write_consistency_factor,
                )
            if self._shard_keys:
                self._create_shard_keys()
            # To improve search performance Qdrant recommends setting up
            # a payload index for fields used in filters.
            # https://qdrant.tech/documentation/concepts/indexing
            if self.index_doc_id:
                self._client.create_payload_index(
                    collection_name=collection_name,
                    field_name=DOCUMENT_ID_KEY,
                    field_schema=rest.PayloadSchemaType.KEYWORD,
                )
            if self._payload_indexes:
                self._create_payload_indexes()
        except (RpcError, ValueError, UnexpectedResponse) as exc:
            # Only "already exists" is benign; anything else is re-raised.
            if "already exists" not in str(exc):
                raise exc  # noqa: TRY201
            logger.warning(
                "Collection %s already exists, skipping collection creation.",
                collection_name,
            )
            # Still ensure shard keys / payload indexes on the existing collection.
            if self._shard_keys:
                self._create_shard_keys()
            if self._payload_indexes:
                self._create_payload_indexes()
        self._collection_initialized = True
    async def _acreate_collection(self, collection_name: str, vector_size: int) -> None:
        """
        Asynchronous method to create a Qdrant collection.

        Mirrors the synchronous ``_create_collection``: builds dense/sparse
        configs, creates the collection, then ensures shard keys and payload
        indexes; an "already exists" error is swallowed.

        Args:
            collection_name (str): Name of the collection to create.
            vector_size (int): Dimensionality of the dense vectors.
        """
        dense_config = self._dense_config or rest.VectorParams(
            size=vector_size,
            distance=rest.Distance.COSINE,
        )
        sparse_config = self._sparse_config or rest.SparseVectorParams(
            index=rest.SparseIndexParams(),
            modifier=(
                # IDF modifier is required by some fastembed sparse models.
                rest.Modifier.IDF
                if self.fastembed_sparse_model in IDF_EMBEDDING_MODELS
                else None
            ),
        )
        try:
            if self.enable_hybrid:
                await self._aclient.create_collection(
                    collection_name=collection_name,
                    vectors_config={self.dense_vector_name: dense_config},
                    sparse_vectors_config={self.sparse_vector_name: sparse_config},
                    quantization_config=self._quantization_config,
                    shard_number=self._shard_number,
                    replication_factor=self._replication_factor,
                    sharding_method=self._sharding_method,
                    write_consistency_factor=self._write_consistency_factor,
                )
            else:
                await self._aclient.create_collection(
                    collection_name=collection_name,
                    vectors_config=dense_config,
                    quantization_config=self._quantization_config,
                    shard_number=self._shard_number,
                    replication_factor=self._replication_factor,
                    sharding_method=self._sharding_method,
                    write_consistency_factor=self._write_consistency_factor,
                )
            if self._shard_keys:
                await self._acreate_shard_keys()
            # To improve search performance Qdrant recommends setting up
            # a payload index for fields used in filters.
            # https://qdrant.tech/documentation/concepts/indexing
            if self.index_doc_id:
                await self._aclient.create_payload_index(
                    collection_name=collection_name,
                    field_name=DOCUMENT_ID_KEY,
                    field_schema=rest.PayloadSchemaType.KEYWORD,
                )
            if self._payload_indexes:
                await self._acreate_payload_indexes()
        except (RpcError, ValueError, UnexpectedResponse) as exc:
            # Only "already exists" is benign; anything else is re-raised.
            if "already exists" not in str(exc):
                raise exc  # noqa: TRY201
            logger.warning(
                "Collection %s already exists, skipping collection creation.",
                collection_name,
            )
            # Still ensure shard keys / payload indexes on the existing collection.
            if self._shard_keys:
                await self._acreate_shard_keys()
            if self._payload_indexes:
                await self._acreate_payload_indexes()
        self._collection_initialized = True
def _collection_exists(self, collection_name: str) -> bool:
"""Check if a collection exists."""
return self._client.collection_exists(collection_name)
async def _acollection_exists(self, collection_name: str) -> bool:
"""Asynchronous method to check if a collection exists."""
return await self._aclient.collection_exists(collection_name)
def _create_shard_keys(self) -> None:
"""Create shard keys in Qdrant collection."""
if not self._shard_keys:
return
for shard_key in self._shard_keys:
try:
self._client.create_shard_key(
collection_name=self.collection_name,
shard_key=shard_key,
)
except (RpcError, ValueError, UnexpectedResponse) as exc:
if "already exists" not in str(exc):
raise exc # noqa: TRY201
logger.warning(
"Shard key %s already exists, skipping creation.",
shard_key,
)
continue
async def _acreate_shard_keys(self) -> None:
"""Asynchronous method to create shard keys in Qdrant collection."""
if not self._shard_keys:
return
for shard_key in self._shard_keys:
try:
await self._aclient.create_shard_key(
collection_name=self.collection_name,
shard_key=shard_key,
)
except (RpcError, ValueError, UnexpectedResponse) as exc:
if "already exists" not in str(exc):
raise exc # noqa: TRY201
logger.warning(
"Shard key %s already exists, skipping creation.",
shard_key,
)
continue
def _create_payload_indexes(self) -> None:
"""Create payload indexes in Qdrant collection."""
if not self._payload_indexes:
return
for payload_index in self._payload_indexes:
self._client.create_payload_index(
collection_name=self.collection_name,
field_name=payload_index["field_name"],
field_schema=payload_index["field_schema"],
)
async def _acreate_payload_indexes(self) -> None:
"""Create payload indexes in Qdrant collection."""
if not self._payload_indexes:
return
for payload_index in self._payload_indexes:
await self._aclient.create_payload_index(
collection_name=self.collection_name,
field_name=payload_index["field_name"],
field_schema=payload_index["field_schema"],
)
    def query(
        self,
        query: VectorStoreQuery,
        **kwargs: Any,
    ) -> VectorStoreQueryResult:
        """
        Query index for top k most similar nodes.

        Dispatches on ``query.mode``: HYBRID fuses dense + sparse results,
        SPARSE queries only the sparse vector, otherwise a plain dense search
        is performed.

        Args:
            query (VectorStoreQuery): query

        Keyword Args:
            qdrant_filters: native Qdrant ``Filter`` overriding the filters
                derived from ``query.filters``.
            shard_identifier: optional shard identifier for the request.

        Raises:
            ValueError: if HYBRID mode is requested but hybrid is disabled.
        """
        query_embedding = cast(List[float], query.query_embedding)
        #  NOTE: users can pass in qdrant_filters (nested/complicated filters) to override the default MetadataFilters
        qdrant_filters = kwargs.get("qdrant_filters")
        if qdrant_filters is not None:
            query_filter = qdrant_filters
        else:
            query_filter = cast(Filter, self._build_query_filter(query))
        shard_identifier = kwargs.get("shard_identifier")
        shard_key = (
            self._generate_shard_key_selector(shard_identifier)
            if shard_identifier is not None
            else None
        )
        if query.mode == VectorStoreQueryMode.HYBRID and not self.enable_hybrid:
            raise ValueError(
                "Hybrid search is not enabled. Please build the query with "
                "`enable_hybrid=True` in the constructor."
            )
        elif (
            query.mode == VectorStoreQueryMode.HYBRID
            and self.enable_hybrid
            and self._sparse_query_fn is not None
            and query.query_str is not None
        ):
            # Hybrid: issue dense + sparse requests in one batch call, then
            # fuse the two result lists.
            sparse_indices, sparse_embedding = self._sparse_query_fn(
                [query.query_str],
            )
            sparse_top_k = query.sparse_top_k or query.similarity_top_k
            sparse_response = self._client.query_batch_points(
                collection_name=self.collection_name,
                requests=[
                    rest.QueryRequest(
                        query=query_embedding,
                        using=self.dense_vector_name,
                        limit=query.similarity_top_k,
                        filter=query_filter,
                        with_payload=True,
                        shard_key=shard_key,
                    ),
                    rest.QueryRequest(
                        query=rest.SparseVector(
                            indices=sparse_indices[0],
                            values=sparse_embedding[0],
                        ),
                        using=self.sparse_vector_name,
                        limit=sparse_top_k,
                        filter=query_filter,
                        with_payload=True,
                        shard_key=shard_key,
                    ),
                ],
            )
            # sanity check
            assert len(sparse_response) == 2
            assert self._hybrid_fusion_fn is not None
            # flatten the response
            return self._hybrid_fusion_fn(
                self.parse_to_query_result(sparse_response[0].points),
                self.parse_to_query_result(sparse_response[1].points),
                # NOTE: only for hybrid search (0 for sparse search, 1 for dense search)
                alpha=query.alpha or 0.5,
                # NOTE: use hybrid_top_k if provided, otherwise use similarity_top_k
                top_k=query.hybrid_top_k or query.similarity_top_k,
            )
        elif (
            query.mode == VectorStoreQueryMode.SPARSE
            and self.enable_hybrid
            and self._sparse_query_fn is not None
            and query.query_str is not None
        ):
            # Sparse-only search against the sparse vector field.
            sparse_indices, sparse_embedding = self._sparse_query_fn(
                [query.query_str],
            )
            sparse_top_k = query.sparse_top_k or query.similarity_top_k
            sparse_response = self._client.query_batch_points(
                collection_name=self.collection_name,
                requests=[
                    rest.QueryRequest(
                        query=rest.SparseVector(
                            indices=sparse_indices[0],
                            values=sparse_embedding[0],
                        ),
                        using=self.sparse_vector_name,
                        limit=sparse_top_k,
                        filter=query_filter,
                        with_payload=True,
                        shard_key=shard_key,
                    ),
                ],
            )
            return self.parse_to_query_result(sparse_response[0].points)
        elif self.enable_hybrid:
            # search for dense vectors only
            response = self._client.query_batch_points(
                collection_name=self.collection_name,
                requests=[
                    rest.QueryRequest(
                        query=query_embedding,
                        using=self.dense_vector_name,
                        limit=query.similarity_top_k,
                        filter=query_filter,
                        with_payload=True,
                        shard_key=shard_key,
                    ),
                ],
            )
            return self.parse_to_query_result(response[0].points)
        else:
            # Regular non-hybrid search
            response = self._client.query_points(
                collection_name=self.collection_name,
                query=query_embedding,
                using=self.dense_vector_name,
                limit=query.similarity_top_k,
                query_filter=query_filter,
                shard_key_selector=shard_key,
            )
            return self.parse_to_query_result(response.points)
    async def aquery(
        self,
        query: VectorStoreQuery,
        **kwargs: Any,
    ) -> VectorStoreQueryResult:
        """
        Asynchronous method to query index for top k most similar nodes.

        Mirrors the synchronous ``query``: dispatches on ``query.mode``
        (HYBRID / SPARSE / dense), after ensuring the async client exists and
        the collection's vector format has been detected.

        Args:
            query (VectorStoreQuery): query

        Keyword Args:
            qdrant_filters: native Qdrant ``Filter`` overriding the filters
                derived from ``query.filters``.
            shard_identifier: optional shard identifier for the request.

        Raises:
            ValueError: if no async client was configured, or HYBRID mode is
                requested while hybrid is disabled.
        """
        self._ensure_async_client()
        query_embedding = cast(List[float], query.query_embedding)
        #  NOTE: users can pass in qdrant_filters (nested/complicated filters) to override the default MetadataFilters
        qdrant_filters = kwargs.get("qdrant_filters")
        if qdrant_filters is not None:
            query_filter = qdrant_filters
        else:
            # build metadata filters
            query_filter = cast(Filter, self._build_query_filter(query))
        # Check if we need to detect vector format
        if self._legacy_vector_format is None:
            await self._adetect_vector_format(self.collection_name)
        # Get shard_identifier if provided
        shard_identifier = kwargs.get("shard_identifier")
        shard_key = (
            self._generate_shard_key_selector(shard_identifier)
            if shard_identifier
            else None
        )
        if query.mode == VectorStoreQueryMode.HYBRID and not self.enable_hybrid:
            raise ValueError(
                "Hybrid search is not enabled. Please build the query with "
                "`enable_hybrid=True` in the constructor."
            )
        elif (
            query.mode == VectorStoreQueryMode.HYBRID
            and self.enable_hybrid
            and self._sparse_query_fn is not None
            and query.query_str is not None
        ):
            # Hybrid: issue dense + sparse requests in one batch call, then
            # fuse the two result lists.
            sparse_indices, sparse_embedding = self._sparse_query_fn(
                [query.query_str],
            )
            sparse_top_k = query.sparse_top_k or query.similarity_top_k
            sparse_response = await self._aclient.query_batch_points(
                collection_name=self.collection_name,
                requests=[
                    rest.QueryRequest(
                        query=query_embedding,
                        using=self.dense_vector_name,
                        limit=query.similarity_top_k,
                        filter=query_filter,
                        with_payload=True,
                        shard_key=shard_key,
                    ),
                    rest.QueryRequest(
                        query=rest.SparseVector(
                            indices=sparse_indices[0],
                            values=sparse_embedding[0],
                        ),
                        using=self.sparse_vector_name,
                        limit=sparse_top_k,
                        filter=query_filter,
                        with_payload=True,
                        shard_key=shard_key,
                    ),
                ],
            )
            # sanity check
            assert len(sparse_response) == 2
            assert self._hybrid_fusion_fn is not None
            # flatten the response
            return self._hybrid_fusion_fn(
                self.parse_to_query_result(sparse_response[0].points),
                self.parse_to_query_result(sparse_response[1].points),
                alpha=query.alpha or 0.5,
                # NOTE: use hybrid_top_k if provided, otherwise use similarity_top_k
                top_k=query.hybrid_top_k or query.similarity_top_k,
            )
        elif (
            query.mode == VectorStoreQueryMode.SPARSE
            and self.enable_hybrid
            and self._sparse_query_fn is not None
            and query.query_str is not None
        ):
            # Sparse-only search against the sparse vector field.
            sparse_indices, sparse_embedding = self._sparse_query_fn(
                [query.query_str],
            )
            sparse_top_k = query.sparse_top_k or query.similarity_top_k
            sparse_response = await self._aclient.query_batch_points(
                collection_name=self.collection_name,
                requests=[
                    rest.QueryRequest(
                        query=rest.SparseVector(
                            indices=sparse_indices[0],
                            values=sparse_embedding[0],
                        ),
                        using=self.sparse_vector_name,
                        limit=sparse_top_k,
                        filter=query_filter,
                        with_payload=True,
                        shard_key=shard_key,
                    ),
                ],
            )
            return self.parse_to_query_result(sparse_response[0].points)
        elif self.enable_hybrid:
            # search for dense vectors only
            response = await self._aclient.query_batch_points(
                collection_name=self.collection_name,
                requests=[
                    rest.QueryRequest(
                        query=query_embedding,
                        using=self.dense_vector_name,
                        limit=query.similarity_top_k,
                        filter=query_filter,
                        with_payload=True,
                        shard_key=shard_key,
                    ),
                ],
            )
            return self.parse_to_query_result(response[0].points)
        else:
            # Regular non-hybrid search
            response = await self._aclient.query_points(
                collection_name=self.collection_name,
                query=query_embedding,
                using=self.dense_vector_name,
                limit=query.similarity_top_k,
                query_filter=query_filter,
                shard_key_selector=shard_key,
            )
            return self.parse_to_query_result(response.points)
    def parse_to_query_result(self, response: List[Any]) -> VectorStoreQueryResult:
        """
        Convert vector store response to VectorStoreQueryResult.

        Args:
            response: List[Any]: List of results returned from the vector store.

        Returns:
            VectorStoreQueryResult: parsed nodes, similarity scores and ids.
        """
        nodes = []
        similarities = []
        ids = []
        for point in response:
            payload = cast(Payload, point.payload)
            vector = point.vector
            embedding = None
            # Named-vector points return a dict keyed by vector name; legacy
            # unnamed vectors come back as a plain list.
            if isinstance(vector, dict):
                embedding = vector.get(self.dense_vector_name, vector.get("", None))
            elif isinstance(vector, list):
                embedding = vector
            try:
                node = metadata_dict_to_node(payload)
                if embedding and node.embedding is None:
                    node.embedding = embedding
            except Exception:
                # Fall back to the legacy payload schema for points written by
                # older versions of this store.
                metadata, node_info, relationships = legacy_metadata_dict_to_node(
                    payload
                )
                node = TextNode(
                    id_=str(point.id),
                    text=payload.get(self.text_key),
                    metadata=metadata,
                    start_char_idx=node_info.get("start", None),
                    end_char_idx=node_info.get("end", None),
                    relationships=relationships,
                    embedding=embedding,
                )
            nodes.append(node)
            ids.append(str(point.id))
            try:
                similarities.append(point.score)
            except AttributeError:
                # certain requests do not return a score
                similarities.append(1.0)
        return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
def _build_subfilter(self, filters: MetadataFilters) -> Filter:
conditions = []
for subfilter in filters.filters:
# Handle nested MetadataFilters
if isinstance(subfilter, MetadataFilters):
if len(subfilter.filters) > 0:
conditions.append(self._build_subfilter(subfilter))
# Skip empty MetadataFilters
continue
# Handle MetadataFilter with operators
if not subfilter.operator or subfilter.operator == FilterOperator.EQ:
if isinstance(subfilter.value, float):
conditions.append(
FieldCondition(
key=subfilter.key,
range=Range(
gte=subfilter.value,
lte=subfilter.value,
),
)
)
else:
conditions.append(
FieldCondition(
key=subfilter.key,
match=MatchValue(value=subfilter.value),
)
)
elif subfilter.operator == FilterOperator.LT:
conditions.append(
FieldCondition(
key=subfilter.key,
range=Range(lt=subfilter.value),
)
)
elif subfilter.operator == FilterOperator.GT:
conditions.append(
FieldCondition(
key=subfilter.key,
range=Range(gt=subfilter.value),
)
)
elif subfilter.operator == FilterOperator.GTE:
conditions.append(
FieldCondition(
key=subfilter.key,
range=Range(gte=subfilter.value),
)
)
elif subfilter.operator == FilterOperator.LTE:
conditions.append(
FieldCondition(
key=subfilter.key,
range=Range(lte=subfilter.value),
)
)
elif (
subfilter.operator == FilterOperator.TEXT_MATCH
or subfilter.operator == FilterOperator.TEXT_MATCH_INSENSITIVE
):
conditions.append(
FieldCondition(
key=subfilter.key,
match=MatchText(text=subfilter.value),
)
)
elif subfilter.operator == FilterOperator.NE:
conditions.append(
FieldCondition(
key=subfilter.key,
match=MatchExcept(**{"except": [subfilter.value]}),
)
)
elif subfilter.operator == FilterOperator.IN:
# match any of the values
# https://qdrant.tech/documentation/concepts/filtering/#match-any
if isinstance(subfilter.value, List):
values = subfilter.value
else:
values = str(subfilter.value).split(",")
conditions.append(
FieldCondition(
key=subfilter.key,
match=MatchAny(any=values),
)
)
elif subfilter.operator == FilterOperator.NIN:
# match none of the values
# https://qdrant.tech/documentation/concepts/filtering/#match-except
if isinstance(subfilter.value, List):
values = subfilter.value
else:
values = str(subfilter.value).split(",")
conditions.append(
FieldCondition(
key=subfilter.key,
match=MatchExcept(**{"except": values}),
)
)
elif subfilter.operator == FilterOperator.IS_EMPTY:
# This condition will match all records where the field reports either does not exist, or has null or [] value.
# https://qdrant.tech/documentation/concepts/filtering/#is-empty
conditions.append(
IsEmptyCondition(is_empty=PayloadField(key=subfilter.key))
)
else:
# Unsupported filter operator
raise NotImplementedError(
f"Filter operator {subfilter.operator} is not supported by Qdrant vector store. "
f"Supported operators: EQ, NE, GT, GTE, LT, LTE, IN, NIN, TEXT_MATCH, IS_EMPTY"
)
filter = Filter()
if filters.condition == FilterCondition.AND:
filter.must = conditions
elif filters.condition == FilterCondition.OR:
filter.should = conditions
elif filters.condition == FilterCondition.NOT:
filter.must_not = conditions
return filter
def _build_query_filter(self, query: VectorStoreQuery) -> Optional[Any]:
must_conditions = []
if query.doc_ids:
must_conditions.append(
FieldCondition(
key=DOCUMENT_ID_KEY,
match=MatchAny(any=query.doc_ids),
)
)
# Point id is a "service" id, it is not stored in payload. There is 'HasId' condition to filter by point id
# https://qdrant.tech/documentation/concepts/filtering/#has-id
if query.node_ids:
must_conditions.append(
HasIdCondition(has_id=query.node_ids),
)
# Qdrant does not use the query.query_str property for the filtering. Full-text
# filtering cannot handle longer queries and can effectively filter our all the
# nodes. See: https://github.com/jerryjliu/llama_index/pull/1181
if query.filters and query.filters.filters:
must_conditions.append(self._build_subfilter(query.filters))
if len(must_conditions) == 0:
return None
return Filter(must=must_conditions)
def use_old_sparse_encoder(self, collection_name: str) -> bool:
"""
Check if the collection uses the old sparse encoder format.
This is used during initialization to determine which sparse vector name to use.
"""
collection_exists = self._collection_exists(collection_name)
if collection_exists:
cur_collection = self.client.get_collection(collection_name)
return DEFAULT_SPARSE_VECTOR_NAME_OLD in (
cur_collection.config.params.sparse_vectors or {}
)
return False
async def ause_old_sparse_encoder(self, collection_name: str) -> bool:
"""
Asynchronous method to check if the collection uses the old sparse encoder format.
"""
collection_exists = await self._acollection_exists(collection_name)
if collection_exists:
cur_collection = await self._aclient.get_collection(collection_name)
return DEFAULT_SPARSE_VECTOR_NAME_OLD in (
cur_collection.config.params.sparse_vectors or {}
)
return False
def get_default_sparse_doc_encoder(
self,
collection_name: str,
fastembed_sparse_model: Optional[str] = None,
) -> SparseEncoderCallable:
"""
Get the default sparse document encoder.
For async-only clients, assumes new format initially.
Will be auto-corrected on first async operation if collection uses old format.
"""
if self._client is not None:
if self.use_old_sparse_encoder(collection_name):
self.sparse_vector_name = DEFAULT_SPARSE_VECTOR_NAME_OLD
return default_sparse_encoder("naver/efficient-splade-VI-BT-large-doc")
if fastembed_sparse_model is not None:
return fastembed_sparse_encoder(model_name=fastembed_sparse_model)
return fastembed_sparse_encoder()
def get_default_sparse_query_encoder(
self,
collection_name: str,
fastembed_sparse_model: Optional[str] = None,
) -> SparseEncoderCallable:
"""
Get the default sparse query encoder.
For async-only clients, assumes new format initially.
Will be auto-corrected on first async operation if collection uses old format.
"""
if self._client is not None:
if self.use_old_sparse_encoder(collection_name):
# Update the sparse vector name to use the old format
self.sparse_vector_name = DEFAULT_SPARSE_VECTOR_NAME_OLD
return default_sparse_encoder(
"naver/efficient-splade-VI-BT-large-query"
)
if fastembed_sparse_model is not None:
return fastembed_sparse_encoder(model_name=fastembed_sparse_model)
return fastembed_sparse_encoder()
def _detect_vector_format(self, collection_name: str) -> None:
"""
Detect and handle old vector formats from existing collections.
- named vs non-named vectors
- new sparse vector field name vs old sparse vector field name
"""
try:
old_sparse_name = self.sparse_vector_name # Store state before detection
collection_info = self._client.get_collection(collection_name)
vectors_config = collection_info.config.params.vectors
sparse_vectors = collection_info.config.params.sparse_vectors or {}
# Check if we have an unnamed vector format (where name is empty string)
if isinstance(vectors_config, dict):
# Using named vectors format
if LEGACY_UNNAMED_VECTOR in vectors_config:
self._legacy_vector_format = True
self.dense_vector_name = LEGACY_UNNAMED_VECTOR
else:
# Using unnamed vector format from earlier versions
self._legacy_vector_format = True
self.dense_vector_name = LEGACY_UNNAMED_VECTOR
# Detect sparse vector name if any sparse vectors configured
if isinstance(sparse_vectors, dict) and len(sparse_vectors) > 0:
if self.sparse_vector_name in sparse_vectors:
pass
elif DEFAULT_SPARSE_VECTOR_NAME_OLD in sparse_vectors:
self.sparse_vector_name = DEFAULT_SPARSE_VECTOR_NAME_OLD
# If the name changed, our initial assumption was wrong. Correct it.
if self.enable_hybrid and old_sparse_name != self.sparse_vector_name:
self._reinitialize_sparse_encoders()
except Exception as e:
logger.warning(
f"Could not detect vector format for collection {collection_name}: {e}"
)
async def _adetect_vector_format(self, collection_name: str) -> None:
"""
Asynchronous method to detect and handle old vector formats from existing collections.
"""
try:
old_sparse_name = self.sparse_vector_name # Store state before detection
collection_info = await self._aclient.get_collection(collection_name)
vectors_config = collection_info.config.params.vectors
sparse_vectors = collection_info.config.params.sparse_vectors or {}
# Check if we have an unnamed vector format (where name is empty string)
if isinstance(vectors_config, dict):
# Using named vectors format
if LEGACY_UNNAMED_VECTOR in vectors_config:
self._legacy_vector_format = True
self.dense_vector_name = LEGACY_UNNAMED_VECTOR
else:
# Using unnamed vector format from earlier versions
self._legacy_vector_format = True
self.dense_vector_name = LEGACY_UNNAMED_VECTOR
# Detect sparse vector name and correct if necessary
if isinstance(sparse_vectors, dict) and len(sparse_vectors) > 0:
if self.sparse_vector_name in sparse_vectors:
pass
elif DEFAULT_SPARSE_VECTOR_NAME_OLD in sparse_vectors:
self.sparse_vector_name = DEFAULT_SPARSE_VECTOR_NAME_OLD
# If the name changed, our initial assumption was wrong. Correct it.
if self.enable_hybrid and old_sparse_name != self.sparse_vector_name:
self._reinitialize_sparse_encoders()
except Exception as e:
logger.warning(
f"Could not detect vector format for collection {collection_name}: {e}"
)
def _reinitialize_sparse_encoders(self) -> None:
"""Recreate default sparse encoders after vector format detection, respecting user-provided functions."""
if not self.enable_hybrid:
return
# Only override the doc function if the user did NOT provide one
if not self._user_provided_sparse_doc_fn:
if self.sparse_vector_name == DEFAULT_SPARSE_VECTOR_NAME_OLD:
self._sparse_doc_fn = default_sparse_encoder(
"naver/efficient-splade-VI-BT-large-doc"
)
else:
self._sparse_doc_fn = fastembed_sparse_encoder(
model_name=self.fastembed_sparse_model
)
# Only override the query function if the user did NOT provide one
if not self._user_provided_sparse_query_fn:
if self.sparse_vector_name == DEFAULT_SPARSE_VECTOR_NAME_OLD:
self._sparse_query_fn = default_sparse_encoder(
"naver/efficient-splade-VI-BT-large-query"
)
else:
self._sparse_query_fn = fastembed_sparse_encoder(
model_name=self.fastembed_sparse_model
)
def _validate_custom_sharding(
self,
):
"""
Validate custom sharding configuration.
"""
if not self._shard_key_selector_fn:
raise ValueError(
"Must provide a shard_key_selector_fn for custom sharding."
)
if not self._shard_keys:
raise ValueError("Must provide shard_keys for custom sharding.")
def _generate_shard_key_selector(
self, shard_identifier: Any
) -> Union[rest.ShardKeySelector, None]:
"""
Generate a shard key selector based on the shard identifier.
"""
if (
self._shard_key_selector_fn is not None
and self._sharding_method == rest.ShardingMethod.CUSTOM
):
return self._shard_key_selector_fn(shard_identifier)
return None
| QdrantVectorStore |
python | spyder-ide__spyder | spyder/utils/snippets/nodes.py | {
"start": 6625,
"end": 7067
} | class ____(ASTNode):
"""
Base regex formatting node.
All regex formatting nodes should extend this class.
"""
def transform_regex(self, regex_result):
"""
Transform a regex match.
This method takes a regex result and applies some transformation to
return a new string.
"""
return ''
# -------------------------- Int snippet node classes -------------------------
| FormatNode |
python | psf__black | tests/data/cases/class_blank_parentheses.py | {
"start": 0,
"end": 50
} | class ____():
pass
| SimpleClassWithBlankParentheses |
python | facebook__pyre-check | documentation/pysa_tutorial/exercise5/urls.py | {
"start": 262,
"end": 384
} | class ____:
path: str
callback: str
urlpatterns = [UrlPattern(r"^operate_on_twos/(.*)", operate_on_twos)]
| UrlPattern |
python | kamyu104__LeetCode-Solutions | Python/shortest-path-in-a-hidden-grid.py | {
"start": 37,
"end": 216
} | class ____(object):
def canMove(self, direction):
pass
def move(self, direction):
pass
def isTarget(self):
pass
import collections
| GridMaster |
python | django__django | django/db/backends/mysql/schema.py | {
"start": 183,
"end": 9938
} | class ____(BaseDatabaseSchemaEditor):
sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s"
sql_alter_column_null = "MODIFY %(column)s %(type)s NULL"
sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL"
sql_alter_column_type = "MODIFY %(column)s %(type)s%(collation)s%(comment)s"
sql_alter_column_no_default_null = "ALTER COLUMN %(column)s SET DEFAULT NULL"
sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s"
sql_create_column_inline_fk = (
", ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) "
"REFERENCES %(to_table)s(%(to_column)s)%(on_delete_db)s"
)
sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s"
sql_delete_index = "DROP INDEX %(name)s ON %(table)s"
sql_rename_index = "ALTER TABLE %(table)s RENAME INDEX %(old_name)s TO %(new_name)s"
sql_create_pk = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
)
sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY"
sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
sql_alter_table_comment = "ALTER TABLE %(table)s COMMENT = %(comment)s"
sql_alter_column_comment = None
@property
def sql_delete_check(self):
if self.connection.mysql_is_mariadb:
# The name of the column check constraint is the same as the field
# name on MariaDB. Adding IF EXISTS clause prevents migrations
# crash. Constraint is removed during a "MODIFY" column statement.
return "ALTER TABLE %(table)s DROP CONSTRAINT IF EXISTS %(name)s"
return "ALTER TABLE %(table)s DROP CHECK %(name)s"
def quote_value(self, value):
self.connection.ensure_connection()
# MySQLdb escapes to string, PyMySQL to bytes.
quoted = self.connection.connection.escape(
value, self.connection.connection.encoders
)
if isinstance(value, str) and isinstance(quoted, bytes):
quoted = quoted.decode()
return quoted
def _is_limited_data_type(self, field):
db_type = field.db_type(self.connection)
return (
db_type is not None
and db_type.lower() in self.connection._limited_data_types
)
def _is_text_or_blob(self, field):
db_type = field.db_type(self.connection)
return db_type and db_type.lower().endswith(("blob", "text"))
def skip_default(self, field):
default_is_empty = self.effective_default(field) in ("", b"")
if default_is_empty and self._is_text_or_blob(field):
return True
return False
def skip_default_on_alter(self, field):
if self.skip_default(field):
return True
if self._is_limited_data_type(field) and not self.connection.mysql_is_mariadb:
# MySQL doesn't support defaults for BLOB and TEXT in the
# ALTER COLUMN statement.
return True
return False
def _column_default_sql(self, field):
if not self.connection.mysql_is_mariadb and self._is_limited_data_type(field):
# MySQL supports defaults for BLOB and TEXT columns only if the
# default value is written as an expression i.e. in parentheses.
return "(%s)"
return super()._column_default_sql(field)
def add_field(self, model, field):
super().add_field(model, field)
# Simulate the effect of a one-off default.
# field.default may be unhashable, so a set isn't used for "in" check.
if self.skip_default(field) and field.default not in (None, NOT_PROVIDED):
effective_default = self.effective_default(field)
self.execute(
"UPDATE %(table)s SET %(column)s = %%s"
% {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
},
[effective_default],
)
def remove_constraint(self, model, constraint):
if (
isinstance(constraint, UniqueConstraint)
and constraint.create_sql(model, self) is not None
):
self._create_missing_fk_index(
model,
fields=constraint.fields,
expressions=constraint.expressions,
)
super().remove_constraint(model, constraint)
def remove_index(self, model, index):
self._create_missing_fk_index(
model,
fields=[field_name for field_name, _ in index.fields_orders],
expressions=index.expressions,
)
super().remove_index(model, index)
def _field_should_be_indexed(self, model, field):
if not super()._field_should_be_indexed(model, field):
return False
storage = self.connection.introspection.get_storage_engine(
self.connection.cursor(), model._meta.db_table
)
# No need to create an index for ForeignKey fields except if
# db_constraint=False because the index from that constraint won't be
# created.
if (
storage == "InnoDB"
and field.get_internal_type() == "ForeignKey"
and field.db_constraint
):
return False
return not self._is_limited_data_type(field)
def _create_missing_fk_index(
self,
model,
*,
fields,
expressions=None,
):
"""
MySQL can remove an implicit FK index on a field when that field is
covered by another index like a unique_together. "covered" here means
that the more complex index has the FK field as its first field (see
https://bugs.mysql.com/bug.php?id=37910).
Manually create an implicit FK index to make it possible to remove the
composed index.
"""
first_field_name = None
if fields:
first_field_name = fields[0]
elif (
expressions
and self.connection.features.supports_expression_indexes
and isinstance(expressions[0], F)
and LOOKUP_SEP not in expressions[0].name
):
first_field_name = expressions[0].name
if not first_field_name:
return
first_field = model._meta.get_field(first_field_name)
if first_field.get_internal_type() == "ForeignKey":
column = self.connection.introspection.identifier_converter(
first_field.column
)
with self.connection.cursor() as cursor:
constraint_names = [
name
for name, infodict in self.connection.introspection.get_constraints(
cursor, model._meta.db_table
).items()
if infodict["index"] and infodict["columns"][0] == column
]
# There are no other indexes that starts with the FK field, only
# the index that is expected to be deleted.
if len(constraint_names) == 1:
self.execute(
self._create_index_sql(model, fields=[first_field], suffix="")
)
def _delete_composed_index(self, model, fields, *args):
self._create_missing_fk_index(model, fields=fields)
return super()._delete_composed_index(model, fields, *args)
def _set_field_new_type(self, field, new_type):
"""
Keep the NULL and DEFAULT properties of the old field. If it has
changed, it will be handled separately.
"""
if field.has_db_default():
default_sql, params = self.db_default_sql(field)
default_sql %= tuple(self.quote_value(p) for p in params)
new_type += f" DEFAULT {default_sql}"
if field.null:
new_type += " NULL"
else:
new_type += " NOT NULL"
return new_type
def _alter_column_type_sql(
self, model, old_field, new_field, new_type, old_collation, new_collation
):
new_type = self._set_field_new_type(old_field, new_type)
return super()._alter_column_type_sql(
model, old_field, new_field, new_type, old_collation, new_collation
)
def _field_db_check(self, field, field_db_params):
if self.connection.mysql_is_mariadb:
return super()._field_db_check(field, field_db_params)
# On MySQL, check constraints with the column name as it requires
# explicit recreation when the column is renamed.
return field_db_params["check"]
def _rename_field_sql(self, table, old_field, new_field, new_type):
new_type = self._set_field_new_type(old_field, new_type)
return super()._rename_field_sql(table, old_field, new_field, new_type)
def _alter_column_comment_sql(self, model, new_field, new_type, new_db_comment):
# Comment is alter when altering the column type.
return "", []
def _comment_sql(self, comment):
comment_sql = super()._comment_sql(comment)
return f" COMMENT {comment_sql}"
def _alter_column_null_sql(self, model, old_field, new_field):
if not new_field.has_db_default():
return super()._alter_column_null_sql(model, old_field, new_field)
new_db_params = new_field.db_parameters(connection=self.connection)
type_sql = self._set_field_new_type(new_field, new_db_params["type"])
return (
"MODIFY %(column)s %(type)s"
% {
"column": self.quote_name(new_field.column),
"type": type_sql,
},
[],
)
| DatabaseSchemaEditor |
python | sqlalchemy__sqlalchemy | test/base/test_utils.py | {
"start": 98260,
"end": 100157
} | class ____(fixtures.TestBase):
def test_subclass_overrides_cls_given(self):
class Foo:
def bar(self):
pass
class Bar(Foo):
def bar(self):
pass
is_true(util.method_is_overridden(Bar, Foo.bar))
def test_subclass_overrides(self):
class Foo:
def bar(self):
pass
class Bar(Foo):
def bar(self):
pass
is_true(util.method_is_overridden(Bar(), Foo.bar))
def test_subclass_overrides_skiplevel(self):
class Foo:
def bar(self):
pass
class Bar(Foo):
pass
class Bat(Bar):
def bar(self):
pass
is_true(util.method_is_overridden(Bat(), Foo.bar))
def test_subclass_overrides_twolevels(self):
class Foo:
def bar(self):
pass
class Bar(Foo):
def bar(self):
pass
class Bat(Bar):
pass
is_true(util.method_is_overridden(Bat(), Foo.bar))
def test_subclass_doesnt_override_cls_given(self):
class Foo:
def bar(self):
pass
class Bar(Foo):
pass
is_false(util.method_is_overridden(Bar, Foo.bar))
def test_subclass_doesnt_override(self):
class Foo:
def bar(self):
pass
class Bar(Foo):
pass
is_false(util.method_is_overridden(Bar(), Foo.bar))
def test_subclass_overrides_multi_mro(self):
class Base:
pass
class Foo:
pass
class Bat(Base):
def bar(self):
pass
class HoHo(Foo, Bat):
def bar(self):
pass
is_true(util.method_is_overridden(HoHo(), Bat.bar))
| MethodOveriddenTest |
python | realpython__materials | torchaudio/speech.py | {
"start": 2517,
"end": 4592
} | class ____(Dataset):
def __init__(
self,
folder: str | Path | None = None,
seconds: int | float | None = None,
noise_level: float = 0.005,
enable_noise: bool = True,
transform: Callable[[Tensor], Tensor] | None = None,
) -> None:
if folder:
self.folder = Path(folder).resolve()
else:
self.folder = Path.cwd() / FOLDER_IN_ARCHIVE
self._raw_dataset = SPEECHCOMMANDS(
self.folder.parent, folder_in_archive=self.folder.name
)
self._noise = noise_level
self._enable_noise = enable_noise
self._transform = transform
self._seconds = seconds
def __len__(self) -> int:
return len(self._raw_dataset)
def __getitem__(self, index: int) -> SpeechSample:
relative_path, _, *metadata = self._raw_dataset.get_metadata(index)
absolute_path = self.folder / relative_path
waveform, sample_rate = torchaudio.load(absolute_path)
speech_sample = SpeechSample(waveform, sample_rate, *metadata)
if self._seconds is not None:
speech_sample = speech_sample.pad_trim(self._seconds)
if self._enable_noise:
speech_sample = speech_sample.with_gaussian_noise(self._noise)
if self._transform:
speech_sample = speech_sample.apply(self._transform)
return speech_sample
def bulk_process(
dataset: SPEECHCOMMANDS,
output_dir: str | Path,
sample_rate: int,
seconds: int | float,
) -> None:
for index, sample in tqdm(enumerate(dataset), total=len(dataset)):
speech_sample = SpeechSample(*sample)
input_path, *_ = dataset.get_metadata(index)
output_path = Path(output_dir).resolve() / input_path
output_path.parent.mkdir(parents=True, exist_ok=True)
if speech_sample.sample_rate != sample_rate:
speech_sample = speech_sample.resample(sample_rate)
speech_sample = speech_sample.pad_trim(seconds)
speech_sample.save(output_path)
| AugmentedSpeechCommands |
python | PyCQA__pylint | doc/data/messages/c/class-variable-slots-conflict/good.py | {
"start": 0,
"end": 253
} | class ____:
__slots__ = ("_age", "name")
def __init__(self, age, name):
self._age = age
self.name = name
@property
def age(self):
return self._age
def say_hi(self):
print(f"Hi, I'm {self.name}.")
| Person |
python | numba__numba | numba/core/errors.py | {
"start": 1393,
"end": 1530
} | class ____(NumbaWarning, DeprecationWarning):
"""
Warning category for use of a deprecated feature.
"""
| NumbaDeprecationWarning |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-ollama-query-engine/llama_index/packs/ollama_query_engine/base.py | {
"start": 1311,
"end": 4484
} | class ____(BaseEmbedding):
"""
Class for Ollama embeddings.
Args:
model_name (str): Model for embedding.
base_url (str): Ollama url. Defaults to http://localhost:11434.
"""
_base_url: str = PrivateAttr()
_verbose: bool = PrivateAttr()
def __init__(
self,
model_name: str,
base_url: str = DEFAULT_OLLAMA_BASE_URL,
verbose: bool = False,
**kwargs: Any,
) -> None:
super().__init__(
model_name=model_name,
**kwargs,
)
self._verbose = verbose
self._base_url = base_url
@classmethod
def class_name(cls) -> str:
return "OllamaEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self.get_general_text_embedding(query)
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return self.get_general_text_embedding(query)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self.get_general_text_embedding(text)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
return self.get_general_text_embedding(text)
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
embeddings_list: List[List[float]] = []
for text in texts:
embeddings = self.get_general_text_embedding(text)
embeddings_list.append(embeddings)
return embeddings_list
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
return self._get_text_embeddings(texts)
def get_general_text_embedding(self, input: str) -> List[float]:
"""Get Ollama embedding."""
try:
import requests
except ImportError:
raise ImportError(
"Could not import requests library."
"Please install requests with `pip install requests`"
)
# all_kwargs = self._get_all_kwargs()
response = requests.post(
url=f"{self._base_url}/api/embeddings",
headers={"Content-Type": "application/json"},
json={"prompt": input, "model": self.model_name},
)
response.encoding = "utf-8"
if response.status_code != 200:
optional_detail = response.json().get("error")
raise ValueError(
f"Ollama call failed with status code {response.status_code}."
f" Details: {optional_detail}"
)
try:
embeddings = response.json()["embedding"]
if self._verbose:
print(f"Text={input}")
print(embeddings)
return embeddings
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised for Ollama Call: {e}.\nResponse: {response.text}"
)
| OllamaEmbedding |
python | viewflow__viewflow | viewflow/workflow/migrations/0003_task_owner_permission_change.py | {
"start": 108,
"end": 426
} | class ____(migrations.Migration):
dependencies = [
("viewflow", "0002_fsmchange"),
]
operations = [
migrations.AlterField(
model_name="task",
name="owner_permission",
field=models.CharField(blank=True, null=True, max_length=150),
),
]
| Migration |
python | kubernetes-client__python | kubernetes/client/models/v1beta2_device_allocation_result.py | {
"start": 383,
"end": 5285
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'config': 'list[V1beta2DeviceAllocationConfiguration]',
'results': 'list[V1beta2DeviceRequestAllocationResult]'
}
attribute_map = {
'config': 'config',
'results': 'results'
}
def __init__(self, config=None, results=None, local_vars_configuration=None): # noqa: E501
"""V1beta2DeviceAllocationResult - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._config = None
self._results = None
self.discriminator = None
if config is not None:
self.config = config
if results is not None:
self.results = results
@property
def config(self):
"""Gets the config of this V1beta2DeviceAllocationResult. # noqa: E501
This field is a combination of all the claim and class configuration parameters. Drivers can distinguish between those based on a flag. This includes configuration parameters for drivers which have no allocated devices in the result because it is up to the drivers which configuration parameters they support. They can silently ignore unknown configuration parameters. # noqa: E501
:return: The config of this V1beta2DeviceAllocationResult. # noqa: E501
:rtype: list[V1beta2DeviceAllocationConfiguration]
"""
return self._config
@config.setter
def config(self, config):
"""Sets the config of this V1beta2DeviceAllocationResult.
This field is a combination of all the claim and class configuration parameters. Drivers can distinguish between those based on a flag. This includes configuration parameters for drivers which have no allocated devices in the result because it is up to the drivers which configuration parameters they support. They can silently ignore unknown configuration parameters. # noqa: E501
:param config: The config of this V1beta2DeviceAllocationResult. # noqa: E501
:type: list[V1beta2DeviceAllocationConfiguration]
"""
self._config = config
@property
def results(self):
"""Gets the results of this V1beta2DeviceAllocationResult. # noqa: E501
Results lists all allocated devices. # noqa: E501
:return: The results of this V1beta2DeviceAllocationResult. # noqa: E501
:rtype: list[V1beta2DeviceRequestAllocationResult]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this V1beta2DeviceAllocationResult.
Results lists all allocated devices. # noqa: E501
:param results: The results of this V1beta2DeviceAllocationResult. # noqa: E501
:type: list[V1beta2DeviceRequestAllocationResult]
"""
self._results = results
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2DeviceAllocationResult):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2DeviceAllocationResult):
return True
return self.to_dict() != other.to_dict()
| V1beta2DeviceAllocationResult |
python | pandas-dev__pandas | pandas/core/interchange/column.py | {
"start": 1934,
"end": 17922
} | class ____(Column):
"""
A column object, with only the methods and properties required by the
interchange protocol defined.
A column can contain one or more chunks. Each chunk can contain up to three
buffers - a data buffer, a mask buffer (depending on null representation),
and an offsets buffer (if variable-size binary; e.g., variable-length
strings).
Note: this Column object can only be produced by ``__dataframe__``, so
doesn't need its own version or ``__column__`` protocol.
"""
def __init__(self, column: pd.Series, allow_copy: bool = True) -> None:
"""
Note: doesn't deal with extension arrays yet, just assume a regular
Series/ndarray for now.
"""
if isinstance(column, pd.DataFrame):
raise TypeError(
"Expected a Series, got a DataFrame. This likely happened "
"because you called __dataframe__ on a DataFrame which, "
"after converting column names to string, resulted in duplicated "
f"names: {column.columns}. Please rename these columns before "
"using the interchange protocol."
)
if not isinstance(column, pd.Series):
raise NotImplementedError(f"Columns of type {type(column)} not handled yet")
# Store the column as a private attribute
self._col = column
self._allow_copy = allow_copy
def size(self) -> int:
"""
Size of the column, in elements.
"""
return self._col.size
@property
def offset(self) -> int:
"""
Offset of first element. Always zero.
"""
# TODO: chunks are implemented now, probably this should return something
return 0
@cache_readonly
def dtype(self) -> tuple[DtypeKind, int, str, str]:
dtype = self._col.dtype
if isinstance(dtype, pd.CategoricalDtype):
codes = self._col.values.codes
(
_,
bitwidth,
c_arrow_dtype_f_str,
_,
) = self._dtype_from_pandasdtype(codes.dtype)
return (
DtypeKind.CATEGORICAL,
bitwidth,
c_arrow_dtype_f_str,
Endianness.NATIVE,
)
elif is_string_dtype(dtype):
if infer_dtype(self._col) in ("string", "empty"):
return (
DtypeKind.STRING,
8,
dtype_to_arrow_c_fmt(dtype),
Endianness.NATIVE,
)
raise NotImplementedError("Non-string object dtypes are not supported yet")
else:
return self._dtype_from_pandasdtype(dtype)
def _dtype_from_pandasdtype(self, dtype) -> tuple[DtypeKind, int, str, str]:
"""
See `self.dtype` for details.
"""
# Note: 'c' (complex) not handled yet (not in array spec v1).
# 'b', 'B' (bytes), 'S', 'a', (old-style string) 'V' (void) not handled
# datetime and timedelta both map to datetime (is timedelta handled?)
kind = _NP_KINDS.get(dtype.kind, None)
if kind is None:
# Not a NumPy dtype. Check if it's a categorical maybe
raise ValueError(f"Data type {dtype} not supported by interchange protocol")
if isinstance(dtype, ArrowDtype):
byteorder = dtype.numpy_dtype.byteorder
elif isinstance(dtype, DatetimeTZDtype):
byteorder = dtype.base.byteorder # type: ignore[union-attr]
elif isinstance(dtype, BaseMaskedDtype):
byteorder = dtype.numpy_dtype.byteorder
else:
byteorder = dtype.byteorder
if dtype == "bool[pyarrow]":
# return early to avoid the `* 8` below, as this is a bitmask
# rather than a bytemask
return (
kind,
dtype.itemsize, # pyright: ignore[reportAttributeAccessIssue]
ArrowCTypes.BOOL,
byteorder,
)
return kind, dtype.itemsize * 8, dtype_to_arrow_c_fmt(dtype), byteorder
@property
def describe_categorical(self):
"""
If the dtype is categorical, there are two options:
- There are only values in the data buffer.
- There is a separate non-categorical Column encoding for categorical values.
Raises TypeError if the dtype is not categorical
Content of returned dict:
- "is_ordered" : bool, whether the ordering of dictionary indices is
semantically meaningful.
- "is_dictionary" : bool, whether a dictionary-style mapping of
categorical values to other objects exists
- "categories" : Column representing the (implicit) mapping of indices to
category values (e.g. an array of cat1, cat2, ...).
None if not a dictionary-style categorical.
"""
if not self.dtype[0] == DtypeKind.CATEGORICAL:
raise TypeError(
"describe_categorical only works on a column with categorical dtype!"
)
return {
"is_ordered": self._col.cat.ordered,
"is_dictionary": True,
"categories": PandasColumn(pd.Series(self._col.cat.categories)),
}
@property
def describe_null(self):
if isinstance(self._col.dtype, BaseMaskedDtype):
column_null_dtype = ColumnNullType.USE_BYTEMASK
null_value = 1
return column_null_dtype, null_value
if isinstance(self._col.dtype, ArrowDtype):
# We already rechunk (if necessary / allowed) upon initialization, so this
# is already single-chunk by the time we get here.
if self._col.array._pa_array.chunks[0].buffers()[0] is None: # type: ignore[attr-defined]
return ColumnNullType.NON_NULLABLE, None
return ColumnNullType.USE_BITMASK, 0
kind = self.dtype[0]
try:
null, value = _NULL_DESCRIPTION[kind]
except KeyError as err:
raise NotImplementedError(f"Data type {kind} not yet supported") from err
return null, value
@cache_readonly
def null_count(self) -> int:
"""
Number of null elements. Should always be known.
"""
return self._col.isna().sum().item()
@property
def metadata(self) -> dict[str, pd.Index]:
"""
Store specific metadata of the column.
"""
return {"pandas.index": self._col.index}
def num_chunks(self) -> int:
"""
Return the number of chunks the column consists of.
"""
return 1
def get_chunks(self, n_chunks: int | None = None):
"""
Return an iterator yielding the chunks.
See `DataFrame.get_chunks` for details on ``n_chunks``.
"""
if n_chunks and n_chunks > 1:
size = len(self._col)
step = size // n_chunks
if size % n_chunks != 0:
step += 1
for start in range(0, step * n_chunks, step):
yield PandasColumn(
self._col.iloc[start : start + step], self._allow_copy
)
else:
yield self
def get_buffers(self) -> ColumnBuffers:
"""
Return a dictionary containing the underlying buffers.
The returned dictionary has the following contents:
- "data": a two-element tuple whose first element is a buffer
containing the data and whose second element is the data
buffer's associated dtype.
- "validity": a two-element tuple whose first element is a buffer
containing mask values indicating missing data and
whose second element is the mask value buffer's
associated dtype. None if the null representation is
not a bit or byte mask.
- "offsets": a two-element tuple whose first element is a buffer
containing the offset values for variable-size binary
data (e.g., variable-length strings) and whose second
element is the offsets buffer's associated dtype. None
if the data buffer does not have an associated offsets
buffer.
"""
buffers: ColumnBuffers = {
"data": self._get_data_buffer(),
"validity": None,
"offsets": None,
}
try:
buffers["validity"] = self._get_validity_buffer()
except NoBufferPresent:
pass
try:
buffers["offsets"] = self._get_offsets_buffer()
except NoBufferPresent:
pass
return buffers
def _get_data_buffer(
self,
) -> tuple[Buffer, tuple[DtypeKind, int, str, str]]:
"""
Return the buffer containing the data and the buffer's associated dtype.
"""
buffer: Buffer
if self.dtype[0] == DtypeKind.DATETIME:
# self.dtype[2] is an ArrowCTypes.TIMESTAMP where the tz will make
# it longer than 4 characters
if len(self.dtype[2]) > 4:
np_arr = self._col.dt.tz_convert(None).to_numpy()
else:
np_arr = self._col.to_numpy()
buffer = PandasBuffer(np_arr, allow_copy=self._allow_copy)
dtype = (
DtypeKind.INT,
64,
ArrowCTypes.INT64,
Endianness.NATIVE,
)
elif self.dtype[0] in (
DtypeKind.INT,
DtypeKind.UINT,
DtypeKind.FLOAT,
DtypeKind.BOOL,
):
dtype = self.dtype
arr = self._col.array
if isinstance(self._col.dtype, ArrowDtype):
# We already rechunk (if necessary / allowed) upon initialization, so
# this is already single-chunk by the time we get here.
arr = arr._pa_array.chunks[0] # type: ignore[attr-defined]
buffer = PandasBufferPyarrow(
arr.buffers()[1],
length=len(arr),
)
return buffer, dtype
if isinstance(self._col.dtype, BaseMaskedDtype):
np_arr = arr._data # type: ignore[attr-defined]
else:
np_arr = arr._ndarray # type: ignore[attr-defined]
buffer = PandasBuffer(np_arr, allow_copy=self._allow_copy)
elif self.dtype[0] == DtypeKind.CATEGORICAL:
codes = self._col.values._codes
buffer = PandasBuffer(codes, allow_copy=self._allow_copy)
dtype = self._dtype_from_pandasdtype(codes.dtype)
elif self.dtype[0] == DtypeKind.STRING:
# Marshal the strings from a NumPy object array into a byte array
buf = self._col.to_numpy()
b = bytearray()
# TODO: this for-loop is slow; can be implemented in Cython/C/C++ later
for obj in buf:
if isinstance(obj, str):
b.extend(obj.encode(encoding="utf-8"))
# Convert the byte array to a Pandas "buffer" using
# a NumPy array as the backing store
buffer = PandasBuffer(np.frombuffer(b, dtype="uint8"))
# Define the dtype for the returned buffer
# TODO: this will need correcting
# https://github.com/pandas-dev/pandas/issues/54781
dtype = (
DtypeKind.UINT,
8,
ArrowCTypes.UINT8,
Endianness.NATIVE,
) # note: currently only support native endianness
else:
raise NotImplementedError(f"Data type {self._col.dtype} not handled yet")
return buffer, dtype
def _get_validity_buffer(self) -> tuple[Buffer, Any] | None:
"""
Return the buffer containing the mask values indicating missing data and
the buffer's associated dtype.
Raises NoBufferPresent if null representation is not a bit or byte mask.
"""
null, invalid = self.describe_null
buffer: Buffer
if isinstance(self._col.dtype, ArrowDtype):
# We already rechunk (if necessary / allowed) upon initialization, so this
# is already single-chunk by the time we get here.
arr = self._col.array._pa_array.chunks[0] # type: ignore[attr-defined]
dtype = (DtypeKind.BOOL, 1, ArrowCTypes.BOOL, Endianness.NATIVE)
if arr.buffers()[0] is None:
return None
buffer = PandasBufferPyarrow(
arr.buffers()[0],
length=len(arr),
)
return buffer, dtype
if isinstance(self._col.dtype, BaseMaskedDtype):
mask = self._col.array._mask # type: ignore[attr-defined]
buffer = PandasBuffer(mask)
dtype = (DtypeKind.BOOL, 8, ArrowCTypes.BOOL, Endianness.NATIVE)
return buffer, dtype
if self.dtype[0] == DtypeKind.STRING:
# For now, use byte array as the mask.
# TODO: maybe store as bit array to save space?..
buf = self._col.to_numpy()
# Determine the encoding for valid values
valid = invalid == 0
invalid = not valid
mask = np.zeros(shape=(len(buf),), dtype=np.bool_)
for i, obj in enumerate(buf):
mask[i] = valid if isinstance(obj, str) else invalid
# Convert the mask array to a Pandas "buffer" using
# a NumPy array as the backing store
buffer = PandasBuffer(mask)
# Define the dtype of the returned buffer
dtype = (DtypeKind.BOOL, 8, ArrowCTypes.BOOL, Endianness.NATIVE)
return buffer, dtype
try:
msg = f"{_NO_VALIDITY_BUFFER[null]} so does not have a separate mask"
except KeyError as err:
# TODO: implement for other bit/byte masks?
raise NotImplementedError("See self.describe_null") from err
raise NoBufferPresent(msg)
def _get_offsets_buffer(self) -> tuple[PandasBuffer, Any]:
"""
Return the buffer containing the offset values for variable-size binary
data (e.g., variable-length strings) and the buffer's associated dtype.
Raises NoBufferPresent if the data buffer does not have an associated
offsets buffer.
"""
if self.dtype[0] == DtypeKind.STRING:
# For each string, we need to manually determine the next offset
values = self._col.to_numpy()
ptr = 0
offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64)
for i, v in enumerate(values):
# For missing values (in this case, `np.nan` values)
# we don't increment the pointer
if isinstance(v, str):
b = v.encode(encoding="utf-8")
ptr += len(b)
offsets[i + 1] = ptr
# Convert the offsets to a Pandas "buffer" using
# the NumPy array as the backing store
buffer = PandasBuffer(offsets)
# Assemble the buffer dtype info
dtype = (
DtypeKind.INT,
64,
ArrowCTypes.INT64,
Endianness.NATIVE,
) # note: currently only support native endianness
else:
raise NoBufferPresent(
"This column has a fixed-length dtype so "
"it does not have an offsets buffer"
)
return buffer, dtype
| PandasColumn |
python | protocolbuffers__protobuf | python/google/protobuf/text_format.py | {
"start": 1835,
"end": 1909
} | class ____(Exception):
"""Top-level module error for text_format."""
| Error |
python | django__django | tests/contenttypes_tests/test_fields.py | {
"start": 330,
"end": 2776
} | class ____(TestCase):
def test_str(self):
class Model(models.Model):
field = GenericForeignKey()
field = Model._meta.get_field("field")
self.assertEqual(str(field), "contenttypes_tests.Model.field")
def test_get_content_type_no_arguments(self):
field = Answer._meta.get_field("question")
with self.assertRaisesMessage(
Exception, "Impossible arguments to GFK.get_content_type!"
):
field.get_content_type()
def test_get_object_cache_respects_deleted_objects(self):
question = Question.objects.create(text="Who?")
post = Post.objects.create(title="Answer", parent=question)
question_pk = question.pk
Question.objects.all().delete()
post = Post.objects.get(pk=post.pk)
with self.assertNumQueries(1):
self.assertEqual(post.object_id, question_pk)
self.assertIsNone(post.parent)
self.assertIsNone(post.parent)
def test_clear_cached_generic_relation(self):
question = Question.objects.create(text="What is your name?")
answer = Answer.objects.create(text="Answer", question=question)
old_entity = answer.question
answer.refresh_from_db()
new_entity = answer.question
self.assertIsNot(old_entity, new_entity)
def test_clear_cached_generic_relation_explicit_fields(self):
question = Question.objects.create(text="question")
answer = Answer.objects.create(text="answer", question=question)
old_question_obj = answer.question
# The reverse relation is not refreshed if not passed explicitly in
# `fields`.
answer.refresh_from_db(fields=["text"])
self.assertIs(answer.question, old_question_obj)
answer.refresh_from_db(fields=["question"])
self.assertIsNot(answer.question, old_question_obj)
self.assertEqual(answer.question, old_question_obj)
def test_clear_cached_generic_relation_when_deferred(self):
question = Question.objects.create(text="question")
Answer.objects.create(text="answer", question=question)
answer = Answer.objects.defer("text").get()
old_question_obj = answer.question
# The reverse relation is refreshed even when the text field is
# deferred.
answer.refresh_from_db()
self.assertIsNot(answer.question, old_question_obj)
| GenericForeignKeyTests |
python | django__django | tests/migrations/models.py | {
"start": 1198,
"end": 1339
} | class ____(models.Manager):
def __init__(self, a, b, c=1, d=2):
super().__init__()
self.args = (a, b, c, d)
| BaseFoodManager |
python | jazzband__django-oauth-toolkit | tests/test_oauth2_provider_middleware.py | {
"start": 381,
"end": 3694
} | class ____(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.middleware = OAuth2ExtraTokenMiddleware(lambda r: None)
# Create test user and application for valid token tests
self.user = User.objects.create_user("test_user", "test@example.com", "123456")
self.application = Application.objects.create(
name="Test Application",
user=self.user,
client_type=Application.CLIENT_CONFIDENTIAL,
authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE,
)
def test_malformed_bearer_header_no_token(self):
"""Test that 'Authorization: Bearer' without token doesn't crash"""
request = self.factory.get("/", HTTP_AUTHORIZATION="Bearer")
# This should not raise an IndexError
_ = self.middleware(request)
# Should not have access_token attribute
self.assertFalse(hasattr(request, "access_token"))
def test_malformed_bearer_header_empty_token(self):
"""Test that 'Authorization: Bearer ' with empty token doesn't crash"""
request = self.factory.get("/", HTTP_AUTHORIZATION="Bearer ")
# This should not raise an IndexError
_ = self.middleware(request)
# Should not have access_token attribute
self.assertFalse(hasattr(request, "access_token"))
def test_valid_bearer_token(self):
"""Test that valid bearer token works correctly"""
# Create a valid access token
token_string = "test-token-12345"
token_checksum = hashlib.sha256(token_string.encode("utf-8")).hexdigest()
access_token = AccessToken.objects.create(
user=self.user,
scope="read",
expires=datetime.datetime.now() + datetime.timedelta(days=1),
token=token_string,
token_checksum=token_checksum,
application=self.application,
)
request = self.factory.get("/", HTTP_AUTHORIZATION=f"Bearer {token_string}")
_ = self.middleware(request)
# Should have access_token attribute set
self.assertTrue(hasattr(request, "access_token"))
self.assertEqual(request.access_token, access_token)
def test_invalid_bearer_token(self):
"""Test that invalid bearer token doesn't crash but doesn't set access_token"""
request = self.factory.get("/", HTTP_AUTHORIZATION="Bearer invalid-token-xyz")
# This should not raise an exception
_ = self.middleware(request)
# Should not have access_token attribute
self.assertFalse(hasattr(request, "access_token"))
def test_no_authorization_header(self):
"""Test that request without Authorization header works normally"""
request = self.factory.get("/")
_ = self.middleware(request)
# Should not have access_token attribute
self.assertFalse(hasattr(request, "access_token"))
def test_non_bearer_authorization_header(self):
"""Test that non-Bearer authorization headers are ignored"""
request = self.factory.get("/", HTTP_AUTHORIZATION="Basic dXNlcjpwYXNz")
_ = self.middleware(request)
# Should not have access_token attribute
self.assertFalse(hasattr(request, "access_token"))
| TestOAuth2ExtraTokenMiddleware |
python | pypa__pipenv | pipenv/patched/pip/_internal/utils/hashes.py | {
"start": 4366,
"end": 5002
} | class ____(Hashes):
"""A workalike for Hashes used when we're missing a hash for a requirement
It computes the actual hash of the requirement and raises a HashMissing
exception showing it to the user.
"""
def __init__(self) -> None:
"""Don't offer the ``hashes`` kwarg."""
# Pass our favorite hash in to generate a "gotten hash". With the
# empty list, it will never match, so an error will always raise.
super().__init__(hashes={FAVORITE_HASH: []})
def _raise(self, gots: Dict[str, "_Hash"]) -> "NoReturn":
raise HashMissing(gots[FAVORITE_HASH].hexdigest())
| MissingHashes |
python | ray-project__ray | python/ray/data/_internal/stats.py | {
"start": 4442,
"end": 31511
} | class ____:
"""Actor holding stats for blocks created by LazyBlockList.
This actor is shared across all datasets created in the same cluster.
In order to cap memory usage, we set a max number of stats to keep
in the actor. When this limit is exceeded, the stats will be garbage
collected in FIFO order.
TODO(ekl) we should consider refactoring LazyBlockList so stats can be
extracted without using an out-of-band actor."""
def __init__(self, max_stats=1000):
# Mapping from uuid -> (task_id -> list of blocks statistics).
self.metadata = collections.defaultdict(dict)
self.last_time = {}
self.start_time = {}
self.max_stats = max_stats
# Assign dataset uuids with a global counter.
self.next_dataset_id = 0
# Dataset metadata to be queried directly by DashboardHead api.
self.datasets: Dict[str, Any] = {}
# Cache of calls to ray.nodes() to prevent unnecessary network calls
self._ray_nodes_cache: Dict[str, str] = {}
# Initialize the metadata exporter
self._metadata_exporter = get_dataset_metadata_exporter()
self.dataset_metadatas: Dict[str, DatasetMetadata] = {}
# A FIFO queue of dataset_tags for finished datasets. This is used to
# efficiently evict the oldest finished datasets when max_stats is reached.
self.finished_datasets_queue = collections.deque()
# Ray Data dashboard metrics
# Everything is a gauge because we need to reset all of
# a dataset's metrics to 0 after each finishes execution.
op_tags_keys = ("dataset", "operator")
# TODO(scottjlee): move these overvie metrics as fields in a
# separate dataclass, similar to OpRuntimeMetrics.
self.spilled_bytes = Gauge(
"data_spilled_bytes",
description="""Bytes spilled by dataset operators.
DataContext.enable_get_object_locations_for_metrics
must be set to True to report this metric""",
tag_keys=op_tags_keys,
)
self.freed_bytes = Gauge(
"data_freed_bytes",
description="Bytes freed by dataset operators",
tag_keys=op_tags_keys,
)
self.current_bytes = Gauge(
"data_current_bytes",
description="Bytes currently in memory store used by dataset operators",
tag_keys=op_tags_keys,
)
self.cpu_usage_cores = Gauge(
"data_cpu_usage_cores",
description="CPUs allocated to dataset operators",
tag_keys=op_tags_keys,
)
self.gpu_usage_cores = Gauge(
"data_gpu_usage_cores",
description="GPUs allocated to dataset operators",
tag_keys=op_tags_keys,
)
self.output_bytes = Gauge(
"data_output_bytes",
description="Bytes outputted by dataset operators",
tag_keys=op_tags_keys,
)
self.output_rows = Gauge(
"data_output_rows",
description="Rows outputted by dataset operators",
tag_keys=op_tags_keys,
)
# === Metrics from OpRuntimeMetrics ===
# Inputs-related metrics
self.execution_metrics_inputs = (
self._create_prometheus_metrics_for_execution_metrics(
metrics_group=MetricsGroup.INPUTS,
tag_keys=op_tags_keys,
)
)
# Outputs-related metrics
self.execution_metrics_outputs = (
self._create_prometheus_metrics_for_execution_metrics(
metrics_group=MetricsGroup.OUTPUTS,
tag_keys=op_tags_keys,
)
)
# Task-related metrics
self.execution_metrics_tasks = (
self._create_prometheus_metrics_for_execution_metrics(
metrics_group=MetricsGroup.TASKS,
tag_keys=op_tags_keys,
)
)
# Object store memory-related metrics
self.execution_metrics_obj_store_memory = (
self._create_prometheus_metrics_for_execution_metrics(
metrics_group=MetricsGroup.OBJECT_STORE_MEMORY,
tag_keys=op_tags_keys,
)
)
# Actor related metrics
self.execution_metrics_actors = (
self._create_prometheus_metrics_for_execution_metrics(
metrics_group=MetricsGroup.ACTORS,
tag_keys=op_tags_keys,
)
)
# Miscellaneous metrics
self.execution_metrics_misc = (
self._create_prometheus_metrics_for_execution_metrics(
metrics_group=MetricsGroup.MISC,
tag_keys=op_tags_keys,
)
)
# Per Node metrics
self.per_node_metrics = self._create_prometheus_metrics_for_per_node_metrics()
iter_tag_keys = ("dataset",)
self.time_to_first_batch_s = Gauge(
"data_iter_time_to_first_batch_seconds",
description="Total time spent waiting for the first batch after starting iteration. "
"This includes the dataset pipeline warmup time. This metric is accumulated across different epochs.",
tag_keys=iter_tag_keys,
)
self.iter_block_fetching_s = Gauge(
"data_iter_block_fetching_seconds",
description="Seconds taken to fetch (with ray.get) blocks by iter_batches()",
tag_keys=iter_tag_keys,
)
self.iter_batch_shaping_s = Gauge(
"data_iter_batch_shaping_seconds",
description="Seconds taken to shape batch from incoming blocks by iter_batches()",
tag_keys=iter_tag_keys,
)
self.iter_batch_formatting_s = Gauge(
"data_iter_batch_formatting_seconds",
description="Seconds taken to format batches by iter_batches()",
tag_keys=iter_tag_keys,
)
self.iter_batch_collating_s = Gauge(
"data_iter_batch_collating_seconds",
description="Seconds taken to collate batches by iter_batches()",
tag_keys=iter_tag_keys,
)
self.iter_batch_finalizing_s = Gauge(
"data_iter_batch_finalizing_seconds",
description="Seconds taken to collate batches by iter_batches()",
tag_keys=iter_tag_keys,
)
self.iter_total_blocked_s = Gauge(
"data_iter_total_blocked_seconds",
description="Seconds user thread is blocked by iter_batches()",
tag_keys=iter_tag_keys,
)
self.iter_user_s = Gauge(
"data_iter_user_seconds",
description="Seconds spent in user code",
tag_keys=iter_tag_keys,
)
self.iter_initialize_s = Gauge(
"data_iter_initialize_seconds",
description="Seconds spent in iterator initialization code",
tag_keys=iter_tag_keys,
)
self.iter_get_ref_bundles_s = Gauge(
"data_iter_get_ref_bundles_seconds",
description="Seconds spent getting RefBundles from the dataset iterator",
tag_keys=iter_tag_keys,
)
self.iter_get_s = Gauge(
"data_iter_get_seconds",
description="Seconds spent in ray.get() while resolving block references",
tag_keys=iter_tag_keys,
)
self.iter_next_batch_s = Gauge(
"data_iter_next_batch_seconds",
description="Seconds spent getting the next batch from the block buffer",
tag_keys=iter_tag_keys,
)
self.iter_format_batch_s = Gauge(
"data_iter_format_batch_seconds",
description="Seconds spent formatting the batch",
tag_keys=iter_tag_keys,
)
self.iter_collate_batch_s = Gauge(
"data_iter_collate_batch_seconds",
description="Seconds spent collating the batch",
tag_keys=iter_tag_keys,
)
self.iter_finalize_batch_s = Gauge(
"data_iter_finalize_batch_seconds",
description="Seconds spent finalizing the batch",
tag_keys=iter_tag_keys,
)
self.iter_blocks_local = Gauge(
"data_iter_blocks_local",
description="Number of blocks already on the local node",
tag_keys=iter_tag_keys,
)
self.iter_blocks_remote = Gauge(
"data_iter_blocks_remote",
description="Number of blocks that require fetching from another node",
tag_keys=iter_tag_keys,
)
self.iter_unknown_location = Gauge(
"data_iter_unknown_location",
description="Number of blocks that have unknown locations",
tag_keys=iter_tag_keys,
)
self.iter_prefetched_bytes = Gauge(
"data_iter_prefetched_bytes",
description="Current bytes of prefetched blocks in the iterator",
tag_keys=iter_tag_keys,
)
# === Dataset and Operator Metadata Metrics ===
dataset_tags = ("dataset", "job_id", "start_time")
self.data_dataset_estimated_total_blocks = Gauge(
"data_dataset_estimated_total_blocks",
description="Total work units in blocks for dataset",
tag_keys=dataset_tags,
)
self.data_dataset_estimated_total_rows = Gauge(
"data_dataset_estimated_total_rows",
description="Total work units in rows for dataset",
tag_keys=dataset_tags,
)
self.data_dataset_state = Gauge(
"data_dataset_state",
description=f"State of dataset ({', '.join([f'{s.value}={s.name}' for s in DatasetState])})",
tag_keys=dataset_tags,
)
operator_tags = ("dataset", "operator")
self.data_operator_estimated_total_blocks = Gauge(
"data_operator_estimated_total_blocks",
description="Total work units in blocks for operator",
tag_keys=operator_tags,
)
self.data_operator_estimated_total_rows = Gauge(
"data_operator_estimated_total_rows",
description="Total work units in rows for operator",
tag_keys=operator_tags,
)
self.data_operator_queued_blocks = Gauge(
"data_operator_queued_blocks",
description="Number of queued blocks for operator",
tag_keys=operator_tags,
)
self.data_operator_state = Gauge(
"data_operator_state",
description=f"State of operator ({', '.join([f'{s.value}={s.name}' for s in DatasetState])})",
tag_keys=operator_tags,
)
def _create_prometheus_metrics_for_execution_metrics(
self, metrics_group: MetricsGroup, tag_keys: Tuple[str, ...]
) -> Dict[str, Metric]:
metrics = {}
for metric in OpRuntimeMetrics.get_metrics():
if not metric.metrics_group == metrics_group:
continue
metric_name = f"data_{metric.name}"
metric_description = metric.description
if metric.metrics_type == MetricsType.Gauge:
metrics[metric.name] = Gauge(
metric_name,
description=metric_description,
tag_keys=tag_keys,
)
elif metric.metrics_type == MetricsType.Histogram:
metrics[metric.name] = Histogram(
metric_name,
description=metric_description,
tag_keys=tag_keys,
**metric.metrics_args,
)
elif metric.metrics_type == MetricsType.Counter:
metrics[metric.name] = Counter(
metric_name,
description=metric_description,
tag_keys=tag_keys,
)
return metrics
def _create_prometheus_metrics_for_per_node_metrics(self) -> Dict[str, Gauge]:
metrics = {}
for field in fields(NodeMetrics):
metric_name = f"data_{field.name}_per_node"
metrics[field.name] = Gauge(
metric_name,
description="",
tag_keys=("dataset", "node_ip"),
)
return metrics
def gen_dataset_id(self) -> str:
"""Generate a unique dataset_id for tracking datasets."""
dataset_id = str(self.next_dataset_id)
self.next_dataset_id += 1
return dataset_id
def update_execution_metrics(
self,
dataset_tag: str,
op_metrics: List[Dict[str, Union[int, float]]],
operator_tags: List[str],
state: Dict[str, Any],
per_node_metrics: Optional[Dict[str, Dict[str, Union[int, float]]]] = None,
):
def _record(
prom_metric: Metric,
value: Union[int, float, List[int]],
tags: Dict[str, str] = None,
):
if isinstance(prom_metric, Gauge):
prom_metric.set(value, tags)
elif isinstance(prom_metric, Counter):
prom_metric.inc(value, tags)
elif isinstance(prom_metric, Histogram):
if isinstance(value, RuntimeMetricsHistogram):
value.export_to(prom_metric, tags)
for stats, operator_tag in zip(op_metrics, operator_tags):
tags = self._create_tags(dataset_tag, operator_tag)
self.spilled_bytes.set(stats.get("obj_store_mem_spilled", 0), tags)
self.freed_bytes.set(stats.get("obj_store_mem_freed", 0), tags)
self.current_bytes.set(stats.get("obj_store_mem_used", 0), tags)
self.output_bytes.set(stats.get("bytes_task_outputs_generated", 0), tags)
self.output_rows.set(stats.get("row_outputs_taken", 0), tags)
self.cpu_usage_cores.set(stats.get("cpu_usage", 0), tags)
self.gpu_usage_cores.set(stats.get("gpu_usage", 0), tags)
for field_name, prom_metric in self.execution_metrics_inputs.items():
_record(prom_metric, stats.get(field_name, 0), tags)
for field_name, prom_metric in self.execution_metrics_outputs.items():
_record(prom_metric, stats.get(field_name, 0), tags)
for field_name, prom_metric in self.execution_metrics_tasks.items():
_record(prom_metric, stats.get(field_name, 0), tags)
for (
field_name,
prom_metric,
) in self.execution_metrics_obj_store_memory.items():
_record(prom_metric, stats.get(field_name, 0), tags)
for field_name, prom_metric in self.execution_metrics_actors.items():
_record(prom_metric, stats.get(field_name, 0), tags)
for field_name, prom_metric in self.execution_metrics_misc.items():
_record(prom_metric, stats.get(field_name, 0), tags)
# Update per node metrics if they exist, the creation of these metrics is controlled
# by the _data_context.enable_per_node_metrics flag in the streaming executor but
# that is not exposed in the _StatsActor so here we simply check if the metrics exist
# and if so, update them
if per_node_metrics is not None:
for node_id, node_metrics in per_node_metrics.items():
# Translate node_id into node_name (the node ip), cache node info
if node_id not in self._ray_nodes_cache:
# Rebuilding this cache will fetch all nodes, this
# only needs to be done up to once per loop
self._rebuild_ray_nodes_cache()
node_ip = self._ray_nodes_cache.get(node_id, NODE_UNKNOWN)
tags = self._create_tags(dataset_tag=dataset_tag, node_ip_tag=node_ip)
for metric_name, metric_value in node_metrics.items():
prom_metric = self.per_node_metrics[metric_name]
_record(prom_metric, metric_value, tags)
# This update is called from a dataset's executor,
# so all tags should contain the same dataset
self.update_dataset(dataset_tag, state)
def _rebuild_ray_nodes_cache(self) -> None:
current_nodes = ray.nodes()
for node in current_nodes:
node_id = node.get("NodeID", None)
node_name = node.get("NodeName", None)
if node_id is not None and node_name is not None:
self._ray_nodes_cache[node_id] = node_name
def update_iteration_metrics(
self,
stats: "DatasetStats",
dataset_tag,
):
tags = self._create_tags(dataset_tag)
self.iter_initialize_s.set(stats.iter_initialize_s.get(), tags)
self.iter_get_ref_bundles_s.set(stats.iter_get_ref_bundles_s.get(), tags)
self.iter_get_s.set(stats.iter_get_s.get(), tags)
self.iter_next_batch_s.set(stats.iter_next_batch_s.get(), tags)
self.iter_format_batch_s.set(stats.iter_format_batch_s.get(), tags)
self.iter_collate_batch_s.set(stats.iter_collate_batch_s.get(), tags)
self.iter_finalize_batch_s.set(stats.iter_finalize_batch_s.get(), tags)
self.iter_blocks_local.set(stats.iter_blocks_local, tags)
self.iter_blocks_remote.set(stats.iter_blocks_remote, tags)
self.iter_unknown_location.set(stats.iter_unknown_location, tags)
self.iter_prefetched_bytes.set(stats.iter_prefetched_bytes, tags)
self.iter_block_fetching_s.set(stats.iter_get_s.get(), tags)
self.iter_batch_shaping_s.set(stats.iter_next_batch_s.get(), tags)
self.iter_batch_formatting_s.set(stats.iter_format_batch_s.get(), tags)
self.iter_batch_collating_s.set(stats.iter_collate_batch_s.get(), tags)
self.iter_batch_finalizing_s.set(stats.iter_finalize_batch_s.get(), tags)
self.time_to_first_batch_s.set(stats.iter_time_to_first_batch_s.get(), tags)
self.iter_total_blocked_s.set(stats.iter_total_blocked_s.get(), tags)
self.iter_user_s.set(stats.iter_user_s.get(), tags)
def register_dataset(
self,
job_id: str,
dataset_tag: str,
operator_tags: List[str],
topology: Topology,
data_context: DataContext,
):
start_time = time.time()
self.datasets[dataset_tag] = {
"job_id": job_id,
"state": DatasetState.PENDING.name,
"progress": 0,
"total": 0,
"total_rows": 0,
"start_time": start_time,
"end_time": None,
"operators": {
operator: {
"state": DatasetState.PENDING.name,
"progress": 0,
"total": 0,
"queued_blocks": 0,
}
for operator in operator_tags
},
}
if self._metadata_exporter is not None:
self.dataset_metadatas[dataset_tag] = DatasetMetadata(
job_id=job_id,
topology=topology,
dataset_id=dataset_tag,
start_time=start_time,
data_context=data_context,
execution_start_time=None,
execution_end_time=None,
state=DatasetState.PENDING.name,
)
self._metadata_exporter.export_dataset_metadata(
self.dataset_metadatas[dataset_tag]
)
def update_dataset(self, dataset_tag: str, state: Dict[str, Any]):
self.datasets[dataset_tag].update(state)
state = self.datasets[dataset_tag]
job_id = self.datasets[dataset_tag].get("job_id", "None")
start_time = str(int(self.datasets[dataset_tag].get("start_time", 0)))
# Update dataset-level metrics
dataset_tags = {
"dataset": dataset_tag,
"job_id": job_id,
"start_time": start_time,
}
self.data_dataset_estimated_total_blocks.set(
state.get("total", 0), dataset_tags
)
self.data_dataset_estimated_total_rows.set(
state.get("total_rows", 0), dataset_tags
)
state_string = state.get("state", DatasetState.UNKNOWN.name)
state_enum = DatasetState.from_string(state_string)
self.data_dataset_state.set(state_enum.value, dataset_tags)
self.update_dataset_metadata_state(dataset_tag, state_string)
# Update operator-level metrics
operator_states: Dict[str, str] = {}
for operator, op_state in state.get("operators", {}).items():
operator_tags = {
"dataset": dataset_tag,
"operator": operator,
}
self.data_operator_estimated_total_blocks.set(
op_state.get("total", 0), operator_tags
)
self.data_operator_estimated_total_rows.set(
op_state.get("total_rows", 0), operator_tags
)
self.data_operator_queued_blocks.set(
op_state.get("queued_blocks", 0), operator_tags
)
# Get state code directly from enum
state_string = op_state.get("state", DatasetState.UNKNOWN.name)
state_enum = DatasetState.from_string(state_string)
self.data_operator_state.set(state_enum.value, operator_tags)
operator_states[operator] = state_string
self.update_dataset_metadata_operator_states(dataset_tag, operator_states)
# Evict the oldest finished datasets to ensure the `max_stats` limit is enforced.
if state["state"] in {DatasetState.FINISHED.name, DatasetState.FAILED.name}:
self.finished_datasets_queue.append(dataset_tag)
while len(self.datasets) > self.max_stats and self.finished_datasets_queue:
tag_to_evict = self.finished_datasets_queue.popleft()
self.datasets.pop(tag_to_evict, None)
self.dataset_metadatas.pop(tag_to_evict, None)
def get_datasets(self, job_id: Optional[str] = None):
if not job_id:
return self.datasets
return {k: v for k, v in self.datasets.items() if v["job_id"] == job_id}
def update_dataset_metadata_state(self, dataset_id: str, new_state: str):
if dataset_id not in self.dataset_metadatas:
return
update_time = time.time()
dataset_metadata = self.dataset_metadatas[dataset_id]
if dataset_metadata.state == new_state:
return
updated_dataset_metadata = copy.deepcopy(dataset_metadata)
updated_dataset_metadata.state = new_state
if new_state == DatasetState.RUNNING.name:
updated_dataset_metadata.execution_start_time = update_time
elif new_state in (DatasetState.FINISHED.name, DatasetState.FAILED.name):
updated_dataset_metadata.execution_end_time = update_time
# Update metadata of running operators
for operator in updated_dataset_metadata.topology.operators:
if operator.state == DatasetState.RUNNING.name:
operator.state = new_state
operator.execution_end_time = update_time
self.dataset_metadatas[dataset_id] = updated_dataset_metadata
if self._metadata_exporter is not None:
self._metadata_exporter.export_dataset_metadata(
updated_dataset_metadata,
include_data_context=False,
include_op_args=False,
)
def update_dataset_metadata_operator_states(
self, dataset_id: str, operator_states: Dict[str, str]
):
if dataset_id not in self.dataset_metadatas:
return
dataset_metadata = self.dataset_metadatas[dataset_id]
update_needed = False
for operator in dataset_metadata.topology.operators:
if (
operator.id in operator_states
and operator.state != operator_states[operator.id]
):
update_needed = True
break
if not update_needed:
return
updated_dataset_metadata = copy.deepcopy(dataset_metadata)
update_time = time.time()
for operator in updated_dataset_metadata.topology.operators:
if operator.id in operator_states:
new_state = operator_states[operator.id]
if operator.state == new_state:
continue
operator.state = new_state
if new_state == DatasetState.RUNNING.name:
operator.execution_start_time = update_time
elif new_state in (
DatasetState.FINISHED.name,
DatasetState.FAILED.name,
):
operator.execution_end_time = update_time
# Handle outlier case for InputDataBuffer, which is marked as finished immediately and does not have a RUNNING state.
# Set the execution time the same as its end time
if not operator.execution_start_time:
operator.execution_start_time = update_time
self.dataset_metadatas[dataset_id] = updated_dataset_metadata
if self._metadata_exporter is not None:
self._metadata_exporter.export_dataset_metadata(
updated_dataset_metadata,
include_data_context=False,
include_op_args=False,
)
def _create_tags(
self,
dataset_tag: str,
operator_tag: Optional[str] = None,
node_ip_tag: Optional[str] = None,
):
tags = {"dataset": dataset_tag}
if operator_tag is not None:
tags["operator"] = operator_tag
if node_ip_tag is not None:
tags["node_ip"] = node_ip_tag
return tags
def get_or_create_stats_actor() -> ActorHandle[_StatsActor]:
"""Each cluster will contain exactly 1 _StatsActor. This function
returns the current _StatsActor handle, or create a new one if one
does not exist in the connected cluster. The _StatsActor is pinned on
on driver process' node.
"""
if ray._private.worker._global_node is None:
raise RuntimeError(
"Global node is not initialized. Driver might be not connected to Ray."
)
current_cluster_id = ray._private.worker._global_node.cluster_id
logger.debug(f"Stats Actor located on cluster_id={current_cluster_id}")
# so it fate-shares with the driver.
scheduling_strategy = NodeAffinitySchedulingStrategy(
ray.get_runtime_context().get_node_id(),
soft=False,
)
return _StatsActor.options(
name=STATS_ACTOR_NAME,
namespace=STATS_ACTOR_NAMESPACE,
get_if_exists=True,
lifetime="detached",
scheduling_strategy=scheduling_strategy,
).remote()
| _StatsActor |
python | Lightning-AI__lightning | src/lightning/fabric/plugins/precision/transformer_engine.py | {
"start": 1436,
"end": 8308
} | class ____(Precision):
"""Plugin for training with fp8 precision via nvidia's
`Transformer Engine <https://docs.nvidia.com/deeplearning/transformer-engine>`__.
.. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.
Args:
weights_dtype: The weights dtype to use.
recipe: Recipe for the DelayedScaling
`configuration <https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html#transformer_engine.common.recipe.DelayedScaling>`__.
In dict format or the dataclass format.
replace_layers: Whether to replace ``Linear`` and ``LayerNorm`` layers automatically with their Transformer
Engine alternatives. Note that they don't subclass the torch equivalents so checks like
``isinstance(l, torch.nn.Linear)`` will not pass.
fallback_compute_dtype: The compute dtype to use for operations that don't support fp8 autocast. Defaults to the
same as ``weights_dtype``.
.. note::
Support for FP8 in the linear layers with this plugin is currently limited to tensors
with shapes where the dimensions are divisible by 8 and 16 respectively. You might want to add padding to your
inputs to conform to this restriction.
"""
precision: Literal["transformer-engine", "transformer-engine-float16"] = "transformer-engine"
def __init__(
self,
*,
weights_dtype: torch.dtype,
recipe: Optional[Union[Mapping[str, Any], "DelayedScaling"]] = None,
replace_layers: Optional[bool] = None,
fallback_compute_dtype: Optional[torch.dtype] = None,
) -> None:
if not _TRANSFORMER_ENGINE_AVAILABLE:
raise ModuleNotFoundError(str(_TRANSFORMER_ENGINE_AVAILABLE))
from transformer_engine.common.recipe import DelayedScaling
if recipe is None:
recipe = DelayedScaling()
elif isinstance(recipe, Mapping):
recipe = dict(recipe) # copy
if "fp8_format" in recipe:
from transformer_engine.common.recipe import Format
recipe["fp8_format"] = getattr(Format, recipe["fp8_format"])
recipe = DelayedScaling(**recipe)
self.weights_dtype = weights_dtype
self.recipe = recipe
self.replace_layers = replace_layers
self.fallback_compute_dtype = fallback_compute_dtype or weights_dtype
@override
def convert_module(self, module: torch.nn.Module) -> torch.nn.Module:
# avoid converting if any is found. assume the user took care of it
if any("transformer_engine.pytorch" in m.__module__ for m in module.modules()):
if self.replace_layers is True:
# info level because this is expected with `init_module`
rank_zero_info(
"`TransformerEnginePrecision(replace_layers=True)` is set but the model already contains"
" TransformerEngine layers. Skipping"
)
elif self.replace_layers in (None, True):
_convert_layers(module)
module = module.to(dtype=self.weights_dtype)
return module
@override
def tensor_init_context(self) -> AbstractContextManager:
return _DtypeContextManager(self.weights_dtype)
@override
def module_init_context(self) -> AbstractContextManager:
dtype_ctx = self.tensor_init_context()
stack = ExitStack()
if self.replace_layers:
import transformer_engine.pytorch as te
context_manager = _ClassReplacementContextManager({
"torch.nn.Linear": te.Linear,
"torch.nn.LayerNorm": te.LayerNorm,
})
stack.enter_context(context_manager)
stack.enter_context(dtype_ctx)
return stack
@override
def forward_context(self) -> AbstractContextManager:
dtype_ctx = _DtypeContextManager(self.weights_dtype)
fallback_autocast_ctx = torch.autocast(device_type="cuda", dtype=self.fallback_compute_dtype)
import transformer_engine.pytorch as te
autocast_ctx = te.fp8_autocast(enabled=True, fp8_recipe=self.recipe)
stack = ExitStack()
stack.enter_context(dtype_ctx)
# enable an outer fallback autocast for operations that do not support fp8
stack.enter_context(fallback_autocast_ctx)
stack.enter_context(autocast_ctx)
return stack
@override
def convert_input(self, data: Any) -> Any:
return apply_to_collection(data, function=_convert_fp_tensor, dtype=Tensor, dst_type=self.weights_dtype)
@override
def convert_output(self, data: Any) -> Any:
return apply_to_collection(data, function=_convert_fp_tensor, dtype=Tensor, dst_type=torch.get_default_dtype())
def _convert_layers(module: torch.nn.Module) -> None:
import transformer_engine.pytorch as te
for name, child in module.named_children():
if isinstance(child, torch.nn.Linear):
if child.in_features % 8 != 0 or child.out_features % 16 != 0:
# https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/examples/fp8_primer.html#FP8-autocasting
rank_zero_warn(
"Support for FP8 in the linear layers with this plugin is currently limited to"
" tensors with shapes where the dimensions are divisible by 8 and 16 respectively."
f" The layer {name!r} does not fit this criteria. You might want to add padding to your inputs."
)
continue
has_bias = child.bias is not None
replacement = te.Linear(child.in_features, child.out_features, bias=has_bias)
replacement.weight.data = child.weight.data.clone()
if has_bias:
replacement.bias.data = child.bias.data.clone()
log.debug(f"Replacing layer {name!r} with Transformer Engine equivalent")
module.__setattr__(name, replacement)
elif isinstance(child, torch.nn.LayerNorm):
replacement = te.LayerNorm(child.normalized_shape[0], eps=child.eps)
replacement.weight.data = child.weight.data.clone()
# Check if bias exists before attempting to clone its data
if child.bias is not None and replacement.bias is not None:
replacement.bias.data = child.bias.data.clone()
log.debug(f"Replacing layer {name!r} with Transformer Engine equivalent")
module.__setattr__(name, replacement)
else:
# there are other transformer engine layers that we could convert but require fusion. full list at:
# https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/pytorch.html
_convert_layers(child)
| TransformerEnginePrecision |
python | huggingface__transformers | tests/quantization/autoawq/test_awq.py | {
"start": 1344,
"end": 3483
} | class ____(unittest.TestCase):
def test_wrong_backend(self):
"""
Simple test that checks if a user passes a wrong backend an error is raised
"""
# This should work fine
_ = AwqConfig(bits=4)
with self.assertRaises(ValueError):
AwqConfig(bits=4, backend="")
# These should work fine
_ = AwqConfig(bits=4, version="GEMM")
_ = AwqConfig(bits=4, version="gemm")
with self.assertRaises(ValueError):
AwqConfig(bits=4, backend="unexisting-backend")
# Only cuda and xpu devices can run this function
support_llm_awq = False
device_type, major, _ = get_device_properties()
if device_type == "cuda" and major >= 8:
support_llm_awq = True
elif device_type == "xpu":
support_llm_awq = True
if support_llm_awq:
# LLMAWQ should work on an A100
AwqConfig(bits=4, backend="llm-awq")
else:
# LLMAWQ does not work on a T4
with self.assertRaises(ValueError):
AwqConfig(bits=4, backend="llm-awq")
def test_to_dict(self):
"""
Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object
"""
quantization_config = AwqConfig(bits=4)
config_to_dict = quantization_config.to_dict()
for key in config_to_dict:
self.assertEqual(getattr(quantization_config, key), config_to_dict[key])
def test_from_dict(self):
"""
Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict
"""
dict = {"bits": 2, "zero_point": False, "backend": "autoawq"}
quantization_config = AwqConfig.from_dict(dict)
self.assertEqual(dict["bits"], quantization_config.bits)
self.assertEqual(dict["zero_point"], quantization_config.zero_point)
self.assertEqual(dict["backend"], quantization_config.backend)
@slow
@require_torch_accelerator
@require_auto_awq
@require_accelerate
| AwqConfigTest |
python | facebook__pyre-check | tools/generate_taint_models/tests/get_dynamic_graphql_sources_test.py | {
"start": 914,
"end": 1063
} | class ____:
def method1(self, foo) -> bool:
return True
def method2(self, foo, *bar) -> bool:
return True
@dataclass
| TestClass |
python | Lightning-AI__lightning | tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py | {
"start": 17086,
"end": 30968
} | class ____(BoringModel):
def training_step(self, *args, **kwargs):
self.print("training_step", end="")
return super().training_step(*args, **kwargs)
def validation_step(self, *args, **kwargs):
self.print("validation_step", file=sys.stderr)
return super().validation_step(*args, **kwargs)
def test_step(self, *args, **kwargs):
self.print("test_step")
return super().test_step(*args, **kwargs)
def predict_step(self, *args, **kwargs):
self.print("predict_step")
return super().predict_step(*args, **kwargs)
@mock.patch("tqdm.tqdm.write")
def test_tqdm_progress_bar_print(tqdm_write, tmp_path):
"""Test that printing in the LightningModule redirects arguments to the progress bar."""
model = PrintModel()
bar = TQDMProgressBar()
trainer = Trainer(
default_root_dir=tmp_path,
num_sanity_val_steps=0,
limit_train_batches=1,
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
max_steps=1,
callbacks=[bar],
)
trainer.fit(model)
trainer.test(model)
trainer.predict(model)
assert tqdm_write.call_args_list == [
call("training_step", end=""),
call("validation_step", file=sys.stderr),
call("test_step"),
call("predict_step"),
]
@mock.patch("tqdm.tqdm.write")
def test_tqdm_progress_bar_print_no_train(tqdm_write, tmp_path):
"""Test that printing in the LightningModule redirects arguments to the progress bar without training."""
model = PrintModel()
bar = TQDMProgressBar()
trainer = Trainer(
default_root_dir=tmp_path,
num_sanity_val_steps=0,
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
max_steps=1,
callbacks=[bar],
)
trainer.validate(model)
trainer.test(model)
trainer.predict(model)
assert tqdm_write.call_args_list == [
call("validation_step", file=sys.stderr),
call("test_step"),
call("predict_step"),
]
@mock.patch("builtins.print")
@mock.patch("lightning.pytorch.callbacks.progress.tqdm_progress.Tqdm.write")
def test_tqdm_progress_bar_print_disabled(tqdm_write, mock_print, tmp_path):
"""Test that printing in LightningModule goes through built-in print function when progress bar is disabled."""
model = PrintModel()
bar = TQDMProgressBar()
trainer = Trainer(
default_root_dir=tmp_path,
num_sanity_val_steps=0,
limit_train_batches=1,
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
max_steps=1,
callbacks=[bar],
)
bar.disable()
trainer.fit(model)
trainer.test(model, verbose=False)
trainer.predict(model)
mock_print.assert_has_calls([
call("training_step", end=""),
call("validation_step", file=ANY),
call("test_step"),
call("predict_step"),
])
tqdm_write.assert_not_called()
def test_tqdm_progress_bar_can_be_pickled(tmp_path):
bar = TQDMProgressBar()
trainer = Trainer(
default_root_dir=tmp_path,
callbacks=[bar],
max_epochs=1,
limit_train_batches=1,
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
logger=False,
enable_model_summary=False,
)
model = BoringModel()
pickle.dumps(bar)
trainer.fit(model)
pickle.dumps(bar)
trainer.validate(model)
pickle.dumps(bar)
trainer.test(model)
pickle.dumps(bar)
trainer.predict(model)
pickle.dumps(bar)
@pytest.mark.parametrize(
("val_check_interval", "train_progress_bar_updates", "val_progress_bar_updates"),
[(4, [0, 3, 6, 7], [0, 3, 6, 7]), (0.5, [0, 3, 6, 7], [0, 3, 6, 7])],
)
def test_progress_bar_max_val_check_interval(
tmp_path, val_check_interval, train_progress_bar_updates, val_progress_bar_updates
):
limit_batches = 7
model = BoringModel()
trainer = Trainer(
default_root_dir=tmp_path,
num_sanity_val_steps=0,
max_epochs=1,
enable_model_summary=False,
val_check_interval=val_check_interval,
limit_train_batches=limit_batches,
limit_val_batches=limit_batches,
callbacks=TQDMProgressBar(refresh_rate=3),
)
with mock.patch("lightning.pytorch.callbacks.progress.tqdm_progress.Tqdm", MockTqdm):
trainer.fit(model)
pbar = trainer.progress_bar_callback
assert pbar.train_progress_bar.n_values == train_progress_bar_updates
assert pbar.val_progress_bar.n_values == val_progress_bar_updates
val_check_batch = (
max(1, int(limit_batches * val_check_interval)) if isinstance(val_check_interval, float) else val_check_interval
)
assert trainer.val_check_batch == val_check_batch
math.ceil(limit_batches // val_check_batch)
pbar_callback = trainer.progress_bar_callback
assert pbar_callback.val_progress_bar.n == limit_batches
assert pbar_callback.val_progress_bar.total == limit_batches
assert pbar_callback.train_progress_bar.n == limit_batches
assert pbar_callback.train_progress_bar.total == limit_batches
assert pbar_callback.is_enabled
@patch("lightning.pytorch.trainer.connectors.callback_connector._RICH_AVAILABLE", False)
@RunIf(min_cuda_gpus=2, standalone=True)
@pytest.mark.parametrize("val_check_interval", [0.2, 0.5])
def test_progress_bar_max_val_check_interval_ddp(tmp_path, val_check_interval):
world_size = 2
total_train_samples = 16
train_batch_size = 4
total_val_samples = 2
val_batch_size = 1
train_data = DataLoader(RandomDataset(32, 8), batch_size=train_batch_size)
val_data = DataLoader(RandomDataset(32, total_val_samples), batch_size=val_batch_size)
model = BoringModel()
trainer = Trainer(
default_root_dir=tmp_path,
num_sanity_val_steps=0,
max_epochs=1,
val_check_interval=val_check_interval,
accelerator="gpu",
devices=world_size,
strategy="ddp",
enable_progress_bar=True,
enable_model_summary=False,
)
trainer.fit(model, train_dataloaders=train_data, val_dataloaders=val_data)
total_train_batches = total_train_samples // (train_batch_size * world_size)
val_check_batch = max(1, int(total_train_batches * val_check_interval))
assert trainer.val_check_batch == val_check_batch
total_val_batches = total_val_samples // (val_batch_size * world_size)
pbar_callback = trainer.progress_bar_callback
if trainer.is_global_zero:
assert pbar_callback.val_progress_bar.n == total_val_batches
assert pbar_callback.val_progress_bar.total == total_val_batches
assert pbar_callback.train_progress_bar.n == total_train_batches // world_size
assert pbar_callback.train_progress_bar.total == total_train_batches // world_size
assert pbar_callback.is_enabled
def test_get_progress_bar_metrics(tmp_path):
"""Test that the metrics shown in the progress bar can be customized."""
class TestProgressBar(TQDMProgressBar):
def get_metrics(self, trainer: Trainer, model: LightningModule):
items = super().get_metrics(trainer, model)
items.pop("v_num", None)
items["my_metric"] = 123
return items
progress_bar = TestProgressBar()
trainer = Trainer(
default_root_dir=tmp_path,
callbacks=[progress_bar],
limit_train_batches=1,
limit_val_batches=1,
max_epochs=1,
enable_checkpointing=False,
enable_model_summary=False,
)
model = BoringModel()
trainer.fit(model)
standard_metrics = progress_bar.get_metrics(trainer, model)
assert "v_num" not in standard_metrics
assert "my_metric" in standard_metrics
def test_get_progress_bar_metrics_fast_dev_run(tmp_path):
"""Test that `v_num` does not appear in the progress bar when a dummy logger is used (fast-dev-run)."""
trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=True)
model = BoringModel()
trainer.fit(model)
standard_metrics = trainer.progress_bar_callback.get_metrics(trainer, model)
assert isinstance(trainer.logger, DummyLogger)
assert "v_num" not in standard_metrics
def test_tqdm_progress_bar_correct_value_epoch_end(tmp_path):
"""TQDM counterpart to test_rich_progress_bar::test_rich_progress_bar_correct_value_epoch_end."""
class MockedProgressBar(TQDMProgressBar):
calls = defaultdict(list)
def get_metrics(self, trainer, pl_module):
items = super().get_metrics(trainer, model)
del items["v_num"]
# this is equivalent to mocking `set_postfix` as this method gets called every time
self.calls[trainer.state.fn].append((
trainer.state.stage.value,
trainer.current_epoch,
trainer.global_step,
items,
))
return items
class MyModel(BoringModel):
def training_step(self, batch, batch_idx):
self.log("a", self.global_step, prog_bar=True, on_step=False, on_epoch=True, reduce_fx=max)
return super().training_step(batch, batch_idx)
def validation_step(self, batch, batch_idx):
self.log("b", self.global_step, prog_bar=True, on_step=False, on_epoch=True, reduce_fx=max)
return super().validation_step(batch, batch_idx)
def test_step(self, batch, batch_idx):
self.log("c", self.global_step, prog_bar=True, on_step=False, on_epoch=True, reduce_fx=max)
return super().test_step(batch, batch_idx)
model = MyModel()
pbar = MockedProgressBar()
trainer = Trainer(
default_root_dir=tmp_path,
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
max_epochs=2,
enable_model_summary=False,
enable_checkpointing=False,
log_every_n_steps=1,
callbacks=pbar,
logger=CSVLogger(tmp_path),
)
trainer.fit(model)
assert pbar.calls["fit"] == [
("sanity_check", 0, 0, {"b": 0}),
("train", 0, 1, {}),
("train", 0, 2, {}),
("validate", 0, 2, {"b": 2}), # validation end
# epoch end over, `on_epoch=True` metrics are computed
("train", 0, 2, {"a": 1, "b": 2}), # training epoch end
("train", 1, 3, {"a": 1, "b": 2}),
("train", 1, 4, {"a": 1, "b": 2}),
("validate", 1, 4, {"a": 1, "b": 4}), # validation end
("train", 1, 4, {"a": 3, "b": 4}), # training epoch end
]
trainer.validate(model, verbose=False)
assert pbar.calls["validate"] == []
trainer.test(model, verbose=False)
assert pbar.calls["test"] == []
@mock.patch("lightning.pytorch.trainer.trainer.Trainer.is_global_zero", new_callable=PropertyMock, return_value=False)
def test_tqdm_progress_bar_disabled_when_not_rank_zero(is_global_zero):
"""Test that the progress bar is disabled when not in global rank zero."""
pbar = TQDMProgressBar()
model = BoringModel()
trainer = Trainer(
callbacks=[pbar],
fast_dev_run=True,
)
pbar.enable()
trainer.fit(model)
assert pbar.is_disabled
pbar.enable()
trainer.predict(model)
assert pbar.is_disabled
pbar.enable()
trainer.validate(model)
assert pbar.is_disabled
pbar.enable()
trainer.test(model)
assert pbar.is_disabled
@pytest.mark.parametrize("leave", [True, False])
def test_tqdm_leave(leave, tmp_path):
pbar = TQDMProgressBar(leave=leave)
pbar.init_train_tqdm = Mock(wraps=pbar.init_train_tqdm)
model = BoringModel()
trainer = Trainer(
default_root_dir=tmp_path,
callbacks=[pbar],
max_epochs=3,
limit_train_batches=1,
limit_val_batches=1,
benchmark=True,
)
trainer.fit(model)
assert pbar.init_train_tqdm.call_count == (4 if leave else 1)
@patch("lightning.pytorch.trainer.connectors.callback_connector._RICH_AVAILABLE", False)
def test_tqdm_progress_bar_reset_behavior(tmp_path):
"""Test that progress bars call reset() without parameters and set total separately."""
model = BoringModel()
class ResetTrackingTqdm(MockTqdm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reset_calls_with_params = []
def reset(self, total=None):
self.reset_calls_with_params.append(total)
super().reset(total)
trainer = Trainer(
default_root_dir=tmp_path,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=1,
logger=False,
enable_checkpointing=False,
)
pbar = trainer.progress_bar_callback
with mock.patch("lightning.pytorch.callbacks.progress.tqdm_progress.Tqdm", ResetTrackingTqdm):
trainer.fit(model)
train_bar = pbar.train_progress_bar
assert None in train_bar.reset_calls_with_params, (
f"train reset() should be called without parameters, got calls: {train_bar.reset_calls_with_params}"
)
# Verify that total was set separately to the expected value
assert 2 in train_bar.total_values, (
f"train total should be set to 2 after reset(), got total_values: {train_bar.total_values}"
)
# Verify that validation progress bar reset() was called without parameters
val_bar = pbar.val_progress_bar
assert None in val_bar.reset_calls_with_params, (
f"validation reset() should be called without parameters, got calls: {val_bar.reset_calls_with_params}"
)
# Verify that total was set separately to the expected value
assert 2 in val_bar.total_values, (
f"validation total should be set to 2 after reset(), got total_values: {val_bar.total_values}"
)
| PrintModel |
python | pytorch__pytorch | torch/nn/modules/pooling.py | {
"start": 57212,
"end": 58131
} | class ____(_AdaptiveAvgPoolNd):
r"""Applies a 1D adaptive average pooling over an input signal composed of several input planes.
The output size is :math:`L_{out}`, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size :math:`L_{out}`.
Shape:
- Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
- Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
:math:`L_{out}=\text{output\_size}`.
Examples:
>>> # target output size of 5
>>> m = nn.AdaptiveAvgPool1d(5)
>>> input = torch.randn(1, 64, 8)
>>> output = m(input)
"""
output_size: _size_1_t
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.adaptive_avg_pool1d(input, self.output_size)
| AdaptiveAvgPool1d |
python | sqlalchemy__sqlalchemy | test/ext/test_mutable.py | {
"start": 2741,
"end": 3055
} | class ____(MutableComposite):
x: int
y: int
def __setattr__(self, key, value):
object.__setattr__(self, key, value)
self.changed()
def __getstate__(self):
return self.x, self.y
def __setstate__(self, state):
self.x, self.y = state
@dataclasses.dataclass
| DCPoint |
python | doocs__leetcode | lcof/面试题53 - I. 在排序数组中查找数字 I/Solution.py | {
"start": 0,
"end": 173
} | class ____:
def search(self, nums: List[int], target: int) -> int:
l = bisect_left(nums, target)
r = bisect_right(nums, target)
return r - l
| Solution |
python | plotly__plotly.py | plotly/graph_objs/scattergl/marker/_colorbar.py | {
"start": 233,
"end": 61680
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattergl.marker"
_path_str = "scattergl.marker.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"labelalias",
"len",
"lenmode",
"minexponent",
"nticks",
"orientation",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"x",
"xanchor",
"xpad",
"xref",
"y",
"yanchor",
"ypad",
"yref",
}
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def orientation(self):
"""
Sets the orientation of the colorbar.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
@property
def tick0(self):
    """
    Placement of the first tick on this axis (used with `dtick`).
    For "log" axes pass the log of the starting tick, for "date"
    axes a date string, and for "category" axes the zero-based
    serial number of the category.

    Returns
    -------
    Any
    """
    return self["tick0"]

@tick0.setter
def tick0(self, val):
    self["tick0"] = val

@property
def tickangle(self):
    """
    Angle of the tick labels relative to the horizontal, in degrees
    between -180 and 180 (e.g. -90 draws them vertically); values
    outside the range are wrapped.

    Returns
    -------
    int|float
    """
    return self["tickangle"]

@tickangle.setter
def tickangle(self, val):
    self["tickangle"] = val

@property
def tickcolor(self):
    """
    Tick color, as any CSS-compatible color string (hex, rgb/rgba,
    hsl/hsla, hsv/hsva or a named CSS color).

    Returns
    -------
    str
    """
    return self["tickcolor"]

@tickcolor.setter
def tickcolor(self, val):
    self["tickcolor"] = val

@property
def tickfont(self):
    """
    Color bar tick label font; a
    :class:`plotly.graph_objs.scattergl.marker.colorbar.Tickfont`
    instance or a compatible dict.

    Returns
    -------
    plotly.graph_objs.scattergl.marker.colorbar.Tickfont
    """
    return self["tickfont"]

@tickfont.setter
def tickfont(self, val):
    self["tickfont"] = val

@property
def tickformat(self):
    """
    d3-format tick label formatting rule (with plotly's "%h" and
    "%{n}f" date-formatter extensions).

    Returns
    -------
    str
    """
    return self["tickformat"]

@tickformat.setter
def tickformat(self, val):
    self["tickformat"] = val

@property
def tickformatstops(self):
    """
    Tuple of
    :class:`plotly.graph_objs.scattergl.marker.colorbar.Tickformatstop`
    instances or compatible dicts.

    Returns
    -------
    tuple[plotly.graph_objs.scattergl.marker.colorbar.Tickformatstop]
    """
    return self["tickformatstops"]

@tickformatstops.setter
def tickformatstops(self, val):
    self["tickformatstops"] = val

@property
def tickformatstopdefaults(self):
    """
    Template default property values applied to elements of
    scattergl.marker.colorbar.tickformatstops.

    Returns
    -------
    plotly.graph_objs.scattergl.marker.colorbar.Tickformatstop
    """
    return self["tickformatstopdefaults"]

@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
    self["tickformatstopdefaults"] = val

@property
def ticklabeloverflow(self):
    """
    How tick labels that would overflow the graph div or the axis
    domain are handled: 'allow', 'hide past div' or
    'hide past domain'.

    Returns
    -------
    Any
    """
    return self["ticklabeloverflow"]

@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
    self["ticklabeloverflow"] = val

@property
def ticklabelposition(self):
    """
    Where tick labels are drawn relative to the ticks (e.g.
    'outside', 'inside top', 'outside left', 'inside bottom', ...);
    left/right apply when `orientation` is "h", top/bottom when it
    is "v".

    Returns
    -------
    Any
    """
    return self["ticklabelposition"]

@ticklabelposition.setter
def ticklabelposition(self, val):
    self["ticklabelposition"] = val

@property
def ticklabelstep(self):
    """
    Label every nth tick (integer >= 1; 1 labels every tick).
    Ignored for "log"/"multicategory" axes and when `tickmode` is
    "array".

    Returns
    -------
    int
    """
    return self["ticklabelstep"]

@ticklabelstep.setter
def ticklabelstep(self, val):
    self["ticklabelstep"] = val

@property
def ticklen(self):
    """
    Tick length in pixels (non-negative number).

    Returns
    -------
    int|float
    """
    return self["ticklen"]

@ticklen.setter
def ticklen(self, val):
    self["ticklen"] = val
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def title(self):
    """
    Color bar title; a
    :class:`plotly.graph_objs.scattergl.marker.colorbar.Title`
    instance or a compatible dict.

    Returns
    -------
    plotly.graph_objs.scattergl.marker.colorbar.Title
    """
    return self["title"]

@title.setter
def title(self, val):
    self["title"] = val

@property
def x(self):
    """
    Horizontal position of the color bar with respect to `xref`,
    in plot fraction. Defaults depend on `orientation` and `xref`;
    the allowed range is [0, 1] when `xref` is "container" and
    [-2, 3] when it is "paper".

    Returns
    -------
    int|float
    """
    return self["x"]

@x.setter
def x(self, val):
    self["x"] = val

@property
def xanchor(self):
    """
    Horizontal anchor binding `x` to the "left", "center" or
    "right" of the color bar.

    Returns
    -------
    Any
    """
    return self["xanchor"]

@xanchor.setter
def xanchor(self, val):
    self["xanchor"] = val

@property
def xpad(self):
    """
    Padding (px) along the x direction (non-negative number).

    Returns
    -------
    int|float
    """
    return self["xpad"]

@xpad.setter
def xpad(self, val):
    self["xpad"] = val

@property
def xref(self):
    """
    What `x` refers to: the full plot "container" width or the
    "paper" (plotting area) width.

    Returns
    -------
    Any
    """
    return self["xref"]

@xref.setter
def xref(self, val):
    self["xref"] = val

@property
def y(self):
    """
    Vertical position of the color bar with respect to `yref`, in
    plot fraction. Defaults depend on `orientation` and `yref`;
    the allowed range is [0, 1] when `yref` is "container" and
    [-2, 3] when it is "paper".

    Returns
    -------
    int|float
    """
    return self["y"]

@y.setter
def y(self, val):
    self["y"] = val

@property
def yanchor(self):
    """
    Vertical anchor binding `y` to the "top", "middle" or "bottom"
    of the color bar.

    Returns
    -------
    Any
    """
    return self["yanchor"]

@yanchor.setter
def yanchor(self, val):
    self["yanchor"] = val

@property
def ypad(self):
    """
    Padding (px) along the y direction (non-negative number).

    Returns
    -------
    int|float
    """
    return self["ypad"]

@ypad.setter
def ypad(self, val):
    self["ypad"] = val

@property
def yref(self):
    """
    What `y` refers to: the full plot "container" height or the
    "paper" (plotting area) height.

    Returns
    -------
    Any
    """
    return self["yref"]

@yref.setter
def yref(self, val):
    self["yref"] = val
@property
def _prop_descriptions(self):
    # Auto-generated plain-text description of every colorbar
    # property. The string content is runtime data consumed by the
    # plotly base classes when building constructor docstrings and
    # help output — keep it in sync with the code generator.
    return """\
    bgcolor
        Sets the color of padded area.
    bordercolor
        Sets the axis line color.
    borderwidth
        Sets the width (in px) or the border enclosing this
        color bar.
    dtick
        Sets the step in-between ticks on this axis. Use with
        `tick0`. Must be a positive number, or special strings
        available to "log" and "date" axes. If the axis `type`
        is "log", then ticks are set every 10^(n*dtick) where n
        is the tick number. For example, to set a tick mark at
        1, 10, 100, 1000, ... set dtick to 1. To set tick marks
        at 1, 100, 10000, ... set dtick to 2. To set tick marks
        at 1, 5, 25, 125, 625, 3125, ... set dtick to
        log_10(5), or 0.69897000433. "log" has several special
        values; "L<f>", where `f` is a positive number, gives
        ticks linearly spaced in value (but not position). For
        example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
        at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
        small digits between, use "D1" (all digits) or "D2"
        (only 2 and 5). `tick0` is ignored for "D1" and "D2".
        If the axis `type` is "date", then you must convert the
        time to milliseconds. For example, to set the interval
        between ticks to one day, set `dtick` to 86400000.0.
        "date" also has special values "M<n>" gives ticks
        spaced by a number of months. `n` must be a positive
        integer. To set ticks on the 15th of every third month,
        set `tick0` to "2000-01-15" and `dtick` to "M3". To set
        ticks every 4 years, set `dtick` to "M48"
    exponentformat
        Determines a formatting rule for the tick exponents.
        For example, consider the number 1,000,000,000. If
        "none", it appears as 1,000,000,000. If "e", 1e+9. If
        "E", 1E+9. If "power", 1x10^9 (with 9 in a super
        script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
        from "femto" f (10^-15) to "tera" T (10^12). *SI
        extended* covers instead the full SI range from
        "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
        *SI extended* is used and the exponent is beyond the
        above ranges, the formatting rule will automatically be
        switched to the power notation.
    labelalias
        Replacement text for specific tick or hover labels. For
        example using {US: 'USA', CA: 'Canada'} changes US to
        USA and CA to Canada. The labels we would have shown
        must match the keys exactly, after adding any
        tickprefix or ticksuffix. For negative numbers the
        minus sign symbol used (U+2212) is wider than the
        regular ascii dash. That means you need to use −1
        instead of -1. labelalias can be used with any axis
        type, and both keys (if needed) and values (if desired)
        can include html-like tags or MathJax.
    len
        Sets the length of the color bar This measure excludes
        the padding of both ends. That is, the color bar length
        is this length minus the padding on both ends.
    lenmode
        Determines whether this color bar's length (i.e. the
        measure in the color variation direction) is set in
        units of plot "fraction" or in *pixels. Use `len` to
        set the value.
    minexponent
        Hide SI prefix for 10^n if |n| is below this number.
        This only has an effect when `tickformat` is "SI" or
        "B".
    nticks
        Specifies the maximum number of ticks for the
        particular axis. The actual number of ticks will be
        chosen automatically to be less than or equal to
        `nticks`. Has an effect only if `tickmode` is set to
        "auto".
    orientation
        Sets the orientation of the colorbar.
    outlinecolor
        Sets the axis line color.
    outlinewidth
        Sets the width (in px) of the axis line.
    separatethousands
        If "true", even 4-digit integers are separated
    showexponent
        If "all", all exponents are shown besides their
        significands. If "first", only the exponent of the
        first tick is shown. If "last", only the exponent of
        the last tick is shown. If "none", no exponents appear.
    showticklabels
        Determines whether or not the tick labels are drawn.
    showtickprefix
        If "all", all tick labels are displayed with a prefix.
        If "first", only the first tick is displayed with a
        prefix. If "last", only the last tick is displayed with
        a suffix. If "none", tick prefixes are hidden.
    showticksuffix
        Same as `showtickprefix` but for tick suffixes.
    thickness
        Sets the thickness of the color bar This measure
        excludes the size of the padding, ticks and labels.
    thicknessmode
        Determines whether this color bar's thickness (i.e. the
        measure in the constant color direction) is set in
        units of plot "fraction" or in "pixels". Use
        `thickness` to set the value.
    tick0
        Sets the placement of the first tick on this axis. Use
        with `dtick`. If the axis `type` is "log", then you
        must take the log of your starting tick (e.g. to set
        the starting tick to 100, set the `tick0` to 2) except
        when `dtick`=*L<f>* (see `dtick` for more info). If the
        axis `type` is "date", it should be a date string, like
        date data. If the axis `type` is "category", it should
        be a number, using the scale where each category is
        assigned a serial number from zero in the order it
        appears.
    tickangle
        Sets the angle of the tick labels with respect to the
        horizontal. For example, a `tickangle` of -90 draws the
        tick labels vertically.
    tickcolor
        Sets the tick color.
    tickfont
        Sets the color bar's tick label font
    tickformat
        Sets the tick label formatting rule using d3 formatting
        mini-languages which are very similar to those in
        Python. For numbers, see:
        https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
        And for dates see: https://github.com/d3/d3-time-
        format/tree/v2.2.3#locale_format. We add two items to
        d3's date formatter: "%h" for half of the year as a
        decimal number as well as "%{n}f" for fractional
        seconds with n digits. For example, *2016-10-13
        09:15:23.456* with tickformat "%H~%M~%S.%2f" would
        display "09~15~23.46"
    tickformatstops
        A tuple of :class:`plotly.graph_objects.scattergl.marke
        r.colorbar.Tickformatstop` instances or dicts with
        compatible properties
    tickformatstopdefaults
        When used in a template (as layout.template.data.scatte
        rgl.marker.colorbar.tickformatstopdefaults), sets the
        default property values to use for elements of
        scattergl.marker.colorbar.tickformatstops
    ticklabeloverflow
        Determines how we handle tick labels that would
        overflow either the graph div or the domain of the
        axis. The default value for inside tick labels is *hide
        past domain*. In other cases the default is *hide past
        div*.
    ticklabelposition
        Determines where tick labels are drawn relative to the
        ticks. Left and right options are used when
        `orientation` is "h", top and bottom when `orientation`
        is "v".
    ticklabelstep
        Sets the spacing between tick labels as compared to the
        spacing between ticks. A value of 1 (default) means
        each tick gets a label. A value of 2 means shows every
        2nd label. A larger value n means only every nth tick
        is labeled. `tick0` determines which labels are shown.
        Not implemented for axes with `type` "log" or
        "multicategory", or when `tickmode` is "array".
    ticklen
        Sets the tick length (in px).
    tickmode
        Sets the tick mode for this axis. If "auto", the number
        of ticks is set via `nticks`. If "linear", the
        placement of the ticks is determined by a starting
        position `tick0` and a tick step `dtick` ("linear" is
        the default value if `tick0` and `dtick` are provided).
        If "array", the placement of the ticks is set via
        `tickvals` and the tick text is `ticktext`. ("array" is
        the default value if `tickvals` is provided).
    tickprefix
        Sets a tick label prefix.
    ticks
        Determines whether ticks are drawn or not. If "", this
        axis' ticks are not drawn. If "outside" ("inside"),
        this axis' are drawn outside (inside) the axis lines.
    ticksuffix
        Sets a tick label suffix.
    ticktext
        Sets the text displayed at the ticks position via
        `tickvals`. Only has an effect if `tickmode` is set to
        "array". Used with `tickvals`.
    ticktextsrc
        Sets the source reference on Chart Studio Cloud for
        `ticktext`.
    tickvals
        Sets the values at which ticks on this axis appear.
        Only has an effect if `tickmode` is set to "array".
        Used with `ticktext`.
    tickvalssrc
        Sets the source reference on Chart Studio Cloud for
        `tickvals`.
    tickwidth
        Sets the tick width (in px).
    title
        :class:`plotly.graph_objects.scattergl.marker.colorbar.
        Title` instance or dict with compatible properties
    x
        Sets the x position with respect to `xref` of the color
        bar (in plot fraction). When `xref` is "paper",
        defaults to 1.02 when `orientation` is "v" and 0.5 when
        `orientation` is "h". When `xref` is "container",
        defaults to 1 when `orientation` is "v" and 0.5 when
        `orientation` is "h". Must be between 0 and 1 if `xref`
        is "container" and between "-2" and 3 if `xref` is
        "paper".
    xanchor
        Sets this color bar's horizontal position anchor. This
        anchor binds the `x` position to the "left", "center"
        or "right" of the color bar. Defaults to "left" when
        `orientation` is "v" and "center" when `orientation` is
        "h".
    xpad
        Sets the amount of padding (in px) along the x
        direction.
    xref
        Sets the container `x` refers to. "container" spans the
        entire `width` of the plot. "paper" refers to the width
        of the plotting area only.
    y
        Sets the y position with respect to `yref` of the color
        bar (in plot fraction). When `yref` is "paper",
        defaults to 0.5 when `orientation` is "v" and 1.02 when
        `orientation` is "h". When `yref` is "container",
        defaults to 0.5 when `orientation` is "v" and 1 when
        `orientation` is "h". Must be between 0 and 1 if `yref`
        is "container" and between "-2" and 3 if `yref` is
        "paper".
    yanchor
        Sets this color bar's vertical position anchor This
        anchor binds the `y` position to the "top", "middle" or
        "bottom" of the color bar. Defaults to "middle" when
        `orientation` is "v" and "bottom" when `orientation` is
        "h".
    ypad
        Sets the amount of padding (in px) along the y
        direction.
    yref
        Sets the container `y` refers to. "container" spans the
        entire `height` of the plot. "paper" refers to the
        height of the plotting area only.
    """
def __init__(
    self,
    arg=None,
    bgcolor=None,
    bordercolor=None,
    borderwidth=None,
    dtick=None,
    exponentformat=None,
    labelalias=None,
    len=None,
    lenmode=None,
    minexponent=None,
    nticks=None,
    orientation=None,
    outlinecolor=None,
    outlinewidth=None,
    separatethousands=None,
    showexponent=None,
    showticklabels=None,
    showtickprefix=None,
    showticksuffix=None,
    thickness=None,
    thicknessmode=None,
    tick0=None,
    tickangle=None,
    tickcolor=None,
    tickfont=None,
    tickformat=None,
    tickformatstops=None,
    tickformatstopdefaults=None,
    ticklabeloverflow=None,
    ticklabelposition=None,
    ticklabelstep=None,
    ticklen=None,
    tickmode=None,
    tickprefix=None,
    ticks=None,
    ticksuffix=None,
    ticktext=None,
    ticktextsrc=None,
    tickvals=None,
    tickvalssrc=None,
    tickwidth=None,
    title=None,
    x=None,
    xanchor=None,
    xpad=None,
    xref=None,
    y=None,
    yanchor=None,
    ypad=None,
    yref=None,
    **kwargs,
):
    """
    Construct a new ColorBar object.

    Parameters
    ----------
    arg
        Dict of properties compatible with this constructor, or an
        instance of
        :class:`plotly.graph_objs.scattergl.marker.ColorBar`.
    **named keywords**
        One keyword per colorbar property (``bgcolor`` ...
        ``yref``); see the docstring of the matching property on
        this class for the accepted values of each.

    Returns
    -------
    ColorBar
    """
    super().__init__("colorbar")

    # Internal fast path: re-parenting an already-built node.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Normalize `arg` into a plain dict we can merge keywords into.
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError("""\
The first argument to the plotly.graph_objs.scattergl.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergl.marker.ColorBar`""")

    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Apply each explicitly named keyword (with `arg` as the
    # fallback source), preserving the original generated order.
    for _prop, _value in (
        ("bgcolor", bgcolor),
        ("bordercolor", bordercolor),
        ("borderwidth", borderwidth),
        ("dtick", dtick),
        ("exponentformat", exponentformat),
        ("labelalias", labelalias),
        ("len", len),
        ("lenmode", lenmode),
        ("minexponent", minexponent),
        ("nticks", nticks),
        ("orientation", orientation),
        ("outlinecolor", outlinecolor),
        ("outlinewidth", outlinewidth),
        ("separatethousands", separatethousands),
        ("showexponent", showexponent),
        ("showticklabels", showticklabels),
        ("showtickprefix", showtickprefix),
        ("showticksuffix", showticksuffix),
        ("thickness", thickness),
        ("thicknessmode", thicknessmode),
        ("tick0", tick0),
        ("tickangle", tickangle),
        ("tickcolor", tickcolor),
        ("tickfont", tickfont),
        ("tickformat", tickformat),
        ("tickformatstops", tickformatstops),
        ("tickformatstopdefaults", tickformatstopdefaults),
        ("ticklabeloverflow", ticklabeloverflow),
        ("ticklabelposition", ticklabelposition),
        ("ticklabelstep", ticklabelstep),
        ("ticklen", ticklen),
        ("tickmode", tickmode),
        ("tickprefix", tickprefix),
        ("ticks", ticks),
        ("ticksuffix", ticksuffix),
        ("ticktext", ticktext),
        ("ticktextsrc", ticktextsrc),
        ("tickvals", tickvals),
        ("tickvalssrc", tickvalssrc),
        ("tickwidth", tickwidth),
        ("title", title),
        ("x", x),
        ("xanchor", xanchor),
        ("xpad", xpad),
        ("xref", xref),
        ("y", y),
        ("yanchor", yanchor),
        ("ypad", ypad),
        ("yref", yref),
    ):
        self._set_property(_prop, arg, _value)

    # Any remaining keywords (plus leftover `arg` entries) go through
    # the generic kwargs processor.
    self._process_kwargs(**dict(arg, **kwargs))
    self._skip_invalid = False
| ColorBar |
python | dagster-io__dagster | python_modules/automation/automation/dagster_docs/watcher.py | {
"start": 5060,
"end": 10158
} | class ____:
"""Watches for file changes and dynamically manages docstring validation."""
def __init__(self, root_path: Path, config: ValidationConfig, verbose: bool = False):
"""Initialize the changed files watcher.
Args:
root_path: Root path of the git repository
config: Validation configuration
verbose: Whether to print debug information
"""
self.root_path = root_path
self.config = config
self.verbose = verbose
self.current_changed_files: set[Path] = set()
self.file_watchers: dict[Path, DocstringFileWatcher] = {}
self.observer = Observer()
self.git_refresh_debounce = 1.0 # Debounce git checks
self.last_git_check = 0
def _on_file_changed(self, file_path: Path) -> None:
"""Called when any .py file changes - triggers git refresh and watcher updates."""
# Debounced git status refresh to update the watcher set
current_time = time.time()
if current_time - self.last_git_check > self.git_refresh_debounce:
self.last_git_check = current_time
self._refresh_git_status()
def _refresh_git_status(self) -> None:
"""Check git status and update active watchers."""
new_changed_files = set(git_changed_files(self.root_path))
# Remove watchers for files no longer changed
for file_path in self.current_changed_files - new_changed_files:
if file_path in self.file_watchers:
self.file_watchers[file_path].stop_watching()
del self.file_watchers[file_path]
if self.verbose:
click.echo(f"[DEBUG] Stopped watching {file_path} (no longer changed)")
# Add watchers for newly changed files
for file_path in new_changed_files - self.current_changed_files:
callback = self._create_validation_callback(file_path)
self.file_watchers[file_path] = DocstringFileWatcher(file_path, callback, self.verbose)
self.file_watchers[file_path].start_watching()
if self.verbose:
click.echo(f"[DEBUG] Started watching {file_path} (newly changed)")
# Update status message if the set changed
if new_changed_files != self.current_changed_files:
file_count = len(new_changed_files)
if file_count == 0:
click.echo("No changed files to watch")
else:
file_names = [f.name for f in new_changed_files]
click.echo(f"Watching {file_count} changed files: {', '.join(file_names)}")
self.current_changed_files = new_changed_files
def _create_validation_callback(self, file_path: Path) -> Callable[[], None]:
"""Create validation callback for a specific changed file."""
def validate_file() -> None:
timestamp = time.strftime("%H:%M:%S")
click.echo(f"[{timestamp}] File changed, validating symbols in {file_path.name}...")
try:
# Get module path for this file
module_path = self.config.path_converter(file_path, self.config.root_path)
if module_path is None:
click.echo(f"Could not determine module path for {file_path}")
return
# Extract and validate symbols from this file
symbols = extract_symbols_from_file(file_path, module_path)
if not symbols:
click.echo("No symbols found to validate")
return
results = validate_symbols(symbols)
error_count, warning_count = print_validation_results(results, self.verbose)
if error_count == 0 and warning_count == 0:
click.echo("✓ All docstrings are valid!")
elif error_count == 0:
click.echo(f"✓ All docstrings are valid (with {warning_count} warnings)")
else:
click.echo(f"✗ Found {error_count} errors, {warning_count} warnings")
except Exception as e:
click.echo(f"Validation error: {e}", err=True)
if self.verbose:
import traceback
traceback.print_exc()
click.echo("-" * 50)
return validate_file
def start_watching(self) -> None:
"""Start watching for file changes, respecting .gitignore."""
handler = GitignoreAwareHandler(self.root_path, self)
self.observer.schedule(handler, str(self.root_path), recursive=True)
self.observer.start()
# Initial git status check
self._refresh_git_status()
def stop_watching(self) -> None:
"""Stop watching for file changes."""
# Stop all individual file watchers
for watcher in self.file_watchers.values():
watcher.stop_watching()
self.file_watchers.clear()
# Stop the main observer
self.observer.stop()
self.observer.join()
| ChangedFilesWatcher |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_virginia_zip.py | {
"start": 747,
"end": 1751
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_virginia_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_virginia_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidVirginiaZip |
python | pytorch__pytorch | torch/onnx/_internal/torchscript_exporter/verification.py | {
"start": 1427,
"end": 19154
} | class ____:
"""Options for ONNX export verification.
.. deprecated:: 2.7
Consider using ``torch.onnx.export(..., dynamo=True)`` and use the returned
``ONNXProgram`` to test the ONNX model.
Attributes:
flatten: If True, unpack nested list/tuple/dict inputs into a flattened list of
Tensors for ONNX. Set this to False if nested structures are to be preserved
for ONNX, which is usually the case with exporting ScriptModules. Default True.
ignore_none: Whether to ignore None type in torch output, which is usually the
case with tracing. Set this to False, if torch output should keep None type,
which is usually the case with exporting ScriptModules. Default to True.
check_shape: Whether to check the shapes between PyTorch and ONNX Runtime outputs
are exactly the same. Set this to False to allow output shape broadcasting.
Default to True.
check_dtype: Whether to check the dtypes between PyTorch and ONNX Runtime outputs
are consistent. Default to True.
backend: ONNX backend for verification. Default to OnnxBackend.ONNX_RUNTIME_CPU.
rtol: relative tolerance in comparison between ONNX and PyTorch outputs.
atol: absolute tolerance in comparison between ONNX and PyTorch outputs.
remained_onnx_input_idx: If provided, only the specified inputs will be passed
to the ONNX model. Supply a list when there are unused inputs in the model.
Since unused inputs will be removed in the exported ONNX model, supplying
all inputs will cause an error on unexpected inputs. This parameter tells
the verifier which inputs to pass into the ONNX model.
acceptable_error_percentage: acceptable percentage of element mismatches in comparison.
It should be a float of value between 0.0 and 1.0.
"""
flatten: bool = True
ignore_none: bool = True
check_shape: bool = True
check_dtype: bool = True
backend: OnnxBackend = OnnxBackend.ONNX_RUNTIME_CPU
rtol: float = 1e-3
atol: float = 1e-7
remained_onnx_input_idx: Sequence[int] | None = None
acceptable_error_percentage: float | None = None
def _flatten_tuples(elem):
flattened = []
for t in elem:
if isinstance(t, tuple):
flattened.extend(_flatten_tuples(t))
else:
flattened.append(t)
return flattened
def _to_numpy(elem) -> list | npt.NDArray:
if isinstance(elem, torch.Tensor):
if elem.requires_grad:
return elem.detach().cpu().numpy()
else:
return elem.cpu().numpy()
elif isinstance(elem, (list, tuple)):
return [_to_numpy(inp) for inp in elem]
elif isinstance(elem, (bool, int, float)):
return np.array(elem)
elif isinstance(elem, dict):
flattened = []
for k in elem:
flattened.extend([_to_numpy(k), _to_numpy(elem[k])])
return flattened
return elem
def _inline_flatten_list(inputs, res_list) -> list:
for i in inputs:
res_list.append(i) if not isinstance(
i, (list, tuple)
) else _inline_flatten_list(i, res_list)
return res_list
def _unpack_to_numpy(values, cast_onnx_accepted=True) -> list:
value_unpacked = []
for value in values:
value_unpacked.extend(
utils.unpack_quantized_tensor(value, cast_onnx_accepted=cast_onnx_accepted)
)
return [_to_numpy(v) for v in value_unpacked]
def _run_onnx(onnx_session, inputs) -> _OutputsType:
kw_inputs = {}
if inputs and isinstance(inputs[-1], dict):
kw_inputs = inputs[-1]
inputs = inputs[:-1]
inputs = _unpack_to_numpy(_flatten_tuples(inputs))
ort_inputs = {}
for input_name, input in kw_inputs.items():
ort_inputs[input_name] = _to_numpy(input)
inputs = _to_numpy(inputs)
if hasattr(onnx_session, "get_inputs"):
# onnxruntime.InferenceSession
input_names = [i.name for i in onnx_session.get_inputs()]
elif hasattr(onnx_session, "input_names"):
# onnx.reference.ReferenceEvaluator
input_names = onnx_session.input_names
else:
raise ValueError(f"Unknown ONNX backend type: {type(onnx_session)}.")
for i, input in enumerate(inputs):
if i == len(input_names) or input_names[i] in ort_inputs:
raise ValueError(
f"got too many positional inputs. inputs: {inputs}. kw_inputs: {kw_inputs}. "
f"input names: {input_names}."
)
ort_inputs[input_names[i]] = input
onnx_outs = onnx_session.run(None, ort_inputs)
return onnx_outs
def _ort_session(
model: str | io.BytesIO, ort_providers: Sequence[str] = _ORT_PROVIDERS
):
try:
import onnxruntime # type: ignore[import]
except ImportError as e:
raise ImportError("onnxruntime is required for export verification.") from e
if ort_providers is None:
ort_providers = _ORT_PROVIDERS
session_options = onnxruntime.SessionOptions()
# suppress ort warnings.
# 0:Verbose, 1:Info, 2:Warning. 3:Error, 4:Fatal. Default is 2.
session_options.log_severity_level = 3
ort_session = onnxruntime.InferenceSession(
model if isinstance(model, str) else model.getvalue(),
session_options,
providers=ort_providers,
)
return ort_session
def _onnx_backend_session(model: str | io.BytesIO, backend: OnnxBackend):
if backend == OnnxBackend.REFERENCE:
raise NotImplementedError
elif backend in {OnnxBackend.ONNX_RUNTIME_CPU, OnnxBackend.ONNX_RUNTIME_CUDA}:
onnx_session = _ort_session(model, (backend.value,))
else:
raise ValueError(f"Unsupported backend: {backend}")
return onnx_session
def _compare_onnx_pytorch_outputs_in_np(
onnx_outs: _OutputsType,
pt_outs: _OutputsType,
options: VerificationOptions,
) -> None:
assert len(onnx_outs) == len(pt_outs), (
f"Number of outputs differ ONNX runtime: ({len(onnx_outs)}) PyTorch: ({len(pt_outs)})"
)
acceptable_error_percentage = options.acceptable_error_percentage
if acceptable_error_percentage and (
acceptable_error_percentage > 1.0 or acceptable_error_percentage < 0.0
):
raise ValueError(
"If set, acceptable_error_percentage should be between 0.0 and 1.0"
)
for ort_out, pt_out in zip(onnx_outs, pt_outs):
try:
# TODO: Remove `check_shape` option once every shape inconsistent issue is addressed.
if not options.check_shape:
# Allow different but broadcastable output shapes.
ort_out, pt_out = np.broadcast_arrays(ort_out, pt_out)
torch.testing.assert_close(
ort_out,
pt_out,
rtol=options.rtol,
atol=options.atol,
check_dtype=options.check_dtype,
equal_nan=True,
)
except AssertionError as e:
if acceptable_error_percentage:
error_percentage = 1 - np.sum(
np.isclose(ort_out, pt_out, rtol=options.rtol, atol=options.atol)
) / np.prod(ort_out.shape) # pyrefly: ignore [missing-attribute]
if error_percentage <= acceptable_error_percentage:
warnings.warn(
f"Suppressed AssertionError:\n{e}.\n"
f"Error percentage {error_percentage} "
f"within acceptable range {acceptable_error_percentage}.",
stacklevel=2,
)
continue
# pyrefly: ignore [missing-attribute]
if ort_out.dtype == np.uint8 or ort_out.dtype == np.int8:
warnings.warn("ONNX output is quantized", stacklevel=2)
# pyrefly: ignore [missing-attribute]
if pt_out.dtype == np.uint8 or pt_out.dtype == np.int8:
warnings.warn("PyTorch output is quantized", stacklevel=2)
raise
def _compare_onnx_pytorch_outputs(
onnx_outs: _OutputsType,
pt_outs: Any,
options: VerificationOptions,
) -> None:
"""
Compare ONNX and PyTorch outputs.
Args:
onnx_outs: outputs from ONNX backend.
pt_outs: outputs from PyTorch.
options: options for verification.
Raises:
AssertionError: if outputs from ONNX model and PyTorch model are not
equal up to specified precision.
ValueError: if arguments provided are invalid.
"""
if options.ignore_none:
# torch.jit._flatten filters None type
pt_outs, _ = torch.jit._flatten(pt_outs)
else:
pt_outs = _inline_flatten_list([pt_outs], [])
pt_outs_np = _unpack_to_numpy(pt_outs, cast_onnx_accepted=False)
onnx_outs = _inline_flatten_list(onnx_outs, [])
_compare_onnx_pytorch_outputs_in_np(onnx_outs, pt_outs_np, options)
def _prepare_input_for_pytorch(args, kwargs):
"""Prepare input for PyTorch model execution.
Any future changes/formatting to the input before dispatching to the PyTorch
model should be made in this function.
Args:
args: positional arguments for PyTorch model forward method.
kwargs: keyword arguments for PyTorch model forward method.
Returns:
args: positional arguments for PyTorch model forward method.
kwargs: keyword arguments for PyTorch model forward method.
"""
if isinstance(args, (torch.Tensor, dict)):
args = (args,)
# In-place operators will update input tensor data as well.
# Thus inputs are replicated before every forward call.
args = copy.deepcopy(args)
if kwargs:
kwargs = copy.deepcopy(kwargs)
else:
kwargs = {}
return args, kwargs
def _prepare_input_for_export(args, kwargs):
"""Prepare input for ONNX model export.
Any future changes/formatting to the input before dispatching to the
:func:`torch.onnx.export` api should be made in this function.
Args:
args: positional arguments for PyTorch model forward method.
kwargs: keyword arguments for PyTorch model forward method.
Returns:
onnx_inputs: positional arguments for ONNX model export, as `args` in
:func:`torch.onnx.export`.
"""
args, kwargs = _prepare_input_for_pytorch(args, kwargs)
if not kwargs and len(args) > 0 and isinstance(args[-1], dict):
onnx_inputs = args + ({},)
elif kwargs:
onnx_inputs = args + (kwargs,)
else:
onnx_inputs = args
return onnx_inputs
def _prepare_input_for_onnx(
args, kwargs, remained_onnx_input_idx: Sequence[int] | None, flatten: bool
):
"""Prepare input for ONNX model execution in ONNX backend.
Any future changes/formatting to the input before dispatching to the ONNX backend
run should be made in this function.
Args:
args: positional arguments for PyTorch model forward method.
kwargs: keyword arguments for PyTorch model forward method.
remained_onnx_input_idx: indices of inputs to be used for ONNX model execution.
flatten: whether to flatten the input before dispatching to the ONNX model execution.
Returns:
onnx_inputs: positional arguments for ONNX model execution in ONNX backend.
"""
onnx_inputs = _prepare_input_for_export(args, kwargs)
if flatten:
onnx_inputs, _ = torch.jit._flatten(onnx_inputs)
elif onnx_inputs and onnx_inputs[-1] == {}:
# Handle empty kwargs (normally removed by flatten).
onnx_inputs = onnx_inputs[:-1]
if remained_onnx_input_idx is not None:
return [onnx_inputs[i] for i in remained_onnx_input_idx]
else:
return onnx_inputs
def _try_clone_model(model):
"""Used for preserving original model in case forward mutates model states."""
try:
return copy.deepcopy(model)
except Exception:
warnings.warn(
"Failed to clone model. Model state might be mutated during verification.",
stacklevel=2,
)
return model
def _compare_onnx_pytorch_model(
pt_model: _ModelType,
onnx_model_f: str | io.BytesIO,
input_args: _InputArgsType,
input_kwargs: _InputKwargsType | None,
additional_test_inputs: Sequence[_InputArgsType] | None,
options: VerificationOptions,
) -> None:
"""Compare outputs from ONNX model runs with outputs from PyTorch model runs.
Args:
pt_model: PyTorch model.
onnx_model_f: ONNX model file path or file-like object.
input_args: positional arguments for PyTorch model forward method.
input_kwargs: keyword arguments for PyTorch model forward method.
additional_test_inputs: additional positional arguments for PyTorch model
forward method.
options: options for verification.
Raises:
AssertionError: if outputs from ONNX model and PyTorch model are not
equal up to specified precision.
"""
onnx_session = _onnx_backend_session(onnx_model_f, options.backend)
def compare_onnx_pytorch_model_with_input(input_args, input_kwargs) -> None:
pt_args, pt_kwargs = _prepare_input_for_pytorch(input_args, input_kwargs)
# TODO: remove this and treat mutating model separately. See #77679
pt_model_copy = _try_clone_model(pt_model)
pt_outs = pt_model_copy(*pt_args, **pt_kwargs)
onnx_inputs = _prepare_input_for_onnx(
input_args, input_kwargs, options.remained_onnx_input_idx, options.flatten
)
onnx_outs = _run_onnx(onnx_session, onnx_inputs)
_compare_onnx_pytorch_outputs(
onnx_outs=onnx_outs,
pt_outs=pt_outs,
options=options,
)
compare_onnx_pytorch_model_with_input(input_args, input_kwargs)
if additional_test_inputs:
for test_input_args in additional_test_inputs:
compare_onnx_pytorch_model_with_input(test_input_args, {})
def verify(
model: _ModelType,
input_args: _InputArgsType,
input_kwargs: _InputKwargsType | None = None,
do_constant_folding: bool = True,
dynamic_axes: Mapping[str, Mapping[int, str] | Mapping[str, Sequence[int]]]
| None = None,
input_names: Sequence[str] | None = None,
output_names: Sequence[str] | None = None,
training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL,
opset_version: int | None = None,
keep_initializers_as_inputs: bool = True,
verbose: bool = False,
fixed_batch_size: bool = False,
use_external_data: bool = False,
additional_test_inputs: Sequence[_InputArgsType] | None = None,
options: VerificationOptions | None = None,
) -> None:
"""Verify model export to ONNX against original PyTorch model.
.. deprecated:: 2.7
Consider using ``torch.onnx.export(..., dynamo=True)`` and use the returned
``ONNXProgram`` to test the ONNX model.
Args:
model: See :func:`torch.onnx.export`.
input_args: See :func:`torch.onnx.export`.
input_kwargs: See :func:`torch.onnx.export`.
do_constant_folding: See :func:`torch.onnx.export`.
dynamic_axes: See :func:`torch.onnx.export`.
input_names: See :func:`torch.onnx.export`.
output_names: See :func:`torch.onnx.export`.
training: See :func:`torch.onnx.export`.
opset_version: See :func:`torch.onnx.export`.
keep_initializers_as_inputs: See :func:`torch.onnx.export`.
verbose: See :func:`torch.onnx.export`.
fixed_batch_size: Legacy argument, used only by rnn test cases.
use_external_data: Explicitly specify whether to export the model with external data.
additional_test_inputs: List of tuples. Each tuple is a group of
input arguments to test. Currently only ``*args`` are supported.
options: A VerificationOptions object that controls the verification behavior.
Raises:
AssertionError: if outputs from ONNX model and PyTorch model are not
equal up to specified precision.
ValueError: if arguments provided are invalid.
"""
if options is None:
options = VerificationOptions()
if training == torch.onnx.TrainingMode.TRAINING:
model.train()
elif training == torch.onnx.TrainingMode.EVAL:
model.eval()
with torch.no_grad(), contextlib.ExitStack() as stack:
model_f: str | io.BytesIO = io.BytesIO()
if use_external_data:
tmpdir_path = stack.enter_context(tempfile.TemporaryDirectory())
model_f = os.path.join(tmpdir_path, "model.onnx")
inputs_for_export = _prepare_input_for_export(input_args, input_kwargs)
# TODO(#77679): remove this and treat mutating model separately.
model_copy = _try_clone_model(model)
utils._export(
model,
inputs_for_export,
model_f,
opset_version=opset_version,
do_constant_folding=do_constant_folding,
keep_initializers_as_inputs=keep_initializers_as_inputs,
dynamic_axes=dynamic_axes,
input_names=input_names,
output_names=output_names,
fixed_batch_size=fixed_batch_size,
training=training,
verbose=verbose,
)
_compare_onnx_pytorch_model(
pt_model=model_copy,
onnx_model_f=model_f,
input_args=input_args,
input_kwargs=input_kwargs,
additional_test_inputs=additional_test_inputs,
options=options,
)
| VerificationOptions |
python | django__django | tests/admin_views/models.py | {
"start": 3482,
"end": 3591
} | class ____(models.Model):
name = models.CharField(max_length=100, blank=True)
| RowLevelChangePermissionModel |
python | tornadoweb__tornado | tornado/iostream.py | {
"start": 3218,
"end": 3426
} | class ____(Exception):
"""Exception raised when a read cannot be satisfied.
Raised by ``read_until`` and ``read_until_regex`` with a ``max_bytes``
argument.
"""
pass
| UnsatisfiableReadError |
python | tensorflow__tensorflow | tensorflow/python/saved_model/model_utils/export_output.py | {
"start": 14629,
"end": 14991
} | class ____(_SupervisedOutput):
"""Represents the output of a supervised training process.
This class generates the appropriate signature def for exporting
training output by type-checking and wrapping loss, predictions, and metrics
values.
"""
def _get_signature_def_fn(self):
return signature_def_utils.supervised_train_signature_def
| TrainOutput |
python | getsentry__sentry | src/sentry/integrations/api/serializers/models/doc_integration_avatar.py | {
"start": 240,
"end": 573
} | class ____(Serializer):
def serialize(
self, obj: DocIntegrationAvatar, attrs, user, **kwargs
) -> MutableMapping[str, Any]:
return {
"avatarType": obj.get_avatar_type_display(),
"avatarUuid": obj.ident,
"avatarUrl": obj.absolute_url(),
}
| DocIntegrationAvatarSerializer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/recursiveTypeAlias8.py | {
"start": 330,
"end": 371
} | class ____(TypedDict):
type: int
| ClassC |
python | getsentry__sentry | src/sentry/uptime/subscriptions/subscriptions.py | {
"start": 2774,
"end": 3491
} | class ____(ValueError):
pass
def check_uptime_subscription_limit(organization_id: int) -> None:
"""
Check if adding a new manual uptime monitor would exceed the organization's limit.
Raises MaxManualUptimeSubscriptionsReached if the limit would be exceeded.
"""
manual_subscription_count = Detector.objects.filter(
status=ObjectStatus.ACTIVE,
enabled=True,
type=GROUP_TYPE_UPTIME_DOMAIN_CHECK_FAILURE,
project__organization_id=organization_id,
config__mode=UptimeMonitorMode.MANUAL,
).count()
if manual_subscription_count >= MAX_MANUAL_SUBSCRIPTIONS_PER_ORG:
raise MaxManualUptimeSubscriptionsReached
| MaxManualUptimeSubscriptionsReached |
python | prabhupant__python-ds | data_structures/graphs/not_reachable_nodes.py | {
"start": 37,
"end": 813
} | class ____:
def __init__(self, vertices):
self.V = vertices
self.graph = defaultdict(list)
def add_edge(self, u, v):
self.graph[u].append(v)
self.graph[v].append(u)
def dfs_util(self, v, visited):
visited[v] = True
for i in self.graph[v]:
if visited[i] == False:
self.dfs_util(i, visited)
def count_nodes(self, v):
visited = [False] * self.V
self.dfs_util(v, visited)
count = 0
for i in range(self.V):
if visited[i] == False:
count += 1
return count
g = Graph(8)
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(3, 4)
g.add_edge(4, 5)
g.add_edge(6, 7)
print(g.count_nodes(0))
| Graph |
python | openai__openai-python | src/openai/types/realtime/call_refer_params.py | {
"start": 206,
"end": 422
} | class ____(TypedDict, total=False):
target_uri: Required[str]
"""URI that should appear in the SIP Refer-To header.
Supports values like `tel:+14155550123` or `sip:agent@example.com`.
"""
| CallReferParams |
python | openai__gym | gym/wrappers/rescale_action.py | {
"start": 150,
"end": 3100
} | class ____(gym.ActionWrapper):
"""Affinely rescales the continuous action space of the environment to the range [min_action, max_action].
The base environment :attr:`env` must have an action space of type :class:`spaces.Box`. If :attr:`min_action`
or :attr:`max_action` are numpy arrays, the shape must match the shape of the environment's action space.
Example:
>>> import gym
>>> env = gym.make('BipedalWalker-v3')
>>> env.action_space
Box(-1.0, 1.0, (4,), float32)
>>> min_action = -0.5
>>> max_action = np.array([0.0, 0.5, 1.0, 0.75])
>>> env = RescaleAction(env, min_action=min_action, max_action=max_action)
>>> env.action_space
Box(-0.5, [0. 0.5 1. 0.75], (4,), float32)
>>> RescaleAction(env, min_action, max_action).action_space == gym.spaces.Box(min_action, max_action)
True
"""
def __init__(
self,
env: gym.Env,
min_action: Union[float, int, np.ndarray],
max_action: Union[float, int, np.ndarray],
):
"""Initializes the :class:`RescaleAction` wrapper.
Args:
env (Env): The environment to apply the wrapper
min_action (float, int or np.ndarray): The min values for each action. This may be a numpy array or a scalar.
max_action (float, int or np.ndarray): The max values for each action. This may be a numpy array or a scalar.
"""
assert isinstance(
env.action_space, spaces.Box
), f"expected Box action space, got {type(env.action_space)}"
assert np.less_equal(min_action, max_action).all(), (min_action, max_action)
super().__init__(env)
self.min_action = (
np.zeros(env.action_space.shape, dtype=env.action_space.dtype) + min_action
)
self.max_action = (
np.zeros(env.action_space.shape, dtype=env.action_space.dtype) + max_action
)
self.action_space = spaces.Box(
low=min_action,
high=max_action,
shape=env.action_space.shape,
dtype=env.action_space.dtype,
)
def action(self, action):
"""Rescales the action affinely from [:attr:`min_action`, :attr:`max_action`] to the action space of the base environment, :attr:`env`.
Args:
action: The action to rescale
Returns:
The rescaled action
"""
assert np.all(np.greater_equal(action, self.min_action)), (
action,
self.min_action,
)
assert np.all(np.less_equal(action, self.max_action)), (action, self.max_action)
low = self.env.action_space.low
high = self.env.action_space.high
action = low + (high - low) * (
(action - self.min_action) / (self.max_action - self.min_action)
)
action = np.clip(action, low, high)
return action
| RescaleAction |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/backfill.py | {
"start": 3866,
"end": 4034
} | class ____(graphene.ObjectType):
backfill_id = graphene.NonNull(graphene.String)
class Meta:
name = "ResumeBackfillSuccess"
| GrapheneResumeBackfillSuccess |
python | huggingface__transformers | src/transformers/models/moonshine/modeling_moonshine.py | {
"start": 2267,
"end": 2857
} | class ____(nn.Module):
def __init__(self, config, hidden_act):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
| MoonshineEncoderMLP |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0068_migrate_anomaly_detection_alerts.py | {
"start": 867,
"end": 936
} | class ____(Enum):
DETECTED = 0
ALERT_TRIGGERED = 2
| IncidentType |
python | yandexdataschool__Practical_RL | week04_approx_rl/dqn/replay_buffer.py | {
"start": 2401,
"end": 4296
} | class ____(ReplayBuffer):
"""
ReplayBuffer for vectorized environments, which are wrapped into FrameBuffers.
If an environment is first wrapped into a FrameBuffer and then vectorized,
then the resulting VecEnv will not use LazyFrames, but it will directly
use np.ndarrays, thus greatly increasing RAM consumption by the buffer.
Instead, we first vectorize an environment and only then wrap in into FrameBuffers.
It's not as convenient, but it keeps the advantage in memory from LazyFrames.
So,
observations and next_obervations are stored as LazyFrames
of shape (n_frames, n_envs, ...)
actions, rewards and dones are stored as np.ndarrays of shape (n_envs,).
"""
# (n_frames, n_envs, *)
def _encode_sample(self, idxes):
"""
For each index in idxes samples a (s, a, r, s', done) transition
from a randomly chosen environment of the corresponding VecEnv.
"""
obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done = data
n_envs = action.shape[0]
env_idx_chosen_for_sample = random.randint(0, n_envs - 1)
obses_t.append(
np.array(obs_t, copy=False)[:, env_idx_chosen_for_sample],
)
actions.append(np.array(action, copy=False)[env_idx_chosen_for_sample])
rewards.append(reward[env_idx_chosen_for_sample])
obses_tp1.append(
np.array(obs_tp1, copy=False)[:, env_idx_chosen_for_sample],
)
dones.append(done[env_idx_chosen_for_sample])
return (
np.array(obses_t),
np.array(actions),
np.array(rewards),
np.array(obses_tp1),
np.array(dones),
)
| LazyFramesVectorReplayBuffer |
python | conda__conda | conda/plugins/hookspec.py | {
"start": 1386,
"end": 22212
} | class ____:
"""Collection of all supported conda plugin hookspecs."""
@_hookspec
def conda_solvers(self) -> Iterable[CondaSolver]:
"""
Register solvers in conda.
**Example:**
.. code-block:: python
import logging
from conda import plugins
from conda.core import solve
log = logging.getLogger(__name__)
class VerboseSolver(solve.Solver):
def solve_final_state(self, *args, **kwargs):
log.info("My verbose solver!")
return super().solve_final_state(*args, **kwargs)
@plugins.hookimpl
def conda_solvers():
yield plugins.CondaSolver(
name="verbose-classic",
backend=VerboseSolver,
)
:return: An iterable of solver entries.
"""
yield from ()
@_hookspec
def conda_subcommands(self) -> Iterable[CondaSubcommand]:
"""
Register external subcommands in conda.
**Example:**
.. code-block:: python
from conda import plugins
def example_command(args):
print("This is an example command!")
@plugins.hookimpl
def conda_subcommands():
yield plugins.CondaSubcommand(
name="example",
summary="example command",
action=example_command,
)
:return: An iterable of subcommand entries.
"""
yield from ()
@_hookspec
def conda_virtual_packages(self) -> Iterable[CondaVirtualPackage]:
"""
Register virtual packages in Conda.
**Example:**
.. code-block:: python
from conda import plugins
@plugins.hookimpl
def conda_virtual_packages():
yield plugins.CondaVirtualPackage(
name="my_custom_os",
version="1.2.3",
build="x86_64",
)
:return: An iterable of virtual package entries.
"""
yield from ()
@_hookspec
def conda_pre_commands(self) -> Iterable[CondaPreCommand]:
"""
Register pre-command functions in conda.
**Example:**
.. code-block:: python
from conda import plugins
def example_pre_command(command):
print("pre-command action")
@plugins.hookimpl
def conda_pre_commands():
yield plugins.CondaPreCommand(
name="example-pre-command",
action=example_pre_command,
run_for={"install", "create"},
)
"""
yield from ()
@_hookspec
def conda_post_commands(self) -> Iterable[CondaPostCommand]:
"""
Register post-command functions in conda.
**Example:**
.. code-block:: python
from conda import plugins
def example_post_command(command):
print("post-command action")
@plugins.hookimpl
def conda_post_commands():
yield plugins.CondaPostCommand(
name="example-post-command",
action=example_post_command,
run_for={"install", "create"},
)
"""
yield from ()
@_hookspec
def conda_auth_handlers(self) -> Iterable[CondaAuthHandler]:
"""
Register a conda auth handler derived from the requests API.
This plugin hook allows attaching requests auth handler subclasses,
e.g. when authenticating requests against individual channels hosted
at HTTP/HTTPS services.
**Example:**
.. code-block:: python
import os
from conda import plugins
from requests.auth import AuthBase
class EnvironmentHeaderAuth(AuthBase):
def __init__(self, *args, **kwargs):
self.username = os.environ["EXAMPLE_CONDA_AUTH_USERNAME"]
self.password = os.environ["EXAMPLE_CONDA_AUTH_PASSWORD"]
def __call__(self, request):
request.headers["X-Username"] = self.username
request.headers["X-Password"] = self.password
return request
@plugins.hookimpl
def conda_auth_handlers():
yield plugins.CondaAuthHandler(
name="environment-header-auth",
handler=EnvironmentHeaderAuth,
)
"""
yield from ()
@_hookspec
def conda_health_checks(self) -> Iterable[CondaHealthCheck]:
"""
Register health checks for conda doctor.
This plugin hook allows you to add more "health checks" to conda doctor
that you can write to diagnose problems in your conda environment.
Check out the health checks already shipped with conda for inspiration.
**Example:**
.. code-block:: python
from conda import plugins
def example_health_check(prefix: str, verbose: bool):
print("This is an example health check!")
@plugins.hookimpl
def conda_health_checks():
yield plugins.CondaHealthCheck(
name="example-health-check",
action=example_health_check,
)
"""
yield from ()
@_hookspec
def conda_pre_transaction_actions(self) -> Iterable[CondaPreTransactionAction]:
"""Register pre-transaction hooks.
Pre-transaction hooks run before all other actions run in a
UnlinkLinkTransaction. For information about the Action class,
see :class:`~conda.core.path_actions.Action`.
**Example:**
.. code-block:: python
from conda import plugins
from conda.core.path_actions import Action
class PrintAction(Action):
def verify(self):
print("Performing verification...")
self._verified = True
def execute(self):
print(
self.transaction_context,
self.target_prefix,
self.unlink_precs,
self.link_precs,
self.remove_specs,
self.update_specs,
self.neutered_specs,
)
def reverse(self):
print("Reversing only happens when `execute` raises an exception.")
def cleanup(self):
print("Carrying out cleanup...")
class PrintActionPlugin:
@plugins.hookimpl
def conda_pre_transaction_actions(
self,
) -> Iterable[plugins.CondaPreTransactionAction]:
yield plugins.CondaPreTransactionAction(
name="example-pre-transaction-action",
action=PrintAction,
)
"""
yield from ()
@_hookspec
def conda_post_transaction_actions(self) -> Iterable[CondaPostTransactionAction]:
"""Register post-transaction hooks.
Post-transaction hooks run after all other actions run in a
UnlinkLinkTransaction. For information about the Action class,
see :class:`~conda.core.path_actions.Action`.
**Example:**
.. code-block:: python
from conda import plugins
from conda.core.path_actions import Action
class PrintAction(Action):
def verify(self):
print("Performing verification...")
self._verified = True
def execute(self):
print(
self.transaction_context,
self.target_prefix,
self.unlink_precs,
self.link_precs,
self.remove_specs,
self.update_specs,
self.neutered_specs,
)
def reverse(self):
print("Reversing only happens when `execute` raises an exception.")
def cleanup(self):
print("Carrying out cleanup...")
class PrintActionPlugin:
@plugins.hookimpl
def conda_post_transaction_actions(
self,
) -> Iterable[plugins.CondaPostTransactionAction]:
yield plugins.CondaPostTransactionAction(
name="example-post-transaction-action",
action=PrintAction,
)
"""
yield from ()
@_hookspec
def conda_pre_solves(self) -> Iterable[CondaPreSolve]:
"""
Register pre-solve functions in conda that are used in the
general solver API, before the solver processes the package specs in
search of a solution.
**Example:**
.. code-block:: python
from conda import plugins
from conda.models.match_spec import MatchSpec
def example_pre_solve(
specs_to_add: frozenset[MatchSpec],
specs_to_remove: frozenset[MatchSpec],
):
print(f"Adding {len(specs_to_add)} packages")
print(f"Removing {len(specs_to_remove)} packages")
@plugins.hookimpl
def conda_pre_solves():
yield plugins.CondaPreSolve(
name="example-pre-solve",
action=example_pre_solve,
)
"""
yield from ()
@_hookspec
def conda_post_solves(self) -> Iterable[CondaPostSolve]:
"""
Register post-solve functions in conda that are used in the
general solver API, after the solver has provided the package
records to add or remove from the conda environment.
**Example:**
.. code-block:: python
from conda import plugins
from conda.models.records import PackageRecord
def example_post_solve(
repodata_fn: str,
unlink_precs: tuple[PackageRecord, ...],
link_precs: tuple[PackageRecord, ...],
):
print(f"Uninstalling {len(unlink_precs)} packages")
print(f"Installing {len(link_precs)} packages")
@plugins.hookimpl
def conda_post_solves():
yield plugins.CondaPostSolve(
name="example-post-solve",
action=example_post_solve,
)
"""
yield from ()
@_hookspec
def conda_settings(self) -> Iterable[CondaSetting]:
"""
Register new setting
The example below defines a simple string type parameter
**Example:**
.. code-block:: python
from conda import plugins
from conda.common.configuration import PrimitiveParameter, SequenceParameter
@plugins.hookimpl
def conda_settings():
yield plugins.CondaSetting(
name="example_option",
description="This is an example option",
parameter=PrimitiveParameter("default_value", element_type=str),
aliases=("example_option_alias",),
)
"""
yield from ()
@_hookspec
def conda_reporter_backends(self) -> Iterable[CondaReporterBackend]:
"""
Register new reporter backend
The example below defines a reporter backend that uses the ``pprint`` module in Python.
**Example:**
.. code-block:: python
from pprint import pformat
from conda import plugins
from conda.plugins.types import (
CondaReporterBackend,
ReporterRendererBase,
ProgressBarBase,
)
class PprintReporterRenderer(ReporterRendererBase):
"Implementation of the ReporterRendererBase"
def detail_view(self, data):
return pformat(data)
def envs_list(self, data):
formatted_data = pformat(data)
return f"Environments: {formatted_data}"
def progress_bar(self, description, io_context_manager) -> ProgressBarBase:
"Returns our custom progress bar implementation"
return PprintProgressBar(description, io_context_manager)
class PprintProgressBar(ProgressBarBase):
"Blank implementation of ProgressBarBase which does nothing"
def update_to(self, fraction) -> None:
pass
def refresh(self) -> None:
pass
def close(self) -> None:
pass
@plugins.hookimpl
def conda_reporter_backends():
yield CondaReporterBackend(
name="pprint",
description="Reporter backend based on the pprint module",
renderer=PprintReporterRenderer,
)
"""
yield from ()
@_hookspec
def conda_session_headers(self, host: str) -> Iterable[CondaRequestHeader]:
"""
Register new HTTP request headers
The example below defines how to add HTTP headers for all requests
with the hostname of ``example.com``.
**Example:**
.. code-block:: python
from conda import plugins
HOSTS = {"example.com", "sub.example.com"}
@plugins.hookimpl
def conda_session_headers(host: str):
if host in HOSTS:
yield plugins.CondaRequestHeader(
name="Example-Header",
value="example",
)
"""
yield from ()
@_hookspec
def conda_request_headers(
self, host: str, path: str
) -> Iterable[CondaRequestHeader]:
"""
Register new HTTP request headers
The example below defines how to add HTTP headers for all requests
with the hostname of ``example.com`` and a ``path/to/endpoint.json`` path.
**Example:**
.. code-block:: python
from conda import plugins
HOSTS = {"example.com", "sub.example.com"}
ENDPOINT = "/path/to/endpoint.json"
@plugins.hookimpl
def conda_request_headers(host: str, path: str):
if host in HOSTS and path == ENDPOINT:
yield plugins.CondaRequestHeader(
name="Example-Header",
value="example",
)
"""
yield from ()
@_hookspec
def conda_prefix_data_loaders() -> Iterable[CondaPrefixDataLoader]:
"""
Register new loaders for PrefixData
The example below defines how to expose the packages installed
by a hypothetical 'penguin' tool as conda packages.
**Example:**
.. code-block:: python
from pathlib import Path
from conda import plugins
from conda.common.path import PathType
from conda.models.records import PrefixRecord
from conda.plugins.types import CondaPrefixDataLoader
@plugins.hookimpl
def conda_prefix_data_loaders():
yield CondaPrefixDataLoader(
"hypothetical",
load_hypothetical_packages,
)
def load_hypothetical_packages(
prefix: PathType, records: dict[str, PrefixRecord]
) -> dict[str, PrefixRecord]:
penguin_records = {}
for info in Path(prefix).glob("lib/penguin/*.penguin-info"):
name, version = info.name.rsplit("-", 1)
kwargs = {} # retrieve extra fields here
penguin_records[name] = PrefixRecord(
name=name, version=version, build_number=0, build="0", **kwargs
)
records.update(penguin_records)
return penguin_records
"""
yield from ()
@_hookspec
def conda_environment_specifiers(self) -> Iterable[CondaEnvironmentSpecifier]:
"""
**EXPERIMENTAL**
Register new conda env spec type
The example below defines a type of conda env file called "random". It
can parse a file with the file extension `.random`. This plugin will ignore
whatever is in the input environment file and produce an environment with a
random name and with random packages.
**Example:**
.. code-block:: python
import json
import random
from pathlib import Path
from subprocess import run
from conda import plugins
from ...plugins.types import EnvironmentSpecBase
from conda.env.env import Environment
packages = ["python", "numpy", "scipy", "matplotlib", "pandas", "scikit-learn"]
class RandomSpec(EnvironmentSpecBase):
extensions = {".random"}
def __init__(self, filename: str):
self.filename = filename
def can_handle(self):
# Return early if no filename was provided
if self.filename is None:
return False
# Extract the file extension (e.g., '.txt' or '' if no extension)
file_ext = os.path.splitext(self.filename)[1]
# Check if the file has a supported extension and exists
return any(
spec_ext == file_ext and os.path.exists(self.filename)
for spec_ext in RandomSpec.extensions
)
def env(self):
return Environment(
name="".join(random.choice("0123456789abcdef") for i in range(6)),
dependencies=[random.choice(packages) for i in range(6)],
)
@plugins.hookimpl
def conda_environment_specifiers():
yield plugins.CondaEnvSpec(
name="random",
environment_spec=RandomSpec,
)
"""
yield from ()
@_hookspec
def conda_environment_exporters(self) -> Iterable[CondaEnvironmentExporter]:
"""
Register new conda environment exporter
Environment exporters serialize conda Environment objects to different
output formats (JSON, TOML, XML, etc.) for the 'conda export' command.
This is separate from environment specifiers which parse input files.
**Example:**
.. code-block:: python
import tomlkit
from conda import plugins
from conda.exceptions import CondaValueError
from conda.models.environment import Environment
from conda.plugins.types import CondaEnvironmentExporter
def export_toml(env: Environment) -> str:
# Export Environment to TOML format
# For formats that use the standard dictionary structure,
# you can use the shared utility:
from conda.plugins.environment_exporters.standard import to_dict
env_dict = to_dict(env)
# Create TOML document
toml_doc = tomlkit.document()
if env_dict.get("name"):
toml_doc["name"] = env_dict["name"]
if env_dict.get("channels"):
toml_doc["channels"] = env_dict["channels"]
if env_dict.get("dependencies"):
toml_doc["dependencies"] = env_dict["dependencies"]
if env_dict.get("variables"):
toml_doc["variables"] = env_dict["variables"]
return tomlkit.dumps(toml_doc)
@plugins.hookimpl
def conda_environment_exporters():
yield CondaEnvironmentExporter(
name="environment-toml",
aliases=("toml",),
default_filenames=("environment.toml",),
export=export_toml,
)
"""
yield from ()
| CondaSpecs |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/duplicateDeclaration2.py | {
"start": 96,
"end": 937
} | class ____:
def __init__(self):
self._property: str = ""
# This should generate an error because "prop"
# is overwritten below.
@property
def prop(self):
return self._property
# This should generate an error because "prop"
# is overwritten below.
@prop.setter
def prop(self, val: str):
self._property = val
# This should generate an error because "prop"
# is overwritten below.
@prop.deleter
def prop(self):
pass
# This should generate an error because "prop"
# is overwritten below.
@property
def prop(self):
return self._property
@property
def prop(self):
return self._property
@prop.setter
def prop(self, val: str):
self._property = val
@prop.deleter
def prop(self):
pass
| MyClass |
python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/shell_tool.py | {
"start": 11353,
"end": 26485
} | class ____(AgentMiddleware[ShellToolState, Any]):
"""Middleware that registers a persistent shell tool for agents.
The middleware exposes a single long-lived shell session. Use the execution policy
to match your deployment's security posture:
* `HostExecutionPolicy` – full host access; best for trusted environments where the
agent already runs inside a container or VM that provides isolation.
* `CodexSandboxExecutionPolicy` – reuses the Codex CLI sandbox for additional
syscall/filesystem restrictions when the CLI is available.
* `DockerExecutionPolicy` – launches a separate Docker container for each agent run,
providing harder isolation, optional read-only root filesystems, and user
remapping.
When no policy is provided the middleware defaults to `HostExecutionPolicy`.
"""
state_schema = ShellToolState
def __init__(
self,
workspace_root: str | Path | None = None,
*,
startup_commands: tuple[str, ...] | list[str] | str | None = None,
shutdown_commands: tuple[str, ...] | list[str] | str | None = None,
execution_policy: BaseExecutionPolicy | None = None,
redaction_rules: tuple[RedactionRule, ...] | list[RedactionRule] | None = None,
tool_description: str | None = None,
tool_name: str = SHELL_TOOL_NAME,
shell_command: Sequence[str] | str | None = None,
env: Mapping[str, Any] | None = None,
) -> None:
"""Initialize an instance of `ShellToolMiddleware`.
Args:
workspace_root: Base directory for the shell session.
If omitted, a temporary directory is created when the agent starts and
removed when it ends.
startup_commands: Optional commands executed sequentially after the session
starts.
shutdown_commands: Optional commands executed before the session shuts down.
execution_policy: Execution policy controlling timeouts, output limits, and
resource configuration.
Defaults to `HostExecutionPolicy` for native execution.
redaction_rules: Optional redaction rules to sanitize command output before
returning it to the model.
tool_description: Optional override for the registered shell tool
description.
tool_name: Name for the registered shell tool.
Defaults to `"shell"`.
shell_command: Optional shell executable (string) or argument sequence used
to launch the persistent session.
Defaults to an implementation-defined bash command.
env: Optional environment variables to supply to the shell session.
Values are coerced to strings before command execution. If omitted, the
session inherits the parent process environment.
"""
super().__init__()
self._workspace_root = Path(workspace_root) if workspace_root else None
self._tool_name = tool_name
self._shell_command = self._normalize_shell_command(shell_command)
self._environment = self._normalize_env(env)
if execution_policy is not None:
self._execution_policy = execution_policy
else:
self._execution_policy = HostExecutionPolicy()
rules = redaction_rules or ()
self._redaction_rules: tuple[ResolvedRedactionRule, ...] = tuple(
rule.resolve() for rule in rules
)
self._startup_commands = self._normalize_commands(startup_commands)
self._shutdown_commands = self._normalize_commands(shutdown_commands)
# Create a proper tool that executes directly (no interception needed)
description = tool_description or DEFAULT_TOOL_DESCRIPTION
@tool(self._tool_name, args_schema=_ShellToolInput, description=description)
def shell_tool(
*,
runtime: ToolRuntime[None, ShellToolState],
command: str | None = None,
restart: bool = False,
) -> ToolMessage | str:
resources = self._get_or_create_resources(runtime.state)
return self._run_shell_tool(
resources,
{"command": command, "restart": restart},
tool_call_id=runtime.tool_call_id,
)
self._shell_tool = shell_tool
self.tools = [self._shell_tool]
@staticmethod
def _normalize_commands(
commands: tuple[str, ...] | list[str] | str | None,
) -> tuple[str, ...]:
if commands is None:
return ()
if isinstance(commands, str):
return (commands,)
return tuple(commands)
@staticmethod
def _normalize_shell_command(
shell_command: Sequence[str] | str | None,
) -> tuple[str, ...]:
if shell_command is None:
return ("/bin/bash",)
normalized = (shell_command,) if isinstance(shell_command, str) else tuple(shell_command)
if not normalized:
msg = "Shell command must contain at least one argument."
raise ValueError(msg)
return normalized
@staticmethod
def _normalize_env(env: Mapping[str, Any] | None) -> dict[str, str] | None:
if env is None:
return None
normalized: dict[str, str] = {}
for key, value in env.items():
if not isinstance(key, str):
msg = "Environment variable names must be strings."
raise TypeError(msg)
normalized[key] = str(value)
return normalized
def before_agent(self, state: ShellToolState, runtime: Runtime) -> dict[str, Any] | None: # noqa: ARG002
"""Start the shell session and run startup commands."""
resources = self._get_or_create_resources(state)
return {"shell_session_resources": resources}
async def abefore_agent(self, state: ShellToolState, runtime: Runtime) -> dict[str, Any] | None:
"""Async start the shell session and run startup commands."""
return self.before_agent(state, runtime)
def after_agent(self, state: ShellToolState, runtime: Runtime) -> None: # noqa: ARG002
"""Run shutdown commands and release resources when an agent completes."""
resources = state.get("shell_session_resources")
if not isinstance(resources, _SessionResources):
# Resources were never created, nothing to clean up
return
try:
self._run_shutdown_commands(resources.session)
finally:
resources._finalizer()
async def aafter_agent(self, state: ShellToolState, runtime: Runtime) -> None:
"""Async run shutdown commands and release resources when an agent completes."""
return self.after_agent(state, runtime)
def _get_or_create_resources(self, state: ShellToolState) -> _SessionResources:
"""Get existing resources from state or create new ones if they don't exist.
This method enables resumability by checking if resources already exist in the state
(e.g., after an interrupt), and only creating new resources if they're not present.
Args:
state: The agent state which may contain shell session resources.
Returns:
Session resources, either retrieved from state or newly created.
"""
resources = state.get("shell_session_resources")
if isinstance(resources, _SessionResources):
return resources
new_resources = self._create_resources()
# Cast needed to make state dict-like for mutation
cast("dict[str, Any]", state)["shell_session_resources"] = new_resources
return new_resources
def _create_resources(self) -> _SessionResources:
workspace = self._workspace_root
tempdir: tempfile.TemporaryDirectory[str] | None = None
if workspace is None:
tempdir = tempfile.TemporaryDirectory(prefix=SHELL_TEMP_PREFIX)
workspace_path = Path(tempdir.name)
else:
workspace_path = workspace
workspace_path.mkdir(parents=True, exist_ok=True)
session = ShellSession(
workspace_path,
self._execution_policy,
self._shell_command,
self._environment or {},
)
try:
session.start()
LOGGER.info("Started shell session in %s", workspace_path)
self._run_startup_commands(session)
except BaseException:
LOGGER.exception("Starting shell session failed; cleaning up resources.")
session.stop(self._execution_policy.termination_timeout)
if tempdir is not None:
tempdir.cleanup()
raise
return _SessionResources(session=session, tempdir=tempdir, policy=self._execution_policy)
def _run_startup_commands(self, session: ShellSession) -> None:
if not self._startup_commands:
return
for command in self._startup_commands:
result = session.execute(command, timeout=self._execution_policy.startup_timeout)
if result.timed_out or (result.exit_code not in (0, None)):
msg = f"Startup command '{command}' failed with exit code {result.exit_code}"
raise RuntimeError(msg)
def _run_shutdown_commands(self, session: ShellSession) -> None:
if not self._shutdown_commands:
return
for command in self._shutdown_commands:
try:
result = session.execute(command, timeout=self._execution_policy.command_timeout)
if result.timed_out:
LOGGER.warning("Shutdown command '%s' timed out.", command)
elif result.exit_code not in (0, None):
LOGGER.warning(
"Shutdown command '%s' exited with %s.", command, result.exit_code
)
except (RuntimeError, ToolException, OSError) as exc:
LOGGER.warning(
"Failed to run shutdown command '%s': %s", command, exc, exc_info=True
)
def _apply_redactions(self, content: str) -> tuple[str, dict[str, list[PIIMatch]]]:
"""Apply configured redaction rules to command output."""
matches_by_type: dict[str, list[PIIMatch]] = {}
updated = content
for rule in self._redaction_rules:
updated, matches = rule.apply(updated)
if matches:
matches_by_type.setdefault(rule.pii_type, []).extend(matches)
return updated, matches_by_type
def _run_shell_tool(
self,
resources: _SessionResources,
payload: dict[str, Any],
*,
tool_call_id: str | None,
) -> Any:
session = resources.session
if payload.get("restart"):
LOGGER.info("Restarting shell session on request.")
try:
session.restart()
self._run_startup_commands(session)
except BaseException as err:
LOGGER.exception("Restarting shell session failed; session remains unavailable.")
msg = "Failed to restart shell session."
raise ToolException(msg) from err
message = "Shell session restarted."
return self._format_tool_message(message, tool_call_id, status="success")
command = payload.get("command")
if not command or not isinstance(command, str):
msg = "Shell tool expects a 'command' string when restart is not requested."
raise ToolException(msg)
LOGGER.info("Executing shell command: %s", command)
result = session.execute(command, timeout=self._execution_policy.command_timeout)
if result.timed_out:
timeout_seconds = self._execution_policy.command_timeout
message = f"Error: Command timed out after {timeout_seconds:.1f} seconds."
return self._format_tool_message(
message,
tool_call_id,
status="error",
artifact={
"timed_out": True,
"exit_code": None,
},
)
try:
sanitized_output, matches = self._apply_redactions(result.output)
except PIIDetectionError as error:
LOGGER.warning("Blocking command output due to detected %s.", error.pii_type)
message = f"Output blocked: detected {error.pii_type}."
return self._format_tool_message(
message,
tool_call_id,
status="error",
artifact={
"timed_out": False,
"exit_code": result.exit_code,
"matches": {error.pii_type: error.matches},
},
)
sanitized_output = sanitized_output or "<no output>"
if result.truncated_by_lines:
sanitized_output = (
f"{sanitized_output.rstrip()}\n\n"
f"... Output truncated at {self._execution_policy.max_output_lines} lines "
f"(observed {result.total_lines})."
)
if result.truncated_by_bytes and self._execution_policy.max_output_bytes is not None:
sanitized_output = (
f"{sanitized_output.rstrip()}\n\n"
f"... Output truncated at {self._execution_policy.max_output_bytes} bytes "
f"(observed {result.total_bytes})."
)
if result.exit_code not in (0, None):
sanitized_output = f"{sanitized_output.rstrip()}\n\nExit code: {result.exit_code}"
final_status: Literal["success", "error"] = "error"
else:
final_status = "success"
artifact = {
"timed_out": False,
"exit_code": result.exit_code,
"truncated_by_lines": result.truncated_by_lines,
"truncated_by_bytes": result.truncated_by_bytes,
"total_lines": result.total_lines,
"total_bytes": result.total_bytes,
"redaction_matches": matches,
}
return self._format_tool_message(
sanitized_output,
tool_call_id,
status=final_status,
artifact=artifact,
)
def _format_tool_message(
self,
content: str,
tool_call_id: str | None,
*,
status: Literal["success", "error"],
artifact: dict[str, Any] | None = None,
) -> ToolMessage | str:
artifact = artifact or {}
if tool_call_id is None:
return content
return ToolMessage(
content=content,
tool_call_id=tool_call_id,
name=self._tool_name,
status=status,
artifact=artifact,
)
__all__ = [
"CodexSandboxExecutionPolicy",
"DockerExecutionPolicy",
"HostExecutionPolicy",
"RedactionRule",
"ShellToolMiddleware",
]
| ShellToolMiddleware |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/shortcuts/progress_bar/formatters.py | {
"start": 10202,
"end": 11739
} | class ____(Formatter):
"""
For the fun. Add rainbow colors to any of the other formatters.
"""
colors = ["#%.2x%.2x%.2x" % _hue_to_rgb(h / 100.0) for h in range(0, 100)]
def __init__(self, formatter: Formatter) -> None:
self.formatter = formatter
def format(
self,
progress_bar: ProgressBar,
progress: ProgressBarCounter[object],
width: int,
) -> AnyFormattedText:
# Get formatted text from nested formatter, and explode it in
# text/style tuples.
result = self.formatter.format(progress_bar, progress, width)
result = explode_text_fragments(to_formatted_text(result))
# Insert colors.
result2: StyleAndTextTuples = []
shift = int(time.time() * 3) % len(self.colors)
for i, (style, text, *_) in enumerate(result):
result2.append(
(style + " " + self.colors[(i + shift) % len(self.colors)], text)
)
return result2
def get_width(self, progress_bar: ProgressBar) -> AnyDimension:
return self.formatter.get_width(progress_bar)
def create_default_formatters() -> list[Formatter]:
"""
Return the list of default formatters.
"""
return [
Label(),
Text(" "),
Percentage(),
Text(" "),
Bar(),
Text(" "),
Progress(),
Text(" "),
Text("eta [", style="class:time-left"),
TimeLeft(),
Text("]", style="class:time-left"),
Text(" "),
]
| Rainbow |
python | google__jax | tests/export_back_compat_test.py | {
"start": 45741,
"end": 47552
} | class ____(bctu.CompatTestBase):
def test_shardy_sharding_ops_with_different_meshes(self):
# Tests whether we can save and load a module with meshes that have the
# same axis sizes (and same order) but different axis names.
# Also tests "Sharding", "xla.sdy.GlobalToLocalShape",
# "xla.sdy.LocalToGlobalShape".
if not jtu.test_device_matches(["tpu"]) or len(jax.devices()) < 2:
self.skipTest("Test runs only on TPU with at least 2 devices")
# Must use exactly 2 devices for expected outputs from ppermute.
devices = jax.devices()[:2]
old_mesh = Mesh(devices, axis_names=('a'))
def func(x): # x: f32[4, 4]
@partial(shard_map, mesh=old_mesh,
in_specs=(P('a', None),), out_specs=P('a', None))
def shard_map_func(x): # b: f32[2, 4]
axis_size = lax.axis_size('a')
perm = [(j, (j + 1) % axis_size) for j in range(axis_size)]
return lax.ppermute(x, 'a', perm=perm)
x = jax.lax.with_sharding_constraint(x, NS(old_mesh, P('a', None)))
return shard_map_func(x)
data = [
(shardy_sharding_ops_with_different_meshes.data_2025_04_14, []),
(shardy_sharding_ops_with_different_meshes.data_2025_06_30, None),
]
# Due to changes in how Shardy is serialized, from using custom calls to
# natively serializing Shardy with StableHLO, we may need to override
# the expected custom call targets for old test data that was serialized
# with custom calls.
for data, custom_call_targets_override in data:
with Mesh(devices, axis_names=('x')):
self.run_one_test(
func, self.load_testdata(data),
expect_current_custom_calls=custom_call_targets_override)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| ShardyCompatTest |
python | pypa__warehouse | warehouse/packaging/models.py | {
"start": 19520,
"end": 20269
} | class ____(db.Model):
__tablename__ = "release_dependencies"
__table_args__ = (
Index("release_dependencies_release_kind_idx", "release_id", "kind"),
)
__repr__ = make_repr("release", "kind", "specifier")
release_id: Mapped[UUID] = mapped_column(
ForeignKey("releases.id", onupdate="CASCADE", ondelete="CASCADE"),
)
release: Mapped[Release] = orm.relationship(back_populates="dependencies")
kind: Mapped[int | None]
specifier: Mapped[str | None]
def _dependency_relation(kind):
return orm.relationship(
"Dependency",
primaryjoin=lambda: sql.and_(
Release.id == Dependency.release_id, Dependency.kind == kind.value
),
viewonly=True,
)
| Dependency |
python | getsentry__sentry | src/sentry/integrations/api/endpoints/data_forwarding_index.py | {
"start": 1211,
"end": 1432
} | class ____(OrganizationPermission):
scope_map = {
"GET": ["org:read"],
"POST": ["org:write"],
}
@region_silo_endpoint
@extend_schema(tags=["Integrations"])
| OrganizationDataForwardingDetailsPermission |
python | PyCQA__pylint | tests/functional/a/access/access_attr_before_def_false_positive.py | {
"start": 2169,
"end": 2536
} | class ____:
"""use_attr is seen as the method defining attr because it's in
first position
"""
def __init__(self):
self.reset()
def use_attr(self):
"""use and set members"""
if self.attr:
print('hop')
self.attr = 10
def reset(self):
"""reset members"""
self.attr = 4
| DefinedOutsideInit |
python | apache__airflow | providers/yandex/tests/unit/yandex/operators/test_dataproc.py | {
"start": 2512,
"end": 18703
} | class ____:
def setup_method(self):
dag_id = "test_dag"
self.dag = DAG(
dag_id,
default_args={
"owner": "airflow",
"start_date": datetime.datetime.today(),
"end_date": datetime.datetime.today() + datetime.timedelta(days=1),
},
schedule="@daily",
)
@patch("airflow.providers.yandex.utils.credentials.get_credentials")
@patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
@patch("yandexcloud._wrappers.dataproc.Dataproc.create_cluster")
@patch("yandexcloud.__version__", "0.308.0")
def test_create_cluster(self, mock_create_cluster, *_):
operator = DataprocCreateClusterOperator(
task_id="create_cluster",
ssh_public_keys=SSH_PUBLIC_KEYS,
folder_id=FOLDER_ID,
subnet_id=SUBNET_ID,
zone=AVAILABILITY_ZONE_ID,
connection_id=CONNECTION_ID,
s3_bucket=S3_BUCKET_NAME_FOR_LOGS,
cluster_image_version=CLUSTER_IMAGE_VERSION,
log_group_id=LOG_GROUP_ID,
)
context = {"task_instance": MagicMock()}
operator.execute(context)
mock_create_cluster.assert_called_once_with(
cluster_description="",
cluster_image_version="1.4",
cluster_name=None,
computenode_count=0,
computenode_disk_size=None,
computenode_disk_type=None,
computenode_resource_preset=None,
computenode_max_hosts_count=None,
computenode_measurement_duration=None,
computenode_warmup_duration=None,
computenode_stabilization_duration=None,
computenode_preemptible=False,
computenode_cpu_utilization_target=None,
computenode_decommission_timeout=None,
datanode_count=1,
datanode_disk_size=None,
datanode_disk_type=None,
datanode_resource_preset=None,
folder_id="my_folder_id",
masternode_disk_size=None,
masternode_disk_type=None,
masternode_resource_preset=None,
s3_bucket="my_bucket_name",
service_account_id=None,
services=("HDFS", "YARN", "MAPREDUCE", "HIVE", "SPARK"),
ssh_public_keys=[
"ssh-rsa AAA5B3NzaC1yc2EAA1ADA2ABA3AA4QCxO38tKA0XIs9ivPxt7AYdf3bgtAR1ow3Qkb9GPQ6wkFHQq"
"cFDe6faKCxH6iDRt2o4D8L8Bx6zN42uZSB0nf8jkIxFTcEU3mFSXEbWByg78ao3dMrAAj1tyr1H1pON6P0="
],
subnet_id="my_subnet_id",
zone="ru-central1-c",
log_group_id=LOG_GROUP_ID,
properties=None,
enable_ui_proxy=False,
host_group_ids=None,
security_group_ids=None,
labels=None,
initialization_actions=None,
)
context["task_instance"].xcom_push.assert_has_calls(
[
call(key="cluster_id", value=mock_create_cluster().response.id),
call(key="yandexcloud_connection_id", value=CONNECTION_ID),
]
)
@patch("airflow.providers.yandex.utils.credentials.get_credentials")
@patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
@patch("yandexcloud._wrappers.dataproc.Dataproc.create_cluster")
@patch("yandexcloud.__version__", "0.350.0")
def test_create_cluster_with_350_sdk(self, mock_create_cluster, *_):
operator = DataprocCreateClusterOperator(
task_id="create_cluster",
ssh_public_keys=SSH_PUBLIC_KEYS,
folder_id=FOLDER_ID,
subnet_id=SUBNET_ID,
zone=AVAILABILITY_ZONE_ID,
connection_id=CONNECTION_ID,
s3_bucket=S3_BUCKET_NAME_FOR_LOGS,
cluster_image_version=CLUSTER_IMAGE_VERSION,
log_group_id=LOG_GROUP_ID,
)
context = {"task_instance": MagicMock()}
operator.execute(context)
mock_create_cluster.assert_called_once_with(
cluster_description="",
cluster_image_version="1.4",
cluster_name=None,
computenode_count=0,
computenode_disk_size=None,
computenode_disk_type=None,
computenode_resource_preset=None,
computenode_max_hosts_count=None,
computenode_measurement_duration=None,
computenode_warmup_duration=None,
computenode_stabilization_duration=None,
computenode_preemptible=False,
computenode_cpu_utilization_target=None,
computenode_decommission_timeout=None,
datanode_count=1,
datanode_disk_size=None,
datanode_disk_type=None,
datanode_resource_preset=None,
folder_id="my_folder_id",
masternode_disk_size=None,
masternode_disk_type=None,
masternode_resource_preset=None,
s3_bucket="my_bucket_name",
service_account_id=None,
services=("HDFS", "YARN", "MAPREDUCE", "HIVE", "SPARK"),
ssh_public_keys=[
"ssh-rsa AAA5B3NzaC1yc2EAA1ADA2ABA3AA4QCxO38tKA0XIs9ivPxt7AYdf3bgtAR1ow3Qkb9GPQ6wkFHQq"
"cFDe6faKCxH6iDRt2o4D8L8Bx6zN42uZSB0nf8jkIxFTcEU3mFSXEbWByg78ao3dMrAAj1tyr1H1pON6P0="
],
subnet_id="my_subnet_id",
zone="ru-central1-c",
log_group_id=LOG_GROUP_ID,
properties=None,
enable_ui_proxy=False,
host_group_ids=None,
security_group_ids=None,
labels=None,
initialization_actions=None,
environment=None,
oslogin_enabled=False,
)
context["task_instance"].xcom_push.assert_has_calls(
[
call(key="cluster_id", value=mock_create_cluster().response.id),
call(key="yandexcloud_connection_id", value=CONNECTION_ID),
]
)
@patch("airflow.providers.yandex.utils.credentials.get_credentials")
@patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
@patch("yandexcloud._wrappers.dataproc.Dataproc.delete_cluster")
def test_delete_cluster_operator(self, mock_delete_cluster, *_):
operator = DataprocDeleteClusterOperator(
task_id="delete_cluster",
connection_id=CONNECTION_ID,
)
context = {"task_instance": MagicMock()}
context["task_instance"].xcom_pull.return_value = "my_cluster_id"
operator.execute(context)
context["task_instance"].xcom_pull.assert_called_once_with(key="cluster_id")
mock_delete_cluster.assert_called_once_with("my_cluster_id")
@patch("airflow.providers.yandex.utils.credentials.get_credentials")
@patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
@patch("yandexcloud._wrappers.dataproc.Dataproc.create_hive_job")
def test_create_hive_job_operator(self, mock_create_hive_job, *_):
operator = DataprocCreateHiveJobOperator(
task_id="create_hive_job",
query="SELECT 1;",
connection_id=CONNECTION_ID,
)
context = {"task_instance": MagicMock()}
context["task_instance"].xcom_pull.return_value = "my_cluster_id"
operator.execute(context)
context["task_instance"].xcom_pull.assert_has_calls(
[
call(key="cluster_id"),
]
)
mock_create_hive_job.assert_called_once_with(
cluster_id="my_cluster_id",
continue_on_failure=False,
name="Hive job",
properties=None,
query="SELECT 1;",
query_file_uri=None,
script_variables=None,
)
@patch("airflow.providers.yandex.utils.credentials.get_credentials")
@patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
@patch("yandexcloud._wrappers.dataproc.Dataproc.create_mapreduce_job")
def test_create_mapreduce_job_operator(self, mock_create_mapreduce_job, *_):
operator = DataprocCreateMapReduceJobOperator(
task_id="run_mapreduce_job",
connection_id=CONNECTION_ID,
main_class="org.apache.hadoop.streaming.HadoopStreaming",
file_uris=[
"s3a://some-in-bucket/jobs/sources/mapreduce-001/mapper.py",
"s3a://some-in-bucket/jobs/sources/mapreduce-001/reducer.py",
],
args=[
"-mapper",
"mapper.py",
"-reducer",
"reducer.py",
"-numReduceTasks",
"1",
"-input",
"s3a://some-in-bucket/jobs/sources/data/cities500.txt.bz2",
"-output",
"s3a://some-out-bucket/dataproc/job/results",
],
properties={
"yarn.app.mapreduce.am.resource.mb": "2048",
"yarn.app.mapreduce.am.command-opts": "-Xmx2048m",
"mapreduce.job.maps": "6",
},
)
context = {"task_instance": MagicMock()}
context["task_instance"].xcom_pull.return_value = "my_cluster_id"
operator.execute(context)
context["task_instance"].xcom_pull.assert_has_calls(
[
call(key="cluster_id"),
]
)
mock_create_mapreduce_job.assert_called_once_with(
archive_uris=None,
args=[
"-mapper",
"mapper.py",
"-reducer",
"reducer.py",
"-numReduceTasks",
"1",
"-input",
"s3a://some-in-bucket/jobs/sources/data/cities500.txt.bz2",
"-output",
"s3a://some-out-bucket/dataproc/job/results",
],
cluster_id="my_cluster_id",
file_uris=[
"s3a://some-in-bucket/jobs/sources/mapreduce-001/mapper.py",
"s3a://some-in-bucket/jobs/sources/mapreduce-001/reducer.py",
],
jar_file_uris=None,
main_class="org.apache.hadoop.streaming.HadoopStreaming",
main_jar_file_uri=None,
name="Mapreduce job",
properties={
"yarn.app.mapreduce.am.resource.mb": "2048",
"yarn.app.mapreduce.am.command-opts": "-Xmx2048m",
"mapreduce.job.maps": "6",
},
)
@patch("airflow.providers.yandex.utils.credentials.get_credentials")
@patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
@patch("yandexcloud._wrappers.dataproc.Dataproc.create_spark_job")
def test_create_spark_job_operator(self, mock_create_spark_job, *_):
operator = DataprocCreateSparkJobOperator(
task_id="create_spark_job",
connection_id=CONNECTION_ID,
main_jar_file_uri="s3a://data-proc-public/jobs/sources/java/dataproc-examples-1.0.jar",
main_class="ru.yandex.cloud.dataproc.examples.PopulationSparkJob",
file_uris=[
"s3a://some-in-bucket/jobs/sources/data/config.json",
],
archive_uris=[
"s3a://some-in-bucket/jobs/sources/data/country-codes.csv.zip",
],
jar_file_uris=[
"s3a://some-in-bucket/jobs/sources/java/icu4j-61.1.jar",
"s3a://some-in-bucket/jobs/sources/java/commons-lang-2.6.jar",
"s3a://some-in-bucket/jobs/sources/java/opencsv-4.1.jar",
"s3a://some-in-bucket/jobs/sources/java/json-20190722.jar",
],
args=[
"s3a://some-in-bucket/jobs/sources/data/cities500.txt.bz2",
"s3a://some-out-bucket/dataproc/job/results/${{JOB_ID}}",
],
properties={
"spark.submit.deployMode": "cluster",
},
)
context = {"task_instance": MagicMock()}
context["task_instance"].xcom_pull.return_value = "my_cluster_id"
operator.execute(context)
context["task_instance"].xcom_pull.assert_has_calls(
[
call(key="cluster_id"),
]
)
mock_create_spark_job.assert_called_once_with(
archive_uris=["s3a://some-in-bucket/jobs/sources/data/country-codes.csv.zip"],
args=[
"s3a://some-in-bucket/jobs/sources/data/cities500.txt.bz2",
"s3a://some-out-bucket/dataproc/job/results/${{JOB_ID}}",
],
cluster_id="my_cluster_id",
file_uris=["s3a://some-in-bucket/jobs/sources/data/config.json"],
jar_file_uris=[
"s3a://some-in-bucket/jobs/sources/java/icu4j-61.1.jar",
"s3a://some-in-bucket/jobs/sources/java/commons-lang-2.6.jar",
"s3a://some-in-bucket/jobs/sources/java/opencsv-4.1.jar",
"s3a://some-in-bucket/jobs/sources/java/json-20190722.jar",
],
main_class="ru.yandex.cloud.dataproc.examples.PopulationSparkJob",
main_jar_file_uri="s3a://data-proc-public/jobs/sources/java/dataproc-examples-1.0.jar",
name="Spark job",
properties={"spark.submit.deployMode": "cluster"},
packages=None,
repositories=None,
exclude_packages=None,
)
@patch("airflow.providers.yandex.utils.credentials.get_credentials")
@patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
@patch("yandexcloud._wrappers.dataproc.Dataproc.create_pyspark_job")
def test_create_pyspark_job_operator(self, mock_create_pyspark_job, *_):
operator = DataprocCreatePysparkJobOperator(
task_id="create_pyspark_job",
connection_id=CONNECTION_ID,
main_python_file_uri="s3a://some-in-bucket/jobs/sources/pyspark-001/main.py",
python_file_uris=[
"s3a://some-in-bucket/jobs/sources/pyspark-001/geonames.py",
],
file_uris=[
"s3a://some-in-bucket/jobs/sources/data/config.json",
],
archive_uris=[
"s3a://some-in-bucket/jobs/sources/data/country-codes.csv.zip",
],
args=[
"s3a://some-in-bucket/jobs/sources/data/cities500.txt.bz2",
"s3a://some-out-bucket/jobs/results/${{JOB_ID}}",
],
jar_file_uris=[
"s3a://some-in-bucket/jobs/sources/java/dataproc-examples-1.0.jar",
"s3a://some-in-bucket/jobs/sources/java/icu4j-61.1.jar",
"s3a://some-in-bucket/jobs/sources/java/commons-lang-2.6.jar",
],
properties={
"spark.submit.deployMode": "cluster",
},
)
context = {"task_instance": MagicMock()}
context["task_instance"].xcom_pull.return_value = "my_cluster_id"
operator.execute(context)
context["task_instance"].xcom_pull.assert_has_calls(
[
call(key="cluster_id"),
]
)
mock_create_pyspark_job.assert_called_once_with(
archive_uris=["s3a://some-in-bucket/jobs/sources/data/country-codes.csv.zip"],
args=[
"s3a://some-in-bucket/jobs/sources/data/cities500.txt.bz2",
"s3a://some-out-bucket/jobs/results/${{JOB_ID}}",
],
cluster_id="my_cluster_id",
file_uris=["s3a://some-in-bucket/jobs/sources/data/config.json"],
jar_file_uris=[
"s3a://some-in-bucket/jobs/sources/java/dataproc-examples-1.0.jar",
"s3a://some-in-bucket/jobs/sources/java/icu4j-61.1.jar",
"s3a://some-in-bucket/jobs/sources/java/commons-lang-2.6.jar",
],
main_python_file_uri="s3a://some-in-bucket/jobs/sources/pyspark-001/main.py",
name="Pyspark job",
properties={"spark.submit.deployMode": "cluster"},
python_file_uris=["s3a://some-in-bucket/jobs/sources/pyspark-001/geonames.py"],
packages=None,
repositories=None,
exclude_packages=None,
)
| TestDataprocClusterCreateOperator |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 4486,
"end": 4704
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mod = ModuleWithStaticForward()
def forward(self, x):
return self.mod(x)
| ModuleCallModuleWithStaticForward |
python | pypa__pip | src/pip/_vendor/distlib/resources.py | {
"start": 3019,
"end": 3208
} | class ____(ResourceBase):
is_container = True # Backwards compatibility
@cached_property
def resources(self):
return self.finder.get_resources(self)
| ResourceContainer |
python | getsentry__sentry | src/sentry/management/commands/devsyncdb.py | {
"start": 145,
"end": 837
} | class ____(migrate.Command):
help = "Create db skipping migrations"
def handle(self, *args: Any, **options: Any) -> None:
class DisableMigrations:
def __contains__(self, item: str) -> bool:
return True
def __getitem__(self, item: str) -> None:
return None
orig = settings.MIGRATION_MODULES
settings.MIGRATION_MODULES = DisableMigrations()
options["run_syncdb"] = True
try:
for database in tuple(connections):
options["database"] = database
super().handle(*args, **options)
finally:
settings.MIGRATION_MODULES = orig
| Command |
python | tensorflow__tensorflow | third_party/xla/xla/backends/cpu/codegen/dot/dot_kernel_emitter_test.py | {
"start": 2002,
"end": 5529
} | class ____(parameterized.TestCase):
@parameterized.product(
emitter_type=emitter_types,
rhs_shape=[(4,), (4, 3), (4, 3, 10), (500, 10, 123)],
dtype=dtypes_to_test,
)
def test_vector_matrix_dot(self, emitter_type, rhs_shape, dtype):
value_range = (0.0, 20.0)
lhs_np = create_input(value_range, rhs_shape[0], dtype)
rhs_np = create_input(value_range, rhs_shape, dtype)
lhs_literal = create_literal(lhs_np)
rhs_literal = create_literal(rhs_np)
output_literal = create_literal(np.ndarray(rhs_shape[1:], dtype=dtype))
lhs_param = HloInstruction.create_parameter(0, lhs_literal.shape(), "lhs")
rhs_param = HloInstruction.create_parameter(1, rhs_literal.shape(), "rhs")
dot_dimension_numbers = testlib_base.DotDimensionNumbers([0], [0])
hlo_op = HloInstruction.create_dot(
output_literal.shape(), lhs_param, rhs_param, dot_dimension_numbers
)
hlo_module, buffer_assignment = utilities.build_hlo_module(
hlo_op, lhs_param, rhs_param
)
jit_compiler = testlib_cpu.JitCompiler(hlo_module.get_config())
emitter = emitter_type(
hlo_module.get_root_instruction(),
buffer_assignment,
jit_compiler.get_target_machine(),
)
runner = testlib_cpu.KernelRunner.create(
emitter.emit_kernel_definition(), jit_compiler
)
runner.call([lhs_literal, rhs_literal, output_literal])
np_result = np.tensordot(lhs_np, rhs_np, axes=(0, 0))
np.testing.assert_array_max_ulp(
np.asarray(output_literal),
np_result,
maxulp=10,
)
@parameterized.product(
emitter_type=emitter_types,
shapes=[
((1, 1), (1, 1)),
((1, 1), (1, 10)),
((2, 2), (2, 2)),
((2, 2), (2, 3)),
((10, 10), (10, 10)),
((15, 13), (13, 17)),
],
dtype=dtypes_to_test,
)
def test_matrix_multiplication(self, emitter_type, shapes, dtype):
if dtype == np.float16 and emitter_type is testlib_cpu.DotKernelEmitter:
self.skipTest("float16 is not supported by the dot emitter")
value_range = (0.0, 20.0)
lhs_np = create_input(value_range, shapes[0], dtype)
rhs_np = create_input(value_range, shapes[1], dtype)
lhs_literal = create_literal(lhs_np)
rhs_literal = create_literal(rhs_np)
output_shape = shapes[0][:-1] + shapes[1][1:]
output_literal = create_literal(np.ndarray(output_shape, dtype=dtype))
lhs_param = HloInstruction.create_parameter(0, lhs_literal.shape(), "lhs")
rhs_param = HloInstruction.create_parameter(1, rhs_literal.shape(), "rhs")
dot_dimension_numbers = testlib_base.DotDimensionNumbers([1], [0])
hlo_op = HloInstruction.create_dot(
output_literal.shape(), lhs_param, rhs_param, dot_dimension_numbers
)
hlo_module, buffer_assignment = utilities.build_hlo_module(
hlo_op, lhs_param, rhs_param
)
jit_compiler = testlib_cpu.JitCompiler(hlo_module.get_config())
emitter = emitter_type(
hlo_module.get_root_instruction(),
buffer_assignment,
jit_compiler.get_target_machine(),
)
kernel_definition = emitter.emit_kernel_definition()
runner = testlib_cpu.KernelRunner.create(kernel_definition, jit_compiler)
runner.call([lhs_literal, rhs_literal, output_literal])
np_result = lhs_np @ rhs_np
np.testing.assert_array_max_ulp(
np.asarray(output_literal),
np_result,
maxulp=10,
)
if __name__ == "__main__":
absltest.main()
| DotKernelTest |
python | nryoung__algorithms | tests/test_sorting.py | {
"start": 2271,
"end": 2808
} | class ____(SortingAlgorithmTestCase):
"""
Tests Merge sort on a small range from 0-9
also tests merge function included in merge sort
"""
def test_mergesort(self):
self.output = merge_sort.sort(self.input)
self.assertEqual(self.correct, self.output)
def test_merge(self):
self.seq1 = list(range(0, 5))
self.seq2 = list(range(5, 10))
self.seq = merge_sort.merge(self.seq1, self.seq2)
self.assertIs(self.seq[0], 0)
self.assertIs(self.seq[-1], 9)
| TestMergeSort |
python | keras-team__keras | keras/src/losses/losses.py | {
"start": 49536,
"end": 52154
} | class ____(LossFunctionWrapper):
"""Computes the Dice loss value between `y_true` and `y_pred`.
Formula:
```python
loss = 1 - (2 * sum(y_true * y_pred)) / (sum(y_true) + sum(y_pred))
```
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
axis: Tuple for which dimensions the loss is calculated. Defaults to
`None`.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
`"float32"` unless set to different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
Returns:
Dice loss value.
Example:
>>> y_true = [[[[1.0], [1.0]], [[0.0], [0.0]]],
... [[[1.0], [1.0]], [[0.0], [0.0]]]]
>>> y_pred = [[[[0.0], [1.0]], [[0.0], [1.0]]],
... [[[0.4], [0.0]], [[0.0], [0.9]]]]
>>> axis = (1, 2, 3)
>>> loss = keras.losses.Dice(axis=axis, reduction=None)(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss
array([0.5, 0.75757575], shape=(2,), dtype=float32)
>>> loss = keras.losses.Dice()(y_true, y_pred)
>>> assert loss.shape == ()
>>> loss
array(0.6164384, shape=(), dtype=float32)
>>> y_true = np.array(y_true)
>>> y_pred = np.array(y_pred)
>>> loss = keras.losses.Dice(axis=axis, reduction=None)(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss
array([0.5, 0.75757575], shape=(2,), dtype=float32)
"""
def __init__(
self,
reduction="sum_over_batch_size",
name="dice",
axis=None,
dtype=None,
):
super().__init__(
dice, name=name, reduction=reduction, dtype=dtype, axis=axis
)
self.axis = axis
def get_config(self):
config = Loss.get_config(self)
config.update({"axis": self.axis})
return config
@keras_export("keras.losses.Tversky")
| Dice |
python | numba__numba | numba/cuda/tests/cudadrv/test_runtime.py | {
"start": 687,
"end": 1456
} | class ____(unittest.TestCase):
def test_is_supported_version_true(self):
for v in SUPPORTED_VERSIONS:
with patch.object(runtime, 'get_version', return_value=v):
self.assertTrue(runtime.is_supported_version())
@skip_on_cudasim('The simulator always simulates a supported runtime')
def test_is_supported_version_false(self):
# Check with an old unsupported version and some potential future
# versions
for v in ((10, 2), (11, 8), (12, 0)):
with patch.object(runtime, 'get_version', return_value=v):
self.assertFalse(runtime.is_supported_version())
def test_supported_versions(self):
self.assertEqual(SUPPORTED_VERSIONS, runtime.supported_versions)
| TestRuntime |
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/control_flow_ops_test.py | {
"start": 31504,
"end": 32645
} | class ____(PForTestCase):
def test_print(self):
x = random_ops.random_uniform([3, 5])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return logging_ops.Print(
x1, [x1, "x1", array_ops.shape(x1)], summarize=10)
self._test_loop_fn(loop_fn, 3)
def test_print_v2(self):
x = constant_op.constant([1, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
with ops.control_dependencies([
logging_ops.print_v2(
x1, "x1", array_ops.shape(x1), summarize=10)]):
return array_ops.identity(x1)
self._test_loop_fn(loop_fn, 3)
with self.captureWritesToStream(sys.stderr) as printed:
self.evaluate(pfor_control_flow_ops.pfor(loop_fn, 3))
self.assertIn("[1 2 3] x1 []", printed.contents())
def test_assert(self):
def loop_fn(i):
return control_flow_assert.Assert(i < 10, [i, [10], [i + 1]])
# TODO(agarwal): make this work with for_loop.
with session.Session() as sess:
sess.run(pfor_control_flow_ops.pfor(loop_fn, 3))
sess.run(pfor_control_flow_ops.pfor(
lambda i, pfor_config: loop_fn(i), 3))
| LoggingTest |
python | numpy__numpy | numpy/_core/tests/test_deprecations.py | {
"start": 13048,
"end": 13363
} | class ____(_DeprecationTestCase):
message = "Passing in a parenthesized single number"
@pytest.mark.parametrize("string", ["(2)i,", "(3)3S,", "f,(2)f"])
def test_parenthesized_repeat_count(self, string):
self.assert_deprecated(np.dtype, args=(string,))
| TestDeprecatedDTypeParenthesizedRepeatCount |
python | numba__numba | numba/cuda/cudadrv/driver.py | {
"start": 79851,
"end": 80556
} | class ____(metaclass=ABCMeta):
"""Abstract base class for modules"""
def __init__(self, context, handle, info_log, finalizer=None):
self.context = context
self.handle = handle
self.info_log = info_log
if finalizer is not None:
self._finalizer = weakref.finalize(self, finalizer)
def unload(self):
"""Unload this module from the context"""
self.context.unload_module(self)
@abstractmethod
def get_function(self, name):
"""Returns a Function object encapsulating the named function"""
@abstractmethod
def get_global_symbol(self, name):
"""Return a MemoryPointer referring to the named symbol"""
| Module |
python | django__django | tests/fixtures/tests.py | {
"start": 50801,
"end": 52061
} | class ____(TestCase):
"""
Custom class to limit fixture dirs.
"""
def test_loaddata_not_existent_fixture_file(self):
stdout_output = StringIO()
with self.assertRaisesMessage(
CommandError, "No fixture named 'this_fixture_doesnt_exist' found."
):
management.call_command(
"loaddata", "this_fixture_doesnt_exist", stdout=stdout_output
)
@mock.patch("django.db.connection.enable_constraint_checking")
@mock.patch("django.db.connection.disable_constraint_checking")
def test_nonexistent_fixture_no_constraint_checking(
self, disable_constraint_checking, enable_constraint_checking
):
"""
If no fixtures match the loaddata command, constraints checks on the
database shouldn't be disabled. This is performance critical on MSSQL.
"""
with self.assertRaisesMessage(
CommandError, "No fixture named 'this_fixture_doesnt_exist' found."
):
management.call_command(
"loaddata", "this_fixture_doesnt_exist", verbosity=0
)
disable_constraint_checking.assert_not_called()
enable_constraint_checking.assert_not_called()
| NonexistentFixtureTests |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.