language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | davidhalter__jedi | jedi/inference/names.py | {
"start": 849,
"end": 2742
} | class ____:
start_pos: Optional[Tuple[int, int]] = None
string_name: str
parent_context = None
tree_name = None
is_value_name = True
"""
Used for the Jedi API to know if it's a keyword or an actual name.
"""
@abstractmethod
def infer(self):
raise NotImplementedError
@abstractmethod
def goto(self):
# Typically names are already definitions and therefore a goto on that
# name will always result on itself.
return {self}
def get_qualified_names(self, include_module_names=False):
qualified_names = self._get_qualified_names()
if qualified_names is None or not include_module_names:
return qualified_names
module_names = self.get_root_context().string_names
if module_names is None:
return None
return module_names + qualified_names
def _get_qualified_names(self):
# By default, a name has no qualified names.
return None
def get_root_context(self):
return self.parent_context.get_root_context()
def get_public_name(self):
return self.string_name
def __repr__(self):
if self.start_pos is None:
return '<%s: string_name=%s>' % (self.__class__.__name__, self.string_name)
return '<%s: string_name=%s start_pos=%s>' % (self.__class__.__name__,
self.string_name, self.start_pos)
def is_import(self):
return False
def py__doc__(self):
return ''
@property
def api_type(self):
return self.parent_context.api_type
def get_defining_qualified_value(self):
"""
Returns either None or the value that is public and qualified. Won't
return a function, because a name in a function is never public.
"""
return None
| AbstractNameDefinition |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeAlias18.py | {
"start": 716,
"end": 740
} | class ____(A[T2]): ...
| A_3 |
python | sqlalchemy__sqlalchemy | test/orm/test_versioning.py | {
"start": 17653,
"end": 22813
} | class ____(fixtures.MappedTest):
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"node",
metadata,
Column("id", Integer, primary_key=True),
Column("version_id", Integer),
Column("parent_id", ForeignKey("node.id")),
)
@classmethod
def setup_classes(cls):
class Node(cls.Basic):
pass
def _fixture(self, o2m, post_update, insert=True):
Node = self.classes.Node
node = self.tables.node
self.mapper_registry.map_imperatively(
Node,
node,
properties={
"related": relationship(
Node,
remote_side=node.c.id if not o2m else node.c.parent_id,
post_update=post_update,
)
},
version_id_col=node.c.version_id,
)
s = fixture_session()
n1 = Node(id=1)
n2 = Node(id=2)
if insert:
s.add_all([n1, n2])
s.flush()
return s, n1, n2
def test_o2m_plain(self):
s, n1, n2 = self._fixture(o2m=True, post_update=False)
n1.related.append(n2)
with conditional_sane_rowcount_warnings(update=True):
s.flush()
eq_(n1.version_id, 1)
eq_(n2.version_id, 2)
def test_m2o_plain(self):
s, n1, n2 = self._fixture(o2m=False, post_update=False)
n1.related = n2
with conditional_sane_rowcount_warnings(update=True):
s.flush()
eq_(n1.version_id, 2)
eq_(n2.version_id, 1)
def test_o2m_post_update(self):
s, n1, n2 = self._fixture(o2m=True, post_update=True)
n1.related.append(n2)
with conditional_sane_rowcount_warnings(update=True):
s.flush()
eq_(n1.version_id, 1)
eq_(n2.version_id, 2)
def test_m2o_post_update(self):
s, n1, n2 = self._fixture(o2m=False, post_update=True)
n1.related = n2
with conditional_sane_rowcount_warnings(update=True):
s.flush()
eq_(n1.version_id, 2)
eq_(n2.version_id, 1)
def test_o2m_post_update_not_assoc_w_insert(self):
s, n1, n2 = self._fixture(o2m=True, post_update=True, insert=False)
n1.related.append(n2)
s.add_all([n1, n2])
with conditional_sane_rowcount_warnings(update=True):
s.flush()
eq_(n1.version_id, 1)
eq_(n2.version_id, 1)
def test_m2o_post_update_not_assoc_w_insert(self):
s, n1, n2 = self._fixture(o2m=False, post_update=True, insert=False)
n1.related = n2
s.add_all([n1, n2])
with conditional_sane_rowcount_warnings(update=True):
s.flush()
eq_(n1.version_id, 1)
eq_(n2.version_id, 1)
@testing.requires.sane_rowcount
def test_o2m_post_update_version_assert(self):
Node = self.classes.Node
s, n1, n2 = self._fixture(o2m=True, post_update=True)
n1.related.append(n2)
# outwit the database transaction isolation and SQLA's
# expiration at the same time by using different Session on
# same transaction
s2 = Session(bind=s.connection(bind_arguments=dict(mapper=Node)))
s2.query(Node).filter(Node.id == n2.id).update({"version_id": 3})
s2.commit()
assert_raises_message(
orm_exc.StaleDataError,
"UPDATE statement on table 'node' expected to "
r"update 1 row\(s\); 0 were matched.",
s.flush,
)
def test_o2m_post_update_no_sane_rowcount(self):
Node = self.classes.Node
s, n1, n2 = self._fixture(o2m=True, post_update=True)
n1.related.append(n2)
with (
patch.object(config.db.dialect, "supports_sane_rowcount", False),
patch.object(
config.db.dialect, "supports_sane_multi_rowcount", False
),
):
s2 = Session(bind=s.connection(bind_arguments=dict(mapper=Node)))
s2.query(Node).filter(Node.id == n2.id).update({"version_id": 3})
s2.commit()
with expect_warnings(
"Dialect .* does not support updated rowcount - "
"versioning cannot be verified."
):
s.flush()
@testing.requires.sane_rowcount
def test_m2o_post_update_version_assert(self):
Node = self.classes.Node
s, n1, n2 = self._fixture(o2m=False, post_update=True)
n1.related = n2
# outwit the database transaction isolation and SQLA's
# expiration at the same time by using different Session on
# same transaction
s2 = Session(bind=s.connection(bind_arguments=dict(mapper=Node)))
s2.query(Node).filter(Node.id == n1.id).update({"version_id": 3})
s2.commit()
assert_raises_message(
orm_exc.StaleDataError,
"UPDATE statement on table 'node' expected to "
r"update 1 row\(s\); 0 were matched.",
s.flush,
)
| VersionOnPostUpdateTest |
python | crytic__slither | slither/detectors/statements/msg_value_in_loop.py | {
"start": 2546,
"end": 4259
} | class ____(AbstractDetector):
"""
Detect the use of msg.value inside a loop
"""
ARGUMENT = "msg-value-loop"
HELP = "msg.value inside a loop"
IMPACT = DetectorClassification.HIGH
CONFIDENCE = DetectorClassification.MEDIUM
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation/#msgvalue-inside-a-loop"
WIKI_TITLE = "`msg.value` inside a loop"
WIKI_DESCRIPTION = "Detect the use of `msg.value` inside a loop."
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract MsgValueInLoop{
mapping (address => uint256) balances;
function bad(address[] memory receivers) public payable {
for (uint256 i=0; i < receivers.length; i++) {
balances[receivers[i]] += msg.value;
}
}
}
```
"""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = """
Provide an explicit array of amounts alongside the receivers array, and check that the sum of all amounts matches `msg.value`.
"""
def _detect(self) -> List[Output]:
""""""
results: List[Output] = []
for c in self.compilation_unit.contracts_derived:
values = detect_msg_value_in_loop(c)
for node, calls_stack in values:
func = node.function
info: DETECTOR_INFO = [func, " use msg.value in a loop: ", node, "\n"]
if len(calls_stack) > 0:
info.append("\tCalls stack containing the loop:\n")
for call in calls_stack:
info.extend(["\t\t", call, "\n"])
res = self.generate_result(info)
results.append(res)
return results
| MsgValueInLoop |
python | pytorch__pytorch | test/distributed/_composable/fsdp/test_fully_shard_training.py | {
"start": 10913,
"end": 27677
} | class ____(FSDPTest):
@property
def world_size(self) -> int:
return min(8, torch.get_device_module(device_type).device_count())
@skip_if_lt_x_gpu(2)
def test_train_parity_single_group_shard_dim0(self):
"""
Tests train parity with DDP for a single FSDP group when sharding
parameters on dim-0.
"""
self.run_subtests(
{
"lin_shapes": [
[(16, 15), (15, 8)],
[(7, 15), (15, 3)],
[(16, 17), (17, 8)],
],
"use_shard_placement_fn": [False],
},
self._test_train_parity_single_group,
)
@skip_if_lt_x_gpu(2)
def test_train_parity_single_group_shard_largest_dim(self):
"""
Tests train parity with DDP for a single FSDP group when sharding
parameters on their largest dim.
"""
self.run_subtests(
{
# Sharding on nonzero dim requires even sharding
"lin_shapes": [[(32, 16), (16, 8)]],
"use_shard_placement_fn": [True],
},
self._test_train_parity_single_group,
)
def _test_train_parity_single_group(
self, lin_shapes: list[tuple[int, int]], use_shard_placement_fn: bool
):
torch.manual_seed(42)
model = nn.Sequential(
nn.Linear(*lin_shapes[0]), nn.ReLU(), nn.Linear(*lin_shapes[1])
)
ref_model = copy.deepcopy(model).to(device_type)
replicate(ref_model, device_ids=[self.rank])
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
def _shard_placement_fn(param: nn.Parameter) -> Optional[Shard]:
return Shard(param.shape.index(max(param.shape)))
shard_placement_fn = _shard_placement_fn if use_shard_placement_fn else None
fully_shard(model, shard_placement_fn=shard_placement_fn)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
torch.manual_seed(42 + self.rank + 1)
inp = (torch.randn((4, lin_shapes[0][0]), device=device_type.type),)
for iter_idx in range(10):
losses: list[torch.Tensor] = []
for _model, _optim in ((ref_model, ref_optim), (model, optim)):
_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
losses.append(_model(*inp).sum())
losses[-1].backward()
_optim.step()
self.assertEqual(losses[0], losses[1])
@skip_if_lt_x_gpu(2)
@unittest.skipIf(TEST_HPU or TEST_XPU, "Sleep kernel not supported for HPU/XPU")
@compiled_fsdp_test(compile_compute_on_module=Transformer)
def test_train_parity_multi_group(self):
"""
Tests train parity against DDP when using multiple parameter groups for
communication (for communication and computation overlap plus memory
reduction).
"""
self.run_subtests(
{
"reshard_after_forward": [True, False, 2],
"test_device_type": [device_type.type],
"offload_policy": [OffloadPolicy()],
"delay_after_forward": [False, True],
"delay_before_all_gather": [False, True],
"delay_before_reduce_scatter": [False, True],
"delay_before_optim": [False, True],
"unshard_async_op": [False],
},
self._test_train_parity_multi_group,
)
@skip_if_lt_x_gpu(2)
@unittest.skipIf(TEST_HPU or TEST_XPU, "sleep kernel not supported on HPU/XPU")
def test_train_parity_multi_group_cpu_offload_eager(self):
"""
Tests train parity against DDP when using multiple parameter groups for
communication and CPU offloading.
"""
self.run_subtests(
{
"reshard_after_forward": [True], # save CI time
"offload_policy": [
CPUOffloadPolicy(pin_memory=True),
CPUOffloadPolicy(pin_memory=False),
],
"test_device_type": [device_type.type],
"delay_after_forward": [False, True],
"delay_before_all_gather": [False, True],
"delay_before_reduce_scatter": [False, True],
"delay_before_optim": [False, True],
"unshard_async_op": [False],
},
self._test_train_parity_multi_group,
)
@skip_if_lt_x_gpu(2)
@unittest.skipIf(TEST_HPU or TEST_XPU, "sleep kernel not supported on HPU/XPU")
@compiled_fsdp_test(compile_compute_on_module=Transformer)
def test_train_parity_multi_group_unshard_async_op(self):
"""
Tests train parity against DDP when using multiple parameter groups for
communication and setting ``unshard_async_op=True``.
"""
self.run_subtests(
{
"reshard_after_forward": [True],
"test_device_type": [device_type.type],
"offload_policy": [OffloadPolicy()],
"delay_after_forward": [False, True],
"delay_before_all_gather": [False, True],
"delay_before_reduce_scatter": [False, True],
"delay_before_optim": [False, True],
"unshard_async_op": [True],
},
self._test_train_parity_multi_group,
)
def _test_train_parity_multi_group(
self,
reshard_after_forward: Union[bool, int],
offload_policy: OffloadPolicy,
test_device_type: str,
delay_after_forward: bool,
delay_before_all_gather: bool,
delay_before_reduce_scatter: bool,
delay_before_optim: bool,
unshard_async_op: bool,
):
# Only test individual delays or all four delays to save test time
if (
delay_after_forward
+ delay_before_all_gather
+ delay_before_reduce_scatter
+ delay_before_optim
in (2, 3)
):
return
assert test_device_type in ("cuda", "hpu", "xpu", "cpu"), f"{test_device_type}"
torch.manual_seed(42)
vocab_size = 1024
model_args = ModelArgs(
n_layers=3,
n_heads=4,
vocab_size=vocab_size,
max_seq_len=64,
dropout_p=0,
)
model = Transformer(model_args)
ref_model = copy.deepcopy(model)
if test_device_type == device_type.type:
replicate(
ref_model.to(device_type),
device_ids=[self.rank],
)
else:
gloo_pg = dist.new_group(backend="gloo")
replicate(ref_model, process_group=gloo_pg)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
mesh = init_device_mesh(test_device_type, (self.world_size,))
fully_shard_fn = functools.partial(
fully_shard,
mesh=mesh,
reshard_after_forward=reshard_after_forward,
offload_policy=offload_policy,
)
for module in model.modules():
if isinstance(module, TransformerBlock):
fully_shard_fn(module)
fully_shard_fn(model)
if unshard_async_op:
model._set_unshard_async_op(unshard_async_op)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
delay_in_ms = 100
orig_all_gather = dist.all_gather_into_tensor
orig_reduce_scatter = dist.reduce_scatter_tensor
def delayed_all_gather(*args, **kwargs):
torch.get_device_module(device_type)._sleep(
int(delay_in_ms * get_cycles_per_ms())
)
return orig_all_gather(*args, **kwargs)
def delayed_reduce_scatter(*args, **kwargs):
torch.get_device_module(device_type)._sleep(
int(delay_in_ms * get_cycles_per_ms())
)
return orig_reduce_scatter(*args, **kwargs)
torch.manual_seed(42 + self.rank + 1)
patch_all_gather_ctx = (
patch_all_gather(delayed_all_gather)
if delay_before_all_gather
else contextlib.nullcontext()
)
patch_reduce_scatter_ctx = (
patch_reduce_scatter(delayed_reduce_scatter)
if delay_before_reduce_scatter
else contextlib.nullcontext()
)
with patch_all_gather_ctx, patch_reduce_scatter_ctx:
for iter_idx in range(10):
inp = torch.randint(0, vocab_size, (3, 64), device=device_type)
losses: list[torch.Tensor] = []
for _model, _optim in ((ref_model, ref_optim), (model, optim)):
_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
losses.append(_model(inp).sum())
if _model is model and delay_after_forward:
torch.get_device_module(test_device_type)._sleep(
int(delay_in_ms * get_cycles_per_ms())
)
losses[-1].backward()
if _model is model and delay_before_optim:
torch.get_device_module(test_device_type)._sleep(
int(delay_in_ms * get_cycles_per_ms())
)
_optim.step()
self.assertEqual(losses[0], losses[1])
@skip_if_lt_x_gpu(2)
@unittest.skipIf(TEST_XPU, "Sleep is not supported on XPU")
def test_non_root_forward_backward(self):
"""
Tests running forward/backward through the root and then through a
non-root. The non-root needs to synchronize streams/queue the callback.
"""
torch.manual_seed(42)
lin_dim = 32
model = nn.Sequential(*[MLP(lin_dim, torch.device("cpu")) for _ in range(3)])
ref_model = copy.deepcopy(model).to(device_type)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
for mlp in model:
fully_shard(mlp)
fully_shard(model)
optim = torch.optim.Adam(model.parameters(), lr=1e-2, foreach=True)
torch.manual_seed(42 + self.rank)
inp = torch.randn((8, lin_dim), device=device_type)
ref_root_loss = ref_model(inp).sum()
ref_root_loss.backward()
for param in ref_model.parameters():
dist.all_reduce(param.grad)
param.grad.detach().div_(self.world_size)
ref_optim.step()
ref_optim.zero_grad()
ref_nonroot_loss = ref_model[0](inp).sum()
ref_nonroot_loss.backward()
for param in ref_model.parameters():
if param.grad is not None:
dist.all_reduce(param.grad)
param.grad.detach().div_(self.world_size)
ref_optim.step()
root_loss = model(inp).sum()
root_loss.backward()
torch.get_device_module(device_type)._sleep(int(100 * get_cycles_per_ms()))
optim.step()
optim.zero_grad()
nonroot_loss = model[0](inp).sum()
nonroot_loss.backward()
optim.step()
self.assertEqual(ref_root_loss, root_loss)
self.assertEqual(ref_nonroot_loss, nonroot_loss)
self.assertEqual(ref_model(inp).sum(), model(inp).sum())
@skip_if_lt_x_gpu(2)
def test_multi_forward_module(self):
"""
Tests parity with DDP when running a module that participates multiple
times in forward.
"""
self.run_subtests(
{"reshard_after_forward": [True, False, 2]},
self._test_multi_forward_module,
)
def _test_multi_forward_module(self, reshard_after_forward: Union[bool, int]):
class MultiForwardModule(nn.Module):
def __init__(self, device: torch.device):
super().__init__()
self.inner = nn.Linear(4, 4, device=device)
self.outer = nn.Linear(4, 5, device=device)
def forward(self, x):
i = self.inner(x)
j = self.inner(x)
return self.outer(i + j)
torch.manual_seed(42)
model = MultiForwardModule(device=device_type.type)
ref_model = copy.deepcopy(model)
replicate(
ref_model,
device_ids=[self.rank],
)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
fully_shard(model.inner)
fully_shard(model)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
torch.manual_seed(42 + self.rank)
inp = torch.randn((32, 4), device=device_type.type)
for iter_idx in range(10):
losses: list[torch.Tensor] = []
for _model, _optim in ((ref_model, ref_optim), (model, optim)):
_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
losses.append(_model(inp).sum())
losses[-1].backward()
_optim.step()
self.assertEqual(losses[0], losses[1])
@skip_if_lt_x_gpu(2)
def test_explicit_prefetching(self):
torch.manual_seed(42)
model_args = ModelArgs(n_layers=8, dropout_p=0.0)
model = Transformer(model_args)
ref_model = replicate(copy.deepcopy(model).to(device_type))
ref_optim = torch.optim.AdamW(ref_model.parameters(), lr=1e-2)
for layer in itertools.chain(model.layers, [model]):
fully_shard(layer)
optim = torch.optim.AdamW(model.parameters(), lr=1e-2)
num_to_forward_prefetch = num_to_backward_prefetch = 2
for i, layer in enumerate(model.layers):
if i >= len(model.layers) - num_to_forward_prefetch:
break
layers_to_prefetch = [
model.layers[i + j] for j in range(1, num_to_forward_prefetch + 1)
]
layer.set_modules_to_forward_prefetch(layers_to_prefetch)
for i, layer in enumerate(model.layers):
if i < num_to_backward_prefetch:
continue
layers_to_prefetch = [
model.layers[i - j] for j in range(1, num_to_backward_prefetch + 1)
]
layer.set_modules_to_backward_prefetch(layers_to_prefetch)
torch.manual_seed(42 + self.rank)
inp = torch.randint(0, model_args.vocab_size, (2, 8), device=device_type.type)
for _ in range(10):
losses: list[torch.Tensor] = []
for _model, _optim in ((ref_model, ref_optim), (model, optim)):
_optim.zero_grad()
losses.append(_model(inp).sum())
losses[-1].backward()
_optim.step()
self.assertEqual(losses[0], losses[1])
@skip_if_lt_x_gpu(2)
@unittest.skipIf(TEST_HPU or TEST_XPU, "Sleep is not supported on HPU/XPU")
def test_post_optim_event(self):
torch.manual_seed(42)
model_args = ModelArgs(dropout_p=0.0)
model = Transformer(model_args)
ref_model = replicate(copy.deepcopy(model).to(device_type.type))
ref_optim = torch.optim.AdamW(ref_model.parameters(), lr=1e-2)
for layer in itertools.chain(model.layers, [model]):
fully_shard(layer)
optim = torch.optim.AdamW(model.parameters(), lr=1e-2)
def step_post_hook(
fsdp_module: FSDPModule, opt: torch.optim.Optimizer, args, kwargs
) -> None:
post_optim_event = (
torch.get_device_module(device_type).current_stream().record_event()
)
fsdp_module.set_post_optim_event(post_optim_event)
optim.register_step_post_hook(functools.partial(step_post_hook, model))
torch.manual_seed(42 + self.rank)
inp = torch.randint(0, model_args.vocab_size, (2, 8), device=device_type.type)
# Track all losses and check for equality at the end to avoid a CPU
# sync point after each iteration
ref_losses: list[torch.Tensor] = []
losses: list[torch.Tensor] = []
for _ in range(10):
ref_optim.zero_grad()
ref_losses.append(ref_model(inp).sum())
ref_losses[-1].backward()
ref_optim.step()
for _ in range(10):
optim.zero_grad()
losses.append(model(inp).sum())
losses[-1].backward()
optim.step()
# Sleep after the optimizer step to allow CPU to run ahead into the
# next iteration's forward, exercising the post-optim stream sync
torch.get_device_module(device_type)._sleep(int(25 * get_cycles_per_ms()))
for ref_loss, loss in zip(ref_losses, losses):
self.assertEqual(ref_loss, loss)
| TestFullyShard1DTrainingCore |
python | plotly__plotly.py | plotly/graph_objs/_choropleth.py | {
"start": 231,
"end": 65984
} | class ____(_BaseTraceType):
_parent_path_str = ""
_path_str = "choropleth"
_valid_props = {
"autocolorscale",
"coloraxis",
"colorbar",
"colorscale",
"customdata",
"customdatasrc",
"featureidkey",
"geo",
"geojson",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatefallback",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"legend",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"locationmode",
"locations",
"locationssrc",
"marker",
"meta",
"metasrc",
"name",
"reversescale",
"selected",
"selectedpoints",
"showlegend",
"showscale",
"stream",
"text",
"textsrc",
"type",
"uid",
"uirevision",
"unselected",
"visible",
"z",
"zauto",
"zmax",
"zmid",
"zmin",
"zsrc",
}
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Returns
-------
plotly.graph_objs.choropleth.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
@property
def colorscale(self):
"""
Sets the colorscale. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
bounds of the colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,Reds,Viridis,
YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
@property
def featureidkey(self):
"""
Sets the key in GeoJSON features which is used as id to match
the items included in the `locations` array. Only has an effect
when `geojson` is set. Support nested property, for example
"properties.name".
The 'featureidkey' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["featureidkey"]
@featureidkey.setter
def featureidkey(self, val):
self["featureidkey"] = val
@property
def geo(self):
"""
Sets a reference between this trace's geospatial coordinates
and a geographic map. If "geo" (the default value), the
geospatial coordinates refer to `layout.geo`. If "geo2", the
geospatial coordinates refer to `layout.geo2`, and so on.
The 'geo' property is an identifier of a particular
subplot, of type 'geo', that may be specified as the string 'geo'
optionally followed by an integer >= 1
(e.g. 'geo', 'geo1', 'geo2', 'geo3', etc.)
Returns
-------
str
"""
return self["geo"]
@geo.setter
def geo(self, val):
self["geo"] = val
@property
def geojson(self):
"""
Sets optional GeoJSON data associated with this trace. If not
given, the features on the base map are used. It can be set as
a valid GeoJSON object or as a URL string. Note that we only
accept GeoJSONs of type "FeatureCollection" or "Feature" with
geometries of type "Polygon" or "MultiPolygon".
The 'geojson' property accepts values of any type
Returns
-------
Any
"""
return self["geojson"]
@geojson.setter
def geojson(self, val):
self["geojson"] = val
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['location', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'location+z')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Returns
-------
plotly.graph_objs.choropleth.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example `<extra>%{fullData.name}</extra>`.
To hide the secondary box completely, use an empty tag
`<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
@property
def hovertemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'hovertemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["hovertemplatefallback"]
@hovertemplatefallback.setter
def hovertemplatefallback(self, val):
self["hovertemplatefallback"] = val
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
@property
def legend(self):
"""
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2", "legend3",
etc. Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces and shapes part of
the same legend group hide/show at the same time when toggling
legend items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.choropleth.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items. When
having unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and layout.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def locationmode(self):
"""
The library used by the *country names* `locationmode` option
is changing in an upcoming version. Country names in existing
plots may not work in the new version. Determines the set of
locations used to match entries in `locations` to regions on
the map. Values "ISO-3", "USA-states", *country names*
correspond to features on the base map and value "geojson-id"
corresponds to features from a custom GeoJSON linked to the
`geojson` attribute.
The 'locationmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['ISO-3', 'USA-states', 'country names', 'geojson-id']
Returns
-------
Any
"""
return self["locationmode"]
@locationmode.setter
def locationmode(self, val):
self["locationmode"] = val
@property
def locations(self):
"""
Sets the coordinates via location IDs or names. See
`locationmode` for more info.
The 'locations' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["locations"]
@locations.setter
def locations(self, val):
self["locations"] = val
@property
def locationssrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`locations`.
The 'locationssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["locationssrc"]
@locationssrc.setter
def locationssrc(self, val):
self["locationssrc"] = val
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.choropleth.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def reversescale(self):
"""
Reverses the color mapping if true. If true, `zmin` will
correspond to the last color in the array and `zmax` will
correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
@property
def selected(self):
"""
The 'selected' property is an instance of Selected
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.Selected`
- A dict of string/value properties that will be passed
to the Selected constructor
Returns
-------
plotly.graph_objs.choropleth.Selected
"""
return self["selected"]
@selected.setter
def selected(self, val):
self["selected"] = val
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
turned on for all points, whereas, any other non-array values
means no selection all where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self["selectedpoints"]
@selectedpoints.setter
def selectedpoints(self, val):
self["selectedpoints"] = val
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Returns
-------
plotly.graph_objs.choropleth.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
@property
def text(self):
"""
Sets the text elements associated with each location.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `text`.
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
@property
def uid(self):
"""
Assign an id to this trace, Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def unselected(self):
"""
The 'unselected' property is an instance of Unselected
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.Unselected`
- A dict of string/value properties that will be passed
to the Unselected constructor
Returns
-------
plotly.graph_objs.choropleth.Unselected
"""
return self["unselected"]
@unselected.setter
def unselected(self, val):
self["unselected"] = val
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def z(self):
"""
Sets the color values.
The 'z' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
@property
def zauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `z`) or the bounds set in
`zmin` and `zmax` Defaults to `false` when `zmin` and `zmax`
are set by the user.
The 'zauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["zauto"]
@zauto.setter
def zauto(self, val):
self["zauto"] = val
@property
def zmax(self):
"""
Sets the upper bound of the color domain. Value should have the
same units as in `z` and if set, `zmin` must be set as well.
The 'zmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmax"]
@zmax.setter
def zmax(self, val):
self["zmax"] = val
@property
def zmid(self):
"""
Sets the mid-point of the color domain by scaling `zmin` and/or
`zmax` to be equidistant to this point. Value should have the
same units as in `z`. Has no effect when `zauto` is `false`.
The 'zmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmid"]
@zmid.setter
def zmid(self, val):
self["zmid"] = val
@property
def zmin(self):
"""
Sets the lower bound of the color domain. Value should have the
same units as in `z` and if set, `zmax` must be set as well.
The 'zmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmin"]
@zmin.setter
def zmin(self, val):
self["zmin"] = val
@property
def zsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `z`.
The 'zsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["zsrc"]
@zsrc.setter
def zsrc(self, val):
self["zsrc"] = val
    @property
    def type(self):
        # Read-only trace-type discriminator pulled straight from the
        # underlying props dict (presumably "choropleth", set by the
        # base class — no setter is defined for it here).
        return self._props["type"]
    @property
    def _prop_descriptions(self):
        # Auto-generated plain-text catalogue of every property's help
        # text; the same entries reappear in the constructor docstring
        # below, so this is presumably consumed by the docstring/help
        # machinery — do not edit by hand.
        return """\
        autocolorscale
            Determines whether the colorscale is a default palette
            (`autocolorscale: true`) or the palette determined by
            `colorscale`. In case `colorscale` is unspecified or
            `autocolorscale` is true, the default palette will be
            chosen according to whether numbers in the `color`
            array are all positive, all negative or mixed.
        coloraxis
            Sets a reference to a shared color axis. References to
            these shared color axes are "coloraxis", "coloraxis2",
            "coloraxis3", etc. Settings for these shared color axes
            are set in the layout, under `layout.coloraxis`,
            `layout.coloraxis2`, etc. Note that multiple color
            scales can be linked to the same color axis.
        colorbar
            :class:`plotly.graph_objects.choropleth.ColorBar`
            instance or dict with compatible properties
        colorscale
            Sets the colorscale. The colorscale must be an array
            containing arrays mapping a normalized value to an rgb,
            rgba, hex, hsl, hsv, or named color string. At minimum,
            a mapping for the lowest (0) and highest (1) values are
            required. For example, `[[0, 'rgb(0,0,255)'], [1,
            'rgb(255,0,0)']]`. To control the bounds of the
            colorscale in color space, use `zmin` and `zmax`.
            Alternatively, `colorscale` may be a palette name
            string of the following list: Blackbody,Bluered,Blues,C
            ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
            and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
        customdata
            Assigns extra data each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        featureidkey
            Sets the key in GeoJSON features which is used as id to
            match the items included in the `locations` array. Only
            has an effect when `geojson` is set. Support nested
            property, for example "properties.name".
        geo
            Sets a reference between this trace's geospatial
            coordinates and a geographic map. If "geo" (the default
            value), the geospatial coordinates refer to
            `layout.geo`. If "geo2", the geospatial coordinates
            refer to `layout.geo2`, and so on.
        geojson
            Sets optional GeoJSON data associated with this trace.
            If not given, the features on the base map are used. It
            can be set as a valid GeoJSON object or as a URL
            string. Note that we only accept GeoJSONs of type
            "FeatureCollection" or "Feature" with geometries of
            type "Polygon" or "MultiPolygon".
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.choropleth.Hoverlabel`
            instance or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. Variables that can't be found
            will be replaced with the specifier. For example, a
            template of "data: %{x}, %{y}" will result in a value
            of "data: 1, %{y}" if x is 1 and y is missing.
            Variables with an undefined value will be replaced with
            the fallback value. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, all attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Anything contained in tag `<extra>` is
            displayed in the secondary box, for example
            `<extra>%{fullData.name}</extra>`. To hide the
            secondary box completely, use an empty tag
            `<extra></extra>`.
        hovertemplatefallback
            Fallback string that's displayed when a variable
            referenced in a template is missing. If the boolean
            value 'false' is passed in, the specifier with the
            missing variable will be displayed.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Same as `text`.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgroup
            Sets the legend group for this trace. Traces and shapes
            part of the same legend group hide/show at the same
            time when toggling legend items.
        legendgrouptitle
            :class:`plotly.graph_objects.choropleth.Legendgrouptitl
            e` instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When having
            unranked or equal rank items shapes would be displayed
            after traces i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        locationmode
            The library used by the *country names* `locationmode`
            option is changing in an upcoming version. Country
            names in existing plots may not work in the new
            version. Determines the set of locations used to match
            entries in `locations` to regions on the map. Values
            "ISO-3", "USA-states", *country names* correspond to
            features on the base map and value "geojson-id"
            corresponds to features from a custom GeoJSON linked to
            the `geojson` attribute.
        locations
            Sets the coordinates via location IDs or names. See
            `locationmode` for more info.
        locationssrc
            Sets the source reference on Chart Studio Cloud for
            `locations`.
        marker
            :class:`plotly.graph_objects.choropleth.Marker`
            instance or dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenues` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n[.meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        reversescale
            Reverses the color mapping if true. If true, `zmin`
            will correspond to the last color in the array and
            `zmax` will correspond to the first color.
        selected
            :class:`plotly.graph_objects.choropleth.Selected`
            instance or dict with compatible properties
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas,
            any other non-array values means no selection all where
            the `selected` and `unselected` styles have no effect.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        showscale
            Determines whether or not a colorbar is displayed for
            this trace.
        stream
            :class:`plotly.graph_objects.choropleth.Stream`
            instance or dict with compatible properties
        text
            Sets the text elements associated with each location.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        uid
            Assign an id to this trace, Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        unselected
            :class:`plotly.graph_objects.choropleth.Unselected`
            instance or dict with compatible properties
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        z
            Sets the color values.
        zauto
            Determines whether or not the color domain is computed
            with respect to the input data (here in `z`) or the
            bounds set in `zmin` and `zmax` Defaults to `false`
            when `zmin` and `zmax` are set by the user.
        zmax
            Sets the upper bound of the color domain. Value should
            have the same units as in `z` and if set, `zmin` must
            be set as well.
        zmid
            Sets the mid-point of the color domain by scaling
            `zmin` and/or `zmax` to be equidistant to this point.
            Value should have the same units as in `z`. Has no
            effect when `zauto` is `false`.
        zmin
            Sets the lower bound of the color domain. Value should
            have the same units as in `z` and if set, `zmax` must
            be set as well.
        zsrc
            Sets the source reference on Chart Studio Cloud for
            `z`.
        """
def __init__(
self,
arg=None,
autocolorscale=None,
coloraxis=None,
colorbar=None,
colorscale=None,
customdata=None,
customdatasrc=None,
featureidkey=None,
geo=None,
geojson=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatefallback=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
locationmode=None,
locations=None,
locationssrc=None,
marker=None,
meta=None,
metasrc=None,
name=None,
reversescale=None,
selected=None,
selectedpoints=None,
showlegend=None,
showscale=None,
stream=None,
text=None,
textsrc=None,
uid=None,
uirevision=None,
unselected=None,
visible=None,
z=None,
zauto=None,
zmax=None,
zmid=None,
zmin=None,
zsrc=None,
**kwargs,
):
"""
Construct a new Choropleth object
The data that describes the choropleth value-to-color mapping
is set in `z`. The geographic locations corresponding to each
value in `z` are set in `locations`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Choropleth`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.choropleth.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
featureidkey
Sets the key in GeoJSON features which is used as id to
match the items included in the `locations` array. Only
has an effect when `geojson` is set. Support nested
property, for example "properties.name".
geo
Sets a reference between this trace's geospatial
coordinates and a geographic map. If "geo" (the default
value), the geospatial coordinates refer to
`layout.geo`. If "geo2", the geospatial coordinates
refer to `layout.geo2`, and so on.
geojson
Sets optional GeoJSON data associated with this trace.
If not given, the features on the base map are used. It
can be set as a valid GeoJSON object or as a URL
string. Note that we only accept GeoJSONs of type
"FeatureCollection" or "Feature" with geometries of
type "Polygon" or "MultiPolygon".
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.choropleth.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.choropleth.Legendgrouptitl
e` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
locationmode
The library used by the *country names* `locationmode`
option is changing in an upcoming version. Country
names in existing plots may not work in the new
version. Determines the set of locations used to match
entries in `locations` to regions on the map. Values
"ISO-3", "USA-states", *country names* correspond to
features on the base map and value "geojson-id"
corresponds to features from a custom GeoJSON linked to
the `geojson` attribute.
locations
Sets the coordinates via location IDs or names. See
`locationmode` for more info.
locationssrc
Sets the source reference on Chart Studio Cloud for
`locations`.
marker
:class:`plotly.graph_objects.choropleth.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
reversescale
Reverses the color mapping if true. If true, `zmin`
will correspond to the last color in the array and
`zmax` will correspond to the first color.
selected
:class:`plotly.graph_objects.choropleth.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.choropleth.Stream`
instance or dict with compatible properties
text
Sets the text elements associated with each location.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.choropleth.Unselected`
instance or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
z
Sets the color values.
zauto
Determines whether or not the color domain is computed
with respect to the input data (here in `z`) or the
bounds set in `zmin` and `zmax` Defaults to `false`
when `zmin` and `zmax` are set by the user.
zmax
Sets the upper bound of the color domain. Value should
have the same units as in `z` and if set, `zmin` must
be set as well.
zmid
Sets the mid-point of the color domain by scaling
`zmin` and/or `zmax` to be equidistant to this point.
Value should have the same units as in `z`. Has no
effect when `zauto` is `false`.
zmin
Sets the lower bound of the color domain. Value should
have the same units as in `z` and if set, `zmax` must
be set as well.
zsrc
Sets the source reference on Chart Studio Cloud for
`z`.
Returns
-------
Choropleth
"""
super().__init__("choropleth")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.Choropleth
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Choropleth`""")
if locationmode == "country names" and kwargs.get("_validate"):
warnings.warn(
"The library used by the *country names* `locationmode` option is changing in an upcoming version. "
"Country names in existing plots may not work in the new version. "
"To ensure consistent behavior, consider setting `locationmode` to *ISO-3*.",
DeprecationWarning,
stacklevel=5,
)
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("autocolorscale", arg, autocolorscale)
self._set_property("coloraxis", arg, coloraxis)
self._set_property("colorbar", arg, colorbar)
self._set_property("colorscale", arg, colorscale)
self._set_property("customdata", arg, customdata)
self._set_property("customdatasrc", arg, customdatasrc)
self._set_property("featureidkey", arg, featureidkey)
self._set_property("geo", arg, geo)
self._set_property("geojson", arg, geojson)
self._set_property("hoverinfo", arg, hoverinfo)
self._set_property("hoverinfosrc", arg, hoverinfosrc)
self._set_property("hoverlabel", arg, hoverlabel)
self._set_property("hovertemplate", arg, hovertemplate)
self._set_property("hovertemplatefallback", arg, hovertemplatefallback)
self._set_property("hovertemplatesrc", arg, hovertemplatesrc)
self._set_property("hovertext", arg, hovertext)
self._set_property("hovertextsrc", arg, hovertextsrc)
self._set_property("ids", arg, ids)
self._set_property("idssrc", arg, idssrc)
self._set_property("legend", arg, legend)
self._set_property("legendgroup", arg, legendgroup)
self._set_property("legendgrouptitle", arg, legendgrouptitle)
self._set_property("legendrank", arg, legendrank)
self._set_property("legendwidth", arg, legendwidth)
self._set_property("locationmode", arg, locationmode)
self._set_property("locations", arg, locations)
self._set_property("locationssrc", arg, locationssrc)
self._set_property("marker", arg, marker)
self._set_property("meta", arg, meta)
self._set_property("metasrc", arg, metasrc)
self._set_property("name", arg, name)
self._set_property("reversescale", arg, reversescale)
self._set_property("selected", arg, selected)
self._set_property("selectedpoints", arg, selectedpoints)
self._set_property("showlegend", arg, showlegend)
self._set_property("showscale", arg, showscale)
self._set_property("stream", arg, stream)
self._set_property("text", arg, text)
self._set_property("textsrc", arg, textsrc)
self._set_property("uid", arg, uid)
self._set_property("uirevision", arg, uirevision)
self._set_property("unselected", arg, unselected)
self._set_property("visible", arg, visible)
self._set_property("z", arg, z)
self._set_property("zauto", arg, zauto)
self._set_property("zmax", arg, zmax)
self._set_property("zmid", arg, zmid)
self._set_property("zmin", arg, zmin)
self._set_property("zsrc", arg, zsrc)
self._props["type"] = "choropleth"
arg.pop("type", None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Choropleth |
python | dask__dask | dask/dataframe/dask_expr/_str_accessor.py | {
"start": 3379,
"end": 3717
} | class ____(Blockwise):
_parameters = ["frame", "sep", "na_rep"]
_keyword_only = ["sep", "na_rep"]
@property
def _args(self) -> list:
return [self.frame] + self.operands[len(self._parameters) :]
@staticmethod
def operation(ser, *args, **kwargs):
return ser.str.cat(list(args), **kwargs)
| CatBlockwise |
python | numba__numba | numba/core/bytecode.py | {
"start": 8571,
"end": 9253
} | class ____(object):
def __init__(self, code):
self.code = code
self.iter = iter(_patched_opargs(_unpack_opargs(self.code.co_code)))
def __iter__(self):
return self
def _fetch_opcode(self):
return next(self.iter)
def next(self):
offset, opcode, arg, nextoffset = self._fetch_opcode()
return offset, ByteCodeInst(offset=offset, opcode=opcode, arg=arg,
nextoffset=nextoffset)
__next__ = next
def read_arg(self, size):
buf = 0
for i in range(size):
_offset, byte = next(self.iter)
buf |= byte << (8 * i)
return buf
| ByteCodeIter |
python | has2k1__plotnine | plotnine/scales/scale_linetype.py | {
"start": 310,
"end": 782
} | class ____(scale_discrete):
"""
Scale for line patterns
Notes
-----
The available linetypes are
`'solid', 'dashed', 'dashdot', 'dotted'`
If you need more custom linetypes, use
[](`~plotnine.scales.scale_linetype_manual`)
"""
_aesthetics = ["linetype"]
def __post_init__(self):
from mizani.palettes import manual_pal
super().__post_init__()
self.palette = manual_pal(LINETYPES)
@dataclass
| scale_linetype |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/extra/django/_impl.py | {
"start": 988,
"end": 1433
} | class ____:
def setup_example(self):
self._pre_setup()
def teardown_example(self, example):
self._post_teardown()
def __call__(self, result=None):
testMethod = getattr(self, self._testMethodName)
if getattr(testMethod, "is_hypothesis_test", False):
return unittest.TestCase.__call__(self, result)
else:
return dt.SimpleTestCase.__call__(self, result)
| HypothesisTestCase |
python | ray-project__ray | doc/source/ray-core/doc_code/cgraph_quickstart.py | {
"start": 3074,
"end": 3943
} | class ____:
def echo(self, msg):
return msg
actors = [EchoActor.remote() for _ in range(4)]
with InputNode() as inp:
outputs = [actor.echo.bind(inp) for actor in actors]
dag = MultiOutputNode(outputs)
compiled_dag = dag.experimental_compile()
# Kill one of the actors to simulate unexpected actor death.
ray.kill(actors[0])
ref = compiled_dag.execute(1)
live_actors = []
try:
ray.get(ref)
except ray.exceptions.ActorDiedError:
# At this point, the Compiled Graph is shutting down.
for actor in actors:
try:
# Check for live actors.
ray.get(actor.echo.remote("ping"))
live_actors.append(actor)
except ray.exceptions.RayActorError:
pass
# Optionally, use the live actors to create a new Compiled Graph.
assert live_actors == actors[1:]
# __cgraph_actor_death_end__
| EchoActor |
python | mlflow__mlflow | mlflow/store/artifact/databricks_artifact_repo_resources.py | {
"start": 763,
"end": 832
} | class ____(Enum):
READ = 1
WRITE = 2
@dataclass
| _CredentialType |
python | PyCQA__pylint | tests/functional/r/regression/regression_issue_4633.py | {
"start": 341,
"end": 466
} | class ____:
def whatever(self):
test_var = Ham()
while not test_var.queue.empty():
pass
| SecondHam |
python | PyCQA__pylint | tests/functional/u/useless/useless_parent_delegation.py | {
"start": 14992,
"end": 15060
} | class ____:
def __init__(*, tastes_bitter=None):
...
| Fruit |
python | huggingface__transformers | src/transformers/models/smollm3/modular_smollm3.py | {
"start": 1449,
"end": 10530
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`SmolLM3Model`]. It is used to instantiate a
SmolLM3 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the SmolLM3 3B.
e.g. [HuggingFaceTB/SmolLM3-3B](https://huggingface.co/HuggingFaceTB/SmolLM3-3B)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 128256):
Vocabulary size of the SmolLM3 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`SmolLM3Model`]
hidden_size (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 36):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 4):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details checkout [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `16`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 32768):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 128004):
The id of the padding token.
bos_token_id (`int`, *optional*, defaults to 128000):
The id of the beginning of sentence token.
eos_token_id (`int`, *optional*, defaults to 128001):
The id of the end of sentence token.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
use_sliding_window (`bool`, *optional*, defaults to `False`):
Whether to use sliding window attention.
sliding_window (`int`, *optional*):
Sliding window attention (SWA) window size. If not specified, will default to `None`.
no_rope_layers (`List[int]`, *optional*):
List with at least the same length as the number of layers in the model.
A `1` at an index position indicates that the corresponding layer will use RoPE,
while a `0` indicates that it's a NoPE layer.
no_rope_layer_interval (`int`, *optional*, defaults to 4):
If `no_rope_layers` is `None`, it will be created using a NoPE layer every
`no_rope_layer_interval` layers.
layer_types (`list`, *optional*):
Attention pattern for each layer. Automatically computed based on sliding window and NoPE settings.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
```python
>>> from transformers import SmolLM3Model, SmolLM3Config
>>> # Initializing a SmolLM3 style configuration
>>> configuration = SmolLM3Config()
>>> # Initializing a model from the SmolLM3 style configuration
>>> model = SmolLM3Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "smollm3"
keys_to_ignore_at_inference = ["past_key_values"]
default_theta = 2000000.0
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 128256,
hidden_size: Optional[int] = 2048,
intermediate_size: Optional[int] = 11008,
num_hidden_layers: Optional[int] = 36,
num_attention_heads: Optional[int] = 16,
num_key_value_heads: Optional[int] = 4,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 32768,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-6,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = 128004,
bos_token_id: Optional[int] = 128000,
eos_token_id: Optional[int] = 128001,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
use_sliding_window: Optional[bool] = False,
sliding_window: Optional[int] = None,
no_rope_layers: Optional[int] = None,
no_rope_layer_interval: Optional[int] = 4,
layer_types: Optional[int] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
mlp_bias: Optional[bool] = False,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.mlp_bias = mlp_bias
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.use_sliding_window = use_sliding_window
self.sliding_window = sliding_window
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
if no_rope_layers is None:
self.no_rope_layers = [
int((layer_idx + 1) % no_rope_layer_interval != 0) for layer_idx in range(num_hidden_layers)
]
else:
self.no_rope_layers = no_rope_layers
self.no_rope_layer_interval = no_rope_layer_interval
# Update layer_types based on sliding window and NoPE pattern
if layer_types is None:
layer_types = []
for layer_idx in range(num_hidden_layers):
has_rope = self.no_rope_layers[layer_idx]
if use_sliding_window and sliding_window is not None and not has_rope:
layer_types.append("sliding_attention")
else:
layer_types.append("full_attention")
self.layer_types = layer_types
layer_type_validation(self.layer_types, self.num_hidden_layers)
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
**kwargs,
)
| SmolLM3Config |
python | squidfunk__mkdocs-material | material/extensions/preview.py | {
"start": 1691,
"end": 5611
} | class ____(Treeprocessor):
"""
A Markdown treeprocessor to enable instant previews on links.
Note that this treeprocessor is dependent on the `relpath` treeprocessor
registered programmatically by MkDocs before rendering a page.
"""
def __init__(self, md: Markdown, config: dict):
"""
Initialize the treeprocessor.
Arguments:
md: The Markdown instance.
config: The configuration.
"""
super().__init__(md)
self.config = config
def run(self, root: Element):
"""
Run the treeprocessor.
Arguments:
root: The root element of the parsed Markdown document.
"""
at = self.md.treeprocessors.get_index_for_name("relpath")
# Hack: Python Markdown has no notion of where it is, i.e., which file
# is being processed. This seems to be a deliberate design decision, as
# it is not possible to access the file path of the current page, but
# it might also be an oversight that is now impossible to fix. However,
# since this extension is only useful in the context of Material for
# MkDocs, we can assume that the _RelativePathTreeprocessor is always
# present, telling us the file path of the current page. If that ever
# changes, we would need to wrap this extension in a plugin, but for
# the time being we are sneaky and will probably get away with it.
processor = self.md.treeprocessors[at]
if not isinstance(processor, _RelativePathTreeprocessor):
raise TypeError("Relative path processor not registered")
# Normalize configurations
configurations = self.config["configurations"]
configurations.append({
"sources": self.config.get("sources"),
"targets": self.config.get("targets")
})
# Walk through all configurations - @todo refactor so that we don't
# iterate multiple times over the same elements
for configuration in configurations:
# Skip, if the configuration defines nothing – we could also fix
# this in the file filter, but we first fix it here and check if
# it generalizes well enough to other inclusion/exclusion sites,
# because here, it would hinder the ability to automaticaly
# include all sources, while excluding specific targets.
if (
not configuration.get("sources") and
not configuration.get("targets")
):
continue
# Skip if page should not be considered
filter = get_filter(configuration, "sources")
if not filter(processor.file):
continue
# Walk through all links and add preview attributes
filter = get_filter(configuration, "targets")
for el in root.iter("a"):
href = el.get("href")
if not href:
continue
# Skip footnotes
if "footnote-ref" in el.get("class", ""):
continue
# Skip external links
url = urlparse(href)
if url.scheme or url.netloc:
continue
# Add preview attribute to internal links
for path in processor._possible_target_uris(
processor.file, url.path,
processor.config.use_directory_urls
):
target = processor.files.get_file_from_path(path)
if not target:
continue
# Include, if filter matches
if filter(target):
el.set("data-preview", "")
# -----------------------------------------------------------------------------
| PreviewProcessor |
python | charliermarsh__ruff | scripts/ty_benchmark/src/benchmark/__init__.py | {
"start": 237,
"end": 476
} | class ____(NamedTuple):
name: str
"""The name of the command to benchmark."""
command: list[str]
"""The command to benchmark."""
prepare: str | None = None
"""The command to run before each benchmark run."""
| Command |
python | pytorch__pytorch | torch/_export/passes/lift_constants_pass.py | {
"start": 646,
"end": 17478
} | class ____(collections.abc.MutableMapping):
"""A mapping class that understands how to use module constants (tensors,
ScriptObjects, FakeScriptObjects) as keys. We store tensors and FakeScriptObjects normally,
but ScriptObjects are stored by hash, because different torch.ScriptObjects can point to
the same underlying value (but we guarantee that they will `hash()` to the same value
if that's the case).
"""
def __init__(self) -> None:
# Underlying dict that we use to implement this mapping.
self._constant_attrs: dict[
Union[int, torch.Tensor, FakeScriptObject, torch.utils._pytree.TreeSpec],
list[Any],
] = {}
# Map from the hash(ScriptObject) to the ScriptObject itself. Used for
# APIs like `__iter__` that should look like they're returning the
# original ScriptObjects.
self._script_object_map: dict[int, torch.ScriptObject] = {}
def __getitem__(self, key: _ConstantAttributeType) -> Any:
real_key = hash(key) if isinstance(key, torch.ScriptObject) else key
assert isinstance(real_key, (int, torch.Tensor, FakeScriptObject))
return self._constant_attrs[real_key]
def __setitem__(self, key: _ConstantAttributeType, value):
# we shouldn't actually call this, should go to add() instead to handle aliasing
raise NotImplementedError(
"""Directly setting values for ConstantAttrMap is not supported, please use add(key, value) instead.
The same key can be mapped to multiple values, for handling constant aliasing."""
)
def add(self, key: _ConstantAttributeType, value: Any) -> None:
if isinstance(key, torch.ScriptObject):
if hash(key) not in self._constant_attrs:
self._constant_attrs[hash(key)] = []
self._constant_attrs[hash(key)].append(value)
self._script_object_map[hash(key)] = key
elif isinstance(key, (torch.Tensor, FakeScriptObject)):
if key not in self._constant_attrs:
self._constant_attrs[key] = []
self._constant_attrs[key].append(value)
else:
raise TypeError(
f"Expected key to be a tensor or ScriptObject, got {type(key)}"
)
def __delitem__(self, key: _ConstantAttributeType):
real_key = hash(key) if isinstance(key, torch.ScriptObject) else key
del self._constant_attrs[real_key]
def __iter__(self):
for key in self._constant_attrs:
if isinstance(key, int):
yield self._script_object_map[key]
else:
yield key
def __len__(self):
return len(self._constant_attrs)
def __contains__(self, key: object) -> bool:
real_key = hash(key) if isinstance(key, torch.ScriptObject) else key
return real_key in self._constant_attrs
def get_constant_fqn(node: torch.fx.Node, constant_name: str) -> str:
# The FQN of the constant tensor in the state dict should
# correspond to the module where the constant tensor was
# originally used.
if len(node.meta["nn_module_stack"]) == 0:
return constant_name
parent_fqn = list(node.meta["nn_module_stack"].values())[-1][0]
if len(parent_fqn) > 0:
return f"{parent_fqn}.{constant_name}"
else:
return constant_name
def _get_first_fqn(
const_attrs: ConstantAttrMap,
key: _ConstantAttributeType,
) -> Any:
fqns = const_attrs.get(key)
return fqns[0] if fqns else None
def _unused_constant(node: torch.fx.Node) -> Optional[list[torch.fx.Node]]:
"""
If there is a tensor constant created while tracing, here is how the graph
looks like:
%_tensor_constant0 : [num_users=1] = get_attr[target=_tensor_constant0]
%lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%_tensor_constant0,))
%detach_ : [num_users=?] = call_function[target=torch.ops.aten.detach_.default](args = (%lift_fresh_copy,))
To check to see if the tensor constant is being used, we want to traverse to
the detach node to see if it's actually being used.
This function returns None if this constant is being used, otherwise it returns the
lift_fresh and detach node to be removed later.
""" # noqa: B950
if len(node.users) > 1:
return None
lift_fresh_node = next(iter(node.users.keys()))
if not (
lift_fresh_node.op == "call_function"
and lift_fresh_node.target
in (
torch.ops.aten.lift_fresh.default,
torch.ops.aten.lift_fresh_copy.default,
)
):
return None
if len(lift_fresh_node.users) > 1:
return None
# Case 1: lift node is not used anywhere
if len(lift_fresh_node.users) == 0:
return [lift_fresh_node, node]
detach_node = next(iter(lift_fresh_node.users.keys()))
if not (
detach_node.op == "call_function"
and detach_node.target
in (
torch.ops.aten.detach_.default,
torch.ops.aten.detach.default,
)
):
return None
if len(detach_node.users) > 0:
return None
else:
# Case 2: Lift node's child is not used anywhere
return [detach_node, lift_fresh_node, node]
def lift_constants_pass(
gm: torch.fx.GraphModule,
graph_signature: ExportGraphSignature,
constant_attrs: ConstantAttrMap,
) -> dict[str, _ConstantAttributeType]:
"""
Takes a graph module, graph signature, and modifies them inplace to lift any
constants (tensors or custom classes) as inputs to the graph. Returns a
dictionary of names to constants.
Arguments:
gm (torch.fx.GraphModule): The graph module containing the graph and constants to lift.
graph_signature (ExportGraphSignature): This graph signature will be
mutated to add additional CONSTANT_TENSOR and CUSTOM_OBJ inputs.
constant_attrs (ConstantAttr): A mapping from a constant value to its
fully-qualified path in `gm`. This is used to maintain consistent
location of constants between the original module and the exported
version.
Returns:
A dictionary of fqn => constant value.
"""
all_constants: dict[str, _ConstantAttributeType] = {}
input_specs = graph_signature.input_specs
num_custom_obj = sum(
input_spec.kind == InputKind.CUSTOM_OBJ for input_spec in input_specs
)
num_tensor_constants = sum(
input_spec.kind == InputKind.CONSTANT_TENSOR for input_spec in input_specs
)
fake_mode = detect_fake_mode(
tuple(node.meta["val"] for node in gm.graph.nodes if node.op == "placeholder")
)
first_user_input_loc, first_user_input = 0, next(iter(gm.graph.nodes))
used_target_names = set()
input_nodes = [node for node in gm.graph.nodes if node.op == "placeholder"]
assert len(input_nodes) == len(input_specs)
for i, (node, input_spec) in enumerate(zip(input_nodes, input_specs)):
used_target_names.add(input_spec.target)
if input_spec.kind == InputKind.USER_INPUT:
first_user_input = node
first_user_input_loc = i
break
lifted_objs = ConstantAttrMap()
renamed_targets = {}
for node in list(gm.graph.nodes):
if node.op == "get_attr":
if nodes_to_remove := _unused_constant(node):
# Remove the node if it's not being used
for node_rm in nodes_to_remove:
gm.graph.erase_node(node_rm)
continue
constant_val = _get_attr(gm, node.target)
# These are not hashable and not gonna be lifted
# so we can skip them earlier
if isinstance(constant_val, torch.fx.GraphModule):
continue
if "LoweredBackendModule" in type(constant_val).__name__:
continue
if "AOTInductorRunnerWrapper" in type(constant_val).__name__:
continue
if isinstance(constant_val, torch.utils._pytree.TreeSpec):
continue
if constant_val in lifted_objs:
# We already lifted this constant elsewhere. Just rewrite uses
# of this get_attr to point to the already-existing placeholder
# node.
const_placeholder_node = _get_first_fqn(lifted_objs, constant_val)
node.replace_all_uses_with(const_placeholder_node)
gm.graph.erase_node(node)
renamed_targets[node.name] = const_placeholder_node.name
continue
# For ScriptObject, Tensor and FakeScriptObject constants:
# First check if the constant was an attribute on some module by
# consulting `constant_attrs` map. If it is, use the fqn that keeps
# its location consistent with the eager module.
#
# If it's not in the `constant_attrs` map, that means it's an inline
# constant (e.g. x + torch.tensor(0)), and thus did not have a
# specific location in the eager module. In that case, just generate
# some name and attach it to the module in which it was used.
if isinstance(constant_val, (torch.ScriptObject, FakeScriptObject)):
constant_kind = InputKind.CUSTOM_OBJ
constant_fqn = _get_first_fqn(constant_attrs, constant_val)
if constant_fqn is not None:
constant_name = constant_fqn.replace(".", "_")
else:
constant_name = f"lifted_custom_{num_custom_obj}"
constant_fqn = get_constant_fqn(node, constant_name)
while constant_fqn in used_target_names:
num_custom_obj += 1
constant_name = f"lifted_custom_{num_custom_obj}"
constant_fqn = get_constant_fqn(node, constant_name)
num_custom_obj += 1
elif isinstance(constant_val, torch.Tensor):
# Remove the parameterness of constant_val
if isinstance(constant_val, torch.nn.Parameter):
log.debug(
"%s created when tracing %s is a parameter. But "
"it's not registered with register_parameter(). export will treat it as a constant tensor",
str(node.target),
str(node.meta.get("stack_trace", "<unknown stack>")),
)
# We get the real data out of the parameter by disabling the surrounding fake mode.
with unset_fake_temporarily():
constant_val = constant_val.data
constant_kind = InputKind.CONSTANT_TENSOR
constant_fqn = _get_first_fqn(constant_attrs, constant_val)
if constant_fqn is not None:
constant_name = constant_fqn.replace(".", "_")
else:
constant_name = f"lifted_tensor_{num_tensor_constants}"
constant_fqn = get_constant_fqn(node, constant_name)
while constant_fqn in used_target_names:
num_tensor_constants += 1
constant_name = f"lifted_tensor_{num_tensor_constants}"
constant_fqn = get_constant_fqn(node, constant_name)
num_tensor_constants += 1
else:
raise SpecViolationError(
f"getattr node {node} referencing unsupported type {type(constant_val)}"
)
with gm.graph.inserting_before(first_user_input):
# Insert the constant node before the first user input
const_placeholder_node = gm.graph.placeholder(constant_name)
# match target name with its node name in case there is name collision
# and suffix is added to node name in fx
const_placeholder_node.target = const_placeholder_node.name
for k, v in node.meta.items():
const_placeholder_node.meta[k] = v
# Once the FQN has been used, remove nn_module_stack, stack_trace
const_placeholder_node.meta.pop("nn_module_stack")
const_placeholder_node.meta.pop("stack_trace", None)
input_spec_arg: ArgumentSpec
if isinstance(constant_val, torch.Tensor):
if fake_mode is not None:
const_placeholder_node.meta["val"] = fake_mode.from_tensor(
constant_val, static_shapes=True
)
const_placeholder_node.meta["val"].constant = constant_val
else:
const_placeholder_node.meta["val"] = constant_val
input_spec_arg = TensorArgument(name=const_placeholder_node.name)
elif isinstance(constant_val, torch._C.ScriptObject):
class_fqn = constant_val._type().qualified_name() # type: ignore[attr-defined]
const_placeholder_node.meta["val"] = CustomObjArgument(
constant_fqn, class_fqn
)
input_spec_arg = CustomObjArgument(
name=const_placeholder_node.name, class_fqn=class_fqn
)
elif isinstance(constant_val, FakeScriptObject):
class_fqn = constant_val.script_class_name
const_placeholder_node.meta["val"] = CustomObjArgument(
constant_fqn, class_fqn, constant_val
)
input_spec_arg = CustomObjArgument(
name=const_placeholder_node.name,
class_fqn=class_fqn,
fake_val=constant_val,
)
else:
raise SpecViolationError(
f"tried to lift unsupported type {type(constant_val)} from node {node.format_node()}"
)
lifted_objs.add(constant_val, const_placeholder_node)
node.replace_all_uses_with(const_placeholder_node)
gm.graph.erase_node(node)
renamed_targets[node.name] = const_placeholder_node.name
# Add the constant as a buffer to the graph signature
graph_signature.input_specs.insert(
first_user_input_loc,
InputSpec(
kind=constant_kind,
arg=input_spec_arg,
target=constant_fqn,
),
)
if constant_val in constant_attrs:
for fqn in constant_attrs[constant_val]:
all_constants[fqn] = constant_val
else:
all_constants[constant_fqn] = constant_val
first_user_input_loc += 1
for spec in graph_signature.output_specs:
if spec.arg.name in renamed_targets:
spec.arg.name = renamed_targets[spec.arg.name]
return all_constants
def rewrite_script_object_meta(
gm: torch.fx.GraphModule,
) -> dict[str, _ConstantAttributeType]:
"""When tracing, we produce a graph with FakeScriptObject in the
meta["val"].
For now, we rewrie meta["val"] to be a placeholder CustomObjArgument
"""
constants: dict[
str,
_ConstantAttributeType,
] = {}
for node in gm.graph.nodes:
if "val" not in node.meta:
continue
old_meta = node.meta["val"]
if isinstance(old_meta, torch.ScriptObject):
class_fqn = old_meta._type().qualified_name() # type: ignore[attr-defined]
new_meta = CustomObjArgument(node.name, class_fqn)
constants[node.name] = old_meta
node.meta["val"] = new_meta
elif isinstance(old_meta, FakeScriptObject):
class_fqn = old_meta.script_class_name # type: ignore[attr-defined]
new_meta = CustomObjArgument(node.name, class_fqn, old_meta)
constants[node.name] = old_meta
node.meta["val"] = new_meta
return constants
def _materialize_and_lift_constants(
gm: torch.fx.GraphModule,
export_graph_signature: ExportGraphSignature,
constant_attrs: ConstantAttrMap,
) -> dict[str, _ConstantAttributeType]:
constants = rewrite_script_object_meta(gm)
constants.update(lift_constants_pass(gm, export_graph_signature, constant_attrs))
return constants
| ConstantAttrMap |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 56331,
"end": 56923
} | class ____:
# sfx is sf(x). The values were computed with mpmath:
#
# from mpmath import mp
# mp.dps = 100
# def gibrat_sf(x):
# return 1 - mp.ncdf(mp.log(x))
#
# E.g.
#
# >>> float(gibrat_sf(1.5))
# 0.3425678305148459
#
@pytest.mark.parametrize('x, sfx', [(1.5, 0.3425678305148459),
(5000, 8.173334352522493e-18)])
def test_sf_isf(self, x, sfx):
assert_allclose(stats.gibrat.sf(x), sfx, rtol=2e-14)
assert_allclose(stats.gibrat.isf(sfx), x, rtol=2e-14)
| TestGibrat |
python | huggingface__transformers | src/transformers/models/albert/modeling_albert.py | {
"start": 8424,
"end": 9924
} | class ____(nn.Module):
def __init__(self, config: AlbertConfig):
super().__init__()
self.config = config
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attention = AlbertAttention(config)
self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)
self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)
self.activation = ACT2FN[config.hidden_act]
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
attention_output, _ = self.attention(hidden_states, attention_mask, **kwargs)
ffn_output = apply_chunking_to_forward(
self.ff_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output,
)
hidden_states = self.full_layer_layer_norm(ffn_output + attention_output)
return hidden_states
def ff_chunk(self, attention_output: torch.Tensor) -> torch.Tensor:
ffn_output = self.ffn(attention_output)
ffn_output = self.activation(ffn_output)
ffn_output = self.ffn_output(ffn_output)
return ffn_output
| AlbertLayer |
python | huggingface__transformers | tests/models/cvt/test_modeling_cvt.py | {
"start": 1342,
"end": 1614
} | class ____(ConfigTester):
def create_and_test_config_common_properties(self):
config = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(config, "embed_dim"))
self.parent.assertTrue(hasattr(config, "num_heads"))
| CvtConfigTester |
python | pytorch__pytorch | test/inductor/extension_backends/triton/device_interface.py | {
"start": 144,
"end": 369
} | class ____:
def __init__(self) -> None:
self.major = 8 # TODO: bypass check for H100 in triton_heuristics.py
self.max_threads_per_multi_processor = 1
self.multi_processor_count = 80
| DeviceProperties |
python | pytorch__pytorch | torch/_dynamo/source.py | {
"start": 10606,
"end": 11496
} | class ____(ChainedSource):
member: str
def __post_init__(self) -> None:
assert self.base, "Can't construct an AttrSource without a valid base source"
if "." in self.member:
member_parts = self.member.split(".")
object.__setattr__(
self, "base", AttrSource(self.base, ".".join(member_parts[:-1]))
)
object.__setattr__(self, "member", member_parts[-1])
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen(self.base)
codegen.extend_output(codegen.create_load_attrs(self.member))
def guard_source(self) -> GuardSource:
return self.base.guard_source()
def name(self) -> str:
return f"object.__getattribute__({self.base.name()}, {self.member!r})"
# Represents obj.__dict__ where obj is a type object
@dataclasses.dataclass(frozen=True)
| GenericAttrSource |
python | pytorch__pytorch | torch/distributed/checkpoint/_pg_transport.py | {
"start": 771,
"end": 1167
} | class ____:
"""
This is the metadata for a tensor that is used to transfer checkpoints.
It contains the shape, the dtype, the storage offset and the stride of the
tensor.
This must be pickleable so that it can be sent over the wire.
"""
shape: torch.Size
dtype: torch.dtype
storage_offset: int
stride: tuple[int, ...]
nbytes: int
@dataclass
| _TensorMeta |
python | pytorch__pytorch | test/dynamo/test_base_output.py | {
"start": 369,
"end": 2363
} | class ____(torch._dynamo.test_case.TestCase):
@maybe_skip
def test_create(self):
def fn(a):
tmp = unet_2d.UNet2DOutput(a + 1)
return tmp
torch._dynamo.testing.standard_test(self, fn=fn, nargs=1, expected_ops=1)
@maybe_skip
def test_assign(self):
def fn(a):
tmp = unet_2d.UNet2DOutput(a + 1)
tmp.sample = a + 2
return tmp
args = [torch.randn(10)]
obj1 = fn(*args)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch._dynamo.optimize_assert(cnts)(fn)
obj2 = opt_fn(*args)
self.assertTrue(same(obj1.sample, obj2.sample))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 2)
def _common(self, fn, op_count):
args = [
unet_2d.UNet2DOutput(
sample=torch.randn(10),
)
]
obj1 = fn(*args)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch._dynamo.optimize_assert(cnts)(fn)
obj2 = opt_fn(*args)
self.assertTrue(same(obj1, obj2))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, op_count)
@maybe_skip
def test_getattr(self):
def fn(obj: unet_2d.UNet2DOutput):
x = obj.sample * 10
return x
self._common(fn, 1)
@maybe_skip
def test_getitem(self):
def fn(obj: unet_2d.UNet2DOutput):
x = obj["sample"] * 10
return x
self._common(fn, 1)
@maybe_skip
def test_tuple(self):
def fn(obj: unet_2d.UNet2DOutput):
a = obj.to_tuple()
return a[0] * 10
self._common(fn, 1)
@maybe_skip
def test_index(self):
def fn(obj: unet_2d.UNet2DOutput):
return obj[0] * 10
self._common(fn, 1)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| TestBaseOutput |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-to-cut-a-stick.py | {
"start": 33,
"end": 579
} | class ____(object):
def minCost(self, n, cuts):
"""
:type n: int
:type cuts: List[int]
:rtype: int
"""
sorted_cuts = sorted(cuts + [0, n])
dp = [[0]*len(sorted_cuts) for _ in xrange(len(sorted_cuts))]
for l in xrange(2, len(sorted_cuts)):
for i in xrange(len(sorted_cuts)-l):
dp[i][i+l] = min(dp[i][j]+dp[j][i+l] for j in xrange(i+1, i+l)) + \
sorted_cuts[i+l]-sorted_cuts[i]
return dp[0][len(sorted_cuts)-1]
| Solution |
python | pytorch__pytorch | tools/experimental/torchfuzz/checks.py | {
"start": 832,
"end": 2187
} | class ____(Check):
"""Check that runs eager and compiled, compares forward numerics."""
def codegen(self, args_tuple: str) -> list[str]:
return [
f"args = {args_tuple}",
"out_eager = fuzzed_program(*args)",
"out_eager.sum().backward()",
"print('Eager Success! ✅')",
"compiled_program = torch.compile(fuzzed_program, fullgraph=True, dynamic=True)",
"out_compiled = compiled_program(*args)",
"out_compiled.sum().backward()",
"print('Compile Success! ✅')",
"out_eager_sum = out_eager.sum()",
"out_compiled_sum = out_compiled.sum()",
"diff = (out_eager_sum - out_compiled_sum).abs().item()",
"rel_diff = diff / (out_eager_sum.abs().item() + 1e-12) * 100",
"print(f'Relative diff (sum): {rel_diff:.6f}%')",
"if rel_diff > 5 and diff > 1:",
" print(f'❌ Forward output sums differ significantly (relative and absolute)!')",
" print('out_eager_sum:', out_eager_sum.item())",
" print('out_compiled_sum:', out_compiled_sum.item())",
" print('Absolute diff:', diff)",
" print('Relative diff (%):', rel_diff)",
" import sys; sys.exit(1)",
]
| EagerVsFullGraphDynamicCompileWithNumericsCheck |
python | pydata__xarray | xarray/backends/h5netcdf_.py | {
"start": 3072,
"end": 14857
} | class ____(WritableCFDataStore):
"""Store for reading and writing data via h5netcdf"""
__slots__ = (
"_filename",
"_group",
"_manager",
"_mode",
"autoclose",
"format",
"is_remote",
"lock",
)
def __init__(
self,
manager: FileManager | h5netcdf.File | h5netcdf.Group,
group=None,
mode=None,
format="NETCDF4",
lock=HDF5_LOCK,
autoclose=False,
):
import h5netcdf
if isinstance(manager, h5netcdf.File | h5netcdf.Group):
if group is None:
root, group = find_root_and_group(manager)
else:
if type(manager) is not h5netcdf.File:
raise ValueError(
"must supply a h5netcdf.File if the group argument is provided"
)
root = manager
manager = DummyFileManager(root)
self._manager = manager
self._group = group
self._mode = mode
self.format = format or "NETCDF4"
# todo: utilizing find_root_and_group seems a bit clunky
# making filename available on h5netcdf.Group seems better
self._filename = find_root_and_group(self.ds)[0].filename
self.is_remote = is_remote_uri(self._filename)
self.lock = ensure_lock(lock)
self.autoclose = autoclose
def get_child_store(self, group: str) -> Self:
if self.format == "NETCDF4_CLASSIC":
raise ValueError("Cannot create sub-groups in `NETCDF4_CLASSIC` format.")
if self._group is not None:
group = os.path.join(self._group, group)
return type(self)(
self._manager,
group=group,
mode=self._mode,
lock=self.lock,
autoclose=self.autoclose,
)
@classmethod
def open(
cls,
filename,
mode="r",
format="NETCDF4",
group=None,
lock=None,
autoclose=False,
invalid_netcdf=None,
phony_dims=None,
decode_vlen_strings=True,
driver=None,
driver_kwds=None,
storage_options: dict[str, Any] | None = None,
):
import h5netcdf
if isinstance(filename, str) and is_remote_uri(filename) and driver is None:
mode_ = "rb" if mode == "r" else mode
filename = _open_remote_file(
filename, mode=mode_, storage_options=storage_options
)
if isinstance(filename, BytesIOProxy):
source = filename
filename = io.BytesIO()
source.getvalue = filename.getbuffer
if isinstance(filename, io.IOBase) and mode == "r":
magic_number = read_magic_number_from_file(filename)
if not magic_number.startswith(b"\211HDF\r\n\032\n"):
raise ValueError(
f"{magic_number!r} is not the signature of a valid netCDF4 file"
)
if format is None:
format = "NETCDF4"
if format not in ["NETCDF4", "NETCDF4_CLASSIC"]:
raise ValueError(f"invalid format for h5netcdf backend: {format}")
kwargs = {
"invalid_netcdf": invalid_netcdf,
"decode_vlen_strings": decode_vlen_strings,
"driver": driver,
}
if driver_kwds is not None:
kwargs.update(driver_kwds)
if phony_dims is not None:
kwargs["phony_dims"] = phony_dims
if Version(h5netcdf.__version__) > Version("1.6.4"):
kwargs["format"] = format
elif format == "NETCDF4_CLASSIC":
raise ValueError(
"h5netcdf >= 1.7.0 is required to save output in NETCDF4_CLASSIC format."
)
if lock is None:
if mode == "r":
lock = HDF5_LOCK
else:
lock = combine_locks([HDF5_LOCK, get_write_lock(filename)])
manager_cls = (
CachingFileManager
if isinstance(filename, str) and not is_remote_uri(filename)
else PickleableFileManager
)
manager = manager_cls(h5netcdf.File, filename, mode=mode, kwargs=kwargs)
return cls(
manager,
group=group,
format=format,
mode=mode,
lock=lock,
autoclose=autoclose,
)
def _acquire(self, needs_lock=True):
with self._manager.acquire_context(needs_lock) as root:
ds = _nc4_require_group(
root, self._group, self._mode, create_group=_h5netcdf_create_group
)
return ds
@property
def ds(self):
return self._acquire()
def open_store_variable(self, name, var):
import h5netcdf.core
import h5py
dimensions = var.dimensions
data = indexing.LazilyIndexedArray(H5NetCDFArrayWrapper(name, self))
attrs = _read_attributes(var)
# netCDF4 specific encoding
encoding = {
"chunksizes": var.chunks,
"fletcher32": var.fletcher32,
"shuffle": var.shuffle,
}
if var.chunks:
encoding["preferred_chunks"] = dict(
zip(var.dimensions, var.chunks, strict=True)
)
# Convert h5py-style compression options to NetCDF4-Python
# style, if possible
if var.compression == "gzip":
encoding["zlib"] = True
encoding["complevel"] = var.compression_opts
elif var.compression is not None:
encoding["compression"] = var.compression
encoding["compression_opts"] = var.compression_opts
# save source so __repr__ can detect if it's local or not
encoding["source"] = self._filename
encoding["original_shape"] = data.shape
vlen_dtype = h5py.check_dtype(vlen=var.dtype)
if vlen_dtype is str:
encoding["dtype"] = str
elif vlen_dtype is not None: # pragma: no cover
# xarray doesn't support writing arbitrary vlen dtypes yet.
pass
# just check if datatype is available and create dtype
# this check can be removed if h5netcdf >= 1.4.0 for any environment
elif (datatype := getattr(var, "datatype", None)) and isinstance(
datatype, h5netcdf.core.EnumType
):
encoding["dtype"] = np.dtype(
data.dtype,
metadata={
"enum": datatype.enum_dict,
"enum_name": datatype.name,
},
)
else:
encoding["dtype"] = var.dtype
return Variable(dimensions, data, attrs, encoding)
def get_variables(self):
return FrozenDict(
(k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()
)
def get_attrs(self):
return FrozenDict(_read_attributes(self.ds))
def get_dimensions(self):
return FrozenDict((k, len(v)) for k, v in self.ds.dimensions.items())
def get_parent_dimensions(self):
return FrozenDict(collect_ancestor_dimensions(self.ds))
def get_encoding(self):
return {
"unlimited_dims": {
k for k, v in self.ds.dimensions.items() if v.isunlimited()
}
}
def set_dimension(self, name, length, is_unlimited=False):
_ensure_no_forward_slash_in_name(name)
if is_unlimited:
self.ds.dimensions[name] = None
self.ds.resize_dimension(name, length)
else:
self.ds.dimensions[name] = length
def set_attribute(self, key, value):
if self.format == "NETCDF4_CLASSIC":
value = encode_nc3_attr_value(value)
self.ds.attrs[key] = value
def encode_variable(self, variable, name=None):
if self.format == "NETCDF4_CLASSIC":
return encode_nc3_variable(variable, name=name)
else:
return _encode_nc4_variable(variable, name=name)
def prepare_variable(
self, name, variable, check_encoding=False, unlimited_dims=None
):
import h5py
_ensure_no_forward_slash_in_name(name)
attrs = variable.attrs.copy()
dtype = _get_datatype(
variable, nc_format=self.format, raise_on_invalid_encoding=check_encoding
)
fillvalue = attrs.pop("_FillValue", None)
if dtype is str:
dtype = h5py.special_dtype(vlen=str)
# check enum metadata and use h5netcdf.core.EnumType
if (
hasattr(self.ds, "enumtypes")
and (meta := np.dtype(dtype).metadata)
and (e_name := meta.get("enum_name"))
and (e_dict := meta.get("enum"))
):
dtype = _build_and_get_enum(self, name, dtype, e_name, e_dict)
encoding = _extract_h5nc_encoding(variable, raise_on_invalid=check_encoding)
kwargs = {}
# Convert from NetCDF4-Python style compression settings to h5py style
# If both styles are used together, h5py takes precedence
# If set_encoding=True, raise ValueError in case of mismatch
if encoding.pop("zlib", False):
if check_encoding and encoding.get("compression") not in (None, "gzip"):
raise ValueError("'zlib' and 'compression' encodings mismatch")
encoding.setdefault("compression", "gzip")
if (
check_encoding
and "complevel" in encoding
and "compression_opts" in encoding
and encoding["complevel"] != encoding["compression_opts"]
):
raise ValueError("'complevel' and 'compression_opts' encodings mismatch")
complevel = encoding.pop("complevel", 0)
if complevel != 0:
encoding.setdefault("compression_opts", complevel)
encoding["chunks"] = encoding.pop("chunksizes", None)
# Do not apply compression, filters or chunking to scalars.
if variable.shape:
for key in [
"compression",
"compression_opts",
"shuffle",
"chunks",
"fletcher32",
]:
if key in encoding:
kwargs[key] = encoding[key]
if name not in self.ds:
nc4_var = self.ds.create_variable(
name,
dtype=dtype,
dimensions=variable.dims,
fillvalue=fillvalue,
**kwargs,
)
else:
nc4_var = self.ds[name]
for k, v in attrs.items():
if self.format == "NETCDF4_CLASSIC":
v = encode_nc3_attr_value(v)
nc4_var.attrs[k] = v
target = H5NetCDFArrayWrapper(name, self)
return target, variable.data
def sync(self):
self.ds.sync()
def close(self, **kwargs):
self._manager.close(**kwargs)
def _check_phony_dims(phony_dims):
emit_phony_dims_warning = False
if phony_dims is None:
emit_phony_dims_warning = True
phony_dims = "access"
return emit_phony_dims_warning, phony_dims
def _emit_phony_dims_warning():
emit_user_level_warning(
"The 'phony_dims' kwarg now defaults to 'access'. "
"Previously 'phony_dims=None' would raise an error. "
"For full netcdf equivalence please use phony_dims='sort'.",
UserWarning,
)
def _normalize_filename_or_obj(
filename_or_obj: T_PathFileOrDataStore,
) -> str | ReadBuffer | AbstractDataStore:
if isinstance(filename_or_obj, bytes | memoryview):
return io.BytesIO(filename_or_obj)
else:
return _normalize_path(filename_or_obj)
| H5NetCDFStore |
python | numba__numba | numba/tests/test_parfors.py | {
"start": 165587,
"end": 166200
} | class ____(TestCase):
@TestCase.run_test_in_subprocess()
def test_diagnostics_env_var1(self):
os.environ['NUMBA_PARALLEL_DIAGNOSTICS']='4'
with captured_stdout() as stdout:
@njit(parallel=True)
def impl():
n = 100
b = np.zeros((n), dtype=np.float64)
for i in prange(n):
b[i] = 1
return b
impl()
the_output = stdout.getvalue()
self.assertIn("Parallel Accelerator Optimizing", the_output)
if __name__ == "__main__":
unittest.main()
| TestDiagnosticEnvVar |
python | sqlalchemy__sqlalchemy | test/ext/test_mutable.py | {
"start": 35066,
"end": 36056
} | class ____(
_MutableDictTestBase, fixtures.MappedTest
):
@classmethod
def define_tables(cls, metadata):
import json
class JSONEncodedDict(TypeDecorator):
impl = VARCHAR(50)
cache_ok = True
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
MutableDict = cls._type_fixture()
MutableDict.associate_with(JSONEncodedDict)
Table(
"foo",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", JSONEncodedDict),
Column("unrelated_data", String(50)),
)
| MutableAssociationScalarJSONTest |
python | tornadoweb__tornado | tornado/web.py | {
"start": 93476,
"end": 97324
} | class ____(httputil.HTTPMessageDelegate):
def __init__(
self,
application: Application,
request: httputil.HTTPServerRequest,
handler_class: Type[RequestHandler],
handler_kwargs: Optional[Dict[str, Any]],
path_args: Optional[List[bytes]],
path_kwargs: Optional[Dict[str, bytes]],
) -> None:
self.application = application
self.connection = request.connection
self.request = request
self.handler_class = handler_class
self.handler_kwargs = handler_kwargs or {}
self.path_args = path_args or []
self.path_kwargs = path_kwargs or {}
self.chunks = [] # type: List[bytes]
self.stream_request_body = _has_stream_request_body(self.handler_class)
def headers_received(
self,
start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
headers: httputil.HTTPHeaders,
) -> Optional[Awaitable[None]]:
if self.stream_request_body:
self.request._body_future = Future()
return self.execute()
return None
def data_received(self, data: bytes) -> Optional[Awaitable[None]]:
if self.stream_request_body:
return self.handler.data_received(data)
else:
self.chunks.append(data)
return None
def finish(self) -> None:
if self.stream_request_body:
future_set_result_unless_cancelled(self.request._body_future, None)
else:
# Note that the body gets parsed in RequestHandler._execute so it can be in
# the right exception handler scope.
self.request.body = b"".join(self.chunks)
self.execute()
def on_connection_close(self) -> None:
if self.stream_request_body:
self.handler.on_connection_close()
else:
self.chunks = None # type: ignore
def execute(self) -> Optional[Awaitable[None]]:
# If template cache is disabled (usually in the debug mode),
# re-compile templates and reload static files on every
# request so you don't need to restart to see changes
if not self.application.settings.get("compiled_template_cache", True):
with RequestHandler._template_loader_lock:
for loader in RequestHandler._template_loaders.values():
loader.reset()
if not self.application.settings.get("static_hash_cache", True):
static_handler_class = self.application.settings.get(
"static_handler_class", StaticFileHandler
)
static_handler_class.reset()
self.handler = self.handler_class(
self.application, self.request, **self.handler_kwargs
)
transforms = [t(self.request) for t in self.application.transforms]
if self.stream_request_body:
self.handler._prepared_future = Future()
# Note that if an exception escapes handler._execute it will be
# trapped in the Future it returns (which we are ignoring here,
# leaving it to be logged when the Future is GC'd).
# However, that shouldn't happen because _execute has a blanket
# except handler, and we cannot easily access the IOLoop here to
# call add_future (because of the requirement to remain compatible
# with WSGI)
fut = gen.convert_yielded(
self.handler._execute(transforms, *self.path_args, **self.path_kwargs)
)
fut.add_done_callback(lambda f: f.result())
# If we are streaming the request body, then execute() is finished
# when the handler has prepared to receive the body. If not,
# it doesn't matter when execute() finishes (so we return None)
return self.handler._prepared_future
| _HandlerDelegate |
python | networkx__networkx | networkx/readwrite/tests/test_graph6.py | {
"start": 4613,
"end": 6559
} | class ____:
def test_null_graph(self):
G = nx.null_graph()
assert g6.to_graph6_bytes(G) == b">>graph6<<?\n"
def test_trivial_graph(self):
G = nx.trivial_graph()
assert g6.to_graph6_bytes(G) == b">>graph6<<@\n"
def test_complete_graph(self):
assert g6.to_graph6_bytes(nx.complete_graph(4)) == b">>graph6<<C~\n"
def test_large_complete_graph(self):
G = nx.complete_graph(67)
assert g6.to_graph6_bytes(G, header=False) == b"~?@B" + b"~" * 368 + b"w\n"
def test_no_header(self):
G = nx.complete_graph(4)
assert g6.to_graph6_bytes(G, header=False) == b"C~\n"
def test_complete_bipartite_graph(self):
G = nx.complete_bipartite_graph(6, 9)
assert g6.to_graph6_bytes(G, header=False) == b"N??F~z{~Fw^_~?~?^_?\n"
@pytest.mark.parametrize("G", (nx.MultiGraph(), nx.DiGraph()))
def test_no_directed_or_multi_graphs(self, G):
with pytest.raises(nx.NetworkXNotImplemented):
g6.to_graph6_bytes(G)
def test_length(self):
for i in list(range(13)) + [31, 47, 62, 63, 64, 72]:
G = nx.random_graphs.gnm_random_graph(i, i * i // 4, seed=i)
# Strip the trailing newline.
gstr = g6.to_graph6_bytes(G, header=False).rstrip()
assert len(gstr) == ((i - 1) * i // 2 + 5) // 6 + (1 if i < 63 else 4)
def test_roundtrip(self):
for i in list(range(13)) + [31, 47, 62, 63, 64, 72]:
G = nx.random_graphs.gnm_random_graph(i, i * i // 4, seed=i)
data = g6.to_graph6_bytes(G)
H = nx.from_graph6_bytes(data.rstrip())
assert nodes_equal(G.nodes(), H.nodes())
assert edges_equal(G.edges(), H.edges())
@pytest.mark.parametrize("edge", ((0, 1), (1, 2), (1, 42)))
def test_relabeling(self, edge):
G = nx.Graph([edge])
assert g6.to_graph6_bytes(G) == b">>graph6<<A_\n"
| TestToGraph6Bytes |
python | ansible__ansible | packaging/release.py | {
"start": 9169,
"end": 9392
} | class ____:
"""Details required to create a GitHub release."""
user: str
repo: str
tag: str
target: str
title: str
body: str
pre_release: bool
@dataclasses.dataclass(frozen=True)
| GitHubRelease |
python | getsentry__sentry | src/sentry_plugins/pushover/client.py | {
"start": 46,
"end": 809
} | class ____(ApiClient):
base_url = "https://api.pushover.net/1"
allow_redirects = False
plugin_name = "pushover"
def __init__(self, userkey=None, apikey=None):
self.userkey = userkey
self.apikey = apikey
super().__init__()
def request(self, method, path, data):
# see https://pushover.net/api
# We can no longer send JSON because pushover disabled incoming
# JSON data: http://updates.pushover.net/post/39822700181/
payload = {"user": self.userkey, "token": self.apikey}
payload.update(data)
return self._request(path=path, method=method, data=payload, json=False)
def send_message(self, data):
return self.request("POST", "/messages.json", data)
| PushoverClient |
python | python-poetry__poetry | tests/types.py | {
"start": 3707,
"end": 3884
} | class ____(Protocol):
def __call__(
self,
distribution_locations: list[Path],
metadata_locations: list[Path],
) -> None: ...
| PythonHostedFileMocker |
python | kennethreitz__tablib | tests/test_tablib.py | {
"start": 44238,
"end": 44863
} | class ____(BaseTestCase):
def test_cli_export_github(self):
self.assertEqual(
'|---|---|---|\n| a | b | c |',
tablib.Dataset(['a', 'b', 'c']).export('cli', tablefmt='github')
)
def test_cli_export_simple(self):
self.assertEqual(
'- - -\na b c\n- - -',
tablib.Dataset(['a', 'b', 'c']).export('cli', tablefmt='simple')
)
def test_cli_export_grid(self):
self.assertEqual(
'+---+---+---+\n| a | b | c |\n+---+---+---+',
tablib.Dataset(['a', 'b', 'c']).export('cli', tablefmt='grid')
)
| CliTests |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 66546,
"end": 66717
} | class ____(_PrintableStructure):
_fields_ = [
('version', c_uint),
('size', c_ulonglong),
]
VgpuRuntimeState_v1 = 0x1000010
| nvmlVgpuRuntimeState_v1_t |
python | pytorch__pytorch | torch/_inductor/choices.py | {
"start": 1655,
"end": 25785
} | class ____:
"""
This class contains a collection of default heuristics that effect performance of our generated
code. We try to not put correctness requirements in this file.
You can override the choices made here by doing:
class MyHeuristics(InductorChoices):
...
torch._inductor.virtualized.V.set_choices_handler(MyHeuristics())
"""
def get_config_heuristics(
self, device_type: Optional[str] = "cuda"
) -> BaseConfigHeuristic:
if device_type == "cuda":
if torch.version.hip is None:
return CUDAConfigHeuristic()
else:
return ROCmConfigHeuristic()
elif device_type == "xpu":
return XPUConfigHeuristic()
elif device_type == "cpu":
return CPUConfigHeuristic()
elif device_type == "mtia":
return MTIAConfigHeuristic()
else:
return BaseConfigHeuristic()
# Conv configs
def get_conv_configs(
self, device_type: Optional[str] = "cuda"
) -> partial[Generator[TritonConfig, None, None]]:
conv_heuristics = self.get_config_heuristics(device_type)
return conv_heuristics.get_conv_configs()
# Flex attention configs
# TODO(coconutruben): break out flexattention/decode configs into the new retrieval mechanism
def get_flex_attention_fwd_configs(
self, head_dim: int, dtype: torch.dtype, device_type: Optional[str] = "cuda"
) -> list[Any]:
flex_heuristics = self.get_config_heuristics(device_type)
return flex_heuristics.get_flex_attn_fwd_configs(head_dim, dtype)
def get_flex_attention_bwd_configs(
self, head_dim: int, dtype: torch.dtype, device_type: Optional[str] = "cuda"
) -> list[Any]:
flex_heuristics = self.get_config_heuristics(device_type)
return flex_heuristics.get_flex_attn_bwd_configs(head_dim, dtype)
def get_flex_decode_configs(
self, head_dim: int, dtype: torch.dtype, device_type: Optional[str] = "cuda"
) -> list[Any]:
flex_heuristics = self.get_config_heuristics(device_type)
return flex_heuristics.get_flex_decode_configs(head_dim, dtype)
def _finalize_template_configs(
self,
template_choices: dict[str, Generator[KernelTemplateChoice, None, None]],
kernel_inputs: KernelInputs,
templates: list[Union[KernelTemplate, ExternKernelChoice]],
op_name: str,
kwarg_overrides: Optional[dict[str, dict[str, Any]]] = None,
) -> list[KernelTemplateChoice]:
"""
This method can be subclassed to perform any override/modification of the choices.
The incoming parameters are cheap (generators), so you can do any overrides without
incurring too much cost. Override this method to customize the kernel template choices
before they are converted to ChoiceCaller objects, which is expensive on template codegen.
The full list of arguments are here to facilitate any overrides you may want to do,
as they can be used to start from scratch for each template if so desired.
Args:
template_choices: Dictionary mapping template UIDs to generators of KernelTemplateChoice objects
kernel_inputs: MMKernelInputs containing input tensor nodes and matrix indices
templates: List of template objects (KernelTemplate or ExternKernelChoice) in use
op_name: Operation name (e.g., "bmm", "baddbmm", "addmm")
kwarg_overrides: Optional dict of kwargs to override for each template heuristic
Returns:
Flattened list of KernelTemplateChoice objects across all templates
"""
choices: list[KernelTemplateChoice] = []
for choice_gen in template_choices.values():
choices.extend(choice_gen)
return choices
def get_ktc(
self,
kernel_inputs: KernelInputs,
template: Union[KernelTemplate, ExternKernelChoice],
op_name: str,
kwarg_overrides: Optional[dict[str, Any]] = None,
) -> Generator[KernelTemplateChoice, None, None]:
"""
Utility to get the KernelTemplateChoice generator for a specific input.
This is a per template/op call, whereas get_template_configs is an op wide call (all templates).
Consider when overriding/using at which level you need to make decisions
"""
# Extract device_type from kernel_inputs
device_type = kernel_inputs.device_type
assert device_type is not None, "get_ktc requires a valid device type"
# Extract template_name from the template object
template_name = template.uid
# Get the appropriate template-specific heuristic
heuristic = get_template_heuristic(template_name, device_type, op_name)
cs = heuristic.get_template_configs(
kernel_inputs,
op_name,
)
# adjust the kernel inputs to the template-specific heuristic, if needed
# default here is to just return the kernel_inputs as is
inputs_val = heuristic.adjust_kernel_inputs(kernel_inputs, op_name)
extra_kwargs = heuristic.get_extra_kwargs(kernel_inputs, op_name)
# Create KernelTemplateChoice generator using the moved function
overrides = kwarg_overrides or {}
return make_ktc_generator(
template=template,
cs=cs,
extra_kwargs=extra_kwargs,
overrides=overrides,
layout=kernel_inputs.output_layout(),
inputs=inputs_val,
)
def _need_to_fix_layout(
self,
adjusted_choices: list[KernelTemplateChoice],
op_name: str,
) -> bool:
"""
Check if we need to fix the layout instead of keeping it flexible
Args:
ktc: KernelTemplateChoice object
Returns:
True if we need to fix the layout, False otherwise
"""
# TODO: debug and fix
# NOTE: on mps, we see issues with flexible layouts on baddmm. This check just makes sure
# that for mps, everything stays as it was before this optimization
if len(adjusted_choices) > 0:
if adjusted_choices[0].inputs.device_type == "mps" and op_name not in [
"mm",
"addmm",
]:
return True
# Since the following backends are not using get_mm_configs yet through the singular call,
if not (config.max_autotune or config.max_autotune_gemm):
# no danger of using other backends than ATEN
if not config.max_autotune_allow_flexible_layouts and op_name not in [
# The historical implementation for mm and addmm allowed had flexible layouts in the
# not max-autotune world
"mm",
"addmm",
]:
# TODO: deprecate this by migrating users to the new behavior
return True
return False
if not config.max_autotune_allow_flexible_layouts:
# we always need to fix the layout
return True
# Since the following backends are not using get_template_configs yet through the singular call,
# we don't know if they are a valid choice or not. Instead, just skip the optimization
# defensively.
# TODO(coconutruben): remove this once CPP,CK,CUTLASS are supported
if _use_autotune_backend("CUTLASS"):
return True
if _use_autotune_backend("CK") or _use_autotune_backend("CKTILE"):
return True
if _use_autotune_backend("CPP"):
return True
return any(
not isinstance(ktc.template, ExternKernelChoice) for ktc in adjusted_choices
)
def get_template_configs(
self,
kernel_inputs: KernelInputs,
templates: list[Union[KernelTemplate, ExternKernelChoice]],
op_name: str,
kwarg_overrides: Optional[dict[str, dict[str, Any]]] = None,
) -> list[ChoiceCaller]:
"""
Get list of ChoiceCallers for MM templates using template-specific heuristics.
Args:
kernel_inputs: MMKernelInputs containing input tensor nodes and matrix indices
layout: Output layout
templates: List of template objects (KernelTemplate or ExternKernelChoice)
op_name: Operation name (e.g., "bmm", "baddbmm", "addmm", "mm_plus_mm")
kwarg_overrides: Optional dict of kwargs to override for each template heuristic,
indexed by template.uid. These only override the per config kwargs, not the extra kwargs
Returns:
List of ChoiceCaller objects from the templates
"""
if kwarg_overrides is None:
kwarg_overrides = {}
input_tensors = kernel_inputs.nodes()
if len(input_tensors) < 2:
raise ValueError(f"Need at least 2 input tensors, got {len(input_tensors)}")
layout = kernel_inputs.output_layout()
# First pass: Create dict of template.uid to generator of KernelTemplateChoice objects
template_choices = {}
for template in templates:
template_choices[template.uid] = self.get_ktc(
kernel_inputs,
template,
op_name,
kwarg_overrides.get(template.uid, {}),
)
# Second pass: Adjust the template choices
adjusted_choices = self._finalize_template_configs(
template_choices,
kernel_inputs,
templates,
op_name,
kwarg_overrides,
)
# Layout optimization: if all choices are ExternKernelChoice and layout is FixedLayout, convert to FlexibleLayout
if self._need_to_fix_layout(adjusted_choices, op_name):
layout = kernel_inputs.output_layout(flexible=False)
for ktc in adjusted_choices:
ktc.layout = layout
# for good measure, delete the cached ChoiceCaller from the ktc if it existed.
# ExternKernelChoice are cheap to generate
if hasattr(ktc, "_choice"):
del ktc._choice
# Third pass: Convert to ChoiceCaller objects
return [ktc.choice for ktc in adjusted_choices if ktc.choice is not None]
def triton_kernel_kwargs(
self,
kernel_cls: type[TritonKernel],
features: SIMDKernelFeatures,
groups: list[sympy.Expr],
kernel_kwargs: dict[str, Any],
) -> dict[str, Any]:
"""Hook to change the kwargs passed to TritonKernel, used to apply fixed configurations"""
return kernel_kwargs
@staticmethod
def should_use_cooperative_reduction(features: SIMDKernelFeatures) -> bool:
"""Heuristic to decide if a cooperative reduction should be used."""
if config.triton.force_cooperative_reductions:
return True
if (
not config.triton.cooperative_reductions
or V.graph.get_current_device_or_throw().type == "cpu"
):
return False
xhint = V.graph.sizevars.size_hint(features.numel, fallback=2)
if xhint <= 8:
threshold = 32768 * xhint
elif xhint <= 16:
threshold = 2097152
else:
return False
# TODO(jansel): should this default on for dynamic shapes?
return V.graph.sizevars.statically_known_geq(
features.reduction_numel, threshold
)
@staticmethod
def should_use_persistent_reduction(
features: SIMDKernelFeatures, cooperative_reduction: bool
) -> bool:
"""
Heuristic to decide if a persistent reduction should be used.
"""
if not config.triton.persistent_reductions:
return False
threshold = {
ReductionHint.INNER: 1024,
}.get(features.get_reduction_hint(), 64)
if features.get_reduction_hint() not in (
ReductionHint.INNER,
ReductionHint.OUTER_TINY,
):
bounds = bound_sympy(features.reduction_numel)
lower = bounds.lower
upper = bounds.upper
if not all(
(
(isinstance(bound, int) or bound.is_constant())
and bound != torch.utils._sympy.numbers.IntInfinity()
)
for bound in (lower, upper)
):
return False
lower = next_power_of_2(int(lower))
upper = next_power_of_2(int(upper))
# If we are are coalescing on xblock (not ReductionHint.INNER) and this is not a tiny kernel
# (not ReductionHint.OUTER_TINY), do not use persistent reduction if it induces tile
# quantization. Persistent reduction forces rblock == rnumel, if the bounds between lower
# and upper are large, for the lower values we will be masking off large % of read/writes,
# when we could expand the coalescing xblock instead.
if lower != upper:
return False
if cooperative_reduction:
# The RSPLIT of cooperative reductions means each thread block is operating on fewer elements
try:
threshold *= 32 // min(
V.graph.sizevars.size_hint_or_throw(features.numel), 32
)
except ValueError:
pass # unbacked symint
# If multi_kernel is enabled, we do more aggressive persistent reduction.
# This may result in some persistent reductions slower than the
# corresponding non-persistent reductions. MultiKernel will do benchmarking
# to pick the faster one.
if config.triton.multi_kernel:
threshold *= 16
return V.graph.sizevars.statically_known_leq(
features.reduction_numel, threshold
) # type: ignore[arg-types]
@staticmethod
def reduction_split_factor(
device: torch.device,
reduction_numel_hint: int,
numel_hint: int,
inner_reduction: bool,
) -> int:
"""Heuristic to decide the RSPLIT used for split reductions.
When a reduction has a small number of outputs there is not enough parallelism,
so we will do the reduction in two phases."""
props = DeviceProperties.create(device)
num_sm = props.multi_processor_count
min_elements_per_thread = 32
max_elements_per_thread = 512
threads_per_sm = 2048
min_elements_per_device = min_elements_per_thread * num_sm * threads_per_sm
max_elements_per_device = max_elements_per_thread * num_sm * threads_per_sm
num_warps = 8
num_threads = 32 * num_warps
if inner_reduction:
# do heuristics that's close to eager mode for split inner reduction
# we leak reduction autotune configs here, and will need to refactor to avoid this later
if numel_hint >= 2 * num_sm: # don't split if there are enough outputs
return 1
if reduction_numel_hint <= 8192:
return 1
if reduction_numel_hint * numel_hint <= min_elements_per_device:
split_size = min_elements_per_thread
elif reduction_numel_hint * numel_hint < max_elements_per_device:
target_blocks = num_sm * threads_per_sm // (2 * num_threads)
blocks_per_output = (target_blocks + numel_hint - 1) // numel_hint
tmp_split_size = (
reduction_numel_hint + num_threads * blocks_per_output - 1
) // (num_threads * blocks_per_output)
divisors = sympy.divisors(reduction_numel_hint)
closest = min(divisors, key=lambda x: abs(x - tmp_split_size))
if abs(closest - tmp_split_size) < 30:
# prefer even splits, but never smalle than min_elements_per_thread
split_size = max(closest, min_elements_per_thread)
else:
split_size = tmp_split_size
else:
divisors = sympy.divisors(reduction_numel_hint)
closest = min(divisors, key=lambda x: abs(x - max_elements_per_thread))
if abs(closest - max_elements_per_thread) < 50:
# prefer even splits
split_size = closest
else:
split_size = max_elements_per_thread
return (reduction_numel_hint + split_size * num_threads - 1) // (
split_size * num_threads
)
else:
# TODO the best heuristic currently has XBLOCK (corresponding to numel_hint) 128
# extend to even smaller number of outputs
rvals_per_thread = 4 # comes from heuristics, refactor to not leak here
xvals_per_block = 128
xblocks = (numel_hint + xvals_per_block - 1) // xvals_per_block
if reduction_numel_hint * numel_hint < min_elements_per_device:
split_size = min_elements_per_thread
elif reduction_numel_hint * numel_hint < max_elements_per_device:
target_blocks = num_sm * threads_per_sm // (num_threads)
target_blocks = (target_blocks + xblocks - 1) // xblocks
tmp_split_size = (
reduction_numel_hint + rvals_per_thread * target_blocks - 1
) // (rvals_per_thread * target_blocks)
divisors = sympy.divisors(reduction_numel_hint)
closest = min(divisors, key=lambda x: abs(x - tmp_split_size))
if abs(tmp_split_size - closest) < 20:
split_size = max(closest, min_elements_per_thread)
else:
split_size = tmp_split_size
else:
divisors = sympy.divisors(reduction_numel_hint)
closest = min(divisors, key=lambda x: abs(x - max_elements_per_thread))
if abs(closest - max_elements_per_thread) < 50:
# prefer even splits
split_size = closest
else:
split_size = max_elements_per_thread
return (reduction_numel_hint + rvals_per_thread * split_size - 1) // (
rvals_per_thread * split_size
)
@staticmethod
def can_fuse(
scheduler: Scheduler,
node1: BaseSchedulerNode,
node2: BaseSchedulerNode,
shared_data_score: int,
) -> bool:
"""
Heuristics to prevent fusion applied to both horizontal and vertical fusions. Heuristics here should not
be needed for correctness and tweaking them may yield additional performance.
See also some related heuristics that can be changed via config:
- config.triton.tiling_prevents_pointwise_fusion
- config.triton.tiling_prevents_reduction_fusion
- config.aggressive_fusion (will cause this function to be called more times)
"""
if (
shared_data_score == 0 and not MixOrderReduction.can_fuse(node1, node2)
) and (
not config.aggressive_fusion or node1.is_reduction() or node2.is_reduction()
):
if is_metric_table_enabled("fusion_failure_due_to_indexing_mismatch"):
common_buf_names: OrderedSet[str] = (
node1.read_writes.buffer_names() & node2.read_writes.buffer_names()
)
if len(common_buf_names) > 0:
get_metric_table("fusion_failure_due_to_indexing_mismatch").add_row(
lambda: {
"pre_grad_graph_id": V.graph.graph_id,
"post_grad_graph_id": V.graph.post_grad_graph_id,
"node1_name": node1.get_name(),
"node2_name": node2.get_name(),
"node1_debug_str": write_text(node1.debug_str()),
"node2_debug_str": write_text(node2.debug_str()),
"common_buffer_names": list(common_buf_names), # type: ignore[dict-item]
"failure_reason": scheduler.decide_fusion_fail_reason(
node1, node2, common_buf_names
),
}
)
WhyNoFuse(node1, node2)("no shared data due to indexing mismatch")
return False
WhyNoFuse(node1, node2)("no shared data")
return False # heuristic not needed for correctness
if (
not node1.is_foreach()
and not node2.is_foreach()
and len(node1.get_nodes()) + len(node2.get_nodes()) > config.max_fusion_size
):
WhyNoFuse(node1, node2)("exceeds max fusion")
return False # heuristic not needed for correctness
if scheduler.can_fusion_increase_peak_memory(node1, node2):
WhyNoFuse(node1, node2)("Fusion will increase peak memory")
return False
if (
config.max_fusion_unique_io_buffers is not None
and scheduler.fusion_prevent_too_many_reads_and_writes(
node1,
node2,
config.max_fusion_unique_io_buffers,
)
):
WhyNoFuse(node1, node2)("fusion_prevent_too_many_reads_and_writes")
return False
return True
@staticmethod
def can_fuse_vertical(
scheduler: Scheduler,
node1: BaseSchedulerNode,
node2: BaseSchedulerNode,
shared_data_score: int,
) -> bool:
"""Hook for heuristics to prevent vertical (producer/consumer) fusions"""
return True
@staticmethod
def can_fuse_horizontal(
scheduler: Scheduler,
node1: BaseSchedulerNode,
node2: BaseSchedulerNode,
shared_data_score: int,
) -> bool:
"""Hook for heuristics to prevent horizontal (consumer/consumer) fusions"""
if MixOrderReduction.can_fuse(node1, node2):
# For mix order reduction, we disregard shared data or
# distance.
return True
if shared_data_score < config.score_fusion_memory_threshold:
WhyNoFuse(node1, node2)("score_fusion_memory_threshold")
return False
if scheduler.are_long_distant_nodes(node1, node2):
WhyNoFuse(node1, node2)(
"Nodes are too far away. Fusing them may increase peak memory."
)
return False
return True
@staticmethod
def score_fusion(
scheduler: Scheduler,
node1: BaseSchedulerNode,
node2: BaseSchedulerNode,
) -> Sortable:
"""
Assign a score (higher comes first) to the fusion of node1 and node2.
When different fusions conflict with each other, this is the way we
decide what order to run them in.
Our current score is based on:
- The type of fusion (template/reduction/etc)
- Estimate of the saved memory operations
- Fusions closer together in original graph order
"""
memory_score = scheduler.score_fusion_memory(node1, node2)
proximity_score = -max(
abs(node1.min_order - node2.max_order),
abs(node2.min_order - node1.max_order),
)
# prologue fusion always last
if node2.is_template():
template_score = 0
else:
template_score = 1 + (
(node1.is_template() == config.epilogue_fusion_first)
and memory_score > 0
)
# pyrefly: ignore [bad-return]
return (
template_score,
node1.is_reduction() == node2.is_reduction() and memory_score > 0,
memory_score,
proximity_score,
)
| InductorChoices |
python | joke2k__faker | faker/providers/color/de/__init__.py | {
"start": 140,
"end": 5877
} | class ____(ColorProvider):
"""
Color provider for ``de`` locale. Source: https://www.sttmedia.com/colornames
"""
all_colors: OrderedDictType[str, str] = OrderedDict(
(
("Eisfarben", "#F0F8FF"),
("Antikweiß", "#FAEBD7"),
("Wasser", "#00FFFF"),
("Aquamarinblau", "#7FFFD4"),
("Azur", "#F0FFFF"),
("Beige", "#F5F5DC"),
("Biskuit", "#FFE4C4"),
("Schwarz", "#000000"),
("Mandelweiß", "#FFEBCD"),
("Blau", "#0000FF"),
("Blauviolett", "#8A2BE2"),
("Braun", "#A52A2A"),
("Gelbbraun", "#DEB887"),
("Kadettenblau", "#5F9EA0"),
("Hellgrün", "#7FFF00"),
("Schokolade", "#D2691E"),
("Koralle", "#FF7F50"),
("Kornblumenblau", "#6495ED"),
("Mais", "#FFF8DC"),
("Karminrot", "#DC143C"),
("Cyan", "#00FFFF"),
("Dunkelblau", "#00008B"),
("Dunkelcyan", "#008B8B"),
("Dunkle Goldrutenfarbe", "#B8860B"),
("Dunkelgrau", "#A9A9A9"),
("Dunkelgrün", "#006400"),
("Dunkelkhaki", "#BDB76B"),
("Dunkelmagenta", "#8B008B"),
("Dunkles Olivgrün", "#556B2F"),
("Dunkles Orange", "#FF8C00"),
("Dunkle Orchidee", "#9932CC"),
("Dunkelrot", "#8B0000"),
("Dunkle Lachsfarbe", "#E9967A"),
("Dunkles Seegrün", "#8FBC8F"),
("Dunkles Schieferblau", "#483D8B"),
("Dunkles Schiefergrau", "#2F4F4F"),
("Dunkeltürkis", "#00CED1"),
("Dunkelviolett", "#9400D3"),
("Tiefrosa", "#FF1493"),
("Tiefes Himmelblau", "#00BFFF"),
("Trübes Grau", "#696969"),
("Persenningblau", "#1E90FF"),
("Backstein", "#B22222"),
("Blütenweiß", "#FFFAF0"),
("Waldgrün", "#228B22"),
("Fuchsia", "#FF00FF"),
("Gainsboro", "#DCDCDC"),
("Geisterweiß", "#F8F8FF"),
("Gold", "#FFD700"),
("Goldrute", "#DAA520"),
("Grau", "#808080"),
("Grün", "#008000"),
("Grüngelb", "#ADFF2F"),
("Honigmelone", "#F0FFF0"),
("Leuchtendes Rosa", "#FF69B4"),
("Indischrot", "#CD5C5C"),
("Indigo", "#4B0082"),
("Elfenbein", "#FFFFF0"),
("Khaki", "#F0E68C"),
("Lavendel", "#E6E6FA"),
("Lavendelrosa", "#FFF0F5"),
("Rasengrün", "#7CFC00"),
("Chiffongelb", "#FFFACD"),
("Hellblau", "#ADD8E6"),
("Helles Korallenrot", "#F08080"),
("Helles Cyan", "#E0FFFF"),
("Helles Goldrutengelb", "#FAFAD2"),
("Hellgrau", "#D3D3D3"),
("Hellgrün", "#90EE90"),
("Hellrosa", "#FFB6C1"),
("Helle Lachsfarbe", "#FFA07A"),
("Helles Seegrün", "#20B2AA"),
("Helles Himmelblau", "#87CEFA"),
("Helles Schiefergrau", "#778899"),
("Helles Stahlblau", "#B0C4DE"),
("Hellgelb", "#FFFFE0"),
("Limone", "#00FF00"),
("Limonengrün", "#32CD32"),
("Leinen", "#FAF0E6"),
("Magenta", "#FF00FF"),
("Kastanie", "#800000"),
("Mittleres Aquamarin", "#66CDAA"),
("Mittleres Blau", "#0000CD"),
("Mittlere Orchidee", "#BA55D3"),
("Mittleres Violett", "#9370DB"),
("Mittleres Seegrün", "#3CB371"),
("Mittleres Schieferblau", "#7B68EE"),
("Mittleres Frühlingsgrün", "#00FA9A"),
("Mittleres Türkis", "#48D1CC"),
("Mittleres Violettrot", "#C71585"),
("Mitternachtsblau", "#191970"),
("Minzcreme", "#F5FFFA"),
("Altrosa", "#FFE4E1"),
("Mokassin", "#FFE4B5"),
("Navajoweiß", "#FFDEAD"),
("Marineblau", "#000080"),
("Alte Spitze", "#FDF5E6"),
("Olivgrün", "#808000"),
("Olivgraubraun", "#6B8E23"),
("Orange", "#FFA500"),
("Orangerot", "#FF4500"),
("Orchidee", "#DA70D6"),
("Blasse Goldrutenfarbe", "#EEE8AA"),
("Blassgrün", "#98FB98"),
("Blasstürkis", "#AFEEEE"),
("Blasses Violetrot", "#DB7093"),
("Papayacreme", "#FFEFD5"),
("Pfirsich", "#FFDAB9"),
("Peru", "#CD853F"),
("Rosa", "#FFC0CB"),
("Pflaume", "#DDA0DD"),
("Taubenblau", "#B0E0E6"),
("Lila", "#800080"),
("Rot", "#FF0000"),
("Rosiges Braun", "#BC8F8F"),
("Königsblau", "#4169E1"),
("Sattelbraun", "#8B4513"),
("Lachsfarben", "#FA8072"),
("Sandbraun", "#F4A460"),
("Seegrün", "#2E8B57"),
("Muschelfarben", "#FFF5EE"),
("Siennaerde", "#A0522D"),
("Silber", "#C0C0C0"),
("Himmelblau", "#87CEEB"),
("Schieferblau", "#6A5ACD"),
("Schiefergrau", "#708090"),
("Schneeweiß", "#FFFAFA"),
("Frühlingsgrün", "#00FF7F"),
("Stahlblau", "#4682B4"),
("Hautfarben", "#D2B48C"),
("Petrol", "#008080"),
("Distel", "#D8BFD8"),
("Tomatenrot", "#FF6347"),
("Türkis", "#40E0D0"),
("Violett", "#EE82EE"),
("Weizen", "#F5DEB3"),
("Weiß", "#FFFFFF"),
("Rauchfarben", "#F5F5F5"),
("Gelb", "#FFFF00"),
("Gelbgrün", "#9ACD32"),
)
)
| Provider |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor24.py | {
"start": 1801,
"end": 1848
} | class ____: ...
T_A = TypeVar("T_A", bound=A)
| A |
python | doocs__leetcode | lcof/面试题14- I. 剪绳子/Solution2.py | {
"start": 0,
"end": 257
} | class ____:
def cuttingRope(self, n: int) -> int:
if n < 4:
return n - 1
if n % 3 == 0:
return pow(3, n // 3)
if n % 3 == 1:
return pow(3, n // 3 - 1) * 4
return pow(3, n // 3) * 2
| Solution |
python | huggingface__transformers | src/transformers/models/vit_msn/modeling_vit_msn.py | {
"start": 19406,
"end": 21788
} | class ____(ViTMSNPreTrainedModel):
def __init__(self, config: ViTMSNConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.vit = ViTMSNModel(config)
# Classifier head
self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
interpolate_pos_encoding: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> ImageClassifierOutput:
r"""
Examples:
```python
>>> from transformers import AutoImageProcessor, ViTMSNForImageClassification
>>> import torch
>>> from PIL import Image
>>> import requests
>>> torch.manual_seed(2) # doctest: +IGNORE_RESULT
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/vit-msn-small")
>>> model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_label = logits.argmax(-1).item()
>>> print(model.config.id2label[predicted_label])
tusker
```
"""
outputs: BaseModelOutput = self.vit(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, **kwargs)
sequence_output = outputs.last_hidden_state
logits = self.classifier(sequence_output[:, 0, :])
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config, **kwargs)
return ImageClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["ViTMSNModel", "ViTMSNForImageClassification", "ViTMSNPreTrainedModel"]
| ViTMSNForImageClassification |
python | getsentry__sentry-python | sentry_sdk/session.py | {
"start": 598,
"end": 5589
} | class ____:
def __init__(
self,
sid=None, # type: Optional[Union[str, uuid.UUID]]
did=None, # type: Optional[str]
timestamp=None, # type: Optional[datetime]
started=None, # type: Optional[datetime]
duration=None, # type: Optional[float]
status=None, # type: Optional[SessionStatus]
release=None, # type: Optional[str]
environment=None, # type: Optional[str]
user_agent=None, # type: Optional[str]
ip_address=None, # type: Optional[str]
errors=None, # type: Optional[int]
user=None, # type: Optional[Any]
session_mode="application", # type: str
):
# type: (...) -> None
if sid is None:
sid = uuid.uuid4()
if started is None:
started = datetime.now(timezone.utc)
if status is None:
status = "ok"
self.status = status
self.did = None # type: Optional[str]
self.started = started
self.release = None # type: Optional[str]
self.environment = None # type: Optional[str]
self.duration = None # type: Optional[float]
self.user_agent = None # type: Optional[str]
self.ip_address = None # type: Optional[str]
self.session_mode = session_mode # type: str
self.errors = 0
self.update(
sid=sid,
did=did,
timestamp=timestamp,
duration=duration,
release=release,
environment=environment,
user_agent=user_agent,
ip_address=ip_address,
errors=errors,
user=user,
)
@property
def truncated_started(self):
# type: (...) -> datetime
return _minute_trunc(self.started)
def update(
self,
sid=None, # type: Optional[Union[str, uuid.UUID]]
did=None, # type: Optional[str]
timestamp=None, # type: Optional[datetime]
started=None, # type: Optional[datetime]
duration=None, # type: Optional[float]
status=None, # type: Optional[SessionStatus]
release=None, # type: Optional[str]
environment=None, # type: Optional[str]
user_agent=None, # type: Optional[str]
ip_address=None, # type: Optional[str]
errors=None, # type: Optional[int]
user=None, # type: Optional[Any]
):
# type: (...) -> None
# If a user is supplied we pull some data form it
if user:
if ip_address is None:
ip_address = user.get("ip_address")
if did is None:
did = user.get("id") or user.get("email") or user.get("username")
if sid is not None:
self.sid = _make_uuid(sid)
if did is not None:
self.did = str(did)
if timestamp is None:
timestamp = datetime.now(timezone.utc)
self.timestamp = timestamp
if started is not None:
self.started = started
if duration is not None:
self.duration = duration
if release is not None:
self.release = release
if environment is not None:
self.environment = environment
if ip_address is not None:
self.ip_address = ip_address
if user_agent is not None:
self.user_agent = user_agent
if errors is not None:
self.errors = errors
if status is not None:
self.status = status
def close(
self,
status=None, # type: Optional[SessionStatus]
):
# type: (...) -> Any
if status is None and self.status == "ok":
status = "exited"
if status is not None:
self.update(status=status)
def get_json_attrs(
self,
with_user_info=True, # type: Optional[bool]
):
# type: (...) -> Any
attrs = {}
if self.release is not None:
attrs["release"] = self.release
if self.environment is not None:
attrs["environment"] = self.environment
if with_user_info:
if self.ip_address is not None:
attrs["ip_address"] = self.ip_address
if self.user_agent is not None:
attrs["user_agent"] = self.user_agent
return attrs
def to_json(self):
# type: (...) -> Any
rv = {
"sid": str(self.sid),
"init": True,
"started": format_timestamp(self.started),
"timestamp": format_timestamp(self.timestamp),
"status": self.status,
} # type: Dict[str, Any]
if self.errors:
rv["errors"] = self.errors
if self.did is not None:
rv["did"] = self.did
if self.duration is not None:
rv["duration"] = self.duration
attrs = self.get_json_attrs()
if attrs:
rv["attrs"] = attrs
return rv
| Session |
python | pypa__setuptools | setuptools/_vendor/jaraco/collections/__init__.py | {
"start": 12564,
"end": 13066
} | class ____:
"""
Provide a getitem interface for attributes of an object.
Let's say you want to get at the string.lowercase property in a formatted
string. It's easy with DictAdapter.
>>> import string
>>> print("lowercase is %(ascii_lowercase)s" % DictAdapter(string))
lowercase is abcdefghijklmnopqrstuvwxyz
"""
def __init__(self, wrapped_ob):
self.object = wrapped_ob
def __getitem__(self, name):
return getattr(self.object, name)
| DictAdapter |
python | google__pytype | pytype/pytd/booleq.py | {
"start": 3124,
"end": 5705
} | class ____(BooleanTerm):
"""An equality constraint.
This declares an equality between a variable and a value, or a variable
and a variable. External code should use Eq rather than creating an _Eq
instance directly.
Attributes:
left: A string; left side of the equality. This is expected to be the string
with the higher ascii value, so e.g. strings starting with "~" (ascii
0x7e) should be on the left.
right: A string; right side of the equality. This is the lower ascii value.
"""
__slots__ = ("left", "right")
def __init__(self, left, right):
"""Initialize an equality.
Args:
left: A string. Left side of the equality.
right: A string. Right side of the equality.
"""
self.left = left
self.right = right
def __repr__(self):
return f"Eq({self.left!r}, {self.right!r})"
def __str__(self):
return f"{self.left} == {self.right}"
def __hash__(self):
return hash((self.left, self.right))
def __eq__(self, other):
return (
self.__class__ == other.__class__
and self.left == other.left
and self.right == other.right
)
  def simplify(self, assignments):
    """Simplify this equality.
    This will try to look up the values, and return FALSE if they're no longer
    possible. Also, when comparing two variables, it will compute the
    intersection, and return a disjunction of variable=value equalities instead.
    Args:
      assignments: Variable assignments (dict mapping strings to sets of
        strings). Used to determine whether this equality is still possible, and
        to compute intersections between two variables.
    Returns:
      A new BooleanTerm.
    """
    # NOTE(review): the docstring promises an intersection/disjunction for the
    # variable==variable case, but this implementation simply returns self when
    # `right` is itself a variable in `assignments` — confirm this is intended
    # (the intersection logic lives in extract_pivots instead).
    if self.right in assignments:
      return self
    else:
      # `right` is a concrete value: the equality survives only if that value
      # is still in left's domain.  Assumes `self.left` is present in
      # `assignments` — TODO confirm callers guarantee this (KeyError otherwise).
      return self if self.right in assignments[self.left] else FALSE
  def extract_pivots(self, assignments):
    """Extract the pivots. See BooleanTerm.extract_pivots()."""
    if self.left in assignments and self.right in assignments:
      # variable == variable: each side is pinned to the intersection of the
      # two domains.
      intersection = assignments[self.left] & assignments[self.right]
      return {
          self.left: frozenset(intersection),
          self.right: frozenset(intersection),
      }
    else:
      # variable == value: each name maps to the singleton of the other side.
      return {
          self.left: frozenset((self.right,)),
          self.right: frozenset((self.left,)),
      }
  def extract_equalities(self):
    # An atomic equality contributes exactly one (left, right) pair.
    return ((self.left, self.right),)
def _expr_set_hash(expr_set):
# We sort the hash of individual expressions so that two equal sets
# have the same hash value.
return hash(tuple(sorted(hash(e) for e in expr_set)))
| _Eq |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/output.py | {
"start": 7825,
"end": 9376
} | class ____(OutputDefinition):
"""Variant of :py:class:`OutputDefinition <dagster.OutputDefinition>` for an
output that will dynamically alter the graph at runtime.
When using in a composition function such as :py:func:`@job <dagster.job>`,
dynamic outputs must be used with either:
* ``map`` - clone downstream nodes for each separate :py:class:`DynamicOutput`
* ``collect`` - gather across all :py:class:`DynamicOutput` in to a list
Uses the same constructor as :py:class:`OutputDefinition <dagster.OutputDefinition>`
.. code-block:: python
@op(
config_schema={
"path": Field(str, default_value=file_relative_path(__file__, "sample"))
},
output_defs=[DynamicOutputDefinition(str)],
)
def files_in_directory(context):
path = context.op_config["path"]
dirname, _, filenames = next(os.walk(path))
for file in filenames:
yield DynamicOutput(os.path.join(dirname, file), mapping_key=_clean(file))
@job
def process_directory():
files = files_in_directory()
# use map to invoke an op on each dynamic output
file_results = files.map(process_file)
# use collect to gather the results in to a list
summarize_directory(file_results.collect())
"""
    @property
    def is_dynamic(self) -> bool:
        # Signals to graph composition that this output fans out at runtime
        # (consumed via .map / .collect), overriding the base-class default.
        return True
| DynamicOutputDefinition |
python | pandas-dev__pandas | pandas/core/nanops.py | {
"start": 2346,
"end": 52884
} | class ____:
    def __init__(self, name=None, **kwargs) -> None:
        # Name of the bottleneck function to dispatch to; when None, the
        # decorated function's own __name__ is used (see __call__).
        self.name = name
        # Default keyword arguments merged into each wrapped call (e.g. ddof=1).
        self.kwargs = kwargs
    def __call__(self, alt: F) -> F:
        # Decorator body: wrap `alt` so that the bottleneck implementation of
        # the same name is used when available and safe for the input dtype.
        bn_name = self.name or alt.__name__
        try:
            bn_func = getattr(bn, bn_name)
        except (AttributeError, NameError):  # pragma: no cover
            bn_func = None
        @functools.wraps(alt)
        def f(
            values: np.ndarray,
            *,
            axis: AxisInt | None = None,
            skipna: bool = True,
            **kwds,
        ):
            # Merge decorator-supplied defaults without overriding anything the
            # caller passed explicitly.
            if len(self.kwargs) > 0:
                for k, v in self.kwargs.items():
                    if k not in kwds:
                        kwds[k] = v
            if values.size == 0 and kwds.get("min_count") is None:
                # We are empty, returning NA for our type
                # Only applies for the default `min_count` of None
                # since that affects how empty arrays are handled.
                # TODO(GH-18976) update all the nanops methods to
                # correctly handle empty inputs and remove this check.
                # It *may* just be `var`
                return _na_for_min_count(values, axis)
            if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name):
                if kwds.get("mask", None) is None:
                    # `mask` is not recognised by bottleneck, would raise
                    # TypeError if called
                    kwds.pop("mask", None)
                    result = bn_func(values, axis=axis, **kwds)
                    # prefer to treat inf/-inf as NA, but must compute the func
                    # twice :(
                    if _has_infs(result):
                        result = alt(values, axis=axis, skipna=skipna, **kwds)
                else:
                    # A mask was supplied: bottleneck cannot honor it, so fall
                    # back to the pure-Python/numpy implementation.
                    result = alt(values, axis=axis, skipna=skipna, **kwds)
            else:
                result = alt(values, axis=axis, skipna=skipna, **kwds)
            return result
        return cast(F, f)
def _bn_ok_dtype(dtype: DtypeObj, name: str) -> bool:
# Bottleneck chokes on datetime64, PeriodDtype (or and EA)
if dtype != object and not needs_i8_conversion(dtype):
# GH 42878
# Bottleneck uses naive summation leading to O(n) loss of precision
# unlike numpy which implements pairwise summation, which has O(log(n)) loss
# crossref: https://github.com/pydata/bottleneck/issues/379
# GH 15507
# bottleneck does not properly upcast during the sum
# so can overflow
# GH 9422
# further we also want to preserve NaN when all elements
# are NaN, unlike bottleneck/numpy which consider this
# to be 0
return name not in ["nansum", "nanprod", "nanmean"]
return False
def _has_infs(result) -> bool:
if isinstance(result, np.ndarray):
if result.dtype in ("f8", "f4"):
# Note: outside of an nanops-specific test, we always have
# result.ndim == 1, so there is no risk of this ravel making a copy.
return lib.has_infs(result.ravel("K"))
try:
return np.isinf(result).any()
except (TypeError, NotImplementedError):
# if it doesn't support infs, then it can't have infs
return False
def _get_fill_value(
    dtype: DtypeObj, fill_value: Scalar | None = None, fill_value_typ=None
):
    """Return the fill value to use for missing entries of *dtype*.

    An explicitly supplied *fill_value* always wins.  Otherwise NaN-capable
    dtypes get NaN (or +/-inf when *fill_value_typ* asks for it), while
    i8-backed dtypes (datetime64/timedelta64/period) get iNaT or the max int64.
    """
    if fill_value is not None:
        return fill_value
    if not _na_ok_dtype(dtype):
        # i8-backed dtypes: the sentinel lives in integer space.
        return lib.i8max if fill_value_typ == "+inf" else iNaT
    if fill_value_typ is None:
        return np.nan
    return np.inf if fill_value_typ == "+inf" else -np.inf
def _maybe_get_mask(
values: np.ndarray, skipna: bool, mask: npt.NDArray[np.bool_] | None
) -> npt.NDArray[np.bool_] | None:
"""
Compute a mask if and only if necessary.
This function will compute a mask iff it is necessary. Otherwise,
return the provided mask (potentially None) when a mask does not need to be
computed.
A mask is never necessary if the values array is of boolean or integer
dtypes, as these are incapable of storing NaNs. If passing a NaN-capable
dtype that is interpretable as either boolean or integer data (eg,
timedelta64), a mask must be provided.
If the skipna parameter is False, a new mask will not be computed.
The mask is computed using isna() by default. Setting invert=True selects
notna() as the masking function.
Parameters
----------
values : ndarray
input array to potentially compute mask for
skipna : bool
boolean for whether NaNs should be skipped
mask : Optional[ndarray]
nan-mask if known
Returns
-------
Optional[np.ndarray[bool]]
"""
if mask is None:
if values.dtype.kind in "biu":
# Boolean data cannot contain nulls, so signal via mask being None
return None
if skipna or values.dtype.kind in "mM":
mask = isna(values)
return mask
def _get_values(
    values: np.ndarray,
    skipna: bool,
    fill_value: Any = None,
    fill_value_typ: str | None = None,
    mask: npt.NDArray[np.bool_] | None = None,
) -> tuple[np.ndarray, npt.NDArray[np.bool_] | None]:
    """
    Utility to get the values view, mask, dtype, dtype_max, and fill_value.
    If both mask and fill_value/fill_value_typ are not None and skipna is True,
    the values array will be copied.
    For input arrays of boolean or integer dtypes, copies will only occur if a
    precomputed mask, a fill_value/fill_value_typ, and skipna=True are
    provided.
    Parameters
    ----------
    values : ndarray
        input array to potentially compute mask for
    skipna : bool
        boolean for whether NaNs should be skipped
    fill_value : Any
        value to fill NaNs with
    fill_value_typ : str
        Set to '+inf' or '-inf' to handle dtype-specific infinities
    mask : Optional[np.ndarray[bool]]
        nan-mask if known
    Returns
    -------
    values : ndarray
        Potential copy of input value array
    mask : Optional[ndarray[bool]]
        Mask for values, if deemed necessary to compute
    """
    # In _get_values is only called from within nanops, and in all cases
    # with scalar fill_value. This guarantee is important for the
    # np.where call below
    mask = _maybe_get_mask(values, skipna, mask)
    dtype = values.dtype
    datetimelike = False
    if values.dtype.kind in "mM":
        # changing timedelta64/datetime64 to int64 needs to happen after
        # finding `mask` above
        values = np.asarray(values.view("i8"))
        datetimelike = True
    if skipna and (mask is not None):
        # get our fill value (in case we need to provide an alternative
        # dtype for it)
        fill_value = _get_fill_value(
            dtype, fill_value=fill_value, fill_value_typ=fill_value_typ
        )
        if fill_value is not None:
            if mask.any():
                if datetimelike or _na_ok_dtype(dtype):
                    # the dtype can hold fill_value directly, so an in-place
                    # putmask on a copy is safe
                    values = values.copy()
                    np.putmask(values, mask, fill_value)
                else:
                    # np.where will promote if needed
                    values = np.where(~mask, values, fill_value)
    return values, mask
def _get_dtype_max(dtype: np.dtype) -> np.dtype:
# return a platform independent precision dtype
dtype_max = dtype
if dtype.kind in "bi":
dtype_max = np.dtype(np.int64)
elif dtype.kind == "u":
dtype_max = np.dtype(np.uint64)
elif dtype.kind == "f":
dtype_max = np.dtype(np.float64)
return dtype_max
def _na_ok_dtype(dtype: DtypeObj) -> bool:
    """Return True if *dtype* can hold NaN directly (no i8 sentinel needed)."""
    # datetime64/timedelta64/period use iNaT instead of NaN; integers cannot
    # represent missing values at all.  `or` short-circuits, matching the
    # original two-step check.
    return not (needs_i8_conversion(dtype) or issubclass(dtype.type, np.integer))
def _wrap_results(result, dtype: np.dtype, fill_value=None):
    """Cast a reduction *result* computed in i8/float space back to *dtype*.

    Handles the datetime64 ("M") and timedelta64 ("m") kinds specially,
    mapping the fill sentinel / NaN back to NaT; everything else (including
    NaT itself) is passed through unchanged.
    """
    if result is NaT:
        pass
    elif dtype.kind == "M":
        if fill_value is None:
            # GH#24293
            fill_value = iNaT
        if not isinstance(result, np.ndarray):
            assert not isna(fill_value), "Expected non-null fill_value"
            if result == fill_value:
                result = np.nan
            if isna(result):
                result = np.datetime64("NaT", "ns").astype(dtype)
            else:
                result = np.int64(result).view(dtype)
            # retain original unit
            result = result.astype(dtype, copy=False)
        else:
            # If we have float dtype, taking a view will give the wrong result
            result = result.astype(dtype)
    elif dtype.kind == "m":
        if not isinstance(result, np.ndarray):
            if result == fill_value or np.isnan(result):
                result = np.timedelta64("NaT").astype(dtype)
            elif np.fabs(result) > lib.i8max:
                # raise if we have a timedelta64[ns] which is too large
                raise ValueError("overflow in timedelta operation")
            else:
                # return a timedelta64 with the original unit
                result = np.int64(result).astype(dtype, copy=False)
        else:
            result = result.astype("m8[ns]").view(dtype)
    return result
def _datetimelike_compat(func: F) -> F:
    """
    If we have datetime64 or timedelta64 values, ensure we have a correct
    mask before calling the wrapped function, then cast back afterwards.
    """
    @functools.wraps(func)
    def new_func(
        values: np.ndarray,
        *,
        axis: AxisInt | None = None,
        skipna: bool = True,
        mask: npt.NDArray[np.bool_] | None = None,
        **kwargs,
    ):
        orig_values = values
        datetimelike = values.dtype.kind in "mM"
        if datetimelike and mask is None:
            # datetime64/timedelta64 always need an explicit NaT mask
            mask = isna(values)
        result = func(values, axis=axis, skipna=skipna, mask=mask, **kwargs)
        if datetimelike:
            # cast the i8-space result back to the original datetimelike dtype
            result = _wrap_results(result, orig_values.dtype, fill_value=iNaT)
            if not skipna:
                assert mask is not None  # checked above
                # with skipna=False any masked entry poisons the result -> NaT
                result = _mask_datetimelike_result(result, axis, mask, orig_values)
        return result
    return cast(F, new_func)
def _na_for_min_count(values: np.ndarray, axis: AxisInt | None) -> Scalar | np.ndarray:
    """Return the missing value (scalar or filled array) for *values*.

    For 1-D input or ``axis=None`` a single NA scalar is returned; for
    higher-dimensional input the reduced shape is filled with that NA.
    The NA is either ``np.nan`` or ``pd.NaT`` depending on the dtype.
    """
    if values.dtype.kind in "iufcb":
        # numeric/bool dtypes cannot hold NA themselves; reductions on them
        # report float64 NaN instead.
        values = values.astype("float64")
    fill_value = na_value_for_dtype(values.dtype)
    if values.ndim == 1 or axis is None:
        return fill_value
    result_shape = values.shape[:axis] + values.shape[axis + 1 :]
    return np.full(result_shape, fill_value, dtype=values.dtype)
def maybe_operate_rowwise(func: F) -> F:
    """
    NumPy operations on C-contiguous ndarrays with axis=1 can be
    very slow if axis 1 >> axis 0.
    Operate row-by-row and concatenate the results.
    """
    @functools.wraps(func)
    def newfunc(values: np.ndarray, *, axis: AxisInt | None = None, **kwargs):
        if (
            axis == 1
            and values.ndim == 2
            and values.flags["C_CONTIGUOUS"]
            # only takes this path for wide arrays (long dataframes), for threshold see
            # https://github.com/pandas-dev/pandas/pull/43311#issuecomment-974891737
            and (values.shape[1] / 1000) > values.shape[0]
            and values.dtype != object
            and values.dtype != bool
        ):
            # iterate the rows (axis=0 views) and reduce each one separately
            arrs = list(values)
            if kwargs.get("mask") is not None:
                # the mask must be split row-wise in lockstep with the values
                mask = kwargs.pop("mask")
                results = [
                    func(arrs[i], mask=mask[i], **kwargs) for i in range(len(arrs))
                ]
            else:
                results = [func(x, **kwargs) for x in arrs]
            return np.array(results)
        return func(values, axis=axis, **kwargs)
    return cast(F, newfunc)
def nanany(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    mask: npt.NDArray[np.bool_] | None = None,
) -> bool:
    """
    Check if any elements along an axis evaluate to True.

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : bool
    """
    kind = values.dtype.kind
    if kind in "iub" and mask is None:
        # GH#26032 fastpath: ints/uints/bools hold no NaNs, skip the masking
        # machinery entirely.
        # error: Incompatible return value type (got "Union[bool_, ndarray]",
        # expected "bool")
        return values.any(axis)  # type: ignore[return-value]
    if kind == "M":
        # GH#34479
        raise TypeError("datetime64 type does not support operation 'any'")
    # Fill missing entries with False so they can never flip the result.
    values, _ = _get_values(values, skipna, fill_value=False, mask=mask)
    if values.dtype == object:
        # object arrays may hold non-booleans and any() would not coerce
        # them (numpy/numpy#4352)
        values = values.astype(bool)
    # error: Incompatible return value type (got "Union[bool_, ndarray]", expected
    # "bool")
    return values.any(axis)  # type: ignore[return-value]
def nanall(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    mask: npt.NDArray[np.bool_] | None = None,
) -> bool:
    """
    Check if all elements along an axis evaluate to True.

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : bool
    """
    kind = values.dtype.kind
    if kind in "iub" and mask is None:
        # GH#26032 fastpath: ints/uints/bools hold no NaNs, skip the masking
        # machinery entirely.
        # error: Incompatible return value type (got "Union[bool_, ndarray]",
        # expected "bool")
        return values.all(axis)  # type: ignore[return-value]
    if kind == "M":
        # GH#34479
        raise TypeError("datetime64 type does not support operation 'all'")
    # Fill missing entries with True so they can never flip the result.
    values, _ = _get_values(values, skipna, fill_value=True, mask=mask)
    if values.dtype == object:
        # object arrays may hold non-booleans and all() would not coerce
        # them (numpy/numpy#4352)
        values = values.astype(bool)
    # error: Incompatible return value type (got "Union[bool_, ndarray]", expected
    # "bool")
    return values.all(axis)  # type: ignore[return-value]
@disallow("M8")
@_datetimelike_compat
@maybe_operate_rowwise
def nansum(
values: np.ndarray,
*,
axis: AxisInt | None = None,
skipna: bool = True,
min_count: int = 0,
mask: npt.NDArray[np.bool_] | None = None,
) -> npt.NDArray[np.floating] | float | NaTType:
"""
Sum the elements along an axis ignoring NaNs
Parameters
----------
values : ndarray[dtype]
axis : int, optional
skipna : bool, default True
min_count: int, default 0
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : dtype
Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, 2, np.nan])
>>> nanops.nansum(s.values)
np.float64(3.0)
"""
dtype = values.dtype
values, mask = _get_values(values, skipna, fill_value=0, mask=mask)
dtype_sum = _get_dtype_max(dtype)
if dtype.kind == "f":
dtype_sum = dtype
elif dtype.kind == "m":
dtype_sum = np.dtype(np.float64)
the_sum = values.sum(axis, dtype=dtype_sum)
the_sum = _maybe_null_out(the_sum, axis, mask, values.shape, min_count=min_count)
return the_sum
def _mask_datetimelike_result(
    result: np.ndarray | np.datetime64 | np.timedelta64,
    axis: AxisInt | None,
    mask: npt.NDArray[np.bool_],
    orig_values: np.ndarray,
) -> np.ndarray | np.datetime64 | np.timedelta64 | NaTType:
    """Force NaT into *result* wherever *mask* shows a missing input.

    Used on the skipna=False path: any NaT along the reduced axis makes the
    corresponding output NaT.
    """
    if isinstance(result, np.ndarray):
        # we need to apply the mask
        result = result.astype("i8").view(orig_values.dtype)
        axis_mask = mask.any(axis=axis)
        result[axis_mask] = iNaT
    else:
        # scalar result: any masked input at all poisons it
        if mask.any():
            return np.int64(iNaT).view(orig_values.dtype)
    return result
@bottleneck_switch()
@_datetimelike_compat
def nanmean(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    mask: npt.NDArray[np.bool_] | None = None,
) -> float:
    """
    Compute the mean of the element along an axis ignoring NaNs
    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known
    Returns
    -------
    float
        Unless input is a float array, in which case use the same
        precision as the input array.
    Examples
    --------
    >>> from pandas.core import nanops
    >>> s = pd.Series([1, 2, np.nan])
    >>> nanops.nanmean(s.values)
    np.float64(1.5)
    """
    if values.dtype == object and len(values) > 1_000 and mask is None:
        # GH#54754 if we are going to fail, try to fail-fast
        nanmean(values[:1000], axis=axis, skipna=skipna)
    dtype = values.dtype
    # missing entries are replaced with 0; the count below excludes them
    values, mask = _get_values(values, skipna, fill_value=0, mask=mask)
    dtype_sum = _get_dtype_max(dtype)
    dtype_count = np.dtype(np.float64)
    # not using needs_i8_conversion because that includes period
    if dtype.kind in "mM":
        dtype_sum = np.dtype(np.float64)
    elif dtype.kind in "iu":
        dtype_sum = np.dtype(np.float64)
    elif dtype.kind == "f":
        # float input keeps its own precision for both sum and count
        dtype_sum = dtype
        dtype_count = dtype
    count = _get_counts(values.shape, mask, axis, dtype=dtype_count)
    the_sum = values.sum(axis, dtype=dtype_sum)
    the_sum = _ensure_numeric(the_sum)
    if axis is not None and getattr(the_sum, "ndim", False):
        count = cast(np.ndarray, count)
        with np.errstate(all="ignore"):
            # suppress division by zero warnings
            the_mean = the_sum / count
        ct_mask = count == 0
        if ct_mask.any():
            # all-NA slices have count 0 -> mean is NaN, not 0/0
            the_mean[ct_mask] = np.nan
    else:
        the_mean = the_sum / count if count > 0 else np.nan
    return the_mean
@bottleneck_switch()
def nanmedian(
    values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask=None
) -> float | np.ndarray:
    """
    Compute the median along the given axis while ignoring NaNs.
    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known
    Returns
    -------
    result : float | ndarray
        Unless input is a float array, in which case use the same
        precision as the input array.
    Examples
    --------
    >>> from pandas.core import nanops
    >>> s = pd.Series([1, np.nan, 2, 2])
    >>> nanops.nanmedian(s.values)
    2.0
    >>> s = pd.Series([np.nan, np.nan, np.nan])
    >>> nanops.nanmedian(s.values)
    nan
    """
    # for floats without mask, the data already uses NaN as missing value
    # indicator, and `mask` will be calculated from that below -> in those
    # cases we never need to set NaN to the masked values
    using_nan_sentinel = values.dtype.kind == "f" and mask is None
    def get_median(x: np.ndarray, _mask=None):
        # median of a single 1-D slice, honoring the enclosing `skipna`
        if _mask is None:
            _mask = notna(x)
        else:
            _mask = ~_mask
        if not skipna and not _mask.all():
            return np.nan
        with warnings.catch_warnings():
            # Suppress RuntimeWarning about All-NaN slice
            warnings.filterwarnings(
                "ignore", "All-NaN slice encountered", RuntimeWarning
            )
            warnings.filterwarnings("ignore", "Mean of empty slice", RuntimeWarning)
            res = np.nanmedian(x[_mask])
        return res
    dtype = values.dtype
    values, mask = _get_values(values, skipna, mask=mask, fill_value=None)
    if values.dtype.kind != "f":
        if values.dtype == object:
            # GH#34671 avoid casting strings to numeric
            inferred = lib.infer_dtype(values)
            if inferred in ["string", "mixed"]:
                raise TypeError(f"Cannot convert {values} to numeric")
        try:
            values = values.astype("f8")
        except ValueError as err:
            # e.g. "could not convert string to float: 'a'"
            raise TypeError(str(err)) from err
    if not using_nan_sentinel and mask is not None:
        # propagate the mask into the data as NaN so np.nanmedian sees it
        if not values.flags.writeable:
            values = values.copy()
        values[mask] = np.nan
    notempty = values.size
    res: float | np.ndarray
    # an array from a frame
    if values.ndim > 1 and axis is not None:
        # there's a non-empty array to apply over otherwise numpy raises
        if notempty:
            if not skipna:
                res = np.apply_along_axis(get_median, axis, values)
            else:
                # fastpath for the skipna case
                with warnings.catch_warnings():
                    # Suppress RuntimeWarning about All-NaN slice
                    warnings.filterwarnings(
                        "ignore", "All-NaN slice encountered", RuntimeWarning
                    )
                    if (values.shape[1] == 1 and axis == 0) or (
                        values.shape[0] == 1 and axis == 1
                    ):
                        # GH52788: fastpath when squeezable, nanmedian for 2D array slow
                        res = np.nanmedian(np.squeeze(values), keepdims=True)
                    else:
                        res = np.nanmedian(values, axis=axis)
        else:
            # must return the correct shape, but median is not defined for the
            # empty set so return nans of shape "everything but the passed axis"
            # since "axis" is where the reduction would occur if we had a nonempty
            # array
            res = _get_empty_reduction_result(values.shape, axis)
    else:
        # otherwise return a scalar value
        res = get_median(values, mask) if notempty else np.nan
    return _wrap_results(res, dtype)
def _get_empty_reduction_result(
shape: Shape,
axis: AxisInt,
) -> np.ndarray:
"""
The result from a reduction on an empty ndarray.
Parameters
----------
shape : Tuple[int, ...]
axis : int
Returns
-------
np.ndarray
"""
shp = np.array(shape)
dims = np.arange(len(shape))
ret = np.empty(shp[dims != axis], dtype=np.float64)
ret.fill(np.nan)
return ret
def _get_counts_nanvar(
    values_shape: Shape,
    mask: npt.NDArray[np.bool_] | None,
    axis: AxisInt | None,
    ddof: int,
    dtype: np.dtype = np.dtype(np.float64),
) -> tuple[float | np.ndarray, float | np.ndarray]:
    """
    Get the count of non-null values along an axis, accounting
    for degrees of freedom.
    Parameters
    ----------
    values_shape : Tuple[int, ...]
        shape tuple from values ndarray, used if mask is None
    mask : Optional[ndarray[bool]]
        locations in values that should be considered missing
    axis : Optional[int]
        axis to count along
    ddof : int
        degrees of freedom
    dtype : type, optional
        type to use for count
    Returns
    -------
    count : int, np.nan or np.ndarray
    d : int, np.nan or np.ndarray
        the divisor ``count - ddof``; NaN wherever ``count <= ddof``
    """
    count = _get_counts(values_shape, mask, axis, dtype=dtype)
    d = count - dtype.type(ddof)
    # always return NaN, never inf
    if is_float(count):
        if count <= ddof:
            # too few observations for the requested ddof -> undefined
            # error: Incompatible types in assignment (expression has type
            # "float", variable has type "Union[floating[Any], ndarray[Any,
            # dtype[floating[Any]]]]")
            count = np.nan  # type: ignore[assignment]
            d = np.nan
    else:
        # count is not narrowed by is_float check
        count = cast(np.ndarray, count)
        mask = count <= ddof
        if mask.any():
            np.putmask(d, mask, np.nan)
            np.putmask(count, mask, np.nan)
    return count, d
@bottleneck_switch(ddof=1)
def nanstd(
    values,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    ddof: int = 1,
    mask=None,
):
    """
    Compute the standard deviation along given axis while ignoring NaNs
    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    ddof : int, default 1
        Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
        where N represents the number of elements.
    mask : ndarray[bool], optional
        nan-mask if known
    Returns
    -------
    result : float
        Unless input is a float array, in which case use the same
        precision as the input array.
    Examples
    --------
    >>> from pandas.core import nanops
    >>> s = pd.Series([1, np.nan, 2, 3])
    >>> nanops.nanstd(s.values)
    1.0
    """
    if values.dtype == "M8[ns]":
        # the std of datetimes is a timedelta, so work in m8 space
        values = values.view("m8[ns]")
    orig_dtype = values.dtype
    values, mask = _get_values(values, skipna, mask=mask)
    # delegate the heavy lifting to nanvar, then take the square root
    result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask))
    return _wrap_results(result, orig_dtype)
@disallow("M8", "m8")
@bottleneck_switch(ddof=1)
def nanvar(
values: np.ndarray,
*,
axis: AxisInt | None = None,
skipna: bool = True,
ddof: int = 1,
mask=None,
):
"""
Compute the variance along given axis while ignoring NaNs
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nanvar(s.values)
1.0
"""
dtype = values.dtype
mask = _maybe_get_mask(values, skipna, mask)
if dtype.kind in "iu":
values = values.astype("f8")
if mask is not None:
values[mask] = np.nan
if values.dtype.kind == "f":
count, d = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)
else:
count, d = _get_counts_nanvar(values.shape, mask, axis, ddof)
if skipna and mask is not None:
values = values.copy()
np.putmask(values, mask, 0)
# xref GH10242
# Compute variance via two-pass algorithm, which is stable against
# cancellation errors and relatively accurate for small numbers of
# observations.
#
# See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count
if axis is not None:
avg = np.expand_dims(avg, axis)
if values.dtype.kind == "c":
# Need to use absolute value for complex numbers.
sqr = _ensure_numeric(abs(avg - values) ** 2)
else:
sqr = _ensure_numeric((avg - values) ** 2)
if mask is not None:
np.putmask(sqr, mask, 0)
result = sqr.sum(axis=axis, dtype=np.float64) / d
# Return variance as np.float64 (the datatype used in the accumulator),
# unless we were dealing with a float array, in which case use the same
# precision as the original values array.
if dtype.kind == "f":
result = result.astype(dtype, copy=False)
return result
@disallow("M8", "m8")
def nansem(
values: np.ndarray,
*,
axis: AxisInt | None = None,
skipna: bool = True,
ddof: int = 1,
mask: npt.NDArray[np.bool_] | None = None,
) -> float:
"""
Compute the standard error in the mean along given axis while ignoring NaNs
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nansem(s.values)
np.float64(0.5773502691896258)
"""
# This checks if non-numeric-like data is passed with numeric_only=False
# and raises a TypeError otherwise
nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)
mask = _maybe_get_mask(values, skipna, mask)
if values.dtype.kind != "f":
values = values.astype("f8")
if not skipna and mask is not None and mask.any():
return np.nan
count, _ = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)
var = nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)
return np.sqrt(var) / np.sqrt(count)
def _nanminmax(meth, fill_value_typ):
    """Factory building the nanmin/nanmax reductions.

    *meth* is the ndarray method name ("min"/"max"); *fill_value_typ* is the
    infinity that is neutral for that reduction ("+inf" for min, "-inf" for
    max), used to fill masked entries so they cannot win.
    """
    @bottleneck_switch(name=f"nan{meth}")
    @_datetimelike_compat
    def reduction(
        values: np.ndarray,
        *,
        axis: AxisInt | None = None,
        skipna: bool = True,
        mask: npt.NDArray[np.bool_] | None = None,
    ):
        if values.size == 0:
            # empty input: return the dtype-appropriate NA
            return _na_for_min_count(values, axis)
        dtype = values.dtype
        values, mask = _get_values(
            values, skipna, fill_value_typ=fill_value_typ, mask=mask
        )
        result = getattr(values, meth)(axis)
        result = _maybe_null_out(
            result, axis, mask, values.shape, datetimelike=dtype.kind in "mM"
        )
        return result
    return reduction
nanmin = _nanminmax("min", fill_value_typ="+inf")
nanmax = _nanminmax("max", fill_value_typ="-inf")
def nanargmax(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    mask: npt.NDArray[np.bool_] | None = None,
) -> int | np.ndarray:
    """
    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known
    Returns
    -------
    result : int or ndarray[int]
        The index/indices of max value in specified axis or -1 in the NA case
    Examples
    --------
    >>> from pandas.core import nanops
    >>> arr = np.array([1, 2, 3, np.nan, 4])
    >>> nanops.nanargmax(arr)
    np.int64(4)
    >>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3)
    >>> arr[2:, 2] = np.nan
    >>> arr
    array([[ 0.,  1.,  2.],
           [ 3.,  4.,  5.],
           [ 6.,  7., nan],
           [ 9., 10., nan]])
    >>> nanops.nanargmax(arr, axis=1)
    array([2, 2, 1, 1])
    """
    # missing entries are filled with -inf so they can never be the argmax
    values, mask = _get_values(values, True, fill_value_typ="-inf", mask=mask)
    result = values.argmax(axis)
    # error: Argument 1 to "_maybe_arg_null_out" has incompatible type "Any |
    # signedinteger[Any]"; expected "ndarray[Any, Any]"
    result = _maybe_arg_null_out(result, axis, mask, skipna)  # type: ignore[arg-type]
    return result
def nanargmin(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    mask: npt.NDArray[np.bool_] | None = None,
) -> int | np.ndarray:
    """
    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known
    Returns
    -------
    result : int or ndarray[int]
        The index/indices of min value in specified axis or -1 in the NA case
    Examples
    --------
    >>> from pandas.core import nanops
    >>> arr = np.array([1, 2, 3, np.nan, 4])
    >>> nanops.nanargmin(arr)
    np.int64(0)
    >>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3)
    >>> arr[2:, 0] = np.nan
    >>> arr
    array([[ 0.,  1.,  2.],
           [ 3.,  4.,  5.],
           [nan,  7.,  8.],
           [nan, 10., 11.]])
    >>> nanops.nanargmin(arr, axis=1)
    array([0, 0, 1, 1])
    """
    # missing entries are filled with +inf so they can never be the argmin
    values, mask = _get_values(values, True, fill_value_typ="+inf", mask=mask)
    result = values.argmin(axis)
    # error: Argument 1 to "_maybe_arg_null_out" has incompatible type "Any |
    # signedinteger[Any]"; expected "ndarray[Any, Any]"
    result = _maybe_arg_null_out(result, axis, mask, skipna)  # type: ignore[arg-type]
    return result
@disallow("M8", "m8")
@maybe_operate_rowwise
def nanskew(
values: np.ndarray,
*,
axis: AxisInt | None = None,
skipna: bool = True,
mask: npt.NDArray[np.bool_] | None = None,
) -> float:
"""
Compute the sample skewness.
The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G1. The algorithm computes this coefficient directly
from the second and third central moment.
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, np.nan, 1, 2])
>>> nanops.nanskew(s.values)
np.float64(1.7320508075688787)
"""
mask = _maybe_get_mask(values, skipna, mask)
if values.dtype.kind != "f":
values = values.astype("f8")
count = _get_counts(values.shape, mask, axis)
else:
count = _get_counts(values.shape, mask, axis, dtype=values.dtype)
if skipna and mask is not None:
values = values.copy()
np.putmask(values, mask, 0)
elif not skipna and mask is not None and mask.any():
return np.nan
with np.errstate(invalid="ignore", divide="ignore"):
mean = values.sum(axis, dtype=np.float64) / count
if axis is not None:
mean = np.expand_dims(mean, axis)
adjusted = values - mean
if skipna and mask is not None:
np.putmask(adjusted, mask, 0)
adjusted2 = adjusted**2
adjusted3 = adjusted2 * adjusted
m2 = adjusted2.sum(axis, dtype=np.float64)
m3 = adjusted3.sum(axis, dtype=np.float64)
# floating point error. See comment in [nankurt]
max_abs = np.abs(values).max(axis, initial=0.0)
eps = np.finfo(m2.dtype).eps
constant_tolerance2 = ((eps * max_abs) ** 2) * count
constant_tolerance3 = ((eps * max_abs) ** 3) * count
m2 = _zero_out_fperr(m2, constant_tolerance2)
m3 = _zero_out_fperr(m3, constant_tolerance3)
with np.errstate(invalid="ignore", divide="ignore"):
result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2**1.5)
dtype = values.dtype
if dtype.kind == "f":
result = result.astype(dtype, copy=False)
if isinstance(result, np.ndarray):
result = np.where(m2 == 0, 0, result)
result[count < 3] = np.nan
else:
result = dtype.type(0) if m2 == 0 else result
if count < 3:
return np.nan
return result
@disallow("M8", "m8")
@maybe_operate_rowwise
def nankurt(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    mask: npt.NDArray[np.bool_] | None = None,
) -> float:
    """
    Compute the sample excess kurtosis

    The statistic computed here is the adjusted Fisher-Pearson standardized
    moment coefficient G2, computed directly from the second and fourth
    central moment.

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : float64
        Unless input is a float array, in which case use the same
        precision as the input array.

    Examples
    --------
    >>> from pandas.core import nanops
    >>> s = pd.Series([1, np.nan, 1, 3, 2])
    >>> nanops.nankurt(s.values)
    np.float64(-1.2892561983471076)
    """
    # Resolve the NA mask once; non-float inputs are upcast to float64 so the
    # moment arithmetic below is always done in floating point.
    mask = _maybe_get_mask(values, skipna, mask)
    if values.dtype.kind != "f":
        values = values.astype("f8")
        count = _get_counts(values.shape, mask, axis)
    else:
        # keep the input's float precision for the count so the final result
        # can be cast back without an extra dtype round-trip
        count = _get_counts(values.shape, mask, axis, dtype=values.dtype)
    if skipna and mask is not None:
        # zero-fill NA positions on a copy; the zeros are neutralized again
        # after centering (see the putmask on `adjusted` below)
        values = values.copy()
        np.putmask(values, mask, 0)
    elif not skipna and mask is not None and mask.any():
        # skipna=False with any NA present: result is NA by definition
        return np.nan
    # mean may divide by a zero count; suppress the warning and let NaN flow
    with np.errstate(invalid="ignore", divide="ignore"):
        mean = values.sum(axis, dtype=np.float64) / count
    if axis is not None:
        # re-insert the reduced axis so `values - mean` broadcasts correctly
        mean = np.expand_dims(mean, axis)
    adjusted = values - mean
    if skipna and mask is not None:
        # NA slots were filled with 0 above, so after centering they hold
        # -mean; reset them to 0 so they contribute nothing to the moments
        np.putmask(adjusted, mask, 0)
    adjusted2 = adjusted**2
    adjusted4 = adjusted2**2
    # second and fourth central moments (as sums, not averages)
    m2 = adjusted2.sum(axis, dtype=np.float64)
    m4 = adjusted4.sum(axis, dtype=np.float64)
    # Several floating point errors may occur during the summation due to rounding.
    # This computation is similar to the one in Scipy
    # https://github.com/scipy/scipy/blob/04d6d9c460b1fed83f2919ecec3d743cfa2e8317/scipy/stats/_stats_py.py#L1429
    # With a few modifications, like using the maximum value instead of the averages
    # and some adaptations because they use the average and we use the sum for `m2`.
    # We need to estimate an upper bound to the error to consider the data constant.
    # Let's call:
    # x: true value in data
    # y: floating point representation
    # e: relative approximation error
    # n: number of observations in array
    #
    # We have that:
    # |x - y|/|x| <= e (See https://en.wikipedia.org/wiki/Machine_epsilon)
    # (|x - y|/|x|)² <= e²
    # Σ (|x - y|/|x|)² <= ne²
    #
    # Let's say that the fperr upper bound for m2 is constrained by the summation.
    # |m2 - y|/|m2| <= ne²
    # |m2 - y| <= n|m2|e²
    #
    # We will use max (x²) to estimate |m2|
    max_abs = np.abs(values).max(axis, initial=0.0)
    eps = np.finfo(m2.dtype).eps
    constant_tolerance2 = ((eps * max_abs) ** 2) * count
    constant_tolerance4 = ((eps * max_abs) ** 4) * count
    # clamp moments that are pure floating point noise to exactly zero so a
    # constant input yields kurtosis 0 rather than garbage
    m2 = _zero_out_fperr(m2, constant_tolerance2)
    m4 = _zero_out_fperr(m4, constant_tolerance4)
    with np.errstate(invalid="ignore", divide="ignore"):
        # G2 = n(n+1)(n-1) m4 / ((n-2)(n-3) m2²) - 3(n-1)²/((n-2)(n-3))
        adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3))
        numerator = count * (count + 1) * (count - 1) * m4
        denominator = (count - 2) * (count - 3) * m2**2
    if not isinstance(denominator, np.ndarray):
        # if ``denom`` is a scalar, check these corner cases first before
        # doing division
        if count < 4:
            return np.nan
        if denominator == 0:
            return values.dtype.type(0)
    with np.errstate(invalid="ignore", divide="ignore"):
        result = numerator / denominator - adj
    dtype = values.dtype
    if dtype.kind == "f":
        # return with the same float precision as the input
        result = result.astype(dtype, copy=False)
    if isinstance(result, np.ndarray):
        # array path: apply the same corner cases handled above for scalars
        result = np.where(denominator == 0, 0, result)
        result[count < 4] = np.nan
    return result
@disallow("M8", "m8")
@maybe_operate_rowwise
def nanprod(
    values: np.ndarray,
    *,
    axis: AxisInt | None = None,
    skipna: bool = True,
    min_count: int = 0,
    mask: npt.NDArray[np.bool_] | None = None,
) -> float:
    """
    Compute the product along an axis, treating NA entries as 1.

    Parameters
    ----------
    values : ndarray[dtype]
    axis : int, optional
    skipna : bool, default True
    min_count: int, default 0
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    Dtype
        The product of all elements on a given axis. ( NaNs are treated as 1)

    Examples
    --------
    >>> from pandas.core import nanops
    >>> s = pd.Series([1, 2, 3, np.nan])
    >>> nanops.nanprod(s.values)
    np.float64(6.0)
    """
    mask = _maybe_get_mask(values, skipna, mask)
    if skipna and mask is not None:
        # fill NA slots with the multiplicative identity on a copy
        filled = values.copy()
        filled[mask] = 1
    else:
        filled = values
    product = filled.prod(axis)
    # error: Incompatible return value type (got "Union[ndarray, float]", expected
    # "float")
    return _maybe_null_out(  # type: ignore[return-value]
        product, axis, mask, filled.shape, min_count=min_count
    )
def _maybe_arg_null_out(
result: np.ndarray,
axis: AxisInt | None,
mask: npt.NDArray[np.bool_] | None,
skipna: bool,
) -> np.ndarray | int:
# helper function for nanargmin/nanargmax
if mask is None:
return result
if axis is None or not getattr(result, "ndim", False):
if skipna and mask.all():
raise ValueError("Encountered all NA values")
elif not skipna and mask.any():
raise ValueError("Encountered an NA value with skipna=False")
else:
if skipna and mask.all(axis).any():
raise ValueError("Encountered all NA values")
elif not skipna and mask.any(axis).any():
raise ValueError("Encountered an NA value with skipna=False")
return result
def _get_counts(
values_shape: Shape,
mask: npt.NDArray[np.bool_] | None,
axis: AxisInt | None,
dtype: np.dtype[np.floating] = np.dtype(np.float64),
) -> np.floating | npt.NDArray[np.floating]:
"""
Get the count of non-null values along an axis
Parameters
----------
values_shape : tuple of int
shape tuple from values ndarray, used if mask is None
mask : Optional[ndarray[bool]]
locations in values that should be considered missing
axis : Optional[int]
axis to count along
dtype : type, optional
type to use for count
Returns
-------
count : scalar or array
"""
if axis is None:
if mask is not None:
n = mask.size - mask.sum()
else:
n = np.prod(values_shape)
return dtype.type(n)
if mask is not None:
count = mask.shape[axis] - mask.sum(axis)
else:
count = values_shape[axis]
if is_integer(count):
return dtype.type(count)
return count.astype(dtype, copy=False)
def _maybe_null_out(
    result: np.ndarray | float | NaTType,
    axis: AxisInt | None,
    mask: npt.NDArray[np.bool_] | None,
    shape: tuple[int, ...],
    min_count: int = 1,
    datetimelike: bool = False,
) -> np.ndarray | float | NaTType:
    """
    Replace result entries with a null value wherever the number of non-null
    contributing values falls below ``min_count``.

    Parameters
    ----------
    result : ndarray, scalar, or NaT
        Reduction result to (possibly) null out.
    axis : int or None
        Axis the reduction was performed over; used with ndarray results.
    mask : ndarray[bool] or None
        NA locations in the original values; None means no NAs.
    shape : tuple of int
        Shape of the original values (used when ``mask`` is None).
    min_count : int, default 1
        Minimum number of non-null values required to keep the result.
    datetimelike : bool, default False
        If True, use ``iNaT`` as the null sentinel instead of casting to
        float NaN (GH#60646).

    Returns
    -------
    ndarray, scalar, or NaT
        ``result`` with below-``min_count`` entries nulled out; the dtype may
        be upcast (to float/complex) to hold NaN.
    """
    if mask is None and min_count == 0:
        # nothing to check; short-circuit
        return result
    if axis is not None and isinstance(result, np.ndarray):
        # array path: build a boolean mask of positions with too few non-nulls
        if mask is not None:
            null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0
        else:
            # we have no nulls, kept mask=None in _maybe_get_mask
            below_count = shape[axis] - min_count < 0
            new_shape = shape[:axis] + shape[axis + 1 :]
            null_mask = np.broadcast_to(below_count, new_shape)
        if np.any(null_mask):
            if datetimelike:
                # GH#60646 For datetimelike, no need to cast to float
                result[null_mask] = iNaT
            elif is_numeric_dtype(result):
                # upcast so the array can actually hold NaN
                if np.iscomplexobj(result):
                    result = result.astype("c16")
                elif not is_float_dtype(result):
                    result = result.astype("f8", copy=False)
                result[null_mask] = np.nan
            else:
                # GH12941, use None to auto cast null
                result[null_mask] = None
    elif result is not NaT:
        # scalar path (NaT is already null, leave it alone)
        if check_below_min_count(shape, mask, min_count):
            result_dtype = getattr(result, "dtype", None)
            if is_float_dtype(result_dtype):
                # preserve the scalar's float precision for the NaN
                # error: Item "None" of "Optional[Any]" has no attribute "type"
                result = result_dtype.type("nan")  # type: ignore[union-attr]
            else:
                result = np.nan
    return result
def check_below_min_count(
    shape: tuple[int, ...], mask: npt.NDArray[np.bool_] | None, min_count: int
) -> bool:
    """
    Check for the `min_count` keyword. Returns True if below `min_count` (when
    missing value should be returned from the reduction).

    Parameters
    ----------
    shape : tuple
        The shape of the values (`values.shape`).
    mask : ndarray[bool] or None
        Boolean numpy array (typically of same shape as `shape`) or None.
    min_count : int
        Keyword passed through from sum/prod call.

    Returns
    -------
    bool
    """
    if min_count <= 0:
        # min_count of 0 (or less) can never be violated
        return False
    # mask is None means there are no missing values at all
    non_nulls = np.prod(shape) if mask is None else mask.size - mask.sum()
    return bool(non_nulls < min_count)
def _zero_out_fperr(arg, tol: float | np.ndarray):
# #18044 reference this behavior to fix rolling skew/kurt issue
if isinstance(arg, np.ndarray):
return np.where(np.abs(arg) < tol, 0, arg)
else:
return arg.dtype.type(0) if np.abs(arg) < tol else arg
@disallow("M8", "m8")
def nancorr(
    a: np.ndarray,
    b: np.ndarray,
    *,
    method: CorrelationMethod = "pearson",
    min_periods: int | None = None,
) -> float:
    """
    Compute the correlation of two equal-length arrays, dropping any pair
    where either element is NA. Returns NaN when fewer than ``min_periods``
    valid pairs remain.
    """
    if len(a) != len(b):
        raise AssertionError("Operands to nancorr must have same size")
    min_obs = 1 if min_periods is None else min_periods
    # keep only positions where both operands are non-null
    valid = notna(a) & notna(b)
    if not valid.all():
        a, b = a[valid], b[valid]
    if len(a) < min_obs:
        return np.nan
    corr = get_corr_func(method)
    return corr(_ensure_numeric(a), _ensure_numeric(b))
def get_corr_func(
    method: CorrelationMethod,
) -> Callable[[np.ndarray, np.ndarray], float]:
    """
    Map a correlation method name (or user callable) to a binary function
    returning the correlation coefficient of two arrays.

    scipy is imported lazily so that 'pearson' (the default) works without it.
    """
    if callable(method):
        # user-supplied correlation function is passed through unchanged
        return method
    if method == "pearson":
        def pearson_corr(x, y):
            return np.corrcoef(x, y)[0, 1]

        return pearson_corr
    if method == "kendall":
        from scipy.stats import kendalltau

        def kendall_corr(x, y):
            return kendalltau(x, y)[0]

        return kendall_corr
    if method == "spearman":
        from scipy.stats import spearmanr

        def spearman_corr(x, y):
            return spearmanr(x, y)[0]

        return spearman_corr
    raise ValueError(
        f"Unknown method '{method}', expected one of "
        "'kendall', 'spearman', 'pearson', or callable"
    )
@disallow("M8", "m8")
def nancov(
    a: np.ndarray,
    b: np.ndarray,
    *,
    min_periods: int | None = None,
    ddof: int | None = 1,
) -> float:
    """
    Compute the covariance of two equal-length arrays, dropping any pair
    where either element is NA. Returns NaN when fewer than ``min_periods``
    valid pairs remain.
    """
    if len(a) != len(b):
        raise AssertionError("Operands to nancov must have same size")
    min_obs = 1 if min_periods is None else min_periods
    # keep only positions where both operands are non-null
    valid = notna(a) & notna(b)
    if not valid.all():
        a, b = a[valid], b[valid]
    if len(a) < min_obs:
        return np.nan
    return np.cov(_ensure_numeric(a), _ensure_numeric(b), ddof=ddof)[0, 1]
def _ensure_numeric(x):
if isinstance(x, np.ndarray):
if x.dtype.kind in "biu":
x = x.astype(np.float64)
elif x.dtype == object:
inferred = lib.infer_dtype(x)
if inferred in ["string", "mixed"]:
# GH#44008, GH#36703 avoid casting e.g. strings to numeric
raise TypeError(f"Could not convert {x} to numeric")
try:
x = x.astype(np.complex128)
except (TypeError, ValueError):
try:
x = x.astype(np.float64)
except ValueError as err:
# GH#29941 we get here with object arrays containing strs
raise TypeError(f"Could not convert {x} to numeric") from err
else:
if not np.any(np.imag(x)):
x = x.real
elif not (is_float(x) or is_integer(x) or is_complex(x)):
if isinstance(x, str):
# GH#44008, GH#36703 avoid casting e.g. strings to numeric
raise TypeError(f"Could not convert string '{x}' to numeric")
try:
x = float(x)
except (TypeError, ValueError):
# e.g. "1+1j" or "foo"
try:
x = complex(x)
except ValueError as err:
# e.g. "foo"
raise TypeError(f"Could not convert {x} to numeric") from err
return x
def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike:
    """
    Cumulative function with skipna support.

    Parameters
    ----------
    values : np.ndarray or ExtensionArray
    accum_func : {np.cumprod, np.maximum.accumulate, np.cumsum, np.minimum.accumulate}
    skipna : bool

    Returns
    -------
    np.ndarray or ExtensionArray
    """
    # fill_value: identity element substituted at NA positions before
    # accumulating; post_value: the null written back afterwards
    fill_value, post_value = {
        np.cumprod: (1.0, np.nan),
        np.maximum.accumulate: (-np.inf, np.nan),
        np.cumsum: (0.0, np.nan),
        np.minimum.accumulate: (np.inf, np.nan),
    }[accum_func]
    # datetimelike values should go through the EA interface instead
    assert values.dtype.kind not in "mM"
    # integer/bool dtypes cannot hold NA, so masking is only needed for
    # skipna on dtypes that can
    needs_mask = skipna and not issubclass(values.dtype.type, (np.integer, np.bool_))
    if not needs_mask:
        return accum_func(values, axis=0)
    vals = values.copy()
    mask = isna(vals)
    vals[mask] = fill_value
    result = accum_func(vals, axis=0)
    # restore NA at the originally-missing positions
    result[mask] = post_value
    return result
| bottleneck_switch |
python | kamyu104__LeetCode-Solutions | Python/minimize-malware-spread.py | {
"start": 510,
"end": 1297
} | class ____(object):
def minMalwareSpread(self, graph, initial):
"""
:type graph: List[List[int]]
:type initial: List[int]
:rtype: int
"""
union_find = UnionFind(len(graph))
for i in xrange(len(graph)):
for j in xrange(i+1, len(graph)):
if graph[i][j] == 1:
union_find.union_set(i, j)
union_size = collections.Counter(union_find.find_set(i) for i in xrange(len(graph)))
malware_count = collections.Counter(union_find.find_set(i) for i in initial)
return min(initial, key=lambda x: [malware_count[union_find.find_set(x)] > 1,
-union_size[union_find.find_set(x)],
x])
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-aws/infra/worker/events_stack.py | {
"start": 404,
"end": 4243
} | class ____(Stack):
"""EventBridge and SQS infrastructure for ECS task state monitoring."""
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
# Parameters
self.work_pool_name = CfnParameter(
self,
"WorkPoolName",
type="String",
description="Name of the Prefect work pool",
default="ecs-work-pool",
)
self.existing_cluster_arn = CfnParameter(
self,
"ExistingClusterArn",
type="String",
description="ARN of existing ECS cluster to monitor",
)
# Create only the event infrastructure
self.queue, self.dlq = self.create_sqs_infrastructure()
self.eventbridge_rule = self.create_eventbridge_rule(
self.queue, cluster_arn=self.existing_cluster_arn.value_as_string
)
# Output the queue configuration for workers to consume
CfnOutput(
self,
"EcsEventsQueueName",
value=self.queue.queue_name,
description="Name of the SQS queue receiving ECS events",
)
def create_sqs_infrastructure(self) -> tuple[sqs.Queue, sqs.Queue]:
"""Create SQS queue and DLQ for ECS events."""
# Dead Letter Queue
dlq = sqs.Queue(
self,
"EcsEventsDLQ",
queue_name=f"{self.work_pool_name.value_as_string}-ecs-events-dlq",
visibility_timeout=Duration.seconds(60),
retention_period=Duration.days(14),
)
# Main Queue
queue = sqs.Queue(
self,
"EcsEventsQueue",
queue_name=f"{self.work_pool_name.value_as_string}-ecs-events",
visibility_timeout=Duration.seconds(300),
dead_letter_queue=sqs.DeadLetterQueue(
max_receive_count=3,
queue=dlq,
),
retention_period=Duration.days(7),
)
CfnOutput(
self,
"EcsEventsQueueUrl",
value=queue.queue_url,
description="URL of the SQS queue receiving ECS events",
)
CfnOutput(
self,
"EcsEventsQueueArn",
value=queue.queue_arn,
description="ARN of the SQS queue receiving ECS events",
)
return queue, dlq
def create_eventbridge_rule(
self, queue: sqs.Queue, cluster_arn: str = None
) -> events.Rule:
"""Create EventBridge rule for ECS task state changes."""
# Use CDK's EventPattern class instead of raw dict
if cluster_arn:
event_pattern = events.EventPattern(
source=["aws.ecs"],
detail_type=["ECS Task State Change"],
detail={
"clusterArn": [cluster_arn],
},
)
else:
event_pattern = events.EventPattern(
source=["aws.ecs"],
detail_type=["ECS Task State Change"],
detail={},
)
rule = events.Rule(
self,
"EcsTaskStateChangeRule",
rule_name=f"{self.work_pool_name.value_as_string}-ecs-task-events",
description="Capture ECS task state changes for Prefect workers",
event_pattern=event_pattern,
targets=[events_targets.SqsQueue(queue)],
)
# Grant EventBridge permission to send messages to SQS
queue.grant_send_messages(iam.ServicePrincipal("events.amazonaws.com"))
CfnOutput(
self,
"EventBridgeRuleArn",
value=rule.rule_arn,
description="ARN of the EventBridge rule for ECS events",
)
return rule
| EcsEventsStack |
python | huggingface__transformers | src/transformers/models/dots1/configuration_dots1.py | {
"start": 878,
"end": 10435
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Dots1Model`]. It is used to instantiate a
`dots.llm1` model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of
[rednote-hilab/dots.llm1.base](https://huggingface.co/rednote-hilab/dots.llm1.base).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 152064):
Vocabulary size of the model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`Dots1Model`].
hidden_size (`int`, *optional*, defaults to 4608):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 10944):
Dimension of the MLP representations.
moe_intermediate_size (`int`, *optional*, defaults to 1408):
Dimension of the MoE representations.
num_hidden_layers (`int`, *optional*, defaults to 62):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 32):
Number of key/value heads for Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, Multi
Head Attention (MHA) is used. If `num_key_value_heads=1`, Multi Query Attention (MQA) is used. Otherwise,
Grouped Query Attention (GQA) is used. If not specified, defaults to `num_attention_heads`.
n_shared_experts (`int`, *optional*, default=None):
Number of shared experts. None means dense model.
n_routed_experts (`int`, *optional*, default=None):
Number of routed experts. None means dense model.
n_group (`int`, *optional*, defaults to 1):
Number of groups for routed experts.
topk_group (`int`, *optional*, defaults to 1):
Number of selected groups for each token (selected experts only within `topk_group` groups).
num_experts_per_tok (`int`, *optional*, default=None):
Number of selected experts. None means dense model.
first_k_dense_replace (`int`, *optional*, defaults to 0):
Number of dense layers at the beginning of the model before the first MoE layer.
norm_topk_prob (`bool`, *optional*, defaults to `False`):
Whether to normalize the weights of the routed experts.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string).
max_position_embeddings (`int`, *optional*, defaults to 2048):
Maximum sequence length the model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
Standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
Epsilon used by the RMS normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions. Only relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie the input and output word embeddings.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the self-attention projections.
attention_dropout (`float`, *optional*, defaults to 0.0):
Dropout ratio for the attention probabilities.
routed_scaling_factor (`float`, *optional*, defaults to 1.0):
Scaling factor for routed experts.
sliding_window (`int`, *optional*, defaults to 4096):
Size of the sliding window for attention. If not specified, defaults to `4096`.
max_window_layers (`int`, *optional*, defaults to 62):
The number of layers using full attention. The first `max_window_layers` layers will use full attention, while any
additional layer afterwards will use SWA (Sliding Window Attention).
layer_types (`list`, *optional*):
Attention pattern for each layer.
Examples:
```python
>>> from transformers import Dots1Model, Dots1Config
>>> # Initializing a Dots1 style configuration
>>> configuration = Dots1Config()
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "dots1"
keys_to_ignore_at_inference = ["past_key_values"]
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.experts.gate_up_proj": "local_rowwise",
"layers.*.mlp.experts.down_proj": "local_rowwise",
"layers.*.mlp.experts": "gather",
"layers.*.mlp.shared_experts.gate_proj": "colwise",
"layers.*.mlp.shared_experts.up_proj": "colwise",
"layers.*.mlp.shared_experts.down_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
attribute_map = {
"num_local_experts": "n_routed_experts",
}
def __init__(
self,
vocab_size: Optional[int] = 152064,
hidden_size: Optional[int] = 4608,
intermediate_size: Optional[int] = 10944,
moe_intermediate_size: Optional[int] = 1408,
num_hidden_layers: Optional[int] = 62,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = 32,
n_shared_experts: Optional[int] = None,
n_routed_experts: Optional[int] = None,
n_group: Optional[int] = 1,
topk_group: Optional[int] = 1,
num_experts_per_tok: Optional[int] = None,
first_k_dense_replace: Optional[int] = 0,
norm_topk_prob: Optional[bool] = False,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 2048,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-6,
use_cache: Optional[bool] = True,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
routed_scaling_factor: Optional[float] = 1.0,
sliding_window: Optional[int] = 4096,
max_window_layers: Optional[int] = 62,
layer_types: Optional[list[str]] = None,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.moe_intermediate_size = moe_intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.n_shared_experts = n_shared_experts
self.n_routed_experts = n_routed_experts
self.num_experts_per_tok = num_experts_per_tok
self.first_k_dense_replace = first_k_dense_replace
self.norm_topk_prob = norm_topk_prob
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.n_group = n_group
self.topk_group = topk_group
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.routed_scaling_factor = routed_scaling_factor
self.sliding_window = sliding_window
self.max_window_layers = max_window_layers
self.layer_types = layer_types
if self.layer_types is None:
self.layer_types = [
"sliding_attention"
if self.sliding_window is not None and i >= self.max_window_layers
else "full_attention"
for i in range(self.num_hidden_layers)
]
layer_type_validation(self.layer_types, self.num_hidden_layers)
self.rope_parameters = rope_parameters
super().__init__(
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["Dots1Config"]
| Dots1Config |
python | streamlit__streamlit | lib/tests/streamlit/elements/plotly_chart_test.py | {
"start": 10908,
"end": 19355
} | class ____(DeltaGeneratorTestCase):
"""Test plotly_chart width parameter functionality."""
@parameterized.expand(
[
# width, expected_width_spec, expected_width_value
("stretch", "use_stretch", True),
("content", "pixel_width", 700), # Content width resolves to 700px default
(500, "pixel_width", 500),
]
)
def test_plotly_chart_width_combinations(
self,
width: str | int,
expected_width_spec: str,
expected_width_value: bool | int,
):
"""Test plotly chart with various width combinations."""
import plotly.graph_objs as go
trace0 = go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])
data = [trace0]
st.plotly_chart(data, width=width)
delta = self.get_delta_from_queue()
el = delta.new_element
# Check width_config on the element
assert el.width_config.WhichOneof("width_spec") == expected_width_spec
assert getattr(el.width_config, expected_width_spec) == expected_width_value
@parameterized.expand(
[
# use_container_width, width, expected_width_spec, expected_width_value
(True, None, "use_stretch", True), # use_container_width=True -> stretch
(
False,
None,
"pixel_width",
700,
), # use_container_width=False, no width -> default 700
(
True,
500,
"use_stretch",
True,
), # use_container_width overrides width -> stretch
(
True,
"content",
"use_stretch",
True,
), # use_container_width overrides width -> stretch
(
False,
"content",
"pixel_width",
700,
), # content width resolves to 700px default when no figure width
(
False,
500,
"pixel_width",
500,
), # integer width -> pixel width
]
)
@patch("streamlit.elements.plotly_chart.show_deprecation_warning")
def test_use_container_width_deprecation(
self,
use_container_width: bool,
width: str | int | None,
expected_width_spec: str,
expected_width_value: bool | int,
mock_show_warning,
):
"""Test deprecation warning and translation logic."""
import plotly.graph_objs as go
trace0 = go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])
data = [trace0]
kwargs = {"use_container_width": use_container_width}
if width is not None:
kwargs["width"] = width
st.plotly_chart(data, **kwargs)
# Check that deprecation warning was called
mock_show_warning.assert_called_once()
delta = self.get_delta_from_queue()
el = delta.new_element
# Check width_config reflects the expected width (NOT deprecated proto fields)
assert el.width_config.WhichOneof("width_spec") == expected_width_spec
assert getattr(el.width_config, expected_width_spec) == expected_width_value
@parameterized.expand(
[
("width", "invalid_width"),
("width", 0), # width must be positive
("width", -100), # negative width
]
)
def test_width_validation_errors(self, param_name: str, invalid_value: str | int):
"""Test that invalid width values raise validation errors."""
import plotly.graph_objs as go
trace0 = go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])
data = [trace0]
with pytest.raises(StreamlitAPIException):
st.plotly_chart(data, width=invalid_value)
def test_width_parameter_with_selections(self):
"""Test width parameter works correctly with selections activated."""
import plotly.graph_objs as go
trace0 = go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])
data = [trace0]
st.plotly_chart(data, width="content", on_select="rerun", key="test_key")
delta = self.get_delta_from_queue()
el = delta.new_element
assert el.width_config.WhichOneof("width_spec") == "pixel_width"
assert el.width_config.pixel_width == 700 # Content width defaults to 700px
assert len(el.plotly_chart.selection_mode) > 0 # Selections are activated
def test_width_defaults_to_stretch(self):
"""Test that width parameter defaults to stretch when not provided."""
import plotly.graph_objs as go
trace0 = go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])
data = [trace0]
st.plotly_chart(data)
delta = self.get_delta_from_queue()
el = delta.new_element
assert el.width_config.WhichOneof("width_spec") == "use_stretch"
assert el.width_config.use_stretch
@parameterized.expand([(500, 500), (10, 10), (None, 700)])
def test_content_width_behavior(
self, figure_width: int | None, expected_width: int
):
"""Test that content width resolves figure layout width correctly."""
import plotly.graph_objs as go
fig = go.Figure()
fig.add_trace(go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17]))
if figure_width is not None:
fig.update_layout(width=figure_width, height=300)
st.plotly_chart(fig, width="content")
delta = self.get_delta_from_queue()
el = delta.new_element
assert el.width_config.WhichOneof("width_spec") == "pixel_width"
assert el.width_config.pixel_width == expected_width
def test_content_width_with_various_data_types(self):
"""Test content width handling with different plotly-accepted data types."""
import plotly.graph_objs as go
with self.subTest("matplotlib_figure"):
import matplotlib.pyplot as plt
# Create a matplotlib figure
fig, ax = plt.subplots(figsize=(8, 6)) # 8 inches * 100 dpi = 800px width
ax.plot([1, 2, 3, 4], [10, 15, 13, 17])
ax.set_title("Matplotlib Figure")
st.plotly_chart(fig, width="content")
plt.close(fig) # Clean up
delta = self.get_delta_from_queue()
el = delta.new_element
assert el.width_config.WhichOneof("width_spec") == "pixel_width"
# Matplotlib figures get converted, may not preserve exact width
# but should still resolve to a reasonable value
assert el.width_config.pixel_width >= 100
with self.subTest("data_list"):
# Create plotly data as a list (no explicit width in layout)
data = [go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])]
st.plotly_chart(data, width="content")
delta = self.get_delta_from_queue()
el = delta.new_element
assert el.width_config.WhichOneof("width_spec") == "pixel_width"
# No explicit width, should default to 700
assert el.width_config.pixel_width == 700
with self.subTest("data_dict"):
# Create plotly data as a dictionary (no explicit width)
data_dict = {
"data": [{"x": [1, 2, 3, 4], "y": [10, 15, 13, 17], "type": "scatter"}],
"layout": {"title": "Dict Data"},
}
st.plotly_chart(data_dict, width="content")
delta = self.get_delta_from_queue()
el = delta.new_element
assert el.width_config.WhichOneof("width_spec") == "pixel_width"
# No explicit width, should default to 700
assert el.width_config.pixel_width == 700
with self.subTest("plotly_figure_with_width"):
# Create plotly figure with explicit width
fig = go.Figure()
fig.add_trace(go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17]))
fig.update_layout(width=600, height=400, title="Figure with Width")
st.plotly_chart(fig, width="content")
delta = self.get_delta_from_queue()
el = delta.new_element
assert el.width_config.WhichOneof("width_spec") == "pixel_width"
# Should use the explicit width from the figure
assert el.width_config.pixel_width == 600
| PlotlyChartWidthTest |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/scaffold/branch/models.py | {
"start": 1029,
"end": 1226
} | class ____:
"""Individual diagnostics log entry."""
correlation_id: str
timestamp: str
level: str
category: str
message: str
data: dict[str, Any]
@record
| DiagnosticsEntry |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/asset/__init__.py | {
"start": 17052,
"end": 17174
} | class ____(AssetRef):
"""URI reference to an asset."""
uri: str
_dependency_type = "asset-uri-ref"
| AssetUriRef |
python | pandas-dev__pandas | pandas/core/arrays/string_arrow.py | {
"start": 1971,
"end": 19291
} | class ____(ObjectStringArrayMixin, ArrowExtensionArray, BaseStringArray):
"""
Extension array for string data in a ``pyarrow.ChunkedArray``.
.. warning::
ArrowStringArray is considered experimental. The implementation and
parts of the API may change without warning.
Parameters
----------
values : pyarrow.Array or pyarrow.ChunkedArray
The array of data.
dtype : StringDtype
The dtype for the array.
Attributes
----------
None
Methods
-------
None
See Also
--------
:func:`array`
The recommended function for creating an ArrowStringArray.
Series.str
The string methods are available on Series backed by
an ArrowStringArray.
Notes
-----
ArrowStringArray returns a BooleanArray for comparison methods.
Examples
--------
>>> pd.array(["This is", "some text", None, "data."], dtype="string[pyarrow]")
<ArrowStringArray>
['This is', 'some text', <NA>, 'data.']
Length: 4, dtype: string
"""
# error: Incompatible types in assignment (expression has type "StringDtype",
# base class "ArrowExtensionArray" defined the type as "ArrowDtype")
_dtype: StringDtype # type: ignore[assignment]
def __init__(self, values, *, dtype: StringDtype | None = None) -> None:
_check_pyarrow_available()
if isinstance(values, (pa.Array, pa.ChunkedArray)) and (
pa.types.is_string(values.type)
or _is_string_view(values.type)
or (
pa.types.is_dictionary(values.type)
and (
pa.types.is_string(values.type.value_type)
or pa.types.is_large_string(values.type.value_type)
or _is_string_view(values.type.value_type)
)
)
):
values = pc.cast(values, pa.large_string())
super().__init__(values)
if dtype is None:
dtype = StringDtype(storage="pyarrow", na_value=libmissing.NA)
self._dtype = dtype
if not pa.types.is_large_string(self._pa_array.type):
raise ValueError(
"ArrowStringArray requires a PyArrow (chunked) array of "
"large_string type"
)
def _from_pyarrow_array(self, pa_array):
"""
Construct from the pyarrow array result of an operation, retaining
self.dtype.na_value.
"""
return type(self)(pa_array, dtype=self.dtype)
@classmethod
def _box_pa_scalar(cls, value, pa_type: pa.DataType | None = None) -> pa.Scalar:
pa_scalar = super()._box_pa_scalar(value, pa_type)
if pa.types.is_string(pa_scalar.type) and pa_type is None:
pa_scalar = pc.cast(pa_scalar, pa.large_string())
return pa_scalar
@classmethod
def _box_pa_array(
cls, value, pa_type: pa.DataType | None = None, copy: bool = False
) -> pa.Array | pa.ChunkedArray:
pa_array = super()._box_pa_array(value, pa_type)
if pa.types.is_string(pa_array.type) and pa_type is None:
pa_array = pc.cast(pa_array, pa.large_string())
return pa_array
def __len__(self) -> int:
"""
Length of this array.
Returns
-------
length : int
"""
return len(self._pa_array)
@classmethod
def _from_sequence(
cls, scalars, *, dtype: Dtype | None = None, copy: bool = False
) -> Self:
from pandas.core.arrays.masked import BaseMaskedArray
_check_pyarrow_available()
if dtype and not (isinstance(dtype, str) and dtype == "string"):
dtype = pandas_dtype(dtype)
assert isinstance(dtype, StringDtype) and dtype.storage == "pyarrow"
if isinstance(scalars, BaseMaskedArray):
# avoid costly conversion to object dtype in ensure_string_array and
# numerical issues with Float32Dtype
na_values = scalars._mask
result = scalars._data
result = lib.ensure_string_array(result, copy=copy, convert_na_value=False)
pa_arr = pa.array(result, mask=na_values, type=pa.large_string())
elif isinstance(scalars, (pa.Array, pa.ChunkedArray)):
pa_arr = pc.cast(scalars, pa.large_string())
else:
# convert non-na-likes to str
result = lib.ensure_string_array(scalars, copy=copy)
pa_arr = pa.array(result, type=pa.large_string(), from_pandas=True)
# error: Argument "dtype" to "ArrowStringArray" has incompatible type
return cls(pa_arr, dtype=dtype) # type: ignore[arg-type]
@classmethod
def _from_sequence_of_strings(
cls, strings, *, dtype: ExtensionDtype, copy: bool = False
) -> Self:
return cls._from_sequence(strings, dtype=dtype, copy=copy)
@property
def dtype(self) -> StringDtype: # type: ignore[override]
"""
An instance of 'string[pyarrow]'.
"""
return self._dtype
def insert(self, loc: int, item) -> ArrowStringArray:
if self.dtype.na_value is np.nan and item is np.nan:
item = libmissing.NA
if not isinstance(item, str) and item is not libmissing.NA:
raise TypeError(
f"Invalid value '{item}' for dtype 'str'. Value should be a "
f"string or missing value, got '{type(item).__name__}' instead."
)
return super().insert(loc, item)
def _convert_bool_result(self, values, na=lib.no_default, method_name=None):
validate_na_arg(na, name="na")
if self.dtype.na_value is np.nan:
if na is lib.no_default or isna(na):
# NaN propagates as False
values = values.fill_null(False)
else:
values = values.fill_null(na)
return values.to_numpy()
else:
if na is not lib.no_default and not isna(na): # pyright: ignore [reportGeneralTypeIssues]
values = values.fill_null(na)
return BooleanDtype().__from_arrow__(values)
def _maybe_convert_setitem_value(self, value):
"""Maybe convert value to be pyarrow compatible."""
if is_scalar(value):
if isna(value):
value = None
elif not isinstance(value, str):
raise TypeError(
f"Invalid value '{value}' for dtype 'str'. Value should be a "
f"string or missing value, got '{type(value).__name__}' instead."
)
else:
value = np.array(value, dtype=object, copy=True)
value[isna(value)] = None
for v in value:
if not (v is None or isinstance(v, str)):
raise TypeError(
"Invalid value for dtype 'str'. Value should be a "
"string or missing value (or array of those)."
)
return super()._maybe_convert_setitem_value(value)
def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
value_set = [
pa_scalar.as_py()
for pa_scalar in [pa.scalar(value, from_pandas=True) for value in values]
if pa_scalar.type in (pa.string(), pa.null(), pa.large_string())
]
# short-circuit to return all False array.
if not value_set:
return np.zeros(len(self), dtype=bool)
result = pc.is_in(
self._pa_array, value_set=pa.array(value_set, type=self._pa_array.type)
)
# pyarrow 2.0.0 returned nulls, so we explicitly specify dtype to convert nulls
# to False
return np.array(result, dtype=np.bool_)
def astype(self, dtype, copy: bool = True):
dtype = pandas_dtype(dtype)
if dtype == self.dtype:
if copy:
return self.copy()
return self
elif isinstance(dtype, NumericDtype):
data = self._pa_array.cast(pa.from_numpy_dtype(dtype.numpy_dtype))
return dtype.__from_arrow__(data)
elif isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.floating):
return self.to_numpy(dtype=dtype, na_value=np.nan)
return super().astype(dtype, copy=copy)
# ------------------------------------------------------------------------
# String methods interface
_str_isalnum = ArrowStringArrayMixin._str_isalnum
_str_isalpha = ArrowStringArrayMixin._str_isalpha
_str_isdecimal = ArrowStringArrayMixin._str_isdecimal
_str_isdigit = ArrowStringArrayMixin._str_isdigit
_str_islower = ArrowStringArrayMixin._str_islower
_str_isnumeric = ArrowStringArrayMixin._str_isnumeric
_str_isspace = ArrowStringArrayMixin._str_isspace
_str_istitle = ArrowStringArrayMixin._str_istitle
_str_isupper = ArrowStringArrayMixin._str_isupper
_str_map = BaseStringArray._str_map
_str_startswith = ArrowStringArrayMixin._str_startswith
_str_endswith = ArrowStringArrayMixin._str_endswith
_str_pad = ArrowStringArrayMixin._str_pad
_str_lower = ArrowStringArrayMixin._str_lower
_str_upper = ArrowStringArrayMixin._str_upper
_str_strip = ArrowStringArrayMixin._str_strip
_str_lstrip = ArrowStringArrayMixin._str_lstrip
_str_rstrip = ArrowStringArrayMixin._str_rstrip
_str_removesuffix = ArrowStringArrayMixin._str_removesuffix
_str_removeprefix = ArrowStringArrayMixin._str_removeprefix
_str_find = ArrowStringArrayMixin._str_find
_str_get = ArrowStringArrayMixin._str_get
_str_capitalize = ArrowStringArrayMixin._str_capitalize
_str_title = ArrowStringArrayMixin._str_title
_str_swapcase = ArrowStringArrayMixin._str_swapcase
_str_slice_replace = ArrowStringArrayMixin._str_slice_replace
_str_len = ArrowStringArrayMixin._str_len
_str_slice = ArrowStringArrayMixin._str_slice
@staticmethod
def _is_re_pattern_with_flags(pat: str | re.Pattern) -> bool:
# check if `pat` is a compiled regex pattern with flags that are not
# supported by pyarrow
return (
isinstance(pat, re.Pattern)
and (pat.flags & ~(re.IGNORECASE | re.UNICODE)) != 0
)
@staticmethod
def _preprocess_re_pattern(pat: re.Pattern, case: bool) -> tuple[str, bool, int]:
pattern = pat.pattern
flags = pat.flags
# flags is not supported by pyarrow, but `case` is -> extract and remove
if flags & re.IGNORECASE:
case = False
flags = flags & ~re.IGNORECASE
# when creating a pattern with re.compile and a string, it automatically
# gets a UNICODE flag, while pyarrow assumes unicode for strings anyway
flags = flags & ~re.UNICODE
return pattern, case, flags
def _str_contains(
self,
pat,
case: bool = True,
flags: int = 0,
na=lib.no_default,
regex: bool = True,
):
if flags or self._is_re_pattern_with_flags(pat):
return super()._str_contains(pat, case, flags, na, regex)
if isinstance(pat, re.Pattern):
# TODO flags passed separately by user are ignored
pat, case, flags = self._preprocess_re_pattern(pat, case)
return ArrowStringArrayMixin._str_contains(self, pat, case, flags, na, regex)
def _str_match(
self,
pat: str | re.Pattern,
case: bool = True,
flags: int = 0,
na: Scalar | lib.NoDefault = lib.no_default,
):
if flags or self._is_re_pattern_with_flags(pat):
return super()._str_match(pat, case, flags, na)
if isinstance(pat, re.Pattern):
pat, case, flags = self._preprocess_re_pattern(pat, case)
return ArrowStringArrayMixin._str_match(self, pat, case, flags, na)
def _str_fullmatch(
self,
pat: str | re.Pattern,
case: bool = True,
flags: int = 0,
na: Scalar | lib.NoDefault = lib.no_default,
):
if flags or self._is_re_pattern_with_flags(pat):
return super()._str_fullmatch(pat, case, flags, na)
if isinstance(pat, re.Pattern):
pat, case, flags = self._preprocess_re_pattern(pat, case)
return ArrowStringArrayMixin._str_fullmatch(self, pat, case, flags, na)
def _str_replace(
self,
pat: str | re.Pattern,
repl: str | Callable,
n: int = -1,
case: bool = True,
flags: int = 0,
regex: bool = True,
):
if (
isinstance(pat, re.Pattern)
or callable(repl)
or not case
or flags
or ( # substitution contains a named group pattern
# https://docs.python.org/3/library/re.html
isinstance(repl, str) and r"\g<" in repl
)
):
return super()._str_replace(pat, repl, n, case, flags, regex)
return ArrowStringArrayMixin._str_replace(
self, pat, repl, n, case, flags, regex
)
def _str_repeat(self, repeats: int | Sequence[int]):
if not isinstance(repeats, int):
return super()._str_repeat(repeats)
else:
return ArrowExtensionArray._str_repeat(self, repeats=repeats)
def _str_count(self, pat: str, flags: int = 0):
if flags:
return super()._str_count(pat, flags)
result = pc.count_substring_regex(self._pa_array, pat)
return self._convert_int_result(result)
def _str_get_dummies(self, sep: str = "|", dtype: NpDtype | None = None):
if dtype is None:
dtype = np.int64
dummies_pa, labels = ArrowExtensionArray(self._pa_array)._str_get_dummies(
sep, dtype
)
if len(labels) == 0:
return np.empty(shape=(0, 0), dtype=dtype), labels
dummies = np.vstack(dummies_pa.to_numpy())
_dtype = pandas_dtype(dtype)
dummies_dtype: NpDtype
if isinstance(_dtype, np.dtype):
dummies_dtype = _dtype
else:
dummies_dtype = np.bool_
return dummies.astype(dummies_dtype, copy=False), labels
def _convert_int_result(self, result):
if self.dtype.na_value is np.nan:
if isinstance(result, pa.Array):
result = result.to_numpy(zero_copy_only=False)
else:
result = result.to_numpy()
if result.dtype == np.int32:
result = result.astype(np.int64)
return result
return Int64Dtype().__from_arrow__(result)
def _convert_rank_result(self, result):
if self.dtype.na_value is np.nan:
if isinstance(result, pa.Array):
result = result.to_numpy(zero_copy_only=False)
else:
result = result.to_numpy()
return result.astype("float64", copy=False)
return Float64Dtype().__from_arrow__(result)
def _reduce(
self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
):
if self.dtype.na_value is np.nan and name in ["any", "all"]:
if not skipna:
nas = pc.is_null(self._pa_array)
arr = pc.or_kleene(nas, pc.not_equal(self._pa_array, ""))
else:
arr = pc.not_equal(self._pa_array, "")
result = ArrowExtensionArray(arr)._reduce(
name, skipna=skipna, keepdims=keepdims, **kwargs
)
if keepdims:
# ArrowExtensionArray will return a length-1 bool[pyarrow] array
return result.astype(np.bool_)
return result
if name in ("min", "max", "sum", "argmin", "argmax"):
result = self._reduce_calc(name, skipna=skipna, keepdims=keepdims, **kwargs)
else:
raise TypeError(f"Cannot perform reduction '{name}' with string dtype")
if name in ("argmin", "argmax") and isinstance(result, pa.Array):
return self._convert_int_result(result)
elif isinstance(result, pa.Array):
return type(self)(result, dtype=self.dtype)
else:
return result
def value_counts(self, dropna: bool = True) -> Series:
result = super().value_counts(dropna=dropna)
if self.dtype.na_value is np.nan:
res_values = result._values.to_numpy()
return result._constructor(
res_values, index=result.index, name=result.name, copy=False
)
return result
def _cmp_method(self, other, op):
if (
isinstance(other, (BaseStringArray, ArrowExtensionArray))
and self.dtype.na_value is not libmissing.NA
and other.dtype.na_value is libmissing.NA
):
# NA has priority of NaN semantics
return NotImplemented
result = super()._cmp_method(other, op)
if self.dtype.na_value is np.nan:
if op == operator.ne:
return result.to_numpy(np.bool_, na_value=True)
else:
return result.to_numpy(np.bool_, na_value=False)
return result
def __pos__(self) -> Self:
raise TypeError(f"bad operand type for unary +: '{self.dtype}'")
| ArrowStringArray |
python | sympy__sympy | sympy/categories/baseclasses.py | {
"start": 803,
"end": 1101
} | class ____(Symbol):
"""
The base class for any kind of object in an abstract category.
Explanation
===========
While technically any instance of :class:`~.Basic` will do, this
class is the recommended way to create abstract objects in
abstract categories.
"""
| Object |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 32941,
"end": 34090
} | class ____(PrefectFilterBaseModel):
"""Filter by `TaskRun.start_time`."""
before_: Optional[DateTime] = Field(
default=None,
description="Only include task runs starting at or before this time",
)
after_: Optional[DateTime] = Field(
default=None,
description="Only include task runs starting at or after this time",
)
is_null_: Optional[bool] = Field(
default=None, description="If true, only return task runs without a start time"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.before_ is not None:
filters.append(db.TaskRun.start_time <= self.before_)
if self.after_ is not None:
filters.append(db.TaskRun.start_time >= self.after_)
if self.is_null_ is not None:
filters.append(
db.TaskRun.start_time.is_(None)
if self.is_null_
else db.TaskRun.start_time.is_not(None)
)
return filters
| TaskRunFilterStartTime |
python | ray-project__ray | python/ray/data/_internal/arrow_block.py | {
"start": 2272,
"end": 4374
} | class ____(Mapping):
"""
Row of a tabular Dataset backed by a Arrow Table block.
"""
def __init__(self, row: Any):
self._row = row
def __getitem__(self, key: Union[str, List[str]]) -> Any:
from ray.data.extensions import get_arrow_extension_tensor_types
tensor_arrow_extension_types = get_arrow_extension_tensor_types()
def get_item(keys: List[str]) -> Any:
schema = self._row.schema
if isinstance(schema.field(keys[0]).type, tensor_arrow_extension_types):
# Build a tensor row.
return tuple(
[
ArrowBlockAccessor._build_tensor_row(
self._row, col_name=key, row_idx=0
)
for key in keys
]
)
table = self._row.select(keys)
if len(table) == 0:
return None
items = [col[0] for col in table.columns]
try:
# Try to interpret this as a pyarrow.Scalar value.
return tuple([item.as_py() for item in items])
except AttributeError:
# Assume that this row is an element of an extension array, and
# that it is bypassing pyarrow's scalar model for Arrow < 8.0.0.
return items
is_single_item = isinstance(key, str)
keys = [key] if is_single_item else key
items = get_item(keys)
if items is None:
return None
elif is_single_item:
return items[0]
else:
return items
def __iter__(self) -> Iterator:
for k in self._row.column_names:
yield k
def __len__(self):
return self._row.num_columns
def as_pydict(self) -> Dict[str, Any]:
return dict(self.items())
def __str__(self):
return row_str(self)
def __repr__(self):
return row_repr(self)
def _repr_pretty_(self, p, cycle):
return row_repr_pretty(self, p, cycle)
| ArrowRow |
python | ray-project__ray | python/ray/tests/test_gcs_fault_tolerance.py | {
"start": 981,
"end": 36152
} | class ____:
def method(self, x):
return x + 2
@ray.remote
def increase(x):
return x + 1
def cluster_kill_gcs_wait(cluster):
head_node = cluster.head_node
gcs_server_process = head_node.all_processes["gcs_server"][0].process
gcs_server_pid = gcs_server_process.pid
# Kill gcs server.
cluster.head_node.kill_gcs_server()
# Wait to prevent the gcs server process becoming zombie.
gcs_server_process.wait()
wait_for_pid_to_exit(gcs_server_pid, 300)
def test_gcs_server_restart(ray_start_regular_with_external_redis):
actor1 = Increase.remote()
result = ray.get(actor1.method.remote(1))
assert result == 3
ray._private.worker._global_node.kill_gcs_server()
ray._private.worker._global_node.start_gcs_server()
actor2 = Increase.remote()
result = ray.get(actor2.method.remote(2))
assert result == 4
result = ray.get(increase.remote(1))
assert result == 2
# Check whether actor1 is alive or not.
# NOTE: We can't execute it immediately after gcs restarts
# because it takes time for the worker to exit.
result = ray.get(actor1.method.remote(7))
assert result == 9
@pytest.mark.skip(
reason="GCS pubsub may lose messages after GCS restarts. Need to "
"implement re-fetching state in GCS client.",
)
# TODO(mwtian): re-enable after fixing https://github.com/ray-project/ray/issues/22340
def test_gcs_server_restart_during_actor_creation(
ray_start_regular_with_external_redis,
):
ids = []
# We reduce the number of actors because there are too many actors created
# and `Too many open files` error will be thrown.
for i in range(0, 20):
actor = Increase.remote()
ids.append(actor.method.remote(1))
ray._private.worker._global_node.kill_gcs_server()
ray._private.worker._global_node.start_gcs_server()
# The timeout seems too long.
# TODO(mwtian): after fixing reconnection in GCS pubsub, try using a lower
# timeout.
ready, unready = ray.wait(ids, num_returns=20, timeout=240)
print("Ready objects is {}.".format(ready))
print("Unready objects is {}.".format(unready))
assert len(unready) == 0
@pytest.mark.parametrize(
"ray_start_cluster_head_with_external_redis",
[
generate_system_config_map(
health_check_initial_delay_ms=0,
health_check_period_ms=1000,
health_check_failure_threshold=3,
enable_autoscaler_v2=True,
),
],
indirect=True,
)
def test_autoscaler_init(
ray_start_cluster_head_with_external_redis,
):
"""
Checks that autoscaler initializes properly after GCS restarts.
"""
cluster = ray_start_cluster_head_with_external_redis
cluster.add_node()
cluster.wait_for_nodes()
# Make sure both head and worker node are alive.
nodes = ray.nodes()
assert len(nodes) == 2
assert nodes[0]["alive"] and nodes[1]["alive"]
# Restart gcs server process.
cluster_kill_gcs_wait(cluster)
cluster.head_node.start_gcs_server()
# Fetch the cluster status from the autoscaler and check that it works.
status = get_cluster_status(cluster.address)
wait_for_condition(lambda: len(status.idle_nodes) == 2)
@pytest.mark.parametrize(
"ray_start_cluster_head_with_external_redis",
[
generate_system_config_map(
health_check_initial_delay_ms=0,
health_check_period_ms=1000,
health_check_failure_threshold=3,
),
],
indirect=True,
)
def test_node_failure_detector_when_gcs_server_restart(
ray_start_cluster_head_with_external_redis,
):
"""Checks that the node failure detector is correct when gcs server restart.
We set the cluster to timeout nodes after 2 seconds of heartbeats. We then
kill gcs server and remove the worker node and restart gcs server again to
check that the removed node will die finally.
"""
cluster = ray_start_cluster_head_with_external_redis
worker = cluster.add_node()
cluster.wait_for_nodes()
# Make sure both head and worker node are alive.
nodes = ray.nodes()
assert len(nodes) == 2
assert nodes[0]["alive"] and nodes[1]["alive"]
to_be_removed_node = None
for node in nodes:
if node["RayletSocketName"] == worker.raylet_socket_name:
to_be_removed_node = node
assert to_be_removed_node is not None
cluster_kill_gcs_wait(cluster)
raylet_process = worker.all_processes["raylet"][0].process
raylet_pid = raylet_process.pid
# Remove worker node.
cluster.remove_node(worker, allow_graceful=False)
# Wait to prevent the raylet process becoming zombie.
raylet_process.wait()
wait_for_pid_to_exit(raylet_pid)
# Restart gcs server process.
cluster.head_node.start_gcs_server()
def condition():
nodes = ray.nodes()
assert len(nodes) == 2
for node in nodes:
if node["NodeID"] == to_be_removed_node["NodeID"]:
return not node["alive"]
return False
# Wait for the removed node dead.
wait_for_condition(condition, timeout=10)
def test_actor_raylet_resubscription(ray_start_regular_with_external_redis):
# stat an actor
@ray.remote
class A:
def ready(self):
return os.getpid()
actor = A.options(name="abc", max_restarts=0).remote()
pid = ray.get(actor.ready.remote())
print("actor is ready and kill gcs")
ray._private.worker._global_node.kill_gcs_server()
print("make actor exit")
p = psutil.Process(pid)
p.kill()
p.wait(timeout=10)
print("start gcs")
ray._private.worker._global_node.start_gcs_server()
print("try actor method again")
with pytest.raises(ray.exceptions.RayActorError):
ray.get(actor.ready.remote())
def test_del_actor_after_gcs_server_restart(ray_start_regular_with_external_redis):
actor = Increase.options(name="abc").remote()
result = ray.get(actor.method.remote(1))
assert result == 3
ray._private.worker._global_node.kill_gcs_server()
ray._private.worker._global_node.start_gcs_server()
actor_id = actor._actor_id.hex()
del actor
def condition():
actor_status = ray.util.state.get_actor(id=actor_id)
if actor_status.state == "DEAD":
return True
else:
return False
# Wait for the actor dead.
wait_for_condition(condition, timeout=10)
# If `ReportActorOutOfScope` was successfully called,
# name should be properly deleted.
with pytest.raises(ValueError):
ray.get_actor("abc")
def test_raylet_resubscribe_to_worker_death(
tmp_path, ray_start_regular_with_external_redis
):
"""Verify that the Raylet resubscribes to worker death notifications on GCS restart."""
child_task_pid_path = tmp_path / "blocking_child.pid"
@ray.remote(num_cpus=0)
def child():
print("Child worker ID:", ray.get_runtime_context().get_worker_id())
child_task_pid_path.write_text(str(os.getpid()))
while True:
time.sleep(0.1)
print("Child still running...")
@ray.remote(num_cpus=0)
def parent() -> Tuple[int, int, ray.ObjectRef]:
print("Parent worker ID:", ray.get_runtime_context().get_worker_id())
child_obj_ref = child.remote()
# Wait for the child to be running and report back its PID.
wait_for_condition(lambda: child_task_pid_path.exists(), timeout=10)
child_pid = int(child_task_pid_path.read_text())
return os.getpid(), child_pid, child_obj_ref
parent_pid, child_pid, child_obj_ref = ray.get(parent.remote())
print(f"Parent PID: {parent_pid}, child PID: {child_pid}")
assert parent_pid != child_pid
# Kill and restart the GCS.
ray._private.worker._global_node.kill_gcs_server()
ray._private.worker._global_node.start_gcs_server()
# Schedule an actor to ensure that the GCS is back alive and the Raylet is
# reconnected to it.
# TODO(iycheng): this shouldn't be necessary, but the current resubscription
# implementation can lose the worker failure message because we don't ask for
# the snapshot of worker statuses.
@ray.remote
class A:
pass
ray.get(A.remote().__ray_ready__.remote())
# Kill the parent task and verify that the child task is killed due to fate sharing
# with its parent.
print("Killing parent process.")
p = psutil.Process(parent_pid)
p.kill()
p.wait()
print("Parent process exited.")
# The child task should exit.
wait_for_pid_to_exit(child_pid, 20)
with pytest.raises(ray.exceptions.OwnerDiedError):
ray.get(child_obj_ref)
def test_core_worker_resubscription(tmp_path, ray_start_regular_with_external_redis):
# This test is to ensure core worker will resubscribe to GCS after GCS
# restarts.
lock_file = str(tmp_path / "lock")
lock = FileLock(lock_file)
lock.acquire()
@ray.remote
class Actor:
def __init__(self):
lock = FileLock(lock_file)
lock.acquire()
def ready(self):
return
a = Actor.remote()
r = a.ready.remote()
# Actor is not ready before GCS is down.
ray._private.worker._global_node.kill_gcs_server()
lock.release()
# Actor is ready after GCS starts
ray._private.worker._global_node.start_gcs_server()
# Test the resubscribe works: if not, it'll timeout because worker
# will think the actor is not ready.
ray.get(r, timeout=5)
def test_detached_actor_restarts(ray_start_regular_with_external_redis):
# Detached actors are owned by GCS. This test is to ensure detached actors
# can restart even GCS restarts.
@ray.remote
class A:
def ready(self):
return os.getpid()
a = A.options(name="a", lifetime="detached", max_restarts=-1).remote()
pid = ray.get(a.ready.remote())
ray._private.worker._global_node.kill_gcs_server()
p = psutil.Process(pid)
p.kill()
ray._private.worker._global_node.start_gcs_server()
while True:
try:
assert ray.get(a.ready.remote()) != pid
break
except ray.exceptions.RayActorError:
continue
def test_gcs_client_reconnect(ray_start_regular_with_external_redis):
"""Tests reconnect behavior on GCS restart for sync and asyncio clients."""
gcs_client = ray._private.worker.global_worker.gcs_client
gcs_client.internal_kv_put(b"a", b"b", True, None)
assert gcs_client.internal_kv_get(b"a", None) == b"b"
def _get(use_asyncio: bool) -> bytes:
if use_asyncio:
async def _get_async() -> bytes:
return await gcs_client.async_internal_kv_get(b"a", None)
result = asyncio.run(_get_async())
else:
result = gcs_client.internal_kv_get(b"a", None)
return result
# Kill the GCS, start an internal KV GET request, and check that it succeeds once
# the GCS is restarted.
ray._private.worker._global_node.kill_gcs_server()
with ThreadPoolExecutor(max_workers=2) as executor:
sync_future = executor.submit(_get, False)
asyncio_future = executor.submit(_get, True)
ray._private.worker._global_node.start_gcs_server()
assert sync_future.result() == b"b"
assert asyncio_future.result() == b"b"
@pytest.mark.parametrize(
"ray_start_regular_with_external_redis",
[
{
**generate_system_config_map(
gcs_rpc_server_reconnect_timeout_s=3600,
),
"namespace": "actor",
}
],
indirect=True,
)
def test_actor_workloads(ray_start_regular_with_external_redis):
"""Tests actor creation and task execution while the GCS is down."""
@ray.remote(num_cpus=0)
class Counter:
def noop(self, v: Any) -> Any:
return v
# Start two actors, one normal and one detached, and wait for them to be running.
counter_1 = Counter.remote()
r = ray.get(counter_1.noop.remote(1))
assert r == 1
detached_counter = Counter.options(
lifetime="detached", name="detached_counter"
).remote()
assert ray.get(detached_counter.noop.remote("detached")) == "detached"
# Kill the GCS.
ray._private.worker._global_node.kill_gcs_server()
# Tasks to the existing actors should continue to work.
assert ray.get(counter_1.noop.remote(1)) == 1
# Create a new actor. Making actor calls shouldn't error and they should
# succeed after the GCS comes back up and starts the actor.
counter_2 = Counter.remote()
counter_2_alive_ref = counter_2.noop.remote(2)
ready, _ = ray.wait([counter_2_alive_ref], timeout=0.1)
assert len(ready) == 0
# Restart the GCS and check that the actor is started and task succeeds.
ray._private.worker._global_node.start_gcs_server()
assert ray.get(counter_2_alive_ref) == 2
# Check that the existing actors continue to function, including the detached
# actor being called from another driver.
assert ray.get(counter_1.noop.remote(1)) == 1
return
run_string_as_driver(
"""
import ray
ray.init("auto", namespace="actor")
detached_counter = ray.get_actor("detached_counter")
assert ray.get(detached_counter.noop.remote("detached")) == "detached"
"""
)
@pytest.mark.parametrize(
"ray_start_regular_with_external_redis",
[
{
**generate_system_config_map(
gcs_rpc_server_reconnect_timeout_s=3600,
),
"namespace": "actor",
}
],
indirect=True,
)
def test_pg_actor_workloads(ray_start_regular_with_external_redis):
bundle1 = {"CPU": 1}
pg = placement_group([bundle1], strategy="STRICT_PACK")
ray.get(pg.ready())
@ray.remote
class Counter:
def r(self, v):
return v
def pid(self):
return os.getpid()
c = Counter.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
).remote()
r = ray.get(c.r.remote(10))
assert r == 10
print("GCS is killed")
pid = ray.get(c.pid.remote())
ray.worker._global_node.kill_gcs_server()
assert ray.get(c.r.remote(10)) == 10
ray.worker._global_node.start_gcs_server()
for _ in range(100):
assert pid == ray.get(c.pid.remote())
@pytest.mark.parametrize(
"ray_start_regular_with_external_redis",
[
generate_system_config_map(
gcs_server_request_timeout_seconds=10,
)
],
indirect=True,
)
def test_get_actor_when_gcs_is_down(ray_start_regular_with_external_redis):
@ray.remote
def create_actor():
@ray.remote
class A:
def pid(self):
return os.getpid()
a = A.options(lifetime="detached", name="A").remote()
ray.get(a.pid.remote())
ray.get(create_actor.remote())
ray._private.worker._global_node.kill_gcs_server()
with pytest.raises(ray.exceptions.GetTimeoutError):
ray.get_actor("A")
@pytest.mark.parametrize(
"ray_start_regular_with_external_redis",
[
generate_system_config_map(
gcs_server_request_timeout_seconds=10,
)
],
indirect=True,
)
@pytest.mark.skip(
reason="python publisher and subscriber doesn't handle gcs server failover"
)
def test_publish_and_subscribe_error_info(ray_start_regular_with_external_redis):
address_info = ray_start_regular_with_external_redis
gcs_server_addr = address_info["gcs_address"]
subscriber = ray._raylet.GcsErrorSubscriber(address=gcs_server_addr)
subscriber.subscribe()
gcs_client = ray._raylet.GcsClient(address=gcs_server_addr)
print("sending error message 1")
gcs_client.publish_error(b"aaa_id", "", "test error message 1")
ray._private.worker._global_node.kill_gcs_server()
ray._private.worker._global_node.start_gcs_server()
print("sending error message 2")
gcs_client.publish_error(b"bbb_id", "", "test error message 2")
print("done")
(key_id, err) = subscriber.poll()
assert key_id == b"bbb_id"
assert err["error_message"] == "test error message 2"
subscriber.close()
@pytest.fixture
def redis_replicas(monkeypatch):
monkeypatch.setenv("TEST_EXTERNAL_REDIS_REPLICAS", "3")
@pytest.mark.parametrize(
"ray_start_cluster_head_with_external_redis",
[
generate_system_config_map(
gcs_server_request_timeout_seconds=10,
redis_db_connect_retries=50,
)
],
indirect=True,
)
def test_redis_failureover(redis_replicas, ray_start_cluster_head_with_external_redis):
"""This test is to cover ray cluster's behavior when Redis master failed.
The management of the Redis cluster is not covered by Ray, but Ray should handle
the failure correctly.
For this test we ensure:
- When Redis master failed, Ray should crash (TODO: make ray automatically switch to
new master).
- After Redis recovered, Ray should be able to use the new Master.
- When the master becomes slaves, Ray should crash.
"""
cluster = ray_start_cluster_head_with_external_redis
import redis
redis_addr = os.environ.get("RAY_REDIS_ADDRESS")
ip, port = parse_address(redis_addr)
redis_cli = redis.Redis(ip, port)
def get_connected_nodes():
return [
(k, v) for (k, v) in redis_cli.cluster("nodes").items() if v["connected"]
]
wait_for_condition(
lambda: len(get_connected_nodes())
== int(os.environ.get("TEST_EXTERNAL_REDIS_REPLICAS"))
)
nodes = redis_cli.cluster("nodes")
leader_cli = None
follower_cli = []
for addr in nodes:
ip, port = parse_address(addr)
cli = redis.Redis(ip, port)
meta = nodes[addr]
flags = meta["flags"].split(",")
if "master" in flags:
leader_cli = cli
print("LEADER", addr, redis_addr)
else:
follower_cli.append(cli)
leader_pid = leader_cli.info()["process_id"]
@ray.remote(max_restarts=-1)
class Counter:
def r(self, v):
return v
def pid(self):
return os.getpid()
c = Counter.options(name="c", namespace="test", lifetime="detached").remote()
c_pid = ray.get(c.pid.remote())
c_process = psutil.Process(pid=c_pid)
r = ray.get(c.r.remote(10))
assert r == 10
head_node = cluster.head_node
gcs_server_process = head_node.all_processes["gcs_server"][0].process
gcs_server_pid = gcs_server_process.pid
# Wait until all data is updated in the replica
leader_cli.set("_hole", "0")
wait_for_condition(lambda: all([b"_hole" in f.keys("*") for f in follower_cli]))
# Now kill pid
leader_process = psutil.Process(pid=leader_pid)
leader_process.kill()
print(">>> Waiting gcs server to exit", gcs_server_pid)
wait_for_pid_to_exit(gcs_server_pid, 1000)
print("GCS killed")
follower_cli[0].cluster("failover", "takeover")
wait_for_condition(
lambda: len(get_connected_nodes())
== int(os.environ.get("TEST_EXTERNAL_REDIS_REPLICAS")) - 1
)
# Kill Counter actor. It should restart after GCS is back
c_process.kill()
# Cleanup the in memory data and then start gcs
cluster.head_node.kill_gcs_server(False)
print("Start gcs")
cluster.head_node.start_gcs_server()
assert len(ray.nodes()) == 1
assert ray.nodes()[0]["alive"]
driver_script = f"""
import ray
ray.init('{cluster.address}')
@ray.remote
def f():
return 10
assert ray.get(f.remote()) == 10
c = ray.get_actor("c", namespace="test")
v = ray.get(c.r.remote(10))
assert v == 10
print("DONE")
"""
# Make sure the cluster is usable
wait_for_condition(lambda: "DONE" in run_string_as_driver(driver_script))
# Now make follower_cli[0] become replica
# and promote follower_cli[1] as leader
follower_cli[1].cluster("failover", "takeover")
head_node = cluster.head_node
gcs_server_process = head_node.all_processes["gcs_server"][0].process
gcs_server_pid = gcs_server_process.pid
# GCS should exit in this case
print(">>> Waiting gcs server to exit", gcs_server_pid)
wait_for_pid_to_exit(gcs_server_pid, 10000)
@pytest.mark.parametrize(
"ray_start_cluster_head_with_external_redis_sentinel",
[
generate_system_config_map(
gcs_server_request_timeout_seconds=10,
redis_db_connect_retries=50,
)
],
indirect=True,
)
def test_redis_with_sentinel_failureover(
ray_start_cluster_head_with_external_redis_sentinel,
):
"""This test is to cover ray cluster's behavior with Redis sentinel.
The expectation is Redis sentinel should manage failover
automatically, and GCS can continue talking to the same address
without any human intervention on Redis.
For this test we ensure:
- When Redis master failed, Ray should crash (TODO: GCS should
autommatically try re-connect to sentinel).
- When restart Ray, it should continue talking to sentinel, which
should return information about new master.
"""
cluster = ray_start_cluster_head_with_external_redis_sentinel
import redis
redis_addr = os.environ.get("RAY_REDIS_ADDRESS")
ip, port = parse_address(redis_addr)
redis_cli = redis.Redis(ip, port)
print(redis_cli.info("sentinel"))
redis_name = redis_cli.info("sentinel")["master0"]["name"]
def get_sentinel_nodes():
leader_address = (
redis_cli.sentinel_master(redis_name)["ip"],
redis_cli.sentinel_master(redis_name)["port"],
)
follower_addresses = [
(x["ip"], x["port"]) for x in redis_cli.sentinel_slaves(redis_name)
]
return [leader_address] + follower_addresses
wait_for_condition(lambda: len(get_sentinel_nodes()) == redis_sentinel_replicas())
@ray.remote(max_restarts=-1)
class Counter:
def r(self, v):
return v
def pid(self):
return os.getpid()
c = Counter.options(name="c", namespace="test", lifetime="detached").remote()
c_pid = ray.get(c.pid.remote())
c_process = psutil.Process(pid=c_pid)
r = ray.get(c.r.remote(10))
assert r == 10
head_node = cluster.head_node
gcs_server_process = head_node.all_processes["gcs_server"][0].process
gcs_server_pid = gcs_server_process.pid
leader_cli = redis.Redis(*get_sentinel_nodes()[0])
leader_pid = leader_cli.info()["process_id"]
follower_cli = [redis.Redis(*x) for x in get_sentinel_nodes()[1:]]
# Wait until all data is updated in the replica
leader_cli.set("_hole", "0")
wait_for_condition(lambda: all([b"_hole" in f.keys("*") for f in follower_cli]))
current_leader = get_sentinel_nodes()[0]
# Now kill pid
leader_process = psutil.Process(pid=leader_pid)
leader_process.kill()
print(">>> Waiting gcs server to exit", gcs_server_pid)
wait_for_pid_to_exit(gcs_server_pid, 1000)
print("GCS killed")
wait_for_condition(lambda: current_leader != get_sentinel_nodes()[0])
# Kill Counter actor. It should restart after GCS is back
c_process.kill()
# Cleanup the in memory data and then start gcs
cluster.head_node.kill_gcs_server(False)
print("Start gcs")
cluster.head_node.start_gcs_server()
assert len(ray.nodes()) == 1
assert ray.nodes()[0]["alive"]
driver_script = f"""
import ray
ray.init('{cluster.address}')
@ray.remote
def f():
return 10
assert ray.get(f.remote()) == 10
c = ray.get_actor("c", namespace="test")
v = ray.get(c.r.remote(10))
assert v == 10
print("DONE")
"""
# Make sure the cluster is usable
wait_for_condition(lambda: "DONE" in run_string_as_driver(driver_script))
@pytest.mark.parametrize(
"ray_start_regular",
[
generate_system_config_map(
enable_cluster_auth=True,
raylet_liveness_self_check_interval_ms=5000,
)
],
indirect=True,
)
def test_raylet_fate_sharing(ray_start_regular):
# Kill GCS and check that raylets kill themselves when not backed by Redis,
# and stay alive when backed by Redis.
# Raylets should kill themselves due to cluster ID mismatch in the
# non-persisted case.
raylet_proc = ray._private.worker._global_node.all_processes[
ray_constants.PROCESS_TYPE_RAYLET
][0].process
def check_raylet_healthy():
return raylet_proc.poll() is None
wait_for_condition(lambda: check_raylet_healthy())
for i in range(10):
assert check_raylet_healthy()
ray._private.worker._global_node.kill_gcs_server()
ray._private.worker._global_node.start_gcs_server()
if not external_redis_test_enabled():
# Waiting for raylet to become unhealthy
wait_for_condition(lambda: not check_raylet_healthy())
else:
# Waiting for raylet to stay healthy
for i in range(10):
assert check_raylet_healthy()
def test_session_name(ray_start_cluster):
# Kill GCS and check that raylets kill themselves when not backed by Redis,
# and stay alive when backed by Redis.
# Raylets should kill themselves due to cluster ID mismatch in the
# non-persisted case.
cluster = ray_start_cluster
cluster.add_node()
cluster.wait_for_nodes()
head_node = cluster.head_node
session_dir = head_node.get_session_dir_path()
gcs_server_process = head_node.all_processes["gcs_server"][0].process
gcs_server_pid = gcs_server_process.pid
cluster.remove_node(head_node, allow_graceful=False)
# Wait to prevent the gcs server process becoming zombie.
gcs_server_process.wait()
wait_for_pid_to_exit(gcs_server_pid, 1000)
# Add head node back
cluster.add_node()
head_node = cluster.head_node
new_session_dir = head_node.get_session_dir_path()
if not external_redis_test_enabled():
assert session_dir != new_session_dir
else:
assert session_dir == new_session_dir
@pytest.mark.parametrize(
"ray_start_regular_with_external_redis",
[
generate_system_config_map(
gcs_server_request_timeout_seconds=10,
raylet_liveness_self_check_interval_ms=3000,
)
],
indirect=True,
)
def test_redis_data_loss_no_leak(ray_start_regular_with_external_redis):
@ray.remote
def create_actor():
@ray.remote
class A:
def pid(self):
return os.getpid()
a = A.options(lifetime="detached", name="A").remote()
ray.get(a.pid.remote())
ray.get(create_actor.remote())
ray._private.worker._global_node.kill_gcs_server()
# Delete redis
redis_addr = os.environ.get("RAY_REDIS_ADDRESS")
import redis
ip, port = parse_address(redis_addr)
cli = redis.Redis(ip, port)
cli.flushall()
raylet_proc = ray._private.worker._global_node.all_processes[
ray_constants.PROCESS_TYPE_RAYLET
][0].process
def check_raylet_healthy():
return raylet_proc.poll() is None
wait_for_condition(lambda: check_raylet_healthy())
# Start GCS
ray._private.worker._global_node.start_gcs_server()
# Waiting for raylet to become unhealthy
wait_for_condition(lambda: not check_raylet_healthy())
def test_redis_logs(external_redis):
try:
process = subprocess.Popen(
["ray", "start", "--head"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = process.communicate(timeout=30)
print(stdout.decode())
print(stderr.decode())
assert "redis_context.cc" not in stderr.decode()
assert "redis_context.cc" not in stdout.decode()
assert "Resolve Redis address" not in stderr.decode()
assert "Resolve Redis address" not in stdout.decode()
# assert "redis_context.cc" not in result.output
finally:
from click.testing import CliRunner
import ray.scripts.scripts as scripts
runner = CliRunner(env={"RAY_USAGE_STATS_PROMPT_ENABLED": "0"})
runner.invoke(
scripts.stop,
[
"--force",
],
)
@pytest.mark.parametrize(
"ray_start_cluster_head_with_external_redis",
[
generate_system_config_map(
gcs_rpc_server_reconnect_timeout_s=2,
)
],
indirect=True,
)
def test_job_finished_after_head_node_restart(
ray_start_cluster_head_with_external_redis,
):
cluster = ray_start_cluster_head_with_external_redis
head_node = cluster.head_node
# submit job
client = JobSubmissionClient(head_node.address)
submission_id = client.submit_job(
entrypoint="python -c 'import ray; ray.init(); print(ray.cluster_resources()); \
import time; time.time.sleep(1000)'"
)
def get_job_info(submission_id):
gcs_client = GcsClient(cluster.address)
all_job_info = gcs_client.get_all_job_info(job_or_submission_id=submission_id)
return list(
filter(
lambda job_info: "job_submission_id" in job_info.config.metadata
and job_info.config.metadata["job_submission_id"] == submission_id,
list(all_job_info.values()),
)
)
def _check_job_running(submission_id: str) -> bool:
job_infos = get_job_info(submission_id)
if len(job_infos) == 0:
return False
job_info = job_infos[0].job_info
return job_info.status == JobStatus.RUNNING
# wait until job info is written in redis
wait_for_condition(_check_job_running, submission_id=submission_id, timeout=10)
# kill head node
ray.shutdown()
gcs_server_process = head_node.all_processes["gcs_server"][0].process
gcs_server_pid = gcs_server_process.pid
cluster.remove_node(head_node)
# Wait to prevent the gcs server process becoming zombie.
gcs_server_process.wait()
wait_for_pid_to_exit(gcs_server_pid, 1000)
# restart head node
cluster.add_node()
ray.init(cluster.address)
# verify if job is finished, which marked is_dead
def _check_job_is_dead(submission_id: str) -> bool:
job_infos = get_job_info(submission_id)
if len(job_infos) == 0:
return False
job_info = job_infos[0]
return job_info.is_dead
wait_for_condition(_check_job_is_dead, submission_id=submission_id, timeout=10)
def raises_exception(exc_type, f):
try:
f()
except exc_type:
return True
return False
@pytest.mark.parametrize(
"case",
[
{"kill_job": False, "kill_actor": False, "expect_alive": "all"},
{"kill_job": True, "kill_actor": False, "expect_alive": "AB"},
{"kill_job": True, "kill_actor": True, "expect_alive": "none"},
{"kill_job": False, "kill_actor": True, "expect_alive": "regular"},
],
)
@pytest.mark.skipif(not external_redis_test_enabled(), reason="Only valid in redis env")
def test_gcs_server_restart_destroys_out_of_scope_actors(
external_redis, ray_start_cluster, case
):
"""
If an actor goes out of scope *when GCS is down*, when GCS restarts, the actor
should be destroyed by GCS in its restarting.
Set up: in a job,
- create a regular actor
- create a detached actor A, which creates a child actor B
Situations:
Case 0: nobody died
all should be alive
Case 1: before GCS is down, job died
regular actor should be dead, A and B should still be alive
Case 2: before GCS is down, job died; during GCS is down, A died
all should be dead
Case 3: during GCS is down, A died
regular actor should be alive, A and B should be dead
"""
cluster = ray_start_cluster
cluster.add_node(num_cpus=4)
cluster.wait_for_nodes()
ray.init(address=cluster.address)
@ray.remote
class A:
def __init__(self):
self.children = []
def getpid(self):
return os.getpid()
def spawn(self, name, namespace):
child = A.options(name=name, namespace=namespace).remote()
self.children.append(child)
return child
regular = A.options(name="regular", namespace="ns").remote()
detached = A.options(lifetime="detached", name="parent", namespace="ns").remote()
child = ray.get(detached.spawn.remote("child", "ns"))
regular_pid = ray.get(regular.getpid.remote())
detached_pid = ray.get(detached.getpid.remote())
child_pid = ray.get(child.getpid.remote())
print(f"regular actor ID: {regular._actor_id}, pid: {regular_pid}")
print(f"detached actor ID: {detached._actor_id}, pid: {detached_pid}")
print(f"child actor ID: {child._actor_id}, pid: {child_pid}")
if case["kill_job"]:
# kill the job and restart.
ray.shutdown()
ray.init(address=cluster.address)
cluster_kill_gcs_wait(cluster)
# When GCS is down...
if case["kill_actor"]:
os.kill(detached_pid, signal.SIGKILL)
cluster.head_node.start_gcs_server()
print("GCS restarted")
if case["expect_alive"] == "all":
regular2 = ray.get_actor("regular", namespace="ns")
detached2 = ray.get_actor("parent", namespace="ns")
child2 = ray.get_actor("child", namespace="ns")
assert ray.get(regular2.getpid.remote()) == regular_pid
assert ray.get(detached2.getpid.remote()) == detached_pid
assert ray.get(child2.getpid.remote()) == child_pid
elif case["expect_alive"] == "AB":
with pytest.raises(ValueError):
ray.get_actor("regular", namespace="ns")
detached2 = ray.get_actor("parent", namespace="ns")
child2 = ray.get_actor("child", namespace="ns")
assert ray.get(detached2.getpid.remote()) == detached_pid
assert ray.get(child2.getpid.remote()) == child_pid
elif case["expect_alive"] == "none":
with pytest.raises(ValueError):
ray.get_actor("regular", namespace="ns")
# It took some time for raylet to report worker failure.
wait_for_condition(
lambda: raises_exception(
ValueError, lambda: ray.get_actor("parent", namespace="ns")
)
)
wait_for_condition(
lambda: raises_exception(
ValueError, lambda: ray.get_actor("child", namespace="ns")
)
)
elif case["expect_alive"] == "regular":
regular2 = ray.get_actor("regular", namespace="ns")
wait_for_condition(
lambda: raises_exception(
ValueError, lambda: ray.get_actor("parent", namespace="ns")
)
)
wait_for_condition(
lambda: raises_exception(
ValueError, lambda: ray.get_actor("child", namespace="ns")
)
)
assert ray.get(regular2.getpid.remote()) == regular_pid
else:
raise ValueError(f"Unknown case: {case}")
MyPlugin = "MyPlugin"
MY_PLUGIN_CLASS_PATH = "ray.tests.test_gcs_fault_tolerance.HangPlugin"
| Increase |
python | ipython__ipython | tests/test_zzz_autoreload.py | {
"start": 1493,
"end": 2985
} | class ____:
def __init__(self):
self.ns = {}
self.user_ns = self.ns
self.user_ns["In"] = []
self.user_ns_hidden = {}
self.events = EventManager(self, {"pre_run_cell", pre_run_cell})
self.auto_magics = AutoreloadMagics(shell=self)
self.events.register("pre_run_cell", self.auto_magics.pre_run_cell)
self.input_transformer_manager = ipt2.TransformerManager()
register_magics = set_hook = noop
def showtraceback(
self,
exc_tuple=None,
filename=None,
tb_offset=None,
exception_only=False,
running_compiled_code=False,
):
traceback.print_exc()
def run_code(self, code):
transformed_cell = self.input_transformer_manager.transform_cell(code)
self.events.trigger(
"pre_run_cell",
ExecutionInfo(
raw_cell=code,
transformed_cell=code,
store_history=False,
silent=False,
shell_futures=False,
cell_id=None,
),
)
exec(code, self.user_ns)
self.auto_magics.post_execute_hook()
def push(self, items):
self.ns.update(items)
def magic_autoreload(self, parameter):
self.auto_magics.autoreload(parameter)
def magic_aimport(self, parameter, stream=None):
self.auto_magics.aimport(parameter, stream=stream)
self.auto_magics.post_execute_hook()
| FakeShell |
python | getsentry__sentry | src/sentry/sentry_apps/api/endpoints/sentry_app_publish_request.py | {
"start": 1021,
"end": 1232
} | class ____(serializers.Serializer):
question = serializers.CharField(required=True, allow_null=False)
answer = serializers.CharField(required=True, allow_null=False)
| SentryAppPublishQuestionnaireSerializer |
python | mlflow__mlflow | mlflow/types/llm.py | {
"start": 23016,
"end": 24035
} | class ____(_BaseDataclass):
"""
A single chat response generated by the model.
ref: https://platform.openai.com/docs/api-reference/chat/object
Args:
message (:py:class:`ChatMessage`): The message that was generated.
index (int): The index of the response in the list of responses.
Defaults to ``0``
finish_reason (str): The reason why generation stopped.
**Optional**, defaults to ``"stop"``
logprobs (:py:class:`ChatChoiceLogProbs`): Log probability information for the choice.
**Optional**, defaults to ``None``
"""
message: ChatMessage
index: int = 0
finish_reason: str = "stop"
logprobs: ChatChoiceLogProbs | None = None
def __post_init__(self):
self._validate_field("index", int, True)
self._validate_field("finish_reason", str, True)
self._convert_dataclass("message", ChatMessage, True)
self._convert_dataclass("logprobs", ChatChoiceLogProbs, False)
@dataclass
| ChatChoice |
python | doocs__leetcode | solution/1400-1499/1426.Counting Elements/Solution.py | {
"start": 0,
"end": 155
} | class ____:
def countElements(self, arr: List[int]) -> int:
cnt = Counter(arr)
return sum(v for x, v in cnt.items() if cnt[x + 1])
| Solution |
python | pytest-dev__pytest | src/_pytest/python.py | {
"start": 9201,
"end": 12128
} | class ____(nodes.Node):
"""this mix-in inherits from Node to carry over the typing information
as its intended to always mix in before a node
its position in the mro is unaffected"""
_ALLOW_MARKERS = True
@property
def module(self):
"""Python module object this node was collected from (can be None)."""
node = self.getparent(Module)
return node.obj if node is not None else None
@property
def cls(self):
"""Python class object this node was collected from (can be None)."""
node = self.getparent(Class)
return node.obj if node is not None else None
@property
def instance(self):
"""Python instance object the function is bound to.
Returns None if not a test method, e.g. for a standalone test function,
a class or a module.
"""
# Overridden by Function.
return None
@property
def obj(self):
"""Underlying Python object."""
obj = getattr(self, "_obj", None)
if obj is None:
self._obj = obj = self._getobj()
# XXX evil hack
# used to avoid Function marker duplication
if self._ALLOW_MARKERS:
self.own_markers.extend(get_unpacked_marks(self.obj))
# This assumes that `obj` is called before there is a chance
# to add custom keys to `self.keywords`, so no fear of overriding.
self.keywords.update((mark.name, mark) for mark in self.own_markers)
return obj
@obj.setter
def obj(self, value):
self._obj = value
def _getobj(self):
"""Get the underlying Python object. May be overwritten by subclasses."""
# TODO: Improve the type of `parent` such that assert/ignore aren't needed.
assert self.parent is not None
obj = self.parent.obj # type: ignore[attr-defined]
return getattr(obj, self.name)
def getmodpath(self, stopatmodule: bool = True, includemodule: bool = False) -> str:
"""Return Python path relative to the containing module."""
parts = []
for node in self.iter_parents():
name = node.name
if isinstance(node, Module):
name = os.path.splitext(name)[0]
if stopatmodule:
if includemodule:
parts.append(name)
break
parts.append(name)
parts.reverse()
return ".".join(parts)
def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]:
# XXX caching?
path, lineno = getfslineno(self.obj)
modpath = self.getmodpath()
return path, lineno, modpath
# As an optimization, these builtin attribute names are pre-ignored when
# iterating over an object during collection -- the pytest_pycollect_makeitem
# hook is not called for them.
# fmt: off
| PyobjMixin |
python | lazyprogrammer__machine_learning_examples | supervised_class/perceptron.py | {
"start": 755,
"end": 3105
} | class ____:
def fit(self, X, Y, learning_rate=1.0, epochs=1000):
# solution
# self.w = np.array([-0.5, 0.5])
# self.b = 0.1
# initialize random weights
D = X.shape[1]
self.w = np.random.randn(D)
self.b = 0
N = len(Y)
costs = []
for epoch in range(epochs):
# determine which samples are misclassified, if any
Yhat = self.predict(X)
incorrect = np.nonzero(Y != Yhat)[0]
if len(incorrect) == 0:
# we are done!
break
# choose a random incorrect sample
i = np.random.choice(incorrect)
self.w += learning_rate*Y[i]*X[i]
self.b += learning_rate*Y[i]
# cost is incorrect rate
c = len(incorrect) / float(N)
costs.append(c)
print("final w:", self.w, "final b:", self.b, "epochs:", (epoch+1), "/", epochs)
plt.plot(costs)
plt.show()
def predict(self, X):
return np.sign(X.dot(self.w) + self.b)
def score(self, X, Y):
P = self.predict(X)
return np.mean(P == Y)
if __name__ == '__main__':
# linearly separable data
X, Y = get_data()
plt.scatter(X[:,0], X[:,1], c=Y, s=100, alpha=0.5)
plt.show()
Ntrain = len(Y) // 2
Xtrain, Ytrain = X[:Ntrain], Y[:Ntrain]
Xtest, Ytest = X[Ntrain:], Y[Ntrain:]
model = Perceptron()
t0 = datetime.now()
model.fit(Xtrain, Ytrain)
print("Training time:", (datetime.now() - t0))
t0 = datetime.now()
print("Train accuracy:", model.score(Xtrain, Ytrain))
print("Time to compute train accuracy:", (datetime.now() - t0), "Train size:", len(Ytrain))
t0 = datetime.now()
print("Test accuracy:", model.score(Xtest, Ytest))
print("Time to compute test accuracy:", (datetime.now() - t0), "Test size:", len(Ytest))
# mnist
X, Y = get_mnist()
idx = np.logical_or(Y == 0, Y == 1)
X = X[idx]
Y = Y[idx]
Y[Y == 0] = -1
model = Perceptron()
t0 = datetime.now()
model.fit(X, Y, learning_rate=1e-2)
print("MNIST train accuracy:", model.score(X, Y))
# xor data
print("")
print("XOR results:")
X, Y = get_simple_xor()
Y[Y == 0] = -1
model.fit(X, Y)
print("XOR accuracy:", model.score(X, Y))
| Perceptron |
python | Textualize__textual | src/textual/events.py | {
"start": 24058,
"end": 24214
} | class ____(Event, bubble=False):
"""Sent to screen when it is no longer active.
- [ ] Bubbles
- [ ] Verbose
"""
@rich.repr.auto
| ScreenSuspend |
python | getsentry__sentry | src/sentry/auth/elevated_mode.py | {
"start": 584,
"end": 1938
} | class ____(ABC):
@property
@abstractmethod
def is_active(self) -> bool:
pass
@abstractmethod
def is_privileged_request(self) -> tuple[bool, InactiveReason]:
pass
@abstractmethod
def get_session_data(self, current_datetime: datetime | None = None) -> dict[str, Any] | None:
pass
@abstractmethod
def _populate(self) -> None:
pass
@abstractmethod
def set_logged_in(self, user: User, current_datetime: datetime | None = None) -> None:
pass
@abstractmethod
def set_logged_out(self) -> None:
pass
@abstractmethod
def on_response(cls, response: HttpResponse) -> None:
pass
# TODO(schew2381): Delete this method after the option is removed
def has_elevated_mode(request: HttpRequest) -> bool:
"""
This is a temporary helper method that checks if the user on the request has
the staff option enabled. If so, it checks is_active_staff and otherwise
defaults to checking is_active_superuser.
"""
from sentry.auth.staff import has_staff_option, is_active_staff
from sentry.auth.superuser import is_active_superuser
if isinstance(request.user, AnonymousUser):
return False
if has_staff_option(request.user):
return is_active_staff(request)
return is_active_superuser(request)
| ElevatedMode |
python | getsentry__sentry | src/sentry/utils/kvstore/encoding.py | {
"start": 194,
"end": 1676
} | class ____(KVStorage[K, TDecoded]):
"""
This class provides a wrapper that can be used to transparently
encode/decode values in the provided key/value storage to another type on
reading and writing by using the provided codec. This allows key/value
storages that have different value types to be used interchangably by
wrapping one or both storages so that they expect a common type.
"""
def __init__(
self, store: KVStorage[K, TEncoded], value_codec: Codec[TDecoded, TEncoded]
) -> None:
self.store = store
self.value_codec = value_codec
def get(self, key: K) -> TDecoded | None:
value = self.store.get(key)
if value is None:
return None
return self.value_codec.decode(value)
def get_many(self, keys: Sequence[K]) -> Iterator[tuple[K, TDecoded]]:
for key, value in self.store.get_many(keys):
yield key, self.value_codec.decode(value)
def set(self, key: K, value: TDecoded, ttl: timedelta | None = None) -> None:
return self.store.set(key, self.value_codec.encode(value), ttl)
def delete(self, key: K) -> None:
return self.store.delete(key)
def delete_many(self, keys: Sequence[K]) -> None:
return self.store.delete_many(keys)
def bootstrap(self, automatic_expiry: bool = True) -> None:
return self.store.bootstrap()
def destroy(self) -> None:
return self.store.destroy()
| KVStorageCodecWrapper |
python | pydata__xarray | xarray/coding/variables.py | {
"start": 21689,
"end": 22817
} | class ____(VariableCoder):
"""Code boolean values."""
def encode(self, variable: Variable, name: T_Name = None) -> Variable:
if (
(variable.dtype == bool)
and ("dtype" not in variable.encoding)
and ("dtype" not in variable.attrs)
):
dims, data, attrs, encoding = unpack_for_encoding(variable)
attrs["dtype"] = "bool"
data = duck_array_ops.astype(data, dtype="i1", copy=True)
return Variable(dims, data, attrs, encoding, fastpath=True)
else:
return variable
def decode(self, variable: Variable, name: T_Name = None) -> Variable:
if variable.attrs.get("dtype", False) == "bool":
dims, data, attrs, encoding = unpack_for_decoding(variable)
# overwrite (!) dtype in encoding, and remove from attrs
# needed for correct subsequent encoding
encoding["dtype"] = attrs.pop("dtype")
data = BoolTypeArray(data)
return Variable(dims, data, attrs, encoding, fastpath=True)
else:
return variable
| BooleanCoder |
python | sphinx-doc__sphinx | sphinx/domains/_index.py | {
"start": 1035,
"end": 3259
} | class ____(ABC):
"""An Index is the description for a domain-specific index. To add an index to
a domain, subclass Index, overriding the three name attributes:
* `name` is an identifier used for generating file names.
It is also used for a hyperlink target for the index. Therefore, users can
refer the index page using ``ref`` role and a string which is combined
domain name and ``name`` attribute (ex. ``:ref:`py-modindex```).
* `localname` is the section title for the index.
* `shortname` is a short name for the index, for use in the relation bar in
HTML output. Can be empty to disable entries in the relation bar.
and providing a :meth:`generate` method. Then, add the index class to
your domain's `indices` list. Extensions can add indices to existing
domains using :meth:`~sphinx.application.Sphinx.add_index_to_domain`.
.. versionchanged:: 3.0
Index pages can be referred by domain name and index name via
:rst:role:`ref` role.
"""
name: ClassVar[str]
localname: ClassVar[str]
shortname: ClassVar[str | None] = None
def __init__(self, domain: Domain) -> None:
if not self.name or self.localname is None:
msg = f'Index subclass {self.__class__.__name__} has no valid name or localname'
raise SphinxError(msg)
self.domain = domain
@abstractmethod
def generate(
self, docnames: Iterable[str] | None = None
) -> tuple[list[tuple[str, list[IndexEntry]]], bool]:
"""Get entries for the index.
If ``docnames`` is given, restrict to entries referring to these
docnames.
The return value is a tuple of ``(content, collapse)``:
``collapse``
A boolean that determines if sub-entries should start collapsed (for
output formats that support collapsing sub-entries).
``content``:
A sequence of ``(letter, entries)`` tuples, where ``letter`` is the
"heading" for the given ``entries``, usually the starting letter, and
``entries`` is a sequence of single entries.
Each entry is an :py:class:`IndexEntry`.
"""
raise NotImplementedError
| Index |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py | {
"start": 13417,
"end": 13999
} | class ____(graphene.Mutation):
"""Resumes a set of partition backfill runs. Resuming a backfill will not retry any failed runs."""
Output = graphene.NonNull(GrapheneResumeBackfillResult)
class Arguments:
backfillId = graphene.NonNull(graphene.String)
class Meta:
name = "ResumeBackfillMutation"
@capture_error
@require_permission_check(Permissions.LAUNCH_PARTITION_BACKFILL)
def mutate(self, graphene_info: ResolveInfo, backfillId: str):
return resume_partition_backfill(graphene_info, backfillId)
| GrapheneResumeBackfillMutation |
python | kamyu104__LeetCode-Solutions | Python/maximum-sum-of-distinct-subarrays-with-length-k.py | {
"start": 44,
"end": 636
} | class ____(object):
def maximumSubarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
result = left = total = 0
lookup = set()
for right in xrange(len(nums)):
while nums[right] in lookup or len(lookup) == k:
lookup.remove(nums[left])
total -= nums[left]
left += 1
lookup.add(nums[right])
total += nums[right]
if len(lookup) == k:
result = max(result, total)
return result
| Solution |
python | plotly__plotly.py | plotly/graph_objs/isosurface/_legendgrouptitle.py | {
"start": 233,
"end": 2960
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "isosurface"
_path_str = "isosurface.legendgrouptitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.isosurface.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.isosurface.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.isosurface.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super().__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.isosurface.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.Legendgrouptitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Legendgrouptitle |
python | pytorch__pytorch | test/onnx/exporter/test_api.py | {
"start": 330,
"end": 455
} | class ____(torch.nn.Module):
def forward(self, x):
y = x + 1
z = y.relu()
return (y, z)
| SampleModel |
python | cython__cython | tests/run/pep3135_class_cell.py | {
"start": 3453,
"end": 3518
} | class ____:
def method(self): return __class__
@cython.cclass
| K |
python | run-llama__llama_index | llama-index-core/llama_index/core/node_parser/text/semantic_splitter.py | {
"start": 1135,
"end": 11005
} | class ____(NodeParser):
"""
Semantic node parser.
Splits a document into Nodes, with each node being a group of semantically related sentences.
Args:
buffer_size (int): number of sentences to group together when evaluating semantic similarity
embed_model: (BaseEmbedding): embedding model to use
sentence_splitter (Optional[Callable]): splits text into sentences
breakpoint_percentile_threshold (int): dissimilarity threshold for creating semantic breakpoints, lower value will generate more nodes
include_metadata (bool): whether to include metadata in nodes
include_prev_next_rel (bool): whether to include prev/next relationships
"""
sentence_splitter: SentenceSplitterCallable = Field(
default_factory=split_by_sentence_tokenizer,
description="The text splitter to use when splitting documents.",
exclude=True,
)
embed_model: SerializeAsAny[BaseEmbedding] = Field(
description="The embedding model to use to for semantic comparison",
)
buffer_size: int = Field(
default=1,
description=(
"The number of sentences to group together when evaluating semantic similarity. "
"Set to 1 to consider each sentence individually. "
"Set to >1 to group sentences together."
),
)
breakpoint_percentile_threshold: int = Field(
default=95,
description=(
"The percentile of cosine dissimilarity that must be exceeded between a "
"group of sentences and the next to form a node. The smaller this "
"number is, the more nodes will be generated"
),
)
@classmethod
def class_name(cls) -> str:
return "SemanticSplitterNodeParser"
@classmethod
def from_defaults(
cls,
embed_model: Optional[BaseEmbedding] = None,
breakpoint_percentile_threshold: Optional[int] = 95,
buffer_size: Optional[int] = 1,
sentence_splitter: Optional[Callable[[str], List[str]]] = None,
original_text_metadata_key: str = DEFAULT_OG_TEXT_METADATA_KEY,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
id_func: Optional[Callable[[int, Document], str]] = None,
) -> "SemanticSplitterNodeParser":
callback_manager = callback_manager or CallbackManager([])
sentence_splitter = sentence_splitter or split_by_sentence_tokenizer()
if embed_model is None:
try:
from llama_index.embeddings.openai import (
OpenAIEmbedding,
) # pants: no-infer-dep
embed_model = embed_model or OpenAIEmbedding()
except ImportError:
raise ImportError(
"`llama-index-embeddings-openai` package not found, "
"please run `pip install llama-index-embeddings-openai`"
)
id_func = id_func or default_id_func
return cls(
embed_model=embed_model,
breakpoint_percentile_threshold=breakpoint_percentile_threshold,
buffer_size=buffer_size,
sentence_splitter=sentence_splitter,
original_text_metadata_key=original_text_metadata_key,
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
id_func=id_func,
)
def _parse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> List[BaseNode]:
"""Parse document into nodes."""
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
nodes = self.build_semantic_nodes_from_documents([node], show_progress)
all_nodes.extend(nodes)
return all_nodes
async def _aparse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> List[BaseNode]:
"""Asynchronously parse document into nodes."""
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
nodes = await self.abuild_semantic_nodes_from_documents(
[node], show_progress
)
all_nodes.extend(nodes)
return all_nodes
def build_semantic_nodes_from_documents(
self,
documents: Sequence[Document],
show_progress: bool = False,
) -> List[BaseNode]:
"""Build window nodes from documents."""
all_nodes: List[BaseNode] = []
for doc in documents:
text = doc.text
text_splits = self.sentence_splitter(text)
sentences = self._build_sentence_groups(text_splits)
combined_sentence_embeddings = self.embed_model.get_text_embedding_batch(
[s["combined_sentence"] for s in sentences],
show_progress=show_progress,
)
for i, embedding in enumerate(combined_sentence_embeddings):
sentences[i]["combined_sentence_embedding"] = embedding
distances = self._calculate_distances_between_sentence_groups(sentences)
chunks = self._build_node_chunks(sentences, distances)
nodes = build_nodes_from_splits(
chunks,
doc,
id_func=self.id_func,
)
all_nodes.extend(nodes)
return all_nodes
async def abuild_semantic_nodes_from_documents(
self,
documents: Sequence[Document],
show_progress: bool = False,
) -> List[BaseNode]:
"""Asynchronously build window nodes from documents."""
all_nodes: List[BaseNode] = []
for doc in documents:
text = doc.text
text_splits = self.sentence_splitter(text)
sentences = self._build_sentence_groups(text_splits)
combined_sentence_embeddings = (
await self.embed_model.aget_text_embedding_batch(
[s["combined_sentence"] for s in sentences],
show_progress=show_progress,
)
)
for i, embedding in enumerate(combined_sentence_embeddings):
sentences[i]["combined_sentence_embedding"] = embedding
distances = self._calculate_distances_between_sentence_groups(sentences)
chunks = self._build_node_chunks(sentences, distances)
nodes = build_nodes_from_splits(
chunks,
doc,
id_func=self.id_func,
)
all_nodes.extend(nodes)
return all_nodes
def _build_sentence_groups(
self, text_splits: List[str]
) -> List[SentenceCombination]:
sentences: List[SentenceCombination] = [
{
"sentence": x,
"index": i,
"combined_sentence": "",
"combined_sentence_embedding": [],
}
for i, x in enumerate(text_splits)
]
# Group sentences and calculate embeddings for sentence groups
for i in range(len(sentences)):
combined_sentence = ""
for j in range(i - self.buffer_size, i):
if j >= 0:
combined_sentence += sentences[j]["sentence"]
combined_sentence += sentences[i]["sentence"]
for j in range(i + 1, i + 1 + self.buffer_size):
if j < len(sentences):
combined_sentence += sentences[j]["sentence"]
sentences[i]["combined_sentence"] = combined_sentence
return sentences
def _calculate_distances_between_sentence_groups(
self, sentences: List[SentenceCombination]
) -> List[float]:
distances = []
for i in range(len(sentences) - 1):
embedding_current = sentences[i]["combined_sentence_embedding"]
embedding_next = sentences[i + 1]["combined_sentence_embedding"]
similarity = self.embed_model.similarity(embedding_current, embedding_next)
distance = 1 - similarity
distances.append(distance)
return distances
def _build_node_chunks(
self, sentences: List[SentenceCombination], distances: List[float]
) -> List[str]:
chunks = []
if len(distances) > 0:
breakpoint_distance_threshold = np.percentile(
distances, self.breakpoint_percentile_threshold
)
indices_above_threshold = [
i for i, x in enumerate(distances) if x > breakpoint_distance_threshold
]
# Chunk sentences into semantic groups based on percentile breakpoints
start_index = 0
for index in indices_above_threshold:
group = sentences[start_index : index + 1]
combined_text = "".join([d["sentence"] for d in group])
chunks.append(combined_text)
start_index = index + 1
if start_index < len(sentences):
combined_text = "".join(
[d["sentence"] for d in sentences[start_index:]]
)
chunks.append(combined_text)
else:
# If, for some reason we didn't get any distances (i.e. very, very small documents) just
# treat the whole document as a single node
chunks = [" ".join([s["sentence"] for s in sentences])]
return chunks
| SemanticSplitterNodeParser |
python | ethereum__web3.py | web3/geth.py | {
"start": 3325,
"end": 3432
} | class ____(Module):
admin: GethAdmin
txpool: GethTxPool
debug: GethDebug
# --- async --- #
| Geth |
python | getsentry__sentry | src/sentry/sentry_metrics/querying/data/query.py | {
"start": 2851,
"end": 3357
} | class ____:
"""
Represents a wrapper around the results of a list of MQLQuery(s) which exposes useful methods to run on the query
results.
"""
results: list[QueryResult]
def apply_transformer(
self, transformer: QueryResultsTransformer[QueryTransformerResult]
) -> QueryTransformerResult:
"""
Applies a transformer on the `results` and returns the value of the transformation.
"""
return transformer.transform(self.results)
| MQLQueriesResult |
python | encode__httpx | httpx/_auth.py | {
"start": 3191,
"end": 3600
} | class ____(Auth):
"""
Allows the 'auth' argument to be passed as a simple callable function,
that takes the request, and returns a new, modified request.
"""
def __init__(self, func: typing.Callable[[Request], Request]) -> None:
self._func = func
def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:
yield self._func(request)
| FunctionAuth |
python | anthropics__anthropic-sdk-python | src/anthropic/types/redacted_thinking_block_param.py | {
"start": 226,
"end": 358
} | class ____(TypedDict, total=False):
data: Required[str]
type: Required[Literal["redacted_thinking"]]
| RedactedThinkingBlockParam |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/mutable.py | {
"start": 33920,
"end": 37048
} | class ____(Mutable, Set[_T]):
"""A set type that implements :class:`.Mutable`.
The :class:`.MutableSet` object implements a set that will
emit change events to the underlying mapping when the contents of
the set are altered, including when values are added or removed.
Note that :class:`.MutableSet` does **not** apply mutable tracking to the
*values themselves* inside the set. Therefore it is not a sufficient
solution for the use case of tracking deep changes to a *recursive*
mutable structure. To support this use case,
build a subclass of :class:`.MutableSet` that provides appropriate
coercion to the values placed in the dictionary so that they too are
"mutable", and emit events up to their parent structure.
.. seealso::
:class:`.MutableDict`
:class:`.MutableList`
"""
def update(self, *arg: Iterable[_T]) -> None:
set.update(self, *arg)
self.changed()
def intersection_update(self, *arg: Iterable[Any]) -> None:
set.intersection_update(self, *arg)
self.changed()
def difference_update(self, *arg: Iterable[Any]) -> None:
set.difference_update(self, *arg)
self.changed()
def symmetric_difference_update(self, *arg: Iterable[_T]) -> None:
set.symmetric_difference_update(self, *arg)
self.changed()
def __ior__(self, other: AbstractSet[_T]) -> MutableSet[_T]: # type: ignore[override,misc] # noqa: E501
self.update(other)
return self
def __iand__(self, other: AbstractSet[object]) -> MutableSet[_T]:
self.intersection_update(other)
return self
def __ixor__(self, other: AbstractSet[_T]) -> MutableSet[_T]: # type: ignore[override,misc] # noqa: E501
self.symmetric_difference_update(other)
return self
def __isub__(self, other: AbstractSet[object]) -> MutableSet[_T]: # type: ignore[misc] # noqa: E501
self.difference_update(other)
return self
def add(self, elem: _T) -> None:
set.add(self, elem)
self.changed()
def remove(self, elem: _T) -> None:
set.remove(self, elem)
self.changed()
def discard(self, elem: _T) -> None:
set.discard(self, elem)
self.changed()
def pop(self, *arg: Any) -> _T:
result = set.pop(self, *arg)
self.changed()
return result
def clear(self) -> None:
set.clear(self)
self.changed()
@classmethod
def coerce(cls, index: str, value: Any) -> Optional[MutableSet[_T]]:
"""Convert plain set to instance of this class."""
if not isinstance(value, cls):
if isinstance(value, set):
return cls(value)
return Mutable.coerce(index, value)
else:
return value
def __getstate__(self) -> Set[_T]:
return set(self)
def __setstate__(self, state: Iterable[_T]) -> None:
self.update(state)
def __reduce_ex__(
self, proto: SupportsIndex
) -> Tuple[type, Tuple[List[int]]]:
return (self.__class__, (list(self),))
| MutableSet |
python | kamyu104__LeetCode-Solutions | Python/k-th-largest-perfect-subtree-size-in-binary-tree.py | {
"start": 2408,
"end": 4103
} | class ____(object):
def kthLargestPerfectSubtree(self, root, k):
"""
:type root: Optional[TreeNode]
:type k: int
:rtype: int
"""
def nth_element(nums, left, n, right, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target):
i = left
while i <= right:
if compare(nums[i], target):
nums[i], nums[left] = nums[left], nums[i]
left += 1
i += 1
elif compare(target, nums[i]):
nums[i], nums[right] = nums[right], nums[i]
right -= 1
else:
i += 1
return left, right
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx])
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
def dfs(curr):
if not curr:
result.append(0)
return
dfs(curr.left)
left = result[-1]
dfs(curr.right)
right = result[-1]
result.append(left+right+1 if left == right != -1 else -1)
result = []
dfs(root)
nth_element(result, 0, k-1, len(result)-1, lambda a, b: a > b)
return result[k-1] if k-1 < len(result) and result[k-1] > 0 else -1
| Solution2 |
python | pytorch__pytorch | test/quantization/core/test_quantized_module.py | {
"start": 1558,
"end": 59023
} | class ____(QuantizationTestCase):
def test_relu(self):
relu_module = nn.ReLU()
relu6_module = nnq.ReLU6()
x = torch.arange(-10, 10, dtype=torch.float)
y_ref = torch.relu(x)
y6_ref = torch.nn.modules.ReLU6()(x)
qx = torch.quantize_per_tensor(x, 1.0, 0, dtype=torch.qint32)
qy = relu_module(qx)
qy6 = relu6_module(qx)
self.assertEqual(y_ref, qy.dequantize(),
msg="ReLU module API failed")
self.assertEqual(y6_ref, qy6.dequantize(),
msg="ReLU6 module API failed")
@override_qengines
def test_linear(self):
"""test API functionality for nn.quantized.linear"""
options = itertools.product(
[1, 5],
[16, 32],
[4, 8],
[True, False],
[True, False])
for (batch_size, in_features, out_features, use_bias, per_channel) in options:
self._test_linear_api_impl(
nnq.Linear, 'QuantizedLinear', torch.ops.quantized.linear, batch_size,
in_features, out_features, use_bias, per_channel)
@override_qengines
def test_linear_relu(self):
"""test API functionality for nn.intrinsic.quantized.linear_relu"""
options = itertools.product(
[1, 5],
[16, 32],
[4, 8],
[True, False],
[True, False])
for (batch_size, in_features, out_features, use_bias, per_channel) in options:
self._test_linear_api_impl(
nniq.LinearReLU, 'QuantizedLinearReLU', torch.ops.quantized.linear_relu,
batch_size, in_features, out_features, use_bias, per_channel)
def _test_linear_api_impl(self, qlinear_module, module_name, qlinear_op,
batch_size, in_features, out_features, use_bias,
per_channel, **post_ops_kwargs):
if torch.backends.quantized.engine == 'qnnpack':
per_channel = False
W = torch.rand(out_features, in_features).float()
if per_channel:
scale_tensor = torch.ones(out_features, dtype=torch.double)
zero_point_tensor = torch.zeros(out_features, dtype=torch.long)
for i in range(len(scale_tensor)):
scale_tensor[i] = (i + 1.0) / 255.0
W_q = torch.quantize_per_channel(W, scales=scale_tensor,
zero_points=zero_point_tensor,
axis=0, dtype=torch.qint8)
else:
# ONEDNN only supports symmetric quantization of weight
W_zp = 0 if qengine_is_onednn() else 4
W_q = torch.quantize_per_tensor(W, 0.1, W_zp, torch.qint8)
X = torch.rand(batch_size, in_features).float()
X_q = torch.quantize_per_tensor(X, 0.2, 10, torch.quint8)
B = torch.rand(out_features).float() if use_bias else None
scale = 0.5
zero_point = 3
qlinear = qlinear_module(in_features, out_features, **post_ops_kwargs)
qlinear_copy = copy.deepcopy(qlinear)
# set random quantized weight and bias before test torch scriptable
qlinear_copy.set_weight_bias(W_q, B)
self.checkScriptable(qlinear_copy, [[X_q]], check_save_load=True)
# Run module with default-initialized parameters.
# This tests that the constructor is correct.
qlinear(X_q)
qlinear.set_weight_bias(W_q, B)
# Simple round-trip test to ensure weight()/set_weight() API
self.assertEqual(qlinear.weight(), W_q, atol=1e-5, rtol=0)
# testing packed param implementation
qlinear.scale = float(scale)
qlinear.zero_point = int(zero_point)
Z_q = qlinear(X_q)
# Check if the module implementation matches calling the
# ops directly
W_pack = qlinear._packed_params._packed_params
Z_ref = qlinear_op(X_q, W_pack, scale, zero_point, **post_ops_kwargs)
self.assertEqual(Z_ref, Z_q)
self.assertTrue(module_name in str(qlinear))
# Test serialization of quantized Linear Module using state_dict
model_dict = qlinear.state_dict()
b = io.BytesIO()
torch.save(model_dict, b)
for weights_only in [True, False]:
b.seek(0)
loaded_dict = torch.load(b, weights_only=weights_only)
for key in model_dict:
if isinstance(model_dict[key], torch._C.ScriptObject):
assert isinstance(loaded_dict[key], torch._C.ScriptObject)
w_model, b_model = torch.ops.quantized.linear_unpack(model_dict[key])
w_loaded, b_loaded = torch.ops.quantized.linear_unpack(loaded_dict[key])
self.assertEqual(w_model, w_loaded)
self.assertEqual(b_model, b_loaded)
else:
self.assertEqual(model_dict[key], loaded_dict[key])
loaded_qlinear = qlinear_module(
in_features, out_features, **post_ops_kwargs)
loaded_qlinear.load_state_dict(loaded_dict)
linear_unpack = torch.ops.quantized.linear_unpack
self.assertEqual(linear_unpack(qlinear._packed_params._packed_params),
linear_unpack(loaded_qlinear._packed_params._packed_params))
self.assertEqual(qlinear.scale, loaded_qlinear.scale)
self.assertEqual(qlinear.zero_point, loaded_qlinear.zero_point)
# scripting will add __overloads__ to __dict__, which is why we script a copy
# to be able to do the check in the next line
self.checkScriptable(copy.deepcopy(loaded_qlinear), [[X_q]], check_save_load=True)
self.assertTrue(dir(qlinear) == dir(loaded_qlinear))
self.assertEqual(qlinear._weight_bias(), loaded_qlinear._weight_bias())
self.assertEqual(qlinear._weight_bias(), torch.ops.quantized.linear_unpack(qlinear._packed_params._packed_params))
Z_q2 = loaded_qlinear(X_q)
self.assertEqual(Z_q, Z_q2)
# Test serialization
b = io.BytesIO()
torch.save(qlinear, b)
b.seek(0)
# weights_only=False as this is legacy code that saves the model
loaded = torch.load(b, weights_only=False)
self.assertEqual(qlinear.weight(), loaded.weight())
self.assertEqual(qlinear.scale, loaded.scale)
self.assertEqual(qlinear.zero_point, loaded.zero_point)
# Test torch.package
buffer = io.BytesIO()
with PackageExporter(buffer) as pe:
pe.save_pickle("module", "qlinear.pkl", qlinear)
buffer.seek(0)
importer = PackageImporter(buffer)
loaded_from_package = importer.load_pickle("module", "qlinear.pkl")
self.assertEqual(qlinear.weight(), loaded_from_package.weight())
self.assertEqual(qlinear.scale, loaded_from_package.scale)
self.assertEqual(qlinear.zero_point, loaded_from_package.zero_point)
for name, _ in loaded_from_package.named_modules():
# noop, just make sure attribute "_modules" is restored correctly during torch.package import
assert(name is not None) # noqa: E275
# Test copy and deepcopy
copied_linear = copy.copy(qlinear)
self.assertEqual(copied_linear.bias(), qlinear.bias())
self.assertEqual(copied_linear.scale, qlinear.scale)
self.assertEqual(copied_linear.zero_point,
qlinear.zero_point)
Y_copied = copied_linear(X_q)
np.testing.assert_array_almost_equal(
Z_q.int_repr().numpy(), Y_copied.int_repr().numpy(), decimal=0)
deepcopied_linear = copy.deepcopy(qlinear)
self.assertEqual(deepcopied_linear.bias(), qlinear.bias())
self.assertEqual(deepcopied_linear.scale, qlinear.scale)
self.assertEqual(deepcopied_linear.zero_point,
qlinear.zero_point)
Y_deepcopied = copied_linear(X_q)
np.testing.assert_array_almost_equal(
Z_q.int_repr().numpy(), Y_deepcopied.int_repr().numpy(), decimal=0)
# Test JIT
self.checkScriptable(qlinear, [[X_q]], check_save_load=True)
# Make sure `from_float` works for all linear variants
modules_under_test = [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear]
for mut in modules_under_test:
# Test from_float.
float_linear = mut(in_features, out_features).float()
float_linear.qconfig = torch.ao.quantization.default_qconfig
torch.ao.quantization.prepare(float_linear, inplace=True)
float_linear(X.float())
# Sequential allows swapping using "convert".
quantized_float_linear = torch.nn.Sequential(float_linear)
quantized_float_linear = torch.ao.quantization.convert(quantized_float_linear, inplace=True)
# Smoke test to make sure the module actually runs
quantized_float_linear(X_q)
# Smoke test extra_repr
self.assertTrue('QuantizedLinear' in str(quantized_float_linear))
def test_quant_dequant_api(self):
r = torch.tensor([[1., -1.], [1., -1.]], dtype=torch.float)
scale, zero_point, dtype = 1.0, 2, torch.qint8
# testing Quantize API
qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)
quant_m = nnq.Quantize(scale, zero_point, dtype)
qr2 = quant_m(r)
self.assertEqual(qr, qr2)
# testing Dequantize API
rqr = qr.dequantize()
dequant_m = nnq.DeQuantize()
rqr2 = dequant_m(qr2)
self.assertEqual(rqr, rqr2)
def _test_conv_api_impl(
self, module_name, qconv_module, conv_module, batch_size,
in_channels_per_group, input_feature_map_size, out_channels_per_group,
groups, kernel_size, stride, padding, padding_mode, dilation,
X_scale, X_zero_point, W_scale, W_zero_point, Y_scale, Y_zero_point,
use_bias, post_op, use_channelwise, X2_scale=1.0, X2_zero_point=0):
for i in range(len(kernel_size)):
assume(input_feature_map_size[i] + 2 * padding[i]
>= dilation[i] * (kernel_size[i] - 1) + 1)
in_channels = in_channels_per_group * groups
out_channels = out_channels_per_group * groups
(X, X_q, W, W_q, b) = _make_conv_test_input(
batch_size, in_channels_per_group, input_feature_map_size,
out_channels_per_group, groups, kernel_size, X_scale, X_zero_point,
W_scale, W_zero_point, use_bias, use_channelwise)
example_input = [X, ]
example_input_q = [X_q, ]
if post_op in ["add", "add_relu"]:
X2, X2_q = _make_conv_add_extra_input_tensor(X2_scale, X2_zero_point, conv_module[0](X).size())
example_input = [X, X2]
example_input_q = [X_q, X2_q]
# Make sure the weight shape is correct
self.assertTrue(qconv_module.weight().shape == W_q.shape)
qconv_module.set_weight_bias(W_q, b)
qconv_module.scale = Y_scale
qconv_module.zero_point = Y_zero_point
raw_conv_module = conv_module[0] if post_op in ["relu", "add", "add_relu"] else conv_module
raw_conv_module.weight.data = W
if use_bias:
raw_conv_module.bias.data = b
# Test members
self.assertTrue(module_name == qconv_module._get_name(), module_name + " " + qconv_module._get_name())
self.assertTrue(hasattr(qconv_module, '_packed_params'))
self.assertTrue(hasattr(qconv_module, 'scale'))
self.assertTrue(hasattr(qconv_module, 'zero_point'))
# Test properties
self.assertEqual(W_q, qconv_module.weight())
if use_bias:
self.assertEqual(b, qconv_module.bias())
self.assertEqual(Y_scale, qconv_module.scale)
self.assertEqual(Y_zero_point, qconv_module.zero_point)
# Test forward
Y_exp = conv_module(*example_input)
Y_exp = torch.quantize_per_tensor(
Y_exp, scale=Y_scale, zero_point=Y_zero_point, dtype=torch.quint8)
Y_act = qconv_module(*example_input_q)
# Make sure the results match
# assert_array_almost_equal compares using the following formula:
# abs(desired-actual) < 1.5 * 10**(-decimal)
# (https://numpy.org/doc/stable/reference/generated/numpy.testing.assert_almost_equal.html)
# We use decimal = 0 to ignore off-by-1 differences between reference
# and test. Off-by-1 differences arise due to the order of round and
# zero_point addition operation, i.e., if addition followed by round is
# used by reference and round followed by addition is used by test, the
# results may differ by 1.
# For example, the result of round(2.5) + 1 is 3 while round(2.5 + 1) is
# 4 assuming the rounding mode is round-to-nearest, ties-to-even.
# skip numerics checking for reference module
np.testing.assert_array_almost_equal(
Y_exp.int_repr().numpy(), Y_act.int_repr().numpy(), decimal=0)
# Test serialization of quantized Conv Module using state_dict
model_dict = qconv_module.state_dict()
self.assertEqual(model_dict['weight'], W_q)
if use_bias:
self.assertEqual(model_dict['bias'], b)
bytes_io = io.BytesIO()
torch.save(model_dict, bytes_io)
for weights_only in [True, False]:
bytes_io.seek(0)
loaded_dict = torch.load(bytes_io, weights_only=weights_only)
for key in loaded_dict:
self.assertEqual(model_dict[key], loaded_dict[key])
loaded_qconv_module = type(qconv_module)(
in_channels, out_channels, kernel_size, stride, padding, dilation,
groups, use_bias, padding_mode=padding_mode)
loaded_qconv_module.load_state_dict(loaded_dict)
self.assertTrue(dir(loaded_qconv_module) == dir(qconv_module))
self.assertTrue(module_name == loaded_qconv_module._get_name())
self.assertTrue(hasattr(loaded_qconv_module, '_packed_params'))
self.assertTrue(hasattr(loaded_qconv_module, '_weight_bias'))
self.assertEqual(qconv_module.weight(), loaded_qconv_module.weight())
if use_bias:
self.assertEqual(qconv_module.bias(), loaded_qconv_module.bias())
self.assertEqual(qconv_module.scale, loaded_qconv_module.scale)
self.assertEqual(qconv_module.zero_point,
loaded_qconv_module.zero_point)
Y_loaded = loaded_qconv_module(*example_input_q)
np.testing.assert_array_almost_equal(
Y_exp.int_repr().numpy(), Y_loaded.int_repr().numpy(), decimal=0)
# Test serialization
b = io.BytesIO()
torch.save(qconv_module, b)
b.seek(0)
# weights_only=False as this is legacy code that saves the model
loaded_conv = torch.load(b, weights_only=False)
self.assertEqual(loaded_conv.bias(), qconv_module.bias())
self.assertEqual(loaded_conv.scale, qconv_module.scale)
self.assertEqual(loaded_conv.zero_point,
qconv_module.zero_point)
# Test copy and deepcopy
copied_conv = copy.copy(qconv_module)
self.assertEqual(copied_conv.bias(), qconv_module.bias())
self.assertEqual(copied_conv.scale, qconv_module.scale)
self.assertEqual(copied_conv.zero_point,
qconv_module.zero_point)
Y_copied = copied_conv(*example_input_q)
np.testing.assert_array_almost_equal(
Y_exp.int_repr().numpy(), Y_copied.int_repr().numpy(), decimal=0)
deepcopied_conv = copy.deepcopy(qconv_module)
self.assertEqual(deepcopied_conv.bias(), qconv_module.bias())
self.assertEqual(deepcopied_conv.scale, qconv_module.scale)
self.assertEqual(deepcopied_conv.zero_point,
qconv_module.zero_point)
Y_deepcopied = deepcopied_conv(*example_input_q)
np.testing.assert_array_almost_equal(
Y_exp.int_repr().numpy(), Y_deepcopied.int_repr().numpy(), decimal=0)
# JIT testing
self.checkScriptable(
qconv_module, [example_input_q],
check_save_load=True)
class _FusedModule_two_input_args(torch.ao.nn.intrinsic._FusedModule):
# Help Module for ConvAdd2d since torch.ao.nn.intrinsic._FusedModule only support one input arg
def forward(self, x1, x2):
input = self[0](x1, x2)
return input
# Test from_float
fused_conv_module = _FusedModule_two_input_args(conv_module) \
if post_op in ["add", "add_relu"] else torch.ao.nn.intrinsic._FusedModule(conv_module)
fused_conv_module.qconfig = torch.ao.quantization.default_qconfig
torch.ao.quantization.prepare(fused_conv_module, inplace=True)
example_input[0] = example_input[0].float()
fused_conv_module(*example_input)
converted_qconv_module = fused_conv_module
reference_mapping = get_default_static_quant_module_mappings()
reference_mapping[type(conv_module)] = type(qconv_module)
torch.ao.quantization.convert(converted_qconv_module, mapping=reference_mapping, inplace=True)
# Smoke test to make sure the module actually runs
if use_bias:
self.assertEqual(conv_module[0].bias if (post_op in ["relu", "add", "add_relu"]) else conv_module.bias,
converted_qconv_module[0].bias())
# Smoke test extra_repr
self.assertTrue(module_name == converted_qconv_module[0]._get_name())
@override_qengines
def test_conv1d_api(self):
options = itertools.product(
["zeros", "reflect"], # pad_mode
[True, False], # use_bias
[True, False], # use_channelwise
)
for pad_mode, use_bias, use_channelwise in options:
if torch.backends.quantized.engine == "qnnpack":
use_channelwise = False
batch_size = 2
in_channels_per_group = 2
length = 8
out_channels_per_group = 2
groups = 3
kernel = 3
stride = 2
pad = 1
dilation = 1
# Tests the correctness of the conv2d module.
in_channels = in_channels_per_group * groups
out_channels = out_channels_per_group * groups
input_feature_map_size = (length,)
kernel_size = (kernel, )
stride = (stride, )
pad = (pad, )
dilation = (dilation, )
X_scale = 1.3
X_zero_point = 2
W_scale = [0.5]
W_zero_point = [0] if qengine_is_onednn() else [3]
Y_scale = 5.0
Y_zero_point = 4
if torch.backends.quantized.engine == 'qnnpack':
use_channelwise = False
qconv_cls = nnq.Conv1d
module_name = "QuantizedConv1d"
qconv_module = qconv_cls(
in_channels, out_channels, kernel, stride, pad,
dilation, groups, use_bias, padding_mode=pad_mode
)
conv_module = nn.Conv1d(
in_channels, out_channels, kernel, stride, pad,
dilation, groups, use_bias, padding_mode=pad_mode)
conv_module = conv_module.float()
self._test_conv_api_impl(
module_name, qconv_module, conv_module, batch_size,
in_channels_per_group, input_feature_map_size,
out_channels_per_group, groups, kernel_size, stride, pad, pad_mode,
dilation, X_scale, X_zero_point, W_scale, W_zero_point, Y_scale,
Y_zero_point, use_bias, "none", use_channelwise)
@override_qengines
def test_conv1d_relu_api(self):
options = itertools.product(
["zeros", "reflect"], # pad_mode
[True, False], # use_bias
[True, False], # use_channelwise
)
batch_size = 2
in_channels_per_group = 2
length = 8
out_channels_per_group = 2
groups = 3
kernel = 3
stride = 2
pad = 1
dilation = 1
# Tests the correctness of the conv2d module.
in_channels = in_channels_per_group * groups
out_channels = out_channels_per_group * groups
input_feature_map_size = (length,)
kernel_size = (kernel, )
stride = (stride, )
pad = (pad, )
dilation = (dilation, )
X_scale = 1.3
X_zero_point = 2
W_scale = [0.5]
W_zero_point = [0] if qengine_is_onednn() else [3]
Y_scale = 5.0
Y_zero_point = 4
qconv_cls = nniq.ConvReLU1d
module_name = "QuantizedConvReLU1d"
for pad_mode, use_bias, use_channelwise in options:
if torch.backends.quantized.engine == 'qnnpack':
use_channelwise = False
qconv_module = qconv_cls(
in_channels, out_channels, kernel, stride, pad,
dilation, groups, use_bias, padding_mode=pad_mode
)
conv_module = nn.Conv1d(
in_channels, out_channels, kernel, stride, pad,
dilation, groups, use_bias, padding_mode=pad_mode)
relu_module = nn.ReLU()
conv_module = nni.ConvReLU1d(conv_module, relu_module)
conv_module = conv_module.float()
self._test_conv_api_impl(
module_name, qconv_module, conv_module, batch_size,
in_channels_per_group, input_feature_map_size,
out_channels_per_group, groups, kernel_size, stride, pad, pad_mode,
dilation, X_scale, X_zero_point, W_scale, W_zero_point, Y_scale,
Y_zero_point, use_bias, "relu", use_channelwise)
@override_qengines
def test_conv2d_api(self):
options = itertools.product(
["zeros", "reflect"], # pad_mode
[True, False], # use_bias
[True, False], # use_channelwise
)
for pad_mode, use_bias, use_channelwise in options:
if torch.backends.quantized.engine == "qnnpack":
use_channelwise = False
batch_size = 2
in_channels_per_group = 2
H = 8
W = 8
out_channels_per_group = 2
groups = 3
kernel_h = 3
kernel_w = 3
stride_h = 2
stride_w = 2
pad_h = 1
pad_w = 1
dilation = 1
# Tests the correctness of the conv2d module.
in_channels = in_channels_per_group * groups
out_channels = out_channels_per_group * groups
input_feature_map_size = (H, W)
kernel_size = (kernel_h, kernel_w)
stride = (stride_h, stride_w)
padding = (pad_h, pad_w)
dilation = (dilation, dilation)
X_scale = 1.3
X_zero_point = 2
W_scale = [0.5]
W_zero_point = [0] if qengine_is_onednn() else [3]
Y_scale = 5.0
Y_zero_point = 4
qconv_cls = nnq.Conv2d
module_name = "QuantizedConv2d"
qconv_module = qconv_cls(
in_channels, out_channels, kernel_size, stride, padding,
dilation, groups, use_bias, padding_mode=pad_mode
)
conv_module = nn.Conv2d(
in_channels, out_channels, kernel_size, stride, padding,
dilation, groups, use_bias, padding_mode=pad_mode)
conv_module = conv_module.float()
self._test_conv_api_impl(
module_name, qconv_module, conv_module, batch_size,
in_channels_per_group, input_feature_map_size,
out_channels_per_group, groups, kernel_size, stride, padding,
pad_mode, dilation, X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, "none", use_channelwise)
    @override_qengines
    def test_conv2d_relu_api(self):
        """Test the nniq.ConvReLU2d module API against float Conv2d + ReLU.

        Sweeps padding mode, bias usage and per-channel quantization; the
        numerical comparison is delegated to self._test_conv_api_impl.
        """
        options = itertools.product(
            ["zeros", "reflect"],  # pad_mode
            [True, False],  # use_bias
            [True, False],  # use_channelwise
        )
        batch_size = 2
        in_channels_per_group = 2
        H = 8
        W = 8
        out_channels_per_group = 2
        groups = 3
        kernel_h = 3
        kernel_w = 3
        stride_h = 2
        stride_w = 2
        pad_h = 1
        pad_w = 1
        dilation = 1
        # Tests the correctness of the conv2d module.
        in_channels = in_channels_per_group * groups
        out_channels = out_channels_per_group * groups
        input_feature_map_size = (H, W)
        kernel_size = (kernel_h, kernel_w)
        stride = (stride_h, stride_w)
        padding = (pad_h, pad_w)
        dilation = (dilation, dilation)
        X_scale = 1.3
        X_zero_point = 2
        W_scale = [0.5]
        # onednn requires a zero weight zero-point.
        W_zero_point = [0] if qengine_is_onednn() else [3]
        Y_scale = 5.0
        Y_zero_point = 4
        qconv_cls = nniq.ConvReLU2d
        module_name = "QuantizedConvReLU2d"
        for pad_mode, use_bias, use_channelwise in options:
            if torch.backends.quantized.engine == "qnnpack":
                # qnnpack does not support per-channel quantized weights here.
                use_channelwise = False
            qconv_module = qconv_cls(
                in_channels, out_channels, kernel_size, stride, padding,
                dilation, groups, use_bias, padding_mode=pad_mode
            )
            conv_module = nn.Conv2d(
                in_channels, out_channels, kernel_size, stride, padding,
                dilation, groups, use_bias, padding_mode=pad_mode)
            relu_module = nn.ReLU()
            conv_module = nni.ConvReLU2d(conv_module, relu_module)
            conv_module = conv_module.float()
            self._test_conv_api_impl(
                module_name, qconv_module, conv_module, batch_size,
                in_channels_per_group, input_feature_map_size,
                out_channels_per_group, groups, kernel_size, stride, padding,
                pad_mode, dilation, X_scale, X_zero_point, W_scale, W_zero_point,
                Y_scale, Y_zero_point, use_bias, "relu", use_channelwise)
    @skipIfNoFBGEMM
    def test_conv3d_api(self):
        """Test the nnq.Conv3d module API against the float nn.Conv3d reference.

        Sweeps bias usage and per-channel quantization under the fbgemm
        engine; numerical comparison is delegated to self._test_conv_api_impl.
        """
        options = itertools.product(
            [True, False],  # use_bias
            [True, False],  # use_channelwise
        )
        batch_size = 2
        in_channels_per_group = 2
        H = 8
        W = 8
        D = 8
        out_channels_per_group = 2
        groups = 3
        kernel_h = 3
        kernel_w = 3
        kernel_d = 3
        stride_h = 2
        stride_w = 2
        stride_d = 2
        pad_mode = "zeros"  # 3d doesn't support reflect padding
        pad_h = 1
        pad_w = 1
        pad_d = 1
        dilation = 1
        # Tests the correctness of the conv3d module.
        in_channels = in_channels_per_group * groups
        out_channels = out_channels_per_group * groups
        input_feature_map_size = (D, H, W)
        kernel_size = (kernel_d, kernel_h, kernel_w)
        stride = (stride_d, stride_h, stride_w)
        padding = (pad_d, pad_h, pad_w)
        dilation = (dilation, dilation, dilation)
        X_scale = 1.3
        X_zero_point = 2
        W_scale = [0.5]
        # onednn requires a zero weight zero-point.
        W_zero_point = [0] if qengine_is_onednn() else [3]
        Y_scale = 5.0
        Y_zero_point = 4
        qconv_cls = nnq.Conv3d
        module_name = "QuantizedConv3d"
        for use_bias, use_channelwise in options:
            if torch.backends.quantized.engine == "qnnpack":
                # qnnpack does not support per-channel quantized weights here.
                use_channelwise = False
            with override_quantized_engine('fbgemm'):
                qconv_module = qconv_cls(
                    in_channels, out_channels, kernel_size, stride, padding,
                    dilation, groups, use_bias, padding_mode=pad_mode
                )
                conv_module = nn.Conv3d(
                    in_channels, out_channels, kernel_size, stride, padding,
                    dilation, groups, use_bias, padding_mode=pad_mode)
                conv_module = conv_module.float()
                self._test_conv_api_impl(
                    module_name, qconv_module, conv_module, batch_size,
                    in_channels_per_group, input_feature_map_size,
                    out_channels_per_group, groups, kernel_size, stride, padding,
                    pad_mode, dilation, X_scale, X_zero_point, W_scale,
                    W_zero_point, Y_scale, Y_zero_point, use_bias, "none",
                    use_channelwise)
    @skipIfNoFBGEMM
    def test_conv3d_relu_api(self):
        """Test the nniq.ConvReLU3d module API against float Conv3d + ReLU.

        Sweeps bias usage and per-channel quantization under the fbgemm
        engine; numerical comparison is delegated to self._test_conv_api_impl.
        """
        options = itertools.product(
            [True, False],  # use_bias
            [True, False],  # use_channelwise
        )
        batch_size = 2
        in_channels_per_group = 2
        H = 8
        W = 8
        D = 8
        out_channels_per_group = 2
        groups = 3
        kernel_h = 3
        kernel_w = 3
        kernel_d = 3
        stride_h = 2
        stride_w = 2
        stride_d = 2
        pad_mode = "zeros"  # 3d doesn't support reflect padding
        pad_h = 1
        pad_w = 1
        pad_d = 1
        dilation = 1
        # Tests the correctness of the conv3d module.
        in_channels = in_channels_per_group * groups
        out_channels = out_channels_per_group * groups
        input_feature_map_size = (D, H, W)
        kernel_size = (kernel_d, kernel_h, kernel_w)
        stride = (stride_d, stride_h, stride_w)
        padding = (pad_d, pad_h, pad_w)
        dilation = (dilation, dilation, dilation)
        X_scale = 1.3
        X_zero_point = 2
        W_scale = [0.5]
        # onednn requires a zero weight zero-point.
        W_zero_point = [0] if qengine_is_onednn() else [3]
        Y_scale = 5.0
        Y_zero_point = 4
        qconv_cls = nniq.ConvReLU3d
        module_name = "QuantizedConvReLU3d"
        for use_bias, use_channelwise in options:
            if torch.backends.quantized.engine == "qnnpack":
                # qnnpack does not support per-channel quantized weights here.
                use_channelwise = False
            with override_quantized_engine('fbgemm'):
                qconv_module = qconv_cls(
                    in_channels, out_channels, kernel_size, stride, padding,
                    dilation, groups, use_bias, padding_mode=pad_mode
                )
                conv_module = nn.Conv3d(
                    in_channels, out_channels, kernel_size, stride, padding,
                    dilation, groups, use_bias, padding_mode=pad_mode)
                relu_module = nn.ReLU()
                conv_module = nni.ConvReLU3d(conv_module, relu_module)
                conv_module = conv_module.float()
                self._test_conv_api_impl(
                    module_name, qconv_module, conv_module, batch_size,
                    in_channels_per_group, input_feature_map_size,
                    out_channels_per_group, groups, kernel_size, stride, padding,
                    pad_mode, dilation, X_scale, X_zero_point, W_scale,
                    W_zero_point, Y_scale, Y_zero_point, use_bias, "relu",
                    use_channelwise)
    @skipIfNoONEDNN
    def test_conv2d_add(self):
        """test API functionality for nn.intrinsic.quantized.ConvAdd2d

        Fuses conv + elementwise add under the onednn engine; the numerical
        comparison is delegated to self._test_conv_api_impl, which receives a
        second set of quantization params (X2_*) for the added input.
        """
        with override_quantized_engine('onednn'):
            options = itertools.product(
                ["zeros", "reflect"],  # pad_mode
                [True, False],  # use_bias
                [True, False],  # use_channelwise
            )
            batch_size = 2
            in_channels_per_group = 2
            H = 8
            W = 8
            out_channels_per_group = 2
            groups = 3
            kernel_h = 3
            kernel_w = 3
            stride_h = 2
            stride_w = 2
            pad_h = 1
            pad_w = 1
            dilation = 1
            # Tests the correctness of the conv2d module.
            in_channels = in_channels_per_group * groups
            out_channels = out_channels_per_group * groups
            input_feature_map_size = (H, W)
            kernel_size = (kernel_h, kernel_w)
            stride = (stride_h, stride_w)
            padding = (pad_h, pad_w)
            dilation = (dilation, dilation)
            X_scale = 1.3
            X_zero_point = 2
            # Quantization parameters for the second (added) input tensor.
            X2_scale = 1.2
            X2_zero_point = 1
            W_scale = [0.5]
            # onednn requires a zero weight zero-point.
            W_zero_point = [0] if qengine_is_onednn() else [3]
            Y_scale = 5.0
            Y_zero_point = 4
            qconv_cls = nniq.ConvAdd2d
            module_name = "QuantizedConvAdd2d"
            for pad_mode, use_bias, use_channelwise in options:
                qconv_module = qconv_cls(
                    in_channels, out_channels, kernel_size, stride, padding,
                    dilation, groups, use_bias, padding_mode=pad_mode
                )
                conv_module = nn.Conv2d(
                    in_channels, out_channels, kernel_size, stride, padding,
                    dilation, groups, use_bias, padding_mode=pad_mode)
                conv_module = torch.ao.nn.intrinsic.ConvAdd2d(conv_module, torch.add)
                conv_module = conv_module.float()
                self._test_conv_api_impl(
                    module_name, qconv_module, conv_module, batch_size,
                    in_channels_per_group, input_feature_map_size,
                    out_channels_per_group, groups, kernel_size, stride, padding,
                    pad_mode, dilation, X_scale, X_zero_point, W_scale, W_zero_point,
                    Y_scale, Y_zero_point, use_bias, "add", use_channelwise, X2_scale, X2_zero_point)
    @skipIfNoONEDNN
    def test_conv2d_add_relu(self):
        """test API functionality for nn.intrinsic.quantized.ConvAddReLU2d

        Fuses conv + elementwise add + ReLU under the onednn engine; the
        numerical comparison is delegated to self._test_conv_api_impl, which
        receives a second set of quantization params (X2_*) for the added input.
        """
        with override_quantized_engine('onednn'):
            options = itertools.product(
                ["zeros", "reflect"],  # pad_mode
                [True, False],  # use_bias
                [True, False],  # use_channelwise
            )
            batch_size = 2
            in_channels_per_group = 2
            H = 8
            W = 8
            out_channels_per_group = 2
            groups = 3
            kernel_h = 3
            kernel_w = 3
            stride_h = 2
            stride_w = 2
            pad_h = 1
            pad_w = 1
            dilation = 1
            # Tests the correctness of the conv2d module.
            in_channels = in_channels_per_group * groups
            out_channels = out_channels_per_group * groups
            input_feature_map_size = (H, W)
            kernel_size = (kernel_h, kernel_w)
            stride = (stride_h, stride_w)
            padding = (pad_h, pad_w)
            dilation = (dilation, dilation)
            X_scale = 1.3
            X_zero_point = 2
            # Quantization parameters for the second (added) input tensor.
            X2_scale = 1.2
            X2_zero_point = 1
            W_scale = [0.5]
            # onednn requires a zero weight zero-point.
            W_zero_point = [0] if qengine_is_onednn() else [3]
            Y_scale = 5.0
            Y_zero_point = 4
            qconv_cls = nniq.ConvAddReLU2d
            module_name = "QuantizedConvAddReLU2d"
            for pad_mode, use_bias, use_channelwise in options:
                qconv_module = qconv_cls(
                    in_channels, out_channels, kernel_size, stride, padding,
                    dilation, groups, use_bias, padding_mode=pad_mode
                )
                conv_module = nn.Conv2d(
                    in_channels, out_channels, kernel_size, stride, padding,
                    dilation, groups, use_bias, padding_mode=pad_mode)
                conv_module = torch.ao.nn.intrinsic.ConvAddReLU2d(conv_module, torch.add, nn.ReLU())
                conv_module = conv_module.float()
                self._test_conv_api_impl(
                    module_name, qconv_module, conv_module, batch_size,
                    in_channels_per_group, input_feature_map_size,
                    out_channels_per_group, groups, kernel_size, stride, padding,
                    pad_mode, dilation, X_scale, X_zero_point, W_scale, W_zero_point,
                    Y_scale, Y_zero_point, use_bias, "add_relu", use_channelwise, X2_scale, X2_zero_point)
def test_pool_api(self):
"""Tests the correctness of the pool module.
The correctness is defined against the functional implementation.
"""
N, C, H, W = 10, 10, 10, 3
kwargs = {
'kernel_size': 2,
'stride': None,
'padding': 0,
'dilation': 1
}
scale, zero_point = 1.0 / 255, 128
X = torch.randn(N, C, H, W, dtype=torch.float32)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
qX_expect = torch.nn.functional.max_pool2d(qX, **kwargs)
pool_under_test = torch.ao.nn.quantized.MaxPool2d(**kwargs)
qX_hat = pool_under_test(qX)
self.assertEqual(qX_expect, qX_hat)
# JIT Testing
self.checkScriptable(pool_under_test, [[X]])
def test_dropout(self):
"""Tests the correctness of the dropout module.
The correctness is defined against the functional implementation.
"""
x = torch.randn((2, 4, 6, 8), dtype=torch.float)
float_mod = torch.nn.Dropout(p=0.5)
float_mod.training = False
y_ref = float_mod(x)
quant_ref = torch.quantize_per_tensor(y_ref, 1.0, 0, dtype=torch.quint8)
quant_mod = nnq.Dropout(p=0.5)
qx = torch.quantize_per_tensor(x, 1.0, 0, dtype=torch.quint8)
qy = quant_mod(qx)
self.assertEqual(quant_ref.int_repr().numpy(), qy.int_repr().numpy(),
msg="Dropout module API failed")
def _test_dropout_serialization(self, get_model, data1, data2):
m1 = get_model()
m1.qconfig = torch.ao.quantization.default_qconfig
mp1 = torch.ao.quantization.prepare(m1)
mp1(data1)
mq1 = torch.ao.quantization.convert(mp1)
ref1 = mq1(data2)
m2 = get_model()
m2.qconfig = torch.ao.quantization.default_qconfig
mp2 = torch.ao.quantization.prepare(m2)
mq2 = torch.ao.quantization.convert(mp2)
mq2.load_state_dict(mq1.state_dict())
ref2 = mq2(data2)
self.assertTrue(torch.allclose(ref1, ref2))
def test_dropout_serialization(self):
data1 = torch.randn(2, 4, 6, 8)
data2 = torch.randn(2, 4, 6, 8)
def _get_model():
return nn.Sequential(
torch.ao.quantization.QuantStub(),
nn.Dropout(p=0.5),
torch.ao.quantization.DeQuantStub()
).eval()
self._test_dropout_serialization(_get_model, data1, data2)
def test_batch_norm2d(self):
"""Tests the correctness of the batchnorm2d module.
The correctness is defined against the functional implementation.
"""
x = torch.randn((2, 4, 6, 8), dtype=torch.float)
float_mod = torch.nn.BatchNorm2d(4)
float_mod.training = False
y_ref = float_mod(x)
quant_ref = torch.quantize_per_tensor(y_ref, 1.0, 0, dtype=torch.quint8)
quant_mod = nnq.BatchNorm2d(4)
qx = torch.quantize_per_tensor(x, 1.0, 0, dtype=torch.quint8)
qy = quant_mod(qx)
self.assertEqual(quant_ref.int_repr().numpy(), qy.int_repr().numpy(),
msg="BatchNorm2d module API failed")
def test_batch_norm3d(self):
"""Tests the correctness of the batchnorm3d module.
The correctness is defined against the functional implementation.
"""
x = torch.randn((2, 4, 6, 8, 10), dtype=torch.float)
float_mod = torch.nn.BatchNorm3d(4)
float_mod.training = False
y_ref = float_mod(x)
quant_ref = torch.quantize_per_tensor(y_ref, 1.0, 0, dtype=torch.quint8)
quant_mod = nnq.BatchNorm3d(4)
qx = torch.quantize_per_tensor(x, 1.0, 0, dtype=torch.quint8)
qy = quant_mod(qx)
self.assertEqual(quant_ref.int_repr().numpy(), qy.int_repr().numpy(),
msg="BatchNorm3d module API failed")
def _test_batch_norm_serialization(self, get_model, data1, data2):
m1 = get_model()
m1.qconfig = torch.ao.quantization.default_qconfig
mp1 = torch.ao.quantization.prepare(m1)
mp1(data1)
mq1 = torch.ao.quantization.convert(mp1)
ref1 = mq1(data2)
m2 = get_model()
m2.qconfig = torch.ao.quantization.default_qconfig
mp2 = torch.ao.quantization.prepare(m2)
mq2 = torch.ao.quantization.convert(mp2)
mq2.load_state_dict(mq1.state_dict())
ref2 = mq2(data2)
self.assertTrue(torch.allclose(ref1, ref2))
def test_batch_norm2d_serialization(self):
data1 = torch.randn(2, 4, 6, 8)
data2 = torch.randn(2, 4, 6, 8)
def _get_model():
return nn.Sequential(
torch.ao.quantization.QuantStub(),
nn.BatchNorm2d(4),
torch.ao.quantization.DeQuantStub()
).eval()
self._test_batch_norm_serialization(_get_model, data1, data2)
def test_batch_norm3d_serialization(self):
data1 = torch.randn(2, 4, 6, 8, 1)
data2 = torch.randn(2, 4, 6, 8, 1)
def _get_model():
return nn.Sequential(
torch.ao.quantization.QuantStub(),
nn.BatchNorm3d(4),
torch.ao.quantization.DeQuantStub()
).eval()
self._test_batch_norm_serialization(_get_model, data1, data2)
def test_layer_norm(self):
"""Tests the correctness of the layernorm module.
The correctness is defined against the functional implementation.
"""
x_scale = 10.0 / 256
x_zero_point = 0
y_scale = 5.0 / 256
y_zero_point = 127
dims = (1, 4, 8)
X = (torch.randn(dims, dtype=torch.float) - 0.5) * 10
qX = torch.quantize_per_tensor(X, x_scale, x_zero_point, dtype=torch.quint8)
dqX = qX.dequantize()
float_mod = torch.nn.LayerNorm(dqX.size()[1:]).float()
float_mod.weight = torch.nn.Parameter(torch.rand(*dims[1:]))
float_mod.bias = torch.nn.Parameter(torch.rand(*dims[1:]))
dqY_ref = float_mod(dqX)
qY_ref = torch.quantize_per_tensor(
dqY_ref, y_scale, y_zero_point, dtype=torch.quint8)
quant_mod = nnq.LayerNorm(
qX.size()[1:], float_mod.weight, float_mod.bias, y_scale, y_zero_point)
qY = quant_mod(qX)
self.assertEqual(qY_ref.int_repr().numpy(), qY.int_repr().numpy(),
msg=f"LayerNorm module API failed, qY_ref\n{qY_ref} vs qY\n{qY}")
def test_group_norm(self):
"""Tests the correctness of the groupnorm module.
The correctness is defined against the functional implementation.
"""
x_scale = 10.0 / 256
x_zero_point = 0
y_scale = 5.0 / 256
y_zero_point = 127
dims = (1, 4, 8)
X = (torch.randn(dims, dtype=torch.float) - 0.5) * 10
qX = torch.quantize_per_tensor(X, x_scale, x_zero_point, dtype=torch.quint8)
dqX = qX.dequantize()
float_mod = torch.nn.GroupNorm(2, 4).float()
float_mod.weight = torch.nn.Parameter(torch.rand(dims[1]))
float_mod.bias = torch.nn.Parameter(torch.rand(dims[1]))
dqY_ref = float_mod(dqX)
qY_ref = torch.quantize_per_tensor(
dqY_ref, y_scale, y_zero_point, dtype=torch.quint8)
quant_mod = nnq.GroupNorm(
2, 2, float_mod.weight, float_mod.bias, y_scale, y_zero_point)
qY = quant_mod(qX)
self.assertEqual(qY_ref.int_repr().numpy(), qY.int_repr().numpy(),
msg=f"GroupNorm module API failed, qY_ref\n{qY_ref} vs qY\n{qY}")
def test_instance_norm(self):
"""Tests the correctness of the instancenorm{n}d modules.
The correctness is defined against the functional implementation.
"""
x_scale = 10.0 / 256
x_zero_point = 0
y_scale = 5.0 / 256
y_zero_point = 127
dims_to_modules = [
((1, 4, 8), torch.nn.InstanceNorm1d, nnq.InstanceNorm1d),
((1, 4, 8, 1), torch.nn.InstanceNorm2d, nnq.InstanceNorm2d),
((1, 4, 8, 1, 1), torch.nn.InstanceNorm3d, nnq.InstanceNorm3d),
]
for dim_to_modules in dims_to_modules:
dims, float_cls, q_cls = dim_to_modules
X = (torch.randn(dims, dtype=torch.float) - 0.5) * 10
qX = torch.quantize_per_tensor(
X, x_scale, x_zero_point, dtype=torch.quint8)
dqX = qX.dequantize()
float_mod = float_cls(dims[1]).float()
float_mod.weight = torch.nn.Parameter(torch.rand(dims[1]))
float_mod.bias = torch.nn.Parameter(torch.rand(dims[1]))
dqY_ref = float_mod(dqX)
qY_ref = torch.quantize_per_tensor(
dqY_ref, y_scale, y_zero_point, dtype=torch.quint8)
quant_mod = q_cls(
dims[1], float_mod.weight, float_mod.bias, y_scale,
y_zero_point)
qY = quant_mod(qX)
self.assertEqual(
qY_ref.int_repr().numpy(), qY.int_repr().numpy(),
msg=f"InstanceNorm module API failed, qY_ref\n{qY_ref} vs qY\n{qY}")
def _test_activation_module_impl(self, name, float_module_class, quantized_module_class, extra_kwargs):
"""Tests the correctness of the ELU module.
The correctness is defined against the functional implementation.
"""
x_scale = 10.0 / 256
x_zero_point = 0
y_scale = 5.0 / 256
y_zero_point = 127
dims = (1, 4, 8)
X = (torch.randn(dims, dtype=torch.float) - 0.5) * 10
qX = torch.quantize_per_tensor(X, x_scale, x_zero_point, dtype=torch.quint8)
dqX = qX.dequantize()
float_mod = float_module_class(**extra_kwargs).float()
dqY_ref = float_mod(dqX)
qY_ref = torch.quantize_per_tensor(
dqY_ref, y_scale, y_zero_point, dtype=torch.quint8)
quant_mod = quantized_module_class(y_scale, y_zero_point, **extra_kwargs)
qY = quant_mod(qX)
self.assertEqual(qY_ref.int_repr().numpy(), qY.int_repr().numpy(),
msg=f"{name} module API failed, qY_ref\n{qY_ref} vs qY\n{qY}")
def _test_leaky_relu_serialization(self):
scale_original = 10.0 / 256
zero_point_original = 1.0
quant_mod_original = nnq.LeakyReLU(scale_original, zero_point_original)
state_dict = quant_mod_original.state_dict()
scale_new = 5.0 / 256
zero_point_new = 2.0
quant_mod_new = nnq.LeakyReLU(scale_new, zero_point_new)
quant_mod_new.load_state_dict(state_dict)
self.assertEqual(quant_mod_original.scale, quant_mod_new.scale)
self.assertEqual(quant_mod_original.zero_point, quant_mod_new.zero_point)
def test_elu(self):
"""Tests the correctness of the ELU module.
The correctness is defined against the functional implementation.
"""
self._test_activation_module_impl("ELU", nn.ELU, nnq.ELU, {"alpha": 1.5})
def test_leaky_relu(self):
self._test_activation_module_impl("LeakyReLU", nn.LeakyReLU, nnq.LeakyReLU, {"negative_slope": 0.2})
self._test_leaky_relu_serialization()
def test_sigmoid(self):
self._test_activation_module_impl("Sigmoid", nn.Sigmoid, nnq.Sigmoid, {})
def _test_hard_swish_serialization(self):
scale_original = 10.0 / 256
zero_point_original = 1.0
quant_mod_original = nnq.Hardswish(scale_original, zero_point_original)
state_dict = quant_mod_original.state_dict()
scale_new = 5.0 / 256
zero_point_new = 2.0
quant_mod_new = nnq.Hardswish(scale_new, zero_point_new)
quant_mod_new.load_state_dict(state_dict)
self.assertEqual(quant_mod_original.scale, quant_mod_new.scale)
self.assertEqual(quant_mod_original.zero_point, quant_mod_new.zero_point)
def test_hard_swish(self):
self._test_activation_module_impl("Hardswish", nn.Hardswish, nnq.Hardswish, {})
self._test_hard_swish_serialization()
    @given(
        num_embeddings=st.integers(10, 50),
        embedding_dim=st.integers(5, 50).filter(lambda x: x % 4 == 0),
        set_qconfig=st.booleans(),
    )
    @skipIfNoFBGEMM
    def test_embedding_api(self, num_embeddings, embedding_dim, set_qconfig):
        """Test nnq.Embedding for 4-bit and 8-bit quantized weights.

        Checks the set_weight/weight round-trip, parity of the module output
        with the underlying quantized embedding operators, and serialization.
        """
        num_lengths = np.random.randint(1, 6)
        lengths = np.random.randint(0, 21, size=num_lengths).astype(np.int32)
        num_indices = np.sum(lengths)
        indices = torch.from_numpy(np.random.randint(low=0, high=num_embeddings, size=num_indices, dtype=np.int64))
        weights = torch.from_numpy((np.random.random_sample((num_embeddings, embedding_dim)) + 1).astype(np.float32))

        obs = default_float_qparams_observer()
        obs(weights)
        # Per-row scale/zero-point for float-qparams quantization.
        qparams = obs.calculate_qparams()
        dtypes = [torch.quint4x2, torch.quint8]
        embedding_funcs = [torch.ops.quantized.embedding_4bit, torch.ops.quantized.embedding_byte]

        for dtype, embedding_func in zip(dtypes, embedding_funcs):
            # Quantize the weights
            qweight = torch.quantize_per_channel(weights, qparams[0], qparams[1], axis=0, dtype=dtype)
            qemb = nnq.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim, dtype=dtype)
            qemb.set_weight(qweight)
            qemb(indices)
            # Ensure the module has the correct weights
            self.assertEqual(qweight, qemb.weight())
            w_packed = qemb._packed_params._packed_weight
            module_out = qemb(indices)

            # Call the bit qembedding operator directly
            ref = embedding_func(w_packed, indices, pruned_weights=False)
            self.assertEqual(module_out, ref)
            self.checkEmbeddingSerialization(qemb, num_embeddings, embedding_dim, indices, None, set_qconfig=False,
                                             is_emb_bag=False, dtype=dtype)
    @given(
        num_embeddings=st.integers(10, 50),
        embedding_dim=st.integers(5, 50).filter(lambda x: x % 4 == 0),
        num_offsets=st.integers(1, 20),
        set_qconfig=st.booleans(),
    )
    @skipIfNoFBGEMM
    def test_embedding_bag_api(self, num_embeddings, embedding_dim, num_offsets, set_qconfig):
        r"""Test execution and serialization for dynamic quantized embedding_bag modules on int8

        Runs nnq.EmbeddingBag for both 8-bit and 4-bit weights and checks
        parity with the underlying quantized embedding_bag operators.
        """
        num_lengths = np.random.randint(1, 6)
        lengths = np.random.randint(0, 21, size=num_lengths).astype(np.int32)
        num_indices = np.sum(lengths)
        indices = torch.from_numpy(np.random.randint(low=0, high=num_embeddings, size=num_indices, dtype=np.int64))

        offsets = lengths_to_offsets(lengths)
        # include the last offset
        offsets = torch.cat((offsets, torch.tensor([indices.size(0)], dtype=torch.long)), 0)
        weights = torch.from_numpy((np.random.random_sample((num_embeddings, embedding_dim)) + 1).astype(np.float32))

        for qdtype in [torch.quint8, torch.quint4x2]:
            obs = PerChannelMinMaxObserver(dtype=qdtype, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0)
            obs(weights)
            # Get the scale and zero point for the weight tensor
            qparams = obs.calculate_qparams()
            # Quantize the weights to 8bits
            qweight = torch.quantize_per_channel(weights, qparams[0], qparams[1], axis=0, dtype=qdtype)
            qemb = nnq.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim,
                                    include_last_offset=True, mode='sum', _weight=qweight, dtype=qdtype)
            qemb(indices, offsets)

            # Ensure the module has the correct weights
            self.assertEqual(qweight, qemb.weight())

            w_packed = qemb._packed_params._packed_weight
            module_out = qemb(indices, offsets)

            # Call the qembedding_bag operator directly
            if qdtype == torch.quint8:
                ref = torch.ops.quantized.embedding_bag_byte(w_packed, indices, offsets, mode=0,
                                                             per_sample_weights=None,
                                                             include_last_offset=True)
            else:
                ref = torch.ops.quantized.embedding_bag_4bit(w_packed, indices, offsets, mode=0,
                                                             per_sample_weights=None,
                                                             include_last_offset=True)

            self.assertEqual(module_out, ref)
            self.checkEmbeddingSerialization(qemb, num_embeddings, embedding_dim, indices,
                                             offsets, set_qconfig, is_emb_bag=True, dtype=qdtype)
    def test_prelu(self):
        """Test nnq.PReLU conversion from a float PReLU via from_float.

        Verifies the converted module carries the (fake-)quantized weight and
        that its output matches the float module, run with the same quantized
        weight, after requantization — within a small tolerance.
        """
        for num_parameters in range(1, 10):
            x = torch.randn(4, num_parameters, 4)
            qx = torch.quantize_per_tensor_dynamic(x, dtype=torch.quint8, reduce_range=False)

            f_prelu = torch.nn.PReLU(num_parameters=num_parameters)
            f_prelu.weight = torch.nn.Parameter(torch.randn(num_parameters).abs())
            f_prelu.qconfig = torch.ao.quantization.QConfig(
                activation=torch.ao.quantization.default_observer,
                weight=torch.ao.quantization.default_observer,)
            # Calibrate the activation observer so from_float can derive
            # output qparams.
            f_prelu.activation_post_process = f_prelu.qconfig.activation()
            f_prelu.activation_post_process(f_prelu(x))
            q_prelu = nnq.PReLU.from_float(f_prelu)
            w_obs = f_prelu.qconfig.weight()
            w_obs(f_prelu.weight)
            w_scale, w_zp = w_obs.calculate_qparams()
            q_prelu_weight = torch.quantize_per_tensor(
                f_prelu.weight,
                dtype=torch.quint8,
                scale=w_scale,
                zero_point=w_zp
            ).dequantize()

            # check that the weight makes sense
            self.assertEqual(q_prelu.weight.dequantize(), q_prelu_weight)
            # Give the float reference the same (lossy) quantized weight so
            # both paths see identical parameters.
            f_prelu.weight = torch.nn.Parameter(q_prelu.weight.dequantize())
            qy = q_prelu(qx)
            qy_ref = torch.quantize_per_tensor(
                f_prelu(qx.dequantize()), q_prelu.scale, q_prelu.zero_point, dtype=torch.quint8
            )
            # check that the output makes sense
            self.assertEqual(qy, qy_ref, atol=.1, rtol=.1)
def test_channel_shuffle(self):
"""Tests the correctness of the ChannelShuffle module.
"""
x_scale = 10.0 / 256
x_zero_point = 1
y_scale = x_scale
y_zero_point = x_zero_point
dims = (1, 4, 4, 8)
groups = 2
X = (torch.randn(dims, dtype=torch.float) - 0.5) * 10
qX = torch.quantize_per_tensor(X, x_scale, x_zero_point, dtype=torch.quint8)
dqX = qX.dequantize()
float_mod = torch.nn.ChannelShuffle(groups).float()
dqY_ref = float_mod(dqX)
qY_ref = torch.quantize_per_tensor(
dqY_ref, y_scale, y_zero_point, dtype=torch.quint8)
quant_mod = torch.nn.ChannelShuffle(groups)
qY = quant_mod(qX)
self.assertEqual(qY_ref.int_repr().numpy(), qY.int_repr().numpy(),
msg=f"ChannelShuffle module API failed, qY_ref\n{qY_ref} vs qY\n{qY}")
    @skipIfNoONEDNN
    def test_linear_leaky_relu(self):
        """test API functionality for nn.intrinsic.quantized.LinearLeakyReLU

        Sweeps batch size, feature sizes, bias, per-channel quantization and
        the negative slope; the numerical comparison is delegated to
        self._test_linear_api_impl.
        """
        with override_quantized_engine('onednn'):
            options = itertools.product(
                [1, 5],  # batch size
                [16, 32],  # in_features
                [4, 8],  # out_features
                [True, False],  # use_bias
                [True, False],  # per_channel
                [0.01, 0.05])  # negative slope
            for (batch_size, in_features, out_features, use_bias,
                 per_channel, neg_slope) in options:
                self._test_linear_api_impl(
                    nniq.LinearLeakyReLU, 'QuantizedLinearLeakyReLU',
                    torch.ops.quantized.linear_leaky_relu,
                    batch_size, in_features, out_features, use_bias,
                    per_channel, negative_slope=neg_slope)
    @skipIfNoONEDNN
    def test_linear_tanh(self):
        """test API functionality for nn.intrinsic.quantized.LinearTanh

        Sweeps batch size, feature sizes, bias and per-channel quantization;
        the numerical comparison is delegated to self._test_linear_api_impl.
        """
        with override_quantized_engine('onednn'):
            options = itertools.product(
                [1, 5],  # batch size
                [16, 32],  # in_features
                [4, 8],  # out_features
                [True, False],  # use_bias
                [True, False])  # per_channel (comment previously said
                                # "negative slope" — copy-paste from the
                                # leaky_relu test above)
            for (batch_size, in_features, out_features, use_bias,
                 per_channel) in options:
                self._test_linear_api_impl(
                    nniq.LinearTanh, 'QuantizedLinearTanh',
                    torch.ops.quantized.linear_tanh,
                    batch_size, in_features, out_features, use_bias,
                    per_channel)
| TestStaticQuantizedModule |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/ui/teams.py | {
"start": 999,
"end": 1146
} | class ____(BaseModel):
"""Team collection serializer for responses."""
teams: list[TeamResponse]
total_entries: int
| TeamCollectionResponse |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/inprocess.py | {
"start": 1733,
"end": 1892
} | class ____(SuperQObject, InProcessHBChannel):
# This signal will never be fired, but it needs to exist
kernel_died = QtCore.Signal()
| QtInProcessHBChannel |
python | huggingface__transformers | src/transformers/models/helium/modeling_helium.py | {
"start": 13002,
"end": 14807
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: HeliumConfig, layer_idx: Optional[int] = None):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = HeliumAttention(config=config, layer_idx=layer_idx)
self.mlp = HeliumMLP(config)
self.input_layernorm = HeliumRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = HeliumRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
| HeliumDecoderLayer |
python | allegroai__clearml | clearml/backend_api/services/v2_13/projects.py | {
"start": 97971,
"end": 100235
} | class ____(Request):
"""
Get all metric/variant pairs reported for tasks in a specific project.
If no project is specified, metrics/variant paris reported for all tasks will be returned.
If the project does not exist, an empty list will be returned.
:param project: Project ID
:type project: str
:param include_subprojects: If set to 'true' and the project field is set then
the result includes metrics/variants from the subproject tasks
:type include_subprojects: bool
"""
_service = "projects"
_action = "get_unique_metric_variants"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"include_subprojects": {
"default": True,
"description": "If set to 'true' and the project field is set then the result includes metrics/variants from the subproject tasks",
"type": ["boolean", "null"],
},
"project": {"description": "Project ID", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(
self, project: Optional[str] = None, include_subprojects: Optional[bool] = True, **kwargs: Any
) -> None:
super(GetUniqueMetricVariantsRequest, self).__init__(**kwargs)
self.project = project
self.include_subprojects = include_subprojects
@schema_property("project")
def project(self) -> Optional[str]:
return self._property_project
@project.setter
def project(self, value: Optional[str]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("include_subprojects")
def include_subprojects(self) -> Optional[bool]:
return self._property_include_subprojects
@include_subprojects.setter
def include_subprojects(self, value: Optional[bool]) -> None:
if value is None:
self._property_include_subprojects = None
return
self.assert_isinstance(value, "include_subprojects", (bool,))
self._property_include_subprojects = value
| GetUniqueMetricVariantsRequest |
python | kubernetes-client__python | kubernetes/client/models/v1_ingress_class_parameters_reference.py | {
"start": 383,
"end": 8009
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_group': 'str',
'kind': 'str',
'name': 'str',
'namespace': 'str',
'scope': 'str'
}
attribute_map = {
'api_group': 'apiGroup',
'kind': 'kind',
'name': 'name',
'namespace': 'namespace',
'scope': 'scope'
}
def __init__(self, api_group=None, kind=None, name=None, namespace=None, scope=None, local_vars_configuration=None): # noqa: E501
"""V1IngressClassParametersReference - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_group = None
self._kind = None
self._name = None
self._namespace = None
self._scope = None
self.discriminator = None
if api_group is not None:
self.api_group = api_group
self.kind = kind
self.name = name
if namespace is not None:
self.namespace = namespace
if scope is not None:
self.scope = scope
@property
def api_group(self):
"""Gets the api_group of this V1IngressClassParametersReference. # noqa: E501
apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. # noqa: E501
:return: The api_group of this V1IngressClassParametersReference. # noqa: E501
:rtype: str
"""
return self._api_group
@api_group.setter
def api_group(self, api_group):
"""Sets the api_group of this V1IngressClassParametersReference.
apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. # noqa: E501
:param api_group: The api_group of this V1IngressClassParametersReference. # noqa: E501
:type: str
"""
self._api_group = api_group
@property
def kind(self):
"""Gets the kind of this V1IngressClassParametersReference. # noqa: E501
kind is the type of resource being referenced. # noqa: E501
:return: The kind of this V1IngressClassParametersReference. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1IngressClassParametersReference.
kind is the type of resource being referenced. # noqa: E501
:param kind: The kind of this V1IngressClassParametersReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
self._kind = kind
@property
def name(self):
"""Gets the name of this V1IngressClassParametersReference. # noqa: E501
name is the name of resource being referenced. # noqa: E501
:return: The name of this V1IngressClassParametersReference. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1IngressClassParametersReference.
name is the name of resource being referenced. # noqa: E501
:param name: The name of this V1IngressClassParametersReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def namespace(self):
"""Gets the namespace of this V1IngressClassParametersReference. # noqa: E501
namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\". # noqa: E501
:return: The namespace of this V1IngressClassParametersReference. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this V1IngressClassParametersReference.
namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\". # noqa: E501
:param namespace: The namespace of this V1IngressClassParametersReference. # noqa: E501
:type: str
"""
self._namespace = namespace
@property
def scope(self):
"""Gets the scope of this V1IngressClassParametersReference. # noqa: E501
scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\". # noqa: E501
:return: The scope of this V1IngressClassParametersReference. # noqa: E501
:rtype: str
"""
return self._scope
@scope.setter
def scope(self, scope):
"""Sets the scope of this V1IngressClassParametersReference.
scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\". # noqa: E501
:param scope: The scope of this V1IngressClassParametersReference. # noqa: E501
:type: str
"""
self._scope = scope
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1IngressClassParametersReference):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1IngressClassParametersReference):
return True
return self.to_dict() != other.to_dict()
| V1IngressClassParametersReference |
python | pytorch__pytorch | torch/autograd/_functions/tensor.py | {
"start": 194,
"end": 987
} | class ____(Function):
@staticmethod
@deprecated(
"`torch.autograd._functions.Type` is deprecated as of PyTorch 2.1, "
"please use `torch.tensor.to(dtype=dtype)` instead.",
category=FutureWarning,
)
# pyrefly: ignore [bad-override]
def forward(ctx, i, dest_type):
ctx.input_type = type(i)
ctx.input_device = -1 if not i.is_cuda else i.get_device()
return i.type(dest_type)
@staticmethod
# pyrefly: ignore [bad-override]
def backward(ctx, grad_output):
if ctx.input_device == -1:
return grad_output.type(ctx.input_type), None
else:
with torch.accelerator.device_index(ctx.input_device):
return grad_output.type(ctx.input_type), None
# TODO: deprecate this
| Type |
python | joke2k__faker | faker/providers/company/th_TH/__init__.py | {
"start": 82,
"end": 3171
} | class ____(CompanyProvider):
formats = OrderedDict(
(
("{{company_limited_prefix}}{{last_name}} {{company_limited_suffix}}", 0.2),
(
"{{company_limited_prefix}}{{last_name}}{{company_suffix}} {{company_limited_suffix}}",
0.2,
),
("{{company_limited_prefix}}{{last_name}} {{company_limited_suffix}}", 0.2),
("{{company_prefix}}{{last_name}}", 0.2),
("{{company_prefix}}{{last_name}}{{company_suffix}}", 0.2),
("{{last_name}}{{company_suffix}}", 0.1),
("{{nonprofit_prefix}}{{last_name}}", 0.1),
("{{last_name}}-{{last_name}}", 0.05),
("{{last_name}}และ{{last_name}}", 0.05),
("{{company_limited_prefix}}{{last_name}}", 0.01),
)
)
company_prefixes = OrderedDict(
(
("ห้างหุ้นส่วนจำกัด ", 0.3),
("หจก.", 0.2),
("บจก.", 0.1),
("บมจ.", 0.1),
("ห้างหุ้นส่วนสามัญ ", 0.1),
("หสน.", 0.01),
)
)
nonprofit_prefixes = OrderedDict(
(
("สมาคม", 0.4),
("มูลนิธิ", 0.3),
("ชมรม", 0.2),
("สหภาพแรงงาน", 0.1),
)
)
company_suffixes = (
"และเพื่อน",
"และบุตร",
"แอนด์ซันส์",
"กรุ๊ป",
"การช่าง",
"ก่อสร้าง",
"บริการ",
"เซอร์วิส",
"กลการ",
"ซัพพลาย",
"คอมมิวนิเคชั่น",
"พืชผล",
"เอเยนซี",
"เอ็นจิเนียริ่ง",
"คอนสตรัคชั่น",
"วิศวกรรม",
"วิศวการ",
"คอมพิวเตอร์",
"พานิช",
"ขนส่ง",
"เฟอนิชชิ่ง",
"เฟอร์นิเจอร์",
"อุตสาหกรรม",
"เอนเตอรไพรส์",
"จิวเวลรี่",
"อะไหล่ยนต์",
"ภาพยนตร์",
"ยานยนต์",
"เทรดดิ้ง",
"การค้า",
"แลบ",
"เคมิคอล",
"อิมปอร์ตเอ็กซปอร์ต",
"อินเตอร์เนชั่นแนล",
"บรรจุภัณฑ์",
"แพคกิ้ง",
"มอเตอร์",
"โอสถ",
"การบัญชี",
"สโตร์",
)
company_limited_prefixes = OrderedDict(
(
("บริษัท ", 0.95),
("ธนาคาร", 0.03),
("บริษัทหลักทรัพย์ ", 0.005),
("กองทุนรวม", 0.005),
)
)
company_limited_suffixes = OrderedDict(
(
("จำกัด", 0.85),
("จำกัด (มหาชน)", 0.15),
)
)
def company_prefix(self) -> str:
"""
:example: 'ห้างหุ้นส่วนจำกัด'
"""
return self.random_element(self.company_prefixes)
def company_limited_prefix(self) -> str:
"""
:example: 'บริษัท'
"""
return self.random_element(self.company_limited_prefixes)
def company_limited_suffix(self) -> str:
"""
:example: 'จำกัด'
"""
return self.random_element(self.company_limited_suffixes)
def nonprofit_prefix(self) -> str:
"""
:example: 'มูลนิธิ'
"""
return self.random_element(self.nonprofit_prefixes)
| Provider |
python | astropy__astropy | astropy/coordinates/tests/test_representation_methods.py | {
"start": 428,
"end": 1773
} | class ____:
"""Manipulation of Representation shapes.
Checking that attributes are manipulated correctly.
Even more exhaustive tests are done in time.tests.test_methods
"""
def setup_class(cls):
# We set up some representations with, on purpose, copy=False,
# so we can check that broadcasting is handled correctly.
lon = Longitude(np.arange(0, 24, 4), u.hourangle)
lat = Latitude(np.arange(-90, 91, 30), u.deg)
# With same-sized arrays
cls.s0 = SphericalRepresentation(
lon[:, np.newaxis] * np.ones(lat.shape),
lat * np.ones(lon.shape)[:, np.newaxis],
np.ones(lon.shape + lat.shape) * u.kpc,
copy=False,
)
cls.diff = SphericalDifferential(
d_lon=np.ones(cls.s0.shape) * u.mas / u.yr,
d_lat=np.ones(cls.s0.shape) * u.mas / u.yr,
d_distance=np.ones(cls.s0.shape) * u.km / u.s,
copy=False,
)
cls.s0 = cls.s0.with_differentials(cls.diff)
# With unequal arrays -> these will be broadcasted.
cls.s1 = SphericalRepresentation(
lon[:, np.newaxis], lat, 1.0 * u.kpc, differentials=cls.diff, copy=False
)
# For completeness on some tests, also a cartesian one
cls.c0 = cls.s0.to_cartesian()
| ShapeSetup |
python | jd__tenacity | tests/test_tenacity.py | {
"start": 55421,
"end": 56878
} | class ____(unittest.TestCase):
def test_context_manager_retry_one(self):
from tenacity import Retrying
raise_ = True
for attempt in Retrying():
with attempt:
if raise_:
raise_ = False
raise Exception("Retry it!")
def test_context_manager_on_error(self):
from tenacity import Retrying
class CustomError(Exception):
pass
retry = Retrying(retry=tenacity.retry_if_exception_type(IOError))
def test():
for attempt in retry:
with attempt:
raise CustomError("Don't retry!")
self.assertRaises(CustomError, test)
def test_context_manager_retry_error(self):
from tenacity import Retrying
retry = Retrying(stop=tenacity.stop_after_attempt(2))
def test():
for attempt in retry:
with attempt:
raise Exception("Retry it!")
self.assertRaises(RetryError, test)
def test_context_manager_reraise(self):
from tenacity import Retrying
class CustomError(Exception):
pass
retry = Retrying(reraise=True, stop=tenacity.stop_after_attempt(2))
def test():
for attempt in retry:
with attempt:
raise CustomError("Don't retry!")
self.assertRaises(CustomError, test)
| TestContextManager |
python | readthedocs__readthedocs.org | readthedocs/api/v2/views/integrations.py | {
"start": 26355,
"end": 29916
} | class ____(WebhookMixin, APIView):
"""
Webhook consumer for Bitbucket.
Accepts webhook events from Bitbucket, 'repo:push' events trigger builds.
Expects the following JSON::
{
"push": {
"changes": [{
"new": {
"name": "branch-name",
...
},
"old" {
"name": "branch-name",
...
},
...
}],
...
},
...
}
See full payload here:
- https://confluence.atlassian.com/bitbucket/event-payloads-740262817.html#EventPayloads-Push
"""
integration_type = Integration.BITBUCKET_WEBHOOK
def handle_webhook(self):
"""
Handle Bitbucket events for push.
Bitbucket doesn't have a separate event for creation/deletion, instead
it sets the new attribute (null if it is a deletion) and the old
attribute (null if it is a creation).
"""
event = self.request.headers.get(BITBUCKET_EVENT_HEADER, BITBUCKET_PUSH)
structlog.contextvars.bind_contextvars(webhook_event=event)
webhook_bitbucket.send(
Project,
project=self.project,
data=self.request.data,
event=event,
)
# NOTE: we can't call `self.update_default_branch` here because
# BitBucket does not tell us what is the `default_branch` for a
# repository in these incoming webhooks.
if event == BITBUCKET_PUSH:
try:
data = self.request.data
changes = data["push"]["changes"]
versions_info = []
for change in changes:
old = change["old"]
new = change["new"]
# Normal push to master
if old is not None and new is not None:
version_type = BRANCH if new["type"] == "branch" else TAG
versions_info.append(VersionInfo(name=new["name"], type=version_type))
# BitBuck returns an array of changes rather than
# one webhook per change. If we have at least one normal push
# we don't trigger the sync versions, because that
# will be triggered with the normal push.
if versions_info:
return self.get_response_push(
self.project,
versions_info,
)
log.debug("Triggered sync_versions.")
return self.sync_versions_response(self.project)
except KeyError as exc:
raise ParseError("Invalid request") from exc
return None
def is_payload_valid(self):
"""
BitBucket use a HMAC hexdigest hash to sign the payload.
It is sent in the request's header.
See https://support.atlassian.com/bitbucket-cloud/docs/manage-webhooks/#Secure-webhooks.
"""
signature = self.request.headers.get(BITBUCKET_SIGNATURE_HEADER)
if not signature:
return False
secret = self.get_integration().secret
msg = self.request.body.decode()
digest = WebhookMixin.get_digest(secret, msg)
result = hmac.compare_digest(
b"sha256=" + digest.encode(),
signature.encode(),
)
return result
| BitbucketWebhookView |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/cli_utils.py | {
"start": 660,
"end": 1005
} | class ____(argparse.Action):
"""
Internal custom Action to help detect arguments that aren't default.
"""
non_default_args: Set[str] = set()
def __call__(self, arg_parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
DetectDefault.non_default_args.add(self.dest)
| DetectDefault |
python | PyCQA__isort | isort/exceptions.py | {
"start": 4241,
"end": 4716
} | class ____(ISortError):
"""Raised when an isort literal sorting comment is used, with a type that doesn't match the
supplied data structure's type.
"""
def __init__(self, kind: type, expected_kind: type):
super().__init__(
f"isort was told to sort a literal of type {expected_kind} but was given "
f"a literal of type {kind}."
)
self.kind = kind
self.expected_kind = expected_kind
| LiteralSortTypeMismatch |
python | django__django | tests/forms_tests/tests/test_input_formats.py | {
"start": 21316,
"end": 25245
} | class ____(SimpleTestCase):
def test_dateField(self):
"DateFields can parse dates in the default format"
f = forms.DateField()
# Parse a date in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean("2010-12-21")
self.assertEqual(result, date(2010, 12, 21))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21")
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean("12/21/2010")
self.assertEqual(result, date(2010, 12, 21))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21")
def test_localized_dateField(self):
"""
Localized DateFields in a non-localized environment act as unlocalized
widgets
"""
f = forms.DateField()
# Parse a date in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean("2010-12-21")
self.assertEqual(result, date(2010, 12, 21))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21")
# Parse a date in a valid format, get a parsed result
result = f.clean("12/21/2010")
self.assertEqual(result, date(2010, 12, 21))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21")
def test_dateField_with_inputformat(self):
"""
DateFields with manually specified input formats can accept those
formats
"""
f = forms.DateField(input_formats=["%d.%m.%Y", "%d-%m-%Y"])
# Parse a date in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("2010-12-21")
# Parse a date in a valid format, get a parsed result
result = f.clean("21.12.2010")
self.assertEqual(result, date(2010, 12, 21))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21")
# Parse a date in a valid format, get a parsed result
result = f.clean("21-12-2010")
self.assertEqual(result, date(2010, 12, 21))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21")
def test_localized_dateField_with_inputformat(self):
"""
Localized DateFields with manually specified input formats can accept
those formats.
"""
f = forms.DateField(input_formats=["%d.%m.%Y", "%d-%m-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("2010-12-21")
# Parse a date in a valid format, get a parsed result
result = f.clean("21.12.2010")
self.assertEqual(result, date(2010, 12, 21))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21")
# Parse a date in a valid format, get a parsed result
result = f.clean("21-12-2010")
self.assertEqual(result, date(2010, 12, 21))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21")
| SimpleDateFormatTests |
python | buildout__buildout | src/zc/buildout/easy_install.py | {
"start": 4545,
"end": 8276
} | class ____(EnvironmentMixin, pkg_resources.Environment):
"""Buildout version of Environment with canonicalized names.
* pkg_resources defines the Environment class
* setuptools defines a PackageIndex class that inherits from Environment
* Buildout needs a few fixes that should be used by both.
The fixes are needed for this issue, where distributions created by
setuptools 69.3+ get a different name than with older versions:
https://github.com/buildout/buildout/issues/647
And since May 2025 we override the can_add method to work better on Mac:
accept distributions when the architecture (machine type) matches,
instead of failing when the major or minor version do not match.
See long explanation in https://github.com/buildout/buildout/pull/707
It boils down to this, depending on how you installed Python:
% bin/zopepy
>>> import pkg_resources
>>> pkg_resources.get_platform()
'macosx-11.0-arm64'
>>> pkg_resources.get_supported_platform()
'macosx-15.4-arm64'
Here macosx-11.0 is the platform on which the Python was built/compiled.
And macosx-15.4 is the current platform (my laptop).
This gives problems when we get a Mac-specific wheel. We turn it into an
egg that has the result of get_supported_platform() in its name.
Then our code in easy_install._get_matching_dist_in_location creates a
pkg_resources.Environment with the egg location. Under the hood,
pkg_resources.compatible_platforms is called, and this does not find any
matching dists because it compares the platform in the egg name with that
of the system, which is pkg_resources.get_platform().
So an egg created on the current machine by the current Python may not be
recognized. This is obviously wrong.
"""
@cached_property
def _mac_machine_type(self):
"""Machine type (architecture) on Mac.
Adapted from pkg_resources.compatible_platforms.
If self.platform is something like 'macosx-15.4-arm64', we return 'arm64.
"""
match = macosVersionString.match(self.platform)
if match is None:
# no Mac
return ""
return match.group(3)
def can_add(self, dist: Distribution) -> bool:
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
For Mac we make a change compared to the original. Platforms like
'macosx-11.0-arm64' and 'macosx-15.4-arm64' are considered compatible.
"""
if super().can_add(dist):
return True
if sys.platform != "darwin":
# Our override is only useful on Mac OSX.
return False
# The rest of the code is a combination of the original
# pkg_resources.Environment.can_add and pkg_resources.compatible_platforms.
py_compat = (
self.python is None
or dist.py_version is None
or dist.py_version == self.python
)
if not py_compat:
return False
provMac = macosVersionString.match(dist.platform)
if not provMac:
# The dist is not for Mac.
return False
provided_machine_type = provMac.group(3)
if provided_machine_type != self._mac_machine_type:
return False
logger.debug(
"Accepted dist %s although its provided platform %s does not "
"match our supported platform %s.",
dist,
dist.platform,
self.platform,
)
return True
| Environment |
python | jina-ai__jina | jina/proto/serializer.py | {
"start": 2171,
"end": 2853
} | class ____:
"""Since the serializer is replacing the `jina_pb2 to know how to exactly serialize messages, this is just a placeholder that
delegates the serializing and deserializing to the internal protobuf structure with no extra optimization.
"""
@staticmethod
def SerializeToString(x):
"""
# noqa: DAR101
# noqa: DAR102
# noqa: DAR201
"""
return x.SerializeToString()
@staticmethod
def FromString(x: bytes):
"""
# noqa: DAR101
# noqa: DAR102
# noqa: DAR201
"""
ep = jina_pb2.EndpointsProto()
ep.ParseFromString(x)
return ep
| EndpointsProto |
python | pytorch__pytorch | torch/_inductor/codegen/common.py | {
"start": 83503,
"end": 83842
} | class ____:
key: ClassVar[str] = "opt_ctx"
dtype: Optional[torch.dtype] = None
ops_name: str = ""
@functools.cache
def jinja2_env() -> Any:
try:
import jinja2
return jinja2.Environment(
undefined=jinja2.StrictUndefined,
)
except ImportError:
return None
| OptimizationContext |
python | redis__redis-py | redis/multidb/database.py | {
"start": 2413,
"end": 3569
} | class ____(BaseDatabase, SyncDatabase):
def __init__(
self,
client: Union[redis.Redis, RedisCluster],
circuit: CircuitBreaker,
weight: float,
health_check_url: Optional[str] = None,
):
"""
Initialize a new Database instance.
Args:
client: Underlying Redis client instance for database operations
circuit: Circuit breaker for handling database failures
weight: Weight value used for database failover prioritization
health_check_url: Health check URL associated with the current database
"""
self._client = client
self._cb = circuit
self._cb.database = self
super().__init__(weight, health_check_url)
@property
def client(self) -> Union[redis.Redis, RedisCluster]:
return self._client
@client.setter
def client(self, client: Union[redis.Redis, RedisCluster]):
self._client = client
@property
def circuit(self) -> CircuitBreaker:
return self._cb
@circuit.setter
def circuit(self, circuit: CircuitBreaker):
self._cb = circuit
| Database |
python | django-import-export__django-import-export | tests/core/tests/admin_integration/test_import_functionality.py | {
"start": 24948,
"end": 25862
} | class ____(AdminTestMixin, TestCase):
"""
Display correct import order when 'import_order' is declared (issue 1845).
Ensure that the prompt text on the import page renders the
fields in the correct order.
"""
def setUp(self):
super().setUp()
EBookResource._meta.import_order = ("id", "name", "published", "author_email")
def tearDown(self):
super().tearDown()
EBookResource._meta.import_order = ()
def test_import_preview_order(self):
response = self._get_url_response(self.ebook_import_url)
# test display rendered in correct order
target_re = (
r"This importer will import the following fields:[\\n\s]+"
r"<code>id, name, published_date, Email of the author, Author Name</code>"
r"[\\n\s]+"
)
self.assertRegex(response.content.decode(), target_re)
| DeclaredImportOrderTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.