language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
PrefectHQ__prefect
|
src/prefect/client/schemas/actions.py
|
{
"start": 26711,
"end": 27631
}
|
class ____(ActionBaseModel):
"""Data used by the Prefect REST API to create a log."""
name: str = Field(default=..., description="The logger name.")
level: int = Field(default=..., description="The log level.")
message: str = Field(default=..., description="The log message.")
timestamp: DateTime = Field(default=..., description="The log timestamp.")
flow_run_id: Optional[UUID] = Field(default=None)
task_run_id: Optional[UUID] = Field(default=None)
worker_id: Optional[UUID] = Field(default=None)
def model_dump(self, *args: Any, **kwargs: Any) -> dict[str, Any]:
"""
The worker_id field is only included in logs sent to Prefect Cloud.
If it's unset, we should not include it in the log payload.
"""
data = super().model_dump(*args, **kwargs)
if self.worker_id is None:
data.pop("worker_id")
return data
|
LogCreate
|
python
|
astropy__astropy
|
astropy/io/ascii/qdp.py
|
{
"start": 15043,
"end": 15158
}
|
class ____(core.DefaultSplitter):
"""
Split on space for QDP tables.
"""
delimiter = " "
|
QDPSplitter
|
python
|
Unity-Technologies__ml-agents
|
ml-agents/mlagents/trainers/torch_entities/model_serialization.py
|
{
"start": 227,
"end": 1128
}
|
class ____:
"""
Set this context by calling
```
with exporting_to_onnx():
```
Within this context, the variable exporting_to_onnx.is_exporting() will be true.
This implementation is thread safe.
"""
# local is_exporting flag for each thread
_local_data = threading.local()
_local_data._is_exporting = False
# global lock shared among all threads, to make sure only one thread is exporting at a time
_lock = threading.Lock()
def __enter__(self):
self._lock.acquire()
self._local_data._is_exporting = True
def __exit__(self, *args):
self._local_data._is_exporting = False
self._lock.release()
@staticmethod
def is_exporting():
if not hasattr(exporting_to_onnx._local_data, "_is_exporting"):
return False
return exporting_to_onnx._local_data._is_exporting
|
exporting_to_onnx
|
python
|
readthedocs__readthedocs.org
|
readthedocs/organizations/migrations/0006_add_assets_cleaned.py
|
{
"start": 149,
"end": 995
}
|
class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("organizations", "0005_historicalorganization_historicalteam"),
]
operations = [
migrations.AddField(
model_name="historicalorganization",
name="artifacts_cleaned",
field=models.BooleanField(
default=False,
help_text="Artifacts are cleaned out from storage",
verbose_name="Artifacts Cleaned",
),
),
migrations.AddField(
model_name="organization",
name="artifacts_cleaned",
field=models.BooleanField(
default=False,
help_text="Artifacts are cleaned out from storage",
verbose_name="Artifacts Cleaned",
),
),
]
|
Migration
|
python
|
getsentry__responses
|
responses/__init__.py
|
{
"start": 19776,
"end": 21448
}
|
class ____(BaseResponse):
def __init__(
self,
method: str,
url: "_URLPatternType",
callback: Callable[[Any], Any],
stream: Optional[bool] = None,
content_type: Optional[str] = "text/plain",
**kwargs: Any,
) -> None:
super().__init__(method, url, **kwargs)
self.callback = callback
if stream is not None:
warn(
"stream argument is deprecated. Use stream parameter in request directly",
DeprecationWarning,
)
self.stream: Optional[bool] = stream
self.content_type: Optional[str] = content_type
def get_response(self, request: "PreparedRequest") -> HTTPResponse:
headers = self.get_headers()
result = self.callback(request)
if isinstance(result, Exception):
raise result
status, r_headers, body = result
if isinstance(body, Exception):
raise body
# If the callback set a content-type remove the one
# set in add_callback() so that we don't have multiple
# content type values.
has_content_type = False
if isinstance(r_headers, dict) and "Content-Type" in r_headers:
has_content_type = True
elif isinstance(r_headers, list):
has_content_type = any(
[h for h in r_headers if h and h[0].lower() == "content-type"]
)
if has_content_type:
headers.pop("Content-Type", None)
body = _handle_body(body)
headers.extend(r_headers)
return _form_response(body, headers, status, request.method)
|
CallbackResponse
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/queries/bm25/generate/sync.py
|
{
"start": 294,
"end": 431
}
|
class ____(
Generic[Properties, References],
_BM25GenerateExecutor[ConnectionSync, Properties, References],
):
pass
|
_BM25Generate
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/quantization_ops/quantization_ops_test.py
|
{
"start": 1973,
"end": 3334
}
|
class ____(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def test_invalid_inputs(self):
inputs = constant_op.constant(
value=[[1.0], [2.0], [4.0]], dtype=dtypes.float32)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 1"):
self.evaluate(
array_ops.fake_quant_with_min_max_vars_per_channel(
inputs=inputs, min=[[0.0]], max=[1.0]))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"Dimensions must be equal|incorrect size"):
self.evaluate(
array_ops.fake_quant_with_min_max_vars_per_channel(
inputs=inputs, min=[0.0, 0.1], max=[1.0]))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 1"):
self.evaluate(
array_ops.fake_quant_with_min_max_vars_per_channel(
inputs=inputs, min=[1.0], max=[[1.0]]))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"Dimensions must be equal|incorrect size"):
self.evaluate(
array_ops.fake_quant_with_min_max_vars_per_channel(
inputs=inputs, min=[0.0], max=[1.0, 1.1]))
|
FakeQuantWithMinMaxVarsPerChannelOpTest
|
python
|
catalyst-team__catalyst
|
catalyst/contrib/datasets/mnist.py
|
{
"start": 8920,
"end": 11270
}
|
class ____(QueryGalleryDataset):
"""
MNIST for metric learning with query and gallery split.
MnistQGDataset should be used for test stage.
For this dataset we used only test part of the MNIST and only
those images that are labeled as 5, 6, 7, 8, 9.
Args:
gallery_fraq: gallery size
**kwargs: MNIST args
"""
_split = 5
classes = [
"5 - five",
"6 - six",
"7 - seven",
"8 - eight",
"9 - nine",
]
def __init__(self, gallery_fraq: Optional[float] = 0.2, **kwargs) -> None:
"""Init."""
self._mnist = MNIST(train=False, **kwargs)
self._filter()
self._gallery_size = int(gallery_fraq * len(self._mnist))
self._query_size = len(self._mnist) - self._gallery_size
self._is_query = torch.zeros(len(self._mnist)).type(torch.bool)
self._is_query[: self._query_size] = True
def _filter(self) -> None:
"""Filter MNIST dataset: select images of 5, 6, 7, 8, 9 classes."""
mask = self._mnist.targets >= self._split
self._mnist.data = self._mnist.data[mask]
self._mnist.targets = self._mnist.targets[mask]
def __getitem__(self, idx: int) -> Dict[str, Any]:
"""
Get item method for dataset
Args:
idx: index of the object
Returns:
Dict with features, targets and is_query flag
"""
image, label = self._mnist[idx]
return {
"features": image,
"targets": label,
"is_query": self._is_query[idx],
}
def __len__(self) -> int:
"""Length"""
return len(self._mnist)
def __repr__(self) -> None:
"""Print info about the dataset"""
return self._mnist.__repr__()
@property
def gallery_size(self) -> int:
"""Query Gallery dataset should have gallery_size property"""
return self._gallery_size
@property
def query_size(self) -> int:
"""Query Gallery dataset should have query_size property"""
return self._query_size
@property
def data(self) -> torch.Tensor:
"""Images from MNIST"""
return self._mnist.data
@property
def targets(self) -> torch.Tensor:
"""Labels of digits"""
return self._mnist.targets
|
MnistQGDataset
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
|
{
"start": 34283,
"end": 35839
}
|
class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
domain: str,
api_key: str,
start_date: str,
requests_per_minute: Optional[int] = None,
sync_lag_minutes: Optional[int] = None,
):
"""Airbyte Source for Freshcaller.
Documentation can be found at https://docs.airbyte.com/integrations/sources/freshcaller
Args:
name (str): The name of the destination.
domain (str): Used to construct Base URL for the Freshcaller APIs
api_key (str): Freshcaller API Key. See the docs for more information on how to obtain this key.
requests_per_minute (Optional[int]): The number of requests per minute that this source allowed to use. There is a rate limit of 50 requests per minute per app per account.
start_date (str): UTC date and time. Any data created after this date will be replicated.
sync_lag_minutes (Optional[int]): Lag in minutes for each sync, i.e., at time T, data for the time range [prev_sync_time, T-30] will be fetched
"""
self.domain = check.str_param(domain, "domain")
self.api_key = check.str_param(api_key, "api_key")
self.requests_per_minute = check.opt_int_param(requests_per_minute, "requests_per_minute")
self.start_date = check.str_param(start_date, "start_date")
self.sync_lag_minutes = check.opt_int_param(sync_lag_minutes, "sync_lag_minutes")
super().__init__("Freshcaller", name)
|
FreshcallerSource
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/nn_ops/rnn_cell_test.py
|
{
"start": 2436,
"end": 2736
}
|
class ____(rnn_cell.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 5
@property
def state_size(self):
return 5
def __call__(self, input_, state, scope=None):
return (input_ + 1, state + 1)
|
Plus1RNNCell
|
python
|
huggingface__transformers
|
src/transformers/models/segformer/configuration_segformer.py
|
{
"start": 814,
"end": 6793
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`SegformerModel`]. It is used to instantiate an
SegFormer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SegFormer
[nvidia/segformer-b0-finetuned-ade-512-512](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512)
architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_encoder_blocks (`int`, *optional*, defaults to 4):
The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
depths (`list[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
The number of layers in each encoder block.
sr_ratios (`list[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
Sequence reduction ratios in each encoder block.
hidden_sizes (`list[int]`, *optional*, defaults to `[32, 64, 160, 256]`):
Dimension of each of the encoder blocks.
patch_sizes (`list[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
Patch size before each encoder block.
strides (`list[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
Stride before each encoder block.
num_attention_heads (`list[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
Number of attention heads for each attention layer in each block of the Transformer encoder.
mlp_ratios (`list[int]`, *optional*, defaults to `[4, 4, 4, 4]`):
Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
encoder blocks.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability before the classification head.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
drop_path_rate (`float`, *optional*, defaults to 0.1):
The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
decoder_hidden_size (`int`, *optional*, defaults to 256):
The dimension of the all-MLP decode head.
semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
The index that is ignored by the loss function of the semantic segmentation model.
Example:
```python
>>> from transformers import SegformerModel, SegformerConfig
>>> # Initializing a SegFormer nvidia/segformer-b0-finetuned-ade-512-512 style configuration
>>> configuration = SegformerConfig()
>>> # Initializing a model from the nvidia/segformer-b0-finetuned-ade-512-512 style configuration
>>> model = SegformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "segformer"
def __init__(
self,
num_channels=3,
num_encoder_blocks=4,
depths=[2, 2, 2, 2],
sr_ratios=[8, 4, 2, 1],
hidden_sizes=[32, 64, 160, 256],
patch_sizes=[7, 3, 3, 3],
strides=[4, 2, 2, 2],
num_attention_heads=[1, 2, 5, 8],
mlp_ratios=[4, 4, 4, 4],
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
classifier_dropout_prob=0.1,
initializer_range=0.02,
drop_path_rate=0.1,
layer_norm_eps=1e-6,
decoder_hidden_size=256,
semantic_loss_ignore_index=255,
**kwargs,
):
super().__init__(**kwargs)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
" removed, as the behaviour will default to that of reshape_last_stage = True.",
FutureWarning,
)
self.num_channels = num_channels
self.num_encoder_blocks = num_encoder_blocks
self.depths = depths
self.sr_ratios = sr_ratios
self.hidden_sizes = hidden_sizes
self.patch_sizes = patch_sizes
self.strides = strides
self.mlp_ratios = mlp_ratios
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.classifier_dropout_prob = classifier_dropout_prob
self.initializer_range = initializer_range
self.drop_path_rate = drop_path_rate
self.layer_norm_eps = layer_norm_eps
self.decoder_hidden_size = decoder_hidden_size
self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
self.semantic_loss_ignore_index = semantic_loss_ignore_index
__all__ = ["SegformerConfig"]
|
SegformerConfig
|
python
|
pyca__cryptography
|
src/cryptography/hazmat/primitives/serialization/ssh.py
|
{
"start": 8404,
"end": 10768
}
|
class ____:
"""Format for RSA keys.
Public:
mpint e, n
Private:
mpint n, e, d, iqmp, p, q
"""
def get_public(
self, data: memoryview
) -> tuple[tuple[int, int], memoryview]:
"""RSA public fields"""
e, data = _get_mpint(data)
n, data = _get_mpint(data)
return (e, n), data
def load_public(
self, data: memoryview
) -> tuple[rsa.RSAPublicKey, memoryview]:
"""Make RSA public key from data."""
(e, n), data = self.get_public(data)
public_numbers = rsa.RSAPublicNumbers(e, n)
public_key = public_numbers.public_key()
return public_key, data
def load_private(
self, data: memoryview, pubfields, unsafe_skip_rsa_key_validation: bool
) -> tuple[rsa.RSAPrivateKey, memoryview]:
"""Make RSA private key from data."""
n, data = _get_mpint(data)
e, data = _get_mpint(data)
d, data = _get_mpint(data)
iqmp, data = _get_mpint(data)
p, data = _get_mpint(data)
q, data = _get_mpint(data)
if (e, n) != pubfields:
raise ValueError("Corrupt data: rsa field mismatch")
dmp1 = rsa.rsa_crt_dmp1(d, p)
dmq1 = rsa.rsa_crt_dmq1(d, q)
public_numbers = rsa.RSAPublicNumbers(e, n)
private_numbers = rsa.RSAPrivateNumbers(
p, q, d, dmp1, dmq1, iqmp, public_numbers
)
private_key = private_numbers.private_key(
unsafe_skip_rsa_key_validation=unsafe_skip_rsa_key_validation
)
return private_key, data
def encode_public(
self, public_key: rsa.RSAPublicKey, f_pub: _FragList
) -> None:
"""Write RSA public key"""
pubn = public_key.public_numbers()
f_pub.put_mpint(pubn.e)
f_pub.put_mpint(pubn.n)
def encode_private(
self, private_key: rsa.RSAPrivateKey, f_priv: _FragList
) -> None:
"""Write RSA private key"""
private_numbers = private_key.private_numbers()
public_numbers = private_numbers.public_numbers
f_priv.put_mpint(public_numbers.n)
f_priv.put_mpint(public_numbers.e)
f_priv.put_mpint(private_numbers.d)
f_priv.put_mpint(private_numbers.iqmp)
f_priv.put_mpint(private_numbers.p)
f_priv.put_mpint(private_numbers.q)
|
_SSHFormatRSA
|
python
|
pola-rs__polars
|
py-polars/src/polars/datatypes/classes.py
|
{
"start": 914,
"end": 1320
}
|
class ____(Generic[R]):
"""Decorator that allows a method to be called from the class OR instance."""
def __init__(self, func: Callable[..., R]) -> None:
self.func = func
def __get__(self, instance: Any, type_: Any) -> Callable[..., R]:
if instance is not None:
return self.func.__get__(instance, type_)
return self.func.__get__(type_, type_)
|
classinstmethod
|
python
|
django__django
|
tests/indexes/models.py
|
{
"start": 31,
"end": 526
}
|
class ____(models.ForeignObject):
"""
Creates virtual relation to the translation with model cache enabled.
"""
# Avoid validation
requires_unique_target = False
def __init__(self, to, on_delete, from_fields, to_fields, **kwargs):
# Disable reverse relation
kwargs["related_name"] = "+"
# Set unique to enable model cache.
kwargs["unique"] = True
super().__init__(to, on_delete, from_fields, to_fields, **kwargs)
|
CurrentTranslation
|
python
|
psf__black
|
src/black/linegen.py
|
{
"start": 2342,
"end": 33281
}
|
class ____(Visitor[Line]):
"""Generates reformatted Line objects. Empty lines are not emitted.
Note: destroys the tree it's visiting by mutating prefixes of its leaves
in ways that will no longer stringify to valid Python code on the tree.
"""
def __init__(self, mode: Mode, features: Collection[Feature]) -> None:
self.mode = mode
self.features = features
self.current_line: Line
self.__post_init__()
def line(self, indent: int = 0) -> Iterator[Line]:
"""Generate a line.
If the line is empty, only emit if it makes sense.
If the line is too long, split it first and then generate.
If any lines were generated, set up a new current_line.
"""
if not self.current_line:
self.current_line.depth += indent
return # Line is empty, don't emit. Creating a new one unnecessary.
if len(self.current_line.leaves) == 1 and is_async_stmt_or_funcdef(
self.current_line.leaves[0]
):
# Special case for async def/for/with statements. `visit_async_stmt`
# adds an `ASYNC` leaf then visits the child def/for/with statement
# nodes. Line yields from those nodes shouldn't treat the former
# `ASYNC` leaf as a complete line.
return
complete_line = self.current_line
self.current_line = Line(mode=self.mode, depth=complete_line.depth + indent)
yield complete_line
def visit_default(self, node: LN) -> Iterator[Line]:
"""Default `visit_*()` implementation. Recurses to children of `node`."""
if isinstance(node, Leaf):
any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
for comment in generate_comments(node, mode=self.mode):
if any_open_brackets:
# any comment within brackets is subject to splitting
self.current_line.append(comment)
elif comment.type == token.COMMENT:
# regular trailing comment
self.current_line.append(comment)
yield from self.line()
else:
# regular standalone comment
yield from self.line()
self.current_line.append(comment)
yield from self.line()
if any_open_brackets:
node.prefix = ""
if node.type not in WHITESPACE:
self.current_line.append(node)
yield from super().visit_default(node)
def visit_test(self, node: Node) -> Iterator[Line]:
"""Visit an `x if y else z` test"""
already_parenthesized = (
node.prev_sibling and node.prev_sibling.type == token.LPAR
)
if not already_parenthesized:
# Similar to logic in wrap_in_parentheses
lpar = Leaf(token.LPAR, "")
rpar = Leaf(token.RPAR, "")
prefix = node.prefix
node.prefix = ""
lpar.prefix = prefix
node.insert_child(0, lpar)
node.append_child(rpar)
yield from self.visit_default(node)
def visit_INDENT(self, node: Leaf) -> Iterator[Line]:
"""Increase indentation level, maybe yield a line."""
# In blib2to3 INDENT never holds comments.
yield from self.line(+1)
yield from self.visit_default(node)
def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
"""Decrease indentation level, maybe yield a line."""
# The current line might still wait for trailing comments. At DEDENT time
# there won't be any (they would be prefixes on the preceding NEWLINE).
# Emit the line then.
yield from self.line()
# While DEDENT has no value, its prefix may contain standalone comments
# that belong to the current indentation level. Get 'em.
yield from self.visit_default(node)
# Finally, emit the dedent.
yield from self.line(-1)
def visit_stmt(
self, node: Node, keywords: set[str], parens: set[str]
) -> Iterator[Line]:
"""Visit a statement.
This implementation is shared for `if`, `while`, `for`, `try`, `except`,
`def`, `with`, `class`, `assert`, and assignments.
The relevant Python language `keywords` for a given statement will be
NAME leaves within it. This methods puts those on a separate line.
`parens` holds a set of string leaf values immediately after which
invisible parens should be put.
"""
normalize_invisible_parens(
node, parens_after=parens, mode=self.mode, features=self.features
)
for child in node.children:
if is_name_token(child) and child.value in keywords:
yield from self.line()
yield from self.visit(child)
def visit_typeparams(self, node: Node) -> Iterator[Line]:
yield from self.visit_default(node)
node.children[0].prefix = ""
def visit_typevartuple(self, node: Node) -> Iterator[Line]:
yield from self.visit_default(node)
node.children[1].prefix = ""
def visit_paramspec(self, node: Node) -> Iterator[Line]:
yield from self.visit_default(node)
node.children[1].prefix = ""
def visit_dictsetmaker(self, node: Node) -> Iterator[Line]:
if Preview.wrap_long_dict_values_in_parens in self.mode:
for i, child in enumerate(node.children):
if i == 0:
continue
if node.children[i - 1].type == token.COLON:
if (
child.type == syms.atom
and child.children[0].type in OPENING_BRACKETS
and not is_walrus_assignment(child)
):
maybe_make_parens_invisible_in_atom(
child,
parent=node,
mode=self.mode,
features=self.features,
remove_brackets_around_comma=False,
)
else:
wrap_in_parentheses(node, child, visible=False)
yield from self.visit_default(node)
def visit_funcdef(self, node: Node) -> Iterator[Line]:
"""Visit function definition."""
yield from self.line()
# Remove redundant brackets around return type annotation.
is_return_annotation = False
for child in node.children:
if child.type == token.RARROW:
is_return_annotation = True
elif is_return_annotation:
if child.type == syms.atom and child.children[0].type == token.LPAR:
if maybe_make_parens_invisible_in_atom(
child,
parent=node,
mode=self.mode,
features=self.features,
remove_brackets_around_comma=False,
):
wrap_in_parentheses(node, child, visible=False)
else:
wrap_in_parentheses(node, child, visible=False)
is_return_annotation = False
for child in node.children:
yield from self.visit(child)
def visit_match_case(self, node: Node) -> Iterator[Line]:
"""Visit either a match or case statement."""
normalize_invisible_parens(
node, parens_after=set(), mode=self.mode, features=self.features
)
yield from self.line()
for child in node.children:
yield from self.visit(child)
def visit_suite(self, node: Node) -> Iterator[Line]:
"""Visit a suite."""
if is_stub_suite(node):
yield from self.visit(node.children[2])
else:
yield from self.visit_default(node)
def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
"""Visit a statement without nested statements."""
prev_type: int | None = None
for child in node.children:
if (prev_type is None or prev_type == token.SEMI) and is_arith_like(child):
wrap_in_parentheses(node, child, visible=False)
prev_type = child.type
if node.parent and node.parent.type in STATEMENT:
if is_parent_function_or_class(node) and is_stub_body(node):
yield from self.visit_default(node)
else:
yield from self.line(+1)
yield from self.visit_default(node)
yield from self.line(-1)
else:
if node.parent and is_stub_suite(node.parent):
node.prefix = ""
yield from self.visit_default(node)
return
yield from self.line()
yield from self.visit_default(node)
def visit_async_stmt(self, node: Node) -> Iterator[Line]:
"""Visit `async def`, `async for`, `async with`."""
yield from self.line()
children = iter(node.children)
for child in children:
yield from self.visit(child)
if child.type == token.ASYNC or child.type == STANDALONE_COMMENT:
# STANDALONE_COMMENT happens when `# fmt: skip` is applied on the async
# line.
break
internal_stmt = next(children)
yield from self.visit(internal_stmt)
def visit_decorators(self, node: Node) -> Iterator[Line]:
"""Visit decorators."""
for child in node.children:
yield from self.line()
yield from self.visit(child)
def visit_power(self, node: Node) -> Iterator[Line]:
for idx, leaf in enumerate(node.children[:-1]):
next_leaf = node.children[idx + 1]
if not isinstance(leaf, Leaf):
continue
value = leaf.value.lower()
if (
leaf.type == token.NUMBER
and next_leaf.type == syms.trailer
# Ensure that we are in an attribute trailer
and next_leaf.children[0].type == token.DOT
# It shouldn't wrap hexadecimal, binary and octal literals
and not value.startswith(("0x", "0b", "0o"))
# It shouldn't wrap complex literals
and "j" not in value
):
wrap_in_parentheses(node, leaf)
remove_await_parens(node, mode=self.mode, features=self.features)
yield from self.visit_default(node)
def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
"""Remove a semicolon and put the other statement on a separate line."""
yield from self.line()
def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
"""End of file. Process outstanding comments and end with a newline."""
yield from self.visit_default(leaf)
yield from self.line()
def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]:
if not self.current_line.bracket_tracker.any_open_brackets():
yield from self.line()
# STANDALONE_COMMENT nodes created by our special handling in
# normalize_fmt_off for comment-only blocks have fmt:off as the first
# line and fmt:on as the last line (each directive on its own line,
# not embedded in other text). These should be appended directly
# without calling visit_default, which would process their prefix and
# lose indentation. Normal STANDALONE_COMMENT nodes go through
# visit_default.
value = leaf.value
lines = value.splitlines()
if len(lines) >= 2:
# Check if first line (after stripping whitespace) is exactly a
# fmt:off directive
first_line = lines[0].lstrip()
first_is_fmt_off = first_line in FMT_OFF
# Check if last line (after stripping whitespace) is exactly a
# fmt:on directive
last_line = lines[-1].lstrip()
last_is_fmt_on = last_line in FMT_ON
is_fmt_off_block = first_is_fmt_off and last_is_fmt_on
else:
is_fmt_off_block = False
if is_fmt_off_block:
# This is a fmt:off/on block from normalize_fmt_off - we still need
# to process any prefix comments (like markdown comments) but append
# the fmt block itself directly to preserve its formatting
# Only process prefix comments if there actually is a prefix with comments
if leaf.prefix and any(
line.strip().startswith("#")
and not _contains_fmt_directive(line.strip())
for line in leaf.prefix.split("\n")
):
for comment in generate_comments(leaf, mode=self.mode):
yield from self.line()
self.current_line.append(comment)
yield from self.line()
# Clear the prefix since we've processed it as comments above
leaf.prefix = ""
self.current_line.append(leaf)
yield from self.line()
else:
# Normal standalone comment - process through visit_default
yield from self.visit_default(leaf)
def visit_factor(self, node: Node) -> Iterator[Line]:
"""Force parentheses between a unary op and a binary power:
-2 ** 8 -> -(2 ** 8)
"""
_operator, operand = node.children
if (
operand.type == syms.power
and len(operand.children) == 3
and operand.children[1].type == token.DOUBLESTAR
):
lpar = Leaf(token.LPAR, "(")
rpar = Leaf(token.RPAR, ")")
index = operand.remove() or 0
node.insert_child(index, Node(syms.atom, [lpar, operand, rpar]))
yield from self.visit_default(node)
def visit_tname(self, node: Node) -> Iterator[Line]:
"""
Add potential parentheses around types in function parameter lists to be made
into real parentheses in case the type hint is too long to fit on a line
Examples:
def foo(a: int, b: float = 7): ...
->
def foo(a: (int), b: (float) = 7): ...
"""
if len(node.children) == 3 and maybe_make_parens_invisible_in_atom(
node.children[2], parent=node, mode=self.mode, features=self.features
):
wrap_in_parentheses(node, node.children[2], visible=False)
yield from self.visit_default(node)
def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
normalize_unicode_escape_sequences(leaf)
if is_docstring(leaf) and not re.search(r"\\\s*\n", leaf.value):
# We're ignoring docstrings with backslash newline escapes because changing
# indentation of those changes the AST representation of the code.
if self.mode.string_normalization:
docstring = normalize_string_prefix(leaf.value)
# We handle string normalization at the end of this method, but since
# what we do right now acts differently depending on quote style (ex.
# see padding logic below), there's a possibility for unstable
# formatting. To avoid a situation where this function formats a
# docstring differently on the second pass, normalize it early.
docstring = normalize_string_quotes(docstring)
else:
docstring = leaf.value
prefix = get_string_prefix(docstring)
docstring = docstring[len(prefix) :] # Remove the prefix
quote_char = docstring[0]
# A natural way to remove the outer quotes is to do:
# docstring = docstring.strip(quote_char)
# but that breaks on """""x""" (which is '""x').
# So we actually need to remove the first character and the next two
# characters but only if they are the same as the first.
quote_len = 1 if docstring[1] != quote_char else 3
docstring = docstring[quote_len:-quote_len]
docstring_started_empty = not docstring
indent = " " * 4 * self.current_line.depth
if is_multiline_string(leaf):
docstring = fix_multiline_docstring(docstring, indent)
else:
docstring = docstring.strip()
has_trailing_backslash = False
if docstring:
# Add some padding if the docstring starts / ends with a quote mark.
if docstring[0] == quote_char:
docstring = " " + docstring
if docstring[-1] == quote_char:
docstring += " "
if docstring[-1] == "\\":
backslash_count = len(docstring) - len(docstring.rstrip("\\"))
if backslash_count % 2:
# Odd number of tailing backslashes, add some padding to
# avoid escaping the closing string quote.
docstring += " "
has_trailing_backslash = True
elif not docstring_started_empty:
docstring = " "
# We could enforce triple quotes at this point.
quote = quote_char * quote_len
# It's invalid to put closing single-character quotes on a new line.
if quote_len == 3:
# We need to find the length of the last line of the docstring
# to find if we can add the closing quotes to the line without
# exceeding the maximum line length.
# If docstring is one line, we don't put the closing quotes on a
# separate line because it looks ugly (#3320).
lines = docstring.splitlines()
last_line_length = len(lines[-1]) if docstring else 0
# If adding closing quotes would cause the last line to exceed
# the maximum line length, and the closing quote is not
# prefixed by a newline then put a line break before
# the closing quotes
if (
len(lines) > 1
and last_line_length + quote_len > self.mode.line_length
and len(indent) + quote_len <= self.mode.line_length
and not has_trailing_backslash
):
if leaf.value[-1 - quote_len] == "\n":
leaf.value = prefix + quote + docstring + quote
else:
leaf.value = prefix + quote + docstring + "\n" + indent + quote
else:
leaf.value = prefix + quote + docstring + quote
else:
leaf.value = prefix + quote + docstring + quote
if self.mode.string_normalization and leaf.type == token.STRING:
leaf.value = normalize_string_prefix(leaf.value)
leaf.value = normalize_string_quotes(leaf.value)
yield from self.visit_default(leaf)
    def visit_NUMBER(self, leaf: Leaf) -> Iterator[Line]:
        """Normalize the numeric literal's text in place, then emit the leaf
        through the default visitor."""
        normalize_numeric_literal(leaf)
        yield from self.visit_default(leaf)
    def visit_atom(self, node: Node) -> Iterator[Line]:
        """Visit any atom.

        For a three-child bracketed atom — ``[x]`` or ``{x}``, i.e. a one-item
        list or set literal — redundant parentheses around the single inner
        element may be made invisible before default emission.
        """
        if len(node.children) == 3:
            first = node.children[0]
            last = node.children[-1]
            if (first.type == token.LSQB and last.type == token.RSQB) or (
                first.type == token.LBRACE and last.type == token.RBRACE
            ):
                # Lists or sets of one item
                maybe_make_parens_invisible_in_atom(
                    node.children[1],
                    parent=node,
                    mode=self.mode,
                    features=self.features,
                )
        yield from self.visit_default(node)
    def visit_fstring(self, node: Node) -> Iterator[Line]:
        """Collapse an f-string node into a single STRING leaf and emit it.

        F-strings are currently emitted verbatim rather than formatted or
        split internally (see the TODO sketch under ``visit_tstring``).
        """
        # currently we don't want to format and split f-strings at all.
        string_leaf = fstring_tstring_to_string(node)
        node.replace(string_leaf)
        # NOTE(review): `node` appears to keep its children after replace(),
        # so the replacement fields remain inspectable here — confirm against
        # blib2to3 semantics.
        if "\\" in string_leaf.value and any(
            "\\" in str(child)
            for child in node.children
            if child.type == syms.fstring_replacement_field
        ):
            # string normalization doesn't account for nested quotes,
            # causing breakages. skip normalization when nested quotes exist
            yield from self.visit_default(string_leaf)
            return
        yield from self.visit_STRING(string_leaf)
    def visit_tstring(self, node: Node) -> Iterator[Line]:
        """Collapse a t-string node into a single STRING leaf and emit it.

        Like f-strings, t-strings are currently emitted verbatim rather than
        formatted or split internally; normalization is skipped when a
        backslash appears inside a replacement field (nested-quote hazard).
        """
        # currently we don't want to format and split t-strings at all.
        string_leaf = fstring_tstring_to_string(node)
        node.replace(string_leaf)
        if "\\" in string_leaf.value and any(
            "\\" in str(child)
            for child in node.children
            if child.type == syms.fstring_replacement_field
        ):
            # string normalization doesn't account for nested quotes,
            # causing breakages. skip normalization when nested quotes exist
            yield from self.visit_default(string_leaf)
            return
        yield from self.visit_STRING(string_leaf)

        # TODO: Uncomment Implementation to format f-string children
        # fstring_start = node.children[0]
        # fstring_end = node.children[-1]
        # assert isinstance(fstring_start, Leaf)
        # assert isinstance(fstring_end, Leaf)
        # quote_char = fstring_end.value[0]
        # quote_idx = fstring_start.value.index(quote_char)
        # prefix, quote = (
        #     fstring_start.value[:quote_idx],
        #     fstring_start.value[quote_idx:]
        # )
        # if not is_docstring(node, self.mode):
        #     prefix = normalize_string_prefix(prefix)
        # assert quote == fstring_end.value
        # is_raw_fstring = "r" in prefix or "R" in prefix
        # middles = [
        #     leaf
        #     for leaf in node.leaves()
        #     if leaf.type == token.FSTRING_MIDDLE
        # ]
        # if self.mode.string_normalization:
        #     middles, quote = normalize_fstring_quotes(quote, middles, is_raw_fstring)
        # fstring_start.value = prefix + quote
        # fstring_end.value = quote
        # yield from self.visit_default(node)
    def visit_comp_for(self, node: Node) -> Iterator[Line]:
        """Visit a comprehension's ``for`` clause.

        Under the ``wrap_comprehension_in`` preview style, invisible parens
        are normalized after the ``in`` keyword before default emission.
        """
        if Preview.wrap_comprehension_in in self.mode:
            normalize_invisible_parens(
                node, parens_after={"in"}, mode=self.mode, features=self.features
            )
        yield from self.visit_default(node)
    def visit_old_comp_for(self, node: Node) -> Iterator[Line]:
        """Old-style comprehension ``for`` clauses share visit_comp_for's logic."""
        yield from self.visit_comp_for(node)
def __post_init__(self) -> None:
"""You are in a twisty little maze of passages."""
self.current_line = Line(mode=self.mode)
v = self.visit_stmt
Ø: set[str] = set()
self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
self.visit_if_stmt = partial(
v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
)
self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
self.visit_try_stmt = partial(
v, keywords={"try", "except", "else", "finally"}, parens=Ø
)
self.visit_except_clause = partial(v, keywords={"except"}, parens={"except"})
self.visit_with_stmt = partial(v, keywords={"with"}, parens={"with"})
self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
self.visit_async_funcdef = self.visit_async_stmt
self.visit_decorated = self.visit_decorators
# PEP 634
self.visit_match_stmt = self.visit_match_case
self.visit_case_block = self.visit_match_case
self.visit_guard = partial(v, keywords=Ø, parens={"if"})
def _hugging_power_ops_line_to_string(
    line: Line,
    features: Collection[Feature],
    mode: Mode,
) -> str | None:
    """Render *line* with power operators hugged, or None if that transform
    raises CannotTransform."""
    rendered: str | None = None
    try:
        rendered = line_to_string(next(hug_power_op(line, features, mode)))
    except CannotTransform:
        pass
    return rendered
def transform_line(
    line: Line, mode: Mode, features: Collection[Feature] = ()
) -> Iterator[Line]:
    """Transform a `line`, potentially splitting it into many lines.

    They should fit in the allotted `line_length` but might not be able to.

    `features` are syntactical features that may be used in the output.

    If every applicable transformer raises CannotTransform, the original
    line is yielded back unchanged.
    """
    if line.is_comment:
        # Standalone comment lines are passed through untouched.
        yield line
        return

    line_str = line_to_string(line)

    # We need the line string when power operators are hugging to determine if we should
    # split the line. Default to line_str, if no power operator are present on the line.
    line_str_hugging_power_ops = (
        _hugging_power_ops_line_to_string(line, features, mode) or line_str
    )

    ll = mode.line_length
    sn = mode.string_normalization
    string_merge = StringMerger(ll, sn)
    string_paren_strip = StringParenStripper(ll, sn)
    string_split = StringSplitter(ll, sn)
    string_paren_wrap = StringParenWrapper(ll, sn)

    transformers: list[Transformer]
    if (
        not line.contains_uncollapsable_type_comments()
        and not line.should_split_rhs
        and not line.magic_trailing_comma
        and (
            is_line_short_enough(line, mode=mode, line_str=line_str_hugging_power_ops)
            or line.contains_unsplittable_type_ignore()
        )
        and not (line.inside_brackets and line.contains_standalone_comments())
        and not line.contains_implicit_multiline_string_with_comments()
    ):
        # Only apply basic string preprocessing, since lines shouldn't be split here.
        if Preview.string_processing in mode:
            transformers = [string_merge, string_paren_strip]
        else:
            transformers = []
    elif line.is_def and not should_split_funcdef_with_rhs(line, mode):
        transformers = [left_hand_split]
    else:

        def _rhs(
            self: object, line: Line, features: Collection[Feature], mode: Mode
        ) -> Iterator[Line]:
            """Wraps calls to `right_hand_split`.

            The calls increasingly `omit` right-hand trailers (bracket pairs with
            content), meaning the trailers get glued together to split on another
            bracket pair instead.
            """
            for omit in generate_trailers_to_omit(line, mode.line_length):
                lines = list(right_hand_split(line, mode, features, omit=omit))
                # Note: this check is only able to figure out if the first line of the
                # *current* transformation fits in the line length. This is true only
                # for simple cases. All others require running more transforms via
                # `transform_line()`. This check doesn't know if those would succeed.
                if is_line_short_enough(lines[0], mode=mode):
                    yield from lines
                    return

            # All splits failed, best effort split with no omits.
            # This mostly happens to multiline strings that are by definition
            # reported as not fitting a single line, as well as lines that contain
            # trailing commas (those have to be exploded).
            yield from right_hand_split(line, mode, features=features)

        # HACK: nested functions (like _rhs) compiled by mypyc don't retain their
        # __name__ attribute which is needed in `run_transformer` further down.
        # Unfortunately a nested class breaks mypyc too. So a class must be created
        # via type ... https://github.com/mypyc/mypyc/issues/884
        rhs = type("rhs", (), {"__call__": _rhs})()

        if Preview.string_processing in mode:
            if line.inside_brackets:
                transformers = [
                    string_merge,
                    string_paren_strip,
                    string_split,
                    delimiter_split,
                    standalone_comment_split,
                    string_paren_wrap,
                    rhs,
                ]
            else:
                transformers = [
                    string_merge,
                    string_paren_strip,
                    string_split,
                    string_paren_wrap,
                    rhs,
                ]
        else:
            if line.inside_brackets:
                transformers = [delimiter_split, standalone_comment_split, rhs]
            else:
                transformers = [rhs]
    # It's always safe to attempt hugging of power operations and pretty much every line
    # could match.
    transformers.append(hug_power_op)

    for transform in transformers:
        # We are accumulating lines in `result` because we might want to abort
        # mission and return the original line in the end, or attempt a different
        # split altogether.
        try:
            result = run_transformer(line, transform, mode, features, line_str=line_str)
        except CannotTransform:
            continue
        else:
            yield from result
            break
    else:
        # for/else: no transformer succeeded — emit the line as-is.
        yield line
def should_split_funcdef_with_rhs(line: Line, mode: Mode) -> bool:
    """Return True when a funcdef's return-type annotation carries a magic
    trailing comma, in which case a right-hand split must run first to
    respect that comma."""
    # Collect the leaves strictly between `->` and the terminating `:`.
    annotation_leaves: list[Leaf] = []
    collecting = False
    for leaf in line.leaves:
        if leaf.type == token.COLON:
            collecting = False
        if collecting:
            annotation_leaves.append(leaf)
        if leaf.type == token.RARROW:
            collecting = True

    # Rebuild just the return type as its own Line; we duplicate a bit of
    # `bracket_split_build_line` here because that helper would mess with
    # whitespace.
    probe = Line(mode=line.mode, depth=line.depth)
    tracked = get_leaves_inside_matching_brackets(annotation_leaves)
    for leaf in annotation_leaves:
        probe.append(leaf, preformatted=True, track_bracket=id(leaf) in tracked)

    # we could also return true if the line is too long, and the return type is
    # longer than the param list. Or if `should_split_rhs` returns True.
    return probe.magic_trailing_comma is not None
|
LineGenerator
|
python
|
django__django
|
tests/view_tests/models.py
|
{
"start": 609,
"end": 681
}
|
class ____(BaseArticle):
date_created = models.DateTimeField()
|
Article
|
python
|
huggingface__transformers
|
src/transformers/integrations/tensor_parallel.py
|
{
"start": 20747,
"end": 22612
}
|
class ____(TensorParallelLayer):
"""
Simple class used to define the hooks to add to a layer when we just want to gather the outputs
"""
def __init__(
self,
input_layouts: Placement | None = None,
output_layouts: Placement | None = None,
use_local_output: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.input_layouts = (input_layouts or Replicate(),)
self.output_layouts = output_layouts
self.desired_input_layouts = (Replicate(),)
self.use_local_output = use_local_output
@staticmethod
def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh):
mod.expert_parallel_group = device_mesh.get_group()
if inputs and isinstance(inputs[0], DTensor):
inputs = inputs[0].to_local()
return inputs
@staticmethod
def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh):
if isinstance(outputs, torch.Tensor):
dist.all_reduce(outputs, op=dist.ReduceOp.SUM, async_op=False)
else:
dist.all_reduce(outputs[0], op=dist.ReduceOp.SUM, async_op=False)
return outputs
def shard_tensor(
self,
param,
param_type=None,
param_casting_dtype=None,
to_contiguous=None,
rank=None,
device_mesh=None,
tensor_idx=None,
):
shard = [Replicate()]
parameter = param[...].to(param_casting_dtype)
self.shard = shard
return parameter, shard
def prepare_module_tp(self, module: nn.Module, device_mesh) -> nn.Module:
distribute_module(
module,
device_mesh,
partial(self._prepare_input_fn, None, None),
partial(self._prepare_output_fn, None, None),
)
|
GatherParallel
|
python
|
simonw__datasette
|
datasette/facets.py
|
{
"start": 11294,
"end": 18285
}
|
class ____(Facet):
type = "array"
def _is_json_array_of_strings(self, json_string):
try:
array = json.loads(json_string)
except ValueError:
return False
for item in array:
if not isinstance(item, str):
return False
return True
async def suggest(self):
columns = await self.get_columns(self.sql, self.params)
suggested_facets = []
already_enabled = [c["config"]["simple"] for c in self.get_configs()]
for column in columns:
if column in already_enabled:
continue
# Is every value in this column either null or a JSON array?
suggested_facet_sql = """
with limited as (select * from ({sql}) limit {suggest_consider})
select distinct json_type({column})
from limited
where {column} is not null and {column} != ''
""".format(
column=escape_sqlite(column),
sql=self.sql,
suggest_consider=self.suggest_consider,
)
try:
results = await self.ds.execute(
self.database,
suggested_facet_sql,
self.params,
truncate=False,
custom_time_limit=self.ds.setting("facet_suggest_time_limit_ms"),
log_sql_errors=False,
)
types = tuple(r[0] for r in results.rows)
if types in (("array",), ("array", None)):
# Now check that first 100 arrays contain only strings
first_100 = [
v[0]
for v in await self.ds.execute(
self.database,
(
"select {column} from ({sql}) "
"where {column} is not null "
"and {column} != '' "
"and json_array_length({column}) > 0 "
"limit 100"
).format(column=escape_sqlite(column), sql=self.sql),
self.params,
truncate=False,
custom_time_limit=self.ds.setting(
"facet_suggest_time_limit_ms"
),
log_sql_errors=False,
)
]
if first_100 and all(
self._is_json_array_of_strings(r) for r in first_100
):
suggested_facets.append(
{
"name": column,
"type": "array",
"toggle_url": self.ds.absolute_url(
self.request,
self.ds.urls.path(
path_with_added_args(
self.request, {"_facet_array": column}
)
),
),
}
)
except (QueryInterrupted, sqlite3.OperationalError):
continue
return suggested_facets
async def facet_results(self):
# self.configs should be a plain list of columns
facet_results = []
facets_timed_out = []
facet_size = self.get_facet_size()
for source_and_config in self.get_configs():
config = source_and_config["config"]
source = source_and_config["source"]
column = config.get("column") or config["simple"]
# https://github.com/simonw/datasette/issues/448
facet_sql = """
with inner as ({sql}),
deduped_array_items as (
select
distinct j.value,
inner.*
from
json_each([inner].{col}) j
join inner
)
select
value as value,
count(*) as count
from
deduped_array_items
group by
value
order by
count(*) desc, value limit {limit}
""".format(
col=escape_sqlite(column),
sql=self.sql,
limit=facet_size + 1,
)
try:
facet_rows_results = await self.ds.execute(
self.database,
facet_sql,
self.params,
truncate=False,
custom_time_limit=self.ds.setting("facet_time_limit_ms"),
)
facet_results_values = []
facet_results.append(
{
"name": column,
"type": self.type,
"results": facet_results_values,
"hideable": source != "metadata",
"toggle_url": self.ds.urls.path(
path_with_removed_args(
self.request, {"_facet_array": column}
)
),
"truncated": len(facet_rows_results) > facet_size,
}
)
facet_rows = facet_rows_results.rows[:facet_size]
pairs = self.get_querystring_pairs()
for row in facet_rows:
value = str(row["value"])
selected = (f"{column}__arraycontains", value) in pairs
if selected:
toggle_path = path_with_removed_args(
self.request, {f"{column}__arraycontains": value}
)
else:
toggle_path = path_with_added_args(
self.request, {f"{column}__arraycontains": value}
)
facet_results_values.append(
{
"value": value,
"label": value,
"count": row["count"],
"toggle_url": self.ds.absolute_url(
self.request, toggle_path
),
"selected": selected,
}
)
except QueryInterrupted:
facets_timed_out.append(column)
return facet_results, facets_timed_out
|
ArrayFacet
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/fastapi/relationships/tutorial001_py310.py
|
{
"start": 228,
"end": 387
}
|
class ____(TeamBase, table=True):
id: int | None = Field(default=None, primary_key=True)
heroes: list["Hero"] = Relationship(back_populates="team")
|
Team
|
python
|
jina-ai__jina
|
tests/unit/orchestrate/flow/flow-construct/test_flow_except.py
|
{
"start": 1383,
"end": 4189
}
|
class ____(Executor):
@requests
def foo(self, **kwargs):
raise NotImplementedError
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_except_with_shards(mocker, protocol):
def validate(req):
assert req.status.code == jina_pb2.StatusProto.ERROR
err_routes = [
r.status for r in req.routes if r.status.code == jina_pb2.StatusProto.ERROR
]
assert len(err_routes) == 1
assert err_routes[0].exception.executor == 'DummyCrafterExcept'
assert err_routes[0].exception.name == 'ZeroDivisionError'
f = (
Flow(protocol=protocol)
.add(name='r1')
.add(name='r2', uses=DummyCrafterExcept, shards=3)
.add(name='r3', uses=NotImplementedExecutor)
)
on_error_mock = mocker.Mock()
# always test two times, make sure the flow still works after it fails on the first
with f:
f.index([Document(text='abbcs'), Document(text='efgh')], on_error=on_error_mock)
f.index([Document(text='abbcs'), Document(text='efgh')], on_error=on_error_mock)
validate_callback(on_error_mock, validate)
@pytest.mark.parametrize('protocol', ['grpc', 'websocket', 'http'])
def test_on_error_callback(mocker, protocol):
def validate(x, *args):
x = x.routes
assert len(x) == 3 # gateway, r1, r3, gateway
badones = [r for r in x if r.status.code == jina_pb2.StatusProto.ERROR]
assert badones[0].executor == 'r3'
f = (
Flow(protocol=protocol)
.add(name='r1')
.add(name='r3', uses=NotImplementedExecutor)
)
on_error_mock = mocker.Mock()
with f:
f.index(
[Document(text='abbcs'), Document(text='efgh')],
on_error=on_error_mock,
)
validate_callback(on_error_mock, validate)
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_no_error_callback(mocker, protocol):
f = Flow(protocol=protocol).add(name='r1').add(name='r3')
on_error_mock = mocker.Mock()
with f:
results = f.index(
[Document(text='abbcs'), Document(text='efgh')],
on_error=on_error_mock,
)
assert len(results) > 0
on_error_mock.assert_not_called()
@pytest.mark.parametrize('protocol', ['websocket', 'http', 'grpc'])
def test_flow_on_callback(protocol):
f = Flow(protocol=protocol).add()
hit = []
def f1(*args):
hit.append('done')
def f2(*args):
hit.append('error')
def f3(*args):
hit.append('always')
with f:
f.index(
from_ndarray(np.random.random([10, 10])),
on_done=f1,
on_error=f2,
on_always=f3,
)
assert hit == ['done', 'always']
|
NotImplementedExecutor
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/legend_handler.py
|
{
"start": 1394,
"end": 6179
}
|
class ____:
"""
A base class for default legend handlers.
The derived classes are meant to override *create_artists* method, which
has the following signature::
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
The overridden method needs to create artists of the given
transform that fits in the given dimension (xdescent, ydescent,
width, height) that are scaled by fontsize if necessary.
"""
def __init__(self, xpad=0., ypad=0., update_func=None):
"""
Parameters
----------
xpad : float, optional
Padding in x-direction.
ypad : float, optional
Padding in y-direction.
update_func : callable, optional
Function for updating the legend handler properties from another
legend handler, used by `~HandlerBase.update_prop`.
"""
self._xpad, self._ypad = xpad, ypad
self._update_prop_func = update_func
def _update_prop(self, legend_handle, orig_handle):
if self._update_prop_func is None:
self._default_update_prop(legend_handle, orig_handle)
else:
self._update_prop_func(legend_handle, orig_handle)
def _default_update_prop(self, legend_handle, orig_handle):
legend_handle.update_from(orig_handle)
def update_prop(self, legend_handle, orig_handle, legend):
self._update_prop(legend_handle, orig_handle)
legend._set_artist_props(legend_handle)
legend_handle.set_clip_box(None)
legend_handle.set_clip_path(None)
def adjust_drawing_area(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
):
xdescent = xdescent - self._xpad * fontsize
ydescent = ydescent - self._ypad * fontsize
width = width - self._xpad * fontsize
height = height - self._ypad * fontsize
return xdescent, ydescent, width, height
def legend_artist(self, legend, orig_handle,
fontsize, handlebox):
"""
Return the artist that this HandlerBase generates for the given
original artist/handle.
Parameters
----------
legend : `~matplotlib.legend.Legend`
The legend for which these legend artists are being created.
orig_handle : :class:`matplotlib.artist.Artist` or similar
The object for which these legend artists are being created.
fontsize : int
The fontsize in pixels. The artists being created should
be scaled according to the given fontsize.
handlebox : `~matplotlib.offsetbox.OffsetBox`
The box which has been created to hold this legend entry's
artists. Artists created in the `legend_artist` method must
be added to this handlebox inside this method.
"""
xdescent, ydescent, width, height = self.adjust_drawing_area(
legend, orig_handle,
handlebox.xdescent, handlebox.ydescent,
handlebox.width, handlebox.height,
fontsize)
artists = self.create_artists(legend, orig_handle,
xdescent, ydescent, width, height,
fontsize, handlebox.get_transform())
# create_artists will return a list of artists.
for a in artists:
handlebox.add_artist(a)
# we only return the first artist
return artists[0]
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
"""
Return the legend artists generated.
Parameters
----------
legend : `~matplotlib.legend.Legend`
The legend for which these legend artists are being created.
orig_handle : `~matplotlib.artist.Artist` or similar
The object for which these legend artists are being created.
xdescent, ydescent, width, height : int
The rectangle (*xdescent*, *ydescent*, *width*, *height*) that the
legend artists being created should fit within.
fontsize : int
The fontsize in pixels. The legend artists being created should
be scaled according to the given fontsize.
trans : `~matplotlib.transforms.Transform`
The transform that is applied to the legend artists being created.
Typically from unit coordinates in the handler box to screen
coordinates.
"""
raise NotImplementedError('Derived must override')
|
HandlerBase
|
python
|
wandb__wandb
|
tools/cloud_tool.py
|
{
"start": 495,
"end": 1097
}
|
class ____:
instance_name: str = "sdk-compute"
num_nodes: int = 1
machine_type: str = "n1-highcpu-4"
maintenance_policy: str = "TERMINATE"
disk_size: str = "10GB"
disk_type: str = "pd-ssd"
# accelerator_type: str = "nvidia-tesla-t4"
# accelerator_count: int = 1
container_registry: str = "gcr.io"
gcp_project_id: str = "wandb-client-cicd"
project: str = "ubuntu-os-cloud"
vm_image_name: str = "ubuntu-2004-focal-v20221018"
python_version: str = "3.8"
git_branch: str = "main"
test_args: str = "--all"
wandb_version: str = "0.13.6"
|
GCEConfig
|
python
|
dask__dask
|
dask/array/tests/test_array_core.py
|
{
"start": 85972,
"end": 183963
}
|
class ____:
def __init__(self, x):
self.x = x
self.dtype = x.dtype
self.shape = x.shape
self.ndim = len(x.shape)
def __getitem__(self, i):
return self.x[i]
@pytest.mark.parametrize(
"x,chunks",
[
(np.arange(25).reshape((5, 5)), (5, 5)),
(np.arange(25).reshape((5, 5)), -1),
(np.array([[1]]), 1),
(np.array(1), 1),
],
)
@pytest.mark.parametrize("inline_array", [True, False])
def test_from_array_tasks_always_call_getter(x, chunks, inline_array):
dx = da.from_array(
MyArray(x), chunks=chunks, asarray=False, inline_array=inline_array
)
assert_eq(x, dx)
@pytest.mark.parametrize(
"x",
[
np.array([[1, 2], [3, 4]]),
np.ma.array([[1, 2], [3, 4]], mask=[[True, False], [False, False]]),
np.ma.array([1], mask=[True]),
np.ma.array([1.5], mask=[True]),
np.ma.array(1, mask=True),
np.ma.array(1.5, mask=True),
],
)
def test_from_array_ndarray_onechunk(x):
"""ndarray with a single chunk produces a minimal single key dict"""
dx = da.from_array(x, chunks=-1)
assert_eq(x, dx)
assert len(dx.dask) == 1
assert dx.dask[(dx.name,) + (0,) * dx.ndim] is not x
assert_eq(dx.dask[(dx.name,) + (0,) * dx.ndim], x)
def test_from_array_ndarray_getitem():
"""For ndarray, don't use getter / getter_nofancy; use the cleaner
operator.getitem"""
x = np.array([[1, 2], [3, 4]])
dx = da.from_array(x, chunks=(1, 2))
assert_eq(x, dx)
assert (dx.dask[dx.name, 0, 0] == np.array([[1, 2]])).all()
@pytest.mark.parametrize("x", [[1, 2], (1, 2), memoryview(b"abc")])
def test_from_array_list(x):
"""Lists, tuples, and memoryviews are automatically converted to ndarray"""
dx = da.from_array(x, chunks=-1)
assert_eq(np.array(x), dx)
assert isinstance(dx.dask[dx.name, 0], np.ndarray)
dx = da.from_array(x, chunks=1)
assert_eq(np.array(x), dx)
assert dx.dask[dx.name, 0][0] == x[0]
# On MacOS Python 3.9, the order of the np.ScalarType tuple randomly changes across
# interpreter restarts, thus causing pytest-xdist failures; setting PYTHONHASHSEED does
# not help
@pytest.mark.parametrize(
"type_", sorted((t for t in np.ScalarType if t is not memoryview), key=str)
)
def test_from_array_scalar(type_):
"""Python and numpy scalars are automatically converted to ndarray"""
if type_ == np.datetime64:
x = np.datetime64("2000-01-01")
else:
x = type_(1)
dx = da.from_array(x, chunks=-1)
assert_eq(np.array(x), dx)
assert isinstance(
dx.dask[dx.name,],
np.ndarray,
)
@pytest.mark.parametrize("asarray,cls", [(True, np.ndarray), (False, np.matrix)])
@pytest.mark.parametrize("inline_array", [True, False])
@pytest.mark.filterwarnings("ignore:the matrix subclass")
def test_from_array_no_asarray(asarray, cls, inline_array):
def assert_chunks_are_of_type(x):
chunks = compute_as_if_collection(Array, x.dask, x.__dask_keys__())
# If it's a tuple of tuples we want to concat, but if it's a tuple
# of 1d arrays, we just want to iterate directly
for c in concat(chunks) if isinstance(chunks[0], tuple) else chunks:
assert type(c) is cls
x = np.matrix(np.arange(100).reshape((10, 10)))
dx = da.from_array(x, chunks=(5, 5), asarray=asarray, inline_array=inline_array)
assert_chunks_are_of_type(dx)
assert_chunks_are_of_type(dx[0:5])
assert_chunks_are_of_type(dx[0:5][:, 0])
@pytest.mark.parametrize("wrap", [True, False])
@pytest.mark.parametrize("inline_array", [True, False])
def test_from_array_getitem(wrap, inline_array):
x = np.arange(10)
called = False
def my_getitem(a, ind):
nonlocal called
called = True
return a[ind]
xx = MyArray(x) if wrap else x
y = da.from_array(xx, chunks=(5,), getitem=my_getitem, inline_array=inline_array)
assert_eq(x, y)
# If we have a raw numpy array we eagerly slice, so custom getters
# are not called.
assert called is wrap
def test_from_array_minus_one():
x = np.arange(10)
y = da.from_array(x, -1)
assert y.chunks == ((10,),)
assert_eq(x, y)
@pytest.mark.parametrize("chunks", [-1, 2])
def test_array_copy_noop(chunks):
# Regression test for https://github.com/dask/dask/issues/9533
# Which is a revert of the solution for https://github.com/dask/dask/issues/3751
x = np.arange(10)
y = da.from_array(x, chunks=chunks)
y_c = y.copy()
assert y.name == y_c.name
def test_from_array_dask_array():
x = np.array([[1, 2], [3, 4]])
dx = da.from_array(x, chunks=(1, 2))
with pytest.raises(ValueError):
da.from_array(dx)
def test_from_array_dask_collection_warns():
class CustomCollection(np.ndarray):
def __dask_graph__(self):
return {"bar": 1}
x = CustomCollection([1, 2, 3])
with pytest.warns(UserWarning):
da.from_array(x)
# Ensure da.array warns too
with pytest.warns(UserWarning):
da.array(x)
def test_from_array_inline():
class MyArray(np.ndarray):
pass
a = np.array([1, 2, 3]).view(MyArray)
dsk = dict(da.from_array(a, name="my-array", inline_array=False).dask)
assert dsk["original-my-array"] is not a
assert_eq(dsk["original-my-array"], a)
dsk = dict(da.from_array(a, name="my-array", inline_array=True).dask)
assert "original-my-array" not in dsk
@pytest.mark.parametrize("asarray", [da.asarray, da.asanyarray])
def test_asarray(asarray):
assert_eq(asarray([1, 2, 3]), np.asarray([1, 2, 3]))
x = asarray([1, 2, 3])
assert asarray(x) is x
y = [x[0], 2, x[2]]
assert_eq(asarray(y), x)
@pytest.mark.parametrize("asarray", [da.asarray, da.asanyarray])
def test_asarray_array_dtype(asarray):
# test array input
x = asarray([1, 2])
assert_eq(asarray(x, dtype=da.float32), np.asarray(x, dtype=np.float32))
# dask->dask
x = asarray(x, dtype=da.float64)
assert x.dtype == da.float64
x = asarray(x, dtype=da.int32)
assert x.dtype == da.int32
x = asarray(x)
assert x.dtype == da.int32
# Test explicit null dtype. astype(None) converts to float!
x = asarray(x, dtype=None)
assert x.dtype == da.int32
# non-dask->dask
x = asarray(np.asarray([1, 2], dtype=np.int8))
assert x.dtype == da.int8
x = asarray(np.asarray([1, 2], dtype=np.int8), dtype=None)
assert x.dtype == da.int8
x = asarray(np.asarray([1, 2], dtype=np.int8), dtype=da.float64)
assert x.dtype == da.float64
@pytest.mark.parametrize("asarray", [da.asarray, da.asanyarray])
def test_asarray_dask_dataframe(asarray):
# https://github.com/dask/dask/issues/3885
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
s = dd.from_pandas(pd.Series([1, 2, 3, 4]), 2)
result = asarray(s)
expected = s.values
assert_eq(result, expected)
df = s.to_frame(name="s")
result = asarray(df)
expected = df.values
assert_eq(result, expected)
@pytest.mark.parametrize("asarray", [da.asarray, da.asanyarray])
@pytest.mark.parametrize("inline_array", [True, False])
def test_asarray_h5py(asarray, inline_array):
h5py = pytest.importorskip("h5py")
with tmpfile(".hdf5") as fn:
with h5py.File(fn, mode="a") as f:
d = f.create_dataset("/x", shape=(2, 2), dtype=float)
x = asarray(d, inline_array=inline_array)
# Check for the array in the dsk
dsk = dict(x.dask)
assert (d in dsk.values()) is not inline_array
assert not any(isinstance(v, np.ndarray) for v in dsk.values())
def test_asarray_chunks():
with dask.config.set({"array.chunk-size": "100 B"}):
x = np.ones(1000)
d = da.asarray(x)
assert d.npartitions > 1
@pytest.mark.filterwarnings("ignore:the matrix subclass")
def test_asanyarray():
x = np.matrix([1, 2, 3])
dx = da.asanyarray(x)
assert dx.numblocks == (1, 1)
chunks = compute_as_if_collection(Array, dx.dask, dx.__dask_keys__())
assert isinstance(chunks[0][0], np.matrix)
assert da.asanyarray(dx) is dx
def test_asanyarray_dataframe():
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2)
x = np.asanyarray(df)
dx = da.asanyarray(ddf)
assert isinstance(dx, da.Array)
assert_eq(x, dx)
x = np.asanyarray(df.x)
dx = da.asanyarray(ddf.x)
assert isinstance(dx, da.Array)
assert_eq(x, dx)
def test_asanyarray_datetime64():
x = np.array(["2000-01-01"], dtype="datetime64")
dx = da.asanyarray(x)
assert isinstance(dx, da.Array)
assert_eq(x, dx)
def test_from_func():
x = np.arange(10)
f = lambda n: n * x
d = from_func(f, (10,), x.dtype, kwargs={"n": 2})
assert d.shape == x.shape
assert d.dtype == x.dtype
assert_eq(d, 2 * x)
assert same_keys(d, from_func(f, (10,), x.dtype, kwargs={"n": 2}))
def test_concatenate3_2():
x = np.array([1, 2])
assert_eq(concatenate3([x, x, x]), np.array([1, 2, 1, 2, 1, 2]))
x = np.array([[1, 2]])
assert (
concatenate3([[x, x, x], [x, x, x]])
== np.array([[1, 2, 1, 2, 1, 2], [1, 2, 1, 2, 1, 2]])
).all()
assert (
concatenate3([[x, x], [x, x], [x, x]])
== np.array([[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2]])
).all()
x = np.arange(12).reshape((2, 2, 3))
assert_eq(
concatenate3([[[x, x, x], [x, x, x]], [[x, x, x], [x, x, x]]]),
np.array(
[
[
[0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5],
[0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5],
],
[
[6, 7, 8, 6, 7, 8, 6, 7, 8],
[9, 10, 11, 9, 10, 11, 9, 10, 11],
[6, 7, 8, 6, 7, 8, 6, 7, 8],
[9, 10, 11, 9, 10, 11, 9, 10, 11],
],
[
[0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5],
[0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5],
],
[
[6, 7, 8, 6, 7, 8, 6, 7, 8],
[9, 10, 11, 9, 10, 11, 9, 10, 11],
[6, 7, 8, 6, 7, 8, 6, 7, 8],
[9, 10, 11, 9, 10, 11, 9, 10, 11],
],
]
),
)
def test_map_blocks3():
x = np.arange(10)
y = np.arange(10) * 2
d = da.from_array(x, chunks=5)
e = da.from_array(y, chunks=5)
assert_eq(
da.core.map_blocks(lambda a, b: a + 2 * b, d, e, dtype=d.dtype), x + 2 * y
)
z = np.arange(100).reshape((10, 10))
f = da.from_array(z, chunks=5)
func = lambda a, b: a + 2 * b
res = da.core.map_blocks(func, d, f, dtype=d.dtype)
assert_eq(res, x + 2 * z)
assert same_keys(da.core.map_blocks(func, d, f, dtype=d.dtype), res)
assert_eq(da.map_blocks(func, f, d, dtype=d.dtype), z + 2 * x)
def test_from_array_with_missing_chunks():
x = np.random.default_rng().standard_normal((2, 4, 3))
d = da.from_array(x, chunks=(None, 2, None))
assert d.chunks == da.from_array(x, chunks=(2, 2, 3)).chunks
@pytest.mark.parametrize("func", [normalize_chunks, normalize_chunks_cached])
def test_normalize_chunks(func):
    """normalize_chunks (and its cached twin) expand shorthand chunk specs to tuples."""
    assert func(3, (4, 6)) == ((3, 1), (3, 3))
    assert func(((3, 3), (8,)), (6, 8)) == ((3, 3), (8,))
    assert func((4, 5), (9,)) == ((4, 5),)
    assert func((4, 5), (9, 9)) == ((4, 4, 1), (5, 4))
    # -1 / None mean "one chunk spanning the whole axis"
    assert func(-1, (5, 5)) == ((5,), (5,))
    assert func((3, -1), (5, 5)) == ((3, 2), (5,))
    assert func((3, None), (5, 5)) == ((3, 2), (5,))
    if func is normalize_chunks:
        # dict specs are unhashable, so the cached variant cannot accept them
        assert func({0: 3}, (5, 5)) == ((3, 2), (5,))
    assert func([[2, 2], [3, 3]]) == ((2, 2), (3, 3))
    assert func(10, (30, 5)) == ((10, 10, 10), (5,))
    assert func((), (0, 0)) == ((0,), (0,))
    assert func(-1, (0, 3)) == ((0,), (3,))
    assert func(((float("nan"),),)) == ((np.nan,),)
    # "auto" sizes chunks to the byte limit
    assert func("auto", shape=(20,), limit=5, dtype="uint8") == ((5, 5, 5, 5),)
    assert func(("auto", None), (5, 5), dtype=int) == ((5,), (5,))
    # Specs inconsistent with the shape must raise
    with pytest.raises(ValueError):
        func(((10,),), (11,))
    with pytest.raises(ValueError):
        func(((5,), (5,)), (5,))
def test_single_element_tuple():
    """'auto' alongside an explicit chunk size respects single-chunk previous_chunks."""
    assert normalize_chunks(
        (100, "auto"), (500, 500_000), dtype=np.int64, previous_chunks=((1,), (500,))
    ) == (
        (100,) * 5,
        (
            167_500,
            167_500,
            165_000,
        ),
    )
def test_align_chunks_to_previous_chunks():
    """'auto' chunking snaps new chunk boundaries to multiples of previous_chunks."""
    chunks = normalize_chunks(
        "auto", shape=(2000,), previous_chunks=(512,), limit="600 B", dtype=np.uint8
    )
    assert chunks == ((512, 512, 512, 2000 - 512 * 3),)
    # Smaller previous chunks are aggregated up to the limit
    chunks = normalize_chunks(
        "auto", shape=(2000,), previous_chunks=(128,), limit="600 B", dtype=np.uint8
    )
    assert chunks == ((512, 512, 512, 2000 - 512 * 3),)
    # A larger limit merges two previous chunks per output chunk
    chunks = normalize_chunks(
        "auto", shape=(2000,), previous_chunks=(512,), limit="1200 B", dtype=np.uint8
    )
    assert chunks == ((1024, 2000 - 1024),)
    chunks = normalize_chunks(
        "auto",
        shape=(3, 10211, 10376),
        previous_chunks=(1, 512, 512),
        limit="1MiB",
        dtype=np.float32,
    )
    assert chunks[0] == (1, 1, 1)
    # All but the ragged final chunk stay multiples of the previous chunk size
    assert all(c % 512 == 0 for c in chunks[1][:-1])
    assert all(c % 512 == 0 for c in chunks[2][:-1])
    # Already-fitting irregular chunks are preserved verbatim
    chunks = normalize_chunks(
        "auto",
        shape=(48, 720, 1440),
        previous_chunks=((36, 12), (720,), (1440,)),
        limit=134217728,
        dtype=np.float32,
    )
    assert chunks == ((36, 12), (720,), (1440,))
def test_raise_on_no_chunks():
    """Constructing an Array with ``chunks=None`` raises an informative error.

    Rewritten from the try/``assert False``/except pattern to the idiomatic
    ``pytest.raises`` context manager.
    """
    x = da.ones(6, chunks=3)
    with pytest.raises(ValueError) as excinfo:
        Array(x.dask, x.name, chunks=None, dtype=x.dtype, shape=None)
    # The message should point users at the dask documentation
    assert "dask" in str(excinfo.value)
    assert ".org" in str(excinfo.value)
def test_chunks_is_immutable():
    """Assigning to ``.chunks`` fails and the error suggests ``rechunk``.

    Rewritten from the try/``assert False``/except pattern to the idiomatic
    ``pytest.raises`` context manager.
    """
    x = da.ones(6, chunks=3)
    with pytest.raises(TypeError, match=r"rechunk\(2\)"):
        x.chunks = 2
def test_raise_on_bad_kwargs():
    """Unknown keyword arguments raise a TypeError naming the op and the kwarg.

    The original try/except silently passed when no exception was raised at
    all (there was no ``assert False`` / ``else`` clause); ``pytest.raises``
    makes the expectation explicit.
    """
    x = da.ones(5, chunks=3)
    with pytest.raises(TypeError) as excinfo:
        da.minimum(x, foo=None)
    assert "minimum" in str(excinfo.value)
    assert "foo" in str(excinfo.value)
def test_long_slice():
    """Slicing deep into an array with very many tiny chunks matches numpy."""
    expected = np.arange(10000)
    arr = da.from_array(expected, chunks=1)
    assert_eq(arr[8000:8200], expected[8000:8200])
def test_h5py_newaxis():
    """np.newaxis indexing works on arrays backed by on-disk h5py datasets."""
    h5py = pytest.importorskip("h5py")
    with tmpfile("h5") as fn:
        with h5py.File(fn, mode="a") as f:
            x = f.create_dataset("/x", shape=(10, 10), dtype="f8")
            d = da.from_array(x, chunks=(5, 5))
            # Inserting a length-1 axis at each position
            assert d[None, :, :].compute(scheduler="sync").shape == (1, 10, 10)
            assert d[:, None, :].compute(scheduler="sync").shape == (10, 1, 10)
            assert d[:, :, None].compute(scheduler="sync").shape == (10, 10, 1)
            # Identical indexing expressions produce identical keys
            assert same_keys(d[:, :, None], d[:, :, None])
def test_ellipsis_slicing():
    """Indexing with a bare Ellipsis is a no-op slice."""
    arr = da.ones(4, chunks=2)
    assert_eq(arr[...], np.ones(4))
def test_point_slicing():
    """vindex point selection matches numpy fancy indexing."""
    x = np.arange(56).reshape((7, 8))
    d = da.from_array(x, chunks=(3, 4))
    result = d.vindex[[1, 2, 5, 5], [3, 1, 6, 1]]
    assert_eq(result, x[[1, 2, 5, 5], [3, 1, 6, 1]])
    result = d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]]
    assert_eq(result, x[[0, 1, 6, 0], [0, 1, 0, 7]])
    # Repeating the same vindex expression yields the same task keys
    assert same_keys(result, d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]])
def test_point_slicing_with_full_slice():
    """vindex mixing point lists with full slices: points collapse to a leading axis."""
    from dask.array.core import _get_axis

    x = np.arange(4 * 5 * 6 * 7).reshape((4, 5, 6, 7))
    d = da.from_array(x, chunks=(2, 3, 3, 4))
    # Each entry: per-axis indexer; None stands for a full slice
    inds = [
        [[1, 2, 3], None, [3, 2, 1], [5, 3, 4]],
        [[1, 2, 3], None, [4, 3, 2], None],
        [[1, 2, 3], [3, 2, 1]],
        [[1, 2, 3], [3, 2, 1], [3, 2, 1], [5, 3, 4]],
        [[], [], [], None],
        [np.array([1, 2, 3]), None, np.array([4, 3, 2]), None],
        [None, None, [1, 2, 3], [4, 3, 2]],
        [None, [0, 2, 3], None, [0, 3, 2]],
    ]
    for ind in inds:
        slc = [
            i if isinstance(i, (np.ndarray, list)) else slice(None, None) for i in ind
        ]
        result = d.vindex[tuple(slc)]

        # Rotate the expected result accordingly
        axis = _get_axis(ind)
        expected = x[tuple(slc)]
        expected = expected.transpose(
            [axis] + list(range(axis)) + list(range(axis + 1, expected.ndim))
        )
        assert_eq(result, expected)

        # Always have the first axis be the length of the points
        k = len(next(i for i in ind if isinstance(i, (np.ndarray, list))))
        assert result.shape[0] == k
def test_slice_with_floats():
    """Float scalars, float slice bounds and float lists all raise IndexError."""
    arr = da.ones((5,), chunks=(3,))
    for bad_index in (1.5, slice(0, 1.5), [1, 1.5]):
        with pytest.raises(IndexError):
            arr[bad_index]
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.uint32, np.uint64])
def test_slice_with_integer_types(dtype):
    """Index arrays of any signed/unsigned integer dtype are accepted."""
    x = np.arange(10)
    dx = da.from_array(x, chunks=5)
    inds = np.array([0, 3, 6], dtype=dtype)
    assert_eq(dx[inds], x[inds])
@pytest.mark.parametrize("cls", [int, np.int32, np.int64, np.uint32, np.uint64])
def test_index_with_integer_types(cls):
    """Scalar indices of any Python/numpy integer type are accepted."""
    x = np.arange(10)
    dx = da.from_array(x, chunks=5)
    inds = cls(3)
    assert_eq(dx[inds], x[inds])
def test_vindex_basic():
    """vindex behaves like plain indexing where basic and advanced indexing coincide."""
    x = np.arange(56).reshape((7, 8))
    d = da.from_array(x, chunks=(3, 4))

    # cases where basic and advanced indexing coincide
    result = d.vindex[0]
    assert_eq(result, x[0])

    result = d.vindex[0, 1]
    assert_eq(result, x[0, 1])

    result = d.vindex[[0, 1], ::-1]  # slices last
    assert_eq(result, x[:2, ::-1])
def test_vindex_nd():
    """Multi-dimensional index arrays broadcast inside vindex as in numpy."""
    x = np.arange(56).reshape((7, 8))
    d = da.from_array(x, chunks=(3, 4))

    result = d.vindex[[[0, 1], [6, 0]], [[0, 1], [0, 7]]]
    assert_eq(result, x[[[0, 1], [6, 0]], [[0, 1], [0, 7]]])

    # Outer-product style index arrays reconstruct the array and its transpose
    result = d.vindex[np.arange(7)[:, None], np.arange(8)[None, :]]
    assert_eq(result, x)

    result = d.vindex[np.arange(7)[None, :], np.arange(8)[:, None]]
    assert_eq(result, x.T)
@pytest.mark.parametrize("size", [0, 1])
def test_vindex_preserve_chunksize(size):
    """vindex with a large point list splits the output into evenly sized chunks.

    ``size`` toggles whether the point count divides evenly into the chunk
    size (an extra size-1 trailing chunk appears when it does not).
    """
    np_arr = np.random.rand(10_000 * 40).reshape(100, 100, 40)
    arr = da.from_array(np_arr, chunks=(50, 50, 20))
    indices_2d = np.random.choice(np.arange(100), size=(10000 + size, 2))
    idx1 = indices_2d[:, 0]
    # Use the second column for the second axis (the original copy-pasted
    # column 0 twice, leaving column 1 unused)
    idx2 = indices_2d[:, 1]
    result = arr.vindex[idx1, idx2, slice(None)]
    assert result.chunks == (
        (2500, 2500, 2500, 2500) + ((1,) if size else ()),
        (20, 20),
    )
    assert_eq(result, np_arr[idx1, idx2, :])
def test_vindex_negative():
    """Negative point indices in vindex wrap around like numpy fancy indexing."""
    expected = np.arange(10)
    arr = da.from_array(expected, chunks=(5, 5))
    idx = np.array([0, -1])
    assert_eq(arr.vindex[idx], expected[idx])
def test_vindex_errors():
    """Invalid vindex arguments raise IndexError (or reject dask-object indexers)."""
    d = da.ones((5, 5, 5), chunks=(3, 3, 3))
    pytest.raises(IndexError, lambda: d.vindex[np.newaxis])  # newaxis unsupported
    pytest.raises(IndexError, lambda: d.vindex[[1, 2], [1, 2, 3]])  # length mismatch
    pytest.raises(IndexError, lambda: d.vindex[[True] * 5])  # boolean lists unsupported
    pytest.raises(IndexError, lambda: d.vindex[[0], [5]])  # out of bounds
    pytest.raises(IndexError, lambda: d.vindex[[0], [-6]])  # negative out of bounds
    with pytest.raises(IndexError, match="does not support indexing with dask objects"):
        d.vindex[[0], [0], da.array([0])]
def test_vindex_merge():
    """_vindex_merge reassembles per-chunk results into their original point order."""
    from dask.array.core import _vindex_merge

    # Points 1, then points 2 and 0 — output rows land at those positions
    locations = [1], [2, 0]
    values = [np.array([[1, 2, 3]]), np.array([[10, 20, 30], [40, 50, 60]])]

    assert (
        _vindex_merge(locations, values)
        == np.array([[40, 50, 60], [1, 2, 3], [10, 20, 30]])
    ).all()
def test_vindex_identity():
    """Full no-op slices through vindex return the identical array object."""
    rng = da.random.default_rng(42)
    a, b = 10, 20

    x = rng.random(a, chunks=a // 2)
    assert x is x.vindex[:]
    assert x is x.vindex[:a]
    # Anything short of a full slice is rejected by vindex
    pytest.raises(IndexError, lambda: x.vindex[: a - 1])
    pytest.raises(IndexError, lambda: x.vindex[1:])
    pytest.raises(IndexError, lambda: x.vindex[0:a:2])

    x = rng.random((a, b), chunks=(a // 2, b // 2))
    assert x is x.vindex[:, :]
    assert x is x.vindex[:a, :b]
    pytest.raises(IndexError, lambda: x.vindex[:, : b - 1])
    pytest.raises(IndexError, lambda: x.vindex[:, 1:])
    pytest.raises(IndexError, lambda: x.vindex[:, 0:b:2])
def test_empty_array():
    """A zero-length arange produces an empty array matching numpy."""
    assert_eq(da.arange(0, chunks=5), np.arange(0))
def test_memmap():
    """Storing into a numpy memmap target and reloading via mmap round-trips."""
    with tmpfile("npy") as fn_1:
        with tmpfile("npy") as fn_2:
            try:
                x = da.arange(100, chunks=15)
                target = np.memmap(fn_1, shape=x.shape, mode="w+", dtype=x.dtype)

                x.store(target)

                assert_eq(target, x, check_type=False)

                np.save(fn_2, target)

                assert_eq(np.load(fn_2, mmap_mode="r"), x, check_type=False)
            finally:
                # Explicitly close the mapping so the tmpfile can be removed
                target._mmap.close()
def test_to_npy_stack():
    """to_npy_stack writes one .npy per chunk along an axis; from_npy_stack reads back."""
    x = np.arange(5 * 10 * 10).reshape((5, 10, 10))
    d = da.from_array(x, chunks=(2, 4, 4))

    with tmpdir() as dirname:
        stackdir = os.path.join(dirname, "test")
        da.to_npy_stack(stackdir, d, axis=0)
        assert os.path.exists(os.path.join(stackdir, "0.npy"))
        # Second file holds the second chunk along axis 0 (rows 2:4)
        assert (np.load(os.path.join(stackdir, "1.npy")) == x[2:4]).all()

        e = da.from_npy_stack(stackdir)
        assert_eq(d, e)
def test_view():
    """.view() reinterprets the dtype like numpy, and rejects misaligned chunks."""
    x = np.arange(56).reshape((7, 8))
    d = da.from_array(x, chunks=(2, 3))

    assert_eq(x.view(), d.view())
    assert_eq(x.view("i4"), d.view("i4"))
    assert_eq(x.view("i2"), d.view("i2"))
    assert all(isinstance(s, int) for s in d.shape)

    x = np.arange(8, dtype="i1")
    d = da.from_array(x, chunks=(4,))
    assert_eq(x.view("i4"), d.view("i4"))

    with pytest.raises(ValueError):
        # Chunk of 3 int8s cannot be reinterpreted as whole int32s
        x = np.arange(8, dtype="i1")
        d = da.from_array(x, chunks=(3,))
        d.view("i4")

    with pytest.raises(ValueError):
        d.view("i4", order="asdf")
def test_view_fortran():
    """order='F' views reinterpret along the first (Fortran-contiguous) axis."""
    x = np.asfortranarray(np.arange(64).reshape((8, 8)))
    d = da.from_array(x, chunks=(2, 3))
    # Equivalent to transposing, viewing C-order, and transposing back
    assert_eq(x.T.view("i4").T, d.view("i4", order="F"))
    assert_eq(x.T.view("i2").T, d.view("i2", order="F"))
def test_h5py_tokenize():
    """Distinct h5py datasets tokenize differently, even with the same name/shape.

    The files are opened with context managers so the handles are closed
    before the temporary files are removed (the original leaked both open
    file objects, which prevents deletion on Windows).
    """
    h5py = pytest.importorskip("h5py")
    with tmpfile("hdf5") as fn1:
        with tmpfile("hdf5") as fn2:
            with h5py.File(fn1, mode="a") as f, h5py.File(fn2, mode="a") as g:
                f["x"] = np.arange(10).astype(float)
                g["x"] = np.ones(10).astype(float)
                x1 = f["x"]
                x2 = g["x"]
                assert tokenize(x1) != tokenize(x2)
def test_map_blocks_with_changed_dimension():
    """map_blocks with drop_axis / new_axis reshapes outputs and validates chunks."""
    x = np.arange(56).reshape((7, 8))
    d = da.from_array(x, chunks=(7, 4))

    e = d.map_blocks(lambda b: b.sum(axis=0), chunks=(4,), drop_axis=0, dtype=d.dtype)
    assert e.chunks == ((4, 4),)
    assert_eq(e, x.sum(axis=0))

    # Provided chunks have wrong shape
    with pytest.raises(ValueError):
        d.map_blocks(lambda b: b.sum(axis=0), chunks=(), drop_axis=0)

    with pytest.raises(ValueError):
        d.map_blocks(lambda b: b.sum(axis=0), chunks=((4, 4, 4),), drop_axis=0)

    # Can't drop an axis that is split across multiple chunks
    with pytest.raises(ValueError):
        d.map_blocks(lambda b: b.sum(axis=1), chunks=((3, 4),), drop_axis=1)

    d = da.from_array(x, chunks=(4, 8))
    e = d.map_blocks(lambda b: b.sum(axis=1), drop_axis=1, dtype=d.dtype)
    assert e.chunks == ((4, 3),)
    assert_eq(e, x.sum(axis=1))

    x = np.arange(64).reshape((8, 8))
    d = da.from_array(x, chunks=(4, 4))
    e = d.map_blocks(
        lambda b: b[None, :, :, None],
        chunks=(1, 4, 4, 1),
        new_axis=[0, 3],
        dtype=d.dtype,
    )
    assert e.chunks == ((1,), (4, 4), (4, 4), (1,))
    assert_eq(e, x[None, :, :, None])

    # chunks may be inferred when new_axis sizes are 1
    e = d.map_blocks(lambda b: b[None, :, :, None], new_axis=[0, 3], dtype=d.dtype)
    assert e.chunks == ((1,), (4, 4), (4, 4), (1,))
    assert_eq(e, x[None, :, :, None])

    # Adding axis with a gap
    with pytest.raises(ValueError):
        d.map_blocks(lambda b: b, new_axis=(3, 4))

    # Both new_axis and drop_axis
    d = da.from_array(x, chunks=(8, 4))
    e = d.map_blocks(
        lambda b: b.sum(axis=0)[:, None, None],
        drop_axis=0,
        new_axis=(1, 2),
        dtype=d.dtype,
    )
    assert e.chunks == ((4, 4), (1,), (1,))
    assert_eq(e, x.sum(axis=0)[:, None, None])

    d = da.from_array(x, chunks=(4, 8))
    e = d.map_blocks(
        lambda b: b.sum(axis=1)[:, None, None],
        drop_axis=1,
        new_axis=(1, 2),
        dtype=d.dtype,
    )
    assert e.chunks == ((4, 4), (1,), (1,))
    assert_eq(e, x.sum(axis=1)[:, None, None])
def test_map_blocks_with_negative_drop_axis():
    """Negative drop_axis values count from the end, like numpy axis arguments."""
    x = np.arange(56).reshape((7, 8))
    d = da.from_array(x, chunks=(7, 4))

    for drop_axis in [0, -2]:
        # test with equivalent positive and negative drop_axis
        e = d.map_blocks(
            lambda b: b.sum(axis=0), chunks=(4,), drop_axis=drop_axis, dtype=d.dtype
        )
        assert e.chunks == ((4, 4),)
        assert_eq(e, x.sum(axis=0))
def test_map_blocks_with_invalid_drop_axis():
    """drop_axis values outside [-ndim, ndim) raise ValueError."""
    x = np.arange(56).reshape((7, 8))
    d = da.from_array(x, chunks=(7, 4))

    for drop_axis in [x.ndim, -x.ndim - 1]:
        with pytest.raises(ValueError):
            d.map_blocks(
                lambda b: b.sum(axis=0), chunks=(4,), drop_axis=drop_axis, dtype=d.dtype
            )
def test_map_blocks_custom_name():
    """An explicit ``name=`` overrides the automatically generated array name."""
    out = da.map_blocks(lambda _: np.arange(4), chunks=(4,), name="foo", dtype=np.int64)
    assert out.name == "foo", out.name
def test_map_blocks_with_changed_dimension_and_broadcast_chunks():
    """Explicit output chunks allow a smaller input to broadcast against a larger one."""
    # https://github.com/dask/dask/issues/4299
    a = da.from_array([1, 2, 3], 3)
    b = da.from_array(np.array([0, 1, 2, 0, 1, 2]), chunks=3)
    result = da.map_blocks(operator.add, a, b, chunks=b.chunks)
    expected = da.from_array(np.array([1, 3, 5, 1, 3, 5]), chunks=3)
    assert_eq(result, expected)
def test_broadcast_chunks():
    """broadcast_chunks combines per-array chunk tuples with numpy broadcast rules."""
    assert broadcast_chunks() == ()

    assert broadcast_chunks(((2, 3),)) == ((2, 3),)

    assert broadcast_chunks(((5, 5),), ((5, 5),)) == ((5, 5),)

    # Fewer dimensions broadcast against the trailing axes
    a = ((10, 10, 10), (5, 5))
    b = ((5, 5),)
    assert broadcast_chunks(a, b) == ((10, 10, 10), (5, 5))
    assert broadcast_chunks(b, a) == ((10, 10, 10), (5, 5))

    # A length-1 axis broadcasts against anything
    a = ((10, 10, 10), (5, 5))
    b = ((1,), (5, 5))
    assert broadcast_chunks(a, b) == ((10, 10, 10), (5, 5))

    # Mismatched non-unit chunkings cannot broadcast
    a = ((10, 10, 10), (5, 5))
    b = ((3, 3), (5, 5))
    with pytest.raises(ValueError):
        broadcast_chunks(a, b)

    a = ((1,), (5, 5))
    b = ((1,), (5, 5))
    assert broadcast_chunks(a, b) == a

    # Unknown (nan) chunk sizes are preserved when broadcasting against size-1
    a = ((1,), (np.nan, np.nan, np.nan))
    b = ((3, 3), (1,))
    r = broadcast_chunks(a, b)
    assert r[0] == b[0] and np.allclose(r[1], a[1], equal_nan=True)

    a = ((3, 3), (1,))
    b = ((1,), (np.nan, np.nan, np.nan))
    r = broadcast_chunks(a, b)
    assert r[0] == a[0] and np.allclose(r[1], b[1], equal_nan=True)

    # Unknown sizes against known non-unit sizes must raise
    a = ((3, 3), (5, 5))
    b = ((1,), (np.nan, np.nan, np.nan))
    with pytest.raises(ValueError):
        broadcast_chunks(a, b)
def test_chunks_error():
    """A chunks tuple whose length mismatches the array rank raises ValueError."""
    data = np.ones((10, 10))
    with pytest.raises(ValueError):
        da.from_array(data, chunks=(5,))
def test_array_compute_forward_kwargs():
    """Unknown keyword arguments to .compute() are tolerated (forwarded, not rejected)."""
    x = da.arange(10, chunks=2).sum()
    x.compute(bogus_keyword=10)
def test_dont_fuse_outputs():
    """Output chunks that other output chunks depend on must not be fused away."""
    dsk = {("x", 0): np.array([1, 2]), ("x", 1): (inc, ("x", 0))}
    a = da.Array(dsk, "x", chunks=(2,), shape=(4,), dtype=np.array([1]).dtype)
    assert_eq(a, np.array([1, 2, 2, 3], dtype=a.dtype))
def test_dont_dealias_outputs():
    """An output chunk aliasing another output key still materializes correctly."""
    dsk = {
        ("x", 0, 0): np.ones((2, 2)),
        ("x", 0, 1): np.ones((2, 2)),
        ("x", 1, 0): np.ones((2, 2)),
        ("x", 1, 1): ("x", 0, 0),  # alias of another output chunk
    }
    a = da.Array(dsk, "x", chunks=(2, 2), shape=(4, 4), dtype=np.ones(1).dtype)
    assert_eq(a, np.ones((4, 4)))
def test_timedelta_op():
    """Dividing a timedelta64 array by a timedelta64 scalar matches numpy."""
    deltas = np.array([np.timedelta64(10, "h")])
    unit = np.timedelta64(1, "h")
    result = da.from_array(deltas, chunks=(1,)) / unit
    assert result.compute() == deltas / unit
def test_to_delayed():
    """to_delayed yields a nested list of Delayed objects, one per chunk."""
    x = da.random.default_rng().random((4, 4), chunks=(2, 2))
    y = x + 10
    [[a, b], [c, d]] = y.to_delayed()
    assert_eq(a.compute(), y[:2, :2])

    # 0-d arrays produce a single Delayed keyed by the empty tuple
    s = 2
    x = da.from_array(np.array(s), chunks=0)
    a = x.to_delayed()[tuple()]
    assert a.compute() == s
def test_to_delayed_optimize_graph():
    """to_delayed optionally optimizes the graph; both paths compute the same result."""
    x = da.ones((4, 4), chunks=(2, 2))
    y = x[1:][1:][1:][:, 1:][:, 1:][:, 1:]

    # optimizations
    d = y.to_delayed().flatten().tolist()[0]
    # The chained getitems should have been fused into one
    assert len([k for k in d.dask if k[0].startswith("getitem")]) == 1
    assert d.key == (y.name, 0, 0)
    assert d.dask.layers.keys() == {"delayed-" + y.name}
    assert d.dask.dependencies == {"delayed-" + y.name: set()}
    assert d.__dask_layers__() == ("delayed-" + y.name,)

    # no optimizations
    d2 = y.to_delayed(optimize_graph=False).flatten().tolist()[0]
    # The unoptimized Delayed shares the array's graph verbatim
    assert d2.dask is y.dask
    assert d2.key == (y.name, 0, 0)
    assert d2.__dask_layers__() == y.__dask_layers__()

    assert (d.compute() == d2.compute()).all()
def test_cumulative():
    """cumsum/cumprod and their nan-aware variants match numpy in 1-d, 2-d and 3-d."""
    rng = np.random.default_rng(0)
    x = da.arange(20, chunks=5)
    assert_eq(x.cumsum(axis=0), np.arange(20).cumsum())
    assert_eq(x.cumprod(axis=0), np.arange(20).cumprod())
    assert_eq(da.nancumsum(x, axis=0), np.nancumsum(np.arange(20)))
    assert_eq(da.nancumprod(x, axis=0), np.nancumprod(np.arange(20)))

    # 1-d with NaNs scattered in
    a = rng.random(20)
    a[rng.random(a.shape) < 0.5] = np.nan
    x = da.from_array(a, chunks=5)
    assert_eq(da.nancumsum(x, axis=0), np.nancumsum(a))
    assert_eq(da.nancumprod(x, axis=0), np.nancumprod(a))

    # 2-d, both axes
    a = rng.random((20, 24))
    x = da.from_array(a, chunks=(6, 5))
    assert_eq(x.cumsum(axis=0), a.cumsum(axis=0))
    assert_eq(x.cumsum(axis=1), a.cumsum(axis=1))
    assert_eq(x.cumprod(axis=0), a.cumprod(axis=0))
    assert_eq(x.cumprod(axis=1), a.cumprod(axis=1))

    assert_eq(da.nancumsum(x, axis=0), np.nancumsum(a, axis=0))
    assert_eq(da.nancumsum(x, axis=1), np.nancumsum(a, axis=1))
    assert_eq(da.nancumprod(x, axis=0), np.nancumprod(a, axis=0))
    assert_eq(da.nancumprod(x, axis=1), np.nancumprod(a, axis=1))

    a = rng.random((20, 24))
    a[rng.random(a.shape) < 0.5] = np.nan
    x = da.from_array(a, chunks=(6, 5))
    assert_eq(da.nancumsum(x, axis=0), np.nancumsum(a, axis=0))
    assert_eq(da.nancumsum(x, axis=1), np.nancumsum(a, axis=1))
    assert_eq(da.nancumprod(x, axis=0), np.nancumprod(a, axis=0))
    assert_eq(da.nancumprod(x, axis=1), np.nancumprod(a, axis=1))

    # 3-d, positive and negative axes
    a = rng.random((20, 24, 13))
    x = da.from_array(a, chunks=(6, 5, 4))
    for axis in [0, 1, 2, -1, -2, -3]:
        assert_eq(x.cumsum(axis=axis), a.cumsum(axis=axis))
        assert_eq(x.cumprod(axis=axis), a.cumprod(axis=axis))
        assert_eq(da.nancumsum(x, axis=axis), np.nancumsum(a, axis=axis))
        assert_eq(da.nancumprod(x, axis=axis), np.nancumprod(a, axis=axis))

    a = rng.random((20, 24, 13))
    a[rng.random(a.shape) < 0.5] = np.nan
    x = da.from_array(a, chunks=(6, 5, 4))
    for axis in [0, 1, 2, -1, -2, -3]:
        assert_eq(da.nancumsum(x, axis=axis), np.nancumsum(a, axis=axis))
        assert_eq(da.nancumprod(x, axis=axis), np.nancumprod(a, axis=axis))

    # Out-of-range axes raise
    with pytest.raises(ValueError):
        x.cumsum(axis=3)

    with pytest.raises(ValueError):
        x.cumsum(axis=-4)
def test_from_delayed():
    """from_delayed wraps a Delayed object as a single-chunk dask Array."""
    ones_delayed = delayed(np.ones)((5, 3))
    arr = from_delayed(ones_delayed, shape=(5, 3), dtype=np.ones(0).dtype)
    assert isinstance(arr, Array)
    assert_eq(arr, np.ones((5, 3)))
def test_from_delayed_meta():
    """An explicit meta= sets the resulting array's _meta type."""
    v = delayed(np.ones)((5, 3))
    x = from_delayed(v, shape=(5, 3), meta=np.ones(0))
    assert isinstance(x, Array)
    assert isinstance(x._meta, np.ndarray)
def test_from_delayed_future():
    """from_delayed accepts a distributed Future and survives the Future being dropped."""
    # https://github.com/dask/distributed/issues/9050
    distributed = pytest.importorskip("distributed")
    arr = np.zeros((10, 10))
    with distributed.Client(n_workers=1) as client:
        client.wait_for_workers(1)
        fut = client.scatter(arr)
        result = da.from_delayed(fut, shape=arr.shape, meta=arr[:0, :0])
        assert_eq(result, arr, scheduler=client)
        # The array must keep the scattered data alive on its own
        del fut
        assert_eq(result, arr, scheduler=client)
def test_A_property():
    """The numpy-compat ``.A`` attribute returns the array object itself."""
    arr = da.ones(5, chunks=(2,))
    assert arr.A is arr
def test_copy_mutate():
    """copy() and deepcopy() snapshots are unaffected by later in-place mutation."""
    x = da.arange(5, chunks=(2,))
    y = x.copy()
    memo = {}
    y2 = copy.deepcopy(x, memo=memo)
    # Mutate the original after copying
    x[x % 2 == 0] = -1

    xx = np.arange(5)
    xx[xx % 2 == 0] = -1
    assert_eq(x, xx)

    # Copies still hold the pre-mutation values
    assert_eq(y, np.arange(5))
    assert_eq(y2, np.arange(5))
    # deepcopy registered the copy in its memo dict
    assert memo[id(x)] is y2
def test_npartitions():
    """npartitions is the product of the number of chunks along each axis."""
    assert da.ones(5, chunks=(2,)).npartitions == 3  # ceil(5 / 2)
    assert da.ones((5, 5), chunks=(2, 3)).npartitions == 6  # 3 x 2
def test_elemwise_name():
    """Elementwise results are named after the underlying operation."""
    result = da.ones(5, chunks=2) + 1
    assert result.name.startswith("add-")
def test_map_blocks_name():
    """map_blocks results are named after the mapped function."""
    result = da.ones(5, chunks=2).map_blocks(inc)
    assert result.name.startswith("inc-")
def test_from_array_names():
    """from_array produces one task key per chunk, all sharing the array name."""
    x = np.ones(10)
    a = da.from_array(x, chunks=2)
    assert a.dask.keys() == {(a.name, i) for i in range(5)}
@pytest.mark.parametrize(
    "array", [da.arange(100, chunks=25), da.ones((10, 10), chunks=25)]
)
def test_array_picklable(array):
    """Arrays (including masked-meta arrays) survive a pickle round-trip."""
    from pickle import dumps, loads

    a2 = loads(dumps(array))
    assert_eq(array, a2)

    a3 = da.ma.masked_equal(array, 0)
    assert isinstance(a3._meta, np.ma.MaskedArray)
    a4 = loads(dumps(a3))
    assert_eq(a3, a4)
    # The masked meta must be preserved through pickling
    assert isinstance(a4._meta, np.ma.MaskedArray)
def test_from_array_raises_on_bad_chunks():
    """Chunk specs inconsistent with the array shape raise ValueError."""
    x = np.ones(10)

    with pytest.raises(ValueError):
        da.from_array(x, chunks=(5, 5, 5))

    # NOTE(review): oversize scalar chunks appear to be accepted, hence disabled:
    # with pytest.raises(ValueError):
    #      da.from_array(x, chunks=100)

    with pytest.raises(ValueError):
        da.from_array(x, chunks=((5, 5, 5),))
def test_concatenate_axes():
    """concatenate_axes joins nested lists along the given axes and validates nesting."""
    x = np.ones((2, 2, 2))

    assert_eq(concatenate_axes([x, x], axes=[0]), np.ones((4, 2, 2)))
    assert_eq(concatenate_axes([x, x, x], axes=[0]), np.ones((6, 2, 2)))
    assert_eq(concatenate_axes([x, x], axes=[1]), np.ones((2, 4, 2)))
    assert_eq(concatenate_axes([[x, x], [x, x]], axes=[0, 1]), np.ones((4, 4, 2)))
    assert_eq(concatenate_axes([[x, x], [x, x]], axes=[0, 2]), np.ones((4, 2, 4)))
    assert_eq(concatenate_axes([[x, x, x], [x, x, x]], axes=[1, 2]), np.ones((2, 4, 6)))

    with pytest.raises(ValueError):
        concatenate_axes(
            [[x, x], [x, x]], axes=[0]
        )  # not all nested lists accounted for
    with pytest.raises(ValueError):
        concatenate_axes([x, x], axes=[0, 1, 2, 3])  # too many axes
def test_blockwise_concatenate():
    """blockwise(concatenate=True) hands functions whole concatenated blocks."""
    x = da.ones((4, 4, 4), chunks=(2, 2, 2))
    y = da.ones((4, 4), chunks=(2, 2))

    def f(a, b):
        # Contracted indices arrive fully concatenated
        assert isinstance(a, np.ndarray)
        assert isinstance(b, np.ndarray)

        assert a.shape == (2, 4, 4)
        assert b.shape == (4, 4)

        return (a + b).sum(axis=(1, 2))

    z = da.blockwise(f, "i", x, "ijk", y, "jk", concatenate=True, dtype=x.dtype)
    assert_eq(z, np.ones(4) * 32)

    z = da.blockwise(add, "ij", y, "ij", y, "ij", concatenate=True, dtype=x.dtype)
    assert_eq(z, np.ones((4, 4)) * 2)

    def f(a, b, c):
        assert isinstance(a, np.ndarray)
        assert isinstance(b, np.ndarray)
        assert isinstance(c, np.ndarray)

        assert a.shape == (4, 2, 4)
        assert b.shape == (4, 4)
        assert c.shape == (4, 2)

        return np.ones(2)

    z = da.blockwise(
        f, "j", x, "ijk", y, "ki", y, "ij", concatenate=True, dtype=x.dtype
    )
    assert_eq(z, np.ones(4), check_shape=False)
def test_common_blockdim():
    """common_blockdim finds the finest chunking compatible with all inputs.

    (The original asserted the ``[(5, 5), (2, 3, 5)]`` case twice; the exact
    duplicate was removed.)
    """
    assert common_blockdim([(5,), (5,)]) == (5,)
    assert common_blockdim([(5,), (2, 3)]) == (2, 3)
    # Break points of every input appear in the result
    assert common_blockdim([(5, 5), (2, 3, 5)]) == (2, 3, 5)
    assert common_blockdim([(5, 2, 3), (2, 3, 5)]) == (2, 3, 2, 3)
    assert common_blockdim([(1, 2), (2, 1)]) == (1, 1, 1)
    assert common_blockdim([(1, 2, 2), (2, 1, 2), (2, 2, 1)]) == (1, 1, 1, 1, 1)
def test_uneven_chunks_that_fit_neatly():
    """When one operand's chunks subdivide the other's, the finer chunking wins."""
    x = da.arange(10, chunks=((5, 5),))
    y = da.ones(10, chunks=((5, 2, 3),))

    total = x + y
    assert_eq(total, np.arange(10) + np.ones(10))
    assert total.chunks == ((5, 2, 3),)
def test_elemwise_uneven_chunks():
    """Elementwise ops rechunk misaligned operands to their common break points."""
    rng = da.random.default_rng()
    x = da.arange(10, chunks=((4, 6),))
    y = da.ones(10, chunks=((6, 4),))

    assert_eq(x + y, np.arange(10) + np.ones(10))

    z = x + y
    # Union of the break points {4} and {6}
    assert z.chunks == ((4, 2, 4),)

    x = rng.random((10, 10), chunks=((4, 6), (5, 2, 3)))
    y = rng.random((4, 10, 10), chunks=((2, 2), (6, 4), (2, 3, 5)))
    z = x + y
    assert_eq(x + y, x.compute() + y.compute())
    assert z.chunks == ((2, 2), (4, 2, 4), (2, 3, 2, 3))
def test_uneven_chunks_blockwise():
    """blockwise contraction with concatenate=True handles misaligned chunkings."""
    rng = da.random.default_rng()
    x = rng.random((10, 10), chunks=((2, 3, 2, 3), (5, 5)))
    y = rng.random((10, 10), chunks=((4, 4, 2), (4, 2, 4)))
    z = da.blockwise(np.dot, "ik", x, "ij", y, "jk", dtype=x.dtype, concatenate=True)
    # Output keeps x's row chunks and y's column chunks
    assert z.chunks == (x.chunks[0], y.chunks[1])
    assert_eq(z, x.compute().dot(y))
def test_warn_bad_rechunking():
    """Aligning fully orthogonal chunkings emits a PerformanceWarning."""
    x = da.ones((20, 20), chunks=(20, 1))
    y = da.ones((20, 20), chunks=(1, 20))

    with pytest.warns(da.core.PerformanceWarning, match="factor of 20"):
        x + y
def test_concatenate_stack_dont_warn():
    """concatenate/stack of many small arrays must not emit spurious warnings."""
    with warnings.catch_warnings(record=True) as record:
        da.concatenate([da.ones(2, chunks=1)] * 62)
    assert not record

    with warnings.catch_warnings(record=True) as record:
        da.stack([da.ones(2, chunks=1)] * 62)
    assert not record
def test_map_blocks_delayed():
    """map_blocks accepts Delayed extra arguments and keeps valid graphs."""
    x = da.ones((10, 10), chunks=(5, 5))
    y = np.ones((5, 5))

    z = x.map_blocks(add, y, dtype=x.dtype)

    z.dask.validate()
    dask.optimize(z)[0].dask.validate()

    # Same operation with the extra argument wrapped in delayed()
    yy = delayed(y)
    zz = x.map_blocks(add, yy, dtype=x.dtype)

    zz.dask.validate()
    dask.optimize(zz)[0].dask.validate()

    assert_eq(z, zz)

    # The delayed argument's key appears in the resulting graph
    assert yy.key in zz.dask
def test_no_chunks():
    """Elementwise ops and reductions still work when chunk sizes are unknown (nan)."""
    X = np.arange(11)
    dsk = {("x", 0): np.arange(5), ("x", 1): np.arange(5, 11)}
    x = Array(dsk, "x", ((np.nan, np.nan),), np.arange(1).dtype)
    assert_eq(x + 1, X + 1)
    assert_eq(x.sum(), X.sum())
    assert_eq((x + 1).std(), (X + 1).std())
    assert_eq((x + x).std(), (X + X).std())
    assert_eq((x + x).std(keepdims=True), (X + X).std(keepdims=True))
def test_no_chunks_2d():
    """2-d ops that don't need sizes still work with fully unknown chunks."""
    X = np.arange(24).reshape((4, 6))
    x = da.from_array(X, chunks=(2, 2))
    # Simulate unknown chunk sizes on both axes
    x._chunks = ((np.nan, np.nan), (np.nan, np.nan, np.nan))

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RuntimeWarning)  # divide by zero
        assert_eq(da.log(x), np.log(X))
    assert_eq(x.T, X.T)
    assert_eq(x.sum(axis=0, keepdims=True), X.sum(axis=0, keepdims=True))
    assert_eq(x.sum(axis=1, keepdims=True), X.sum(axis=1, keepdims=True))
    assert_eq(x.dot(x.T + 1), X.dot(X.T + 1))
def test_no_chunks_yes_chunks():
    """Operations propagate known chunks and nan chunks along the correct axes."""
    X = np.arange(24).reshape((4, 6))
    x = da.from_array(X, chunks=(2, 2))
    # Only axis 1 has unknown chunk sizes
    x._chunks = ((2, 2), (np.nan, np.nan, np.nan))

    assert (x + 1).chunks == ((2, 2), (np.nan, np.nan, np.nan))
    assert (x.T).chunks == ((np.nan, np.nan, np.nan), (2, 2))
    # Contraction over the unknown axis leaves only known chunks
    assert (x.dot(x.T)).chunks == ((2, 2), (2, 2))
def test_raise_informative_errors_no_chunks():
    """Operations needing chunk sizes raise errors mentioning 'chunk' and 'unknown'."""
    X = np.arange(10)
    a = da.from_array(X, chunks=(5, 5))
    a._chunks = ((np.nan, np.nan),)

    b = da.from_array(X, chunks=(4, 4, 2))
    b._chunks = ((np.nan, np.nan, np.nan),)

    for op in [
        lambda: a + b,
        lambda: a[1],
        lambda: a[::2],
        lambda: a[-5],
        lambda: a.rechunk(3),
        lambda: a.reshape(2, 5),
    ]:
        with pytest.raises(ValueError) as e:
            op()
        if "chunk" not in str(e.value) or "unknown" not in str(e.value):
            # Re-run the op outside pytest.raises so the unhelpful error's
            # full traceback is shown in the test failure
            op()
def test_no_chunks_slicing_2d():
    """Slicing along an unknown-chunks axis raises; other axes still work."""
    X = np.arange(24).reshape((4, 6))
    x = da.from_array(X, chunks=(2, 2))
    # Axis 1 chunk sizes unknown
    x._chunks = ((2, 2), (np.nan, np.nan, np.nan))

    assert_eq(x[0], X[0])

    for op in [lambda: x[:, 4], lambda: x[:, ::2], lambda: x[0, 2:4]]:
        with pytest.raises(ValueError, match="chunk sizes are unknown"):
            op()
def test_index_array_with_array_1d():
    """Boolean-array indexing works even with unknown chunks; shape mismatch raises."""
    x = np.arange(10)
    dx = da.from_array(x, chunks=(5,))
    dx._chunks = ((np.nan, np.nan),)

    assert_eq(x[x > 6], dx[dx > 6])
    assert_eq(x[x % 2 == 0], dx[dx % 2 == 0])

    # Mask of a different length cannot be applied
    dy = da.ones(11, chunks=(3,))

    with pytest.raises(ValueError):
        dx[dy > 5]
def test_index_array_with_array_2d():
    """2-d boolean-mask indexing; with unknown chunks only element ordering may differ."""
    x = np.arange(24).reshape((4, 6))
    dx = da.from_array(x, chunks=(2, 2))

    assert_eq(x[x > 6], dx[dx > 6])
    assert_eq(x[x % 2 == 0], dx[dx % 2 == 0])

    # Test with unknown chunks
    dx._chunks = ((2, 2), (np.nan, np.nan, np.nan))

    with pytest.warns(UserWarning, match="different ordering") as record:
        # Compare as sorted sets since ordering is not guaranteed
        assert sorted(x[x % 2 == 0].tolist()) == sorted(
            dx[dx % 2 == 0].compute().tolist()
        )
        assert sorted(x[x > 6].tolist()) == sorted(dx[dx > 6].compute().tolist())

    assert len(record) == 2
@pytest.mark.xfail(reason="Chunking does not align well")
def test_index_array_with_array_3d_2d():
    """Indexing a 3-d array with a 2-d boolean mask (known chunk-alignment failure).

    The original also built a random mask here and immediately overwrote it
    with the deterministic one below; the dead assignment was removed.
    """
    x = np.arange(4**3).reshape((4, 4, 4))
    dx = da.from_array(x, chunks=(2, 2, 2))

    ind = np.arange(4**2).reshape((4, 4)) % 2 == 0
    dind = da.from_array(ind, (2, 2))

    assert_eq(x[ind], dx[dind])
    assert_eq(x[:, ind], dx[:, dind])
def test_setitem_1d():
    """In-place assignment via boolean masks and integer arrays matches numpy."""
    x = np.arange(10)
    dx = da.from_array(x.copy(), chunks=(5,))

    x[x > 6] = -1
    x[x % 2 == 0] = -2
    x[[2, 3]] = -3

    dx[dx > 6] = -1
    dx[dx % 2 == 0] = -2
    dx[da.asarray([2, 3])] = -3

    assert_eq(x, dx)
def test_setitem_masked():
    """Assigning np.ma.masked to an object-dtype masked array sets the mask."""
    # Test np.ma.masked assignment to object-type arrays
    x = np.ma.array(["a", 1, 3.14], dtype=object)
    dx = da.from_array(x.copy(), chunks=2)
    x[...] = np.ma.masked
    dx[...] = np.ma.masked
    assert_eq(x.mask, da.ma.getmaskarray(dx))
def test_setitem_hardmask():
    """Masked assignment respects a hardened mask on the underlying array."""
    x = np.ma.array([1, 2, 3, 4], dtype=int)
    x.harden_mask()

    y = x.copy()
    assert y.hardmask

    x[0] = np.ma.masked
    x[0:2] = np.ma.masked

    dx = da.from_array(y)
    dx[0] = np.ma.masked
    dx[0:2] = np.ma.masked

    assert_eq(x, dx)
def test_setitem_slice_twice():
    """Two successive slice assignments both take effect."""
    x = np.array([1, 2, 3, 4, 5, 6], dtype=int)
    val = np.array([0, 0], dtype=int)

    y = x.copy()
    x[0:2] = val
    x[4:6] = val

    dx = da.from_array(y)
    dx[0:2] = val
    dx[4:6] = val

    assert_eq(x, dx)
def test_setitem_2d():
    """2-d boolean-mask assignment matches numpy."""
    x = np.arange(24).reshape((4, 6))
    dx = da.from_array(x.copy(), chunks=(2, 2))
    x[x > 6] = -1
    x[x % 2 == 0] = -2
    dx[dx > 6] = -1
    dx[dx % 2 == 0] = -2
    assert_eq(x, dx)
def test_setitem_extended_API_0d():
    """Assignment to a 0-d array via () and Ellipsis indices."""
    # 0-d array
    x = np.array(9)
    dx = da.from_array(9)

    x[()] = -1
    dx[()] = -1
    assert_eq(x, dx.compute())

    x[...] = -11
    dx[...] = -11
    assert_eq(x, dx.compute())
@pytest.mark.parametrize(
    "index, value",
    [
        [Ellipsis, -1],
        [slice(2, 8, 2), -2],
        [slice(8, None, 2), -3],
        [slice(8, None, 2), [-30]],
        [slice(1, None, -2), -4],
        [slice(1, None, -2), [-40]],
        [slice(3, None, 2), -5],
        [slice(-3, None, -2), -6],
        [slice(1, None, -2), -4],
        [slice(3, None, 2), -5],
        [slice(3, None, 2), [10, 11, 12, 13]],
        [slice(-4, None, -2), [14, 15, 16, 17]],
    ],
)
def test_setitem_extended_API_1d(index, value):
    """1-d assignment with scalar and sequence values over assorted slices."""
    # 1-d array
    x = np.arange(10)
    dx = da.from_array(x, chunks=(4, 6))
    dx[index] = value
    x[index] = value
    assert_eq(x, dx.compute())
@pytest.mark.parametrize(
    "index, value",
    [
        [Ellipsis, -1],
        [(slice(None, None, 2), slice(None, None, -1)), -1],
        [slice(1, None, 2), -1],
        [[4, 3, 1], -1],
        [(Ellipsis, 4), -1],
        [5, -1],
        [(slice(None), 2), range(6)],
        [3, range(10)],
        [(slice(None), [3, 5, 6]), [-30, -31, -32]],
        [([-1, 0, 1], 2), [-30, -31, -32]],
        [(slice(None, 2), slice(None, 3)), [-50, -51, -52]],
        [(slice(None), [6, 1, 3]), [-60, -61, -62]],
        [(slice(1, 3), slice(1, 4)), [[-70, -71, -72]]],
        [(slice(None), [9, 8, 8]), [-80, -81, 91]],
        [([True, False, False, False, True, False], 2), -1],
        [(3, [True, True, False, True, True, False, True, False, True, True]), -1],
        [(np.array([False, False, True, True, False, False]), slice(5, 7)), -1],
        [
            (
                4,
                da.from_array(
                    [False, False, True, True, False, False, True, False, False, True]
                ),
            ),
            -1,
        ],
        [
            (
                slice(2, 4),
                da.from_array(
                    [False, False, True, True, False, False, True, False, False, True]
                ),
            ),
            [[-100, -101, -102, -103], [-200, -201, -202, -203]],
        ],
        [slice(5, None, 2), -99],
        [slice(5, None, 2), range(1, 11)],
        [slice(1, None, -2), -98],
        [slice(1, None, -2), range(11, 21)],
    ],
)
def test_setitem_extended_API_2d(index, value):
    """2-d assignment across slices, fancy indices, boolean and dask-array masks."""
    # 2-d array
    x = np.ma.arange(60).reshape((6, 10))
    dx = da.from_array(x, chunks=(2, 3))
    dx[index] = value
    x[index] = value
    assert_eq(x, dx.compute())
def test_setitem_extended_API_2d_rhs_func_of_lhs():
    """Assignment where the RHS and/or indices are derived from the LHS array itself."""
    # Cases:
    # * RHS and/or indices are a function of the LHS
    # * Indices have unknown chunk sizes
    # * RHS has extra leading size 1 dimensions compared to LHS
    x = np.arange(60).reshape((6, 10))
    chunks = (2, 3)

    dx = da.from_array(x, chunks=chunks)
    dx[2:4, dx[0] > 3] = -5
    x[2:4, x[0] > 3] = -5
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[2, dx[0] < -2] = -7
    x[2, x[0] < -2] = -7
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[dx % 2 == 0] = -8
    x[x % 2 == 0] = -8
    assert_eq(x, dx.compute())

    # NOTE(review): this block repeats the previous one verbatim — the second
    # pass is a no-op (-8 is itself even); presumably an accidental duplicate.
    dx = da.from_array(x, chunks=chunks)
    dx[dx % 2 == 0] = -8
    x[x % 2 == 0] = -8
    assert_eq(x, dx.compute())

    # Negative-step slices on both sides of the assignment
    dx = da.from_array(x, chunks=chunks)
    dx[3:5, 5:1:-2] = -dx[:2, 4:1:-2]
    x[3:5, 5:1:-2] = -x[:2, 4:1:-2]
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[0, 1:3] = -dx[0, 4:2:-1]
    x[0, 1:3] = -x[0, 4:2:-1]
    assert_eq(x, dx.compute())

    # Self-assignment round-trips
    dx = da.from_array(x, chunks=chunks)
    dx[...] = dx
    x[...] = x
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[...] = dx[...]
    x[...] = x[...]
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[0] = dx[-1]
    x[0] = x[-1]
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[0, :] = dx[-2, :]
    x[0, :] = x[-2, :]
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[:, 1] = dx[:, -3]
    x[:, 1] = x[:, -3]
    assert_eq(x, dx.compute())

    # dask-array integer indices
    index = da.from_array([0, 2], chunks=(2,))
    dx = da.from_array(x, chunks=chunks)
    dx[index, 8] = [99, 88]
    x[[0, 2], 8] = [99, 88]
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=chunks)
    dx[:, index] = dx[:, :2]
    x[:, [0, 2]] = x[:, :2]
    assert_eq(x, dx.compute())

    # Indices produced by da.where have unknown chunk sizes
    index = da.where(da.arange(3, chunks=(1,)) < 2)[0]
    dx = da.from_array(x, chunks=chunks)
    dx[index, 7] = [-23, -33]
    x[index.compute(), 7] = [-23, -33]
    assert_eq(x, dx.compute())

    index = da.where(da.arange(3, chunks=(1,)) < 2)[0]
    dx = da.from_array(x, chunks=chunks)
    dx[(index,)] = -34
    x[(index.compute(),)] = -34
    assert_eq(x, dx.compute())

    index = index - 4
    dx = da.from_array(x, chunks=chunks)
    dx[index, 7] = [-43, -53]
    x[index.compute(), 7] = [-43, -53]
    assert_eq(x, dx.compute())

    index = da.from_array([0, -1], chunks=(1,))
    x[[0, -1]] = 9999
    dx[(index,)] = 9999
    assert_eq(x, dx.compute())

    dx = da.from_array(x, chunks=(-1, -1))
    dx[...] = da.from_array(x, chunks=chunks)
    assert_eq(x, dx.compute())

    # RHS has extra leading size 1 dimensions compared to LHS
    dx = da.from_array(x.copy(), chunks=(2, 3))
    v = x.reshape((1, 1) + x.shape)
    x[...] = v
    dx[...] = v
    assert_eq(x, dx.compute())

    index = da.where(da.arange(3, chunks=(1,)) < 2)[0]
    v = -np.arange(12).reshape(1, 1, 6, 2)
    x[:, [0, 1]] = v
    dx[:, index] = v
    assert_eq(x, dx.compute())
@pytest.mark.parametrize(
    "index, value",
    [
        [(1, slice(1, 7, 2)), np.ma.masked],
        [(slice(1, 5, 2), [7, 5]), np.ma.masked_all((2, 2))],
    ],
)
def test_setitem_extended_API_2d_mask(index, value):
    """Assigning ``np.ma.masked`` via __setitem__ masks the targeted elements.

    Both data and mask of the dask result must match the equivalent numpy
    masked-array assignment after ``persist``.
    """
    x = np.ma.arange(60).reshape((6, 10))
    dx = da.from_array(x.data, chunks=(2, 3))
    # See https://github.com/numpy/numpy/issues/23000 for the `RuntimeWarning`
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            category=RuntimeWarning,
            message="invalid value encountered in cast",
        )
        x[index] = value
        dx[index] = value
        dx = dx.persist()
        assert_eq(x, dx.compute())
        assert_eq(x.mask, da.ma.getmaskarray(dx).compute())
def test_setitem_on_read_only_blocks():
    """Assignment succeeds even when chunk outputs are read-only arrays.

    Broadcast-trick style creation functions can produce read-only numpy
    blocks; ``__setitem__`` must still work on arrays built from them.
    """
    arr = da.empty((4, 6), dtype=float, chunks=(2, 2))
    arr[0] = 99
    assert_eq(arr[0, 0], 99.0)
    arr[0:2] = 88
    assert_eq(arr[0, 0], 88.0)
def test_setitem_errs():
    """__setitem__ raises for shape mismatches, bad index forms, and NaN→int.

    Covers boolean/integer fancy-index shape checks, unsupported index
    combinations (NotImplementedError), scalar arrays, RHS with
    non-broadcastable leading dims, unknown RHS chunks, and non-finite
    floats assigned into integer arrays.
    """
    x = da.ones((4, 4), chunks=(2, 2))
    with pytest.raises(ValueError):
        x[x > 1] = x
    # Shape mismatch
    with pytest.raises(ValueError):
        x[[True, True, False, False], 0] = [2, 3, 4]
    with pytest.raises(ValueError):
        x[[True, True, True, False], 0] = [2, 3]
    with pytest.raises(ValueError):
        x[0, [True, True, True, False]] = [2, 3]
    with pytest.raises(ValueError):
        x[0, [True, True, True, False]] = [1, 2, 3, 4, 5]
    with pytest.raises(ValueError):
        x[da.from_array([True, True, True, False]), 0] = [1, 2, 3, 4, 5]
    with pytest.raises(ValueError):
        x[0, da.from_array([True, False, False, True])] = [1, 2, 3, 4, 5]
    with pytest.raises(ValueError):
        x[:, 0] = [2, 3, 4]
    with pytest.raises(ValueError):
        x[0, :] = [1, 2, 3, 4, 5]
    x = da.ones((4, 4), chunks=(2, 2))
    # Too many indices
    with pytest.raises(IndexError):
        x[:, :, :] = 2
    # 2-d boolean indexing a single dimension
    with pytest.raises(IndexError):
        x[[[True, True, False, False]], 0] = 5
    # 2-d indexing a single dimension
    with pytest.raises(IndexError):
        x[[[1, 2, 3]], 0] = 5
    # Multiple 1-d boolean/integer arrays
    with pytest.raises(NotImplementedError):
        x[[1, 2], [2, 3]] = 6
    with pytest.raises(NotImplementedError):
        x[[True, True, False, False], [2, 3]] = 5
    with pytest.raises(NotImplementedError):
        x[[True, True, False, False], [False, True, False, False]] = 7
    # scalar boolean indexing
    with pytest.raises(NotImplementedError):
        x[True] = 5
    with pytest.raises(NotImplementedError):
        x[np.array(True)] = 5
    with pytest.raises(NotImplementedError):
        x[0, da.from_array(True)] = 5
    # Scalar arrays
    y = da.from_array(np.array(1))
    with pytest.raises(IndexError):
        y[:] = 2
    # RHS has non-broadcastable extra leading dimensions
    x = np.arange(12).reshape((3, 4))
    dx = da.from_array(x, chunks=(2, 2))
    with pytest.raises(ValueError):
        dx[...] = np.arange(24).reshape((2, 1, 3, 4))
    # RHS doesn't have chunks set
    dx = da.unique(da.random.default_rng().random([10]))
    with pytest.raises(ValueError, match="Arrays chunk sizes are unknown"):
        dx[0] = 0
    # np.nan assigned to integer array
    x = da.ones((3, 3), dtype=int)
    with pytest.raises(ValueError, match="cannot convert float NaN to integer"):
        x[:, 1] = np.nan
    with pytest.raises(ValueError, match="cannot convert float infinity to integer"):
        x[:, 1] = np.inf
    with pytest.raises(ValueError, match="cannot convert float infinity to integer"):
        x[:, 1] = -np.inf
@pytest.mark.parametrize("idx_namespace", [np, da])
def test_setitem_bool_index_errs(idx_namespace):
    """Boolean-index assignment raises on length/shape mismatch.

    Parametrized over numpy and dask index arrays; also guards against a
    naive ``where(idx, val, x)`` implementation that would broadcast to a
    result instead of erroring.
    """
    x = da.ones((3, 4), chunks=(2, 2))
    y = da.ones(4, chunks=2)
    array = idx_namespace.array
    # Shape mismatch
    with pytest.raises(ValueError):
        y[array([True, True, True, False])] = [2, 3]
    with pytest.raises(ValueError):
        # A naive where(idx, val, x) would produce a result
        y[array([True, True, True, False])] = [1, 2, 3, 4]
    with pytest.raises(ValueError):
        y[array([True, True, True, False])] = [1, 2, 3, 4, 5]
    with pytest.raises(ValueError):
        y[array([True, False, False, True])] = [1, 2, 3, 4, 5]
    # Too many/not enough booleans
    with pytest.raises(IndexError):
        y[array([True, False, True])] = 5
    with pytest.raises(IndexError):
        y[array([False, True, True, True, False])] = 5
    # Situations where a naive da.where(idx, val, x) would produce a result
    with pytest.raises(IndexError):
        x[array([True, False, False, True])] = 1
    with pytest.raises(IndexError):
        y[array([[True], [False]])] = 1  # da.where would broadcast to ndim=2
def test_zero_slice_dtypes():
    """Fancy indexing with an empty list preserves dtype and yields shape (0,)."""
    darr = da.arange(5, chunks=1)
    empty = darr[[]]
    assert empty.dtype == darr.dtype
    assert empty.shape == (0,)
    assert_eq(darr[[]], np.arange(5)[[]])
def test_zero_sized_array_rechunk():
    """blockwise over a zero-length array round-trips its (empty) contents."""
    x = da.arange(5, chunks=1)[:0]
    y = da.blockwise(identity, "i", x, "i", dtype=x.dtype)
    assert_eq(x, y)
def test_blockwise_zero_shape():
    """blockwise tolerates auxiliary inputs with zero-length dimensions.

    Smoke test: building the expression must not raise.
    """
    da.blockwise(
        lambda x: x,
        "i",
        da.arange(10, chunks=10),
        "i",
        da.from_array(np.ones((0, 2)), ((0,), 2)),
        "ab",
        da.from_array(np.ones((0,)), ((0,),)),
        "a",
        dtype="float64",
    )
def test_blockwise_zero_shape_new_axes():
    """blockwise with ``new_axes`` works alongside zero-length inputs.

    Smoke test: building the expression must not raise.
    """
    da.blockwise(
        lambda x: np.ones(42),
        "i",
        da.from_array(np.ones((0, 2)), ((0,), 2)),
        "ab",
        da.from_array(np.ones((0,)), ((0,),)),
        "a",
        dtype="float64",
        new_axes={"i": 42},
    )
def test_broadcast_against_zero_shape():
    """Elementwise ops broadcast scalars against zero-length slices correctly."""
    assert_eq(da.arange(1, chunks=1)[:0] + 0, np.arange(1)[:0] + 0)
    assert_eq(da.arange(1, chunks=1)[:0] + 0.1, np.arange(1)[:0] + 0.1)
    assert_eq(da.ones((5, 5), chunks=(2, 3))[:0] + 0, np.ones((5, 5))[:0] + 0)
    assert_eq(da.ones((5, 5), chunks=(2, 3))[:0] + 0.1, np.ones((5, 5))[:0] + 0.1)
    assert_eq(da.ones((5, 5), chunks=(2, 3))[:, :0] + 0, np.ones((5, 5))[:, :0] + 0)
    assert_eq(da.ones((5, 5), chunks=(2, 3))[:, :0] + 0.1, np.ones((5, 5))[:, :0] + 0.1)
def test_from_array_name():
    """from_array naming: tokenized by default, explicit via ``name=``,
    and randomized (unique per call) when ``name=False``."""
    x = np.array([1, 2, 3, 4, 5])
    chunks = x.shape
    # Default is tokenize the array
    dx = da.from_array(x, chunks=chunks)
    hashed_name = dx.name
    assert da.from_array(x, chunks=chunks).name == hashed_name
    # Specify name directly
    assert da.from_array(x, chunks=chunks, name="x").name == "x"
    # False gives a random name
    dx2 = da.from_array(x, chunks=chunks, name=False)
    dx3 = da.from_array(x, chunks=chunks, name=False)
    assert dx2.name != hashed_name
    assert dx3.name != hashed_name
    assert dx2.name != dx3.name
def test_concatenate_errs():
    """concatenate raises ValueError when non-concat dimensions disagree."""
    with pytest.raises(ValueError, match=r"Shapes.*\(2, 1\)"):
        da.concatenate(
            [da.zeros((2, 1), chunks=(2, 1)), da.zeros((2, 3), chunks=(2, 3))]
        )
    with pytest.raises(ValueError):
        da.concatenate(
            [da.zeros((1, 2), chunks=(1, 2)), da.zeros((3, 2), chunks=(3, 2))], axis=1
        )
def test_stack_errs():
    """stack with mismatched shapes raises a concise, informative ValueError."""
    with pytest.raises(ValueError) as e:
        da.stack([da.zeros((2,), chunks=2)] * 10 + [da.zeros((3,), chunks=3)] * 10)
    assert (
        str(e.value)
        == "Stacked arrays must have the same shape. The first array had shape (2,), while array 11 has shape (3,)."
    )
    # error message should stay short even with many inputs
    assert len(str(e.value)) < 105
def test_blockwise_with_numpy_arrays():
    """Plain numpy operands mix with dask arrays and survive in the graph."""
    x = np.ones(10)
    y = da.ones(10, chunks=(5,))
    assert_eq(x + y, x + x)
    s = da.sum(x)
    # the numpy input should appear directly in the task graph values
    assert any(isinstance(v, np.ndarray) for v in s.dask.values())
@pytest.mark.parametrize("chunks", (100, 6))
@pytest.mark.parametrize("other", [[0, 0, 1], [2, 1, 3], (0, 0, 1)])
def test_elemwise_with_lists(chunks, other):
    """Elementwise ops accept plain lists/tuples as the other operand."""
    x = np.arange(12).reshape((4, 3))
    d = da.arange(12, chunks=chunks).reshape((4, 3))
    x2 = np.vstack([x[:, 0], x[:, 1], x[:, 2]]).T
    d2 = da.vstack([d[:, 0], d[:, 1], d[:, 2]]).T
    assert_eq(x2, d2)
    x3 = x2 * other
    d3 = d2 * other
    assert_eq(x3, d3)
def test_constructor_plugin():
    """``array_plugins`` config hooks run on every newly constructed array.

    A plugin that returns a value (here: ``compute()``) replaces the array
    with that value.
    """
    L = []
    L2 = []
    with dask.config.set(array_plugins=[L.append, L2.append]):
        x = da.ones(10, chunks=5)
        y = x + 1
    assert L == L2 == [x, y]
    with dask.config.set(array_plugins=[lambda x: x.compute()]):
        x = da.ones(10, chunks=5)
        y = x + 1
    # the compute() plugin replaced the lazy result with a concrete one
    assert isinstance(y, np.ndarray)
    assert len(L) == 2
def test_no_warnings_on_metadata():
    """Computing meta for ufuncs (arccos on out-of-domain ones) emits no warnings."""
    x = da.ones(5, chunks=3)
    with warnings.catch_warnings(record=True) as record:
        da.arccos(x)
    assert not record
def test_delayed_array_key_hygeine():
    """Wrapping an array in Delayed and back via from_delayed preserves values.

    NOTE(review): "hygeine" is a typo for "hygiene"; left unchanged because
    renaming would alter the test ID.
    """
    a = da.zeros((1,), chunks=(1,))
    d = delayed(identity)(a)
    b = da.from_delayed(d, shape=a.shape, dtype=a.dtype)
    assert_eq(a, b)
def test_empty_chunks_in_array_len():
    """len() of a 0-d array raises TypeError, mirroring numpy's behavior."""
    x = da.ones((), chunks=())
    with pytest.raises(TypeError) as exc_info:
        len(x)
    err_msg = "len() of unsized object"
    assert err_msg in str(exc_info.value)
@pytest.mark.parametrize("dtype", [None, [("a", "f4"), ("b", object)]])
def test_meta(dtype):
    """``_meta`` is a small ndarray whose dtype matches the array's dtype.

    NOTE(review): the parametrized ``dtype`` is not used in the body —
    presumably it was meant to be passed to ``da.zeros``; confirm intent.
    """
    a = da.zeros((1,), chunks=(1,))
    assert a._meta.dtype == a.dtype
    assert isinstance(a._meta, np.ndarray)
    assert a.nbytes < 1000
@pytest.mark.parametrize(
    "shape,limit,expected",
    [
        (100, 10, (10,) * 10),
        (20, 10, (10, 10)),
        (20, 5, (5, 5, 5, 5)),
        (24, 5, (5, 5, 5, 5, 4)),
        (23, 5, (5, 5, 5, 5, 3)),  # relatively prime, don't use 1s
        (1000, 167, (167, 167, 167, 167, 167, 165)),
    ],
)
def test_normalize_chunks_auto_1d(shape, limit, expected):
    """'auto' chunking of 1-d shapes respects the element-count limit."""
    result = normalize_chunks("auto", (shape,), limit=limit, dtype=np.uint8)
    assert result == (expected,)
@pytest.mark.parametrize(
    "shape,chunks,limit,expected",
    [
        ((20, 20), ("auto", 2), 20, ((10, 10), (2,) * 10)),
        (
            (20, 20),
            ("auto", (2, 2, 2, 2, 2, 5, 5)),
            20,
            ((4, 4, 4, 4, 4), (2, 2, 2, 2, 2, 5, 5)),
        ),
        ((1, 20), "auto", 10, ((1,), (10, 10))),
    ],
)
def test_normalize_chunks_auto_2d(shape, chunks, limit, expected):
    """'auto' chunking of 2-d shapes balances against fixed sibling chunks."""
    result = normalize_chunks(chunks, shape, limit=limit, dtype="uint8")
    assert result == expected
def test_normalize_chunks_auto_3d():
    """'auto' chunking in 3-d splits the free axes to fit the byte limit."""
    result = normalize_chunks(
        ("auto", "auto", 2), (20, 20, 20), limit=200, dtype="uint8"
    )
    expected = ((10, 10), (10, 10), (2,) * 10)
    assert result == expected
    result = normalize_chunks("auto", (20, 20, 20), limit=8, dtype="uint8")
    expected = ((2,) * 10,) * 3
    assert result == expected
def test_constructors_chunks_dict():
    """Creation functions accept per-axis chunk dicts, including 'auto'."""
    x = da.ones((20, 20), chunks={0: 10, 1: 5})
    assert x.chunks == ((10, 10), (5, 5, 5, 5))
    x = da.ones((20, 20), chunks={0: 10, 1: "auto"})
    assert x.chunks == ((10, 10), (20,))
def test_from_array_chunks_dict():
    """from_array chunk dicts (-1 = whole axis, 'auto' = config-sized) match
    the explicit tuple equivalent."""
    with dask.config.set({"array.chunk-size": "128kiB"}):
        x = np.empty((100, 100, 100))
        y = da.from_array(x, chunks={0: 10, 1: -1, 2: "auto"})
        z = da.from_array(x, chunks=(10, 100, (16,) * 6 + (4,)))
        assert y.chunks == z.chunks
@pytest.mark.parametrize("dtype", [object, [("a", object), ("b", int)]])
def test_normalize_chunks_object_dtype(dtype):
    """'auto' chunking is unsupported for object dtypes (unknown item size)."""
    x = np.array(["a", "abc"], dtype=object)
    with pytest.raises(NotImplementedError):
        da.from_array(x, chunks="auto")
def test_normalize_chunks_tuples_of_tuples():
    """Explicit per-axis chunk tuples can be mixed with 'auto' axes."""
    result = normalize_chunks(((2, 3, 5), "auto"), (10, 10), limit=10, dtype=np.uint8)
    expected = ((2, 3, 5), (2, 2, 2, 2, 2))
    assert result == expected
def test_normalize_chunks_nan():
    """'auto' chunking over unknown (NaN) shapes raises a ValueError that
    mentions 'auto'."""
    with pytest.raises(ValueError) as info:
        normalize_chunks("auto", (np.nan,), limit=10, dtype=np.uint8)
    assert "auto" in str(info.value)
    with pytest.raises(ValueError) as info:
        normalize_chunks(((np.nan, np.nan), "auto"), (10, 10), limit=10, dtype=np.uint8)
    assert "auto" in str(info.value)
def test_pandas_from_dask_array():
    """A dask array can seed a pandas Series, keeping dtype and values."""
    pd = pytest.importorskip("pandas")
    a = da.ones((12,), chunks=4)
    s = pd.Series(a, index=range(12))
    assert s.dtype == a.dtype
    assert_eq(s.values, a)
def test_from_zarr_unique_name():
    """Distinct zarr arrays tokenize to distinct dask graph names."""
    zarr = pytest.importorskip("zarr")
    first = zarr.array([1, 2, 3])
    second = zarr.array([4, 5, 6])
    assert da.from_zarr(first).name != da.from_zarr(second).name
def test_from_zarr_name():
    """An explicit ``name=`` passed to from_zarr is used verbatim."""
    zarr = pytest.importorskip("zarr")
    source = zarr.array([1, 2, 3])
    result = da.from_zarr(source, name="foo")
    assert result.name == "foo"
def test_zarr_roundtrip():
    """to_zarr → from_zarr round-trips data and chunk structure."""
    pytest.importorskip("zarr")
    with tmpdir() as d:
        a = da.zeros((3, 3), chunks=(1, 1))
        a.to_zarr(d)
        a2 = da.from_zarr(d)
        assert_eq(a, a2)
        assert a2.chunks == a.chunks
def test_zarr_roundtrip_with_path_like():
    """to_zarr / from_zarr accept ``pathlib.Path`` objects as the store."""
    pytest.importorskip("zarr")
    with tmpdir() as d:
        path = pathlib.Path(d)
        a = da.zeros((3, 3), chunks=(1, 1))
        a.to_zarr(path)
        a2 = da.from_zarr(path)
        assert_eq(a, a2)
        assert a2.chunks == a.chunks
def test_to_zarr_accepts_empty_array_without_exception_raised():
    """Writing a zero-length array to zarr is a no-op, not an error."""
    pytest.importorskip("zarr")
    with tmpdir() as d:
        a = da.from_array(np.arange(0))
        a.to_zarr(d)
@pytest.mark.parametrize("compute", [False, True])
def test_zarr_return_stored(compute):
    """``return_stored=True`` yields a dask Array view over the written store."""
    pytest.importorskip("zarr")
    with tmpdir() as d:
        a = da.zeros((3, 3), chunks=(1, 1))
        a2 = a.to_zarr(d, compute=compute, return_stored=True)
        assert isinstance(a2, Array)
        assert_eq(a, a2, check_graph=False)
        assert a2.chunks == a.chunks
@pytest.mark.parametrize("inline_array", [True, False])
def test_zarr_inline_array(inline_array):
    """``inline_array`` controls whether the zarr object is embedded in tasks
    (inlined) or stored once as a separate graph entry."""
    zarr = pytest.importorskip("zarr")
    a = zarr.array([1, 2, 3])
    dsk = dict(da.from_zarr(a, inline_array=inline_array).dask)
    assert len(dsk) == (0 if inline_array else 1) + 1
    assert (a in dsk.values()) is not inline_array
def test_zarr_existing_array():
    """to_zarr can write into a pre-created zarr array with matching chunks."""
    zarr = pytest.importorskip("zarr")
    c = (1, 1)
    a = da.ones((3, 3), chunks=c)
    z = zarr.zeros_like(a, chunks=c)
    a.to_zarr(z)
    a2 = da.from_zarr(z)
    assert_eq(a, a2)
    assert a2.chunks == a.chunks
def test_to_zarr_unknown_chunks_raises():
    """to_zarr refuses arrays whose chunk sizes are unknown (NaN)."""
    pytest.importorskip("zarr")
    a = da.random.default_rng().random((10,), chunks=(3,))
    a = a[a > 0.5]
    with pytest.raises(ValueError, match="unknown chunk sizes"):
        a.to_zarr({})
def test_read_zarr_chunks():
    """from_zarr honors an explicit ``chunks=`` override on read."""
    pytest.importorskip("zarr")
    a = da.zeros((9,), chunks=(3,))
    with tmpdir() as d:
        a.to_zarr(d)
        arr = da.from_zarr(d, chunks=(5,))
        assert arr.chunks == ((5, 4),)
def test_zarr_pass_store():
    """to_zarr / from_zarr accept an explicit zarr store object.

    Uses ``DirectoryStore`` for zarr < 3 and ``LocalStore`` for zarr >= 3.
    """
    zarr = pytest.importorskip("zarr")
    with tmpdir() as d:
        if Version(zarr.__version__) < Version("3.0.0.a0"):
            store = zarr.storage.DirectoryStore(d)
        else:
            store = zarr.storage.LocalStore(d, read_only=False)
        a = da.zeros((3, 3), chunks=(1, 1))
        a.to_zarr(store)
        a2 = da.from_zarr(store)
        assert_eq(a, a2)
        assert a2.chunks == a.chunks
def test_zarr_group():
    """to_zarr ``component=`` writes into (nested) zarr groups.

    Re-writing an existing component requires ``overwrite=True``; new
    components are fine with ``overwrite=False``.
    """
    zarr = pytest.importorskip("zarr")
    with tmpdir() as d:
        a = da.zeros((3, 3), chunks=(1, 1))
        a.to_zarr(d, component="test")
        with pytest.raises((OSError, ValueError)):
            a.to_zarr(d, component="test", overwrite=False)
        a.to_zarr(d, component="test", overwrite=True)
        # second time is fine, group exists
        a.to_zarr(d, component="test2", overwrite=False)
        a.to_zarr(d, component="nested/test", overwrite=False)
        group = zarr.open_group(store=d, mode="r")
        assert set(group) == {"nested", "test", "test2"}
        assert "test" in group["nested"]
        a2 = da.from_zarr(d, component="test")
        assert_eq(a, a2)
        assert a2.chunks == a.chunks
@pytest.mark.parametrize(
    "shape, chunks, expect_rechunk",
    [
        ((6, 2), ((2, 1, 1, 2), 1), True),
        ((6, 2), ((2, 1, 2, 1), 1), True),
        ((7, 2), ((2, 2, 2, 1), 1), False),
        ((2, 7), (1, (2, 2, 2, 1)), False),
        ((2, 6), (1, (2, 1, 2, 1)), True),
    ],
)
def test_zarr_irregular_chunks(shape, chunks, expect_rechunk):
    """to_zarr inserts a rechunk step only for truly irregular chunkings.

    'Regular except possibly the last chunk' layouts write directly.
    """
    pytest.importorskip("zarr")
    with tmpdir() as d:
        a = da.zeros(shape, chunks=chunks)  # ((2, 1, 1, 2), 1))
        store_delayed = a.to_zarr(d, component="test", compute=False)
        assert (
            any("rechunk" in key_split(k) for k in dict(store_delayed.dask))
            is expect_rechunk
        )
        store_delayed.compute()
@pytest.mark.parametrize(
    "data",
    [
        [(), True],
        [((1,),), True],
        [((1, 1, 1),), True],
        [((1,), (1,)), True],
        [((2, 2, 1),), True],
        [((2, 2, 3),), False],
        [((1, 1, 1), (2, 2, 3)), False],
        [((1, 2, 1),), False],
    ],
)
def test_regular_chunks(data):
    """_check_regular_chunks accepts uniform chunkings where only the last
    chunk per axis may be smaller."""
    from dask.array.core import _check_regular_chunks
    chunkset, expected = data
    assert _check_regular_chunks(chunkset) == expected
def test_from_array_respects_zarr_shards():
    """
    Test that da.from_array chooses chunks based on
    the shard shape of a sharded Zarr array instead of the chunk shape
    """
    zarr = pytest.importorskip(
        "zarr", minversion="3", reason="Zarr 3 or higher needed for sharding"
    )
    shape = (1000,) * 3
    z_chunks = (101,) * 3
    z_shards = (404,) * 3
    # shards (404) are a multiple of chunks (101); dask chunks must align to shards
    z = zarr.create_array(
        {}, shape=shape, chunks=z_chunks, shards=z_shards, dtype="uint8"
    )
    dz = da.from_array(z)
    # Check that all elements of nominal chunksize are divisible by the respective shard shape
    assert all(c % s == 0 for c, s in zip(dz.chunksize, z.shards))
@pytest.mark.parametrize("region_spec", [None, "all", "half"])
def test_zarr_to_zarr_shards(region_spec: None | Literal["all", "half"]):
    """
    Test that calling to_zarr with a dask array with chunks that do not match the
    shard shape of the zarr array automatically rechunks to a multiple of the
    shard shape to ensure safe writes.
    This test is parametrized over different regions, because the rechunking logic in
    to_zarr contains a branch depending on whether a region parameter was specified.
    """
    zarr = pytest.importorskip("zarr", minversion="3.0.0")
    shape = (100,)
    dask_chunks = (10,)
    zarr_chunk_shape = (1,)
    zarr_shard_shape = (2,)
    # Create a dask array with chunks that don't align with shards
    arr = da.arange(shape[0], chunks=dask_chunks)
    # the region parameter we will pass into to_zarr
    region: tuple[slice, ...] | None
    # The region of the zarr array we will write into
    sel: tuple[slice, ...]
    if region_spec is None:
        sel = (slice(None),)
        region = None
    elif region_spec == "all":
        sel = (slice(None),)
        region = sel
    else:
        sel = (slice(shape[0] // 2),)
        region = sel
        # crop the source data
        arr = arr[sel]
    # Create a sharded zarr array
    # In Zarr v3: chunks = inner chunk shape, shards = shard shape
    z = zarr.create_array(
        store={},
        shape=shape,
        chunks=zarr_chunk_shape,
        shards=zarr_shard_shape,
        dtype=arr.dtype,
    )
    # to_zarr should automatically rechunk to a multiple of the shard shape
    result = arr.to_zarr(z, region=region, compute=False)
    # Verify the array was rechunked to the shard shape
    assert all(c % s == 0 for c, s in zip(result.chunksize, zarr_shard_shape))
    # Verify data correctness
    result.compute()
    assert_eq(z[sel], arr.compute())
def test_zarr_risky_shards_warns():
    """
    Test that we see a performance warning when dask chooses a chunk size that will cause data loss
    for zarr arrays.
    """
    # NOTE(review): pytest.raises is used with a warning class — presumably the
    # suite's filterwarnings config escalates PerformanceWarning to an error;
    # confirm, else this should be pytest.warns.
    zarr = pytest.importorskip("zarr", minversion="3.0.0")
    shape = (100,)
    dask_chunks = (10,)
    zarr_chunk_shape = (3,)
    zarr_shard_shape = (6,)
    arr = da.arange(shape[0], chunks=dask_chunks)
    z = zarr.create_array(
        store={},
        shape=shape,
        chunks=zarr_chunk_shape,
        shards=zarr_shard_shape,
        dtype=arr.dtype,
    )
    with dask.config.set({"array.chunk-size": 1}):
        with pytest.raises(
            PerformanceWarning,
            match="The input Dask array will be rechunked along axis",
        ):
            arr.to_zarr(z)
def test_zarr_nocompute():
    """``compute=False`` returns a lazy Array that writes when computed."""
    pytest.importorskip("zarr")
    with tmpdir() as d:
        a = da.zeros((3, 3), chunks=(1, 1))
        out = a.to_zarr(d, compute=False)
        assert isinstance(out, Array)
        dask.compute(out)
        a2 = da.from_zarr(d)
        assert_eq(a, a2)
        assert a2.chunks == a.chunks
def test_zarr_regions():
    """``region=`` writes into a sub-slice of an existing zarr array.

    Misaligned regions still write correctly but emit a PerformanceWarning;
    passing ``region`` when the target is a path (not an open array) raises.
    """
    zarr = pytest.importorskip("zarr")
    a = da.arange(16).reshape((4, 4)).rechunk(2)
    z = zarr.zeros_like(a, chunks=2)
    a[:2, :2].to_zarr(z, region=(slice(2), slice(2)))
    a2 = da.from_zarr(z)
    expected = [[0, 1, 0, 0], [4, 5, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
    assert_eq(a2, expected)
    assert a2.chunks == a.chunks
    with pytest.warns(PerformanceWarning):
        a[:3, 3:4].to_zarr(z, region=(slice(1, 4), slice(2, 3)))
    a2 = da.from_zarr(z)
    expected = [[0, 1, 0, 0], [4, 5, 3, 0], [0, 0, 7, 0], [0, 0, 11, 0]]
    assert_eq(a2, expected)
    assert a2.chunks == a.chunks
    with pytest.warns(PerformanceWarning):
        a[3:, 3:].to_zarr(z, region=(slice(2, 3), slice(1, 2)))
    a2 = da.from_zarr(z)
    expected = [[0, 1, 0, 0], [4, 5, 3, 0], [0, 15, 7, 0], [0, 0, 11, 0]]
    assert_eq(a2, expected)
    assert a2.chunks == a.chunks
    with pytest.raises(ValueError):
        with tmpdir() as d:
            a.to_zarr(d, region=(slice(2), slice(2)))
def test_tiledb_roundtrip():
    """Round-trip dask arrays through TileDB by URI and by open array handle."""
    tiledb = pytest.importorskip("tiledb")
    # 1) load with default chunking
    # 2) load from existing tiledb.DenseArray
    # 3) write to existing tiledb.DenseArray
    rng = da.random.default_rng()
    a = rng.random((3, 3))
    with tmpdir() as uri:
        da.to_tiledb(a, uri)
        tdb = da.from_tiledb(uri)
        assert_eq(a, tdb)
        assert a.chunks == tdb.chunks
        # from tiledb.array
        with tiledb.open(uri) as t:
            tdb2 = da.from_tiledb(t)
            assert_eq(a, tdb2)
    with tmpdir() as uri2:
        with tiledb.empty_like(uri2, a) as t:
            a.to_tiledb(t)
            assert_eq(da.from_tiledb(uri2), a)
    # specific chunking
    with tmpdir() as uri:
        a = rng.random((3, 3), chunks=(1, 1))
        a.to_tiledb(uri)
        tdb = da.from_tiledb(uri)
        assert_eq(a, tdb)
        assert a.chunks == tdb.chunks
def test_tiledb_multiattr():
    """from_tiledb can select a single attribute of a multi-attribute array."""
    tiledb = pytest.importorskip("tiledb")
    dom = tiledb.Domain(
        tiledb.Dim("x", (0, 1000), tile=100), tiledb.Dim("y", (0, 1000), tile=100)
    )
    schema = tiledb.ArraySchema(
        attrs=(tiledb.Attr("attr1"), tiledb.Attr("attr2")), domain=dom
    )
    with tmpdir() as uri:
        tiledb.DenseArray.create(uri, schema)
        tdb = tiledb.DenseArray(uri, "w")
        rng = np.random.default_rng()
        ar1 = rng.standard_normal(tdb.schema.shape)
        ar2 = rng.standard_normal(tdb.schema.shape)
        tdb[:] = {"attr1": ar1, "attr2": ar2}
        tdb = tiledb.DenseArray(uri, "r")
        # basic round-trip from dask.array
        d = da.from_tiledb(uri, attribute="attr2")
        assert_eq(d, ar2)
        # smoke-test computation directly on the TileDB view
        d = da.from_tiledb(uri, attribute="attr2")
        assert_eq(np.mean(ar2), d.mean().compute(scheduler="threads"))
def test_blockview():
    """BlockView indexes chunk-blocks of an array in 1-d, 2-d, and 3-d.

    Also checks ``shape``/``size``/``ravel`` and the unsupported-index
    errors (multiple lists, newaxis, out-of-range).
    """
    x = da.arange(10, chunks=2)
    blockview = BlockView(x)
    assert x.blocks == blockview
    assert isinstance(blockview[0], da.Array)
    assert_eq(blockview[0], x[:2])
    assert_eq(blockview[-1], x[-2:])
    assert_eq(blockview[:3], x[:6])
    assert_eq(blockview[[0, 1, 2]], x[:6])
    assert_eq(blockview[[3, 0, 2]], np.array([6, 7, 0, 1, 4, 5]))
    assert_eq(blockview.shape, tuple(map(len, x.chunks)))
    assert_eq(blockview.size, math.prod(blockview.shape))
    assert_eq(
        blockview.ravel(), [blockview[idx] for idx in np.ndindex(blockview.shape)]
    )
    x = da.random.default_rng().random((20, 20), chunks=(4, 5))
    blockview = BlockView(x)
    assert_eq(blockview[0], x[:4])
    assert_eq(blockview[0, :3], x[:4, :15])
    assert_eq(blockview[:, :3], x[:, :15])
    assert_eq(blockview.shape, tuple(map(len, x.chunks)))
    assert_eq(blockview.size, math.prod(blockview.shape))
    assert_eq(
        blockview.ravel(), [blockview[idx] for idx in np.ndindex(blockview.shape)]
    )
    x = da.ones((40, 40, 40), chunks=(10, 10, 10))
    blockview = BlockView(x)
    assert_eq(blockview[0, :, 0], np.ones((10, 40, 10)))
    assert_eq(blockview.shape, tuple(map(len, x.chunks)))
    assert_eq(blockview.size, math.prod(blockview.shape))
    assert_eq(
        blockview.ravel(), [blockview[idx] for idx in np.ndindex(blockview.shape)]
    )
    # NOTE(review): x is rebound below but ``blockview`` still wraps the
    # (40, 40, 40) array — the error assertions hold either way, but the
    # sibling test_blocks_indexer uses the 2x2 array here; confirm intent.
    x = da.ones((2, 2), chunks=1)
    with pytest.raises(ValueError):
        blockview[[0, 1], [0, 1]]
    with pytest.raises(ValueError):
        blockview[np.array([0, 1]), [0, 1]]
    with pytest.raises(ValueError) as info:
        blockview[np.array([0, 1]), np.array([0, 1])]
    assert "list" in str(info.value)
    with pytest.raises(ValueError) as info:
        blockview[None, :, :]
    assert "newaxis" in str(info.value) and "not supported" in str(info.value)
    with pytest.raises(IndexError) as info:
        blockview[100, 100]
def test_blocks_indexer():
    """The ``.blocks`` accessor slices chunk-blocks like BlockView does."""
    x = da.arange(10, chunks=2)
    assert isinstance(x.blocks[0], da.Array)
    assert_eq(x.blocks[0], x[:2])
    assert_eq(x.blocks[-1], x[-2:])
    assert_eq(x.blocks[:3], x[:6])
    assert_eq(x.blocks[[0, 1, 2]], x[:6])
    assert_eq(x.blocks[[3, 0, 2]], np.array([6, 7, 0, 1, 4, 5]))
    x = da.random.default_rng().random((20, 20), chunks=(4, 5))
    assert_eq(x.blocks[0], x[:4])
    assert_eq(x.blocks[0, :3], x[:4, :15])
    assert_eq(x.blocks[:, :3], x[:, :15])
    x = da.ones((40, 40, 40), chunks=(10, 10, 10))
    assert_eq(x.blocks[0, :, 0], np.ones((10, 40, 10)))
    x = da.ones((2, 2), chunks=1)
    with pytest.raises(ValueError):
        x.blocks[[0, 1], [0, 1]]
    with pytest.raises(ValueError):
        x.blocks[np.array([0, 1]), [0, 1]]
    with pytest.raises(ValueError) as info:
        x.blocks[np.array([0, 1]), np.array([0, 1])]
    assert "list" in str(info.value)
    with pytest.raises(ValueError) as info:
        x.blocks[None, :, :]
    assert "newaxis" in str(info.value) and "not supported" in str(info.value)
    with pytest.raises(IndexError) as info:
        x.blocks[100, 100]
def test_partitions_indexer():
    """``.partitions`` behaves identically to ``.blocks`` (it is an alias)."""
    # .partitions is an alias of .blocks for dask arrays
    x = da.arange(10, chunks=2)
    assert isinstance(x.partitions[0], da.Array)
    assert_eq(x.partitions[0], x[:2])
    assert_eq(x.partitions[-1], x[-2:])
    assert_eq(x.partitions[:3], x[:6])
    assert_eq(x.partitions[[0, 1, 2]], x[:6])
    assert_eq(x.partitions[[3, 0, 2]], np.array([6, 7, 0, 1, 4, 5]))
    x = da.random.default_rng().random((20, 20), chunks=(4, 5))
    assert_eq(x.partitions[0], x[:4])
    assert_eq(x.partitions[0, :3], x[:4, :15])
    assert_eq(x.partitions[:, :3], x[:, :15])
    x = da.ones((40, 40, 40), chunks=(10, 10, 10))
    assert_eq(x.partitions[0, :, 0], np.ones((10, 40, 10)))
    x = da.ones((2, 2), chunks=1)
    with pytest.raises(ValueError):
        x.partitions[[0, 1], [0, 1]]
    with pytest.raises(ValueError):
        x.partitions[np.array([0, 1]), [0, 1]]
    with pytest.raises(ValueError) as info:
        x.partitions[np.array([0, 1]), np.array([0, 1])]
    assert "list" in str(info.value)
    with pytest.raises(ValueError) as info:
        x.partitions[None, :, :]
    assert "newaxis" in str(info.value) and "not supported" in str(info.value)
    with pytest.raises(IndexError) as info:
        x.partitions[100, 100]
@pytest.mark.filterwarnings("ignore:the matrix subclass:PendingDeprecationWarning")
@pytest.mark.parametrize(
    "container", [pytest.param("array", marks=skip_if_no_sparray()), "matrix"]
)
def test_dask_array_holds_scipy_sparse_containers(container):
    """Dask arrays can carry scipy.sparse blocks (csr matrix or array).

    Per-chunk delayed values, full compute, and transpose must all yield
    the sparse container type with the right contents.
    """
    pytest.importorskip("scipy.sparse")
    import scipy.sparse
    cls = scipy.sparse.csr_matrix if container == "matrix" else scipy.sparse.csr_array
    kind = scipy.sparse.spmatrix if container == "matrix" else scipy.sparse.sparray
    x = da.random.default_rng().random((1000, 10), chunks=(100, 10))
    x[x < 0.9] = 0
    xx = x.compute()
    y = x.map_blocks(cls)
    vs = y.to_delayed().flatten().tolist()
    values = dask.compute(*vs, scheduler="single-threaded")
    assert all(isinstance(v, cls) for v in values)
    yy = y.compute(scheduler="single-threaded")
    assert isinstance(yy, kind)
    assert (yy == xx).all()
    z = x.T.map_blocks(cls)
    zz = z.compute(scheduler="single-threaded")
    assert isinstance(zz, kind)
    assert (zz == xx.T).all()
@pytest.mark.filterwarnings("ignore:the matrix subclass:PendingDeprecationWarning")
@pytest.mark.parametrize(
    "container", [pytest.param("array", marks=skip_if_no_sparray()), "matrix"]
)
@pytest.mark.parametrize("format", ["csr", "csc"])
def test_dask_array_setitem_singleton_sparse(container, format):
    """__setitem__ on a single-chunk dask array of sparse blocks matches
    the equivalent in-place scipy.sparse assignment."""
    pytest.importorskip("scipy.sparse")
    import scipy.sparse
    cls = (
        getattr(scipy.sparse, f"{format}_matrix")
        if container == "matrix"
        else getattr(scipy.sparse, f"{format}_array")
    )
    x = cls(scipy.sparse.eye(100))
    x_dask = da.from_array(x)
    x[slice(10), slice(10)] = 0
    x_dask[slice(10), slice(10)] = 0
    np.testing.assert_almost_equal(
        x_dask.compute(scheduler="single-threaded").toarray(), x.toarray()
    )
@pytest.mark.parametrize(
    "index",
    [
        [5, 8],
        0,
        slice(5, 8),
        np.array([5, 8]),
        np.array([True, False] * 500),
        [True, False] * 500,
    ],
)
@pytest.mark.parametrize(
    ("sparse_module_path", "container"),
    [
        ("scipy.sparse", "csr_matrix"),
        pytest.param("scipy.sparse", "csr_array", marks=skip_if_no_sparray()),
        ("cupyx.scipy.sparse", "csr_matrix"),
    ],
)
def test_scipy_sparse_indexing(index, sparse_module_path, container):
    """Row indexing of sparse-block dask arrays matches the dense result.

    Runs with both scipy and cupy sparse containers (cupy selects the
    'cupy' array backend).
    """
    sp = pytest.importorskip(sparse_module_path)
    if sparse_module_path == "cupyx.scipy.sparse":
        backend = "cupy"
    else:
        backend = "numpy"
    with dask.config.set({"array.backend": backend}):
        x = da.random.default_rng().random((1000, 10), chunks=(100, 10))
        x[x < 0.9] = 0
        y = x.map_blocks(getattr(sp, container))
        # sparse != dense comparison yields elementwise mismatches; none allowed
        assert not (
            x[index, :].compute(scheduler="single-threaded")
            != y[index, :].compute(scheduler="single-threaded")
        ).sum()
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize(
    "container", [pytest.param("array", marks=skip_if_no_sparray()), "matrix"]
)
def test_scipy_sparse_concatenate(axis, container):
    """da.concatenate of sparse-block arrays equals scipy vstack/hstack."""
    pytest.importorskip("scipy.sparse")
    import scipy.sparse
    cls = scipy.sparse.csr_matrix if container == "matrix" else scipy.sparse.csr_array
    rng = da.random.default_rng()
    xs = []
    ys = []
    for _ in range(2):
        x = rng.random((1000, 10), chunks=(100, 10))
        x[x < 0.9] = 0
        xs.append(x)
        ys.append(x.map_blocks(cls))
    z = da.concatenate(ys, axis=axis)
    z = z.compute()
    if axis == 0:
        sp_concatenate = scipy.sparse.vstack
    elif axis == 1:
        sp_concatenate = scipy.sparse.hstack
    z_expected = sp_concatenate([cls(e.compute()) for e in xs])
    # zero mismatching entries between the two sparse results
    assert (z != z_expected).nnz == 0
@pytest.mark.parametrize("func", [da.asarray, da.asanyarray])
@pytest.mark.parametrize("src", [[[1, 2]], np.asarray([[1, 2]]), da.asarray([[1, 2]])])
def test_scipy_sparse_asarray_like(src, func):
    """scipy.sparse.csr_matrix objects are not a valid argument for
    np.asarray(..., like=...) and require special-casing.
    """
    pytest.importorskip("scipy.sparse")
    import scipy.sparse
    mtx = scipy.sparse.csr_matrix([[3, 4, 5], [6, 7, 8]])
    like = da.from_array(mtx)
    a = func(src, like=like)
    assert isinstance(a._meta, type(mtx))
    assert isinstance(a.compute(), type(mtx))
    # Respect dtype; quietly disregard order
    a = func(src, dtype=np.float32, order="C", like=like)
    assert a.dtype == np.float32
    assert a.compute().dtype == np.float32
    assert isinstance(a._meta, type(mtx))
    assert isinstance(a.compute(), type(mtx))
def test_3851():
    """Regression test (issue 3851): argmax must compute without warnings."""
    with warnings.catch_warnings(record=True) as record:
        Y = da.random.default_rng().random((10, 10), chunks="auto")
        da.argmax(Y, axis=0).compute()
    assert not record
def test_3925():
    """Regression test (issue 3925): comparing object-dtype scalars works."""
    arr = da.from_array(np.array(["a", "b", "c"], dtype=object), chunks=-1)
    first = arr[0]
    assert (first == first).compute(scheduler="sync")
def test_map_blocks_large_inputs_delayed():
    """Large numpy arguments to map_blocks are embedded once as DataNodes.

    Whether the big array is passed positionally or by keyword, the graph
    must hold a single reference to it rather than inlining a copy of its
    repr into every task.
    """
    a = da.ones(10, chunks=(5,))
    b = np.ones(1000000)
    c = a.map_blocks(add, b)
    assert any(b is v() for v in c.dask.values() if isinstance(v, DataNode))
    assert repr(dict(c.dask)).count(repr(b)[:10]) == 1  # only one occurrence
    d = a.map_blocks(lambda x, y: x + y.sum(), y=b)
    assert_eq(d, d)
    assert any(b is v() for v in d.dask.values() if isinstance(v, DataNode))
    # bug fix: this previously re-checked c.dask instead of d.dask
    assert repr(dict(d.dask)).count(repr(b)[:10]) == 1  # only one occurrence
def test_blockwise_large_inputs_delayed():
    """Large numpy arguments to blockwise are embedded once as DataNodes.

    Both positional and keyword forms must keep a single graph reference
    to the big array.
    """
    def func(a, b):
        return a
    a = da.ones(10, chunks=(5,))
    b = np.ones(1000000)
    c = da.blockwise(func, "i", a, "i", b, None, dtype=a.dtype)
    assert any(b is v() for v in c.dask.values() if isinstance(v, DataNode))
    assert repr(dict(c.dask)).count(repr(b)[:10]) == 1  # only one occurrence
    assert_eq(c, c)
    d = da.blockwise(lambda x, y: x, "i", a, "i", y=b, dtype=a.dtype)
    assert any(b is v() for v in d.dask.values() if isinstance(v, DataNode))
    # bug fix: this previously re-checked c.dask instead of d.dask
    assert repr(dict(d.dask)).count(repr(b)[:10]) == 1  # only one occurrence
    assert_eq(d, d)
def test_slice_reversed():
    """A slice with start > stop yields an empty array, as in numpy."""
    ones = da.ones(10, chunks=-1)
    empty = ones[6:3]
    assert_eq(empty, np.ones(0))
def test_map_blocks_chunks():
    """``chunks=`` on map_blocks declares per-block output shape directly."""
    x = da.arange(400, chunks=(100,))
    y = da.arange(40, chunks=(10,))
    def func(a, b):
        # each output block is the pair of per-block maxima
        return np.array([a.max(), b.max()])
    assert_eq(
        da.map_blocks(func, x, y, chunks=(2,), dtype=x.dtype),
        np.array([99, 9, 199, 19, 299, 29, 399, 39]),
    )
def test_nbytes_auto():
    """Byte-string chunk specs ('800B') size chunks by memory, not count.

    Specs too small to fit even one element per axis raise ValueError.
    """
    chunks = normalize_chunks("800B", shape=(500,), dtype="float64")
    assert chunks == ((100, 100, 100, 100, 100),)
    chunks = normalize_chunks("200B", shape=(10, 10), dtype="float64")
    assert chunks == ((5, 5), (5, 5))
    chunks = normalize_chunks((5, "200B"), shape=(10, 10), dtype="float64")
    assert chunks == ((5, 5), (5, 5))
    chunks = normalize_chunks("33B", shape=(10, 10), dtype="float64")
    assert chunks == ((2, 2, 2, 2, 2), (2, 2, 2, 2, 2))
    chunks = normalize_chunks("1800B", shape=(10, 20, 30), dtype="float64")
    assert chunks == ((6, 4), (6, 6, 6, 2), (6, 6, 6, 6, 6))
    with pytest.raises(ValueError):
        normalize_chunks("10B", shape=(10,), limit=20, dtype="float64")
    with pytest.raises(ValueError):
        normalize_chunks("100B", shape=(10, 10), limit=20, dtype="float64")
    with pytest.raises(ValueError):
        normalize_chunks(("100B", "10B"), shape=(10, 10), dtype="float64")
    with pytest.raises(ValueError):
        normalize_chunks(("10B", "10B"), shape=(10, 10), limit=20, dtype="float64")
def test_auto_chunks():
    """'auto' chunking with ``previous_chunks`` coalesces uneven chunkings."""
    chunks = ((1264, 1264, 1264, 1264, 1264, 1264, 1045), (1264, 491))
    shape = sum(chunks[0]), sum(chunks[1])
    result = normalize_chunks(
        ("auto", "auto"), shape=shape, dtype="int32", previous_chunks=chunks
    )
    assert result == ((8629,), (1755,))
def test_auto_chunks_h5py():
    """from_array on an h5py dataset picks chunks aligned to the HDF5
    on-disk chunking (32x64) under the configured chunk-size limit."""
    h5py = pytest.importorskip("h5py")
    with tmpfile(".hdf5") as fn:
        with h5py.File(fn, mode="a") as f:
            d = f.create_dataset(
                "/x", shape=(1000, 1000), chunks=(32, 64), dtype="float64"
            )
            d[:] = 1
        with h5py.File(fn, mode="a") as f:
            d = f["x"]
            with dask.config.set({"array.chunk-size": "1 MiB"}):
                x = da.from_array(d)
                assert isinstance(x._meta, np.ndarray)
                assert x.chunks == ((256, 256, 256, 232), (512, 488))
def test_no_warnings_from_blockwise():
    """Common blockwise patterns (drop_axis, reductions, scalar ops) must
    not emit warnings during graph construction."""
    with warnings.catch_warnings(record=True) as record:
        x = da.ones((3, 10, 10), chunks=(3, 2, 2))
        da.map_blocks(lambda y: np.mean(y, axis=0), x, dtype=x.dtype, drop_axis=0)
    assert not record
    with warnings.catch_warnings(record=True) as record:
        x = da.ones((15, 15), chunks=(5, 5))
        (x.dot(x.T + 1) - x.mean(axis=0)).std()
    assert not record
    with warnings.catch_warnings(record=True) as record:
        x = da.ones((1,), chunks=(1,))
        1 / x[0]
    assert not record
def test_from_array_meta():
sparse = pytest.importorskip("sparse")
x = np.ones(10)
meta = sparse.COO.from_numpy(x)
y = da.from_array(x, meta=meta)
assert isinstance(y._meta, sparse.COO)
def test_compute_chunk_sizes():
    # Boolean-mask indexing yields unknown (NaN) chunk sizes.
    # compute_chunk_sizes() must resolve them in place and return self.
    x = da.from_array(np.linspace(-1, 1, num=50), chunks=10)
    y = x[x < 0]
    assert np.isnan(y.shape[0])
    assert y.chunks == ((np.nan,) * 5,)
    z = y.compute_chunk_sizes()
    assert y is z
    assert z.chunks == ((10, 10, 5, 0, 0),)
    assert len(z) == 25
    # check that dtype of chunk dimensions is `int`
    assert isinstance(z.chunks[0][0], int)
def test_compute_chunk_sizes_2d_array():
    # Same as above for 2-d: only the masked axis is unknown; the other
    # axis keeps its concrete chunking.
    X = np.linspace(-1, 1, num=9 * 4).reshape(9, 4)
    X = da.from_array(X, chunks=(3, 4))
    idx = X.sum(axis=1) > 0
    Y = X[idx]
    # This is very similar to the DataFrame->Array conversion
    assert np.isnan(Y.shape[0]) and Y.shape[1] == 4
    assert Y.chunks == ((np.nan, np.nan, np.nan), (4,))
    Z = Y.compute_chunk_sizes()
    assert Y is Z
    assert Z.chunks == ((0, 1, 3), (4,))
    assert Z.shape == (4, 4)
def test_compute_chunk_sizes_3d_array(N=8):
    # Mask each of the three axes independently so every axis ends up with
    # unknown chunk sizes, then resolve them all at once.
    X = np.linspace(-1, 2, num=8 * 8 * 8).reshape(8, 8, 8)
    X = da.from_array(X, chunks=(4, 4, 4))
    idx = X.sum(axis=0).sum(axis=0) > 0
    Y = X[idx]
    idx = X.sum(axis=1).sum(axis=1) < 0
    Y = Y[:, idx]
    idx = X.sum(axis=2).sum(axis=1) > 0.1
    Y = Y[:, :, idx]
    # Checking to make sure shapes are different on outputs
    assert Y.compute().shape == (8, 3, 5)
    assert X.compute().shape == (8, 8, 8)
    assert Y.chunks == ((np.nan, np.nan),) * 3
    assert all(np.isnan(s) for s in Y.shape)
    Z = Y.compute_chunk_sizes()
    assert Z is Y
    assert Z.shape == (8, 3, 5)
    assert Z.chunks == ((4, 4), (3, 0), (1, 4))
def _known(num=50):
    # Helper: small 1-d array with fully known chunk sizes, shared by the
    # compute_chunk_sizes warning tests below.
    return da.from_array(np.linspace(-1, 1, num=num), chunks=10)
@pytest.fixture()
def unknown():
    # Fixture: an array whose chunk sizes are unknown (NaN) because it was
    # produced by boolean-mask indexing.
    x = _known()
    y = x[x < 0]
    assert y.chunks == ((np.nan,) * 5,)
    return y
def test_compute_chunk_sizes_warning_fixes_rechunk(unknown):
    # Each of the tests below checks the same contract: an operation that
    # needs concrete chunk sizes raises with a message pointing at
    # compute_chunk_sizes(), and succeeds after calling it.
    y = unknown
    with pytest.raises(ValueError, match="compute_chunk_sizes"):
        y.rechunk("auto")
    y.compute_chunk_sizes()
    y.rechunk("auto")
def test_compute_chunk_sizes_warning_fixes_to_zarr(unknown):
    pytest.importorskip("zarr")
    y = unknown
    with tmpdir() as d:
        with pytest.raises(ValueError, match="compute_chunk_sizes"):
            y.to_zarr(d)
        y.compute_chunk_sizes()
        y.to_zarr(d)
def test_compute_chunk_sizes_warning_fixes_to_svg(unknown):
    y = unknown
    with pytest.raises(NotImplementedError, match="compute_chunk_sizes"):
        y.to_svg()
    y.compute_chunk_sizes()
    y.to_svg()
def test_compute_chunk_sizes_warning_fixes_concatenate():
    x = _known(num=100).reshape(10, 10)
    idx = x.sum(axis=0) > 0
    y1 = x[idx]
    y2 = x[idx]
    with pytest.raises(ValueError, match="compute_chunk_sizes"):
        da.concatenate((y1, y2), axis=1)
    y1.compute_chunk_sizes()
    y2.compute_chunk_sizes()
    da.concatenate((y1, y2), axis=1)
def test_compute_chunk_sizes_warning_fixes_reduction(unknown):
    y = unknown
    with pytest.raises(ValueError, match="compute_chunk_sizes"):
        da.argmin(y)
    y.compute_chunk_sizes()
    da.argmin(y)
def test_compute_chunk_sizes_warning_fixes_reshape(unknown):
    y = unknown
    with pytest.raises(ValueError, match="compute_chunk_sizes"):
        da.reshape(y, (5, 5))
    y.compute_chunk_sizes()
    da.reshape(y, (5, 5))
def test_compute_chunk_sizes_warning_fixes_slicing():
    x = _known(num=100).reshape(10, 10)
    y = x[x.sum(axis=0) < 0]
    with pytest.raises(ValueError, match="compute_chunk_sizes"):
        y[:3, :]
    y.compute_chunk_sizes()
    y[:3, :]
def test_rechunk_auto():
    # rechunk() with no arguments defaults to "auto" chunking.
    x = da.ones(10, chunks=(1,))
    y = x.rechunk()
    assert y.npartitions == 1
def test_chunk_assignment_invalidates_cached_properties():
    # Assigning to ._chunks directly must invalidate every cached property
    # derived from the chunk structure (shape, size, keys, ...).
    x = da.ones((4,), chunks=(1,))
    y = x.copy()
    # change chunks directly, which should change all of the tested properties
    y._chunks = ((2, 2), (0, 0, 0, 0))
    assert x.ndim != y.ndim
    assert x.shape != y.shape
    assert x.size != y.size
    assert x.numblocks != y.numblocks
    assert x.npartitions != y.npartitions
    assert x.__dask_keys__() != y.__dask_keys__()
    assert not np.array_equal(x._key_array, y._key_array)
def test_map_blocks_series():
    # map_blocks with a pandas constructor should produce a dask Series
    # with one partition per input block.
    pd = pytest.importorskip("pandas")
    import dask.dataframe as dd
    pytest.skip("array roundtrips don't work yet")
    from dask.dataframe.utils import assert_eq as dd_assert_eq
    x = da.ones(10, chunks=(5,))
    s = x.map_blocks(pd.Series)
    assert isinstance(s, dd.Series)
    assert s.npartitions == x.npartitions
    dd_assert_eq(s, s)
@pytest.mark.xfail(reason="need to remove singleton index dimension")
def test_map_blocks_dataframe():
    pd = pytest.importorskip("pandas")
    import dask.dataframe as dd
    from dask.dataframe.utils import assert_eq as dd_assert_eq
    x = da.ones((10, 2), chunks=(5, 2))
    s = x.map_blocks(pd.DataFrame)
    assert isinstance(s, dd.DataFrame)
    assert s.npartitions == x.npartitions
    dd_assert_eq(s, s)
def test_dask_layers():
    # The graph layer names and dependencies should track the arrays that
    # produced them.
    a = da.ones(1)
    assert a.dask.layers.keys() == {a.name}
    assert a.dask.dependencies == {a.name: set()}
    assert a.__dask_layers__() == (a.name,)
    b = a + 1
    assert b.dask.layers.keys() == {a.name, b.name}
    assert b.dask.dependencies == {a.name: set(), b.name: {a.name}}
    assert b.__dask_layers__() == (b.name,)
def test_len_object_with_unknown_size():
    # len() on an array with unknown chunk sizes must raise, not return NaN.
    a = da.random.default_rng().random(size=(20, 2))
    b = a[a < 0.5]
    with pytest.raises(ValueError, match="on object with unknown chunk size"):
        assert len(b)
@pytest.mark.parametrize("ndim", [0, 1, 3, 8])
def test_chunk_shape_broadcast(ndim):
    # map_blocks(..., enforce_ndim=True) must reject chunk functions whose
    # output dimensionality differs from the declared chunks.
    from functools import partial
    def f(x, ndim=0):
        # Ignore `x` and return arbitrary one-element array of dimensionality `ndim`
        # For example,
        # f(x, 0) = array(5)
        # f(x, 1) = array([5])
        # f(x, 2) = array([[5]])
        # f(x, 3) = array([[[5]]])
        return np.array(5)[(np.newaxis,) * ndim]
    array = da.from_array([1] + [2, 2] + [3, 3, 3], chunks=((1, 2, 3),))
    out_chunks = ((1, 1, 1),)
    # check ``enforce_ndim`` keyword parameter of ``map_blocks()``
    out = array.map_blocks(partial(f, ndim=ndim), chunks=out_chunks, enforce_ndim=True)
    if ndim != 1:
        with pytest.raises(ValueError, match="Dimension mismatch:"):
            out.compute()
    else:
        out.compute()  # should not raise an exception
    # check ``check_ndim`` keyword parameter of ``assert_eq()``
    out = array.map_blocks(partial(f, ndim=ndim), chunks=out_chunks)
    expected = np.array([5, 5, 5])
    try:
        assert_eq(out, expected)
    except AssertionError:
        assert_eq(out, expected, check_ndim=False)
    else:
        if ndim != 1:
            raise AssertionError("Expected a ValueError: Dimension mismatch")
def test_chunk_non_array_like():
    # A chunk function returning a bare scalar (not array-like) must be
    # caught by enforce_ndim / the chunk checks in assert_eq.
    array = da.from_array([1] + [2, 2] + [3, 3, 3], chunks=((1, 2, 3),))
    out_chunks = ((1, 1, 1),)
    # check ``enforce_ndim`` keyword parameter of ``map_blocks()``
    out = array.map_blocks(lambda x: 5, chunks=out_chunks, enforce_ndim=True)
    with pytest.raises(ValueError, match="Dimension mismatch:"):
        out.compute()
    expected = np.array([5, 5, 5])
    # check ``check_ndim`` keyword parameter of ``assert_eq()``
    out = array.map_blocks(lambda x: 5, chunks=out_chunks)
    try:
        assert_eq(out, expected)
    except AssertionError:
        assert_eq(out, expected, check_chunks=False)
    else:
        raise AssertionError("Expected a ValueError: Dimension mismatch")
def test_to_backend():
    # Test that `Array.to_backend` works as expected
    with dask.config.set({"array.backend": "numpy"}):
        # Start with numpy-backed array
        x = da.ones(10)
        assert isinstance(x._meta, np.ndarray)
        # Default `to_backend` shouldn't change data
        assert_eq(x, x.to_backend())
        # Moving to a "missing" backend should raise an error
        with pytest.raises(ValueError, match="No backend dispatch registered"):
            x.to_backend("missing")
def test_from_array_copies():
    # from_array must snapshot (copy) the input so later in-place mutation
    # of the numpy array does not leak into the dask array.
    x = np.arange(60).reshape((6, 10))
    original_array = x.copy()
    chunks = (2, 3)
    dx = da.from_array(x, chunks=chunks)
    x[2:4, x[0] > 3] = -5
    assert_eq(original_array, dx)
def test_from_array_xarray_dataarray():
    # A dask-backed xarray DataArray should be unwrapped to its underlying
    # dask graph (no extra layers); a numpy-backed one is wrapped normally.
    xr = pytest.importorskip("xarray")
    arr = xr.DataArray(da.random.random((1000, 1000), chunks=(50, 50)))
    dask_array = da.from_array(arr)
    dsk = collections_to_expr([dask_array]).__dask_graph__()
    assert len(dsk) == 400
    assert all(k[0].startswith("random_sample") for k in dsk)
    assert_eq(dask_array, arr.data)
    arr = xr.DataArray(np.random.random((100, 100)))
    dask_array = da.from_array(arr)
    assert_eq(dask_array.compute().data, arr.data)
def test_load_store_chunk():
    # load_store_chunk writes `x` into `out[index]`; with an empty source
    # array the index must be ignored entirely.
    actual = np.array([0, 0, 0, 0, 0, 0])
    load_store_chunk(
        x=np.array([1, 2, 3]),
        out=actual,
        region=None,
        index=slice(2, 5),
        lock=False,
        return_stored=False,
        load_stored=False,
    )
    expected = np.array([0, 0, 1, 2, 3, 0])
    assert all(actual == expected)
    # index should not be used on empty array
    actual = load_store_chunk(
        x=np.array([]),
        region=None,
        out=np.array([]),
        index=2,
        lock=False,
        return_stored=True,
        load_stored=False,
    )
    expected = np.array([])
    assert all(actual == expected)
def test_scalar_setitem():
    """After a da.Array.__getitem__ call that returns a scalar, the chunk contains a
    read-only np.generic instead of a writeable np.ndarray. This is a specific quirk of
    numpy; cupy and other backends always return a 0-dimensional array.
    Make sure that __setitem__ still works.
    """
    x = da.zeros(1)
    y = x[0]
    assert isinstance(y.compute(), np.generic)
    y[()] = 2
    assert_eq(y, 2.0)
    assert isinstance(y.compute(), np.ndarray)
@pytest.mark.parametrize(
    "idx", [[0], [True, False], da.array([0]), da.array([True, False])]
)
@pytest.mark.parametrize(
    "val",
    [3.3, np.float64(3.3), np.int64(3), da.array(3.3), da.array(3, dtype=np.int64)],
)
def test_setitem_no_dtype_broadcast(idx, val):
    # __setitem__ must cast the value to the array dtype (int32 here)
    # rather than upcasting the array to match the value.
    x = da.array([1, 2], dtype=np.int32)
    x[idx] = val
    assert_eq(x, da.array([3, 2], dtype=np.int32))
def test_store_sources_unoptimized_nocompute():
    """Test that two sources can be optimized and share tasks after storing."""
    total_calls = 0
    def _shared_task(arr1):
        # Count invocations so we can assert the shared upstream task ran
        # only once per input block across both store operations.
        nonlocal total_calls
        total_calls += 1
        return np.stack([arr1 + 1, arr1 + 2])
    start = da.zeros((2, 2), chunks=1)
    src = da.map_blocks(
        _shared_task,
        start,
        dtype=start.dtype,
        meta=np.array((), dtype=start.dtype),
        new_axis=[0],
        chunks=(2,) + start.chunks,
    )
    target1 = np.zeros((2, 2))
    target2 = np.zeros((2, 2))
    with dask.config.set(scheduler="single-threaded"):
        store_res1 = da.store(src[0], target1, compute=False)
        store_res2 = da.store(src[1], target2, compute=False)
        da.compute(store_res1, store_res2)
    assert total_calls == start.blocks.size
def test_blockwise_fusion():
    # The two elementwise additions should fuse into the sum tasks, leaving
    # only the expected four tasks in the optimized graph.
    def custom_scheduler_get(dsk, keys, **kwargs):
        """Custom scheduler that returns the result of the first key."""
        dsk = dsk.__dask_graph__()
        # two sum
        # one sum agg
        # one finalize Alias
        assert len(dsk) == 4, "False number of tasks"
        return [42 for _ in keys]
    # First test that this mocking stuff works as expected
    with pytest.raises(AssertionError, match="False number of tasks"):
        dask.compute(da.ones(10), scheduler=custom_scheduler_get)
    a = ((da.ones(10, chunks=5) + 1) + 2).sum()
    dask.compute(a, scheduler=custom_scheduler_get)
|
MyArray
|
python
|
astropy__astropy
|
astropy/table/table.py
|
{
"start": 6985,
"end": 12973
}
|
class ____(OrderedDict):
    """OrderedDict subclass for a set of columns.
    This class enhances item access to provide convenient access to columns
    by name or index, including slice access. It also handles renaming
    of columns.
    The initialization argument ``cols`` can be a list of ``Column`` objects
    or any structure that is valid for initializing a Python dict. This
    includes a dict, list of (key, val) tuples or [key, val] lists, etc.
    Parameters
    ----------
    cols : dict, list, tuple; optional
        Column objects as data structure that can init dict (see above)
    """
    # NOTE: the mutable default ``cols={}`` is never mutated (only read or
    # rebound below), so it is safe despite being a lint-flagged pattern.
    def __init__(self, cols={}):
        if isinstance(cols, (list, tuple)):
            # `cols` should be a list of two-tuples, but it is allowed to have
            # columns (BaseColumn or mixins) in the list.
            newcols = []
            for col in cols:
                if has_info_class(col, BaseColumnInfo):
                    # Column-like object: key by its info name.
                    newcols.append((col.info.name, col))
                else:
                    newcols.append(col)
            cols = newcols
        super().__init__(cols)
    def __getitem__(self, item):
        """Get items from a TableColumns object.
        ::
          tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
          tc['a']  # Column('a')
          tc[1]  # Column('b')
          tc['a', 'b']  # <TableColumns names=('a', 'b')>
          tc[1:3]  # <TableColumns names=('b', 'c')>
        """
        if isinstance(item, str):
            # Name lookup: plain dict access.
            return OrderedDict.__getitem__(self, item)
        elif isinstance(item, (int, np.integer)):
            # Positional lookup by insertion order.
            return list(self.values())[item]
        elif (
            isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == "i"
        ):
            # 0-d integer numpy array behaves like a plain int index.
            return list(self.values())[item.item()]
        elif isinstance(item, tuple):
            # Multi-column selection returns a new TableColumns.
            return self.__class__([self[x] for x in item])
        elif isinstance(item, slice):
            return self.__class__([self[x] for x in list(self)[item]])
        else:
            raise IndexError(
                f"Illegal key or index value for {type(self).__name__} object"
            )
    def __setitem__(self, item, value, validated=False):
        """
        Set item in this dict instance, but do not allow directly replacing an
        existing column unless it is already validated (and thus is certain to
        not corrupt the table).
        NOTE: it is easily possible to corrupt a table by directly *adding* a new
        key to the TableColumns attribute of a Table, e.g.
        ``t.columns['jane'] = 'doe'``.
        """
        if item in self and not validated:
            raise ValueError(
                f"Cannot replace column '{item}'. Use Table.replace_column() instead."
            )
        super().__setitem__(item, value)
    def __repr__(self):
        names = (f"'{x}'" for x in self.keys())
        return f"<{self.__class__.__name__} names=({','.join(names)})>"
    def _rename_column(self, name: str, new_name: str):
        # Rename a column in place, preserving insertion order.
        if name == new_name:
            return
        if new_name in self:
            raise KeyError(f"Column {new_name} already exists")
        if isinstance(new_name, str):
            # Normalize str subclasses (e.g. np.str_) to plain str.
            new_name = str(new_name)
        else:
            raise TypeError(
                f"Expected a str value, got {new_name} with type {type(new_name).__name__}"
            )
        # Rename column names in pprint include/exclude attributes as needed
        parent_table = self[name].info.parent_table
        if parent_table is not None:
            parent_table.pprint_exclude_names._rename(name, new_name)
            parent_table.pprint_include_names._rename(name, new_name)
        # Rebuild the mapping so the renamed key keeps its position.
        mapper = {name: new_name}
        new_names = [mapper.get(name, name) for name in self]
        cols = list(self.values())
        self.clear()
        super().update(zip(new_names, cols))
    def __delitem__(self, name):
        # Remove column names from pprint include/exclude attributes as needed.
        # __delitem__ also gets called for pop() and popitem().
        parent_table = self[name].info.parent_table
        if parent_table is not None:
            # _remove() method does not require that `name` is in the attribute
            parent_table.pprint_exclude_names._remove(name)
            parent_table.pprint_include_names._remove(name)
        return super().__delitem__(name)
    def isinstance(self, cls):
        """
        Return a list of columns which are instances of the specified classes.
        Parameters
        ----------
        cls : class or tuple thereof
            Column class (including mixin) or tuple of Column classes.
        Returns
        -------
        col_list : list of `Column`
            List of Column objects which are instances of given classes.
        """
        cols = [col for col in self.values() if isinstance(col, cls)]
        return cols
    def not_isinstance(self, cls):
        """
        Return a list of columns which are not instances of the specified classes.
        Parameters
        ----------
        cls : class or tuple thereof
            Column class (including mixin) or tuple of Column classes.
        Returns
        -------
        col_list : list of `Column`
            List of Column objects which are not instances of given classes.
        """
        cols = [col for col in self.values() if not isinstance(col, cls)]
        return cols
    # When the deprecation period of setdefault() and update() is over then they
    # need to be rewritten to raise an error, not removed.
    @deprecated(
        since="6.1", alternative="t.setdefault()", name="t.columns.setdefault()"
    )
    def setdefault(self, key, default):
        return super().setdefault(key, default)
    @deprecated(since="6.1", alternative="t.update()", name="t.columns.update()")
    def update(self, *args, **kwargs):
        return super().update(*args, **kwargs)
|
TableColumns
|
python
|
scrapy__scrapy
|
tests/test_webclient.py
|
{
"start": 1776,
"end": 6009
}
|
class ____:
    """Tests for the raw HTTP/1.0 request bytes produced by
    ScrapyHTTPClientFactory / ScrapyHTTPPageGetter."""
    def test_earlyHeaders(self):
        # basic test stolen from twisted HTTPageGetter
        factory = client.ScrapyHTTPClientFactory(
            Request(
                url="http://foo/bar",
                body="some data",
                headers={
                    "Host": "example.net",
                    "User-Agent": "fooble",
                    "Cookie": "blah blah",
                    "Content-Length": "12981",
                    "Useful": "value",
                },
            )
        )
        # Note: the supplied Content-Length (12981) must be replaced by the
        # actual body length (9).
        self._test(
            factory,
            b"GET /bar HTTP/1.0\r\n"
            b"Content-Length: 9\r\n"
            b"Useful: value\r\n"
            b"Connection: close\r\n"
            b"User-Agent: fooble\r\n"
            b"Host: example.net\r\n"
            b"Cookie: blah blah\r\n"
            b"\r\n"
            b"some data",
        )
        # test minimal sent headers
        factory = client.ScrapyHTTPClientFactory(Request("http://foo/bar"))
        self._test(factory, b"GET /bar HTTP/1.0\r\nHost: foo\r\n\r\n")
        # test a simple POST with body and content-type
        factory = client.ScrapyHTTPClientFactory(
            Request(
                method="POST",
                url="http://foo/bar",
                body="name=value",
                headers={"Content-Type": "application/x-www-form-urlencoded"},
            )
        )
        self._test(
            factory,
            b"POST /bar HTTP/1.0\r\n"
            b"Host: foo\r\n"
            b"Connection: close\r\n"
            b"Content-Type: application/x-www-form-urlencoded\r\n"
            b"Content-Length: 10\r\n"
            b"\r\n"
            b"name=value",
        )
        # test a POST method with no body provided
        factory = client.ScrapyHTTPClientFactory(
            Request(method="POST", url="http://foo/bar")
        )
        self._test(
            factory,
            b"POST /bar HTTP/1.0\r\nHost: foo\r\nContent-Length: 0\r\n\r\n",
        )
        # test with single and multivalued headers
        factory = client.ScrapyHTTPClientFactory(
            Request(
                url="http://foo/bar",
                headers={
                    "X-Meta-Single": "single",
                    "X-Meta-Multivalued": ["value1", "value2"],
                },
            )
        )
        self._test(
            factory,
            b"GET /bar HTTP/1.0\r\n"
            b"Host: foo\r\n"
            b"X-Meta-Multivalued: value1\r\n"
            b"X-Meta-Multivalued: value2\r\n"
            b"X-Meta-Single: single\r\n"
            b"\r\n",
        )
        # same test with single and multivalued headers but using Headers class
        factory = client.ScrapyHTTPClientFactory(
            Request(
                url="http://foo/bar",
                headers=Headers(
                    {
                        "X-Meta-Single": "single",
                        "X-Meta-Multivalued": ["value1", "value2"],
                    }
                ),
            )
        )
        self._test(
            factory,
            b"GET /bar HTTP/1.0\r\n"
            b"Host: foo\r\n"
            b"X-Meta-Multivalued: value1\r\n"
            b"X-Meta-Multivalued: value2\r\n"
            b"X-Meta-Single: single\r\n"
            b"\r\n",
        )
    def _test(self, factory, testvalue):
        # Drive the protocol against an in-memory transport and compare the
        # emitted request line-by-line (order-insensitive, hence the sets).
        transport = StringTransport()
        protocol = client.ScrapyHTTPPageGetter()
        protocol.factory = factory
        protocol.makeConnection(transport)
        assert set(transport.value().splitlines()) == set(testvalue.splitlines())
        return testvalue
    def test_non_standard_line_endings(self):
        # regression test for: http://dev.scrapy.org/ticket/258
        # Bare-LF line endings (instead of CRLF) must still be parsed.
        factory = client.ScrapyHTTPClientFactory(Request(url="http://foo/bar"))
        protocol = client.ScrapyHTTPPageGetter()
        protocol.factory = factory
        protocol.headers = Headers()
        protocol.dataReceived(b"HTTP/1.0 200 OK\n")
        protocol.dataReceived(b"Hello: World\n")
        protocol.dataReceived(b"Foo: Bar\n")
        protocol.dataReceived(b"\n")
        assert protocol.headers == Headers({"Hello": ["World"], "Foo": ["Bar"]})
|
TestScrapyHTTPPageGetter
|
python
|
django__django
|
tests/auth_tests/test_management.py
|
{
"start": 47933,
"end": 49789
}
|
class ____(TestCase):
    """createsuperuser tests that exercise the --database option against a
    second ("other") database alias."""
    databases = {"default", "other"}
    def test_createsuperuser_command_with_database_option(self):
        """
        createsuperuser --database should operate on the specified DB.
        """
        new_io = StringIO()
        call_command(
            "createsuperuser",
            interactive=False,
            username="joe",
            email="joe@somewhere.org",
            database="other",
            stdout=new_io,
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, "Superuser created successfully.")
        # The user must exist on the "other" database, not the default one.
        user = User.objects.using("other").get(username="joe")
        self.assertEqual(user.email, "joe@somewhere.org")
    def test_createsuperuser_command_suggested_username_with_database_option(self):
        # The suggested default username is computed against the target DB.
        default_username = get_default_username(database="other")
        qs = User.objects.using("other")
        # The decorated inner functions below are defined and then invoked
        # immediately; they are not collected by the test runner themselves.
        @mock_inputs({"password": "nopasswd", "username": "", "email": ""})
        def test_other_create_with_suggested_username(self):
            call_command(
                "createsuperuser",
                interactive=True,
                stdin=MockTTY(),
                verbosity=0,
                database="other",
            )
            self.assertIs(qs.filter(username=default_username).exists(), True)
        test_other_create_with_suggested_username(self)
        @mock_inputs({"password": "nopasswd", "Username: ": "other", "email": ""})
        def test_other_no_suggestion(self):
            call_command(
                "createsuperuser",
                interactive=True,
                stdin=MockTTY(),
                verbosity=0,
                database="other",
            )
            self.assertIs(qs.filter(username="other").exists(), True)
        test_other_no_suggestion(self)
|
MultiDBCreatesuperuserTestCase
|
python
|
ray-project__ray
|
rllib/examples/envs/classes/multi_agent/footsies/game/footsies_game.py
|
{
"start": 406,
"end": 4780
}
|
class ____:
    """Handles gRPC communication with game the server.
    This class establishes communication between the
    game server and the Python harness via gRPC. It provides methods
    to start the game, reset it, get the current state, and step the
    game by a certain number of frames.
    """

    def __init__(self, host: str, port: int):
        self.host = host
        self.port = port
        self.stub = self._initialize_stub()

    @staticmethod
    def action_to_bits(action: int, is_player_1: bool) -> int:
        """Converts an action to its corresponding bit representation."""
        # numpy scalars may arrive as 0-d arrays; unwrap to a plain value.
        if isinstance(action, np.ndarray):
            action = action.item()
        # Map side-relative env actions (BACK/FORWARD) onto absolute game
        # directions; the mapping is mirrored for player 2, who faces left.
        if is_player_1:
            translation = {
                constants.EnvActions.BACK: constants.GameActions.LEFT,
                constants.EnvActions.FORWARD: constants.GameActions.RIGHT,
                constants.EnvActions.BACK_ATTACK: constants.GameActions.LEFT_ATTACK,
                constants.EnvActions.FORWARD_ATTACK: constants.GameActions.RIGHT_ATTACK,
            }
        else:
            translation = {
                constants.EnvActions.BACK: constants.GameActions.RIGHT,
                constants.EnvActions.FORWARD: constants.GameActions.LEFT,
                constants.EnvActions.BACK_ATTACK: constants.GameActions.RIGHT_ATTACK,
                constants.EnvActions.FORWARD_ATTACK: constants.GameActions.LEFT_ATTACK,
            }
        # Actions without a side-relative meaning pass through unchanged.
        action = translation.get(action, action)
        return constants.ACTION_TO_BITS[action]

    def _call_rpc(self, rpc_name: str, fn):
        """Invoke *fn*, logging and re-raising any failure uniformly."""
        try:
            return fn()
        except Exception as e:
            logger.error(f"Error calling {rpc_name} with exception: {e}")
            raise e

    def get_encoded_state(self) -> footsies_pb2.EncodedGameState:
        """Gets the current encoded game state by calling the GetEncodedState RPC."""
        return self._call_rpc(
            "GetEncodedState", lambda: self.stub.GetEncodedState(footsies_pb2.Empty())
        )

    def get_state(self) -> footsies_pb2.GameState:
        """Gets the current game state by calling the GetState RPC."""
        return self._call_rpc(
            "GetState", lambda: self.stub.GetState(footsies_pb2.Empty())
        )

    def is_ready(self) -> bool:
        """Checks if the game is ready by calling the IsReady RPC."""
        return self._call_rpc(
            "IsReady", lambda: self.stub.IsReady(footsies_pb2.Empty()).value
        )

    def reset_game(self) -> None:
        """Resets the game by calling the ResetGame RPC."""
        self._call_rpc("ResetGame", lambda: self.stub.ResetGame(footsies_pb2.Empty()))

    def start_game(self) -> None:
        """Starts the game by calling the StartGame RPC."""

        def _start():
            self.stub.StartGame(footsies_pb2.Empty())
            # Poll until the server reports readiness before returning.
            while not self.is_ready():
                logger.info("Game not ready...")
                time.sleep(0.5)
            logger.info("StartGame called successfully")

        self._call_rpc("StartGame", _start)

    def step_n_frames(
        self, p1_action: int, p2_action: int, n_frames: int
    ) -> footsies_pb2.GameState:
        """Steps the game by n_frames with the given player actions. The provided actions will be repeated for all n_frames."""
        return self._call_rpc(
            "StepNFrames",
            lambda: self.stub.StepNFrames(
                footsies_pb2.StepInput(
                    p1_action=p1_action, p2_action=p2_action, nFrames=n_frames
                )
            ),
        )

    def _initialize_stub(self) -> footsies_pb2_grpc.FootsiesGameServiceStub:
        # Connection setup keeps its own handler: it catches only RpcError
        # and logs a connection-specific message.
        try:
            channel = grpc.insecure_channel(f"{self.host}:{self.port}")
            return footsies_pb2_grpc.FootsiesGameServiceStub(channel)
        except grpc.RpcError as e:
            logger.error(f"Error connecting to gRPC stub with exception: {e}")
            raise e
|
FootsiesGame
|
python
|
ray-project__ray
|
python/ray/llm/_internal/serve/utils/lora_serve_utils.py
|
{
"start": 4396,
"end": 8425
}
|
class ____:
    """Download LoRA weights from remote storage and manage disk cache.
    This class is serve-specific as it depends on DiskMultiplexConfig and
    other serve-specific concepts.
    """

    def __init__(
        self,
        lora_root: Optional[str] = None,
        download_timeout_s: Optional[float] = None,
        max_tries: int = 1,
    ):
        """Initialize the loader.

        Args:
            lora_root: Root directory for the on-disk weight cache.
            download_timeout_s: Per-download timeout in seconds; ``None``
                disables the timeout. Must be > 0 when given.
            max_tries: Total download attempts per model (>= 1).

        Raises:
            ValueError: If ``download_timeout_s`` or ``max_tries`` is invalid.
        """
        self.lora_root = lora_root or "/tmp/ray/llm/lora/cache"
        # Completed downloads, keyed by LoRA model id.
        self.disk_cache: Dict[str, DiskMultiplexConfig] = {}
        # In-flight download tasks, so concurrent requests for the same
        # model share a single download.
        self.active_syncing_tasks: Dict[str, asyncio.Task[DiskMultiplexConfig]] = {}
        if download_timeout_s is not None and download_timeout_s <= 0:
            raise ValueError(
                f"download_timeout_s must be None or >0, got {download_timeout_s}"
            )
        self.download_timeout_s = download_timeout_s
        if max_tries < 1:
            raise ValueError(f"max_tries must be >=1, got {max_tries}")
        self.max_tries = max_tries

    async def load_model_from_config(
        self, lora_model_id: str, llm_config
    ) -> DiskMultiplexConfig:
        """Load a LoRA model by first fetching its mirror config from S3."""
        lora_mirror_config = await get_lora_mirror_config(lora_model_id, llm_config)
        return await self.load_model(lora_model_id, lora_mirror_config)

    async def load_model(
        self, lora_model_id: str, lora_mirror_config: LoraMirrorConfig
    ) -> DiskMultiplexConfig:
        """Load a LoRA model, deduplicating concurrent downloads."""
        # Fast path: already downloaded.
        if lora_model_id in self.disk_cache:
            return self.disk_cache[lora_model_id]
        if lora_model_id not in self.active_syncing_tasks:
            task = asyncio.create_task(self._load_model_async(lora_mirror_config))
            # Drop the in-flight entry once the task settles (success or not).
            task.add_done_callback(
                lambda result: self.active_syncing_tasks.pop(lora_model_id, None)
            )
            self.active_syncing_tasks[lora_model_id] = task
        else:
            task = self.active_syncing_tasks[lora_model_id]
        # Shield so a cancelled caller does not cancel the shared download.
        disk_config = await asyncio.shield(task)
        self.disk_cache[lora_model_id] = disk_config
        return disk_config

    async def _load_model_async(
        self, lora_mirror_config: LoraMirrorConfig
    ) -> DiskMultiplexConfig:
        return await self._load_model(lora_mirror_config)

    @make_async
    def _load_model(self, lora_mirror_config: LoraMirrorConfig) -> DiskMultiplexConfig:
        return self._load_model_sync(lora_mirror_config)

    @make_async
    def clear_cache(self):
        """Clear the disk cache."""
        # Bug fix: also drop the in-memory entries. Previously only the
        # files were deleted, leaving stale DiskMultiplexConfig entries in
        # self.disk_cache pointing at paths that no longer exist.
        self.disk_cache.clear()
        clear_directory(self.lora_root)

    def _model_dir_path(self, model_id: str) -> str:
        """Construct the path for the lora weight."""
        lora_id = get_lora_id(clean_model_id(model_id))
        path = os.path.join(self.lora_root, lora_id)
        os.makedirs(path, exist_ok=True)
        return path

    def _download_lora(self, lora_mirror_config: LoraMirrorConfig) -> str:
        """Download LoRA weights using generic download primitives."""
        model_local_path = self._model_dir_path(lora_mirror_config.lora_model_id)
        sync_files_with_lock(
            lora_mirror_config.bucket_uri,
            model_local_path,
            timeout=self.download_timeout_s,
        )
        return model_local_path

    def _load_model_sync(
        self, lora_mirror_config: LoraMirrorConfig
    ) -> DiskMultiplexConfig:
        """Load a model from the given mirror configuration."""
        # Wrap the download with retry/backoff before invoking it.
        download_with_retries = retry_with_exponential_backoff(
            max_tries=self.max_tries,
            exception_to_check=Exception,
        )(lambda config: self._download_lora(config))
        local_path = download_with_retries(lora_mirror_config)
        return DiskMultiplexConfig.model_validate(
            {
                "model_id": lora_mirror_config.lora_model_id,
                "max_total_tokens": lora_mirror_config.max_total_tokens,
                "local_path": local_path,
                "lora_assigned_int_id": global_id_manager.next(),
            }
        )
|
LoraModelLoader
|
python
|
django-import-export__django-import-export
|
tests/core/tests/test_resources/test_bulk_operations.py
|
{
"start": 17100,
"end": 17966
}
|
class ____(BulkTest):
    """Bulk-update behaviour for a model whose primary key is a UUID."""
    def setUp(self):
        super().setUp()
        # Populate the update-test fixtures against the UUID-keyed model.
        self.init_update_test_data(model=UUIDBook)
    @mock.patch("core.models.UUIDBook.objects.bulk_update")
    def test_bulk_update_uuid_model(self, mock_bulk_update):
        """Test update of a Model which defines uuid not pk (issue #1274)"""
        class _UUIDBookResource(resources.ModelResource):
            class Meta:
                model = UUIDBook
                use_bulk = True
                batch_size = 5
                fields = (
                    "id",
                    "name",
                )
        resource = _UUIDBookResource()
        result = resource.import_data(self.dataset)
        # 10 rows with batch_size=5 -> exactly two bulk_update batches.
        self.assertEqual(2, mock_bulk_update.call_count)
        self.assertEqual(10, result.total_rows)
        self.assertEqual(10, result.totals["update"])
|
BulkUUIDBookUpdateTest
|
python
|
django-debug-toolbar__django-debug-toolbar
|
tests/test_checks.py
|
{
"start": 264,
"end": 12194
}
|
class ____(SimpleTestCase):
@override_settings(
MIDDLEWARE=[
"django.contrib.messages.middleware.MessageMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.gzip.GZipMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware",
]
)
def test_check_good_configuration(self):
messages = run_checks()
self.assertEqual(messages, [])
@override_settings(
MIDDLEWARE=[
"django.contrib.messages.middleware.MessageMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
]
)
def test_check_missing_middleware_error(self):
messages = run_checks()
self.assertEqual(
messages,
[
Warning(
"debug_toolbar.middleware.DebugToolbarMiddleware is "
"missing from MIDDLEWARE.",
hint="Add debug_toolbar.middleware.DebugToolbarMiddleware "
"to MIDDLEWARE.",
id="debug_toolbar.W001",
)
],
)
@override_settings(
MIDDLEWARE=[
"django.contrib.messages.middleware.MessageMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware",
"django.middleware.gzip.GZipMiddleware",
]
)
def test_check_gzip_middleware_error(self):
messages = run_checks()
self.assertEqual(
messages,
[
Warning(
"debug_toolbar.middleware.DebugToolbarMiddleware occurs "
"before django.middleware.gzip.GZipMiddleware in "
"MIDDLEWARE.",
hint="Move debug_toolbar.middleware.DebugToolbarMiddleware "
"to after django.middleware.gzip.GZipMiddleware in "
"MIDDLEWARE.",
id="debug_toolbar.W003",
)
],
)
@override_settings(
MIDDLEWARE_CLASSES=[
"django.contrib.messages.middleware.MessageMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.gzip.GZipMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware",
]
)
def test_check_middleware_classes_error(self):
messages = run_checks()
self.assertIn(
Warning(
"debug_toolbar is incompatible with MIDDLEWARE_CLASSES setting.",
hint="Use MIDDLEWARE instead of MIDDLEWARE_CLASSES",
id="debug_toolbar.W004",
),
messages,
)
@override_settings(DEBUG_TOOLBAR_PANELS=[])
def test_panels_is_empty(self):
errors = run_checks()
self.assertEqual(
errors,
[
Warning(
"Setting DEBUG_TOOLBAR_PANELS is empty.",
hint="Set DEBUG_TOOLBAR_PANELS to a non-empty list in your "
"settings.py.",
id="debug_toolbar.W005",
),
],
)
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": False,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
"loaders": [
"django.template.loaders.filesystem.Loader",
],
},
},
]
)
def test_check_w006_invalid(self):
errors = run_checks()
self.assertEqual(
errors,
[
Warning(
"At least one DjangoTemplates TEMPLATES configuration needs "
"to use django.template.loaders.app_directories.Loader or "
"have APP_DIRS set to True.",
hint=(
"Include django.template.loaders.app_directories.Loader "
'in ["OPTIONS"]["loaders"]. Alternatively use '
"APP_DIRS=True for at least one "
"django.template.backends.django.DjangoTemplates "
"backend configuration."
),
id="debug_toolbar.W006",
)
],
)
@override_settings(
TEMPLATES=[
{
"NAME": "use_loaders",
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": False,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
"loaders": [
"django.template.loaders.app_directories.Loader",
],
},
},
{
"NAME": "use_app_dirs",
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
)
def test_check_w006_valid(self):
self.assertEqual(run_checks(), [])
@override_settings(
TEMPLATES=[
{
"NAME": "use_loaders",
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": False,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
"loaders": [
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
),
],
},
},
]
)
def test_check_w006_valid_nested_loaders(self):
self.assertEqual(run_checks(), [])
@patch("debug_toolbar.apps.mimetypes.guess_type")
def test_check_w007_valid(self, mocked_guess_type):
mocked_guess_type.return_value = ("text/javascript", None)
self.assertEqual(run_checks(), [])
mocked_guess_type.return_value = ("application/javascript", None)
self.assertEqual(run_checks(), [])
@patch("debug_toolbar.apps.mimetypes.guess_type")
def test_check_w007_invalid(self, mocked_guess_type):
mocked_guess_type.return_value = ("text/plain", None)
self.assertEqual(
run_checks(),
[
Warning(
"JavaScript files are resolving to the wrong content type.",
hint="The Django Debug Toolbar may not load properly while mimetypes are misconfigured. "
"See the Django documentation for an explanation of why this occurs.\n"
"https://docs.djangoproject.com/en/stable/ref/contrib/staticfiles/#static-file-development-view\n"
"\n"
"This typically occurs on Windows machines. The suggested solution is to modify "
"HKEY_CLASSES_ROOT in the registry to specify the content type for JavaScript "
"files.\n"
"\n"
"[HKEY_CLASSES_ROOT\\.js]\n"
'"Content Type"="application/javascript"',
id="debug_toolbar.W007",
)
],
)
@patch("debug_toolbar.apps.reverse")
def test_debug_toolbar_installed_when_running_tests(self, reverse):
params = [
{
"debug": True,
"running_tests": True,
"show_callback_changed": True,
"urls_installed": False,
"errors": False,
},
{
"debug": False,
"running_tests": False,
"show_callback_changed": True,
"urls_installed": False,
"errors": False,
},
{
"debug": False,
"running_tests": True,
"show_callback_changed": False,
"urls_installed": False,
"errors": False,
},
{
"debug": False,
"running_tests": True,
"show_callback_changed": True,
"urls_installed": True,
"errors": False,
},
{
"debug": False,
"running_tests": True,
"show_callback_changed": True,
"urls_installed": False,
"errors": True,
},
]
for config in params:
with self.subTest(**config):
config_setting = {
"RENDER_PANELS": False,
"IS_RUNNING_TESTS": config["running_tests"],
"SHOW_TOOLBAR_CALLBACK": (
(lambda *args: True)
if config["show_callback_changed"]
else "debug_toolbar.middleware.show_toolbar"
),
}
if config["urls_installed"]:
reverse.side_effect = lambda *args: None
else:
reverse.side_effect = NoReverseMatch()
with self.settings(
DEBUG=config["debug"], DEBUG_TOOLBAR_CONFIG=config_setting
):
errors = debug_toolbar_installed_when_running_tests_check(None)
if config["errors"]:
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, "debug_toolbar.E001")
else:
self.assertEqual(len(errors), 0)
@override_settings(
DEBUG_TOOLBAR_CONFIG={
"OBSERVE_REQUEST_CALLBACK": lambda request: False,
"IS_RUNNING_TESTS": False,
}
)
def test_observe_request_callback_specified(self):
errors = run_checks()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, "debug_toolbar.W008")
|
ChecksTestCase
|
python
|
jazzband__django-waffle
|
waffle/tests/test_waffle.py
|
{
"start": 31350,
"end": 32394
}
|
class ____(TestCase):
@mock.patch.object(random, 'uniform')
def test_percent(self, uniform):
"""If you have no cookie, you get a cookie!"""
uniform.return_value = '10'
waffle.get_waffle_flag_model().objects.create(name='myflag', percent='50.0')
request = get()
response = process_request(request, views.flag_in_view)
assert 'dwf_myflag' in response.cookies
self.assertEqual('True', response.cookies['dwf_myflag'].value)
self.assertEqual(b'on', response.content)
@mock.patch.object(random, 'uniform')
def test_percent_readonly(self, uniform):
uniform.return_value = '10'
"""If you have no cookie, you do not get a cookie if we just have a question!"""
waffle.get_waffle_flag_model().objects.create(name='myflag', percent='50.0')
request = get()
response = process_request(request, views.flag_in_view_readonly)
assert 'dwf_myflag' not in response.cookies
self.assertEqual(b'off', response.content)
|
FunctionTests
|
python
|
django__django
|
tests/view_tests/views.py
|
{
"start": 10210,
"end": 10391
}
|
class ____(ExceptionReporter):
custom_traceback_text = "custom traceback text"
def get_traceback_html(self):
return self.custom_traceback_text
|
CustomExceptionReporter
|
python
|
apache__airflow
|
airflow-core/tests/unit/api_fastapi/core_api/test_security.py
|
{
"start": 2096,
"end": 19141
}
|
class ____:
@classmethod
def setup_class(cls):
with conf_vars(
{
(
"core",
"auth_manager",
): "airflow.api_fastapi.auth.managers.simple.simple_auth_manager.SimpleAuthManager",
}
):
create_app()
@patch("airflow.api_fastapi.core_api.security.get_auth_manager")
async def test_resolve_user_from_token(self, mock_get_auth_manager):
token_str = "test-token"
user = SimpleAuthManagerUser(username="username", role="admin")
auth_manager = AsyncMock()
auth_manager.get_user_from_token.return_value = user
mock_get_auth_manager.return_value = auth_manager
result = await resolve_user_from_token(token_str)
auth_manager.get_user_from_token.assert_called_once_with(token_str)
assert result == user
@patch("airflow.api_fastapi.core_api.security.get_auth_manager")
async def test_get_user_wrong_token(self, mock_get_auth_manager):
token_str = "test-token"
auth_manager = AsyncMock()
auth_manager.get_user_from_token.side_effect = InvalidTokenError()
mock_get_auth_manager.return_value = auth_manager
with pytest.raises(HTTPException, match="Invalid JWT token"):
await resolve_user_from_token(token_str)
auth_manager.get_user_from_token.assert_called_once_with(token_str)
@patch("airflow.api_fastapi.core_api.security.get_auth_manager")
async def test_get_user_expired_token(self, mock_get_auth_manager):
token_str = "test-token"
auth_manager = AsyncMock()
auth_manager.get_user_from_token.side_effect = ExpiredSignatureError()
mock_get_auth_manager.return_value = auth_manager
with pytest.raises(HTTPException, match="Token Expired"):
await resolve_user_from_token(token_str)
auth_manager.get_user_from_token.assert_called_once_with(token_str)
@patch("airflow.api_fastapi.core_api.security.resolve_user_from_token")
async def test_get_user_with_request_state(self, mock_resolve_user_from_token):
user = Mock()
request = Mock()
request.state.user = user
result = await get_user(request, None, None)
assert result == user
mock_resolve_user_from_token.assert_not_called()
@pytest.mark.parametrize(
("oauth_token", "bearer_credentials_creds", "cookies", "expected"),
[
("oauth_token", None, {}, "oauth_token"),
(None, "bearer_credentials_creds", {}, "bearer_credentials_creds"),
(None, None, {COOKIE_NAME_JWT_TOKEN: "cookie_token"}, "cookie_token"),
],
)
@patch("airflow.api_fastapi.core_api.security.resolve_user_from_token")
async def test_get_user_with_token(
self, mock_resolve_user_from_token, oauth_token, bearer_credentials_creds, cookies, expected
):
user = Mock()
mock_resolve_user_from_token.return_value = user
request = Mock()
request.state.user = None
request.cookies = cookies
bearer_credentials = None
if bearer_credentials_creds:
bearer_credentials = Mock()
bearer_credentials.scheme = "bearer"
bearer_credentials.credentials = bearer_credentials_creds
result = await get_user(request, oauth_token, bearer_credentials)
assert result == user
mock_resolve_user_from_token.assert_called_once_with(expected)
@pytest.mark.db_test
@patch("airflow.api_fastapi.core_api.security.get_auth_manager")
def test_requires_access_dag_authorized(self, mock_get_auth_manager):
auth_manager = Mock()
auth_manager.is_authorized_dag.return_value = True
mock_get_auth_manager.return_value = auth_manager
fastapi_request = Mock()
fastapi_request.path_params = {}
requires_access_dag("GET", DagAccessEntity.CODE)(fastapi_request, Mock())
auth_manager.is_authorized_dag.assert_called_once()
@pytest.mark.db_test
@patch("airflow.api_fastapi.core_api.security.get_auth_manager")
def test_requires_access_dag_unauthorized(self, mock_get_auth_manager):
auth_manager = Mock()
auth_manager.is_authorized_dag.return_value = False
mock_get_auth_manager.return_value = auth_manager
fastapi_request = Mock()
fastapi_request.path_params = {}
mock_request = Mock()
mock_request.path_params.return_value = {}
with pytest.raises(HTTPException, match="Forbidden"):
requires_access_dag("GET", DagAccessEntity.CODE)(fastapi_request, Mock())
auth_manager.is_authorized_dag.assert_called_once()
@pytest.mark.parametrize(
("url", "expected_is_safe"),
[
("https://server_base_url.com/prefix/some_page?with_param=3", True),
("https://server_base_url.com/prefix/", True),
("https://server_base_url.com/prefix", True),
("/prefix/some_other", True),
("prefix/some_other", True),
("https://requesting_server_base_url.com/prefix2", True), # safe in regards to the request url
# Relative path, will go up one level escaping the prefix folder
("some_other", False),
("./some_other", False),
# wrong scheme
("javascript://server_base_url.com/prefix/some_page?with_param=3", False),
# wrong netloc
("https://some_netlock.com/prefix/some_page?with_param=3", False),
# Absolute path escaping the prefix folder
("/some_other_page/", False),
# traversal, escaping the `prefix` folder
("/../../../../some_page?with_param=3", False),
# encoded url
("https%3A%2F%2Frequesting_server_base_url.com%2Fprefix2", True),
("https%3A%2F%2Fserver_base_url.com%2Fprefix", True),
("https%3A%2F%2Fsome_netlock.com%2Fprefix%2Fsome_page%3Fwith_param%3D3", False),
("https%3A%2F%2Frequesting_server_base_url.com%2Fprefix2%2Fsub_path", True),
("%2F..%2F..%2F..%2F..%2Fsome_page%3Fwith_param%3D3", False),
],
)
@conf_vars({("api", "base_url"): "https://server_base_url.com/prefix"})
def test_is_safe_url(self, url, expected_is_safe):
request = Mock()
request.base_url = "https://requesting_server_base_url.com/prefix2"
assert is_safe_url(url, request=request) == expected_is_safe
@pytest.mark.parametrize(
("url", "expected_is_safe"),
[
("https://server_base_url.com/prefix", False),
("https://requesting_server_base_url.com/prefix2", True),
("prefix/some_other", False),
("https%3A%2F%2Fserver_base_url.com%2Fprefix", False),
("https%3A%2F%2Frequesting_server_base_url.com%2Fprefix2", True),
("https%3A%2F%2Frequesting_server_base_url.com%2Fprefix2%2Fsub_path", True),
("%2F..%2F..%2F..%2F..%2Fsome_page%3Fwith_param%3D3", False),
],
)
def test_is_safe_url_with_base_url_unset(self, url, expected_is_safe):
request = Mock()
request.base_url = "https://requesting_server_base_url.com/prefix2"
assert is_safe_url(url, request=request) == expected_is_safe
@pytest.mark.db_test
@pytest.mark.parametrize(
"team_name",
[None, "team1"],
)
@patch.object(Connection, "get_team_name")
@patch("airflow.api_fastapi.core_api.security.get_auth_manager")
def test_requires_access_connection(self, mock_get_auth_manager, mock_get_team_name, team_name):
auth_manager = Mock()
auth_manager.is_authorized_connection.return_value = True
mock_get_auth_manager.return_value = auth_manager
fastapi_request = Mock()
fastapi_request.path_params = {"connection_id": "conn_id"}
mock_get_team_name.return_value = team_name
user = Mock()
requires_access_connection("GET")(fastapi_request, user)
auth_manager.is_authorized_connection.assert_called_once_with(
method="GET",
details=ConnectionDetails(conn_id="conn_id", team_name=team_name),
user=user,
)
mock_get_team_name.assert_called_once_with("conn_id")
@patch.object(Connection, "get_conn_id_to_team_name_mapping")
@patch("airflow.api_fastapi.core_api.security.get_auth_manager")
def test_requires_access_connection_bulk(
self, mock_get_auth_manager, mock_get_conn_id_to_team_name_mapping
):
auth_manager = Mock()
auth_manager.batch_is_authorized_connection.return_value = True
mock_get_auth_manager.return_value = auth_manager
mock_get_conn_id_to_team_name_mapping.return_value = {"test1": "team1"}
request = BulkBody[ConnectionBody].model_validate(
{
"actions": [
{
"action": "create",
"entities": [
{"connection_id": "test1", "conn_type": "test1"},
{"connection_id": "test2", "conn_type": "test2"},
],
},
{
"action": "delete",
"entities": ["test3"],
},
{
"action": "create",
"entities": [
{"connection_id": "test4", "conn_type": "test4"},
],
"action_on_existence": "overwrite",
},
]
}
)
user = Mock()
requires_access_connection_bulk()(request, user)
auth_manager.batch_is_authorized_connection.assert_called_once_with(
requests=[
{
"method": "POST",
"details": ConnectionDetails(conn_id="test1", team_name="team1"),
},
{
"method": "POST",
"details": ConnectionDetails(conn_id="test2"),
},
{
"method": "DELETE",
"details": ConnectionDetails(conn_id="test3"),
},
{
"method": "POST",
"details": ConnectionDetails(conn_id="test4"),
},
{
"method": "PUT",
"details": ConnectionDetails(conn_id="test4"),
},
],
user=user,
)
@pytest.mark.db_test
@pytest.mark.parametrize(
"team_name",
[None, "team1"],
)
@patch.object(Variable, "get_team_name")
@patch("airflow.api_fastapi.core_api.security.get_auth_manager")
def test_requires_access_variable(self, mock_get_auth_manager, mock_get_team_name, team_name):
auth_manager = Mock()
auth_manager.is_authorized_variable.return_value = True
mock_get_auth_manager.return_value = auth_manager
fastapi_request = Mock()
fastapi_request.path_params = {"variable_key": "var_key"}
mock_get_team_name.return_value = team_name
user = Mock()
requires_access_variable("GET")(fastapi_request, user)
auth_manager.is_authorized_variable.assert_called_once_with(
method="GET",
details=VariableDetails(key="var_key", team_name=team_name),
user=user,
)
mock_get_team_name.assert_called_once_with("var_key")
@patch.object(Variable, "get_key_to_team_name_mapping")
@patch("airflow.api_fastapi.core_api.security.get_auth_manager")
def test_requires_access_variable_bulk(self, mock_get_auth_manager, mock_get_key_to_team_name_mapping):
auth_manager = Mock()
auth_manager.batch_is_authorized_variable.return_value = True
mock_get_auth_manager.return_value = auth_manager
mock_get_key_to_team_name_mapping.return_value = {"var1": "team1", "dummy": "team2"}
request = BulkBody[VariableBody].model_validate(
{
"actions": [
{
"action": "create",
"entities": [
{"key": "var1", "value": "value1"},
{"key": "var2", "value": "value2"},
],
},
{
"action": "delete",
"entities": ["var3"],
},
{
"action": "create",
"entities": [
{"key": "var4", "value": "value4"},
],
"action_on_existence": "overwrite",
},
]
}
)
user = Mock()
requires_access_variable_bulk()(request, user)
auth_manager.batch_is_authorized_variable.assert_called_once_with(
requests=[
{
"method": "POST",
"details": VariableDetails(key="var1", team_name="team1"),
},
{
"method": "POST",
"details": VariableDetails(key="var2"),
},
{
"method": "DELETE",
"details": VariableDetails(key="var3"),
},
{
"method": "POST",
"details": VariableDetails(key="var4"),
},
{
"method": "PUT",
"details": VariableDetails(key="var4"),
},
],
user=user,
)
@pytest.mark.db_test
@pytest.mark.parametrize(
"team_name",
[None, "team1"],
)
@patch.object(Pool, "get_team_name")
@patch("airflow.api_fastapi.core_api.security.get_auth_manager")
def test_requires_access_pool(self, mock_get_auth_manager, mock_get_team_name, team_name):
auth_manager = Mock()
auth_manager.is_authorized_pool.return_value = True
mock_get_auth_manager.return_value = auth_manager
fastapi_request = Mock()
fastapi_request.path_params = {"pool_name": "pool"}
mock_get_team_name.return_value = team_name
user = Mock()
requires_access_pool("GET")(fastapi_request, user)
auth_manager.is_authorized_pool.assert_called_once_with(
method="GET",
details=PoolDetails(name="pool", team_name=team_name),
user=user,
)
mock_get_team_name.assert_called_once_with("pool")
@patch.object(Pool, "get_name_to_team_name_mapping")
@patch("airflow.api_fastapi.core_api.security.get_auth_manager")
def test_requires_access_pool_bulk(self, mock_get_auth_manager, mock_get_name_to_team_name_mapping):
auth_manager = Mock()
auth_manager.batch_is_authorized_pool.return_value = True
mock_get_auth_manager.return_value = auth_manager
mock_get_name_to_team_name_mapping.return_value = {"pool1": "team1"}
request = BulkBody[PoolBody].model_validate(
{
"actions": [
{
"action": "create",
"entities": [
{"pool": "pool1", "slots": 1},
{"pool": "pool2", "slots": 1},
],
},
{
"action": "delete",
"entities": ["pool3"],
},
{
"action": "create",
"entities": [
{"pool": "pool4", "slots": 1},
],
"action_on_existence": "overwrite",
},
]
}
)
user = Mock()
requires_access_pool_bulk()(request, user)
auth_manager.batch_is_authorized_pool.assert_called_once_with(
requests=[
{
"method": "POST",
"details": PoolDetails(name="pool1", team_name="team1"),
},
{
"method": "POST",
"details": PoolDetails(name="pool2"),
},
{
"method": "DELETE",
"details": PoolDetails(name="pool3"),
},
{
"method": "POST",
"details": PoolDetails(name="pool4"),
},
{
"method": "PUT",
"details": PoolDetails(name="pool4"),
},
],
user=user,
)
|
TestFastApiSecurity
|
python
|
PrefectHQ__prefect
|
src/prefect/exceptions.py
|
{
"start": 10843,
"end": 11002
}
|
class ____(PrefectException):
"""
Raised when infrastructure is missing, likely because it has exited or been
deleted.
"""
|
InfrastructureNotFound
|
python
|
great-expectations__great_expectations
|
great_expectations/data_context/data_context/cloud_data_context.py
|
{
"start": 2684,
"end": 2806
}
|
class ____(Exception):
def __init__(self):
super().__init__("No user id in /accounts/me response")
|
NoUserIdError
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_service_status.py
|
{
"start": 383,
"end": 4361
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'conditions': 'list[V1Condition]',
'load_balancer': 'V1LoadBalancerStatus'
}
attribute_map = {
'conditions': 'conditions',
'load_balancer': 'loadBalancer'
}
def __init__(self, conditions=None, load_balancer=None, local_vars_configuration=None): # noqa: E501
"""V1ServiceStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._conditions = None
self._load_balancer = None
self.discriminator = None
if conditions is not None:
self.conditions = conditions
if load_balancer is not None:
self.load_balancer = load_balancer
@property
def conditions(self):
"""Gets the conditions of this V1ServiceStatus. # noqa: E501
Current service state # noqa: E501
:return: The conditions of this V1ServiceStatus. # noqa: E501
:rtype: list[V1Condition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1ServiceStatus.
Current service state # noqa: E501
:param conditions: The conditions of this V1ServiceStatus. # noqa: E501
:type: list[V1Condition]
"""
self._conditions = conditions
@property
def load_balancer(self):
"""Gets the load_balancer of this V1ServiceStatus. # noqa: E501
:return: The load_balancer of this V1ServiceStatus. # noqa: E501
:rtype: V1LoadBalancerStatus
"""
return self._load_balancer
@load_balancer.setter
def load_balancer(self, load_balancer):
"""Sets the load_balancer of this V1ServiceStatus.
:param load_balancer: The load_balancer of this V1ServiceStatus. # noqa: E501
:type: V1LoadBalancerStatus
"""
self._load_balancer = load_balancer
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ServiceStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ServiceStatus):
return True
return self.to_dict() != other.to_dict()
|
V1ServiceStatus
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_pretty.py
|
{
"start": 13287,
"end": 14036
}
|
class ____:
def __init__(self, value):
self.value = value
def __hash__(self):
return 0
def __eq__(self, other):
return isinstance(other, HashItAnyway) and self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def _repr_pretty_(self, pretty, cycle):
pretty.pretty(self.value)
def test_cyclic_counter():
c = Counter()
k = HashItAnyway(c)
c[k] = 1
assert pretty.pretty(c) == "Counter({Counter(...): 1})"
def test_cyclic_dict():
x = {}
k = HashItAnyway(x)
x[k] = x
assert pretty.pretty(x) == "{{...}: {...}}"
def test_cyclic_set():
x = set()
x.add(HashItAnyway(x))
assert pretty.pretty(x) == "{{...}}"
|
HashItAnyway
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/cgroup.py
|
{
"start": 351,
"end": 527
}
|
class ____:
"""Linux filesystem mount type constants."""
TMPFS = 'tmpfs'
CGROUP_V1 = 'cgroup'
CGROUP_V2 = 'cgroup2'
@dataclasses.dataclass(frozen=True)
|
MountType
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/chat_ui/models/artifact.py
|
{
"start": 271,
"end": 326
}
|
class ____(BaseModel):
id: str
|
DocumentArtifactSource
|
python
|
openai__openai-python
|
src/openai/types/realtime/response_audio_done_event.py
|
{
"start": 199,
"end": 692
}
|
class ____(BaseModel):
content_index: int
"""The index of the content part in the item's content array."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the item."""
output_index: int
"""The index of the output item in the response."""
response_id: str
"""The ID of the response."""
type: Literal["response.output_audio.done"]
"""The event type, must be `response.output_audio.done`."""
|
ResponseAudioDoneEvent
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/backends/backend_pdf.py
|
{
"start": 14641,
"end": 16181
}
|
class ____(Enum):
"""PDF operators (not an exhaustive list)."""
close_fill_stroke = b'b'
fill_stroke = b'B'
fill = b'f'
closepath = b'h'
close_stroke = b's'
stroke = b'S'
endpath = b'n'
begin_text = b'BT'
end_text = b'ET'
curveto = b'c'
rectangle = b're'
lineto = b'l'
moveto = b'm'
concat_matrix = b'cm'
use_xobject = b'Do'
setgray_stroke = b'G'
setgray_nonstroke = b'g'
setrgb_stroke = b'RG'
setrgb_nonstroke = b'rg'
setcolorspace_stroke = b'CS'
setcolorspace_nonstroke = b'cs'
setcolor_stroke = b'SCN'
setcolor_nonstroke = b'scn'
setdash = b'd'
setlinejoin = b'j'
setlinecap = b'J'
setgstate = b'gs'
gsave = b'q'
grestore = b'Q'
textpos = b'Td'
selectfont = b'Tf'
textmatrix = b'Tm'
show = b'Tj'
showkern = b'TJ'
setlinewidth = b'w'
clip = b'W'
shading = b'sh'
def pdfRepr(self):
return self.value
@classmethod
def paint_path(cls, fill, stroke):
"""
Return the PDF operator to paint a path.
Parameters
----------
fill : bool
Fill the path with the fill color.
stroke : bool
Stroke the outline of the path with the line color.
"""
if stroke:
if fill:
return cls.fill_stroke
else:
return cls.stroke
else:
if fill:
return cls.fill
else:
return cls.endpath
|
Op
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1alpha1_apply_configuration.py
|
{
"start": 383,
"end": 8020
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'expression': 'str'
}
attribute_map = {
'expression': 'expression'
}
def __init__(self, expression=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1ApplyConfiguration - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._expression = None
self.discriminator = None
if expression is not None:
self.expression = expression
@property
def expression(self):
"""Gets the expression of this V1alpha1ApplyConfiguration. # noqa: E501
expression will be evaluated by CEL to create an apply configuration. ref: https://github.com/google/cel-spec Apply configurations are declared in CEL using object initialization. For example, this CEL expression returns an apply configuration to set a single field: Object{ spec: Object.spec{ serviceAccountName: \"example\" } } Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of values not included in the apply configuration. CEL expressions have access to the object types needed to create apply configurations: - 'Object' - CEL type of the resource object. - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec') - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>` - CEL type of nested field (such as 'Object.spec.containers') CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: - 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value. For example, a variable named 'foo' can be accessed as 'variables.foo'. - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. 
The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible. Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required. # noqa: E501
:return: The expression of this V1alpha1ApplyConfiguration. # noqa: E501
:rtype: str
"""
return self._expression
@expression.setter
def expression(self, expression):
"""Sets the expression of this V1alpha1ApplyConfiguration.
expression will be evaluated by CEL to create an apply configuration. ref: https://github.com/google/cel-spec Apply configurations are declared in CEL using object initialization. For example, this CEL expression returns an apply configuration to set a single field: Object{ spec: Object.spec{ serviceAccountName: \"example\" } } Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of values not included in the apply configuration. CEL expressions have access to the object types needed to create apply configurations: - 'Object' - CEL type of the resource object. - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec') - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>` - CEL type of nested field (such as 'Object.spec.containers') CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: - 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value. For example, a variable named 'foo' can be accessed as 'variables.foo'. - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. 
The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible. Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required. # noqa: E501
:param expression: The expression of this V1alpha1ApplyConfiguration. # noqa: E501
:type: str
"""
self._expression = expression
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1ApplyConfiguration):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1ApplyConfiguration):
return True
return self.to_dict() != other.to_dict()
|
V1alpha1ApplyConfiguration
|
python
|
python-markdown__markdown
|
markdown/inlinepatterns.py
|
{
"start": 20962,
"end": 26112
}
|
class ____(InlineProcessor):
"""Emphasis processor for handling strong and em matches inside asterisks."""
PATTERNS = [
EmStrongItem(re.compile(EM_STRONG_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
EmStrongItem(re.compile(STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'),
EmStrongItem(re.compile(STRONG_EM3_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'),
EmStrongItem(re.compile(STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'),
EmStrongItem(re.compile(EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em')
]
""" The various strong and emphasis patterns handled by this processor. """
def build_single(self, m: re.Match[str], tag: str, idx: int) -> etree.Element:
"""Return single tag."""
el1 = etree.Element(tag)
text = m.group(2)
self.parse_sub_patterns(text, el1, None, idx)
return el1
def build_double(self, m: re.Match[str], tags: str, idx: int) -> etree.Element:
"""Return double tag."""
tag1, tag2 = tags.split(",")
el1 = etree.Element(tag1)
el2 = etree.Element(tag2)
text = m.group(2)
self.parse_sub_patterns(text, el2, None, idx)
el1.append(el2)
if len(m.groups()) == 3:
text = m.group(3)
self.parse_sub_patterns(text, el1, el2, idx)
return el1
def build_double2(self, m: re.Match[str], tags: str, idx: int) -> etree.Element:
"""Return double tags (variant 2): `<strong>text <em>text</em></strong>`."""
tag1, tag2 = tags.split(",")
el1 = etree.Element(tag1)
el2 = etree.Element(tag2)
text = m.group(2)
self.parse_sub_patterns(text, el1, None, idx)
text = m.group(3)
el1.append(el2)
self.parse_sub_patterns(text, el2, None, idx)
return el1
def parse_sub_patterns(
self, data: str, parent: etree.Element, last: etree.Element | None, idx: int
) -> None:
"""
Parses sub patterns.
`data`: text to evaluate.
`parent`: Parent to attach text and sub elements to.
`last`: Last appended child to parent. Can also be None if parent has no children.
`idx`: Current pattern index that was used to evaluate the parent.
"""
offset = 0
pos = 0
length = len(data)
while pos < length:
# Find the start of potential emphasis or strong tokens
if self.compiled_re.match(data, pos):
matched = False
# See if the we can match an emphasis/strong pattern
for index, item in enumerate(self.PATTERNS):
# Only evaluate patterns that are after what was used on the parent
if index <= idx:
continue
m = item.pattern.match(data, pos)
if m:
# Append child nodes to parent
# Text nodes should be appended to the last
# child if present, and if not, it should
# be added as the parent's text node.
text = data[offset:m.start(0)]
if text:
if last is not None:
last.tail = text
else:
parent.text = text
el = self.build_element(m, item.builder, item.tags, index)
parent.append(el)
last = el
# Move our position past the matched hunk
offset = pos = m.end(0)
matched = True
if not matched:
# We matched nothing, move on to the next character
pos += 1
else:
# Increment position as no potential emphasis start was found.
pos += 1
# Append any leftover text as a text node.
text = data[offset:]
if text:
if last is not None:
last.tail = text
else:
parent.text = text
def build_element(self, m: re.Match[str], builder: str, tags: str, index: int) -> etree.Element:
"""Element builder."""
if builder == 'double2':
return self.build_double2(m, tags, index)
elif builder == 'double':
return self.build_double(m, tags, index)
else:
return self.build_single(m, tags, index)
def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | None, int | None, int | None]:
"""Parse patterns."""
el = None
start = None
end = None
for index, item in enumerate(self.PATTERNS):
m1 = item.pattern.match(data, m.start(0))
if m1:
start = m1.start(0)
end = m1.end(0)
el = self.build_element(m1, item.builder, item.tags, index)
break
return el, start, end
|
AsteriskProcessor
|
python
|
openai__openai-python
|
src/openai/resources/beta/threads/runs/steps.py
|
{
"start": 16460,
"end": 16981
}
|
class ____:
def __init__(self, steps: AsyncSteps) -> None:
self._steps = steps
self.retrieve = ( # pyright: ignore[reportDeprecated]
async_to_streamed_response_wrapper(
steps.retrieve, # pyright: ignore[reportDeprecated],
)
)
self.list = ( # pyright: ignore[reportDeprecated]
async_to_streamed_response_wrapper(
steps.list, # pyright: ignore[reportDeprecated],
)
)
|
AsyncStepsWithStreamingResponse
|
python
|
huggingface__transformers
|
tests/models/falcon/test_modeling_falcon.py
|
{
"start": 1432,
"end": 1993
}
|
class ____(CausalLMModelTest, unittest.TestCase):
model_tester_class = FalconModelTester
# TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
return True
@require_torch
|
FalconModelTest
|
python
|
realpython__materials
|
gemini-cli/todolist/src/todolist/status.py
|
{
"start": 130,
"end": 1114
}
|
class ____:
name: str
done: tuple[str, ...]
pending: tuple[str, ...]
@classmethod
def find_all(cls) -> tuple[Self, ...]:
return tuple(
map(cls.from_model, TaskList.select().order_by(TaskList.name))
)
@classmethod
def find_one(cls, list_name: str) -> Self | None:
if task_list := TaskList.get_or_none(TaskList.name == list_name):
return cls.from_model(task_list)
else:
return None
@classmethod
def from_model(cls, task_list: TaskList) -> Self:
done, pending = [], []
for task in task_list.tasks.order_by(Task.name):
if task.done:
done.append(task.pretty_name)
else:
pending.append(task.pretty_name)
return cls(
str(task_list.name),
tuple(done),
tuple(pending),
)
def __len__(self) -> int:
return len(self.done) + len(self.pending)
|
TaskListStatus
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/distributions/special_math_test.py
|
{
"start": 8434,
"end": 8844
}
|
class ____(NdtrTest):
_use_log = True
_grid32 = GridSpec(
min=sm.LOGNDTR_FLOAT32_UPPER,
max=12., # Beyond this, log_cdf(x) may be zero.
shape=[100])
_grid64 = GridSpec(
min=sm.LOGNDTR_FLOAT64_UPPER,
max=35., # Beyond this, log_cdf(x) may be zero.
shape=[100])
_error32 = ErrorSpec(rtol=1e-6, atol=1e-14)
_error64 = ErrorSpec(rtol=1e-6, atol=1e-14)
|
LogNdtrTestUpper
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI036.py
|
{
"start": 3185,
"end": 3419
}
|
class ____:
def __exit__(self, typ, exc, tb, weird_extra_arg) -> None: ... # PYI036: Extra arg must have default
async def __aexit__(self, typ, exc, tb, *, weird_extra_arg) -> None: ...# PYI036: Extra arg must have default
|
BadTwo
|
python
|
getsentry__sentry
|
src/sentry/api/serializers/models/dashboard.py
|
{
"start": 15072,
"end": 15162
}
|
class ____(TypedDict):
displayType: str
layout: dict[str, str] | None
|
_WidgetPreview
|
python
|
realpython__materials
|
python-guitar-synthesizer/source_code_step_6/demo/play_diablo.py
|
{
"start": 1197,
"end": 4157
}
|
class ____:
instant: Time
chord: Chord
velocity: Velocity
def main() -> None:
acoustic_guitar = PluckedStringInstrument(
tuning=StringTuning.from_notes("E2", "A2", "D3", "G3", "B3", "E4"),
vibration=Time(seconds=10),
damping=0.498,
)
synthesizer = Synthesizer(acoustic_guitar)
audio_track = AudioTrack(synthesizer.sampling_rate)
timeline = MeasuredTimeline(measure=MeasureTiming.MEASURE)
for measure in measures(timeline):
for stroke in measure:
audio_track.add_at(
stroke.instant,
synthesizer.strum_strings(stroke.chord, stroke.velocity),
)
save(audio_track, "diablo.mp3")
def measures(timeline: MeasuredTimeline) -> tuple[tuple[Stroke, ...], ...]:
return (
measure_01(timeline),
measure_02(timeline),
)
def measure_01(timeline: MeasuredTimeline) -> tuple[Stroke, ...]:
return (
Stroke(
timeline.instant,
Chord.from_numbers(0, 0, 2, 2, 0, None),
Velocity.down(StrummingSpeed.SLOW),
),
Stroke(
(timeline >> Note.THREE_SIXTEENTH).instant,
Chord.from_numbers(None, 0, 2, None, None, None),
Velocity.up(StrummingSpeed.FAST),
),
Stroke(
(timeline >> Note.ONE_EIGHTH).instant,
Chord.from_numbers(0, 0, 2, 2, 0, None),
Velocity.down(StrummingSpeed.SLOW),
),
)
def measure_02(timeline: MeasuredTimeline) -> tuple[Stroke, ...]:
return (
Stroke(
next(timeline).instant,
Chord.from_numbers(0, 4, 2, 1, 0, None),
Velocity.down(StrummingSpeed.SLOW),
),
Stroke(
(timeline >> Note.THREE_SIXTEENTH).instant,
Chord.from_numbers(None, None, 2, None, None, None),
Velocity.down(StrummingSpeed.SUPER_FAST),
),
Stroke(
(timeline >> Note.ONE_EIGHTH).instant,
Chord.from_numbers(0, 4, 2, 1, 0, None),
Velocity.down(StrummingSpeed.SLOW),
),
Stroke(
(timeline >> Note.SEVEN_SIXTEENTH).instant,
Chord.from_numbers(7, None, None, None, None, None),
Velocity.down(StrummingSpeed.SUPER_FAST),
),
)
def save(audio_track: AudioTrack, filename: str) -> None:
with AudioFile(filename, "w", audio_track.sampling_rate) as file:
file.write(normalize(apply_effects(audio_track)))
print(f"\nSaved file {filename!r}")
def apply_effects(audio_track: AudioTrack) -> np.ndarray:
effects = Pedalboard(
[
Reverb(),
Convolution(impulse_response_filename="ir/acoustic.wav", mix=0.95),
LowShelfFilter(cutoff_frequency_hz=440, gain_db=10, q=1),
Gain(gain_db=6),
]
)
return effects(audio_track.samples, audio_track.sampling_rate)
if __name__ == "__main__":
main()
|
Stroke
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-number-of-possible-root-nodes.py
|
{
"start": 67,
"end": 1408
}
|
class ____(object):
def rootCount(self, edges, guesses, k):
"""
:type edges: List[List[int]]
:type guesses: List[List[int]]
:type k: int
:rtype: int
"""
def iter_dfs():
result = 0
stk = [(0, -1)]
while stk:
u, p = stk.pop()
result += int((p, u) in lookup)
for v in adj[u]:
if v == p:
continue
stk.append((v, u))
return result
def iter_dfs2(curr):
result = 0
stk = [(0, -1, curr)]
while stk:
u, p, curr = stk.pop()
if (p, u) in lookup:
curr -= 1
if (u, p) in lookup:
curr += 1
result += int(curr >= k)
for v in adj[u]:
if v == p:
continue
stk.append((v, u, curr))
return result
adj = collections.defaultdict(list)
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
lookup = {(u, v) for u, v in guesses}
curr = iter_dfs()
return iter_dfs2(curr)
# Time: O(n)
# Space: O(h)
import collections
# dfs
|
Solution
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-of-smaller-numbers-after-self.py
|
{
"start": 33,
"end": 1268
}
|
class ____(object):
def countSmaller(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
def countAndMergeSort(num_idxs, start, end, counts):
if end - start <= 0: # The size of range [start, end] less than 2 is always with count 0.
return
mid = start + (end - start) // 2
countAndMergeSort(num_idxs, start, mid, counts)
countAndMergeSort(num_idxs, mid + 1, end, counts)
r = mid + 1
tmp = []
for i in xrange(start, mid + 1):
# Merge the two sorted arrays into tmp.
while r <= end and num_idxs[r][0] < num_idxs[i][0]:
tmp.append(num_idxs[r])
r += 1
tmp.append(num_idxs[i])
counts[num_idxs[i][1]] += r - (mid + 1)
# Copy tmp back to num_idxs
num_idxs[start:start+len(tmp)] = tmp
num_idxs = []
counts = [0] * len(nums)
for i, num in enumerate(nums):
num_idxs.append((num, i))
countAndMergeSort(num_idxs, 0, len(num_idxs) - 1, counts)
return counts
# Time: O(nlogn)
# Space: O(n)
# BIT solution.
|
Solution
|
python
|
oauthlib__oauthlib
|
oauthlib/oauth1/rfc5849/request_validator.py
|
{
"start": 200,
"end": 30987
}
|
class ____:
"""A validator/datastore interaction base class for OAuth 1 providers.
OAuth providers should inherit from RequestValidator and implement the
methods and properties outlined below. Further details are provided in the
documentation for each method and property.
Methods used to check the format of input parameters. Common tests include
length, character set, membership, range or pattern. These tests are
referred to as `whitelisting or blacklisting`_. Whitelisting is better
but blacklisting can be useful to spot malicious activity.
The following have methods a default implementation:
- check_client_key
- check_request_token
- check_access_token
- check_nonce
- check_verifier
- check_realms
The methods above default to whitelist input parameters, checking that they
are alphanumerical and between a minimum and maximum length. Rather than
overloading the methods a few properties can be used to configure these
methods.
* @safe_characters -> (character set)
* @client_key_length -> (min, max)
* @request_token_length -> (min, max)
* @access_token_length -> (min, max)
* @nonce_length -> (min, max)
* @verifier_length -> (min, max)
* @realms -> [list, of, realms]
Methods used to validate/invalidate input parameters. These checks usually
hit either persistent or temporary storage such as databases or the
filesystem. See each methods documentation for detailed usage.
The following methods must be implemented:
- validate_client_key
- validate_request_token
- validate_access_token
- validate_timestamp_and_nonce
- validate_redirect_uri
- validate_requested_realms
- validate_realms
- validate_verifier
- invalidate_request_token
Methods used to retrieve sensitive information from storage.
The following methods must be implemented:
- get_client_secret
- get_request_token_secret
- get_access_token_secret
- get_rsa_key
- get_realms
- get_default_realms
- get_redirect_uri
Methods used to save credentials.
The following methods must be implemented:
- save_request_token
- save_verifier
- save_access_token
Methods used to verify input parameters. This methods are used during
authorizing request token by user (AuthorizationEndpoint), to check if
parameters are valid. During token authorization request is not signed,
thus 'validation' methods can not be used. The following methods must be
implemented:
- verify_realms
- verify_request_token
To prevent timing attacks it is necessary to not exit early even if the
client key or resource owner key is invalid. Instead dummy values should
be used during the remaining verification process. It is very important
that the dummy client and token are valid input parameters to the methods
get_client_secret, get_rsa_key and get_(access/request)_token_secret and
that the running time of those methods when given a dummy value remain
equivalent to the running time when given a valid client/resource owner.
The following properties must be implemented:
* @dummy_client
* @dummy_request_token
* @dummy_access_token
Example implementations have been provided, note that the database used is
a simple dictionary and serves only an illustrative purpose. Use whichever
database suits your project and how to access it is entirely up to you.
The methods are introduced in an order which should make understanding
their use more straightforward and as such it could be worth reading what
follows in chronological order.
.. _`whitelisting or blacklisting`: https://www.schneier.com/blog/archives/2011/01/whitelisting_vs.html
"""
def __init__(self):
pass
@property
def allowed_signature_methods(self):
return SIGNATURE_METHODS
@property
def safe_characters(self):
return set(utils.UNICODE_ASCII_CHARACTER_SET)
@property
def client_key_length(self):
return 20, 30
@property
def request_token_length(self):
return 20, 30
@property
def access_token_length(self):
return 20, 30
@property
def timestamp_lifetime(self):
return 600
@property
def nonce_length(self):
return 20, 30
@property
def verifier_length(self):
return 20, 30
@property
def realms(self):
return []
@property
def enforce_ssl(self):
return True
def check_client_key(self, client_key):
"""Check that the client key only contains safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.client_key_length
return (set(client_key) <= self.safe_characters and
lower <= len(client_key) <= upper)
def check_request_token(self, request_token):
"""Checks that the request token contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.request_token_length
return (set(request_token) <= self.safe_characters and
lower <= len(request_token) <= upper)
def check_access_token(self, request_token):
"""Checks that the token contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.access_token_length
return (set(request_token) <= self.safe_characters and
lower <= len(request_token) <= upper)
def check_nonce(self, nonce):
"""Checks that the nonce only contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.nonce_length
return (set(nonce) <= self.safe_characters and
lower <= len(nonce) <= upper)
def check_verifier(self, verifier):
"""Checks that the verifier contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.verifier_length
return (set(verifier) <= self.safe_characters and
lower <= len(verifier) <= upper)
def check_realms(self, realms):
"""Check that the realm is one of a set allowed realms."""
return all(r in self.realms for r in realms)
def _subclass_must_implement(self, fn):
"""
Returns a NotImplementedError for a function that should be implemented.
:param fn: name of the function
"""
m = "Missing function implementation in {}: {}".format(type(self), fn)
return NotImplementedError(m)
@property
def dummy_client(self):
"""Dummy client used when an invalid client key is supplied.
:returns: The dummy client key string.
The dummy client should be associated with either a client secret,
a rsa key or both depending on which signature methods are supported.
Providers should make sure that
get_client_secret(dummy_client)
get_rsa_key(dummy_client)
return a valid secret or key for the dummy client.
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
* SignatureOnlyEndpoint
"""
raise self._subclass_must_implement("dummy_client")
@property
def dummy_request_token(self):
"""Dummy request token used when an invalid token was supplied.
:returns: The dummy request token string.
The dummy request token should be associated with a request token
secret such that get_request_token_secret(.., dummy_request_token)
returns a valid secret.
This method is used by
* AccessTokenEndpoint
"""
raise self._subclass_must_implement("dummy_request_token")
@property
def dummy_access_token(self):
"""Dummy access token used when an invalid token was supplied.
:returns: The dummy access token string.
The dummy access token should be associated with an access token
secret such that get_access_token_secret(.., dummy_access_token)
returns a valid secret.
This method is used by
* ResourceEndpoint
"""
raise self._subclass_must_implement("dummy_access_token")
def get_client_secret(self, client_key, request):
"""Retrieves the client secret associated with the client key.
:param client_key: The client/consumer key.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: The client secret as a string.
This method must allow the use of a dummy client_key value.
Fetching the secret using the dummy key must take the same amount of
time as fetching a secret for a valid client::
# Unlikely to be near constant time as it uses two database
# lookups for a valid client, and only one for an invalid.
from your_datastore import ClientSecret
if ClientSecret.has(client_key):
return ClientSecret.get(client_key)
else:
return 'dummy'
# Aim to mimic number of latency inducing operations no matter
# whether the client is valid or not.
from your_datastore import ClientSecret
return ClientSecret.get(client_key, 'dummy')
Note that the returned key must be in plaintext.
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
* SignatureOnlyEndpoint
"""
raise self._subclass_must_implement('get_client_secret')
def get_request_token_secret(self, client_key, token, request):
"""Retrieves the shared secret associated with the request token.
:param client_key: The client/consumer key.
:param token: The request token string.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: The token secret as a string.
This method must allow the use of a dummy values and the running time
must be roughly equivalent to that of the running time of valid values::
# Unlikely to be near constant time as it uses two database
# lookups for a valid client, and only one for an invalid.
from your_datastore import RequestTokenSecret
if RequestTokenSecret.has(client_key):
return RequestTokenSecret.get((client_key, request_token))
else:
return 'dummy'
# Aim to mimic number of latency inducing operations no matter
# whether the client is valid or not.
from your_datastore import RequestTokenSecret
return ClientSecret.get((client_key, request_token), 'dummy')
Note that the returned key must be in plaintext.
This method is used by
* AccessTokenEndpoint
"""
raise self._subclass_must_implement('get_request_token_secret')
def get_access_token_secret(self, client_key, token, request):
"""Retrieves the shared secret associated with the access token.
:param client_key: The client/consumer key.
:param token: The access token string.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: The token secret as a string.
This method must allow the use of a dummy values and the running time
must be roughly equivalent to that of the running time of valid values::
# Unlikely to be near constant time as it uses two database
# lookups for a valid client, and only one for an invalid.
from your_datastore import AccessTokenSecret
if AccessTokenSecret.has(client_key):
return AccessTokenSecret.get((client_key, request_token))
else:
return 'dummy'
# Aim to mimic number of latency inducing operations no matter
# whether the client is valid or not.
from your_datastore import AccessTokenSecret
return ClientSecret.get((client_key, request_token), 'dummy')
Note that the returned key must be in plaintext.
This method is used by
* ResourceEndpoint
"""
raise self._subclass_must_implement("get_access_token_secret")
def get_default_realms(self, client_key, request):
"""Get the default realms for a client.
:param client_key: The client/consumer key.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: The list of default realms associated with the client.
The list of default realms will be set during client registration and
is outside the scope of OAuthLib.
This method is used by
* RequestTokenEndpoint
"""
raise self._subclass_must_implement("get_default_realms")
def get_realms(self, token, request):
"""Get realms associated with a request token.
:param token: The request token string.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: The list of realms associated with the request token.
This method is used by
* AuthorizationEndpoint
* AccessTokenEndpoint
"""
raise self._subclass_must_implement("get_realms")
def get_redirect_uri(self, token, request):
"""Get the redirect URI associated with a request token.
:param token: The request token string.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: The redirect URI associated with the request token.
It may be desirable to return a custom URI if the redirect is set to "oob".
In this case, the user will be redirected to the returned URI and at that
endpoint the verifier can be displayed.
This method is used by
* AuthorizationEndpoint
"""
raise self._subclass_must_implement("get_redirect_uri")
def get_rsa_key(self, client_key, request):
"""Retrieves a previously stored client provided RSA key.
:param client_key: The client/consumer key.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: The rsa public key as a string.
This method must allow the use of a dummy client_key value. Fetching
the rsa key using the dummy key must take the same amount of time
as fetching a key for a valid client. The dummy key must also be of
the same bit length as client keys.
Note that the key must be returned in plaintext.
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
* SignatureOnlyEndpoint
"""
raise self._subclass_must_implement("get_rsa_key")
def invalidate_request_token(self, client_key, request_token, request):
"""Invalidates a used request token.
:param client_key: The client/consumer key.
:param request_token: The request token string.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: None
Per `Section 2.3`_ of the spec:
"The server MUST (...) ensure that the temporary
credentials have not expired or been used before."
.. _`Section 2.3`: https://tools.ietf.org/html/rfc5849#section-2.3
This method should ensure that provided token won't validate anymore.
It can be simply removing RequestToken from storage or setting
specific flag that makes it invalid (note that such flag should be
also validated during request token validation).
This method is used by
* AccessTokenEndpoint
"""
raise self._subclass_must_implement("invalidate_request_token")
def validate_client_key(self, client_key, request):
"""Validates that supplied client key is a registered and valid client.
:param client_key: The client/consumer key.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: True or False
Note that if the dummy client is supplied it should validate in same
or nearly the same amount of time as a valid one.
Ensure latency inducing tasks are mimiced even for dummy clients.
For example, use::
from your_datastore import Client
try:
return Client.exists(client_key, access_token)
except DoesNotExist:
return False
Rather than::
from your_datastore import Client
if access_token == self.dummy_access_token:
return False
else:
return Client.exists(client_key, access_token)
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
* SignatureOnlyEndpoint
"""
raise self._subclass_must_implement("validate_client_key")
def validate_request_token(self, client_key, token, request):
"""Validates that supplied request token is registered and valid.
:param client_key: The client/consumer key.
:param token: The request token string.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: True or False
Note that if the dummy request_token is supplied it should validate in
the same nearly the same amount of time as a valid one.
Ensure latency inducing tasks are mimiced even for dummy clients.
For example, use::
from your_datastore import RequestToken
try:
return RequestToken.exists(client_key, access_token)
except DoesNotExist:
return False
Rather than::
from your_datastore import RequestToken
if access_token == self.dummy_access_token:
return False
else:
return RequestToken.exists(client_key, access_token)
This method is used by
* AccessTokenEndpoint
"""
raise self._subclass_must_implement("validate_request_token")
def validate_access_token(self, client_key, token, request):
"""Validates that supplied access token is registered and valid.
:param client_key: The client/consumer key.
:param token: The access token string.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: True or False
Note that if the dummy access token is supplied it should validate in
the same or nearly the same amount of time as a valid one.
Ensure latency inducing tasks are mimiced even for dummy clients.
For example, use::
from your_datastore import AccessToken
try:
return AccessToken.exists(client_key, access_token)
except DoesNotExist:
return False
Rather than::
from your_datastore import AccessToken
if access_token == self.dummy_access_token:
return False
else:
return AccessToken.exists(client_key, access_token)
This method is used by
* ResourceEndpoint
"""
raise self._subclass_must_implement("validate_access_token")
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
request, request_token=None, access_token=None):
"""Validates that the nonce has not been used before.
:param client_key: The client/consumer key.
:param timestamp: The ``oauth_timestamp`` parameter.
:param nonce: The ``oauth_nonce`` parameter.
:param request_token: Request token string, if any.
:param access_token: Access token string, if any.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: True or False
Per `Section 3.3`_ of the spec.
"A nonce is a random string, uniquely generated by the client to allow
the server to verify that a request has never been made before and
helps prevent replay attacks when requests are made over a non-secure
channel. The nonce value MUST be unique across all requests with the
same timestamp, client credentials, and token combinations."
.. _`Section 3.3`: https://tools.ietf.org/html/rfc5849#section-3.3
One of the first validation checks that will be made is for the validity
of the nonce and timestamp, which are associated with a client key and
possibly a token. If invalid then immediately fail the request
by returning False. If the nonce/timestamp pair has been used before and
you may just have detected a replay attack. Therefore it is an essential
part of OAuth security that you not allow nonce/timestamp reuse.
Note that this validation check is done before checking the validity of
the client and token.::
nonces_and_timestamps_database = [
(u'foo', 1234567890, u'rannoMstrInghere', u'bar')
]
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
request_token=None, access_token=None):
return ((client_key, timestamp, nonce, request_token or access_token)
not in self.nonces_and_timestamps_database)
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
* SignatureOnlyEndpoint
"""
raise self._subclass_must_implement("validate_timestamp_and_nonce")
def validate_redirect_uri(self, client_key, redirect_uri, request):
"""Validates the client supplied redirection URI.
:param client_key: The client/consumer key.
:param redirect_uri: The URI the client which to redirect back to after
authorization is successful.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: True or False
It is highly recommended that OAuth providers require their clients
to register all redirection URIs prior to using them in requests and
register them as absolute URIs. See `CWE-601`_ for more information
about open redirection attacks.
By requiring registration of all redirection URIs it should be
straightforward for the provider to verify whether the supplied
redirect_uri is valid or not.
Alternatively per `Section 2.1`_ of the spec:
"If the client is unable to receive callbacks or a callback URI has
been established via other means, the parameter value MUST be set to
"oob" (case sensitive), to indicate an out-of-band configuration."
.. _`CWE-601`: http://cwe.mitre.org/top25/index.html#CWE-601
.. _`Section 2.1`: https://tools.ietf.org/html/rfc5849#section-2.1
This method is used by
* RequestTokenEndpoint
"""
raise self._subclass_must_implement("validate_redirect_uri")
def validate_requested_realms(self, client_key, realms, request):
"""Validates that the client may request access to the realm.
:param client_key: The client/consumer key.
:param realms: The list of realms that client is requesting access to.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: True or False
This method is invoked when obtaining a request token and should
tie a realm to the request token and after user authorization
this realm restriction should transfer to the access token.
This method is used by
* RequestTokenEndpoint
"""
raise self._subclass_must_implement("validate_requested_realms")
def validate_realms(self, client_key, token, request, uri=None,
realms=None):
"""Validates access to the request realm.
:param client_key: The client/consumer key.
:param token: A request token string.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param uri: The URI the realms is protecting.
:param realms: A list of realms that must have been granted to
the access token.
:returns: True or False
How providers choose to use the realm parameter is outside the OAuth
specification but it is commonly used to restrict access to a subset
of protected resources such as "photos".
realms is a convenience parameter which can be used to provide
a per view method pre-defined list of allowed realms.
Can be as simple as::
from your_datastore import RequestToken
request_token = RequestToken.get(token, None)
if not request_token:
return False
return set(request_token.realms).issuperset(set(realms))
This method is used by
* ResourceEndpoint
"""
raise self._subclass_must_implement("validate_realms")
def validate_verifier(self, client_key, token, verifier, request):
"""Validates a verification code.
:param client_key: The client/consumer key.
:param token: A request token string.
:param verifier: The authorization verifier string.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: True or False
OAuth providers issue a verification code to clients after the
resource owner authorizes access. This code is used by the client to
obtain token credentials and the provider must verify that the
verifier is valid and associated with the client as well as the
resource owner.
Verifier validation should be done in near constant time
(to avoid verifier enumeration). To achieve this we need a
constant time string comparison which is provided by OAuthLib
in ``oauthlib.common.safe_string_equals``::
from your_datastore import Verifier
correct_verifier = Verifier.get(client_key, request_token)
from oauthlib.common import safe_string_equals
return safe_string_equals(verifier, correct_verifier)
This method is used by
* AccessTokenEndpoint
"""
raise self._subclass_must_implement("validate_verifier")
def verify_request_token(self, token, request):
"""Verify that the given OAuth1 request token is valid.
:param token: A request token string.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: True or False
This method is used only in AuthorizationEndpoint to check whether the
oauth_token given in the authorization URL is valid or not.
This request is not signed and thus similar ``validate_request_token``
method can not be used.
This method is used by
* AuthorizationEndpoint
"""
raise self._subclass_must_implement("verify_request_token")
def verify_realms(self, token, realms, request):
"""Verify authorized realms to see if they match those given to token.
:param token: An access token string.
:param realms: A list of realms the client attempts to access.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: True or False
This prevents the list of authorized realms sent by the client during
the authorization step to be altered to include realms outside what
was bound with the request token.
Can be as simple as::
valid_realms = self.get_realms(token)
return all((r in valid_realms for r in realms))
This method is used by
* AuthorizationEndpoint
"""
raise self._subclass_must_implement("verify_realms")
def save_access_token(self, token, request):
"""Save an OAuth1 access token.
:param token: A dict with token credentials.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
The token dictionary will at minimum include
* ``oauth_token`` the access token string.
* ``oauth_token_secret`` the token specific secret used in signing.
* ``oauth_authorized_realms`` a space separated list of realms.
Client key can be obtained from ``request.client_key``.
The list of realms (not joined string) can be obtained from
``request.realm``.
This method is used by
* AccessTokenEndpoint
"""
raise self._subclass_must_implement("save_access_token")
def save_request_token(self, token, request):
"""Save an OAuth1 request token.
:param token: A dict with token credentials.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
The token dictionary will at minimum include
* ``oauth_token`` the request token string.
* ``oauth_token_secret`` the token specific secret used in signing.
* ``oauth_callback_confirmed`` the string ``true``.
Client key can be obtained from ``request.client_key``.
This method is used by
* RequestTokenEndpoint
"""
raise self._subclass_must_implement("save_request_token")
def save_verifier(self, token, verifier, request):
"""Associate an authorization verifier with a request token.
:param token: A request token string.
:param verifier: A dictionary containing the oauth_verifier and
oauth_token
:param request: OAuthlib request.
:type request: oauthlib.common.Request
We need to associate verifiers with tokens for validation during the
access token request.
Note that unlike save_x_token token here is the ``oauth_token`` token
string from the request token saved previously.
This method is used by
* AuthorizationEndpoint
"""
raise self._subclass_must_implement("save_verifier")
|
RequestValidator
|
python
|
PyCQA__pylint
|
tests/functional/s/super/super_checks.py
|
{
"start": 1814,
"end": 2114
}
|
class ____(Missing):
"""Don't emit if we don't know all the bases."""
def __init__(self):
super(UnknownBases, self).__init__()
super(UnknownBases, self).test()
super(Missing, self).test() # [bad-super-call]
# Test that we are detecting proper super errors.
|
UnknownBases
|
python
|
numba__numba
|
numba/core/errors.py
|
{
"start": 19384,
"end": 19472
}
|
class ____(NumbaError):
"""
A type inference failure.
"""
pass
|
TypingError
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/components/lib/shim_components/multi_asset.py
|
{
"start": 364,
"end": 451
}
|
class ____(BaseModel):
asset_key: Optional[list[str]] = None
|
MultiAssetScaffoldParams
|
python
|
pytorch__pytorch
|
torch/_inductor/ops_handler.py
|
{
"start": 31389,
"end": 31692
}
|
class ____(WrapperHandler):
def _default(self, name: str, args: tuple[Any, ...], kwargs: dict[str, Any]) -> Any:
val = getattr(self._inner, name)(*args, **kwargs)
if not val or isinstance(val, (sympy.Expr, tuple, list)):
return val
return f"({val})"
|
AddParenHandler
|
python
|
getsentry__sentry
|
src/sentry/silo/patches/silo_aware_transaction_patch.py
|
{
"start": 556,
"end": 5137
}
|
class ____(Exception):
pass
def _get_db_for_model_if_available(model: type["Model"]) -> str | None:
from sentry.db.router import SiloConnectionUnavailableError
try:
return router.db_for_write(model)
except SiloConnectionUnavailableError:
return None
def siloed_atomic(
using: str | None = None, savepoint: bool = True, durable: bool = False
) -> Atomic:
validate_transaction_using_for_silo_mode(using)
return _default_atomic_impl(using=using, savepoint=savepoint, durable=durable)
def siloed_get_connection(using: str | None = None) -> BaseDatabaseWrapper:
validate_transaction_using_for_silo_mode(using)
return _default_get_connection(using=using)
def siloed_on_commit(func: Callable[..., Any], using: str | None = None) -> None:
validate_transaction_using_for_silo_mode(using)
return _default_on_commit(func, using)
def is_in_test_case_body() -> bool:
"""Determine whether the current execution stack is in a test case body.
This is a best-effort, potentially brittle implementation that depends on private
behavior of the current Pytest implementation. We can't necessarily rely on
underscore-prefixed method names being used in a stable way.
Are you landing here because test cases regressed mysteriously after a Pytest
upgrade? Check the list of frames and tweak the condition logic to make this
function return false as needed. The case `test_is_in_test_case_body` should
ensure that you aren't making `validate_transaction_using_for_silo_mode` too
permissive.
An attempt was also made using Pytest fixtures. We can add state changes around
the `django_db_setup` fixture, but post-test teardown seems to be too tightly
coupled to the test run to insert a fixture between them. Adding something to the
`tearDown()` override in Sentry's BaseTestCase may have worked, but would not
have helped with standalone test functions. A better solution may nonetheless
exist; refactoring is encouraged if you find one.
This should not be used as a general-purpose utility function. Avoid calling it
in places other than `validate_transaction_using_for_silo_mode` if at all possible.
"""
frames = [str(frame) for (frame, _) in traceback.walk_stack(None)]
def seek(module_path: str, function_name: str) -> bool:
"""Check whether the named function has been called in the current stack."""
pattern = re.compile(rf"/{re.escape(module_path)}\b.*\b{re.escape(function_name)}>$")
return any(pattern.search(frame) for frame in frames)
return seek("_pytest/runner.py", "pytest_runtest_call") and not (
seek("django/test/testcases.py", "_pre_setup")
or seek("django/test/testcases.py", "_post_teardown")
)
def validate_transaction_using_for_silo_mode(using: str | None) -> None:
from sentry.hybridcloud.models.outbox import ControlOutbox, RegionOutbox
from sentry.silo.base import SiloMode
if using is None:
raise TransactionMissingDBException("'using' must be specified when creating a transaction")
if in_test_environment() and not is_in_test_case_body():
# During setup and teardown of a test environment, allow treating it as one
# DB. But make sure we enforce as normal during the actual test case.
return
current_silo_mode = SiloMode.get_current_mode()
control_db = _get_db_for_model_if_available(ControlOutbox)
region_db = _get_db_for_model_if_available(RegionOutbox)
both_silos_route_to_same_db = control_db == region_db
if both_silos_route_to_same_db or current_silo_mode == SiloMode.MONOLITH:
return
elif using == control_db and current_silo_mode != SiloMode.CONTROL:
raise MismatchedSiloTransactionError(
f"Cannot use transaction.atomic({using}) except in Control Mode"
)
elif using == region_db and current_silo_mode != SiloMode.REGION:
raise MismatchedSiloTransactionError(
f"Cannot use transaction.atomic({using}) except in Region Mode"
)
def patch_silo_aware_atomic() -> None:
global _default_on_commit, _default_get_connection, _default_atomic_impl
_default_atomic_impl = transaction.atomic
_default_on_commit = transaction.on_commit
_default_get_connection = transaction.get_connection
transaction.atomic = siloed_atomic # type: ignore[assignment]
transaction.on_commit = siloed_on_commit # type: ignore[assignment]
transaction.get_connection = siloed_get_connection
|
TransactionMissingDBException
|
python
|
jazzband__django-simple-history
|
simple_history/tests/models.py
|
{
"start": 10821,
"end": 11096
}
|
class ____(models.Model):
title = models.CharField(max_length=100)
file = models.FileField(upload_to="files")
history = HistoricalRecords()
# Clear SIMPLE_HISTORY_FILEFIELD_TO_CHARFIELD
delattr(settings, "SIMPLE_HISTORY_FILEFIELD_TO_CHARFIELD")
|
CharFieldFileModel
|
python
|
huggingface__transformers
|
src/transformers/data/processors/glue.py
|
{
"start": 5999,
"end": 7778
}
|
class ____(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["premise"].numpy().decode("utf-8"),
tensor_dict["hypothesis"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test_matched")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
guid = f"{set_type}-{line[0]}"
text_a = line[8]
text_b = line[9]
label = None if set_type.startswith("test") else line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
|
MnliProcessor
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/instance.py
|
{
"start": 1217,
"end": 2362
}
|
class ____(graphene.ObjectType):
daemonType = graphene.NonNull(graphene.String)
id = graphene.NonNull(graphene.ID)
required = graphene.NonNull(graphene.Boolean)
healthy = graphene.Boolean()
lastHeartbeatTime = graphene.Float()
lastHeartbeatErrors = non_null_list(GraphenePythonError)
class Meta:
name = "DaemonStatus"
def __init__(self, daemon_status):
check.inst_param(daemon_status, "daemon_status", DaemonStatus)
super().__init__(
daemonType=daemon_status.daemon_type,
required=daemon_status.required,
healthy=daemon_status.healthy,
lastHeartbeatTime=(
daemon_status.last_heartbeat.timestamp if daemon_status.last_heartbeat else None
),
lastHeartbeatErrors=(
[GraphenePythonError(error) for error in daemon_status.last_heartbeat.errors]
if daemon_status.last_heartbeat and daemon_status.last_heartbeat.errors
else []
),
)
def resolve_id(self, _graphene_info: ResolveInfo):
return self.daemonType
|
GrapheneDaemonStatus
|
python
|
django__django
|
tests/model_forms/tests.py
|
{
"start": 3548,
"end": 3657
}
|
class ____(forms.ModelForm):
class Meta:
model = Inventory
fields = "__all__"
|
InventoryForm
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/call11.py
|
{
"start": 649,
"end": 1120
}
|
class ____(Generic[E]):
def __init__(self, value: E) -> None:
self.value = value
def map_left(self, fn: Callable[[Any], Any]) -> Self:
return self
def map_right(self, fn: Callable[[E], F]) -> Right[F]:
return Right(fn(self.value))
def func() -> Either[int, str]:
raise NotImplementedError
result = func().map_left(lambda lv: lv + 1).map_right(lambda rv: rv + "a")
reveal_type(result, expected_text="Right[str] | Left[int]")
|
Right
|
python
|
facebookresearch__faiss
|
benchs/bench_hybrid_cpu_gpu.py
|
{
"start": 4180,
"end": 21694
}
|
class ____:
"""
Multiple GPU indexes, each on its GPU, with a common coarse quantizer.
The Python version of IndexShardsIVF
"""
def __init__(self, quantizer, index, bs=-1, seq_tiling=False):
self.quantizer = quantizer
self.cpu_index = index
if isinstance(index, faiss.IndexPreTransform):
index = faiss.downcast_index(index.index)
ngpu = index.count()
self.pool = ThreadPool(ngpu)
self.bs = bs
if bs > 0:
self.q_pool = ThreadPool(1)
def __del__(self):
self.pool.close()
if self.bs > 0:
self.q_pool.close()
def search(self, xq, k):
nq = len(xq)
# perform coarse quantization
index = self.cpu_index
if isinstance(self.cpu_index, faiss.IndexPreTransform):
assert index.chain.size() == 1
xq = self.cpu_index.chain.at(0).apply(xq)
index = faiss.downcast_index(index.index)
ngpu = index.count()
sub_index_0 = faiss.downcast_index(index.at(0))
nprobe = sub_index_0.nprobe
Dall = np.empty((ngpu, nq, k), dtype='float32')
Iall = np.empty((ngpu, nq, k), dtype='int64')
bs = self.bs
if bs <= 0:
Dq, Iq = self.quantizer.search(xq, nprobe)
def do_search(rank):
gpu_index = faiss.downcast_index(index.at(rank))
Dall[rank], Iall[rank] = gpu_index.search_preassigned(
xq, k, Iq, Dq)
list(self.pool.map(do_search, range(ngpu)))
else:
qq_pool = self.q_pool
bs = self.bs
def coarse_quant(i0):
if i0 >= nq:
return None
return self.quantizer.search(xq[i0:i0 + bs], nprobe)
def do_search(rank, i0, qq):
gpu_index = faiss.downcast_index(index.at(rank))
Dq, Iq = qq
Dall[rank, i0:i0 + bs], Iall[rank, i0:i0 + bs] = \
gpu_index.search_preassigned(xq[i0:i0 + bs], k, Iq, Dq)
qq = coarse_quant(0)
for i0 in range(0, nq, bs):
qq_next = qq_pool.apply_async(coarse_quant, (i0 + bs, ))
list(self.pool.map(
lambda rank: do_search(rank, i0, qq),
range(ngpu)
))
qq = qq_next.get()
return faiss.merge_knn_results(Dall, Iall)
def extract_index_ivf(index):
""" extract the IVF sub-index from the index, supporting GpuIndexes
as well """
try:
return faiss.extract_index_ivf(index)
except RuntimeError:
if index.__class__ == faiss.IndexPreTransform:
index = faiss.downcast_index(index.index)
if isinstance(index, faiss.GpuIndexIVF):
return index
raise RuntimeError(f"could not extract IVF index from {index}")
def set_index_parameter(index, name, val):
"""
Index parameter setting that works on the index lookalikes defined above
"""
if index.__class__ == SeparateCoarseQuantizationIndex:
if name == "nprobe":
set_index_parameter(index.index_ivf, name, val)
elif name.startswith("quantizer_"):
set_index_parameter(
index.quantizer, name[name.find("_") + 1:], val)
else:
raise RuntimeError()
return
if index.__class__ == ShardedGPUIndex:
if name == "nprobe":
set_index_parameter(index.cpu_index, name, val)
elif name.startswith("quantizer_"):
set_index_parameter(
index.quantizer, name[name.find("_") + 1:], val)
else:
raise RuntimeError()
return
# then it's a Faiss index
index = faiss.downcast_index(index)
if isinstance(index, faiss.IndexPreTransform):
set_index_parameter(index.index, name, val)
elif isinstance(index, faiss.IndexShardsIVF):
if name != "nprobe" and name.startswith("quantizer_"):
set_index_parameter(
index.quantizer, name[name.find("_") + 1:], val)
else:
for i in range(index.count()):
sub_index = index.at(i)
set_index_parameter(sub_index, name, val)
elif (isinstance(index, faiss.IndexShards) or
isinstance(index, faiss.IndexReplicas)):
for i in range(index.count()):
sub_index = index.at(i)
set_index_parameter(sub_index, name, val)
elif name.startswith("quantizer_"):
index_ivf = extract_index_ivf(index)
set_index_parameter(
index_ivf.quantizer, name[name.find("_") + 1:], val)
elif name == "efSearch":
index.hnsw.efSearch
index.hnsw.efSearch = int(val)
elif name == "nprobe":
index_ivf = extract_index_ivf(index)
index_ivf.nprobe
index_ivf.nprobe = int(val)
else:
raise RuntimeError(f"could not set param {name} on {index}")
#####################################################################
# Driver routine
#####################################################################
def main():
parser = argparse.ArgumentParser()
def aa(*args, **kwargs):
group.add_argument(*args, **kwargs)
group = parser.add_argument_group('dataset options')
aa('--nq', type=int, default=int(10e5),
help="nb queries (queries will be duplicated if below that number")
aa('--db', default='bigann10M', help='dataset')
group = parser.add_argument_group('index options')
aa('--indexname', default="", help="override index name")
aa('--mmap', default=False, action='store_true', help='mmap index')
aa('--shard_type', default=1, type=int, help="set type of sharding")
aa('--useFloat16', default=False, action='store_true',
help='GPU cloner options')
aa('--useFloat16CoarseQuantizer', default=False, action='store_true',
help='GPU cloner options')
aa('--usePrecomputed', default=False, action='store_true',
help='GPU cloner options')
group = parser.add_argument_group('search options')
aa('--k', type=int, default=100)
aa('--search_type', default="cpu",
choices=[
"cpu", "gpu", "gpu_flat_quantizer",
"cpu_flat_gpu_quantizer", "gpu_tiled", "gpu_ivf_quantizer",
"multi_gpu", "multi_gpu_flat_quantizer",
"multi_gpu_sharded", "multi_gpu_flat_quantizer_sharded",
"multi_gpu_sharded1", "multi_gpu_sharded1_flat",
"multi_gpu_sharded1_ivf",
"multi_gpu_Csharded1", "multi_gpu_Csharded1_flat",
"multi_gpu_Csharded1_ivf",
],
help="how to search"
)
aa('--ivf_quant_nlist', type=int, default=1024,
help="nb of invlists for IVF quantizer")
aa('--batch_size', type=int, default=-1,
help="batch size for tiled CPU / GPU computation (-1= no tiling)")
aa('--n_autotune', type=int, default=300,
help="max nb of auto-tuning steps")
aa('--nt', type=int, default=-1, help="force number of CPU threads to this")
group = parser.add_argument_group('output options')
aa('--quiet', default=False, action="store_true")
aa('--stats', default="", help="pickle to store output stats")
args = parser.parse_args()
print("args:", args)
if not args.quiet:
# log some stats about the machine
os.system("grep -m1 'model name' < /proc/cpuinfo")
os.system("grep -E 'MemTotal|MemFree' /proc/meminfo")
os.system("nvidia-smi")
print("prepare dataset", args.db)
ds = dataset_from_name(args.db)
print(ds)
print("Faiss nb GPUs:", faiss.get_num_gpus())
xq = ds.get_queries()
if args.nq > len(xq):
xqx = []
n = 0
while n < args.nq:
xqx.append(xq[:args.nq - n])
n += len(xqx[-1])
print(f"increased nb queries from {len(xq)} to {n}")
xq = np.vstack(xqx)
if args.nt != -1:
print("setting nb openmp threads to", args.nt)
faiss.omp_set_num_threads(args.nt)
print("loading index")
if args.mmap:
io_flag = faiss.IO_FLAG_READ_ONLY | faiss.IO_FLAG_MMAP
else:
io_flag = 0
print(f"load index {args.indexname} {io_flag=:x}")
index = faiss.read_index(args.indexname, io_flag)
index_ivf = faiss.extract_index_ivf(index)
print("prepare index")
op = OperatingPointsWithRanges()
op.add_range(
"nprobe", [
2 ** i for i in range(20)
if 2 ** i < index_ivf.nlist * 0.1 and 2 ** i <= 4096
]
)
# prepare options for GPU clone
co = faiss.GpuMultipleClonerOptions()
co.useFloat16 = args.useFloat16
co.useFloat16CoarseQuantizer = args.useFloat16CoarseQuantizer
co.usePrecomputed = args.usePrecomputed
co.shard_type = args.shard_type
if args.search_type == "cpu":
op.add_range(
"quantizer_efSearch",
[2 ** i for i in range(10)]
)
elif args.search_type == "gpu":
print("move index to 1 GPU")
res = faiss.StandardGpuResources()
index = faiss.index_cpu_to_gpu(res, 0, index, co)
op.add_range(
"quantizer_efSearch",
[2 ** i for i in range(10)]
)
op.restrict_range("nprobe", 2049)
elif args.search_type == "gpu_tiled":
print("move index to 1 GPU")
new_quantizer = faiss.IndexFlatL2(index_ivf.d)
quantizer_hnsw = replace_ivf_quantizer(index_ivf, new_quantizer)
res = faiss.StandardGpuResources()
index = faiss.index_cpu_to_gpu(res, 0, index, co)
op.add_range(
"quantizer_efSearch",
[2 ** i for i in range(10)]
)
op.restrict_range("nprobe", 2049)
index = SeparateCoarseQuantizationIndex(
quantizer_hnsw, index, bs=args.batch_size)
elif args.search_type == "gpu_ivf_quantizer":
index_ivf = faiss.extract_index_ivf(index)
centroids = index_ivf.quantizer.reconstruct_n()
replace_ivf_quantizer(index_ivf, faiss.IndexFlatL2(index_ivf.d))
res = faiss.StandardGpuResources()
new_quantizer = faiss.index_factory(
index_ivf.d, f"IVF{args.ivf_quant_nlist},Flat")
new_quantizer.train(centroids)
new_quantizer.add(centroids)
index = SeparateCoarseQuantizationIndex(
faiss.index_cpu_to_gpu(res, 0, new_quantizer, co),
faiss.index_cpu_to_gpu(res, 0, index, co),
bs=args.batch_size, seq_tiling=True
)
op.add_range(
"quantizer_nprobe",
[2 ** i for i in range(9)]
)
op.restrict_range("nprobe", 1025)
elif args.search_type == "gpu_flat_quantizer":
index_ivf = faiss.extract_index_ivf(index)
new_quantizer = faiss.IndexFlatL2(index_ivf.d)
replace_ivf_quantizer(index_ivf, new_quantizer)
res = faiss.StandardGpuResources()
index = faiss.index_cpu_to_gpu(res, 0, index, co)
op.restrict_range("nprobe", 2049)
elif args.search_type == "cpu_flat_gpu_quantizer":
index_ivf = faiss.extract_index_ivf(index)
quantizer = faiss.IndexFlatL2(index_ivf.d)
res = faiss.StandardGpuResources()
quantizer = faiss.index_cpu_to_gpu(res, 0, quantizer, co)
index = SeparateCoarseQuantizationIndex(
quantizer, index, bs=args.batch_size)
op.restrict_range("nprobe", 2049)
elif args.search_type in ("multi_gpu", "multi_gpu_sharded"):
print(f"move index to {faiss.get_num_gpus()} GPU")
co.shard = "sharded" in args.search_type
index = faiss.index_cpu_to_all_gpus(index, co=co)
op.add_range(
"quantizer_efSearch",
[2 ** i for i in range(10)]
)
op.restrict_range("nprobe", 2049)
elif args.search_type in (
"multi_gpu_flat_quantizer", "multi_gpu_flat_quantizer_sharded"):
index_ivf = faiss.extract_index_ivf(index)
new_quantizer = faiss.IndexFlatL2(ds.d)
replace_ivf_quantizer(index_ivf, new_quantizer)
index = faiss.index_cpu_to_all_gpus(index, co=co)
op.restrict_range("nprobe", 2049)
elif args.search_type in (
"multi_gpu_sharded1", "multi_gpu_sharded1_flat",
"multi_gpu_sharded1_ivf"):
print(f"move index to {faiss.get_num_gpus()} GPU")
new_quantizer = faiss.IndexFlatL2(index_ivf.d)
hnsw_quantizer = replace_ivf_quantizer(index_ivf, new_quantizer)
co.shard
co.shard = True
gpus = list(range(faiss.get_num_gpus()))
res = [faiss.StandardGpuResources() for _ in gpus]
index = faiss.index_cpu_to_gpu_multiple_py(res, index, co, gpus)
op.restrict_range("nprobe", 2049)
if args.search_type == "multi_gpu_sharded1":
op.add_range(
"quantizer_efSearch",
[2 ** i for i in range(10)]
)
index = ShardedGPUIndex(hnsw_quantizer, index, bs=args.batch_size)
elif args.search_type == "multi_gpu_sharded1_ivf":
centroids = hnsw_quantizer.storage.reconstruct_n()
quantizer = faiss.index_factory(
centroids.shape[1], f"IVF{args.ivf_quant_nlist},Flat")
quantizer.train(centroids)
quantizer.add(centroids)
co.shard = False
quantizer = faiss.index_cpu_to_gpu_multiple_py(
res, quantizer, co, gpus)
index = ShardedGPUIndex(quantizer, index, bs=args.batch_size)
op.add_range(
"quantizer_nprobe",
[2 ** i for i in range(9)]
)
op.restrict_range("nprobe", 1025)
elif args.search_type == "multi_gpu_sharded1_flat":
quantizer = hnsw_quantizer.storage
quantizer = faiss.index_cpu_to_gpu_multiple_py(
res, quantizer, co, gpus)
index = ShardedGPUIndex(quantizer, index, bs=args.batch_size)
else:
raise RuntimeError()
elif args.search_type in (
"multi_gpu_Csharded1", "multi_gpu_Csharded1_flat",
"multi_gpu_Csharded1_ivf"):
print(f"move index to {faiss.get_num_gpus()} GPU")
co.shard = True
co.common_ivf_quantizer
co.common_ivf_quantizer = True
op.restrict_range("nprobe", 2049)
if args.search_type == "multi_gpu_Csharded1":
op.add_range(
"quantizer_efSearch",
[2 ** i for i in range(10)]
)
index = faiss.index_cpu_to_all_gpus(index, co)
elif args.search_type == "multi_gpu_Csharded1_flat":
new_quantizer = faiss.IndexFlatL2(index_ivf.d)
quantizer_hnsw = replace_ivf_quantizer(index_ivf, new_quantizer)
index = faiss.index_cpu_to_all_gpus(index, co)
elif args.search_type == "multi_gpu_Csharded1_ivf":
quantizer = faiss.index_factory(
index_ivf.d, f"IVF{args.ivf_quant_nlist},Flat")
quantizer_hnsw = replace_ivf_quantizer(index_ivf, quantizer)
op.add_range(
"quantizer_nprobe",
[2 ** i for i in range(9)]
)
index = faiss.index_cpu_to_all_gpus(index, co)
else:
raise RuntimeError()
else:
raise RuntimeError()
totex = op.num_experiments()
experiments = op.sample_experiments()
print(f"total nb experiments {totex}, running {len(experiments)}")
print("perform search")
gt = ds.get_groundtruth(100)
# piggyback on operating points so that this gets stored in the stats file
op.all_experiments = []
op.platform = {
"loadavg": open("/proc/loadavg", "r").readlines(),
"procesor": [l for l in open("/proc/cpuinfo") if "model name" in l][0],
"GPU": list(os.popen("nvidia-smi", "r")),
"mem": open("/proc/meminfo", "r").readlines(),
"pid": os.getpid()
}
op.args = args
if args.stats:
print(f"storing stats in {args.stats} after each experiment")
for cno in experiments:
key = op.cno_to_key(cno)
parameters = op.get_parameters(key)
print(f"{cno=:4d} {str(parameters):50}", end=": ", flush=True)
(max_perf, min_time) = op.predict_bounds(key)
if not op.is_pareto_optimal(max_perf, min_time):
print(f"SKIP, {max_perf=:.3f} {min_time=:.3f}", )
continue
for name, val in parameters.items():
set_index_parameter(index, name, val)
if cno == 0:
# warmup
for _ in range(5):
D, I = index.search(xq, 100)
t0 = time.time()
try:
D, I = index.search(xq, 100)
except RuntimeError as e:
print(f"ERROR {e}")
continue
t1 = time.time()
recalls = {}
for rank in 1, 10, 100:
recall = (gt[:, :1] == I[:ds.nq, :rank]).sum() / ds.nq
recalls[rank] = recall
print(f"time={t1 - t0:.3f} s recalls={recalls}")
perf = recalls[1]
op.add_operating_point(key, perf, t1 - t0)
op.all_experiments.append({
"cno": cno,
"key": key,
"parameters": parameters,
"time": t1 - t0,
"recalls": recalls
})
if args.stats:
pickle.dump(op, open(args.stats, "wb"))
if __name__ == "__main__":
main()
|
ShardedGPUIndex
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/types/beta/messages/beta_deleted_message_batch.py
|
{
"start": 201,
"end": 438
}
|
class ____(BaseModel):
id: str
"""ID of the Message Batch."""
type: Literal["message_batch_deleted"]
"""Deleted object type.
For Message Batches, this is always `"message_batch_deleted"`.
"""
|
BetaDeletedMessageBatch
|
python
|
doocs__leetcode
|
solution/2800-2899/2817.Minimum Absolute Difference Between Elements With Constraint/Solution.py
|
{
"start": 0,
"end": 405
}
|
class ____:
def minAbsoluteDifference(self, nums: List[int], x: int) -> int:
sl = SortedList()
ans = inf
for i in range(x, len(nums)):
sl.add(nums[i - x])
j = bisect_left(sl, nums[i])
if j < len(sl):
ans = min(ans, sl[j] - nums[i])
if j:
ans = min(ans, nums[i] - sl[j - 1])
return ans
|
Solution
|
python
|
huggingface__transformers
|
tests/models/timm_backbone/test_modeling_timm_backbone.py
|
{
"start": 1164,
"end": 2816
}
|
class ____:
def __init__(
self,
parent,
out_indices=None,
out_features=None,
stage_names=None,
backbone="resnet18",
batch_size=3,
image_size=32,
num_channels=3,
is_training=True,
use_pretrained_backbone=True,
):
self.parent = parent
self.out_indices = out_indices if out_indices is not None else [4]
self.stage_names = stage_names
self.out_features = out_features
self.backbone = backbone
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.use_pretrained_backbone = use_pretrained_backbone
self.is_training = is_training
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def get_config(self):
return TimmBackboneConfig(
image_size=self.image_size,
num_channels=self.num_channels,
out_features=self.out_features,
out_indices=self.out_indices,
stage_names=self.stage_names,
use_pretrained_backbone=self.use_pretrained_backbone,
backbone=self.backbone,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
|
TimmBackboneModelTester
|
python
|
numba__numba
|
numba/tests/test_function_type.py
|
{
"start": 17045,
"end": 20392
}
|
class ____(TestCase):
"""Test calling external library functions within Numba jit compiled
functions.
"""
def test_wrapper_address_protocol_libm(self):
"""Call cos and sinf from standard math library.
"""
import ctypes.util
class LibM(types.WrapperAddressProtocol):
def __init__(self, fname):
if IS_WIN32:
lib = ctypes.cdll.msvcrt
else:
libpath = ctypes.util.find_library('m')
lib = ctypes.cdll.LoadLibrary(libpath)
self.lib = lib
self._name = fname
if fname == 'cos':
# test for double-precision math function
addr = ctypes.cast(self.lib.cos, ctypes.c_voidp).value
signature = float64(float64)
elif fname == 'sinf':
# test for single-precision math function
# Other 32/64 bit platforms define sinf as the
# single-precision sin function
addr = ctypes.cast(self.lib.sinf, ctypes.c_voidp).value
signature = float32(float32)
else:
raise NotImplementedError(
f'wrapper address of `{fname}`'
f' with signature `{signature}`')
self._signature = signature
self._address = addr
def __repr__(self):
return f'{type(self).__name__}({self._name!r})'
def __wrapper_address__(self):
return self._address
def signature(self):
return self._signature
mycos = LibM('cos')
mysin = LibM('sinf')
def myeval(f, x):
return f(x)
# Not testing forceobj=True as it requires implementing
# LibM.__call__ using ctypes which would be out-of-scope here.
for jit_opts in [dict(nopython=True)]:
jit_ = jit(**jit_opts)
with self.subTest(jit=jit_opts):
if mycos.signature() is not None:
self.assertEqual(jit_(myeval)(mycos, 0.0), 1.0)
if mysin.signature() is not None:
self.assertEqual(jit_(myeval)(mysin, float32(0.0)), 0.0)
def test_compilation_results(self):
"""Turn the existing compilation results of a dispatcher instance to
first-class functions with precise types.
"""
@jit(nopython=True)
def add_template(x, y):
return x + y
# Trigger compilations
self.assertEqual(add_template(1, 2), 3)
self.assertEqual(add_template(1.2, 3.4), 4.6)
cres1, cres2 = add_template.overloads.values()
# Turn compilation results into first-class functions
iadd = types.CompileResultWAP(cres1)
fadd = types.CompileResultWAP(cres2)
@jit(nopython=True)
def foo(add, x, y):
return add(x, y)
@jit(forceobj=True)
def foo_obj(add, x, y):
return add(x, y)
self.assertEqual(foo(iadd, 3, 4), 7)
self.assertEqual(foo(fadd, 3.4, 4.5), 7.9)
self.assertEqual(foo_obj(iadd, 3, 4), 7)
self.assertEqual(foo_obj(fadd, 3.4, 4.5), 7.9)
|
TestFunctionTypeExtensions
|
python
|
scipy__scipy
|
scipy/interpolate/tests/test_rbfinterp.py
|
{
"start": 18817,
"end": 20046
}
|
class ____(_TestRBFInterpolator):
# RBFInterpolator using 20 nearest neighbors.
def build(self, *args, **kwargs):
return RBFInterpolator(*args, **kwargs, neighbors=20)
def test_equivalent_to_rbf_interpolator(self):
seq = Halton(2, scramble=False, seed=np.random.RandomState())
x = seq.random(100)
xitp = seq.random(100)
y = _2d_test_function(x, np)
yitp1 = self.build(x, y)(xitp)
yitp2 = []
tree = cKDTree(x)
for xi in xitp:
_, nbr = tree.query(xi, 20)
yitp2.append(RBFInterpolator(x[nbr], y[nbr])(xi[None])[0])
xp_assert_close(yitp1, yitp2, atol=1e-8)
def test_concurrency(self):
# Check that no segfaults appear with concurrent access to
# RbfInterpolator
seq = Halton(2, scramble=False, seed=np.random.RandomState(0))
x = seq.random(100)
xitp = seq.random(100)
y = _2d_test_function(x, np)
interp = self.build(x, y)
def worker_fn(_, interp, xp):
interp(xp)
_run_concurrent_barrier(10, worker_fn, interp, xitp)
@skip_xp_backends(np_only=True, reason="neighbors not None uses KDTree")
|
TestRBFInterpolatorNeighbors20
|
python
|
eth-brownie__brownie
|
brownie/test/managers/master.py
|
{
"start": 246,
"end": 3243
}
|
class ____(PytestBrownieBase):
"""
Brownie plugin xdist master hooks.
Hooks in this class are loaded by the master process when using xdist.
"""
def pytest_configure_node(self, node):
"""
Configure node information before it gets instantiated.
Here we can pass arbitrary information to xdist workers via the
`workerinput` dict.
"""
node.workerinput["network"] = CONFIG.argv["network"]
def pytest_xdist_make_scheduler(self, config, log):
"""
Return a node scheduler implementation.
Uses file scheduling to ensure consistent test execution with module-level
isolation.
"""
return LoadFileScheduling(config, log)
def pytest_xdist_node_collection_finished(self, ids):
"""
Called by the master node when a node finishes collecting.
* Generates the node map
* Populates `self.results` with previous test results. For tests that
are executed by one of the runners, these results will be overwritten.
"""
# required because pytest_collection_modifyitems is not called by master
self._make_nodemap(ids)
for path in self.node_map:
if path in self.tests and CONFIG.argv["update"]:
self.results[path] = list(self.tests[path]["results"])
else:
self.results[path] = ["s"] * len(self.node_map[path])
def pytest_sessionfinish(self, session):
"""
Called after whole test run finished, right before returning the exit
status to the system.
* Aggregates results from `build/tests-{workerid}.json` files and stores
them as `build/test.json`.
"""
if session.testscollected == 0:
raise pytest.UsageError(
"xdist workers failed to collect tests. Ensure all test cases are "
"isolated with the module_isolation or fn_isolation fixtures.\n\n"
"https://eth-brownie.readthedocs.io/en/stable/tests-pytest-intro.html#pytest-fixtures-isolation" # noqa e501
)
build_path = self.project._build_path
# aggregate worker test results
report = {"tests": {}, "contracts": self.contracts, "tx": {}}
for path in list(build_path.glob("tests-*.json")):
with path.open() as fp:
data = ujson_load(fp)
assert data["contracts"] == report["contracts"]
report["tests"].update(data["tests"])
report["tx"].update(data["tx"])
path.unlink()
# store worker coverage results - these are used in `pytest_terminal_summary`
for hash_, coverage_eval in report["tx"].items():
coverage._add_transaction(hash_, coverage_eval)
# save aggregate test results
with build_path.joinpath("tests.json").open("w") as fp:
ujson_dump(report, fp, indent=2, sort_keys=True, default=sorted)
|
PytestBrownieMaster
|
python
|
ray-project__ray
|
rllib/utils/spaces/repeated.py
|
{
"start": 107,
"end": 1110
}
|
class ____(gym.Space):
"""Represents a variable-length list of child spaces.
Example:
self.observation_space = spaces.Repeated(spaces.Box(4,), max_len=10)
--> from 0 to 10 boxes of shape (4,)
See also: documentation for rllib.models.RepeatedValues, which shows how
the lists are represented as batched input for ModelV2 classes.
"""
def __init__(self, child_space: gym.Space, max_len: int):
super().__init__()
self.child_space = child_space
self.max_len = max_len
def sample(self):
return [
self.child_space.sample()
for _ in range(self.np_random.integers(1, self.max_len + 1))
]
def contains(self, x):
return (
isinstance(x, (list, np.ndarray))
and len(x) <= self.max_len
and all(self.child_space.contains(c) for c in x)
)
def __repr__(self):
return "Repeated({}, {})".format(self.child_space, self.max_len)
|
Repeated
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-databricks/dagster_databricks/pipes.py
|
{
"start": 32588,
"end": 35611
}
|
class ____(PipesBlobStoreMessageReader):
"""Message reader that reads messages by periodically reading message chunks from an
automatically-generated temporary directory in Unity Catalog Volumes.
Args:
interval (float): interval in seconds between attempts to download a chunk
client (WorkspaceClient): A databricks `WorkspaceClient` object.
include_stdio_in_messages (bool): Whether to send stdout/stderr to Dagster via Pipes messages. Defaults to True.
"""
def __init__(
self,
*,
interval: float = 10,
client: WorkspaceClient,
volume_path: str,
include_stdio_in_messages: bool = True,
):
self.include_stdio_in_messages = check.bool_param(
include_stdio_in_messages, "include_stdio_in_messages"
)
super().__init__(
interval=interval,
)
self.files_client = files.FilesAPI(client.api_client)
self.volume_path = volume_path
@contextmanager
def get_params(self) -> Iterator[PipesParams]:
with ExitStack() as stack:
params: PipesParams = {}
params["path"] = stack.enter_context(
volumes_tempdir(self.files_client, self.volume_path)
)
params[PipesBlobStoreMessageWriter.INCLUDE_STDIO_IN_MESSAGES_KEY] = (
self.include_stdio_in_messages
)
yield params
def messages_are_readable(self, params: PipesParams) -> bool:
"""Check if the messages directory exists and is readable."""
try:
# The API returns an error if there is no directory at the specified path
self.files_client.list_directory_contents(params["path"])
return True
except Exception:
return False
def download_messages_chunk(self, index: int, params: PipesParams) -> Optional[str]:
"""Download a specific message chunk from Unity Catalog Volume."""
message_path = f"{params['path']}/{index}.json"
try:
download_response = self.files_client.download(message_path)
response_contents = check.not_none(
download_response.contents, "Read message with null data."
)
# Databricks sdk returns a BinaryIO object
return response_contents.read().decode("utf-8")
# An error here is an expected result, since an IOError will be thrown if the next message
# chunk doesn't yet exist. Swallowing the error here is equivalent to doing a no-op on a
# status check showing a non-existent file.
except OSError:
return None
def no_messages_debug_text(self) -> str:
return (
"Attempted to read messages from a temporary directory in Unity Catalog Volumes. Expected"
" PipesUnityCatalogVolumesMessageWriter to be explicitly passed to open_dagster_pipes in the external"
" process."
)
|
PipesUnityCatalogVolumesMessageReader
|
python
|
kamyu104__LeetCode-Solutions
|
Python/fruits-into-baskets-iii.py
|
{
"start": 63,
"end": 1864
}
|
class ____(object):
def numOfUnplacedFruits(self, fruits, baskets):
"""
:type fruits: List[int]
:type baskets: List[int]
:rtype: int
"""
class SegmentTree(object):
def __init__(self, N,
build_fn=lambda _: 0,
query_fn=lambda x, y: y if x is None else x if y is None else max(x, y),
update_fn=lambda x: x):
self.tree = [None]*(2*2**((N-1).bit_length()))
self.base = len(self.tree)//2
self.query_fn = query_fn
self.update_fn = update_fn
for i in xrange(self.base, self.base+N):
self.tree[i] = build_fn(i-self.base)
for i in reversed(xrange(1, self.base)):
self.tree[i] = query_fn(self.tree[2*i], self.tree[2*i+1])
def update(self, i, h):
x = self.base+i
self.tree[x] = self.update_fn(h)
while x > 1:
x //= 2
self.tree[x] = self.query_fn(self.tree[x*2], self.tree[x*2+1])
def binary_search(self, x):
if self.tree[1] < x:
return -1
i = 1
while not i >= self.base:
if self.tree[2*i] >= x:
i = 2*i
else:
i = 2*i+1
return i-self.base
def build(i):
return baskets[i]
st = SegmentTree(len(baskets), build_fn=build)
result = 0
for x in fruits:
i = st.binary_search(x)
if i == -1:
result += 1
else:
st.update(i, 0)
return result
|
Solution
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_replica_set_status.py
|
{
"start": 383,
"end": 10908
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'available_replicas': 'int',
'conditions': 'list[V1ReplicaSetCondition]',
'fully_labeled_replicas': 'int',
'observed_generation': 'int',
'ready_replicas': 'int',
'replicas': 'int',
'terminating_replicas': 'int'
}
attribute_map = {
'available_replicas': 'availableReplicas',
'conditions': 'conditions',
'fully_labeled_replicas': 'fullyLabeledReplicas',
'observed_generation': 'observedGeneration',
'ready_replicas': 'readyReplicas',
'replicas': 'replicas',
'terminating_replicas': 'terminatingReplicas'
}
def __init__(self, available_replicas=None, conditions=None, fully_labeled_replicas=None, observed_generation=None, ready_replicas=None, replicas=None, terminating_replicas=None, local_vars_configuration=None): # noqa: E501
"""V1ReplicaSetStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._available_replicas = None
self._conditions = None
self._fully_labeled_replicas = None
self._observed_generation = None
self._ready_replicas = None
self._replicas = None
self._terminating_replicas = None
self.discriminator = None
if available_replicas is not None:
self.available_replicas = available_replicas
if conditions is not None:
self.conditions = conditions
if fully_labeled_replicas is not None:
self.fully_labeled_replicas = fully_labeled_replicas
if observed_generation is not None:
self.observed_generation = observed_generation
if ready_replicas is not None:
self.ready_replicas = ready_replicas
self.replicas = replicas
if terminating_replicas is not None:
self.terminating_replicas = terminating_replicas
@property
def available_replicas(self):
"""Gets the available_replicas of this V1ReplicaSetStatus. # noqa: E501
The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set. # noqa: E501
:return: The available_replicas of this V1ReplicaSetStatus. # noqa: E501
:rtype: int
"""
return self._available_replicas
@available_replicas.setter
def available_replicas(self, available_replicas):
"""Sets the available_replicas of this V1ReplicaSetStatus.
The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set. # noqa: E501
:param available_replicas: The available_replicas of this V1ReplicaSetStatus. # noqa: E501
:type: int
"""
self._available_replicas = available_replicas
@property
def conditions(self):
"""Gets the conditions of this V1ReplicaSetStatus. # noqa: E501
Represents the latest available observations of a replica set's current state. # noqa: E501
:return: The conditions of this V1ReplicaSetStatus. # noqa: E501
:rtype: list[V1ReplicaSetCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1ReplicaSetStatus.
Represents the latest available observations of a replica set's current state. # noqa: E501
:param conditions: The conditions of this V1ReplicaSetStatus. # noqa: E501
:type: list[V1ReplicaSetCondition]
"""
self._conditions = conditions
@property
def fully_labeled_replicas(self):
"""Gets the fully_labeled_replicas of this V1ReplicaSetStatus. # noqa: E501
The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset. # noqa: E501
:return: The fully_labeled_replicas of this V1ReplicaSetStatus. # noqa: E501
:rtype: int
"""
return self._fully_labeled_replicas
@fully_labeled_replicas.setter
def fully_labeled_replicas(self, fully_labeled_replicas):
"""Sets the fully_labeled_replicas of this V1ReplicaSetStatus.
The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset. # noqa: E501
:param fully_labeled_replicas: The fully_labeled_replicas of this V1ReplicaSetStatus. # noqa: E501
:type: int
"""
self._fully_labeled_replicas = fully_labeled_replicas
@property
def observed_generation(self):
"""Gets the observed_generation of this V1ReplicaSetStatus. # noqa: E501
ObservedGeneration reflects the generation of the most recently observed ReplicaSet. # noqa: E501
:return: The observed_generation of this V1ReplicaSetStatus. # noqa: E501
:rtype: int
"""
return self._observed_generation
@observed_generation.setter
def observed_generation(self, observed_generation):
"""Sets the observed_generation of this V1ReplicaSetStatus.
ObservedGeneration reflects the generation of the most recently observed ReplicaSet. # noqa: E501
:param observed_generation: The observed_generation of this V1ReplicaSetStatus. # noqa: E501
:type: int
"""
self._observed_generation = observed_generation
@property
def ready_replicas(self):
"""Gets the ready_replicas of this V1ReplicaSetStatus. # noqa: E501
The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition. # noqa: E501
:return: The ready_replicas of this V1ReplicaSetStatus. # noqa: E501
:rtype: int
"""
return self._ready_replicas
@ready_replicas.setter
def ready_replicas(self, ready_replicas):
"""Sets the ready_replicas of this V1ReplicaSetStatus.
The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition. # noqa: E501
:param ready_replicas: The ready_replicas of this V1ReplicaSetStatus. # noqa: E501
:type: int
"""
self._ready_replicas = ready_replicas
@property
def replicas(self):
"""Gets the replicas of this V1ReplicaSetStatus. # noqa: E501
Replicas is the most recently observed number of non-terminating pods. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset # noqa: E501
:return: The replicas of this V1ReplicaSetStatus. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this V1ReplicaSetStatus.
Replicas is the most recently observed number of non-terminating pods. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset # noqa: E501
:param replicas: The replicas of this V1ReplicaSetStatus. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and replicas is None: # noqa: E501
raise ValueError("Invalid value for `replicas`, must not be `None`") # noqa: E501
self._replicas = replicas
@property
def terminating_replicas(self):
"""Gets the terminating_replicas of this V1ReplicaSetStatus. # noqa: E501
The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase. This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field. # noqa: E501
:return: The terminating_replicas of this V1ReplicaSetStatus. # noqa: E501
:rtype: int
"""
return self._terminating_replicas
@terminating_replicas.setter
def terminating_replicas(self, terminating_replicas):
"""Sets the terminating_replicas of this V1ReplicaSetStatus.
The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase. This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field. # noqa: E501
:param terminating_replicas: The terminating_replicas of this V1ReplicaSetStatus. # noqa: E501
:type: int
"""
self._terminating_replicas = terminating_replicas
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ReplicaSetStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ReplicaSetStatus):
return True
return self.to_dict() != other.to_dict()
|
V1ReplicaSetStatus
|
python
|
getlogbook__logbook
|
src/logbook/_fallback.py
|
{
"start": 1715,
"end": 1972
}
|
class ____:
def __init__(self, obj):
self.__obj = obj
def __enter__(self):
self.__obj.push_application()
return self.__obj
def __exit__(self, exc_type, exc_value, tb):
self.__obj.pop_application()
|
ApplicationBound
|
python
|
huggingface__transformers
|
tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py
|
{
"start": 1470,
"end": 3290
}
|
class ____:
def __init__(
self,
parent,
batch_size=7,
min_seq_length=400,
max_seq_length=2000,
feature_size=1,
padding_value=0.0,
sampling_rate=16000,
return_attention_mask=True,
do_normalize=True,
):
self.parent = parent
self.batch_size = batch_size
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
self.feature_size = feature_size
self.padding_value = padding_value
self.sampling_rate = sampling_rate
self.return_attention_mask = return_attention_mask
self.do_normalize = do_normalize
def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def prepare_inputs_for_common(self, equal_length=False, numpify=False):
def _flatten(list_of_lists):
return list(itertools.chain(*list_of_lists))
if equal_length:
speech_inputs = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
speech_inputs = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
]
if numpify:
speech_inputs = [np.asarray(x) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
|
ASTFeatureExtractionTester
|
python
|
tensorflow__tensorflow
|
tensorflow/dtensor/python/input_util.py
|
{
"start": 3615,
"end": 4060
}
|
class ____:
"""Specifies the tf.data service configuration to use.
Attributes:
dispatcher_address: a string specifying the address of the tf.data service
dispatcher server.
job_name: a non-empty string identifying the shared job that will be created
on tf.data service to process this dataset.
"""
dispatcher_address: str
job_name: str
# TODO(b/223275517): Add support for get_next_as_optional().
|
TFDataServiceConfig
|
python
|
pytorch__pytorch
|
test/distributed/algorithms/test_join.py
|
{
"start": 934,
"end": 2485
}
|
class ____(JoinHook):
r"""
Join hook for :class:`AllReducer`.
Arguments:
allreducer (AllReducer): the :class:`AllReducer` object using this
hook.
num_allreduces (int): the number of all-reduces to shadow per
iteration.
run_post_hook (bool): a flag enabling the post-hook logic.
"""
def __init__(self, allreducer, num_allreduces, run_post_hook):
self.allreducer = allreducer
self.num_allreduces = num_allreduces
self.run_post_hook = run_post_hook
def main_hook(self):
r"""
Shadows each all-reduce; the number of all-reduces is passed into the
constructor as ``num_allreduces``.
"""
device = self.allreducer.device
for _ in range(self.num_allreduces):
t = torch.zeros(1, device=device)
dist.all_reduce(t)
def post_hook(self, is_last_joiner: bool):
r"""
Broadcasts a tensor containing a magic constant ``AFTER_CONSTANT`` from
the last joiner to all other processes.
"""
if not self.run_post_hook:
return
rank = dist.get_rank(self.allreducer.process_group)
common_rank = self.allreducer.find_common_rank(rank, is_last_joiner)
device = self.allreducer.device
if rank == common_rank:
self.allreducer.post_hook_tensor = torch.tensor(
[AFTER_CONSTANT], device=device
)
dist.broadcast(self.allreducer.post_hook_tensor, src=common_rank)
|
AllReducerJoinHook
|
python
|
openai__openai-python
|
src/openai/types/responses/response_function_shell_call_output_content_param.py
|
{
"start": 323,
"end": 456
}
|
class ____(TypedDict, total=False):
type: Required[Literal["timeout"]]
"""The outcome type. Always `timeout`."""
|
OutcomeTimeout
|
python
|
pytorch__pytorch
|
torch/distributed/elastic/rendezvous/dynamic_rendezvous.py
|
{
"start": 6779,
"end": 7226
}
|
class ____:
"""Describe a node in the rendezvous.
Attributes:
addr:
The FQDN of the node or user specified local node address.
pid:
The id of the process in which the rendezvous handler runs.
local_id:
A process-wide unique id.
"""
addr: str
pid: int
local_id: int
def __repr__(self) -> str:
return f"{self.addr}_{self.pid}_{self.local_id}"
|
_NodeDesc
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-sigma/dagster_sigma/translator.py
|
{
"start": 2069,
"end": 2395
}
|
class ____:
"""Represents a Sigma dataset, a centralized data definition which can
contain aggregations or other manipulations.
https://help.sigmacomputing.com/docs/datasets
"""
properties: dict[str, Any]
columns: AbstractSet[str]
inputs: AbstractSet[str]
@whitelist_for_serdes
@record
|
SigmaDataset
|
python
|
astropy__astropy
|
astropy/io/votable/exceptions.py
|
{
"start": 17794,
"end": 18756
}
|
class ____(VOTableSpecWarning):
"""Invalid VOTable datatype.
Some VOTable files in the wild use non-standard datatype names. These
are mapped to standard ones using the following mapping::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' is not a valid VOTable datatype, should be '{}'"
default_args = ("x", "y")
# W14: Deprecated
|
W13
|
python
|
huggingface__transformers
|
src/transformers/models/video_llama_3/modular_video_llama_3.py
|
{
"start": 66306,
"end": 66426
}
|
class ____(Qwen2VLVideoProcessorInitKwargs):
use_token_compression: Optional[bool]
|
VideoLlama3VideoProcessorInitKwargs
|
python
|
plotly__plotly.py
|
plotly/graph_objs/scatterpolar/marker/_colorbar.py
|
{
"start": 233,
"end": 61749
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterpolar.marker"
_path_str = "scatterpolar.marker.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"labelalias",
"len",
"lenmode",
"minexponent",
"nticks",
"orientation",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"x",
"xanchor",
"xpad",
"xref",
"y",
"yanchor",
"ypad",
"yref",
}
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def orientation(self):
"""
Sets the orientation of the colorbar.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
    @property
    def separatethousands(self):
        """
        If "true", even 4-digit integers are separated
        The 'separatethousands' property must be specified as a bool
        (either True, or False)
        Returns
        -------
        bool
        """
        return self["separatethousands"]
    @separatethousands.setter
    def separatethousands(self, val):
        """Set the 'separatethousands' property."""
        self["separatethousands"] = val
    @property
    def showexponent(self):
        """
        If "all", all exponents are shown besides their significands.
        If "first", only the exponent of the first tick is shown. If
        "last", only the exponent of the last tick is shown. If "none",
        no exponents appear.
        The 'showexponent' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['all', 'first', 'last', 'none']
        Returns
        -------
        Any
        """
        return self["showexponent"]
    @showexponent.setter
    def showexponent(self, val):
        """Set the 'showexponent' property."""
        self["showexponent"] = val
    @property
    def showticklabels(self):
        """
        Determines whether or not the tick labels are drawn.
        The 'showticklabels' property must be specified as a bool
        (either True, or False)
        Returns
        -------
        bool
        """
        return self["showticklabels"]
    @showticklabels.setter
    def showticklabels(self, val):
        """Set the 'showticklabels' property."""
        self["showticklabels"] = val
    @property
    def showtickprefix(self):
        """
        If "all", all tick labels are displayed with a prefix. If
        "first", only the first tick is displayed with a prefix. If
        "last", only the last tick is displayed with a suffix. If
        "none", tick prefixes are hidden.
        The 'showtickprefix' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['all', 'first', 'last', 'none']
        Returns
        -------
        Any
        """
        return self["showtickprefix"]
    @showtickprefix.setter
    def showtickprefix(self, val):
        """Set the 'showtickprefix' property."""
        self["showtickprefix"] = val
    @property
    def showticksuffix(self):
        """
        Same as `showtickprefix` but for tick suffixes.
        The 'showticksuffix' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['all', 'first', 'last', 'none']
        Returns
        -------
        Any
        """
        return self["showticksuffix"]
    @showticksuffix.setter
    def showticksuffix(self, val):
        """Set the 'showticksuffix' property."""
        self["showticksuffix"] = val
    @property
    def thickness(self):
        """
        Sets the thickness of the color bar This measure excludes the
        size of the padding, ticks and labels.
        The 'thickness' property is a number and may be specified as:
          - An int or float in the interval [0, inf]
        Returns
        -------
        int|float
        """
        return self["thickness"]
    @thickness.setter
    def thickness(self, val):
        """Set the 'thickness' property."""
        self["thickness"] = val
    @property
    def thicknessmode(self):
        """
        Determines whether this color bar's thickness (i.e. the measure
        in the constant color direction) is set in units of plot
        "fraction" or in "pixels". Use `thickness` to set the value.
        The 'thicknessmode' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['fraction', 'pixels']
        Returns
        -------
        Any
        """
        return self["thicknessmode"]
    @thicknessmode.setter
    def thicknessmode(self, val):
        """Set the 'thicknessmode' property."""
        self["thicknessmode"] = val
    @property
    def tick0(self):
        """
        Sets the placement of the first tick on this axis. Use with
        `dtick`. If the axis `type` is "log", then you must take the
        log of your starting tick (e.g. to set the starting tick to
        100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
        `dtick` for more info). If the axis `type` is "date", it should
        be a date string, like date data. If the axis `type` is
        "category", it should be a number, using the scale where each
        category is assigned a serial number from zero in the order it
        appears.
        The 'tick0' property accepts values of any type
        Returns
        -------
        Any
        """
        return self["tick0"]
    @tick0.setter
    def tick0(self, val):
        """Set the 'tick0' property."""
        self["tick0"] = val
    @property
    def tickangle(self):
        """
        Sets the angle of the tick labels with respect to the
        horizontal. For example, a `tickangle` of -90 draws the tick
        labels vertically.
        The 'tickangle' property is a angle (in degrees) that may be
        specified as a number between -180 and 180.
        Numeric values outside this range are converted to the equivalent value
        (e.g. 270 is converted to -90).
        Returns
        -------
        int|float
        """
        return self["tickangle"]
    @tickangle.setter
    def tickangle(self, val):
        """Set the 'tickangle' property."""
        self["tickangle"] = val
    @property
    def tickcolor(self):
        """
        Sets the tick color.
        The 'tickcolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color: see https://plotly.com/python/css-colors/ for a list
        Returns
        -------
        str
        """
        return self["tickcolor"]
    @tickcolor.setter
    def tickcolor(self, val):
        """Set the 'tickcolor' property."""
        self["tickcolor"] = val
    @property
    def tickfont(self):
        """
        Sets the color bar's tick label font
        The 'tickfont' property is an instance of Tickfont
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.scatterpolar.marker.colorbar.Tickfont`
          - A dict of string/value properties that will be passed
            to the Tickfont constructor
        Returns
        -------
        plotly.graph_objs.scatterpolar.marker.colorbar.Tickfont
        """
        return self["tickfont"]
    @tickfont.setter
    def tickfont(self, val):
        """Set the 'tickfont' property."""
        self["tickfont"] = val
    @property
    def tickformat(self):
        """
        Sets the tick label formatting rule using d3 formatting mini-
        languages which are very similar to those in Python. For
        numbers, see:
        https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
        dates see: https://github.com/d3/d3-time-
        format/tree/v2.2.3#locale_format. We add two items to d3's date
        formatter: "%h" for half of the year as a decimal number as
        well as "%{n}f" for fractional seconds with n digits. For
        example, *2016-10-13 09:15:23.456* with tickformat
        "%H~%M~%S.%2f" would display "09~15~23.46"
        The 'tickformat' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string
        Returns
        -------
        str
        """
        return self["tickformat"]
    @tickformat.setter
    def tickformat(self, val):
        """Set the 'tickformat' property."""
        self["tickformat"] = val
    @property
    def tickformatstops(self):
        """
        The 'tickformatstops' property is a tuple of instances of
        Tickformatstop that may be specified as:
          - A list or tuple of instances of plotly.graph_objs.scatterpolar.marker.colorbar.Tickformatstop
          - A list or tuple of dicts of string/value properties that
            will be passed to the Tickformatstop constructor
        Returns
        -------
        tuple[plotly.graph_objs.scatterpolar.marker.colorbar.Tickformatstop]
        """
        return self["tickformatstops"]
    @tickformatstops.setter
    def tickformatstops(self, val):
        """Set the 'tickformatstops' property."""
        self["tickformatstops"] = val
    @property
    def tickformatstopdefaults(self):
        """
        When used in a template (as layout.template.data.scatterpolar.m
        arker.colorbar.tickformatstopdefaults), sets the default
        property values to use for elements of
        scatterpolar.marker.colorbar.tickformatstops
        The 'tickformatstopdefaults' property is an instance of Tickformatstop
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.scatterpolar.marker.colorbar.Tickformatstop`
          - A dict of string/value properties that will be passed
            to the Tickformatstop constructor
        Returns
        -------
        plotly.graph_objs.scatterpolar.marker.colorbar.Tickformatstop
        """
        return self["tickformatstopdefaults"]
    @tickformatstopdefaults.setter
    def tickformatstopdefaults(self, val):
        """Set the 'tickformatstopdefaults' property."""
        self["tickformatstopdefaults"] = val
    @property
    def ticklabeloverflow(self):
        """
        Determines how we handle tick labels that would overflow either
        the graph div or the domain of the axis. The default value for
        inside tick labels is *hide past domain*. In other cases the
        default is *hide past div*.
        The 'ticklabeloverflow' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['allow', 'hide past div', 'hide past domain']
        Returns
        -------
        Any
        """
        return self["ticklabeloverflow"]
    @ticklabeloverflow.setter
    def ticklabeloverflow(self, val):
        """Set the 'ticklabeloverflow' property."""
        self["ticklabeloverflow"] = val
    @property
    def ticklabelposition(self):
        """
        Determines where tick labels are drawn relative to the ticks.
        Left and right options are used when `orientation` is "h", top
        and bottom when `orientation` is "v".
        The 'ticklabelposition' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['outside', 'inside', 'outside top', 'inside top',
                'outside left', 'inside left', 'outside right', 'inside
                right', 'outside bottom', 'inside bottom']
        Returns
        -------
        Any
        """
        return self["ticklabelposition"]
    @ticklabelposition.setter
    def ticklabelposition(self, val):
        """Set the 'ticklabelposition' property."""
        self["ticklabelposition"] = val
    @property
    def ticklabelstep(self):
        """
        Sets the spacing between tick labels as compared to the spacing
        between ticks. A value of 1 (default) means each tick gets a
        label. A value of 2 means shows every 2nd label. A larger value
        n means only every nth tick is labeled. `tick0` determines
        which labels are shown. Not implemented for axes with `type`
        "log" or "multicategory", or when `tickmode` is "array".
        The 'ticklabelstep' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [1, 9223372036854775807]
        Returns
        -------
        int
        """
        return self["ticklabelstep"]
    @ticklabelstep.setter
    def ticklabelstep(self, val):
        """Set the 'ticklabelstep' property."""
        self["ticklabelstep"] = val
    @property
    def ticklen(self):
        """
        Sets the tick length (in px).
        The 'ticklen' property is a number and may be specified as:
          - An int or float in the interval [0, inf]
        Returns
        -------
        int|float
        """
        return self["ticklen"]
    @ticklen.setter
    def ticklen(self, val):
        """Set the 'ticklen' property."""
        self["ticklen"] = val
    @property
    def tickmode(self):
        """
        Sets the tick mode for this axis. If "auto", the number of
        ticks is set via `nticks`. If "linear", the placement of the
        ticks is determined by a starting position `tick0` and a tick
        step `dtick` ("linear" is the default value if `tick0` and
        `dtick` are provided). If "array", the placement of the ticks
        is set via `tickvals` and the tick text is `ticktext`. ("array"
        is the default value if `tickvals` is provided).
        The 'tickmode' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['auto', 'linear', 'array']
        Returns
        -------
        Any
        """
        return self["tickmode"]
    @tickmode.setter
    def tickmode(self, val):
        """Set the 'tickmode' property."""
        self["tickmode"] = val
    @property
    def tickprefix(self):
        """
        Sets a tick label prefix.
        The 'tickprefix' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string
        Returns
        -------
        str
        """
        return self["tickprefix"]
    @tickprefix.setter
    def tickprefix(self, val):
        """Set the 'tickprefix' property."""
        self["tickprefix"] = val
    @property
    def ticks(self):
        """
        Determines whether ticks are drawn or not. If "", this axis'
        ticks are not drawn. If "outside" ("inside"), this axis' are
        drawn outside (inside) the axis lines.
        The 'ticks' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['outside', 'inside', '']
        Returns
        -------
        Any
        """
        return self["ticks"]
    @ticks.setter
    def ticks(self, val):
        """Set the 'ticks' property."""
        self["ticks"] = val
    @property
    def ticksuffix(self):
        """
        Sets a tick label suffix.
        The 'ticksuffix' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string
        Returns
        -------
        str
        """
        return self["ticksuffix"]
    @ticksuffix.setter
    def ticksuffix(self, val):
        """Set the 'ticksuffix' property."""
        self["ticksuffix"] = val
    @property
    def ticktext(self):
        """
        Sets the text displayed at the ticks position via `tickvals`.
        Only has an effect if `tickmode` is set to "array". Used with
        `tickvals`.
        The 'ticktext' property is an array that may be specified as a tuple,
        list, numpy array, or pandas Series
        Returns
        -------
        numpy.ndarray
        """
        return self["ticktext"]
    @ticktext.setter
    def ticktext(self, val):
        """Set the 'ticktext' property."""
        self["ticktext"] = val
    @property
    def ticktextsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `ticktext`.
        The 'ticktextsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["ticktextsrc"]
    @ticktextsrc.setter
    def ticktextsrc(self, val):
        """Set the 'ticktextsrc' property."""
        self["ticktextsrc"] = val
    @property
    def tickvals(self):
        """
        Sets the values at which ticks on this axis appear. Only has an
        effect if `tickmode` is set to "array". Used with `ticktext`.
        The 'tickvals' property is an array that may be specified as a tuple,
        list, numpy array, or pandas Series
        Returns
        -------
        numpy.ndarray
        """
        return self["tickvals"]
    @tickvals.setter
    def tickvals(self, val):
        """Set the 'tickvals' property."""
        self["tickvals"] = val
    @property
    def tickvalssrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `tickvals`.
        The 'tickvalssrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["tickvalssrc"]
    @tickvalssrc.setter
    def tickvalssrc(self, val):
        """Set the 'tickvalssrc' property."""
        self["tickvalssrc"] = val
    @property
    def tickwidth(self):
        """
        Sets the tick width (in px).
        The 'tickwidth' property is a number and may be specified as:
          - An int or float in the interval [0, inf]
        Returns
        -------
        int|float
        """
        return self["tickwidth"]
    @tickwidth.setter
    def tickwidth(self, val):
        """Set the 'tickwidth' property."""
        self["tickwidth"] = val
    @property
    def title(self):
        """
        The 'title' property is an instance of Title
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.scatterpolar.marker.colorbar.Title`
          - A dict of string/value properties that will be passed
            to the Title constructor
        Returns
        -------
        plotly.graph_objs.scatterpolar.marker.colorbar.Title
        """
        return self["title"]
    @title.setter
    def title(self, val):
        """Set the 'title' property."""
        self["title"] = val
    @property
    def x(self):
        """
        Sets the x position with respect to `xref` of the color bar (in
        plot fraction). When `xref` is "paper", defaults to 1.02 when
        `orientation` is "v" and 0.5 when `orientation` is "h". When
        `xref` is "container", defaults to 1 when `orientation` is "v"
        and 0.5 when `orientation` is "h". Must be between 0 and 1 if
        `xref` is "container" and between "-2" and 3 if `xref` is
        "paper".
        The 'x' property is a number and may be specified as:
          - An int or float
        Returns
        -------
        int|float
        """
        return self["x"]
    @x.setter
    def x(self, val):
        """Set the 'x' property."""
        self["x"] = val
    @property
    def xanchor(self):
        """
        Sets this color bar's horizontal position anchor. This anchor
        binds the `x` position to the "left", "center" or "right" of
        the color bar. Defaults to "left" when `orientation` is "v" and
        "center" when `orientation` is "h".
        The 'xanchor' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['left', 'center', 'right']
        Returns
        -------
        Any
        """
        return self["xanchor"]
    @xanchor.setter
    def xanchor(self, val):
        """Set the 'xanchor' property."""
        self["xanchor"] = val
    @property
    def xpad(self):
        """
        Sets the amount of padding (in px) along the x direction.
        The 'xpad' property is a number and may be specified as:
          - An int or float in the interval [0, inf]
        Returns
        -------
        int|float
        """
        return self["xpad"]
    @xpad.setter
    def xpad(self, val):
        """Set the 'xpad' property."""
        self["xpad"] = val
    @property
    def xref(self):
        """
        Sets the container `x` refers to. "container" spans the entire
        `width` of the plot. "paper" refers to the width of the
        plotting area only.
        The 'xref' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['container', 'paper']
        Returns
        -------
        Any
        """
        return self["xref"]
    @xref.setter
    def xref(self, val):
        """Set the 'xref' property."""
        self["xref"] = val
    @property
    def y(self):
        """
        Sets the y position with respect to `yref` of the color bar (in
        plot fraction). When `yref` is "paper", defaults to 0.5 when
        `orientation` is "v" and 1.02 when `orientation` is "h". When
        `yref` is "container", defaults to 0.5 when `orientation` is
        "v" and 1 when `orientation` is "h". Must be between 0 and 1 if
        `yref` is "container" and between "-2" and 3 if `yref` is
        "paper".
        The 'y' property is a number and may be specified as:
          - An int or float
        Returns
        -------
        int|float
        """
        return self["y"]
    @y.setter
    def y(self, val):
        """Set the 'y' property."""
        self["y"] = val
    @property
    def yanchor(self):
        """
        Sets this color bar's vertical position anchor This anchor
        binds the `y` position to the "top", "middle" or "bottom" of
        the color bar. Defaults to "middle" when `orientation` is "v"
        and "bottom" when `orientation` is "h".
        The 'yanchor' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['top', 'middle', 'bottom']
        Returns
        -------
        Any
        """
        return self["yanchor"]
    @yanchor.setter
    def yanchor(self, val):
        """Set the 'yanchor' property."""
        self["yanchor"] = val
    @property
    def ypad(self):
        """
        Sets the amount of padding (in px) along the y direction.
        The 'ypad' property is a number and may be specified as:
          - An int or float in the interval [0, inf]
        Returns
        -------
        int|float
        """
        return self["ypad"]
    @ypad.setter
    def ypad(self, val):
        """Set the 'ypad' property."""
        self["ypad"] = val
    @property
    def yref(self):
        """
        Sets the container `y` refers to. "container" spans the entire
        `height` of the plot. "paper" refers to the height of the
        plotting area only.
        The 'yref' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['container', 'paper']
        Returns
        -------
        Any
        """
        return self["yref"]
    @yref.setter
    def yref(self, val):
        """Set the 'yref' property."""
        self["yref"] = val
    @property
    def _prop_descriptions(self):
        # Plain-text, indented summary of every colorbar property. The string
        # below is returned verbatim (the trailing backslash after the opening
        # quotes suppresses the leading newline) — do not edit its wording or
        # indentation by hand; it mirrors the per-property docstrings above.
        return """\
        bgcolor
            Sets the color of padded area.
        bordercolor
            Sets the axis line color.
        borderwidth
            Sets the width (in px) or the border enclosing this
            color bar.
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        exponentformat
            Determines a formatting rule for the tick exponents.
            For example, consider the number 1,000,000,000. If
            "none", it appears as 1,000,000,000. If "e", 1e+9. If
            "E", 1E+9. If "power", 1x10^9 (with 9 in a super
            script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
            from "femto" f (10^-15) to "tera" T (10^12). *SI
            extended* covers instead the full SI range from
            "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
            *SI extended* is used and the exponent is beyond the
            above ranges, the formatting rule will automatically be
            switched to the power notation.
        labelalias
            Replacement text for specific tick or hover labels. For
            example using {US: 'USA', CA: 'Canada'} changes US to
            USA and CA to Canada. The labels we would have shown
            must match the keys exactly, after adding any
            tickprefix or ticksuffix. For negative numbers the
            minus sign symbol used (U+2212) is wider than the
            regular ascii dash. That means you need to use −1
            instead of -1. labelalias can be used with any axis
            type, and both keys (if needed) and values (if desired)
            can include html-like tags or MathJax.
        len
            Sets the length of the color bar This measure excludes
            the padding of both ends. That is, the color bar length
            is this length minus the padding on both ends.
        lenmode
            Determines whether this color bar's length (i.e. the
            measure in the color variation direction) is set in
            units of plot "fraction" or in *pixels. Use `len` to
            set the value.
        minexponent
            Hide SI prefix for 10^n if |n| is below this number.
            This only has an effect when `tickformat` is "SI" or
            "B".
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        orientation
            Sets the orientation of the colorbar.
        outlinecolor
            Sets the axis line color.
        outlinewidth
            Sets the width (in px) of the axis line.
        separatethousands
            If "true", even 4-digit integers are separated
        showexponent
            If "all", all exponents are shown besides their
            significands. If "first", only the exponent of the
            first tick is shown. If "last", only the exponent of
            the last tick is shown. If "none", no exponents appear.
        showticklabels
            Determines whether or not the tick labels are drawn.
        showtickprefix
            If "all", all tick labels are displayed with a prefix.
            If "first", only the first tick is displayed with a
            prefix. If "last", only the last tick is displayed with
            a suffix. If "none", tick prefixes are hidden.
        showticksuffix
            Same as `showtickprefix` but for tick suffixes.
        thickness
            Sets the thickness of the color bar This measure
            excludes the size of the padding, ticks and labels.
        thicknessmode
            Determines whether this color bar's thickness (i.e. the
            measure in the constant color direction) is set in
            units of plot "fraction" or in "pixels". Use
            `thickness` to set the value.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickangle
            Sets the angle of the tick labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            tick labels vertically.
        tickcolor
            Sets the tick color.
        tickfont
            Sets the color bar's tick label font
        tickformat
            Sets the tick label formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display "09~15~23.46"
        tickformatstops
            A tuple of :class:`plotly.graph_objects.scatterpolar.ma
            rker.colorbar.Tickformatstop` instances or dicts with
            compatible properties
        tickformatstopdefaults
            When used in a template (as layout.template.data.scatte
            rpolar.marker.colorbar.tickformatstopdefaults), sets
            the default property values to use for elements of
            scatterpolar.marker.colorbar.tickformatstops
        ticklabeloverflow
            Determines how we handle tick labels that would
            overflow either the graph div or the domain of the
            axis. The default value for inside tick labels is *hide
            past domain*. In other cases the default is *hide past
            div*.
        ticklabelposition
            Determines where tick labels are drawn relative to the
            ticks. Left and right options are used when
            `orientation` is "h", top and bottom when `orientation`
            is "v".
        ticklabelstep
            Sets the spacing between tick labels as compared to the
            spacing between ticks. A value of 1 (default) means
            each tick gets a label. A value of 2 means shows every
            2nd label. A larger value n means only every nth tick
            is labeled. `tick0` determines which labels are shown.
            Not implemented for axes with `type` "log" or
            "multicategory", or when `tickmode` is "array".
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        tickprefix
            Sets a tick label prefix.
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        ticksuffix
            Sets a tick label suffix.
        ticktext
            Sets the text displayed at the ticks position via
            `tickvals`. Only has an effect if `tickmode` is set to
            "array". Used with `tickvals`.
        ticktextsrc
            Sets the source reference on Chart Studio Cloud for
            `ticktext`.
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on Chart Studio Cloud for
            `tickvals`.
        tickwidth
            Sets the tick width (in px).
        title
            :class:`plotly.graph_objects.scatterpolar.marker.colorb
            ar.Title` instance or dict with compatible properties
        x
            Sets the x position with respect to `xref` of the color
            bar (in plot fraction). When `xref` is "paper",
            defaults to 1.02 when `orientation` is "v" and 0.5 when
            `orientation` is "h". When `xref` is "container",
            defaults to 1 when `orientation` is "v" and 0.5 when
            `orientation` is "h". Must be between 0 and 1 if `xref`
            is "container" and between "-2" and 3 if `xref` is
            "paper".
        xanchor
            Sets this color bar's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the color bar. Defaults to "left" when
            `orientation` is "v" and "center" when `orientation` is
            "h".
        xpad
            Sets the amount of padding (in px) along the x
            direction.
        xref
            Sets the container `x` refers to. "container" spans the
            entire `width` of the plot. "paper" refers to the width
            of the plotting area only.
        y
            Sets the y position with respect to `yref` of the color
            bar (in plot fraction). When `yref` is "paper",
            defaults to 0.5 when `orientation` is "v" and 1.02 when
            `orientation` is "h". When `yref` is "container",
            defaults to 0.5 when `orientation` is "v" and 1 when
            `orientation` is "h". Must be between 0 and 1 if `yref`
            is "container" and between "-2" and 3 if `yref` is
            "paper".
        yanchor
            Sets this color bar's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the color bar. Defaults to "middle" when
            `orientation` is "v" and "bottom" when `orientation` is
            "h".
        ypad
            Sets the amount of padding (in px) along the y
            direction.
        yref
            Sets the container `y` refers to. "container" spans the
            entire `height` of the plot. "paper" refers to the
            height of the plotting area only.
        """
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
labelalias=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
orientation=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
x=None,
xanchor=None,
xpad=None,
xref=None,
y=None,
yanchor=None,
ypad=None,
yref=None,
**kwargs,
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterpolar.marker.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.scatterpolar.ma
rker.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.scatte
rpolar.marker.colorbar.tickformatstopdefaults), sets
the default property values to use for elements of
scatterpolar.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.scatterpolar.marker.colorb
ar.Title` instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
Returns
-------
ColorBar
"""
super().__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolar.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.marker.ColorBar`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("borderwidth", arg, borderwidth)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("len", arg, len)
self._set_property("lenmode", arg, lenmode)
self._set_property("minexponent", arg, minexponent)
self._set_property("nticks", arg, nticks)
self._set_property("orientation", arg, orientation)
self._set_property("outlinecolor", arg, outlinecolor)
self._set_property("outlinewidth", arg, outlinewidth)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showexponent", arg, showexponent)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("thickness", arg, thickness)
self._set_property("thicknessmode", arg, thicknessmode)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabeloverflow", arg, ticklabeloverflow)
self._set_property("ticklabelposition", arg, ticklabelposition)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("x", arg, x)
self._set_property("xanchor", arg, xanchor)
self._set_property("xpad", arg, xpad)
self._set_property("xref", arg, xref)
self._set_property("y", arg, y)
self._set_property("yanchor", arg, yanchor)
self._set_property("ypad", arg, ypad)
self._set_property("yref", arg, yref)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
ColorBar
|
python
|
dask__dask
|
dask/array/core.py
|
{
"start": 198746,
"end": 202332
}
|
class ____:
"""An array-like interface to the blocks of an array.
``BlockView`` provides an array-like interface
to the blocks of a dask array. Numpy-style indexing of a
``BlockView`` returns a selection of blocks as a new dask array.
You can index ``BlockView`` like a numpy array of shape
equal to the number of blocks in each dimension, (available as
array.blocks.size). The dimensionality of the output array matches
the dimension of this array, even if integer indices are passed.
Slicing with ``np.newaxis`` or multiple lists is not supported.
Examples
--------
>>> import dask.array as da
>>> from dask.array.core import BlockView
>>> x = da.arange(8, chunks=2)
>>> bv = BlockView(x)
>>> bv.shape # aliases x.numblocks
(4,)
>>> bv.size
4
>>> bv[0].compute()
array([0, 1])
>>> bv[:3].compute()
array([0, 1, 2, 3, 4, 5])
>>> bv[::2].compute()
array([0, 1, 4, 5])
>>> bv[[-1, 0]].compute()
array([6, 7, 0, 1])
>>> bv.ravel() # doctest: +NORMALIZE_WHITESPACE
[dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>,
dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>,
dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>,
dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>]
Returns
-------
An instance of ``da.array.Blockview``
"""
def __init__(self, array: Array):
self._array = array
def __getitem__(self, index: Any) -> Array:
from dask.array.slicing import normalize_index
if not isinstance(index, tuple):
index = (index,)
if sum(isinstance(ind, (np.ndarray, list)) for ind in index) > 1:
raise ValueError("Can only slice with a single list")
if any(ind is None for ind in index):
raise ValueError("Slicing with np.newaxis or None is not supported")
index = normalize_index(index, self._array.numblocks)
index = tuple(
slice(k, k + 1) if isinstance(k, Number) else k # type: ignore[operator]
for k in index
)
name = "blocks-" + tokenize(self._array, index)
new_keys = self._array._key_array[index]
chunks = tuple(
tuple(np.array(c)[i].tolist()) for c, i in zip(self._array.chunks, index)
)
keys = product(*(range(len(c)) for c in chunks))
graph: Graph = {(name,) + key: tuple(new_keys[key].tolist()) for key in keys}
hlg = HighLevelGraph.from_collections(name, graph, dependencies=[self._array])
return Array(hlg, name, chunks, meta=self._array)
def __eq__(self, other: object) -> bool:
if isinstance(other, BlockView):
return self._array is other._array
else:
return NotImplemented
@property
def size(self) -> int:
"""
The total number of blocks in the array.
"""
return math.prod(self.shape)
@property
def shape(self) -> tuple[int, ...]:
"""
The number of blocks per axis. Alias of ``dask.array.numblocks``.
"""
return self._array.numblocks
def ravel(self) -> list[Array]:
"""
Return a flattened list of all the blocks in the array in C order.
"""
return [self[idx] for idx in np.ndindex(self.shape)]
def _numpy_vindex(indexer, arr):
return arr[indexer]
from dask.array.blockwise import blockwise
|
BlockView
|
python
|
agronholm__apscheduler
|
tests/test_marshalling.py
|
{
"start": 270,
"end": 569
}
|
class ____:
def meth(self):
pass
@staticmethod
def staticmeth():
pass
@classmethod
def classmeth(cls):
pass
def __call__(self):
pass
class InnerDummyClass:
@classmethod
def innerclassmeth(cls):
pass
|
DummyClass
|
python
|
doocs__leetcode
|
solution/0300-0399/0337.House Robber III/Solution.py
|
{
"start": 192,
"end": 543
}
|
class ____:
def rob(self, root: Optional[TreeNode]) -> int:
def dfs(root: Optional[TreeNode]) -> (int, int):
if root is None:
return 0, 0
la, lb = dfs(root.left)
ra, rb = dfs(root.right)
return root.val + lb + rb, max(la, lb) + max(ra, rb)
return max(dfs(root))
|
Solution
|
python
|
ray-project__ray
|
python/ray/dashboard/modules/job/common.py
|
{
"start": 22378,
"end": 22481
}
|
class ____:
deleted: bool
# TODO(jiaodong): Support log streaming #19415
@dataclass
|
JobDeleteResponse
|
python
|
numpy__numpy
|
numpy/_core/tests/test_numeric.py
|
{
"start": 79166,
"end": 83869
}
|
class ____:
def test_base3(self):
assert_equal(np.base_repr(3**5, 3), '100000')
def test_positive(self):
assert_equal(np.base_repr(12, 10), '12')
assert_equal(np.base_repr(12, 10, 4), '000012')
assert_equal(np.base_repr(12, 4), '30')
assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW')
def test_negative(self):
assert_equal(np.base_repr(-12, 10), '-12')
assert_equal(np.base_repr(-12, 10, 4), '-000012')
assert_equal(np.base_repr(-12, 4), '-30')
def test_base_range(self):
with assert_raises(ValueError):
np.base_repr(1, 1)
with assert_raises(ValueError):
np.base_repr(1, 37)
def test_minimal_signed_int(self):
assert_equal(np.base_repr(np.int8(-128)), '-10000000')
def _test_array_equal_parametrizations():
"""
we pre-create arrays as we sometime want to pass the same instance
and sometime not. Passing the same instances may not mean the array are
equal, especially when containing None
"""
# those are 0-d arrays, it used to be a special case
# where (e0 == e0).all() would raise
e0 = np.array(0, dtype="int")
e1 = np.array(1, dtype="float")
# x,y, nan_equal, expected_result
yield (e0, e0.copy(), None, True)
yield (e0, e0.copy(), False, True)
yield (e0, e0.copy(), True, True)
#
yield (e1, e1.copy(), None, True)
yield (e1, e1.copy(), False, True)
yield (e1, e1.copy(), True, True)
# Non-nanable - those cannot hold nans
a12 = np.array([1, 2])
a12b = a12.copy()
a123 = np.array([1, 2, 3])
a13 = np.array([1, 3])
a34 = np.array([3, 4])
aS1 = np.array(["a"], dtype="S1")
aS1b = aS1.copy()
aS1u4 = np.array([("a", 1)], dtype="S1,u4")
aS1u4b = aS1u4.copy()
yield (a12, a12b, None, True)
yield (a12, a12, None, True)
yield (a12, a123, None, False)
yield (a12, a34, None, False)
yield (a12, a13, None, False)
yield (aS1, aS1b, None, True)
yield (aS1, aS1, None, True)
# Non-float dtype - equal_nan should have no effect,
yield (a123, a123, None, True)
yield (a123, a123, False, True)
yield (a123, a123, True, True)
yield (a123, a123.copy(), None, True)
yield (a123, a123.copy(), False, True)
yield (a123, a123.copy(), True, True)
yield (a123.astype("float"), a123.astype("float"), None, True)
yield (a123.astype("float"), a123.astype("float"), False, True)
yield (a123.astype("float"), a123.astype("float"), True, True)
# these can hold None
b1 = np.array([1, 2, np.nan])
b2 = np.array([1, np.nan, 2])
b3 = np.array([1, 2, np.inf])
b4 = np.array(np.nan)
# instances are the same
yield (b1, b1, None, False)
yield (b1, b1, False, False)
yield (b1, b1, True, True)
# equal but not same instance
yield (b1, b1.copy(), None, False)
yield (b1, b1.copy(), False, False)
yield (b1, b1.copy(), True, True)
# same once stripped of Nan
yield (b1, b2, None, False)
yield (b1, b2, False, False)
yield (b1, b2, True, False)
# nan's not conflated with inf's
yield (b1, b3, None, False)
yield (b1, b3, False, False)
yield (b1, b3, True, False)
# all Nan
yield (b4, b4, None, False)
yield (b4, b4, False, False)
yield (b4, b4, True, True)
yield (b4, b4.copy(), None, False)
yield (b4, b4.copy(), False, False)
yield (b4, b4.copy(), True, True)
t1 = b1.astype("timedelta64")
t2 = b2.astype("timedelta64")
# Timedeltas are particular
yield (t1, t1, None, False)
yield (t1, t1, False, False)
yield (t1, t1, True, True)
yield (t1, t1.copy(), None, False)
yield (t1, t1.copy(), False, False)
yield (t1, t1.copy(), True, True)
yield (t1, t2, None, False)
yield (t1, t2, False, False)
yield (t1, t2, True, False)
# Multi-dimensional array
md1 = np.array([[0, 1], [np.nan, 1]])
yield (md1, md1, None, False)
yield (md1, md1, False, False)
yield (md1, md1, True, True)
yield (md1, md1.copy(), None, False)
yield (md1, md1.copy(), False, False)
yield (md1, md1.copy(), True, True)
# both complexes are nan+nan.j but the same instance
cplx1, cplx2 = [np.array([np.nan + np.nan * 1j])] * 2
# only real or img are nan.
cplx3, cplx4 = np.complex64(1, np.nan), np.complex64(np.nan, 1)
# Complex values
yield (cplx1, cplx2, None, False)
yield (cplx1, cplx2, False, False)
yield (cplx1, cplx2, True, True)
# Complex values, 1+nan, nan+1j
yield (cplx3, cplx4, None, False)
yield (cplx3, cplx4, False, False)
yield (cplx3, cplx4, True, True)
|
TestBaseRepr
|
python
|
kamyu104__LeetCode-Solutions
|
Python/meeting-rooms-iii.py
|
{
"start": 791,
"end": 1504
}
|
class ____(object):
def mostBooked(self, n, meetings):
"""
:type n: int
:type meetings: List[List[int]]
:rtype:
"""
meetings.sort()
unused, used = range(n), []
result = [0]*n
for s, e in meetings:
while used and used[0][0] <= s:
_, i = heapq.heappop(used)
heapq.heappush(unused, i)
if unused:
i = heapq.heappop(unused)
heapq.heappush(used, (e, i))
else:
e2, i = heapq.heappop(used)
heapq.heappush(used, (e2+(e-s), i))
result[i] += 1
return max(xrange(n), key=lambda x:result[x])
|
Solution2
|
python
|
wandb__wandb
|
wandb/automations/actions.py
|
{
"start": 3330,
"end": 4276
}
|
class ____(NoOpActionFields, frozen=False):
action_type: Literal[ActionType.NO_OP] = ActionType.NO_OP
no_op: Annotated[
bool,
BeforeValidator(default_if_none),
Field(repr=False, frozen=True),
] = True
"""Placeholder field, only needed to conform to schema requirements.
There should never be a need to set this field explicitly, as its value is ignored.
"""
# for type annotations
SavedAction = Annotated[
Union[
SavedLaunchJobAction,
SavedNotificationAction,
SavedWebhookAction,
SavedNoOpAction,
],
BeforeValidator(parse_saved_action),
Field(discriminator="typename__"),
]
# for runtime type checks
SavedActionTypes: tuple[type, ...] = get_args(SavedAction.__origin__) # type: ignore[attr-defined]
# ------------------------------------------------------------------------------
# Input types: for creating or updating automations
|
SavedNoOpAction
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typedDictReadOnly1.py
|
{
"start": 302,
"end": 637
}
|
class ____(TypedDict):
a: ReadOnly[int]
b: Required[ReadOnly[str]]
c: ReadOnly[NotRequired[str]]
# This should generate an error because nested ReadOnly are not allowed.
d: ReadOnly[ReadOnly[str]]
TD2 = TypedDict("TD2", {"a": ReadOnly[str]}, total=True)
TD3 = TypedDict("TD3", {"a": ReadOnly[str]}, total=True)
|
TD1
|
python
|
ray-project__ray
|
python/ray/dag/class_node.py
|
{
"start": 4340,
"end": 4961
}
|
class ____:
"""Represents a class method output in a Ray function DAG."""
def __init__(self, class_method_call: "ClassMethodNode", output_idx: int):
# The upstream class method call that returns multiple values.
self._class_method_call = class_method_call
# The output index of the return value from the upstream class method call.
self._output_idx = output_idx
@property
def class_method_call(self) -> "ClassMethodNode":
return self._class_method_call
@property
def output_idx(self) -> int:
return self._output_idx
@DeveloperAPI
|
_ClassMethodOutput
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-dbt/tests/cloud/test_jobs.py
|
{
"start": 1168,
"end": 6917
}
|
class ____:
async def test_get_dbt_cloud_job_info(self, dbt_cloud_credentials):
with respx.mock(using="httpx", assert_all_called=False) as respx_mock:
respx_mock.route(host="127.0.0.1").pass_through()
respx_mock.get(
"https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/12/",
headers=HEADERS,
).mock(return_value=Response(200, json={"data": {"id": 10000}}))
response = await get_dbt_cloud_job_info.fn(
dbt_cloud_credentials=dbt_cloud_credentials,
job_id=12,
order_by="id",
)
assert response == {"id": 10000}
async def test_trigger_job_with_no_options(self, dbt_cloud_credentials):
with respx.mock(using="httpx") as respx_mock:
respx_mock.post(
"https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/",
headers=HEADERS,
).mock(
return_value=Response(
200, json={"data": {"id": 10000, "project_id": 12345}}
)
)
with disable_run_logger():
result = await trigger_dbt_cloud_job_run.fn(
dbt_cloud_credentials=dbt_cloud_credentials,
job_id=1,
)
assert result == {"id": 10000, "project_id": 12345}
request_body = json.loads(respx_mock.calls.last.request.content.decode())
assert "Triggered via Prefect" in request_body["cause"]
async def test_trigger_with_custom_options(self, dbt_cloud_credentials):
with respx.mock(using="httpx") as respx_mock:
respx_mock.route(host="127.0.0.1").pass_through()
respx_mock.post(
"https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/",
headers=HEADERS,
json={
"cause": "This is a custom cause",
"git_branch": "staging",
"schema_override": "dbt_cloud_pr_123",
"dbt_version_override": "0.18.0",
"threads_override": 8,
"target_name_override": "staging",
"generate_docs_override": True,
"timeout_seconds_override": 3000,
"steps_override": [
"dbt seed",
"dbt run --fail-fast",
"dbt test --fail fast",
],
},
).mock(
return_value=Response(
200, json={"data": {"id": 10000, "project_id": 12345}}
)
)
@flow
async def test_trigger_with_custom_options():
return await trigger_dbt_cloud_job_run(
dbt_cloud_credentials=dbt_cloud_credentials,
job_id=1,
options=TriggerJobRunOptions(
cause="This is a custom cause",
git_branch="staging",
schema_override="dbt_cloud_pr_123",
dbt_version_override="0.18.0",
target_name_override="staging",
timeout_seconds_override=3000,
generate_docs_override=True,
threads_override=8,
steps_override=[
"dbt seed",
"dbt run --fail-fast",
"dbt test --fail fast",
],
),
)
result = await test_trigger_with_custom_options()
assert result == {"id": 10000, "project_id": 12345}
async def test_trigger_nonexistent_job(self, dbt_cloud_credentials):
with respx.mock(using="httpx", assert_all_called=False) as respx_mock:
respx_mock.route(host="127.0.0.1").pass_through()
respx_mock.post(
"https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/",
headers=HEADERS,
).mock(
return_value=Response(
404, json={"status": {"user_message": "Not found!"}}
)
)
@flow
async def test_trigger_nonexistent_job():
task_shorter_retry = trigger_dbt_cloud_job_run.with_options(
retries=1, retry_delay_seconds=1
)
await task_shorter_retry(
dbt_cloud_credentials=dbt_cloud_credentials,
job_id=1,
)
with pytest.raises(DbtCloudJobRunTriggerFailed, match="Not found!"):
await test_trigger_nonexistent_job()
async def test_trigger_nonexistent_run_id_no_logs(
self, dbt_cloud_credentials, caplog
):
with respx.mock(using="httpx", assert_all_called=False) as respx_mock:
respx_mock.route(host="127.0.0.1").pass_through()
respx_mock.post(
"https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/",
headers=HEADERS,
).mock(return_value=Response(200, json={"data": {"project_id": 12345}}))
@flow
async def trigger_nonexistent_run_id():
task_shorter_retry = trigger_dbt_cloud_job_run.with_options(
retries=1, retry_delay_seconds=1
)
await task_shorter_retry(
dbt_cloud_credentials=dbt_cloud_credentials,
job_id=1,
)
await trigger_nonexistent_run_id()
|
TestTriggerDbtCloudJobRun
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 651540,
"end": 652186
}
|
class ____(sgqlc.types.Type):
"""An Enterprise Server installation that a user is a member of."""
__schema__ = github_schema
__field_names__ = ("cursor", "node", "role")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("EnterpriseServerInstallation", graphql_name="node")
"""The item at the end of the edge."""
role = sgqlc.types.Field(sgqlc.types.non_null(EnterpriseUserAccountMembershipRole), graphql_name="role")
"""The role of the user in the enterprise membership."""
|
EnterpriseServerInstallationMembershipEdge
|
python
|
great-expectations__great_expectations
|
great_expectations/datasource/fluent/data_asset/path/spark/parquet_asset.py
|
{
"start": 1800,
"end": 1896
}
|
class ____(FileDataAsset, ParquetAssetBase):
type: Literal["parquet"] = "parquet"
|
ParquetAsset
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/core/test_scalar_ctors.py
|
{
"start": 1706,
"end": 2333
}
|
class ____(TestCase):
def test_intp(self):
# Ticket #99
assert_equal(1024, np.intp(1024))
def test_uint64_from_negative(self):
# NumPy test was asserting a DeprecationWarning
assert_equal(np.uint8(-2), np.uint8(254))
int_types = [
subtest(np.byte, name="np_byte"),
subtest(np.short, name="np_short"),
subtest(np.intc, name="np_intc"),
subtest(np.int_, name="np_int_"),
subtest(np.longlong, name="np_longlong"),
]
uint_types = [np.ubyte]
float_types = [np.half, np.single, np.double]
cfloat_types = [np.csingle, np.cdouble]
@instantiate_parametrized_tests
|
TestFromInt
|
python
|
keras-team__keras
|
keras/src/tree/tree_test.py
|
{
"start": 1382,
"end": 1727
}
|
class ____:
def __init__(self, func):
self.func = func
self.visited_list = []
def __call__(self, x):
self.visited_list.append(x)
return self.func(x)
def visited(self):
ret = self.visited_list
self.visited_list = []
return ret
@parameterized.named_parameters(TEST_CASES)
|
Visitor
|
python
|
ApeWorX__ape
|
src/ape/api/query.py
|
{
"start": 7879,
"end": 9157
}
|
class ____(BaseInterface):
@abstractmethod
def estimate_query(self, query: QueryType) -> Optional[int]:
"""
Estimation of time needed to complete the query. The estimation is returned
as an int representing milliseconds. A value of None indicates that the
query engine is not available for use or is unable to complete the query.
Args:
query (``QueryType``): Query to estimate.
Returns:
Optional[int]: Represents milliseconds, returns ``None`` if unable to execute.
"""
@abstractmethod
def perform_query(self, query: QueryType) -> Iterator:
"""
Executes the query using best performing ``estimate_query`` query engine.
Args:
query (``QueryType``): query to execute
Returns:
Iterator
"""
def update_cache(self, query: QueryType, result: Iterator[BaseInterfaceModel]):
"""
Allows a query plugin the chance to update any cache using the results obtained
from other query plugins. Defaults to doing nothing, override to store cache data.
Args:
query (``QueryType``): query that was executed
result (``Iterator``): the result of the query
"""
|
QueryAPI
|
python
|
ray-project__ray
|
python/ray/_common/test_utils.py
|
{
"start": 1429,
"end": 5541
}
|
class ____:
"""A Ray actor implementing a semaphore for test coordination.
Useful for testing resource limiting, concurrency control,
and coordination between multiple actors or tasks.
"""
def __init__(self, value: int = 1):
self._sema = asyncio.Semaphore(value=value)
async def acquire(self):
await self._sema.acquire()
async def release(self):
self._sema.release()
async def locked(self) -> bool:
return self._sema.locked()
__all__ = ["SignalActor", "Semaphore"]
def wait_for_condition(
condition_predictor: Callable[..., bool],
timeout: float = 10,
retry_interval_ms: float = 100,
raise_exceptions: bool = False,
**kwargs: Any,
):
"""Wait until a condition is met or time out with an exception.
Args:
condition_predictor: A function that predicts the condition.
timeout: Maximum timeout in seconds.
retry_interval_ms: Retry interval in milliseconds.
raise_exceptions: If true, exceptions that occur while executing
condition_predictor won't be caught and instead will be raised.
**kwargs: Arguments to pass to the condition_predictor.
Returns:
None: Returns when the condition is met.
Raises:
RuntimeError: If the condition is not met before the timeout expires.
"""
start = time.time()
last_ex = None
while time.time() - start <= timeout:
try:
if condition_predictor(**kwargs):
return
except Exception:
if raise_exceptions:
raise
last_ex = ray._private.utils.format_error_message(traceback.format_exc())
time.sleep(retry_interval_ms / 1000.0)
message = "The condition wasn't met before the timeout expired."
if last_ex is not None:
message += f" Last exception: {last_ex}"
raise RuntimeError(message)
async def async_wait_for_condition(
condition_predictor: Callable[..., Awaitable[bool]],
timeout: float = 10,
retry_interval_ms: float = 100,
**kwargs: Any,
):
"""Wait until a condition is met or time out with an exception.
Args:
condition_predictor: A function that predicts the condition.
timeout: Maximum timeout in seconds.
retry_interval_ms: Retry interval in milliseconds.
**kwargs: Arguments to pass to the condition_predictor.
Returns:
None: Returns when the condition is met.
Raises:
RuntimeError: If the condition is not met before the timeout expires.
"""
start = time.time()
last_ex = None
while time.time() - start <= timeout:
try:
if inspect.iscoroutinefunction(condition_predictor):
if await condition_predictor(**kwargs):
return
else:
if condition_predictor(**kwargs):
return
except Exception as ex:
last_ex = ex
await asyncio.sleep(retry_interval_ms / 1000.0)
message = "The condition wasn't met before the timeout expired."
if last_ex is not None:
message += f" Last exception: {last_ex}"
raise RuntimeError(message)
@contextmanager
def simulate_s3_bucket(
port: int = 5002,
region: str = "us-west-2",
) -> Iterator[str]:
"""Context manager that simulates an S3 bucket and yields the URI.
Args:
port: The port of the localhost endpoint where S3 is being served.
region: The S3 region.
Yields:
str: URI for the simulated S3 bucket.
"""
from moto.server import ThreadedMotoServer
old_env = os.environ
os.environ["AWS_ACCESS_KEY_ID"] = "testing"
os.environ["AWS_SECRET_ACCESS_KEY"] = "testing"
os.environ["AWS_SECURITY_TOKEN"] = "testing"
os.environ["AWS_SESSION_TOKEN"] = "testing"
s3_server = f"http://{build_address('localhost', port)}"
server = ThreadedMotoServer(port=port)
server.start()
url = f"s3://{uuid.uuid4().hex}?region={region}&endpoint_override={s3_server}"
yield url
server.stop()
os.environ = old_env
|
Semaphore
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.