language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sympy__sympy | sympy/stats/rv.py | {
"start": 8632,
"end": 9616
} | class ____(RandomSymbol):
def __new__(cls, idx_obj, pspace=None):
if pspace is None:
# Allow single arg, representing pspace == PSpace()
pspace = PSpace()
if not isinstance(idx_obj, (Indexed, Function)):
raise TypeError("An Function or Indexed object is expected not %s"%(idx_obj))
return Basic.__new__(cls, idx_obj, pspace)
symbol = property(lambda self: self.args[0])
name = property(lambda self: str(self.args[0]))
@property
def key(self):
if isinstance(self.symbol, Indexed):
return self.symbol.args[1]
elif isinstance(self.symbol, Function):
return self.symbol.args[0]
@property
def free_symbols(self):
if self.key.free_symbols:
free_syms = self.key.free_symbols
free_syms.add(self)
return free_syms
return {self}
@property
def pspace(self):
return self.args[1]
| RandomIndexedSymbol |
python | mlflow__mlflow | mlflow/pytorch/__init__.py | {
"start": 26519,
"end": 45803
} | class ____:
"""
Wrapper class that creates a predict function such that
predict(data: pd.DataFrame) -> model's output as pd.DataFrame (pandas DataFrame)
"""
def __init__(self, pytorch_model, device):
self.pytorch_model = pytorch_model
self.device = device
self._is_forecasting_model = _is_forecasting_model(self.pytorch_model)
def get_raw_model(self):
"""
Returns the underlying model.
"""
return self.pytorch_model
def predict(self, data, params: dict[str, Any] | None = None):
"""
Args:
data: Model input data.
params: Additional parameters to pass to the model for inference.
Returns:
Model predictions.
"""
import torch
if params and "device" in params:
raise ValueError(
"device' can no longer be specified as an inference parameter. "
"It must be specified at load time. "
"Please specify the device at load time, for example: "
"`mlflow.pyfunc.load_model(model_uri, model_config={'device': 'cuda'})`."
)
if isinstance(data, pd.DataFrame):
inp_data = data if self._is_forecasting_model else data.to_numpy(dtype=np.float32)
elif isinstance(data, np.ndarray):
if self._is_forecasting_model:
raise TypeError(
"The pytorch forecasting model does not support numpy.ndarray input data, "
"please provide pandas.DataFrame input data."
)
inp_data = data
elif isinstance(data, (list, dict)):
raise TypeError(
"The PyTorch flavor does not support List or Dict input types. "
"Please use a pandas.DataFrame or a numpy.ndarray"
)
else:
raise TypeError("Input data should be pandas.DataFrame or numpy.ndarray")
device = self.device
with torch.no_grad():
if self._is_forecasting_model:
# forecasting model `predict` method supports
# dataframe input.
preds = self.pytorch_model.predict(inp_data)
else:
input_tensor = torch.from_numpy(inp_data).to(device)
preds = self.pytorch_model(input_tensor, **(params or {}))
# if the predictions happened on a remote device, copy them back to
# the host CPU for processing
if device != _TORCH_CPU_DEVICE_NAME:
preds = preds.to(_TORCH_CPU_DEVICE_NAME)
if not isinstance(preds, torch.Tensor):
raise TypeError(
"Expected PyTorch model to output a single output tensor, "
f"but got output of type '{type(preds)}'"
)
if isinstance(data, pd.DataFrame) and not self._is_forecasting_model:
predicted = pd.DataFrame(preds.numpy())
predicted.index = data.index
else:
predicted = preds.numpy()
return predicted
def log_state_dict(state_dict, artifact_path, **kwargs):
"""
Log a state_dict as an MLflow artifact for the current run.
.. warning::
This function just logs a state_dict as an artifact and doesn't generate
an :ref:`MLflow Model <models>`.
Args:
state_dict: state_dict to be saved.
artifact_path: Run-relative artifact path.
kwargs: kwargs to pass to ``torch.save``.
.. code-block:: python
:caption: Example
# Log a model as a state_dict
with mlflow.start_run():
state_dict = model.state_dict()
mlflow.pytorch.log_state_dict(state_dict, artifact_path="model")
# Log a checkpoint as a state_dict
with mlflow.start_run():
state_dict = {
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
"epoch": epoch,
"loss": loss,
}
mlflow.pytorch.log_state_dict(state_dict, artifact_path="checkpoint")
"""
with TempDir() as tmp:
local_path = tmp.path()
save_state_dict(state_dict=state_dict, path=local_path, **kwargs)
mlflow.log_artifacts(local_path, artifact_path)
def save_state_dict(state_dict, path, **kwargs):
"""
Save a state_dict to a path on the local file system
Args:
state_dict: state_dict to be saved.
path: Local path where the state_dict is to be saved.
kwargs: kwargs to pass to ``torch.save``.
"""
import torch
# The object type check here aims to prevent a scenario where a user accidentally passees
# a model instead of a state_dict and `torch.save` (which accepts both model and state_dict)
# successfully completes, leaving the user unaware of the mistake.
if not isinstance(state_dict, dict):
raise TypeError(
"Invalid object type for `state_dict`: {}. Must be an instance of `dict`".format(
type(state_dict)
)
)
os.makedirs(path, exist_ok=True)
state_dict_path = os.path.join(path, _TORCH_STATE_DICT_FILE_NAME)
torch.save(state_dict, state_dict_path, **kwargs)
def load_state_dict(state_dict_uri, **kwargs):
"""
Load a state_dict from a local file or a run.
Args:
state_dict_uri: The location, in URI format, of the state_dict, for example:
- ``/Users/me/path/to/local/state_dict``
- ``relative/path/to/local/state_dict``
- ``s3://my_bucket/path/to/state_dict``
- ``runs:/<mlflow_run_id>/run-relative/path/to/state_dict``
For more information about supported URI schemes, see `Referencing Artifacts \
<https://www.mlflow.org/docs/latest/concepts.html#artifact-locations>`_.
kwargs: kwargs to pass to ``torch.load``.
Returns:
A state_dict
.. code-block:: python
:caption: Example
with mlflow.start_run():
artifact_path = "model"
mlflow.pytorch.log_state_dict(model.state_dict(), artifact_path)
state_dict_uri = mlflow.get_artifact_uri(artifact_path)
state_dict = mlflow.pytorch.load_state_dict(state_dict_uri)
"""
import torch
local_path = _download_artifact_from_uri(artifact_uri=state_dict_uri)
state_dict_path = os.path.join(local_path, _TORCH_STATE_DICT_FILE_NAME)
return torch.load(state_dict_path, **kwargs)
@autologging_integration(FLAVOR_NAME)
def autolog(
log_every_n_epoch=1,
log_every_n_step=None,
log_models=True,
log_datasets=True,
disable=False,
exclusive=False,
disable_for_unsupported_versions=False,
silent=False,
registered_model_name=None,
extra_tags=None,
checkpoint=True,
checkpoint_monitor="val_loss",
checkpoint_mode="min",
checkpoint_save_best_only=True,
checkpoint_save_weights_only=False,
checkpoint_save_freq="epoch",
log_model_signatures=True,
):
"""
Enables (or disables) and configures autologging from `PyTorch Lightning
<https://pytorch-lightning.readthedocs.io/en/latest>`_ to MLflow.
Autologging is performed when you call the `fit` method of
`pytorch_lightning.Trainer() \
<https://pytorch-lightning.readthedocs.io/en/latest/trainer.html#>`_.
Explore the complete `PyTorch MNIST \
<https://github.com/mlflow/mlflow/tree/master/examples/pytorch/MNIST>`_ for
an expansive example with implementation of additional lightening steps.
**Note**: Full autologging is only supported for PyTorch Lightning models,
i.e., models that subclass
`pytorch_lightning.LightningModule \
<https://pytorch-lightning.readthedocs.io/en/latest/lightning_module.html>`_.
Autologging support for vanilla PyTorch (ie models that only subclass
`torch.nn.Module <https://pytorch.org/docs/stable/generated/torch.nn.Module.html>`_)
only autologs calls to
`torch.utils.tensorboard.SummaryWriter <https://pytorch.org/docs/stable/tensorboard.html>`_'s
``add_scalar`` and ``add_hparams`` methods to mlflow. In this case, there's also
no notion of an "epoch".
Args:
log_every_n_epoch: If specified, logs metrics once every `n` epochs. By default, metrics
are logged after every epoch.
log_every_n_step: If specified, logs batch metrics once every `n` training step.
By default, metrics are not logged for steps. Note that setting this to 1 can cause
performance issues and is not recommended. Metrics are logged against Lightning's global
step number, and when multiple optimizers are used it is assumed that all optimizers
are stepped in each training step.
log_models: If ``True``, trained models are logged as MLflow model artifacts.
If ``False``, trained models are not logged.
log_datasets: If ``True``, dataset information is logged to MLflow Tracking.
If ``False``, dataset information is not logged.
disable: If ``True``, disables the PyTorch Lightning autologging integration.
If ``False``, enables the PyTorch Lightning autologging integration.
exclusive: If ``True``, autologged content is not logged to user-created fluent runs.
If ``False``, autologged content is logged to the active fluent run, which may be
user-created.
disable_for_unsupported_versions: If ``True``, disable autologging for versions of
pytorch and pytorch-lightning that have not been tested against this version
of the MLflow client or are incompatible.
silent: If ``True``, suppress all event logs and warnings from MLflow during PyTorch
Lightning autologging. If ``False``, show all events and warnings during PyTorch
Lightning autologging.
registered_model_name: If given, each time a model is trained, it is registered as a
new model version of the registered model with this name. The registered model is
created if it does not already exist.
extra_tags: A dictionary of extra tags to set on each managed run created by autologging.
checkpoint: Enable automatic model checkpointing, this feature only supports
pytorch-lightning >= 1.6.0.
checkpoint_monitor: In automatic model checkpointing, the metric name to monitor if
you set `model_checkpoint_save_best_only` to True.
checkpoint_mode: one of {"min", "max"}. In automatic model checkpointing,
if save_best_only=True, the decision to overwrite the current save file is made based on
either the maximization or the minimization of the monitored quantity.
checkpoint_save_best_only: If True, automatic model checkpointing only saves when
the model is considered the "best" model according to the quantity
monitored and previous checkpoint model is overwritten.
checkpoint_save_weights_only: In automatic model checkpointing, if True, then
only the model's weights will be saved. Otherwise, the optimizer states,
lr-scheduler states, etc are added in the checkpoint too.
checkpoint_save_freq: `"epoch"` or integer. When using `"epoch"`, the callback
saves the model after each epoch. When using integer, the callback
saves the model at end of this many batches. Note that if the saving isn't aligned to
epochs, the monitored metric may potentially be less reliable (it
could reflect as little as 1 batch, since the metrics get reset
every epoch). Defaults to `"epoch"`.
log_model_signatures: Whether to log model signature when `log_model` is True.
.. code-block:: python
:test:
:caption: Example
import os
import lightning as L
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader, Subset
from torchmetrics import Accuracy
from torchvision import transforms
from torchvision.datasets import MNIST
import mlflow.pytorch
from mlflow import MlflowClient
class MNISTModel(L.LightningModule):
def __init__(self):
super().__init__()
self.l1 = torch.nn.Linear(28 * 28, 10)
self.accuracy = Accuracy("multiclass", num_classes=10)
def forward(self, x):
return torch.relu(self.l1(x.view(x.size(0), -1)))
def training_step(self, batch, batch_nb):
x, y = batch
logits = self(x)
loss = F.cross_entropy(logits, y)
pred = logits.argmax(dim=1)
acc = self.accuracy(pred, y)
# PyTorch `self.log` will be automatically captured by MLflow.
self.log("train_loss", loss, on_epoch=True)
self.log("acc", acc, on_epoch=True)
return loss
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.02)
def print_auto_logged_info(r):
tags = {k: v for k, v in r.data.tags.items() if not k.startswith("mlflow.")}
artifacts = [f.path for f in MlflowClient().list_artifacts(r.info.run_id, "model")]
print(f"run_id: {r.info.run_id}")
print(f"artifacts: {artifacts}")
print(f"params: {r.data.params}")
print(f"metrics: {r.data.metrics}")
print(f"tags: {tags}")
# Initialize our model.
mnist_model = MNISTModel()
# Load MNIST dataset.
train_ds = MNIST(
os.getcwd(), train=True, download=True, transform=transforms.ToTensor()
)
# Only take a subset of the data for faster training.
indices = torch.arange(32)
train_ds = Subset(train_ds, indices)
train_loader = DataLoader(train_ds, batch_size=8)
# Initialize a trainer.
trainer = L.Trainer(max_epochs=3)
# Auto log all MLflow entities
mlflow.pytorch.autolog()
# Train the model.
with mlflow.start_run() as run:
trainer.fit(mnist_model, train_loader)
# Fetch the auto logged parameters and metrics.
print_auto_logged_info(mlflow.get_run(run_id=run.info.run_id))
"""
try:
import pytorch_lightning as pl
except ImportError:
pass
else:
from mlflow.pytorch._lightning_autolog import patched_fit
safe_patch(
FLAVOR_NAME, pl.Trainer, "fit", patched_fit, manage_run=True, extra_tags=extra_tags
)
try:
import lightning as L
except ImportError:
pass
else:
from mlflow.pytorch._lightning_autolog import patched_fit
safe_patch(
FLAVOR_NAME, L.Trainer, "fit", patched_fit, manage_run=True, extra_tags=extra_tags
)
try:
import torch.utils.tensorboard.writer
except ImportError:
pass
else:
from mlflow.pytorch._pytorch_autolog import (
flush_metrics_queue,
patched_add_event,
patched_add_hparams,
patched_add_summary,
)
safe_patch(
FLAVOR_NAME,
torch.utils.tensorboard.writer.FileWriter,
"add_event",
partial(patched_add_event, mlflow_log_every_n_step=log_every_n_step),
manage_run=True,
extra_tags=extra_tags,
)
safe_patch(
FLAVOR_NAME,
torch.utils.tensorboard.writer.FileWriter,
"add_summary",
patched_add_summary,
manage_run=True,
extra_tags=extra_tags,
)
safe_patch(
FLAVOR_NAME,
torch.utils.tensorboard.SummaryWriter,
"add_hparams",
patched_add_hparams,
manage_run=True,
extra_tags=extra_tags,
)
atexit.register(flush_metrics_queue)
if autolog.__doc__ is not None:
autolog.__doc__ = autolog.__doc__.replace("MIN_REQ_VERSION", str(MIN_REQ_VERSION)).replace(
"MAX_REQ_VERSION", str(MAX_REQ_VERSION)
)
def load_checkpoint(model_class, run_id=None, epoch=None, global_step=None, kwargs=None):
"""
If you enable "checkpoint" in autologging, during pytorch-lightning model
training execution, checkpointed models are logged as MLflow artifacts.
Using this API, you can load the checkpointed model.
If you want to load the latest checkpoint, set both `epoch` and `global_step` to None.
If "checkpoint_save_freq" is set to "epoch" in autologging,
you can set `epoch` param to the epoch of the checkpoint to load specific epoch checkpoint.
If "checkpoint_save_freq" is set to an integer in autologging,
you can set `global_step` param to the global step of the checkpoint to load specific
global step checkpoint.
`epoch` param and `global_step` can't be set together.
Args:
model_class: The class of the training model, the class should inherit
'pytorch_lightning.LightningModule'.
run_id: The id of the run which model is logged to. If not provided,
current active run is used.
epoch: The epoch of the checkpoint to be loaded, if you set
"checkpoint_save_freq" to "epoch".
global_step: The global step of the checkpoint to be loaded, if
you set "checkpoint_save_freq" to an integer.
kwargs: Any extra kwargs needed to init the model.
Returns:
The instance of a pytorch-lightning model restored from the specified checkpoint.
.. code-block:: python
:caption: Example
import mlflow
mlflow.pytorch.autolog(checkpoint=True)
model = MyLightningModuleNet() # A custom-pytorch lightning model
train_loader = create_train_dataset_loader()
trainer = Trainer()
with mlflow.start_run() as run:
trainer.fit(model, train_loader)
run_id = run.info.run_id
# load latest checkpoint model
latest_checkpoint_model = mlflow.pytorch.load_checkpoint(MyLightningModuleNet, run_id)
# load history checkpoint model logged in second epoch
checkpoint_model = mlflow.pytorch.load_checkpoint(MyLightningModuleNet, run_id, epoch=2)
"""
with TempDir() as tmp_dir:
downloaded_checkpoint_filepath = download_checkpoint_artifact(
run_id=run_id, epoch=epoch, global_step=global_step, dst_path=tmp_dir.path()
)
return model_class.load_from_checkpoint(downloaded_checkpoint_filepath, **(kwargs or {}))
__all__ = [
"autolog",
"load_model",
"save_model",
"log_model",
"get_default_pip_requirements",
"get_default_conda_env",
"load_checkpoint",
]
try:
from mlflow.pytorch._lightning_autolog import MlflowModelCheckpointCallback # noqa: F401
__all__.append("MlflowModelCheckpointCallback")
except ImportError:
# Swallow exception if pytorch-lightning is not installed.
pass
| _PyTorchWrapper |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 113437,
"end": 114289
} | class ____(Operation):
def call(self, x1, x2):
return backend.numpy.greater_equal(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype="bool")
@keras_export(
[
"keras.ops.greater_equal",
"keras.ops.numpy.greater_equal",
]
)
def greater_equal(x1, x2):
"""Return the truth value of `x1 >= x2` element-wise.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Output tensor, element-wise comparison of `x1` and `x2`.
"""
if any_symbolic_tensors((x1, x2)):
return GreaterEqual().symbolic_call(x1, x2)
return backend.numpy.greater_equal(x1, x2)
| GreaterEqual |
python | ray-project__ray | rllib/utils/replay_buffers/base.py | {
"start": 158,
"end": 2271
} | class ____(metaclass=ABCMeta):
"""Abstract base class for all of RLlib's replay buffers.
Mainly defines the `add()` and `sample()` methods that every buffer class
must implement to be usable by an Algorithm.
Buffers may determine on all the implementation details themselves, e.g.
whether to store single timesteps, episodes, or episode fragments or whether
to return fixed batch sizes or per-call defined ones.
"""
@abstractmethod
@DeveloperAPI
def __len__(self) -> int:
"""Returns the number of items currently stored in this buffer."""
@abstractmethod
@DeveloperAPI
def add(self, batch: Any, **kwargs) -> None:
"""Adds a batch of experiences or other data to this buffer.
Args:
batch: Batch or data to add.
``**kwargs``: Forward compatibility kwargs.
"""
@abstractmethod
@DeveloperAPI
def sample(self, num_items: Optional[int] = None, **kwargs) -> Any:
"""Samples `num_items` items from this buffer.
The exact shape of the returned data depends on the buffer's implementation.
Args:
num_items: Number of items to sample from this buffer.
``**kwargs``: Forward compatibility kwargs.
Returns:
A batch of items.
"""
@abstractmethod
@DeveloperAPI
def get_state(self) -> Dict[str, Any]:
"""Returns all local state in a dict.
Returns:
The serializable local state.
"""
@abstractmethod
@DeveloperAPI
def set_state(self, state: Dict[str, Any]) -> None:
"""Restores all local state to the provided `state`.
Args:
state: The new state to set this buffer. Can be obtained by calling
`self.get_state()`.
"""
@DeveloperAPI
def get_host(self) -> str:
"""Returns the computer's network name.
Returns:
The computer's networks name or an empty string, if the network
name could not be determined.
"""
return platform.node()
| ReplayBufferInterface |
python | openai__openai-python | src/openai/_base_client.py | {
"start": 10410,
"end": 27818
} | class ____(Generic[_HttpxClientT, _DefaultStreamT]):
_client: _HttpxClientT
_version: str
_base_url: URL
max_retries: int
timeout: Union[float, Timeout, None]
_strict_response_validation: bool
_idempotency_header: str | None
_default_stream_cls: type[_DefaultStreamT] | None = None
def __init__(
self,
*,
version: str,
base_url: str | URL,
_strict_response_validation: bool,
max_retries: int = DEFAULT_MAX_RETRIES,
timeout: float | Timeout | None = DEFAULT_TIMEOUT,
custom_headers: Mapping[str, str] | None = None,
custom_query: Mapping[str, object] | None = None,
) -> None:
self._version = version
self._base_url = self._enforce_trailing_slash(URL(base_url))
self.max_retries = max_retries
self.timeout = timeout
self._custom_headers = custom_headers or {}
self._custom_query = custom_query or {}
self._strict_response_validation = _strict_response_validation
self._idempotency_header = None
self._platform: Platform | None = None
if max_retries is None: # pyright: ignore[reportUnnecessaryComparison]
raise TypeError(
"max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `openai.DEFAULT_MAX_RETRIES`"
)
def _enforce_trailing_slash(self, url: URL) -> URL:
if url.raw_path.endswith(b"/"):
return url
return url.copy_with(raw_path=url.raw_path + b"/")
def _make_status_error_from_response(
self,
response: httpx.Response,
) -> APIStatusError:
if response.is_closed and not response.is_stream_consumed:
# We can't read the response body as it has been closed
# before it was read. This can happen if an event hook
# raises a status error.
body = None
err_msg = f"Error code: {response.status_code}"
else:
err_text = response.text.strip()
body = err_text
try:
body = json.loads(err_text)
err_msg = f"Error code: {response.status_code} - {body}"
except Exception:
err_msg = err_text or f"Error code: {response.status_code}"
return self._make_status_error(err_msg, body=body, response=response)
def _make_status_error(
self,
err_msg: str,
*,
body: object,
response: httpx.Response,
) -> _exceptions.APIStatusError:
raise NotImplementedError()
def _build_headers(self, options: FinalRequestOptions, *, retries_taken: int = 0) -> httpx.Headers:
custom_headers = options.headers or {}
headers_dict = _merge_mappings(self.default_headers, custom_headers)
self._validate_headers(headers_dict, custom_headers)
# headers are case-insensitive while dictionaries are not.
headers = httpx.Headers(headers_dict)
idempotency_header = self._idempotency_header
if idempotency_header and options.idempotency_key and idempotency_header not in headers:
headers[idempotency_header] = options.idempotency_key
# Don't set these headers if they were already set or removed by the caller. We check
# `custom_headers`, which can contain `Omit()`, instead of `headers` to account for the removal case.
lower_custom_headers = [header.lower() for header in custom_headers]
if "x-stainless-retry-count" not in lower_custom_headers:
headers["x-stainless-retry-count"] = str(retries_taken)
if "x-stainless-read-timeout" not in lower_custom_headers:
timeout = self.timeout if isinstance(options.timeout, NotGiven) else options.timeout
if isinstance(timeout, Timeout):
timeout = timeout.read
if timeout is not None:
headers["x-stainless-read-timeout"] = str(timeout)
return headers
def _prepare_url(self, url: str) -> URL:
"""
Merge a URL argument together with any 'base_url' on the client,
to create the URL used for the outgoing request.
"""
# Copied from httpx's `_merge_url` method.
merge_url = URL(url)
if merge_url.is_relative_url:
merge_raw_path = self.base_url.raw_path + merge_url.raw_path.lstrip(b"/")
return self.base_url.copy_with(raw_path=merge_raw_path)
return merge_url
def _make_sse_decoder(self) -> SSEDecoder | SSEBytesDecoder:
return SSEDecoder()
def _build_request(
self,
options: FinalRequestOptions,
*,
retries_taken: int = 0,
) -> httpx.Request:
if log.isEnabledFor(logging.DEBUG):
log.debug("Request options: %s", model_dump(options, exclude_unset=True))
kwargs: dict[str, Any] = {}
json_data = options.json_data
if options.extra_json is not None:
if json_data is None:
json_data = cast(Body, options.extra_json)
elif is_mapping(json_data):
json_data = _merge_mappings(json_data, options.extra_json)
else:
raise RuntimeError(f"Unexpected JSON data type, {type(json_data)}, cannot merge with `extra_body`")
headers = self._build_headers(options, retries_taken=retries_taken)
params = _merge_mappings(self.default_query, options.params)
content_type = headers.get("Content-Type")
files = options.files
# If the given Content-Type header is multipart/form-data then it
# has to be removed so that httpx can generate the header with
# additional information for us as it has to be in this form
# for the server to be able to correctly parse the request:
# multipart/form-data; boundary=---abc--
if content_type is not None and content_type.startswith("multipart/form-data"):
if "boundary" not in content_type:
# only remove the header if the boundary hasn't been explicitly set
# as the caller doesn't want httpx to come up with their own boundary
headers.pop("Content-Type")
# As we are now sending multipart/form-data instead of application/json
# we need to tell httpx to use it, https://www.python-httpx.org/advanced/clients/#multipart-file-encoding
if json_data:
if not is_dict(json_data):
raise TypeError(
f"Expected query input to be a dictionary for multipart requests but got {type(json_data)} instead."
)
kwargs["data"] = self._serialize_multipartform(json_data)
# httpx determines whether or not to send a "multipart/form-data"
# request based on the truthiness of the "files" argument.
# This gets around that issue by generating a dict value that
# evaluates to true.
#
# https://github.com/encode/httpx/discussions/2399#discussioncomment-3814186
if not files:
files = cast(HttpxRequestFiles, ForceMultipartDict())
prepared_url = self._prepare_url(options.url)
if "_" in prepared_url.host:
# work around https://github.com/encode/httpx/discussions/2880
kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")}
is_body_allowed = options.method.lower() != "get"
if is_body_allowed:
if isinstance(json_data, bytes):
kwargs["content"] = json_data
else:
kwargs["json"] = json_data if is_given(json_data) else None
kwargs["files"] = files
else:
headers.pop("Content-Type", None)
kwargs.pop("data", None)
# TODO: report this error to httpx
return self._client.build_request( # pyright: ignore[reportUnknownMemberType]
headers=headers,
timeout=self.timeout if isinstance(options.timeout, NotGiven) else options.timeout,
method=options.method,
url=prepared_url,
# the `Query` type that we use is incompatible with qs'
# `Params` type as it needs to be typed as `Mapping[str, object]`
# so that passing a `TypedDict` doesn't cause an error.
# https://github.com/microsoft/pyright/issues/3526#event-6715453066
params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None,
**kwargs,
)
def _serialize_multipartform(self, data: Mapping[object, object]) -> dict[str, object]:
items = self.qs.stringify_items(
# TODO: type ignore is required as stringify_items is well typed but we can't be
# well typed without heavy validation.
data, # type: ignore
array_format="brackets",
)
serialized: dict[str, object] = {}
for key, value in items:
existing = serialized.get(key)
if not existing:
serialized[key] = value
continue
# If a value has already been set for this key then that
# means we're sending data like `array[]=[1, 2, 3]` and we
# need to tell httpx that we want to send multiple values with
# the same key which is done by using a list or a tuple.
#
# Note: 2d arrays should never result in the same key at both
# levels so it's safe to assume that if the value is a list,
# it was because we changed it to be a list.
if is_list(existing):
existing.append(value)
else:
serialized[key] = [existing, value]
return serialized
def _maybe_override_cast_to(self, cast_to: type[ResponseT], options: FinalRequestOptions) -> type[ResponseT]:
if not is_given(options.headers):
return cast_to
# make a copy of the headers so we don't mutate user-input
headers = dict(options.headers)
# we internally support defining a temporary header to override the
# default `cast_to` type for use with `.with_raw_response` and `.with_streaming_response`
# see _response.py for implementation details
override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, not_given)
if is_given(override_cast_to):
options.headers = headers
return cast(Type[ResponseT], override_cast_to)
return cast_to
def _should_stream_response_body(self, request: httpx.Request) -> bool:
return request.headers.get(RAW_RESPONSE_HEADER) == "stream" # type: ignore[no-any-return]
def _process_response_data(
self,
*,
data: object,
cast_to: type[ResponseT],
response: httpx.Response,
) -> ResponseT:
if data is None:
return cast(ResponseT, None)
if cast_to is object:
return cast(ResponseT, data)
try:
if inspect.isclass(cast_to) and issubclass(cast_to, ModelBuilderProtocol):
return cast(ResponseT, cast_to.build(response=response, data=data))
if self._strict_response_validation:
return cast(ResponseT, validate_type(type_=cast_to, value=data))
return cast(ResponseT, construct_type(type_=cast_to, value=data))
except pydantic.ValidationError as err:
raise APIResponseValidationError(response=response, body=data) from err
@property
def qs(self) -> Querystring:
return Querystring()
@property
def custom_auth(self) -> httpx.Auth | None:
return None
@property
def auth_headers(self) -> dict[str, str]:
return {}
@property
def default_headers(self) -> dict[str, str | Omit]:
return {
"Accept": "application/json",
"Content-Type": "application/json",
"User-Agent": self.user_agent,
**self.platform_headers(),
**self.auth_headers,
**self._custom_headers,
}
@property
def default_query(self) -> dict[str, object]:
return {
**self._custom_query,
}
def _validate_headers(
self,
headers: Headers, # noqa: ARG002
custom_headers: Headers, # noqa: ARG002
) -> None:
"""Validate the given default headers and custom headers.
Does nothing by default.
"""
return
@property
def user_agent(self) -> str:
return f"{self.__class__.__name__}/Python {self._version}"
@property
def base_url(self) -> URL:
return self._base_url
@base_url.setter
def base_url(self, url: URL | str) -> None:
self._base_url = self._enforce_trailing_slash(url if isinstance(url, URL) else URL(url))
def platform_headers(self) -> Dict[str, str]:
# the actual implementation is in a separate `lru_cache` decorated
# function because adding `lru_cache` to methods will leak memory
# https://github.com/python/cpython/issues/88476
return platform_headers(self._version, platform=self._platform)
def _parse_retry_after_header(self, response_headers: Optional[httpx.Headers] = None) -> float | None:
"""Returns a float of the number of seconds (not milliseconds) to wait after retrying, or None if unspecified.
About the Retry-After header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
See also https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After#syntax
"""
if response_headers is None:
return None
# First, try the non-standard `retry-after-ms` header for milliseconds,
# which is more precise than integer-seconds `retry-after`
try:
retry_ms_header = response_headers.get("retry-after-ms", None)
return float(retry_ms_header) / 1000
except (TypeError, ValueError):
pass
# Next, try parsing `retry-after` header as seconds (allowing nonstandard floats).
retry_header = response_headers.get("retry-after")
try:
# note: the spec indicates that this should only ever be an integer
# but if someone sends a float there's no reason for us to not respect it
return float(retry_header)
except (TypeError, ValueError):
pass
# Last, try parsing `retry-after` as a date.
retry_date_tuple = email.utils.parsedate_tz(retry_header)
if retry_date_tuple is None:
return None
retry_date = email.utils.mktime_tz(retry_date_tuple)
return float(retry_date - time.time())
def _calculate_retry_timeout(
self,
remaining_retries: int,
options: FinalRequestOptions,
response_headers: Optional[httpx.Headers] = None,
) -> float:
max_retries = options.get_max_retries(self.max_retries)
# If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says.
retry_after = self._parse_retry_after_header(response_headers)
if retry_after is not None and 0 < retry_after <= 60:
return retry_after
# Also cap retry count to 1000 to avoid any potential overflows with `pow`
nb_retries = min(max_retries - remaining_retries, 1000)
# Apply exponential backoff, but not more than the max.
sleep_seconds = min(INITIAL_RETRY_DELAY * pow(2.0, nb_retries), MAX_RETRY_DELAY)
# Apply some jitter, plus-or-minus half a second.
jitter = 1 - 0.25 * random()
timeout = sleep_seconds * jitter
return timeout if timeout >= 0 else 0
def _should_retry(self, response: httpx.Response) -> bool:
# Note: this is not a standard header
should_retry_header = response.headers.get("x-should-retry")
# If the server explicitly says whether or not to retry, obey.
if should_retry_header == "true":
log.debug("Retrying as header `x-should-retry` is set to `true`")
return True
if should_retry_header == "false":
log.debug("Not retrying as header `x-should-retry` is set to `false`")
return False
# Retry on request timeouts.
if response.status_code == 408:
log.debug("Retrying due to status code %i", response.status_code)
return True
# Retry on lock timeouts.
if response.status_code == 409:
log.debug("Retrying due to status code %i", response.status_code)
return True
# Retry on rate limits.
if response.status_code == 429:
log.debug("Retrying due to status code %i", response.status_code)
return True
# Retry internal errors.
if response.status_code >= 500:
log.debug("Retrying due to status code %i", response.status_code)
return True
log.debug("Not retrying")
return False
def _idempotency_key(self) -> str:
return f"stainless-python-retry-{uuid.uuid4()}"
| BaseClient |
python | coleifer__peewee | tests/transactions.py | {
"start": 11295,
"end": 13711
} | class ____(BaseTransactionTestCase):
@skip_unless(IS_POSTGRESQL, 'requires postgresql')
def test_isolation_level_pg(self):
db2 = new_connection()
db2.connect()
with db2.atomic(isolation_level='SERIALIZABLE'):
with db.atomic(isolation_level='SERIALIZABLE'):
self._save(1)
self.assertDB2(db2, [])
self.assertDB2(db2, [])
self.assertDB2(db2, [1])
with db2.atomic(isolation_level='READ COMMITTED'):
with db.atomic():
self._save(2)
self.assertDB2(db2, [1])
self.assertDB2(db2, [1, 2])
self.assertDB2(db2, [1, 2])
# NB: Read Uncommitted is treated as Read Committed by PG, so we don't
# test it here.
with db2.atomic(isolation_level='REPEATABLE READ'):
with db.atomic(isolation_level='REPEATABLE READ'):
self._save(3)
self.assertDB2(db2, [1, 2])
self.assertDB2(db2, [1, 2])
self.assertDB2(db2, [1, 2, 3])
@skip_unless(IS_MYSQL, 'requires mysql')
def test_isolation_level_mysql(self):
db2 = new_connection()
db2.connect()
with db2.atomic():
with db.atomic(isolation_level='SERIALIZABLE'):
self._save(1)
self.assertDB2(db2, [])
self.assertDB2(db2, [])
self.assertDB2(db2, [1])
with db2.atomic(isolation_level='READ COMMITTED'):
with db.atomic():
self._save(2)
self.assertDB2(db2, [1])
self.assertDB2(db2, [1, 2])
self.assertDB2(db2, [1, 2])
with db2.atomic(isolation_level='READ UNCOMMITTED'):
with db.atomic():
self._save(3)
self.assertDB2(db2, [1, 2, 3])
self.assertDB2(db2, [1, 2, 3])
self.assertDB2(db2, [1, 2, 3])
with db2.atomic(isolation_level='REPEATABLE READ'):
with db.atomic(isolation_level='REPEATABLE READ'):
self._save(4)
self.assertDB2(db2, [1, 2, 3])
self.assertDB2(db2, [1, 2, 3])
self.assertDB2(db2, [1, 2, 3, 4])
def assertDB2(self, db2, vals):
with Register.bind_ctx(db2):
q = Register.select().order_by(Register.value)
self.assertEqual([r.value for r in q], vals)
| TestTransactionIsolationLevel |
python | pytorch__pytorch | test/inductor/test_fx_fusion.py | {
"start": 1335,
"end": 5982
} | class ____(TestCase):
def test_sink_cat_after_pointwise(self):
def test_kwarg(x, y):
return torch.cat([x, y], dim=-1).view(-1).view(128).tanh()
def test_arg(x, y):
return torch.cat([x, y], -1).view(-1).view(128).tanh()
def test_arg2(x, y):
return torch.cat([x, y]).view(-1).view(128).tanh()
def test_kwarg2(x, y):
return torch.cat(tensors=[x, y], dim=0).tanh()
def test_kwarg3(x, y):
return torch.cat(tensors=[x, y], dim=0).view(128).tanh()
trace_func = chain_passes(torch.fx.symbolic_trace, sink_cat_after_pointwise)
inputs = [
torch.randn(8, 8),
torch.randn(8, 8),
]
for f in [test_kwarg, test_arg, test_arg2, test_kwarg2, test_kwarg3]:
traced = trace_func(f, inputs)
torch.testing.assert_close(f(*inputs), traced(*inputs))
self.assertEqual(count_call_method(traced, "tanh"), 2)
def test_linear_permute_fusion(self):
class TestModule(torch.nn.Module):
def __init__(self, k: int, n: int, has_bias: bool):
super().__init__()
self.weight = torch.nn.Parameter(torch.randn(n, k))
self.has_bias = has_bias
if has_bias:
self.bias = torch.nn.Parameter(torch.randn(n))
def forward(self, input: torch.Tensor):
if self.has_bias:
a0 = torch.nn.functional.linear(input, self.weight, self.bias)
else:
a0 = torch.nn.functional.linear(input, self.weight)
b0 = a0.permute(0, 2, 1)
return b0
m, k, n = 16, 8, 4
trace_func = chain_passes(torch.fx.symbolic_trace, linear_permute_fusion)
for has_bias in [True, False]:
module = TestModule(k, n, has_bias).eval()
input = torch.randn(6, m, k)
traced = trace_func(module, [input])
num_linear = count_call_function(traced, torch.nn.functional.linear)
num_linear_transpose = count_call_function(traced, linear_transpose)
self.assertEqual(num_linear, 0)
self.assertEqual(num_linear_transpose, 1)
torch.testing.assert_close(module(input), traced(input))
def test_permute_linear_fusion(self):
class TestModule(torch.nn.Module):
def __init__(self, k: int, n: int, has_bias: bool):
super().__init__()
self.weight = torch.nn.Parameter(torch.randn(n, k))
self.has_bias = has_bias
if has_bias:
self.bias = torch.nn.Parameter(torch.randn(n))
def forward(self, input: torch.Tensor):
input1 = input.permute(0, 2, 1)
if self.has_bias:
return torch.nn.functional.linear(input1, self.weight, self.bias)
return torch.nn.functional.linear(input1, self.weight)
m, k, n = 16, 8, 4
trace_func = chain_passes(torch.fx.symbolic_trace, permute_linear_fusion)
for has_bias in [True, False]:
module = TestModule(k, n, has_bias).eval()
input = torch.randn(6, k, m)
traced = trace_func(module, [input])
num_linear = count_call_function(traced, torch.nn.functional.linear)
num_transpose_linear = count_call_function(traced, transpose_linear)
self.assertEqual(num_linear, 0)
self.assertEqual(num_transpose_linear, 1)
torch.testing.assert_close(module(input), traced(input))
def test_permute_bmm_fusion(self):
class TestModule(torch.nn.Module):
def __init__(self, batch: int, k: int, n: int):
super().__init__()
self.other = torch.randn(batch, k, n)
def forward(self, input: torch.Tensor):
input1 = input.permute(0, 2, 1)
output = torch.bmm(input1, self.other)
return output
batch, m, k, n = 6, 16, 8, 4
trace_func = chain_passes(torch.fx.symbolic_trace, permute_matmul_fusion)
module = TestModule(batch, k, n).eval()
input = torch.randn(batch, k, m)
traced = trace_func(module, [input])
num_bmm = count_call_function(traced, torch.bmm)
num_transpose_matmul = count_call_function(traced, transpose_matmul)
self.assertEqual(num_bmm, 0)
self.assertEqual(num_transpose_matmul, 1)
torch.testing.assert_close(module(input), traced(input))
if __name__ == "__main__":
run_tests()
| TestFxFusion |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_P.py | {
"start": 16678,
"end": 17760
} | class ____(Benchmark):
r"""
Price 1 objective function.
This class defines the Price 1 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Price01}}(x) = (\lvert x_1 \rvert - 5)^2
+ (\lvert x_2 \rvert - 5)^2
with :math:`x_i \in [-500, 500]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x_i) = 0.0` for :math:`x = [5, 5]` or
:math:`x = [5, -5]` or :math:`x = [-5, 5]` or :math:`x = [-5, -5]`.
.. [1] Price, W. A controlled random search procedure for global
optimisation Computer Journal, 1977, 20, 367-370
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-500.0] * self.N,
[500.0] * self.N))
self.custom_bounds = ([-10.0, 10.0], [-10.0, 10.0])
self.global_optimum = [[5.0, 5.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return (abs(x[0]) - 5.0) ** 2.0 + (abs(x[1]) - 5.0) ** 2.0
| Price01 |
python | openai__openai-python | src/openai/types/shared_params/response_format_json_schema.py | {
"start": 1239,
"end": 1529
} | class ____(TypedDict, total=False):
json_schema: Required[JSONSchema]
"""Structured Outputs configuration options, including a JSON Schema."""
type: Required[Literal["json_schema"]]
"""The type of response format being defined. Always `json_schema`."""
| ResponseFormatJSONSchema |
python | pytorch__pytorch | test/functorch/test_parsing.py | {
"start": 2169,
"end": 7527
} | class ____(TestCase):
def test_elementary_axis_name(self) -> None:
for name in [
"a",
"b",
"h",
"dx",
"h1",
"zz",
"i9123",
"somelongname",
"Alex",
"camelCase",
"u_n_d_e_r_score",
"unreasonablyLongAxisName",
]:
self.assertTrue(ParsedExpression.check_axis_name(name))
for name in [
"",
"2b",
"12",
"_startWithUnderscore",
"endWithUnderscore_",
"_",
"...",
_ellipsis,
]:
self.assertFalse(ParsedExpression.check_axis_name(name))
def test_invalid_expressions(self) -> None:
# double ellipsis should raise an error
ParsedExpression("... a b c d")
with self.assertRaises(ValueError):
ParsedExpression("... a b c d ...")
with self.assertRaises(ValueError):
ParsedExpression("... a b c (d ...)")
with self.assertRaises(ValueError):
ParsedExpression("(... a) b c (d ...)")
# double/missing/enclosed parenthesis
ParsedExpression("(a) b c (d ...)")
with self.assertRaises(ValueError):
ParsedExpression("(a)) b c (d ...)")
with self.assertRaises(ValueError):
ParsedExpression("(a b c (d ...)")
with self.assertRaises(ValueError):
ParsedExpression("(a) (()) b c (d ...)")
with self.assertRaises(ValueError):
ParsedExpression("(a) ((b c) (d ...))")
# invalid identifiers
ParsedExpression("camelCase under_scored cApiTaLs \u00df ...")
with self.assertRaises(ValueError):
ParsedExpression("1a")
with self.assertRaises(ValueError):
ParsedExpression("_pre")
with self.assertRaises(ValueError):
ParsedExpression("...pre")
with self.assertRaises(ValueError):
ParsedExpression("pre...")
@mock.patch.object(AnonymousAxis, "__eq__", mock_anonymous_axis_eq)
def test_parse_expression(self, *mocks: mock.MagicMock) -> None:
parsed = ParsedExpression("a1 b1 c1 d1")
self.assertSetEqual(parsed.identifiers, {"a1", "b1", "c1", "d1"})
self.assertListEqual(parsed.composition, [["a1"], ["b1"], ["c1"], ["d1"]])
self.assertFalse(parsed.has_non_unitary_anonymous_axes)
self.assertFalse(parsed.has_ellipsis)
parsed = ParsedExpression("() () () ()")
self.assertSetEqual(parsed.identifiers, set())
self.assertListEqual(parsed.composition, [[], [], [], []])
self.assertFalse(parsed.has_non_unitary_anonymous_axes)
self.assertFalse(parsed.has_ellipsis)
parsed = ParsedExpression("1 1 1 ()")
self.assertSetEqual(parsed.identifiers, set())
self.assertListEqual(parsed.composition, [[], [], [], []])
self.assertFalse(parsed.has_non_unitary_anonymous_axes)
self.assertFalse(parsed.has_ellipsis)
parsed = ParsedExpression("5 (3 4)")
self.assertEqual(len(parsed.identifiers), 3)
self.assertSetEqual(
{
i.value if isinstance(i, AnonymousAxis) else i
for i in parsed.identifiers
},
{3, 4, 5},
)
self.assertListEqual(
parsed.composition,
[[AnonymousAxis("5")], [AnonymousAxis("3"), AnonymousAxis("4")]],
)
self.assertTrue(parsed.has_non_unitary_anonymous_axes)
self.assertFalse(parsed.has_ellipsis)
parsed = ParsedExpression("5 1 (1 4) 1")
self.assertEqual(len(parsed.identifiers), 2)
self.assertSetEqual(
{
i.value if isinstance(i, AnonymousAxis) else i
for i in parsed.identifiers
},
{4, 5},
)
self.assertListEqual(
parsed.composition, [[AnonymousAxis("5")], [], [AnonymousAxis("4")], []]
)
parsed = ParsedExpression("name1 ... a1 12 (name2 14)")
self.assertEqual(len(parsed.identifiers), 6)
self.assertEqual(
len(parsed.identifiers - {"name1", _ellipsis, "a1", "name2"}), 2
)
self.assertListEqual(
parsed.composition,
[
["name1"],
_ellipsis,
["a1"],
[AnonymousAxis("12")],
["name2", AnonymousAxis("14")],
],
)
self.assertTrue(parsed.has_non_unitary_anonymous_axes)
self.assertTrue(parsed.has_ellipsis)
self.assertFalse(parsed.has_ellipsis_parenthesized)
parsed = ParsedExpression("(name1 ... a1 12) name2 14")
self.assertEqual(len(parsed.identifiers), 6)
self.assertEqual(
len(parsed.identifiers - {"name1", _ellipsis, "a1", "name2"}), 2
)
self.assertListEqual(
parsed.composition,
[
["name1", _ellipsis, "a1", AnonymousAxis("12")],
["name2"],
[AnonymousAxis("14")],
],
)
self.assertTrue(parsed.has_non_unitary_anonymous_axes)
self.assertTrue(parsed.has_ellipsis)
self.assertTrue(parsed.has_ellipsis_parenthesized)
| TestParsedExpression |
python | pytest-dev__pytest | testing/test_subtests.py | {
"start": 23237,
"end": 26826
} | class ____:
def create_file(self, pytester: pytest.Pytester) -> None:
pytester.makepyfile(
"""
import logging
def test_foo(subtests):
logging.info("before")
with subtests.test("sub1"):
print("sub1 stdout")
logging.info("sub1 logging")
logging.debug("sub1 logging debug")
with subtests.test("sub2"):
print("sub2 stdout")
logging.info("sub2 logging")
logging.debug("sub2 logging debug")
assert False
"""
)
def test_capturing_info(self, pytester: pytest.Pytester) -> None:
self.create_file(pytester)
result = pytester.runpytest("--log-level=INFO")
result.stdout.fnmatch_lines(
[
"*___ test_foo [[]sub2[]] __*",
"*-- Captured stdout call --*",
"sub2 stdout",
"*-- Captured log call ---*",
"INFO * before",
"INFO * sub1 logging",
"INFO * sub2 logging",
"*== short test summary info ==*",
]
)
result.stdout.no_fnmatch_line("sub1 logging debug")
result.stdout.no_fnmatch_line("sub2 logging debug")
def test_capturing_debug(self, pytester: pytest.Pytester) -> None:
self.create_file(pytester)
result = pytester.runpytest("--log-level=DEBUG")
result.stdout.fnmatch_lines(
[
"*___ test_foo [[]sub2[]] __*",
"*-- Captured stdout call --*",
"sub2 stdout",
"*-- Captured log call ---*",
"INFO * before",
"INFO * sub1 logging",
"DEBUG * sub1 logging debug",
"INFO * sub2 logging",
"DEBUG * sub2 logging debug",
"*== short test summary info ==*",
]
)
def test_caplog(self, pytester: pytest.Pytester) -> None:
pytester.makepyfile(
"""
import logging
def test(subtests, caplog):
caplog.set_level(logging.INFO)
logging.info("start test")
with subtests.test("sub1"):
logging.info("inside %s", "subtest1")
assert len(caplog.records) == 2
assert caplog.records[0].getMessage() == "start test"
assert caplog.records[1].getMessage() == "inside subtest1"
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*1 passed*",
]
)
def test_no_logging(self, pytester: pytest.Pytester) -> None:
pytester.makepyfile(
"""
import logging
def test(subtests):
logging.info("start log line")
with subtests.test("sub passing"):
logging.info("inside %s", "passing log line")
with subtests.test("sub failing"):
logging.info("inside %s", "failing log line")
assert False
logging.info("end log line")
"""
)
result = pytester.runpytest("-p no:logging")
result.stdout.fnmatch_lines(
[
"*2 failed in*",
]
)
result.stdout.no_fnmatch_line("*root:test_no_logging.py*log line*")
| TestLogging |
python | PyCQA__pylint | tests/functional/r/regression/regression_property_no_member_2641.py | {
"start": 690,
"end": 919
} | class ____(Person):
def __init__(self, name, age, tel):
super().__init__(name, age)
self.tel = tel
MS = Myself("Matheus Saraiva", 36, "988070350")
WI = Wife("Joice Saraiva", 34, "999923554")
print(WI.name)
| Wife |
python | facebookresearch__faiss | faiss/gpu/test/test_contrib_gpu.py | {
"start": 3160,
"end": 4431
} | class ____(unittest.TestCase):
def do_test(self, factory_string):
ds = datasets.SyntheticDataset(32, 2000, 4000, 1000)
k = 10
index = faiss.index_factory(ds.d, factory_string)
index.train(ds.get_train())
index.add(ds.get_database())
index.nprobe = 5
Dref, Iref = index.search(ds.get_queries(), k)
res = faiss.StandardGpuResources()
def pairwise_distances(xq, xb, metric=faiss.METRIC_L2):
return faiss.pairwise_distance_gpu(
res, xq, xb, metric=faiss.METRIC_L2)
def knn_function(xq, xb, k, metric=faiss.METRIC_L2):
return faiss.knn_gpu(res, xq, xb, k, metric=faiss.METRIC_L2)
for method in "pairwise_distances", "knn_function":
Dnew, Inew = big_batch_search.big_batch_search(
index, ds.get_queries(),
k, method=method,
pairwise_distances=pairwise_distances,
knn=knn_function
)
self.assertLess((Inew != Iref).sum() / Iref.size, 1e-4)
np.testing.assert_almost_equal(Dnew, Dref, decimal=4)
def test_Flat(self):
self.do_test("IVF64,Flat")
def test_PQ(self):
self.do_test("IVF64,PQ4np")
| TestBigBatchSearch |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/type/typemap.py | {
"start": 514,
"end": 6844
} | class ____(OrderedDict):
def __init__(self, types):
super(GraphQLTypeMap, self).__init__()
self.update(reduce(self.reducer, types, OrderedDict()))
self._possible_type_map = defaultdict(set)
# Keep track of all implementations by interface name.
self._implementations = {}
for gql_type in self.values():
if isinstance(gql_type, GraphQLObjectType):
for interface in gql_type.interfaces:
self._implementations.setdefault(interface.name, []).append(gql_type)
# Enforce correct interface implementations.
for type in self.values():
if isinstance(type, GraphQLObjectType):
for interface in type.interfaces:
self.assert_object_implements_interface(self, type, interface)
def get_possible_types(self, abstract_type):
if isinstance(abstract_type, GraphQLUnionType):
return abstract_type.types
assert isinstance(abstract_type, GraphQLInterfaceType)
return self._implementations.get(abstract_type.name, None)
def is_possible_type(self, abstract_type, possible_type):
possible_types = self.get_possible_types(abstract_type)
assert isinstance(possible_types, Sequence), (
'Could not find possible implementing types for ${} in ' +
'schema. Check that schema.types is defined and is an array of' +
'all possible types in the schema.'
).format(abstract_type)
if not self._possible_type_map[abstract_type.name]:
self._possible_type_map[abstract_type.name].update([p.name for p in possible_types])
return possible_type.name in self._possible_type_map[abstract_type.name]
@classmethod
def reducer(cls, map, type):
if not type:
return map
if isinstance(type, GraphQLList) or isinstance(type, GraphQLNonNull):
return cls.reducer(map, type.of_type)
if type.name in map:
assert map[type.name] == type, (
'Schema must contain unique named types but contains multiple types named "{}".'
).format(type.name)
return map
map[type.name] = type
reduced_map = map
if isinstance(type, (GraphQLUnionType)):
for t in type.types:
reduced_map = cls.reducer(reduced_map, t)
if isinstance(type, GraphQLObjectType):
for t in type.interfaces:
reduced_map = cls.reducer(reduced_map, t)
if isinstance(type, (GraphQLObjectType, GraphQLInterfaceType, GraphQLInputObjectType)):
field_map = type.fields
type_is_input = isinstance(type, GraphQLInputObjectType)
for field_name, field in field_map.items():
if type_is_input:
assert isinstance(field, GraphQLInputObjectField), (
'{}.{} must be an instance of GraphQLInputObjectField.'.format(type, field_name)
)
assert is_input_type(field.type), (
'{}.{} field type must be Input Type but got: {}.'.format(type, field_name, field.type)
)
else:
assert isinstance(field, (GraphQLField, GraphQLField)), (
'{}.{} must be an instance of GraphQLField.'.format(type, field_name)
)
assert is_output_type(field.type), (
'{}.{} field type must be Output Type but got: {}.'.format(type, field_name, field.type)
)
for arg_name, arg in field.args.items():
assert isinstance(arg, (GraphQLArgument, GraphQLArgument)), (
'{}.{}({}:) argument must be an instance of GraphQLArgument.'.format(type, field_name, arg_name)
)
assert is_input_type(arg.type), (
'{}.{}({}:) argument type must be Input Type but got: {}.'.format(type, field_name, arg_name,
arg.type)
)
reduced_map = cls.reducer(reduced_map, arg.type)
reduced_map = cls.reducer(reduced_map, getattr(field, 'type', None))
return reduced_map
@classmethod
def assert_object_implements_interface(cls, schema, object, interface):
object_field_map = object.fields
interface_field_map = interface.fields
for field_name, interface_field in interface_field_map.items():
object_field = object_field_map.get(field_name)
assert object_field, '"{}" expects field "{}" but "{}" does not provide it.'.format(
interface, field_name, object
)
assert is_type_sub_type_of(schema, object_field.type, interface_field.type), (
'{}.{} expects type "{}" but {}.{} provides type "{}".'
).format(interface, field_name, interface_field.type, object, field_name, object_field.type)
for arg_name, interface_arg in interface_field.args.items():
object_arg = object_field.args.get(arg_name)
assert object_arg, (
'{}.{} expects argument "{}" but {}.{} does not provide it.'
).format(interface, field_name, arg_name, object, field_name)
assert is_equal_type(interface_arg.type, object_arg.type), (
'{}.{}({}:) expects type "{}" but {}.{}({}:) provides type "{}".'
).format(interface, field_name, arg_name, interface_arg.type, object, field_name, arg_name, object_arg.type)
for arg_name, object_arg in object_field.args.items():
interface_arg = interface_field.args.get(arg_name)
if not interface_arg:
assert not isinstance(object_arg.type, GraphQLNonNull), (
'{}.{}({}:) is of required type '
'"{}" but is not also provided by the '
'interface {}.{}.'
).format(object, field_name, arg_name, object_arg.type, interface, field_name)
| GraphQLTypeMap |
python | docker__docker-py | tests/unit/utils_proxy_test.py | {
"start": 465,
"end": 2784
} | class ____(unittest.TestCase):
def test_from_dict(self):
config = ProxyConfig.from_dict({
'httpProxy': HTTP,
'httpsProxy': HTTPS,
'ftpProxy': FTP,
'noProxy': NO_PROXY
})
self.assertEqual(CONFIG.http, config.http)
self.assertEqual(CONFIG.https, config.https)
self.assertEqual(CONFIG.ftp, config.ftp)
self.assertEqual(CONFIG.no_proxy, config.no_proxy)
def test_new(self):
config = ProxyConfig()
self.assertIsNone(config.http)
self.assertIsNone(config.https)
self.assertIsNone(config.ftp)
self.assertIsNone(config.no_proxy)
config = ProxyConfig(http='a', https='b', ftp='c', no_proxy='d')
self.assertEqual(config.http, 'a')
self.assertEqual(config.https, 'b')
self.assertEqual(config.ftp, 'c')
self.assertEqual(config.no_proxy, 'd')
def test_truthiness(self):
assert not ProxyConfig()
assert ProxyConfig(http='non-zero')
assert ProxyConfig(https='non-zero')
assert ProxyConfig(ftp='non-zero')
assert ProxyConfig(no_proxy='non-zero')
def test_environment(self):
self.assertDictEqual(CONFIG.get_environment(), ENV)
empty = ProxyConfig()
self.assertDictEqual(empty.get_environment(), {})
def test_inject_proxy_environment(self):
# Proxy config is non null, env is None.
self.assertSetEqual(
set(CONFIG.inject_proxy_environment(None)),
{f'{k}={v}' for k, v in ENV.items()})
# Proxy config is null, env is None.
self.assertIsNone(ProxyConfig().inject_proxy_environment(None), None)
env = ['FOO=BAR', 'BAR=BAZ']
# Proxy config is non null, env is non null
actual = CONFIG.inject_proxy_environment(env)
expected = [f'{k}={v}' for k, v in ENV.items()] + env
# It's important that the first 8 variables are the ones from the proxy
# config, and the last 2 are the ones from the input environment
self.assertSetEqual(set(actual[:8]), set(expected[:8]))
self.assertSetEqual(set(actual[-2:]), set(expected[-2:]))
# Proxy is null, and is non null
self.assertListEqual(ProxyConfig().inject_proxy_environment(env), env)
| ProxyConfigTest |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/expressions/literal.py | {
"start": 599,
"end": 2208
} | class ____(Expr):
__slots__ = ("value",)
_non_child = ("dtype", "value")
value: Any # Python scalar
def __init__(self, dtype: DataType, value: Any) -> None:
if value is None and dtype.id() == plc.TypeId.EMPTY:
# TypeId.EMPTY not supported by libcudf
# cuDF Python also maps EMPTY to INT8
dtype = DataType(pl.datatypes.Int8())
self.dtype = dtype
self.value = value
self.children = ()
self.is_pointwise = True
def do_evaluate(
self, df: DataFrame, *, context: ExecutionContext = ExecutionContext.FRAME
) -> Column:
"""Evaluate this expression given a dataframe for context."""
return Column(
plc.Column.from_scalar(
plc.Scalar.from_py(self.value, self.dtype.plc_type, stream=df.stream),
1,
stream=df.stream,
),
dtype=self.dtype,
)
@property
def agg_request(self) -> NoReturn: # noqa: D102
raise NotImplementedError(
"Not expecting to require agg request of literal"
) # pragma: no cover
def astype(self, dtype: DataType) -> Literal:
"""Cast self to dtype."""
if self.value is None:
return Literal(dtype, self.value)
else:
# Use polars to cast instead of pylibcudf
# since there are just Python scalars
casted = pl.Series(values=[self.value], dtype=self.dtype.polars_type).cast(
dtype.polars_type
)[0]
return Literal(dtype, casted)
| Literal |
python | openai__openai-python | src/openai/types/chat/chat_completion_assistant_message_param.py | {
"start": 1334,
"end": 2441
} | class ____(TypedDict, total=False):
role: Required[Literal["assistant"]]
"""The role of the messages author, in this case `assistant`."""
audio: Optional[Audio]
"""
Data about a previous audio response from the model.
[Learn more](https://platform.openai.com/docs/guides/audio).
"""
content: Union[str, Iterable[ContentArrayOfContentPart], None]
"""The contents of the assistant message.
Required unless `tool_calls` or `function_call` is specified.
"""
function_call: Optional[FunctionCall]
"""Deprecated and replaced by `tool_calls`.
The name and arguments of a function that should be called, as generated by the
model.
"""
name: str
"""An optional name for the participant.
Provides the model information to differentiate between participants of the same
role.
"""
refusal: Optional[str]
"""The refusal message by the assistant."""
tool_calls: Iterable[ChatCompletionMessageToolCallUnionParam]
"""The tool calls generated by the model, such as function calls."""
| ChatCompletionAssistantMessageParam |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/triggers/test_sqs.py | {
"start": 3717,
"end": 4724
} | class ____:
@pytest.mark.usefixtures("collect_queue_param_deprecation_warning")
def test_provider_integrations_with_queue_param(self, cleanup_providers_manager):
queue = "https://sqs.us-east-1.amazonaws.com/0123456789/Test"
from airflow.providers.amazon.aws.triggers.sqs import SqsSensorTrigger
from airflow.providers.common.messaging.triggers.msg_queue import MessageQueueTrigger
trigger = MessageQueueTrigger(queue=queue)
assert isinstance(trigger.trigger, SqsSensorTrigger)
def test_provider_integrations_with_scheme_param(self, cleanup_providers_manager):
from airflow.providers.amazon.aws.triggers.sqs import SqsSensorTrigger
from airflow.providers.common.messaging.triggers.msg_queue import MessageQueueTrigger
trigger = MessageQueueTrigger(
scheme="sqs", sqs_queue="https://sqs.us-east-1.amazonaws.com/0123456789/Test"
)
assert isinstance(trigger.trigger, SqsSensorTrigger)
| TestMessageQueueTrigger |
python | RaRe-Technologies__gensim | gensim/corpora/hashdictionary.py | {
"start": 1212,
"end": 13212
} | class ____(utils.SaveLoad, dict):
"""Mapping between words and their integer ids, using a hashing function.
Unlike :class:`~gensim.corpora.dictionary.Dictionary`,
building a :class:`~gensim.corpora.hashdictionary.HashDictionary` before using it **isn't a necessary step**.
You can start converting words to ids immediately, without training on a corpus.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import HashDictionary
>>>
>>> dct = HashDictionary(debug=False) # needs no training corpus!
>>>
>>> texts = [['human', 'interface', 'computer']]
>>> dct.doc2bow(texts[0])
[(10608, 1), (12466, 1), (31002, 1)]
"""
def __init__(self, documents=None, id_range=32000, myhash=zlib.adler32, debug=True):
"""
Parameters
----------
documents : iterable of iterable of str, optional
Iterable of documents. If given, used to collect additional corpus statistics.
:class:`~gensim.corpora.hashdictionary.HashDictionary` can work
without these statistics (optional parameter).
id_range : int, optional
Number of hash-values in table, used as `id = myhash(key) %% id_range`.
myhash : function, optional
Hash function, should support interface `myhash(str) -> int`, uses `zlib.adler32` by default.
debug : bool, optional
Store which tokens have mapped to a given id? **Will use a lot of RAM**.
If you find yourself running out of memory (or not sure that you really need raw tokens),
keep `debug=False`.
"""
self.myhash = myhash # hash fnc: string->integer
self.id_range = id_range # hash range: id = myhash(key) % id_range
self.debug = debug
# the following (potentially massive!) dictionaries are only formed if `debug` is True
self.token2id = {}
self.id2token = {} # reverse mapping int->set(words)
self.dfs = {} # token_id -> how many documents this token_id appeared in
self.dfs_debug = {} # token_string->how many documents this word appeared in
self.num_docs = 0 # number of documents processed
self.num_pos = 0 # total number of corpus positions
self.num_nnz = 0 # total number of non-zeroes in the BOW matrix
self.allow_update = True
if documents is not None:
self.add_documents(documents)
def __getitem__(self, tokenid):
"""Get all words that have mapped to the given id so far, as a set.
Warnings
--------
Works only if you initialized your :class:`~gensim.corpora.hashdictionary.HashDictionary` object
with `debug=True`.
Parameters
----------
tokenid : int
Token identifier (result of hashing).
Return
------
set of str
Set of all words that have mapped to this id.
"""
return self.id2token.get(tokenid, set())
def restricted_hash(self, token):
"""Calculate id of the given token.
Also keep track of what words were mapped to what ids, if `debug=True` was set in the constructor.
Parameters
----------
token : str
Input token.
Return
------
int
Hash value of `token`.
"""
h = self.myhash(utils.to_utf8(token)) % self.id_range
if self.debug:
self.token2id[token] = h
self.id2token.setdefault(h, set()).add(token)
return h
def __len__(self):
"""Get the number of distinct ids = the entire dictionary size."""
return self.id_range
def keys(self):
"""Get a list of all token ids."""
return range(len(self))
def __str__(self):
return "HashDictionary(%i id range)" % len(self)
@staticmethod
def from_documents(*args, **kwargs):
return HashDictionary(*args, **kwargs)
def add_documents(self, documents):
"""Collect corpus statistics from a corpus.
Warnings
--------
Useful only if `debug=True`, to build the reverse `id=>set(words)` mapping.
Notes
-----
This is only a convenience wrapper for calling `doc2bow` on each document with `allow_update=True`.
Parameters
----------
documents : iterable of list of str
Collection of documents.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import HashDictionary
>>>
>>> dct = HashDictionary(debug=True) # needs no training corpus!
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> "sparta" in dct.token2id
False
>>> dct.add_documents([["this", "is", "sparta"], ["just", "joking"]])
>>> "sparta" in dct.token2id
True
"""
for docno, document in enumerate(documents):
if docno % 10000 == 0:
logger.info("adding document #%i to %s", docno, self)
self.doc2bow(document, allow_update=True) # ignore the result, here we only care about updating token ids
logger.info(
"built %s from %i documents (total %i corpus positions)",
self, self.num_docs, self.num_pos
)
def doc2bow(self, document, allow_update=False, return_missing=False):
"""Convert a sequence of words `document` into the bag-of-words format of `[(word_id, word_count)]`
(e.g. `[(1, 4), (150, 1), (2005, 2)]`).
Notes
-----
Each word is assumed to be a **tokenized and normalized** string. No further preprocessing
is done on the words in `document`: you have to apply tokenization, stemming etc before calling this method.
If `allow_update` or `self.allow_update` is set, then also update the dictionary in the process: update overall
corpus statistics and document frequencies. For each id appearing in this document, increase its document
frequency (`self.dfs`) by one.
Parameters
----------
document : sequence of str
A sequence of word tokens = **tokenized and normalized** strings.
allow_update : bool, optional
Update corpus statistics and if `debug=True`, also the reverse id=>word mapping?
return_missing : bool, optional
Not used. Only here for compatibility with the Dictionary class.
Return
------
list of (int, int)
Document in Bag-of-words (BoW) format.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import HashDictionary
>>>
>>> dct = HashDictionary()
>>> dct.doc2bow(["this", "is", "máma"])
[(1721, 1), (5280, 1), (22493, 1)]
"""
result = {}
missing = {}
document = sorted(document) # convert the input to plain list (needed below)
for word_norm, group in itertools.groupby(document):
frequency = len(list(group)) # how many times does this word appear in the input document
tokenid = self.restricted_hash(word_norm)
result[tokenid] = result.get(tokenid, 0) + frequency
if self.debug:
# increment document count for each unique token that appeared in the document
self.dfs_debug[word_norm] = self.dfs_debug.get(word_norm, 0) + 1
if allow_update or self.allow_update:
self.num_docs += 1
self.num_pos += len(document)
self.num_nnz += len(result)
if self.debug:
# increment document count for each unique tokenid that appeared in the document
# done here, because several words may map to the same tokenid
for tokenid in result.keys():
self.dfs[tokenid] = self.dfs.get(tokenid, 0) + 1
# return tokenids, in ascending id order
result = sorted(result.items())
if return_missing:
return result, missing
else:
return result
def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000):
"""Filter tokens in the debug dictionary by their frequency.
Since :class:`~gensim.corpora.hashdictionary.HashDictionary` id range is fixed and doesn't depend on the number
of tokens seen, this doesn't really "remove" anything. It only clears some
internal corpus statistics, for easier debugging and a smaller RAM footprint.
Warnings
--------
Only makes sense when `debug=True`.
Parameters
----------
no_below : int, optional
Keep tokens which are contained in at least `no_below` documents.
no_above : float, optional
Keep tokens which are contained in no more than `no_above` documents
(fraction of total corpus size, not an absolute number).
keep_n : int, optional
Keep only the first `keep_n` most frequent tokens.
Notes
-----
For tokens that appear in:
#. Less than `no_below` documents (absolute number) or \n
#. More than `no_above` documents (fraction of total corpus size, **not absolute number**).
#. After (1) and (2), keep only the first `keep_n` most frequent tokens (or keep all if `None`).
"""
no_above_abs = int(no_above * self.num_docs) # convert fractional threshold to absolute threshold
ok = [item for item in self.dfs_debug.items() if no_below <= item[1] <= no_above_abs]
ok = frozenset(word for word, freq in sorted(ok, key=lambda x: -x[1])[:keep_n])
self.dfs_debug = {word: freq for word, freq in self.dfs_debug.items() if word in ok}
self.token2id = {token: tokenid for token, tokenid in self.token2id.items() if token in self.dfs_debug}
self.id2token = {
tokenid: {token for token in tokens if token in self.dfs_debug}
for tokenid, tokens in self.id2token.items()
}
self.dfs = {tokenid: freq for tokenid, freq in self.dfs.items() if self.id2token.get(tokenid, False)}
# for word->document frequency
logger.info(
"kept statistics for which were in no less than %i and no more than %i (=%.1f%%) documents",
no_below, no_above_abs, 100.0 * no_above
)
def save_as_text(self, fname):
"""Save the debug token=>id mapping to a text file.
Warnings
--------
Only makes sense when `debug=True`, for debugging.
Parameters
----------
fname : str
Path to output file.
Notes
-----
The format is:
`id[TAB]document frequency of this id[TAB]tab-separated set of words in UTF8 that map to this id[NEWLINE]`.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import HashDictionary
>>> from gensim.test.utils import get_tmpfile
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> data = HashDictionary(corpus)
>>> data.save_as_text(get_tmpfile("dictionary_in_text_format"))
"""
logger.info("saving %s mapping to %s" % (self, fname))
with utils.open(fname, 'wb') as fout:
for tokenid in self.keys():
words = sorted(self[tokenid])
if words:
words_df = [(word, self.dfs_debug.get(word, 0)) for word in words]
words_df = ["%s(%i)" % item for item in sorted(words_df, key=lambda x: -x[1])]
words_df = '\t'.join(words_df)
fout.write(utils.to_utf8("%i\t%i\t%s\n" % (tokenid, self.dfs.get(tokenid, 0), words_df)))
| HashDictionary |
python | TheAlgorithms__Python | data_structures/binary_tree/binary_tree_traversals.py | {
"start": 198,
"end": 5536
} | class ____:
data: int
left: Node | None = None
right: Node | None = None
def make_tree() -> Node | None:
r"""
The below tree
1
/ \
2 3
/ \
4 5
"""
tree = Node(1)
tree.left = Node(2)
tree.right = Node(3)
tree.left.left = Node(4)
tree.left.right = Node(5)
return tree
def preorder(root: Node | None) -> Generator[int]:
"""
Pre-order traversal visits root node, left subtree, right subtree.
>>> list(preorder(make_tree()))
[1, 2, 4, 5, 3]
"""
if not root:
return
yield root.data
yield from preorder(root.left)
yield from preorder(root.right)
def postorder(root: Node | None) -> Generator[int]:
"""
Post-order traversal visits left subtree, right subtree, root node.
>>> list(postorder(make_tree()))
[4, 5, 2, 3, 1]
"""
if not root:
return
yield from postorder(root.left)
yield from postorder(root.right)
yield root.data
def inorder(root: Node | None) -> Generator[int]:
"""
In-order traversal visits left subtree, root node, right subtree.
>>> list(inorder(make_tree()))
[4, 2, 5, 1, 3]
"""
if not root:
return
yield from inorder(root.left)
yield root.data
yield from inorder(root.right)
def reverse_inorder(root: Node | None) -> Generator[int]:
"""
Reverse in-order traversal visits right subtree, root node, left subtree.
>>> list(reverse_inorder(make_tree()))
[3, 1, 5, 2, 4]
"""
if not root:
return
yield from reverse_inorder(root.right)
yield root.data
yield from reverse_inorder(root.left)
def height(root: Node | None) -> int:
"""
Recursive function for calculating the height of the binary tree.
>>> height(None)
0
>>> height(make_tree())
3
"""
return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Generator[int]:
"""
Returns a list of nodes value from a whole binary tree in Level Order Traverse.
Level Order traverse: Visit nodes of the tree level-by-level.
>>> list(level_order(make_tree()))
[1, 2, 3, 4, 5]
"""
if root is None:
return
process_queue = deque([root])
while process_queue:
node = process_queue.popleft()
yield node.data
if node.left:
process_queue.append(node.left)
if node.right:
process_queue.append(node.right)
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Generator[int]:
"""
Returns a list of nodes value from a particular level:
Left to right direction of the binary tree.
>>> list(get_nodes_from_left_to_right(make_tree(), 1))
[1]
>>> list(get_nodes_from_left_to_right(make_tree(), 2))
[2, 3]
"""
def populate_output(root: Node | None, level: int) -> Generator[int]:
if not root:
return
if level == 1:
yield root.data
elif level > 1:
yield from populate_output(root.left, level - 1)
yield from populate_output(root.right, level - 1)
yield from populate_output(root, level)
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Generator[int]:
"""
Returns a list of nodes value from a particular level:
Right to left direction of the binary tree.
>>> list(get_nodes_from_right_to_left(make_tree(), 1))
[1]
>>> list(get_nodes_from_right_to_left(make_tree(), 2))
[3, 2]
"""
def populate_output(root: Node | None, level: int) -> Generator[int]:
if not root:
return
if level == 1:
yield root.data
elif level > 1:
yield from populate_output(root.right, level - 1)
yield from populate_output(root.left, level - 1)
yield from populate_output(root, level)
def zigzag(root: Node | None) -> Generator[int]:
"""
ZigZag traverse:
Returns a list of nodes value from left to right and right to left, alternatively.
>>> list(zigzag(make_tree()))
[1, 3, 2, 4, 5]
"""
if root is None:
return
flag = 0
height_tree = height(root)
for h in range(1, height_tree + 1):
if not flag:
yield from get_nodes_from_left_to_right(root, h)
flag = 1
else:
yield from get_nodes_from_right_to_left(root, h)
flag = 0
def main() -> None: # Main function for testing.
# Create binary tree.
root = make_tree()
# All Traversals of the binary are as follows:
print(f"In-order Traversal: {list(inorder(root))}")
print(f"Reverse In-order Traversal: {list(reverse_inorder(root))}")
print(f"Pre-order Traversal: {list(preorder(root))}")
print(f"Post-order Traversal: {list(postorder(root))}", "\n")
print(f"Height of Tree: {height(root)}", "\n")
print("Complete Level Order Traversal: ")
print(f"{list(level_order(root))} \n")
print("Level-wise order Traversal: ")
for level in range(1, height(root) + 1):
print(f"Level {level}:", list(get_nodes_from_left_to_right(root, level=level)))
print("\nZigZag order Traversal: ")
print(f"{list(zigzag(root))}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| Node |
python | apache__airflow | providers/google/tests/unit/google/cloud/triggers/test_cloud_storage_transfer_service.py | {
"start": 3061,
"end": 11575
} | class ____:
def test_serialize(self, trigger):
class_path, serialized = trigger.serialize()
assert class_path == CLASS_PATH
assert serialized == {
"project_id": PROJECT_ID,
"job_names": JOB_NAMES,
"poll_interval": POLL_INTERVAL,
"gcp_conn_id": GCP_CONN_ID,
}
def test_get_async_hook(self, trigger):
hook = trigger.get_async_hook()
assert isinstance(hook, CloudDataTransferServiceAsyncHook)
assert hook.project_id == PROJECT_ID
@pytest.mark.asyncio
@mock.patch(ASYNC_HOOK_CLASS_PATH + ".get_latest_operation")
@mock.patch(ASYNC_HOOK_CLASS_PATH + ".get_jobs")
async def test_run(self, get_jobs, get_latest_operation, trigger):
get_jobs.return_value = mock_jobs(names=JOB_NAMES, latest_operation_names=LATEST_OPERATION_NAMES)
get_latest_operation.side_effect = [
create_mock_operation(status=TransferOperation.Status.SUCCESS, name="operation_" + job_name)
for job_name in JOB_NAMES
]
expected_event = TriggerEvent(
{
"status": "success",
"message": f"Transfer jobs {JOB_0}, {JOB_1} completed successfully",
}
)
generator = trigger.run()
actual_event = await generator.asend(None)
assert actual_event == expected_event
@pytest.mark.parametrize(
"status",
[
TransferOperation.Status.STATUS_UNSPECIFIED,
TransferOperation.Status.IN_PROGRESS,
TransferOperation.Status.PAUSED,
TransferOperation.Status.QUEUED,
],
)
@pytest.mark.asyncio
@mock.patch("asyncio.sleep")
@mock.patch(ASYNC_HOOK_CLASS_PATH + ".get_latest_operation", autospec=True)
@mock.patch(ASYNC_HOOK_CLASS_PATH + ".get_jobs", autospec=True)
async def test_run_poll_interval(self, get_jobs, get_latest_operation, mock_sleep, trigger, status):
get_jobs.side_effect = [
mock_jobs(names=JOB_NAMES, latest_operation_names=LATEST_OPERATION_NAMES),
mock_jobs(names=JOB_NAMES, latest_operation_names=LATEST_OPERATION_NAMES),
]
get_latest_operation.side_effect = [
create_mock_operation(status=status, name="operation_" + job_name) for job_name in JOB_NAMES
] + [
create_mock_operation(status=TransferOperation.Status.SUCCESS, name="operation_" + job_name)
for job_name in JOB_NAMES
]
expected_event = TriggerEvent(
{
"status": "success",
"message": f"Transfer jobs {JOB_0}, {JOB_1} completed successfully",
}
)
generator = trigger.run()
actual_event = await generator.asend(None)
assert actual_event == expected_event
mock_sleep.assert_called_once_with(POLL_INTERVAL)
@pytest.mark.parametrize(
("latest_operations_names", "expected_failed_job"),
[
([None, LATEST_OPERATION_NAME_1], JOB_0),
([LATEST_OPERATION_NAME_0, None], JOB_1),
],
)
@pytest.mark.asyncio
@mock.patch(ASYNC_HOOK_CLASS_PATH + ".get_latest_operation")
@mock.patch(ASYNC_HOOK_CLASS_PATH + ".get_jobs")
async def test_run_error_job_has_no_latest_operation(
self, get_jobs, get_latest_operation, trigger, latest_operations_names, expected_failed_job
):
get_jobs.return_value = mock_jobs(names=JOB_NAMES, latest_operation_names=latest_operations_names)
get_latest_operation.side_effect = [
create_mock_operation(status=TransferOperation.Status.SUCCESS, name="operation_" + job_name)
if job_name
else None
for job_name in latest_operations_names
]
expected_event = TriggerEvent(
{
"status": "error",
"message": f"Transfer job {expected_failed_job} has no latest operation.",
}
)
generator = trigger.run()
actual_event = await generator.asend(None)
assert actual_event == expected_event
@pytest.mark.parametrize(
("job_statuses", "failed_operation", "expected_status"),
[
(
[TransferOperation.Status.ABORTED, TransferOperation.Status.SUCCESS],
LATEST_OPERATION_NAME_0,
"ABORTED",
),
(
[TransferOperation.Status.FAILED, TransferOperation.Status.SUCCESS],
LATEST_OPERATION_NAME_0,
"FAILED",
),
(
[TransferOperation.Status.SUCCESS, TransferOperation.Status.ABORTED],
LATEST_OPERATION_NAME_1,
"ABORTED",
),
(
[TransferOperation.Status.SUCCESS, TransferOperation.Status.FAILED],
LATEST_OPERATION_NAME_1,
"FAILED",
),
],
)
@pytest.mark.asyncio
@mock.patch(ASYNC_HOOK_CLASS_PATH + ".get_latest_operation")
@mock.patch(ASYNC_HOOK_CLASS_PATH + ".get_jobs")
async def test_run_error_one_job_failed_or_aborted(
self,
get_jobs,
get_latest_operation,
trigger,
job_statuses,
failed_operation,
expected_status,
):
get_jobs.return_value = mock_jobs(names=JOB_NAMES, latest_operation_names=LATEST_OPERATION_NAMES)
get_latest_operation.side_effect = [
create_mock_operation(status=status, name=operation_name)
for status, operation_name in zip(job_statuses, LATEST_OPERATION_NAMES)
]
expected_event = TriggerEvent(
{
"status": "error",
"message": f"Transfer operation {failed_operation} failed with status {expected_status}",
}
)
generator = trigger.run()
actual_event = await generator.asend(None)
assert actual_event == expected_event
@pytest.mark.asyncio
@mock.patch(ASYNC_HOOK_CLASS_PATH + ".get_latest_operation")
@mock.patch(ASYNC_HOOK_CLASS_PATH + ".get_jobs")
async def test_run_get_jobs_airflow_exception(self, get_jobs, get_latest_operation, trigger):
expected_error_message = "Mock error message"
get_jobs.side_effect = AirflowException(expected_error_message)
get_latest_operation.side_effect = [
create_mock_operation(status=TransferOperation.Status.SUCCESS, name="operation_" + job_name)
for job_name in JOB_NAMES
]
expected_event = TriggerEvent(
{
"status": "error",
"message": expected_error_message,
}
)
generator = trigger.run()
actual_event = await generator.asend(None)
assert actual_event == expected_event
@pytest.mark.asyncio
@mock.patch(ASYNC_HOOK_CLASS_PATH + ".get_latest_operation")
@mock.patch(ASYNC_HOOK_CLASS_PATH + ".get_jobs")
async def test_run_get_latest_operation_airflow_exception(self, get_jobs, get_latest_operation, trigger):
get_jobs.return_value = mock_jobs(names=JOB_NAMES, latest_operation_names=LATEST_OPERATION_NAMES)
expected_error_message = "Mock error message"
get_latest_operation.side_effect = AirflowException(expected_error_message)
expected_event = TriggerEvent(
{
"status": "error",
"message": expected_error_message,
}
)
generator = trigger.run()
actual_event = await generator.asend(None)
assert actual_event == expected_event
@pytest.mark.asyncio
@mock.patch(ASYNC_HOOK_CLASS_PATH + ".get_latest_operation")
@mock.patch(ASYNC_HOOK_CLASS_PATH + ".get_jobs")
async def test_run_get_latest_operation_google_api_call_error(
self, get_jobs, get_latest_operation, trigger
):
get_jobs.return_value = mock_jobs(names=JOB_NAMES, latest_operation_names=LATEST_OPERATION_NAMES)
error_message = "Mock error message"
get_latest_operation.side_effect = GoogleAPICallError(error_message)
expected_event = TriggerEvent(
{
"status": "error",
"message": f"{None} {error_message}",
}
)
generator = trigger.run()
actual_event = await generator.asend(None)
assert actual_event == expected_event
| TestCloudStorageTransferServiceCreateJobsTrigger |
python | huggingface__transformers | src/transformers/modeling_outputs.py | {
"start": 107398,
"end": 107876
} | class ____(ModelOutput):
"""
Base class for time series model's predictions outputs that contains the sampled values from the chosen
distribution.
Args:
sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, prediction_length)` or `(batch_size, num_samples, prediction_length, input_size)`):
Sampled values from the chosen distribution.
"""
sequences: Optional[torch.FloatTensor] = None
@dataclass
| SampleTSPredictionOutput |
python | numba__numba | numba/tests/test_caching.py | {
"start": 5880,
"end": 7758
} | class ____(TestCase):
# The source file that will be copied
usecases_file = None
# Make sure this doesn't conflict with another module
modname = None
def setUp(self):
self.tempdir = temp_directory('test_cache')
sys.path.insert(0, self.tempdir)
self.modfile = os.path.join(self.tempdir, self.modname + ".py")
self.cache_dir = os.path.join(self.tempdir, "__pycache__")
shutil.copy(self.usecases_file, self.modfile)
os.chmod(self.modfile, stat.S_IREAD | stat.S_IWRITE)
self.maxDiff = None
def tearDown(self):
sys.modules.pop(self.modname, None)
sys.path.remove(self.tempdir)
def import_module(self):
# Import a fresh version of the test module. All jitted functions
# in the test module will start anew and load overloads from
# the on-disk cache if possible.
old = sys.modules.pop(self.modname, None)
if old is not None:
# Make sure cached bytecode is removed
cached = [old.__cached__]
for fn in cached:
try:
os.unlink(fn)
except FileNotFoundError:
pass
mod = import_dynamic(self.modname)
self.assertEqual(mod.__file__.rstrip('co'), self.modfile)
return mod
def cache_contents(self):
try:
return [fn for fn in os.listdir(self.cache_dir)
if not fn.endswith(('.pyc', ".pyo"))]
except FileNotFoundError:
return []
def get_cache_mtimes(self):
return dict((fn, os.path.getmtime(os.path.join(self.cache_dir, fn)))
for fn in sorted(self.cache_contents()))
def check_pycache(self, n):
c = self.cache_contents()
self.assertEqual(len(c), n, c)
def dummy_test(self):
pass
| BaseCacheTest |
python | django__django | django/views/generic/edit.py | {
"start": 5714,
"end": 5843
} | class ____(TemplateResponseMixin, BaseFormView):
"""A view for displaying a form and rendering a template response."""
| FormView |
python | RaRe-Technologies__gensim | gensim/similarities/termsim.py | {
"start": 754,
"end": 1912
} | class ____(SaveLoad):
"""
Base class = common interface for retrieving the most similar terms for a given term.
See Also
--------
:class:`~gensim.similarities.termsim.SparseTermSimilarityMatrix`
A sparse term similarity matrix built using a term similarity index.
"""
def most_similar(self, term, topn=10):
"""Get most similar terms for a given term.
Return the most similar terms for a given term along with their similarities.
Parameters
----------
term : str
The term for which we are retrieving `topn` most similar terms.
topn : int, optional
The maximum number of most similar terms to `term` that will be retrieved.
Returns
-------
iterable of (str, float)
Most similar terms along with their similarities to `term`. Only terms distinct from
`term` must be returned.
"""
raise NotImplementedError
def __str__(self):
members = ', '.join('%s=%s' % pair for pair in vars(self).items())
return '%s<%s>' % (self.__class__.__name__, members)
| TermSimilarityIndex |
python | ansible__ansible | lib/ansible/module_utils/_internal/_json/_profiles/_module_modern_c2m.py | {
"start": 212,
"end": 1048
} | class ____(_profiles._JSONSerializationProfile["Encoder", "Decoder"]):
encode_strings_as_utf8 = True
@classmethod
def post_init(cls) -> None:
cls.serialize_map = {}
cls.serialize_map.update(cls._common_discard_tags)
cls.serialize_map.update(
{
# The bytes type is not supported, use str instead (future module profiles may support a bytes wrapper distinct from `bytes`).
set: cls.serialize_as_list, # legacy _json_encode_fallback behavior
tuple: cls.serialize_as_list, # JSONEncoder built-in behavior
_datetime.date: _datatag.AnsibleSerializableDate,
_datetime.time: _datatag.AnsibleSerializableTime,
_datetime.datetime: _datatag.AnsibleSerializableDateTime,
}
)
| _Profile |
python | huggingface__transformers | src/transformers/models/seamless_m4t/processing_seamless_m4t.py | {
"start": 955,
"end": 1054
} | class ____(TextKwargs):
src_lang: Optional[str]
tgt_lang: Optional[str]
| SeamlessM4TTextKwargs |
python | astropy__astropy | astropy/coordinates/builtin_frames/hcrs.py | {
"start": 608,
"end": 1610
} | class ____(BaseRADecFrame):
"""
A coordinate or frame in a Heliocentric system, with axes aligned to ICRS.
The ICRS has an origin at the Barycenter and axes which are fixed with
respect to space.
This coordinate system is distinct from ICRS mainly in that it is relative
to the Sun's center-of-mass rather than the solar system Barycenter.
In principle, therefore, this frame should include the effects of
aberration (unlike ICRS), but this is not done, since they are very small,
of the order of 8 milli-arcseconds.
For more background on the ICRS and related coordinate transformations, see
the references provided in the :ref:`astropy:astropy-coordinates-seealso`
section of the documentation.
The frame attributes are listed under **Other Parameters**.
"""
obstime = TimeAttribute(
default=DEFAULT_OBSTIME, doc="The reference time (e.g., time of observation)"
)
# Transformations are defined in icrs_circ_transforms.py
| HCRS |
python | langchain-ai__langchain | libs/core/tests/unit_tests/runnables/test_configurable.py | {
"start": 1444,
"end": 9148
} | class ____(RunnableSerializable[str, str]):
my_other_property: str
@override
def invoke(
self, input: str, config: RunnableConfig | None = None, **kwargs: Any
) -> Any:
return input + self.my_other_property
def my_other_custom_function(self) -> str:
return self.my_other_property
def my_other_custom_function_w_config(self, config: RunnableConfig) -> str: # noqa: ARG002
return self.my_other_property
def test_doubly_set_configurable() -> None:
"""Test that setting a configurable field with a default value works."""
runnable = MyRunnable(my_property="a")
configurable_runnable = runnable.configurable_fields(
my_property=ConfigurableField(
id="my_property",
name="My property",
description="The property to test",
)
)
assert configurable_runnable.invoke("d", config={"my_property": "c"}) == "dc" # type: ignore[arg-type]
def test_alias_set_configurable() -> None:
runnable = MyRunnable(my_property="a")
configurable_runnable = runnable.configurable_fields(
my_property=ConfigurableField(
id="my_property_alias",
name="My property alias",
description="The property to test alias",
)
)
assert (
configurable_runnable.invoke(
"d", config=RunnableConfig(configurable={"my_property_alias": "c"})
)
== "dc"
)
def test_field_alias_set_configurable() -> None:
runnable = MyRunnable(my_property_alias="a") # type: ignore[call-arg]
configurable_runnable = runnable.configurable_fields(
my_property=ConfigurableField(
id="my_property",
name="My property alias",
description="The property to test alias",
)
)
assert (
configurable_runnable.invoke(
"d", config=RunnableConfig(configurable={"my_property": "c"})
)
== "dc"
)
def test_config_passthrough() -> None:
runnable = MyRunnable(my_property="a")
configurable_runnable = runnable.configurable_fields(
my_property=ConfigurableField(
id="my_property",
name="My property",
description="The property to test",
)
)
# first one
with pytest.raises(AttributeError):
configurable_runnable.not_my_custom_function() # type: ignore[attr-defined]
assert configurable_runnable.my_custom_function() == "a" # type: ignore[attr-defined]
assert (
configurable_runnable.my_custom_function_w_config( # type: ignore[attr-defined]
{"configurable": {"my_property": "b"}}
)
== "b"
)
assert (
configurable_runnable.my_custom_function_w_config( # type: ignore[attr-defined]
config={"configurable": {"my_property": "b"}}
)
== "b"
)
# second one
assert (
configurable_runnable.with_config(
configurable={"my_property": "b"}
).my_custom_function() # type: ignore[attr-defined]
== "b"
)
def test_config_passthrough_nested() -> None:
runnable = MyRunnable(my_property="a")
configurable_runnable = runnable.configurable_fields(
my_property=ConfigurableField(
id="my_property",
name="My property",
description="The property to test",
)
).configurable_alternatives(
ConfigurableField(id="which", description="Which runnable to use"),
other=MyOtherRunnable(my_other_property="c"),
)
# first one
with pytest.raises(AttributeError):
configurable_runnable.not_my_custom_function() # type: ignore[attr-defined]
assert configurable_runnable.my_custom_function() == "a" # type: ignore[attr-defined]
assert (
configurable_runnable.my_custom_function_w_config( # type: ignore[attr-defined]
{"configurable": {"my_property": "b"}}
)
== "b"
)
assert (
configurable_runnable.my_custom_function_w_config( # type: ignore[attr-defined]
config={"configurable": {"my_property": "b"}}
)
== "b"
)
assert (
configurable_runnable.with_config(
configurable={"my_property": "b"}
).my_custom_function() # type: ignore[attr-defined]
== "b"
), "function without config can be called w bound config"
assert (
configurable_runnable.with_config(
configurable={"my_property": "b"}
).my_custom_function_w_config( # type: ignore[attr-defined]
)
== "b"
), "func with config arg can be called w bound config without config"
assert (
configurable_runnable.with_config(
configurable={"my_property": "b"}
).my_custom_function_w_config( # type: ignore[attr-defined]
config={"configurable": {"my_property": "c"}}
)
== "c"
), "func with config arg can be called w bound config with config as kwarg"
assert (
configurable_runnable.with_config(
configurable={"my_property": "b"}
).my_custom_function_w_kw_config( # type: ignore[attr-defined]
)
== "b"
), "function with config kwarg can be called w bound config w/out config"
assert (
configurable_runnable.with_config(
configurable={"my_property": "b"}
).my_custom_function_w_kw_config( # type: ignore[attr-defined]
config={"configurable": {"my_property": "c"}}
)
== "c"
), "function with config kwarg can be called w bound config with config"
assert (
configurable_runnable.with_config(configurable={"my_property": "b"})
.with_types()
.my_custom_function() # type: ignore[attr-defined]
== "b"
), "function without config can be called w bound config"
assert (
configurable_runnable.with_config(configurable={"my_property": "b"})
.with_types()
.my_custom_function_w_config( # type: ignore[attr-defined]
)
== "b"
), "func with config arg can be called w bound config without config"
assert (
configurable_runnable.with_config(configurable={"my_property": "b"})
.with_types()
.my_custom_function_w_config( # type: ignore[attr-defined]
config={"configurable": {"my_property": "c"}}
)
== "c"
), "func with config arg can be called w bound config with config as kwarg"
assert (
configurable_runnable.with_config(configurable={"my_property": "b"})
.with_types()
.my_custom_function_w_kw_config( # type: ignore[attr-defined]
)
== "b"
), "function with config kwarg can be called w bound config w/out config"
assert (
configurable_runnable.with_config(configurable={"my_property": "b"})
.with_types()
.my_custom_function_w_kw_config( # type: ignore[attr-defined]
config={"configurable": {"my_property": "c"}}
)
== "c"
), "function with config kwarg can be called w bound config with config"
# second one
with pytest.raises(AttributeError):
configurable_runnable.my_other_custom_function() # type: ignore[attr-defined]
with pytest.raises(AttributeError):
configurable_runnable.my_other_custom_function_w_config( # type: ignore[attr-defined]
{"configurable": {"my_other_property": "b"}}
)
with pytest.raises(AttributeError):
configurable_runnable.with_config(
configurable={"my_other_property": "c", "which": "other"}
).my_other_custom_function() # type: ignore[attr-defined]
| MyOtherRunnable |
python | cython__cython | Cython/Compiler/FlowControl.py | {
"start": 12648,
"end": 12731
} | class ____:
"""Coming from outer closure, might be initialised or not."""
| Unknown |
python | getsentry__sentry | tests/sentry/issues/auto_source_code_config/test_process_event.py | {
"start": 27862,
"end": 48353
} | class ____(LanguageSpecificDeriveCodeMappings):
platform = "java"
def test_extension_in_the_wrong_configuration(self) -> None:
# We do not include the extension in the configuration to demostrate
# that the correct platform -> extension mapping is needed
with patch(
"sentry.issues.auto_source_code_config.utils.platform.PLATFORMS_CONFIG",
{"java": {"extensions": []}},
):
self._process_and_assert_configuration_changes(
repo_trees={REPO1: ["src/com/example/foo/Bar.sc"]},
frames=[self.frame_from_module("com.example.foo.Bar", "Bar.sc")],
platform=self.platform,
expected_new_code_mappings=[], # Not expected
expected_new_in_app_stack_trace_rules=[], # Not expected,
)
self._process_and_assert_configuration_changes(
repo_trees={REPO1: ["src/com/example/foo/Bar.sc"]},
frames=[self.frame_from_module("com.example.foo.Bar", "Bar.sc")],
platform=self.platform,
expected_new_code_mappings=[
self.code_mapping("com/example/foo/", "src/com/example/foo/")
],
expected_new_in_app_stack_trace_rules=["stack.module:com.example.** +app"],
)
def test_marked_in_app_already(self) -> None:
self._process_and_assert_configuration_changes(
repo_trees={REPO1: ["src/com/example/foo/Bar.kt"]},
# The developer may have marked the frame as in-app in the SDK
frames=[self.frame_from_module("com.example.foo.Bar", "Bar.kt", in_app=True)],
platform=self.platform,
expected_new_code_mappings=[
self.code_mapping("com/example/foo/", "src/com/example/foo/")
],
expected_new_in_app_stack_trace_rules=[
"stack.module:com.example.** +app",
],
)
def test_marked_in_app_and_code_mapping_already_exists(self) -> None:
"""Test that the in-app rule is created regardless of whether the code mapping already exists"""
# The developer may have already created the code mapping and repository
self.create_repo_and_code_mapping("REPO1", "com/example/foo/", "src/com/example/foo/")
self._process_and_assert_configuration_changes(
repo_trees={REPO1: ["src/com/example/foo/Bar.kt"]},
# The developer may have marked the frame as in-app in the SDK
frames=[self.frame_from_module("com.example.foo.Bar", "Bar.kt", in_app=True)],
platform=self.platform,
# We're not expecting to create anything new
expected_new_code_mappings=[],
# The in-app rule will still be created
expected_new_in_app_stack_trace_rules=[
"stack.module:com.example.** +app",
],
)
assert RepositoryProjectPathConfig.objects.count() == 1
def test_short_packages(self) -> None:
self._process_and_assert_configuration_changes(
repo_trees={
REPO1: [
"src/Foo.java",
"src/a/Bar.java",
"src/x/y/Baz.java",
"src/foo/bar/baz/Qux.java",
]
},
frames=[
# This will not create a code mapping because
# the stacktrace root would be empty
self.frame_from_module("Foo", "Foo.java"),
self.frame_from_module("a.Bar", "Bar.java"),
self.frame_from_module("x.y.Baz", "Baz.java"),
self.frame_from_module("foo.bar.baz.Qux", "Qux.java"),
],
platform=self.platform,
expected_new_code_mappings=[
self.code_mapping("a/", "src/a/"),
self.code_mapping("x/y/", "src/x/y/"),
self.code_mapping("foo/bar/baz/", "src/foo/bar/baz/"),
],
expected_new_in_app_stack_trace_rules=[
"stack.module:a.** +app",
"stack.module:x.y.** +app",
# This rule, unlike the previous two, does not have the same granularity
# as its related code mapping (foo/bar/baz/ vs foo/bar/)
"stack.module:foo.bar.** +app",
],
)
def test_handles_dollar_sign_in_module(self) -> None:
self._process_and_assert_configuration_changes(
repo_trees={REPO1: ["src/com/example/foo/Bar.kt"]},
frames=[self.frame_from_module("com.example.foo.Bar$InnerClass", "Bar.kt")],
platform=self.platform,
expected_new_code_mappings=[
self.code_mapping("com/example/foo/", "src/com/example/foo/")
],
expected_new_in_app_stack_trace_rules=["stack.module:com.example.** +app"],
)
def test_multiple_configuration_changes_with_past_changes(self) -> None:
# This block is to emulate the past behavior
# A less granular code mapping already exists
# It would work for com.example.foo but not com.example.bar
# since one is stored under src/main/ while the other is under src/app/
self.create_repo_and_code_mapping("REPO1", "com/example/", "src/main/com/example/")
self.project.update_option(
DERIVED_ENHANCEMENTS_OPTION_KEY,
"stack.module:com.example.** +app",
)
# Test case with multiple frames from different packages
self._process_and_assert_configuration_changes(
repo_trees={
REPO1: [
"src/main/com/example/foo/Bar.kt",
"src/app/com/example/bar/Baz.kt",
"src/lib/org/other/utils/Helper.kt",
]
},
frames=[
self.frame_from_module("com.example.foo.Bar", "Bar.kt"),
self.frame_from_module("com.example.bar.Baz", "Baz.kt"),
self.frame_from_module("org.other.utils.Helper", "Helper.kt"),
],
platform=self.platform,
expected_new_code_mappings=[
self.code_mapping("com/example/foo/", "src/main/com/example/foo/"),
self.code_mapping("com/example/bar/", "src/app/com/example/bar/"),
self.code_mapping("org/other/utils/", "src/lib/org/other/utils/"),
],
expected_new_in_app_stack_trace_rules=["stack.module:org.other.** +app"],
)
# XXX: Ideally we would delete the old code mappings
assert RepositoryProjectPathConfig.objects.count() == 4
assert self.project.get_option(DERIVED_ENHANCEMENTS_OPTION_KEY).split("\n") == [
"stack.module:com.example.** +app",
"stack.module:org.other.** +app",
]
def test_multiple_configuration_changes(self) -> None:
# Test case with multiple frames from different packages
self._process_and_assert_configuration_changes(
repo_trees={
REPO1: [
"src/main/com/example/foo/Bar.kt",
"src/app/com/example/bar/Baz.kt",
"src/lib/org/other/utils/Helper.kt",
]
},
frames=[
self.frame_from_module("com.example.foo.Bar", "Bar.kt"),
self.frame_from_module("com.example.bar.Baz", "Baz.kt"),
self.frame_from_module("org.other.utils.Helper", "Helper.kt"),
],
platform=self.platform,
expected_new_code_mappings=[
self.code_mapping("com/example/foo/", "src/main/com/example/foo/"),
self.code_mapping("com/example/bar/", "src/app/com/example/bar/"),
self.code_mapping("org/other/utils/", "src/lib/org/other/utils/"),
],
expected_new_in_app_stack_trace_rules=[
"stack.module:com.example.** +app",
"stack.module:org.other.** +app",
],
)
def test_country_code_tld(self) -> None:
# We have two packages for the same domain
repo_trees = {
REPO1: [
"src/uk/co/example/foo/Bar.kt",
"src/uk/co/example/bar/Baz.kt",
]
}
foo_package = self.frame_from_module("uk.co.example.foo.Bar", "Bar.kt")
bar_package = self.frame_from_module("uk.co.example.bar.Baz", "Baz.kt")
third_party_package = self.frame_from_module("uk.co.not-example.baz.qux", "qux.kt")
# Only one of the packages is in the first event
frames = [foo_package, third_party_package]
event = self._process_and_assert_configuration_changes(
repo_trees=repo_trees,
frames=frames,
platform=self.platform,
expected_new_code_mappings=[
self.code_mapping(
stack_root="uk/co/example/foo/", source_root="src/uk/co/example/foo/"
),
],
expected_new_in_app_stack_trace_rules=["stack.module:uk.co.example.** +app"],
)
# The event where derivation happens does not have rules applied
assert event.data["metadata"]["in_app_frame_mix"] == "system-only"
# The second event will have the rules applied
event = self._process_and_assert_configuration_changes(
repo_trees=repo_trees,
frames=frames,
platform=self.platform,
)
# It's mixed because the not-example package is a system frame
assert event.data["metadata"]["in_app_frame_mix"] == "mixed"
assert event.data["stacktrace"]["frames"][0]["module"] == "uk.co.example.foo.Bar"
assert event.data["stacktrace"]["frames"][0]["in_app"] is True
assert event.data["stacktrace"]["frames"][1]["module"] == "uk.co.not-example.baz.qux"
assert event.data["stacktrace"]["frames"][1]["in_app"] is False
# Trying the 2nd package will only create a new code mapping
# because the in-app rule is already in place
frames = [bar_package, third_party_package]
event = self._process_and_assert_configuration_changes(
repo_trees=repo_trees,
frames=frames,
platform=self.platform,
expected_new_code_mappings=[
self.code_mapping(
stack_root="uk/co/example/bar/", source_root="src/uk/co/example/bar/"
),
],
)
# The code mapping & in-app-rule of the first event does apply
assert event.data["metadata"]["in_app_frame_mix"] == "mixed"
assert event.data["stacktrace"]["frames"][0]["module"] == "uk.co.example.bar.Baz"
assert event.data["stacktrace"]["frames"][0]["in_app"] is True
assert event.data["stacktrace"]["frames"][1]["module"] == "uk.co.not-example.baz.qux"
assert event.data["stacktrace"]["frames"][1]["in_app"] is False
def test_country_code_tld_with_old_granularity(self) -> None:
# We have two packages for the same domain but source roots
repo_trees = {
REPO1: [
"src/main/uk/co/example/foo/Bar.kt",
"src/app/uk/co/example/bar/Baz.kt",
]
}
frames = [
self.frame_from_module("uk.co.example.foo.Bar", "Bar.kt"),
self.frame_from_module("uk.co.example.bar.Baz", "Baz.kt"),
# This does not belong to the org since it does not show up in the repos
self.frame_from_module("uk.co.not-example.baz.qux", "qux.kt"),
]
# Let's pretend that we have already added the two level tld rule
# This means that the uk.co.not-example.baz.qux will be in-app
repo = RepoAndBranch(name="repo1", branch="default")
# The source root will only work for the foo package
cm = CodeMapping(repo=repo, stacktrace_root="uk/co/", source_path="src/main/uk/co/")
create_code_mapping(self.organization, cm, self.project)
self.project.update_option(DERIVED_ENHANCEMENTS_OPTION_KEY, "stack.module:uk.co.** +app")
# The new code will generate two code mappings with greater granularity
event = self._process_and_assert_configuration_changes(
repo_trees=repo_trees,
frames=frames,
platform=self.platform,
expected_new_code_mappings=[
self.code_mapping(
stack_root="uk/co/example/foo/", source_root="src/main/uk/co/example/foo/"
),
self.code_mapping(
stack_root="uk/co/example/bar/", source_root="src/app/uk/co/example/bar/"
),
],
expected_new_in_app_stack_trace_rules=["stack.module:uk.co.example.** +app"],
)
# XXX: Ideally we would remove the old rules and code mappings
# All frames are in-app because the 2-level tld rule is already in place
assert event.data["metadata"]["in_app_frame_mix"] == "in-app-only"
assert RepositoryProjectPathConfig.objects.count() == 3
assert self.project.get_option(DERIVED_ENHANCEMENTS_OPTION_KEY).split("\n") == [
"stack.module:uk.co.** +app",
"stack.module:uk.co.example.** +app",
]
def test_do_not_clobber_rules(self) -> None:
self._process_and_assert_configuration_changes(
repo_trees={REPO1: ["src/a/Bar.java", "src/x/y/Baz.java"]},
frames=[self.frame_from_module("a.Bar", "Bar.java")],
platform=self.platform,
expected_new_code_mappings=[self.code_mapping("a/", "src/a/")],
expected_new_in_app_stack_trace_rules=["stack.module:a.** +app"],
)
self._process_and_assert_configuration_changes(
repo_trees={REPO1: ["src/a/Bar.java", "src/x/y/Baz.java"]},
frames=[self.frame_from_module("x.y.Baz", "Baz.java")],
platform=self.platform,
expected_new_code_mappings=[self.code_mapping("x/y/", "src/x/y/")],
# Both rules should exist
expected_new_in_app_stack_trace_rules=["stack.module:x.y.** +app"],
)
def test_prevent_creating_duplicate_rules(self) -> None:
# Rules set by the customer prevent configuration changes
self.project.update_option("sentry:grouping_enhancements", "stack.module:foo.bar.** +app")
# Manually created code mapping
self.create_repo_and_code_mapping(REPO1, "foo/bar/", "src/foo/")
# We do not expect code mappings or in-app rules to be created since
# the developer already created the code mapping and in-app rule
self._process_and_assert_configuration_changes(
repo_trees={REPO1: ["src/foo/bar/Baz.java"]},
frames=[self.frame_from_module("foo.bar.Baz", "Baz.java")],
platform=self.platform,
)
def test_basic_case(self) -> None:
repo_trees = {REPO1: ["src/com/example/foo/Bar.kt"]}
frames = [
self.frame_from_module("com.example.foo.Bar", "Bar.kt"),
self.frame_from_module("com.other.foo.Bar", "Bar.kt"),
]
rule = "stack.module:com.example.**"
expected_in_app_rule = f"{rule} +app"
event = self._process_and_assert_configuration_changes(
repo_trees=repo_trees,
frames=frames,
platform=self.platform,
expected_new_code_mappings=[
self.code_mapping(
stack_root="com/example/foo/", source_root="src/com/example/foo/"
),
],
expected_new_in_app_stack_trace_rules=[expected_in_app_rule],
)
# The effects of the configuration changes will be noticed on the second event processing
assert event.data["metadata"]["in_app_frame_mix"] == "system-only"
assert len(event.data["hashes"]) == 1 # Only system hash
system_only_hash = event.data["hashes"][0]
first_enhancements_base64_string = event.data["grouping_config"]["enhancements"]
group_id = event.group_id
# Running a second time will not create any new configurations, however,
# the rules from the previous run will be applied to the event's stack trace
event = self._process_and_assert_configuration_changes(
repo_trees=repo_trees, frames=frames, platform=self.platform
)
assert event.group_id == group_id # The new rules did not cause new groups
assert event.data["metadata"]["in_app_frame_mix"] == "mixed"
second_enhancements_hash = event.data["grouping_config"]["enhancements"]
# The enhancements now contain the automatic rule (+app)
assert second_enhancements_hash != first_enhancements_base64_string
assert len(event.data["hashes"]) == 2
event.data["hashes"].remove(system_only_hash)
in_app_hash = event.data["hashes"][0]
assert in_app_hash != system_only_hash
# The developer will add a rule to invalidate our automatinc rule (-app)
self.project.update_option("sentry:grouping_enhancements", f"{rule} -app")
event = self._process_and_assert_configuration_changes(
repo_trees=repo_trees, frames=frames, platform=self.platform
)
# Back to system-only
assert event.data["metadata"]["in_app_frame_mix"] == "system-only"
assert event.group_id == group_id # It still belongs to the same group
assert event.data["hashes"] == [system_only_hash]
# The enhancements now contain the automatic rule (+app) and the developer's rule (-app)
assert event.data["grouping_config"]["enhancements"] != first_enhancements_base64_string
assert event.data["grouping_config"]["enhancements"] != second_enhancements_hash
def test_categorized_frames_are_not_processed(self) -> None:
# Even though the file is in the repo, it's not processed because it's categorized as internals
repo_trees = {REPO1: ["src/android/app/Activity.java"]}
frame = self.frame_from_module("android.app.Activity", "Activity.java")
self._process_and_assert_configuration_changes(
repo_trees=repo_trees,
frames=[frame],
platform=self.platform,
)
# If we remove the category, it will be processed
with patch(f"{CODE_ROOT}.stacktraces._check_not_categorized", return_value=True):
self._process_and_assert_configuration_changes(
repo_trees=repo_trees,
frames=[frame],
platform=self.platform,
expected_new_code_mappings=[self.code_mapping("android/app/", "src/android/app/")],
expected_new_in_app_stack_trace_rules=["stack.module:android.app.** +app"],
)
def test_multi_module(self) -> None:
# Some Java projects have all modules under the same com/foo/bar directory
# however, some projects have different modules under different directories
# Case 1:
# com.example.multi.foo -> modules/com/example/multi/foo/Bar.kt
# com.example.multi.bar -> modules/com/example/multi/bar/Baz.kt
# Case 2:
# com.example.multi.foo -> modules/modX/com/example/multi/foo/Bar.kt (Notice modX infix)
# com.example.multi.bar -> modules/modY/com/example/multi/bar/Baz.kt (Notice modY infix)
java_module_prefix = "com.example.multi"
module_prefix = java_module_prefix.replace(".", "/") + "/"
repo_trees = {
REPO1: [
f"modules/modX/{module_prefix}foo/Bar.kt",
f"modules/modY/{module_prefix}bar/Baz.kt",
]
}
frames = [
self.frame_from_module(f"{java_module_prefix}.foo.Bar", "Bar.kt"),
self.frame_from_module(f"{java_module_prefix}.bar.Baz", "Baz.kt"),
]
self._process_and_assert_configuration_changes(
repo_trees=repo_trees,
frames=frames,
platform=self.platform,
expected_new_code_mappings=[
self.code_mapping(f"{module_prefix}foo/", f"modules/modX/{module_prefix}foo/"),
self.code_mapping(f"{module_prefix}bar/", f"modules/modY/{module_prefix}bar/"),
],
expected_new_in_app_stack_trace_rules=[f"stack.module:{java_module_prefix}.** +app"],
)
| TestJavaDeriveCodeMappings |
python | doocs__leetcode | solution/1800-1899/1863.Sum of All Subset XOR Totals/Solution2.py | {
"start": 0,
"end": 321
} | class ____:
def subsetXORSum(self, nums: List[int]) -> int:
def dfs(i: int, s: int):
nonlocal ans
if i >= len(nums):
ans += s
return
dfs(i + 1, s)
dfs(i + 1, s ^ nums[i])
ans = 0
dfs(0, 0)
return ans
| Solution |
python | pytorch__pytorch | torch/testing/_internal/common_dist_composable.py | {
"start": 886,
"end": 1335
} | class ____(nn.Module):
def __init__(self, device: torch.device):
super().__init__()
self.l = nn.Linear(100, 100, device=device)
self.seq = nn.Sequential(
nn.ReLU(),
nn.Linear(100, 100, device=device),
nn.ReLU(),
)
self.p = nn.Parameter(torch.randn((100, 100), device=device))
def forward(self, x):
return torch.mm(self.seq(self.l(x)), self.p)
| UnitParamModule |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/translator.py | {
"start": 1854,
"end": 2802
} | class ____:
"""Represents an Airbyte connection, based on data as returned from the API."""
id: str
name: str
stream_prefix: Optional[str]
streams: Mapping[str, "AirbyteStream"]
destination_id: str
@classmethod
def from_connection_details(
cls,
connection_details: Mapping[str, Any],
) -> "AirbyteConnection":
return cls(
id=connection_details["connectionId"],
name=connection_details["name"],
stream_prefix=connection_details.get("prefix"),
streams={
stream_details["stream"]["name"]: AirbyteStream.from_stream_details(
stream_details=stream_details
)
for stream_details in connection_details.get("syncCatalog", {}).get("streams", [])
},
destination_id=connection_details["destinationId"],
)
@whitelist_for_serdes
@record
| AirbyteConnection |
python | weaviate__weaviate-python-client | weaviate/rbac/models.py | {
"start": 3258,
"end": 3525
} | class ____(str, _Action, Enum):
CREATE = "create_tenants"
READ = "read_tenants"
UPDATE = "update_tenants"
DELETE = "delete_tenants"
@staticmethod
def values() -> List[str]:
return [action.value for action in TenantsAction]
| TenantsAction |
python | kamyu104__LeetCode-Solutions | Python/minimum-number-of-operations-to-make-elements-in-array-distinct.py | {
"start": 46,
"end": 463
} | class ____(object):
def minimumOperations(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def ceil_divide(a, b):
return (a+b-1)//b
mx = max(nums)
cnt = [0]*mx
for i in reversed(xrange(len(nums))):
cnt[nums[i]-1] += 1
if cnt[nums[i]-1] == 2:
return ceil_divide(i+1, 3)
return 0
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchMapping1.py | {
"start": 1511,
"end": 1600
} | class ____(TypedDict):
title: str
release_year: int
gross_earnings: float
| Movie |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 107339,
"end": 111736
} | class ____(Metric):
"""Computes the element-wise (weighted) mean of the given tensors.
`MeanTensor` returns a tensor with the same shape of the input tensors. The
mean value is updated by keeping local variables `total` and `count`. The
`total` tracks the sum of the weighted values, and `count` stores the sum of
the weighted counts.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
shape: (Optional) A list of integers, a tuple of integers, or a 1-D Tensor
of type int32. If not specified, the shape is inferred from the values at
the first call of update_state.
Standalone usage:
>>> m = tf.keras.metrics.MeanTensor()
>>> m.update_state([0, 1, 2, 3])
>>> m.update_state([4, 5, 6, 7])
>>> m.result().numpy()
array([2., 3., 4., 5.], dtype=float32)
>>> m.update_state([12, 10, 8, 6], sample_weight= [0, 0.2, 0.5, 1])
>>> m.result().numpy()
array([2. , 3.6363635, 4.8 , 5.3333335], dtype=float32)
>>> m = tf.keras.metrics.MeanTensor(dtype=tf.float64, shape=(1, 4))
>>> m.result().numpy()
array([[0., 0., 0., 0.]])
>>> m.update_state([[0, 1, 2, 3]])
>>> m.update_state([[4, 5, 6, 7]])
>>> m.result().numpy()
array([[2., 3., 4., 5.]])
"""
def __init__(self, name='mean_tensor', dtype=None, shape=None):
super(MeanTensor, self).__init__(name=name, dtype=dtype)
self._shape = None
self._total = None
self._count = None
self._built = False
if shape is not None:
self._build(shape)
def _build(self, shape):
self._shape = tensor_shape.TensorShape(shape)
self._build_input_shape = self._shape
# Create new state variables
self._total = self.add_weight(
'total', shape=shape, initializer=init_ops.zeros_initializer)
self._count = self.add_weight(
'count', shape=shape, initializer=init_ops.zeros_initializer)
with ops.init_scope():
if not context.executing_eagerly():
backend._initialize_variables(backend._get_session()) # pylint: disable=protected-access
self._built = True
@property
def total(self):
return self._total if self._built else None
@property
def count(self):
return self._count if self._built else None
def update_state(self, values, sample_weight=None):
"""Accumulates statistics for computing the element-wise mean.
Args:
values: Per-example value.
sample_weight: Optional weighting of each example. Defaults to 1.
Returns:
Update op.
"""
values = math_ops.cast(values, self._dtype)
if not self._built:
self._build(values.shape)
elif values.shape != self._shape:
raise ValueError('MeanTensor input values must always have the same '
'shape. Expected shape (set during the first call): {}. '
'Got: {}'.format(self._shape, values.shape))
num_values = array_ops.ones_like(values)
if sample_weight is not None:
sample_weight = math_ops.cast(sample_weight, self._dtype)
# Update dimensions of weights to match with values if possible.
values, _, sample_weight = losses_utils.squeeze_or_expand_dimensions(
values, sample_weight=sample_weight)
try:
# Broadcast weights if possible.
sample_weight = weights_broadcast_ops.broadcast_weights(
sample_weight, values)
except ValueError:
# Reduce values to same ndim as weight array
ndim = backend.ndim(values)
weight_ndim = backend.ndim(sample_weight)
values = math_ops.reduce_mean(
values, axis=list(range(weight_ndim, ndim)))
num_values = math_ops.multiply(num_values, sample_weight)
values = math_ops.multiply(values, sample_weight)
update_total_op = self._total.assign_add(values)
with ops.control_dependencies([update_total_op]):
return self._count.assign_add(num_values)
def result(self):
if not self._built:
raise ValueError(
'MeanTensor does not have any result yet. Please call the MeanTensor '
'instance or use `.update_state(value)` before retrieving the result.'
)
return math_ops.div_no_nan(self.total, self.count)
def reset_state(self):
if self._built:
backend.batch_set_value(
[(v, np.zeros(self._shape.as_list())) for v in self.variables])
| MeanTensor |
python | great-expectations__great_expectations | tests/data_context/fixtures/plugins/extended_checkpoint.py | {
"start": 161,
"end": 579
} | class ____(Checkpoint):
def __init__(
self,
name: str,
data_context,
expectation_suite_name: Optional[str] = None,
action_list: Optional[List[dict]] = None,
):
super().__init__(
name=name,
data_context=data_context,
expectation_suite_name=expectation_suite_name,
action_list=action_list,
)
| ExtendedCheckpoint |
python | pypa__warehouse | tests/unit/test_views.py | {
"start": 7568,
"end": 11341
} | class ____:
def test_logged_in_returns_exception(self, pyramid_config):
renderer = pyramid_config.testing_add_renderer("403.html")
exc = pretend.stub(
status_code=403, status="403 Forbidden", headers={}, result=pretend.stub()
)
request = pretend.stub(user=pretend.stub(), context=None)
resp = forbidden(exc, request)
assert resp.status_code == 403
renderer.assert_()
def test_logged_out_redirects_login(self):
exc = pretend.stub()
request = pretend.stub(
user=None,
path_qs="/foo/bar/?b=s",
route_url=pretend.call_recorder(
lambda route, _query: "/accounts/login/?next=/foo/bar/%3Fb%3Ds"
),
context=None,
)
resp = forbidden(exc, request)
assert resp.status_code == 303
assert resp.headers["Location"] == "/accounts/login/?next=/foo/bar/%3Fb%3Ds"
@pytest.mark.parametrize("reason", ["manage_2fa_required"])
def test_two_factor_required(self, reason):
result = WarehouseDenied("Some summary", reason=reason)
exc = pretend.stub(result=result)
request = pretend.stub(
user=pretend.stub(),
session=pretend.stub(flash=pretend.call_recorder(lambda x, queue: None)),
path_qs="/foo/bar/?b=s",
route_url=pretend.call_recorder(
lambda route, _query: "/the/url/?next=/foo/bar/%3Fb%3Ds"
),
_=lambda x: x,
)
resp = forbidden(exc, request)
assert resp.status_code == 303
assert resp.headers["Location"] == "/the/url/?next=/foo/bar/%3Fb%3Ds"
assert request.route_url.calls == [
pretend.call("manage.account.two-factor", _query={"next": "/foo/bar/?b=s"})
]
assert request.session.flash.calls == [
pretend.call(
"Two-factor authentication must be enabled on your account to "
"perform this action.",
queue="error",
)
]
@pytest.mark.parametrize(
"requested_path",
["/manage/projects/", "/manage/account/two-factor/", "/manage/organizations/"],
)
def test_unverified_email_redirects(self, requested_path):
result = WarehouseDenied("Some summary", reason="unverified_email")
exc = pretend.stub(result=result)
request = pretend.stub(
user=pretend.stub(),
session=pretend.stub(flash=pretend.call_recorder(lambda x, queue: None)),
path_qs=requested_path,
route_url=pretend.call_recorder(lambda route, _query: "/manage/account/"),
_=lambda x: x,
)
resp = forbidden(exc, request)
assert resp.status_code == 303
assert resp.location == "/manage/account/"
assert request.session.flash.calls == [
pretend.call(
"You must verify your **primary** email address before you "
"can perform this action.",
queue="error",
)
]
def test_generic_warehousedeined(self, pyramid_config):
result = WarehouseDenied(
"This project requires two factor authentication to be enabled "
"for all contributors.",
reason="some_other_reason",
)
exc = pretend.stub(result=result)
renderer = pyramid_config.testing_add_renderer("403.html")
exc = pretend.stub(
status_code=403, status="403 Forbidden", headers={}, result=result
)
request = pretend.stub(user=pretend.stub(), context=None)
resp = forbidden(exc, request)
assert resp.status_code == 403
renderer.assert_()
| TestForbiddenView |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_sort.py | {
"start": 12603,
"end": 16814
} | class ____(__TestCase):
def test_safe_object_compare(self):
heterogeneous_lists = [[0, 'foo'],
[0.0, 'foo'],
[('foo',), 'foo']]
for L in heterogeneous_lists:
self.assertRaises(TypeError, L.sort)
self.assertRaises(TypeError, [(x,) for x in L].sort)
self.assertRaises(TypeError, [((x,),) for x in L].sort)
float_int_lists = [[1,1.1],
[1<<70,1.1],
[1.1,1],
[1.1,1<<70]]
for L in float_int_lists:
check_against_PyObject_RichCompareBool(self, L)
def test_unsafe_object_compare(self):
# This test is by ppperry. It ensures that unsafe_object_compare is
# verifying ms->key_richcompare == tp->richcompare before comparing.
with torch._dynamo.error_on_graph_break(False):
class WackyComparator(int):
def __lt__(self, other):
elem.__class__ = WackyList2
return int.__lt__(self, other)
class WackyList1(list):
pass
class WackyList2(list):
def __lt__(self, other):
raise ValueError
L = [WackyList1([WackyComparator(i), i]) for i in range(10)]
elem = L[-1]
with self.assertRaises(ValueError):
L.sort()
L = [WackyList1([WackyComparator(i), i]) for i in range(10)]
elem = L[-1]
with self.assertRaises(ValueError):
[(x,) for x in L].sort()
# The following test is also by ppperry. It ensures that
# unsafe_object_compare handles Py_NotImplemented appropriately.
with torch._dynamo.error_on_graph_break(False):
class PointlessComparator:
def __lt__(self, other):
return NotImplemented
L = [PointlessComparator(), PointlessComparator()]
self.assertRaises(TypeError, L.sort)
self.assertRaises(TypeError, [(x,) for x in L].sort)
# The following tests go through various types that would trigger
# ms->key_compare = unsafe_object_compare
lists = [list(range(100)) + [(1<<70)],
[str(x) for x in range(100)] + ['\uffff'],
[bytes(x) for x in range(100)],
[cmp_to_key(lambda x,y: x<y)(x) for x in range(100)]]
for L in lists:
check_against_PyObject_RichCompareBool(self, L)
def test_unsafe_latin_compare(self):
check_against_PyObject_RichCompareBool(self, [str(x) for
x in range(100)])
def test_unsafe_long_compare(self):
check_against_PyObject_RichCompareBool(self, [x for
x in range(100)])
def test_unsafe_float_compare(self):
check_against_PyObject_RichCompareBool(self, [float(x) for
x in range(100)])
def test_unsafe_tuple_compare(self):
# This test was suggested by Tim Peters. It verifies that the tuple
# comparison respects the current tuple compare semantics, which do not
# guarantee that x < x <=> (x,) < (x,)
#
# Note that we don't have to put anything in tuples here, because
# the check function does a tuple test automatically.
check_against_PyObject_RichCompareBool(self, [float('nan')]*100)
check_against_PyObject_RichCompareBool(self, [float('nan') for
_ in range(100)])
def test_not_all_tuples(self):
self.assertRaises(TypeError, [(1.0, 1.0), (False, "A"), 6].sort)
self.assertRaises(TypeError, [('a', 1), (1, 'a')].sort)
self.assertRaises(TypeError, [(1, 'a'), ('a', 1)].sort)
def test_none_in_tuples(self):
expected = [(None, 1), (None, 2)]
actual = sorted([(None, 2), (None, 1)])
self.assertEqual(actual, expected)
#==============================================================================
if __name__ == "__main__":
run_tests()
| TestOptimizedCompares |
python | psf__black | src/blib2to3/pgen2/grammar.py | {
"start": 839,
"end": 6846
} | class ____:
"""Pgen parsing tables conversion class.
Once initialized, this class supplies the grammar tables for the
parsing engine implemented by parse.py. The parsing engine
accesses the instance variables directly. The class here does not
provide initialization of the tables; several subclasses exist to
do this (see the conv and pgen modules).
The load() method reads the tables from a pickle file, which is
much faster than the other ways offered by subclasses. The pickle
file is written by calling dump() (after loading the grammar
tables using a subclass). The report() method prints a readable
representation of the tables to stdout, for debugging.
The instance variables are as follows:
symbol2number -- a dict mapping symbol names to numbers. Symbol
numbers are always 256 or higher, to distinguish
them from token numbers, which are between 0 and
255 (inclusive).
number2symbol -- a dict mapping numbers to symbol names;
these two are each other's inverse.
states -- a list of DFAs, where each DFA is a list of
states, each state is a list of arcs, and each
arc is a (i, j) pair where i is a label and j is
a state number. The DFA number is the index into
this list. (This name is slightly confusing.)
Final states are represented by a special arc of
the form (0, j) where j is its own state number.
dfas -- a dict mapping symbol numbers to (DFA, first)
pairs, where DFA is an item from the states list
above, and first is a set of tokens that can
begin this grammar rule (represented by a dict
whose values are always 1).
labels -- a list of (x, y) pairs where x is either a token
number or a symbol number, and y is either None
or a string; the strings are keywords. The label
number is the index in this list; label numbers
are used to mark state transitions (arcs) in the
DFAs.
start -- the number of the grammar's start symbol.
keywords -- a dict mapping keyword strings to arc labels.
tokens -- a dict mapping token numbers to arc labels.
"""
def __init__(self) -> None:
self.symbol2number: dict[str, int] = {}
self.number2symbol: dict[int, str] = {}
self.states: list[DFA] = []
self.dfas: dict[int, DFAS] = {}
self.labels: list[Label] = [(0, "EMPTY")]
self.keywords: dict[str, int] = {}
self.soft_keywords: dict[str, int] = {}
self.tokens: dict[int, int] = {}
self.symbol2label: dict[str, int] = {}
self.version: tuple[int, int] = (0, 0)
self.start = 256
# Python 3.7+ parses async as a keyword, not an identifier
self.async_keywords = False
def dump(self, filename: Path) -> None:
"""Dump the grammar tables to a pickle file."""
# mypyc generates objects that don't have a __dict__, but they
# do have __getstate__ methods that will return an equivalent
# dictionary
if hasattr(self, "__dict__"):
d = self.__dict__
else:
d = self.__getstate__() # type: ignore
with tempfile.NamedTemporaryFile(
dir=os.path.dirname(filename), delete=False
) as f:
pickle.dump(d, f, pickle.HIGHEST_PROTOCOL)
os.replace(f.name, filename)
def _update(self, attrs: dict[str, Any]) -> None:
for k, v in attrs.items():
setattr(self, k, v)
def load(self, filename: Path) -> None:
"""Load the grammar tables from a pickle file."""
with open(filename, "rb") as f:
d = pickle.load(f)
self._update(d)
def loads(self, pkl: bytes) -> None:
"""Load the grammar tables from a pickle bytes object."""
self._update(pickle.loads(pkl))
def copy(self: _P) -> _P:
"""
Copy the grammar.
"""
new = self.__class__()
for dict_attr in (
"symbol2number",
"number2symbol",
"dfas",
"keywords",
"soft_keywords",
"tokens",
"symbol2label",
):
setattr(new, dict_attr, getattr(self, dict_attr).copy())
new.labels = self.labels[:]
new.states = self.states[:]
new.start = self.start
new.version = self.version
new.async_keywords = self.async_keywords
return new
def report(self) -> None:
"""Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
print("s2n")
pprint(self.symbol2number)
print("n2s")
pprint(self.number2symbol)
print("states")
pprint(self.states)
print("dfas")
pprint(self.dfas)
print("labels")
pprint(self.labels)
print("start", self.start)
# Map from operator to number (since tokenize doesn't do this)
opmap_raw = """
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
@= ATEQUAL
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
:= COLONEQUAL
! BANG
"""
opmap = {}
for line in opmap_raw.splitlines():
if line:
op, name = line.split()
opmap[op] = getattr(token, name)
| Grammar |
python | pytorch__pytorch | test/export/test_db.py | {
"start": 627,
"end": 3636
} | class ____(TestCase):
# TODO Maybe we should make this tests actually show up in a file?
@parametrize(
"name,case",
filter_examples_by_support_level(SupportLevel.SUPPORTED).items(),
name_fn=lambda name, case: f"case_{name}",
)
def test_exportdb_supported(self, name: str, case: ExportCase) -> None:
model = case.model
args_export = case.example_args
kwargs_export = case.example_kwargs
args_model = copy.deepcopy(args_export)
kwargs_model = copy.deepcopy(kwargs_export)
with config.patch(use_new_tracer_experimental=True):
exported_program = export(
model,
case.example_args,
case.example_kwargs,
dynamic_shapes=case.dynamic_shapes,
strict=True,
)
exported_program.graph_module.print_readable()
self.assertEqual(
exported_program.module()(*args_export, **kwargs_export),
model(*args_model, **kwargs_model),
)
if case.extra_args is not None:
args = case.extra_args
args_model = copy.deepcopy(args)
self.assertEqual(
exported_program.module()(*args),
model(*args_model),
)
@parametrize(
"name,case",
filter_examples_by_support_level(SupportLevel.NOT_SUPPORTED_YET).items(),
name_fn=lambda name, case: f"case_{name}",
)
def test_exportdb_not_supported(self, name: str, case: ExportCase) -> None:
model = case.model
# pyre-ignore
with self.assertRaises(
(torchdynamo.exc.Unsupported, AssertionError, RuntimeError)
):
with config.patch(use_new_tracer_experimental=True):
_ = export(
model,
case.example_args,
case.example_kwargs,
dynamic_shapes=case.dynamic_shapes,
strict=True,
)
exportdb_not_supported_rewrite_cases = [
(name, rewrite_case)
for name, case in filter_examples_by_support_level(
SupportLevel.NOT_SUPPORTED_YET
).items()
for rewrite_case in get_rewrite_cases(case)
]
if exportdb_not_supported_rewrite_cases:
@parametrize(
"name,rewrite_case",
exportdb_not_supported_rewrite_cases,
name_fn=lambda name, case: f"case_{name}_{case.name}",
)
def test_exportdb_not_supported_rewrite(
self, name: str, rewrite_case: ExportCase
) -> None:
# pyre-ignore
export(
rewrite_case.model,
rewrite_case.example_args,
rewrite_case.example_kwargs,
dynamic_shapes=rewrite_case.dynamic_shapes,
strict=True,
)
instantiate_parametrized_tests(ExampleTests)
if __name__ == "__main__":
run_tests()
| ExampleTests |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP004.py | {
"start": 623,
"end": 728
} | class ____(
# Comment on A.
A,
object,
):
...
def f():
class A(object):
...
| B |
python | doocs__leetcode | solution/2000-2099/2049.Count Nodes With the Highest Score/Solution.py | {
"start": 0,
"end": 720
} | class ____:
def countHighestScoreNodes(self, parents: List[int]) -> int:
def dfs(i: int, fa: int):
cnt = score = 1
for j in g[i]:
if j != fa:
t = dfs(j, i)
score *= t
cnt += t
if n - cnt:
score *= n - cnt
nonlocal ans, mx
if mx < score:
mx = score
ans = 1
elif mx == score:
ans += 1
return cnt
n = len(parents)
g = [[] for _ in range(n)]
for i in range(1, n):
g[parents[i]].append(i)
ans = mx = 0
dfs(0, -1)
return ans
| Solution |
python | facebook__pyre-check | tools/upgrade/errors.py | {
"start": 7790,
"end": 13929
} | class ____:
@classmethod
def empty(cls) -> "Errors":
return cls([])
@staticmethod
def from_json(
json_string: str,
only_fix_error_code: Optional[int] = None,
from_stdin: bool = False,
) -> "Errors":
try:
errors = json.loads(json_string)
return Errors(_filter_errors(errors, only_fix_error_code))
except json.decoder.JSONDecodeError:
if from_stdin:
raise UserError(
"Received invalid JSON as input. "
"If piping from `pyre check` be sure to use `--output=json`."
)
else:
raise UserError(
"Encountered invalid output when checking for pyre errors: "
f"`{json_string}`."
)
@staticmethod
def from_stdin(only_fix_error_code: Optional[int] = None) -> "Errors":
input = sys.stdin.read()
return Errors.from_json(input, only_fix_error_code, from_stdin=True)
def __init__(self, errors: List[Dict[str, Any]]) -> None:
self.errors: List[Dict[str, Any]] = errors
def __len__(self) -> int:
return len(self.errors)
def __eq__(self, other: "Errors") -> bool:
return self.errors == other.errors
@property
def paths_to_errors(self) -> Dict[str, List[PyreError]]:
return {
path: list(errors)
for path, errors in itertools.groupby(
sorted(self.errors, key=error_path), key=error_path
)
}
def suppress(
self,
comment: Optional[str] = None,
max_line_length: Optional[int] = None,
truncate: bool = False,
unsafe: bool = False,
) -> None:
unsuppressed_paths_and_exceptions = []
for path_to_suppress, errors in self.paths_to_errors.items():
LOG.info("Processing `%s`", path_to_suppress)
try:
path = Path(path_to_suppress)
input = path.read_text()
output = _suppress_errors(
input,
_build_error_map(errors),
comment,
(
max_line_length
if max_line_length and max_line_length > 0
else None
),
truncate,
unsafe,
)
path.write_text(output)
except SkippingGeneratedFileException:
LOG.warning(f"Skipping generated file at {path_to_suppress}")
except LineBreakParsingException:
LOG.warning(
f"Skipping file with unparseable line breaks at {path_to_suppress}"
)
except (ast.UnstableAST, SyntaxError) as exception:
unsuppressed_paths_and_exceptions.append((path_to_suppress, exception))
if unsuppressed_paths_and_exceptions:
exception_messages = "\n".join(
f"{path} - {str(exception)}"
for path, exception in unsuppressed_paths_and_exceptions
)
raise PartialErrorSuppression(
"Could not fully suppress errors due to the following exceptions: "
f"{exception_messages}\n Run with `--unsafe` to suppress anyway.",
[path for path, _ in unsuppressed_paths_and_exceptions],
)
def _filter_errors(
errors: List[Dict[str, Any]], only_fix_error_code: Optional[int] = None
) -> List[Dict[str, Any]]:
if only_fix_error_code is not None:
errors = [error for error in errors if error["code"] == only_fix_error_code]
return errors
def _remove_comment_preamble(lines: List[str]) -> None:
# Deprecated: leaving remove logic until live old-style comments are cleaned up.
while lines:
old_line = lines.pop()
new_line = re.sub(r"# pyre: .*$", "", old_line).rstrip()
if old_line == "" or new_line != "":
# The preamble has ended.
lines.append(new_line)
return
def _add_error_to_line_break_block(lines: List[str], errors: List[List[str]]) -> None:
# Gather unbroken lines.
line_break_block = [lines.pop() for _ in range(0, len(errors))]
line_break_block.reverse()
# Transform line break block to use parenthesis.
indent = len(line_break_block[0]) - len(line_break_block[0].lstrip())
line_break_block = [line[indent:] for line in line_break_block]
statement = "\n".join(line_break_block)
transformed_statement = libcst.Module([]).code_for_node(
cast(
libcst.CSTNode,
libcst.parse_statement(statement).visit(LineBreakTransformer()),
)
)
transformed_lines = transformed_statement.split("\n")
transformed_lines = [" " * indent + line for line in transformed_lines]
# Add to lines.
for line, comment in zip(transformed_lines, errors):
lines.extend(comment)
lines.append(line)
def _split_across_lines(
comment: str, indent: int, max_line_length: Optional[int]
) -> List[str]:
if not max_line_length or len(comment) <= max_line_length:
return [comment]
comment = comment.lstrip()
available_columns = max_line_length - indent - len("# ")
buffered_line = ""
result = []
prefix = " " * indent
for token in comment.split():
if buffered_line and (
len(buffered_line) + len(token) + len(" ") > available_columns
):
# This new token would make the line exceed the limit,
# hence terminate what we have accumulated.
result.append(("{}{}".format(prefix, buffered_line)).rstrip())
# The first line already has a comment token on it, so don't prefix #. For
# the rest, we need to add the comment symbol manually.
prefix = "{}# ".format(" " * indent)
buffered_line = ""
buffered_line = buffered_line + token + " "
result.append(("{}{}".format(prefix, buffered_line)).rstrip())
return result
| Errors |
python | tensorflow__tensorflow | tensorflow/python/training/saving/saveable_object_util_test.py | {
"start": 1252,
"end": 1846
} | class ____(saveable_object.SaveableObject):
def __init__(self, var, slice_spec, name):
specs = [saveable_object.SaveSpec(var.read_value(), slice_spec, name)]
super().__init__(var, specs, name)
def restore(self, restored_tensors, restored_shapes):
return self.op.assign(restored_tensors[0])
def _create_converted_trackable(obj):
saveable_factories = saveable_object_util.saveable_objects_from_trackable(obj)
saveables = [factory(name) for name, factory in saveable_factories.items()]
return saveable_object_util.SaveableCompatibilityConverter(obj, saveables)
| _VarSaveable |
python | tensorflow__tensorflow | tensorflow/python/autograph/core/converter.py | {
"start": 8517,
"end": 10711
} | class ____(transformer.Base):
"""All converters should inherit from this class.
Attributes:
ctx: EntityContext
"""
def __init__(self, ctx):
super(Base, self).__init__(ctx)
self._used = False
self._ast_depth = 0
def get_definition_directive(self, node, directive, arg, default):
"""Returns the unique directive argument for a symbol.
See lang/directives.py for details on directives.
Example:
# Given a directive in the code:
ag.foo_directive(bar, baz=1)
# One can write for an AST node Name(id='bar'):
get_definition_directive(node, ag.foo_directive, 'baz')
Args:
node: ast.AST, the node representing the symbol for which the directive
argument is needed.
directive: Callable[..., Any], the directive to search.
arg: str, the directive argument to return.
default: Any
Raises:
ValueError: if conflicting annotations have been found
"""
defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
if not defs:
return default
arg_values_found = []
for def_ in defs:
if (directive in def_.directives and arg in def_.directives[directive]):
arg_values_found.append(def_.directives[directive][arg])
if not arg_values_found:
return default
if len(arg_values_found) == 1:
return arg_values_found[0]
# If multiple annotations reach the symbol, they must all match. If they do,
# return any of them.
first_value = arg_values_found[0]
for other_value in arg_values_found[1:]:
if not ast_util.matches(first_value, other_value):
qn = anno.getanno(node, anno.Basic.QN)
raise ValueError(
'%s has ambiguous annotations for %s(%s): %s, %s' %
(qn, directive.__name__, arg, parser.unparse(other_value).strip(),
parser.unparse(first_value).strip()))
return first_value
def visit(self, node):
if not self._ast_depth:
if self._used:
raise ValueError('converter objects cannot be reused')
self._used = True
self._ast_depth += 1
try:
return super(Base, self).visit(node)
finally:
self._ast_depth -= 1
| Base |
python | ray-project__ray | python/ray/data/_internal/execution/operators/actor_pool_map_operator.py | {
"start": 31426,
"end": 48843
} | class ____(AutoscalingActorPool):
"""A pool of actors for map task execution.
This class is in charge of tracking the number of in-flight tasks per actor,
providing the least heavily loaded actor to the operator, and killing idle
actors when the operator is done submitting work to the pool.
"""
_ACTOR_POOL_SCALE_DOWN_DEBOUNCE_PERIOD_S = 10
_ACTOR_POOL_GRACEFUL_SHUTDOWN_TIMEOUT_S = 30
_LOGICAL_ACTOR_ID_LABEL_KEY = "__ray_data_logical_actor_id"
def __init__(
self,
create_actor_fn: "Callable[[Dict[str, str]], Tuple[ActorHandle, ObjectRef[Any]]]",
per_actor_resource_usage: ExecutionResources,
*,
min_size: int,
max_size: int,
initial_size: int,
max_actor_concurrency: int,
max_tasks_in_flight_per_actor: int,
_enable_actor_pool_on_exit_hook: bool = False,
):
"""Initialize the actor pool.
Args:
create_actor_fn: This function should take key-value labels as input, and
create an actor with those labels. The function should return the actor
handle and a reference to the actor's node ID.
per_actor_resource_usage: The resource usage per actor.
min_size: The minimum number of running actors to be maintained
in the pool. Note, that this constraint could be violated when
no new work is available for scheduling in the actor pool (ie
when operator completes execution).
max_size: The maximum number of running actors to be maintained
in the pool.
initial_size: The initial number of actors to start with.
max_actor_concurrency: The maximum number of concurrent tasks a
single actor can execute (derived from `ray_remote_args`
passed to the operator).
max_tasks_in_flight_per_actor: The maximum number of tasks that can
be submitted to a single actor at any given time.
_enable_actor_pool_on_exit_hook: Whether to enable the actor pool
on exit hook.
"""
self._min_size: int = min_size
self._max_size: int = max_size
self._initial_size: int = initial_size
self._max_actor_concurrency: int = max_actor_concurrency
self._max_tasks_in_flight: int = max_tasks_in_flight_per_actor
self._create_actor_fn = create_actor_fn
self._per_actor_resource_usage = per_actor_resource_usage
assert self._min_size >= 1
assert self._max_size >= self._min_size
assert self._initial_size <= self._max_size
assert self._initial_size >= self._min_size
assert self._max_tasks_in_flight >= 1
assert self._create_actor_fn is not None
# Timestamp of the last scale up action
self._last_upscaled_at: Optional[float] = None
self._last_downscaling_debounce_warning_ts: Optional[float] = None
# Actors that have started running, including alive and restarting actors.
self._running_actors: Dict[ray.actor.ActorHandle, _ActorState] = {}
# Actors that are not yet ready (still pending creation).
self._pending_actors: Dict[ObjectRef, ray.actor.ActorHandle] = {}
# Map from actor handle to its logical ID.
self._actor_to_logical_id: Dict[ray.actor.ActorHandle, str] = {}
self._enable_actor_pool_on_exit_hook = _enable_actor_pool_on_exit_hook
# Cached values for actor / task counts
self._num_restarting_actors: int = 0
self._num_active_actors: int = 0
self._total_num_tasks_in_flight: int = 0
# === Overriding methods of AutoscalingActorPool ===
def min_size(self) -> int:
return self._min_size
def max_size(self) -> int:
return self._max_size
def current_size(self) -> int:
return self.num_pending_actors() + self.num_running_actors()
def num_running_actors(self) -> int:
return len(self._running_actors)
def num_restarting_actors(self) -> int:
"""Restarting actors are all the running actors not in ALIVE state."""
return self._num_restarting_actors
def num_active_actors(self) -> int:
"""Active actors are all the running actors with inflight tasks."""
return self._num_active_actors
def num_alive_actors(self) -> int:
"""Alive actors are all the running actors in ALIVE state."""
return len(self._running_actors) - self._num_restarting_actors
def num_pending_actors(self) -> int:
return len(self._pending_actors)
def max_tasks_in_flight_per_actor(self) -> int:
return self._max_tasks_in_flight
def max_actor_concurrency(self) -> int:
return self._max_actor_concurrency
def num_tasks_in_flight(self) -> int:
return self._total_num_tasks_in_flight
def initial_size(self) -> int:
return self._initial_size
def _can_apply(self, config: ActorPoolScalingRequest) -> bool:
"""Returns whether Actor Pool is able to execute scaling request"""
if config.delta < 0:
# To prevent bouncing back and forth, we disallow scale down for
# a "cool-off" period after the most recent scaling up, with an intention
# to allow application to actually utilize newly provisioned resources
# before making decisions on subsequent actions.
#
# Note that this action is unidirectional and doesn't apply to
# scaling up, ie if actor pool just scaled down, it'd still be able
# to scale back up immediately.
if (
not config.force
and self._last_upscaled_at is not None
and (
time.time()
<= self._last_upscaled_at
+ self._ACTOR_POOL_SCALE_DOWN_DEBOUNCE_PERIOD_S
)
):
# NOTE: To avoid spamming logs unnecessarily, debounce log is produced once
# per upscaling event
if self._last_upscaled_at != self._last_downscaling_debounce_warning_ts:
logger.debug(
f"Ignoring scaling down request (request={config}; reason=debounced from scaling up at {self._last_upscaled_at})"
)
self._last_downscaling_debounce_warning_ts = self._last_upscaled_at
return False
return True
def scale(self, req: ActorPoolScalingRequest) -> Optional[int]:
# Verify request could be applied
if not self._can_apply(req):
return 0
if req.delta > 0:
target_num_actors = req.delta
logger.debug(
f"Scaling up actor pool by {target_num_actors} (reason={req.reason}, "
f"{self.get_actor_info()})"
)
for _ in range(target_num_actors):
actor, ready_ref = self._create_actor()
self.add_pending_actor(actor, ready_ref)
# Capture last scale up timestamp
self._last_upscaled_at = time.time()
return target_num_actors
elif req.delta < 0:
num_released = 0
target_num_actors = abs(req.delta)
for _ in range(target_num_actors):
if self._remove_inactive_actor():
num_released += 1
if num_released > 0:
logger.debug(
f"Scaled down actor pool by {num_released} "
f"(reason={req.reason}; {self.get_actor_info()})"
)
return -num_released
return None
def _create_actor(self) -> Tuple[ray.actor.ActorHandle, ObjectRef]:
logical_actor_id = str(uuid.uuid4())
labels = {self.get_logical_id_label_key(): logical_actor_id}
actor, ready_ref = self._create_actor_fn(labels, logical_actor_id)
self._actor_to_logical_id[actor] = logical_actor_id
return actor, ready_ref
# === End of overriding methods of AutoscalingActorPool ===
def running_actors(self) -> Dict[ray.actor.ActorHandle, _ActorState]:
return self._running_actors
def on_task_submitted(self, actor: ray.actor.ActorHandle):
self._running_actors[actor].num_tasks_in_flight += 1
self._total_num_tasks_in_flight += 1
if self._running_actors[actor].num_tasks_in_flight == 1:
self._num_active_actors += 1
def update_running_actor_state(
self, actor: ray.actor.ActorHandle, is_restarting: bool
) -> None:
"""Update running actor state.
Args:
actor: The running actor that needs state update.
is_restarting: Whether running actor is restarting or alive.
"""
assert actor in self._running_actors
if self._running_actors[actor].is_restarting == is_restarting:
return
self._running_actors[actor].is_restarting = is_restarting
if is_restarting:
self._num_restarting_actors += 1
else:
self._num_restarting_actors -= 1
def add_pending_actor(self, actor: ray.actor.ActorHandle, ready_ref: ray.ObjectRef):
"""Adds a pending actor to the pool.
This actor won't be pickable until it is marked as running via a
pending_to_running() call.
Args:
actor: The not-yet-ready actor to add as pending to the pool.
ready_ref: The ready future for the actor.
"""
self._pending_actors[ready_ref] = actor
def pending_to_running(self, ready_ref: ray.ObjectRef) -> bool:
"""Mark the actor corresponding to the provided ready future as running, making
the actor pickable.
Args:
ready_ref: The ready future for the actor that we wish to mark as running.
Returns:
Whether the actor was still pending. This can return False if the actor had
already been killed.
"""
if ready_ref not in self._pending_actors:
# The actor has been removed from the pool before becoming running.
return False
actor = self._pending_actors.pop(ready_ref)
self._running_actors[actor] = _ActorState(
num_tasks_in_flight=0,
actor_location=ray.get(ready_ref),
is_restarting=False,
)
return True
def on_task_completed(self, actor: ray.actor.ActorHandle):
"""Called when a task completes. Returns the provided actor to the pool."""
assert actor in self._running_actors
assert self._running_actors[actor].num_tasks_in_flight > 0
self._running_actors[actor].num_tasks_in_flight -= 1
self._total_num_tasks_in_flight -= 1
if not self._running_actors[actor].num_tasks_in_flight:
self._num_active_actors -= 1
def get_pending_actor_refs(self) -> List[ray.ObjectRef]:
return list(self._pending_actors.keys())
def get_running_actor_refs(self) -> List[ray.ObjectRef]:
return list(self._running_actors.keys())
def get_logical_ids(self) -> List[str]:
"""Get the logical IDs for pending and running actors in the actor pool.
We can’t use Ray Core actor IDs because we need to identify actors by labels,
but labels must be set before creation, and actor IDs aren’t available until
after.
"""
return list(self._actor_to_logical_id.values())
def get_logical_id_label_key(self) -> str:
"""Get the label key for the logical actor ID.
Actors launched by this pool should have this label.
"""
return self._LOGICAL_ACTOR_ID_LABEL_KEY
def num_idle_actors(self) -> int:
"""Return the number of idle actors in the pool."""
return len(self._running_actors) - self._num_active_actors
def _remove_inactive_actor(self) -> bool:
"""Kills a single pending or idle actor, if any actors are pending/idle.
Returns whether an inactive actor was actually released.
"""
# We prioritize killing pending actors over idle actors to reduce actor starting
# churn.
released = self._try_remove_pending_actor()
if not released:
# If no pending actor was released, so kill actor.
released = self._try_remove_idle_actor()
return released
def _try_remove_pending_actor(self) -> bool:
if self._pending_actors:
# At least one pending actor, so kill first one.
ready_ref = next(iter(self._pending_actors.keys()))
actor = self._pending_actors.pop(ready_ref)
del self._actor_to_logical_id[actor]
return True
# No pending actors, so indicate to the caller that no actors were killed.
return False
def _try_remove_idle_actor(self) -> bool:
for actor, state in self._running_actors.items():
if state.num_tasks_in_flight == 0:
# At least one idle actor, so kill first one found.
# NOTE: This is a fire-and-forget op
self._release_running_actor(actor)
return True
# No idle actors, so indicate to the caller that no actors were killed.
return False
def shutdown(self, force: bool = False):
"""Kills all actors, including running/active actors.
This is called once the operator is shutting down.
"""
self._release_pending_actors(force=force)
self._release_running_actors(force=force)
def _release_pending_actors(self, force: bool):
# Release pending actors from the set of pending ones
pending = dict(self._pending_actors)
self._pending_actors.clear()
if force:
for _, actor in pending.items():
# NOTE: Actors can't be brought back after being ``ray.kill``-ed,
# hence we're only doing that if this is a forced release
ray.kill(actor)
def _release_running_actors(self, force: bool):
running = list(self._running_actors.keys())
on_exit_refs = []
# First release actors and collect their shutdown hook object-refs
for actor in running:
ref = self._release_running_actor(actor)
if ref:
on_exit_refs.append(ref)
# Wait for all actors to shutdown gracefully before killing them
ray.wait(on_exit_refs, timeout=self._ACTOR_POOL_GRACEFUL_SHUTDOWN_TIMEOUT_S)
# NOTE: Actors can't be brought back after being ``ray.kill``-ed,
# hence we're only doing that if this is a forced release
if force:
for actor in running:
ray.kill(actor)
def _release_running_actor(
self, actor: ray.actor.ActorHandle
) -> Optional[ObjectRef]:
"""Remove the given actor from the pool and trigger its `on_exit` callback.
This method returns a ``ref`` to the result
"""
# NOTE: By default, we remove references to the actor and let ref counting
# garbage collect the actor, instead of using ray.kill.
#
# Otherwise, actor cannot be reconstructed for the purposes of produced
# object's lineage reconstruction.
if actor not in self._running_actors:
return None
# Update cached statistics before removing the actor
actor_state = self._running_actors[actor]
# Update total tasks in flight
self._total_num_tasks_in_flight -= actor_state.num_tasks_in_flight
# Update active actors count
if actor_state.num_tasks_in_flight > 0:
self._num_active_actors -= 1
# Update restarting actors count
if actor_state.is_restarting:
self._num_restarting_actors -= 1
if self._enable_actor_pool_on_exit_hook:
# Call `on_exit` to trigger `UDF.__del__` which may perform
# cleanup operations.
ref = actor.on_exit.remote()
else:
ref = None
del self._running_actors[actor]
del self._actor_to_logical_id[actor]
return ref
def get_actor_info(self) -> _ActorPoolInfo:
"""Returns current snapshot of actors' being used in the pool"""
return _ActorPoolInfo(
running=self.num_alive_actors(),
pending=self.num_pending_actors(),
restarting=self.num_restarting_actors(),
)
def per_actor_resource_usage(self) -> ExecutionResources:
"""Per actor resource usage."""
return self._per_actor_resource_usage
def get_pool_util(self) -> float:
if self.num_running_actors() == 0:
return 0.0
else:
# We compute utilization as a ration of
# - Number of submitted tasks over
# - Max number of tasks that Actor Pool could currently run
#
# This value could exceed 100%, since by default actors are allowed
# to queue tasks (to pipeline task execution by overlapping block
# fetching with the execution of the previous task)
return self.num_tasks_in_flight() / (
self._max_actor_concurrency * self.num_running_actors()
)
| _ActorPool |
python | google__jax | tests/mosaic/gpu_test.py | {
"start": 167081,
"end": 192973
} | class ____(TestCase, jtu.JaxTestCase):
"""Device tests with lowering from the MLIR dialect and layout inference."""
def setUp(self):
if mgpu_dialect is None:
raise self.skipTest("Test requires Mosaic GPU dialect")
super().setUp()
@parameterized.product(
layout=tuple(mtu.RegisterLayout),
dtype=(jnp.bfloat16, jnp.int8),
optimized=(True, False, None),
)
def test_smem_gmem_registers_load_store(self, layout, dtype, optimized):
if layout == mtu.RegisterLayout.WG_SPLAT:
self.skipTest("WG_SPLAT is not supported for `vector.load`.")
# We don't infer optimized transfer-compatible transforms for load/store to
# registers with TCGEN05_TMEM_NATIVE layout.
if optimized and layout == mtu.RegisterLayout.TCGEN05_TMEM_NATIVE:
self.skipTest(
"Optimized loads not supported for TCGEN05_TMEM_NATIVE layout"
)
shape = (128, 128)
layout_attr = layout.to_layout_attr(shape, dtype)
def body(ctx, param: ir.Value, result: ir.Value, smem: list[ir.Value]):
del ctx
# GMEM -> Registers
reg = mgpu_dialect.vector_load(param)
reg = mgpu_dialect.layout_cast(reg, layout_attr)
# Registers -> SMEM
mgpu_dialect.vector_store(reg, smem, optimized=optimized)
# SMEM -> Registers
reg = mgpu_dialect.vector_load(smem, optimized=optimized)
reg = mgpu_dialect.layout_cast(reg, layout_attr)
# Registers -> GMEM
mgpu_dialect.vector_store(reg, result)
jax_shape = jax.ShapeDtypeStruct(shape, dtype)
kernel = mgpu.as_gpu_kernel(
body,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=jax_shape,
out_shape=jax_shape,
smem_scratch_shape=jax_shape,
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
param = self.prng.uniform(-1, 1, shape).astype(dtype)
self.assertArraysEqual(kernel(param), param)
@parameterized.parameters(
(mgpu.WGMMA_LAYOUT, mgpu.WGMMA_TRANSPOSED_LAYOUT),
(mgpu.WGMMA_TRANSPOSED_LAYOUT, mgpu.WGMMA_LAYOUT),
)
def test_transposed_load_store(self, src_layout, dst_layout):
def is_transposed(layout):
return layout == mgpu.WGMMA_TRANSPOSED_LAYOUT
def body(ctx, src_ref, dst_ref, scratch):
del ctx, scratch
if is_transposed(src_layout):
src_ref = utils.memref_transpose(src_ref, (1, 0))
if is_transposed(dst_layout):
dst_ref = utils.memref_transpose(dst_ref, (1, 0))
src_reg = mgpu_dialect.vector_load(src_ref)
src_layout_attr = layouts.to_tiled_layout_attr(src_layout)
src_reg = mgpu_dialect.layout_cast(src_reg, src_layout_attr)
dst_layout_attr = layouts.to_tiled_layout_attr(dst_layout)
dst_reg = mgpu_dialect.layout_cast(src_reg, dst_layout_attr)
mgpu_dialect.vector_store(dst_reg, dst_ref)
shape = (128, 128)
dtype = jnp.float32
kernel = mgpu.as_gpu_kernel(
body,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=(jax.ShapeDtypeStruct(shape, dtype),),
out_shape=jax.ShapeDtypeStruct(shape, dtype),
smem_scratch_shape=[],
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
x = self.prng.uniform(-1, 1, shape).astype(dtype)
np.testing.assert_array_equal(kernel(x), x.T)
def test_pointwise_kernel(self):
def add(ctx, a, b, result, smem):
del ctx, smem
# GMEM -> registers
a = mgpu_dialect.vector_load(a)
b = mgpu_dialect.vector_load(b)
# Computation
add = arith.addf(a, b)
# Registers -> GMEM
mgpu_dialect.vector_store(add, result)
dtype = jnp.bfloat16
shape = (128, 128)
jax_shape = jax.ShapeDtypeStruct(shape, dtype)
kernel = mgpu.as_gpu_kernel(
add,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=(jax_shape, jax_shape),
out_shape=jax_shape,
smem_scratch_shape=[],
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
x = self.prng.uniform(-1, 1, shape).astype(dtype)
y = self.prng.uniform(-1, 1, shape).astype(dtype)
self.assertArraysEqual(kernel(x, y), x + y)
@staticmethod
def kernel_with_tma_cases(dtype: jnp.dtype):
@dataclasses.dataclass()
class TestCaseInput:
shape: tuple[int, ...]
shape_sliced: tuple[int, ...] = ()
slice_indices: tuple[int, ...] = ()
slice_lengths: tuple[int, ...] = ()
transforms: tuple[Tile | Transpose | Swizzle, ...] = ()
def __post_init__(self):
if not self.shape_sliced:
self.shape_sliced = self.shape
if not self.slice_lengths:
self.slice_lengths = self.shape_sliced
if not self.slice_indices:
self.slice_indices = tuple([0] * len(self.slice_lengths))
result = []
for swizzle in mgpu_dialect.SwizzlingMode:
n = swizzle * 8 // jnp.finfo(dtype).bits
if swizzle == mgpu_dialect.SwizzlingMode.kNoSwizzle:
# We need at least one case with no transforms, as this is handled
# differently.
result.append(TestCaseInput(shape=[128, n]))
result.extend([
TestCaseInput(
shape=[128, n],
transforms=[Swizzle(swizzle)],
),
TestCaseInput(
shape=[256, n],
shape_sliced=[128, n],
transforms=[Swizzle(swizzle)],
),
TestCaseInput(
shape=[2, 128, n],
shape_sliced=[128, n],
slice_lengths=[-1, 128, n],
slice_indices=[1, 0, 0],
transforms=[Swizzle(swizzle)],
),
TestCaseInput(
shape=[2, 3, 64, n],
transforms=[Transpose([0, 1, 2, 3]), Swizzle(swizzle)],
),
TestCaseInput(
shape=[2, 3, 64, n],
transforms=[
Transpose([1, 0, 2, 3]),
Transpose([1, 0, 2, 3]),
Swizzle(swizzle),
],
),
TestCaseInput(
shape=[2, 3, 64, n],
transforms=[Transpose([1, 0, 2, 3]), Swizzle(swizzle)],
),
TestCaseInput(
shape=[256, n],
shape_sliced=[128, n],
transforms=[Tile([64, n]), Swizzle(swizzle)],
),
TestCaseInput(
shape=[2 * 64, 3 * n],
transforms=[
Tile([64, n]),
Transpose([1, 0, 2, 3]),
Swizzle(swizzle),
],
),
])
return result
@parameterized.parameters(kernel_with_tma_cases(jnp.bfloat16))
def test_kernel_with_tma(self, test_case):
def add(
ctx: launch_context.LaunchContext,
in_gmem_ref: ir.Value,
result_gmem_ref: ir.Value,
smem: list[ir.Value],
):
del ctx
smem_ref, tma_barrier = smem
elt_type = ir.MemRefType(in_gmem_ref.type).element_type
memref_bytes = utils.bytewidth(elt_type) * math.prod(
test_case.shape_sliced
)
i32 = ir.IntegerType.get_signless(32)
slice_indices = [arith.constant(i32, i) for i in test_case.slice_indices]
# GMEM -> SMEM
tma_barrier.arrive_expect_tx(memref_bytes)
load_op = mgpu_dialect.AsyncLoadOp(
source=in_gmem_ref,
destination=smem_ref,
barrier=tma_barrier.as_barrier_memref(),
indices=slice_indices,
slice_lengths=test_case.slice_lengths,
collective=ir.ArrayAttr.get([]),
)
set_in_transforms(load_op, [test_case.transforms])
tma_barrier.wait()
# SMEM -> GMEM
zero_index = arith.constant(i32, 0)
mgpu_dialect.async_store(
source=smem_ref,
destination=result_gmem_ref,
indices=[zero_index] * len(test_case.shape_sliced),
slice_lengths=test_case.shape_sliced,
)
nvvm.cp_async_bulk_wait_group(0)
utils.warpgroup_barrier()
dtype = jnp.bfloat16
jax_shape = jax.ShapeDtypeStruct(test_case.shape, dtype)
jax_shape_sliced = jax.ShapeDtypeStruct(test_case.shape_sliced, dtype)
kernel = mgpu.as_gpu_kernel(
add,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=(jax_shape),
out_shape=jax_shape_sliced,
smem_scratch_shape=[
jax_shape_sliced,
core.TMABarrier(1),
],
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
x = self.prng.uniform(-1, 1, test_case.shape).astype(dtype)
input_slice = tuple(
slice(i * abs(l), (i + 1) * abs(l))
for i, l in zip(test_case.slice_indices, test_case.slice_lengths)
)
self.assertArraysEqual(
kernel(x),
(x[input_slice]).reshape(test_case.shape_sliced),
)
def test_pointwise_kernel_with_tma(self):
def add(
ctx: launch_context.LaunchContext,
a_gmem_ref: ir.Value,
b_gmem_ref: ir.Value,
result_gmem_ref: ir.Value,
smem: list[ir.Value],
):
del ctx
a_smem_ref, b_smem_ref, result_smem_ref, tma_barrier = smem
memref_type = ir.MemRefType(a_gmem_ref.type)
shape = memref_type.shape
elt_type = memref_type.element_type
zero_i32 = arith.constant(ir.IntegerType.get_signless(32), 0)
zero_slice_indices = [zero_i32] * memref_type.rank
# GMEM -> SMEM
memref_bytes = utils.bytewidth(elt_type) * math.prod(shape)
tma_barrier.arrive_expect_tx(2 * memref_bytes)
mgpu_dialect.async_load(
source=a_gmem_ref,
destination=a_smem_ref,
barrier=tma_barrier.as_barrier_memref(),
indices=zero_slice_indices,
slice_lengths=shape,
collective=ir.ArrayAttr.get([]),
)
mgpu_dialect.async_load(
source=b_gmem_ref,
destination=b_smem_ref,
barrier=tma_barrier.as_barrier_memref(),
indices=zero_slice_indices,
slice_lengths=shape,
collective=ir.ArrayAttr.get([]),
)
tma_barrier.wait()
# SMEM -> registers
a = mgpu_dialect.vector_load(a_smem_ref)
b = mgpu_dialect.vector_load(b_smem_ref)
# Computation
add = arith.addf(arith.addf(a, b), b)
# Registers -> SMEM
mgpu_dialect.vector_store(add, result_smem_ref)
# SMEM -> GMEM
mgpu_dialect.async_store(
source=result_smem_ref,
destination=result_gmem_ref,
indices=zero_slice_indices,
slice_lengths=shape,
)
nvvm.cp_async_bulk_wait_group(0)
utils.warpgroup_barrier()
dtype = jnp.bfloat16
spec = jax.ShapeDtypeStruct((2, 3, 4, 64), dtype)
kernel = mgpu.as_gpu_kernel(
add,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=(spec, spec),
out_shape=spec,
smem_scratch_shape=[
spec,
spec,
spec,
core.TMABarrier(1),
],
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
x = self.prng.uniform(-1, 1, spec.shape).astype(dtype)
y = self.prng.uniform(-1, 1, spec.shape).astype(dtype)
self.assertArraysEqual(kernel(x, y), x + y + y)
@parameterized.parameters(
((64,), (64, 128), [0]),
((64,), (128, 64), [1]),
)
def test_broadcast_in_dim(self, input_shape, output_shape, bcast_dims):
element_value = 42.0
layout = fa.WGMMA_ROW_LAYOUT if bcast_dims[0] == 0 else fa.WGMMA_COL_LAYOUT
def body(ctx, result_gmem_ref, scratch):
del ctx, scratch
# Create input in registers
f32 = ir.F32Type.get()
x_type = ir.VectorType.get(input_shape, f32)
c = arith.constant(f32, element_value)
x = vector.broadcast(x_type, c)
# Computation
out_type = ir.VectorType.get(output_shape, f32)
cast = mgpu_dialect.layout_cast(x, layouts.to_layout_attr(layout))
expanded = mgpu_dialect.broadcast_in_dim(out_type, cast, bcast_dims)
# Registers -> GMEM
mgpu_dialect.vector_store(expanded, result_gmem_ref)
dtype = jnp.float32
kernel = mgpu.as_gpu_kernel(
body,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=(),
out_shape=jax.ShapeDtypeStruct(output_shape, dtype),
smem_scratch_shape=[],
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
x = np.full(input_shape, element_value, dtype=dtype)
self.assertArraysEqual(
kernel(), jax.lax.broadcast_in_dim(x, output_shape, bcast_dims)
)
@parameterized.parameters(
(jnp.float32, 5.0, 2.0, vector.CombiningKind.ADD),
(jnp.float32, 5.0, 2.0, vector.CombiningKind.MAXIMUMF),
(jnp.float32, 5.0, 7.0, vector.CombiningKind.MAXIMUMF),
(jnp.int32, 5, 2, vector.CombiningKind.MAXSI),
(jnp.int32, -5, -2, vector.CombiningKind.MAXSI),
(jnp.int32, -2, -5, vector.CombiningKind.MAXSI),
(jnp.uint32, 5, 2, vector.CombiningKind.MAXUI),
(jnp.uint32, 2, 5, vector.CombiningKind.MAXUI),
#
# TODO(dasenov): Add tests for wgmma_col_layout output once
# fragmented_array.reduce supports that.
)
def test_vector_multi_dim_reduction(
self,
dtype,
input_value,
init_value,
kind,
):
input_shape = (128, 64)
output_shape = (128,)
red_dims = [1]
def body(ctx, result_gmem_ref, scratch):
del ctx, scratch
el_type = utils.dtype_to_ir_type(dtype)
# Create source in registers
source_type = ir.VectorType.get(input_shape, el_type)
c = arith.constant(el_type, input_value)
source = vector.broadcast(source_type, c)
# Create accumulator in registers
acc_type = ir.VectorType.get(output_shape, el_type)
c = arith.constant(el_type, init_value)
acc = vector.broadcast(acc_type, c)
# Cast inputs
source = mgpu_dialect.layout_cast(
source, layouts.to_layout_attr(fa.WGMMA_LAYOUT)
)
acc_layout = (
fa.WGMMA_ROW_LAYOUT if red_dims[0] == 1 else fa.WGMMA_COL_LAYOUT
)
acc = mgpu_dialect.layout_cast(acc, layouts.to_layout_attr(acc_layout))
# Computation
reduced = vector.multi_reduction(kind, source, acc, red_dims)
# Registers -> GMEM
mgpu_dialect.vector_store(reduced, result_gmem_ref)
kernel = mgpu.as_gpu_kernel(
body,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=(),
out_shape=jax.ShapeDtypeStruct(output_shape, dtype),
smem_scratch_shape=[],
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
source = np.full(input_shape, input_value, dtype=dtype)
acc = np.full(output_shape, init_value, dtype=dtype)
if kind == vector.CombiningKind.ADD:
red = jax.lax.reduce_sum(source, red_dims)
red = red + acc
else:
red = jax.lax.reduce_max(source, red_dims)
red = jax.lax.max(red, acc)
self.assertArraysEqual(kernel(), red)
@parameterized.parameters(fa.WGMMA_ROW_LAYOUT, fa.WGMMA_COL_LAYOUT)
def test_wgmma_row_col_store(self, in_layout):
element_value = 42.0
shape = (64, )
def body(ctx, result_gmem_ref, smem):
del ctx
# Create input in registers
f32 = ir.F32Type.get()
x_type = ir.VectorType.get(shape, f32)
c = arith.constant(f32, element_value)
x = vector.broadcast(x_type, c)
cast = mgpu_dialect.layout_cast(x, layouts.to_layout_attr(in_layout))
# Registers -> SMEM
mgpu_dialect.vector_store(cast, smem)
# SMEM -> GMEM
zero_i32 = arith.constant(ir.IntegerType.get_signless(32), 0)
mgpu_dialect.async_store(
source=smem,
destination=result_gmem_ref,
indices=[zero_i32],
slice_lengths=shape,
)
nvvm.cp_async_bulk_wait_group(0)
utils.warpgroup_barrier()
dtype = jnp.float32
kernel = mgpu.as_gpu_kernel(
body,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=(),
out_shape=jax.ShapeDtypeStruct(shape, dtype),
smem_scratch_shape=jax.ShapeDtypeStruct(shape, dtype),
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
x = np.full(shape, element_value, dtype=dtype)
self.assertArraysEqual(kernel(), x)
@parameterized.parameters(
# Positive offsets will be passsed as static offsets.
# Negative offsets will be converted to positive dynamic offsets.
dict(
full_shape=(2, 3, 128, 64),
sub_shape=(32, 64),
offsets=[-1, 0, -96, 0],
tiling=None,
swizzle=None,
),
dict(
full_shape=(3, 128, 64),
sub_shape=(32, 64),
offsets=[-2, -96, 0],
tiling=[32, 64],
swizzle=mgpu_dialect.SwizzlingMode.k128ByteSwizzle,
),
dict(
full_shape=(128, 128),
sub_shape=(64,),
offsets=[-1, 64],
tiling=[64],
swizzle=mgpu_dialect.SwizzlingMode.k128ByteSwizzle,
),
)
def test_subview(
self,
full_shape,
sub_shape,
offsets,
tiling,
swizzle,
):
assert len(sub_shape) <= 2
sizes = [1] * (len(full_shape) - len(sub_shape)) + list(sub_shape)
def body(
ctx: launch_context.LaunchContext,
full_gmem_ref: ir.Value,
sub_gmem_ref: ir.Value,
smem: list[ir.Value],
):
del ctx
full_smem_ref, tma_barrier = smem
zero_i32 = arith.constant(ir.IntegerType.get_signless(32), 0)
# GMEM -> SMEM
operand_elt_type = ir.MemRefType(full_gmem_ref.type).element_type
bytes = utils.bytewidth(operand_elt_type) * math.prod(full_shape)
tma_barrier.arrive_expect_tx(bytes)
mgpu_dialect.async_load(
source=full_gmem_ref,
destination=full_smem_ref,
barrier=tma_barrier.as_barrier_memref(),
indices=[zero_i32] * len(full_shape),
slice_lengths=full_shape,
collective=ir.ArrayAttr.get([]),
)
tma_barrier.wait()
# SubView
mixed_offsets = [
o if o >= 0 else arith.constant(ir.IndexType.get(), -o)
for o in offsets
]
full_ref_type = ir.MemRefType(full_smem_ref.type)
dynamic = ir.ShapedType.get_dynamic_stride_or_offset()
rhs_subview_ref_type = ir.MemRefType.get(
shape=sub_shape,
element_type=full_ref_type.element_type,
layout=ir.StridedLayoutAttr.get(
dynamic, [full_shape[-1], 1] if len(sub_shape) == 2 else [1]
),
memory_space=full_ref_type.memory_space,
)
sub_smem_ref = memref.subview(
full_smem_ref,
mixed_offsets,
sizes,
strides=[1] * len(sizes),
result_type=rhs_subview_ref_type,
)
transforms = []
if tiling is not None:
transforms.append(mgpu_dialect.TileTransformAttr.get(tiling))
if swizzle is not None:
transforms.append(mgpu_dialect.SwizzleTransformAttr.get(swizzle))
if transforms:
sub_smem_ref = mgpu_dialect.with_transforms(
sub_smem_ref,
transforms=ir.ArrayAttr.get(transforms),
)
# SMEM -> GMEM
mgpu_dialect.async_store(
source=sub_smem_ref,
destination=sub_gmem_ref,
indices=[zero_i32] * len(sub_shape),
slice_lengths=sub_shape,
)
nvvm.cp_async_bulk_wait_group(0)
el_type = jnp.bfloat16
full_jax_shape = jax.ShapeDtypeStruct(full_shape, el_type)
result_jax_shape = jax.ShapeDtypeStruct(sub_shape, el_type)
kernel = mgpu.as_gpu_kernel(
body,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=(full_jax_shape),
out_shape=result_jax_shape,
smem_scratch_shape=[full_jax_shape, core.TMABarrier(1)],
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
x = self.prng.uniform(0, 10, full_shape).astype(el_type)
slicing = tuple(slice(abs(o), abs(o) + s) for o, s in zip(offsets, sizes))
self.assertArraysEqual(kernel(x), x[slicing].reshape(sub_shape))
def test_custom_primitive_op(self):
# This test exercises the following cases:
# - The lowering handles nested blocks and regions (e.g. `scf.IfOp`).
# - The lowering updates references to inlined operations.
def body(ctx, result, scratch):
del ctx, scratch
i64 = ir.IntegerType.get_signless(64)
index = ir.IndexType.get()
op = mgpu_dialect.CustomPrimitiveOp(
result=[],
operands_=[result],
in_layouts=[],
in_transforms=[],
out_layouts=[],
)
args_ty = [arg.type for arg in op.operands_]
block = op.body.blocks.append(*args_ty)
with ir.InsertionPoint(block):
is_leader_thread = single_thread_predicate()
with when(is_leader_thread):
c5 = arith.constant(i64, 5)
memref.store(c5, block.arguments[0], [c(0, index)])
mgpu_dialect.return_([])
kernel = mgpu.as_gpu_kernel(
body,
grid=(1, 1, 1),
cluster=(1, 1, 1),
block=(128, 1, 1),
in_shape=(),
out_shape=jax.ShapeDtypeStruct((1,), jnp.int64),
smem_scratch_shape=(),
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
self.assertArraysEqual(kernel(), [5])
def test_profiler(self):
def body(ctx, input, result, scratch):
del scratch
with ctx.named_region("load"):
reg = mgpu_dialect.vector_load(input)
with ctx.named_region("store"):
mgpu_dialect.vector_store(reg, result)
dtype = jnp.bfloat16
shape = (128, 128)
jax_shape = jax.ShapeDtypeStruct(shape, dtype)
with tempfile.TemporaryDirectory() as tmpdir:
kernel = mgpu.as_gpu_kernel(
body,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=(jax_shape),
out_shape=jax_shape,
smem_scratch_shape=[],
prof_spec=profiler.ProfilerSpec(1024, dump_path=tmpdir),
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
param = self.prng.uniform(-1, 1, shape).astype(dtype)
self.assertArraysEqual(kernel(param), param)
[name] = os.listdir(tmpdir)
with open(os.path.join(tmpdir, name)) as f:
data = f.read()
self.assertEqual(data.count('"name": "load"'), 2)
self.assertEqual(data.count('"name": "store"'), 2)
@parameterized.parameters(((128,),), ((128, 128),))
def test_tma_collective_async_cp(self, in_shape):
def body(ctx, src, dst, scratch):
del ctx
tmp, barrier = scratch
i32 = ir.IntegerType.get_signless(32)
zero_i32 = arith.constant(i32, 0)
src_type = ir.MemRefType(src.type)
barrier.arrive_expect_tx(
utils.bytewidth(src_type.element_type) * math.prod(src_type.shape)
)
mgpu_dialect.async_load(
source=src,
destination=tmp,
indices=[zero_i32] * src_type.rank,
slice_lengths=src_type.shape,
collective=ir.ArrayAttr.get([
ir.IntegerAttr.get(i32, mgpu_dialect.Dimension.x),
]),
barrier=barrier.as_barrier_memref(),
)
barrier.wait()
block_id = gpu.cluster_block_id(gpu.Dimension.x)
block_id = arith.index_cast(i32, block_id)
mgpu_dialect.async_store(
source=tmp,
destination=dst,
indices=[block_id] + [zero_i32] * src_type.rank,
slice_lengths=[-1, *src_type.shape],
)
dtype = jnp.float32
kernel = mgpu.as_gpu_kernel(
body,
grid=(2, 1, 1),
cluster=(2, 1, 1),
block=(128, 1, 1),
in_shape=jax.ShapeDtypeStruct(in_shape, dtype),
out_shape=jax.ShapeDtypeStruct((2, *in_shape), dtype),
smem_scratch_shape=[
jax.ShapeDtypeStruct(in_shape, dtype),
mgpu.TMABarrier(),
],
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
x = self.prng.uniform(-1, 1, in_shape).astype(dtype)
self.assertArraysEqual(kernel(x), jnp.stack([x, x], axis=0))
def test_vector_extract_strided_slice(self):
def body(ctx, src, dst, scratch):
del ctx, scratch
src_vec = mgpu_dialect.vector_load(src)
src_vec = mgpu_dialect.layout_cast(
src_vec, layouts.to_layout_attr(fa.WGMMA_LAYOUT)
)
dst_type = ir.MemRefType(dst.type)
dest_vec_type = ir.VectorType.get(dst_type.shape, dst_type.element_type)
sliced_vec = vector.extract_strided_slice(
dest_vec_type,
src_vec,
offsets=[0, 64],
sizes=[64, 64],
strides=[1, 1],
)
mgpu_dialect.vector_store(sliced_vec, dst)
dtype = jnp.float32
kernel = mgpu.as_gpu_kernel(
body,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=jax.ShapeDtypeStruct((128, 128), dtype),
out_shape=jax.ShapeDtypeStruct((64, 64), dtype),
smem_scratch_shape=[],
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
x = self.prng.uniform(-1, 1, (128, 128)).astype(dtype)
self.assertArraysEqual(kernel(x), x[0:64, 64:128])
@parameterized.product(
dtype=(jnp.float32, jnp.int32, jnp.uint32),
dimension=(0, 1),
)
def test_broadcasted_iota(self, dtype, dimension):
def body(ctx, out, scratch):
del ctx, scratch
result_type = ir.VectorType.get(out.type.shape, out.type.element_type)
iota = mgpu_dialect.broadcasted_iota(result_type, dimension)
mgpu_dialect.vector_store(iota, out)
shape = (128, 128)
kernel = mgpu.as_gpu_kernel(
body,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=(),
out_shape=jax.ShapeDtypeStruct(shape, dtype),
smem_scratch_shape=[],
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
expected = jax.lax.broadcasted_iota(dtype, shape, dimension)
self.assertArraysEqual(kernel(), expected)
| MosaicGpuDialectTest |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 202133,
"end": 204791
} | class ____(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = socket_helper.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port),
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port),
timeout=support.LOOPBACK_TIMEOUT,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
| NetworkConnectionAttributesTest |
python | etianen__django-reversion | tests/test_app/tests/test_commands.py | {
"start": 6055,
"end": 6417
} | class ____(TestModelMixin, TestBase):
databases = {"default", "postgres"}
def testDeleteRevisionsModelDb(self):
with reversion.create_revision():
TestModel.objects.db_manager("postgres").create()
self.callCommand("deleterevisions", model_db="postgres")
self.assertNoRevision(using="postgres")
| DeleteRevisionsModelDbTest |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 871,
"end": 3752
} | class ____:
def test_method(self):
class Foo:
@classmethod
def classmethod(cls):
pass
def valid(self):
pass
def valid_kwargs(self, param='value'):
pass
def valid_vargs_kwargs(self, *args, **kwargs):
pass
def invalid(self, param):
pass
assert is_simple_callable(Foo.classmethod)
# unbound methods
assert not is_simple_callable(Foo.valid)
assert not is_simple_callable(Foo.valid_kwargs)
assert not is_simple_callable(Foo.valid_vargs_kwargs)
assert not is_simple_callable(Foo.invalid)
# bound methods
assert is_simple_callable(Foo().valid)
assert is_simple_callable(Foo().valid_kwargs)
assert is_simple_callable(Foo().valid_vargs_kwargs)
assert not is_simple_callable(Foo().invalid)
def test_function(self):
def simple():
pass
def valid(param='value', param2='value'):
pass
def valid_vargs_kwargs(*args, **kwargs):
pass
def invalid(param, param2='value'):
pass
assert is_simple_callable(simple)
assert is_simple_callable(valid)
assert is_simple_callable(valid_vargs_kwargs)
assert not is_simple_callable(invalid)
@pytest.mark.parametrize('obj', (True, None, "str", b'bytes', 123, 1.23))
def test_not_callable(self, obj):
assert not is_simple_callable(obj)
def test_4602_regression(self):
from django.db import models
class ChoiceModel(models.Model):
choice_field = models.CharField(
max_length=1, default='a',
choices=(('a', 'A'), ('b', 'B')),
)
class Meta:
app_label = 'tests'
assert is_simple_callable(ChoiceModel().get_choice_field_display)
def test_builtin_function(self):
# Built-in function signatures are not easily inspectable, so the
# current expectation is to just raise a helpful error message.
timestamp = datetime.datetime.now()
with pytest.raises(BuiltinSignatureError) as exc_info:
is_simple_callable(timestamp.date)
assert str(exc_info.value) == (
'Built-in function signatures are not inspectable. Wrap the '
'function call in a simple, pure Python function.')
def test_type_annotation(self):
# The annotation will otherwise raise a syntax error in python < 3.5
locals = {}
exec("def valid(param: str='value'): pass", locals)
valid = locals['valid']
assert is_simple_callable(valid)
# Tests for field keyword arguments and core functionality.
# ---------------------------------------------------------
| TestIsSimpleCallable |
python | arrow-py__arrow | arrow/locales.py | {
"start": 15330,
"end": 15643
} | class ____(FrenchBaseLocale, Locale):
names = ["fr-ca"]
month_abbreviations = [
"",
"janv",
"févr",
"mars",
"avr",
"mai",
"juin",
"juill",
"août",
"sept",
"oct",
"nov",
"déc",
]
| FrenchCanadianLocale |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_day_count_to_be_close_to_equivalent_week_day_mean.py | {
"start": 1431,
"end": 3025
} | class ____(ColumnAggregateMetricProvider):
"""
This metric expects daily counts of the given column, to be close to the average counts calculated 4 weeks back,
respective to the specific day of the week.
The expectation fails if the difference in percentage ((current - average) / average) is more than the threshold
given by user (default value is 25%). The threshold parameter should be given in fraction and not percent,
i.e. for 25% define threshold = 0.25
"""
metric_name = "column.counts_per_days_custom"
library_metadata = {"tags": ["query-based"], "contributors": ["@itaise", "@hadasm"]}
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
execution_engine: SqlAlchemyExecutionEngine,
metric_domain_kwargs,
metric_value_kwargs,
metrics,
runtime_configuration,
):
(
selectable,
_compute_domain_kwargs,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(metric_domain_kwargs, MetricDomainTypes.COLUMN)
column_name = accessor_domain_kwargs["column"]
column = sa.column(column_name)
# get counts for dates
query = (
sa.select([sa.func.Date(column), sa.func.count()])
.group_by(sa.func.Date(column))
.select_from(selectable)
.order_by(sa.func.Date(column).desc())
.limit(METRIC_SAMPLE_LIMIT)
)
results = execution_engine.execute_query(query).fetchall()
return results
| ColumnCountsPerDaysCustom |
python | scipy__scipy | benchmarks/benchmarks/stats.py | {
"start": 2151,
"end": 2861
} | class ____(Benchmark):
param_names = ['alternative', 'mode']
params = [
['two-sided', 'less', 'greater'],
['auto', 'exact', 'asymp'],
]
def setup(self, alternative, mode):
rng = np.random.default_rng(0x2e7c964ff9a5cd6be22014c09f1dbba9)
self.a = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
self.b = stats.norm.rvs(loc=8, scale=10, size=500, random_state=rng)
def time_ks_1samp(self, alternative, mode):
stats.ks_1samp(self.a, stats.norm.cdf,
alternative=alternative, mode=mode)
def time_ks_2samp(self, alternative, mode):
stats.ks_2samp(self.a, self.b, alternative=alternative, mode=mode)
| KS |
python | apache__airflow | airflow-core/tests/unit/always/test_secrets.py | {
"start": 1275,
"end": 4950
} | class ____:
def setup_method(self) -> None:
SecretCache.reset()
@mock.patch("airflow.secrets.metastore.MetastoreBackend.get_connection")
@mock.patch("airflow.secrets.environment_variables.EnvironmentVariablesBackend.get_connection")
def test_get_connection_second_try(self, mock_env_get, mock_meta_get):
mock_env_get.side_effect = [None] # return None
Connection.get_connection_from_secrets("fake_conn_id")
mock_meta_get.assert_called_once_with(conn_id="fake_conn_id")
mock_env_get.assert_called_once_with(conn_id="fake_conn_id")
@mock.patch("airflow.secrets.metastore.MetastoreBackend.get_connection")
@mock.patch("airflow.secrets.environment_variables.EnvironmentVariablesBackend.get_connection")
def test_get_connection_first_try(self, mock_env_get, mock_meta_get):
mock_env_get.return_value = Connection("something") # returns something
Connection.get_connection_from_secrets("fake_conn_id")
mock_env_get.assert_called_once_with(conn_id="fake_conn_id")
mock_meta_get.assert_not_called()
@conf_vars(
{
(
"secrets",
"backend",
): "airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend",
("secrets", "backend_kwargs"): '{"connections_prefix": "/airflow", "profile_name": null}',
}
)
def test_initialize_secrets_backends(self):
backends = initialize_secrets_backends()
backend_classes = [backend.__class__.__name__ for backend in backends]
assert len(backends) == 3
assert "SystemsManagerParameterStoreBackend" in backend_classes
@conf_vars(
{
(
"secrets",
"backend",
): "airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend",
("secrets", "backend_kwargs"): '{"use_ssl": false}',
}
)
def test_backends_kwargs(self):
backends = initialize_secrets_backends()
systems_manager = next(
backend
for backend in backends
if backend.__class__.__name__ == "SystemsManagerParameterStoreBackend"
)
assert systems_manager.kwargs == {}
assert systems_manager.use_ssl is False
@conf_vars(
{
(
"secrets",
"backend",
): "airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend",
("secrets", "backend_kwargs"): '{"connections_prefix": "/airflow", "profile_name": null}',
}
)
@mock.patch.dict(
"os.environ",
{
"AIRFLOW_CONN_TEST_MYSQL": "mysql://airflow:airflow@host:5432/airflow",
},
)
@mock.patch(
"airflow.providers.amazon.aws.secrets.systems_manager."
"SystemsManagerParameterStoreBackend.get_connection"
)
def test_backend_fallback_to_env_var(self, mock_get_connection):
mock_get_connection.return_value = None
backends = ensure_secrets_loaded()
backend_classes = [backend.__class__.__name__ for backend in backends]
assert "SystemsManagerParameterStoreBackend" in backend_classes
conn = Connection.get_connection_from_secrets(conn_id="test_mysql")
# Assert that SystemsManagerParameterStoreBackend.get_conn_uri was called
mock_get_connection.assert_called_once_with(conn_id="test_mysql")
assert conn.get_uri() == "mysql://airflow:airflow@host:5432/airflow"
@skip_if_force_lowest_dependencies_marker
@pytest.mark.db_test
| TestConnectionsFromSecrets |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_shape_base_.py | {
"start": 25930,
"end": 27321
} | class ____(TestCase):
def test_basic(self):
a = np.array([0, 1, 2])
b = [[1, 2], [3, 4]]
assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2])
assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]])
assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]])
assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]])
assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]])
assert_equal(
tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4], [1, 2, 1, 2], [3, 4, 3, 4]]
)
def test_tile_one_repetition_on_array_gh4679(self):
a = np.arange(5)
b = tile(a, 1)
b += 2
assert_equal(a, np.arange(5))
def test_empty(self):
a = np.array([[[]]])
b = np.array([[], []])
c = tile(b, 2).shape
d = tile(a, (3, 2, 5)).shape
assert_equal(c, (2, 0))
assert_equal(d, (3, 2, 0))
def test_kroncompare(self):
reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)]
shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)]
for s in shape:
b = randint(0, 10, size=s)
for r in reps:
a = np.ones(r, b.dtype)
large = tile(b, r)
klarge = kron(a, b)
assert_equal(large, klarge)
@xfail # Maybe implement one day
| TestTile |
python | ray-project__ray | python/ray/serve/tests/unit/test_user_callable_wrapper.py | {
"start": 18229,
"end": 18434
} | class ____:
async def __call__(self, request: Request) -> str:
msg = await request.body()
return PlainTextResponse(f"Hello {msg}!")
app = FastAPI()
@serve.ingress(app)
| RawRequestHandler |
python | davidhalter__jedi | jedi/inference/value/instance.py | {
"start": 20402,
"end": 22210
} | class ____(ClassFilter):
"""
This class basically filters all the use cases where `self.*` was assigned.
"""
def __init__(self, instance, instance_class, node_context, origin_scope):
super().__init__(
class_value=instance_class,
node_context=node_context,
origin_scope=origin_scope,
is_instance=True,
)
self._instance = instance
def _filter(self, names):
start, end = self._parser_scope.start_pos, self._parser_scope.end_pos
names = [n for n in names if start < n.start_pos < end]
return self._filter_self_names(names)
def _filter_self_names(self, names):
for name in names:
trailer = name.parent
if trailer.type == 'trailer' \
and len(trailer.parent.children) == 2 \
and trailer.children[0] == '.':
if name.is_definition() and self._access_possible(name):
# TODO filter non-self assignments instead of this bad
# filter.
if self._is_in_right_scope(trailer.parent.children[0], name):
yield name
def _is_in_right_scope(self, self_name, name):
self_context = self._node_context.create_context(self_name)
names = self_context.goto(self_name, position=self_name.start_pos)
return any(
n.api_type == 'param'
and n.tree_name.get_definition().position_index == 0
and n.parent_context.tree_node is self._parser_scope
for n in names
)
def _convert_names(self, names):
return [SelfName(self._instance, self._node_context, name) for name in names]
def _check_flows(self, names):
return names
| SelfAttributeFilter |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vectorizers.py | {
"start": 12935,
"end": 13363
} | class ____(_VectorizerConfigCreate):
vectorizer: Union[Vectorizers, _EnumLikeStr] = Field(
default=Vectorizers.TEXT2VEC_TRANSFORMERS, frozen=True, exclude=True
)
poolingStrategy: Literal["masked_mean", "cls"]
vectorizeClassName: bool
inferenceUrl: Optional[str]
passageInferenceUrl: Optional[str]
queryInferenceUrl: Optional[str]
dimensions: Optional[int] = None
| _Text2VecTransformersConfig |
python | huggingface__transformers | src/transformers/models/layoutlmv3/modeling_layoutlmv3.py | {
"start": 41582,
"end": 46692
} | class ____(LayoutLMv3PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.layoutlmv3 = LayoutLMv3Model(config)
self.qa_outputs = LayoutLMv3ClassificationHead(config, pool_feature=False)
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
bbox: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.LongTensor] = None,
) -> Union[tuple, QuestionAnsweringModelOutput]:
r"""
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
Examples:
```python
>>> from transformers import AutoProcessor, AutoModelForQuestionAnswering
>>> from datasets import load_dataset
>>> import torch
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
>>> model = AutoModelForQuestionAnswering.from_pretrained("microsoft/layoutlmv3-base")
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> image = example["image"]
>>> question = "what's his name?"
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = processor(image, question, words, boxes=boxes, return_tensors="pt")
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions)
>>> loss = outputs.loss
>>> start_scores = outputs.start_logits
>>> end_scores = outputs.end_logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlmv3(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
bbox=bbox,
pixel_values=pixel_values,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
LayoutLMv3 Model with a sequence classification head on top (a linear layer on top of the final hidden state of the
[CLS] token) e.g. for document image classification tasks such as the
[RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset.
"""
)
| LayoutLMv3ForQuestionAnswering |
python | huggingface__transformers | src/transformers/models/omdet_turbo/modeling_omdet_turbo.py | {
"start": 10275,
"end": 11385
} | class ____:
def __init__(self, capacity: int):
self.cache = OrderedDict()
self.capacity = capacity
self.current_load = 0
def has(self, key) -> bool:
return key in self.cache
def get(self, key):
"""
Get the value of the key if the key exists in the cache, otherwise return None.
Move the key to the end of the cache to show that it was recently used.
"""
if key not in self.cache:
return None
self.cache.move_to_end(key)
return self.cache[key]
def put(self, key, value) -> None:
"""
Add the key-value pair to the cache.
Move the key to the end of the cache to show that it was recently used.
If the cache is full, remove the first key (least recently used).
"""
if key not in self.cache:
self.current_load += 1
if self.current_load > self.capacity:
self.cache.popitem(last=False)
self.current_load -= 1
self.cache[key] = value
self.cache.move_to_end(key)
| OmDetTurboLRUCache |
python | pandas-dev__pandas | asv_bench/benchmarks/series_methods.py | {
"start": 6068,
"end": 6325
} | class ____:
params = [10**3, 10**4, 10**5]
param_names = ["N"]
def setup(self, N):
self.s = Series(np.random.randint(0, N, size=10 * N)).astype("object")
def time_mode(self, N):
self.s.mode(dropna=False)
| ModeObjectDropNAFalse |
python | realpython__materials | queue/src/multiprocess_queue.py | {
"start": 1106,
"end": 3244
} | class ____(multiprocessing.Process):
def __init__(self, queue_in, queue_out, hash_value):
super().__init__(daemon=True)
self.queue_in = queue_in
self.queue_out = queue_out
self.hash_value = hash_value
def run(self):
while True:
job = self.queue_in.get()
if job is POISON_PILL:
self.queue_in.put(POISON_PILL)
break
if plaintext := job(self.hash_value):
self.queue_out.put(plaintext)
break
def main(args):
t1 = time.perf_counter()
queue_in = multiprocessing.Queue()
queue_out = multiprocessing.Queue()
workers = [
Worker(queue_in, queue_out, args.hash_value)
for _ in range(args.num_workers)
]
for worker in workers:
worker.start()
for text_length in range(1, args.max_length + 1):
combinations = Combinations(ascii_lowercase, text_length)
for indices in chunk_indices(len(combinations), len(workers)):
queue_in.put(Job(combinations, *indices))
queue_in.put(POISON_PILL)
while any(worker.is_alive() for worker in workers):
try:
solution = queue_out.get(timeout=0.1)
if solution:
t2 = time.perf_counter()
print(f"{solution} (found in {t2 - t1:.1f}s)")
break
except queue.Empty:
pass
else:
print("Unable to find a solution")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("hash_value")
parser.add_argument("-m", "--max-length", type=int, default=6)
parser.add_argument(
"-w",
"--num-workers",
type=int,
default=multiprocessing.cpu_count(),
)
return parser.parse_args()
def chunk_indices(length, num_chunks):
start = 0
while num_chunks > 0:
num_chunks = min(num_chunks, length)
chunk_size = round(length / num_chunks)
yield start, (start := start + chunk_size)
length -= chunk_size
num_chunks -= 1
if __name__ == "__main__":
main(parse_args())
| Worker |
python | django__django | tests/utils_tests/models.py | {
"start": 107,
"end": 203
} | class ____(models.Model):
category = models.OneToOneField(Category, models.CASCADE)
| CategoryInfo |
python | django__django | tests/expressions/models.py | {
"start": 2444,
"end": 2565
} | class ____(models.Model):
time = models.TimeField(null=True)
def __str__(self):
return str(self.time)
| Time |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/qlinear_test.py | {
"start": 217,
"end": 1075
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, N, IN, OUT, linear_under_test):
scale = torch.tensor(1.0 / 255)
zero_point = torch.tensor(0)
self.X = torch.randn(N, IN, dtype=torch.float32)
self.qX = torch.quantize_per_tensor(
self.X, scale=scale, zero_point=zero_point, dtype=torch.quint8
)
W = torch.randn(OUT, IN, dtype=torch.float32)
qW = torch.quantize_per_tensor(W, scale=scale, zero_point=0, dtype=torch.qint8)
# Assume that the `self.qlinear` is set in the child
self.qlinear = linear_under_test
self.qlinear.weight = qW
self.qlinear.scale = scale
self.qlinear.zero_point = zero_point
def forward(self, input):
# Assume that the `self.input` is set in the child
return self.qlinear(input)
| _QLinearBenchmarkBase |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-s3/unit_tests/v4/test_source.py | {
"start": 367,
"end": 2361
} | class ____(unittest.TestCase):
def setUp(self) -> None:
self._stream_reader = Mock(spec=SourceS3StreamReader)
self._source = SourceS3(
self._stream_reader,
Config,
SourceS3.read_catalog(str(TEST_FILES_FOLDER.joinpath("catalog.json"))),
SourceS3.read_config(str(TEST_FILES_FOLDER.joinpath("v3_config.json"))),
None,
)
@patch("source_s3.v4.source.emit_configuration_as_airbyte_control_message")
def test_given_config_is_v3_when_read_config_then_emit_new_config(self, emit_config_mock) -> None:
self._source.read_config(str(TEST_FILES_FOLDER.joinpath("v3_config.json")))
assert emit_config_mock.call_count == 1
@patch("source_s3.v4.source.emit_configuration_as_airbyte_control_message")
def test_given_config_is_v4_when_read_config_then_do_not_emit_new_config(self, emit_config_mock) -> None:
self._source.read_config(str(TEST_FILES_FOLDER.joinpath("v4_config.json")))
assert emit_config_mock.call_count == 0
def test_when_spec_then_v3_fields_not_required(self) -> None:
spec = self._source.spec()
assert all(field not in spec.connectionSpecification["required"] for field in _V3_FIELDS)
def test_when_spec_then_v3_fields_are_hidden(self) -> None:
spec = self._source.spec()
assert all(spec.connectionSpecification["properties"][field]["airbyte_hidden"] for field in _V3_FIELDS)
def test_when_spec_then_v3_fields_descriptions_are_prefixed_with_deprecation_warning(self) -> None:
spec = self._source.spec()
assert all(
spec.connectionSpecification["properties"][field]["description"].startswith("Deprecated and will be removed soon")
for field in _V3_FIELDS
)
def test_when_spec_then_v3_nested_fields_are_not_required(self) -> None:
spec = self._source.spec()
assert not spec.connectionSpecification["properties"]["provider"]["required"]
| SourceTest |
python | scikit-learn__scikit-learn | sklearn/neighbors/_graph.py | {
"start": 8917,
"end": 16942
} | class ____(
ClassNamePrefixFeaturesOutMixin, KNeighborsMixin, TransformerMixin, NeighborsBase
):
"""Transform X into a (weighted) graph of k nearest neighbors.
The transformed data is a sparse graph as returned by kneighbors_graph.
Read more in the :ref:`User Guide <neighbors_transformer>`.
.. versionadded:: 0.22
Parameters
----------
mode : {'distance', 'connectivity'}, default='distance'
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
n_neighbors : int, default=5
Number of neighbors for each sample in the transformed sparse graph.
For compatibility reasons, as each sample is considered as its own
neighbor, one extra neighbor will be computed when mode == 'distance'.
In this case, the sparse graph contains (n_neighbors + 1) neighbors.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Default is "minkowski", which
results in the standard Euclidean distance when p = 2. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
If metric is a callable function, it takes two arrays representing 1D
vectors as inputs and must return one value indicating the distance
between those vectors. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
p : float, default=2
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
This parameter is expected to be positive.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
effective_metric_ : str or callable
The distance metric used. It will be same as the `metric` parameter
or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
'minkowski' and `p` parameter set to 2.
effective_metric_params_ : dict
Additional keyword arguments for the metric function. For most metrics
will be same with `metric_params` parameter, but may also contain the
`p` parameter value if the `effective_metric_` attribute is set to
'minkowski'.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_samples_fit_ : int
Number of samples in the fitted data.
See Also
--------
kneighbors_graph : Compute the weighted graph of k-neighbors for
points in X.
RadiusNeighborsTransformer : Transform X into a weighted graph of
neighbors nearer than a radius.
Notes
-----
For an example of using :class:`~sklearn.neighbors.KNeighborsTransformer`
in combination with :class:`~sklearn.manifold.TSNE` see
:ref:`sphx_glr_auto_examples_neighbors_approximate_nearest_neighbors.py`.
Examples
--------
>>> from sklearn.datasets import load_wine
>>> from sklearn.neighbors import KNeighborsTransformer
>>> X, _ = load_wine(return_X_y=True)
>>> X.shape
(178, 13)
>>> transformer = KNeighborsTransformer(n_neighbors=5, mode='distance')
>>> X_dist_graph = transformer.fit_transform(X)
>>> X_dist_graph.shape
(178, 178)
"""
_parameter_constraints: dict = {
**NeighborsBase._parameter_constraints,
"mode": [StrOptions({"distance", "connectivity"})],
}
_parameter_constraints.pop("radius")
def __init__(
self,
*,
mode="distance",
n_neighbors=5,
algorithm="auto",
leaf_size=30,
metric="minkowski",
p=2,
metric_params=None,
n_jobs=None,
):
super().__init__(
n_neighbors=n_neighbors,
radius=None,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
p=p,
metric_params=metric_params,
n_jobs=n_jobs,
)
self.mode = mode
@_fit_context(
# KNeighborsTransformer.metric is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y=None):
"""Fit the k-nearest neighbors transformer from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : KNeighborsTransformer
The fitted k-nearest neighbors transformer.
"""
self._fit(X)
self._n_features_out = self.n_samples_fit_
return self
def transform(self, X):
"""Compute the (weighted) graph of Neighbors for points in X.
Parameters
----------
X : array-like of shape (n_samples_transform, n_features)
Sample data.
Returns
-------
Xt : sparse matrix of shape (n_samples_transform, n_samples_fit)
Xt[i, j] is assigned the weight of edge that connects i to j.
Only the neighbors have an explicit value.
The diagonal is always explicit.
The matrix is of CSR format.
"""
check_is_fitted(self)
add_one = self.mode == "distance"
return self.kneighbors_graph(
X, mode=self.mode, n_neighbors=self.n_neighbors + add_one
)
def fit_transform(self, X, y=None):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training set.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
Xt : sparse matrix of shape (n_samples, n_samples)
Xt[i, j] is assigned the weight of edge that connects i to j.
Only the neighbors have an explicit value.
The diagonal is always explicit.
The matrix is of CSR format.
"""
return self.fit(X).transform(X)
| KNeighborsTransformer |
python | pytorch__pytorch | torch/_inductor/compile_fx_ext.py | {
"start": 11053,
"end": 12656
} | class ____:
"""
Helper for _LoggerState - this class actually attaches to the logger in
the child process and grabs the log messages themselves.
"""
state: _LoggerState
queue: queue.Queue[logging.LogRecord]
handlers: Optional[dict[str, logging.Handler]]
def __init__(self, state: _LoggerState) -> None:
self.state = state
# A queue of the log entries
# TODO: For memory purposes should we log to a file and then respond with that?
self.queue = queue.Queue(-1)
# Mapping from name to handler (only valid when applied)
self.handlers = None
def finish(self) -> list[logging.LogRecord]:
assert self.handlers is None
logs = []
try:
while True:
logs.append(self.queue.get_nowait())
except queue.Empty:
pass
return logs
def remove(self) -> None:
assert self.handlers is not None
handlers, self.handlers = self.handlers, None
for name, handler in handlers.items():
logger = logging.getLogger(name)
logger.removeHandler(handler)
def apply(self) -> None:
from logging.handlers import QueueHandler
assert self.handlers is None
self.handlers = {}
for name, level in self.state.loggers.items():
logger = logging.getLogger(name)
handler = QueueHandler(self.queue)
self.handlers[name] = handler
logger.addHandler(handler)
if level != logging.NOTSET:
logger.setLevel(level)
| _CapturedLogs |
python | langchain-ai__langchain | libs/langchain/langchain_classic/output_parsers/structured.py | {
"start": 473,
"end": 930
} | class ____(BaseModel):
"""Schema for a response from a structured output parser."""
name: str
"""The name of the schema."""
description: str
"""The description of the schema."""
type: str = "string"
"""The type of the response."""
def _get_sub_string(schema: ResponseSchema) -> str:
return line_template.format(
name=schema.name,
description=schema.description,
type=schema.type,
)
| ResponseSchema |
python | scrapy__scrapy | tests/CrawlerProcess/default_name_resolver.py | {
"start": 58,
"end": 424
} | class ____(scrapy.Spider):
"""
Raises a twisted.internet.error.DNSLookupError:
the default name resolver does not handle IPv6 addresses.
"""
name = "ipv6_spider"
start_urls = ["http://[::1]"]
if __name__ == "__main__":
process = CrawlerProcess(settings={"RETRY_ENABLED": False})
process.crawl(IPv6Spider)
process.start()
| IPv6Spider |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/device_test.py | {
"start": 16711,
"end": 21288
} | class ____(test_util.DTensorBaseTest):
def setUp(self):
super(DTensorPackUnpackOnOneDMeshTest, self).setUp()
global_ids = test_util.create_device_ids_array((2,))
local_device_ids = np.ravel(global_ids).tolist()
mesh_dict = { # pylint: disable=g-complex-comprehension
device: Mesh(
[_BATCH_DIM],
global_ids,
local_device_ids,
test_util.create_device_list((2,), device),
)
for device in ("CPU", "GPU", "TPU")
}
self.mesh = self.configTestMesh(mesh_dict)
def testUnpack(self):
with api.default_mesh(self.mesh):
v = constant_op.constant(1.0)
v = api.copy_to_mesh(v, Layout.replicated(self.mesh, rank=0))
self.assertAllClose([1.0, 1.0], api.unpack(v))
def testUnpackVariables(self):
v0 = d_variable.DVariable(
api.call_with_layout(
array_ops.ones,
shape=[2, 3],
dtype=dtypes.float32,
layout=Layout.replicated(self.mesh, 2),
)
)
with self.assertRaisesRegex(TypeError, "Expecting a Tensor"):
api._dtensor_device().unpack(v0)
def testUnpackingRegularTensorRaisesInvalidArgumentError(self):
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
"DTensorUnpack expects a tensor placed on the DTensor device",
):
api._dtensor_device().unpack(constant_op.constant([1.0, 2.0]))
def testUnpackingNotEagerlyRaisesRuntimeError(self):
@polymorphic_function.function
def f(dtensor_input):
api._dtensor_device().unpack(dtensor_input)
with self.assertRaisesRegex(
RuntimeError, "`unpack` must be called eagerly."):
f(
api.copy_to_mesh(
constant_op.constant(1.0), Layout.replicated(self.mesh, rank=0)
)
)
def testPack(self):
a = constant_op.constant([1.0, 2.0])
b = constant_op.constant([3.0, 4.0])
with ops.device_v2(api.device_name()):
packed_tensor = api.pack(
[a, b], layout=Layout.batch_sharded(self.mesh, _BATCH_DIM, rank=1)
)
api.check_layout(
packed_tensor, Layout.batch_sharded(self.mesh, _BATCH_DIM, rank=1)
)
self.assertAllEqual([
4,
], packed_tensor.shape)
unpacked_tensor = api.unpack(packed_tensor)
self.assertAllClose([1., 2.], unpacked_tensor[0])
self.assertAllClose([3., 4.], unpacked_tensor[1])
def testPackingNotEagerlyRaisesRuntimeError(self):
@polymorphic_function.function
def f(a):
api.pack([a, a], layout=Layout.replicated(self.mesh, rank=1))
with self.assertRaisesRegex(RuntimeError, "`pack` must be called eagerly."):
f(constant_op.constant([1.0]))
def testPackingVariablesRaisesError(self):
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError, "Variable input is not supported."
):
api._dtensor_device().pack(
[
variables.Variable(array_ops.ones([2, 3])),
variables.Variable(array_ops.ones([2, 3])),
],
Layout.replicated(self.mesh, rank=2),
)
def testPackDevice(self):
a = constant_op.constant([1.0, 2.0])
b = constant_op.constant([3.0, 4.0])
with ops.device_v2(api.device_name()):
packed_tensor = api.pack(
[a, b], layout=Layout.batch_sharded(self.mesh, _BATCH_DIM, rank=1)
)
unpacked_tensor = api.unpack(packed_tensor)
self.assertAllEqual(self.mesh.local_devices(),
[t.device for t in unpacked_tensor])
def testPackScalar(self):
a = constant_op.constant(1.0)
with ops.device_v2(api.device_name()):
packed_layout = Layout([], self.mesh)
packed_tensor = api.pack([a, a], layout=packed_layout)
api.check_layout(packed_tensor, packed_layout)
self.assertAllEqual([], packed_tensor.shape)
unpacked_tensor = api.unpack(packed_tensor)
self.assertAllClose([a, a], unpacked_tensor)
def testPackHigherRankValue(self):
# Pack a rank 3 matrix into a 1d mesh.
a = constant_op.constant(
[[[1, 2, 3], [4, 5, 6]], [[2, 3, 4], [5, 6, 7]]]
) # 2x2x3
b = constant_op.constant(
[[[3, 2, 1], [6, 5, 4]], [[4, 3, 2], [7, 6, 5]]]
) # 2x2x3
pack_layout = Layout([_BATCH_DIM, UNSHARDED, UNSHARDED], self.mesh)
with ops.device_v2(api.device_name()):
# pack to 4x2x3
packed_tensor = api.pack([a, b], layout=pack_layout)
api.check_layout(packed_tensor, pack_layout)
self.assertAllEqual([4, 2, 3], packed_tensor.shape)
| DTensorPackUnpackOnOneDMeshTest |
python | eventlet__eventlet | eventlet/green/threading.py | {
"start": 811,
"end": 3903
} | class ____:
"""Wrapper for GreenThread objects to provide Thread-like attributes
and methods"""
def __init__(self, g):
global _count
self._g = g
self._name = 'GreenThread-%d' % _count
_count += 1
def __repr__(self):
return '<_GreenThread(%s, %r)>' % (self._name, self._g)
def join(self, timeout=None):
return self._g.wait()
def getName(self):
return self._name
get_name = getName
def setName(self, name):
self._name = str(name)
set_name = setName
name = property(getName, setName)
ident = property(lambda self: id(self._g))
def isAlive(self):
return True
is_alive = isAlive
daemon = property(lambda self: True)
def isDaemon(self):
return self.daemon
is_daemon = isDaemon
__threading = None
def _fixup_thread(t):
# Some third-party packages (lockfile) will try to patch the
# threading.Thread class with a get_name attribute if it doesn't
# exist. Since we might return Thread objects from the original
# threading package that won't get patched, let's make sure each
# individual object gets patched too our patched threading.Thread
# class has been patched. This is why monkey patching can be bad...
global __threading
if not __threading:
__threading = __import__('threading')
if (hasattr(__threading.Thread, 'get_name') and
not hasattr(t, 'get_name')):
t.get_name = t.getName
return t
def current_thread():
global __patched_enumerate
g = greenlet.getcurrent()
if not g:
# Not currently in a greenthread, fall back to standard function
return _fixup_thread(__orig_threading.current_thread())
try:
active = __threadlocal.active
except AttributeError:
active = __threadlocal.active = {}
g_id = id(g)
t = active.get(g_id)
if t is not None:
return t
# FIXME: move import from function body to top
# (jaketesler@github) Furthermore, I was unable to have the current_thread() return correct results from
# threading.enumerate() unless the enumerate() function was a) imported at runtime using the gross __import__() call
# and b) was hot-patched using patch_function().
# https://github.com/eventlet/eventlet/issues/172#issuecomment-379421165
if __patched_enumerate is None:
__patched_enumerate = eventlet.patcher.patch_function(__import__('threading').enumerate)
found = [th for th in __patched_enumerate() if th.ident == g_id]
if found:
return found[0]
# Add green thread to active if we can clean it up on exit
def cleanup(g):
del active[g_id]
try:
g.link(cleanup)
except AttributeError:
# Not a GreenThread type, so there's no way to hook into
# the green thread exiting. Fall back to the standard
# function then.
t = _fixup_thread(__orig_threading.current_thread())
else:
t = active[g_id] = _GreenThread(g)
return t
currentThread = current_thread
| _GreenThread |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/publish/pipeline.py | {
"start": 4171,
"end": 4434
} | class ____(BaseModel):
connector_technical_name: str
connector_repository: str
connector_version: str
connector_definition_id: str
dependencies: List[Dict[str, str]]
generation_time: datetime = datetime.utcnow()
| ConnectorDependenciesMetadata |
python | scrapy__scrapy | tests/mockserver/http_resources.py | {
"start": 2454,
"end": 3527
} | class ____(LeafResource):
def render(self, request):
total = getarg(request, b"total", 100, type_=int)
show = getarg(request, b"show", 1, type_=int)
order = getarg(request, b"order", b"desc")
maxlatency = getarg(request, b"maxlatency", 0, type_=float)
n = getarg(request, b"n", total, type_=int)
if order == b"rand":
nlist = [random.randint(1, total) for _ in range(show)]
else: # order == "desc"
nlist = range(n, max(n - show, 0), -1)
lag = random.random() * maxlatency
self.deferRequest(request, lag, self.renderRequest, request, nlist)
return NOT_DONE_YET
def renderRequest(self, request, nlist):
s = """<html> <head></head> <body>"""
args = request.args.copy()
for nl in nlist:
args[b"n"] = [to_bytes(str(nl))]
argstr = urlencode(args, doseq=True)
s += f"<a href='/follow?{argstr}'>follow {nl}</a><br>"
s += """</body>"""
request.write(to_bytes(s))
request.finish()
| Follow |
python | readthedocs__readthedocs.org | readthedocs/core/history.py | {
"start": 2245,
"end": 3188
} | class ____(models.Model):
"""
Abstract model to allow history models track extra data.
Extra data includes:
- User information to retain after they have been deleted
- IP & browser
"""
extra_history_user_id = models.IntegerField(
_("ID"),
blank=True,
null=True,
)
extra_history_user_username = models.CharField(
_("username"),
max_length=150,
null=True,
)
extra_history_ip = models.CharField(
_("IP address"),
blank=True,
null=True,
max_length=250,
)
extra_history_browser = models.CharField(
_("Browser user-agent"),
max_length=250,
blank=True,
null=True,
)
class Meta:
abstract = True
ExtraHistoricalRecords = partial(HistoricalRecords, bases=[ExtraFieldsHistoricalModel])
"""Helper partial to use instead of HistoricalRecords."""
| ExtraFieldsHistoricalModel |
python | doocs__leetcode | solution/0700-0799/0719.Find K-th Smallest Pair Distance/Solution.py | {
"start": 0,
"end": 387
} | class ____:
def smallestDistancePair(self, nums: List[int], k: int) -> int:
def count(dist):
cnt = 0
for i, b in enumerate(nums):
a = b - dist
j = bisect_left(nums, a, 0, i)
cnt += i - j
return cnt
nums.sort()
return bisect_left(range(nums[-1] - nums[0]), k, key=count)
| Solution |
python | prakhar1989__Algorithms | tests/modular_multiplicative_inverse_test.py | {
"start": 146,
"end": 623
} | class ____(unittest.TestCase):
def test_modular_multiplicative_inverse(self):
self.assertEqual(mmi.modular_multiplicative_inv(10, 7), 5)
self.assertEqual(mmi.modular_multiplicative_inv(45, 13), 11)
self.assertEqual(mmi.modular_multiplicative_inv(52, 1), 0)
self.assertRaises(ValueError, mmi.modular_multiplicative_inv, 12, -1)
self.assertRaises(ValueError, mmi.modular_multiplicative_inv, 12, 2)
if __name__ == "__main__":
unittest.main() | TestLCS |
python | mlflow__mlflow | mlflow/exceptions.py | {
"start": 5818,
"end": 5947
} | class ____(MlflowException):
"""Exception thrown when a http request fails to send due to an invalid URL"""
| InvalidUrlException |
python | sympy__sympy | sympy/physics/quantum/spin.py | {
"start": 2317,
"end": 5597
} | class ____:
"""Base class for spin operators."""
@classmethod
def _eval_hilbert_space(cls, label):
# We consider all j values so our space is infinite.
return ComplexSpace(S.Infinity)
@property
def name(self):
return self.args[0]
def _print_contents(self, printer, *args):
return '%s%s' % (self.name, self._coord)
def _print_contents_pretty(self, printer, *args):
a = stringPict(str(self.name))
b = stringPict(self._coord)
return self._print_subscript_pretty(a, b)
def _print_contents_latex(self, printer, *args):
return r'%s_%s' % ((self.name, self._coord))
def _represent_base(self, basis, **options):
j = options.get('j', S.Half)
size, mvals = m_values(j)
result = zeros(size, size)
for p in range(size):
for q in range(size):
me = self.matrix_element(j, mvals[p], j, mvals[q])
result[p, q] = me
return result
def _apply_op(self, ket, orig_basis, **options):
state = ket.rewrite(self.basis)
# If the state has only one term
if isinstance(state, State):
ret = (hbar*state.m)*state
# state is a linear combination of states
elif isinstance(state, Sum):
ret = self._apply_operator_Sum(state, **options)
else:
ret = qapply(self*state)
if ret == self*state:
raise NotImplementedError
return ret.rewrite(orig_basis)
def _apply_operator_JxKet(self, ket, **options):
return self._apply_op(ket, 'Jx', **options)
def _apply_operator_JxKetCoupled(self, ket, **options):
return self._apply_op(ket, 'Jx', **options)
def _apply_operator_JyKet(self, ket, **options):
return self._apply_op(ket, 'Jy', **options)
def _apply_operator_JyKetCoupled(self, ket, **options):
return self._apply_op(ket, 'Jy', **options)
def _apply_operator_JzKet(self, ket, **options):
return self._apply_op(ket, 'Jz', **options)
def _apply_operator_JzKetCoupled(self, ket, **options):
return self._apply_op(ket, 'Jz', **options)
def _apply_operator_TensorProduct(self, tp, **options):
# Uncoupling operator is only easily found for coordinate basis spin operators
# TODO: add methods for uncoupling operators
if not isinstance(self, (JxOp, JyOp, JzOp)):
raise NotImplementedError
result = []
for n in range(len(tp.args)):
arg = []
arg.extend(tp.args[:n])
arg.append(self._apply_operator(tp.args[n]))
arg.extend(tp.args[n + 1:])
result.append(tp.__class__(*arg))
return Add(*result).expand()
# TODO: move this to qapply_Mul
def _apply_operator_Sum(self, s, **options):
new_func = qapply(self*s.function)
if new_func == self*s.function:
raise NotImplementedError
return Sum(new_func, *s.limits)
def _eval_trace(self, **options):
#TODO: use options to use different j values
#For now eval at default basis
# is it efficient to represent each time
# to do a trace?
return self._represent_default_basis().trace()
| SpinOpBase |
python | apache__airflow | task-sdk/src/airflow/sdk/api/datamodels/_generated.py | {
"start": 7872,
"end": 8140
} | class ____(BaseModel):
"""
Schema for updating downstream tasks to a skipped state.
"""
model_config = ConfigDict(
extra="forbid",
)
tasks: Annotated[list[str | tuple[str, int]], Field(title="Tasks")]
| TISkippedDownstreamTasksStatePayload |
python | doocs__leetcode | solution/1800-1899/1854.Maximum Population Year/Solution.py | {
"start": 0,
"end": 389
} | class ____:
def maximumPopulation(self, logs: List[List[int]]) -> int:
d = [0] * 101
offset = 1950
for a, b in logs:
a, b = a - offset, b - offset
d[a] += 1
d[b] -= 1
s = mx = j = 0
for i, x in enumerate(d):
s += x
if mx < s:
mx, j = s, i
return j + offset
| Solution |
python | falconry__falcon | falcon/media/msgpack.py | {
"start": 2654,
"end": 3857
} | class ____(BinaryBaseHandlerWS):
"""WebSocket media handler for de(serializing) MessagePack to/from BINARY payloads.
This handler uses ``msgpack.unpackb()`` and ``msgpack.packb()``. The
MessagePack ``bin`` type is used to distinguish between Unicode strings
(of type ``str``) and byte strings (of type ``bytes``).
Note:
This handler requires the extra ``msgpack`` package (version 0.5.2
or higher), which must be installed in addition to ``falcon`` from
PyPI:
.. code::
$ pip install msgpack
"""
__slots__ = ('msgpack', 'packer')
_pack: Callable[[Any], bytes]
_unpackb: UnpackMethod
def __init__(self) -> None:
import msgpack
packer = msgpack.Packer(autoreset=True, use_bin_type=True)
self._pack = packer.pack
self._unpackb = msgpack.unpackb
def serialize(self, media: object) -> bytes:
return self._pack(media)
def deserialize(self, payload: bytes) -> Any:
# NOTE(jmvrbanac): Using unpackb since we would need to manage
# a buffer for Unpacker() which wouldn't gain us much.
return self._unpackb(payload, raw=False)
| MessagePackHandlerWS |
python | spack__spack | lib/spack/spack/util/web.py | {
"start": 6726,
"end": 30594
} | class ____(HTMLParser):
"""This parser takes an HTML page and selects the include-fragments,
used on GitHub, https://github.github.io/include-fragment-element,
as well as a possible base url."""
def __init__(self):
super().__init__()
self.fragments = []
self.base_url = None
def handle_starttag(self, tag, attrs):
# <include-fragment src="..." />
if tag == "include-fragment":
for attr, val in attrs:
if attr == "src":
self.fragments.append(val)
# <base href="..." />
elif tag == "base":
for attr, val in attrs:
if attr == "href":
self.base_url = val
def read_from_url(url, accept_content_type=None):
if isinstance(url, str):
url = urllib.parse.urlparse(url)
# Timeout in seconds for web requests
request = Request(url.geturl(), headers={"User-Agent": SPACK_USER_AGENT})
try:
response = urlopen(request)
except OSError as e:
raise SpackWebError(f"Download of {url.geturl()} failed: {e.__class__.__name__}: {e}")
if accept_content_type:
try:
content_type = get_header(response.headers, "Content-type")
reject_content_type = not content_type.startswith(accept_content_type)
except KeyError:
content_type = None
reject_content_type = True
if reject_content_type:
msg = "ignoring page {}".format(url.geturl())
if content_type:
msg += " with content type {}".format(content_type)
tty.debug(msg)
return None, None, None
return response.url, response.headers, response
def push_to_url(local_file_path, remote_path, keep_original=True, extra_args=None):
remote_url = urllib.parse.urlparse(remote_path)
if remote_url.scheme == "file":
remote_file_path = url_util.local_file_path(remote_url)
mkdirp(os.path.dirname(remote_file_path))
if keep_original:
shutil.copy(local_file_path, remote_file_path)
else:
try:
rename(local_file_path, remote_file_path)
except OSError as e:
if e.errno == errno.EXDEV:
# NOTE(opadron): The above move failed because it crosses
# filesystem boundaries. Copy the file (plus original
# metadata), and then delete the original. This operation
# needs to be done in separate steps.
shutil.copy2(local_file_path, remote_file_path)
os.remove(local_file_path)
else:
raise
elif remote_url.scheme == "s3":
if extra_args is None:
extra_args = {}
remote_path = remote_url.path
while remote_path.startswith("/"):
remote_path = remote_path[1:]
s3 = get_s3_session(remote_url, method="push")
s3.upload_file(local_file_path, remote_url.netloc, remote_path, ExtraArgs=extra_args)
if not keep_original:
os.remove(local_file_path)
elif remote_url.scheme == "gs":
gcs = GCSBlob(remote_url)
gcs.upload_to_blob(local_file_path)
if not keep_original:
os.remove(local_file_path)
else:
raise NotImplementedError(f"Unrecognized URL scheme: {remote_url.scheme}")
def base_curl_fetch_args(url, timeout=0):
    """Return the basic fetch arguments typically used in calls to curl.

    The arguments make curl fail on HTTP codes over 400, print HTML headers,
    resolve 3xx redirects, report status or failure appropriately, and apply
    a connection timeout.  Two configuration options are consulted:

    * config:connect_timeout (int): connection timeout
    * config:verify_ssl (str): Perform SSL verification

    Arguments:
        url (str): URL whose contents will be fetched
        timeout (int): Connection timeout, which is only used if higher than
            config:connect_timeout

    Returns (list): list of argument strings
    """
    # -f: fail on >400 errors; "-D -": print HTML headers; -L: follow 3xx.
    args = ["-f", "-D", "-", "-L", url]

    if not spack.config.get("config:verify_ssl"):
        args.append("-k")

    # Progress bar only when attached to a terminal with messages enabled;
    # otherwise stay silent but still surface errors.
    args.append("-#" if sys.stdout.isatty() and tty.msg_enabled() else "-sS")

    effective_timeout = spack.config.get("config:connect_timeout", 10)
    if timeout:
        effective_timeout = max(int(effective_timeout), int(timeout))
    if effective_timeout > 0:
        args += ["--connect-timeout", str(effective_timeout)]

    return args
def check_curl_code(returncode: int) -> None:
    """Check standard return code failures for provided arguments.

    Arguments:
        returncode: curl return code

    Raises FetchError if the curl returncode indicates failure
    """
    if returncode == 0:
        return
    if returncode == 22:
        # This is a 404. Curl will print the error.
        raise spack.error.FetchError("URL was not found!")
    if returncode == 60:
        # This is a certificate error. Suggest spack -k.
        # (Fixed: the previous message ran "certificates." and "Use"
        # together without a separating space.)
        raise spack.error.FetchError(
            "Curl was unable to fetch due to invalid certificate. "
            "This is either an attack, or your cluster's SSL "
            "configuration is bad. If you believe your SSL "
            "configuration is bad, you can try running spack -k, "
            "which will not check SSL certificates. "
            "Use this at your own risk."
        )
    raise spack.error.FetchError(f"Curl failed with error {returncode}")
def require_curl() -> Executable:
    """Return a ``curl`` Executable found on the path, configured for SSL.

    Raises:
        FetchError: if no ``curl`` executable can be located.
    """
    try:
        path = spack.util.executable.which_string("curl", required=True)
    except CommandNotFoundError as e:
        raise spack.error.FetchError(f"curl is required but not found: {e}") from e
    curl = spack.util.executable.Executable(path)
    # Point curl at Spack's configured SSL certificate store via env vars.
    set_curl_env_for_ssl_certs(curl)
    return curl
def fetch_url_text(url, curl: Optional[Executable] = None, dest_dir="."):
    """Retrieves text-only URL content using the configured fetch method.

    It determines the fetch method from:

    * config:url_fetch_method (str): fetch method to use (e.g., 'curl')

    If the method is ``curl``, it also uses the following configuration
    options:

    * config:connect_timeout (int): connection time out
    * config:verify_ssl (str): Perform SSL verification

    Arguments:
        url (str): URL whose contents are to be fetched
        curl (spack.util.executable.Executable or None): (optional) curl
            executable if curl is the configured fetch method
        dest_dir (str): (optional) destination directory for fetched text
            file

    Returns (str or None): path to the fetched file, or None when the
        urllib fetch succeeded but returned empty content

    Raises FetchError if the curl returncode indicates failure
    """
    if not url:
        raise spack.error.FetchError("A URL is required to fetch its text")
    tty.debug("Fetching text at {0}".format(url))
    filename = os.path.basename(url)
    path = os.path.join(dest_dir, filename)
    fetch_method = spack.config.get("config:url_fetch_method")
    tty.debug("Using '{0}' to fetch {1} into {2}".format(fetch_method, url, path))
    if fetch_method.startswith("curl"):
        curl_exe = curl or require_curl()
        # Any tokens after "curl" in the config string are extra curl flags.
        curl_args = fetch_method.split()[1:] + ["-O"]
        curl_args.extend(base_curl_fetch_args(url))
        # Curl automatically downloads file contents as filename
        with working_dir(dest_dir, create=True):
            _ = curl_exe(*curl_args, fail_on_error=False, output=os.devnull)
            check_curl_code(curl_exe.returncode)
        return path
    else:
        try:
            _, _, response = read_from_url(url)
            output = codecs.getreader("utf-8")(response).read()
            # Only write a file when there is actual content to save.
            if output:
                with working_dir(dest_dir, create=True):
                    with open(filename, "w", encoding="utf-8") as f:
                        f.write(output)
                return path
        except (SpackWebError, OSError, ValueError) as err:
            raise spack.error.FetchError(f"Urllib fetch failed: {err}")
    return None
def url_exists(url, curl=None):
    """Determines whether url exists.

    A scheme-specific process is used for Google Storage (``gs``) and Amazon
    Simple Storage Service (``s3``) URLs; otherwise, the configured fetch
    method defined by ``config:url_fetch_method`` is used.

    Arguments:
        url (str): URL whose existence is being checked
        curl (spack.util.executable.Executable or None): (optional) curl
            executable if curl is the configured fetch method

    Returns (bool): True if it exists; False otherwise.
    """
    tty.debug("Checking existence of {0}".format(url))
    parsed = urllib.parse.urlparse(url)

    fetch_method = spack.config.get("config:url_fetch_method", "urllib")
    if fetch_method.startswith("curl") and parsed.scheme not in ("gs", "s3"):
        curl_exe = curl or require_curl()
        # Asking curl for only the first byte (-r 0-0) is supposed to be a
        # portable existence probe that avoids downloading the body.
        probe_args = fetch_method.split()[1:]
        probe_args += ["--stderr", "-", "-s", "-f", "-r", "0-0", url]
        if not spack.config.get("config:verify_ssl"):
            probe_args.append("-k")
        _ = curl_exe(*probe_args, fail_on_error=False, output=os.devnull)
        return curl_exe.returncode == 0

    # Otherwise issue a HEAD request through urllib.
    try:
        urlopen(
            Request(url, method="HEAD", headers={"User-Agent": SPACK_USER_AGENT}),
            timeout=spack.config.get("config:connect_timeout", 10),
        )
        return True
    except OSError as e:
        tty.debug(f"Failure reading {url}: {e}")
        return False
def _debug_print_delete_results(result):
    # Log each deleted key and each failure from an S3 delete_objects
    # response; either section may be absent from the response dict.
    for deleted in result.get("Deleted", []):
        tty.debug("Deleted {0}".format(deleted["Key"]))
    for failure in result.get("Errors", []):
        tty.debug("Failed to delete {0} ({1})".format(failure["Key"], failure["Message"]))
def remove_url(url, recursive=False):
    """Remove the resource at ``url`` (file://, s3://, or gs://).

    When ``recursive`` is true, removes an entire tree/prefix; otherwise
    removes a single file/object.  URLs with any other scheme are silently
    ignored.
    """
    url = urllib.parse.urlparse(url)
    local_path = url_util.local_file_path(url)
    if local_path:
        if recursive:
            shutil.rmtree(local_path)
        else:
            os.remove(local_path)
        return
    if url.scheme == "s3":
        # Try to find a mirror for potential connection information
        s3 = get_s3_session(url, method="push")
        bucket = url.netloc
        if recursive:
            # Because list_objects_v2 can only return up to 1000 items
            # at a time, we have to paginate to make sure we get it all
            prefix = url.path.strip("/")
            paginator = s3.get_paginator("list_objects_v2")
            pages = paginator.paginate(Bucket=bucket, Prefix=prefix)
            delete_request = {"Objects": []}
            for item in pages.search("Contents"):
                if not item:
                    continue
                delete_request["Objects"].append({"Key": item["Key"]})
                # Make sure we do not try to hit S3 with a list of more
                # than 1000 items
                if len(delete_request["Objects"]) >= 1000:
                    r = s3.delete_objects(Bucket=bucket, Delete=delete_request)
                    _debug_print_delete_results(r)
                    delete_request = {"Objects": []}
            # Delete any items that remain
            if len(delete_request["Objects"]):
                r = s3.delete_objects(Bucket=bucket, Delete=delete_request)
                _debug_print_delete_results(r)
        else:
            # Single object: keys have no leading slash.
            s3.delete_object(Bucket=bucket, Key=url.path.lstrip("/"))
        return
    elif url.scheme == "gs":
        if recursive:
            bucket = GCSBucket(url)
            bucket.destroy(recursive=recursive)
        else:
            blob = GCSBlob(url)
            blob.delete_blob()
        return
    # Don't even try for other URL schemes.
def _iter_s3_contents(contents, prefix):
    # Yield each S3 key relative to ``prefix``, skipping the prefix entry
    # itself.  Keys are normalized to start with "/" so relpath against the
    # slash-prefixed ``prefix`` behaves correctly.
    for entry in contents:
        raw_key = entry["Key"]
        absolute = raw_key if raw_key.startswith("/") else "/" + raw_key
        relative = os.path.relpath(absolute, prefix)
        if relative != ".":
            yield relative
def _list_s3_objects(client, bucket, prefix, num_entries, start_after=None):
    # Issue a single list_objects_v2 call.  ``prefix`` carries a leading "/"
    # that real S3 keys do not, hence prefix[1:].
    request_kwargs = {"Bucket": bucket, "Prefix": prefix[1:], "MaxKeys": num_entries}
    if start_after is not None:
        request_kwargs["StartAfter"] = start_after
    result = client.list_objects_v2(**request_kwargs)
    # When the listing is truncated, report the last key so the caller can
    # resume from it on the next page; otherwise signal completion with None.
    last_key = result["Contents"][-1]["Key"] if result["IsTruncated"] else None
    return _iter_s3_contents(result["Contents"], prefix), last_key
def _iter_s3_prefix(client, url, num_entries=1024):
    # Yield every key under the URL's path, paginating ``num_entries`` at a
    # time via _list_s3_objects until no continuation key remains.
    bucket = url.netloc
    prefix = re.sub(r"^/*", "/", url.path)  # collapse leading slashes to one
    continuation = None
    while True:
        contents, continuation = _list_s3_objects(
            client, bucket, prefix, num_entries, start_after=continuation
        )
        yield from contents
        if not continuation:
            break
def _iter_local_prefix(path):
    # Walk the tree rooted at ``path`` and yield each file's path relative
    # to that root.
    for dirpath, _, filenames in os.walk(path):
        for name in filenames:
            yield os.path.relpath(os.path.join(dirpath, name), path)
def list_url(url, recursive=False):
    """List entries at ``url`` (file://, s3://, or gs://).

    When ``recursive`` is true, returns relative paths of all files under
    the location; otherwise only the top-level entries.  Returns None for
    unsupported schemes.
    """
    parsed = urllib.parse.urlparse(url)

    local_path = url_util.local_file_path(parsed)
    if local_path:
        if recursive:
            # convert backslash to forward slash as required for URLs
            return [str(PurePosixPath(Path(p))) for p in _iter_local_prefix(local_path)]
        return [
            entry
            for entry in os.listdir(local_path)
            if os.path.isfile(os.path.join(local_path, entry))
        ]
    if parsed.scheme == "s3":
        s3 = get_s3_session(parsed, method="fetch")
        if recursive:
            return list(_iter_s3_prefix(s3, parsed))
        # Only the first path component of each key, deduplicated.
        return list({key.split("/", 1)[0] for key in _iter_s3_prefix(s3, parsed)})
    if parsed.scheme == "gs":
        return GCSBucket(parsed).get_all_blobs(recursive=recursive)
def stat_url(url: str) -> Optional[Tuple[int, float]]:
    """Get stat result for a URL.

    Args:
        url: URL to get stat result for

    Returns:
        A tuple of (size, mtime) if the URL exists, None otherwise.

    Raises:
        NotImplementedError: for schemes other than ``file`` and ``s3``.
    """
    parsed_url = urllib.parse.urlparse(url)
    if parsed_url.scheme == "file":
        local_file_path = url_util.local_file_path(parsed_url)
        assert isinstance(local_file_path, str)
        try:
            url_stat = Path(local_file_path).stat()
        except FileNotFoundError:
            return None
        return url_stat.st_size, url_stat.st_mtime
    elif parsed_url.scheme == "s3":
        s3_bucket = parsed_url.netloc
        # S3 object keys never carry a leading slash.
        s3_key = parsed_url.path.lstrip("/")
        s3 = get_s3_session(url, method="fetch")
        try:
            head_request = s3.head_object(Bucket=s3_bucket, Key=s3_key)
        # NOTE(review): plain boto3 clients expose this as
        # ``s3.exceptions.ClientError``; confirm the get_s3_session wrapper
        # actually provides a ``ClientError`` attribute on the client.
        except s3.ClientError as e:
            if e.response["Error"]["Code"] == "404":
                return None
            raise e
        mtime = head_request["LastModified"].timestamp()
        size = head_request["ContentLength"]
        return size, mtime
    else:
        raise NotImplementedError(f"Unrecognized URL scheme: {parsed_url.scheme}")
def spider(
    root_urls: Union[str, Iterable[str]], depth: int = 0, concurrency: Optional[int] = None
):
    """Get web pages from root URLs.

    If depth is specified (e.g., depth=2), then this will also follow up to <depth> levels
    of links from each root.

    Args:
        root_urls: root urls used as a starting point for spidering
        depth: level of recursion into links
        concurrency: number of simultaneous requests that can be sent

    Returns:
        A dict of pages visited (URL) mapped to their full text and the set of visited links.
    """
    if isinstance(root_urls, str):
        root_urls = [root_urls]
    current_depth = 0
    pages, links, spider_args = {}, set(), []
    # Shared visited-set; each _spider call receives and returns it, and the
    # merged results are folded back in after every level completes.
    _visited: Set[str] = set()
    go_deeper = current_depth < depth
    for root_str in root_urls:
        root = urllib.parse.urlparse(root_str)
        spider_args.append((root, go_deeper, _visited))
    with spack.util.parallel.make_concurrent_executor(concurrency, require_fork=False) as tp:
        # Breadth-first: process one whole depth level before descending.
        while current_depth <= depth:
            tty.debug(
                f"SPIDER: [depth={current_depth}, max_depth={depth}, urls={len(spider_args)}]"
            )
            results = [tp.submit(_spider, *one_search_args) for one_search_args in spider_args]
            spider_args = []
            # Recompute whether the NEXT level should collect nested links.
            go_deeper = current_depth < depth
            for future in results:
                sub_pages, sub_links, sub_spider_args, sub_visited = future.result()
                _visited.update(sub_visited)
                sub_spider_args = [(x, go_deeper, _visited) for x in sub_spider_args]
                pages.update(sub_pages)
                links.update(sub_links)
                spider_args.extend(sub_spider_args)
            current_depth += 1
    return pages, links
def _spider(url: urllib.parse.ParseResult, collect_nested: bool, _visited: Set[str]):
    """Fetches URL and any pages it links to.

    Prints out a warning only if the root can't be fetched; it ignores errors with pages
    that the root links to.

    Args:
        url: url being fetched and searched for links
        collect_nested: whether we want to collect arguments for nested spidering on the
            links found in this url
        _visited: links already visited

    Returns:
        A tuple of:
        - pages: dict of pages visited (URL) mapped to their full text.
        - links: set of links encountered while visiting the pages.
        - spider_args: argument for subsequent call to spider
        - visited: updated set of visited urls
    """
    pages: Dict[str, str] = {}  # dict from page URL -> text content.
    links: Set[str] = set()  # set of all links seen on visited pages.
    subcalls: List[str] = []
    try:
        response_url, _, response = read_from_url(url, "text/html")
        if not response_url or not response:
            # Nothing fetched (e.g. wrong content type): return empty results.
            return pages, links, subcalls, _visited
        page = codecs.getreader("utf-8")(response).read()
        pages[response_url] = page
        # Parse out the include-fragments in the page
        # https://github.github.io/include-fragment-element
        metadata_parser = ExtractMetadataParser()
        metadata_parser.feed(page)
        # Change of base URL due to <base href="..." /> tag
        response_url = metadata_parser.base_url or response_url
        fragments = set()
        while metadata_parser.fragments:
            raw_link = metadata_parser.fragments.pop()
            abs_link = url_util.join(response_url, raw_link.strip(), resolve_href=True)
            # Fragment fetch failures are logged but never abort the page.
            fragment_response_url = None
            try:
                # This seems to be text/html, though text/fragment+html is also used
                fragment_response_url, _, fragment_response = read_from_url(abs_link, "text/html")
            except Exception as e:
                msg = f"Error reading fragment: {(type(e), str(e))}:{traceback.format_exc()}"
                tty.debug(msg)
            if not fragment_response_url or not fragment_response:
                continue
            fragment = codecs.getreader("utf-8")(fragment_response).read()
            fragments.add(fragment)
            pages[fragment_response_url] = fragment
        # Parse out the links in the page and all fragments
        link_parser = LinkParser()
        link_parser.feed(page)
        for fragment in fragments:
            link_parser.feed(fragment)
        while link_parser.links:
            raw_link = link_parser.links.pop()
            abs_link = url_util.join(response_url, raw_link.strip(), resolve_href=True)
            links.add(abs_link)
            # Skip stuff that looks like an archive
            if any(raw_link.endswith(s) for s in spack.llnl.url.ALLOWED_ARCHIVE_TYPES):
                continue
            # Skip already-visited links
            if abs_link in _visited:
                continue
            # If we're not at max depth, follow links.
            if collect_nested:
                subcalls.append(abs_link)
                # Mark visited now so sibling tasks sharing _visited skip it.
                _visited.add(abs_link)
    except OSError as e:
        tty.debug(f"[SPIDER] Unable to read: {url}")
        tty.debug(str(e), level=2)
        if isinstance(e, URLError) and isinstance(e.reason, ssl.SSLError):
            tty.warn(
                "Spack was unable to fetch url list due to a "
                "certificate verification problem. You can try "
                "running spack -k, which will not check SSL "
                "certificates. Use this at your own risk."
            )
    except HTMLParseError as e:
        # This error indicates that Python's HTML parser sucks.
        msg = "Got an error parsing HTML."
        tty.warn(msg, url, "HTMLParseError: " + str(e))
    except Exception as e:
        # Other types of errors are completely ignored,
        # except in debug mode
        tty.debug(f"Error in _spider: {type(e)}:{str(e)}", traceback.format_exc())
    finally:
        tty.debug(f"SPIDER: [url={url}]")
    return pages, links, subcalls, _visited
def get_header(headers, header_name):
    """Look up ``header_name`` in a dict of headers, tolerating fuzzy names.

    Returns ``headers[header_name]`` when the exact key is present.
    Otherwise word separators (space, "_", "-") and capitalization are
    ignored, so e.g. "Content-length", "content_length" and "conTENtLength"
    all match; with multiple fuzzy matches, the first one in the mapping's
    iteration order wins.

    Raises:
        KeyError: if neither an exact nor a fuzzy match exists.
    """

    def canonical(name):
        # Strip separators and case so name variants compare equal.
        return re.sub(r"[ _-]", "", name).lower()

    try:
        return headers[header_name]
    except KeyError:
        wanted = canonical(header_name)
        for candidate, value in headers.items():
            if canonical(candidate) == wanted:
                return value
        raise
def parse_etag(header_value):
    """Parse a strong etag from an ``ETag: <value>`` header value.

    Weakness indicators (``W/`` prefixes) are rejected, because it's unclear
    what a weak etag means for cache invalidation.  Returns the etag value
    without quotes, or None when the input is absent or malformed.
    """
    if header_value is None:
        return None
    # rfc7232 section 2.3 (mostly):
    #   entity-tag = [ weak ] opaque-tag
    #   opaque-tag = DQUOTE *etagc DQUOTE
    #   etagc      = %x21 / %x23-7E / obs-text   (VCHAR minus DQUOTE)
    #   obs-text   = %x80-FF
    # so a compliant etag is a quoted run of etagc characters.
    quoted = re.match(r'"([\x21\x23-\x7e\x80-\xFF]+)"$', header_value)
    if quoted:
        return quoted.group(1)
    # Not everybody adheres to the RFC (some servers send wrong etags, and
    # s3:// is simply a different standard): commonly the quotes are omitted
    # while everything else stays the same.
    bare = re.match(r"([\x21\x23-\x7e\x80-\xFF]+)$", header_value)
    return bare.group(1) if bare else None
| ExtractMetadataParser |
python | pytorch__pytorch | test/distributed/launcher/api_test.py | {
"start": 3137,
"end": 13797
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
# start a standalone, single process etcd server to use for all tests.
cls._etcd_server = EtcdServer()
cls._etcd_server.start()
cls._etcd_endpoint = cls._etcd_server.get_endpoint()
@classmethod
def tearDownClass(cls):
# stop the standalone etcd server.
cls._etcd_server.stop()
def setUp(self):
self.test_dir = tempfile.mkdtemp()
# remove any lingering environment variables.
for env in os.environ.keys(): # noqa:SIM118
if env.startswith("PET_"):
del os.environ[env]
# set a sentinel env var on the parent proc.
# this should be present on the child and gets
# asserted in ``bin/test_script.py``.
os.environ["TEST_SENTINEL_PARENT"] = "FOOBAR"
os.environ["OMP_NUM_THREADS"] = str(1)
def tearDown(self):
shutil.rmtree(self.test_dir)
def check_works_ran(self, world_size: int):
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_script_python(self):
nnodes = 1
nproc_per_node = 4
elastic_launch(
get_test_launch_config(self._etcd_endpoint, nnodes, nnodes, nproc_per_node),
sys.executable,
)("-u", path("bin/test_script.py"), f"--touch-file-dir={self.test_dir}")
# make sure all the workers ran.
# each worker touches a file with its global rank as the name.
world_size = nnodes * nproc_per_node
self.check_works_ran(world_size)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_script_python_local_rank_transfer(self):
nnodes = 1
nproc_per_node = 4
elastic_launch(
get_test_launch_config(self._etcd_endpoint, nnodes, nnodes, nproc_per_node),
sys.executable,
)("-u", path("bin/test_script.py"), f"--touch-file-dir={self.test_dir}")
# make sure all the workers ran.
# each worker touches a file with its global rank as the name.
world_size = nnodes * nproc_per_node
self.check_works_ran(world_size)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_script_bash(self):
nnodes = 1
nproc_per_node = 4
elastic_launch(
get_test_launch_config(self._etcd_endpoint, nnodes, nnodes, nproc_per_node),
path("bin/test_script.sh"),
)(f"{self.test_dir}")
world_size = nnodes * nproc_per_node
self.check_works_ran(world_size)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_function(self):
nnodes = 1
nproc_per_node = 4
res = elastic_launch(
get_test_launch_config(self._etcd_endpoint, nnodes, nnodes, nproc_per_node),
simple_rank_scale,
)()
expected_res = [10, 11, 12, 13]
actual_res = sorted(value for value in res.values())
self.assertEqual(expected_res, actual_res)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_dist_sum_with_static_rdzv(self):
nnodes = 1
nproc_per_node = 4
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
rdzv_endpoint = f"127.0.0.1:{master_port}"
rank = 0
rdzv_config = {
"rank": rank,
}
res = elastic_launch(
get_test_launch_config(
rdzv_endpoint,
nnodes,
nnodes,
nproc_per_node,
rdzv_backend="static",
config=rdzv_config,
),
_dist_sum,
)()
expected_res = [sum(range(nproc_per_node))] * nproc_per_node
actual_res = sorted(value for value in res.values())
self.assertEqual(expected_res, actual_res)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_elastic(self):
nproc_per_node = 4
elastic_launch(
get_test_launch_config(self._etcd_endpoint, 1, 2, nproc_per_node),
sys.executable,
)("-u", path("bin/test_script.py"), f"--touch-file-dir={self.test_dir}")
world_size = nproc_per_node
self.check_works_ran(world_size)
@mock.patch("torch.distributed.elastic.events.record")
def test_launch_elastic_worker_raise_exception(self, record_mock):
"""
Asserts that when the worker program fails and lancher raieses exception
to indicate that worker process failed.
"""
nproc_per_node = 4
with self.assertRaises(ChildFailedError):
elastic_launch(
get_test_launch_config(self._etcd_endpoint, 1, 2, nproc_per_node),
sys.executable,
)("-u", path("bin/test_script.py"), "--fail")
record_mock.assert_called_once()
@mock.patch("torch.distributed.elastic.events.record")
@mock.patch(
"torch.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent.run"
)
def test_launch_elastic_agent_raise_exception(self, record_mock, mock_agent_run):
"""
Asserts that when the agent raises an exception
the launcher re-raises the original exception.
"""
mock_agent_run.side_effect = MockException
with self.assertRaises(MockException):
elastic_launch(
get_test_launch_config(self._etcd_endpoint, 1, 2, 4),
sys.executable,
)("-u", path("bin/test_script.py"), f"--touch-file-dir={self.test_dir}")
record_mock.assert_called_once()
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_elastic_multiple_agents(self):
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
nnodes = 2
run_id = str(uuid.uuid4().int)
procs = []
ctx = mp.get_context("spawn")
for _ in range(nnodes - 1):
p = ctx.Process(
target=elastic_launch_wrapper,
args=(
self.test_dir,
self._etcd_endpoint,
min_nodes,
max_nodes,
nproc_per_node,
run_id,
),
)
procs.append(p)
p.start()
elastic_launch_wrapper(
self.test_dir,
self._etcd_endpoint,
min_nodes,
max_nodes,
nproc_per_node,
run_id,
)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
world_size = nnodes * nproc_per_node
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@patch("torch.distributed.launcher.api.LocalElasticAgent")
def test_launch_shutdown(self, agent_mock_cls):
agent_mock = Mock()
agent_mock.run.return_value = RunResult(WorkerState.SUCCEEDED)
agent_mock_cls.return_value = agent_mock
rdzv_handler_mock = Mock()
with patch(
"torch.distributed.elastic.rendezvous.registry.get_rendezvous_handler"
) as param_mock:
param_mock.return_value = rdzv_handler_mock
elastic_launch(
get_test_launch_config(self._etcd_endpoint, 1, 1, 4),
sys.executable,
)("-u", path("bin/test_script.py"), f"--touch-file-dir={self.test_dir}")
rdzv_handler_mock.shutdown.assert_called_once()
def test_get_entrypoint_name(self):
self.assertEqual(
"simple_rank_scale", _get_entrypoint_name(simple_rank_scale, [])
)
self.assertEqual("", _get_entrypoint_name(sys.executable, []))
self.assertEqual("", _get_entrypoint_name(sys.executable, ["-u"]))
self.assertEqual(
"test_script.py",
_get_entrypoint_name(sys.executable, ["-u", "test_script.py"]),
)
self.assertEqual("", _get_entrypoint_name(None, []))
@patch(ELASTIC_AGENT_RUN)
@patch(GET_RDZV_HANDLER)
def test_rdzv_handler_shutdown_on_agent_signal(self, mock_get_rdzv, mock_agent_run):
config = get_test_launch_config(
self._etcd_endpoint, min_nodes=1, max_nodes=1, nproc_per_node=1
)
for sigval in [signal.SIGTERM, signal.SIGINT]:
with patch(EVENTS_RECORD) as record_event_mock:
rdzv_handler_mock = MagicMock()
rdzv_handler_mock.get_run_id.return_value = short_hash()
mock_get_rdzv.return_value = rdzv_handler_mock
mock_agent_run.side_effect = SignalException("test", sigval)
with self.assertRaises(SignalException):
launch_agent(config, simple_rank_scale, [])
rdzv_handler_mock.shutdown.assert_not_called()
record_event_mock.assert_called_once()
@patch(ELASTIC_AGENT_RUN)
@patch(GET_RDZV_HANDLER)
def test_rdzv_handler_shutdown_on_agent_error(self, mock_get_rdzv, mock_agent_run):
config = get_test_launch_config(
self._etcd_endpoint, min_nodes=1, max_nodes=1, nproc_per_node=1
)
with patch(EVENTS_RECORD) as record_event_mock:
rdzv_handler_mock = MagicMock()
rdzv_handler_mock.get_run_id.return_value = short_hash()
mock_get_rdzv.return_value = rdzv_handler_mock
mock_agent_run.side_effect = RuntimeError("any other exception")
with self.assertRaises(RuntimeError):
launch_agent(config, simple_rank_scale, [])
rdzv_handler_mock.shutdown.assert_called_once()
record_event_mock.assert_called_once()
if __name__ == "__main__":
raise RuntimeError(
"This test is not currently used and should be "
"enabled in discover_tests.py if required."
)
| ElasticLaunchTest |
python | pytorch__pytorch | test/distributed/tensor/test_op_strategy.py | {
"start": 1509,
"end": 4005
} | class ____(TestCase):
def test_batch_dims(self):
equation = "abc,abc->abc"
input_dims, output_dim = EinsumDims.parse_equation(equation)
edims = EinsumDims.parse_dims(input_dims, output_dim)
self.assertEqual(edims.batch_dims, ["a", "b", "c"])
self.assertEqual(edims.contracting_dims, [])
self.assertEqual(edims.lhs_out_only_dims, [])
self.assertEqual(edims.rhs_out_only_dims, [])
def test_mm_dims(self):
equation = "mk,kn->mn"
input_dims, output_dim = EinsumDims.parse_equation(equation)
edims = EinsumDims.parse_dims(input_dims, output_dim)
self.assertEqual(edims.batch_dims, [])
self.assertEqual(edims.contracting_dims, ["k"])
self.assertEqual(edims.lhs_out_only_dims, ["m"])
self.assertEqual(edims.rhs_out_only_dims, ["n"])
def test_bmm_dims(self):
equation = "bmk,bkn->bmn"
input_dims, output_dim = EinsumDims.parse_equation(equation)
edims = EinsumDims.parse_dims(input_dims, output_dim)
self.assertEqual(edims.batch_dims, ["b"])
self.assertEqual(edims.contracting_dims, ["k"])
self.assertEqual(edims.lhs_out_only_dims, ["m"])
self.assertEqual(edims.rhs_out_only_dims, ["n"])
equation = "bcmk,bckn->bcmn"
input_dims, output_dim = EinsumDims.parse_equation(equation)
edims = EinsumDims.parse_dims(input_dims, output_dim)
self.assertEqual(edims.batch_dims, ["b", "c"])
self.assertEqual(edims.contracting_dims, ["k"])
self.assertEqual(edims.lhs_out_only_dims, ["m"])
self.assertEqual(edims.rhs_out_only_dims, ["n"])
def test_free_dims(self):
equation = "abc,ab->abc"
input_dims, output_dim = EinsumDims.parse_equation(equation)
edims = EinsumDims.parse_dims(input_dims, output_dim)
self.assertEqual(edims.batch_dims, ["a", "b"])
self.assertEqual(edims.contracting_dims, [])
self.assertEqual(edims.lhs_out_only_dims, ["c"])
self.assertEqual(edims.rhs_out_only_dims, [])
equation = "abd,bf->abfd" # codespell:ignore
input_dims, output_dim = EinsumDims.parse_equation(equation)
edims = EinsumDims.parse_dims(input_dims, output_dim)
self.assertEqual(edims.batch_dims, ["b"])
self.assertEqual(edims.contracting_dims, [])
self.assertEqual(edims.lhs_out_only_dims, ["a", "d"])
self.assertEqual(edims.rhs_out_only_dims, ["f"])
| TestEinsumDims |
python | django__django | tests/serializers/models/data.py | {
"start": 3693,
"end": 3818
} | class ____(models.Model):
data = models.ForeignKey(UniqueAnchor, models.SET_NULL, null=True, to_field="data")
| FKDataToField |
python | pyca__cryptography | src/cryptography/hazmat/asn1/asn1.py | {
"start": 8112,
"end": 8420
} | class ____(typing.Generic[U]):
value: U
Explicit = declarative_asn1.Encoding.Explicit
Implicit = declarative_asn1.Encoding.Implicit
Size = declarative_asn1.Size
PrintableString = declarative_asn1.PrintableString
UtcTime = declarative_asn1.UtcTime
GeneralizedTime = declarative_asn1.GeneralizedTime
| Default |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 255333,
"end": 255984
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of MinimizeComment"""
__schema__ = github_schema
__field_names__ = ("subject_id", "classifier", "client_mutation_id")
subject_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="subjectId")
"""The Node ID of the subject to modify."""
classifier = sgqlc.types.Field(sgqlc.types.non_null(ReportedContentClassifiers), graphql_name="classifier")
"""The classification of comment"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| MinimizeCommentInput |
python | pytorch__pytorch | torch/_dynamo/variables/functions.py | {
"start": 11984,
"end": 13431
} | class ____(VariableTracker):
def get_filename(self) -> str:
return self.get_code().co_filename # type: ignore[attr-defined]
def get_name(self) -> str:
return self.get_code().co_name # type: ignore[attr-defined]
def get_globals(self):
raise NotImplementedError
def call_function(
self,
tx: "InstructionTranslator",
args: Sequence[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
return tx.inline_user_function_return(self, [*self.self_args(), *args], kwargs) # type: ignore[attr-defined]
def call_obj_hasattr(
self, tx: "InstructionTranslator", name: str
) -> ConstantVariable:
result = False
try:
result = hasattr(self.get_function(), name) # type: ignore[attr-defined]
except NotImplementedError:
if name == "__name__" and isinstance(self, NestedUserFunctionVariable):
result = True
return variables.ConstantVariable.create(result)
def closure_vars(self, tx: "InstructionTranslator") -> dict[str, VariableTracker]:
return {}
# Override to set whether or not nested graph breaks should be allowed
# if we create an inlining tx for this BaseUserFunctionVariable.
# See symbolic_convert.py for where this function is called.
def should_allow_nested_graph_breaks(self):
return True
| BaseUserFunctionVariable |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/bigtable.py | {
"start": 8036,
"end": 12167
} | class ____(GoogleCloudBaseOperator, BigtableValidationMixin):
"""
Updates an existing Cloud Bigtable instance.
For more details about instance creation have a look at the reference:
https://googleapis.dev/python/bigtable/latest/instance.html#google.cloud.bigtable.instance.Instance.update
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigtableUpdateInstanceOperator`
:param instance_id: The ID of the Cloud Bigtable instance to update.
:param project_id: Optional, the ID of the Google Cloud project. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param instance_display_name: (optional) Human-readable name of the instance.
:param instance_type: (optional) The type of the instance.
:param instance_labels: (optional) Dictionary of labels to associate
with the instance.
:param timeout: (optional) timeout (in seconds) for instance update.
If None is not specified, Operator will wait indefinitely.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
REQUIRED_ATTRIBUTES: Iterable[str] = ["instance_id"]
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"impersonation_chain",
)
operator_extra_links = (BigtableInstanceLink(),)
def __init__(
self,
*,
instance_id: str,
project_id: str = PROVIDE_PROJECT_ID,
instance_display_name: str | None = None,
instance_type: enums.Instance.Type | enum.IntEnum | None = None,
instance_labels: dict | None = None,
timeout: float | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.project_id = project_id
self.instance_id = instance_id
self.instance_display_name = instance_display_name
self.instance_type = instance_type
self.instance_labels = instance_labels
self.timeout = timeout
self._validate_inputs()
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"instance_id": self.instance_id,
"project_id": self.project_id,
}
def execute(self, context: Context) -> None:
hook = BigtableHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
instance = hook.get_instance(project_id=self.project_id, instance_id=self.instance_id)
if not instance:
raise AirflowException(f"Dependency: instance '{self.instance_id}' does not exist.")
try:
hook.update_instance(
project_id=self.project_id,
instance_id=self.instance_id,
instance_display_name=self.instance_display_name,
instance_type=self.instance_type,
instance_labels=self.instance_labels,
timeout=self.timeout,
)
BigtableInstanceLink.persist(context=context)
except google.api_core.exceptions.GoogleAPICallError as e:
self.log.error("An error occurred. Exiting.")
raise e
| BigtableUpdateInstanceOperator |
python | huggingface__transformers | tests/models/code_llama/test_tokenization_code_llama.py | {
"start": 1106,
"end": 9299
} | class ____(TokenizerTesterMixin, unittest.TestCase):
# TokenizerTesterMixin configuration
from_pretrained_id = ["hf-internal-testing/llama-code-tokenizer"]
tokenizer_class = CodeLlamaTokenizer
integration_expected_tokens = ['▁This', '▁is', '▁a', '▁test', '▁', '<0xF0>', '<0x9F>', '<0x98>', '<0x8A>', '<0x0A>', 'I', '▁was', '▁born', '▁in', '▁', '9', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'als', 'é', '.', '<0x0A>', '生', '活', '的', '真', '<0xE8>', '<0xB0>', '<0x9B>', '是', '<0x0A>', 'Hi', '▁', '▁Hello', '<0x0A>', 'Hi', '▁▁', '▁Hello', '<0x0A>', '<0x0A>', '▁', '<0x0A>', '▁▁', '<0x0A>', '▁Hello', '<0x0A>', '<s>', '<0x0A>', 'hi', '<s>', 'there', '<0x0A>', 'The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁encoded', ':', '▁Hello', '.', '<0x0A>', 'But', '▁', 'ird', '▁and', '▁', 'ป', 'ี', '▁▁▁', 'ird', '▁▁▁', 'ด', '<0x0A>', 'H', 'ey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_token_ids = [910, 338, 263, 1243, 29871, 243, 162, 155, 141, 13, 29902, 471, 6345, 297, 29871, 29929, 29906, 29900, 29900, 29900, 29892, 322, 445, 338, 285, 1338, 29948, 29889, 13, 30486, 31704, 30210, 30848, 235, 179, 158, 30392, 13, 18567, 29871, 15043, 13, 18567, 259, 15043, 13, 13, 29871, 13, 259, 13, 15043, 13, 1, 13, 2918, 1, 12711, 13, 1576, 1494, 1347, 881, 367, 6284, 18511, 29901, 15043, 29889, 13, 6246, 29871, 1823, 322, 29871, 31010, 30691, 1678, 1823, 1678, 30718, 13, 29950, 1032, 920, 526, 366, 2599] # fmt: skip
expected_tokens_from_ids = ['▁This', '▁is', '▁a', '▁test', '▁', '<0xF0>', '<0x9F>', '<0x98>', '<0x8A>', '<0x0A>', 'I', '▁was', '▁born', '▁in', '▁', '9', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'als', 'é', '.', '<0x0A>', '生', '活', '的', '真', '<0xE8>', '<0xB0>', '<0x9B>', '是', '<0x0A>', 'Hi', '▁', '▁Hello', '<0x0A>', 'Hi', '▁▁', '▁Hello', '<0x0A>', '<0x0A>', '▁', '<0x0A>', '▁▁', '<0x0A>', '▁Hello', '<0x0A>', '<s>', '<0x0A>', 'hi', '<s>', 'there', '<0x0A>', 'The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁encoded', ':', '▁Hello', '.', '<0x0A>', 'But', '▁', 'ird', '▁and', '▁', 'ป', 'ี', '▁▁▁', 'ird', '▁▁▁', 'ด', '<0x0A>', 'H', 'ey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_decoded_text = "This is a test 😊\nI was born in 92000, and this is falsé.\n生活的真谛是\nHi Hello\nHi Hello\n\n \n \n Hello\n<s>\nhi<s>there\nThe following string should be properly encoded: Hello.\nBut ird and ปี ird ด\nHey how are you doing"
def test_save_and_load_tokenizer(self):
"""Override to handle non-deterministic vocabulary order from Rust tokenizer."""
# safety check on max_len default value so we are sure the test works
tokenizer = self.get_tokenizer()
self.assertNotEqual(tokenizer.model_max_length, 42)
# Now let's start the test
tokenizer = self.get_tokenizer()
# Isolate this from the other tests because we save additional tokens/etc
tmpdirname = tempfile.mkdtemp()
sample_text = " He is very happy, UNwant\u00e9d,running"
before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
after_vocab = after_tokenizer.get_vocab()
self.assertListEqual(before_tokens, after_tokens)
# Compare vocabularies in an order-independent way
# The Rust tokenizer returns vocabularies in non-deterministic order
# Some special tokens may be added during _post_init when loading, so we check that
# all tokens from before_vocab are in after_vocab with the same IDs
for token, token_id in before_vocab.items():
self.assertIn(token, after_vocab, f"Token '{token}' missing in after_vocab")
self.assertEqual(
after_vocab[token], token_id, f"Token '{token}' has different ID: {after_vocab[token]} != {token_id}"
)
shutil.rmtree(tmpdirname)
tokenizer = self.get_tokenizer(model_max_length=42)
# Isolate this from the other tests because we save additional tokens/etc
tmpdirname = tempfile.mkdtemp()
sample_text = " He is very happy, UNwant\u00e9d,running"
tokenizer.add_tokens(["bim", "bambam"])
extra_special_tokens = tokenizer.extra_special_tokens
extra_special_tokens.append("new_extra_special_token")
tokenizer.add_special_tokens(
{"extra_special_tokens": extra_special_tokens}, replace_extra_special_tokens=False
)
before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
after_vocab = after_tokenizer.get_vocab()
self.assertListEqual(before_tokens, after_tokens)
for token, token_id in before_vocab.items():
self.assertIn(token, after_vocab, f"Token '{token}' missing in after_vocab")
self.assertEqual(
after_vocab[token], token_id, f"Token '{token}' has different ID: {after_vocab[token]} != {token_id}"
)
self.assertIn("bim", after_vocab)
self.assertIn("bambam", after_vocab)
self.assertIn("new_extra_special_token", after_tokenizer.extra_special_tokens)
def test_no_infilling_init(self):
tokenizer = CodeLlamaTokenizer(SAMPLE_VOCAB, prefix_token=None, keep_accents=True)
with self.assertRaises(ValueError):
tokenizer.tokenize("This is <FILL_ME> prefix")
@require_torch
def test_batch_tokenization(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Longer text that will definitely require truncation.
text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
" will only worsen the violence and misery for millions of people.",
]
try:
batch = tokenizer(
text=text,
max_length=3,
return_tensors="pt",
)
except NotImplementedError:
self.skipTest(reason="Encountered NotImplementedError when calling tokenizer")
self.assertEqual(batch.input_ids.shape[1], 3)
# max_target_length will default to max_length if not specified
batch = tokenizer(text, max_length=3, return_tensors="pt")
self.assertEqual(batch.input_ids.shape[1], 3)
batch_encoder_only = tokenizer(text=text, max_length=3, return_tensors="pt")
self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
self.assertNotIn("decoder_input_ids", batch_encoder_only)
def test_special_tokens_initialization(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
added_tokens = [AddedToken("<special>", lstrip=True)]
tokenizer_r = self.get_tokenizer(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
r_output = tokenizer_r.encode("Hey this is a <special> token")
special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]
self.assertTrue(special_token_id in r_output)
@require_tokenizers
| CodeLlamaTokenizationTest |
python | mlflow__mlflow | mlflow/utils/search_utils.py | {
"start": 4627,
"end": 40943
} | class ____:
LIKE_OPERATOR = "LIKE"
ILIKE_OPERATOR = "ILIKE"
ASC_OPERATOR = "asc"
DESC_OPERATOR = "desc"
VALID_ORDER_BY_TAGS = [ASC_OPERATOR, DESC_OPERATOR]
VALID_METRIC_COMPARATORS = {">", ">=", "!=", "=", "<", "<="}
VALID_PARAM_COMPARATORS = {"!=", "=", LIKE_OPERATOR, ILIKE_OPERATOR}
VALID_TAG_COMPARATORS = {"!=", "=", LIKE_OPERATOR, ILIKE_OPERATOR}
VALID_STRING_ATTRIBUTE_COMPARATORS = {"!=", "=", LIKE_OPERATOR, ILIKE_OPERATOR, "IN", "NOT IN"}
VALID_NUMERIC_ATTRIBUTE_COMPARATORS = VALID_METRIC_COMPARATORS
VALID_DATASET_COMPARATORS = {"!=", "=", LIKE_OPERATOR, ILIKE_OPERATOR, "IN", "NOT IN"}
_BUILTIN_NUMERIC_ATTRIBUTES = {"start_time", "end_time"}
_ALTERNATE_NUMERIC_ATTRIBUTES = {"created", "Created"}
_ALTERNATE_STRING_ATTRIBUTES = {"run name", "Run name", "Run Name"}
NUMERIC_ATTRIBUTES = set(
list(_BUILTIN_NUMERIC_ATTRIBUTES) + list(_ALTERNATE_NUMERIC_ATTRIBUTES)
)
DATASET_ATTRIBUTES = {"name", "digest", "context"}
VALID_SEARCH_ATTRIBUTE_KEYS = set(
RunInfo.get_searchable_attributes()
+ list(_ALTERNATE_NUMERIC_ATTRIBUTES)
+ list(_ALTERNATE_STRING_ATTRIBUTES)
)
VALID_ORDER_BY_ATTRIBUTE_KEYS = set(
RunInfo.get_orderable_attributes() + list(_ALTERNATE_NUMERIC_ATTRIBUTES)
)
_METRIC_IDENTIFIER = "metric"
_ALTERNATE_METRIC_IDENTIFIERS = {"metrics"}
_PARAM_IDENTIFIER = "parameter"
_ALTERNATE_PARAM_IDENTIFIERS = {"parameters", "param", "params"}
_TAG_IDENTIFIER = "tag"
_ALTERNATE_TAG_IDENTIFIERS = {"tags"}
_ATTRIBUTE_IDENTIFIER = "attribute"
_ALTERNATE_ATTRIBUTE_IDENTIFIERS = {"attr", "attributes", "run"}
_DATASET_IDENTIFIER = "dataset"
_ALTERNATE_DATASET_IDENTIFIERS = {"datasets"}
_IDENTIFIERS = [
_METRIC_IDENTIFIER,
_PARAM_IDENTIFIER,
_TAG_IDENTIFIER,
_ATTRIBUTE_IDENTIFIER,
_DATASET_IDENTIFIER,
]
_VALID_IDENTIFIERS = set(
_IDENTIFIERS
+ list(_ALTERNATE_METRIC_IDENTIFIERS)
+ list(_ALTERNATE_PARAM_IDENTIFIERS)
+ list(_ALTERNATE_TAG_IDENTIFIERS)
+ list(_ALTERNATE_ATTRIBUTE_IDENTIFIERS)
+ list(_ALTERNATE_DATASET_IDENTIFIERS)
)
STRING_VALUE_TYPES = {TokenType.Literal.String.Single}
DELIMITER_VALUE_TYPES = {TokenType.Punctuation}
WHITESPACE_VALUE_TYPE = TokenType.Text.Whitespace
NUMERIC_VALUE_TYPES = {TokenType.Literal.Number.Integer, TokenType.Literal.Number.Float}
# Registered Models Constants
ORDER_BY_KEY_TIMESTAMP = "timestamp"
ORDER_BY_KEY_LAST_UPDATED_TIMESTAMP = "last_updated_timestamp"
ORDER_BY_KEY_MODEL_NAME = "name"
VALID_ORDER_BY_KEYS_REGISTERED_MODELS = {
ORDER_BY_KEY_TIMESTAMP,
ORDER_BY_KEY_LAST_UPDATED_TIMESTAMP,
ORDER_BY_KEY_MODEL_NAME,
}
VALID_TIMESTAMP_ORDER_BY_KEYS = {ORDER_BY_KEY_TIMESTAMP, ORDER_BY_KEY_LAST_UPDATED_TIMESTAMP}
# We encourage users to use timestamp for order-by
RECOMMENDED_ORDER_BY_KEYS_REGISTERED_MODELS = {ORDER_BY_KEY_MODEL_NAME, ORDER_BY_KEY_TIMESTAMP}
@staticmethod
def get_comparison_func(comparator):
return {
">": operator.gt,
">=": operator.ge,
"=": operator.eq,
"!=": operator.ne,
"<=": operator.le,
"<": operator.lt,
"LIKE": _like,
"ILIKE": _ilike,
"IN": lambda x, y: x in y,
"NOT IN": lambda x, y: x not in y,
}[comparator]
@staticmethod
def get_sql_comparison_func(comparator, dialect):
import sqlalchemy as sa
def comparison_func(column, value):
if comparator == "LIKE":
return column.like(value)
elif comparator == "ILIKE":
return column.ilike(value)
elif comparator == "IN":
return column.in_(value)
elif comparator == "NOT IN":
return ~column.in_(value)
return SearchUtils.get_comparison_func(comparator)(column, value)
def mssql_comparison_func(column, value):
if comparator == "RLIKE":
raise MlflowException(
"RLIKE operator is not supported for MSSQL database dialect. "
"Consider using LIKE or ILIKE operators instead.",
error_code=INVALID_PARAMETER_VALUE,
)
if not isinstance(column.type, sa.types.String):
return comparison_func(column, value)
collated = column.collate("Japanese_Bushu_Kakusu_100_CS_AS_KS_WS")
return comparison_func(collated, value)
def mysql_comparison_func(column, value):
if not isinstance(column.type, sa.types.String):
return comparison_func(column, value)
# MySQL is case insensitive by default, so we need to use the binary operator to
# perform case sensitive comparisons.
templates = {
# Use non-binary ahead of binary comparison for runtime performance
"=": "({column} = :value AND BINARY {column} = :value)",
"!=": "({column} != :value OR BINARY {column} != :value)",
"LIKE": "({column} LIKE :value AND BINARY {column} LIKE :value)",
# we need to cast the column to binary to perform a case sensitive comparison
# to avoid error like: `Character set 'utf8mb4_0900_ai_ci' cannot be used in
# conjunction with 'binary' in call to regexp_like`
"RLIKE": "(CAST({column} AS BINARY) REGEXP BINARY :value)",
}
if comparator in templates:
column = f"{column.class_.__tablename__}.{column.key}"
return sa.text(templates[comparator].format(column=column)).bindparams(
sa.bindparam("value", value=value, unique=True)
)
return comparison_func(column, value)
def sqlite_comparison_func(column, value):
if comparator == "RLIKE":
# SQLite requires a custom regexp function to be registered
# Use the built-in function if available
return column.op("REGEXP")(value)
return comparison_func(column, value)
def postgres_comparison_func(column, value):
if comparator == "RLIKE":
return column.op("~")(value)
return comparison_func(column, value)
return {
POSTGRES: postgres_comparison_func,
SQLITE: sqlite_comparison_func,
MSSQL: mssql_comparison_func,
MYSQL: mysql_comparison_func,
}[dialect]
@staticmethod
def translate_key_alias(key):
if key in ["created", "Created"]:
return "start_time"
if key in ["run name", "Run name", "Run Name"]:
return "run_name"
return key
@classmethod
def _trim_ends(cls, string_value):
return string_value[1:-1]
@classmethod
def _is_quoted(cls, value, pattern):
return len(value) >= 2 and value.startswith(pattern) and value.endswith(pattern)
@classmethod
def _trim_backticks(cls, entity_type):
"""Remove backticks from identifier like `param`, if they exist."""
if cls._is_quoted(entity_type, "`"):
return cls._trim_ends(entity_type)
return entity_type
@classmethod
def _strip_quotes(cls, value, expect_quoted_value=False):
"""
Remove quotes for input string.
Values of type strings are expected to have quotes.
Keys containing special characters are also expected to be enclose in quotes.
"""
if cls._is_quoted(value, "'") or cls._is_quoted(value, '"'):
return cls._trim_ends(value)
elif expect_quoted_value:
raise MlflowException(
"Parameter value is either not quoted or unidentified quote "
f"types used for string value {value}. Use either single or double "
"quotes.",
error_code=INVALID_PARAMETER_VALUE,
)
else:
return value
@classmethod
def _valid_entity_type(cls, entity_type):
entity_type = cls._trim_backticks(entity_type)
if entity_type not in cls._VALID_IDENTIFIERS:
raise MlflowException(
f"Invalid entity type '{entity_type}'. Valid values are {cls._IDENTIFIERS}",
error_code=INVALID_PARAMETER_VALUE,
)
if entity_type in cls._ALTERNATE_PARAM_IDENTIFIERS:
return cls._PARAM_IDENTIFIER
elif entity_type in cls._ALTERNATE_METRIC_IDENTIFIERS:
return cls._METRIC_IDENTIFIER
elif entity_type in cls._ALTERNATE_TAG_IDENTIFIERS:
return cls._TAG_IDENTIFIER
elif entity_type in cls._ALTERNATE_ATTRIBUTE_IDENTIFIERS:
return cls._ATTRIBUTE_IDENTIFIER
elif entity_type in cls._ALTERNATE_DATASET_IDENTIFIERS:
return cls._DATASET_IDENTIFIER
else:
# one of ("metric", "parameter", "tag", or "attribute") since it a valid type
return entity_type
@classmethod
def _get_identifier(cls, identifier, valid_attributes):
try:
tokens = identifier.split(".", 1)
if len(tokens) == 1:
key = tokens[0]
entity_type = cls._ATTRIBUTE_IDENTIFIER
else:
entity_type, key = tokens
except ValueError:
raise MlflowException(
f"Invalid identifier {identifier!r}. Columns should be specified as "
"'attribute.<key>', 'metric.<key>', 'tag.<key>', 'dataset.<key>', or "
"'param.'.",
error_code=INVALID_PARAMETER_VALUE,
)
identifier = cls._valid_entity_type(entity_type)
key = cls._trim_backticks(cls._strip_quotes(key))
if identifier == cls._ATTRIBUTE_IDENTIFIER and key not in valid_attributes:
raise MlflowException.invalid_parameter_value(
f"Invalid attribute key '{key}' specified. Valid keys are '{valid_attributes}'"
)
elif identifier == cls._DATASET_IDENTIFIER and key not in cls.DATASET_ATTRIBUTES:
raise MlflowException.invalid_parameter_value(
f"Invalid dataset key '{key}' specified. Valid keys are '{cls.DATASET_ATTRIBUTES}'"
)
return {"type": identifier, "key": key}
@classmethod
def validate_list_supported(cls, key: str) -> None:
if key != "run_id":
raise MlflowException(
"Only the 'run_id' attribute supports comparison with a list of quoted "
"string values.",
error_code=INVALID_PARAMETER_VALUE,
)
@classmethod
def _get_value(cls, identifier_type, key, token):
if identifier_type == cls._METRIC_IDENTIFIER:
if token.ttype not in cls.NUMERIC_VALUE_TYPES:
raise MlflowException(
f"Expected numeric value type for metric. Found {token.value}",
error_code=INVALID_PARAMETER_VALUE,
)
return token.value
elif identifier_type in (cls._PARAM_IDENTIFIER, cls._TAG_IDENTIFIER):
if token.ttype in cls.STRING_VALUE_TYPES or isinstance(token, Identifier):
return cls._strip_quotes(token.value, expect_quoted_value=True)
raise MlflowException(
"Expected a quoted string value for "
f"{identifier_type} (e.g. 'my-value'). Got value "
f"{token.value}",
error_code=INVALID_PARAMETER_VALUE,
)
elif identifier_type == cls._ATTRIBUTE_IDENTIFIER:
if key in cls.NUMERIC_ATTRIBUTES:
if token.ttype not in cls.NUMERIC_VALUE_TYPES:
raise MlflowException(
f"Expected numeric value type for numeric attribute: {key}. "
f"Found {token.value}",
error_code=INVALID_PARAMETER_VALUE,
)
return token.value
elif token.ttype in cls.STRING_VALUE_TYPES or isinstance(token, Identifier):
return cls._strip_quotes(token.value, expect_quoted_value=True)
elif isinstance(token, Parenthesis):
cls.validate_list_supported(key)
return cls._parse_run_ids(token)
else:
raise MlflowException(
f"Expected a quoted string value for attributes. Got value {token.value}",
error_code=INVALID_PARAMETER_VALUE,
)
elif identifier_type == cls._DATASET_IDENTIFIER:
if key in cls.DATASET_ATTRIBUTES and (
token.ttype in cls.STRING_VALUE_TYPES or isinstance(token, Identifier)
):
return cls._strip_quotes(token.value, expect_quoted_value=True)
elif isinstance(token, Parenthesis):
if key not in ("name", "digest", "context"):
raise MlflowException(
"Only the dataset 'name' and 'digest' supports comparison with a list of "
"quoted string values.",
error_code=INVALID_PARAMETER_VALUE,
)
return cls._parse_run_ids(token)
else:
raise MlflowException(
"Expected a quoted string value for dataset attributes. "
f"Got value {token.value}",
error_code=INVALID_PARAMETER_VALUE,
)
else:
# Expected to be either "param" or "metric".
raise MlflowException(
"Invalid identifier type. Expected one of "
f"{[cls._METRIC_IDENTIFIER, cls._PARAM_IDENTIFIER]}."
)
@classmethod
def _validate_comparison(cls, tokens, search_traces=False):
base_error_string = "Invalid comparison clause"
if len(tokens) != 3:
raise MlflowException(
f"{base_error_string}. Expected 3 tokens found {len(tokens)}",
error_code=INVALID_PARAMETER_VALUE,
)
if not isinstance(tokens[0], Identifier):
if not search_traces:
raise MlflowException(
f"{base_error_string}. Expected 'Identifier' found '{tokens[0]}'",
error_code=INVALID_PARAMETER_VALUE,
)
if search_traces and not tokens[0].match(
ttype=TokenType.Name.Builtin, values=["timestamp", "timestamp_ms"]
):
raise MlflowException(
f"{base_error_string}. Expected 'TokenType.Name.Builtin' found '{tokens[0]}'",
error_code=INVALID_PARAMETER_VALUE,
)
if not isinstance(tokens[1], Token) and tokens[1].ttype != TokenType.Operator.Comparison:
raise MlflowException(
f"{base_error_string}. Expected comparison found '{tokens[1]}'",
error_code=INVALID_PARAMETER_VALUE,
)
if not isinstance(tokens[2], Token) and (
tokens[2].ttype not in cls.STRING_VALUE_TYPES.union(cls.NUMERIC_VALUE_TYPES)
or isinstance(tokens[2], Identifier)
):
raise MlflowException(
f"{base_error_string}. Expected value token found '{tokens[2]}'",
error_code=INVALID_PARAMETER_VALUE,
)
@classmethod
def _get_comparison(cls, comparison):
stripped_comparison = [token for token in comparison.tokens if not token.is_whitespace]
cls._validate_comparison(stripped_comparison)
comp = cls._get_identifier(stripped_comparison[0].value, cls.VALID_SEARCH_ATTRIBUTE_KEYS)
comp["comparator"] = stripped_comparison[1].value
comp["value"] = cls._get_value(comp.get("type"), comp.get("key"), stripped_comparison[2])
return comp
@classmethod
def _invalid_statement_token_search_runs(cls, token):
if (
isinstance(token, Comparison)
or token.is_whitespace
or token.match(ttype=TokenType.Keyword, values=["AND"])
):
return False
return True
@classmethod
def _process_statement(cls, statement):
# check validity
tokens = _join_in_comparison_tokens(statement.tokens)
invalids = list(filter(cls._invalid_statement_token_search_runs, tokens))
if len(invalids) > 0:
invalid_clauses = ", ".join(f"'{token}'" for token in invalids)
raise MlflowException(
f"Invalid clause(s) in filter string: {invalid_clauses}",
error_code=INVALID_PARAMETER_VALUE,
)
return [cls._get_comparison(si) for si in tokens if isinstance(si, Comparison)]
@classmethod
def parse_search_filter(cls, filter_string):
if not filter_string:
return []
try:
parsed = sqlparse.parse(filter_string)
except Exception:
raise MlflowException(
f"Error on parsing filter '{filter_string}'", error_code=INVALID_PARAMETER_VALUE
)
if len(parsed) == 0 or not isinstance(parsed[0], Statement):
raise MlflowException(
f"Invalid filter '{filter_string}'. Could not be parsed.",
error_code=INVALID_PARAMETER_VALUE,
)
elif len(parsed) > 1:
raise MlflowException(
f"Search filter contained multiple expression {filter_string!r}. "
"Provide AND-ed expression list.",
error_code=INVALID_PARAMETER_VALUE,
)
return cls._process_statement(parsed[0])
@classmethod
def is_metric(cls, key_type, comparator):
if key_type == cls._METRIC_IDENTIFIER:
if comparator not in cls.VALID_METRIC_COMPARATORS:
raise MlflowException(
f"Invalid comparator '{comparator}' not one of '{cls.VALID_METRIC_COMPARATORS}",
error_code=INVALID_PARAMETER_VALUE,
)
return True
return False
@classmethod
def is_param(cls, key_type, comparator):
if key_type == cls._PARAM_IDENTIFIER:
if comparator not in cls.VALID_PARAM_COMPARATORS:
raise MlflowException(
f"Invalid comparator '{comparator}' not one of '{cls.VALID_PARAM_COMPARATORS}'",
error_code=INVALID_PARAMETER_VALUE,
)
return True
return False
@classmethod
def is_tag(cls, key_type, comparator):
if key_type == cls._TAG_IDENTIFIER:
if comparator not in cls.VALID_TAG_COMPARATORS:
raise MlflowException(
f"Invalid comparator '{comparator}' not one of '{cls.VALID_TAG_COMPARATORS}",
error_code=INVALID_PARAMETER_VALUE,
)
return True
return False
@classmethod
def is_attribute(cls, key_type, key_name, comparator):
return cls.is_string_attribute(key_type, key_name, comparator) or cls.is_numeric_attribute(
key_type, key_name, comparator
)
@classmethod
def is_string_attribute(cls, key_type, key_name, comparator):
if key_type == cls._ATTRIBUTE_IDENTIFIER and key_name not in cls.NUMERIC_ATTRIBUTES:
if comparator not in cls.VALID_STRING_ATTRIBUTE_COMPARATORS:
raise MlflowException(
f"Invalid comparator '{comparator}' not one of "
f"'{cls.VALID_STRING_ATTRIBUTE_COMPARATORS}'",
error_code=INVALID_PARAMETER_VALUE,
)
return True
return False
@classmethod
def is_numeric_attribute(cls, key_type, key_name, comparator):
if key_type == cls._ATTRIBUTE_IDENTIFIER and key_name in cls.NUMERIC_ATTRIBUTES:
if comparator not in cls.VALID_NUMERIC_ATTRIBUTE_COMPARATORS:
raise MlflowException(
f"Invalid comparator '{comparator}' not one of "
f"'{cls.VALID_STRING_ATTRIBUTE_COMPARATORS}",
error_code=INVALID_PARAMETER_VALUE,
)
return True
return False
@classmethod
def is_dataset(cls, key_type, comparator):
if key_type == cls._DATASET_IDENTIFIER:
if comparator not in cls.VALID_DATASET_COMPARATORS:
raise MlflowException(
f"Invalid comparator '{comparator}' "
f"not one of '{cls.VALID_DATASET_COMPARATORS}",
error_code=INVALID_PARAMETER_VALUE,
)
return True
return False
@classmethod
def _is_metric_on_dataset(cls, metric: Metric, dataset: dict[str, Any]) -> bool:
return metric.dataset_name == dataset.get("dataset_name") and (
dataset.get("dataset_digest") is None
or dataset.get("dataset_digest") == metric.dataset_digest
)
@classmethod
def _does_run_match_clause(cls, run, sed):
key_type = sed.get("type")
key = sed.get("key")
value = sed.get("value")
comparator = sed.get("comparator").upper()
key = SearchUtils.translate_key_alias(key)
if cls.is_metric(key_type, comparator):
lhs = run.data.metrics.get(key, None)
value = float(value)
elif cls.is_param(key_type, comparator):
lhs = run.data.params.get(key, None)
elif cls.is_tag(key_type, comparator):
lhs = run.data.tags.get(key, None)
elif cls.is_string_attribute(key_type, key, comparator):
lhs = getattr(run.info, key)
elif cls.is_numeric_attribute(key_type, key, comparator):
lhs = getattr(run.info, key)
value = int(value)
elif cls.is_dataset(key_type, comparator):
if key == "context":
return any(
SearchUtils.get_comparison_func(comparator)(tag.value if tag else None, value)
for dataset_input in run.inputs.dataset_inputs
for tag in dataset_input.tags
if tag.key == MLFLOW_DATASET_CONTEXT
)
else:
return any(
SearchUtils.get_comparison_func(comparator)(
getattr(dataset_input.dataset, key), value
)
for dataset_input in run.inputs.dataset_inputs
)
else:
raise MlflowException(
f"Invalid search expression type '{key_type}'", error_code=INVALID_PARAMETER_VALUE
)
if lhs is None:
return False
return SearchUtils.get_comparison_func(comparator)(lhs, value)
@classmethod
def _does_model_match_clause(cls, model, sed):
key_type = sed.get("type")
key = sed.get("key")
value = sed.get("value")
comparator = sed.get("comparator").upper()
key = SearchUtils.translate_key_alias(key)
if cls.is_metric(key_type, comparator):
matching_metrics = [metric for metric in model.metrics if metric.key == key]
lhs = matching_metrics[0].value if matching_metrics else None
value = float(value)
elif cls.is_param(key_type, comparator):
lhs = model.params.get(key, None)
elif cls.is_tag(key_type, comparator):
lhs = model.tags.get(key, None)
elif cls.is_string_attribute(key_type, key, comparator):
lhs = getattr(model.info, key)
elif cls.is_numeric_attribute(key_type, key, comparator):
lhs = getattr(model.info, key)
value = int(value)
else:
raise MlflowException(
f"Invalid model search expression type '{key_type}'",
error_code=INVALID_PARAMETER_VALUE,
)
if lhs is None:
return False
return SearchUtils.get_comparison_func(comparator)(lhs, value)
@classmethod
def filter(cls, runs, filter_string):
"""Filters a set of runs based on a search filter string."""
if not filter_string:
return runs
parsed = cls.parse_search_filter(filter_string)
def run_matches(run):
return all(cls._does_run_match_clause(run, s) for s in parsed)
return [run for run in runs if run_matches(run)]
@classmethod
def _validate_order_by_and_generate_token(cls, order_by):
try:
parsed = sqlparse.parse(order_by)
except Exception:
raise MlflowException(
f"Error on parsing order_by clause '{order_by}'",
error_code=INVALID_PARAMETER_VALUE,
)
if len(parsed) != 1 or not isinstance(parsed[0], Statement):
raise MlflowException(
f"Invalid order_by clause '{order_by}'. Could not be parsed.",
error_code=INVALID_PARAMETER_VALUE,
)
statement = parsed[0]
ttype_for_timestamp = (
TokenType.Name.Builtin
if Version(sqlparse.__version__) >= Version("0.4.3")
else TokenType.Keyword
)
if len(statement.tokens) == 1 and isinstance(statement[0], Identifier):
token_value = statement.tokens[0].value
elif len(statement.tokens) == 1 and statement.tokens[0].match(
ttype=ttype_for_timestamp, values=[cls.ORDER_BY_KEY_TIMESTAMP]
):
token_value = cls.ORDER_BY_KEY_TIMESTAMP
elif (
statement.tokens[0].match(
ttype=ttype_for_timestamp, values=[cls.ORDER_BY_KEY_TIMESTAMP]
)
and all(token.is_whitespace for token in statement.tokens[1:-1])
and statement.tokens[-1].ttype == TokenType.Keyword.Order
):
token_value = cls.ORDER_BY_KEY_TIMESTAMP + " " + statement.tokens[-1].value
else:
raise MlflowException(
f"Invalid order_by clause '{order_by}'. Could not be parsed.",
error_code=INVALID_PARAMETER_VALUE,
)
return token_value
@classmethod
def _parse_order_by_string(cls, order_by):
token_value = cls._validate_order_by_and_generate_token(order_by)
is_ascending = True
tokens = shlex.split(token_value.replace("`", '"'))
if len(tokens) > 2:
raise MlflowException(
f"Invalid order_by clause '{order_by}'. Could not be parsed.",
error_code=INVALID_PARAMETER_VALUE,
)
elif len(tokens) == 2:
order_token = tokens[1].lower()
if order_token not in cls.VALID_ORDER_BY_TAGS:
raise MlflowException(
f"Invalid ordering key in order_by clause '{order_by}'.",
error_code=INVALID_PARAMETER_VALUE,
)
is_ascending = order_token == cls.ASC_OPERATOR
token_value = tokens[0]
return token_value, is_ascending
@classmethod
def parse_order_by_for_search_runs(cls, order_by):
token_value, is_ascending = cls._parse_order_by_string(order_by)
identifier = cls._get_identifier(token_value.strip(), cls.VALID_ORDER_BY_ATTRIBUTE_KEYS)
return identifier["type"], identifier["key"], is_ascending
@classmethod
def parse_order_by_for_search_registered_models(cls, order_by):
token_value, is_ascending = cls._parse_order_by_string(order_by)
token_value = token_value.strip()
if token_value not in cls.VALID_ORDER_BY_KEYS_REGISTERED_MODELS:
raise MlflowException(
f"Invalid order by key '{token_value}' specified. Valid keys "
f"are '{cls.RECOMMENDED_ORDER_BY_KEYS_REGISTERED_MODELS}'",
error_code=INVALID_PARAMETER_VALUE,
)
return token_value, is_ascending
@classmethod
def _get_value_for_sort(cls, run, key_type, key, ascending):
"""Returns a tuple suitable to be used as a sort key for runs."""
sort_value = None
key = SearchUtils.translate_key_alias(key)
if key_type == cls._METRIC_IDENTIFIER:
sort_value = run.data.metrics.get(key)
elif key_type == cls._PARAM_IDENTIFIER:
sort_value = run.data.params.get(key)
elif key_type == cls._TAG_IDENTIFIER:
sort_value = run.data.tags.get(key)
elif key_type == cls._ATTRIBUTE_IDENTIFIER:
sort_value = getattr(run.info, key)
else:
raise MlflowException(
f"Invalid order_by entity type '{key_type}'", error_code=INVALID_PARAMETER_VALUE
)
# Return a key such that None values are always at the end.
is_none = sort_value is None
is_nan = isinstance(sort_value, float) and math.isnan(sort_value)
fill_value = (1 if ascending else -1) * math.inf
if is_none:
sort_value = fill_value
elif is_nan:
sort_value = -fill_value
is_none_or_nan = is_none or is_nan
return (is_none_or_nan, sort_value) if ascending else (not is_none_or_nan, sort_value)
@classmethod
def _get_model_value_for_sort(cls, model, key_type, key, ascending):
    """Build a sort-key tuple for *model* such that None/NaN always sort last."""
    key = SearchUtils.translate_key_alias(key)
    if key_type == cls._METRIC_IDENTIFIER:
        # First metric entry whose key matches, coerced to float; None if absent.
        sort_value = next(
            (float(metric.value) for metric in model.metrics if metric.key == key), None
        )
    elif key_type == cls._PARAM_IDENTIFIER:
        sort_value = model.params.get(key)
    elif key_type == cls._TAG_IDENTIFIER:
        sort_value = model.tags.get(key)
    elif key_type == cls._ATTRIBUTE_IDENTIFIER:
        sort_value = getattr(model, key)
    else:
        raise MlflowException(
            f"Invalid models order_by entity type '{key_type}'",
            error_code=INVALID_PARAMETER_VALUE,
        )

    # Replace missing values with an infinity of the right sign so that
    # None (and NaN) entries end up at the end of the ordering.
    is_none = sort_value is None
    is_nan = isinstance(sort_value, float) and math.isnan(sort_value)
    sentinel = (1 if ascending else -1) * math.inf
    if is_none:
        sort_value = sentinel
    elif is_nan:
        sort_value = -sentinel
    missing = is_none or is_nan
    if ascending:
        return (missing, sort_value)
    return (not missing, sort_value)
@classmethod
def sort(cls, runs, order_by_list):
    """Sort *runs* by their natural order, overridden by *order_by_list*.

    Natural order is start time descending with run id as tie-breaker.
    Because Python's sort is stable, the explicit clauses are applied in
    reverse so the first clause ends up dominating.
    """
    ordered = sorted(runs, key=lambda r: (-r.info.start_time, r.info.run_id))
    for clause in reversed(order_by_list or []):
        key_type, key, ascending = cls.parse_order_by_for_search_runs(clause)
        ordered = sorted(
            ordered,
            key=lambda r: cls._get_value_for_sort(r, key_type, key, ascending),
            reverse=not ascending,
        )
    return ordered
@classmethod
def parse_start_offset_from_page_token(cls, page_token):
    """Decode a pagination token and return the start offset it encodes.

    The token is a base64-encoded JSON document of the (private, unstable)
    form ``{"offset": <int>}``; it should not be relied upon outside this
    class. A missing/empty token means offset 0.

    Raises:
        MlflowException: if the token cannot be base64-decoded, is not
            valid JSON, lacks a truthy ``offset`` entry, or the offset is
            not convertible to ``int``.
    """
    if not page_token:
        return 0

    # b64decode raises TypeError for non-bytes-like input and
    # binascii.Error for invalid base64 payloads; both previously had
    # byte-identical handlers, so they are merged here.
    try:
        decoded_token = base64.b64decode(page_token)
    except (TypeError, base64.binascii.Error):
        raise MlflowException(
            "Invalid page token, could not base64-decode", error_code=INVALID_PARAMETER_VALUE
        )

    try:
        parsed_token = json.loads(decoded_token)
    except ValueError:
        raise MlflowException(
            f"Invalid page token, decoded value={decoded_token}",
            error_code=INVALID_PARAMETER_VALUE,
        )

    offset_str = parsed_token.get("offset")
    # NOTE(review): a token encoding offset 0 is rejected here, but
    # create_page_token never emits one, so this is unreachable in practice.
    if not offset_str:
        raise MlflowException(
            f"Invalid page token, parsed value={parsed_token}",
            error_code=INVALID_PARAMETER_VALUE,
        )

    try:
        offset = int(offset_str)
    except ValueError:
        raise MlflowException(
            f"Invalid page token, not stringable {offset_str}",
            error_code=INVALID_PARAMETER_VALUE,
        )
    return offset
@classmethod
def create_page_token(cls, offset):
    """Encode *offset* into an opaque base64 pagination token (bytes)."""
    payload = json.dumps({"offset": offset}).encode("utf-8")
    return base64.b64encode(payload)
@classmethod
def paginate(cls, runs, page_token, max_results):
    """Return one page of *runs* plus an optional continuation token.

    The offset is decoded from *page_token*; a next-page token is only
    produced when further results remain past this page.
    """
    start = cls.parse_start_offset_from_page_token(page_token)
    end = start + max_results
    page = runs[start:end]
    next_token = cls.create_page_token(end) if end < len(runs) else None
    return (page, next_token)
# Model Registry specific parser
# TODO: Tech debt. Refactor search code into common utils, tracking server, and model
# registry specific code.
# Filter keys accepted when searching model versions / registered models.
VALID_SEARCH_KEYS_FOR_MODEL_VERSIONS = {"name", "run_id", "source_path"}
VALID_SEARCH_KEYS_FOR_REGISTERED_MODELS = {"name"}
@classmethod
def _check_valid_identifier_list(cls, tup: tuple[Any, ...]) -> None:
"""
Validate that `tup` is a non-empty tuple of strings.
"""
if len(tup) == 0:
raise MlflowException(
"While parsing a list in the query,"
" expected a non-empty list of string values, but got empty list",
error_code=INVALID_PARAMETER_VALUE,
)
if not all(isinstance(x, str) for x in tup):
raise MlflowException(
"While parsing a list in the query, expected string value, punctuation, "
f"or whitespace, but got different type in list: {tup}",
error_code=INVALID_PARAMETER_VALUE,
)
@classmethod
def _parse_list_from_sql_token(cls, token):
    """Parse a SQL IN-clause token into a validated tuple of strings.

    ``token.value`` holds a Python-literal rendering of the list, e.g.
    ``"('a', 'b')"``; a single bare value is normalized to a 1-tuple.

    Raises:
        MlflowException: if the literal is ill-formed or contains
            non-string entries.
    """
    try:
        parsed = ast.literal_eval(token.value)
    # literal_eval raises SyntaxError for unparsable input and ValueError
    # for syntactically valid but non-literal expressions; both indicate
    # an ill-formed list, so both are translated to MlflowException.
    except (SyntaxError, ValueError) as e:
        raise MlflowException(
            "While parsing a list in the query,"
            " expected a non-empty list of string values, but got ill-formed list.",
            error_code=INVALID_PARAMETER_VALUE,
        ) from e
    parsed = parsed if isinstance(parsed, tuple) else (parsed,)
    cls._check_valid_identifier_list(parsed)
    return parsed
@classmethod
def _parse_run_ids(cls, token):
    """Parse run ids from an IN-clause token, dropping ids with upper case.

    MySQL's IN comparison is case-insensitive while run ids only contain
    lower-case letters, so ids with upper-case characters can never match
    and are filtered out up front.
    """
    # NOTE(review): str.islower() is also False for digit-only strings —
    # presumably run ids always contain at least one letter; confirm.
    return [rid for rid in cls._parse_list_from_sql_token(token) if rid.islower()]
| SearchUtils |
python | pyqtgraph__pyqtgraph | pyqtgraph/dockarea/DockDrop.py | {
"start": 2807,
"end": 4468
} | class ____(QtWidgets.QWidget):
"""Overlay widget that draws drop areas during a drag-drop operation"""
def __init__(self, parent):
    """Create the overlay as a hidden, mouse-transparent child of *parent*."""
    QtWidgets.QWidget.__init__(self, parent)
    self.dropArea = None  # 'left'/'right'/'top'/'bottom'/'center' or None (hidden)
    self.hide()
    # Let drag/mouse events fall through to the widgets beneath the overlay.
    self.setAttribute(QtCore.Qt.WidgetAttribute.WA_TransparentForMouseEvents)
def setDropArea(self, area):
    """Show the overlay highlighting *area*, or hide it when *area* is None."""
    self.dropArea = area
    if area is None:
        self.hide()
    else:
        ## Resize overlay to just the region where drop area should be displayed.
        ## This works around a Qt bug--can't display transparent widgets over QGLWidget
        prgn = self.parent().rect()
        rgn = QtCore.QRect(prgn)
        # Edge strips are at most 30 px and never more than a third of the parent.
        w = min(30, int(prgn.width() / 3))
        h = min(30, int(prgn.height() / 3))

        if self.dropArea == 'left':
            rgn.setWidth(w)
        elif self.dropArea == 'right':
            rgn.setLeft(rgn.left() + prgn.width() - w)
        elif self.dropArea == 'top':
            rgn.setHeight(h)
        elif self.dropArea == 'bottom':
            rgn.setTop(rgn.top() + prgn.height() - h)
        elif self.dropArea == 'center':
            # Center target: the parent rect inset by the strip sizes on all sides.
            rgn.adjust(w, h, -w, -h)
        self.setGeometry(rgn)
        self.show()

    self.update()
def paintEvent(self, ev):
    """Draw a translucent blue rectangle over the active drop region."""
    if self.dropArea is None:
        return
    p = QtGui.QPainter(self)
    rgn = self.rect()

    p.setBrush(QtGui.QBrush(QtGui.QColor(100, 100, 255, 50)))  # translucent fill
    p.setPen(QtGui.QPen(QtGui.QColor(50, 50, 150), 3))  # darker 3px border
    p.drawRect(rgn)
    p.end()  # end the painter explicitly; it must not outlive the paint event
| DropAreaOverlay |
python | catalyst-team__catalyst | catalyst/callbacks/mixup.py | {
"start": 179,
"end": 5899
} | class ____(Callback):
"""
Callback to do mixup augmentation. More details about mixin can be found in the paper
`mixup: Beyond Empirical Risk Minimization`: https://arxiv.org/abs/1710.09412 .
Args:
keys: batch keys to which you want to apply augmentation
alpha: beta distribution a=b parameters. Must be >=0.
The more alpha closer to zero the less effect of the mixup.
mode: mode determines the method of use. Must be in ["replace", "add"].
If "replace" then replaces the batch with a mixed one,
while the batch size is not changed.
If "add", concatenates mixed examples to the current ones,
the batch size increases by 2 times.
on_train_only: apply to train only.
As the mixup use the proxy inputs, the targets are also proxy.
We are not interested in them, are we? So, if ``on_train_only``
is ``True`` use a standard output/metric for validation.
Examples:
.. code-block:: python
from typing import Any, Dict
import os
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from catalyst import dl
from catalyst.callbacks import MixupCallback
from catalyst.contrib.datasets import MNIST
class SimpleNet(nn.Module):
def __init__(self, in_channels, in_hw, out_features):
super().__init__()
self.encoder = nn.Sequential(nn.Conv2d(in_channels,
in_channels, 3, 1, 1), nn.Tanh())
self.clf = nn.Linear(in_channels * in_hw * in_hw, out_features)
def forward(self, x):
features = self.encoder(x)
features = features.view(features.size(0), -1)
logits = self.clf(features)
return logits
class SimpleDataset(torch.utils.data.Dataset):
def __init__(self, train: bool = False):
self.mnist = MNIST(os.getcwd(), train=train)
def __len__(self) -> int:
return len(self.mnist)
def __getitem__(self, idx: int) -> Dict[str, Any]:
x, y = self.mnist.__getitem__(idx)
y_one_hot = np.zeros(10)
y_one_hot[y] = 1
return {"image": x,
"clf_targets": y,
"clf_targets_one_hot": torch.Tensor(y_one_hot)}
model = SimpleNet(1, 28, 10)
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.02)
loaders = {
"train": DataLoader(SimpleDataset(train=True), batch_size=32),
"valid": DataLoader(SimpleDataset(train=False), batch_size=32),
}
class CustomRunner(dl.Runner):
def handle_batch(self, batch):
image = batch["image"]
clf_logits = self.model(image)
self.batch["clf_logits"] = clf_logits
runner = CustomRunner()
runner.train(
loaders=loaders,
model=model,
criterion=criterion,
optimizer=optimizer,
logdir="./logdir14",
num_epochs=2,
verbose=True,
valid_loader="valid",
valid_metric="loss",
minimize_valid_metric=True,
callbacks={
"mixup": MixupCallback(keys=["image", "clf_targets_one_hot"]),
"criterion": dl.CriterionCallback(
metric_key="loss",
input_key="clf_logits",
target_key="clf_targets_one_hot"
),
"backward": dl.BackwardCallback(metric_key="loss"),
"optimizer": dl.OptimizerCallback(metric_key="loss"),
"classification": dl.ControlFlowCallback(
dl.PrecisionRecallF1SupportCallback(
input_key="clf_logits", target_key="clf_targets", num_classes=10
),
ignore_loaders="train",
),
},
)
.. By running::
With running this callback, many metrics (accuracy, etc) become undefined, so
use ControlFlowCallback in order to evaluate model(see example)
"""
def __init__(
    self, keys: Union[str, List[str]], alpha=0.2, mode="replace", on_train_only=True
):
    """Init.

    Args:
        keys: batch key (or list/tuple of keys) to augment jointly.
        alpha: beta distribution a=b parameter; must be >= 0.
        mode: "replace" or "add" (see class docstring for semantics).
        on_train_only: if True, apply mixup during train loaders only.
    """
    # NB: assert-based validation is kept for interface compatibility
    # (callers may catch AssertionError), but note it is stripped under -O.
    # Message typos fixed: "str of list[str] ... get:" -> "str or ... got:".
    assert isinstance(
        keys, (str, list, tuple)
    ), f"keys must be str or list[str], got: {type(keys)}"
    assert alpha >= 0, "alpha must be >= 0"
    assert mode in (
        "add",
        "replace",
    ), f"mode must be in 'add', 'replace', got: {mode}"
    super().__init__(order=CallbackOrder.Internal)
    if isinstance(keys, str):
        keys = [keys]
    self.keys = keys
    self.on_train_only = on_train_only
    self.alpha = alpha
    self.mode = mode
    # Recomputed per loader in ``on_loader_start``.
    self._is_required = True
def on_loader_start(self, runner: "IRunner") -> None:
    """Decide whether mixup should run for the upcoming loader."""
    if self.on_train_only:
        # Restrict augmentation to training loaders.
        self._is_required = runner.is_train_loader
    else:
        self._is_required = True
def on_batch_start(self, runner: "IRunner") -> None:
    """Event handler: mix the configured batch tensors in place."""
    if self._is_required:
        # Gather the configured tensors, mix them jointly (same mixing
        # applied across all keys), then write the results back.
        mixuped_batch = [runner.batch[key] for key in self.keys]
        mixuped_batch = mixup_batch(mixuped_batch, alpha=self.alpha, mode=self.mode)
        for key, mixuped_value in zip(self.keys, mixuped_batch):
            runner.batch[key] = mixuped_value
__all__ = ["MixupCallback"]
| MixupCallback |
python | agronholm__apscheduler | src/apscheduler/_events.py | {
"start": 4265,
"end": 4669
} | class ____(DataStoreEvent):
"""
Signals that the deserialization of a job has failed.
:ivar job_id: ID of the job that failed to deserialize
:ivar exception: the exception that was raised during deserialization
"""
# ID of the job whose payload failed to deserialize (input coerced to UUID)
job_id: UUID = attrs.field(converter=as_uuid)
# the exception raised during deserialization
exception: BaseException
#
# Scheduler events
#
@attrs.define(kw_only=True, frozen=True)
| JobDeserializationFailed |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-graphql/llama_index/readers/graphql/base.py | {
"start": 176,
"end": 2135
} | class ____(BaseReader):
"""
GraphQL reader.
Combines all GraphQL results into the Document used by LlamaIndex.
Args:
uri (str): GraphQL uri.
headers (Optional[Dict]): Optional http headers.
"""
def __init__(
    self,
    uri: Optional[str] = None,
    headers: Optional[Dict] = None,
) -> None:
    """Initialize with parameters.

    Args:
        uri: GraphQL endpoint uri (required).
        headers: optional HTTP headers sent with each request.

    Raises:
        ImportError: if the `gql` package is not installed.
        ValueError: if `uri` is not provided.
    """
    try:
        from gql import Client
        from gql.transport.requests import RequestsHTTPTransport
    except ImportError:
        raise ImportError("`gql` package not found, please run `pip install gql`")

    # BUG FIX: the original `if uri: if uri is None: raise ...` guard was
    # dead code — a missing uri silently skipped client construction,
    # leaving `self.client` unset. Raise eagerly instead, as the error
    # message always intended.
    if uri is None:
        raise ValueError("`uri` must be provided.")
    if headers is None:
        headers = {}
    transport = RequestsHTTPTransport(url=uri, headers=headers)
    self.client = Client(transport=transport, fetch_schema_from_transport=True)
def load_data(self, query: str, variables: Optional[Dict] = None) -> List[Document]:
    """
    Run query with optional variables and turn results into documents.

    Args:
        query (str): GraphQL query string.
        variables (Optional[Dict]): optional query parameters.

    Returns:
        List[Document]: one document per top-level result entry
        (list entries are expanded into one document each).
    """
    try:
        from gql import gql
    except ImportError:
        raise ImportError("`gql` package not found, please run `pip install gql`")

    result = self.client.execute(gql(query), variable_values=variables or {})
    documents: List[Document] = []
    for value in result.values():
        if isinstance(value, list):
            documents.extend(Document(text=yaml.dump(item)) for item in value)
        else:
            documents.append(Document(text=yaml.dump(value)))
    return documents
| GraphQLReader |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_comment04.py | {
"start": 315,
"end": 1147
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
    # Register the Excel-generated reference file this test compares against.
    self.set_filename("comment04.xlsx")
def test_create_file(self):
    """Test the creation of a simple XlsxWriter file with comments."""
    workbook = Workbook(self.got_filename)

    # Three sheets; the middle one stays empty on purpose so the output
    # matches the Excel-generated reference layout.
    worksheet1 = workbook.add_worksheet()
    worksheet2 = workbook.add_worksheet()
    worksheet3 = workbook.add_worksheet()

    worksheet1.write("A1", "Foo")
    worksheet1.write_comment("B2", "Some text")

    worksheet3.write("A1", "Bar")
    worksheet3.write_comment("C7", "More text")

    # Comments carry an author in the file metadata; set it on both
    # commented sheets so the comparison with the reference file matches.
    worksheet1.set_comments_author("John")
    worksheet3.set_comments_author("John")

    workbook.close()

    self.assertExcelEqual()
| TestCompareXLSXFiles |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.