language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sympy__sympy | sympy/diffgeom/diffgeom.py | {
"start": 40122,
"end": 43126
} | class ____(Expr):
"""Tensor product of forms.
Explanation
===========
The tensor product permits the creation of multilinear functionals (i.e.
higher order tensors) out of lower order fields (e.g. 1-forms and vector
fields). However, the higher tensors thus created lack the interesting
features provided by the other type of product, the wedge product, namely
they are not antisymmetric and hence are not form fields.
Examples
========
>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import TensorProduct
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> TensorProduct(dx, dy)(e_x, e_y)
1
>>> TensorProduct(dx, dy)(e_y, e_x)
0
>>> TensorProduct(dx, fx*dy)(fx*e_x, e_y)
x**2
>>> TensorProduct(e_x, e_y)(fx**2, fy**2)
4*x*y
>>> TensorProduct(e_y, dx)(fy)
dx
You can nest tensor products.
>>> tp1 = TensorProduct(dx, dy)
>>> TensorProduct(tp1, dx)(e_x, e_y, e_x)
1
You can make partial contraction for instance when 'raising an index'.
Putting ``None`` in the second argument of ``rcall`` means that the
respective position in the tensor product is left as it is.
>>> TP = TensorProduct
>>> metric = TP(dx, dx) + 3*TP(dy, dy)
>>> metric.rcall(e_y, None)
3*dy
Or automatically pad the args with ``None`` without specifying them.
>>> metric.rcall(e_y)
3*dy
"""
def __new__(cls, *args):
scalar = Mul(*[m for m in args if covariant_order(m) + contravariant_order(m) == 0])
multifields = [m for m in args if covariant_order(m) + contravariant_order(m)]
if multifields:
if len(multifields) == 1:
return scalar*multifields[0]
return scalar*super().__new__(cls, *multifields)
else:
return scalar
def __call__(self, *fields):
"""Apply on a list of fields.
If the number of input fields supplied is not equal to the order of
the tensor product field, the list of arguments is padded with ``None``'s.
The list of arguments is divided in sublists depending on the order of
the forms inside the tensor product. The sublists are provided as
arguments to these forms and the resulting expressions are given to the
constructor of ``TensorProduct``.
"""
tot_order = covariant_order(self) + contravariant_order(self)
tot_args = len(fields)
if tot_args != tot_order:
fields = list(fields) + [None]*(tot_order - tot_args)
orders = [covariant_order(f) + contravariant_order(f) for f in self._args]
indices = [sum(orders[:i + 1]) for i in range(len(orders) - 1)]
fields = [fields[i:j] for i, j in zip([0] + indices, indices + [None])]
multipliers = [t[0].rcall(*t[1]) for t in zip(self._args, fields)]
return TensorProduct(*multipliers)
| TensorProduct |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 18148,
"end": 19411
} | class ____(Structure):
_fields_ = (
("sectname", p_str16),
("segname", p_str16),
("addr", p_uint32),
("size", p_uint32),
("offset", p_uint32),
("align", p_uint32),
("reloff", p_uint32),
("nreloc", p_uint32),
("flags", p_uint32),
("reserved1", p_uint32),
("reserved2", p_uint32),
)
def describe(self):
s = {}
s["sectname"] = self.sectname.rstrip("\x00")
s["segname"] = self.segname.rstrip("\x00")
s["addr"] = int(self.addr)
s["size"] = int(self.size)
s["offset"] = int(self.offset)
s["align"] = int(self.align)
s["reloff"] = int(self.reloff)
s["nreloc"] = int(self.nreloc)
f = {}
f["type"] = FLAG_SECTION_TYPES[int(self.flags) & 0xFF]
f["attributes"] = []
for k in FLAG_SECTION_ATTRIBUTES:
if k & self.flags:
f["attributes"].append(FLAG_SECTION_ATTRIBUTES[k])
if not f["attributes"]:
del f["attributes"]
s["flags"] = f
s["reserved1"] = int(self.reserved1)
s["reserved2"] = int(self.reserved2)
return s
def add_section_data(self, data):
self.section_data = data
| section |
python | Lightning-AI__lightning | tests/tests_pytorch/utilities/test_model_summary.py | {
"start": 2817,
"end": 3268
} | class ____(LightningModule):
"""The parameters and inputs of this model have different dtypes."""
def __init__(self):
super().__init__()
self.embed = nn.Embedding(10, 20) # expects dtype long as input
self.reduce = nn.Linear(20, 1) # dtype: float
self.example_input_array = torch.tensor([[0, 2, 1], [3, 5, 3]]) # dtype: long
def forward(self, x):
return self.reduce(self.embed(x))
| MixedDtypeModel |
python | optuna__optuna | optuna/pruners/_threshold.py | {
"start": 593,
"end": 4504
} | class ____(BasePruner):
"""Pruner to detect outlying metrics of the trials.
Prune if a metric exceeds upper threshold,
falls behind lower threshold or reaches ``nan``.
Example:
.. testcode::
from optuna import create_study
from optuna.pruners import ThresholdPruner
from optuna import TrialPruned
def objective_for_upper(trial):
for step, y in enumerate(ys_for_upper):
trial.report(y, step)
if trial.should_prune():
raise TrialPruned()
return ys_for_upper[-1]
def objective_for_lower(trial):
for step, y in enumerate(ys_for_lower):
trial.report(y, step)
if trial.should_prune():
raise TrialPruned()
return ys_for_lower[-1]
ys_for_upper = [0.0, 0.1, 0.2, 0.5, 1.2]
ys_for_lower = [100.0, 90.0, 0.1, 0.0, -1]
study = create_study(pruner=ThresholdPruner(upper=1.0))
study.optimize(objective_for_upper, n_trials=10)
study = create_study(pruner=ThresholdPruner(lower=0.0))
study.optimize(objective_for_lower, n_trials=10)
Args:
lower:
A minimum value which determines whether pruner prunes or not.
If an intermediate value is smaller than lower, it prunes.
upper:
A maximum value which determines whether pruner prunes or not.
If an intermediate value is larger than upper, it prunes.
n_warmup_steps:
Pruning is disabled if the step is less than the given number of warmup steps.
interval_steps:
Interval in number of steps between the pruning checks, offset by the warmup steps.
If no value has been reported at the time of a pruning check, that particular check
will be postponed until a value is reported. Value must be at least 1.
"""
def __init__(
self,
lower: float | None = None,
upper: float | None = None,
n_warmup_steps: int = 0,
interval_steps: int = 1,
) -> None:
if lower is None and upper is None:
raise TypeError("Either lower or upper must be specified.")
if lower is not None:
lower = _check_value(lower)
if upper is not None:
upper = _check_value(upper)
lower = lower if lower is not None else -float("inf")
upper = upper if upper is not None else float("inf")
if lower > upper:
raise ValueError("lower should be smaller than upper.")
if n_warmup_steps < 0:
raise ValueError(
"Number of warmup steps cannot be negative but got {}.".format(n_warmup_steps)
)
if interval_steps < 1:
raise ValueError(
"Pruning interval steps must be at least 1 but got {}.".format(interval_steps)
)
self._lower = lower
self._upper = upper
self._n_warmup_steps = n_warmup_steps
self._interval_steps = interval_steps
def prune(self, study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial") -> bool:
step = trial.last_step
if step is None:
return False
n_warmup_steps = self._n_warmup_steps
if step < n_warmup_steps:
return False
if not _is_first_in_interval_step(
step, trial.intermediate_values.keys(), n_warmup_steps, self._interval_steps
):
return False
latest_value = trial.intermediate_values[step]
if math.isnan(latest_value):
return True
if latest_value < self._lower:
return True
if latest_value > self._upper:
return True
return False
| ThresholdPruner |
python | zarr-developers__zarr-python | src/zarr/core/buffer/core.py | {
"start": 9024,
"end": 17343
} | class ____:
"""An n-dimensional memory block
We use NDBuffer throughout Zarr to represent a n-dimensional memory block.
A NDBuffer is backed by an underlying ndarray-like instance that represents
the memory. The memory type is unspecified; can be regular host memory,
CUDA device memory, or something else. The only requirement is that the
ndarray-like instance can be copied/converted to a regular Numpy array
(host memory).
Notes
-----
The two buffer classes Buffer and NDBuffer are very similar. In fact, Buffer
is a special case of NDBuffer where dim=1, stride=1, and dtype="B". However,
in order to use Python's type system to differentiate between the contiguous
Buffer and the n-dim (non-contiguous) NDBuffer, we keep the definition of the
two classes separate.
Parameters
----------
array : ndarray_like
ndarray-like object that is convertible to a regular Numpy array.
"""
def __init__(self, array: NDArrayLike) -> None:
self._data = array
@classmethod
@abstractmethod
def create(
cls,
*,
shape: Iterable[int],
dtype: npt.DTypeLike,
order: Literal["C", "F"] = "C",
fill_value: Any | None = None,
) -> Self:
"""Create a new buffer and its underlying ndarray-like object
Parameters
----------
shape
The shape of the buffer and its underlying ndarray-like object
dtype
The datatype of the buffer and its underlying ndarray-like object
order
Whether to store multi-dimensional data in row-major (C-style) or
column-major (Fortran-style) order in memory.
fill_value
If not None, fill the new buffer with a scalar value.
Returns
-------
New buffer representing a new ndarray_like object
Notes
-----
A subclass can overwrite this method to create a ndarray-like object
other then the default Numpy array.
"""
if cls is NDBuffer:
raise NotImplementedError(
"Cannot call abstract method on the abstract class 'NDBuffer'"
)
return cls(
cast("NDArrayLike", None)
) # This line will never be reached, but it satisfies the type checker
@classmethod
def empty(
cls, shape: tuple[int, ...], dtype: npt.DTypeLike, order: Literal["C", "F"] = "C"
) -> Self:
"""
Create an empty buffer with the given shape, dtype, and order.
This method can be faster than ``NDBuffer.create`` because it doesn't
have to initialize the memory used by the underlying ndarray-like
object.
Parameters
----------
shape
The shape of the buffer and its underlying ndarray-like object
dtype
The datatype of the buffer and its underlying ndarray-like object
order
Whether to store multi-dimensional data in row-major (C-style) or
column-major (Fortran-style) order in memory.
Returns
-------
buffer
New buffer representing a new ndarray_like object with empty data.
See Also
--------
NDBuffer.create
Create a new buffer with some initial fill value.
"""
# Implementations should override this method if they have a faster way
# to allocate an empty buffer.
return cls.create(shape=shape, dtype=dtype, order=order)
@classmethod
def from_ndarray_like(cls, ndarray_like: NDArrayLike) -> Self:
"""Create a new buffer of a ndarray-like object
Parameters
----------
ndarray_like
ndarray-like object
Returns
-------
New buffer representing `ndarray_like`
"""
return cls(ndarray_like)
@classmethod
@abstractmethod
def from_numpy_array(cls, array_like: npt.ArrayLike) -> Self:
"""Create a new buffer of Numpy array-like object
Parameters
----------
array_like
Object that can be coerced into a Numpy array
Returns
-------
New buffer representing `array_like`
"""
if cls is NDBuffer:
raise NotImplementedError(
"Cannot call abstract method on the abstract class 'NDBuffer'"
)
return cls(
cast("NDArrayLike", None)
) # This line will never be reached, but it satisfies the type checker
def as_ndarray_like(self) -> NDArrayLike:
"""Returns the underlying array (host or device memory) of this buffer
This will never copy data.
Returns
-------
The underlying array such as a NumPy or CuPy array.
"""
return self._data
@abstractmethod
def as_numpy_array(self) -> npt.NDArray[Any]:
"""Returns the buffer as a NumPy array (host memory).
Warnings
--------
Might have to copy data, consider using `.as_ndarray_like()` instead.
Returns
-------
NumPy array of this buffer (might be a data copy)
"""
...
def as_scalar(self) -> ScalarType:
"""Returns the buffer as a scalar value"""
if self._data.size != 1:
raise ValueError("Buffer does not contain a single scalar value")
return cast("ScalarType", self.as_numpy_array()[()])
@property
def dtype(self) -> np.dtype[Any]:
return self._data.dtype
@property
def shape(self) -> tuple[int, ...]:
return self._data.shape
@property
def byteorder(self) -> Endian:
from zarr.codecs.bytes import Endian
if self.dtype.byteorder == "<":
return Endian.little
elif self.dtype.byteorder == ">":
return Endian.big
else:
return Endian(sys.byteorder)
def reshape(self, newshape: tuple[int, ...] | Literal[-1]) -> Self:
return self.__class__(self._data.reshape(newshape))
def squeeze(self, axis: tuple[int, ...]) -> Self:
newshape = tuple(a for i, a in enumerate(self.shape) if i not in axis)
return self.__class__(self._data.reshape(newshape))
def astype(self, dtype: npt.DTypeLike, order: Literal["K", "A", "C", "F"] = "K") -> Self:
return self.__class__(self._data.astype(dtype=dtype, order=order))
@abstractmethod
def __getitem__(self, key: Any) -> Self: ...
@abstractmethod
def __setitem__(self, key: Any, value: Any) -> None: ...
def __len__(self) -> int:
return self._data.__len__()
def __repr__(self) -> str:
return f"<NDBuffer shape={self.shape} dtype={self.dtype} {self._data!r}>"
def all_equal(self, other: Any, equal_nan: bool = True) -> bool:
"""Compare to `other` using np.array_equal."""
if other is None:
# Handle None fill_value for Zarr V2
return False
# Handle positive and negative zero by comparing bit patterns:
if (
np.asarray(other).dtype.kind == "f"
and other == 0.0
and self._data.dtype.kind not in ("U", "S", "T", "O", "V")
):
_data, other = np.broadcast_arrays(self._data, np.asarray(other, self._data.dtype))
void_dtype = "V" + str(_data.dtype.itemsize)
return np.array_equal(_data.view(void_dtype), other.view(void_dtype))
# use array_equal to obtain equal_nan=True functionality
# Since fill-value is a scalar, isn't there a faster path than allocating a new array for fill value
# every single time we have to write data?
_data, other = np.broadcast_arrays(self._data, other)
return np.array_equal(
self._data,
other,
equal_nan=equal_nan
if self._data.dtype.kind not in ("U", "S", "T", "O", "V")
else False,
)
def fill(self, value: Any) -> None:
self._data.fill(value)
def copy(self) -> Self:
return self.__class__(self._data.copy())
def transpose(self, axes: SupportsIndex | Sequence[SupportsIndex] | None) -> Self:
return self.__class__(self._data.transpose(axes))
| NDBuffer |
python | pytorch__pytorch | test/distributed/tensor/test_utils.py | {
"start": 33226,
"end": 37279
} | class ____(DTensorTestBase):
@property
def world_size(self):
return 4
@with_comms
def test_fsdp1_tp_2d_dtensor_local_shards_and_offsets(self):
# We are mimicking the behavior of FSDP1 + TP.
# Currently, the 2D DTensor's local shard is correct, since from_local + redistribute incurs a all_gather behind the scene.
# When we have a global_tensor of [0, 1, 2, 3, 4, 5, 6, 7], the local shard of 2D DTensor would be:
# rank0: [0, 1], rank1: [2, 3], rank2: [4, 5], rank3: [6, 7]
with CommDebugMode() as comm_mode:
global_tensor = torch.arange(8).view(4, 2)
mesh_2d = init_device_mesh(
self.device_type, (2, 2), mesh_dim_names=("DP", "TP")
)
tp_mesh = mesh_2d["TP"]
dtensor_tp = distribute_tensor(
global_tensor, tp_mesh, placements=[Shard(0)]
)
dtensor_2d = DTensor.from_local(
dtensor_tp.to_local(), mesh_2d, [Replicate(), Shard(0)], run_check=False
).redistribute(mesh_2d, [Shard(0), Shard(0)])
self.assertEqual(
comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1
)
self.assertEqual(
dtensor_2d.to_local(), global_tensor[self.rank : self.rank + 1]
)
# compute_local_shape_and_global_offset currently does take into consideration of strided sharding,
# which should after strided sharding is added.
local_size, global_offset = compute_local_shape_and_global_offset(
global_tensor.shape, mesh_2d, [Shard(0), Shard(0)]
)
self.assertEqual(local_size, torch.Size([1, 2]))
self.assertEqual(global_offset, torch.Size([self.rank, 0]))
@with_comms
def test_fsdp2_tp_2d_dtensor_local_shards_and_offsets(self):
# We are mimicking the behavior of FSDP2 + TP.
# Currently, the 2D DTensor's local shard is incorrect for resharding, since we want to avoid extra communication.
# It's incorrect for resharding, since `compute_local_shape_and_global_offset`
# doesn't know the correct offsets for resharding.
# When we have a global_tensor of [0, 1, 2, 3, 4, 5, 6, 7], the local shard of 2D DTensor would be:
# local tensor -- rank0: [0, 1], rank1: [4, 5], rank2: [2, 3], rank3: [6, 7]
# current offsets -- rank0: [0, 0], rank1: [1, 0], rank2: [2, 0], rank3: [3, 0]
# Ideally, with strided sharding, the offsets should be rank0: [0, 0], rank1: [2, 0], rank2: [1, 0], rank3: [3, 0]
# TODO: to make the local shard of FSDP2 + TP correct for resharding, it would require strided_sharding
# as well as let compute_local_shape_and_global_offset takes into consideration of strided_sharding.
global_tensor = torch.arange(8).view(4, 2)
with CommDebugMode() as comm_mode:
mesh_2d = init_device_mesh(
self.device_type, (2, 2), mesh_dim_names=("DP", "TP")
)
tp_mesh = mesh_2d["TP"]
dtensor_tp = distribute_tensor(
global_tensor, tp_mesh, placements=[Shard(0)]
)
chunks = list(torch.chunk(dtensor_tp.to_local(), 2, dim=0))
shard_rank = 0 if self.rank // 2 == 0 else 1
sharded_param = chunks[shard_rank]
spec_2d = DTensorSpec(
mesh=mesh_2d,
placements=(_StridedShard(0, split_factor=2), Shard(0)),
tensor_meta=TensorMeta(
global_tensor.size(),
global_tensor.stride(),
global_tensor.dtype,
),
)
dtensor_2d = DTensor(
sharded_param,
spec_2d,
requires_grad=False,
)
self.assertEqual(
comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 0
)
self.assertEqual(global_tensor, dtensor_2d.full_tensor())
| Test2DStridedLocalShard |
python | getsentry__sentry | tests/sentry/sentry_apps/api/endpoints/test_organization_sentry_app_installation_details.py | {
"start": 1209,
"end": 2591
} | class ____(APITestCase):
def setUp(self) -> None:
self.superuser = self.create_user(email="a@example.com", is_superuser=True)
self.user = self.create_user(email="boop@example.com")
self.org = self.create_organization(owner=self.user)
self.super_org = self.create_organization(owner=self.superuser)
self.published_app = self.create_sentry_app(
name="Test",
organization=self.super_org,
published=True,
scopes=("org:write", "team:admin"),
)
self.installation = self.create_sentry_app_installation(
slug=self.published_app.slug,
organization=self.super_org,
user=self.superuser,
status=SentryAppInstallationStatus.PENDING,
prevent_token_exchange=True,
)
self.unpublished_app = self.create_sentry_app(name="Testin", organization=self.org)
self.installation2 = self.create_sentry_app_installation(
slug=self.unpublished_app.slug,
organization=self.org,
user=self.user,
status=SentryAppInstallationStatus.PENDING,
prevent_token_exchange=True,
)
self.url = reverse(
"sentry-api-0-sentry-app-installation-details", args=[self.installation2.uuid]
)
@control_silo_test
| SentryAppInstallationDetailsTest |
python | sqlalchemy__sqlalchemy | test/orm/test_cascade.py | {
"start": 118568,
"end": 122822
} | class ____(fixtures.DeclarativeMappedTest):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Company(Base):
__tablename__ = "company"
id = Column(Integer, primary_key=True)
name = Column(String(50))
employees = relationship("Employee", cascade="all, delete-orphan")
class Employee(Base):
__tablename__ = "employee"
id = Column(Integer, primary_key=True)
name = Column(String(50))
type = Column(String(50))
company_id = Column(ForeignKey("company.id"))
__mapper_args__ = {
"polymorphic_identity": "employee",
"polymorphic_on": type,
}
class Engineer(Employee):
__tablename__ = "engineer"
id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
engineer_name = Column(String(30))
languages = relationship("Language", cascade="all, delete-orphan")
__mapper_args__ = {"polymorphic_identity": "engineer"}
class MavenBuild(Base):
__tablename__ = "maven_build"
id = Column(Integer, primary_key=True)
java_language_id = Column(
ForeignKey("java_language.id"), nullable=False
)
class Manager(Employee):
__tablename__ = "manager"
id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
manager_name = Column(String(30))
__mapper_args__ = {"polymorphic_identity": "manager"}
class Language(Base):
__tablename__ = "language"
id = Column(Integer, primary_key=True)
engineer_id = Column(ForeignKey("engineer.id"), nullable=False)
name = Column(String(50))
type = Column(String(50))
__mapper_args__ = {
"polymorphic_on": type,
"polymorphic_identity": "language",
}
class JavaLanguage(Language):
__tablename__ = "java_language"
id = Column(ForeignKey("language.id"), primary_key=True)
maven_builds = relationship(
"MavenBuild", cascade="all, delete-orphan"
)
__mapper_args__ = {"polymorphic_identity": "java_language"}
def test_cascade_iterator_polymorphic(self):
(
Company,
Employee,
Engineer,
Language,
JavaLanguage,
MavenBuild,
) = self.classes(
"Company",
"Employee",
"Engineer",
"Language",
"JavaLanguage",
"MavenBuild",
)
obj = Company(
employees=[
Engineer(
languages=[
JavaLanguage(name="java", maven_builds=[MavenBuild()])
]
)
]
)
eng = obj.employees[0]
lang = eng.languages[0]
maven_build = lang.maven_builds[0]
from sqlalchemy import inspect
state = inspect(obj)
it = inspect(Company).cascade_iterator("save-update", state)
eq_({rec[0] for rec in it}, {eng, maven_build, lang})
state = inspect(eng)
it = inspect(Employee).cascade_iterator("save-update", state)
eq_({rec[0] for rec in it}, {maven_build, lang})
def test_delete_orphan_round_trip(self):
(
Company,
Employee,
Engineer,
Language,
JavaLanguage,
MavenBuild,
) = self.classes(
"Company",
"Employee",
"Engineer",
"Language",
"JavaLanguage",
"MavenBuild",
)
obj = Company(
employees=[
Engineer(
languages=[
JavaLanguage(name="java", maven_builds=[MavenBuild()])
]
)
]
)
s = fixture_session()
s.add(obj)
s.commit()
obj.employees = []
s.commit()
eq_(s.query(Language).count(), 0)
| SubclassCascadeTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 66582,
"end": 67556
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"repository_id",
"name",
"owner_id",
"description",
"visibility",
"include_all_branches",
"client_mutation_id",
)
repository_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="repositoryId"
)
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
owner_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="ownerId")
description = sgqlc.types.Field(String, graphql_name="description")
visibility = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryVisibility), graphql_name="visibility"
)
include_all_branches = sgqlc.types.Field(Boolean, graphql_name="includeAllBranches")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| CloneTemplateRepositoryInput |
python | PrefectHQ__prefect | src/prefect/events/schemas/automations.py | {
"start": 10466,
"end": 10985
} | class ____(Trigger, abc.ABC):
"""
Requires some number of triggers to have fired within the given time period.
"""
type: Literal["compound", "sequence"]
triggers: List["TriggerTypes"]
within: Optional[timedelta] = Field(
None,
description=(
"The time period over which the events must occur. For Reactive triggers, "
"this may be as low as 0 seconds, but must be at least 10 seconds for "
"Proactive triggers"
),
)
| CompositeTrigger |
python | simonw__sqlite-utils | sqlite_utils/utils.py | {
"start": 10929,
"end": 12403
} | class ____:
"""
Wrap an iterator of dictionaries and keep track of which SQLite column
types are the most likely fit for each of their keys.
Example usage:
.. code-block:: python
from sqlite_utils.utils import TypeTracker
import sqlite_utils
db = sqlite_utils.Database(memory=True)
tracker = TypeTracker()
rows = [{"id": "1", "name": "Cleo", "id": "2", "name": "Cardi"}]
db["creatures"].insert_all(tracker.wrap(rows))
print(tracker.types)
# Outputs {'id': 'integer', 'name': 'text'}
db["creatures"].transform(types=tracker.types)
"""
def __init__(self):
self.trackers = {}
def wrap(self, iterator: Iterable[dict]) -> Iterable[dict]:
"""
Use this to loop through an existing iterator, tracking the column types
as part of the iteration.
:param iterator: The iterator to wrap
"""
for row in iterator:
for key, value in row.items():
tracker = self.trackers.setdefault(key, ValueTracker())
tracker.evaluate(value)
yield row
@property
def types(self) -> Dict[str, str]:
"""
A dictionary mapping column names to their detected types. This can be passed
to the ``db[table_name].transform(types=tracker.types)`` method.
"""
return {key: tracker.guessed_type for key, tracker in self.trackers.items()}
| TypeTracker |
python | joke2k__faker | tests/providers/test_company.py | {
"start": 7331,
"end": 8342
} | class ____:
"""Test ja_JP company provider methods"""
def test_company_prefix(self, faker, num_samples):
for _ in range(num_samples):
prefix = faker.company_prefix()
assert isinstance(prefix, str)
assert prefix in JaJpCompanyProvider.company_prefixes
def test_company_category(self, faker, num_samples):
for _ in range(num_samples):
category = faker.company_category()
assert isinstance(category, str)
assert category in JaJpCompanyProvider.company_categories
def test_company(self, faker, num_samples):
for _ in range(num_samples):
company = faker.company()
assert isinstance(company, str)
assert any(
company.startswith(prefix) or company.endswith(prefix)
for prefix in JaJpCompanyProvider.company_prefixes
)
assert any(category in company for category in JaJpCompanyProvider.company_categories)
| TestJaJp |
python | gevent__gevent | src/gevent/resolver/blocking.py | {
"start": 116,
"end": 1216
} | class ____(object):
"""
A resolver that directly uses the system's resolver functions.
.. caution::
This resolver is *not* cooperative.
This resolver has the lowest overhead of any resolver and
typically approaches the speed of the unmodified :mod:`socket`
functions. However, it is not cooperative, so if name resolution
blocks, the entire thread and all its greenlets will be blocked.
This can be useful during debugging, or it may be a good choice if
your operating system provides a good caching resolver (such as
macOS's Directory Services) that is usually very fast and
functionally non-blocking.
.. versionchanged:: 1.3a2
This was previously undocumented and existed in :mod:`gevent.socket`.
"""
def __init__(self, hub=None):
pass
def close(self):
pass
for method in (
'gethostbyname',
'gethostbyname_ex',
'getaddrinfo',
'gethostbyaddr',
'getnameinfo'
):
locals()[method] = staticmethod(getattr(_socket, method))
| Resolver |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/xcom_arg.py | {
"start": 7449,
"end": 14374
} | class ____(XComArg):
"""
Reference to one single XCom without any additional semantics.
This class should not be accessed directly, but only through XComArg. The
class inheritance chain and ``__new__`` is implemented in this slightly
convoluted way because we want to
a. Allow the user to continue using XComArg directly for the simple
semantics (see documentation of the base class for details).
b. Make ``isinstance(thing, XComArg)`` be able to detect all kinds of XCom
references.
c. Not allow many properties of PlainXComArg (including ``__getitem__`` and
``__str__``) to exist on other kinds of XComArg implementations since
they don't make sense.
:meta private:
"""
operator: Operator
key: str = BaseXCom.XCOM_RETURN_KEY
def __getitem__(self, item: str) -> XComArg:
"""Implement xcomresult['some_result_key']."""
if not isinstance(item, str):
raise ValueError(f"XComArg only supports str lookup, received {type(item).__name__}")
return PlainXComArg(operator=self.operator, key=item)
def __iter__(self):
"""
Override iterable protocol to raise error explicitly.
The default ``__iter__`` implementation in Python calls ``__getitem__``
with 0, 1, 2, etc. until it hits an ``IndexError``. This does not work
well with our custom ``__getitem__`` implementation, and results in poor
Dag-writing experience since a misplaced ``*`` expansion would create an
infinite loop consuming the entire Dag parser.
This override catches the error eagerly, so an incorrectly implemented
Dag fails fast and avoids wasting resources on nonsensical iterating.
"""
raise TypeError("'XComArg' object is not iterable")
def __repr__(self) -> str:
if self.key == BaseXCom.XCOM_RETURN_KEY:
return f"XComArg({self.operator!r})"
return f"XComArg({self.operator!r}, {self.key!r})"
def __str__(self) -> str:
"""
Backward compatibility for old-style jinja used in Airflow Operators.
**Example**: to use XComArg at BashOperator::
BashOperator(cmd=f"... {xcomarg} ...")
:return:
"""
xcom_pull_kwargs = [
f"task_ids='{self.operator.task_id}'",
f"dag_id='{self.operator.dag_id}'",
]
if self.key is not None:
xcom_pull_kwargs.append(f"key='{self.key}'")
xcom_pull_str = ", ".join(xcom_pull_kwargs)
# {{{{ are required for escape {{ in f-string
xcom_pull = f"{{{{ task_instance.xcom_pull({xcom_pull_str}) }}}}"
return xcom_pull
def _serialize(self) -> dict[str, Any]:
return {"task_id": self.operator.task_id, "key": self.key}
@property
def is_setup(self) -> bool:
return self.operator.is_setup
@is_setup.setter
def is_setup(self, val: bool):
self.operator.is_setup = val
@property
def is_teardown(self) -> bool:
return self.operator.is_teardown
@is_teardown.setter
def is_teardown(self, val: bool):
self.operator.is_teardown = val
@property
def on_failure_fail_dagrun(self) -> bool:
return self.operator.on_failure_fail_dagrun
@on_failure_fail_dagrun.setter
def on_failure_fail_dagrun(self, val: bool):
self.operator.on_failure_fail_dagrun = val
def as_setup(self) -> DependencyMixin:
for operator, _ in self.iter_references():
operator.is_setup = True
return self
def as_teardown(
self,
*,
setups: BaseOperator | Iterable[BaseOperator] | None = None,
on_failure_fail_dagrun: bool | None = None,
):
for operator, _ in self.iter_references():
operator.is_teardown = True
operator.trigger_rule = TriggerRule.ALL_DONE_SETUP_SUCCESS
if on_failure_fail_dagrun is not None:
operator.on_failure_fail_dagrun = on_failure_fail_dagrun
if setups is not None:
setups = [setups] if isinstance(setups, DependencyMixin) else setups
for s in setups:
s.is_setup = True
s >> operator
return self
def iter_references(self) -> Iterator[tuple[Operator, str]]:
yield self.operator, self.key
def map(self, f: Callable[[Any], Any]) -> MapXComArg:
if self.key != BaseXCom.XCOM_RETURN_KEY:
raise ValueError("cannot map against non-return XCom")
return super().map(f)
def zip(self, *others: XComArg, fillvalue: Any = NOTSET) -> ZipXComArg:
if self.key != BaseXCom.XCOM_RETURN_KEY:
raise ValueError("cannot map against non-return XCom")
return super().zip(*others, fillvalue=fillvalue)
def concat(self, *others: XComArg) -> ConcatXComArg:
if self.key != BaseXCom.XCOM_RETURN_KEY:
raise ValueError("cannot concatenate non-return XCom")
return super().concat(*others)
def resolve(self, context: Mapping[str, Any]) -> Any:
ti = context["ti"]
task_id = self.operator.task_id
if self.operator.is_mapped:
return LazyXComSequence(xcom_arg=self, ti=ti)
tg = self.operator.get_closest_mapped_task_group()
if tg is None:
map_indexes = None
else:
upstream_map_indexes = getattr(ti, "_upstream_map_indexes", {})
map_indexes = upstream_map_indexes.get(task_id, None)
result = ti.xcom_pull(
task_ids=task_id,
key=self.key,
default=NOTSET,
map_indexes=map_indexes,
)
if is_arg_set(result):
return result
if self.key == BaseXCom.XCOM_RETURN_KEY:
return None
if getattr(self.operator, "multiple_outputs", False):
# If the operator is set to have multiple outputs and it was not executed,
# we should return "None" instead of showing an error. This is because when
# multiple outputs XComs are created, the XCom keys associated with them will have
# different names than the predefined "XCOM_RETURN_KEY" and won't be found.
# Therefore, it's better to return "None" like we did above where self.key==XCOM_RETURN_KEY.
return None
raise XComNotFound(ti.dag_id, task_id, self.key)
def _get_callable_name(f: Callable | str) -> str:
"""Try to "describe" a callable by getting its name."""
if callable(f):
return f.__name__
# Parse the source to find whatever is behind "def". For safety, we don't
# want to evaluate the code in any meaningful way!
with contextlib.suppress(Exception):
kw, name, _ = f.lstrip().split(None, 2)
if kw == "def":
return name
return "<function>"
@attrs.define
| PlainXComArg |
python | pytorch__pytorch | test/inductor/test_static_cuda_launcher.py | {
"start": 15596,
"end": 19178
} | class ____(TestCase):
"""
Tests static cuda launcher with torch.compile()
"""
@skipIfRocm
def test_basic_compile(self):
@torch.compile
def foo(x, y):
return x + y
x = torch.randn(10, device="cuda")
y = torch.randn(10, device="cuda")
self.assertEqual(foo(x, y), x + y)
@skipIfRocm
# The error gets raised on a worker, so we want to not use a separate process
@torch._inductor.config.patch("compile_threads", 1)
def test_incompatible_code(self):
# User defined triton kernel
@triton.jit
def custom_kernel(arg_0, arg_1):
x = tl.load(arg_0)
y = arg_1
tl.store(arg_0, x + y)
@torch.compile
def foo(x):
custom_kernel[1,](x, 5)
return x
x = torch.randn(1, device="cuda")
self.assertRaisesRegex(
torch._inductor.exc.InductorError,
"CannotStaticallyLaunchKernel: User defined triton kernel",
lambda: foo(x),
)
@skipIfRocm
# The error gets raised on a worker, so we want to not use a separate process
@torch._inductor.config.patch(
{"compile_threads": 1, "static_launch_user_defined_triton_kernels": True}
)
def test_static_launch_user_defined_triton_kernels(self):
# User defined triton kernel
@triton.jit
def custom_kernel(arg_0, arg_1):
x = tl.load(arg_0)
y = arg_1
tl.store(arg_0, x + y)
@torch.compile
def foo(x):
custom_kernel[1,](x, 5)
return x
x = torch.randn(1, device="cuda")
x2 = x.clone().detach_()
self.assertEqual(foo(x), x2 + 5)
@skipIfRocm
def test_empty_tensor(self):
@torch.compile()
def foo(x, y):
return torch.cat(((x * 4), y + 10))
x = torch.rand(0, device="cuda")
torch._dynamo.decorators.mark_unbacked(x, 0)
y = torch.rand(20, device="cuda")
result = foo(x, y)
self.assertEqual(result, torch.cat(((x * 4), y + 10)))
@skipIfRocm
def test_any(self):
def fn(x):
return (
x.any(-1),
x.isinf().any(),
torch.all(x.isinf(), dim=0),
torch.all(torch.logical_not(x.isinf())),
)
compiled_fn = torch.compile(fn)
arg = -torch.rand(64, device="cuda", dtype=torch.float64)
eager_result = fn(arg)
compiled_result = compiled_fn(arg)
self.assertEqual(eager_result, compiled_result)
arg[1] = float("inf")
eager_result = fn(arg)
compiled_result = compiled_fn(arg)
self.assertEqual(eager_result, compiled_result)
@skipIfRocm
def test_disable_static_cuda_launcher(self):
@torch.compile
def fn(x, y):
return torch.cat(((x * 4), y + 10))
# Test that static cuda launcher is in fact disabled
with torch._inductor.config.patch("use_static_cuda_launcher", False):
x = torch.rand(20, device="cuda")
y = torch.rand(20, device="cuda")
with mock.patch(
"torch._inductor.runtime.triton_heuristics.StaticTritonCompileResult.make_launcher"
) as mocked:
result = fn(x, y)
mocked.assert_not_called()
self.assertEqual(result, torch.cat(((x * 4), y + 10)))
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
run_tests()
| TestStaticTritonCompileResult |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_rds.py | {
"start": 39600,
"end": 42423
} | class ____:
@classmethod
def setup_class(cls):
cls.dag = DAG(
dag_id="test_dag",
schedule=None,
default_args={"owner": "airflow", "start_date": DEFAULT_DATE},
)
cls.hook = RdsHook(aws_conn_id=AWS_CONN, region_name="us-east-1")
_patch_hook_get_connection(cls.hook)
@classmethod
def teardown_class(cls):
del cls.dag
del cls.hook
@mock_aws
def test_start_db_instance(self):
_create_db_instance(self.hook)
self.hook.conn.stop_db_instance(DBInstanceIdentifier=DB_INSTANCE_NAME)
result_before = self.hook.conn.describe_db_instances(DBInstanceIdentifier=DB_INSTANCE_NAME)
status_before = result_before["DBInstances"][0]["DBInstanceStatus"]
assert status_before == "stopped"
start_db_instance = RdsStartDbOperator(
task_id="test_start_db_instance", db_identifier=DB_INSTANCE_NAME
)
_patch_hook_get_connection(start_db_instance.hook)
start_db_instance.execute(None)
result_after = self.hook.conn.describe_db_instances(DBInstanceIdentifier=DB_INSTANCE_NAME)
status_after = result_after["DBInstances"][0]["DBInstanceStatus"]
assert status_after == "available"
@mock_aws
def test_start_db_cluster(self):
_create_db_cluster(self.hook)
self.hook.conn.stop_db_cluster(DBClusterIdentifier=DB_CLUSTER_NAME)
result_before = self.hook.conn.describe_db_clusters(DBClusterIdentifier=DB_CLUSTER_NAME)
status_before = result_before["DBClusters"][0]["Status"]
assert status_before == "stopped"
start_db_cluster = RdsStartDbOperator(
task_id="test_start_db_cluster", db_identifier=DB_CLUSTER_NAME, db_type="cluster"
)
_patch_hook_get_connection(start_db_cluster.hook)
start_db_cluster.execute(None)
result_after = self.hook.conn.describe_db_clusters(DBClusterIdentifier=DB_CLUSTER_NAME)
status_after = result_after["DBClusters"][0]["Status"]
assert status_after == "available"
@mock.patch.object(RdsHook, "conn")
def test_deferred(self, conn_mock):
op = RdsStartDbOperator(
task_id="test_stop_db_instance_no_wait",
db_identifier=DB_INSTANCE_NAME,
deferrable=True,
)
with pytest.raises(TaskDeferred) as defer:
op.execute({})
assert isinstance(defer.value.trigger, RdsDbAvailableTrigger)
def test_template_fields(self):
operator = RdsStartDbOperator(
region_name=REGION,
aws_conn_id=AWS_CONN,
task_id="test_start_db_cluster",
db_identifier=DB_CLUSTER_NAME,
db_type="cluster",
)
validate_template_fields(operator)
| TestRdsStartDbOperator |
python | networkx__networkx | networkx/algorithms/connectivity/edge_kcomponents.py | {
"start": 7276,
"end": 20890
} | class ____:
r"""A simple algorithm to find all k-edge-connected components in a graph.
Constructing the auxiliary graph (which may take some time) allows for the
k-edge-ccs to be found in linear time for arbitrary k.
Notes
-----
This implementation is based on [1]_. The idea is to construct an auxiliary
graph from which the k-edge-ccs can be extracted in linear time. The
auxiliary graph is constructed in $O(|V|\cdot F)$ operations, where F is the
complexity of max flow. Querying the components takes an additional $O(|V|)$
operations. This algorithm can be slow for large graphs, but it handles an
arbitrary k and works for both directed and undirected inputs.
The undirected case for k=1 is exactly connected components.
The undirected case for k=2 is exactly bridge connected components.
The directed case for k=1 is exactly strongly connected components.
References
----------
.. [1] Wang, Tianhao, et al. (2015) A simple algorithm for finding all
k-edge-connected components.
http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0136264
Examples
--------
>>> import itertools as it
>>> from networkx.utils import pairwise
>>> from networkx.algorithms.connectivity import EdgeComponentAuxGraph
>>> # Build an interesting graph with multiple levels of k-edge-ccs
>>> paths = [
... (1, 2, 3, 4, 1, 3, 4, 2), # a 3-edge-cc (a 4 clique)
... (5, 6, 7, 5), # a 2-edge-cc (a 3 clique)
... (1, 5), # combine first two ccs into a 1-edge-cc
... (0,), # add an additional disconnected 1-edge-cc
... ]
>>> G = nx.Graph()
>>> G.add_nodes_from(it.chain(*paths))
>>> G.add_edges_from(it.chain(*[pairwise(path) for path in paths]))
>>> # Constructing the AuxGraph takes about O(n ** 4)
>>> aux_graph = EdgeComponentAuxGraph.construct(G)
>>> # Once constructed, querying takes O(n)
>>> sorted(map(sorted, aux_graph.k_edge_components(k=1)))
[[0], [1, 2, 3, 4, 5, 6, 7]]
>>> sorted(map(sorted, aux_graph.k_edge_components(k=2)))
[[0], [1, 2, 3, 4], [5, 6, 7]]
>>> sorted(map(sorted, aux_graph.k_edge_components(k=3)))
[[0], [1, 2, 3, 4], [5], [6], [7]]
>>> sorted(map(sorted, aux_graph.k_edge_components(k=4)))
[[0], [1], [2], [3], [4], [5], [6], [7]]
The auxiliary graph is primarily used for k-edge-ccs but it
can also speed up the queries of k-edge-subgraphs by refining the
search space.
>>> import itertools as it
>>> from networkx.utils import pairwise
>>> from networkx.algorithms.connectivity import EdgeComponentAuxGraph
>>> paths = [
... (1, 2, 4, 3, 1, 4),
... ]
>>> G = nx.Graph()
>>> G.add_nodes_from(it.chain(*paths))
>>> G.add_edges_from(it.chain(*[pairwise(path) for path in paths]))
>>> aux_graph = EdgeComponentAuxGraph.construct(G)
>>> sorted(map(sorted, aux_graph.k_edge_subgraphs(k=3)))
[[1], [2], [3], [4]]
>>> sorted(map(sorted, aux_graph.k_edge_components(k=3)))
[[1, 4], [2], [3]]
"""
# @not_implemented_for('multigraph') # TODO: fix decor for classmethods
@classmethod
def construct(EdgeComponentAuxGraph, G):
"""Builds an auxiliary graph encoding edge-connectivity between nodes.
Notes
-----
Given G=(V, E), initialize an empty auxiliary graph A.
Choose an arbitrary source node s. Initialize a set N of available
nodes (that can be used as the sink). The algorithm picks an
arbitrary node t from N - {s}, and then computes the minimum st-cut
(S, T) with value w. If G is directed the minimum of the st-cut or
the ts-cut is used instead. Then, the edge (s, t) is added to the
auxiliary graph with weight w. The algorithm is called recursively
first using S as the available nodes and s as the source, and then
using T and t. Recursion stops when the source is the only available
node.
Parameters
----------
G : NetworkX graph
"""
# workaround for classmethod decorator
not_implemented_for("multigraph")(lambda G: G)(G)
def _recursive_build(H, A, source, avail):
# Terminate once the flow has been compute to every node.
if {source} == avail:
return
# pick an arbitrary node as the sink
sink = arbitrary_element(avail - {source})
# find the minimum cut and its weight
value, (S, T) = nx.minimum_cut(H, source, sink)
if H.is_directed():
# check if the reverse direction has a smaller cut
value_, (T_, S_) = nx.minimum_cut(H, sink, source)
if value_ < value:
value, S, T = value_, S_, T_
# add edge with weight of cut to the aux graph
A.add_edge(source, sink, weight=value)
# recursively call until all but one node is used
_recursive_build(H, A, source, avail.intersection(S))
_recursive_build(H, A, sink, avail.intersection(T))
# Copy input to ensure all edges have unit capacity
H = G.__class__()
H.add_nodes_from(G.nodes())
H.add_edges_from(G.edges(), capacity=1)
# A is the auxiliary graph to be constructed
# It is a weighted undirected tree
A = nx.Graph()
# Pick an arbitrary node as the source
if H.number_of_nodes() > 0:
source = arbitrary_element(H.nodes())
# Initialize a set of elements that can be chosen as the sink
avail = set(H.nodes())
# This constructs A
_recursive_build(H, A, source, avail)
# This class is a container the holds the auxiliary graph A and
# provides access the k_edge_components function.
self = EdgeComponentAuxGraph()
self.A = A
self.H = H
return self
def k_edge_components(self, k):
"""Queries the auxiliary graph for k-edge-connected components.
Parameters
----------
k : Integer
Desired edge connectivity
Returns
-------
k_edge_components : a generator of k-edge-ccs
Notes
-----
Given the auxiliary graph, the k-edge-connected components can be
determined in linear time by removing all edges with weights less than
k from the auxiliary graph. The resulting connected components are the
k-edge-ccs in the original graph.
"""
if k < 1:
raise ValueError("k cannot be less than 1")
A = self.A
# "traverse the auxiliary graph A and delete all edges with weights less
# than k"
aux_weights = nx.get_edge_attributes(A, "weight")
# Create a relevant graph with the auxiliary edges with weights >= k
R = nx.Graph()
R.add_nodes_from(A.nodes())
R.add_edges_from(e for e, w in aux_weights.items() if w >= k)
# Return the nodes that are k-edge-connected in the original graph
yield from nx.connected_components(R)
def k_edge_subgraphs(self, k):
"""Queries the auxiliary graph for k-edge-connected subgraphs.
Parameters
----------
k : Integer
Desired edge connectivity
Returns
-------
k_edge_subgraphs : a generator of k-edge-subgraphs
Notes
-----
Refines the k-edge-ccs into k-edge-subgraphs. The running time is more
than $O(|V|)$.
For single values of k it is faster to use `nx.k_edge_subgraphs`.
But for multiple values of k, it can be faster to build AuxGraph and
then use this method.
"""
if k < 1:
raise ValueError("k cannot be less than 1")
H = self.H
A = self.A
# "traverse the auxiliary graph A and delete all edges with weights less
# than k"
aux_weights = nx.get_edge_attributes(A, "weight")
# Create a relevant graph with the auxiliary edges with weights >= k
R = nx.Graph()
R.add_nodes_from(A.nodes())
R.add_edges_from(e for e, w in aux_weights.items() if w >= k)
# Return the components whose subgraphs are k-edge-connected
for cc in nx.connected_components(R):
if len(cc) < k:
# Early return optimization
for node in cc:
yield {node}
else:
# Call subgraph solution to refine the results
C = H.subgraph(cc)
yield from k_edge_subgraphs(C, k)
def _low_degree_nodes(G, k, nbunch=None):
"""Helper for finding nodes with degree less than k."""
# Nodes with degree less than k cannot be k-edge-connected.
if G.is_directed():
# Consider both in and out degree in the directed case
seen = set()
for node, degree in G.out_degree(nbunch):
if degree < k:
seen.add(node)
yield node
for node, degree in G.in_degree(nbunch):
if node not in seen and degree < k:
seen.add(node)
yield node
else:
# Only the degree matters in the undirected case
for node, degree in G.degree(nbunch):
if degree < k:
yield node
def _high_degree_components(G, k):
"""Helper for filtering components that can't be k-edge-connected.
Removes and generates each node with degree less than k. Then generates
remaining components where all nodes have degree at least k.
"""
# Iteratively remove parts of the graph that are not k-edge-connected
H = G.copy()
singletons = set(_low_degree_nodes(H, k))
while singletons:
# Only search neighbors of removed nodes
nbunch = set(it.chain.from_iterable(map(H.neighbors, singletons)))
nbunch.difference_update(singletons)
H.remove_nodes_from(singletons)
for node in singletons:
yield {node}
singletons = set(_low_degree_nodes(H, k, nbunch))
# Note: remaining connected components may not be k-edge-connected
if G.is_directed():
yield from nx.strongly_connected_components(H)
else:
yield from nx.connected_components(H)
@nx._dispatchable(returns_graph=True)
def general_k_edge_subgraphs(G, k):
"""General algorithm to find all maximal k-edge-connected subgraphs in `G`.
Parameters
----------
G : nx.Graph
Graph in which all maximal k-edge-connected subgraphs will be found.
k : int
Yields
------
k_edge_subgraphs : Graph instances that are k-edge-subgraphs
Each k-edge-subgraph contains a maximal set of nodes that defines a
subgraph of `G` that is k-edge-connected.
Notes
-----
Implementation of the basic algorithm from [1]_. The basic idea is to find
a global minimum cut of the graph. If the cut value is at least k, then the
graph is a k-edge-connected subgraph and can be added to the results.
Otherwise, the cut is used to split the graph in two and the procedure is
applied recursively. If the graph is just a single node, then it is also
added to the results. At the end, each result is either guaranteed to be
a single node or a subgraph of G that is k-edge-connected.
This implementation contains optimizations for reducing the number of calls
to max-flow, but there are other optimizations in [1]_ that could be
implemented.
References
----------
.. [1] Zhou, Liu, et al. (2012) Finding maximal k-edge-connected subgraphs
from a large graph. ACM International Conference on Extending Database
Technology 2012 480-–491.
https://openproceedings.org/2012/conf/edbt/ZhouLYLCL12.pdf
Examples
--------
>>> from networkx.utils import pairwise
>>> paths = [
... (11, 12, 13, 14, 11, 13, 14, 12), # a 4-clique
... (21, 22, 23, 24, 21, 23, 24, 22), # another 4-clique
... # connect the cliques with high degree but low connectivity
... (50, 13),
... (12, 50, 22),
... (13, 102, 23),
... (14, 101, 24),
... ]
>>> G = nx.Graph(it.chain(*[pairwise(path) for path in paths]))
>>> sorted(len(k_sg) for k_sg in k_edge_subgraphs(G, k=3))
[1, 1, 1, 4, 4]
"""
if k < 1:
raise ValueError("k cannot be less than 1")
# Node pruning optimization (incorporates early return)
# find_ccs is either connected_components/strongly_connected_components
find_ccs = partial(_high_degree_components, k=k)
# Quick return optimization
if G.number_of_nodes() < k:
for node in G.nodes():
yield G.subgraph([node]).copy()
return
# Intermediate results
R0 = {G.subgraph(cc).copy() for cc in find_ccs(G)}
# Subdivide CCs in the intermediate results until they are k-conn
while R0:
G1 = R0.pop()
if G1.number_of_nodes() == 1:
yield G1
else:
# Find a global minimum cut
cut_edges = nx.minimum_edge_cut(G1)
cut_value = len(cut_edges)
if cut_value < k:
# G1 is not k-edge-connected, so subdivide it
G1.remove_edges_from(cut_edges)
for cc in find_ccs(G1):
R0.add(G1.subgraph(cc).copy())
else:
# Otherwise we found a k-edge-connected subgraph
yield G1
| EdgeComponentAuxGraph |
python | bokeh__bokeh | src/bokeh/models/tiles.py | {
"start": 3258,
"end": 4244
} | class ____(TileSource):
''' A base class for Mercator tile services (e.g. ``WMTSTileSource``).
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
_args = ('url', 'tile_size', 'min_zoom', 'max_zoom', 'x_origin_offset', 'y_origin_offset', 'extra_url_vars', 'initial_resolution')
x_origin_offset = Override(default=20037508.34)
y_origin_offset = Override(default=20037508.34)
initial_resolution = Override(default=156543.03392804097)
snap_to_zoom = Bool(default=False, help="""
Forces initial extents to snap to the closest larger zoom level.""")
wrap_around = Bool(default=True, help="""
Enables continuous horizontal panning by wrapping the x-axis based on
bounds of map.
.. note::
Axis coordinates are not wrapped. To toggle axis label visibility,
use ``plot.axis.visible = False``.
""")
| MercatorTileSource |
python | django__django | django/test/testcases.py | {
"start": 5366,
"end": 5576
} | class ____:
def __init__(self, wrapped, message):
self.wrapped = wrapped
self.message = message
def __call__(self):
raise DatabaseOperationForbidden(self.message)
| _DatabaseFailure |
python | pypa__pipenv | pipenv/vendor/click/types.py | {
"start": 31872,
"end": 36433
} | class ____(CompositeParamType):
"""The default behavior of Click is to apply a type on a value directly.
This works well in most cases, except for when `nargs` is set to a fixed
count and different types should be used for different items. In this
case the :class:`Tuple` type can be used. This type can only be used
if `nargs` is set to a fixed number.
For more information see :ref:`tuple-type`.
This can be selected by using a Python tuple literal as a type.
:param types: a list of types that should be used for the tuple items.
"""
def __init__(self, types: t.Sequence[t.Union[t.Type[t.Any], ParamType]]) -> None:
self.types: t.Sequence[ParamType] = [convert_type(ty) for ty in types]
def to_info_dict(self) -> t.Dict[str, t.Any]:
info_dict = super().to_info_dict()
info_dict["types"] = [t.to_info_dict() for t in self.types]
return info_dict
@property
def name(self) -> str: # type: ignore
return f"<{' '.join(ty.name for ty in self.types)}>"
@property
def arity(self) -> int: # type: ignore
return len(self.types)
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
len_type = len(self.types)
len_value = len(value)
if len_value != len_type:
self.fail(
ngettext(
"{len_type} values are required, but {len_value} was given.",
"{len_type} values are required, but {len_value} were given.",
len_value,
).format(len_type=len_type, len_value=len_value),
param=param,
ctx=ctx,
)
return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value))
def convert_type(ty: t.Optional[t.Any], default: t.Optional[t.Any] = None) -> ParamType:
"""Find the most appropriate :class:`ParamType` for the given Python
type. If the type isn't provided, it can be inferred from a default
value.
"""
guessed_type = False
if ty is None and default is not None:
if isinstance(default, (tuple, list)):
# If the default is empty, ty will remain None and will
# return STRING.
if default:
item = default[0]
# A tuple of tuples needs to detect the inner types.
# Can't call convert recursively because that would
# incorrectly unwind the tuple to a single type.
if isinstance(item, (tuple, list)):
ty = tuple(map(type, item))
else:
ty = type(item)
else:
ty = type(default)
guessed_type = True
if isinstance(ty, tuple):
return Tuple(ty)
if isinstance(ty, ParamType):
return ty
if ty is str or ty is None:
return STRING
if ty is int:
return INT
if ty is float:
return FLOAT
if ty is bool:
return BOOL
if guessed_type:
return STRING
if __debug__:
try:
if issubclass(ty, ParamType):
raise AssertionError(
f"Attempted to use an uninstantiated parameter type ({ty})."
)
except TypeError:
# ty is an instance (correct), so issubclass fails.
pass
return FuncParamType(ty)
#: A dummy parameter type that just does nothing. From a user's
#: perspective this appears to just be the same as `STRING` but
#: internally no string conversion takes place if the input was bytes.
#: This is usually useful when working with file paths as they can
#: appear in bytes and unicode.
#:
#: For path related uses the :class:`Path` type is a better choice but
#: there are situations where an unprocessed type is useful which is why
#: it is is provided.
#:
#: .. versionadded:: 4.0
UNPROCESSED = UnprocessedParamType()
#: A unicode string parameter type which is the implicit default. This
#: can also be selected by using ``str`` as type.
STRING = StringParamType()
#: An integer parameter. This can also be selected by using ``int`` as
#: type.
INT = IntParamType()
#: A floating point value parameter. This can also be selected by using
#: ``float`` as type.
FLOAT = FloatParamType()
#: A boolean parameter. This is the default for boolean flags. This can
#: also be selected by using ``bool`` as a type.
BOOL = BoolParamType()
#: A UUID parameter.
UUID = UUIDParameterType()
| Tuple |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/hitl.py | {
"start": 1703,
"end": 3022
} | class ____(BaseModel):
"""The common part within HITLDetail and HITLDetailHistory."""
# User Request Detail
options: list[str] = Field(min_length=1)
subject: str
body: str | None = None
defaults: list[str] | None = None
multiple: bool = False
params: Mapping = Field(default_factory=dict)
assigned_users: list[HITLUser] = Field(default_factory=list)
created_at: datetime
# Response Content Detail
responded_by_user: HITLUser | None = None
responded_at: datetime | None = None
chosen_options: list[str] | None = None
params_input: dict[str, Any] = Field(default_factory=dict)
response_received: bool = False
@field_validator("params", mode="before")
@classmethod
def get_params(cls, params: dict[str, Any]) -> dict[str, Any]:
"""Convert params attribute to dict representation."""
return {
key: value
if BaseHITLDetail._is_param(value)
else {
"value": value,
"description": None,
"schema": {},
}
for key, value in params.items()
}
@staticmethod
def _is_param(value: Any) -> bool:
return isinstance(value, dict) and all(key in value for key in ("description", "schema", "value"))
| BaseHITLDetail |
python | tornadoweb__tornado | tornado/template.py | {
"start": 26250,
"end": 28106
} | class ____:
def __init__(
self,
file: TextIO,
named_blocks: Dict[str, _NamedBlock],
loader: Optional[BaseLoader],
current_template: Template,
) -> None:
self.file = file
self.named_blocks = named_blocks
self.loader = loader
self.current_template = current_template
self.apply_counter = 0
self.include_stack = [] # type: List[Tuple[Template, int]]
self._indent = 0
def indent_size(self) -> int:
return self._indent
def indent(self) -> "ContextManager":
class Indenter:
def __enter__(_) -> "_CodeWriter":
self._indent += 1
return self
def __exit__(_, *args: Any) -> None:
assert self._indent > 0
self._indent -= 1
return Indenter()
def include(self, template: Template, line: int) -> "ContextManager":
self.include_stack.append((self.current_template, line))
self.current_template = template
class IncludeTemplate:
def __enter__(_) -> "_CodeWriter":
return self
def __exit__(_, *args: Any) -> None:
self.current_template = self.include_stack.pop()[0]
return IncludeTemplate()
def write_line(
self, line: str, line_number: int, indent: Optional[int] = None
) -> None:
if indent is None:
indent = self._indent
line_comment = " # %s:%d" % (self.current_template.name, line_number)
if self.include_stack:
ancestors = [
"%s:%d" % (tmpl.name, lineno) for (tmpl, lineno) in self.include_stack
]
line_comment += " (via %s)" % ", ".join(reversed(ancestors))
print(" " * indent + line + line_comment, file=self.file)
| _CodeWriter |
python | facebookresearch__faiss | tests/test_factory.py | {
"start": 7447,
"end": 7734
} | class ____(unittest.TestCase):
def test_clone_size(self):
index = faiss.index_factory(20, 'PCA10,Flat')
xb = faiss.rand((100, 20))
index.train(xb)
index.add(xb)
index2 = faiss.clone_index(index)
assert index2.ntotal == 100
| TestCloneSize |
python | django__django | django/db/models/functions/datetime.py | {
"start": 12450,
"end": 12557
} | class ____(TruncBase):
"""Truncate to midnight on the Monday of the week."""
kind = "week"
| TruncWeek |
python | getsentry__sentry | src/sentry/replays/post_process.py | {
"start": 277,
"end": 417
} | class ____(TypedDict, total=False):
name: str | None
brand: str | None
model: str | None
family: str | None
| DeviceResponseType |
python | huggingface__transformers | tests/models/xglm/test_modeling_xglm.py | {
"start": 1274,
"end": 10757
} | class ____:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
d_model=32,
num_hidden_layers=2,
num_attention_heads=4,
ffn_dim=37,
activation_function="gelu",
activation_dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = d_model
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.ffn_dim = ffn_dim
self.activation_function = activation_function
self.activation_dropout = activation_dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = None
self.bos_token_id = 0
self.eos_token_id = 2
self.pad_token_id = 1
def prepare_config_and_inputs(
self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False
):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config(gradient_checkpointing=gradient_checkpointing)
return (
config,
input_ids,
input_mask,
)
def get_config(
self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False
):
return XGLMConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
num_layers=self.num_hidden_layers,
attention_heads=self.num_attention_heads,
ffn_dim=self.ffn_dim,
activation_function=self.activation_function,
activation_dropout=self.activation_dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
gradient_checkpointing=gradient_checkpointing,
)
def create_and_check_xglm_model(self, config, input_ids, input_mask, *args):
model = XGLMModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values), config.num_hidden_layers)
def create_and_check_xglm_model_past(self, config, input_ids, input_mask, *args):
model = XGLMModel(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_no_past = model(input_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
output, past = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids and token_type_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_xglm_model_attention_mask_past(self, config, input_ids, input_mask, *args):
model = XGLMModel(config=config)
model.to(torch_device)
model.eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = self.seq_length // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.zeros((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_xglm_model_past_large_inputs(self, config, input_ids, input_mask, *args):
model = XGLMModel(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
output, past = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=1)
# append to next input_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past)[
"last_hidden_state"
]
self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_lm_head_model(self, config, input_ids, input_mask, *args):
model = XGLMForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_forward_and_backwards(
self, config, input_ids, input_mask, *args, gradient_checkpointing=False
):
model = XGLMForCausalLM(config)
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def create_and_check_xglm_weight_initialization(self, config, *args):
model = XGLMModel(config)
model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
}
return config, inputs_dict
@require_torch
| XGLMModelTester |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 698941,
"end": 699671
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for Milestone."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("MilestoneEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("Milestone"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| MilestoneConnection |
python | huggingface__transformers | tests/models/moonshine/test_modeling_moonshine.py | {
"start": 18605,
"end": 26985
} | class ____(unittest.TestCase):
def setUp(self):
self.processor_tiny = AutoProcessor.from_pretrained("UsefulSensors/moonshine-tiny")
self.processor_base = AutoProcessor.from_pretrained("UsefulSensors/moonshine-base")
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def _load_datasamples(self, num_samples):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_samples = ds.sort("id")[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@slow
def test_tiny_logits_single(self):
model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny")
model.to(torch_device)
inputs = self.processor_tiny(self._load_datasamples(1), return_tensors="pt")
inputs.to(torch_device)
outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True)
# fmt: off
expectations = Expectations(
{
(None, None): [-9.1106, 4.5542, 6.3892, -6.8139, -7.2456, -7.9074, -7.2839, -7.6043, -8.0384, -7.8351, -7.3867, -7.2450, -7.7420, -7.3912, -7.3866, -7.6979, -7.6420, -7.0504, -7.3979, -7.2483, -8.0796, -7.3300, -7.3672, -6.8765, -7.6876, -7.2682, -6.9866, -6.7457, -7.6855, -7.3050],
("cuda", 8): [-9.1107, 4.5538, 6.3902, -6.8141, -7.2459, -7.9076, -7.2842, -7.6045, -8.0387, -7.8354, -7.3869, -7.2453, -7.7423, -7.3914, -7.3869, -7.6982, -7.6422, -7.0507, -7.3982, -7.2486, -8.0798, -7.3302, -7.3675, -6.8769, -7.6878, -7.2684, -6.9868, -6.7459, -7.6858, -7.3052],
}
)
EXPECTED_LOGITS = torch.tensor(expectations.get_expectation()).to(torch_device)
# fmt: on
torch.testing.assert_close(outputs.logits[0][0, :30], EXPECTED_LOGITS, rtol=2e-4, atol=2e-4)
@slow
def test_base_logits_single(self):
model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-base")
model.to(torch_device)
inputs = self.processor_base(self._load_datasamples(1), return_tensors="pt")
inputs.to(torch_device)
outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True)
# fmt: off
EXPECTED_LOGITS = torch.tensor([
-6.7336, 1.9482, 5.2448, -8.0277, -7.9167, -7.8956, -7.9649, -7.9348, -8.1312, -8.0616,
-8.1070, -7.7696, -7.8809, -7.9450, -8.1013, -7.8177, -7.8598, -7.8257, -7.8729, -7.9657,
-7.9310, -8.1024, -7.8699, -7.8231, -8.0752, -7.9764, -7.8127, -8.0536, -7.9492, -7.9290,
])
# fmt: on
torch.testing.assert_close(outputs.logits[0][0, :30].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
@slow
def test_tiny_logits_batch(self):
model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny")
model.to(torch_device)
inputs = self.processor_tiny(self._load_datasamples(4), return_tensors="pt", padding=True)
inputs.to(torch_device)
outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True)
# fmt: off
EXPECTED_LOGITS = torch.tensor(
[
[-8.5973, 4.8608, 5.8845, -6.6182, -7.0376, -7.7120, -7.0638, -7.3837, -7.8328, -7.6114],
[-4.3157, -2.4944, 8.4917, -6.4806, -7.0952, -6.7500, -6.1084, -6.6484, -6.9868, -6.5919],
[-10.0086, 3.2859, 0.7345, -6.5557, -6.8514, -6.5308, -6.4172, -6.9484, -6.6214, -6.6229],
[-11.1003, 3.9395, 0.6672, -5.0150, -5.3939, -5.4103, -5.2240, -5.4407, -5.2204, -5.2706],
],
)
# fmt: on
torch.testing.assert_close(outputs.logits[0][:, :10].cpu(), EXPECTED_LOGITS, rtol=2e-4, atol=2e-4)
@slow
def test_base_logits_batch(self):
model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-base")
model.to(torch_device)
inputs = self.processor_base(self._load_datasamples(4), return_tensors="pt", padding=True)
inputs.to(torch_device)
outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True)
# fmt: off
EXPECTED_LOGITS = torch.tensor(
[
[-6.3602, 1.8383, 5.2615, -7.9576, -7.8442, -7.8238, -7.9014, -7.8645, -8.0550, -7.9963],
[-6.1725, -0.6274, 8.1798, -6.8570, -6.8078, -6.7915, -6.9099, -6.8980, -6.9760, -6.8264],
[-7.3186, 3.1192, 3.8938, -5.7208, -5.8429, -5.7610, -5.9997, -5.8213, -5.8616, -5.8720],
[-7.3432, 1.0402, 3.9912, -5.4177, -5.4890, -5.4573, -5.6516, -5.4776, -5.5079, -5.5391],
]
)
# fmt: on
torch.testing.assert_close(outputs.logits[0][:, :10].cpu(), EXPECTED_LOGITS, rtol=2e-4, atol=2e-4)
@slow
def test_tiny_generation_single(self):
model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny")
model.to(torch_device)
audio_array = self._load_datasamples(1)
inputs = self.processor_tiny(audio_array, return_tensors="pt")
inputs.to(torch_device)
generated_ids = model.generate(**inputs, max_new_tokens=20)
transcript = self.processor_tiny.batch_decode(generated_ids, skip_special_tokens=True)[0]
EXPECTED_TRANSCRIPT = "Mr. Quilter is the apostle of the middle classes, and we are glad to welcome"
self.assertEqual(transcript, EXPECTED_TRANSCRIPT)
@slow
def test_base_generation_single(self):
model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-base")
model.to(torch_device)
audio_array = self._load_datasamples(1)
inputs = self.processor_base(audio_array, return_tensors="pt")
inputs.to(torch_device)
generated_ids = model.generate(**inputs, max_new_tokens=20)
transcript = self.processor_base.batch_decode(generated_ids, skip_special_tokens=True)[0]
EXPECTED_TRANSCRIPT = "Mr. Quilter is the apostle of the middle classes, and we are glad to welcome"
self.assertEqual(transcript, EXPECTED_TRANSCRIPT)
@slow
def test_tiny_generation_batch(self):
model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny")
model.to(torch_device)
audio_array = self._load_datasamples(4)
inputs = self.processor_tiny(audio_array, return_tensors="pt", padding=True)
inputs.to(torch_device)
generated_ids = model.generate(**inputs, max_new_tokens=20)
transcript = self.processor_tiny.batch_decode(generated_ids, skip_special_tokens=True)
# fmt: off
EXPECTED_TRANSCRIPT = [
"Mr. Quilter is the apostle of the middle classes, and we are glad to welcome",
"Nor is Mr. Quilter's manner less interesting than his matter.",
"He tells us that at this festive season of the year, with Christmas and Rose beef lo",
"He has grave doubts whether Sir Frederick Layton's work is really Greek after all,",
]
# fmt: on
self.assertListEqual(transcript, EXPECTED_TRANSCRIPT)
@slow
def test_base_generation_batch(self):
model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-base")
model.to(torch_device)
audio_array = self._load_datasamples(4)
inputs = self.processor_base(audio_array, return_tensors="pt", padding=True)
inputs.to(torch_device)
generated_ids = model.generate(**inputs, max_new_tokens=20)
transcript = self.processor_base.batch_decode(generated_ids, skip_special_tokens=True)
# fmt: off
EXPECTED_TRANSCRIPT = [
"Mr. Quilter is the apostle of the middle classes, and we are glad to welcome",
"Nor is Mr. Quilter's manner less interesting than his matter.",
"He tells us that at this festive season of the year, with Christmas and rose beef lo",
"He has grave doubts whether Sir Frederick Layton's work is really Greek after all,",
]
# fmt: on
self.assertListEqual(transcript, EXPECTED_TRANSCRIPT)
| MoonshineModelIntegrationTests |
python | qdrant__qdrant-client | qdrant_client/parallel_processor.py | {
"start": 583,
"end": 2748
} | class ____:
@classmethod
def start(cls, *args: Any, **kwargs: Any) -> "Worker":
raise NotImplementedError()
def process(self, items: Iterable[Any]) -> Iterable[Any]:
raise NotImplementedError()
def _worker(
worker_class: Type[Worker],
input_queue: Queue,
output_queue: Queue,
num_active_workers: BaseValue,
worker_id: int,
kwargs: Optional[dict[str, Any]] = None,
) -> None:
"""
A worker that pulls data pints off the input queue, and places the execution result on the output queue.
When there are no data pints left on the input queue, it decrements
num_active_workers to signal completion.
"""
if kwargs is None:
kwargs = {}
logging.info(f"Reader worker: {worker_id} PID: {os.getpid()}")
try:
worker = worker_class.start(**kwargs)
# Keep going until you get an item that's None.
def input_queue_iterable() -> Iterable[Any]:
while True:
item = input_queue.get()
if item == QueueSignals.stop:
break
yield item
for processed_item in worker.process(input_queue_iterable()):
output_queue.put(processed_item)
except Exception as e: # pylint: disable=broad-except
logging.exception(e)
output_queue.put(QueueSignals.error)
finally:
# It's important that we close and join the queue here before
# decrementing num_active_workers. Otherwise our parent may join us
# before the queue's feeder thread has passed all buffered items to
# the underlying pipe resulting in a deadlock.
#
# See:
# https://docs.python.org/3.6/library/multiprocessing.html?highlight=process#pipes-and-queues
# https://docs.python.org/3.6/library/multiprocessing.html?highlight=process#programming-guidelines
input_queue.close()
output_queue.close()
input_queue.join_thread()
output_queue.join_thread()
with num_active_workers.get_lock():
num_active_workers.value -= 1
logging.info(f"Reader worker {worker_id} finished")
| Worker |
python | pytorch__pytorch | test/inductor/test_max_autotune.py | {
"start": 3188,
"end": 95072
} | class ____(TestCase):
@parametrize("dynamic", (False, True))
@parametrize("search_space", ("DEFAULT", "EXHAUSTIVE"))
def test_max_autotune_mm_plus_mm_zero_size_input(self, dynamic, search_space):
"""
Make sure autotuning mm_plus_mm with zero-size input works without crashes.
"""
m, n, k = 0, 1536, 64
def mm_plus_mm(a, b, c, d):
return a @ b + c @ d
a = torch.randn(m, k).to(GPU_TYPE)
b = torch.randn(k, n).to(GPU_TYPE)
c = torch.randn(m, k).to(GPU_TYPE)
d = torch.randn(k, n).to(GPU_TYPE)
with config.patch(
{"max_autotune": True, "max_autotune_gemm_search_space": search_space}
):
torch.compile(mm_plus_mm, dynamic=dynamic)(a, b, c, d)
@unittest.skipIf(
not has_triton_tma_device(), "Need device-side TMA support in Triton"
)
@parametrize("a_transposed", (False, True))
@parametrize("b_transposed", (False, True))
@parametrize("dynamic", (False, True))
@parametrize("tma_store", (False, True))
def test_max_autotune_regular_mm_persistent_tma(
self,
a_transposed: bool,
b_transposed: bool,
dynamic: bool,
tma_store: bool,
):
def mm(a, b):
# TMA requires 16-byte alignment: here we repeat the dims
# by the factor of 8, as float16 is 2-byte. All dims are
# repeated due to the possible transpositions below.
a = a.repeat(8, 8)
b = b.repeat(8, 8)
if a_transposed:
a = a.T
if b_transposed:
b = b.T
return torch.mm(a, b)
M, N, K = 21, 31, 11
a = (
torch.randn(*((K, M) if a_transposed else (M, K)))
.to(torch.float16)
.to(GPU_TYPE)
)
b = (
torch.randn(*((N, K) if b_transposed else (K, N)))
.to(torch.float16)
.to(GPU_TYPE)
)
with config.patch(
{
"max_autotune": True,
"triton.enable_persistent_tma_matmul": "1",
"triton.native_matmul": False,
"triton.enable_template_tma_store": tma_store,
"test_configs.autotune_choice_name_regex": "mm_persistent_tma",
}
):
c_actual, code = run_and_get_code(torch.compile(mm, dynamic=dynamic), a, b)
c_expected = mm(a, b)
if has_triton_stable_tma_api():
make_desc_api = "triton.language.make_tensor_descriptor"
read_api = "tl.load_tensor_descriptor"
if tma_store:
# Note: The tma_descriptor0 is generated by the kernel. If the
# code generation process changes this could change.
write_api = "tma_descriptor0.store"
else:
write_api = "tl.store"
else:
make_desc_api = (
"triton.language.extra.cuda.experimental_device_tensormap_create2d"
)
read_api = "tl._experimental_descriptor_load"
# TMA store is not supported with the experimental API
write_api = "tl.store"
# Verify that we are using a TMA implementation
FileCheck().check("triton_tem_fused_mm").check(make_desc_api).check(
read_api
).check(write_api).run(code[0])
torch.testing.assert_close(c_actual, c_expected, atol=1e-2, rtol=1e-2)
@unittest.skipIf(
not has_triton_tma_device(), "Need device-side TMA support in Triton"
)
@parametrize("a_transposed", (False, True))
@parametrize("b_transposed", (False, True))
@parametrize("dynamic", (False, True))
def test_max_autotune_regular_mm_persistent_tma_strided(
self,
a_transposed: bool,
b_transposed: bool,
dynamic: bool,
):
def mm(a, b):
# TMA requires 16-byte alignment: here we repeat the dims
# by the factor of 8, as float16 is 2-byte. All dims are
# repeated due to the possible transpositions below.
a = a.repeat(8, 8)
b = b.repeat(8, 8)
if a_transposed:
a = a.T
if b_transposed:
b = b.T
return torch.mm(a, b)
def next_multiple_16(a: int) -> int:
return ((a + 15) // 16) * 16
M, N, K = 21, 31, 11
a_shape = (K, M) if a_transposed else (M, K)
a_stride = (
(next_multiple_16(M), 1) if a_transposed else (next_multiple_16(K), 1)
)
a = torch.empty_strided(a_shape, a_stride, dtype=torch.float16).to(GPU_TYPE)
a[:] = torch.randn(a_shape, dtype=torch.float16)
a = a.to(GPU_TYPE)
b_shape = (N, K) if b_transposed else (K, N)
b_stride = (
(next_multiple_16(K), 1) if a_transposed else (next_multiple_16(N), 1)
)
b = torch.empty_strided(b_shape, b_stride, dtype=torch.float16)
b[:] = torch.randn(b_shape, dtype=torch.float16)
b = b.to(GPU_TYPE)
with config.patch(
{
"max_autotune": True,
"triton.enable_persistent_tma_matmul": "1",
"triton.native_matmul": False,
"test_configs.autotune_choice_name_regex": "mm_persistent_tma",
}
):
c_actual, code = run_and_get_code(torch.compile(mm, dynamic=dynamic), a, b)
c_expected = mm(a, b)
torch.testing.assert_close(c_actual, c_expected, atol=1e-2, rtol=1e-2)
# Verify that we are using a TMA implementation
# depending on whether we're using the experimental API, we check for a different string
check_str = "triton.language.extra.cuda.experimental_device_tensormap_create2d"
if has_triton_stable_tma_api():
check_str = "triton.language.make_tensor_descriptor"
FileCheck().check("triton_tem_fused_mm").check(check_str).run(code[0])
@unittest.skipIf(
not has_datacenter_blackwell_tma_device(),
"Need Blackwell with device-side TMA support in Triton",
)
@parametrize("a_transposed", (False, True))
@parametrize("b_transposed", (False, True))
@parametrize("dynamic", (False, True))
@parametrize("tma_store", (False, True))
@parametrize("epilogue_subtile", (False, True))
def test_blackwell_max_autotune_regular_mm_persistent_tma(
self,
a_transposed: bool,
b_transposed: bool,
dynamic: bool,
tma_store: bool,
epilogue_subtile: bool,
):
def mm(a, b):
# TMA requires 16-byte alignment: here we repeat the dims
# by the factor of 8, as float16 is 2-byte. All dims are
# repeated due to the possible transpositions below.
a = a.repeat(8, 8)
b = b.repeat(8, 8)
if a_transposed:
a = a.T
if b_transposed:
b = b.T
return torch.mm(a, b)
M, N, K = 32, 16, 48
a = (
torch.randn(*((K, M) if a_transposed else (M, K)))
.to(torch.float16)
.to(GPU_TYPE)
)
b = (
torch.randn(*((N, K) if b_transposed else (K, N)))
.to(torch.float16)
.to(GPU_TYPE)
)
with config.patch(
{
"max_autotune": True,
"triton.enable_persistent_tma_matmul": True,
"triton.enable_template_tma_store": tma_store,
"triton.enable_epilogue_subtiling": epilogue_subtile,
"test_configs.autotune_choice_name_regex": "blackwell_ws_persistent_device_tma",
}
):
c_actual, code = run_and_get_code(torch.compile(mm, dynamic=dynamic), a, b)
c_expected = mm(a, b)
torch.testing.assert_close(c_actual, c_expected, atol=1e-2, rtol=1e-2)
write_count = 2 if epilogue_subtile else 1
if tma_store:
# Verify that we are using a TMA implementation
# Note: The tma_descriptor0 is generated by the kernel. If the
# code generation process changes this could change.
write_api = "tma_descriptor0.store"
else:
write_api = "tl.store"
FileCheck().check("triton_tem_fused_mm").check(
"triton.language.make_tensor_descriptor"
).check("tl.load_tensor_descriptor").check_count(write_api, write_count).run(
code[0]
)
@unittest.skipIf(
not has_triton_tma_device(), "Need device-side TMA support in Triton"
)
@skipIfXpu(msg="TMA path on Intel GPU not require this check")
@parametrize("dynamic", (False, True))
def test_max_autotune_regular_mm_persistent_tma_illegal_alignment(self, dynamic):
def mm(a, b):
return torch.mm(a, b)
M, N, K = 21, 31, 11
a = torch.randn(M, K).to(torch.float16).to(GPU_TYPE)
b = torch.randn(K, N).to(torch.float16).to(GPU_TYPE)
with (
self.assertRaises(BackendCompilerFailed) as context,
config.patch(
{
"max_autotune": True,
"triton.enable_persistent_tma_matmul": "1",
"triton.native_matmul": False,
"test_configs.autotune_choice_name_regex": "mm_persistent_tma",
}
),
):
torch.compile(mm, dynamic=dynamic)(a, b)
# Lowering to the persistent+TMA Triton template should be skipped
# if any of the input inner dims are not 16-byte aligned. As a result,
# given the config flags above, we should have no choices left.
self.assertIn("NoValidChoicesError", str(context.exception))
@unittest.skipIf(
not has_triton_tma_device(), "Need device-side TMA support in Triton"
)
@parametrize("dynamic", (False, True))
def test_max_autotune_regular_mm_persistent_tma_illegal_output_alignment(
self, dynamic
):
def mm(a, b, out):
torch.mm(a, b, out=out)
return out
M, N, K = 21, 31, 32
a = torch.empty_strided((M, K), (K, 1), dtype=torch.float16, device=GPU_TYPE)
a[:] = torch.randn((M, K), dtype=torch.float16)
b = torch.empty_strided((K, N), (1, K), dtype=torch.float16, device=GPU_TYPE)
b[:] = torch.randn((K, N), dtype=torch.float16)
# allocate an output with a stride not divisible by 16, so it can't satisfy TMA alignment checks.
out = torch.empty_strided((M, N), (N, 1), dtype=torch.float16, device=GPU_TYPE)
with (
self.assertRaises(BackendCompilerFailed) as context,
config.patch(
{
"max_autotune": True,
"triton.enable_persistent_tma_matmul": "1",
"triton.native_matmul": False,
"triton.enable_template_tma_store": True,
"test_configs.autotune_choice_name_regex": "mm_persistent_tma",
}
),
):
torch.compile(mm, dynamic=dynamic)(a, b, out)
# Lowering to the persistent+TMA Triton template should be skipped
# since the output doesn't have a stride of 1 in any dim
self.assertIn("NoValidChoicesError", str(context.exception))
@unittest.skipIf(
not has_triton_tma_device(), "Need device-side TMA support in Triton"
)
def test_max_autotune_regular_mm_tma_dynamic_outer_dim(self):
def mm(a, b):
return torch.mm(a, b)
M, N, K = 21, 31, 11
a = torch.randn(M, K).to(torch.float16).to(GPU_TYPE)
b = torch.randn(K, N).to(torch.float16).to(GPU_TYPE)
# TMA requires 16-byte alignment: here we repeat the dims
# by the factor of 8, as float16 is 2-byte. All dims are
# repeated due to the possible transpositions below.
a = a.repeat(8, 8)
b = b.repeat(8, 8)
torch._dynamo.mark_dynamic(a, 0)
with config.patch(
{
"max_autotune": True,
"triton.enable_persistent_tma_matmul": "1",
"triton.native_matmul": False,
"test_configs.autotune_choice_name_regex": "mm_persistent_tma",
}
):
c_actual = torch.compile(mm)(a, b)
c_expected = mm(a, b)
torch.testing.assert_close(c_actual, c_expected, atol=1e-2, rtol=1e-2)
@parametrize("dynamic", (False, True))
def test_max_autotune_regular_mm_zero_size_input(self, dynamic: bool):
"""
Make sure autotuning mm with zero-size input works without crashes.
"""
def mm(a, b):
a = torch.sin(a)
return a @ b
a = torch.randn(0, 10).to(GPU_TYPE)
b = torch.randn(10, 100).to(GPU_TYPE)
with config.patch({"max_autotune": True}):
torch.compile(mm, dynamic=dynamic)(a, b)
# NOTE: the current Inductor template verifies that the scaling mode is either per-tensor or per-row
# TODO: support additional scaling modes for Blackwell
@unittest.skipIf(
not has_datacenter_blackwell_tma_device(),
"Need Blackwell with device-side TMA support in Triton",
)
@parametrize("dynamic", (False, True))
@parametrize("tma_store", (False, True))
def test_blackwell_max_autotune_scaled_mm_per_tensor_persistent_tma(
self,
dynamic: bool,
tma_store: bool,
):
def scaled_mm(a, b, scale_a, scale_b):
# NOTE: Inductor constrains a to be row_major and b to be col_major
return torch._scaled_mm(
a, b.t(), scale_a, scale_b, use_fast_accum=True, out_dtype=torch.float16
)
def get_scale_per_tensor(t):
scale = torch.finfo(torch.float8_e4m3fn).max / t.abs().max()
return scale.to(torch.float32)
# TMA requires 16-byte alignment: here we repeat the dims
# by the factor of 8, as float16 is 2-byte.
M, N, K = 32, 16, 48
a = (torch.randn((M, K)).to(torch.float16).to(GPU_TYPE)).repeat(8, 8)
b = (torch.randn((N, K)).to(torch.float16).to(GPU_TYPE)).repeat(8, 8)
scale_a = get_scale_per_tensor(a)
scale_b = get_scale_per_tensor(b)
a = a.to(torch.float8_e4m3fn)
b = b.to(torch.float8_e4m3fn)
with config.patch(
{
"max_autotune": True,
"triton.enable_persistent_tma_matmul": True,
"triton.enable_template_tma_store": tma_store,
"test_configs.autotune_choice_name_regex": "blackwell_ws_persistent_device_tma",
}
):
c_actual, code = run_and_get_code(
torch.compile(scaled_mm, dynamic=dynamic), a, b, scale_a, scale_b
)
c_expected = scaled_mm(a, b, scale_a, scale_b)
torch.testing.assert_close(c_actual, c_expected, atol=1e-2, rtol=0.5)
if tma_store:
# Verify that we are using a TMA implementation
# Note: The tma_descriptor0 is generated by the kernel. If the
# code generation process changes this could change.
write_api = "tma_descriptor0.store"
else:
write_api = "tl.store"
FileCheck().check("triton_tem_fused__scaled_mm").check(
"triton.language.make_tensor_descriptor"
).check("tl.load_tensor_descriptor").check(write_api).run(code[0])
@unittest.skipIf(
not has_datacenter_blackwell_tma_device(),
"Need Blackwell with device-side TMA support in Triton",
)
@parametrize("dynamic", (False, True))
@parametrize("tma_store", (False, True))
def test_blackwell_max_autotune_scaled_mm_per_row_persistent_tma(
self,
dynamic: bool,
tma_store: bool,
):
def scaled_mm(a, b, scale_a, scale_b):
# NOTE: Inductor constrains a to be row_major and b to be col_majo
return torch._scaled_mm(
a,
b.t(),
scale_a,
scale_b.t(),
use_fast_accum=True,
out_dtype=torch.bfloat16,
)
def get_scale_per_row(t):
scale = (
torch.finfo(torch.float8_e4m3fn).max
/ t.abs().max(dim=1, keepdim=True).values
)
return scale.to(torch.float32)
# TMA requires 16-byte alignment: here we repeat the dims
# by the factor of 8, as float16 is 2-byte.
M, N, K = 32, 16, 48
a = (torch.randn((M, K)).to(torch.bfloat16).to(GPU_TYPE)).repeat(8, 8)
b = (torch.randn((N, K)).to(torch.bfloat16).to(GPU_TYPE)).repeat(8, 8)
scale_a = get_scale_per_row(a)
scale_b = get_scale_per_row(b)
a = a.to(torch.float8_e4m3fn)
b = b.to(torch.float8_e4m3fn)
with config.patch(
{
"max_autotune": True,
"triton.enable_persistent_tma_matmul": True,
"triton.enable_template_tma_store": tma_store,
"test_configs.autotune_choice_name_regex": "blackwell_ws_persistent_device_tma",
}
):
c_actual, code = run_and_get_code(
torch.compile(scaled_mm, dynamic=dynamic), a, b, scale_a, scale_b
)
c_expected = scaled_mm(a, b, scale_a, scale_b)
torch.testing.assert_close(c_actual, c_expected, atol=1e-2, rtol=0.5)
if tma_store:
# Verify that we are using a TMA implementation
# Note: The tma_descriptor0 is generated by the kernel. If the
# code generation process changes this could change.
write_api = "tma_descriptor0.store"
else:
write_api = "tl.store"
FileCheck().check("triton_tem_fused__scaled_mm").check(
"triton.language.make_tensor_descriptor"
).check("tl.load_tensor_descriptor").check(write_api).run(code[0])
@unittest.skipIf(
not has_triton_tma_device(), "Need device-side TMA support in Triton"
)
@parametrize("a_transposed", (False, True))
@parametrize("b_transposed", (False, True))
@parametrize("dynamic", (False, True))
@parametrize("tma_store", (False, True))
def test_max_autotune_addmm_persistent_tma(
self,
a_transposed: bool,
b_transposed: bool,
dynamic: bool,
tma_store: bool,
):
def addmm(x, a, b):
# TMA requires 16-byte alignment: here we repeat the dims
# by the factor of 8, as float16 is 2-byte. All dims are
# repeated due to the possible transpositions below.
x = x.repeat(8)
a = a.repeat(8, 8)
b = b.repeat(8, 8)
if a_transposed:
a = a.T
if b_transposed:
b = b.T
return torch.addmm(x, a, b)
M, N, K = 21, 31, 11
a = (
torch.randn(*((K, M) if a_transposed else (M, K)))
.to(torch.float16)
.to(GPU_TYPE)
)
b = (
torch.randn(*((N, K) if b_transposed else (K, N)))
.to(torch.float16)
.to(GPU_TYPE)
)
x = torch.randn(N).to(torch.float16).to(GPU_TYPE)
with config.patch(
{
"max_autotune": True,
"triton.enable_persistent_tma_matmul": "1",
"triton.native_matmul": False,
"triton.enable_template_tma_store": tma_store,
"test_configs.autotune_choice_name_regex": "mm_persistent_tma",
}
):
c_actual, code = run_and_get_code(
torch.compile(addmm, dynamic=dynamic), x, a, b
)
c_expected = addmm(x, a, b)
if has_triton_stable_tma_api():
make_desc_api = "triton.language.make_tensor_descriptor"
read_api = "tl.load_tensor_descriptor"
if tma_store:
# Note: The tma_descriptor0 is generated by the kernel. If the
# code generation process changes this could change.
write_api = "tma_descriptor0.store"
else:
write_api = "tl.store"
else:
make_desc_api = (
"triton.language.extra.cuda.experimental_device_tensormap_create2d"
)
read_api = "tl._experimental_descriptor_load"
# TMA store is not supported with the experimental API
write_api = "tl.store"
# Verify that we are using a TMA implementation
FileCheck().check("triton_tem_fused_addmm").check(make_desc_api).check(
read_api
).check(write_api).run(code[0])
torch.testing.assert_close(c_actual, c_expected, atol=1e-2, rtol=1e-2)
@unittest.skipIf(
not has_datacenter_blackwell_tma_device(),
"Need Blackwell with device-side TMA support in Triton",
)
@parametrize("a_transposed", (False, True))
@parametrize("b_transposed", (False, True))
@parametrize("dynamic", (False, True))
@parametrize("tma_store", (False, True))
@parametrize("epilogue_subtile", (False, True))
def test_blackwell_max_autotune_addmm_persistent_tma(
self,
a_transposed: bool,
b_transposed: bool,
dynamic: bool,
tma_store: bool,
epilogue_subtile: bool,
):
def addmm(x, a, b):
# TMA requires 16-byte alignment: here we repeat the dims
# by the factor of 8, as float16 is 2-byte. All dims are
# repeated due to the possible transpositions below.
x = x.repeat(8)
a = a.repeat(8, 8)
b = b.repeat(8, 8)
if a_transposed:
a = a.T
if b_transposed:
b = b.T
return torch.addmm(x, a, b)
M, N, K = 21, 31, 11
a = (
torch.randn(*((K, M) if a_transposed else (M, K)))
.to(torch.float16)
.to(GPU_TYPE)
)
b = (
torch.randn(*((N, K) if b_transposed else (K, N)))
.to(torch.float16)
.to(GPU_TYPE)
)
x = torch.randn(N).to(torch.float16).to(GPU_TYPE)
with config.patch(
{
"max_autotune": True,
"triton.enable_persistent_tma_matmul": True,
"triton.enable_template_tma_store": tma_store,
"triton.enable_epilogue_subtiling": epilogue_subtile,
"test_configs.autotune_choice_name_regex": "blackwell_ws_persistent_device_tma",
}
):
c_actual, code = run_and_get_code(
torch.compile(addmm, dynamic=dynamic), x, a, b
)
c_expected = addmm(x, a, b)
make_desc_api = "triton.language.make_tensor_descriptor"
read_api = "tl.load_tensor_descriptor"
write_count = 2 if epilogue_subtile else 1
if tma_store:
# Verify that we are using a TMA implementation
# Note: The tma_descriptor0 is generated by the kernel. If the
# code generation process changes this could change.
write_api = "tma_descriptor0.store"
else:
write_api = "tl.store"
# Verify that we are using a TMA implementation
FileCheck().check("triton_tem_fused_addmm").check(make_desc_api).check(
read_api
).check_count(write_api, write_count).run(code[0])
torch.testing.assert_close(c_actual, c_expected, atol=1e-2, rtol=1e-2)
@unittest.skipIf(
not has_triton_tma_device(), "Need device-side TMA support in Triton"
)
@skipIfXpu(msg="TMA path on Intel GPU not require this check")
@parametrize("dynamic", (False, True))
def test_max_autotune_addmm_persistent_tma_illegal_alignment(self, dynamic):
def addmm(x, a, b):
return torch.addmm(x, a, b)
M, N, K = 21, 31, 11
a = torch.randn(M, K).to(torch.float16).to(GPU_TYPE)
b = torch.randn(K, N).to(torch.float16).to(GPU_TYPE)
x = torch.randn(N).to(torch.float16).to(GPU_TYPE)
with (
self.assertRaises(BackendCompilerFailed) as context,
config.patch(
{
"max_autotune": True,
"triton.enable_persistent_tma_matmul": "1",
"triton.native_matmul": False,
"test_configs.autotune_choice_name_regex": "mm_persistent_tma",
}
),
):
torch.compile(addmm, dynamic=dynamic)(x, a, b)
# Lowering to the persistent+TMA Triton template should be skipped
# if any of the input inner dims are not 16-byte aligned. As a result,
# given the config flags above, we should have no choices left.
self.assertIn("NoValidChoicesError", str(context.exception))
@unittest.skipIf(
not has_triton_tma_device(), "Need device-side TMA support in Triton"
)
def test_max_autotune_addmm_tma_dynamic_outer_dim(self):
def addmm(x, a, b):
return torch.addmm(x, a, b)
M, N, K = 21, 31, 11
a = torch.randn(M, K).to(torch.float16).to(GPU_TYPE)
b = torch.randn(K, N).to(torch.float16).to(GPU_TYPE)
x = torch.randn(N).to(torch.float16).to(GPU_TYPE)
# TMA requires 16-byte alignment: here we repeat the dims
# by the factor of 8, as float16 is 2-byte. All dims are
# repeated due to the possible transpositions below.
x = x.repeat(8)
a = a.repeat(8, 8)
b = b.repeat(8, 8)
torch._dynamo.mark_dynamic(a, 0)
with config.patch(
{
"max_autotune": True,
"triton.enable_persistent_tma_matmul": "1",
"triton.native_matmul": False,
"test_configs.autotune_choice_name_regex": "mm_persistent_tma",
}
):
c_actual = torch.compile(addmm)(x, a, b)
c_expected = addmm(x, a, b)
torch.testing.assert_close(c_actual, c_expected, atol=1e-2, rtol=1e-2)
@fresh_cache()
@skipIfXpu(msg="XPU doesn't support sm carveout")
@unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support sm carveout")
@unittest.skipIf(IS_WINDOWS, "Windows doesn't support persistent TMA")
@unittest.skipIf(
not has_triton_tma_device(), "Need device-side TMA support in Triton"
)
@unittest.skipIf(
has_datacenter_blackwell_tma_device(), "B200 doesn't support sm carveout"
)
@parametrize("carveout", (None, 0, 27))
@parametrize("op", ("mm", "scaled_mm"))
def test_honor_sm_carveout_with_triton_tma(self, carveout, op: str):
def mm_func(a, b):
return torch.mm(a, b)
def scaled_mm(
a,
b,
scale_a,
scale_b,
):
return torch._scaled_mm(a, b, scale_a, scale_b, out_dtype=torch.bfloat16)
# Create large matrices to ensure we use all possible sms
size = 2560
a = torch.randn(size, size, device=GPU_TYPE, dtype=torch.bfloat16)
b = (
torch.randn(size, size, device=GPU_TYPE, dtype=torch.bfloat16)
.transpose(0, 1)
.contiguous()
.transpose(0, 1)
)
scale_a = torch.tensor(1, dtype=torch.float32, device=GPU_TYPE)
scale_b = torch.tensor(1, dtype=torch.float32, device=GPU_TYPE)
args = (
(a.to(torch.float8_e4m3fn), b.to(torch.float8_e4m3fn), scale_a, scale_b)
if op == "scaled_mm"
else (a, b)
)
func = scaled_mm if op == "scaled_mm" else mm_func
# Set the specified carveout value
torch._C._set_sm_carveout_experimental(carveout)
if carveout is None:
self.assertIsNone(torch._C._get_sm_carveout_experimental())
else:
self.assertEqual(torch._C._get_sm_carveout_experimental(), carveout)
with config.patch(
{
"max_autotune": True,
"triton.enable_persistent_tma_matmul": True,
"triton.native_matmul": False,
"max_autotune_gemm_backends": "TRITON",
"test_configs.autotune_choice_name_regex": "tma",
}
):
compiled_mm = torch.compile(func, mode="max-autotune-no-cudagraphs")
compiled_mm(*args) # Warm-up compilation
with tempfile.NamedTemporaryFile() as f:
with torch.profiler.profile(
activities=[torch.profiler.ProfilerActivity.CUDA]
) as prof:
# Run with the specified carveout
compiled_mm(*args)
# Export trace and analyze results
prof.export_chrome_trace(f.name)
# Extract grid sizes from the trace events for TMA kernels
kernel_name = "triton_tem_fused"
with open(f.name) as file:
kernel_events = [
{
"grid": evt.get("args", {}).get("grid", []),
"grid_size": math.prod(evt.get("args", {}).get("grid", [])),
}
for evt in json.load(file)["traceEvents"]
if evt.get("cat", "") == "kernel"
and kernel_name in evt.get("name", "").lower()
]
# We should have exactly 1 kernel event for this run
self.assertEqual(
len(kernel_events),
1,
f"Expected exactly 1 kernel event, but got {len(kernel_events)}",
)
# Check that grid size matches expected values based on carveout
expected_grid_size = None
max_grid_size = torch.cuda.get_device_properties(
"cuda"
).multi_processor_count
careveout = 0 if carveout is None else carveout
expected_grid_size = max_grid_size - careveout
self.assertEqual(
kernel_events[0]["grid_size"],
expected_grid_size,
f"Grid size {kernel_events[0]['grid_size']} doesn't match {expected_grid_size} for carveout={carveout}",
)
@parametrize("dynamic", (False, True))
def test_max_autotune_addmm_zero_size_input(self, dynamic):
"""
Make sure autotuning addmm with zero-size input works without crashes.
"""
def addmm(x, a, b):
return torch.addmm(x, a, b)
x = torch.randn(100).to(GPU_TYPE)
a = torch.randn(0, 10).to(GPU_TYPE)
b = torch.randn(10, 100).to(GPU_TYPE)
with config.patch({"max_autotune": True}):
torch.compile(addmm, dynamic=dynamic)(x, a, b)
    @parametrize("search_space", ("DEFAULT", "EXHAUSTIVE"))
    def test_autotune_conv1x1(self, search_space):
        """Autotune a channels-last 1x1 convolution with Triton-only backends.

        Verifies the conv is lowered to a Triton template (no
        ``extern_kernels.convolution`` fallback in the generated code) and that
        the compiled output matches eager.
        """
        # Assuming input has 3 channels and we want to produce 16 channels as output
        conv1x1 = (
            torch.nn.Conv2d(in_channels=3, out_channels=16, kernel_size=1)
            .to(memory_format=torch.channels_last)
            .to(GPU_TYPE)
        )
        # Example input tensor: batch size = 4, channels = 3, height = 32, width = 32
        # The memory format is set to `channels_last`
        input_tensor = (
            torch.randn(4, 3, 32, 32)
            .contiguous(memory_format=torch.channels_last)
            .to(GPU_TYPE)
        )
        with config.patch(
            {
                "max_autotune": True,
                "max_autotune_gemm_backends": "TRITON",
                "max_autotune_gemm_search_space": search_space,
            }
        ):
            @torch.compile()
            def foo(mod, x):
                return mod(x)
            with torch.no_grad():
                out, code = run_and_get_code(foo, conv1x1, input_tensor)
        # No extern convolution kernel means the Triton template won autotuning.
        FileCheck().check_not("extern_kernels.convolution").run(code[0])
        self.assertEqual(conv1x1(input_tensor), out, atol=1e-2, rtol=0)
    @fresh_cache()
    @config.patch(max_autotune=True, max_fusion_size=2)
    def test_jit_fusion_matches_aot_fusion(self):
        """AOT compile must make the same fusion decisions as JIT compile."""
        # In this example, AOTInductor's JIT-compile will fuse(buf1, buf2) due
        # to proximity, we want to make sure AOT-compile pass does the same.
        # AOT could do fuse(buf2, buf4) instead if buf3 was pushed to the end
        # of the V.graph.buffers list because fuse(buf2, buf4) would have a
        # better proximity score than fuse(buf1, buf2). This scenario is possible
        # since finalizing MultiTemplateBuffers needs to replace buffers.
        def fn(x, number):
            buf0 = x + x
            buf1 = number.item()
            buf2 = x * x
            buf3 = x @ x  # MultiTemplateBuffer
            buf4 = x**2
            return buf0, buf1, buf2, buf3, buf4
        inputs = (
            torch.rand([256, 256], device=GPU_TYPE),
            torch.tensor(3, device=GPU_TYPE),
        )
        # Only checks that AOT compilation succeeds with these fusion choices.
        torch._export.aot_compile(fn, args=inputs)
def test_cat_addmm(self):
def fn(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor):
return torch.cat(
[
torch.addmm(a, b, c),
torch.addmm(b, c, a),
],
1,
)
args = [
torch.randn(4, 4, device=GPU_TYPE),
torch.randn(4, 4, device=GPU_TYPE),
torch.randn(4, 4, device=GPU_TYPE),
]
with config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": "Triton",
}
):
expected = fn(*args)
actual = torch.compile(fn)(*args)
torch.testing.assert_close(actual, expected, atol=1e-2, rtol=1e-2)
@config.patch(
benchmark_kernel=True,
fallback_random=True,
max_autotune_gemm=True,
)
@parametrize("device", ("cpu", GPU_TYPE))
def test_matmul_dropout(self, device):
def fwd(a, b):
x = a @ b
x = torch.nn.functional.dropout(x, 0.1)
return x
def fn(a, b):
x = fwd(a, b).sum()
x.backward()
return a.grad
N = 128
a = torch.randn(N, N, device=device, requires_grad=True)
b = torch.randn(N, N, device=device)
opt_fn = torch.compile(fn)
reset_rng_state()
ref = fn(a, b)
reset_rng_state()
act = opt_fn(a, b)
if N <= 8:
print(f"ref\n{ref}\nact\n{act}")
torch.testing.assert_close(ref, act, atol=1e-1, rtol=1e-1)
    @config.patch(
        max_autotune_gemm=True,
    )
    @unittest.skipIf(
        getattr(torch, GPU_TYPE).device_count() < 2,
        "Need at least 2 devices for this test",
    )
    def test_autotune_device_guard(self):
        """Autotune a matmul on a non-default device (index 1) and check the
        result matches eager — exercises the device guard used while
        benchmarking autotune candidates."""
        x = torch.randn(1024, 1024, device=f"{GPU_TYPE}:1")
        y = torch.randn(1024, 1024, device=f"{GPU_TYPE}:1")
        def f(x, y):
            return x @ y
        # fresh_cache forces real autotuning rather than a cache hit.
        with fresh_cache():
            act = torch.compile(f)(x, y)
        ref = f(x, y)
        self.assertTrue(torch.allclose(act, ref, atol=4 * 1e-3, rtol=4 * 1e-3))
    @config.patch(max_autotune=True)
    @parametrize("search_space", ("DEFAULT", "EXHAUSTIVE"))
    @parametrize("kernel_size", (1, 3))
    def test_empty_conv_input(self, search_space, kernel_size):
        """Convolution with a zero-size batch must compile under max-autotune
        and match eager output."""
        # Batch dimension is 0: the output is empty but shapes must still be right.
        x = torch.randn(0, 256, 14, 14, device=GPU_TYPE)
        weight = torch.randn(256, 256, kernel_size, kernel_size, device=GPU_TYPE)
        def f(x, weight):
            return torch.convolution(
                x,
                weight,
                bias=None,
                stride=[1, 1],
                padding=[0, 0],
                dilation=[1, 1],
                transposed=False,
                output_padding=[0, 0],
                groups=1,
            )
        with config.patch({"max_autotune_gemm_search_space": search_space}):
            opt_f = torch.compile(f)
            ref = f(x, weight)
            act = opt_f(x, weight)
            self.assertTrue(torch.allclose(ref, act, atol=4 * 1e-3, rtol=4 * 1e-3))
    @skipIfXpu(
        msg="Fails on Intel XPU; see https://github.com/pytorch/pytorch/issues/161484"
    )
    @config.patch(max_autotune_gemm_backends="TRITON")
    @parametrize("search_space", ("DEFAULT", "EXHAUSTIVE"))
    def test_baddmm(self, search_space):
        """baddbmm via max-autotune with Triton-only backends: output must
        match eager and (for template-based matmul) the generated code must
        contain the fused baddbmm Triton template kernel."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                # weight: (B, K, N); bias broadcast over the middle dim: (B, 1, N)
                self.weight = torch.nn.Parameter(
                    torch.randn(64, 64, 192, dtype=torch.float16)
                )
                self.bias = torch.nn.Parameter(
                    torch.randn(64, 1, 192, dtype=torch.float16)
                )
            def forward(self, x):
                return torch.ops.aten.baddbmm.default(self.bias, x, self.weight)
        x = torch.randn(
            64, 2048, 64, dtype=torch.float16, requires_grad=False, device=GPU_TYPE
        )
        mod = M().to(GPU_TYPE)
        with config.patch({"max_autotune_gemm_search_space": search_space}):
            m_c = torch.compile(mode="max-autotune")(mod)
            out, code = run_and_get_code(m_c, x)
            self.assertEqual(out, mod(x), atol=2e-3, rtol=2e-3)
        # Kernel-name check only applies when templates (not native matmul
        # codegen) are in use.
        if not config.triton.native_matmul:
            FileCheck().check("triton_tem_fused_baddbmm").run(code[0])
    @config.patch(max_autotune=True)
    def test_conv1x1_with_free_symbols(self):
        """
        Make sure there is no exception due to free symbols.
        """
        conv = nn.Conv2d(
            3, 64, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=False
        ).to(device=GPU_TYPE)
        @torch.compile
        def f(x, y, z):
            # h and w are data-dependent sizes (nonzero counts), which show up
            # as free symbols in the conv input shape.
            h = y.nonzero().size(0)
            w = z.nonzero().size(0)
            x = x[:, :, :h, :w]
            x = conv(x)
            return x
        x = torch.randn(4, 3, 224, 224).to(
            memory_format=torch.channels_last, device=GPU_TYPE
        )
        # Run twice with fresh random inputs — presumably to also exercise the
        # already-compiled path with different dynamic sizes; verify against caller history.
        for _ in range(2):
            y = torch.randint(0, 10, (224,)).to(device=GPU_TYPE)
            z = torch.randint(0, 10, (224,)).to(device=GPU_TYPE)
            f(x, y, z)
    def _test_cat_max_autotune_impl(self, using_triton_mm):
        """Shared body for the cat+mm max-autotune tests below.

        Compiles three variants of "mm feeding a cat" and checks numerics plus
        the number of kernel launches in the generated code. With an extern mm
        (``using_triton_mm=False`` and template-based matmul) the cat epilogue
        fuses into one kernel; a Triton mm adds a second launch.
        """
        def f(x, y):
            y = torch.cos(y)
            x = torch.mm(x, x)
            return torch.cat([x, y])
        f_c = torch.compile(mode="max-autotune-no-cudagraphs")(f)
        inps = [
            torch.randn(32, 32, device=GPU_TYPE),
            torch.randn(32, 32, device=GPU_TYPE),
        ]
        _, code = run_and_get_code(f_c, inps[0], inps[1])
        self.assertEqual(f_c(*inps), f(*inps), atol=0.03, rtol=0.25)
        # mm kernel, and cos kernel
        count = 2 if (using_triton_mm or config.triton.native_matmul) else 1
        FileCheck().check(get_func_call()).check_count(
            get_kernel_launch(), count, exactly=True
        ).run(code[0])
        # Variant 2: the mm result is also returned (x + 1), so it cannot be
        # fully absorbed into the cat — expect exactly 2 kernel launches.
        def f(x, y):
            y = torch.cos(y)
            x = torch.mm(x, x)
            out = torch.cat([x, y])
            return out, x + 1
        f_c = torch.compile(mode="max-autotune-no-cudagraphs")(f)
        _, code = run_and_get_code(f_c, inps[0], inps[1])
        self.assertEqual(f_c(*inps), f(*inps), atol=0.03, rtol=0.25)
        FileCheck().check(get_func_call()).check_count(
            get_kernel_launch(), 2, exactly=True
        ).run(code[0])
        # Variant 3: mm feeds two different cats; only numerics are checked.
        def f(x, y):
            y = torch.cos(y)
            x = torch.mm(x, x)
            return torch.cat([x, y]), torch.cat([y, x])
        f_c = torch.compile(mode="max-autotune-no-cudagraphs")(f)
        self.assertEqual(f_c(*inps), f(*inps), atol=0.03, rtol=0.25)
@config.patch("trace.enabled", True)
@config.patch({"test_configs.force_extern_kernel_in_multi_template": True})
@config.patch("triton.native_matmul", False)
def test_mutation_rename(self):
torch._logging.set_logs(ir_post_fusion=True)
def f(x, y, z, other):
mul = x * y
diag = torch.diagonal(mul)
diag.copy_(other)
x = torch.mm(mul, z)
y = torch.diagonal(x).add_(torch.tensor(1, device=GPU_TYPE))
return y
t = functools.partial(torch.randn, device=GPU_TYPE)
inps = (t(3, 3), t(3, 3), t(3, 3), t(3))
fn = torch.compile(f, mode="max-autotune-no-cudagraphs")
(
(
pre_fusion_tream,
post_fusion_stream,
),
ctx,
) = multiple_logs_to_string(
"torch._inductor.debug", "ir_pre_fusion", "ir_post_fusion"
)
with config.patch({"trace.debug_dir": tempfile.mkdtemp()}):
with (
self.assertLogs(
logging.getLogger("torch._inductor.debug"), level=logging.INFO
) as cm,
ctx(),
):
out = fn(*inps)
self.assertEqual(f(*inps), out)
pre_fusion_stream = cm.output[0]
post_fusion_stream = cm.output[1]
# before and after finalizing multi template buffer, deps should have the same normalization
# wrt writes
FileCheck().check("MultiTemplateBuffer").check("unmet").check_same("buf1").run(
pre_fusion_stream
)
FileCheck().check("ExternKernelSchedulerNode").check("unmet").check_same(
"buf1"
).run(post_fusion_stream)
torch._logging.set_logs()
    @config.patch({"test_configs.force_extern_kernel_in_multi_template": True})
    def test_cat_max_autotune_extern(self):
        # Extern mm is forced, so the cat epilogue should fuse (see impl).
        self._test_cat_max_autotune_impl(using_triton_mm=False)
    @skipIfXpu(
        msg="The fusion not happened because it do not speedup on XPU, see issue #146568"
    )
    @config.patch(
        {
            "max_autotune_gemm_backends": "TRITON",
            "benchmark_epilogue_fusion": False,
        }
    )
    def test_cat_max_autotune_triton(self):
        # Triton mm is forced, which adds a separate mm kernel launch (see impl).
        self._test_cat_max_autotune_impl(using_triton_mm=True)
    @parametrize("search_space", ("DEFAULT", "EXHAUSTIVE"))
    def test_conv_cat(self, search_space):
        """conv feeding a cat under max-autotune: output matches eager and the
        add+cat epilogue is generated as one pointwise kernel."""
        class ToyModel(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.conv = torch.nn.Conv2d(
                    3, 64, kernel_size=3, stride=1, padding=1, bias=False
                )
            def forward(self, x):
                x = self.conv(x)
                return torch.cat((x, x + 1))
        with config.patch({"max_autotune_gemm_search_space": search_space}):
            with torch.no_grad():
                m = ToyModel().to(device=GPU_TYPE)
                input_tensor = torch.randn(32, 3, 64, 64).to(device=GPU_TYPE)
                # convolution is not currently plannable
                m = torch.compile(m, mode="max-autotune-no-cudagraphs")
                out, code = run_and_get_code(m, input_tensor)
                self.assertEqual(out, m(input_tensor))
                if not TEST_WITH_ROCM:
                    FileCheck().check("def triton_poi_fused_add_cat_").run(code[0])
@parametrize("search_space", ("DEFAULT", "EXHAUSTIVE"))
def test_conv3d(self, search_space):
fn = torch.nn.functional.conv3d
image = torch.randn([1, 3, 8, 16, 32])
filt = torch.randn([3, 3, 7, 7, 7])
with config.patch(
{"max_autotune": True, "max_autotune_gemm_search_space": search_space}
):
expected = fn(image, filt)
actual = torch.compile(fn)(image, filt)
torch.testing.assert_close(actual, expected, atol=6e-5, rtol=0.001)
    @config.patch(
        max_autotune=True, max_autotune_conv_backends="", layout_optimization=False
    )
    def test_conv_backend(self):
        """With the conv backend list emptied, max-autotune has no candidates
        and compilation must fail with NoValidChoicesError."""
        m = torch.nn.Sequential(
            torch.nn.Conv2d(3, 3, 1, 1),
        ).to(GPU_TYPE)
        inp = torch.randn([2, 3, 16, 16]).to(GPU_TYPE)
        with self.assertRaises(BackendCompilerFailed) as context:
            torch.compile(m)(inp)
        self.assertIn("NoValidChoicesError", str(context.exception))
    @skipIfRocmArch(NAVI_ARCH)
    def test_non_contiguous_input_mm(self):
        """
        Make sure the triton template can work with non-contiguous inputs without crash.
        Check https://github.com/pytorch/pytorch/issues/125437 for more details.
        """
        # Column-major-ish layout with padded leading stride (50304 > 50257).
        x = rand_strided(
            (50257, 2048), (1, 50304), dtype=torch.bfloat16, device=GPU_TYPE
        )
        y = rand_strided((2048, 768), (768, 1), dtype=torch.bfloat16, device=GPU_TYPE)
        @torch.compile(mode="max-autotune")
        def f(x, y):
            return x @ y
        ref = x @ y
        act = f(x, y)
        torch.testing.assert_close(act, ref, atol=2e-2, rtol=1e-2)
    @skipIfRocmArch(NAVI_ARCH)
    def test_non_contiguous_input_addmm(self):
        """addmm variant of the non-contiguous-input test above: compiled
        result must match eager for a strided (padded) first matmul operand."""
        b = torch.randn((768), dtype=torch.bfloat16, device=GPU_TYPE)
        x = rand_strided(
            (50257, 2048), (1, 50304), dtype=torch.bfloat16, device=GPU_TYPE
        )
        y = rand_strided((2048, 768), (768, 1), dtype=torch.bfloat16, device=GPU_TYPE)
        @torch.compile(mode="max-autotune")
        def f(x, y):
            return torch.addmm(b, x, y)
        ref = torch.addmm(b, x, y)
        act = f(x, y)
        torch.testing.assert_close(act, ref, atol=2e-2, rtol=1e-2)
    @skipIfRocmArch(NAVI_ARCH)
    def test_non_contiguous_input_bmm(self):
        """bmm variant of the non-contiguous-input tests: batch stride 0 plus a
        padded inner stride; compiled result must match eager."""
        x = rand_strided(
            (1, 50257, 2048), (0, 1, 50304), dtype=torch.bfloat16, device=GPU_TYPE
        )
        y = rand_strided(
            (1, 2048, 768), (0, 768, 1), dtype=torch.bfloat16, device=GPU_TYPE
        )
        @torch.compile(mode="max-autotune")
        def f(x, y):
            return torch.bmm(x, y)
        ref = torch.bmm(x, y)
        act = f(x, y)
        torch.testing.assert_close(act, ref, atol=2e-2, rtol=1e-2)
    # TODO: fix accuracy failure of the triton template on XPU.
    # and enable this test case.
    @skipIfXpu
    @unittest.skipIf(
        config.triton.native_matmul,
        "native matmul and Triton template both have accuracy fail (2.2%)",
    )
    def test_non_contiguous_input_mm_plus_mm(self):
        """mm+mm fusion variant of the non-contiguous-input tests: both mms
        take strided (padded) inputs; compiled sum must match eager."""
        x1 = rand_strided((50257, 2048), (1, 50304), device=GPU_TYPE)
        y1 = rand_strided((2048, 768), (768, 1), device=GPU_TYPE)
        x2 = rand_strided((50257, 2048), (1, 50304), device=GPU_TYPE)
        y2 = rand_strided((2048, 768), (768, 1), device=GPU_TYPE)
        @torch.compile(mode="max-autotune")
        def f(x1, y1, x2, y2):
            return x1 @ y1 + x2 @ y2
        ref = x1 @ y1 + x2 @ y2
        act = f(x1, y1, x2, y2)
        torch.testing.assert_close(act, ref, atol=1e-1, rtol=1e-2)
    @config.patch(
        max_autotune=True,
        max_autotune_gemm_backends="",
    )
    @unittest.skipIf(
        config.triton.native_matmul, "native matmul generates when size >=2"
    )
    def test_no_valid_choices(self):
        """Empty GEMM backend list under max-autotune leaves no autotune
        candidates, so compilation must fail with NoValidChoicesError."""
        a = torch.zeros([2, 2], device=GPU_TYPE)
        b = torch.zeros([2, 2], device=GPU_TYPE)
        with self.assertRaises(BackendCompilerFailed) as context:
            torch.compile(lambda a, b: a.matmul(b))(a, b)
        self.assertIn("NoValidChoicesError", str(context.exception))
    @unittest.skipIf(
        config.triton.native_matmul, "Only test when template is being called"
    )
    @parametrize("multi_template", (True, False))
    @config.patch(
        max_autotune=True,
        max_autotune_gemm_backends="TRITON",
    )
    def test_inf_timing(self, multi_template):
        """If every autotune candidate benchmarks to inf, compilation must
        fail with NoValidChoicesError rather than pick a bogus winner."""
        from unittest.mock import patch
        lookup = AlgorithmSelectorCache.lookup
        # Wrap the real lookup but force every timing to inf.
        def mock_lookup(self, *args, **kwargs):
            timings = lookup(self, *args, **kwargs)
            return {choice: float("inf") for choice in timings}
        a = torch.zeros([16, 16], device=GPU_TYPE)
        b = torch.zeros([16, 16], device=GPU_TYPE)
        with (
            patch.object(AlgorithmSelectorCache, "lookup", mock_lookup),
            config.patch(benchmark_epilogue_fusion=multi_template),
        ):
            with self.assertRaises(BackendCompilerFailed) as context:
                torch.compile(lambda a, b: a.matmul(b))(a, b)
            self.assertIn("NoValidChoicesError", str(context.exception))
    @config.patch(force_shape_pad=True, max_autotune=True)
    def test_linear_and_cel(self):
        """
        Simulate a GPU without enough SMs. Make sure max-autotune still
        works even when the MultiTritonTemplate encapsulates just extern
        kernels.
        """
        # Pretend the GPU is "small" so Triton templates are skipped.
        def mock_is_big_gpu(*args, **kwargs):
            return False
        B, T, C, V = 32, 1024, 768, 50257
        linear = nn.Linear(C, V).bfloat16().to(device=GPU_TYPE)
        ce = torch.nn.CrossEntropyLoss()
        def f(x, y):
            # Clear grads manually so each call accumulates fresh gradients.
            x.grad = None
            linear.weight.grad = None
            linear.bias.grad = None
            loss = ce(linear(x), y)
            loss.backward()
            return loss
        x = torch.randn(B * T, C, requires_grad=True).to(GPU_TYPE).bfloat16()
        x.retain_grad()
        y = torch.randint(0, V, (B * T,)).to(GPU_TYPE)
        import torch._inductor.utils as inductor_utils
        with unittest.mock.patch.object(inductor_utils, "is_big_gpu", mock_is_big_gpu):
            opt_f = torch.compile(f)
            expect = (f(x, y), x.grad, linear.weight.grad, linear.bias.grad)
            actual = (opt_f(x, y), x.grad, linear.weight.grad, linear.bias.grad)
            assert same(expect, actual, tol=1e-2), f"ref:\n{expect}\nact:\n{actual}"
@skipIfXpu
@unittest.skipIf(
config.cpp_wrapper, "decompose_k not supported for cpp_wrapper yet"
)
@unittest.skipIf(
config.triton.native_matmul,
"ignore decompose_k when native matmul codegen",
)
@parametrize("dynamic", (True, False))
@parametrize("dtype", (torch.float16, torch.bfloat16))
@parametrize("sizes", ((32, 32, 32768), (64, 128, 200000), (64, 64, 177147)))
@config.patch(
max_autotune=True,
max_autotune_gemm_backends="TRITON",
comprehensive_padding=False,
shape_padding=False,
)
def test_max_autotune_decompose_k(self, sizes, dtype, dynamic):
fp16_red_setting = (
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
)
bf16_red_setting = (
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction
)
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
M, N, K = sizes
a = torch.randn(M, K, dtype=dtype, device=GPU_TYPE, requires_grad=True)
b = torch.randn(K, N, dtype=dtype, device=GPU_TYPE, requires_grad=True)
possible_splits = range(2, min(K // M, K // N) + 1)
divisors = {split for split in possible_splits if K % split == 0}
def check_divisors(code):
for kernel in code:
if "decompose_k" in kernel:
divisor_found = False
for divisor in divisors:
if f"{divisor}_split" in kernel:
divisor_found = True
break
self.assertTrue(
divisor_found,
f"Could not find a split in {divisors} in {kernel}",
)
compiled_func = torch.compile(lambda a, b: a @ b, dynamic=dynamic)
# We assume with the large k dim relative to m, n, decompose_k will be most performant
out, code = run_and_get_code(compiled_func, a, b)
if dynamic or torch.version.hip:
FileCheck().check_not("extern_kernels.bmm_dtype").check_not(
"decompose_k"
).run(code[0])
else:
FileCheck().check("extern_kernels.bmm_dtype").check_regex(
"triton_.*_fused_0.run"
).check("decompose_k").run(code[0])
check_divisors(code)
torch.testing.assert_close(out, a @ b, atol=1e-2, rtol=1e-2)
# Test adding epilogue also equivalent to eager
compiled_func = torch.compile(lambda a, b: (a @ b).relu(), dynamic=dynamic)
out, code = run_and_get_code(compiled_func, a, b)
if dynamic or torch.version.hip:
FileCheck().check_not("extern_kernels.bmm_dtype").check_not(
"decompose_k"
).run(code[0])
else:
FileCheck().check("extern_kernels.bmm_dtype").check_regex(
"triton_.*_fused_mm_0.run"
).check("decompose_k").run(code[0])
check_divisors(code)
torch.testing.assert_close(
compiled_func(a, b), (a @ b).relu(), atol=1e-2, rtol=1e-2
)
# Test adding reinterpret view before subgraph
a = a.transpose(0, 1)
compiled_func = torch.compile(
lambda a, b: (a.transpose(0, 1) @ b).relu(), dynamic=dynamic
)
out, code = run_and_get_code(compiled_func, a, b)
# DecomposeK is not enabled for AMD yet
if dynamic or torch.version.hip:
FileCheck().check_not("extern_kernels.bmm_dtype").check_not(
"decompose_k"
).run(code[0])
else:
FileCheck().check("extern_kernels.bmm_dtype").check_regex(
"triton_.*_fused_.*_0.run"
).check("decompose_k").run(code[0])
check_divisors(code)
torch.testing.assert_close(
compiled_func(a, b),
(a.transpose(0, 1) @ b).relu(),
atol=1e-2,
rtol=1e-2,
)
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = (
fp16_red_setting
)
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = (
bf16_red_setting
)
    @skipIfXpu
    @unittest.skipIf(TEST_WITH_ROCM, "decompose_k not supported on ROCm")
    @unittest.skipIf(
        config.cpp_wrapper, "decompose_k not supported for cpp_wrapper yet"
    )
    @unittest.skipIf(
        config.triton.native_matmul,
        "ignore decompose_k when native matmul codegen",
    )
    @config.patch(
        max_autotune=True,
        max_autotune_gemm_backends="TRITON",
    )
    def test_max_autotune_decompose_k_dynamic_input(self):
        """decompose_k with a dynamic batch-like dim introduced by stack: the
        generated code must contain the bmm + decompose_k pattern and the
        expected dynamic-size symbols, and numerics must match eager."""
        def f(a, b):
            a_in = torch.stack((a, a), dim=0)
            return (a_in @ b).relu()
        a = torch.randn(
            32, 32768, dtype=torch.bfloat16, device=GPU_TYPE, requires_grad=True
        )
        b = torch.randn(
            32768, 64, dtype=torch.bfloat16, device=GPU_TYPE, requires_grad=True
        )
        torch._dynamo.reset()
        torch._dynamo.maybe_mark_dynamic(a, 0)
        compiled_func = torch.compile(f)
        # Force the decompose_k choice regardless of heuristics.
        with mock.patch(
            "torch._inductor.kernel.mm.use_decompose_k_choice"
        ) as decomp_mock:
            decomp_mock.side_effect = (
                lambda *args, **kwargs: kwargs.get("threshold_multiple", 1) == 1
            )
            out, code = run_and_get_code(compiled_func, a, b)
            FileCheck().check("extern_kernels.bmm_dtype").check_regex(
                "triton_.*_fused_.*.run"
            ).check("decompose_k").check_regex(r"s[0-9]+ = s[0-9]+").check_regex(
                r"2\*s[0-9]+"
            ).check_regex("s[0-9]+ = 32").run(code[0])
            torch.testing.assert_close(
                out,
                f(a, b),
                atol=1e-2,
                rtol=1e-2,
            )
    @skipIfXpu
    @unittest.skipIf(TEST_WITH_ROCM, "decompose_k not supported on ROCm")
    @unittest.skipIf(
        config.cpp_wrapper, "decompose_k not supported for cpp_wrapper yet"
    )
    @unittest.skipIf(
        config.triton.native_matmul,
        "ignore decompose_k when native matmul codegen",
    )
    @config.patch(
        max_autotune=True,
        max_autotune_gemm_backends="TRITON",
    )
    def test_max_autotune_decompose_k_dynamic_input_bwd(self):
        """Backward-pass variant of the dynamic-input decompose_k test: checks
        the backward generated code (code[1]) for the bmm + decompose_k pattern
        and the expected dynamic-size symbols."""
        def f(a, b):
            # 256 * s0
            a_in = torch.cat([a for _ in range(256)], dim=0)
            return (a_in @ b).relu().sum()
        a = torch.randn(
            8, 64, dtype=torch.bfloat16, device=GPU_TYPE, requires_grad=True
        )
        b = torch.randn(
            64, 32768, dtype=torch.bfloat16, device=GPU_TYPE, requires_grad=True
        )
        torch._dynamo.reset()
        torch._dynamo.maybe_mark_dynamic(a, 0)
        compiled_func = torch.compile(f)
        # Warm-up fwd+bwd before forcing the decompose_k choice below.
        res = compiled_func(a, b)
        res.backward()
        with mock.patch(
            "torch._inductor.kernel.mm.use_decompose_k_choice"
        ) as decomp_mock:
            decomp_mock.side_effect = (
                lambda *args, **kwargs: kwargs.get("threshold_multiple", 1) == 1
            )
            out, code = run_and_get_code(compiled_func, a, b)
            out.backward()
            FileCheck().check("extern_kernels.bmm_dtype").check_regex(
                "triton_.*_fused_0.run"
            ).check("decompose_k").check_regex(r"s[0-9]+ = s[0-9]+").check_regex(
                r"256\*s[0-9]+"
            ).check_regex("s[0-9]+ = 8").run(
                # code[1] in this case given backwards
                code[1]
            )
    @skipIfXpu
    @unittest.skipIf(TEST_WITH_ROCM, "decompose_k not supported on ROCm")
    @unittest.skipIf(
        config.cpp_wrapper, "decompose_k not supported for cpp_wrapper yet"
    )
    @unittest.skipIf(
        config.triton.native_matmul,
        "ignore decompose_k when native matmul codegen",
    )
    @config.patch(
        max_autotune=True,
        max_autotune_gemm_backends="TRITON",
    )
    def test_max_autotune_decompose_k_output_stride(self):
        """decompose_k must preserve the original graph output stride when the
        second operand is a narrowing slice (stride 1096, not the parent 1152)."""
        def f(a, b):
            a = a.transpose(0, 1)
            return a @ b
        a = torch.randn((32768, 256), device=GPU_TYPE, dtype=torch.bfloat16)
        b = torch.randn((32768, 1152), device=GPU_TYPE, dtype=torch.bfloat16)
        # Slice columns so the expected output stride (1096) differs from the
        # parent allocation's (1152).
        b = b[:, :1096]
        # Force only decomposeK choice
        with (
            override_template_heuristics(
                device_type=GPU_TYPE,
                template_op_pairs=[(torch._inductor.kernel.mm.mm_template.name, "mm")],
            ),
            mock.patch(
                "torch._inductor.kernel.mm.use_decompose_k_choice"
            ) as decompose_mock,
        ):
            decompose_mock.return_value = True
            compiled_f = torch.compile(f)
            out, code = run_and_get_code(compiled_f, a, b)
            # Output stride equal to original gm output stride
            # If output stride is not correctly checked, this will be (1152, 1) which can cause nans
            self.assertEqual(out.stride(), (1096, 1))
            FileCheck().check_not("extern_kernels.bmm_dtype").check(
                "decompose_k"
            ).check(" empty_strided_cuda((256, 1096), (1096, 1), torch.bfloat16)").run(
                code[0]
            )
    @unittest.skipIf(not torch.version.hip, "ROCM only")
    @parametrize("dtype", (torch.float16, torch.bfloat16, torch.float32))
    @parametrize("sizes", ((64, 128, 256), (128, 256, 512), (256, 512, 1024)))
    @config.patch(
        max_autotune=True,
    )
    def test_max_autotune_contiguous_transform_mm(self, sizes, dtype):
        """
        Test the contiguous subgraph transform with A * transpose(B) pattern.
        This transform makes the second matrix contiguous before the matmul.
        """
        M, N, K = sizes
        def mm_transpose(a, b):
            return a @ b.transpose(0, 1)
        a = torch.randn(M, K, dtype=dtype, device=GPU_TYPE, requires_grad=True)
        b = torch.randn(N, K, dtype=dtype, device=GPU_TYPE, requires_grad=True)
        # Compute fp64 baseline
        a_fp64 = a.to(torch.float64)
        b_fp64 = b.to(torch.float64)
        expected_fp64 = mm_transpose(a_fp64, b_fp64)
        # Force only contiguous choice to test the transform
        with (
            mock.patch(
                "torch._inductor.template_heuristics.contiguous_mm.use_contiguous"
            ) as contiguous_mock,
        ):
            contiguous_mock.return_value = True
            compiled_func = torch.compile(mm_transpose)
            out, code = run_and_get_code(compiled_func, a, b)
            # Verify correctness against fp64 baseline
            torch.testing.assert_close(
                out, expected_fp64.to(dtype), atol=1e-2, rtol=1e-2
            )
            # Check that contiguous transform was used
            FileCheck().check("contiguous_mm").run(code[0])
    @unittest.skipIf(not torch.version.hip, "ROCM only")
    @parametrize("dtype", (torch.float16, torch.bfloat16, torch.float32))
    @parametrize("sizes", ((64, 128, 256), (128, 256, 512), (256, 512, 1024)))
    @config.patch(
        max_autotune=True,
    )
    def test_max_autotune_contiguous_transform_addmm(self, sizes, dtype):
        """
        Test the contiguous subgraph transform for addmm with non-contiguous second matrix.
        """
        M, N, K = sizes
        def addmm_transpose(inp, a, b):
            return torch.addmm(inp, a, b.transpose(0, 1))
        inp = torch.randn(M, N, dtype=dtype, device=GPU_TYPE, requires_grad=True)
        a = torch.randn(M, K, dtype=dtype, device=GPU_TYPE, requires_grad=True)
        b = torch.randn(N, K, dtype=dtype, device=GPU_TYPE, requires_grad=True)
        # Compute fp64 baseline
        inp_fp64 = inp.to(torch.float64)
        a_fp64 = a.to(torch.float64)
        b_fp64 = b.to(torch.float64)
        expected_fp64 = addmm_transpose(inp_fp64, a_fp64, b_fp64)
        # Force contiguous choice to test the transform
        with (
            mock.patch(
                "torch._inductor.template_heuristics.contiguous_mm.use_contiguous"
            ) as contiguous_mock,
        ):
            contiguous_mock.return_value = True
            compiled_func = torch.compile(addmm_transpose)
            out, code = run_and_get_code(compiled_func, inp, a, b)
            # Verify correctness against fp64 baseline
            torch.testing.assert_close(
                out, expected_fp64.to(dtype), atol=1e-2, rtol=1e-2
            )
            # Check that contiguous transform was used
            FileCheck().check("contiguous_addmm").run(code[0])
@unittest.skipIf(not torch.version.hip, "ROCM only")
@parametrize("dynamic", (False, True))
def test_max_autotune_contiguous_transform_non_contiguous_second_matrix(
self, dynamic
):
"""
Test that contiguous transform is only applied when the second matrix is non-contiguous.
"""
M, N, K = 64, 128, 64
def mm(a, b):
return a @ b
a = torch.randn(M, K, dtype=torch.float32, device=GPU_TYPE)
b_contiguous = torch.randn(K, N, dtype=torch.float32, device=GPU_TYPE)
b_non_contiguous = torch.randn(
N, K, dtype=torch.float32, device=GPU_TYPE
).transpose(0, 1)
# Compute fp64 baselines without max_autotune (since fp64 doesn't work with max_autotune=True)
a_fp64 = a.to(torch.float64)
b_contiguous_fp64 = b_contiguous.to(torch.float64)
b_non_contiguous_fp64 = b_non_contiguous.to(torch.float64)
expected1_fp64 = mm(a_fp64, b_contiguous_fp64)
expected2_fp64 = mm(a_fp64, b_non_contiguous_fp64)
with config.patch(
max_autotune=True,
):
# Test with contiguous second matrix - should not use contiguous transform
compiled_func_contiguous = torch.compile(mm, dynamic=dynamic)
out1, code1 = run_and_get_code(compiled_func_contiguous, a, b_contiguous)
# Should not contain contiguous transform
try:
FileCheck().check("contiguous_mm").run(code1[0])
self.fail(
"Contiguous transform should not be used for contiguous matrices"
)
except RuntimeError:
pass # Expected - contiguous transform should not be used
# Test with non-contiguous second matrix - should use contiguous transform
with (
mock.patch(
"torch._inductor.template_heuristics.contiguous_mm.use_contiguous"
) as contiguous_mock,
):
contiguous_mock.return_value = True
compiled_func_non_contiguous = torch.compile(mm, dynamic=dynamic)
out2, code2 = run_and_get_code(
compiled_func_non_contiguous, a, b_non_contiguous
)
# Should contain contiguous transform
FileCheck().check("contiguous_mm").run(code2[0])
# Verify correctness against fp64 baselines
torch.testing.assert_close(
out1, expected1_fp64.to(torch.float32), atol=1e-2, rtol=1e-2
)
torch.testing.assert_close(
out2, expected2_fp64.to(torch.float32), atol=1e-2, rtol=1e-2
)
    @unittest.skipIf(not torch.version.hip, "ROCM only")
    @config.patch(
        max_autotune=True,
        max_autotune_gemm_backends="TRITON",
    )
    def test_max_autotune_contiguous_transform_with_epilogue(self):
        """
        Test contiguous transform with epilogue operations like relu.
        """
        M, N, K = 128, 256, 512
        def mm_transpose_relu(a, b):
            return (a @ b.transpose(0, 1)).relu()
        a = torch.randn(M, K, dtype=torch.float32, device=GPU_TYPE)
        b = torch.randn(N, K, dtype=torch.float32, device=GPU_TYPE)
        # Compute fp64 baseline
        a_fp64 = a.to(torch.float64)
        b_fp64 = b.to(torch.float64)
        expected_fp64 = mm_transpose_relu(a_fp64, b_fp64)
        # Force contiguous transform
        with (
            mock.patch(
                "torch._inductor.template_heuristics.contiguous_mm.use_contiguous"
            ) as contiguous_mock,
        ):
            contiguous_mock.return_value = True
            compiled_func = torch.compile(mm_transpose_relu)
            out, code = run_and_get_code(compiled_func, a, b)
            # Verify correctness against fp64 baseline
            torch.testing.assert_close(
                out, expected_fp64.to(torch.float32), atol=1e-2, rtol=1e-2
            )
            # Check that contiguous transform was used
            FileCheck().check("contiguous_mm").run(code[0])
    @unittest.skipIf(config.cpp_wrapper, "out_dtype override not supported for AOTI")
    @unittest.skipIf(TEST_WITH_ROCM, "out_dtype override only available on NVIDIA")
    def test_bmm_out_dtype(self):
        """bmm with an out_dtype override.

        With Triton-only GEMM backends, lowering fails with
        NoValidChoicesError (presumably the Triton template does not support
        the out_dtype override — verify against the lowering code). Compiled
        without that restriction, the extern ``bmm_dtype`` kernel is used.
        """
        def f(a, b):
            return torch.bmm(a, b, out_dtype=torch.float32)
        a = torch.randn(2, 3, 4, device=GPU_TYPE, dtype=torch.float16)
        b = torch.randn(2, 4, 5, device=GPU_TYPE, dtype=torch.float16)
        with config.patch(
            max_autotune=True,
            max_autotune_gemm_backends="TRITON",
        ):
            compiled_f = torch.compile(f)
            with self.assertRaisesRegex(
                torch._inductor.exc.InductorError,
                r"LoweringException: NoValidChoicesError: No choices to select",
            ):
                out, code = run_and_get_code(compiled_f, a, b)
        # Outside the TRITON-only patch: the extern kernel is a valid choice.
        compiled_f = torch.compile(f)
        out, code = run_and_get_code(compiled_f, a, b)
        FileCheck().check("extern_kernels.bmm_dtype").run(code[0])
    def test_triton_template_generated_code_cache_key(self):
        """Guard test: the generated-code cache key must cover every argument
        of TritonTemplate.generate_and_load (except generate_with_caching)."""
        generate_and_load_args = len(
            inspect.signature(
                torch._inductor.select_algorithm.TritonTemplate.generate_and_load
            ).parameters
        )
        make_key_args = len(
            inspect.signature(
                torch._inductor.select_algorithm.GeneratedCodeCache.make_key
            ).parameters
        )
        # Make sure all args of generate_and_load_args are passed to make_key_args (Except generate_with_caching)
        # update this function each time new arg added to generate_and_load and make sure arg is added to make_key
        self.assertEqual(generate_and_load_args - 1, make_key_args)
        # Hard-coded arg count forces a conscious update when the signature changes.
        self.assertEqual(generate_and_load_args, 18)
@fresh_cache()
@config.patch(
{
"max_autotune": True,
"test_configs.max_mm_configs": 4,
"max_autotune_gemm_backends": "TRITON",
}
)
@unittest.skipIf(config.triton.native_matmul, "only test on template-based matmul")
def test_triton_template_generated_code_cache_strategy(self):
def func_test1(x, y, z, m):
a = torch.matmul(x, y)
b = torch.matmul(z, m)
return a, b
a = torch.rand(10, 22, device=GPU_TYPE)
b = torch.rand(22, 30, device=GPU_TYPE)
# Test that the testing strategy works by overriding input_dependent_preserved_state and simulate a cache hit.
with unittest.mock.patch(
"torch._inductor.select_algorithm.TritonTemplateKernel.input_dependent_preserved_state",
new=(lambda self: "same always"),
):
with self.assertRaisesRegex(
torch._inductor.exc.InductorError,
r".*Generated code cache results in wrong output.*",
):
torch.compile(func_test1, dynamic=False)(a, b, a, b)
@config.patch(
{
"max_autotune": True,
"test_configs.max_mm_configs": 4,
"max_autotune_gemm_backends": "TRITON",
}
)
@unittest.skipIf(config.triton.native_matmul, "only test on template-based matmul")
def test_triton_template_generated_code_caching(self):
def reset_counters():
torch._dynamo.utils.counters.clear()
def hits():
return torch._dynamo.utils.counters["inductor"][
"generated_module_cache_hit"
]
def misses():
return torch._dynamo.utils.counters["inductor"][
"generated_module_cache_miss"
]
# remove white space from x.
def remove_white_space(x: str) -> str:
return re.sub(r"\s+", "", x)
def get_cache_key_and_events() -> tuple[str, str]:
cache = TritonTemplate.all_templates["mm"]._generated_code_cache._cache
cache_key = next(iter(cache))
events = str(cache[cache_key].events)
return cache_key, events
def func_test1(x, y, z, m):
a = torch.matmul(x, y)
b = torch.matmul(z, m)
return a, b
a = torch.rand(10, 22, device=GPU_TYPE)
b = torch.rand(22, 30, device=GPU_TYPE)
# Valid cache hit.
with fresh_cache():
reset_counters()
compile_results = torch.compile(func_test1, dynamic=False)(a, b, a, b)
eager_results = func_test1(a, b, a, b)
self.assertEqual(compile_results, eager_results, atol=0.05, rtol=0.05)
self.assertEqual(hits(), 4)
self.assertEqual(misses(), 4)
cache_key, events = get_cache_key_and_events()
if not TEST_WITH_ROCM:
expected = """{
'input_nodes':[
"[[10,22],[22,1],torch.float32,device(type='cuda',index=0),0]",
"[[22,30],[30,1],torch.float32,device(type='cuda',index=0),0]"],
'num_stages':1,'num_warps':2,'prefix_args':0,'suffix_args':0,'call_sizes':[10,30],
'layout':"[[10,30],[30,1],torch.float32,device(type='cuda',index=0),0]",
'num_consumer_groups':0,'num_buffers_warp_spec':0,'epilogue_fn_hash':'identity','tma_store':False,
'kwargs':{'EVEN_K':False,'USE_FAST_ACCUM':False,'ACC_TYPE':'tl.float32',
'BLOCK_M':16,'BLOCK_N':32,'BLOCK_K':16,'GROUP_M':8,'ALLOW_TF32':True},'hint_override':None}"""
expected = expected.replace("cuda", GPU_TYPE)
self.assertExpectedInline(
remove_white_space(cache_key),
remove_white_space(expected),
)
self.assertEqual(
remove_white_space(events),
remove_white_space("""[('def_kernel', ['A', 'B'], {})]"""),
)
# Test symbolic shapes with different symbols. Will cache miss due to different symbols in inputs.
with fresh_cache():
a = torch.rand(10, 22, device=GPU_TYPE)
b = torch.rand(22, 30, device=GPU_TYPE)
c = torch.rand(9, 21, device=GPU_TYPE)
d = torch.rand(21, 30, device=GPU_TYPE)
reset_counters()
compiled_results = torch.compile(func_test1, dynamic=True)(a, b, c, d)
eager_results = func_test1(a, b, c, d)
self.assertEqual(compiled_results, eager_results, atol=0.05, rtol=0.05)
self.assertEqual(hits(), 0)
self.assertEqual(misses(), 8)
cache_key, events = get_cache_key_and_events()
if not TEST_WITH_ROCM:
expected = """{
'input_nodes':[
"[[s77,s27],[s27,1],torch.float32,device(type='cuda',index=0),0]",
"[[s27,s94],[s94,1],torch.float32,device(type='cuda',index=0),0]"],
'num_stages':1,'num_warps':2,'prefix_args':0,'suffix_args':0,'call_sizes':[s77,s94],
'layout':"[[s77,s94],[s94,1],torch.float32,device(type='cuda',index=0),0]",'num_consumer_groups':0,
'num_buffers_warp_spec':0,'epilogue_fn_hash':'identity','tma_store':False,'kwargs':{'EVEN_K':False,'USE_FAST_ACCUM':False,
'ACC_TYPE':'tl.float32','BLOCK_M':16,'BLOCK_N':32,'BLOCK_K':16,'GROUP_M':8,'ALLOW_TF32':True},'hint_override':None}"""
expected = expected.replace("cuda", GPU_TYPE)
self.assertExpectedInline(
remove_white_space(cache_key),
remove_white_space(expected),
)
self.assertExpectedInline(
remove_white_space(events),
remove_white_space(
"""[('def_kernel',['A','B'],{}),('size',['A',0],{}),('size',['B',1],{}),('size',['A',1],{})]"""
),
)
self.assertExpectedInline(
remove_white_space(events),
remove_white_space(
"""[
('def_kernel', ['A', 'B'], {}),
('size', ['A', 0], {}),
('size', ['B', 1], {}),
('size', ['A', 1], {})]
"""
),
)
# Test duck typing.
with fresh_cache():
reset_counters()
compile_results = torch.compile(func_test1, dynamic=True)(a, b, a, b)
eager_results = func_test1(a, b, a, b)
self.assertEqual(compile_results, eager_results, atol=0.05, rtol=0.05)
self.assertEqual(hits(), 4)
self.assertEqual(misses(), 4)
# Test loop.
def test_func2(x):
for _ in range(10):
x = torch.matmul(x, x)
return x
with fresh_cache():
reset_counters()
input = torch.rand(10, 10, device=GPU_TYPE)
compile_results = torch.compile(test_func2, dynamic=False)(input)
eager_results = test_func2(input)
self.assertEqual(compile_results, eager_results, atol=0.05, rtol=0.05)
self.assertEqual(hits(), 36)
self.assertEqual(misses(), 4)
with fresh_cache():
reset_counters()
input = torch.rand(10, 10, device=GPU_TYPE)
compile_results = torch.compile(test_func2, dynamic=True)(input)
eager_results = test_func2(input)
self.assertEqual(compile_results, eager_results, atol=0.05, rtol=0.05)
self.assertEqual(hits(), 36)
self.assertEqual(misses(), 4)
# No cache hit due to symbolic expressions passed i.e mm(s0 + s1, 2) vs mm(s3, 2).
reset_counters()
def test_func3(x, y, z, m, l):
a = torch.matmul(x, y)
b = torch.matmul(torch.cat([x, z], 1), torch.cat([y, m, l], 0))
return a, b
with fresh_cache():
a = torch.rand(10, 22, device=GPU_TYPE)
b = torch.rand(22, 30, device=GPU_TYPE)
c = torch.rand(10, 11, device=GPU_TYPE)
d = torch.rand(8, 30, device=GPU_TYPE)
e = torch.rand(3, 30, device=GPU_TYPE)
compile_results = torch.compile(test_func3, dynamic=True)(a, b, c, d, e)
eager_results = test_func3(a, b, c, d, e)
self.assertEqual(compile_results, eager_results, atol=0.05, rtol=0.05)
self.assertEqual(hits(), 0)
self.assertEqual(misses(), 7)
@config.patch(
{
"max_autotune": True,
"test_configs.max_mm_configs": 4,
"max_autotune_gemm_backends": "TRITON",
}
)
@unittest.skipIf(config.triton.native_matmul, "only test on template-based matmul")
def test_triton_template_generated_code_caching_bmm(self):
def func_test1(x, y, z, m):
a = torch.bmm(x, y)
b = torch.bmm(z, m)
return a, b
a = torch.rand(10, 10, 22, device=GPU_TYPE)
b = torch.rand(10, 22, 30, device=GPU_TYPE)
def hits():
return torch._dynamo.utils.counters["inductor"][
"generated_module_cache_hit"
]
def misses():
return torch._dynamo.utils.counters["inductor"][
"generated_module_cache_miss"
]
# Valid cache hit.
with fresh_cache():
torch._dynamo.utils.counters.clear()
compile_results = torch.compile(func_test1, dynamic=False)(a, b, a, b)
eager_results = func_test1(a, b, a, b)
self.assertEqual(compile_results, eager_results, atol=0.05, rtol=0.05)
self.assertEqual(hits(), 4)
self.assertEqual(misses(), 4)
@config.patch(
{
"max_autotune": True,
"test_configs.max_mm_configs": 4,
"max_autotune_gemm_backends": "ATEN, TRITON",
}
)
@unittest.skipIf(config.triton.native_matmul, "only test on template-based matmul")
def test_triton_template_generated_code_caching_mm_plus_mm(self):
def func_test1(x, y, z, m):
a = torch.mm(x, y)
b = torch.mm(z, m)
sum1 = a + b
c = torch.mm(x, y)
d = torch.mm(z, m)
sum2 = c + d
return sum1, sum2
a = torch.rand(10, 40, device=GPU_TYPE)
b = torch.rand(40, 30, device=GPU_TYPE)
def hits():
return torch._dynamo.utils.counters["inductor"][
"generated_module_cache_hit"
]
def misses():
return torch._dynamo.utils.counters["inductor"][
"generated_module_cache_miss"
]
# Valid cache hit.
with fresh_cache():
torch._dynamo.utils.counters.clear()
compile_results = torch.compile(func_test1, dynamic=False)(a, b, a, b)
eager_results = func_test1(a, b, a, b)
self.assertEqual(compile_results, eager_results, atol=0.05, rtol=0.05)
self.assertEqual(hits(), 4)
self.assertEqual(misses(), 4)
@fresh_cache()
@skipIfXpu
@unittest.skipIf(TEST_WITH_ROCM, "decompose_k not supported on ROCm")
@unittest.skipIf(
config.cpp_wrapper, "decompose_k not supported for cpp_wrapper yet"
)
@unittest.skipIf(
config.triton.native_matmul,
"ignore decompose_k when native matmul codegen",
)
@config.patch(
max_autotune=True,
max_autotune_gemm_backends="TRITON",
autotune_fallback_to_aten=False,
)
@parametrize("num_decompose_k_splits", (0, 5, 20))
@parametrize("decompose_k_threshold", (8, 16))
def test_max_autotune_decompose_k_envvars(
self, num_decompose_k_splits, decompose_k_threshold
):
shapes = [(32, 32, 32768), (32, 32, 256)]
for M, N, K in shapes:
get_k_splits.cache_clear()
use_decompose_k_choice.cache_clear()
a = torch.randn(M, K, dtype=torch.float16, device=GPU_TYPE)
b = torch.randn(K, N, dtype=torch.float16, device=GPU_TYPE)
with config.patch(
{
"triton.num_decompose_k_splits": num_decompose_k_splits,
"triton.decompose_k_threshold": decompose_k_threshold,
}
):
compiled_func = torch.compile(lambda a, b: a @ b)
_, code = run_and_get_code(compiled_func, a, b)
decompose_count = 0
for codegen in code:
if "benchmark_decompose_k_mm" in codegen:
decompose_count += 1
if (
K // M < decompose_k_threshold
or K // N < decompose_k_threshold
or num_decompose_k_splits == 0
):
self.assertEqual(decompose_count, 0)
else:
self.assertTrue(decompose_count > 0)
self.assertTrue(decompose_count <= num_decompose_k_splits)
@skipIfXpu
@unittest.skipIf(
TEST_WITH_ROCM, "exhaustive currently only thoroughly tested on NVIDIA"
)
@unittest.skipIf(
config.triton.native_matmul,
"native matmul takes different tuning configs",
)
@config.patch(max_autotune=True, max_autotune_gemm_search_space="EXHAUSTIVE")
def test_max_autotune_exhaustive(self):
def f(a, b):
return a @ b
M, N, K = (1024, 1024, 1024)
a = torch.randn(M, K, dtype=torch.float16, device=GPU_TYPE, requires_grad=True)
b = torch.randn(K, N, dtype=torch.float16, device=GPU_TYPE, requires_grad=True)
with mock.patch(
"torch._inductor.template_heuristics.registry.get_template_heuristic"
) as config_mock:
config_heuristics = CUDAMMTemplateConfigHeuristic()
# Traditionally, this would be set of all possible configs
# We mock out the code path for the sake of the unit test
config_heuristics.exhaustive_configs = [GemmConfig(32, 32, 32, 1, 8, 8)]
config_mock.return_value = config_heuristics
from torch._dynamo.utils import counters
compiled_func = torch.compile(f)
compiled_func(a, b)
# Only benchmarks 2 choices, aten and the exhaustive triton config
# Counter can be InductorBenchmarker or TritonBenchmarker
for counter in counters["inductor"]:
if "benchmark_gpu" in counter:
self.assertEqual(counters["inductor"][counter], 2)
@config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": "TRITON",
}
)
def test_mm_k_1(self):
def mm(x, y):
return x @ y
for i in range(90, 100):
torch._dynamo.reset()
a = torch.randn((i, 1), device=GPU_TYPE, dtype=torch.float32)
b = torch.randn((1, i), device=GPU_TYPE, dtype=torch.float32)
compiled_f = torch.compile(mm)
out, code = run_and_get_code(compiled_f, a, b)
torch.testing.assert_close(out, mm(a, b), atol=1e-2, rtol=1e-2)
@config.patch(
max_autotune_gemm=True,
max_autotune_prune_choices_based_on_shared_mem=True,
)
def test_max_autotune_prune_choices(self):
def mm(x, y):
return x @ y
M, K, N = (3, 3, 3)
x = torch.rand([M, K], device=GPU_TYPE, dtype=torch.float32)
y = torch.rand([K, N], device=GPU_TYPE, dtype=torch.float32)
compiled_f = torch.compile(mm)
compiled_f(x, y)
self.assertEqual(
counters["inductor"]["select_algorithm_num_precompilation_exceptions"], 0
)
@parametrize("op", ("mm", "addmm", "bmm", "baddbmm", "mm_plus_mm"))
@parametrize("max_autotune", (False, True))
@config.patch(
{
"test_configs.max_mm_configs": 4,
"max_autotune_gemm_backends": "ATEN,TRITON",
"triton.native_matmul": False,
}
)
def test_autotune_gemm_choice_validation(self, op, max_autotune):
def generate_inputs_and_func(op_name):
# Base config with just x and w
base_inputs = [
torch.randn(128, 256, device=GPU_TYPE),
torch.randn(256, 128, device=GPU_TYPE),
]
func = torch.mm
if op_name == "mm":
# default
pass
elif op_name == "addmm":
# Add bias for addmm
base_inputs = [torch.randn(128, device=GPU_TYPE)] + base_inputs
func = torch.addmm
elif op_name in ["bmm", "baddbmm"]:
# Override for batch dimensions
base_inputs[0] = torch.randn(4, 128, 256, device=GPU_TYPE)
base_inputs[1] = torch.randn(4, 256, 128, device=GPU_TYPE)
func = torch.bmm
if op_name == "baddbmm":
# Add batch bias
base_inputs = [
torch.torch.randn(4, 128, 128, device=GPU_TYPE)
] + base_inputs
func = torch.baddbmm
elif op_name == "mm_plus_mm":
# Add second matrix pair
base_inputs += [
torch.randn(128, 256, device=GPU_TYPE),
torch.randn(256, 128, device=GPU_TYPE),
]
def mmpmm(x, w, x2, w2):
return torch.mm(x, w) + torch.mm(x2, w2)
func = mmpmm
else:
raise ValueError(f"Unsupported op: {op_name}")
return base_inputs, func
choice_types_seen = set()
def choice_validator(choices):
for choice in choices:
choice_types_seen.add(type(choice))
return choices
inputs, fn = generate_inputs_and_func(op)
add_preprocessing_fn(choice_validator)
try:
with config.patch({"max_autotune": max_autotune}):
compiled_fn = torch.compile(fn, dynamic=False)
compiled_fn(*inputs)
if max_autotune:
self.assertIn(ExternKernelCaller, choice_types_seen)
self.assertIn(TritonTemplateCaller, choice_types_seen)
else:
self.assertIn(ExternKernelCaller, choice_types_seen)
self.assertNotIn(TritonTemplateCaller, choice_types_seen)
finally:
clear_preprocessing_fns()
@config.patch(
{"test_configs.max_mm_configs": 4, "max_autotune_gemm_backends": "ATEN,TRITON"}
)
@parametrize("max_autotune_enabled", (True, False))
def test_autotune_layout_optimization(self, max_autotune_enabled):
"""Test that layouts are flexible when every choice is ExternKernelChoice"""
# we use a proxy here of bias_addmm and max-autotune because this enables us to see
# multiple choices in both scenarios (bias_addmm, addmm, triton (max-autotune only))
# and both bias_addmm and addmm are extern kernel choices
def layout_checker(choices):
if choices:
expected_layout = (
FixedLayout if max_autotune_enabled else FlexibleLayout
)
for choice in choices:
self.assertIsInstance(
choice.layout,
expected_layout,
f"Expected {expected_layout.__name__} with max_autotune={max_autotune_enabled}",
)
return choices
add_preprocessing_fn(layout_checker)
try:
bias = torch.randn(64, device=GPU_TYPE)
x = torch.randn(32, 128, device=GPU_TYPE)
w = torch.randn(128, 64, device=GPU_TYPE)
with config.patch({"max_autotune": max_autotune_enabled}):
compiled_fn = torch.compile(lambda b, x, w: torch.addmm(b, x, w))
_ = compiled_fn(bias, x, w)
finally:
clear_preprocessing_fns(clear_defaults=False)
@config.patch(
{"test_configs.max_mm_configs": 4, "max_autotune_gemm_backends": "TRITON"}
)
def test_fixed_layout_at_lowering(self):
"""
Test that max-autotune with addmm/bmm/mm_plus_mm correctly handles
padding and maintains correct output strides. Specifically, when matrix
b with shape (4608, 1490) is padded, its stride should become 1536.
"""
def mm_func(a, b) -> torch.Tensor:
a_t = torch.permute(a, [1, 0]).to(torch.bfloat16)
b_dtype = b.to(torch.bfloat16)
# Add .to() to make sure that mm could be potentially padded
# Strides for output are not padded
return (a_t @ b_dtype).to(torch.float32)
def addmm_func(a, b, bias) -> torch.Tensor:
a_t = torch.permute(a, [1, 0]).to(torch.bfloat16)
b_dtype = b.to(torch.bfloat16)
bias_dtype = bias.to(torch.bfloat16)
return torch.addmm(bias_dtype, a_t, b_dtype).to(torch.float32)
def bmm_func(a, b) -> torch.Tensor:
a_t = torch.permute(a, [2, 0, 1]).to(torch.bfloat16)
b_dtype = b.to(torch.bfloat16)
return torch.bmm(a_t, b_dtype).to(torch.float32)
def mm_plus_mm_func(a1, b1, a2, b2) -> torch.Tensor:
a1_t = torch.permute(a1, [1, 0]).to(torch.bfloat16)
b1_dtype = b1.to(torch.bfloat16)
a2_t = torch.permute(a2, [1, 0]).to(torch.bfloat16)
b2_dtype = b2.to(torch.bfloat16)
return (a1_t @ b1_dtype + a2_t @ b2_dtype).to(torch.float32)
a = torch.randn((4608, 512), device=GPU_TYPE, dtype=torch.bfloat16)
b = torch.randn((4608, 1490), device=GPU_TYPE)
bias = torch.randn(1490, device=GPU_TYPE)
a_bmm = torch.randn((512, 4608, 8), device=GPU_TYPE, dtype=torch.bfloat16)
b_bmm = torch.randn((8, 4608, 1490), device=GPU_TYPE)
# Test mm_plus_mm
a2 = torch.randn((4608, 512), device=GPU_TYPE, dtype=torch.bfloat16)
b2 = torch.randn((4608, 1490), device=GPU_TYPE)
# 1490 padded to 1536, check in template code
output_code_padding_check = "stride_bk = 1536"
funcs_and_args = [
(mm_func, (a, b)),
(addmm_func, (a, b, bias)),
(bmm_func, (a_bmm, b_bmm)),
(mm_plus_mm_func, (a, b, a2, b2)),
]
for f, args in funcs_and_args:
c_f = torch.compile(f, mode="max-autotune-no-cudagraphs")
_, code_out = run_and_get_code(c_f, *args)
FileCheck().check(output_code_padding_check).run(code_out[0])
| TestMaxAutotune |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 62922,
"end": 63202
} | class ____(_PrintableStructure):
_fields_ = [
('version', c_uint),
('bSetBest', c_uint),
('bwMode', c_uint8)
]
def __init__(self):
super(c_nvmlNvlinkSetBwMode_v1_t, self).__init__(version=nvmlNvlinkSetBwMode_v1)
| c_nvmlNvlinkSetBwMode_v1_t |
python | gevent__gevent | src/gevent/tests/test__fileobject.py | {
"start": 933,
"end": 1319
} | class ____(object):
def _mkstemp(self, suffix):
fileno, path = tempfile.mkstemp(suffix)
self.addCleanup(os.remove, path)
self.addCleanup(close_fd_quietly, fileno)
return fileno, path
def _pipe(self):
r, w = os.pipe()
self.addCleanup(close_fd_quietly, r)
self.addCleanup(close_fd_quietly, w)
return r, w
| CleanupMixin |
python | bokeh__bokeh | src/bokeh/models/annotations/html/html_annotation.py | {
"start": 1314,
"end": 2312
} | class ____(Annotation):
''' Base class for HTML-based annotations.
.. note::
All annotations that inherit from this base class can be attached to
a canvas, but are not rendered to it, thus they won't appear in saved
plots. Only ``export_png()`` function can preserve HTML annotations.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| HTMLAnnotation |
python | sympy__sympy | sympy/stats/frv.py | {
"start": 1181,
"end": 1693
} | class ____(dict):
"""
A domain with Finite Density.
"""
def __call__(self, item):
"""
Make instance of a class callable.
If item belongs to current instance of a class, return it.
Otherwise, return 0.
"""
item = sympify(item)
if item in self:
return self[item]
else:
return 0
@property
def dict(self):
"""
Return item as dictionary.
"""
return dict(self)
| FiniteDensity |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/triggers/gcs.py | {
"start": 1142,
"end": 4562
} | class ____(BaseTrigger):
"""
A trigger that fires and it finds the requested file or folder present in the given bucket.
:param bucket: the bucket in the google cloud storage where the objects are residing.
:param object_name: the file or folder present in the bucket
:param use_glob: if true object_name is interpreted as glob
:param google_cloud_conn_id: reference to the Google Connection
:param poke_interval: polling period in seconds to check for file/folder
:param hook_params: Extra config params to be passed to the underlying hook.
Should match the desired hook constructor params.
"""
def __init__(
self,
bucket: str,
object_name: str,
use_glob: bool,
poke_interval: float,
google_cloud_conn_id: str,
hook_params: dict[str, Any],
):
super().__init__()
self.bucket = bucket
self.object_name = object_name
self.use_glob = use_glob
self.poke_interval = poke_interval
self.google_cloud_conn_id: str = google_cloud_conn_id
self.hook_params = hook_params
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize GCSBlobTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.gcs.GCSBlobTrigger",
{
"bucket": self.bucket,
"object_name": self.object_name,
"use_glob": self.use_glob,
"poke_interval": self.poke_interval,
"google_cloud_conn_id": self.google_cloud_conn_id,
"hook_params": self.hook_params,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Loop until the relevant file/folder is found."""
try:
hook = self._get_async_hook()
while True:
res = await self._object_exists(
hook=hook, bucket_name=self.bucket, object_name=self.object_name
)
if res == "success":
yield TriggerEvent({"status": "success", "message": res})
return
await asyncio.sleep(self.poke_interval)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
def _get_async_hook(self) -> GCSAsyncHook:
return GCSAsyncHook(gcp_conn_id=self.google_cloud_conn_id, **self.hook_params)
async def _object_exists(self, hook: GCSAsyncHook, bucket_name: str, object_name: str) -> str:
"""
Check for the existence of a file in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:param object_name: The name of the blob_name to check in the Google cloud
storage bucket.
"""
async with ClientSession() as s:
client = await hook.get_storage_client(s)
bucket = client.get_bucket(bucket_name)
if self.use_glob:
list_blobs_response = await bucket.list_blobs(match_glob=object_name)
if len(list_blobs_response) > 0:
return "success"
else:
blob_exists_response = await bucket.blob_exists(blob_name=object_name)
if blob_exists_response:
return "success"
return "pending"
| GCSBlobTrigger |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1398487,
"end": 1400310
} | class ____(sgqlc.types.Type, Node):
"""A release asset contains the content for a release asset."""
__schema__ = github_schema
__field_names__ = (
"content_type",
"created_at",
"download_count",
"download_url",
"name",
"release",
"size",
"updated_at",
"uploaded_by",
"url",
)
content_type = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="contentType")
"""The asset's content-type"""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
download_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="downloadCount")
"""The number of times this asset was downloaded"""
download_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="downloadUrl")
"""Identifies the URL where you can download the release asset via
the browser.
"""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""Identifies the title of the release asset."""
release = sgqlc.types.Field(Release, graphql_name="release")
"""Release that the asset is associated with"""
size = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="size")
"""The size (in bytes) of the asset"""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
uploaded_by = sgqlc.types.Field(sgqlc.types.non_null("User"), graphql_name="uploadedBy")
"""The user that performed the upload"""
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
"""Identifies the URL of the release asset."""
| ReleaseAsset |
python | tensorflow__tensorflow | tensorflow/python/distribute/distribute_lib.py | {
"start": 16272,
"end": 16445
} | class ____(object):
def __init__(self, dist, cross, replica):
self.strategy = dist
self.cross_replica_context = cross
self.replica_context = replica
| _ThreadMode |
python | plotly__plotly.py | plotly/graph_objs/icicle/_root.py | {
"start": 233,
"end": 2657
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "icicle"
_path_str = "icicle.root"
_valid_props = {"color"}
@property
def color(self):
"""
sets the color of the root node for a sunburst/treemap/icicle
trace. this has no effect when a colorscale is used to set the
markers.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def _prop_descriptions(self):
return """\
color
sets the color of the root node for a
sunburst/treemap/icicle trace. this has no effect when
a colorscale is used to set the markers.
"""
def __init__(self, arg=None, color=None, **kwargs):
"""
Construct a new Root object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.icicle.Root`
color
sets the color of the root node for a
sunburst/treemap/icicle trace. this has no effect when
a colorscale is used to set the markers.
Returns
-------
Root
"""
super().__init__("root")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.icicle.Root
constructor must be a dict or
an instance of :class:`plotly.graph_objs.icicle.Root`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Root |
python | getsentry__sentry | tests/sentry/core/endpoints/scim/test_scim_user_details.py | {
"start": 16155,
"end": 31085
} | class ____(SCIMTestCase):
endpoint = "sentry-api-0-organization-scim-member-details"
def test_user_details_get(self) -> None:
member = self.create_member(organization=self.organization, email="test.user@okta.local")
response = self.get_success_response(
self.organization.slug,
member.id,
)
assert response.data == {
"schemas": ["urn:ietf:params:scim:schemas:core:2.0:User"],
"id": str(member.id),
"userName": "test.user@okta.local",
"emails": [{"primary": True, "value": "test.user@okta.local", "type": "work"}],
"name": {"familyName": "N/A", "givenName": "N/A"},
"active": True,
"meta": {"resourceType": "User"},
"sentryOrgRole": self.organization.default_role,
}
def test_user_details_set_inactive(self) -> None:
member = self.create_member(
user=self.create_user(email="test.user@okta.local"), organization=self.organization
)
with assume_test_silo_mode(SiloMode.CONTROL):
AuthIdentity.objects.create(
user_id=member.user_id, auth_provider=self.auth_provider_inst, ident="test_ident"
)
patch_req = {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:PatchOp"],
"Operations": [{"op": "Replace", "path": "active", "value": False}],
}
self.get_success_response(
self.organization.slug,
member.id,
raw_data=patch_req,
method="patch",
)
with pytest.raises(OrganizationMember.DoesNotExist):
OrganizationMember.objects.get(organization=self.organization, id=member.id)
with pytest.raises(AuthIdentity.DoesNotExist), assume_test_silo_mode(SiloMode.CONTROL):
AuthIdentity.objects.get(auth_provider=self.auth_provider_inst, id=member.id)
def test_user_details_cannot_set_partnership_member_inactive(self) -> None:
member = self.create_member(
user=self.create_user(email="test.user@okta.local"),
organization=self.organization,
flags=OrganizationMember.flags["partnership:restricted"],
)
with assume_test_silo_mode(SiloMode.CONTROL):
AuthIdentity.objects.create(
user_id=member.user_id, auth_provider=self.auth_provider_inst, ident="test_ident"
)
patch_req = {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:PatchOp"],
"Operations": [{"op": "Replace", "path": "active", "value": False}],
}
self.get_error_response(
self.organization.slug, member.id, raw_data=patch_req, method="patch", status_code=403
)
def test_user_details_set_inactive_dict(self) -> None:
member = self.create_member(
user=self.create_user(email="test.user@okta.local"), organization=self.organization
)
with assume_test_silo_mode(SiloMode.CONTROL):
AuthIdentity.objects.create(
user_id=member.user_id, auth_provider=self.auth_provider_inst, ident="test_ident"
)
patch_req = {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:PatchOp"],
"Operations": [{"op": "Replace", "value": {"active": False}}],
}
self.get_success_response(
self.organization.slug,
member.id,
raw_data=patch_req,
method="patch",
)
with pytest.raises(OrganizationMember.DoesNotExist):
OrganizationMember.objects.get(organization=self.organization, id=member.id)
with pytest.raises(AuthIdentity.DoesNotExist), assume_test_silo_mode(SiloMode.CONTROL):
AuthIdentity.objects.get(auth_provider=self.auth_provider_inst, id=member.id)
def test_user_details_set_inactive_with_bool_string(self) -> None:
member = self.create_member(
user=self.create_user(email="test.user@okta.local"), organization=self.organization
)
with assume_test_silo_mode(SiloMode.CONTROL):
AuthIdentity.objects.create(
user_id=member.user_id, auth_provider=self.auth_provider_inst, ident="test_ident"
)
patch_req = {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:PatchOp"],
"Operations": [{"op": "Replace", "path": "active", "value": "False"}],
}
self.get_success_response(
self.organization.slug,
member.id,
raw_data=patch_req,
method="patch",
)
with pytest.raises(OrganizationMember.DoesNotExist):
OrganizationMember.objects.get(organization=self.organization, id=member.id)
with pytest.raises(AuthIdentity.DoesNotExist), assume_test_silo_mode(SiloMode.CONTROL):
AuthIdentity.objects.get(auth_provider=self.auth_provider_inst, id=member.id)
def test_user_details_set_inactive_with_dict_bool_string(self) -> None:
member = self.create_member(
user=self.create_user(email="test.user@okta.local"), organization=self.organization
)
with assume_test_silo_mode(SiloMode.CONTROL):
AuthIdentity.objects.create(
user_id=member.user_id, auth_provider=self.auth_provider_inst, ident="test_ident"
)
patch_req = {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:PatchOp"],
"Operations": [{"op": "Replace", "value": {"id": "xxxx", "active": "False"}}],
}
self.get_success_response(
self.organization.slug,
member.id,
raw_data=patch_req,
method="patch",
)
with pytest.raises(OrganizationMember.DoesNotExist):
OrganizationMember.objects.get(organization=self.organization, id=member.id)
with pytest.raises(AuthIdentity.DoesNotExist), assume_test_silo_mode(SiloMode.CONTROL):
AuthIdentity.objects.get(auth_provider=self.auth_provider_inst, id=member.id)
def test_invalid_patch_op(self) -> None:
member = self.create_member(
user=self.create_user(email="test.user@okta.local"), organization=self.organization
)
with assume_test_silo_mode(SiloMode.CONTROL):
AuthIdentity.objects.create(
user_id=member.user_id, auth_provider=self.auth_provider_inst, ident="test_ident"
)
patch_req = {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:PatchOp"],
"Operations": [{"op": "invalid", "value": {"active": False}}],
}
self.get_error_response(
self.organization.slug, member.id, raw_data=patch_req, method="patch", status_code=400
)
def test_invalid_patch_op_value(self) -> None:
member = self.create_member(
user=self.create_user(email="test.user@okta.local"), organization=self.organization
)
with assume_test_silo_mode(SiloMode.CONTROL):
AuthIdentity.objects.create(
user_id=member.user_id, auth_provider=self.auth_provider_inst, ident="test_ident"
)
patch_req = {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:PatchOp"],
"Operations": [{"op": "REPLACE", "value": {"active": "invalid"}}],
}
self.get_error_response(
self.organization.slug, member.id, raw_data=patch_req, method="patch", status_code=400
)
def test_user_details_get_404(self) -> None:
self.get_error_response(self.organization.slug, 99999999, status_code=404)
def test_user_details_patch_404(self) -> None:
patch_req = {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:PatchOp"],
"Operations": [{"op": "replace", "value": {"active": False}}],
}
self.get_error_response(
self.organization.slug, 99999999, raw_data=patch_req, method="patch", status_code=404
)
def test_delete_route(self) -> None:
member = self.create_member(user=self.create_user(), organization=self.organization)
with assume_test_silo_mode(SiloMode.CONTROL):
AuthIdentity.objects.create(
user_id=member.user_id, auth_provider=self.auth_provider_inst, ident="test_ident"
)
self.get_success_response(
self.organization.slug,
member.id,
method="delete",
)
with pytest.raises(OrganizationMember.DoesNotExist):
OrganizationMember.objects.get(organization=self.organization, id=member.id)
with pytest.raises(AuthIdentity.DoesNotExist), assume_test_silo_mode(SiloMode.CONTROL):
AuthIdentity.objects.get(auth_provider=self.auth_provider_inst, id=member.id)
def test_cannot_delete_partnership_member(self) -> None:
member = self.create_member(
user=self.create_user(),
organization=self.organization,
flags=OrganizationMember.flags["partnership:restricted"],
)
with assume_test_silo_mode(SiloMode.CONTROL):
AuthIdentity.objects.create(
user_id=member.user_id, auth_provider=self.auth_provider_inst, ident="test_ident"
)
self.get_error_response(self.organization.slug, member.id, method="delete", status_code=403)
def test_patch_inactive_alternate_schema(self) -> None:
member = self.create_member(user=self.create_user(), organization=self.organization)
patch_req = {"Operations": [{"op": "replace", "path": "active", "value": False}]}
self.get_success_response(
self.organization.slug,
member.id,
raw_data=patch_req,
method="patch",
)
with pytest.raises(OrganizationMember.DoesNotExist):
OrganizationMember.objects.get(organization=self.organization, id=member.id)
def test_patch_bad_schema(self) -> None:
member = self.create_member(user=self.create_user(), organization=self.organization)
patch_req = {"Operations": [{"op": "replace", "path": "blahblahbbalh", "value": False}]}
response = self.get_error_response(
self.organization.slug, member.id, raw_data=patch_req, method="patch", status_code=400
)
assert response.data == {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:Error"],
"detail": "Invalid Patch Operation.",
}
patch_req = {"Operations": [{"op": "replace", "value": False}]}
response = self.get_error_response(
self.organization.slug, member.id, raw_data=patch_req, method="patch", status_code=400
)
assert response.data == {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:Error"],
"detail": "Invalid Patch Operation.",
}
def test_member_detail_patch_too_many_ops(self) -> None:
member = self.create_member(user=self.create_user(), organization=self.organization)
patch_req = {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:PatchOp"],
"Operations": [{"op": "replace", "path": "active", "value": False}] * 101,
}
response = self.get_error_response(
self.organization.slug, member.id, raw_data=patch_req, method="patch", status_code=400
)
assert response.status_code == 400, response.data
assert response.data == {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:Error"],
"detail": '{"Operations":["Ensure this field has no more than 100 elements."]}',
}
# Disabling below test for now.
# need to see what Okta admins would expect to happen with invited members
# def test_request_invite_members_not_in_requests(self) -> None:
# member1 = self.create_member(user=self.create_user(), organization=self.organization)
# member1.invite_status = InviteStatus.REQUESTED_TO_BE_INVITED.value
# member1.save()
# member2 = self.create_member(user=self.create_user(), organization=self.organization)
# member2.invite_status = InviteStatus.REQUESTED_TO_JOIN.value
# member2.save()
# member3 = self.create_member(user=self.create_user(), organization=self.organization)
# member3.invite_status = InviteStatus.APPROVED.value # default val
# member3.save()
# url = reverse("sentry-api-0-organization-scim-member-index", args=[self.organization.slug])
# response = self.client.get(f"{url}?startIndex=1&count=100")
# assert response.status_code == 200, response.content
# assert response.data["totalResults"] == 2
# url = reverse(
# "sentry-api-0-organization-scim-member-details", args=[self.organization.slug, member1.id]
# )
# response = self.client.get(url)
# assert response.status_code == 404, response.content
# url = reverse(
# "sentry-api-0-organization-scim-member-details", args=[self.organization.slug, member2.id]
# )
# response = self.client.get(url)
# assert response.status_code == 404, response.content
def test_overflow_cases(self) -> None:
member = self.create_member(user=self.create_user(), organization=self.organization)
self.get_error_response(
self.organization.slug, "010101001010101011001010101011", status_code=404
)
self.get_error_response(
self.organization.slug,
"010101001010101011001010101011",
raw_data={},
method="patch",
status_code=404,
)
self.get_error_response(
self.organization.slug,
"010101001010101011001010101011",
raw_data=member.id,
method="delete",
status_code=404,
)
def test_cant_delete_only_owner_route(self) -> None:
member_om = OrganizationMember.objects.get(
organization=self.organization, user_id=self.user.id
)
self.get_error_response(
self.organization.slug,
member_om.id,
method="delete",
status_code=403,
)
def test_cant_delete_only_owner_route_patch(self) -> None:
member_om = OrganizationMember.objects.get(
organization=self.organization, user_id=self.user.id
)
patch_req = {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:PatchOp"],
"Operations": [{"op": "replace", "value": {"active": False}}],
}
self.get_error_response(
self.organization.slug,
member_om.id,
raw_data=patch_req,
method="patch",
status_code=403,
)
# TODO: test patch with bad op
| SCIMMemberDetailsTests |
python | pydata__xarray | xarray/tests/test_dask.py | {
"start": 29756,
"end": 66228
} | class ____:
@pytest.mark.xfail(reason="https://github.com/dask/dask/issues/11584")
def test_to_dask_dataframe(self):
# Test conversion of Datasets to dask DataFrames
x = np.random.randn(10)
y = np.arange(10, dtype="uint8")
t = list("abcdefghij")
ds = Dataset(
{"a": ("t", da.from_array(x, chunks=4)), "b": ("t", y), "t": ("t", t)}
)
expected_pd = pd.DataFrame({"a": x, "b": y}, index=pd.Index(t, name="t"))
# test if 1-D index is correctly set up
expected = dd.from_pandas(expected_pd, chunksize=4)
actual = ds.to_dask_dataframe(set_index=True)
# test if we have dask dataframes
assert isinstance(actual, dd.DataFrame)
# use the .equals from pandas to check dataframes are equivalent
assert_frame_equal(actual.compute(), expected.compute())
# test if no index is given
expected = dd.from_pandas(expected_pd.reset_index(drop=False), chunksize=4)
actual = ds.to_dask_dataframe(set_index=False)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(actual.compute(), expected.compute())
@pytest.mark.xfail(
reason="Currently pandas with pyarrow installed will return a `string[pyarrow]` type, "
"which causes the `y` column to have a different type depending on whether pyarrow is installed"
)
def test_to_dask_dataframe_2D(self):
# Test if 2-D dataset is supplied
w = np.random.randn(2, 3)
ds = Dataset({"w": (("x", "y"), da.from_array(w, chunks=(1, 2)))})
ds["x"] = ("x", np.array([0, 1], np.int64))
ds["y"] = ("y", list("abc"))
# dask dataframes do not (yet) support multiindex,
# but when it does, this would be the expected index:
exp_index = pd.MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1], ["a", "b", "c", "a", "b", "c"]], names=["x", "y"]
)
expected = pd.DataFrame({"w": w.reshape(-1)}, index=exp_index)
# so for now, reset the index
expected = expected.reset_index(drop=False)
actual = ds.to_dask_dataframe(set_index=False)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(actual.compute(), expected)
@pytest.mark.xfail(raises=NotImplementedError)
def test_to_dask_dataframe_2D_set_index(self):
# This will fail until dask implements MultiIndex support
w = da.from_array(np.random.randn(2, 3), chunks=(1, 2))
ds = Dataset({"w": (("x", "y"), w)})
ds["x"] = ("x", np.array([0, 1], np.int64))
ds["y"] = ("y", list("abc"))
expected = ds.compute().to_dataframe()
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
def test_to_dask_dataframe_coordinates(self):
# Test if coordinate is also a dask array
x = np.random.randn(10)
t = np.arange(10) * 2
ds = Dataset(
{
"a": ("t", da.from_array(x, chunks=4)),
"t": ("t", da.from_array(t, chunks=4)),
}
)
expected_pd = pd.DataFrame({"a": x}, index=pd.Index(t, name="t"))
expected = dd.from_pandas(expected_pd, chunksize=4)
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected.compute(), actual.compute())
@pytest.mark.xfail(
reason="Currently pandas with pyarrow installed will return a `string[pyarrow]` type, "
"which causes the index to have a different type depending on whether pyarrow is installed"
)
def test_to_dask_dataframe_not_daskarray(self):
# Test if DataArray is not a dask array
x = np.random.randn(10)
y = np.arange(10, dtype="uint8")
t = list("abcdefghij")
ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t)})
expected = pd.DataFrame({"a": x, "b": y}, index=pd.Index(t, name="t"))
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
def test_to_dask_dataframe_no_coordinate(self):
x = da.from_array(np.random.randn(10), chunks=4)
ds = Dataset({"x": ("dim_0", x)})
expected = ds.compute().to_dataframe().reset_index()
actual = ds.to_dask_dataframe()
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
expected = ds.compute().to_dataframe()
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
def test_to_dask_dataframe_dim_order(self):
values = np.array([[1, 2], [3, 4]], dtype=np.int64)
ds = Dataset({"w": (("x", "y"), values)}).chunk(1)
expected = ds["w"].to_series().reset_index()
actual = ds.to_dask_dataframe(dim_order=["x", "y"])
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
expected = ds["w"].T.to_series().reset_index()
actual = ds.to_dask_dataframe(dim_order=["y", "x"])
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
with pytest.raises(ValueError, match=r"does not match the set of dimensions"):
ds.to_dask_dataframe(dim_order=["x"])
@pytest.mark.parametrize("method", ["load", "compute"])
def test_dask_kwargs_variable(method):
chunked_array = da.from_array(np.arange(3), chunks=(2,))
x = Variable("y", chunked_array)
# args should be passed on to dask.compute() (via DaskManager.compute())
with mock.patch.object(da, "compute", return_value=(np.arange(3),)) as mock_compute:
getattr(x, method)(foo="bar")
mock_compute.assert_called_with(chunked_array, foo="bar")
@pytest.mark.parametrize("method", ["load", "compute", "persist"])
def test_dask_kwargs_dataarray(method):
data = da.from_array(np.arange(3), chunks=(2,))
x = DataArray(data)
if method in ["load", "compute"]:
dask_func = "dask.array.compute"
else:
dask_func = "dask.persist"
# args should be passed on to "dask_func"
with mock.patch(dask_func) as mock_func:
getattr(x, method)(foo="bar")
mock_func.assert_called_with(data, foo="bar")
@pytest.mark.parametrize("method", ["load", "compute", "persist"])
def test_dask_kwargs_dataset(method):
data = da.from_array(np.arange(3), chunks=(2,))
x = Dataset({"x": (("y"), data)})
if method in ["load", "compute"]:
dask_func = "dask.array.compute"
else:
dask_func = "dask.persist"
# args should be passed on to "dask_func"
with mock.patch(dask_func) as mock_func:
getattr(x, method)(foo="bar")
mock_func.assert_called_with(data, foo="bar")
kernel_call_count = 0
def kernel(name):
"""Dask kernel to test pickling/unpickling and __repr__.
Must be global to make it pickleable.
"""
global kernel_call_count
kernel_call_count += 1
return np.ones(1, dtype=np.int64)
def build_dask_array(name):
global kernel_call_count
kernel_call_count = 0
return dask.array.Array(
dask={(name, 0): (kernel, name)}, name=name, chunks=((1,),), dtype=np.int64
)
@pytest.mark.parametrize(
"persist", [lambda x: x.persist(), lambda x: dask.persist(x)[0]]
)
def test_persist_Dataset(persist):
ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk()
ds = ds + 1
n = len(ds.foo.data.dask)
ds2 = persist(ds)
assert len(ds2.foo.data.dask) == 1
assert len(ds.foo.data.dask) == n # doesn't mutate in place
@pytest.mark.parametrize(
"persist", [lambda x: x.persist(), lambda x: dask.persist(x)[0]]
)
def test_persist_DataArray(persist):
x = da.arange(10, chunks=(5,))
y = DataArray(x)
z = y + 1
n = len(z.data.dask)
zz = persist(z)
assert len(z.data.dask) == n
assert len(zz.data.dask) == zz.data.npartitions
def test_dataarray_with_dask_coords():
import toolz
x = xr.Variable("x", da.arange(8, chunks=(4,)))
y = xr.Variable("y", da.arange(8, chunks=(4,)) * 2)
data = da.random.random((8, 8), chunks=(4, 4)) + 1
array = xr.DataArray(data, dims=["x", "y"])
array.coords["xx"] = x
array.coords["yy"] = y
assert dict(array.__dask_graph__()) == toolz.merge(
data.__dask_graph__(), x.__dask_graph__(), y.__dask_graph__()
)
(array2,) = dask.compute(array)
assert not dask.is_dask_collection(array2)
assert all(isinstance(v._variable.data, np.ndarray) for v in array2.coords.values())
def test_basic_compute():
ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk({"x": 2})
for get in [dask.threaded.get, dask.multiprocessing.get, dask.local.get_sync, None]:
with dask.config.set(scheduler=get):
ds.compute()
ds.foo.compute()
ds.foo.variable.compute()
def test_dataset_as_delayed():
ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk()
assert dask.delayed(ds).compute() == ds.compute()
def make_da():
da = xr.DataArray(
np.ones((10, 20)),
dims=["x", "y"],
coords={"x": np.arange(10), "y": np.arange(100, 120)},
name="a",
).chunk({"x": 4, "y": 5})
da.x.attrs["long_name"] = "x"
da.attrs["test"] = "test"
da.coords["c2"] = 0.5
da.coords["ndcoord"] = da.x * 2
da.coords["cxy"] = (da.x * da.y).chunk({"x": 4, "y": 5})
return da
def make_ds():
map_ds = xr.Dataset()
map_ds["a"] = make_da()
map_ds["b"] = map_ds.a + 50
map_ds["c"] = map_ds.x + 20
map_ds = map_ds.chunk({"x": 4, "y": 5})
map_ds["d"] = ("z", [1, 1, 1, 1])
map_ds["z"] = [0, 1, 2, 3]
map_ds["e"] = map_ds.x + map_ds.y
map_ds.coords["c1"] = 0.5
map_ds.coords["cx"] = ("x", np.arange(len(map_ds.x)))
map_ds.coords["cx"].attrs["test2"] = "test2"
map_ds.attrs["test"] = "test"
map_ds.coords["xx"] = map_ds["a"] * map_ds.y
map_ds.x.attrs["long_name"] = "x"
map_ds.y.attrs["long_name"] = "y"
return map_ds
# fixtures cannot be used in parametrize statements
# instead use this workaround
# https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly
@pytest.fixture
def map_da():
return make_da()
@pytest.fixture
def map_ds():
return make_ds()
def test_unify_chunks(map_ds):
ds_copy = map_ds.copy()
ds_copy["cxy"] = ds_copy.cxy.chunk({"y": 10})
with pytest.raises(ValueError, match=r"inconsistent chunks"):
_ = ds_copy.chunks
expected_chunks = {"x": (4, 4, 2), "y": (5, 5, 5, 5)}
with raise_if_dask_computes():
actual_chunks = ds_copy.unify_chunks().chunks
assert actual_chunks == expected_chunks
assert_identical(map_ds, ds_copy.unify_chunks())
out_a, out_b = xr.unify_chunks(ds_copy.cxy, ds_copy.drop_vars("cxy"))
assert out_a.chunks == ((4, 4, 2), (5, 5, 5, 5))
assert out_b.chunks == expected_chunks
# Test unordered dims
da = ds_copy["cxy"]
out_a, out_b = xr.unify_chunks(da.chunk({"x": -1}), da.T.chunk({"y": -1}))
assert out_a.chunks == ((4, 4, 2), (5, 5, 5, 5))
assert out_b.chunks == ((5, 5, 5, 5), (4, 4, 2))
# Test mismatch
with pytest.raises(ValueError, match=r"Dimension 'x' size mismatch: 10 != 2"):
xr.unify_chunks(da, da.isel(x=slice(2)))
@pytest.mark.parametrize("obj", [make_ds(), make_da()])
@pytest.mark.parametrize(
"transform", [lambda x: x.compute(), lambda x: x.unify_chunks()]
)
def test_unify_chunks_shallow_copy(obj, transform):
obj = transform(obj)
unified = obj.unify_chunks()
assert_identical(obj, unified)
# assert obj is not unified
@pytest.mark.parametrize("obj", [make_da()])
def test_auto_chunk_da(obj):
actual = obj.chunk("auto").data
expected = obj.data.rechunk("auto")
np.testing.assert_array_equal(actual, expected)
assert actual.chunks == expected.chunks
def test_auto_chunk_da_cftime():
yrs = np.arange(2000, 2120)
cftime_dates = xr.date_range(
start=f"{yrs[0]}-01-01", end=f"{yrs[-1]}-12-31", freq="1YE", use_cftime=True
)
yr_array = np.tile(cftime_dates.values, (10, 1))
da = xr.DataArray(
yr_array, dims=["x", "t"], coords={"x": np.arange(10), "t": cftime_dates}
).chunk({"x": 4, "t": 5})
actual = da.chunk("auto").data
expected = da.data.rechunk({0: 10, 1: 120})
np.testing.assert_array_equal(actual, expected)
assert actual.chunks == expected.chunks
def test_map_blocks_error(map_da, map_ds):
def bad_func(darray):
return (darray * darray.x + 5 * darray.y)[:1, :1]
with pytest.raises(ValueError, match=r"Received dimension 'x' of length 1"):
xr.map_blocks(bad_func, map_da).compute()
def returns_numpy(darray):
return (darray * darray.x + 5 * darray.y).values
with pytest.raises(TypeError, match=r"Function must return an xarray DataArray"):
xr.map_blocks(returns_numpy, map_da)
with pytest.raises(TypeError, match=r"args must be"):
xr.map_blocks(operator.add, map_da, args=10) # type: ignore[arg-type]
with pytest.raises(TypeError, match=r"kwargs must be"):
xr.map_blocks(operator.add, map_da, args=[10], kwargs=[20]) # type: ignore[arg-type]
def really_bad_func(darray):
raise ValueError("couldn't do anything.")
with pytest.raises(Exception, match=r"Cannot infer"):
xr.map_blocks(really_bad_func, map_da)
ds_copy = map_ds.copy()
ds_copy["cxy"] = ds_copy.cxy.chunk({"y": 10})
with pytest.raises(ValueError, match=r"inconsistent chunks"):
xr.map_blocks(bad_func, ds_copy)
with pytest.raises(TypeError, match=r"Cannot pass dask collections"):
xr.map_blocks(bad_func, map_da, kwargs=dict(a=map_da.chunk()))
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks(obj):
def func(obj):
result = obj + obj.x + 5 * obj.y
return result
with raise_if_dask_computes():
actual = xr.map_blocks(func, obj)
expected = func(obj)
assert_chunks_equal(expected.chunk(), actual)
assert_identical(actual, expected)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_mixed_type_inputs(obj):
def func(obj1, non_xarray_input, obj2):
result = obj1 + obj1.x + 5 * obj1.y
return result
with raise_if_dask_computes():
actual = xr.map_blocks(func, obj, args=["non_xarray_input", obj])
expected = func(obj, "non_xarray_input", obj)
assert_chunks_equal(expected.chunk(), actual)
assert_identical(actual, expected)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_convert_args_to_list(obj):
expected = obj + 10
with raise_if_dask_computes():
actual = xr.map_blocks(operator.add, obj, [10])
assert_chunks_equal(expected.chunk(), actual)
assert_identical(actual, expected)
def test_map_blocks_dask_args():
da1 = xr.DataArray(
np.ones((10, 20)),
dims=["x", "y"],
coords={"x": np.arange(10), "y": np.arange(20)},
).chunk({"x": 5, "y": 4})
# check that block shapes are the same
def sumda(da1, da2):
assert da1.shape == da2.shape
return da1 + da2
da2 = da1 + 1
with raise_if_dask_computes():
mapped = xr.map_blocks(sumda, da1, args=[da2])
xr.testing.assert_equal(da1 + da2, mapped)
# one dimension in common
da2 = (da1 + 1).isel(x=1, drop=True)
with raise_if_dask_computes():
mapped = xr.map_blocks(operator.add, da1, args=[da2])
xr.testing.assert_equal(da1 + da2, mapped)
# test that everything works when dimension names are different
da2 = (da1 + 1).isel(x=1, drop=True).rename({"y": "k"})
with raise_if_dask_computes():
mapped = xr.map_blocks(operator.add, da1, args=[da2])
xr.testing.assert_equal(da1 + da2, mapped)
with pytest.raises(ValueError, match=r"Chunk sizes along dimension 'x'"):
xr.map_blocks(operator.add, da1, args=[da1.chunk({"x": 1})])
with pytest.raises(ValueError, match=r"cannot align.*index.*are not equal"):
xr.map_blocks(operator.add, da1, args=[da1.reindex(x=np.arange(20))])
# reduction
da1 = da1.chunk({"x": -1})
da2 = da1 + 1
with raise_if_dask_computes():
mapped = xr.map_blocks(lambda a, b: (a + b).sum("x"), da1, args=[da2])
xr.testing.assert_equal((da1 + da2).sum("x"), mapped)
# reduction with template
da1 = da1.chunk({"x": -1})
da2 = da1 + 1
with raise_if_dask_computes():
mapped = xr.map_blocks(
lambda a, b: (a + b).sum("x"), da1, args=[da2], template=da1.sum("x")
)
xr.testing.assert_equal((da1 + da2).sum("x"), mapped)
# bad template: not chunked
with pytest.raises(ValueError, match="Provided template has no dask arrays"):
xr.map_blocks(
lambda a, b: (a + b).sum("x"),
da1,
args=[da2],
template=da1.sum("x").compute(),
)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_add_attrs(obj):
def add_attrs(obj):
obj = obj.copy(deep=True)
obj.attrs["new"] = "new"
obj.cxy.attrs["new2"] = "new2"
return obj
expected = add_attrs(obj)
with raise_if_dask_computes():
actual = xr.map_blocks(add_attrs, obj)
assert_identical(actual, expected)
# when template is specified, attrs are copied from template, not set by function
with raise_if_dask_computes():
actual = xr.map_blocks(add_attrs, obj, template=obj)
assert_identical(actual, obj)
def test_map_blocks_change_name(map_da):
def change_name(obj):
obj = obj.copy(deep=True)
obj.name = "new"
return obj
expected = change_name(map_da)
with raise_if_dask_computes():
actual = xr.map_blocks(change_name, map_da)
assert_identical(actual, expected)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_kwargs(obj):
expected = xr.full_like(obj, fill_value=np.nan)
with raise_if_dask_computes():
actual = xr.map_blocks(xr.full_like, obj, kwargs=dict(fill_value=np.nan))
assert_chunks_equal(expected.chunk(), actual)
assert_identical(actual, expected)
def test_map_blocks_to_dataarray(map_ds):
with raise_if_dask_computes():
actual = xr.map_blocks(lambda x: x.to_dataarray(), map_ds)
# to_dataarray does not preserve name, so cannot use assert_identical
assert_equal(actual, map_ds.to_dataarray())
@pytest.mark.parametrize(
"func",
[
lambda x: x,
lambda x: x.to_dataset(),
lambda x: x.drop_vars("x"),
lambda x: x.expand_dims(k=[1, 2, 3]),
lambda x: x.expand_dims(k=3),
lambda x: x.assign_coords(new_coord=("y", x.y.data * 2)),
lambda x: x.astype(np.int32),
lambda x: x.x,
],
)
def test_map_blocks_da_transformations(func, map_da):
with raise_if_dask_computes():
actual = xr.map_blocks(func, map_da)
assert_identical(actual, func(map_da))
@pytest.mark.parametrize(
"func",
[
lambda x: x,
lambda x: x.drop_vars("cxy"),
lambda x: x.drop_vars("a"),
lambda x: x.drop_vars("x"),
lambda x: x.expand_dims(k=[1, 2, 3]),
lambda x: x.expand_dims(k=3),
lambda x: x.rename({"a": "new1", "b": "new2"}),
lambda x: x.x,
],
)
def test_map_blocks_ds_transformations(func, map_ds):
with raise_if_dask_computes():
actual = xr.map_blocks(func, map_ds)
assert_identical(actual, func(map_ds))
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_da_ds_with_template(obj):
func = lambda x: x.isel(x=[1])
# a simple .isel(x=[1, 5, 9]) puts all those in a single chunk.
template = xr.concat([obj.isel(x=[i]) for i in [1, 5, 9]], data_vars=None, dim="x")
with raise_if_dask_computes():
actual = xr.map_blocks(func, obj, template=template)
assert_identical(actual, template)
# Check that indexes are written into the graph directly
dsk = dict(actual.__dask_graph__())
assert {k for k in dsk if "x-coordinate" in k}
assert all(
isinstance(v, PandasIndex) for k, v in dsk.items() if "x-coordinate" in k
)
with raise_if_dask_computes():
actual = obj.map_blocks(func, template=template)
assert_identical(actual, template)
def test_map_blocks_roundtrip_string_index():
ds = xr.Dataset(
{"data": (["label"], [1, 2, 3])}, coords={"label": ["foo", "bar", "baz"]}
).chunk(label=1)
assert ds.label.dtype == np.dtype("=U3")
mapped = ds.map_blocks(lambda x: x, template=ds)
assert mapped.label.dtype == ds.label.dtype
mapped = ds.map_blocks(lambda x: x, template=None)
assert mapped.label.dtype == ds.label.dtype
mapped = ds.data.map_blocks(lambda x: x, template=ds.data)
assert mapped.label.dtype == ds.label.dtype
mapped = ds.data.map_blocks(lambda x: x, template=None)
assert mapped.label.dtype == ds.label.dtype
def test_map_blocks_template_convert_object():
da = make_da()
ds = da.to_dataset()
func = lambda x: x.to_dataset().isel(x=[1])
template = xr.concat([da.to_dataset().isel(x=[i]) for i in [1, 5, 9]], dim="x")
with raise_if_dask_computes():
actual = xr.map_blocks(func, da, template=template)
assert_identical(actual, template)
func = lambda x: x.to_dataarray().isel(x=[1])
template = xr.concat([ds.to_dataarray().isel(x=[i]) for i in [1, 5, 9]], dim="x")
with raise_if_dask_computes():
actual = xr.map_blocks(func, ds, template=template)
assert_identical(actual, template)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_errors_bad_template(obj):
with pytest.raises(ValueError, match=r"unexpected coordinate variables"):
xr.map_blocks(lambda x: x.assign_coords(a=10), obj, template=obj).compute()
with pytest.raises(ValueError, match=r"does not contain coordinate variables"):
xr.map_blocks(lambda x: x.drop_vars("cxy"), obj, template=obj).compute()
with pytest.raises(ValueError, match=r"Dimensions {'x'} missing"):
xr.map_blocks(lambda x: x.isel(x=1), obj, template=obj).compute()
with pytest.raises(ValueError, match=r"Received dimension 'x' of length 1"):
xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=obj).compute()
with pytest.raises(TypeError, match=r"must be a DataArray"):
xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=(obj,)).compute() # type: ignore[arg-type]
with pytest.raises(ValueError, match=r"map_blocks requires that one block"):
xr.map_blocks(
lambda x: x.isel(x=[1]).assign_coords(x=10), obj, template=obj.isel(x=[1])
).compute()
with pytest.raises(ValueError, match=r"Expected index 'x' to be"):
xr.map_blocks(
lambda a: a.isel(x=[1]).assign_coords(x=[120]), # assign bad index values
obj,
template=xr.concat(
[obj.isel(x=[i]) for i in [1, 5, 9]], data_vars=None, dim="x"
),
).compute()
def test_map_blocks_errors_bad_template_2(map_ds):
with pytest.raises(ValueError, match=r"unexpected data variables {'xyz'}"):
xr.map_blocks(lambda x: x.assign(xyz=1), map_ds, template=map_ds).compute()
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_object_method(obj):
def func(obj):
result = obj + obj.x + 5 * obj.y
return result
with raise_if_dask_computes():
expected = xr.map_blocks(func, obj)
actual = obj.map_blocks(func)
assert_identical(expected, actual)
def test_map_blocks_hlg_layers():
# regression test for #3599
ds = xr.Dataset(
{
"x": (("a",), dask.array.ones(10, chunks=(5,))),
"z": (("b",), dask.array.ones(10, chunks=(5,))),
}
)
mapped = ds.map_blocks(lambda x: x)
xr.testing.assert_equal(mapped, ds)
def test_make_meta(map_ds):
from xarray.core.parallel import make_meta
meta = make_meta(map_ds)
for variable in map_ds._coord_names:
assert variable in meta._coord_names
assert meta.coords[variable].shape == (0,) * meta.coords[variable].ndim
for variable in map_ds.data_vars:
assert variable in meta.data_vars
assert meta.data_vars[variable].shape == (0,) * meta.data_vars[variable].ndim
def test_identical_coords_no_computes():
lons2 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x"))
a = xr.DataArray(
da.zeros((10, 10), chunks=2), dims=("y", "x"), coords={"lons": lons2}
)
b = xr.DataArray(
da.zeros((10, 10), chunks=2), dims=("y", "x"), coords={"lons": lons2}
)
with raise_if_dask_computes():
c = a + b
assert_identical(c, a)
@pytest.mark.parametrize(
"obj", [make_da(), make_da().compute(), make_ds(), make_ds().compute()]
)
@pytest.mark.parametrize(
"transform",
[
lambda x: x.reset_coords(),
lambda x: x.reset_coords(drop=True),
lambda x: x.isel(x=1),
lambda x: x.attrs.update(new_attrs=1),
lambda x: x.assign_coords(cxy=1),
lambda x: x.rename({"x": "xnew"}),
lambda x: x.rename({"cxy": "cxynew"}),
],
)
def test_token_changes_on_transform(obj, transform):
with raise_if_dask_computes():
assert dask.base.tokenize(obj) != dask.base.tokenize(transform(obj))
@pytest.mark.parametrize(
"obj", [make_da(), make_da().compute(), make_ds(), make_ds().compute()]
)
def test_token_changes_when_data_changes(obj):
with raise_if_dask_computes():
t1 = dask.base.tokenize(obj)
# Change data_var
if isinstance(obj, DataArray):
obj *= 2
else:
obj["a"] *= 2
with raise_if_dask_computes():
t2 = dask.base.tokenize(obj)
assert t2 != t1
# Change non-index coord
obj.coords["ndcoord"] *= 2
with raise_if_dask_computes():
t3 = dask.base.tokenize(obj)
assert t3 != t2
# Change IndexVariable
obj = obj.assign_coords(x=obj.x * 2)
with raise_if_dask_computes():
t4 = dask.base.tokenize(obj)
assert t4 != t3
@pytest.mark.parametrize("obj", [make_da().compute(), make_ds().compute()])
def test_token_changes_when_buffer_changes(obj):
with raise_if_dask_computes():
t1 = dask.base.tokenize(obj)
if isinstance(obj, DataArray):
obj[0, 0] = 123
else:
obj["a"][0, 0] = 123
with raise_if_dask_computes():
t2 = dask.base.tokenize(obj)
assert t2 != t1
obj.coords["ndcoord"][0] = 123
with raise_if_dask_computes():
t3 = dask.base.tokenize(obj)
assert t3 != t2
@pytest.mark.parametrize(
"transform",
[lambda x: x, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)],
)
@pytest.mark.parametrize("obj", [make_da(), make_ds(), make_ds().variables["a"]])
def test_token_identical(obj, transform):
with raise_if_dask_computes():
assert dask.base.tokenize(obj) == dask.base.tokenize(transform(obj))
assert dask.base.tokenize(obj.compute()) == dask.base.tokenize(
transform(obj.compute())
)
@pytest.mark.parametrize(
"obj",
[
make_ds(), # Dataset
make_ds().variables["c2"], # Variable
make_ds().variables["x"], # IndexVariable
],
)
def test_tokenize_empty_attrs(obj):
"""Issues #6970 and #8788"""
obj.attrs = {}
assert obj._attrs is None
a = dask.base.tokenize(obj)
assert obj.attrs == {}
assert obj._attrs == {} # attrs getter changed None to dict
b = dask.base.tokenize(obj)
assert a == b
obj2 = obj.copy()
c = dask.base.tokenize(obj2)
assert a == c
def test_recursive_token():
"""Test that tokenization is invoked recursively, and doesn't just rely on the
output of str()
"""
a = np.ones(10000)
b = np.ones(10000)
b[5000] = 2
assert str(a) == str(b)
assert dask.base.tokenize(a) != dask.base.tokenize(b)
# Test DataArray and Variable
da_a = DataArray(a)
da_b = DataArray(b)
assert dask.base.tokenize(da_a) != dask.base.tokenize(da_b)
# Test Dataset
ds_a = da_a.to_dataset(name="x")
ds_b = da_b.to_dataset(name="x")
assert dask.base.tokenize(ds_a) != dask.base.tokenize(ds_b)
# Test IndexVariable
da_a = DataArray(a, dims=["x"], coords={"x": a})
da_b = DataArray(a, dims=["x"], coords={"x": b})
assert dask.base.tokenize(da_a) != dask.base.tokenize(da_b)
@requires_scipy_or_netCDF4
def test_normalize_token_with_backend(map_ds):
with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp_file:
map_ds.to_netcdf(tmp_file)
read = xr.open_dataset(tmp_file)
assert dask.base.tokenize(map_ds) != dask.base.tokenize(read)
read.close()
@pytest.mark.parametrize(
    "compat", ["broadcast_equals", "equals", "identical", "no_conflicts"]
)
def test_lazy_array_equiv_variables(compat):
    """Lazy Variable comparisons decide equivalence from graph/shape alone
    whenever possible, computing nothing; otherwise they return None."""
    var1 = xr.Variable(("y", "x"), da.zeros((10, 10), chunks=2))
    var2 = xr.Variable(("y", "x"), da.zeros((10, 10), chunks=2))
    var3 = xr.Variable(("y", "x"), da.zeros((20, 10), chunks=2))
    # identical dask graphs: equivalence decided without any compute
    with raise_if_dask_computes():
        assert getattr(var1, compat)(var2, equiv=lazy_array_equiv)
    # values are actually equal, but we don't know that till we compute, return None
    with raise_if_dask_computes():
        assert getattr(var1, compat)(var2 / 2, equiv=lazy_array_equiv) is None
    # shapes are not equal, return False without computes
    with raise_if_dask_computes():
        assert getattr(var1, compat)(var3, equiv=lazy_array_equiv) is False
    # if one or both arrays are numpy, return None
    assert getattr(var1, compat)(var2.compute(), equiv=lazy_array_equiv) is None
    assert (
        getattr(var1.compute(), compat)(var2.compute(), equiv=lazy_array_equiv) is None
    )
    # transpose only rewrites the graph, so the comparison stays lazy
    with raise_if_dask_computes():
        assert getattr(var1, compat)(var2.transpose("y", "x"))
@pytest.mark.parametrize(
    "compat", ["broadcast_equals", "equals", "identical", "no_conflicts"]
)
def test_lazy_array_equiv_merge(compat):
    """Merging lazy arrays computes only when values must actually be compared."""
    lazy_a = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x"))
    lazy_b = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x"))
    lazy_c = xr.DataArray(da.ones((20, 10), chunks=2), dims=("y", "x"))
    # identical graphs merge with no computation at all
    with raise_if_dask_computes():
        xr.merge([lazy_a, lazy_b], compat=compat)
    # mismatched shapes are rejected without computing anything
    with raise_if_dask_computes(max_computes=0):
        with pytest.raises(ValueError):
            xr.merge([lazy_a, lazy_c], compat=compat)
    # equal values but different graphs: at most one compute per operand
    with raise_if_dask_computes(max_computes=2):
        xr.merge([lazy_a, lazy_b / 2], compat=compat)
@pytest.mark.filterwarnings("ignore::FutureWarning")  # transpose_coords
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
@pytest.mark.parametrize(
    "transform",
    [
        lambda a: a.assign_attrs(new_attr="anew"),
        lambda a: a.assign_coords(cxy=a.cxy),
        lambda a: a.copy(),
        lambda a: a.isel(x=slice(None)),
        lambda a: a.loc[dict(x=slice(None))],
        lambda a: a.transpose(...),
        lambda a: a.squeeze(),  # no dimensions to squeeze
        lambda a: a.reindex(x=a.x),
        lambda a: a.reindex_like(a),
        lambda a: a.rename({"cxy": "cnew"}).rename({"cnew": "cxy"}),
        lambda a: a.pipe(lambda x: x),
        lambda a: xr.align(a, xr.zeros_like(a))[0],
        # assign
        # swap_dims
        # set_index / reset_index
    ],
)
def test_transforms_pass_lazy_array_equiv(obj, transform):
    """Each of the no-op transforms above must compare equal to the original
    without triggering any dask computation (i.e. equality short-circuits
    through lazy_array_equiv)."""
    with raise_if_dask_computes():
        assert_equal(obj, transform(obj))
def test_more_transforms_pass_lazy_array_equiv(map_da, map_ds):
    """Additional no-op transforms (broadcast, map, coord round-trips, rename
    round-trips, astype to same dtype, ...) must also compare equal without
    triggering any dask computation."""
    with raise_if_dask_computes():
        assert_equal(map_ds.cxy.broadcast_like(map_ds.cxy), map_ds.cxy)
        assert_equal(xr.broadcast(map_ds.cxy, map_ds.cxy)[0], map_ds.cxy)
        assert_equal(map_ds.map(lambda x: x), map_ds)
        assert_equal(map_ds.set_coords("a").reset_coords("a"), map_ds)
        assert_equal(map_ds.assign({"a": map_ds.a}), map_ds)
        # fails because of index error
        # assert_equal(
        #     map_ds.rename_dims({"x": "xnew"}).rename_dims({"xnew": "x"}), map_ds
        # )
        assert_equal(
            map_ds.rename_vars({"cxy": "cnew"}).rename_vars({"cnew": "cxy"}), map_ds
        )
        assert_equal(map_da._from_temp_dataset(map_da._to_temp_dataset()), map_da)
        assert_equal(map_da.astype(map_da.dtype), map_da)
        assert_equal(map_da.transpose("y", "x", transpose_coords=False).cxy, map_da.cxy)
def test_optimize():
    """dask.optimize on a rechunked DataArray must yield a computable result.

    Regression test: https://github.com/pydata/xarray/issues/3698
    """
    source = dask.array.ones((10, 4), chunks=(5, 2))
    rechunked = xr.DataArray(source).chunk(5)
    (optimized,) = dask.optimize(rechunked)
    optimized.compute()
def test_graph_manipulation():
    """dask.graph_manipulation passes an optional parameter, "rename", to the rebuilder
    function returned by __dask_postpersist__; also, the dsk passed to the rebuilder is
    a HighLevelGraph whereas with dask.persist() and dask.optimize() it's a plain dict.
    """
    import dask.graph_manipulation as gm

    v = Variable(["x"], [1, 2]).chunk(-1).chunk(1) * 2
    da = DataArray(v)
    ds = Dataset({"d1": v[0], "d2": v[1], "d3": ("x", [3, 4])})

    v2, da2, ds2 = gm.clone(v, da, ds)

    # Clones must be value-equal to the originals ...
    assert_equal(v2, v)
    assert_equal(da2, da)
    assert_equal(ds2, ds)

    # ... but must be backed by renamed (distinct) graph layers and keys,
    # with the same overall graph structure.
    for a, b in ((v, v2), (da, da2), (ds, ds2)):
        assert a.__dask_layers__() != b.__dask_layers__()
        assert len(a.__dask_layers__()) == len(b.__dask_layers__())
        assert a.__dask_graph__().keys() != b.__dask_graph__().keys()  # type: ignore[union-attr]
        assert len(a.__dask_graph__()) == len(b.__dask_graph__())  # type: ignore[arg-type]
        assert a.__dask_graph__().layers.keys() != b.__dask_graph__().layers.keys()  # type: ignore[union-attr]
        assert len(a.__dask_graph__().layers) == len(b.__dask_graph__().layers)  # type: ignore[union-attr]

    # Above we performed a slice operation; adding the two slices back together creates
    # a diamond-shaped dependency graph, which in turn will trigger a collision in layer
    # names if we were to use HighLevelGraph.cull() instead of
    # HighLevelGraph.cull_layers() in Dataset.__dask_postpersist__().
    assert_equal(ds2.d1 + ds2.d2, ds.d1 + ds.d2)
def test_new_index_var_computes_once():
    """Building an index coordinate from a dask array computes it at most once."""
    # regression test for GH1533
    data = dask.array.from_array(np.array([100, 200]))
    with raise_if_dask_computes(max_computes=1):
        Dataset(coords={"z": ("z", data)})
def test_minimize_graph_size():
    # regression test for https://github.com/pydata/xarray/issues/8409
    ds = Dataset(
        {
            "foo": (
                ("x", "y", "z"),
                dask.array.ones((120, 120, 120), chunks=(20, 20, 1)),
            )
        },
        coords={"x": np.arange(120), "y": np.arange(120), "z": np.arange(120)},
    )

    mapped = ds.map_blocks(lambda x: x)
    graph = dict(mapped.__dask_graph__())

    numchunks = {k: len(v) for k, v in ds.chunksizes.items()}
    for var in "xyz":
        actual = len([key for key in graph if var in key[0]])
        # assert that each chunk of an index variable is included only once
        # in the graph, not multiplied by the number of chunks of all the
        # other dimensions.
        # e.g. previously for 'x', actual == numchunks['y'] * numchunks['z']
        assert actual == numchunks[var], (actual, numchunks[var])
def test_idxmin_chunking():
    """Lazy idxmin over a non-chunked dim preserves the remaining chunking.

    Regression test for GH9425.
    """
    x, y, t = 100, 100, 10
    values = np.arange(t * x * y)
    arr = xr.DataArray(
        values.reshape(t, x, y),
        coords={"time": range(t), "x": range(x), "y": range(y)},
    )
    arr = arr.chunk(dict(time=-1, x=25, y=25))
    lazy_result = arr.idxmin("time")
    # "time" is reduced away; the spatial chunking must survive intact
    assert lazy_result.chunksizes == {dim: arr.chunksizes[dim] for dim in ["x", "y"]}
    assert_identical(lazy_result, arr.compute().idxmin("time"))
def test_conjugate():
    """Conjugation of a dask-backed DataArray stays lazy and matches .conj().

    Regression test for https://github.com/pydata/xarray/issues/10302
    """
    imag_values = 1j * da.arange(100)
    arr = xr.DataArray(imag_values, coords={"x": np.arange(100)})
    conjugated = arr.conjugate()
    assert dask.is_dask_collection(conjugated)
    assert_equal(conjugated, arr.conj())
| TestToDaskDataFrame |
python | walkccc__LeetCode | solutions/3301. Maximize the Total Height of Unique Towers/3301.py | {
"start": 0,
"end": 301
} | class ____:
def maximumTotalSum(self, maximumHeight: list[int]) -> int:
ans = 0
mn = math.inf
for height in sorted(maximumHeight, reverse=True):
assigned = min(height, mn - 1)
if assigned == 0:
return -1
ans += assigned
mn = assigned
return ans
| Solution |
python | pytest-dev__pytest-mock | tests/test_pytest_mock.py | {
"start": 1541,
"end": 5926
} | class ____:
"""
Class that is used for testing create_autospec with child mocks
"""
def run(self) -> str:
return "not mocked"
@pytest.fixture
def check_unix_fs_mocked(
tmpdir: Any, mocker: MockerFixture
) -> Callable[[Any, Any], None]:
"""
performs a standard test in a UnixFS, assuming that both `os.remove` and
`os.listdir` have been mocked previously.
"""
def check(mocked_rm, mocked_ls):
assert mocked_rm is os.remove
assert mocked_ls is os.listdir
file_name = tmpdir / "foo.txt"
file_name.ensure()
UnixFS.rm(str(file_name))
mocked_rm.assert_called_once_with(str(file_name))
assert os.path.isfile(str(file_name))
mocked_ls.return_value = ["bar.txt"]
assert UnixFS.ls(str(tmpdir)) == ["bar.txt"]
mocked_ls.assert_called_once_with(str(tmpdir))
mocker.stopall()
assert UnixFS.ls(str(tmpdir)) == ["foo.txt"]
UnixFS.rm(str(file_name))
assert not os.path.isfile(str(file_name))
return check
def mock_using_patch_object(mocker: MockerFixture) -> tuple[MagicMock, MagicMock]:
return mocker.patch.object(os, "remove"), mocker.patch.object(os, "listdir")
def mock_using_patch(mocker: MockerFixture) -> tuple[MagicMock, MagicMock]:
return mocker.patch("os.remove"), mocker.patch("os.listdir")
def mock_using_patch_multiple(mocker: MockerFixture) -> tuple[MagicMock, MagicMock]:
r = mocker.patch.multiple("os", remove=mocker.DEFAULT, listdir=mocker.DEFAULT)
return r["remove"], r["listdir"]
@pytest.mark.parametrize(
"mock_fs", [mock_using_patch_object, mock_using_patch, mock_using_patch_multiple]
)
def test_mock_patches(
mock_fs: Any,
mocker: MockerFixture,
check_unix_fs_mocked: Callable[[Any, Any], None],
) -> None:
"""
Installs mocks into `os` functions and performs a standard testing of
mock functionality. We parametrize different mock methods to ensure
all (intended, at least) mock API is covered.
"""
# mock it twice on purpose to ensure we unmock it correctly later
mock_fs(mocker)
mocked_rm, mocked_ls = mock_fs(mocker)
check_unix_fs_mocked(mocked_rm, mocked_ls)
mocker.resetall()
mocker.stopall()
def test_mock_patch_dict(mocker: MockerFixture) -> None:
"""
Testing
:param mock:
"""
x = {"original": 1}
mocker.patch.dict(x, values=[("new", 10)], clear=True)
assert x == {"new": 10}
mocker.stopall()
assert x == {"original": 1}
def test_mock_patch_dict_resetall(mocker: MockerFixture) -> None:
"""
We can call resetall after patching a dict.
:param mock:
"""
x = {"original": 1}
mocker.patch.dict(x, values=[("new", 10)], clear=True)
assert x == {"new": 10}
mocker.resetall()
assert x == {"new": 10}
@pytest.mark.parametrize(
"name",
[
"ANY",
"call",
"MagicMock",
"Mock",
"mock_open",
"NonCallableMagicMock",
"NonCallableMock",
"PropertyMock",
"sentinel",
"seal",
],
)
def test_mocker_aliases(name: str, pytestconfig: Any) -> None:
from pytest_mock._util import get_mock_module
mock_module = get_mock_module(pytestconfig)
mocker = MockerFixture(pytestconfig)
assert getattr(mocker, name) is getattr(mock_module, name)
def test_mocker_resetall(mocker: MockerFixture) -> None:
listdir = mocker.patch("os.listdir", return_value="foo")
open = mocker.patch("os.open", side_effect=["bar", "baz"])
mocked_object = mocker.create_autospec(TestObject)
mocked_object.run.return_value = "mocked"
assert listdir("/tmp") == "foo"
assert open("/tmp/foo.txt") == "bar"
assert mocked_object.run() == "mocked"
listdir.assert_called_once_with("/tmp")
open.assert_called_once_with("/tmp/foo.txt")
mocked_object.run.assert_called_once()
mocker.resetall()
assert not listdir.called
assert not open.called
assert not mocked_object.called
assert listdir.return_value == "foo"
assert list(open.side_effect) == ["baz"]
assert mocked_object.run.return_value == "mocked"
mocker.resetall(return_value=True, side_effect=True)
assert isinstance(listdir.return_value, mocker.Mock)
assert open.side_effect is None
assert mocked_object.run.return_value != "mocked"
| TestObject |
python | numba__numba | numba/tests/test_dispatcher.py | {
"start": 1906,
"end": 2249
} | class ____(TestCase):
jit_args = dict(nopython=True)
def compile_func(self, pyfunc):
def check(*args, **kwargs):
expected = pyfunc(*args, **kwargs)
result = f(*args, **kwargs)
self.assertPreciseEqual(result, expected)
f = jit(**self.jit_args)(pyfunc)
return f, check
| BaseTest |
python | huggingface__transformers | tests/models/gpt_neox/test_modeling_gpt_neox.py | {
"start": 1350,
"end": 11245
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=64,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.pad_token_id = vocab_size - 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_labels = None
if self.use_labels:
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = self.get_config()
return config, input_ids, input_mask, token_labels
def get_config(self):
return GPTNeoXConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
)
def prepare_config_and_inputs_for_decoder(self):
config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
config.is_decoder = True
return config, input_ids, input_mask, token_labels
def create_and_check_model(self, config, input_ids, input_mask):
model = GPTNeoXModel(config=config)
model.to(torch_device)
model.eval()
_ = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
config.add_cross_attention = True
model = GPTNeoXModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
model = GPTNeoXForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
config.num_labels = self.num_labels
model = GPTNeoXForQuestionAnswering(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
config.num_labels = self.num_labels
model = GPTNeoXForSequenceClassification(config)
model.to(torch_device)
model.eval()
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
config.num_labels = self.num_labels
model = GPTNeoXForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
config.is_decoder = True
model = GPTNeoXForCausalLM(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
past_key_values = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
output_from_no_past = output_from_no_past["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_cached_forward_with_and_without_attention_mask(self, config, input_ids, *args):
# Relevant issue: https://github.com/huggingface/transformers/issues/31943
model = GPTNeoXModel(config)
model.to(torch_device)
model.eval()
# We want this for SDPA, eager works with a `None` attention mask
assert model.config._attn_implementation == "sdpa", (
"This test assumes the model to have the SDPA implementation for its attention calculations."
)
# Prepare cache and non_cache input, needs a full attention mask
cached_len = input_ids.shape[-1] // 2
input_mask = torch.ones(size=input_ids.size()).to(torch_device)
cache_inputs = {"input_ids": input_ids[:, :cached_len], "attention_mask": input_mask[:, :cached_len]}
non_cache_inputs = {"input_ids": input_ids[:, cached_len:], "attention_mask": input_mask}
def copy_cache(cache: DynamicCache):
"""Deep copy a DynamicCache to reuse the same one multiple times."""
new_cache = cache
for i in range(len(cache)):
new_cache.layers[i].keys = cache.layers[i].keys.clone()
new_cache.layers[i].values = cache.layers[i].values.clone()
# Cached forward once with the attention mask provided and the other time without it (which should assume full attention)
# We need to run both on a copy of the cache, otherwise it is modified in-place
cache_outputs = model(**cache_inputs)
cache = cache_outputs.past_key_values
full_outputs_with_attention_mask = model(
**non_cache_inputs, past_key_values=copy_cache(cache)
).last_hidden_state
full_outputs_without_attention_mask = model(
non_cache_inputs["input_ids"], past_key_values=copy_cache(cache)
).last_hidden_state
self.parent.assertTrue(
torch.allclose(full_outputs_with_attention_mask, full_outputs_without_attention_mask, atol=1e-5)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask, token_labels = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| GPTNeoXModelTester |
python | django-debug-toolbar__django-debug-toolbar | tests/models.py | {
"start": 176,
"end": 281
} | class ____(models.Model):
field = models.BinaryField()
def __str__(self):
return ""
| Binary |
python | pydantic__pydantic | .github/actions/people/people.py | {
"start": 4917,
"end": 5069
} | class ____(BaseModel):
"""Represents a repository's discussions in the GitHub GraphQL response."""
discussions: Discussions
| DiscussionsRepository |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/dml.py | {
"start": 10929,
"end": 12297
} | class ____(DMLState):
isupdate = True
include_table_with_column_exprs = False
def __init__(self, statement: Update, compiler: SQLCompiler, **kw: Any):
self.statement = statement
self.isupdate = True
if statement._maintain_values_ordering:
self._process_ordered_values(statement)
elif statement._values is not None:
self._process_values(statement)
elif statement._multi_values:
self._no_multi_values_supported(statement)
t, ef = self._make_extra_froms(statement)
self._primary_table = t
self._extra_froms = ef
self.is_multitable = mt = ef
self.include_table_with_column_exprs = bool(
mt and compiler.render_table_with_column_in_update_from
)
def _process_ordered_values(self, statement: ValuesBase) -> None:
parameters = statement._values
if self._no_parameters:
self._no_parameters = False
assert parameters is not None
self._dict_parameters = dict(parameters)
self._maintain_values_ordering = True
else:
raise exc.InvalidRequestError(
"Can only invoke ordered_values() once, and not mixed "
"with any other values() call"
)
@CompileState.plugin_for("default", "delete")
| UpdateDMLState |
python | jazzband__django-oauth-toolkit | oauth2_provider/exceptions.py | {
"start": 0,
"end": 321
} | class ____(Exception):
"""
Base class for exceptions
"""
def __init__(self, error=None, redirect_uri=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.oauthlib_error = error
if redirect_uri:
self.oauthlib_error.redirect_uri = redirect_uri
| OAuthToolkitError |
python | walkccc__LeetCode | solutions/367. Valid Perfect Square/367.py | {
"start": 0,
"end": 151
} | class ____:
def isPerfectSquare(self, num: int) -> bool:
l = bisect.bisect_left(range(num), num, key=lambda m: m * m)
return l**2 == num
| Solution |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/dagster/subschema/telemetry.py | {
"start": 33,
"end": 95
} | class ____(BaseModel, extra="forbid"):
enabled: bool
| Telemetry |
python | plotly__plotly.py | plotly/graph_objs/layout/geo/_center.py | {
"start": 235,
"end": 3536
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.geo"
_path_str = "layout.geo.center"
_valid_props = {"lat", "lon"}
@property
def lat(self):
"""
Sets the latitude of the map's center. For all projection
types, the map's latitude center lies at the middle of the
latitude range by default.
The 'lat' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["lat"]
@lat.setter
def lat(self, val):
self["lat"] = val
@property
def lon(self):
"""
Sets the longitude of the map's center. By default, the map's
longitude center lies at the middle of the longitude range for
scoped projection and above `projection.rotation.lon`
otherwise.
The 'lon' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["lon"]
@lon.setter
def lon(self, val):
self["lon"] = val
@property
def _prop_descriptions(self):
return """\
lat
Sets the latitude of the map's center. For all
projection types, the map's latitude center lies at the
middle of the latitude range by default.
lon
Sets the longitude of the map's center. By default, the
map's longitude center lies at the middle of the
longitude range for scoped projection and above
`projection.rotation.lon` otherwise.
"""
def __init__(self, arg=None, lat=None, lon=None, **kwargs):
"""
Construct a new Center object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.geo.Center`
lat
Sets the latitude of the map's center. For all
projection types, the map's latitude center lies at the
middle of the latitude range by default.
lon
Sets the longitude of the map's center. By default, the
map's longitude center lies at the middle of the
longitude range for scoped projection and above
`projection.rotation.lon` otherwise.
Returns
-------
Center
"""
super().__init__("center")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.geo.Center
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.geo.Center`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("lat", arg, lat)
self._set_property("lon", arg, lon)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Center |
python | kamyu104__LeetCode-Solutions | Python/maximum-bags-with-full-capacity-of-rocks.py | {
"start": 48,
"end": 541
} | class ____(object):
def maximumBags(self, capacity, rocks, additionalRocks):
"""
:type capacity: List[int]
:type rocks: List[int]
:type additionalRocks: int
:rtype: int
"""
for i in xrange(len(capacity)):
capacity[i] -= rocks[i]
capacity.sort()
for i, c in enumerate(capacity):
if c > additionalRocks:
return i
additionalRocks -= c
return len(capacity)
| Solution |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 17502,
"end": 17634
} | class ____(AbstractExternal):
name = models.CharField(max_length=50)
class Meta:
app_label = "tests"
| ConcreteExternal |
python | PyCQA__pylint | tests/functional/a/arguments_renamed.py | {
"start": 411,
"end": 715
} | class ____(Fruit):
def brew(self, orange_name: str): # [arguments-renamed]
print(f"Brewing an orange named {orange_name}")
def eat_with_condiment(self, orange_name: str, condiment: Condiment): #[arguments-renamed]
print(f"Eating a fruit named {orange_name} with {condiment}")
| Orange |
python | has2k1__plotnine | plotnine/coords/coord_fixed.py | {
"start": 198,
"end": 1763
} | class ____(coord_cartesian):
"""
Cartesian coordinates with fixed relationship between x and y scales
Parameters
----------
ratio : float
Desired aspect_ratio (:math:`y/x`) of the panel(s).
xlim : tuple[float, float]
Limits for x axis. If None, then they are automatically computed.
ylim : tuple[float, float]
Limits for y axis. If None, then they are automatically computed.
expand : bool
If `True`, expand the coordinate axes by some factor. If `False`,
use the limits from the data.
Notes
-----
To specify aspect ratio of the visual size for the axes use the
[](`~plotnine.themes.themeable.aspect_ratio`) themeable.
```python
ggplot(data, aes('x', 'y')) + theme(aspect_ratio=0.5)
```
When changing the `aspect_ratio` in either way, the `width` of the
panel remains constant (as derived from the
[](`plotnine.themes.themeable.figure_size`) themeable) and the
`height` is altered to achieve desired ratio.
"""
ratio: float
def __init__(
self,
ratio: float = 1,
xlim: Optional[tuple[float, float]] = None,
ylim: Optional[tuple[float, float]] = None,
expand: bool = True,
):
super().__init__(xlim=xlim, ylim=ylim, expand=expand)
self.ratio = ratio
def aspect(self, panel_params: panel_view) -> float | None:
x = panel_params.x.range
y = panel_params.y.range
return (y[1] - y[0]) / (x[1] - x[0]) * self.ratio
coord_equal = coord_fixed
| coord_fixed |
python | pallets__quart | src/quart/testing/app.py | {
"start": 449,
"end": 2727
} | class ____:
def __init__(
self,
app: Quart,
startup_timeout: int = DEFAULT_TIMEOUT,
shutdown_timeout: int = DEFAULT_TIMEOUT,
) -> None:
self.app = app
self.startup_timeout = startup_timeout
self.shutdown_timeout = shutdown_timeout
self._startup = asyncio.Event()
self._shutdown = asyncio.Event()
self._app_queue: asyncio.Queue = asyncio.Queue()
self._task: Awaitable[None] = None
def test_client(self) -> TestClientProtocol:
return self.app.test_client()
async def startup(self) -> None:
scope: LifespanScope = {
"type": "lifespan",
"asgi": {"spec_version": "2.0"},
"state": {},
}
self._task = asyncio.ensure_future(
self.app(scope, self._asgi_receive, self._asgi_send)
)
await self._app_queue.put({"type": "lifespan.startup"})
await asyncio.wait_for(self._startup.wait(), timeout=self.startup_timeout)
if self._task.done():
# This will re-raise any exceptions in the task
await self._task
async def shutdown(self) -> None:
await self._app_queue.put({"type": "lifespan.shutdown"})
await asyncio.wait_for(self._shutdown.wait(), timeout=self.shutdown_timeout)
await self._task
async def __aenter__(self) -> TestApp:
await self.startup()
return self
async def __aexit__(
self, exc_type: type, exc_value: BaseException, tb: TracebackType
) -> None:
await self.shutdown()
async def _asgi_receive(self) -> ASGIReceiveEvent:
return await self._app_queue.get()
async def _asgi_send(self, message: ASGISendEvent) -> None:
if message["type"] == "lifespan.startup.complete":
self._startup.set()
elif message["type"] == "lifespan.shutdown.complete":
self._shutdown.set()
elif message["type"] == "lifespan.startup.failed":
self._startup.set()
raise LifespanError(f"Error during startup {message['message']}")
elif message["type"] == "lifespan.shutdown.failed":
self._shutdown.set()
raise LifespanError(f"Error during shutdown {message['message']}")
| TestApp |
python | geekcomputers__Python | PingPong/Slab.py | {
"start": 31,
"end": 1192
} | class ____:
def __init__(self, win, size, pos, player, minPos, maxPos):
self.win = win
self.size = size
self.pos = pos
self.player = player # player = 1 or 2
self.minPos = minPos
self.maxPos = maxPos
def draw(self):
pygame.draw.rect(
self.win,
(255, 255, 255),
(self.pos[0], self.pos[1], self.size[0], self.size[1]),
)
def getCoords(self):
return [
self.pos[0],
self.pos[1],
self.pos[0] + self.size[0],
self.pos[1] + self.size[1],
]
def updatePos(self):
keys = pygame.key.get_pressed()
if self.player == 1:
if keys[pygame.K_UP] and self.getCoords()[1] > self.minPos[1]:
self.pos[1] -= 0.3
if keys[pygame.K_DOWN] and self.getCoords()[3] < self.maxPos[1]:
self.pos[1] += 0.3
else:
if keys[pygame.K_w] and self.getCoords()[1] > self.minPos[1]:
self.pos[1] -= 0.3
if keys[pygame.K_s] and self.getCoords()[3] < self.maxPos[1]:
self.pos[1] += 0.3
| Slab |
python | getsentry__sentry | src/sentry/models/debugfile.py | {
"start": 11902,
"end": 12495
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
organization_id = BoundedBigIntegerField()
project_id = BoundedBigIntegerField()
release_name = models.CharField(max_length=250)
proguard_uuid = models.UUIDField(db_index=True)
project_debug_file = FlexibleForeignKey("sentry.ProjectDebugFile")
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = "sentry"
db_table = "sentry_proguardartifactrelease"
unique_together = (("project_id", "release_name", "proguard_uuid"),)
| ProguardArtifactRelease |
python | plotly__plotly.py | plotly/graph_objs/barpolar/marker/colorbar/_tickfont.py | {
"start": 233,
"end": 9959
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "barpolar.marker.colorbar"
_path_str = "barpolar.marker.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.barpolar.marke
r.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.barpolar.marker.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.barpolar.marker.colorbar.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | kamyu104__LeetCode-Solutions | Python/max-difference-you-can-get-from-changing-an-integer.py | {
"start": 41,
"end": 1065
} | class ____(object):
def maxDiff(self, num):
"""
:type num: int
:rtype: int
"""
def find(num, check):
result = 0
while num:
num, d = divmod(num, 10)
if check(d):
result = d
return result
def reverse(num, l):
result = 0
while num or l > 0:
num, d = divmod(num, 10)
result = result*10+d
l -= 1
return result
def replace(num, x, y):
result = l = 0
while num:
num, d = divmod(num, 10)
if d == x:
d = y
result = result*10+d
l += 1
return reverse(result, l)
b = find(num, lambda x: x < 9)
a = find(num, lambda x: x > 1)
return replace(num, b, 9)-replace(num, a, 1 if reverse(num, 0)%10 != 1 else 0)
# Time: O(logn)
# Space: O(logn)
# greedy
| Solution |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0007_migrate_canonical_data.py | {
"start": 989,
"end": 1209
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0006_add_domain_models"),
]
operations = [
migrations.RunPython(migrate_canonical),
]
| Migration |
python | sqlalchemy__sqlalchemy | test/ext/test_extendedattr.py | {
"start": 2441,
"end": 3111
} | class ____(list):
# add @appender, @remover decorators as needed
_sa_iterator = list.__iter__
_sa_linker = None
def _sa_appender(self, item, _sa_initiator=None):
if _sa_initiator is not False:
self._sa_adapter.fire_append_event(item, _sa_initiator)
list.append(self, item)
append = _sa_appender
def _sa_remover(self, item, _sa_initiator=None):
self._sa_adapter.fire_pre_remove_event(_sa_initiator)
if _sa_initiator is not False:
self._sa_adapter.fire_remove_event(item, _sa_initiator)
list.remove(self, item)
remove = _sa_remover
MyBaseClass, MyClass = None, None
| MyListLike |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 114975,
"end": 116171
} | class ____(MaybeAlignPartitions):
_parameters = ["frame", "other", "op"]
_projection_passthrough = True
@functools.cached_property
def _meta(self):
return getattr(self.frame._meta, self.op)(self.other._meta)
def _lower(self):
# This can be expensive when something that has expensive division
# calculation is in the Expression
dfs = self.args
if (
len(dfs) == 1
or all(dfs[0].divisions == df.divisions for df in dfs)
or len(self.divisions) == 2
and max(map(lambda x: len(x.divisions), dfs)) == 2
):
return self._op(self.frame, self.op, self.other, *self.operands[3:])
from dask.dataframe.dask_expr._repartition import RepartitionDivisions
frame = RepartitionDivisions(
self.frame, new_divisions=self.divisions, force=True
)
other = RepartitionDivisions(
self.other, new_divisions=self.divisions, force=True
)
return self._op(frame, self.op, other, *self.operands[3:])
@staticmethod
def _op(frame, op, other, *args, **kwargs):
return getattr(frame, op)(other)
| OpAlignPartitions |
python | pytorch__pytorch | test/torch_np/test_basic.py | {
"start": 15818,
"end": 15983
} | class ____(TestCase):
def test_arrays_in_lists(self):
lst = [[1, 2], [3, w.array(4)]]
assert_equal(w.asarray(lst), [[1, 2], [3, 4]])
| TestCtorNested |
python | doocs__leetcode | solution/0600-0699/0698.Partition to K Equal Sum Subsets/Solution3.py | {
"start": 0,
"end": 679
} | class ____:
def canPartitionKSubsets(self, nums: List[int], k: int) -> bool:
s = sum(nums)
if s % k:
return False
s //= k
nums.sort()
n = len(nums)
f = [False] * (1 << n)
cur = [0] * (1 << n)
f[0] = True
for i in range(1 << n):
if not f[i]:
continue
for j in range(n):
if cur[i] + nums[j] > s:
break
if (i >> j & 1) == 0:
if not f[i | 1 << j]:
cur[i | 1 << j] = (cur[i] + nums[j]) % s
f[i | 1 << j] = True
return f[-1]
| Solution |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/filters/base.py | {
"start": 3966,
"end": 5230
} | class ____(Filter):
"""
Result of |-operation between several filters.
"""
def __init__(self, filters: list[Filter]) -> None:
super().__init__()
self.filters = filters
@classmethod
def create(cls, filters: Iterable[Filter]) -> Filter:
"""
Create a new filter by applying an `|` operator between them.
If there's only one unique filter in the given iterable, it will return
that one filter instead of an `_OrList`.
"""
filters_2: list[Filter] = []
for f in filters:
if isinstance(f, _OrList): # Turn nested _AndLists into one.
filters_2.extend(f.filters)
else:
filters_2.append(f)
# Remove duplicates. This could speed up execution, and doesn't make a
# difference for the evaluation.
filters = _remove_duplicates(filters_2)
# If only one filter is left, return that without wrapping into an
# `_AndList`.
if len(filters) == 1:
return filters[0]
return cls(filters)
def __call__(self) -> bool:
return any(f() for f in self.filters)
def __repr__(self) -> str:
return "|".join(repr(f) for f in self.filters)
| _OrList |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/match.py | {
"start": 1984,
"end": 2673
} | class ____:
a: str = ""
b: str = ""
c: str = ""
def test_match_class_pattern():
x = _test_source()
match x:
case MyClass(a):
_test_sink(a) # Issue.
case MyClass(b, a=""):
_test_sink(a) # Issue.
_test_sink(b) # Issue.
case MyClass(a, b, c):
_test_sink(a) # Issue.
_test_sink(b) # Issue.
_test_sink(c) # Issue.
o = MyClass(a="", b=_test_source(), c="")
match o:
case MyClass(a, b, c):
_test_sink(a) # No issue (currently a false positive)
_test_sink(b) # Issue.
_test_sink(c) # No issue (currently a false positive).
| MyClass |
python | jazzband__django-polymorphic | example/pexp/management/commands/polybench.py | {
"start": 2339,
"end": 2747
} | class ____(BaseCommand):
help = ""
def handle_noargs(self, **options):
func_list = [
(bench_delete, 1),
(bench_create, 1),
(bench_load1, 5),
(bench_load1_short, 5),
(bench_load2, 5),
(bench_load2_short, 5),
]
for f, iterations in func_list:
run_vanilla_any_poly(f, iterations=iterations)
| Command |
python | ansible__ansible | lib/ansible/module_utils/_internal/_patches/__init__.py | {
"start": 216,
"end": 397
} | class ____(t.Protocol):
"""Runtime-checkable protocol that allows identification of a patched function via `isinstance`."""
unpatched_implementation: t.Callable
| PatchedTarget |
python | prabhupant__python-ds | data_structures/segment_tree/min_in_range_queries.py | {
"start": 323,
"end": 3488
} | class ____:
def __init__(self, array):
self.MAX_INT = 2**31-1
self.src = array
self.len = len(array)
self.total_nodes = (2**mt.ceil(mt.log(self.len, 2))*2-1)
self.tree = [0] * self.total_nodes
self.dues = [0] * self.total_nodes
def build(self):
self.build_tree(l=0, r=self.len-1, par=0)
def build_tree(self, l, r, par):
if l == r:
self.tree[par] = self.src[l]
return
div = (l+r)//2
self.build_tree(l, div, (par<<1)+1)
self.build_tree(div+1, r, (par<<1)+2)
self.tree[par]=min(self.tree[(par<<1)+1], self.tree[(par<<1)+2])
def min_in_range(self, low, high):
return self.min_(low, high, 0, self.len-1, 0)
def min_(self, low, high, left, right, par):
if low > right or high < left:
return self.MAX_INT
if self.dues[par] > 0:
self.tree[par] += self.dues[par]
if left is not right:
self.dues[(par<<1)+1] += self.dues[par]
self.dues[(par<<1)+2] += self.dues[par]
self.dues[par]=0;
if low <= left and high >= right:
return self.tree[par]
div = int((right+left)/2)
return min(self.min_(low, high, left, div, (par<<1)+1),
self.min_(low, high, div+1, right, (par<<1)+2) )
def update_range(self, low, high, value):
return self.update(low, high, value, 0, self.len-1, 0)
def update(self, low, high, value, left, right, par):
if self.dues[par] > 0:
self.tree[par] += self.dues[par]
if left is not right:
self.dues[(par<<1)+1] += self.dues[par]
self.dues[(par<<1)+2] += self.dues[par]
self.dues[par]=0;
if low > right or high < left:
return;
if low <= left and high >= right:
self.tree[par] += value
if(left is not right):
self.dues[(par<<1)+1] += value
self.dues[(par<<1)+2] += value
return
div = (right+left)//2
self.update(low, high, value, left, div, (par<<1)+1)
self.update(low, high, value, div+1, right, (par<<1)+2)
if __name__=='__main__':
array = list(map(int, input('Enter space seprated number\n').strip().split(' ')))
sg = SegmentTree(array)
sg.build()
n_query = int(input('Enter number of queries\n'))
print('Query format\nq low high (to find minimum in range e.g: q 0 3)\nu low high value (to add "value" to all elements e.g: u 3 4 99)')
while n_query:
n_query -= 1
args = input('Enter query: ').strip().split(' ')
if args[0] == 'q' and len(args) == 3:
print(sg.min_in_range(low=int(args[1]), high=int(args[2])))
elif args[0] == 'u' and len(args) == 4:
sg.update_range(low=int(args[1]), high=int(args[2]), value=int(args[3]))
print('Update Successfully!')
else:
print('Invalid query format')
n_query += 1
| SegmentTree |
python | milvus-io__pymilvus | pymilvus/exceptions.py | {
"start": 1892,
"end": 1995
} | class ____(MilvusException):
"""Raise when fail to describe collection"""
| DescribeCollectionException |
python | apache__airflow | airflow-core/tests/unit/core/test_configuration.py | {
"start": 47657,
"end": 82924
} | class ____:
@conf_vars(
{
("celery", "worker_concurrency"): None,
("celery", "celeryd_concurrency"): None,
}
)
def test_deprecated_options(self):
# Guarantee we have a deprecated setting, so we test the deprecation
# lookup even if we remove this explicit fallback
with set_deprecated_options(
deprecated_options={("celery", "worker_concurrency"): ("celery", "celeryd_concurrency", "2.0.0")}
):
# Remove it so we are sure we use the right setting
conf.remove_option("celery", "worker_concurrency")
with pytest.warns(DeprecationWarning, match="celeryd_concurrency"):
with mock.patch.dict("os.environ", AIRFLOW__CELERY__CELERYD_CONCURRENCY="99"):
assert conf.getint("celery", "worker_concurrency") == 99
with (
pytest.warns(DeprecationWarning, match="celeryd_concurrency"),
conf_vars({("celery", "celeryd_concurrency"): "99"}),
):
assert conf.getint("celery", "worker_concurrency") == 99
@pytest.mark.parametrize(
("deprecated_options_dict", "kwargs", "new_section_expected_value", "old_section_expected_value"),
[
pytest.param(
{("old_section", "old_key"): ("new_section", "new_key", "2.0.0")},
{"fallback": None},
None,
"value",
id="deprecated_in_different_section_lookup_enabled",
),
pytest.param(
{("old_section", "old_key"): ("new_section", "new_key", "2.0.0")},
{"fallback": None, "lookup_from_deprecated": False},
None,
None,
id="deprecated_in_different_section_lookup_disabled",
),
pytest.param(
{("new_section", "old_key"): ("new_section", "new_key", "2.0.0")},
{"fallback": None},
"value",
None,
id="deprecated_in_same_section_lookup_enabled",
),
pytest.param(
{("new_section", "old_key"): ("new_section", "new_key", "2.0.0")},
{"fallback": None, "lookup_from_deprecated": False},
None,
None,
id="deprecated_in_same_section_lookup_disabled",
),
],
)
def test_deprecated_options_with_lookup_from_deprecated(
self, deprecated_options_dict, kwargs, new_section_expected_value, old_section_expected_value
):
with conf_vars({("new_section", "new_key"): "value"}):
with set_deprecated_options(deprecated_options=deprecated_options_dict):
assert conf.get("new_section", "old_key", **kwargs) == new_section_expected_value
assert conf.get("old_section", "old_key", **kwargs) == old_section_expected_value
@conf_vars(
{
("celery", "result_backend"): None,
("celery", "celery_result_backend"): None,
("celery", "celery_result_backend_cmd"): None,
}
)
def test_deprecated_options_cmd(self):
# Guarantee we have a deprecated setting, so we test the deprecation
# lookup even if we remove this explicit fallback
with (
set_deprecated_options(
deprecated_options={
("celery", "result_backend"): ("celery", "celery_result_backend", "2.0.0")
}
),
set_sensitive_config_values(sensitive_config_values={("celery", "celery_result_backend")}),
):
conf.remove_option("celery", "result_backend")
with conf_vars({("celery", "celery_result_backend_cmd"): "/bin/echo 99"}):
tmp = None
if "AIRFLOW__CELERY__RESULT_BACKEND" in os.environ:
tmp = os.environ.pop("AIRFLOW__CELERY__RESULT_BACKEND")
with pytest.warns(DeprecationWarning, match="result_backend"):
assert conf.getint("celery", "result_backend") == 99
if tmp:
os.environ["AIRFLOW__CELERY__RESULT_BACKEND"] = tmp
def test_deprecated_values_from_conf(self):
test_conf = AirflowConfigParser(
default_config="""
[core]
executor=LocalExecutor
[database]
sql_alchemy_conn=sqlite://test
"""
)
# Guarantee we have deprecated settings, so we test the deprecation
# lookup even if we remove this explicit fallback
test_conf.deprecated_values = {
"core": {"hostname_callable": (re.compile(r":"), r".")},
}
test_conf.read_dict({"core": {"hostname_callable": "airflow.utils.net:getfqdn"}})
with pytest.warns(FutureWarning):
test_conf.validate()
assert test_conf.get("core", "hostname_callable") == "airflow.utils.net.getfqdn"
@pytest.mark.parametrize(
("old", "new"),
[
(
("core", "sql_alchemy_conn", "postgres+psycopg2://localhost/postgres"),
("database", "sql_alchemy_conn", "postgresql://localhost/postgres"),
),
],
)
def test_deprecated_env_vars_upgraded_and_removed(self, old, new):
test_conf = AirflowConfigParser(
default_config="""
[core]
executor=LocalExecutor
[database]
sql_alchemy_conn=sqlite://test
"""
)
old_section, old_key, old_value = old
new_section, new_key, new_value = new
old_env_var = test_conf._env_var_name(old_section, old_key)
new_env_var = test_conf._env_var_name(new_section, new_key)
with mock.patch.dict("os.environ", **{old_env_var: old_value}):
# Can't start with the new env var existing...
os.environ.pop(new_env_var, None)
with pytest.warns(FutureWarning):
test_conf.validate()
assert test_conf.get(new_section, new_key) == new_value
# We also need to make sure the deprecated env var is removed
# so that any subprocesses don't use it in place of our updated
# value.
assert old_env_var not in os.environ
# and make sure we track the old value as well, under the new section/key
assert test_conf.upgraded_values[(new_section, new_key)] == old_value
@pytest.mark.parametrize(
"conf_dict",
[
{}, # Even if the section is absent from config file, environ still needs replacing.
{"core": {"hostname_callable": "airflow.utils.net.getfqdn"}},
],
)
def test_deprecated_values_from_environ(self, conf_dict):
def make_config():
test_conf = AirflowConfigParser(
default_config="""
[core]
executor=LocalExecutor
[database]
sql_alchemy_conn=sqlite://test
"""
)
# Guarantee we have a deprecated setting, so we test the deprecation
# lookup even if we remove this explicit fallback
test_conf.deprecated_values = {
"core": {"hostname_callable": (re.compile(r":"), r".")},
}
test_conf.read_dict(conf_dict)
test_conf.validate()
return test_conf
with mock.patch.dict("os.environ", AIRFLOW__CORE__HOSTNAME_CALLABLE="airflow.utils.net:getfqdn"):
with pytest.warns(FutureWarning):
test_conf = make_config()
assert test_conf.get("core", "hostname_callable") == "airflow.utils.net.getfqdn"
with reset_warning_registry():
with warnings.catch_warnings(record=True) as warning:
with mock.patch.dict(
"os.environ",
AIRFLOW__CORE__HOSTNAME_CALLABLE="CarrierPigeon",
):
test_conf = make_config()
assert test_conf.get("core", "hostname_callable") == "CarrierPigeon"
assert warning == []
@pytest.mark.parametrize(
("conf_dict", "environ", "expected"),
[
pytest.param({"old_section": {"val": "old_val"}}, None, "old_val", id="old_config"),
pytest.param(
{"old_section": {"val": "old_val"}},
("AIRFLOW__OLD_SECTION__VAL", "old_env"),
"old_env",
id="old_config_old_env",
),
pytest.param(
{},
("AIRFLOW__OLD_SECTION__VAL", "old_env"),
"old_env",
id="old_env",
),
pytest.param(
{"new_section": {"val": "val2"}},
("AIRFLOW__OLD_SECTION__VAL", "old_env"),
"old_env",
id="new_config_old_env",
),
],
)
def test_deprecated_sections(self, conf_dict, environ, expected, monkeypatch):
def make_config():
test_conf = AirflowConfigParser(
default_config=textwrap.dedent(
"""
[new_section]
val=new
"""
)
)
# Guarantee we have a deprecated setting, so we test the deprecation
# lookup even if we remove this explicit fallback
test_conf.deprecated_sections = {
"new_section": ("old_section", "2.1"),
}
test_conf.read_dict(conf_dict)
test_conf.validate()
return test_conf
if environ:
monkeypatch.setenv(*environ)
test_conf = make_config()
with pytest.warns(
DeprecationWarning,
match=r"\[old_section\] has been moved to the val option in \[new_section\].*update your config",
):
# Test when you've _set_ the old value that we warn you need to update your config
assert test_conf.get("new_section", "val") == expected
with pytest.warns(
FutureWarning,
match=r"\[old_section\] has been renamed to \[new_section\].*update your `conf.get",
):
# Test when you read using the old section you get told to change your `conf.get` call
assert test_conf.get("old_section", "val") == expected
@pytest.mark.parametrize("display_source", [True, False])
@mock.patch.dict("os.environ", {}, clear=True)
def test_conf_as_dict_when_deprecated_value_in_config(self, display_source: bool):
with use_config(config="deprecated.cfg"):
cfg_dict = conf.as_dict(
display_source=display_source,
raw=True,
display_sensitive=True,
include_env=False,
include_cmds=False,
)
assert cfg_dict["core"].get("sql_alchemy_conn") == (
("mysql://", "airflow.cfg") if display_source else "mysql://"
)
# database should be None because the deprecated value is set in config
assert cfg_dict["database"].get("sql_alchemy_conn") is None
if not display_source:
remove_all_configurations()
conf.read_dict(dictionary=cfg_dict)
os.environ.clear()
assert conf.get("database", "sql_alchemy_conn") == "mysql://"
@pytest.mark.parametrize("display_source", [True, False])
@mock.patch.dict("os.environ", {"AIRFLOW__CORE__SQL_ALCHEMY_CONN": "postgresql://"}, clear=True)
def test_conf_as_dict_when_deprecated_value_in_both_env_and_config(self, display_source: bool):
with use_config(config="deprecated.cfg"):
cfg_dict = conf.as_dict(
display_source=display_source,
raw=True,
display_sensitive=True,
include_env=True,
include_cmds=False,
)
assert cfg_dict["core"].get("sql_alchemy_conn") == (
("postgresql://", "env var") if display_source else "postgresql://"
)
# database should be None because the deprecated value is set in env value
assert cfg_dict["database"].get("sql_alchemy_conn") is None
if not display_source:
remove_all_configurations()
conf.read_dict(dictionary=cfg_dict)
os.environ.clear()
assert conf.get("database", "sql_alchemy_conn") == "postgresql://"
@pytest.mark.parametrize("display_source", [True, False])
@mock.patch.dict("os.environ", {"AIRFLOW__CORE__SQL_ALCHEMY_CONN": "postgresql://"}, clear=True)
def test_conf_as_dict_when_deprecated_value_in_both_env_and_config_exclude_env(
self, display_source: bool
):
with use_config(config="deprecated.cfg"):
cfg_dict = conf.as_dict(
display_source=display_source,
raw=True,
display_sensitive=True,
include_env=False,
include_cmds=False,
)
assert cfg_dict["core"].get("sql_alchemy_conn") == (
("mysql://", "airflow.cfg") if display_source else "mysql://"
)
# database should be None because the deprecated value is set in env value
assert cfg_dict["database"].get("sql_alchemy_conn") is None
if not display_source:
remove_all_configurations()
conf.read_dict(dictionary=cfg_dict)
os.environ.clear()
assert conf.get("database", "sql_alchemy_conn") == "mysql://"
@pytest.mark.parametrize("display_source", [True, False])
@mock.patch.dict("os.environ", {"AIRFLOW__CORE__SQL_ALCHEMY_CONN": "postgresql://"}, clear=True)
def test_conf_as_dict_when_deprecated_value_in_env(self, display_source: bool):
with use_config(config="empty.cfg"):
cfg_dict = conf.as_dict(
display_source=display_source, raw=True, display_sensitive=True, include_env=True
)
assert cfg_dict["core"].get("sql_alchemy_conn") == (
("postgresql://", "env var") if display_source else "postgresql://"
)
# database should be None because the deprecated value is set in env value
assert cfg_dict["database"].get("sql_alchemy_conn") is None
if not display_source:
remove_all_configurations()
conf.read_dict(dictionary=cfg_dict)
os.environ.clear()
assert conf.get("database", "sql_alchemy_conn") == "postgresql://"
@pytest.mark.parametrize("display_source", [True, False])
@mock.patch.dict("os.environ", {}, clear=True)
def test_conf_as_dict_when_both_conf_and_env_are_empty(self, display_source: bool):
with use_config(config="empty.cfg"):
cfg_dict = conf.as_dict(display_source=display_source, raw=True, display_sensitive=True)
assert cfg_dict["core"].get("sql_alchemy_conn") is None
# database should be taken from default because the deprecated value is missing in config
assert cfg_dict["database"].get("sql_alchemy_conn") == (
(f"sqlite:///{HOME_DIR}/airflow/airflow.db", "default")
if display_source
else f"sqlite:///{HOME_DIR}/airflow/airflow.db"
)
if not display_source:
remove_all_configurations()
conf.read_dict(dictionary=cfg_dict)
os.environ.clear()
assert conf.get("database", "sql_alchemy_conn") == f"sqlite:///{HOME_DIR}/airflow/airflow.db"
@pytest.mark.parametrize("display_source", [True, False])
@mock.patch.dict("os.environ", {}, clear=True)
def test_conf_as_dict_when_deprecated_value_in_cmd_config(self, display_source: bool):
with use_config(config="deprecated_cmd.cfg"):
cfg_dict = conf.as_dict(
display_source=display_source,
raw=True,
display_sensitive=True,
include_env=True,
include_cmds=True,
)
assert cfg_dict["core"].get("sql_alchemy_conn") == (
("postgresql://", "cmd") if display_source else "postgresql://"
)
# database should be None because the deprecated value is set in env value
assert cfg_dict["database"].get("sql_alchemy_conn") is None
if not display_source:
remove_all_configurations()
conf.read_dict(dictionary=cfg_dict)
os.environ.clear()
assert conf.get("database", "sql_alchemy_conn") == "postgresql://"
@pytest.mark.parametrize("display_source", [True, False])
@mock.patch.dict(
"os.environ", {"AIRFLOW__CORE__SQL_ALCHEMY_CONN_CMD": "echo -n 'postgresql://'"}, clear=True
)
def test_conf_as_dict_when_deprecated_value_in_cmd_env(self, display_source: bool):
with use_config(config="empty.cfg"):
cfg_dict = conf.as_dict(
display_source=display_source,
raw=True,
display_sensitive=True,
include_env=True,
include_cmds=True,
)
assert cfg_dict["core"].get("sql_alchemy_conn") == (
("postgresql://", "cmd") if display_source else "postgresql://"
)
# database should be None because the deprecated value is set in env value
assert cfg_dict["database"].get("sql_alchemy_conn") is None
if not display_source:
remove_all_configurations()
conf.read_dict(dictionary=cfg_dict)
os.environ.clear()
assert conf.get("database", "sql_alchemy_conn") == "postgresql://"
@pytest.mark.parametrize("display_source", [True, False])
@mock.patch.dict(
"os.environ", {"AIRFLOW__CORE__SQL_ALCHEMY_CONN_CMD": "echo -n 'postgresql://'"}, clear=True
)
def test_conf_as_dict_when_deprecated_value_in_cmd_disabled_env(self, display_source: bool):
with use_config(config="empty.cfg"):
cfg_dict = conf.as_dict(
display_source=display_source,
raw=True,
display_sensitive=True,
include_env=True,
include_cmds=False,
)
assert cfg_dict["core"].get("sql_alchemy_conn") is None
assert cfg_dict["database"].get("sql_alchemy_conn") == (
(f"sqlite:///{HOME_DIR}/airflow/airflow.db", "default")
if display_source
else f"sqlite:///{HOME_DIR}/airflow/airflow.db"
)
if not display_source:
remove_all_configurations()
conf.read_dict(dictionary=cfg_dict)
os.environ.clear()
assert conf.get("database", "sql_alchemy_conn") == f"sqlite:///{HOME_DIR}/airflow/airflow.db"
@pytest.mark.parametrize("display_source", [True, False])
@mock.patch.dict("os.environ", {}, clear=True)
def test_conf_as_dict_when_deprecated_value_in_cmd_disabled_config(self, display_source: bool):
with use_config(config="deprecated_cmd.cfg"):
cfg_dict = conf.as_dict(
display_source=display_source,
raw=True,
display_sensitive=True,
include_env=True,
include_cmds=False,
)
assert cfg_dict["core"].get("sql_alchemy_conn") is None
assert cfg_dict["database"].get("sql_alchemy_conn") == (
(f"sqlite:///{HOME_DIR}/airflow/airflow.db", "default")
if display_source
else f"sqlite:///{HOME_DIR}/airflow/airflow.db"
)
if not display_source:
remove_all_configurations()
conf.read_dict(dictionary=cfg_dict)
os.environ.clear()
assert conf.get("database", "sql_alchemy_conn") == f"sqlite:///{HOME_DIR}/airflow/airflow.db"
@pytest.mark.parametrize("display_source", [True, False])
@mock.patch.dict("os.environ", {"AIRFLOW__CORE__SQL_ALCHEMY_CONN_SECRET": "secret_path'"}, clear=True)
@mock.patch("airflow.configuration.get_custom_secret_backend")
def test_conf_as_dict_when_deprecated_value_in_secrets(
self, get_custom_secret_backend, display_source: bool
):
get_custom_secret_backend.return_value.get_config.return_value = "postgresql://"
with use_config(config="empty.cfg"):
cfg_dict = conf.as_dict(
display_source=display_source,
raw=True,
display_sensitive=True,
include_env=True,
include_secret=True,
)
assert cfg_dict["core"].get("sql_alchemy_conn") == (
("postgresql://", "secret") if display_source else "postgresql://"
)
# database should be None because the deprecated value is set in env value
assert cfg_dict["database"].get("sql_alchemy_conn") is None
if not display_source:
remove_all_configurations()
conf.read_dict(dictionary=cfg_dict)
os.environ.clear()
assert conf.get("database", "sql_alchemy_conn") == "postgresql://"
@pytest.mark.parametrize("display_source", [True, False])
@mock.patch.dict("os.environ", {"AIRFLOW__CORE__SQL_ALCHEMY_CONN_SECRET": "secret_path'"}, clear=True)
@mock.patch("airflow.configuration.get_custom_secret_backend")
def test_conf_as_dict_when_deprecated_value_in_secrets_disabled_env(
self, get_custom_secret_backend, display_source: bool
):
get_custom_secret_backend.return_value.get_config.return_value = "postgresql://"
with use_config(config="empty.cfg"):
cfg_dict = conf.as_dict(
display_source=display_source,
raw=True,
display_sensitive=True,
include_env=True,
include_secret=False,
)
assert cfg_dict["core"].get("sql_alchemy_conn") is None
assert cfg_dict["database"].get("sql_alchemy_conn") == (
(f"sqlite:///{HOME_DIR}/airflow/airflow.db", "default")
if display_source
else f"sqlite:///{HOME_DIR}/airflow/airflow.db"
)
if not display_source:
remove_all_configurations()
conf.read_dict(dictionary=cfg_dict)
os.environ.clear()
assert conf.get("database", "sql_alchemy_conn") == f"sqlite:///{HOME_DIR}/airflow/airflow.db"
@pytest.mark.parametrize("display_source", [True, False])
@mock.patch("airflow.configuration.get_custom_secret_backend")
@mock.patch.dict("os.environ", {}, clear=True)
def test_conf_as_dict_when_deprecated_value_in_secrets_disabled_config(
self, get_custom_secret_backend, display_source: bool
):
get_custom_secret_backend.return_value.get_config.return_value = "postgresql://"
with use_config(config="deprecated_secret.cfg"):
cfg_dict = conf.as_dict(
display_source=display_source,
raw=True,
display_sensitive=True,
include_env=True,
include_secret=False,
)
assert cfg_dict["core"].get("sql_alchemy_conn") is None
assert cfg_dict["database"].get("sql_alchemy_conn") == (
(f"sqlite:///{HOME_DIR}/airflow/airflow.db", "default")
if display_source
else f"sqlite:///{HOME_DIR}/airflow/airflow.db"
)
if not display_source:
remove_all_configurations()
conf.read_dict(dictionary=cfg_dict)
os.environ.clear()
assert conf.get("database", "sql_alchemy_conn") == f"sqlite:///{HOME_DIR}/airflow/airflow.db"
def test_as_dict_should_not_falsely_emit_future_warning(self):
from airflow.configuration import AirflowConfigParser
test_conf = AirflowConfigParser()
test_conf.read_dict({"scheduler": {"deactivate_stale_dags_interval": 60}})
with warnings.catch_warnings(record=True) as captured:
test_conf.as_dict()
for w in captured: # only one expected
assert "deactivate_stale_dags_interval option in [scheduler] has been renamed" in str(w.message)
def test_suppress_future_warnings_no_future_warning(self):
from airflow.configuration import AirflowConfigParser
test_conf = AirflowConfigParser()
test_conf.read_dict({"scheduler": {"deactivate_stale_dags_interval": 60}})
with warnings.catch_warnings(record=True) as captured:
test_conf.items("scheduler")
assert len(captured) == 1
c = captured[0]
assert c.category is FutureWarning
assert (
"you should use[scheduler/parsing_cleanup_interval] "
"instead. Please update your `conf.get*`" in str(c.message)
)
with warnings.catch_warnings(record=True) as captured:
with test_conf.suppress_future_warnings():
test_conf.items("scheduler")
assert len(captured) == 1
c = captured[0]
assert c.category is DeprecationWarning
assert (
"deactivate_stale_dags_interval option in [scheduler] "
"has been renamed to parsing_cleanup_interval" in str(c.message)
)
@pytest.mark.parametrize(
"key",
[
pytest.param("deactivate_stale_dags_interval", id="old"),
pytest.param("parsing_cleanup_interval", id="new"),
],
)
def test_future_warning_only_for_code_ref(self, key):
from airflow.configuration import AirflowConfigParser
old_val = "deactivate_stale_dags_interval"
test_conf = AirflowConfigParser()
test_conf.read_dict({"scheduler": {old_val: 60}}) # config has old value
with warnings.catch_warnings(record=True) as captured:
test_conf.get("scheduler", str(key)) # could be old or new value
w = captured.pop()
assert "the old setting has been used, but please update" in str(w.message)
assert w.category is DeprecationWarning
# only if we use old value, do we also get a warning about code update
if key == old_val:
w = captured.pop()
assert "your `conf.get*` call to use the new name" in str(w.message)
assert w.category is FutureWarning
def test_as_dict_raw(self):
test_conf = AirflowConfigParser()
raw_dict = test_conf.as_dict(raw=True)
assert "%%" in raw_dict["logging"]["simple_log_format"]
def test_as_dict_not_raw(self):
test_conf = AirflowConfigParser()
raw_dict = test_conf.as_dict(raw=False)
assert "%%" not in raw_dict["logging"]["simple_log_format"]
def test_default_value_raw(self):
test_conf = AirflowConfigParser()
log_format = test_conf.get_default_value("logging", "simple_log_format", raw=True)
assert "%%" in log_format
def test_default_value_not_raw(self):
test_conf = AirflowConfigParser()
log_format = test_conf.get_default_value("logging", "simple_log_format", raw=False)
assert "%%" not in log_format
def test_default_value_raw_with_fallback(self):
test_conf = AirflowConfigParser()
log_format = test_conf.get_default_value("logging", "missing", fallback="aa %%", raw=True)
assert "%%" in log_format
def test_default_value_not_raw_with_fallback(self):
test_conf = AirflowConfigParser()
log_format = test_conf.get_default_value("logging", "missing", fallback="aa %%", raw=False)
# Note that fallback is never interpolated so we expect the value passed as-is
assert "%%" in log_format
def test_written_defaults_are_raw_for_defaults(self):
test_conf = AirflowConfigParser()
with StringIO() as f:
test_conf.write(f, only_defaults=True)
string_written = f.getvalue()
assert "%%(asctime)s" in string_written
def test_written_defaults_are_raw_for_non_defaults(self):
test_conf = AirflowConfigParser()
with StringIO() as f:
test_conf.write(f)
string_written = f.getvalue()
assert "%%(asctime)s" in string_written
def test_get_sections_including_defaults(self):
airflow_cfg = AirflowConfigParser()
airflow_cfg.remove_all_read_configurations()
default_sections = airflow_cfg.get_sections_including_defaults()
assert "core" in default_sections
assert "test-section" not in default_sections
airflow_cfg.add_section("test-section")
airflow_cfg.set("test-section", "test-key", "test-value")
all_sections_including_defaults = airflow_cfg.get_sections_including_defaults()
assert "core" in all_sections_including_defaults
assert "test-section" in all_sections_including_defaults
airflow_cfg.add_section("core")
airflow_cfg.set("core", "new-test-key", "test-value")
all_sections_including_defaults = airflow_cfg.get_sections_including_defaults()
assert "core" in all_sections_including_defaults
assert "test-section" in all_sections_including_defaults
assert sum(1 for section in all_sections_including_defaults if section == "core") == 1
def test_get_options_including_defaults(self):
airflow_cfg = AirflowConfigParser()
airflow_cfg.remove_all_read_configurations()
default_options = airflow_cfg.get_options_including_defaults("core")
assert "hostname_callable" in default_options
assert airflow_cfg.get("core", "hostname_callable") == "airflow.utils.net.getfqdn"
assert "test-key" not in default_options
no_options = airflow_cfg.get_options_including_defaults("test-section")
assert no_options == []
airflow_cfg.add_section("test-section")
airflow_cfg.set("test-section", "test-key", "test-value")
test_section_options = airflow_cfg.get_options_including_defaults("test-section")
assert "test-key" in test_section_options
assert airflow_cfg.get("core", "hostname_callable") == "airflow.utils.net.getfqdn"
airflow_cfg.add_section("core")
airflow_cfg.set("core", "new-test-key", "test-value")
airflow_cfg.set("core", "hostname_callable", "test-fn")
all_core_options_including_defaults = airflow_cfg.get_options_including_defaults("core")
assert "new-test-key" in all_core_options_including_defaults
assert "dags_folder" in all_core_options_including_defaults
assert airflow_cfg.get("core", "new-test-key") == "test-value"
assert airflow_cfg.get("core", "hostname_callable") == "test-fn"
assert sum(1 for option in all_core_options_including_defaults if option == "hostname_callable") == 1
@skip_if_force_lowest_dependencies_marker
def test_sensitive_values():
from airflow.settings import conf
# this list was hardcoded prior to 2.6.2
# included here to avoid regression in refactor
# inclusion of keys ending in "password" or "kwargs" is automated from 2.6.2
# items not matching this pattern must be added here manually
sensitive_values = {
("database", "sql_alchemy_conn"),
("database", "sql_alchemy_conn_async"),
("core", "fernet_key"),
("api_auth", "jwt_secret"),
("api", "secret_key"),
("secrets", "backend_kwargs"),
("sentry", "sentry_dsn"),
("database", "sql_alchemy_engine_args"),
("core", "sql_alchemy_conn"),
("celery_broker_transport_options", "sentinel_kwargs"),
("celery", "broker_url"),
("celery", "flower_basic_auth"),
("celery", "result_backend"),
("opensearch", "username"),
("opensearch", "password"),
("webserver", "secret_key"),
}
all_keys = {(s, k) for s, v in conf.configuration_description.items() for k in v["options"]}
suspected_sensitive = {(s, k) for (s, k) in all_keys if k.endswith(("password", "kwargs"))}
exclude_list = {
("aws_batch_executor", "submit_job_kwargs"),
("kubernetes_executor", "delete_option_kwargs"),
("aws_ecs_executor", "run_task_kwargs"), # Only a constrained set of values, none are sensitive
}
suspected_sensitive -= exclude_list
sensitive_values.update(suspected_sensitive)
assert sensitive_values == conf.sensitive_config_values
@skip_if_force_lowest_dependencies_marker
def test_restore_and_reload_provider_configuration():
from airflow.settings import conf
assert conf.providers_configuration_loaded is True
assert conf.get("celery", "celery_app_name") == "airflow.providers.celery.executors.celery_executor"
conf.restore_core_default_configuration()
assert conf.providers_configuration_loaded is False
# built-in pre-2-7 celery executor
assert conf.get("celery", "celery_app_name") == "airflow.executors.celery_executor"
conf.load_providers_configuration()
assert conf.providers_configuration_loaded is True
assert conf.get("celery", "celery_app_name") == "airflow.providers.celery.executors.celery_executor"
@skip_if_force_lowest_dependencies_marker
def test_error_when_contributing_to_existing_section():
from airflow.settings import conf
with conf.make_sure_configuration_loaded(with_providers=True):
assert conf.providers_configuration_loaded is True
assert conf.get("celery", "celery_app_name") == "airflow.providers.celery.executors.celery_executor"
conf.restore_core_default_configuration()
assert conf.providers_configuration_loaded is False
conf.configuration_description["celery"] = {
"description": "Celery Executor configuration",
"options": {
"celery_app_name": {
"default": "test",
}
},
}
conf._default_values.add_section("celery")
conf._default_values.set("celery", "celery_app_name", "test")
assert conf.get("celery", "celery_app_name") == "test"
# patching restoring_core_default_configuration to avoid reloading the defaults
with patch.object(conf, "restore_core_default_configuration"):
with pytest.raises(
AirflowConfigException,
match="The provider apache-airflow-providers-celery is attempting to contribute "
"configuration section celery that has already been added before. "
"The source of it: Airflow's core package",
):
conf.load_providers_configuration()
assert conf.get("celery", "celery_app_name") == "test"
# Technically it's not a DB test, but we want to make sure it's not interfering with xdist non-db tests
# Because the `_cleanup` method might cause side-effect for parallel-run tests
@pytest.mark.db_test
| TestDeprecatedConf |
python | pypa__setuptools | setuptools/_vendor/more_itertools/more.py | {
"start": 101134,
"end": 114619
} | class ____:
"""
Yield items from *iterable* until *limit_seconds* have passed.
If the time limit expires before all items have been yielded, the
``timed_out`` parameter will be set to ``True``.
>>> from time import sleep
>>> def generator():
... yield 1
... yield 2
... sleep(0.2)
... yield 3
>>> iterable = time_limited(0.1, generator())
>>> list(iterable)
[1, 2]
>>> iterable.timed_out
True
Note that the time is checked before each item is yielded, and iteration
stops if the time elapsed is greater than *limit_seconds*. If your time
limit is 1 second, but it takes 2 seconds to generate the first item from
the iterable, the function will run for 2 seconds and not yield anything.
As a special case, when *limit_seconds* is zero, the iterator never
returns anything.
"""
def __init__(self, limit_seconds, iterable):
if limit_seconds < 0:
raise ValueError('limit_seconds must be positive')
self.limit_seconds = limit_seconds
self._iterable = iter(iterable)
self._start_time = monotonic()
self.timed_out = False
def __iter__(self):
return self
def __next__(self):
if self.limit_seconds == 0:
self.timed_out = True
raise StopIteration
item = next(self._iterable)
if monotonic() - self._start_time > self.limit_seconds:
self.timed_out = True
raise StopIteration
return item
def only(iterable, default=None, too_long=None):
"""If *iterable* has only one item, return it.
If it has zero items, return *default*.
If it has more than one item, raise the exception given by *too_long*,
which is ``ValueError`` by default.
>>> only([], default='missing')
'missing'
>>> only([1])
1
>>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Expected exactly one item in iterable, but got 1, 2,
and perhaps more.'
>>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError
Note that :func:`only` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check
iterable contents less destructively.
"""
it = iter(iterable)
first_value = next(it, default)
try:
second_value = next(it)
except StopIteration:
pass
else:
msg = (
'Expected exactly one item in iterable, but got {!r}, {!r}, '
'and perhaps more.'.format(first_value, second_value)
)
raise too_long or ValueError(msg)
return first_value
def _ichunk(iterable, n):
cache = deque()
chunk = islice(iterable, n)
def generator():
while True:
if cache:
yield cache.popleft()
else:
try:
item = next(chunk)
except StopIteration:
return
else:
yield item
def materialize_next(n=1):
# if n not specified materialize everything
if n is None:
cache.extend(chunk)
return len(cache)
to_cache = n - len(cache)
# materialize up to n
if to_cache > 0:
cache.extend(islice(chunk, to_cache))
# return number materialized up to n
return min(n, len(cache))
return (generator(), materialize_next)
def ichunked(iterable, n):
"""Break *iterable* into sub-iterables with *n* elements each.
:func:`ichunked` is like :func:`chunked`, but it yields iterables
instead of lists.
If the sub-iterables are read in order, the elements of *iterable*
won't be stored in memory.
If they are read out of order, :func:`itertools.tee` is used to cache
elements as necessary.
>>> from itertools import count
>>> all_chunks = ichunked(count(), 4)
>>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks)
>>> list(c_2) # c_1's elements have been cached; c_3's haven't been
[4, 5, 6, 7]
>>> list(c_1)
[0, 1, 2, 3]
>>> list(c_3)
[8, 9, 10, 11]
"""
iterable = iter(iterable)
while True:
# Create new chunk
chunk, materialize_next = _ichunk(iterable, n)
# Check to see whether we're at the end of the source iterable
if not materialize_next():
return
yield chunk
# Fill previous chunk's cache
materialize_next(None)
def iequals(*iterables):
"""Return ``True`` if all given *iterables* are equal to each other,
which means that they contain the same elements in the same order.
The function is useful for comparing iterables of different data types
or iterables that do not support equality checks.
>>> iequals("abc", ['a', 'b', 'c'], ('a', 'b', 'c'), iter("abc"))
True
>>> iequals("abc", "acb")
False
Not to be confused with :func:`all_equal`, which checks whether all
elements of iterable are equal to each other.
"""
return all(map(all_equal, zip_longest(*iterables, fillvalue=object())))
def distinct_combinations(iterable, r):
"""Yield the distinct combinations of *r* items taken from *iterable*.
>>> list(distinct_combinations([0, 0, 1], 2))
[(0, 0), (0, 1)]
Equivalent to ``set(combinations(iterable))``, except duplicates are not
generated and thrown away. For larger input sequences this is much more
efficient.
"""
if r < 0:
raise ValueError('r must be non-negative')
elif r == 0:
yield ()
return
pool = tuple(iterable)
generators = [unique_everseen(enumerate(pool), key=itemgetter(1))]
current_combo = [None] * r
level = 0
while generators:
try:
cur_idx, p = next(generators[-1])
except StopIteration:
generators.pop()
level -= 1
continue
current_combo[level] = p
if level + 1 == r:
yield tuple(current_combo)
else:
generators.append(
unique_everseen(
enumerate(pool[cur_idx + 1 :], cur_idx + 1),
key=itemgetter(1),
)
)
level += 1
def filter_except(validator, iterable, *exceptions):
"""Yield the items from *iterable* for which the *validator* function does
not raise one of the specified *exceptions*.
*validator* is called for each item in *iterable*.
It should be a function that accepts one argument and raises an exception
if that item is not valid.
>>> iterable = ['1', '2', 'three', '4', None]
>>> list(filter_except(int, iterable, ValueError, TypeError))
['1', '2', '4']
If an exception other than one given by *exceptions* is raised by
*validator*, it is raised like normal.
"""
for item in iterable:
try:
validator(item)
except exceptions:
pass
else:
yield item
def map_except(function, iterable, *exceptions):
"""Transform each item from *iterable* with *function* and yield the
result, unless *function* raises one of the specified *exceptions*.
*function* is called to transform each item in *iterable*.
It should accept one argument.
>>> iterable = ['1', '2', 'three', '4', None]
>>> list(map_except(int, iterable, ValueError, TypeError))
[1, 2, 4]
If an exception other than one given by *exceptions* is raised by
*function*, it is raised like normal.
"""
for item in iterable:
try:
yield function(item)
except exceptions:
pass
def map_if(iterable, pred, func, func_else=lambda x: x):
"""Evaluate each item from *iterable* using *pred*. If the result is
equivalent to ``True``, transform the item with *func* and yield it.
Otherwise, transform the item with *func_else* and yield it.
*pred*, *func*, and *func_else* should each be functions that accept
one argument. By default, *func_else* is the identity function.
>>> from math import sqrt
>>> iterable = list(range(-5, 5))
>>> iterable
[-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
>>> list(map_if(iterable, lambda x: x > 3, lambda x: 'toobig'))
[-5, -4, -3, -2, -1, 0, 1, 2, 3, 'toobig']
>>> list(map_if(iterable, lambda x: x >= 0,
... lambda x: f'{sqrt(x):.2f}', lambda x: None))
[None, None, None, None, None, '0.00', '1.00', '1.41', '1.73', '2.00']
"""
for item in iterable:
yield func(item) if pred(item) else func_else(item)
def _sample_unweighted(iterable, k):
# Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li:
# "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))".
# Fill up the reservoir (collection of samples) with the first `k` samples
reservoir = take(k, iterable)
# Generate random number that's the largest in a sample of k U(0,1) numbers
# Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic
W = exp(log(random()) / k)
# The number of elements to skip before changing the reservoir is a random
# number with a geometric distribution. Sample it using random() and logs.
next_index = k + floor(log(random()) / log(1 - W))
for index, element in enumerate(iterable, k):
if index == next_index:
reservoir[randrange(k)] = element
# The new W is the largest in a sample of k U(0, `old_W`) numbers
W *= exp(log(random()) / k)
next_index += floor(log(random()) / log(1 - W)) + 1
return reservoir
def _sample_weighted(iterable, k, weights):
# Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. :
# "Weighted random sampling with a reservoir".
# Log-transform for numerical stability for weights that are small/large
weight_keys = (log(random()) / weight for weight in weights)
# Fill up the reservoir (collection of samples) with the first `k`
# weight-keys and elements, then heapify the list.
reservoir = take(k, zip(weight_keys, iterable))
heapify(reservoir)
# The number of jumps before changing the reservoir is a random variable
# with an exponential distribution. Sample it using random() and logs.
smallest_weight_key, _ = reservoir[0]
weights_to_skip = log(random()) / smallest_weight_key
for weight, element in zip(weights, iterable):
if weight >= weights_to_skip:
# The notation here is consistent with the paper, but we store
# the weight-keys in log-space for better numerical stability.
smallest_weight_key, _ = reservoir[0]
t_w = exp(weight * smallest_weight_key)
r_2 = uniform(t_w, 1) # generate U(t_w, 1)
weight_key = log(r_2) / weight
heapreplace(reservoir, (weight_key, element))
smallest_weight_key, _ = reservoir[0]
weights_to_skip = log(random()) / smallest_weight_key
else:
weights_to_skip -= weight
# Equivalent to [element for weight_key, element in sorted(reservoir)]
return [heappop(reservoir)[1] for _ in range(k)]
def sample(iterable, k, weights=None):
"""Return a *k*-length list of elements chosen (without replacement)
from the *iterable*. Like :func:`random.sample`, but works on iterables
of unknown length.
>>> iterable = range(100)
>>> sample(iterable, 5) # doctest: +SKIP
[81, 60, 96, 16, 4]
An iterable with *weights* may also be given:
>>> iterable = range(100)
>>> weights = (i * i + 1 for i in range(100))
>>> sampled = sample(iterable, 5, weights=weights) # doctest: +SKIP
[79, 67, 74, 66, 78]
The algorithm can also be used to generate weighted random permutations.
The relative weight of each item determines the probability that it
appears late in the permutation.
>>> data = "abcdefgh"
>>> weights = range(1, len(data) + 1)
>>> sample(data, k=len(data), weights=weights) # doctest: +SKIP
['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f']
"""
if k == 0:
return []
iterable = iter(iterable)
if weights is None:
return _sample_unweighted(iterable, k)
else:
weights = iter(weights)
return _sample_weighted(iterable, k, weights)
def is_sorted(iterable, key=None, reverse=False, strict=False):
"""Returns ``True`` if the items of iterable are in sorted order, and
``False`` otherwise. *key* and *reverse* have the same meaning that they do
in the built-in :func:`sorted` function.
>>> is_sorted(['1', '2', '3', '4', '5'], key=int)
True
>>> is_sorted([5, 4, 3, 1, 2], reverse=True)
False
If *strict*, tests for strict sorting, that is, returns ``False`` if equal
elements are found:
>>> is_sorted([1, 2, 2])
True
>>> is_sorted([1, 2, 2], strict=True)
False
The function returns ``False`` after encountering the first out-of-order
item. If there are no out-of-order items, the iterable is exhausted.
"""
compare = (le if reverse else ge) if strict else (lt if reverse else gt)
it = iterable if key is None else map(key, iterable)
return not any(starmap(compare, pairwise(it)))
| time_limited |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup_py39.py | {
"start": 4106,
"end": 4968
} | class ____(tuple):
def foo(self):
pass
def test_only_tuple_subclasses_in_typing_type():
# A generic typing type (such as Fooable) whose only concrete
# instantiations are tuples should still generate tuples. This is in
# contrast to test_tuple_subclasses_not_generic_sequences, which discards
# tuples if there are any alternatives.
with temp_registered(FooableConcrete, st.builds(FooableConcrete)):
s = st.from_type(Fooable[int])
assert_all_examples(s, lambda x: type(x) is FooableConcrete)
def test_lookup_registered_tuple():
sentinel = object()
typ = tuple[int]
with temp_registered(tuple, st.just(sentinel)):
assert_simple_property(st.from_type(typ), lambda v: v is sentinel)
assert_simple_property(st.from_type(typ), lambda v: v is not sentinel)
sentinel = object()
| FooableConcrete |
python | Pylons__pyramid | tests/test_config/test_views.py | {
"start": 126944,
"end": 129611
} | class ____(unittest.TestCase):
def _callFUT(self, view, wrapped_view):
from pyramid.config.views import preserve_view_attrs
return preserve_view_attrs(view, wrapped_view)
def test_it_same(self):
def view(context, request):
""" """
result = self._callFUT(view, view)
self.assertTrue(result is view)
def test_it_view_is_None(self):
def view(context, request):
""" """
result = self._callFUT(None, view)
self.assertTrue(result is view)
def test_it_different_with_existing_original_view(self):
def view1(context, request): # pragma: no cover
pass
view1.__original_view__ = 'abc'
def view2(context, request): # pragma: no cover
pass
result = self._callFUT(view1, view2)
self.assertEqual(result.__original_view__, 'abc')
self.assertFalse(result is view1)
def test_it_different(self):
class DummyView1:
"""1"""
__name__ = '1'
__module__ = '1'
def __call__(self, context, request):
""" """
def __call_permissive__(self, context, request):
""" """
def __predicated__(self, context, request):
""" """
def __permitted__(self, context, request):
""" """
class DummyView2:
"""2"""
__name__ = '2'
__module__ = '2'
def __call__(self, context, request):
""" """
def __call_permissive__(self, context, request):
""" """
def __predicated__(self, context, request):
""" """
def __permitted__(self, context, request):
""" """
view1 = DummyView1()
view2 = DummyView2()
result = self._callFUT(view2, view1)
self.assertEqual(result, view1)
self.assertTrue(view1.__original_view__ is view2)
self.assertTrue(view1.__doc__ is view2.__doc__)
self.assertTrue(view1.__module__ is view2.__module__)
self.assertTrue(view1.__name__ is view2.__name__)
self.assertTrue(
getattr(view1.__call_permissive__, '__func__')
is getattr(view2.__call_permissive__, '__func__')
)
self.assertTrue(
getattr(view1.__permitted__, '__func__')
is getattr(view2.__permitted__, '__func__')
)
self.assertTrue(
getattr(view1.__predicated__, '__func__')
is getattr(view2.__predicated__, '__func__')
)
| Test_preserve_view_attrs |
python | getsentry__sentry | tests/sentry/models/test_base.py | {
"start": 348,
"end": 3599
} | class ____(TestCase):
class TestModel(Model):
__relocation_scope__ = RelocationScope.Excluded
class Meta:
abstract = True
app_label = "fixtures"
@ModelSiloLimit(SiloMode.CONTROL)
class ControlModel(TestModel):
pass
@ModelSiloLimit(SiloMode.REGION)
class RegionModel(TestModel):
pass
class ModelOnMonolith(TestModel):
pass
def test_available_on_monolith_mode(self) -> None:
assert list(self.ModelOnMonolith.objects.all()) == []
with raises(self.ModelOnMonolith.DoesNotExist):
self.ModelOnMonolith.objects.get(id=1)
self.ModelOnMonolith.objects.create()
assert self.ModelOnMonolith.objects.count() == 1
self.ModelOnMonolith.objects.filter(id=1).delete()
@override_settings(SILO_MODE=SiloMode.REGION)
def test_available_on_same_mode(self) -> None:
assert list(self.RegionModel.objects.all()) == []
with raises(self.RegionModel.DoesNotExist):
self.RegionModel.objects.get(id=1)
self.RegionModel.objects.create()
assert self.RegionModel.objects.count() == 1
self.RegionModel.objects.filter(id=1).delete()
@override_settings(SILO_MODE=SiloMode.REGION)
def test_unavailable_on_other_mode(self) -> None:
with raises(ModelSiloLimit.AvailabilityError):
list(self.ControlModel.objects.all())
with raises(ModelSiloLimit.AvailabilityError):
self.ControlModel.objects.get(id=1)
with raises(ModelSiloLimit.AvailabilityError):
self.ControlModel.objects.create()
with raises(ModelSiloLimit.AvailabilityError):
self.ControlModel.objects.filter(id=1).delete()
def test_get_model_if_available(self) -> None:
test_models = {
m.__name__: m
for m in (
self.ControlModel,
self.RegionModel,
self.ModelOnMonolith,
)
}
app_config = MagicMock()
app_config.get_model.side_effect = test_models.get
with override_settings(SILO_MODE=SiloMode.REGION):
assert get_model_if_available(app_config, "ControlModel") is None
assert get_model_if_available(app_config, "RegionModel") is self.RegionModel
assert get_model_if_available(app_config, "ModelOnMonolith") is self.ModelOnMonolith
def test_get_model_with_nonexistent_name(self) -> None:
app_config = MagicMock()
app_config.get_model.side_effect = LookupError
assert get_model_if_available(app_config, "BogusModel") is None
app_config.get_model.assert_called_with("BogusModel")
def test_model_index_location() -> None:
"""
Validates that we didn't misconfigure a model such that the index or
constraints are defined on the model body itself.
"""
for model in apps.get_models():
for attr in ["indexes", "constraints", "unique_together"]:
if hasattr(model, attr):
model_name = f"{model._meta.app_label}.{model.__name__}"
raise AssertionError(
f"{model_name} declares `{attr}` on the model class, not in Meta"
)
| AvailableOnTest |
python | doocs__leetcode | solution/3100-3199/3170.Lexicographically Minimum String After Removing Stars/Solution.py | {
"start": 0,
"end": 483
} | class ____:
def clearStars(self, s: str) -> str:
g = defaultdict(list)
n = len(s)
rem = [False] * n
for i, c in enumerate(s):
if c == "*":
rem[i] = True
for a in ascii_lowercase:
if g[a]:
rem[g[a].pop()] = True
break
else:
g[c].append(i)
return "".join(c for i, c in enumerate(s) if not rem[i])
| Solution |
python | pypa__pipenv | pipenv/patched/pip/_vendor/distlib/database.py | {
"start": 1887,
"end": 11313
} | class ____(object):
"""
Represents a set of distributions installed on a path (typically sys.path).
"""
def __init__(self, path=None, include_egg=False):
"""
Create an instance from a path, optionally including legacy (distutils/
setuptools/distribute) distributions.
:param path: The path to use, as a list of directories. If not specified,
sys.path is used.
:param include_egg: If True, this instance will look for and return legacy
distributions as well as those based on PEP 376.
"""
if path is None:
path = sys.path
self.path = path
self._include_dist = True
self._include_egg = include_egg
self._cache = _Cache()
self._cache_egg = _Cache()
self._cache_enabled = True
self._scheme = get_scheme('default')
def _get_cache_enabled(self):
return self._cache_enabled
def _set_cache_enabled(self, value):
self._cache_enabled = value
cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
def clear_cache(self):
"""
Clears the internal cache.
"""
self._cache.clear()
self._cache_egg.clear()
def _yield_distributions(self):
"""
Yield .dist-info and/or .egg(-info) distributions.
"""
# We need to check if we've seen some resources already, because on
# some Linux systems (e.g. some Debian/Ubuntu variants) there are
# symlinks which alias other files in the environment.
seen = set()
for path in self.path:
finder = resources.finder_for_path(path)
if finder is None:
continue
r = finder.find('')
if not r or not r.is_container:
continue
rset = sorted(r.resources)
for entry in rset:
r = finder.find(entry)
if not r or r.path in seen:
continue
try:
if self._include_dist and entry.endswith(DISTINFO_EXT):
possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME]
for metadata_filename in possible_filenames:
metadata_path = posixpath.join(entry, metadata_filename)
pydist = finder.find(metadata_path)
if pydist:
break
else:
continue
with contextlib.closing(pydist.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
logger.debug('Found %s', r.path)
seen.add(r.path)
yield new_dist_class(r.path, metadata=metadata, env=self)
elif self._include_egg and entry.endswith(('.egg-info', '.egg')):
logger.debug('Found %s', r.path)
seen.add(r.path)
yield old_dist_class(r.path, self)
except Exception as e:
msg = 'Unable to read distribution at %s, perhaps due to bad metadata: %s'
logger.warning(msg, r.path, e)
import warnings
warnings.warn(msg % (r.path, e), stacklevel=2)
def _generate_cache(self):
"""
Scan the path for distributions and populate the cache with
those that are found.
"""
gen_dist = not self._cache.generated
gen_egg = self._include_egg and not self._cache_egg.generated
if gen_dist or gen_egg:
for dist in self._yield_distributions():
if isinstance(dist, InstalledDistribution):
self._cache.add(dist)
else:
self._cache_egg.add(dist)
if gen_dist:
self._cache.generated = True
if gen_egg:
self._cache_egg.generated = True
@classmethod
def distinfo_dirname(cls, name, version):
"""
The *name* and *version* parameters are converted into their
filename-escaped form, i.e. any ``'-'`` characters are replaced
with ``'_'`` other than the one in ``'dist-info'`` and the one
separating the name from the version number.
:parameter name: is converted to a standard distribution name by replacing
any runs of non- alphanumeric characters with a single
``'-'``.
:type name: string
:parameter version: is converted to a standard version string. Spaces
become dots, and all other non-alphanumeric characters
(except dots) become dashes, with runs of multiple
dashes condensed to a single dash.
:type version: string
:returns: directory name
:rtype: string"""
name = name.replace('-', '_')
return '-'.join([name, version]) + DISTINFO_EXT
def get_distributions(self):
"""
Provides an iterator that looks for distributions and returns
:class:`InstalledDistribution` or
:class:`EggInfoDistribution` instances for each one of them.
:rtype: iterator of :class:`InstalledDistribution` and
:class:`EggInfoDistribution` instances
"""
if not self._cache_enabled:
for dist in self._yield_distributions():
yield dist
else:
self._generate_cache()
for dist in self._cache.path.values():
yield dist
if self._include_egg:
for dist in self._cache_egg.path.values():
yield dist
def get_distribution(self, name):
"""
Looks for a named distribution on the path.
This function only returns the first result found, as no more than one
value is expected. If nothing is found, ``None`` is returned.
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
or ``None``
"""
result = None
name = name.lower()
if not self._cache_enabled:
for dist in self._yield_distributions():
if dist.key == name:
result = dist
break
else:
self._generate_cache()
if name in self._cache.name:
result = self._cache.name[name][0]
elif self._include_egg and name in self._cache_egg.name:
result = self._cache_egg.name[name][0]
return result
def provides_distribution(self, name, version=None):
"""
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
This function only returns the first result found, since no more than
one values are expected. If the directory is not found, returns ``None``.
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
"""
matcher = None
if version is not None:
try:
matcher = self._scheme.matcher('%s (%s)' % (name, version))
except ValueError:
raise DistlibException('invalid name or version: %r, %r' % (name, version))
for dist in self.get_distributions():
# We hit a problem on Travis where enum34 was installed and doesn't
# have a provides attribute ...
if not hasattr(dist, 'provides'):
logger.debug('No "provides": %s', dist)
else:
provided = dist.provides
for p in provided:
p_name, p_ver = parse_name_and_version(p)
if matcher is None:
if p_name == name:
yield dist
break
else:
if p_name == name and matcher.match(p_ver):
yield dist
break
def get_file_path(self, name, relative_path):
"""
Return the path to a resource file.
"""
dist = self.get_distribution(name)
if dist is None:
raise LookupError('no distribution named %r found' % name)
return dist.get_resource_path(relative_path)
def get_exported_entries(self, category, name=None):
"""
Return all of the exported entries in a particular category.
:param category: The category to search for entries.
:param name: If specified, only entries with that name are returned.
"""
for dist in self.get_distributions():
r = dist.exports
if category in r:
d = r[category]
if name is not None:
if name in d:
yield d[name]
else:
for v in d.values():
yield v
| DistributionPath |
python | apache__airflow | providers/dbt/cloud/src/airflow/providers/dbt/cloud/hooks/dbt.py | {
"start": 4516,
"end": 5702
} | class ____(AirflowException):
"""Exception raised when a dbt Cloud resource cannot be uniquely identified."""
T = TypeVar("T", bound=Any)
def provide_account_id(func: T) -> T:
"""
Provide a fallback value for ``account_id``.
If the ``account_id`` is None or not passed to the decorated function,
the value will be taken from the configured dbt Cloud Airflow Connection.
"""
function_signature = signature(func)
@wraps(func)
async def wrapper(*args: Any, **kwargs: Any) -> Any:
bound_args = function_signature.bind(*args, **kwargs)
if bound_args.arguments.get("account_id") is None:
self = args[0]
if self.dbt_cloud_conn_id:
connection = await sync_to_async(self.get_connection)(self.dbt_cloud_conn_id)
default_account_id = connection.login
if not default_account_id:
raise AirflowException("Could not determine the dbt Cloud account.")
bound_args.arguments["account_id"] = int(default_account_id)
return await func(*bound_args.args, **bound_args.kwargs)
return cast("T", wrapper)
| DbtCloudResourceLookupError |
python | pytest-dev__pytest | doc/en/example/assertion/failure_demo.py | {
"start": 646,
"end": 3219
} | class ____:
def test_eq_text(self):
assert "spam" == "eggs"
def test_eq_similar_text(self):
assert "foo 1 bar" == "foo 2 bar"
def test_eq_multiline_text(self):
assert "foo\nspam\nbar" == "foo\neggs\nbar"
def test_eq_long_text(self):
a = "1" * 100 + "a" + "2" * 100
b = "1" * 100 + "b" + "2" * 100
assert a == b
def test_eq_long_text_multiline(self):
a = "1\n" * 100 + "a" + "2\n" * 100
b = "1\n" * 100 + "b" + "2\n" * 100
assert a == b
def test_eq_list(self):
assert [0, 1, 2] == [0, 1, 3]
def test_eq_list_long(self):
a = [0] * 100 + [1] + [3] * 100
b = [0] * 100 + [2] + [3] * 100
assert a == b
def test_eq_dict(self):
assert {"a": 0, "b": 1, "c": 0} == {"a": 0, "b": 2, "d": 0}
def test_eq_set(self):
assert {0, 10, 11, 12} == {0, 20, 21}
def test_eq_longer_list(self):
assert [1, 2] == [1, 2, 3]
def test_in_list(self):
assert 1 in [0, 2, 3, 4, 5]
def test_not_in_text_multiline(self):
text = "some multiline\ntext\nwhich\nincludes foo\nand a\ntail"
assert "foo" not in text
def test_not_in_text_single(self):
text = "single foo line"
assert "foo" not in text
def test_not_in_text_single_long(self):
text = "head " * 50 + "foo " + "tail " * 20
assert "foo" not in text
def test_not_in_text_single_long_term(self):
text = "head " * 50 + "f" * 70 + "tail " * 20
assert "f" * 70 not in text
def test_eq_dataclass(self):
from dataclasses import dataclass
@dataclass
class Foo:
a: int
b: str
left = Foo(1, "b")
right = Foo(1, "c")
assert left == right
def test_eq_attrs(self):
import attr
@attr.s
class Foo:
a = attr.ib()
b = attr.ib()
left = Foo(1, "b")
right = Foo(1, "c")
assert left == right
def test_attribute():
class Foo:
b = 1
i = Foo()
assert i.b == 2
def test_attribute_instance():
class Foo:
b = 1
assert Foo().b == 2
def test_attribute_failure():
class Foo:
def _get_b(self):
raise Exception("Failed to get attrib")
b = property(_get_b)
i = Foo()
assert i.b == 2
def test_attribute_multiple():
class Foo:
b = 1
class Bar:
b = 2
assert Foo().b == Bar().b
def globf(x):
return x + 1
| TestSpecialisedExplanations |
python | scipy__scipy | scipy/integrate/_cubature.py | {
"start": 1131,
"end": 19470
} | class ____:
estimate: Array
error: Array
status: str
regions: list[CubatureRegion]
subdivisions: int
atol: float
rtol: float
@xp_capabilities(allow_dask_compute=True, jax_jit=False)
def cubature(f, a, b, *, rule="gk21", rtol=1e-8, atol=0, max_subdivisions=10000,
args=(), workers=1, points=None):
r"""
Adaptive cubature of multidimensional array-valued function.
Given an arbitrary integration rule, this function returns an estimate of the
integral to the requested tolerance over the region defined by the arrays `a` and
`b` specifying the corners of a hypercube.
Convergence is not guaranteed for all integrals.
Parameters
----------
f : callable
Function to integrate. `f` must have the signature::
f(x : ndarray, *args) -> ndarray
`f` should accept arrays ``x`` of shape::
(npoints, ndim)
and output arrays of shape::
(npoints, output_dim_1, ..., output_dim_n)
In this case, `cubature` will return arrays of shape::
(output_dim_1, ..., output_dim_n)
a, b : array_like
Lower and upper limits of integration as 1D arrays specifying the left and right
endpoints of the intervals being integrated over. Limits can be infinite.
rule : str, optional
Rule used to estimate the integral. If passing a string, the options are
"gauss-kronrod" (21 node), or "genz-malik" (degree 7). If a rule like
"gauss-kronrod" is specified for an ``n``-dim integrand, the corresponding
Cartesian product rule is used. "gk21", "gk15" are also supported for
compatibility with `quad_vec`. See Notes.
rtol, atol : float, optional
Relative and absolute tolerances. Iterations are performed until the error is
estimated to be less than ``atol + rtol * abs(est)``. Here `rtol` controls
relative accuracy (number of correct digits), while `atol` controls absolute
accuracy (number of correct decimal places). To achieve the desired `rtol`, set
`atol` to be smaller than the smallest value that can be expected from
``rtol * abs(y)`` so that `rtol` dominates the allowable error. If `atol` is
larger than ``rtol * abs(y)`` the number of correct digits is not guaranteed.
Conversely, to achieve the desired `atol`, set `rtol` such that
``rtol * abs(y)`` is always smaller than `atol`. Default values are 1e-8 for
`rtol` and 0 for `atol`.
max_subdivisions : int, optional
Upper bound on the number of subdivisions to perform. Default is 10,000.
args : tuple, optional
Additional positional args passed to `f`, if any.
workers : int or map-like callable, optional
If `workers` is an integer, part of the computation is done in parallel
subdivided to this many tasks (using :class:`python:multiprocessing.pool.Pool`).
Supply `-1` to use all cores available to the Process. Alternatively, supply a
map-like callable, such as :meth:`python:multiprocessing.pool.Pool.map` for
evaluating the population in parallel. This evaluation is carried out as
``workers(func, iterable)``.
points : list of array_like, optional
List of points to avoid evaluating `f` at, under the condition that the rule
being used does not evaluate `f` on the boundary of a region (which is the
case for all Genz-Malik and Gauss-Kronrod rules). This can be useful if `f` has
a singularity at the specified point. This should be a list of array-likes where
each element has length ``ndim``. Default is empty. See Examples.
Returns
-------
res : object
Object containing the results of the estimation. It has the following
attributes:
estimate : ndarray
Estimate of the value of the integral over the overall region specified.
error : ndarray
Estimate of the error of the approximation over the overall region
specified.
status : str
Whether the estimation was successful. Can be either: "converged",
"not_converged".
subdivisions : int
Number of subdivisions performed.
atol, rtol : float
Requested tolerances for the approximation.
regions: list of object
List of objects containing the estimates of the integral over smaller
regions of the domain.
Each object in ``regions`` has the following attributes:
a, b : ndarray
Points describing the corners of the region. If the original integral
contained infinite limits or was over a region described by `region`,
then `a` and `b` are in the transformed coordinates.
estimate : ndarray
Estimate of the value of the integral over this region.
error : ndarray
Estimate of the error of the approximation over this region.
Notes
-----
The algorithm uses a similar algorithm to `quad_vec`, which itself is based on the
implementation of QUADPACK's DQAG* algorithms, implementing global error control and
adaptive subdivision.
The source of the nodes and weights used for Gauss-Kronrod quadrature can be found
in [1]_, and the algorithm for calculating the nodes and weights in Genz-Malik
cubature can be found in [2]_.
The rules currently supported via the `rule` argument are:
- ``"gauss-kronrod"``, 21-node Gauss-Kronrod
- ``"genz-malik"``, n-node Genz-Malik
If using Gauss-Kronrod for an ``n``-dim integrand where ``n > 2``, then the
corresponding Cartesian product rule will be found by taking the Cartesian product
of the nodes in the 1D case. This means that the number of nodes scales
exponentially as ``21^n`` in the Gauss-Kronrod case, which may be problematic in a
moderate number of dimensions.
Genz-Malik is typically less accurate than Gauss-Kronrod but has much fewer nodes,
so in this situation using "genz-malik" might be preferable.
Infinite limits are handled with an appropriate variable transformation. Assuming
``a = [a_1, ..., a_n]`` and ``b = [b_1, ..., b_n]``:
If :math:`a_i = -\infty` and :math:`b_i = \infty`, the i-th integration variable
will use the transformation :math:`x = \frac{1-|t|}{t}` and :math:`t \in (-1, 1)`.
If :math:`a_i \ne \pm\infty` and :math:`b_i = \infty`, the i-th integration variable
will use the transformation :math:`x = a_i + \frac{1-t}{t}` and
:math:`t \in (0, 1)`.
If :math:`a_i = -\infty` and :math:`b_i \ne \pm\infty`, the i-th integration
variable will use the transformation :math:`x = b_i - \frac{1-t}{t}` and
:math:`t \in (0, 1)`.
References
----------
.. [1] R. Piessens, E. de Doncker, Quadpack: A Subroutine Package for Automatic
Integration, files: dqk21.f, dqk15.f (1983).
.. [2] A.C. Genz, A.A. Malik, Remarks on algorithm 006: An adaptive algorithm for
numerical integration over an N-dimensional rectangular region, Journal of
Computational and Applied Mathematics, Volume 6, Issue 4, 1980, Pages 295-302,
ISSN 0377-0427
:doi:`10.1016/0771-050X(80)90039-X`
Examples
--------
**1D integral with vector output**:
.. math::
\int^1_0 \mathbf f(x) \text dx
Where ``f(x) = x^n`` and ``n = np.arange(10)`` is a vector. Since no rule is
specified, the default "gk21" is used, which corresponds to Gauss-Kronrod
integration with 21 nodes.
>>> import numpy as np
>>> from scipy.integrate import cubature
>>> def f(x, n):
... # Make sure x and n are broadcastable
... return x[:, np.newaxis]**n[np.newaxis, :]
>>> res = cubature(
... f,
... a=[0],
... b=[1],
... args=(np.arange(10),),
... )
>>> res.estimate
array([1. , 0.5 , 0.33333333, 0.25 , 0.2 ,
0.16666667, 0.14285714, 0.125 , 0.11111111, 0.1 ])
**7D integral with arbitrary-shaped array output**::
f(x) = cos(2*pi*r + alphas @ x)
for some ``r`` and ``alphas``, and the integral is performed over the unit
hybercube, :math:`[0, 1]^7`. Since the integral is in a moderate number of
dimensions, "genz-malik" is used rather than the default "gauss-kronrod" to
avoid constructing a product rule with :math:`21^7 \approx 2 \times 10^9` nodes.
>>> import numpy as np
>>> from scipy.integrate import cubature
>>> def f(x, r, alphas):
... # f(x) = cos(2*pi*r + alphas @ x)
... # Need to allow r and alphas to be arbitrary shape
... npoints, ndim = x.shape[0], x.shape[-1]
... alphas = alphas[np.newaxis, ...]
... x = x.reshape(npoints, *([1]*(len(alphas.shape) - 1)), ndim)
... return np.cos(2*np.pi*r + np.sum(alphas * x, axis=-1))
>>> rng = np.random.default_rng()
>>> r, alphas = rng.random((2, 3)), rng.random((2, 3, 7))
>>> res = cubature(
... f=f,
... a=np.array([0, 0, 0, 0, 0, 0, 0]),
... b=np.array([1, 1, 1, 1, 1, 1, 1]),
... rtol=1e-5,
... rule="genz-malik",
... args=(r, alphas),
... )
>>> res.estimate
array([[-0.79812452, 0.35246913, -0.52273628],
[ 0.88392779, 0.59139899, 0.41895111]])
**Parallel computation with** `workers`:
>>> from concurrent.futures import ThreadPoolExecutor
>>> with ThreadPoolExecutor() as executor:
... res = cubature(
... f=f,
... a=np.array([0, 0, 0, 0, 0, 0, 0]),
... b=np.array([1, 1, 1, 1, 1, 1, 1]),
... rtol=1e-5,
... rule="genz-malik",
... args=(r, alphas),
... workers=executor.map,
... )
>>> res.estimate
array([[-0.79812452, 0.35246913, -0.52273628],
[ 0.88392779, 0.59139899, 0.41895111]])
**2D integral with infinite limits**:
.. math::
\int^{ \infty }_{ -\infty }
\int^{ \infty }_{ -\infty }
e^{-x^2-y^2}
\text dy
\text dx
>>> def gaussian(x):
... return np.exp(-np.sum(x**2, axis=-1))
>>> res = cubature(gaussian, [-np.inf, -np.inf], [np.inf, np.inf])
>>> res.estimate
3.1415926
**1D integral with singularities avoided using** `points`:
.. math::
\int^{ 1 }_{ -1 }
\frac{\sin(x)}{x}
\text dx
It is necessary to use the `points` parameter to avoid evaluating `f` at the origin.
>>> def sinc(x):
... return np.sin(x)/x
>>> res = cubature(sinc, [-1], [1], points=[[0]])
>>> res.estimate
1.8921661
"""
# It is also possible to use a custom rule, but this is not yet part of the public
# API. An example of this can be found in the class scipy.integrate._rules.Rule.
xp = array_namespace(a, b)
max_subdivisions = float("inf") if max_subdivisions is None else max_subdivisions
points = [] if points is None else points
# Convert a and b to arrays and convert each point in points to an array, promoting
# each to a common floating dtype.
a, b, *points = xp_promote(a, b, *points, broadcast=True, force_floating=True,
xp=xp)
result_dtype = a.dtype
if xp_size(a) == 0 or xp_size(b) == 0:
raise ValueError("`a` and `b` must be nonempty")
if a.ndim != 1 or b.ndim != 1:
raise ValueError("`a` and `b` must be 1D arrays")
# If the rule is a string, convert to a corresponding product rule
if isinstance(rule, str):
ndim = xp_size(a)
if rule == "genz-malik":
rule = GenzMalikCubature(ndim, xp=xp)
else:
quadratues = {
"gauss-kronrod": GaussKronrodQuadrature(21, xp=xp),
# Also allow names quad_vec uses:
"gk21": GaussKronrodQuadrature(21, xp=xp),
"gk15": GaussKronrodQuadrature(15, xp=xp),
}
base_rule = quadratues.get(rule)
if base_rule is None:
raise ValueError(f"unknown rule {rule}")
rule = ProductNestedFixed([base_rule] * ndim)
# If any of limits are the wrong way around (a > b), flip them and keep track of
# the sign.
sign = (-1) ** xp.sum(xp.astype(a > b, xp.int8), dtype=result_dtype)
a_flipped = xp.min(xp.stack([a, b]), axis=0)
b_flipped = xp.max(xp.stack([a, b]), axis=0)
a, b = a_flipped, b_flipped
# If any of the limits are infinite, apply a transformation
if xp.any(xp.isinf(a)) or xp.any(xp.isinf(b)):
f = _InfiniteLimitsTransform(f, a, b, xp=xp)
a, b = f.transformed_limits
# Map points from the original coordinates to the new transformed coordinates.
#
# `points` is a list of arrays of shape (ndim,), but transformations are applied
# to arrays of shape (npoints, ndim).
#
# It is not possible to combine all the points into one array and then apply
# f.inv to all of them at once since `points` needs to remain iterable.
# Instead, each point is reshaped to an array of shape (1, ndim), `f.inv` is
# applied, and then each is reshaped back to (ndim,).
points = [xp.reshape(point, (1, -1)) for point in points]
points = [f.inv(point) for point in points]
points = [xp.reshape(point, (-1,)) for point in points]
# Include any problematic points introduced by the transformation
points.extend(f.points)
# If any problematic points are specified, divide the initial region so that these
# points lie on the edge of a subregion.
#
# This means ``f`` won't be evaluated there if the rule being used has no evaluation
# points on the boundary.
if len(points) == 0:
initial_regions = [(a, b)]
else:
initial_regions = _split_region_at_points(a, b, points, xp)
regions = []
est = 0.0
err = 0.0
for a_k, b_k in initial_regions:
est_k = rule.estimate(f, a_k, b_k, args)
err_k = rule.estimate_error(f, a_k, b_k, args)
regions.append(CubatureRegion(est_k, err_k, a_k, b_k, xp))
est += est_k
err += err_k
subdivisions = 0
success = True
with MapWrapper(workers) as mapwrapper:
while xp.any(err > atol + rtol * xp.abs(est)):
# region_k is the region with highest estimated error
region_k = heapq.heappop(regions)
est_k = region_k.estimate
err_k = region_k.error
a_k, b_k = region_k.a, region_k.b
# Subtract the estimate of the integral and its error over this region from
# the current global estimates, since these will be refined in the loop over
# all subregions.
est -= est_k
err -= err_k
# Find all 2^ndim subregions formed by splitting region_k along each axis,
# e.g. for 1D integrals this splits an estimate over an interval into an
# estimate over two subintervals, for 3D integrals this splits an estimate
# over a cube into 8 subcubes.
#
# For each of the new subregions, calculate an estimate for the integral and
# the error there, and push these regions onto the heap for potential
# further subdividing.
executor_args = zip(
itertools.repeat(f),
itertools.repeat(rule),
itertools.repeat(args),
_split_subregion(a_k, b_k, xp),
)
for subdivision_result in mapwrapper(_process_subregion, executor_args):
a_k_sub, b_k_sub, est_sub, err_sub = subdivision_result
est += est_sub
err += err_sub
new_region = CubatureRegion(est_sub, err_sub, a_k_sub, b_k_sub, xp)
heapq.heappush(regions, new_region)
subdivisions += 1
if subdivisions >= max_subdivisions:
success = False
break
status = "converged" if success else "not_converged"
# Apply sign change to handle any limits which were initially flipped.
est = sign * est
return CubatureResult(
estimate=est,
error=err,
status=status,
subdivisions=subdivisions,
regions=regions,
atol=atol,
rtol=rtol,
)
def _process_subregion(data):
f, rule, args, coord = data
a_k_sub, b_k_sub = coord
est_sub = rule.estimate(f, a_k_sub, b_k_sub, args)
err_sub = rule.estimate_error(f, a_k_sub, b_k_sub, args)
return a_k_sub, b_k_sub, est_sub, err_sub
def _is_strictly_in_region(a, b, point, xp):
if xp.all(point == a) or xp.all(point == b):
return False
return xp.all(a <= point) and xp.all(point <= b)
def _split_region_at_points(a, b, points, xp):
"""
Given the integration limits `a` and `b` describing a rectangular region and a list
of `points`, find the list of ``[(a_1, b_1), ..., (a_l, b_l)]`` which breaks up the
initial region into smaller subregion such that no `points` lie strictly inside
any of the subregions.
"""
regions = [(a, b)]
for point in points:
if xp.any(xp.isinf(point)):
# If a point is specified at infinity, ignore.
#
# This case occurs when points are given by the user to avoid, but after
# applying a transformation, they are removed.
continue
new_subregions = []
for a_k, b_k in regions:
if _is_strictly_in_region(a_k, b_k, point, xp):
subregions = _split_subregion(a_k, b_k, xp, point)
for left, right in subregions:
# Skip any zero-width regions.
if xp.any(left == right):
continue
else:
new_subregions.append((left, right))
new_subregions.extend(subregions)
else:
new_subregions.append((a_k, b_k))
regions = new_subregions
return regions
| CubatureResult |
python | openai__openai-python | src/openai/types/chat/chat_completion_content_part_text_param.py | {
"start": 234,
"end": 429
} | class ____(TypedDict, total=False):
text: Required[str]
"""The text content."""
type: Required[Literal["text"]]
"""The type of the content part."""
| ChatCompletionContentPartTextParam |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/mlengine.py | {
"start": 2380,
"end": 2586
} | class ____(BaseGoogleLink):
"""Helper class for constructing ML Engine link."""
name = "MLEngine Jobs List"
key = "ml_engine_jobs_list"
format_str = MLENGINE_JOBS_LIST_LINK
| MLEngineJobSListLink |
python | pypa__warehouse | tests/unit/admin/views/test_projects.py | {
"start": 37253,
"end": 38094
} | class ____:
def test_reindexes_project(self, db_request):
project = ProjectFactory.create(name="foo")
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: "/admin/projects/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.user = UserFactory.create()
# Mock request task handler
request_task_mock = mock.Mock()
db_request.task = request_task_mock
views.reindex_project(project, db_request)
# Make sure reindex_project task was called
request_task_mock.assert_called_with(reindex_project)
assert db_request.session.flash.calls == [
pretend.call("Task sent to reindex the project 'foo'", queue="success")
]
| TestReindexProject |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 681144,
"end": 681726
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("UserEdge"), graphql_name="edges")
nodes = sgqlc.types.Field(sgqlc.types.list_of("User"), graphql_name="nodes")
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| UserConnection |
python | doocs__leetcode | solution/3400-3499/3402.Minimum Operations to Make Columns Strictly Increasing/Solution.py | {
"start": 0,
"end": 342
} | class ____:
def minimumOperations(self, grid: List[List[int]]) -> int:
ans = 0
for col in zip(*grid):
pre = -1
for cur in col:
if pre < cur:
pre = cur
else:
pre += 1
ans += pre - cur
return ans
| Solution |
python | hyperopt__hyperopt | hyperopt/algobase.py | {
"start": 8559,
"end": 10345
} | class ____(ExprEvaluator):
"""Add constructor and call signature to match suggest()
Also, detect when on_node is handling a hyperparameter, and
delegate that to an `on_node_hyperparameter` method. This method
must be implemented by a derived class.
"""
def __init__(self, domain, trials, seed):
ExprEvaluator.__init__(self, domain.s_idxs_vals)
self.domain = domain
self.trials = trials
self.label_by_node = {
n: l for l, n in list(self.domain.vh.vals_by_label().items())
}
self._seed = seed
self.rng = np.random.default_rng(seed)
def __call__(self, new_id):
self.rng = np.random.default_rng(self._seed + new_id)
memo = self.eval_nodes(
memo={self.domain.s_new_ids: [new_id], self.domain.s_rng: self.rng}
)
idxs, vals = memo[self.expr]
new_result = self.domain.new_result()
new_misc = dict(tid=new_id, cmd=self.domain.cmd, workdir=self.domain.workdir)
miscs_update_idxs_vals([new_misc], idxs, vals)
rval = self.trials.new_trial_docs([new_id], [None], [new_result], [new_misc])
return rval
def on_node(self, memo, node):
if node in self.label_by_node:
label = self.label_by_node[node]
return self.on_node_hyperparameter(memo, node, label)
else:
return ExprEvaluator.on_node(self, memo, node)
def batch(self, new_ids):
new_ids = list(new_ids)
self.rng = np.random.default_rng([self._seed] + new_ids)
memo = self.eval_nodes(
memo={self.domain.s_new_ids: new_ids, self.domain.s_rng: self.rng}
)
idxs, vals = memo[self.expr]
return idxs, vals
# -- flake-8 abhors blank line EOF
| SuggestAlgo |
python | jina-ai__jina | tests/integration/docarray_v2/test_issues.py | {
"start": 1025,
"end": 2575
} | class ____(Executor):
@requests(on='/endpoint')
async def endpoint(
self, docs: DocList[RootDocWithNestedList], **kwargs
) -> DocList[RootDocWithNestedList]:
rets = DocList[RootDocWithNestedList]()
rets.append(
RootDocWithNestedList(
text='hello world', nested=[Nested1Doc(nested=Nested2Doc(value='test'))]
)
)
return rets
def test_issue_6019():
flow = Flow().add(name='inference', needs='gateway', uses=NestedSchemaExecutor)
with flow:
res = flow.post(
on='/endpoint', inputs=RootDoc(text='hello'), return_type=DocList[RootDoc]
)
assert res[0].text == 'hello world'
assert res[0].nested.nested.value == 'test'
def test_issue_6019_with_nested_list():
flow = Flow().add(name='inference', needs='gateway', uses=ListNestedSchemaExecutor)
with flow:
res = flow.post(
on='/endpoint',
inputs=RootDocWithNestedList(text='hello'),
return_type=DocList[RootDocWithNestedList],
)
assert res[0].text == 'hello world'
assert res[0].nested[0].nested.value == 'test'
def test_issue_6084():
class EnvInfo(BaseDoc):
history: str = ''
class A(BaseDoc):
b: EnvInfo
class MyIssue6084Exec(Executor):
@requests
def foo(self, docs: DocList[A], **kwargs) -> DocList[A]:
pass
f = Flow().add(uses=MyIssue6084Exec).add(uses=MyIssue6084Exec)
with f:
pass
| ListNestedSchemaExecutor |
python | Textualize__textual | src/textual/renderables/blank.py | {
"start": 268,
"end": 1359
} | class ____(Visual):
"""Draw solid background color."""
def __init__(self, color: Color | str = "transparent") -> None:
self._rich_style = RichStyle.from_color(bgcolor=Color.parse(color).rich_color)
def visualize(self) -> Blank:
return self
def get_optimal_width(self, rules: RulesMap, container_width: int) -> int:
return container_width
def get_height(self, rules: RulesMap, width: int) -> int:
return 1
def render_strips(
self, width: int, height: int | None, style: Style, options: RenderOptions
) -> list[Strip]:
"""Render the Visual into an iterable of strips. Part of the Visual protocol.
Args:
width: Width of desired render.
height: Height of desired render or `None` for any height.
style: The base style to render on top of.
options: Additional render options.
Returns:
An list of Strips.
"""
line_count = 1 if height is None else height
return [Strip.blank(width, self._rich_style)] * line_count
| Blank |
python | django__django | django/contrib/gis/admin/options.py | {
"start": 627,
"end": 689
} | class ____(GeoModelAdminMixin, ModelAdmin):
pass
| GISModelAdmin |
python | kamyu104__LeetCode-Solutions | Python/palindromic-substrings.py | {
"start": 29,
"end": 656
} | class ____(object):
def countSubstrings(self, s):
"""
:type s: str
:rtype: int
"""
def manacher(s):
s = '^#' + '#'.join(s) + '#$'
P = [0] * len(s)
C, R = 0, 0
for i in xrange(1, len(s) - 1):
i_mirror = 2*C-i
if R > i:
P[i] = min(R-i, P[i_mirror])
while s[i+1+P[i]] == s[i-1-P[i]]:
P[i] += 1
if i+P[i] > R:
C, R = i, i+P[i]
return P
return sum((max_len+1)//2 for max_len in manacher(s))
| Solution |
python | weaviate__weaviate-python-client | weaviate/exceptions.py | {
"start": 3040,
"end": 3768
} | class ____(WeaviateBaseError):
def __init__(self, location: str, response: httpx.Response):
"""Raised when a weaviate response cannot be decoded to json.
Args:
location: From which code path the exception was raised.
response: The request response of which the status code was unexpected.
"""
msg = f"Cannot decode response from weaviate {response} with content '{response.text}' for request from {location}"
super().__init__(msg)
self._status_code: int = response.status_code
@property
def status_code(self) -> int:
return self._status_code
ResponseCannotBeDecodedException = ResponseCannotBeDecodedError
| ResponseCannotBeDecodedError |
python | Textualize__textual | src/textual/demo/page.py | {
"start": 1238,
"end": 2527
} | class ____(Screen):
DEFAULT_CSS = """
PageScreen {
width: 100%;
height: 1fr;
overflow-y: auto;
}
"""
BINDINGS = [
Binding(
"c",
"show_code",
"Code",
tooltip="Show the code used to generate this screen",
)
]
@work(thread=True)
def get_code(self, source_file: str) -> str | None:
"""Read code from disk, or return `None` on error."""
try:
with open(source_file, "rt", encoding="utf-8") as file_:
return file_.read()
except Exception:
return None
async def action_show_code(self):
source_file = inspect.getsourcefile(self.__class__)
if source_file is None:
self.notify(
"Could not get the code for this page",
title="Show code",
severity="error",
)
return
code = await self.get_code(source_file).wait()
if code is None:
self.notify(
"Could not get the code for this page",
title="Show code",
severity="error",
)
else:
self.app.push_screen(CodeScreen("Code for this page", code))
| PageScreen |
python | getsentry__sentry | src/sentry/workflow_engine/migration_helpers/alert_rule.py | {
"start": 2662,
"end": 37430
} | class ____(Exception):
pass
def get_action_type(alert_rule_trigger_action: AlertRuleTriggerAction) -> Action.Type | None:
return TYPE_TO_PROVIDER.get(alert_rule_trigger_action.type, None)
def build_sentry_app_data_blob(
alert_rule_trigger_action: AlertRuleTriggerAction,
) -> dict[str, Any]:
if not alert_rule_trigger_action.sentry_app_config:
return {}
# Convert config to proper type for SentryAppDataBlob
settings = (
[alert_rule_trigger_action.sentry_app_config]
if isinstance(alert_rule_trigger_action.sentry_app_config, dict)
else alert_rule_trigger_action.sentry_app_config
)
return dataclasses.asdict(SentryAppDataBlob.from_list(settings))
def build_on_call_data_blob(
alert_rule_trigger_action: AlertRuleTriggerAction, action_type: Action.Type
) -> dict[str, Any]:
default_priority = (
OPSGENIE_DEFAULT_PRIORITY
if action_type == Action.Type.OPSGENIE
else PAGERDUTY_DEFAULT_SEVERITY
)
if not alert_rule_trigger_action.sentry_app_config:
return {"priority": default_priority}
# Ensure sentry_app_config is a dict before accessing
config = alert_rule_trigger_action.sentry_app_config
if not isinstance(config, dict):
return {"priority": default_priority}
priority = config.get("priority", default_priority)
return dataclasses.asdict(OnCallDataBlob(priority=priority))
def build_action_data_blob(
alert_rule_trigger_action: AlertRuleTriggerAction, action_type: Action.Type
) -> dict[str, Any]:
# if the action is a Sentry app, we need to get the Sentry app installation ID
if action_type == Action.Type.SENTRY_APP:
return build_sentry_app_data_blob(alert_rule_trigger_action)
elif action_type in (Action.Type.OPSGENIE, Action.Type.PAGERDUTY):
return build_on_call_data_blob(alert_rule_trigger_action, action_type)
else:
return {}
def get_target_identifier(
alert_rule_trigger_action: AlertRuleTriggerAction, action_type: Action.Type
) -> str | None:
if action_type == Action.Type.SENTRY_APP:
# Ensure we have a valid sentry_app_id
if not alert_rule_trigger_action.sentry_app_id:
raise ValidationError(
f"sentry_app_id is required for Sentry App actions for alert rule trigger action {alert_rule_trigger_action.id}",
)
return str(alert_rule_trigger_action.sentry_app_id)
# Ensure we have a valid target_identifier
return alert_rule_trigger_action.target_identifier
def build_action_config(
target_display: str | None, target_identifier: str | None, target_type: int
) -> dict[str, str | int | None]:
base_config = {
"target_display": target_display,
"target_identifier": target_identifier,
"target_type": target_type,
}
if target_type == ActionTarget.SENTRY_APP.value:
base_config["sentry_app_identifier"] = SentryAppIdentifier.SENTRY_APP_ID
return base_config
def get_detector_trigger(
alert_rule_trigger: AlertRuleTrigger, priority: DetectorPriorityLevel
) -> DataCondition | None:
"""
Helper method to find the detector trigger corresponding to an AlertRuleTrigger.
Returns None if the detector cannot be found. Raises an exception if the detector
exists but the detector trigger cannot be found.
"""
alert_rule = alert_rule_trigger.alert_rule
try:
alert_rule_detector = AlertRuleDetector.objects.get(alert_rule_id=alert_rule.id)
except AlertRuleDetector.DoesNotExist:
# We attempted to dual delete a trigger that was not dual migrated
logger.info(
"alert rule was not dual written, returning early",
extra={"alert_rule": alert_rule},
)
return None
detector = alert_rule_detector.detector
detector_data_condition_group = detector.workflow_condition_group
if detector_data_condition_group is None:
logger.error(
"detector_data_condition_group does not exist",
extra={"alert_rule_trigger_id": alert_rule_trigger.id},
)
raise MissingDataConditionGroup
detector_trigger = DataCondition.objects.get(
condition_group=detector_data_condition_group,
condition_result=priority,
)
return detector_trigger
def get_action_filter(
alert_rule_trigger: AlertRuleTrigger, priority: DetectorPriorityLevel
) -> DataCondition:
"""
Helper method to find the action filter corresponding to an AlertRuleTrigger.
Raises an exception if the action filter cannot be found.
"""
alert_rule = alert_rule_trigger.alert_rule
alert_rule_workflow = AlertRuleWorkflow.objects.get(alert_rule_id=alert_rule.id)
workflow = alert_rule_workflow.workflow
workflow_dcgs = DataConditionGroup.objects.filter(workflowdataconditiongroup__workflow=workflow)
action_filter = DataCondition.objects.get(
condition_group__in=workflow_dcgs,
comparison=priority,
type=Condition.ISSUE_PRIORITY_GREATER_OR_EQUAL,
)
return action_filter
def migrate_metric_action(
alert_rule_trigger_action: AlertRuleTriggerAction,
) -> tuple[Action, DataConditionGroupAction, ActionAlertRuleTriggerAction]:
alert_rule_trigger = alert_rule_trigger_action.alert_rule_trigger
priority = PRIORITY_MAP.get(alert_rule_trigger.label, DetectorPriorityLevel.HIGH)
action_filter = get_action_filter(alert_rule_trigger, priority)
action_type = get_action_type(alert_rule_trigger_action)
if not action_type:
logger.warning(
"Could not find a matching Action.Type for the trigger action",
extra={"alert_rule_trigger_action_id": alert_rule_trigger_action.id},
)
raise ValidationError(
f"Could not find a matching Action.Type for the trigger action {alert_rule_trigger_action.id}"
)
# Ensure action_type is Action.Type before passing to functions
action_type_enum = Action.Type(action_type)
data = build_action_data_blob(alert_rule_trigger_action, action_type_enum)
target_identifier = get_target_identifier(alert_rule_trigger_action, action_type_enum)
action_config = build_action_config(
alert_rule_trigger_action.target_display,
target_identifier,
alert_rule_trigger_action.target_type,
)
action = Action.objects.create(
type=action_type_enum,
data=data,
integration_id=alert_rule_trigger_action.integration_id,
config=action_config,
)
data_condition_group_action = DataConditionGroupAction.objects.create(
condition_group_id=action_filter.condition_group.id,
action_id=action.id,
)
action_alert_rule_trigger_action = ActionAlertRuleTriggerAction.objects.create(
action_id=action.id,
alert_rule_trigger_action_id=alert_rule_trigger_action.id,
)
return action, data_condition_group_action, action_alert_rule_trigger_action
def migrate_metric_data_conditions(
alert_rule_trigger: AlertRuleTrigger,
) -> tuple[DataCondition, DataCondition, DataCondition]:
alert_rule = alert_rule_trigger.alert_rule
# create a data condition for the Detector's data condition group with the
# threshold and associated priority level
alert_rule_detector = AlertRuleDetector.objects.select_related(
"detector__workflow_condition_group"
).get(alert_rule_id=alert_rule.id)
detector = alert_rule_detector.detector
detector_data_condition_group = detector.workflow_condition_group
if detector_data_condition_group is None:
logger.error(
"detector.workflow_condition_group does not exist", extra={"detector": detector}
)
raise MissingDataConditionGroup
threshold_type = (
Condition.GREATER
if alert_rule.threshold_type == AlertRuleThresholdType.ABOVE.value
else Condition.LESS
)
condition_result = PRIORITY_MAP.get(alert_rule_trigger.label, DetectorPriorityLevel.HIGH)
if alert_rule.detection_type == AlertRuleDetectionType.DYNAMIC:
detector_trigger = DataCondition.objects.create(
type=Condition.ANOMALY_DETECTION,
comparison={
"sensitivity": alert_rule.sensitivity,
"seasonality": alert_rule.seasonality,
"threshold_type": alert_rule.threshold_type,
},
condition_result=condition_result,
condition_group=detector_data_condition_group,
)
else:
detector_trigger = DataCondition.objects.create(
comparison=alert_rule_trigger.alert_threshold,
condition_result=condition_result,
type=threshold_type,
condition_group=detector_data_condition_group,
)
DataConditionAlertRuleTrigger.objects.create(
data_condition=detector_trigger,
alert_rule_trigger_id=alert_rule_trigger.id,
)
# create an "action filter": if the detector's status matches a certain priority level,
# then the condition result is set to true
data_condition_group = DataConditionGroup.objects.create(
organization_id=alert_rule.organization_id
)
alert_rule_workflow = AlertRuleWorkflow.objects.select_related("workflow").get(
alert_rule_id=alert_rule.id
)
WorkflowDataConditionGroup.objects.create(
condition_group=data_condition_group,
workflow=alert_rule_workflow.workflow,
)
action_filter = DataCondition.objects.create(
comparison=PRIORITY_MAP.get(alert_rule_trigger.label, DetectorPriorityLevel.HIGH),
condition_result=True,
type=Condition.ISSUE_PRIORITY_GREATER_OR_EQUAL,
condition_group=data_condition_group,
)
# finally, create a "resolution action filter": the condition result is set to true
# if we're de-escalating from the priority specified in the comparison
resolve_action_filter = DataCondition.objects.create(
comparison=PRIORITY_MAP.get(alert_rule_trigger.label, DetectorPriorityLevel.HIGH),
condition_result=True,
type=Condition.ISSUE_PRIORITY_DEESCALATING,
condition_group=data_condition_group,
)
return detector_trigger, action_filter, resolve_action_filter
def get_resolve_threshold(detector_data_condition_group: DataConditionGroup) -> float:
"""
Helper method to get the resolve threshold for a Detector if none is specified on
the legacy AlertRule.
"""
detector_triggers = DataCondition.objects.filter(condition_group=detector_data_condition_group)
warning_data_condition = detector_triggers.filter(
condition_result=DetectorPriorityLevel.MEDIUM
).first()
if warning_data_condition is not None:
resolve_threshold = warning_data_condition.comparison
else:
critical_data_condition = detector_triggers.filter(
condition_result=DetectorPriorityLevel.HIGH
).first()
if critical_data_condition is None:
logger.error(
"no critical or warning data conditions exist for detector data condition group",
extra={"detector_data_condition_group": detector_triggers},
)
return -1
else:
resolve_threshold = critical_data_condition.comparison
return resolve_threshold
def migrate_resolve_threshold_data_condition(
alert_rule: AlertRule,
) -> DataCondition:
"""
Create data conditions for the old world's "resolve" threshold. If a resolve threshold
has been explicitly set on the alert rule, then use this as our comparison value. Otherwise,
we need to figure out what the resolve threshold is based on the trigger threshold values.
"""
alert_rule_detector = AlertRuleDetector.objects.select_related(
"detector__workflow_condition_group"
).get(alert_rule_id=alert_rule.id)
detector = alert_rule_detector.detector
detector_data_condition_group = detector.workflow_condition_group
if detector_data_condition_group is None:
logger.error("workflow_condition_group does not exist", extra={"detector": detector})
raise MissingDataConditionGroup
# XXX: we set the resolve trigger's threshold_type to whatever the opposite of the rule's threshold_type is
# e.g. if the rule has a critical trigger ABOVE some number, the resolve threshold is automatically set to BELOW
threshold_type = (
Condition.LESS_OR_EQUAL
if alert_rule.threshold_type == AlertRuleThresholdType.ABOVE.value
else Condition.GREATER_OR_EQUAL
)
if alert_rule.resolve_threshold is not None:
resolve_threshold = alert_rule.resolve_threshold
else:
# figure out the resolve threshold ourselves
resolve_threshold = get_resolve_threshold(detector_data_condition_group)
if resolve_threshold == -1:
# something went wrong
raise UnresolvableResolveThreshold
detector_trigger = DataCondition.objects.create(
comparison=resolve_threshold,
condition_result=DetectorPriorityLevel.OK,
type=threshold_type,
condition_group=detector_data_condition_group,
)
return detector_trigger
def create_metric_alert_lookup_tables(
alert_rule: AlertRule,
detector: Detector,
workflow: Workflow,
) -> tuple[AlertRuleDetector, AlertRuleWorkflow, DetectorWorkflow]:
alert_rule_detector = AlertRuleDetector.objects.create(
alert_rule_id=alert_rule.id, detector=detector
)
alert_rule_workflow = AlertRuleWorkflow.objects.create(
alert_rule_id=alert_rule.id, workflow=workflow
)
detector_workflow = DetectorWorkflow.objects.create(detector=detector, workflow=workflow)
return (
alert_rule_detector,
alert_rule_workflow,
detector_workflow,
)
def create_data_source(
organization_id: int, snuba_query: SnubaQuery | None = None
) -> DataSource | None:
if not snuba_query:
return None
try:
query_subscription = QuerySubscription.objects.get(snuba_query=snuba_query.id)
except QuerySubscription.DoesNotExist:
return None
return DataSource.objects.create(
organization_id=organization_id,
source_id=str(query_subscription.id),
type="snuba_query_subscription",
)
def update_data_source_for_detector(alert_rule: AlertRule, detector: Detector) -> None:
"""
Updates the Detector's DataSource to point to the AlertRule's current QuerySubscription.
"""
snuba_query = alert_rule.snuba_query
if not snuba_query:
logger.error(
"AlertRule has no SnubaQuery",
extra={"alert_rule_id": alert_rule.id},
)
return
current_subscription = QuerySubscription.objects.filter(snuba_query=snuba_query.id).first()
if not current_subscription:
logger.error(
"No QuerySubscription found for AlertRule's SnubaQuery",
extra={"alert_rule_id": alert_rule.id, "snuba_query_id": snuba_query.id},
)
return
data_source = DataSource.objects.filter(
detectors=detector,
type=DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION,
).first()
if not data_source:
logger.warning(
"No DataSource found for Detector",
extra={"detector_id": detector.id, "alert_rule_id": alert_rule.id},
)
return
new_source_id = str(current_subscription.id)
if data_source.source_id == new_source_id:
return
old_source_id = data_source.source_id
data_source.update(source_id=new_source_id)
logger.info(
"Updated DataSource to current QuerySubscription",
extra={
"data_source_id": data_source.id,
"old_source_id": old_source_id,
"new_source_id": new_source_id,
"alert_rule_id": alert_rule.id,
},
)
def create_data_condition_group(organization_id: int) -> DataConditionGroup:
return DataConditionGroup.objects.create(
organization_id=organization_id,
)
def create_workflow(
alert_rule: AlertRule,
name: str,
organization_id: int,
user: RpcUser | None = None,
) -> Workflow:
return Workflow.objects.create(
name=name,
organization_id=organization_id,
when_condition_group=None,
enabled=True,
created_by_id=user.id if user else None,
owner_user_id=alert_rule.user_id,
owner_team=alert_rule.team,
config={},
)
def get_detector_field_values(
alert_rule: AlertRule,
data_condition_group: DataConditionGroup,
project_id: int | None = None,
user: RpcUser | None = None,
) -> dict[str, Any]:
detector_field_values = {
"name": alert_rule.name if len(alert_rule.name) < 200 else alert_rule.name[:197] + "...",
"description": alert_rule.description,
"workflow_condition_group": data_condition_group,
"owner_user_id": alert_rule.user_id,
"owner_team": alert_rule.team,
"config": {
"comparison_delta": alert_rule.comparison_delta,
"detection_type": alert_rule.detection_type,
},
}
if project_id is not None:
# these fields are only set on create, not update
detector_field_values.update(
{
"project_id": project_id,
"enabled": True,
"created_by_id": user.id if user else None,
"type": MetricIssue.slug,
}
)
return detector_field_values
def create_detector(
alert_rule: AlertRule,
project_id: int,
data_condition_group: DataConditionGroup,
user: RpcUser | None = None,
) -> Detector:
detector_field_values = get_detector_field_values(
alert_rule, data_condition_group, project_id, user
)
return Detector.objects.create(**detector_field_values)
def update_detector(
alert_rule: AlertRule,
detector: Detector,
) -> Detector:
if detector.workflow_condition_group is None:
raise MissingDataConditionGroup
detector_field_values = get_detector_field_values(alert_rule, detector.workflow_condition_group)
detector.update(**detector_field_values)
return detector
def migrate_alert_rule(
alert_rule: AlertRule,
user: RpcUser | None = None,
) -> tuple[
DataSource,
DataConditionGroup,
Workflow,
Detector,
DetectorState,
AlertRuleDetector,
AlertRuleWorkflow,
DetectorWorkflow,
]:
organization_id = alert_rule.organization_id
project = alert_rule.projects.get()
data_source = create_data_source(organization_id, alert_rule.snuba_query)
if not data_source:
raise CouldNotCreateDataSource
detector_data_condition_group = create_data_condition_group(organization_id)
detector = create_detector(alert_rule, project.id, detector_data_condition_group, user)
workflow_name = get_workflow_name(alert_rule)
workflow = create_workflow(alert_rule, workflow_name, organization_id, user)
open_incident = Incident.objects.get_active_incident(alert_rule, project)
if open_incident:
state = (
DetectorPriorityLevel.MEDIUM
if open_incident.status == IncidentStatus.WARNING.value
else DetectorPriorityLevel.HIGH
)
else:
state = DetectorPriorityLevel.OK
data_source.detectors.set([detector])
detector_state = DetectorState.objects.create(
detector=detector,
is_triggered=True if open_incident else False,
state=state,
)
alert_rule_detector, alert_rule_workflow, detector_workflow = create_metric_alert_lookup_tables(
alert_rule, detector, workflow
)
return (
data_source,
detector_data_condition_group,
workflow,
detector,
detector_state,
alert_rule_detector,
alert_rule_workflow,
detector_workflow,
)
def dual_write_alert_rule(alert_rule: AlertRule, user: RpcUser | None = None) -> None:
"""
Comprehensively dual write the ACI objects corresponding to an alert rule, its triggers, and
its actions. All these objects will have been created before calling this method.
"""
with transaction.atomic(router.db_for_write(Detector)):
# step 1: migrate the alert rule
migrate_alert_rule(alert_rule, user)
triggers = AlertRuleTrigger.objects.filter(alert_rule=alert_rule)
# step 2: migrate each trigger
for trigger in triggers:
migrate_metric_data_conditions(trigger)
trigger_actions = AlertRuleTriggerAction.objects.filter(alert_rule_trigger=trigger)
# step 3: migrate this trigger's actions
for trigger_action in trigger_actions:
migrate_metric_action(trigger_action)
# step 4: migrate alert rule resolution
# if the alert rule is an anomaly detection alert, then this is handled by the anomaly detection data condition
if alert_rule.detection_type != AlertRuleDetectionType.DYNAMIC:
migrate_resolve_threshold_data_condition(alert_rule)
def dual_update_alert_rule(alert_rule: AlertRule) -> None:
"""
Comprehensively dual update the ACI objects corresponding to an alert rule, its triggers, and
its actions. All of these objects will have been created/updated prior to calling this method.
If an alert was not dual written, then quit early. If a trigger/trigger action on a dual written
alert rule has no ACI equivalent, then create the corresponding ACI objects. Otherwise, update
the corresponding ACI objects.
"""
try:
AlertRuleDetector.objects.get(alert_rule_id=alert_rule.id)
except AlertRuleDetector.DoesNotExist:
logger.info(
"alert rule was not dual written, returning early",
extra={"alert_rule": alert_rule},
)
# This alert rule was not dual written
return None
with transaction.atomic(router.db_for_write(Detector)):
# step 1: update the alert rule
dual_update_migrated_alert_rule(alert_rule)
triggers = AlertRuleTrigger.objects.filter(alert_rule=alert_rule)
# step 2: create/update the ACI objects for triggers
for trigger in triggers:
try:
get_detector_trigger(trigger, PRIORITY_MAP[trigger.label])
except DataCondition.DoesNotExist:
# we need to migrate this trigger
migrate_metric_data_conditions(trigger)
dual_update_migrated_alert_rule_trigger(trigger)
trigger_actions = AlertRuleTriggerAction.objects.filter(alert_rule_trigger=trigger)
# step 3: create/update the ACI objects for this trigger's actions
for trigger_action in trigger_actions:
try:
ActionAlertRuleTriggerAction.objects.get(
alert_rule_trigger_action_id=trigger_action.id
)
except ActionAlertRuleTriggerAction.DoesNotExist:
# we need to migrate this action
migrate_metric_action(trigger_action)
dual_update_migrated_alert_rule_trigger_action(trigger_action)
# step 4: update alert rule resolution
dual_update_resolve_condition(alert_rule)
def dual_update_migrated_alert_rule(alert_rule: AlertRule) -> (
tuple[
DetectorState,
Detector,
]
| None
):
alert_rule_detector = AlertRuleDetector.objects.get(alert_rule_id=alert_rule.id)
detector: Detector = alert_rule_detector.detector
detector_state = DetectorState.objects.get(detector=detector)
update_detector(alert_rule, detector)
# Sync the DataSource to ensure it points to a valid QuerySubscription
update_data_source_for_detector(alert_rule, detector)
data_condition_group = detector.workflow_condition_group
if data_condition_group is None:
# this shouldn't be possible due to the way we dual write
logger.error(
"AlertRuleDetector has no associated DataConditionGroup",
extra={"alert_rule_id": alert_rule.id},
)
raise MissingDataConditionGroup
data_conditions = DataCondition.objects.filter(condition_group=data_condition_group)
# update the data condition types if the threshold type was updated
threshold_type = (
Condition.GREATER
if alert_rule.threshold_type == AlertRuleThresholdType.ABOVE.value
else Condition.LESS
)
resolve_threshold_type = (
Condition.LESS_OR_EQUAL
if alert_rule.threshold_type == AlertRuleThresholdType.ABOVE.value
else Condition.GREATER_OR_EQUAL
)
for dc in data_conditions:
if dc.condition_result == DetectorPriorityLevel.OK:
dc.update(type=resolve_threshold_type)
else:
dc.update(type=threshold_type)
# reset detector status, as the rule was updated
detector_state.update(is_triggered=False, state=DetectorPriorityLevel.OK)
return detector_state, detector
def dual_update_resolve_condition(alert_rule: AlertRule) -> DataCondition | None:
"""
Helper method to update the detector trigger for a legacy resolution "trigger."
"""
# if the alert rule hasn't been dual written, return early
try:
alert_rule_detector = AlertRuleDetector.objects.get(alert_rule_id=alert_rule.id)
except AlertRuleDetector.DoesNotExist:
# We attempted to dual delete a trigger that was not dual migrated
return None
detector = alert_rule_detector.detector
detector_data_condition_group = detector.workflow_condition_group
if detector_data_condition_group is None:
logger.error(
"detector_data_condition_group does not exist",
extra={"alert_rule_id": alert_rule.id},
)
raise MissingDataConditionGroup
data_conditions = DataCondition.objects.filter(condition_group=detector_data_condition_group)
resolve_condition = data_conditions.filter(condition_result=DetectorPriorityLevel.OK).first()
# changing detector priority level for anomaly detection alerts is handled by the data condition
# so we should delete the explicit resolution condition if it exists
if alert_rule.detection_type == AlertRuleDetectionType.DYNAMIC:
if resolve_condition is not None:
resolve_condition.delete()
return None
# if we're changing from anomaly detection alert to regular metric alert, we need to recreate the resolve condition
if resolve_condition is None:
resolve_condition = migrate_resolve_threshold_data_condition(alert_rule)
return resolve_condition
if alert_rule.resolve_threshold is not None:
resolve_threshold = alert_rule.resolve_threshold
else:
resolve_threshold = get_resolve_threshold(detector_data_condition_group)
if resolve_threshold == -1:
raise UnresolvableResolveThreshold
resolve_condition.update(comparison=resolve_threshold)
return resolve_condition
def dual_update_migrated_alert_rule_trigger(
alert_rule_trigger: AlertRuleTrigger,
) -> tuple[DataCondition, DataCondition] | None:
priority = PRIORITY_MAP.get(alert_rule_trigger.label, DetectorPriorityLevel.HIGH)
detector_trigger = get_detector_trigger(alert_rule_trigger, priority)
if detector_trigger is None:
# we will have already verified that that alert rule was dual written, so
# we won't reach this path
return None
action_filter = get_action_filter(alert_rule_trigger, priority)
resolve_action_filter = DataCondition.objects.filter(
condition_group=action_filter.condition_group,
type=Condition.ISSUE_PRIORITY_DEESCALATING,
).first()
updated_detector_trigger_fields: dict[str, Any] = {}
updated_action_filter_fields: dict[str, Any] = {}
label = alert_rule_trigger.label
updated_detector_trigger_fields["condition_result"] = PRIORITY_MAP.get(
label, DetectorPriorityLevel.HIGH
)
updated_action_filter_fields["comparison"] = PRIORITY_MAP.get(label, DetectorPriorityLevel.HIGH)
alert_rule = alert_rule_trigger.alert_rule
# if we're changing to anomaly detection, we need to set the comparison JSON
if alert_rule.detection_type == AlertRuleDetectionType.DYNAMIC:
updated_detector_trigger_fields["type"] = Condition.ANOMALY_DETECTION
updated_detector_trigger_fields["comparison"] = {
"sensitivity": alert_rule.sensitivity,
"seasonality": alert_rule.seasonality,
"threshold_type": alert_rule.threshold_type,
}
else:
updated_detector_trigger_fields["comparison"] = alert_rule_trigger.alert_threshold
detector_trigger.update(**updated_detector_trigger_fields)
if updated_action_filter_fields:
# these are updated together
action_filter.update(**updated_action_filter_fields)
if resolve_action_filter is not None:
resolve_action_filter.update(**updated_action_filter_fields)
return detector_trigger, action_filter
def dual_update_migrated_alert_rule_trigger_action(
trigger_action: AlertRuleTriggerAction,
) -> Action | None:
aarta = ActionAlertRuleTriggerAction.objects.get(alert_rule_trigger_action_id=trigger_action.id)
action = aarta.action
action_type = get_action_type(trigger_action)
if not action_type:
logger.error(
"Could not find a matching Action.Type for the trigger action",
extra={"alert_rule_trigger_action_id": trigger_action.id},
)
raise ValidationError(
f"Could not find a matching Action.Type for the trigger action {trigger_action.id}"
)
data = build_action_data_blob(trigger_action, action_type)
target_identifier = get_target_identifier(trigger_action, action_type)
action_config = build_action_config(
trigger_action.target_display,
target_identifier,
trigger_action.target_type,
)
updated_action_fields: dict[str, Any] = {}
updated_action_fields["type"] = Action.Type(action_type)
updated_action_fields["data"] = data
updated_action_fields["integration_id"] = trigger_action.integration_id
updated_action_fields["config"] = action_config
action.update(**updated_action_fields)
return action
def get_data_source(alert_rule: AlertRule) -> DataSource | None:
snuba_query = alert_rule.snuba_query
organization = alert_rule.organization
if not snuba_query or not organization:
# This shouldn't be possible, but just in case.
return None
try:
query_subscription = QuerySubscription.objects.get(snuba_query=snuba_query.id)
except QuerySubscription.DoesNotExist:
return None
try:
data_source = DataSource.objects.get(
organization=organization,
source_id=query_subscription.id,
type=DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION,
)
except DataSource.DoesNotExist:
return None
return data_source
def dual_delete_migrated_alert_rule(alert_rule: AlertRule) -> None:
try:
alert_rule_detector = AlertRuleDetector.objects.get(alert_rule_id=alert_rule.id)
except AlertRuleDetector.DoesNotExist:
# NOTE: we run the dual delete even if the user isn't flagged into dual write
logger.info(
"alert rule was not dual written or objects were already deleted, returning early",
extra={"alert_rule_id": alert_rule.id},
)
return
detector: Detector = alert_rule_detector.detector
alert_rule_workflow = None
try:
alert_rule_workflow = AlertRuleWorkflow.objects.get(alert_rule_id=alert_rule.id)
except AlertRuleWorkflow.DoesNotExist:
logger.exception(
"AlertRuleWorkflow not found for AlertRule, workflow may be orphaned",
extra={"detector_id": detector.id},
)
if alert_rule_workflow:
workflow: Workflow = alert_rule_workflow.workflow
with transaction.atomic(router.db_for_write(Detector)):
detector.update(status=ObjectStatus.PENDING_DELETION)
workflow.update(status=ObjectStatus.PENDING_DELETION)
RegionScheduledDeletion.schedule(instance=detector, days=0)
RegionScheduledDeletion.schedule(instance=workflow, days=0)
else:
with transaction.atomic(router.db_for_write(Detector)):
detector.update(status=ObjectStatus.PENDING_DELETION)
RegionScheduledDeletion.schedule(instance=detector, days=0)
return
def dual_delete_migrated_alert_rule_trigger(alert_rule_trigger: AlertRuleTrigger) -> None:
priority = PRIORITY_MAP.get(alert_rule_trigger.label, DetectorPriorityLevel.HIGH)
detector_trigger = get_detector_trigger(alert_rule_trigger, priority)
if detector_trigger is None:
logger.info(
"alert rule was not dual written, returning early",
extra={"alert_rule": alert_rule_trigger.alert_rule},
)
return None
action_filter = get_action_filter(alert_rule_trigger, priority)
action_filter_dcg = action_filter.condition_group
# also dual delete the ACI objects for the trigger's associated trigger actions
actions_to_dual_delete = AlertRuleTriggerAction.objects.filter(
alert_rule_trigger_id=alert_rule_trigger.id
)
with transaction.atomic(router.db_for_write(DataCondition)):
for trigger_action in actions_to_dual_delete:
aarta = ActionAlertRuleTriggerAction.objects.get(
alert_rule_trigger_action_id=trigger_action.id
)
action = aarta.action
action.delete()
detector_trigger.delete()
action_filter_dcg.delete() # deletes the action filter and resolve action filter
return None
def dual_delete_migrated_alert_rule_trigger_action(trigger_action: AlertRuleTriggerAction) -> None:
alert_rule_trigger = trigger_action.alert_rule_trigger
# Check that we dual wrote this action
priority = PRIORITY_MAP.get(alert_rule_trigger.label, DetectorPriorityLevel.HIGH)
detector_trigger = get_detector_trigger(alert_rule_trigger, priority)
if detector_trigger is None:
logger.info(
"alert rule was not dual written, returning early",
extra={"alert_rule": alert_rule_trigger.alert_rule},
)
return None
aarta = ActionAlertRuleTriggerAction.objects.get(alert_rule_trigger_action_id=trigger_action.id)
with transaction.atomic(router.db_for_write(Action)):
action = aarta.action
action.delete()
return None
| CouldNotCreateDataSource |
python | Textualize__textual | src/textual/widgets/_data_table.py | {
"start": 2653,
"end": 4057
} | class ____:
"""An object used as a key in a mapping.
It can optionally wrap a string,
and lookups into a map using the object behave the same as lookups using
the string itself."""
value: str | None
def __init__(self, value: str | None = None):
self.value = value
def __hash__(self):
# If a string is supplied, we use the hash of the string. If no string was
# supplied, we use the default hash to ensure uniqueness amongst instances.
return hash(self.value) if self.value is not None else id(self)
def __eq__(self, other: object) -> bool:
# Strings will match Keys containing the same string value.
# Otherwise, you'll need to supply the exact same key object.
if isinstance(other, str):
return self.value == other
elif isinstance(other, StringKey):
if self.value is not None and other.value is not None:
return self.value == other.value
else:
return hash(self) == hash(other)
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, str):
return self.value < other
elif isinstance(other, StringKey):
return self.value < other.value
else:
return NotImplemented
def __rich_repr__(self):
yield "value", self.value
| StringKey |
python | pandas-dev__pandas | pandas/tests/internals/test_internals.py | {
"start": 10539,
"end": 30079
} | class ____:
def test_attrs(self):
mgr = create_mgr("a,b,c: f8-1; d,e,f: f8-2")
assert mgr.nblocks == 2
assert len(mgr) == 6
def test_duplicate_ref_loc_failure(self):
tmp_mgr = create_mgr("a:bool; a: f8")
axes, blocks = tmp_mgr.axes, tmp_mgr.blocks
blocks[0].mgr_locs = BlockPlacement(np.array([0]))
blocks[1].mgr_locs = BlockPlacement(np.array([0]))
# test trying to create block manager with overlapping ref locs
msg = "Gaps in blk ref_locs"
mgr = BlockManager(blocks, axes)
with pytest.raises(AssertionError, match=msg):
mgr._rebuild_blknos_and_blklocs()
blocks[0].mgr_locs = BlockPlacement(np.array([0]))
blocks[1].mgr_locs = BlockPlacement(np.array([1]))
mgr = BlockManager(blocks, axes)
mgr.iget(1)
def test_pickle(self, mgr):
mgr2 = tm.round_trip_pickle(mgr)
tm.assert_frame_equal(
DataFrame._from_mgr(mgr, axes=mgr.axes),
DataFrame._from_mgr(mgr2, axes=mgr2.axes),
)
# GH2431
assert hasattr(mgr2, "_is_consolidated")
assert hasattr(mgr2, "_known_consolidated")
# reset to False on load
assert not mgr2._is_consolidated
assert not mgr2._known_consolidated
@pytest.mark.parametrize("mgr_string", ["a,a,a:f8", "a: f8; a: i8"])
def test_non_unique_pickle(self, mgr_string):
mgr = create_mgr(mgr_string)
mgr2 = tm.round_trip_pickle(mgr)
tm.assert_frame_equal(
DataFrame._from_mgr(mgr, axes=mgr.axes),
DataFrame._from_mgr(mgr2, axes=mgr2.axes),
)
def test_categorical_block_pickle(self):
mgr = create_mgr("a: category")
mgr2 = tm.round_trip_pickle(mgr)
tm.assert_frame_equal(
DataFrame._from_mgr(mgr, axes=mgr.axes),
DataFrame._from_mgr(mgr2, axes=mgr2.axes),
)
smgr = create_single_mgr("category")
smgr2 = tm.round_trip_pickle(smgr)
tm.assert_series_equal(
Series()._constructor_from_mgr(smgr, axes=smgr.axes),
Series()._constructor_from_mgr(smgr2, axes=smgr2.axes),
)
def test_iget(self):
cols = Index(list("abc"))
values = np.random.default_rng(2).random((3, 3))
block = new_block(
values=values.copy(),
placement=BlockPlacement(np.arange(3, dtype=np.intp)),
ndim=values.ndim,
)
mgr = BlockManager(blocks=(block,), axes=[cols, Index(np.arange(3))])
tm.assert_almost_equal(mgr.iget(0).internal_values(), values[0])
tm.assert_almost_equal(mgr.iget(1).internal_values(), values[1])
tm.assert_almost_equal(mgr.iget(2).internal_values(), values[2])
def test_set(self):
mgr = create_mgr("a,b,c: int", item_shape=(3,))
mgr.insert(len(mgr.items), "d", np.array(["foo"] * 3))
mgr.iset(1, np.array(["bar"] * 3))
tm.assert_numpy_array_equal(mgr.iget(0).internal_values(), np.array([0] * 3))
tm.assert_numpy_array_equal(
mgr.iget(1).internal_values(), np.array(["bar"] * 3, dtype=np.object_)
)
tm.assert_numpy_array_equal(mgr.iget(2).internal_values(), np.array([2] * 3))
tm.assert_numpy_array_equal(
mgr.iget(3).internal_values(), np.array(["foo"] * 3, dtype=np.object_)
)
def test_set_change_dtype(self, mgr):
mgr.insert(len(mgr.items), "baz", np.zeros(N, dtype=bool))
mgr.iset(mgr.items.get_loc("baz"), np.repeat("foo", N))
idx = mgr.items.get_loc("baz")
assert mgr.iget(idx).dtype == np.object_
mgr2 = mgr.consolidate()
mgr2.iset(mgr2.items.get_loc("baz"), np.repeat("foo", N))
idx = mgr2.items.get_loc("baz")
assert mgr2.iget(idx).dtype == np.object_
mgr2.insert(
len(mgr2.items),
"quux",
np.random.default_rng(2).standard_normal(N).astype(int),
)
idx = mgr2.items.get_loc("quux")
assert mgr2.iget(idx).dtype == np.dtype(int)
mgr2.iset(
mgr2.items.get_loc("quux"), np.random.default_rng(2).standard_normal(N)
)
assert mgr2.iget(idx).dtype == np.float64
def test_copy(self, mgr):
cp = mgr.copy(deep=False)
for blk, cp_blk in zip(mgr.blocks, cp.blocks):
# view assertion
tm.assert_equal(cp_blk.values, blk.values)
if isinstance(blk.values, np.ndarray):
assert cp_blk.values.base is blk.values.base
else:
# DatetimeTZBlock has DatetimeIndex values
assert cp_blk.values._ndarray.base is blk.values._ndarray.base
# copy(deep=True) consolidates, so the block-wise assertions will
# fail is mgr is not consolidated
mgr._consolidate_inplace()
cp = mgr.copy(deep=True)
for blk, cp_blk in zip(mgr.blocks, cp.blocks):
bvals = blk.values
cpvals = cp_blk.values
tm.assert_equal(cpvals, bvals)
if isinstance(cpvals, np.ndarray):
lbase = cpvals.base
rbase = bvals.base
else:
lbase = cpvals._ndarray.base
rbase = bvals._ndarray.base
# copy assertion we either have a None for a base or in case of
# some blocks it is an array (e.g. datetimetz), but was copied
if isinstance(cpvals, DatetimeArray):
assert (lbase is None and rbase is None) or (lbase is not rbase)
elif not isinstance(cpvals, np.ndarray):
assert lbase is not rbase
else:
assert lbase is None and rbase is None
def test_sparse(self):
mgr = create_mgr("a: sparse-1; b: sparse-2")
assert mgr.as_array().dtype == np.float64
def test_sparse_mixed(self):
mgr = create_mgr("a: sparse-1; b: sparse-2; c: f8")
assert len(mgr.blocks) == 3
assert isinstance(mgr, BlockManager)
@pytest.mark.parametrize(
"mgr_string, dtype",
[("c: f4; d: f2", np.float32), ("c: f4; d: f2; e: f8", np.float64)],
)
def test_as_array_float(self, mgr_string, dtype):
mgr = create_mgr(mgr_string)
assert mgr.as_array().dtype == dtype
@pytest.mark.parametrize(
"mgr_string, dtype",
[
("a: bool-1; b: bool-2", np.bool_),
("a: i8-1; b: i8-2; c: i4; d: i2; e: u1", np.int64),
("c: i4; d: i2; e: u1", np.int32),
],
)
def test_as_array_int_bool(self, mgr_string, dtype):
mgr = create_mgr(mgr_string)
assert mgr.as_array().dtype == dtype
def test_as_array_datetime(self):
mgr = create_mgr("h: datetime-1; g: datetime-2")
assert mgr.as_array().dtype == "M8[ns]"
def test_as_array_datetime_tz(self):
mgr = create_mgr("h: M8[ns, US/Eastern]; g: M8[ns, CET]")
assert mgr.iget(0).dtype == "datetime64[ns, US/Eastern]"
assert mgr.iget(1).dtype == "datetime64[ns, CET]"
assert mgr.as_array().dtype == "object"
@pytest.mark.parametrize("t", ["float16", "float32", "float64", "int32", "int64"])
def test_astype(self, t):
# coerce all
mgr = create_mgr("c: f4; d: f2; e: f8")
t = np.dtype(t)
tmgr = mgr.astype(t)
assert tmgr.iget(0).dtype.type == t
assert tmgr.iget(1).dtype.type == t
assert tmgr.iget(2).dtype.type == t
# mixed
mgr = create_mgr("a,b: object; c: bool; d: datetime; e: f4; f: f2; g: f8")
t = np.dtype(t)
tmgr = mgr.astype(t, errors="ignore")
assert tmgr.iget(2).dtype.type == t
assert tmgr.iget(4).dtype.type == t
assert tmgr.iget(5).dtype.type == t
assert tmgr.iget(6).dtype.type == t
assert tmgr.iget(0).dtype.type == np.object_
assert tmgr.iget(1).dtype.type == np.object_
if t != np.int64:
assert tmgr.iget(3).dtype.type == np.datetime64
else:
assert tmgr.iget(3).dtype.type == t
def test_convert(self, using_infer_string):
def _compare(old_mgr, new_mgr):
"""compare the blocks, numeric compare ==, object don't"""
old_blocks = set(old_mgr.blocks)
new_blocks = set(new_mgr.blocks)
assert len(old_blocks) == len(new_blocks)
# compare non-numeric
for b in old_blocks:
found = False
for nb in new_blocks:
if (b.values == nb.values).all():
found = True
break
assert found
for b in new_blocks:
found = False
for ob in old_blocks:
if (b.values == ob.values).all():
found = True
break
assert found
# noops
mgr = create_mgr("f: i8; g: f8")
new_mgr = mgr.convert()
_compare(mgr, new_mgr)
# convert
mgr = create_mgr("a,b,foo: object; f: i8; g: f8")
mgr.iset(0, np.array(["1"] * N, dtype=np.object_))
mgr.iset(1, np.array(["2."] * N, dtype=np.object_))
mgr.iset(2, np.array(["foo."] * N, dtype=np.object_))
new_mgr = mgr.convert()
dtype = "str" if using_infer_string else np.object_
assert new_mgr.iget(0).dtype == dtype
assert new_mgr.iget(1).dtype == dtype
assert new_mgr.iget(2).dtype == dtype
assert new_mgr.iget(3).dtype == np.int64
assert new_mgr.iget(4).dtype == np.float64
mgr = create_mgr(
"a,b,foo: object; f: i4; bool: bool; dt: datetime; i: i8; g: f8; h: f2"
)
mgr.iset(0, np.array(["1"] * N, dtype=np.object_))
mgr.iset(1, np.array(["2."] * N, dtype=np.object_))
mgr.iset(2, np.array(["foo."] * N, dtype=np.object_))
new_mgr = mgr.convert()
assert new_mgr.iget(0).dtype == dtype
assert new_mgr.iget(1).dtype == dtype
assert new_mgr.iget(2).dtype == dtype
assert new_mgr.iget(3).dtype == np.int32
assert new_mgr.iget(4).dtype == np.bool_
assert new_mgr.iget(5).dtype.type, np.datetime64
assert new_mgr.iget(6).dtype == np.int64
assert new_mgr.iget(7).dtype == np.float64
assert new_mgr.iget(8).dtype == np.float16
def test_interleave(self):
# self
for dtype in ["f8", "i8", "object", "bool", "complex", "M8[ns]", "m8[ns]"]:
mgr = create_mgr(f"a: {dtype}")
assert mgr.as_array().dtype == dtype
mgr = create_mgr(f"a: {dtype}; b: {dtype}")
assert mgr.as_array().dtype == dtype
@pytest.mark.parametrize(
"mgr_string, dtype",
[
("a: category", "i8"),
("a: category; b: category", "i8"),
("a: category; b: category2", "object"),
("a: category2", "object"),
("a: category2; b: category2", "object"),
("a: f8", "f8"),
("a: f8; b: i8", "f8"),
("a: f4; b: i8", "f8"),
("a: f4; b: i8; d: object", "object"),
("a: bool; b: i8", "object"),
("a: complex", "complex"),
("a: f8; b: category", "object"),
("a: M8[ns]; b: category", "object"),
("a: M8[ns]; b: bool", "object"),
("a: M8[ns]; b: i8", "object"),
("a: m8[ns]; b: bool", "object"),
("a: m8[ns]; b: i8", "object"),
("a: M8[ns]; b: m8[ns]", "object"),
],
)
def test_interleave_dtype(self, mgr_string, dtype):
# will be converted according the actual dtype of the underlying
mgr = create_mgr("a: category")
assert mgr.as_array().dtype == "i8"
mgr = create_mgr("a: category; b: category2")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: category2")
assert mgr.as_array().dtype == "object"
# combinations
mgr = create_mgr("a: f8")
assert mgr.as_array().dtype == "f8"
mgr = create_mgr("a: f8; b: i8")
assert mgr.as_array().dtype == "f8"
mgr = create_mgr("a: f4; b: i8")
assert mgr.as_array().dtype == "f8"
mgr = create_mgr("a: f4; b: i8; d: object")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: bool; b: i8")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: complex")
assert mgr.as_array().dtype == "complex"
mgr = create_mgr("a: f8; b: category")
assert mgr.as_array().dtype == "f8"
mgr = create_mgr("a: M8[ns]; b: category")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: M8[ns]; b: bool")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: M8[ns]; b: i8")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: m8[ns]; b: bool")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: m8[ns]; b: i8")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: M8[ns]; b: m8[ns]")
assert mgr.as_array().dtype == "object"
def test_consolidate_ordering_issues(self, mgr):
mgr.iset(mgr.items.get_loc("f"), np.random.default_rng(2).standard_normal(N))
mgr.iset(mgr.items.get_loc("d"), np.random.default_rng(2).standard_normal(N))
mgr.iset(mgr.items.get_loc("b"), np.random.default_rng(2).standard_normal(N))
mgr.iset(mgr.items.get_loc("g"), np.random.default_rng(2).standard_normal(N))
mgr.iset(mgr.items.get_loc("h"), np.random.default_rng(2).standard_normal(N))
# we have datetime/tz blocks in mgr
cons = mgr.consolidate()
assert cons.nblocks == 4
cons = mgr.consolidate().get_numeric_data()
assert cons.nblocks == 1
assert isinstance(cons.blocks[0].mgr_locs, BlockPlacement)
tm.assert_numpy_array_equal(
cons.blocks[0].mgr_locs.as_array, np.arange(len(cons.items), dtype=np.intp)
)
def test_reindex_items(self):
# mgr is not consolidated, f8 & f8-2 blocks
mgr = create_mgr("a: f8; b: i8; c: f8; d: i8; e: f8; f: bool; g: f8-2")
reindexed = mgr.reindex_axis(["g", "c", "a", "d"], axis=0)
assert not reindexed.is_consolidated()
tm.assert_index_equal(reindexed.items, Index(["g", "c", "a", "d"]))
tm.assert_almost_equal(
mgr.iget(6).internal_values(), reindexed.iget(0).internal_values()
)
tm.assert_almost_equal(
mgr.iget(2).internal_values(), reindexed.iget(1).internal_values()
)
tm.assert_almost_equal(
mgr.iget(0).internal_values(), reindexed.iget(2).internal_values()
)
tm.assert_almost_equal(
mgr.iget(3).internal_values(), reindexed.iget(3).internal_values()
)
def test_get_numeric_data(self):
mgr = create_mgr(
"int: int; float: float; complex: complex;"
"str: object; bool: bool; obj: object; dt: datetime",
item_shape=(3,),
)
mgr.iset(5, np.array([1, 2, 3], dtype=np.object_))
numeric = mgr.get_numeric_data()
tm.assert_index_equal(numeric.items, Index(["int", "float", "complex", "bool"]))
tm.assert_almost_equal(
mgr.iget(mgr.items.get_loc("float")).internal_values(),
numeric.iget(numeric.items.get_loc("float")).internal_values(),
)
# Check sharing
numeric.iset(
numeric.items.get_loc("float"),
np.array([100.0, 200.0, 300.0]),
inplace=True,
)
tm.assert_almost_equal(
mgr.iget(mgr.items.get_loc("float")).internal_values(),
np.array([1.0, 1.0, 1.0]),
)
def test_get_bool_data(self):
mgr = create_mgr(
"int: int; float: float; complex: complex;"
"str: object; bool: bool; obj: object; dt: datetime",
item_shape=(3,),
)
mgr.iset(6, np.array([True, False, True], dtype=np.object_))
bools = mgr.get_bool_data()
tm.assert_index_equal(bools.items, Index(["bool"]))
tm.assert_almost_equal(
mgr.iget(mgr.items.get_loc("bool")).internal_values(),
bools.iget(bools.items.get_loc("bool")).internal_values(),
)
bools.iset(0, np.array([True, False, True]), inplace=True)
tm.assert_numpy_array_equal(
mgr.iget(mgr.items.get_loc("bool")).internal_values(),
np.array([True, True, True]),
)
def test_unicode_repr_doesnt_raise(self):
repr(create_mgr("b,\u05d0: object"))
@pytest.mark.parametrize(
"mgr_string", ["a,b,c: i8-1; d,e,f: i8-2", "a,a,a: i8-1; b,b,b: i8-2"]
)
def test_equals(self, mgr_string):
# unique items
bm1 = create_mgr(mgr_string)
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
assert bm1.equals(bm2)
@pytest.mark.parametrize(
"mgr_string",
[
"a:i8;b:f8", # basic case
"a:i8;b:f8;c:c8;d:b", # many types
"a:i8;e:dt;f:td;g:string", # more types
"a:i8;b:category;c:category2", # categories
"c:sparse;d:sparse_na;b:f8", # sparse
],
)
def test_equals_block_order_different_dtypes(self, mgr_string):
# GH 9330
bm = create_mgr(mgr_string)
block_perms = itertools.permutations(bm.blocks)
for bm_perm in block_perms:
bm_this = BlockManager(tuple(bm_perm), bm.axes)
assert bm.equals(bm_this)
assert bm_this.equals(bm)
def test_single_mgr_ctor(self):
mgr = create_single_mgr("f8", num_rows=5)
assert mgr.external_values().tolist() == [0.0, 1.0, 2.0, 3.0, 4.0]
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_bool_args(self, value):
bm1 = create_mgr("a,b,c: i8-1; d,e,f: i8-2")
msg = (
'For argument "inplace" expected type bool, '
f"received type {type(value).__name__}."
)
with pytest.raises(ValueError, match=msg):
bm1.replace_list([1], [2], inplace=value)
def test_iset_split_block(self):
bm = create_mgr("a,b,c: i8; d: f8")
bm._iset_split_block(0, np.array([0]))
tm.assert_numpy_array_equal(
bm.blklocs, np.array([0, 0, 1, 0], dtype="int64" if IS64 else "int32")
)
# First indexer currently does not have a block associated with it in case
tm.assert_numpy_array_equal(
bm.blknos, np.array([0, 0, 0, 1], dtype="int64" if IS64 else "int32")
)
assert len(bm.blocks) == 2
def test_iset_split_block_values(self):
bm = create_mgr("a,b,c: i8; d: f8")
bm._iset_split_block(0, np.array([0]), np.array([list(range(10))]))
tm.assert_numpy_array_equal(
bm.blklocs, np.array([0, 0, 1, 0], dtype="int64" if IS64 else "int32")
)
# First indexer currently does not have a block associated with it in case
tm.assert_numpy_array_equal(
bm.blknos, np.array([0, 2, 2, 1], dtype="int64" if IS64 else "int32")
)
assert len(bm.blocks) == 3
def _as_array(mgr):
if mgr.ndim == 1:
return mgr.external_values()
return mgr.as_array().T
| TestBlockManager |
python | keras-team__keras | keras/src/initializers/random_initializers.py | {
"start": 1071,
"end": 2880
} | class ____(RandomInitializer):
"""Random normal initializer.
Draws samples from a normal distribution for given parameters.
Examples:
>>> # Standalone usage:
>>> initializer = RandomNormal(mean=0.0, stddev=1.0)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = RandomNormal(mean=0.0, stddev=1.0)
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
mean: A python scalar or a scalar keras tensor. Mean of the random
values to generate.
stddev: A python scalar or a scalar keras tensor. Standard deviation of
the random values to generate.
seed: A Python integer or instance of
`keras.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.backend.SeedGenerator`.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
super().__init__(seed=seed)
def __call__(self, shape, dtype=None):
return random.normal(
shape=shape,
mean=self.mean,
stddev=self.stddev,
seed=self.seed,
dtype=dtype,
)
def get_config(self):
base_config = super().get_config()
config = {"mean": self.mean, "stddev": self.stddev}
return {**base_config, **config}
@keras_export(
[
"keras.initializers.TruncatedNormal",
"keras.initializers.truncated_normal",
]
)
| RandomNormal |
python | falconry__falcon | falcon/routing/static.py | {
"start": 4732,
"end": 11502
} | class ____:
"""Represents a static route.
Args:
prefix (str): The path prefix to match for this route. If the
path in the requested URI starts with this string, the remainder
of the path will be appended to the source directory to
determine the file to serve. This is done in a secure manner
to prevent an attacker from requesting a file outside the
specified directory.
Note that static routes are matched in LIFO order, and are only
attempted after checking dynamic routes and sinks.
directory (Union[str, pathlib.Path]): The source directory from which to
serve files. Must be an absolute path.
downloadable (bool): Set to ``True`` to include a
Content-Disposition header in the response. The "filename"
directive is simply set to the name of the requested file.
fallback_filename (str): Fallback filename used when the requested file
is not found. Can be a relative path inside the prefix folder or
any valid absolute path.
Note:
If the fallback file is served instead of the requested file,
the response Content-Type header, as well as the
Content-Disposition header (provided it was requested with the
`downloadable` parameter described above), are derived from the
fallback filename, as opposed to the requested filename.
"""
# NOTE(kgriffs): Don't allow control characters and reserved chars
_DISALLOWED_CHARS_PATTERN: ClassVar[Pattern[str]] = re.compile(
'[\x00-\x1f\x80-\x9f\ufffd~?<>:*|\'"]'
)
# NOTE(vytas): Match the behavior of the underlying os.path.normpath.
_DISALLOWED_NORMALIZED_PREFIXES: ClassVar[tuple[str, ...]] = (
'..' + os.path.sep,
os.path.sep,
)
# NOTE(kgriffs): If somehow an executable code exploit is triggerable, this
# minimizes how much can be included in the payload.
_MAX_NON_PREFIXED_LEN: ClassVar[int] = 512
def __init__(
self,
prefix: str,
directory: str | Path,
downloadable: bool = False,
fallback_filename: str | None = None,
) -> None:
if not prefix.startswith('/'):
raise ValueError("prefix must start with '/'")
self._directory = os.path.normpath(directory)
if not os.path.isabs(self._directory):
raise ValueError('directory must be an absolute path')
if fallback_filename is None:
self._fallback_filename = None
else:
self._fallback_filename = os.path.normpath(
os.path.join(self._directory, fallback_filename)
)
if not os.path.isfile(self._fallback_filename):
raise ValueError('fallback_filename is not a file')
# NOTE(kgriffs): Ensure it ends with a path separator to ensure
# we only match on the complete segment. Don't raise an error
# because most people won't expect to have to append a slash.
if not prefix.endswith('/'):
prefix += '/'
self._prefix = prefix
self._downloadable = downloadable
def match(self, path: str) -> bool:
"""Check whether the given path matches this route."""
if self._fallback_filename is None:
return path.startswith(self._prefix)
return path.startswith(self._prefix) or path == self._prefix[:-1]
def __call__(self, req: Request, resp: Response, **kw: Any) -> None:
"""Resource responder for this route."""
assert not kw
if req.method == 'OPTIONS':
# it's likely a CORS request. Set the allow header to the appropriate value.
resp.set_header('Allow', 'GET')
resp.set_header('Content-Length', '0')
return
without_prefix = req.path[len(self._prefix) :]
# NOTE(kgriffs): Check surrounding whitespace and strip trailing
# periods, which are illegal on windows
# NOTE(CaselIT): An empty filename is allowed when fallback_filename is provided
if (
not (without_prefix or self._fallback_filename is not None)
or without_prefix.strip().rstrip('.') != without_prefix
or self._DISALLOWED_CHARS_PATTERN.search(without_prefix)
or '\\' in without_prefix
or '//' in without_prefix
or len(without_prefix) > self._MAX_NON_PREFIXED_LEN
):
raise falcon.HTTPNotFound()
normalized = os.path.normpath(without_prefix)
if normalized.startswith(self._DISALLOWED_NORMALIZED_PREFIXES):
raise falcon.HTTPNotFound()
file_path = os.path.join(self._directory, normalized)
# NOTE(kgriffs): Final sanity-check just to be safe. This check
# should never succeed, but this should guard against us having
# overlooked something.
if '..' in file_path or not file_path.startswith(self._directory):
raise falcon.HTTPNotFound()
if self._fallback_filename is None:
fh, st = _open_file(file_path)
else:
try:
fh, st = _open_file(file_path)
except falcon.HTTPNotFound:
fh, st = _open_file(self._fallback_filename)
file_path = self._fallback_filename
etag = f'{int(st.st_mtime):x}-{st.st_size:x}'
resp.etag = etag
last_modified = datetime.fromtimestamp(st.st_mtime, timezone.utc)
# NOTE(vytas): Strip the microsecond part because that is not reflected
# in HTTP date, and when the client passes a previous value via
# If-Modified-Since, it will look as if our copy is ostensibly newer.
last_modified = last_modified.replace(microsecond=0)
resp.last_modified = last_modified
if _is_not_modified(req, etag, last_modified):
fh.close()
resp.status = falcon.HTTP_304
return
req_range = req.range if req.range_unit == 'bytes' else None
try:
stream, length, content_range = _set_range(fh, st, req_range)
except OSError:
fh.close()
raise falcon.HTTPNotFound()
resp.set_stream(stream, length)
suffix = os.path.splitext(file_path)[1]
resp.content_type = resp.options.static_media_types.get(
suffix, 'application/octet-stream'
)
resp.accept_ranges = 'bytes'
if self._downloadable:
resp.downloadable_as = os.path.basename(file_path)
if content_range:
resp.status = falcon.HTTP_206
resp.content_range = content_range
| StaticRoute |
python | apache__airflow | airflow-core/tests/unit/dag_processing/test_processor.py | {
"start": 21546,
"end": 38587
} | class ____:
"""Test the _execute_dag_callbacks function with context_from_server"""
def test_execute_dag_callbacks_with_context_from_server(self, spy_agency):
"""Test _execute_dag_callbacks uses RuntimeTaskInstance context when context_from_server is provided"""
called = False
context_received = None
def on_failure(context):
nonlocal called, context_received
called = True
context_received = context
with DAG(dag_id="test_dag", on_failure_callback=on_failure) as dag:
BaseOperator(task_id="test_task")
def fake_collect_dags(self, *args, **kwargs):
self.dags[dag.dag_id] = dag
spy_agency.spy_on(DagBag.collect_dags, call_fake=fake_collect_dags, owner=DagBag)
dagbag = DagBag()
dagbag.collect_dags()
current_time = timezone.utcnow()
dag_run_data = DRDataModel(
dag_id="test_dag",
run_id="test_run",
logical_date=current_time,
data_interval_start=current_time,
data_interval_end=current_time,
run_after=current_time,
start_date=current_time,
end_date=None,
run_type="manual",
state="running",
consumed_asset_events=[],
partition_key=None,
)
ti_data = TIDataModel(
id=uuid.uuid4(),
dag_id="test_dag",
task_id="test_task",
run_id="test_run",
map_index=-1,
try_number=1,
dag_version_id=uuid.uuid4(),
)
context_from_server = DagRunContext(dag_run=dag_run_data, last_ti=ti_data)
request = DagCallbackRequest(
filepath="test.py",
dag_id="test_dag",
run_id="test_run",
bundle_name="testing",
bundle_version=None,
context_from_server=context_from_server,
is_failure_callback=True,
msg="Test failure message",
)
log = structlog.get_logger()
_execute_dag_callbacks(dagbag, request, log)
assert called is True
assert context_received is not None
# When context_from_server is provided, we get a full RuntimeTaskInstance context
assert "dag_run" in context_received
assert "logical_date" in context_received
assert "reason" in context_received
assert context_received["reason"] == "Test failure message"
# Check that we have template context variables from RuntimeTaskInstance
assert "ts" in context_received
assert "params" in context_received
def test_execute_dag_callbacks_without_context_from_server(self, spy_agency):
"""Test _execute_dag_callbacks falls back to simple context when context_from_server is None"""
called = False
context_received = None
def on_failure(context):
nonlocal called, context_received
called = True
context_received = context
with DAG(dag_id="test_dag", on_failure_callback=on_failure) as dag:
BaseOperator(task_id="test_task")
def fake_collect_dags(self, *args, **kwargs):
self.dags[dag.dag_id] = dag
spy_agency.spy_on(DagBag.collect_dags, call_fake=fake_collect_dags, owner=DagBag)
dagbag = DagBag()
dagbag.collect_dags()
request = DagCallbackRequest(
filepath="test.py",
dag_id="test_dag",
run_id="test_run",
bundle_name="testing",
bundle_version=None,
context_from_server=None, # No context from server
is_failure_callback=True,
msg="Test failure message",
)
log = structlog.get_logger()
_execute_dag_callbacks(dagbag, request, log)
assert called is True
assert context_received is not None
# When context_from_server is None, we get simple context
assert context_received["dag"] == dag
assert context_received["run_id"] == "test_run"
assert context_received["reason"] == "Test failure message"
# Should not have template context variables
assert "ts" not in context_received
assert "params" not in context_received
def test_execute_dag_callbacks_success_callback(self, spy_agency):
"""Test _execute_dag_callbacks executes success callback with context_from_server"""
called = False
context_received = None
def on_success(context):
nonlocal called, context_received
called = True
context_received = context
with DAG(dag_id="test_dag", on_success_callback=on_success) as dag:
BaseOperator(task_id="test_task")
def fake_collect_dags(self, *args, **kwargs):
self.dags[dag.dag_id] = dag
spy_agency.spy_on(DagBag.collect_dags, call_fake=fake_collect_dags, owner=DagBag)
dagbag = DagBag()
dagbag.collect_dags()
# Create test data
current_time = timezone.utcnow()
dag_run_data = DRDataModel(
dag_id="test_dag",
run_id="test_run",
logical_date=current_time,
data_interval_start=current_time,
data_interval_end=current_time,
run_after=current_time,
start_date=current_time,
end_date=None,
run_type="manual",
state="success",
consumed_asset_events=[],
partition_key=None,
)
ti_data = TIDataModel(
id=uuid.uuid4(),
dag_id="test_dag",
task_id="test_task",
run_id="test_run",
map_index=-1,
try_number=1,
dag_version_id=uuid.uuid4(),
)
context_from_server = DagRunContext(dag_run=dag_run_data, last_ti=ti_data)
request = DagCallbackRequest(
filepath="test.py",
dag_id="test_dag",
run_id="test_run",
bundle_name="testing",
bundle_version=None,
context_from_server=context_from_server,
is_failure_callback=False, # Success callback
msg="Test success message",
)
log = structlog.get_logger()
_execute_dag_callbacks(dagbag, request, log)
assert called is True
assert context_received is not None
assert "dag_run" in context_received
assert context_received["reason"] == "Test success message"
def test_execute_dag_callbacks_multiple_callbacks(self, spy_agency):
"""Test _execute_dag_callbacks executes multiple callbacks"""
call_count = 0
def on_failure_1(context):
nonlocal call_count
call_count += 1
def on_failure_2(context):
nonlocal call_count
call_count += 1
with DAG(dag_id="test_dag", on_failure_callback=[on_failure_1, on_failure_2]) as dag:
BaseOperator(task_id="test_task")
def fake_collect_dags(self, *args, **kwargs):
self.dags[dag.dag_id] = dag
spy_agency.spy_on(DagBag.collect_dags, call_fake=fake_collect_dags, owner=DagBag)
dagbag = DagBag()
dagbag.collect_dags()
request = DagCallbackRequest(
filepath="test.py",
dag_id="test_dag",
run_id="test_run",
bundle_name="testing",
bundle_version=None,
is_failure_callback=True,
msg="Test failure message",
)
log = structlog.get_logger()
_execute_dag_callbacks(dagbag, request, log)
assert call_count == 2
def test_execute_dag_callbacks_no_callback_defined(self, spy_agency):
"""Test _execute_dag_callbacks when no callback is defined"""
with DAG(dag_id="test_dag") as dag: # No callbacks defined
BaseOperator(task_id="test_task")
def fake_collect_dags(self, *args, **kwargs):
self.dags[dag.dag_id] = dag
spy_agency.spy_on(DagBag.collect_dags, call_fake=fake_collect_dags, owner=DagBag)
dagbag = DagBag()
dagbag.collect_dags()
request = DagCallbackRequest(
filepath="test.py",
dag_id="test_dag",
run_id="test_run",
bundle_name="testing",
bundle_version=None,
is_failure_callback=True,
msg="Test failure message",
)
log = MagicMock(spec=FilteringBoundLogger)
_execute_dag_callbacks(dagbag, request, log)
# Should log warning about no callback found
log.warning.assert_called_once_with("Callback requested, but dag didn't have any", dag_id="test_dag")
def test_execute_dag_callbacks_missing_dag(self):
"""Test _execute_dag_callbacks raises ValueError for missing DAG"""
dagbag = DagBag()
request = DagCallbackRequest(
filepath="test.py",
dag_id="missing_dag",
run_id="test_run",
bundle_name="testing",
bundle_version=None,
is_failure_callback=True,
msg="Test failure message",
)
log = structlog.get_logger()
with pytest.raises(ValueError, match="DAG 'missing_dag' not found in DagBag"):
_execute_dag_callbacks(dagbag, request, log)
@pytest.mark.parametrize(
("xcom_operation", "expected_message_type", "expected_message", "mock_response"),
[
(
lambda ti, task_ids: ti.xcom_pull(key="report_df", task_ids=task_ids),
"GetXComSequenceSlice",
GetXComSequenceSlice(
key="report_df",
dag_id="test_dag",
run_id="test_run",
task_id="test_task",
start=None,
stop=None,
step=None,
include_prior_dates=False,
),
XComSequenceSliceResult(root=["test data"]),
),
(
lambda ti, task_ids: ti.xcom_pull(key="single_value", task_ids=["test_task"]),
"GetXComSequenceSlice",
GetXComSequenceSlice(
key="single_value",
dag_id="test_dag",
run_id="test_run",
task_id="test_task",
start=None,
stop=None,
step=None,
include_prior_dates=False,
),
XComSequenceSliceResult(root=["test data"]),
),
(
lambda ti, task_ids: ti.xcom_pull(key="direct_value", task_ids="test_task", map_indexes=None),
"GetXCom",
GetXCom(
key="direct_value",
dag_id="test_dag",
run_id="test_run",
task_id="test_task",
map_index=None,
include_prior_dates=False,
),
XComResult(
key="direct_value",
value="test",
),
),
],
)
def test_notifier_xcom_operations_send_correct_messages(
self,
spy_agency,
mock_supervisor_comms,
xcom_operation,
expected_message_type,
expected_message,
mock_response,
):
"""Test that different XCom operations send correct message types"""
mock_supervisor_comms.send.return_value = mock_response
class TestNotifier:
def __call__(self, context):
ti = context["ti"]
dag = context["dag"]
task_ids = list(dag.task_dict)
xcom_operation(ti, task_ids)
with DAG(dag_id="test_dag", on_success_callback=TestNotifier()) as dag:
BaseOperator(task_id="test_task")
def fake_collect_dags(self, *args, **kwargs):
self.dags[dag.dag_id] = dag
spy_agency.spy_on(DagBag.collect_dags, call_fake=fake_collect_dags, owner=DagBag)
dagbag = DagBag()
dagbag.collect_dags()
current_time = timezone.utcnow()
request = DagCallbackRequest(
filepath="test.py",
dag_id="test_dag",
run_id="test_run",
bundle_name="testing",
bundle_version=None,
context_from_server=DagRunContext(
dag_run=DRDataModel(
dag_id="test_dag",
run_id="test_run",
logical_date=current_time,
data_interval_start=current_time,
data_interval_end=current_time,
run_after=current_time,
start_date=current_time,
end_date=None,
run_type="manual",
state="success",
consumed_asset_events=[],
partition_key=None,
),
last_ti=TIDataModel(
id=uuid.uuid4(),
dag_id="test_dag",
task_id="test_task",
run_id="test_run",
map_index=-1,
try_number=1,
dag_version_id=uuid.uuid4(),
),
),
is_failure_callback=False,
msg="Test success message",
)
_execute_dag_callbacks(dagbag, request, structlog.get_logger())
mock_supervisor_comms.send.assert_called_once_with(msg=expected_message)
@pytest.mark.parametrize(
("request_operation", "operation_type", "mock_response", "operation_response"),
[
(
lambda context: context["task_instance"].get_ti_count(dag_id="test_dag"),
GetTICount(dag_id="test_dag"),
TICount(count=2),
"Got response 2",
),
(
lambda context: context["task_instance"].get_task_states(
dag_id="test_dag", task_ids=["test_task"]
),
GetTaskStates(
dag_id="test_dag",
task_ids=["test_task"],
),
TaskStatesResult(task_states={"test_run": {"task1": "running"}}),
"Got response {'test_run': {'task1': 'running'}}",
),
],
)
def test_dagfileprocessorprocess_request_handler_operations(
self,
spy_agency,
mock_supervisor_comms,
request_operation,
operation_type,
mock_response,
operation_response,
caplog,
):
"""Test that DagFileProcessorProcess Request Handler Operations"""
mock_supervisor_comms.send.return_value = mock_response
def callback_fn(context):
log = structlog.get_logger()
log.info("Callback started..")
log.info("Got response %s", request_operation(context))
with DAG(dag_id="test_dag", on_success_callback=callback_fn) as dag:
BaseOperator(task_id="test_task")
def fake_collect_dags(self, *args, **kwargs):
self.dags[dag.dag_id] = dag
spy_agency.spy_on(DagBag.collect_dags, call_fake=fake_collect_dags, owner=DagBag)
dagbag = DagBag()
dagbag.collect_dags()
current_time = timezone.utcnow()
request = DagCallbackRequest(
filepath="test.py",
dag_id="test_dag",
run_id="test_run",
bundle_name="testing",
bundle_version=None,
context_from_server=DagRunContext(
dag_run=DRDataModel(
dag_id="test_dag",
run_id="test_run",
logical_date=current_time,
data_interval_start=current_time,
data_interval_end=current_time,
run_after=current_time,
start_date=current_time,
end_date=None,
run_type="manual",
state="success",
consumed_asset_events=[],
partition_key=None,
),
last_ti=TIDataModel(
id=uuid.uuid4(),
dag_id="test_dag",
task_id="test_task",
run_id="test_run",
map_index=-1,
try_number=1,
dag_version_id=uuid.uuid4(),
),
),
is_failure_callback=False,
msg="Test success message",
)
_execute_dag_callbacks(dagbag, request, structlog.get_logger())
mock_supervisor_comms.send.assert_called_once_with(msg=operation_type)
assert operation_response in caplog.text
| TestExecuteDagCallbacks |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 904,
"end": 1087
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("FAILURE", "NOTICE", "WARNING")
| CheckAnnotationLevel |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.