language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | src/sentry/interfaces/breadcrumbs.py | {
"start": 221,
"end": 2923
} | class ____(Interface):
"""
This interface stores information that leads up to an error.
- ``message`` must be no more than 1000 characters in length.
>>> [{
>>> "type": "message",
>>> // timestamp can be ISO format or a unix timestamp (as float)
>>> "timestamp": "2016-01-17T12:30:00",
>>> "data": {
>>> "message": "My raw message with interpreted strings like %s",
>>> }
>>> ], ...}
"""
display_score = 1100
score = 800
@classmethod
def to_python(cls, data, **kwargs):
values = []
for index, crumb in enumerate(get_path(data, "values", filter=True, default=())):
# TODO(ja): Handle already invalid and None breadcrumbs
values.append(cls.normalize_crumb(crumb))
return super().to_python({"values": values}, **kwargs)
def to_json(self):
return prune_empty_keys(
{
"values": [
prune_empty_keys(
{
"type": crumb["type"],
"level": crumb["level"],
"timestamp": crumb["timestamp"],
"message": crumb["message"],
"category": crumb["category"],
"event_id": crumb["event_id"],
"data": crumb["data"] or None,
}
)
for crumb in self.values
]
or None
}
)
@classmethod
def normalize_crumb(cls, crumb):
crumb = dict(crumb)
ts = parse_timestamp(crumb.get("timestamp"))
if ts:
crumb["timestamp"] = ts.timestamp()
else:
crumb["timestamp"] = None
for key in ("type", "level", "message", "category", "event_id", "data"):
crumb.setdefault(key, None)
return crumb
def get_api_context(self, is_public=False, platform=None):
def _convert(x):
return {
"type": x["type"],
"timestamp": x["timestamp"] and to_datetime(x["timestamp"]),
"level": x.get("level", "info"),
"message": x.get("message"),
"category": x.get("category"),
"data": x.get("data") or None,
"event_id": x.get("event_id"),
}
return {"values": [_convert(v) for v in self.values]}
def get_api_meta(self, meta, is_public=False, platform=None):
if meta and "values" not in meta:
return {"values": meta}
else:
return meta
| Breadcrumbs |
python | numpy__numpy | numpy/_core/tests/test_deprecations.py | {
"start": 5026,
"end": 5425
} | class ____:
def test_assert_deprecated(self):
test_case_instance = _DeprecationTestCase()
assert_raises(AssertionError,
test_case_instance.assert_deprecated,
lambda: None)
def foo():
warnings.warn("foo", category=DeprecationWarning, stacklevel=2)
test_case_instance.assert_deprecated(foo)
| TestTestDeprecated |
python | fluentpython__example-code | 06-dp-1class-func/strategy_best.py | {
"start": 1587,
"end": 3121
} | class ____: # the Context
def __init__(self, customer, cart, promotion=None):
self.customer = customer
self.cart = list(cart)
self.promotion = promotion
def total(self):
if not hasattr(self, '__total'):
self.__total = sum(item.total() for item in self.cart)
return self.__total
def due(self):
if self.promotion is None:
discount = 0
else:
discount = self.promotion(self)
return self.total() - discount
def __repr__(self):
fmt = '<Order total: {:.2f} due: {:.2f}>'
return fmt.format(self.total(), self.due())
def fidelity_promo(order):
"""5% discount for customers with 1000 or more fidelity points"""
return order.total() * .05 if order.customer.fidelity >= 1000 else 0
def bulk_item_promo(order):
"""10% discount for each LineItem with 20 or more units"""
discount = 0
for item in order.cart:
if item.quantity >= 20:
discount += item.total() * .1
return discount
def large_order_promo(order):
"""7% discount for orders with 10 or more distinct items"""
distinct_items = {item.product for item in order.cart}
if len(distinct_items) >= 10:
return order.total() * .07
return 0
# BEGIN STRATEGY_BEST
promos = [fidelity_promo, bulk_item_promo, large_order_promo] # <1>
def best_promo(order): # <2>
"""Select best discount available
"""
return max(promo(order) for promo in promos) # <3>
# END STRATEGY_BEST
| Order |
python | pydata__xarray | xarray/computation/rolling.py | {
"start": 9114,
"end": 28043
} | class ____(Rolling["DataArray"]):
__slots__ = ("window_labels",)
def __init__(
self,
obj: DataArray,
windows: Mapping[Any, int],
min_periods: int | None = None,
center: bool | Mapping[Any, bool] = False,
) -> None:
"""
Moving window object for DataArray.
You should use DataArray.rolling() method to construct this object
instead of the class constructor.
Parameters
----------
obj : DataArray
Object to window.
windows : mapping of hashable to int
A mapping from the name of the dimension to create the rolling
exponential window along (e.g. `time`) to the size of the moving window.
min_periods : int, default: None
Minimum number of observations in window required to have a value
(otherwise result is NA). The default, None, is equivalent to
setting min_periods equal to the size of the window.
center : bool, default: False
Set the labels at the center of the window. The default, False,
sets the labels at the right edge of the window.
Returns
-------
rolling : type of input argument
See Also
--------
xarray.DataArray.rolling
xarray.DataArray.groupby
xarray.Dataset.rolling
xarray.Dataset.groupby
"""
super().__init__(obj, windows, min_periods=min_periods, center=center)
# TODO legacy attribute
self.window_labels = self.obj[self.dim[0]]
def __iter__(self) -> Iterator[tuple[DataArray, DataArray]]:
if self.ndim > 1:
raise ValueError("__iter__ is only supported for 1d-rolling")
dim0 = self.dim[0]
window0 = int(self.window[0])
offset = (window0 + 1) // 2 if self.center[0] else 1
stops = np.arange(offset, self.obj.sizes[dim0] + offset)
starts = stops - window0
starts[: window0 - offset] = 0
for label, start, stop in zip(self.window_labels, starts, stops, strict=True):
window = self.obj.isel({dim0: slice(start, stop)})
counts = window.count(dim=[dim0])
window = window.where(counts >= self.min_periods)
yield (label, window)
@_deprecate_positional_args("v2024.11.0")
def construct(
self,
window_dim: Hashable | Mapping[Any, Hashable] | None = None,
*,
stride: int | Mapping[Any, int] = 1,
fill_value: Any = dtypes.NA,
keep_attrs: bool | None = None,
sliding_window_view_kwargs: Mapping[Any, Any] | None = None,
**window_dim_kwargs: Hashable,
) -> DataArray:
"""
Convert this rolling object to xr.DataArray,
where the window dimension is stacked as a new dimension
Parameters
----------
window_dim : Hashable or dict-like to Hashable, optional
A mapping from dimension name to the new window dimension names.
stride : int or mapping of int, default: 1
Size of stride for the rolling window.
fill_value : default: dtypes.NA
Filling value to match the dimension size.
keep_attrs : bool, default: None
If True, the attributes (``attrs``) will be copied from the original
object to the new one. If False, the new object will be returned
without attributes. If None uses the global default.
sliding_window_view_kwargs : Mapping
Keyword arguments that should be passed to the underlying array type's
``sliding_window_view`` function.
**window_dim_kwargs : Hashable, optional
The keyword arguments form of ``window_dim`` {dim: new_name, ...}.
Returns
-------
DataArray
a view of the original array. By default, the returned array is not writeable.
For numpy arrays, one can pass ``writeable=True`` in ``sliding_window_view_kwargs``.
See Also
--------
numpy.lib.stride_tricks.sliding_window_view
dask.array.lib.stride_tricks.sliding_window_view
Notes
-----
With dask arrays, it's possible to pass the ``automatic_rechunk`` kwarg as
``sliding_window_view_kwargs={"automatic_rechunk": True}``. This controls
whether dask should automatically rechunk the output to avoid
exploding chunk sizes. Automatically rechunking is the default behaviour.
Importantly, each chunk will be a view of the data so large chunk sizes are
only safe if *no* copies are made later.
Examples
--------
>>> da = xr.DataArray(np.arange(8).reshape(2, 4), dims=("a", "b"))
>>> rolling = da.rolling(b=3)
>>> rolling.construct("window_dim")
<xarray.DataArray (a: 2, b: 4, window_dim: 3)> Size: 192B
array([[[nan, nan, 0.],
[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.]],
<BLANKLINE>
[[nan, nan, 4.],
[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.]]])
Dimensions without coordinates: a, b, window_dim
>>> rolling = da.rolling(b=3, center=True)
>>> rolling.construct("window_dim")
<xarray.DataArray (a: 2, b: 4, window_dim: 3)> Size: 192B
array([[[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.],
[ 2., 3., nan]],
<BLANKLINE>
[[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.],
[ 6., 7., nan]]])
Dimensions without coordinates: a, b, window_dim
"""
if sliding_window_view_kwargs is None:
sliding_window_view_kwargs = {}
return self._construct(
self.obj,
window_dim=window_dim,
stride=stride,
fill_value=fill_value,
keep_attrs=keep_attrs,
sliding_window_view_kwargs=sliding_window_view_kwargs,
**window_dim_kwargs,
)
def _construct(
self,
obj: DataArray,
*,
window_dim: Hashable | Mapping[Any, Hashable] | None = None,
stride: int | Mapping[Any, int] = 1,
fill_value: Any = dtypes.NA,
keep_attrs: bool | None = None,
sliding_window_view_kwargs: Mapping[Any, Any] | None = None,
**window_dim_kwargs: Hashable,
) -> DataArray:
from xarray.core.dataarray import DataArray
if sliding_window_view_kwargs is None:
sliding_window_view_kwargs = {}
keep_attrs = self._get_keep_attrs(keep_attrs)
if window_dim is None:
if len(window_dim_kwargs) == 0:
raise ValueError(
"Either window_dim or window_dim_kwargs need to be specified."
)
window_dim = {d: window_dim_kwargs[str(d)] for d in self.dim}
window_dims = self._mapping_to_list(
window_dim, allow_default=False, allow_allsame=False
)
strides = self._mapping_to_list(stride, default=1)
window = obj.variable.rolling_window(
self.dim,
self.window,
window_dims,
center=self.center,
fill_value=fill_value,
**sliding_window_view_kwargs,
)
attrs = obj.attrs if keep_attrs else {}
result = DataArray(
window,
dims=obj.dims + tuple(window_dims),
coords=obj.coords,
attrs=attrs,
name=obj.name,
)
return result.isel(
{d: slice(None, None, s) for d, s in zip(self.dim, strides, strict=True)}
)
def reduce(
self,
func: Callable,
keep_attrs: bool | None = None,
*,
sliding_window_view_kwargs: Mapping[Any, Any] | None = None,
**kwargs: Any,
) -> DataArray:
"""Reduce each window by applying `func`.
Equivalent to ``.construct(...).reduce(func, ...)``.
Parameters
----------
func : callable
Function which can be called in the form
`func(x, **kwargs)` to return the result of collapsing an
np.ndarray over an the rolling dimension.
keep_attrs : bool, default: None
If True, the attributes (``attrs``) will be copied from the original
object to the new one. If False, the new object will be returned
without attributes. If None uses the global default.
sliding_window_view_kwargs
Keyword arguments that should be passed to the underlying array type's
``sliding_window_view`` function.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : DataArray
Array with summarized data.
See Also
--------
numpy.lib.stride_tricks.sliding_window_view
dask.array.lib.stride_tricks.sliding_window_view
Notes
-----
With dask arrays, it's possible to pass the ``automatic_rechunk`` kwarg as
``sliding_window_view_kwargs={"automatic_rechunk": True}``. This controls
whether dask should automatically rechunk the output to avoid
exploding chunk sizes. Automatically rechunking is the default behaviour.
Importantly, each chunk will be a view of the data so large chunk sizes are
only safe if *no* copies are made later.
Examples
--------
>>> da = xr.DataArray(np.arange(8).reshape(2, 4), dims=("a", "b"))
>>> rolling = da.rolling(b=3)
>>> rolling.construct("window_dim")
<xarray.DataArray (a: 2, b: 4, window_dim: 3)> Size: 192B
array([[[nan, nan, 0.],
[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.]],
<BLANKLINE>
[[nan, nan, 4.],
[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.]]])
Dimensions without coordinates: a, b, window_dim
>>> rolling.reduce(np.sum)
<xarray.DataArray (a: 2, b: 4)> Size: 64B
array([[nan, nan, 3., 6.],
[nan, nan, 15., 18.]])
Dimensions without coordinates: a, b
>>> rolling = da.rolling(b=3, min_periods=1)
>>> rolling.reduce(np.nansum)
<xarray.DataArray (a: 2, b: 4)> Size: 64B
array([[ 0., 1., 3., 6.],
[ 4., 9., 15., 18.]])
Dimensions without coordinates: a, b
"""
keep_attrs = self._get_keep_attrs(keep_attrs)
rolling_dim = {
d: utils.get_temp_dimname(self.obj.dims, f"_rolling_dim_{d}")
for d in self.dim
}
# save memory with reductions GH4325
fillna = kwargs.pop("fillna", dtypes.NA)
if fillna is not dtypes.NA:
obj = self.obj.fillna(fillna)
else:
obj = self.obj
windows = self._construct(
obj,
window_dim=rolling_dim,
keep_attrs=keep_attrs,
fill_value=fillna,
sliding_window_view_kwargs=sliding_window_view_kwargs,
)
dim = list(rolling_dim.values())
result = windows.reduce(func, dim=dim, keep_attrs=keep_attrs, **kwargs)
# Find valid windows based on count.
counts = self._counts(keep_attrs=False)
return result.where(counts >= self.min_periods)
def _counts(self, keep_attrs: bool | None) -> DataArray:
"""Number of non-nan entries in each rolling window."""
rolling_dim = {
d: utils.get_temp_dimname(self.obj.dims, f"_rolling_dim_{d}")
for d in self.dim
}
# We use False as the fill_value instead of np.nan, since boolean
# array is faster to be reduced than object array.
# The use of skipna==False is also faster since it does not need to
# copy the strided array.
dim = list(rolling_dim.values())
counts = (
self.obj.notnull(keep_attrs=keep_attrs)
.rolling(
dict(zip(self.dim, self.window, strict=True)),
center={d: self.center[i] for i, d in enumerate(self.dim)},
)
.construct(rolling_dim, fill_value=False, keep_attrs=keep_attrs)
.sum(dim=dim, skipna=False, keep_attrs=keep_attrs)
)
return counts
def _numbagg_reduce(self, func, keep_attrs, **kwargs):
# Some of this is copied from `_bottleneck_reduce`, we could reduce this as part
# of a wider refactor.
axis = self.obj.get_axis_num(self.dim[0])
padded = self.obj.variable
if self.center[0]:
if is_duck_dask_array(padded.data):
# workaround to make the padded chunk size larger than
# self.window - 1
shift = -(self.window[0] + 1) // 2
offset = (self.window[0] - 1) // 2
valid = (slice(None),) * axis + (
slice(offset, offset + self.obj.shape[axis]),
)
else:
shift = (-self.window[0] // 2) + 1
valid = (slice(None),) * axis + (slice(-shift, None),)
padded = padded.pad({self.dim[0]: (0, -shift)}, mode="constant")
if is_duck_dask_array(padded.data) and False:
raise AssertionError("should not be reachable")
else:
values = func(
padded.data,
window=self.window[0],
min_count=self.min_periods,
axis=axis,
)
if self.center[0]:
values = values[valid]
attrs = self.obj.attrs if keep_attrs else {}
return self.obj.__class__(
values, self.obj.coords, attrs=attrs, name=self.obj.name
)
def _bottleneck_reduce(self, func, keep_attrs, **kwargs):
# bottleneck doesn't allow min_count to be 0, although it should
# work the same as if min_count = 1
# Note bottleneck only works with 1d-rolling.
if self.min_periods == 0:
min_count = 1
else:
min_count = self.min_periods
axis = self.obj.get_axis_num(self.dim[0])
padded = self.obj.variable
if self.center[0]:
if is_duck_dask_array(padded.data):
# workaround to make the padded chunk size larger than
# self.window - 1
shift = -(self.window[0] + 1) // 2
offset = (self.window[0] - 1) // 2
valid = (slice(None),) * axis + (
slice(offset, offset + self.obj.shape[axis]),
)
else:
shift = (-self.window[0] // 2) + 1
valid = (slice(None),) * axis + (slice(-shift, None),)
padded = padded.pad({self.dim[0]: (0, -shift)}, mode="constant")
if is_duck_dask_array(padded.data):
values = dask_array_ops.dask_rolling_wrapper(
func, padded, axis=axis, window=self.window[0], min_count=min_count
)
else:
values = func(
padded.data, window=self.window[0], min_count=min_count, axis=axis
)
# index 0 is at the rightmost edge of the window
# need to reverse index here
# see GH #8541
if func in [bottleneck.move_argmin, bottleneck.move_argmax]:
values = self.window[0] - 1 - values
if self.center[0]:
values = values[valid]
attrs = self.obj.attrs if keep_attrs else {}
return self.obj.__class__(
values, self.obj.coords, attrs=attrs, name=self.obj.name
)
def _array_reduce(
self,
array_agg_func,
bottleneck_move_func,
numbagg_move_func,
rolling_agg_func,
keep_attrs,
fillna,
**kwargs,
):
if "dim" in kwargs:
warnings.warn(
f"Reductions are applied along the rolling dimension(s) "
f"'{self.dim}'. Passing the 'dim' kwarg to reduction "
f"operations has no effect.",
DeprecationWarning,
stacklevel=3,
)
del kwargs["dim"]
xp = duck_array_ops.get_array_namespace(self.obj.data)
if (
OPTIONS["use_numbagg"]
and module_available("numbagg")
and numbagg_move_func is not None
# TODO: we could at least allow this for the equivalent of `apply_ufunc`'s
# "parallelized". `rolling_exp` does this, as an example (but rolling_exp is
# much simpler)
and not is_duck_dask_array(self.obj.data)
# Numbagg doesn't handle object arrays and generally has dtype consistency,
# so doesn't deal well with bool arrays which are expected to change type.
and self.obj.data.dtype.kind not in "ObMm"
# TODO: we could also allow this, probably as part of a refactoring of this
# module, so we can use the machinery in `self.reduce`.
and self.ndim == 1
and xp is np
):
import numbagg
# Numbagg has a default ddof of 1. I (@max-sixty) think we should make
# this the default in xarray too, but until we do, don't use numbagg for
# std and var unless ddof is set to 1.
if (
numbagg_move_func not in [numbagg.move_std, numbagg.move_var]
or kwargs.get("ddof") == 1
):
return self._numbagg_reduce(
numbagg_move_func, keep_attrs=keep_attrs, **kwargs
)
if (
OPTIONS["use_bottleneck"]
and bottleneck_move_func is not None
and (
not is_duck_dask_array(self.obj.data)
or module_available("dask", "2024.11.0")
)
and self.ndim == 1
and xp is np
):
return self._bottleneck_reduce(
bottleneck_move_func, keep_attrs=keep_attrs, **kwargs
)
if rolling_agg_func:
return rolling_agg_func(self, keep_attrs=self._get_keep_attrs(keep_attrs))
if fillna is not None:
if fillna is dtypes.INF:
fillna = dtypes.get_pos_infinity(self.obj.dtype, max_for_int=True)
elif fillna is dtypes.NINF:
fillna = dtypes.get_neg_infinity(self.obj.dtype, min_for_int=True)
kwargs.setdefault("skipna", False)
kwargs.setdefault("fillna", fillna)
return self.reduce(array_agg_func, keep_attrs=keep_attrs, **kwargs)
| DataArrayRolling |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 968610,
"end": 969130
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of SetRepositoryInteractionLimit"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "repository")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
repository = sgqlc.types.Field("Repository", graphql_name="repository")
"""The repository that the interaction limit was set for."""
| SetRepositoryInteractionLimitPayload |
python | walkccc__LeetCode | solutions/3309. Maximum Possible Number by Binary Concatenation/3309.py | {
"start": 0,
"end": 397
} | class ____:
def maxGoodNumber(self, nums: list[int]) -> int:
ans = 0
def concat(a: int, b: int) -> int:
"""Returns the concatenation of the binary representations of a and b."""
return (a << b.bit_length()) + b
nums.sort(key=functools.cmp_to_key(
lambda a, b: concat(b, a) - concat(a, b)))
for num in nums:
ans = concat(ans, num)
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/segformer/modular_segformer.py | {
"start": 1324,
"end": 5781
} | class ____(BeitImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"height": 512, "width": 512}
do_resize = True
do_rescale = True
rescale_factor = 1 / 255
do_normalize = True
do_reduce_labels = False
do_center_crop = None
crop_size = None
def _preprocess_image_like_inputs(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput],
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Optional[Union[str, "torch.device"]] = None,
**kwargs: Unpack[SegformerImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
"""
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
images_kwargs = kwargs.copy()
images_kwargs["do_reduce_labels"] = False
batch_feature = self._preprocess(images, **images_kwargs)
if segmentation_maps is not None:
processed_segmentation_maps = self._prepare_image_like_inputs(
images=segmentation_maps,
expected_ndims=2,
do_convert_rgb=False,
input_data_format=ChannelDimension.FIRST,
)
segmentation_maps_kwargs = kwargs.copy()
segmentation_maps_kwargs.update(
{
"do_normalize": False,
"do_rescale": False,
# Nearest interpolation is used for segmentation maps instead of BILINEAR.
"interpolation": F.InterpolationMode.NEAREST_EXACT,
}
)
processed_segmentation_maps = self._preprocess(
images=processed_segmentation_maps, **segmentation_maps_kwargs
).pixel_values
batch_feature["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64)
return batch_feature
def _preprocess(
self,
images: list["torch.Tensor"],
do_reduce_labels: bool,
interpolation: Optional["F.InterpolationMode"],
do_resize: bool,
do_rescale: bool,
do_normalize: bool,
size: SizeDict,
rescale_factor: float,
image_mean: Union[float, list[float]],
image_std: Union[float, list[float]],
disable_grouping: bool,
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature: # Return type can be list if return_tensors=None
if do_reduce_labels:
images = self.reduce_label(images) # Apply reduction if needed
# Group images by size for batched resizing
resized_images = images
if do_resize:
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
resized_stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
resized_images_grouped[shape] = resized_stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
# Group images by size for further processing (rescale/normalize)
# Needed in case do_resize is False, or resize returns images with different sizes
grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
# Stack images into a single tensor if return_tensors is set
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
__all__ = ["SegformerImageProcessorFast"]
| SegformerImageProcessorFast |
python | PyCQA__pylint | tests/functional/m/membership_protocol_py3.py | {
"start": 253,
"end": 402
} | class ____(type):
def __getitem__(cls, key):
if key < 10:
return key ** 2
raise IndexError("bad index")
| MetaOldIterable |
python | spyder-ide__spyder | spyder/plugins/variableexplorer/widgets/arrayeditor.py | {
"start": 2116,
"end": 4151
} | class ____:
OptionsToolButton = 'options_button_widget'
Toolbar = 'toolbar'
ToolbarStretcher = 'toolbar_stretcher'
# Note: string and unicode data types will be formatted with '' (see below)
SUPPORTED_FORMATS = {
'single': '.6g',
'double': '.6g',
'float_': '.6g',
'longfloat': '.6g',
'float16': '.6g',
'float32': '.6g',
'float64': '.6g',
'float96': '.6g',
'float128': '.6g',
'csingle': '.6g',
'complex_': '.6g',
'clongfloat': '.6g',
'complex64': '.6g',
'complex128': '.6g',
'complex192': '.6g',
'complex256': '.6g',
'byte': 'd',
'bytes8': 's',
'short': 'd',
'intc': 'd',
'int_': 'd',
'longlong': 'd',
'intp': 'd',
'int8': 'd',
'int16': 'd',
'int32': 'd',
'int64': 'd',
'ubyte': 'd',
'ushort': 'd',
'uintc': 'd',
'uint': 'd',
'ulonglong': 'd',
'uintp': 'd',
'uint8': 'd',
'uint16': 'd',
'uint32': 'd',
'uint64': 'd',
'bool_': '',
'bool8': '',
'bool': '',
}
LARGE_SIZE = 5e5
LARGE_NROWS = 1e5
LARGE_COLS = 60
#==============================================================================
# ---- Utility functions
#==============================================================================
def is_float(dtype):
"""Return True if datatype dtype is a float kind"""
return ('float' in dtype.name) or dtype.name in ['single', 'double']
def is_number(dtype):
"""Return True is datatype dtype is a number kind"""
return is_float(dtype) or ('int' in dtype.name) or ('long' in dtype.name) \
or ('short' in dtype.name)
def get_idx_rect(index_list):
"""Extract the boundaries from a list of indexes"""
rows, cols = list(zip(*[(i.row(), i.column()) for i in index_list]))
return ( min(rows), max(rows), min(cols), max(cols) )
#==============================================================================
# ---- Main classes
#==============================================================================
| ArrayEditorWidgets |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 24341,
"end": 24513
} | class ____(Structure):
_fields_ = fvmlib._fields_
def describe(self):
s = {}
s["header_addr"] = int(self.header_addr)
return s
| fvmlib_command |
python | openai__openai-python | src/openai/types/responses/response_input_audio.py | {
"start": 209,
"end": 415
} | class ____(BaseModel):
data: str
"""Base64-encoded audio data."""
format: Literal["mp3", "wav"]
"""The format of the audio data. Currently supported formats are `mp3` and `wav`."""
| InputAudio |
python | sympy__sympy | sympy/diffgeom/diffgeom.py | {
"start": 72061,
"end": 72125
} | class ____(_deprecated_container, list):
pass
| _deprecated_list |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/exception.py | {
"start": 0,
"end": 109
} | class ____(Exception):
"""
Any error related to ml-agents environment.
"""
pass
| UnityException |
python | scipy__scipy | scipy/fftpack/tests/test_real_transforms.py | {
"start": 11884,
"end": 12028
} | class ____(_TestIDCTBase):
def setup_method(self):
self.rdt = np.float64
self.dec = 14
self.type = 3
| TestIDCTIIIDouble |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/utils/timer.py | {
"start": 212,
"end": 1005
} | class ____:
"""
A timer for recording execution times of nodes.
Parameters
----------
query_start
Duration in nanoseconds since the query was started on the
Polars side
"""
def __init__(self, query_start: int):
self.query_start = query_start
self.timings: list[tuple[int, int, str]] = []
def store(self, start: int, end: int, name: str) -> None:
"""
Store timing for a node.
Parameters
----------
start
Start of the execution for this node (use time.monotonic_ns).
end
End of the execution for this node.
name
The name for this node.
"""
self.timings.append((start - self.query_start, end - self.query_start, name))
| Timer |
python | RaRe-Technologies__gensim | gensim/test/test_nmf.py | {
"start": 558,
"end": 6821
} | class ____(unittest.TestCase, basetmtests.TestBaseTopicModel):
def setUp(self):
self.model = nmf.Nmf(
common_corpus,
id2word=common_dictionary,
chunksize=1,
num_topics=2,
passes=100,
random_state=42,
)
def test_generator(self):
model_1 = nmf.Nmf(
iter(common_corpus * 100),
id2word=common_dictionary,
chunksize=1,
num_topics=2,
passes=1,
random_state=42,
)
model_2 = nmf.Nmf(
common_corpus * 100,
id2word=common_dictionary,
chunksize=1,
num_topics=2,
passes=1,
random_state=42,
)
self.assertTrue(np.allclose(model_1.get_topics(), model_2.get_topics()))
def test_update(self):
model = copy.deepcopy(self.model)
model.update(common_corpus)
self.assertFalse(np.allclose(self.model.get_topics(), model.get_topics()))
def test_random_state(self):
model_1 = nmf.Nmf(
common_corpus,
id2word=common_dictionary,
chunksize=1,
num_topics=2,
passes=100,
random_state=42,
)
model_2 = nmf.Nmf(
common_corpus,
id2word=common_dictionary,
chunksize=1,
num_topics=2,
passes=100,
random_state=0,
)
self.assertTrue(np.allclose(self.model.get_topics(), model_1.get_topics()))
self.assertFalse(np.allclose(self.model.get_topics(), model_2.get_topics()))
def test_transform(self):
# transform one document
doc = list(common_corpus)[0]
transformed = self.model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
# The results sometimes differ on Windows, for unknown reasons.
# See https://github.com/RaRe-Technologies/gensim/pull/2481#issuecomment-549456750
expected = [0.03028875, 0.96971124]
# must contain the same values, up to re-ordering
self.assertTrue(np.allclose(sorted(vec), sorted(expected), atol=1e-3))
# transform one word
word = 5
transformed = self.model.get_term_topics(word)
vec = matutils.sparse2full(transformed, 2)
expected = [[0.3076869, 0.69231313]]
# must contain the same values, up to re-ordering
self.assertTrue(np.allclose(sorted(vec), sorted(expected), atol=1e-3))
def test_top_topics(self):
top_topics = self.model.top_topics(common_corpus)
for topic, score in top_topics:
self.assertTrue(isinstance(topic, list))
self.assertTrue(isinstance(score, float))
for v, k in topic:
self.assertTrue(isinstance(k, str))
self.assertTrue(np.issubdtype(v, float))
def test_get_topic_terms(self):
topic_terms = self.model.get_topic_terms(1)
for k, v in topic_terms:
self.assertTrue(isinstance(k, numbers.Integral))
self.assertTrue(np.issubdtype(v, float))
def test_get_document_topics(self):
doc_topics = self.model.get_document_topics(common_corpus)
for topic in doc_topics:
self.assertTrue(isinstance(topic, list))
for k, v in topic:
self.assertTrue(isinstance(k, numbers.Integral))
self.assertTrue(np.issubdtype(v, float))
# Test case to use the get_document_topic function for the corpus
all_topics = self.model.get_document_topics(common_corpus)
print(list(all_topics))
for topic in all_topics:
self.assertTrue(isinstance(topic, list))
for k, v in topic: # list of doc_topics
self.assertTrue(isinstance(k, numbers.Integral))
self.assertTrue(np.issubdtype(v, float))
def test_term_topics(self):
# check with word_type
result = self.model.get_term_topics(2)
for topic_no, probability in result:
self.assertTrue(isinstance(topic_no, int))
self.assertTrue(np.issubdtype(probability, float))
# if user has entered word instead, check with word
result = self.model.get_term_topics(str(self.model.id2word[2]))
for topic_no, probability in result:
self.assertTrue(isinstance(topic_no, int))
self.assertTrue(np.issubdtype(probability, float))
def test_persistence(self):
fname = get_tmpfile('gensim_models_nmf.tst')
self.model.save(fname)
model2 = nmf.Nmf.load(fname)
tstvec = []
self.assertTrue(np.allclose(self.model[tstvec], model2[tstvec])) # try projecting an empty vector
def test_large_mmap(self):
fname = get_tmpfile('gensim_models_nmf.tst')
# simulate storing large arrays separately
self.model.save(fname, sep_limit=0)
# test loading the large model arrays with mmap
model2 = nmf.Nmf.load(fname, mmap='r')
self.assertEqual(self.model.num_topics, model2.num_topics)
tstvec = []
self.assertTrue(np.allclose(self.model[tstvec], model2[tstvec])) # try projecting an empty vector
def test_large_mmap_compressed(self):
fname = get_tmpfile('gensim_models_nmf.tst.gz')
# simulate storing large arrays separately
self.model.save(fname, sep_limit=0)
# test loading the large model arrays with mmap
self.assertRaises(IOError, nmf.Nmf.load, fname, mmap='r')
def test_dtype_backward_compatibility(self):
nmf_fname = datapath('nmf_model')
test_doc = [(0, 1), (1, 1), (2, 1)]
expected_topics = [(1, 1.0)]
# save model to use in test
# self.model.save(nmf_fname)
# load a model saved using the latest version of Gensim
model = nmf.Nmf.load(nmf_fname)
# and test it on a predefined document
topics = model[test_doc]
self.assertTrue(np.allclose(expected_topics, topics))
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
unittest.main()
| TestNmf |
python | pypa__pip | src/pip/_vendor/pkg_resources/__init__.py | {
"start": 10388,
"end": 16696
} | class ____(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories: dict[type[_ModuleLike], _ProviderFactoryType] = {}
PY_MAJOR = '{}.{}'.format(*sys.version_info)
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(
loader_type: type[_ModuleLike], provider_factory: _ProviderFactoryType
):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
@overload
def get_provider(moduleOrReq: str) -> IResourceProvider: ...
@overload
def get_provider(moduleOrReq: Requirement) -> Distribution: ...
def get_provider(moduleOrReq: str | Requirement) -> IResourceProvider | Distribution:
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
@functools.lru_cache(maxsize=None)
def _macos_vers():
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
with open(plist, 'rb') as fh:
plist_content = plistlib.load(fh)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
return version.split('.')
def _macos_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and macOS.
"""
from sysconfig import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macos_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (
int(version[0]),
int(version[1]),
_macos_arch(machine),
)
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided: str | None, required: str | None):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided == required:
# easy case
return True
# macOS special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macOS designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if (
dversion == 7
and macosversion >= "10.3"
or dversion == 8
and macosversion >= "10.4"
):
return True
# egg isn't macOS or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
@overload
def get_distribution(dist: _DistributionT) -> _DistributionT: ...
@overload
def get_distribution(dist: _PkgReqType) -> Distribution: ...
def get_distribution(dist: Distribution | _PkgReqType) -> Distribution:
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, str):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
# Bad type narrowing, dist has to be a Requirement here, so get_provider has to return Distribution
dist = get_provider(dist) # type: ignore[assignment]
if not isinstance(dist, Distribution):
raise TypeError("Expected str, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist: _EPDistType, group: str, name: str) -> _ResolvedEntryPoint:
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
@overload
def get_entry_map(
dist: _EPDistType, group: None = None
) -> dict[str, dict[str, EntryPoint]]: ...
@overload
def get_entry_map(dist: _EPDistType, group: str) -> dict[str, EntryPoint]: ...
def get_entry_map(dist: _EPDistType, group: str | None = None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist: _EPDistType, group: str, name: str):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
| UnknownExtra |
python | google__pytype | pytype/pytd/parse/node_test.py | {
"start": 2288,
"end": 7915
} | class ____(unittest.TestCase):
"""Test the node.Node class generator."""
def test_eq1(self):
"""Test the __eq__ and __ne__ functions of node.Node."""
n1 = Node1(a=1, b=2)
n2 = Node1(a=1, b=2)
self.assertEqual(n1, n2)
self.assertFalse(n1 != n2)
def test_hash1(self):
n1 = Node1(a=1, b=2)
n2 = Node1(a=1, b=2)
self.assertEqual(hash(n1), hash(n2))
def test_eq2(self):
"""Test the __eq__ and __ne__ functions of identical nested nodes."""
n1 = Node1(a=1, b=2)
n2 = Node1(a=1, b=2)
d1 = Node2(x="foo", y=n1)
d2 = Node2(x="foo", y=n1)
d3 = Node2(x="foo", y=n2)
d4 = Node2(x="foo", y=n2)
self.assertTrue(d1 == d2 and d2 == d3 and d3 == d4 and d4 == d1)
# Since node overloads __ne___, too, test it explicitly:
self.assertFalse(d1 != d2 or d2 != d3 or d3 != d4 or d4 != d1)
def test_hash2(self):
n1 = Node1(a=1, b=2)
n2 = Node1(a=1, b=2)
d1 = Node2(x="foo", y=n1)
d2 = Node2(x="foo", y=n1)
d3 = Node2(x="foo", y=n2)
d4 = Node2(x="foo", y=n2)
self.assertEqual(hash(d1), hash(d2))
self.assertEqual(hash(d2), hash(d3))
self.assertEqual(hash(d3), hash(d4))
self.assertEqual(hash(d4), hash(d1))
def test_deep_eq2(self):
"""Test the __eq__ and __ne__ functions of differing nested nodes."""
n1 = Node1(a=1, b=2)
n2 = Node1(a=1, b=3)
d1 = Node2(x="foo", y=n1)
d2 = Node3(x="foo", y=n1)
d3 = Node2(x="foo", y=n2)
d4 = Node3(x="foo", y=n2)
self.assertNotEqual(d1, d2)
self.assertNotEqual(d1, d3)
self.assertNotEqual(d1, d4)
self.assertNotEqual(d2, d3)
self.assertNotEqual(d2, d4)
self.assertNotEqual(d3, d4)
self.assertFalse(d1 == d2)
self.assertFalse(d1 == d3)
self.assertFalse(d1 == d4)
self.assertFalse(d2 == d3)
self.assertFalse(d2 == d4)
self.assertFalse(d3 == d4)
def test_deep_hash2(self):
n1 = Node1(a=1, b=2)
n2 = Node1(a=1, b=3)
d1 = Node2(x="foo", y=n1)
d2 = Node3(x="foo", y=n1)
d3 = Node2(x="foo", y=n2)
d4 = Node3(x="foo", y=n2)
self.assertNotEqual(hash(d1), hash(d2))
self.assertNotEqual(hash(d1), hash(d3))
self.assertNotEqual(hash(d1), hash(d4))
self.assertNotEqual(hash(d2), hash(d3))
self.assertNotEqual(hash(d2), hash(d4))
self.assertNotEqual(hash(d3), hash(d4))
def test_immutable(self):
"""Test that node.Node has/preserves immutatibility."""
n1 = Node1(a=1, b=2)
n2 = Node2(x="foo", y=n1)
with self.assertRaises(AttributeError):
n1.a = 2
with self.assertRaises(AttributeError):
n2.x = "bar"
with self.assertRaises(AttributeError):
n2.x.b = 3
def test_visitor1(self):
"""Test node.Node.Visit() for a visitor that modifies leaf nodes."""
x = X(1, (1, 2))
y = Y((V(1),), Data(42, 43, 44))
xy = XY(x, y)
xy_expected = ("XY(x=X(a=1, b=(1, 2)), y=Y(c=(V(x=1),),"
" d=Data(d1=42, d2=43, d3=44)))")
self.assertEqual(repr(xy), xy_expected)
v = DataVisitor()
new_xy = xy.Visit(v)
self.assertEqual(repr(new_xy),
"XY(x=X(a=1, b=(1, 2)), y=Y(c=(V(x=1),),"
" d=Data(d1=42, d2=43, d3=-1)))")
self.assertEqual(repr(xy), xy_expected) # check that xy is unchanged
def test_visitor2(self):
"""Test node.Node.Visit() for visitors that modify inner nodes."""
xy = XY(V(1), Data(1, 2, 3))
xy_expected = "XY(x=V(x=1), y=Data(d1=1, d2=2, d3=3))"
self.assertEqual(repr(xy), xy_expected)
v = MultiNodeVisitor()
new_xy = xy.Visit(v, 42)
self.assertEqual(repr(new_xy),
"XY(x=X(a=V(x=42), b=V(x=42)), y=XY(x=42, y=42))")
self.assertEqual(repr(xy), xy_expected) # check that xy is unchanged
def test_skip_visitor(self):
tree = XY(V(Data(1, 2, 3)), XY(Data(3, 4, 5), Data(6, 7, 8)))
init = ("XY(x=V(x=Data(d1=1, d2=2, d3=3)), y=XY(x=Data(d1=3, d2=4, d3=5), "
"y=Data(d1=6, d2=7, d3=8)))")
self.assertEqual(repr(tree), init)
new_tree = tree.Visit(SkipNodeVisitor())
exp = ("XY(x=V(x=Data(d1=0, d2=0, d3=0)), y=XY(x=Data(d1=3, d2=4, d3=5), "
"y=Data(d1=6, d2=7, d3=8)))")
self.assertEqual(repr(new_tree), exp)
def test_recursion(self):
"""Test node.Node.Visit() for visitors that preserve attributes."""
y = Y(Y(1, 2), Y(3, Y(4, 5)))
y_expected = "Y(c=Y(c=1, d=2), d=Y(c=3, d=Y(c=4, d=5)))"
self.assertEqual(repr(y), y_expected)
v = MultiNodeVisitor()
new_y = y.Visit(v)
new_repr = "X(a=X(a=1, b=2), b=X(a=3, b=X(a=4, b=5)))"
self.assertEqual(repr(new_y), new_repr)
self.assertEqual(repr(y), y_expected) # check that original is unchanged
def test_tuple(self):
"""Test node.Node.Visit() for nodes that contain tuples."""
v = V((Data(1, 2, 3), Data(4, 5, 6)))
v_expected = "V(x=(Data(d1=1, d2=2, d3=3), Data(d1=4, d2=5, d3=6)))"
self.assertEqual(repr(v), v_expected)
visit = DataVisitor()
new_v = v.Visit(visit)
new_v_expected = "V(x=(Data(d1=1, d2=2, d3=-1), Data(d1=4, d2=5, d3=-1)))"
self.assertEqual(repr(new_v), new_v_expected)
def test_ordering(self):
nodes = [Node1(True, False), Node1(1, 2),
Node2(1, 1), Node2("2", "1"),
Node3(1, 1), Node3(2, 2),
V(2)]
for n1, n2 in zip(nodes[:-1], nodes[1:]):
self.assertLess(n1, n2)
self.assertLessEqual(n1, n2)
self.assertGreater(n2, n1)
self.assertGreaterEqual(n2, n1)
for p in itertools.permutations(nodes):
self.assertEqual(list(sorted(p)), nodes)
# pylint: enable=g-generic-assert
if __name__ == "__main__":
unittest.main()
| TestNode |
python | pypa__pipenv | pipenv/patched/pip/_vendor/distlib/locators.py | {
"start": 15501,
"end": 17308
} | class ____(Locator):
"""
This locator uses XML-RPC to locate distributions. It therefore
cannot be used with simple mirrors (that only mirror file content).
"""
def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
return set(self.client.list_packages())
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
versions = self.client.package_releases(name, True)
for v in versions:
urls = self.client.release_urls(name, v)
data = self.client.release_data(name, v)
metadata = Metadata(scheme=self.scheme)
metadata.name = data['name']
metadata.version = data['version']
metadata.license = data.get('license')
metadata.keywords = data.get('keywords', [])
metadata.summary = data.get('summary')
dist = Distribution(metadata)
if urls:
info = urls[0]
metadata.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[v] = dist
for info in urls:
url = info['url']
digest = self._get_digest(info)
result['urls'].setdefault(v, set()).add(url)
result['digests'][url] = digest
return result
| PyPIRPCLocator |
python | getsentry__sentry | src/sentry/organizations/services/organization/service.py | {
"start": 1296,
"end": 19975
} | class ____(RpcService):
key = "organization"
local_mode = SiloMode.REGION
@classmethod
def get_local_implementation(cls) -> RpcService:
from sentry.organizations.services.organization.impl import (
DatabaseBackedOrganizationService,
)
return DatabaseBackedOrganizationService()
def get(self, id: int) -> RpcOrganization | None:
"""
Get an organization by id
:param id: The organization id
"""
org_context = self.get_organization_by_id(id=id)
return org_context.organization if org_context else None
@regional_rpc_method(resolve=ByOrganizationId("id"))
@abstractmethod
def serialize_organization(
self,
*,
id: int,
as_user: RpcUser | None = None,
) -> Any | None:
"""
Fetch an organization's API serialized form
Note that this can be None if the organization is already deleted
in the corresponding region silo.
:param id: The organization id
:param as_user: The user making the request, used for authorization on the output.
"""
@regional_rpc_method(resolve=ByOrganizationId("id"), return_none_if_mapping_not_found=True)
@abstractmethod
def get_organization_by_id(
self,
*,
id: int,
user_id: int | None = None,
slug: str | None = None,
include_projects: bool | None = True,
include_teams: bool | None = True,
) -> RpcUserOrganizationContext | None:
"""
Fetches the organization, team, and project data given by an organization id, regardless of
its visibility status
When user_id is provided, membership data related to that user from the organization
is also given in the response. See RpcUserOrganizationContext for more info.
:param id: The id of the organization to fetch
:param user_id: The id of the user to fetch membership for.
:param slug: The slug of the organization to fetch (alternative to id)
:param include_projects: Whether you want projects in the response.
:param include_teams: Whether you want teams in the response.
"""
@regional_rpc_method(resolve=ByOrganizationSlug(), return_none_if_mapping_not_found=True)
@abstractmethod
def get_org_by_slug(
self,
*,
slug: str,
user_id: int | None = None,
) -> RpcOrganizationSummary | None:
"""
Fetches an organization by slug.
If user_id is passed, it will enforce visibility rules. This method is differentiated from
get_organization_by_slug by not being cached and returning RpcOrganizationSummary instead of
org contexts
:param slug: The slug to search by
:param user_id: The user to check membership with
"""
@regional_rpc_method(resolve=ByOrganizationId("id"), return_none_if_mapping_not_found=True)
@abstractmethod
def get_org_by_id(
self,
*,
id: int,
user_id: int | None = None,
) -> RpcOrganizationSummary | None:
"""
Fetch an organization by id.
If user_id is passed, it will enforce visibility rules. This method is differentiated from
get_organization_by_id by not being cached and returning RpcOrganizationSummary instead of
org contexts
:param id: The id to search by
:param user_id: The user to check membership with
"""
@regional_rpc_method(resolve=ByRegionName())
@abstractmethod
def get_organizations_by_user_and_scope(
self, *, region_name: str, user: RpcUser, scope: str | None = None
) -> list[RpcOrganization]:
"""
Fetches organizations for the given user, with the given organization member scope.
:param region_name: The region to locate an organization in
:param user: The user to filter by membership
:param scope: The api scopes to search by
"""
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def update_flags(self, *, organization_id: int, flags: RpcOrganizationFlagsUpdate) -> None:
"""
Update the flags on an organization
:param organization_id: The organization id
:param flags: Dict of flags to set.
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def get_aggregate_project_flags(self, *, organization_id: int) -> RpcProjectFlags:
"""
Get the union-aggregated project flags of an the organization
:param organization_id: The organization id
"""
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def check_membership_by_email(
self, *, organization_id: int, email: str
) -> RpcOrganizationMember | None:
"""
Used to look up an organization membership by an email
"""
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def check_membership_by_id(
self, *, organization_id: int, user_id: int
) -> RpcOrganizationMember | None:
"""
Used to look up an organization membership by a user id
:param organization_id: The id to search by
:param user_id: The user to check membership with
"""
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def get_member_summaries_by_ids(
self, *, organization_id: int, user_ids: list[int]
) -> list[RpcOrganizationMemberSummary]:
"""
Used to look up multiple membership summaries by users' id.
:param organization_id: The id to search by
:param user_ids: The userids to get membership data on.
"""
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def get_invite_by_id(
self,
*,
organization_id: int,
organization_member_id: int | None = None,
user_id: int | None = None,
email: str | None = None,
) -> RpcUserInviteContext | None:
"""
Get a membership invite context
Provide an organization_id and one of organziation_member_id, user_id and email.
:param organziation_id: The organization to search in
:param organization_member_id: The member id to search by
:param user_id: The userid to search by
:param email: The email to search by
"""
pass
@regional_rpc_method(resolve=ByOrganizationSlug(), return_none_if_mapping_not_found=True)
@abstractmethod
def get_invite_by_slug(
self,
*,
slug: str,
organization_member_id: int | None = None,
user_id: int | None = None,
email: str | None = None,
) -> RpcUserInviteContext | None:
"""
Get a membership invite context
Provide an organization slug and one of organziation_member_id, user_id and email.
:param slug: The organization to search in
:param organization_member_id: The member id to search by
:param user_id: The userid to search by
:param email: The email to search by
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def delete_organization_member(
self, *, organization_id: int, organization_member_id: int
) -> bool:
"""
Delete an organization member by its id.
:param organization_id: The organization to search in
:param organization_member_id: The id of the membership
"""
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def set_user_for_organization_member(
self,
*,
organization_member_id: int,
organization_id: int,
user_id: int,
) -> RpcOrganizationMember | None:
"""
Set the user id for an organization member.
:param organization_id: The organization to search in
:param organization_member_id: The id of the membership
:param user_id: The new user for the membership
"""
def check_organization_by_slug(self, *, slug: str, only_visible: bool) -> int | None:
"""
If exists and matches the only_visible requirement, returns an organization's id by the slug.
:param slug: The organization to search in
:param only_visible: Whether or not to consider only visible orgs
"""
return _organization_check_service.check_organization_by_slug(
slug=slug, only_visible=only_visible
)
def check_organization_by_id(self, *, id: int, only_visible: bool) -> bool:
"""
Checks if an organization exists by the id.
:param id: The organization to search in
:param only_visible: Whether or not to consider only visible orgs
"""
return _organization_check_service.check_organization_by_id(
id=id, only_visible=only_visible
)
def get_organization_by_slug(
self,
*,
slug: str,
only_visible: bool,
user_id: int | None = None,
include_projects: bool | None = True,
include_teams: bool | None = True,
) -> RpcUserOrganizationContext | None:
"""
Defers to check_organization_by_slug and get_organization_by_id
"""
from sentry.models.organization import OrganizationStatus
org_id = self.check_organization_by_slug(slug=slug, only_visible=only_visible)
if org_id is None:
return None
org_context = self.get_organization_by_id(
id=org_id,
user_id=user_id,
include_projects=include_projects,
include_teams=include_teams,
)
if (
only_visible
and org_context
and org_context.organization.status != OrganizationStatus.ACTIVE
):
return None
return org_context
@regional_rpc_method(resolve=RequireSingleOrganization())
@abstractmethod
def get_default_organization(self) -> RpcOrganization:
"""
Get the default Organization
See Organization.get_default()
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def add_organization_member(
self,
*,
organization_id: int,
default_org_role: str,
user_id: int | None = None,
email: str | None = None,
flags: RpcOrganizationMemberFlags | None = None,
role: str | None = None,
inviter_id: int | None = None,
invite_status: int | None = None,
) -> RpcOrganizationMember:
"""
Add an organization member
:param organization_id: The id of the organization to add a member to
:param default_org_role: The fallback role the member should have.
:param user_id: The id of the user to create a membership for
:param email: The email to create a membership invite for.
:param flags: The membership flags to use.
:param role: The member's role, overrides default_org_role
:param inviter_id: The user_id who is creating the membership
:param invite_status: The status of the invitation.
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def update_organization_member(
self, *, organization_id: int, member_id: int, attrs: OrganizationMemberUpdateArgs
) -> RpcOrganizationMember | None:
"""
Update an organization member
:param organziation_id: The organization to update
:param member_id: The org membership id to update.
:param attrs: The attributes to set.
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def get_single_team(self, *, organization_id: int) -> RpcTeam | None:
"""If the organization has exactly one team, return it.
Return None if the organization has no teams or more than one.
"""
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def add_team_member(
self, *, organization_id: int, team_id: int, organization_member_id: int
) -> None:
"""
Add a team member for a given organization, team and member.
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def get_or_create_team_member(
self,
*,
organization_id: int,
team_id: int,
organization_member_id: int,
role: str | None,
) -> None:
"""
Get or create a team member
:param organziation_id: The organization to update
:param team_id: The team to add a member to
:param organization_member_id: The member id
:param role: The member role (only used during create)
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def get_or_create_default_team(
self,
*,
organization_id: int,
new_team_slug: str,
) -> RpcTeam:
"""
Get or create a team with a given slug.
"""
pass
@regional_rpc_method(resolve=ByOrganizationIdAttribute("organization_member"))
@abstractmethod
def update_membership_flags(self, *, organization_member: RpcOrganizationMember) -> None:
"""
Update the flags on a membership
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def merge_users(self, *, organization_id: int, from_user_id: int, to_user_id: int) -> None:
"""
Merge two members.
Will update all teams and group related models to reflect new member
If `to_user_id` does not have a membership in the organization, a membership
will be created for them.
:param organization_id: The organization to operate on
:param from_user_id: The user id of the membership to merge
:param to_user_id: The user id of the user to merge into
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def update_default_role(self, *, organization_id: int, default_role: str) -> RpcOrganization:
"""
Update the default role for an organization
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def remove_user(self, *, organization_id: int, user_id: int) -> RpcOrganizationMember | None:
"""
Remove a membership by user_id
"""
pass
@regional_rpc_method(resolve=ByRegionName())
@abstractmethod
def update_region_user(self, *, user: RpcRegionUser, region_name: str) -> None:
"""
Update all memberships in a region to reflect changes in user details.
Will sync is_active and email attributes.
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def reset_idp_flags(self, *, organization_id: int) -> None:
"""
Reset the identity provider related flags for all members in an organization
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def get_option(self, *, organization_id: int, key: str) -> OptionValue:
"""
Get an organziation option by key
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def update_option(self, *, organization_id: int, key: str, value: OptionValue) -> bool:
"""
Update an organziation option by key
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def delete_option(self, *, organization_id: int, key: str) -> None:
"""
Delete an organization option by key
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def send_sso_link_emails(
self, *, organization_id: int, sending_user_email: str, provider_key: str
) -> None:
"""
Send SSO link emails to all members in the organization
:param organization_id: The organization to operate on
:param sending_user_email: The email address who initiated the link process
:param provider_key: The SSO provider key
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def send_sso_unlink_emails(
self, *, organization_id: int, sending_user_email: str, provider_key: str
) -> None:
"""
Send SSO link break emails to all members in the organization
:param organization_id: The organization to operate on
:param sending_user_email: The email address who initiated the link process
:param provider_key: The SSO provider key
"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def count_members_without_sso(self, *, organization_id: int) -> int:
"""Get the number of users without SSO flags set"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def delete_organization(
self, *, organization_id: int, user: RpcUser
) -> RpcOrganizationDeleteResponse:
"""Delete an organization"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def create_org_delete_log(
self, *, organization_id: int, audit_log_actor: RpcAuditLogEntryActor
) -> None:
"""Record an audit log for an organization deletion"""
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def send_signal(
self,
*,
signal: RpcOrganizationSignal,
organization_id: int,
args: Mapping[str, int | str | None],
) -> None:
"""Trigger a django signal on an organization synchronously"""
pass
def schedule_signal(
self,
signal: Signal,
organization_id: int,
args: Mapping[str, int | str | None],
) -> None:
"""Trigger a django signal on an organization asynchronously"""
_organization_signal_service.schedule_signal(
signal=signal, organization_id=organization_id, args=args
)
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def get_organization_owner_members(
self, *, organization_id: int
) -> list[RpcOrganizationMember]:
"""Get a list of members with the owner role"""
pass
| OrganizationService |
python | pytorch__pytorch | test/test_datapipe.py | {
"start": 22685,
"end": 78322
} | class ____(TestCase):
def _serialization_test_helper(self, datapipe, use_dill):
if use_dill:
serialized_dp = dill.dumps(datapipe)
deserialized_dp = dill.loads(serialized_dp)
else:
serialized_dp = pickle.dumps(datapipe)
deserialized_dp = pickle.loads(serialized_dp)
try:
self.assertEqual(list(datapipe), list(deserialized_dp))
except AssertionError as e:
print(f"{datapipe} is failing.")
raise e
def _serialization_test_for_single_dp(self, dp, use_dill=False):
# 1. Testing for serialization before any iteration starts
self._serialization_test_helper(dp, use_dill)
# 2. Testing for serialization after DataPipe is partially read
it = iter(dp)
_ = next(it)
self._serialization_test_helper(dp, use_dill)
# 3. Testing for serialization after DataPipe is fully read
it = iter(dp)
_ = list(it)
self._serialization_test_helper(dp, use_dill)
def _serialization_test_for_dp_with_children(self, dp1, dp2, use_dill=False):
# 1. Testing for serialization before any iteration starts
self._serialization_test_helper(dp1, use_dill)
self._serialization_test_helper(dp2, use_dill)
# 2. Testing for serialization after DataPipe is partially read
it1, it2 = iter(dp1), iter(dp2)
_, _ = next(it1), next(it2)
# Catch `fork`, `demux` "some child DataPipes are not exhausted" warning
with warnings.catch_warnings(record=True):
self._serialization_test_helper(dp1, use_dill)
self._serialization_test_helper(dp2, use_dill)
# 2.5. Testing for serialization after one child DataPipe is fully read
# (Only for DataPipes with children DataPipes)
it1 = iter(dp1)
_ = list(it1) # fully read one child
# Catch `fork`, `demux` "some child DataPipes are not exhausted" warning
with warnings.catch_warnings(record=True):
self._serialization_test_helper(dp1, use_dill)
self._serialization_test_helper(dp2, use_dill)
# 3. Testing for serialization after DataPipe is fully read
it2 = iter(dp2)
_ = list(it2) # fully read the other child
self._serialization_test_helper(dp1, use_dill)
self._serialization_test_helper(dp2, use_dill)
def test_serializable(self):
picklable_datapipes: list = [
(
dp.iter.Batcher,
None,
(
3,
True,
),
{},
),
(dp.iter.Collator, None, (_fake_fn,), {}),
(dp.iter.Concater, None, (dp.iter.IterableWrapper(range(5)),), {}),
(dp.iter.Demultiplexer, None, (2, _simple_filter_fn), {}),
(dp.iter.FileLister, ".", (), {}),
(dp.iter.FileOpener, None, (), {}),
(dp.iter.Filter, None, (_fake_filter_fn,), {}),
(dp.iter.Filter, None, (partial(_fake_filter_fn_constant, 5),), {}),
(dp.iter.Forker, None, (2,), {}),
(dp.iter.Forker, None, (2,), {"copy": "shallow"}),
(dp.iter.Grouper, None, (_fake_filter_fn,), {"group_size": 2}),
(dp.iter.IterableWrapper, range(10), (), {}),
(dp.iter.Mapper, None, (_fake_fn,), {}),
(dp.iter.Mapper, None, (partial(_fake_add, 1),), {}),
(dp.iter.Multiplexer, None, (dp.iter.IterableWrapper(range(10)),), {}),
(dp.iter.Sampler, None, (), {}),
(dp.iter.Shuffler, dp.iter.IterableWrapper([0] * 10), (), {}),
(dp.iter.StreamReader, None, (), {}),
(dp.iter.UnBatcher, None, (0,), {}),
(dp.iter.Zipper, None, (dp.iter.IterableWrapper(range(10)),), {}),
]
# Skipping comparison for these DataPipes
dp_skip_comparison = {dp.iter.FileOpener, dp.iter.StreamReader}
# These DataPipes produce multiple DataPipes as outputs and those should be compared
dp_compare_children = {dp.iter.Demultiplexer, dp.iter.Forker}
for dpipe, custom_input, dp_args, dp_kwargs in picklable_datapipes:
if custom_input is None:
custom_input = dp.iter.IterableWrapper(range(10))
if (
dpipe in dp_skip_comparison
): # Merely make sure they are picklable and loadable (no value comparison)
datapipe = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg]
serialized_dp = pickle.dumps(datapipe)
_ = pickle.loads(serialized_dp)
elif dpipe in dp_compare_children: # DataPipes that have children
dp1, dp2 = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self._serialization_test_for_dp_with_children(dp1, dp2)
else: # Single DataPipe that requires comparison
datapipe = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self._serialization_test_for_single_dp(datapipe)
@skipIfTorchDynamo("Dict with function as keys")
def test_serializable_with_dill(self):
    """Only for DataPipes that take in a function as argument.

    Lambdas and locally-defined functions are not picklable by the stdlib
    ``pickle`` module; when ``dill`` is installed they should serialize,
    otherwise construction should warn and pickling should fail.
    """
    input_dp = dp.iter.IterableWrapper(range(10))
    # DataPipes constructed with lambda arguments (require dill to pickle).
    datapipes_with_lambda_fn: list[
        tuple[type[IterDataPipe], tuple, dict[str, Any]]
    ] = [
        (dp.iter.Collator, (lambda_fn1,), {}),
        (
            dp.iter.Demultiplexer,
            (
                2,
                lambda_fn2,
            ),
            {},
        ),
        (dp.iter.Filter, (lambda_fn3,), {}),
        (dp.iter.Grouper, (lambda_fn3,), {}),
        (dp.iter.Mapper, (lambda_fn1,), {}),
    ]

    def _local_fns():
        # Functions defined inside another function are, like lambdas,
        # not picklable without dill.
        def _fn1(x):
            return x

        def _fn2(x):
            return x % 2

        def _fn3(x):
            return x >= 5

        return _fn1, _fn2, _fn3

    fn1, fn2, fn3 = _local_fns()
    # Same DataPipe constructions, but with local functions instead of lambdas.
    datapipes_with_local_fn: list[
        tuple[type[IterDataPipe], tuple, dict[str, Any]]
    ] = [
        (dp.iter.Collator, (fn1,), {}),
        (
            dp.iter.Demultiplexer,
            (
                2,
                fn2,
            ),
            {},
        ),
        (dp.iter.Filter, (fn3,), {}),
        (dp.iter.Grouper, (fn3,), {}),
        (dp.iter.Mapper, (fn1,), {}),
    ]
    # Demultiplexer produces multiple child DataPipes; those are compared
    # via the dedicated helper instead of the single-DataPipe one.
    dp_compare_children = {dp.iter.Demultiplexer}
    if HAS_DILL:
        # dill is available: every DataPipe should round-trip through
        # serialization successfully.
        for dpipe, dp_args, dp_kwargs in (
            datapipes_with_lambda_fn + datapipes_with_local_fn
        ):
            if dpipe in dp_compare_children:
                dp1, dp2 = dpipe(input_dp, *dp_args, **dp_kwargs)  # type: ignore[call-arg]
                self._serialization_test_for_dp_with_children(
                    dp1, dp2, use_dill=True
                )
            else:
                datapipe = dpipe(input_dp, *dp_args, **dp_kwargs)  # type: ignore[call-arg]
                self._serialization_test_for_single_dp(datapipe, use_dill=True)
    else:
        # No dill: constructing with a lambda/local fn warns, and pickling
        # the resulting DataPipe raises.
        msgs = (
            r"^Lambda function is not supported by pickle",
            r"^Local function is not supported by pickle",
        )
        for dps, msg in zip(
            (datapipes_with_lambda_fn, datapipes_with_local_fn), msgs
        ):
            for dpipe, dp_args, dp_kwargs in dps:
                with self.assertWarnsRegex(UserWarning, msg):
                    datapipe = dpipe(input_dp, *dp_args, **dp_kwargs)  # type: ignore[call-arg]
                with self.assertRaises((pickle.PicklingError, AttributeError)):
                    pickle.dumps(datapipe)
def test_docstring(self):
"""
Ensure functional form of IterDataPipe has the correct docstring from
the class form.
Regression test for https://github.com/pytorch/data/issues/792.
"""
input_dp = dp.iter.IterableWrapper(range(10))
for dp_funcname in [
"batch",
"collate",
"concat",
"demux",
"filter",
"fork",
"map",
"mux",
"read_from_stream",
# "sampler",
"shuffle",
"unbatch",
"zip",
]:
docstring = pydoc.render_doc(
thing=getattr(input_dp, dp_funcname), forceload=True
)
assert f"(functional name: ``{dp_funcname}``)" in docstring
assert "Args:" in docstring
assert "Example:" in docstring or "Examples:" in docstring
def test_iterable_wrapper_datapipe(self):
    """Exercise IterableWrapper: ordering, deep/shallow copy, reset, and length."""
    source = list(range(10))
    wrapper = dp.iter.IterableWrapper(source)

    # Functional Test: values come out unchanged and in order
    self.assertEqual(source, list(wrapper))

    # Functional Test: by default the input is deep-copied lazily, when the
    # first element is read from a fresh iterator
    it = iter(wrapper)
    self.assertEqual(0, next(it))  # the deep copy happens on this first read
    source.append(50)  # must NOT be visible through the iterator
    self.assertEqual(list(range(1, 10)), list(it))

    # Functional Test: deepcopy=False keeps a live reference to the input
    shared = [1, 2, 3]
    shallow_wrapper = dp.iter.IterableWrapper(shared, deepcopy=False)
    shared.append(10)  # IS visible through the shallow wrapper
    self.assertEqual([1, 2, 3, 10], list(shallow_wrapper))

    # Reset Test: the DataPipe starts over after a reset
    source = list(range(10))
    wrapper = dp.iter.IterableWrapper(source)
    n_before = 5
    before, after = reset_after_n_next_calls(wrapper, n_before)
    self.assertEqual(source[:n_before], before)
    self.assertEqual(source, after)

    # __len__ Test: length is inherited from the wrapped sequence
    self.assertEqual(len(source), len(wrapper))
def test_concat_iterdatapipe(self):
    """Exercise Concater: argument validation, concatenation order, reset, __len__."""
    first = dp.iter.IterableWrapper(range(10))
    second = dp.iter.IterableWrapper(range(5))

    # Functional Test: Raises exception for empty input
    with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
        dp.iter.Concater()

    # Functional Test: Raises exception for non-IterDataPipe input
    with self.assertRaisesRegex(
        TypeError, r"Expected all inputs to be `IterDataPipe`"
    ):
        dp.iter.Concater(first, ())  # type: ignore[arg-type]

    # Functional Test: elements of the second pipe follow those of the first
    concatenated = first.concat(second)
    self.assertEqual(len(concatenated), 15)
    self.assertEqual(list(concatenated), list(range(10)) + list(range(5)))

    # Reset Test: reset the DataPipe partway through iteration
    n_before = 5
    before, after = reset_after_n_next_calls(concatenated, n_before)
    self.assertEqual(list(range(5)), before)
    self.assertEqual(list(range(10)) + list(range(5)), after)

    # __len__ Test: raises when any source lacks a valid length,
    # but iteration still works
    no_len = IDP_NoLen(range(5))
    concatenated = first.concat(no_len)
    with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
        len(concatenated)
    self.assertEqual(list(concatenated), list(range(10)) + list(range(5)))
def test_fork_iterdatapipe(self):
    """Exercise ``fork``: duplication into N children, buffer-size limits,
    shallow/deep copy modes, slowest-pointer bookkeeping, reset-on-new-iterator
    semantics (with warnings), ``__len__`` inheritance, and traversability.
    """
    input_dp = dp.iter.IterableWrapper(range(10))

    # num_instances must be >= 1
    with self.assertRaises(ValueError):
        input_dp.fork(num_instances=0)

    # Forking into a single instance is a no-op: the source itself is returned.
    dp0 = input_dp.fork(num_instances=1, buffer_size=0)
    self.assertEqual(dp0, input_dp)

    # Functional Test: making sure all child DataPipe shares the same reference
    dp1, dp2, dp3 = input_dp.fork(num_instances=3)
    self.assertTrue(all(n1 is n2 and n1 is n3 for n1, n2, n3 in zip(dp1, dp2, dp3)))

    # Functional Test: one child DataPipe yields all value at a time
    output1, output2, output3 = list(dp1), list(dp2), list(dp3)
    self.assertEqual(list(range(10)), output1)
    self.assertEqual(list(range(10)), output2)
    self.assertEqual(list(range(10)), output3)

    # Functional Test: two child DataPipes yield value together
    dp1, dp2 = input_dp.fork(num_instances=2)
    output = []
    for n1, n2 in zip(dp1, dp2):
        output.append((n1, n2))
    self.assertEqual([(i, i) for i in range(10)], output)

    # Functional Test: one child DataPipe yields all values first, but the
    # buffer (4, then 5) is too small to hold the lag between children
    dp1, dp2 = input_dp.fork(num_instances=2, buffer_size=4)
    it1 = iter(dp1)
    for _ in range(4):
        next(it1)
    with self.assertRaises(BufferError):
        next(it1)
    with self.assertRaises(BufferError):
        list(dp2)
    dp1, dp2 = input_dp.fork(num_instances=2, buffer_size=5)
    with self.assertRaises(BufferError):
        list(dp2)

    # Functional Test: one child DataPipe yields all value first with unlimited buffer
    with warnings.catch_warnings(record=True) as wa:
        dp1, dp2 = input_dp.fork(num_instances=2, buffer_size=-1)
        self.assertEqual(len(wa), 1)
        self.assertRegex(str(wa[0].message), r"Unlimited buffer size is set")
    l1, l2 = list(dp1), list(dp2)
    for d1, d2 in zip(l1, l2):
        self.assertEqual(d1, d2)

    # Functional Test: two child DataPipes yield value together with buffer size 1
    dp1, dp2 = input_dp.fork(num_instances=2, buffer_size=1)
    output = []
    for n1, n2 in zip(dp1, dp2):
        output.append((n1, n2))
    self.assertEqual([(i, i) for i in range(10)], output)

    # Functional Test: two child DataPipes yield shallow copies with copy equals shallow
    dp1, dp2 = input_dp.map(_to_list).fork(num_instances=2, copy="shallow")
    for n1, n2 in zip(dp1, dp2):
        self.assertIsNot(n1, n2)  # distinct outer objects
        self.assertEqual(n1, n2)

    # Functional Test: two child DataPipes yield deep copies with copy equals deep
    dp1, dp2 = (
        input_dp.map(_to_list).map(_to_list).fork(num_instances=2, copy="deep")
    )
    for n1, n2 in zip(dp1, dp2):
        self.assertIsNot(n1[0], n2[0])  # nested objects are distinct too
        self.assertEqual(n1, n2)

    # Functional Test: fork DataPipe raises error for unknown copy method
    with self.assertRaises(ValueError):
        input_dp.fork(num_instances=2, copy="unknown")

    # Functional Test: make sure logic related to slowest_ptr is working properly
    dp1, dp2, dp3 = input_dp.fork(num_instances=3)
    output1, output2, output3 = [], [], []
    for i, (n1, n2) in enumerate(zip(dp1, dp2)):
        output1.append(n1)
        output2.append(n2)
        if i == 4:  # yield all of dp3 when halfway through dp1, dp2
            output3 = list(dp3)
            break
    self.assertEqual(list(range(5)), output1)
    self.assertEqual(list(range(5)), output2)
    self.assertEqual(list(range(10)), output3)

    # Reset Test: DataPipe resets when a new iterator is created, even if this datapipe hasn't been read
    dp1, dp2 = input_dp.fork(num_instances=2)
    _ = iter(dp1)
    output2 = []
    with self.assertRaisesRegex(RuntimeError, r"iterator has been invalidated"):
        for i, n2 in enumerate(dp2):
            output2.append(n2)
            if i == 4:
                with warnings.catch_warnings(record=True) as wa:
                    _ = iter(dp1)  # This will reset all child DataPipes
                    self.assertEqual(len(wa), 1)
                    self.assertRegex(
                        str(wa[0].message), r"child DataPipes are not exhausted"
                    )
    self.assertEqual(list(range(5)), output2)

    # Reset Test: DataPipe resets when some of it has been read
    dp1, dp2 = input_dp.fork(num_instances=2)
    output1, output2 = [], []
    for i, (n1, n2) in enumerate(zip(dp1, dp2)):
        output1.append(n1)
        output2.append(n2)
        if i == 4:
            with warnings.catch_warnings(record=True) as wa:
                _ = iter(dp1)  # Reset both all child DataPipe
                self.assertEqual(len(wa), 1)
                self.assertRegex(
                    str(wa[0].message), r"Some child DataPipes are not exhausted"
                )
            break
    with warnings.catch_warnings(record=True) as wa:
        # Continue reading after the reset: both children restart from 0.
        for n1, n2 in zip(dp1, dp2):
            output1.append(n1)
            output2.append(n2)
        self.assertEqual(len(wa), 1)
        self.assertRegex(str(wa[0].message), r"child DataPipes are not exhausted")
    self.assertEqual(list(range(5)) + list(range(10)), output1)
    self.assertEqual(list(range(5)) + list(range(10)), output2)

    # Reset Test: DataPipe reset, even when some other child DataPipes are not read
    dp1, dp2, dp3 = input_dp.fork(num_instances=3)
    output1, output2 = list(dp1), list(dp2)
    self.assertEqual(list(range(10)), output1)
    self.assertEqual(list(range(10)), output2)
    with warnings.catch_warnings(record=True) as wa:
        self.assertEqual(
            list(range(10)), list(dp1)
        )  # Resets even though dp3 has not been read
        self.assertEqual(len(wa), 1)
        self.assertRegex(
            str(wa[0].message), r"Some child DataPipes are not exhausted"
        )
    output3 = []
    for i, n3 in enumerate(dp3):
        output3.append(n3)
        if i == 4:
            with warnings.catch_warnings(record=True) as wa:
                output1 = list(dp1)  # Resets even though dp3 is only partially read
                self.assertEqual(len(wa), 1)
                self.assertRegex(
                    str(wa[0].message), r"Some child DataPipes are not exhausted"
                )
            self.assertEqual(list(range(5)), output3)
            self.assertEqual(list(range(10)), output1)
            break
    self.assertEqual(
        list(range(10)), list(dp3)
    )  # dp3 has to read from the start again

    # __len__ Test: Each DataPipe inherits the source datapipe's length
    dp1, dp2, dp3 = input_dp.fork(num_instances=3)
    self.assertEqual(len(input_dp), len(dp1))
    self.assertEqual(len(input_dp), len(dp2))
    self.assertEqual(len(input_dp), len(dp3))

    # Pickle Test:
    dp1, dp2, dp3 = input_dp.fork(num_instances=3)
    traverse_dps(dp1)  # This should not raise any error
    for _ in zip(dp1, dp2, dp3):
        pass
    traverse_dps(dp2)  # This should not raise any error either
def test_mux_iterdatapipe(self):
    """Exercise Multiplexer: round-robin yielding, uneven/empty inputs, __len__."""
    # Functional Test: one element is taken from each input in turn,
    # until every input is exhausted
    source_a = dp.iter.IterableWrapper(range(4))
    source_b = dp.iter.IterableWrapper(range(4, 8))
    source_c = dp.iter.IterableWrapper(range(8, 12))
    muxed = source_a.mux(source_b, source_c)
    expected = [0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11]
    self.assertEqual(len(expected), len(muxed))
    self.assertEqual(expected, list(muxed))

    # Functional Test: uneven inputs stop at the first exhausted one
    source_a = dp.iter.IterableWrapper([1, 2, 3, 4])
    source_b = dp.iter.IterableWrapper([10])
    source_c = dp.iter.IterableWrapper([100, 200, 300])
    muxed = source_a.mux(source_b, source_c)
    expected = [1, 10, 100]
    self.assertEqual(len(expected), len(muxed))
    self.assertEqual(expected, list(muxed))

    # Functional Test: an empty input empties the whole output
    source_a = dp.iter.IterableWrapper([0, 1, 2, 3])
    empty = dp.iter.IterableWrapper([])
    muxed = source_a.mux(empty)
    self.assertEqual(len(empty), len(muxed))
    self.assertEqual(list(empty), list(muxed))

    # __len__ Test: raises TypeError when any input lacks a valid __len__
    source_a = dp.iter.IterableWrapper(range(10))
    no_len = IDP_NoLen(range(10))
    muxed = source_a.mux(no_len)
    with self.assertRaises(TypeError):
        len(muxed)
def test_demux_iterdatapipe(self):
    """Exercise ``demux``: classifier-based splitting, buffer-size limits,
    ``drop_none`` handling, reset-on-new-iterator semantics (with warnings),
    unimplemented ``__len__``, and traversability.
    """
    input_dp = dp.iter.IterableWrapper(range(10))

    # num_instances must be >= 1
    with self.assertRaises(ValueError):
        input_dp.demux(num_instances=0, classifier_fn=lambda x: 0)

    # Functional Test: split into 2 DataPipes and output them one at a time
    dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
    output1, output2 = list(dp1), list(dp2)
    self.assertEqual(list(range(0, 10, 2)), output1)
    self.assertEqual(list(range(1, 10, 2)), output2)

    # Functional Test: split into 2 DataPipes and output them together
    dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
    output = []
    for n1, n2 in zip(dp1, dp2):
        output.append((n1, n2))
    self.assertEqual([(i, i + 1) for i in range(0, 10, 2)], output)

    # Functional Test: values of the same classification are lumped together,
    # and buffer_size = 4 is too small
    dp1, dp2 = input_dp.demux(
        num_instances=2, classifier_fn=lambda x: 0 if x >= 5 else 1, buffer_size=4
    )
    it1 = iter(dp1)
    with self.assertRaises(BufferError):
        next(
            it1
        )  # Buffer raises because the first 5 elements all belong to a different child
    with self.assertRaises(BufferError):
        list(dp2)

    # Functional Test: values of the same classification are lumped together, and buffer_size = 5 is just enough
    dp1, dp2 = input_dp.demux(
        num_instances=2, classifier_fn=lambda x: 0 if x >= 5 else 1, buffer_size=5
    )
    output1, output2 = list(dp1), list(dp2)
    self.assertEqual(list(range(5, 10)), output1)
    self.assertEqual(list(range(5)), output2)

    # Functional Test: values of the same classification are lumped together, and unlimited buffer
    with warnings.catch_warnings(record=True) as wa:
        dp1, dp2 = input_dp.demux(
            num_instances=2,
            classifier_fn=lambda x: 0 if x >= 5 else 1,
            buffer_size=-1,
        )
        # Without dill an extra lambda-pickling warning is emitted first.
        exp_l = 1 if HAS_DILL else 2
        self.assertEqual(len(wa), exp_l)
        self.assertRegex(str(wa[-1].message), r"Unlimited buffer size is set")
    output1, output2 = list(dp1), list(dp2)
    self.assertEqual(list(range(5, 10)), output1)
    self.assertEqual(list(range(5)), output2)

    # Functional Test: classifier returns a value outside of [0, num_instance - 1]
    dp0 = input_dp.demux(num_instances=1, classifier_fn=lambda x: x % 2)
    it = iter(dp0[0])
    with self.assertRaises(ValueError):
        next(it)
        next(it)

    # Reset Test: DataPipe resets when a new iterator is created, even if this datapipe hasn't been read
    dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
    _ = iter(dp1)
    output2 = []
    with self.assertRaisesRegex(RuntimeError, r"iterator has been invalidated"):
        for i, n2 in enumerate(dp2):
            output2.append(n2)
            if i == 4:
                with warnings.catch_warnings(record=True) as wa:
                    _ = iter(dp1)  # This will reset all child DataPipes
                    self.assertEqual(len(wa), 1)
                    self.assertRegex(
                        str(wa[0].message), r"child DataPipes are not exhausted"
                    )
    self.assertEqual(list(range(1, 10, 2)), output2)

    # Reset Test: DataPipe resets when some of it has been read
    dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
    output1, output2 = [], []
    for n1, n2 in zip(dp1, dp2):
        output1.append(n1)
        output2.append(n2)
        if n1 == 4:
            break
    with warnings.catch_warnings(record=True) as wa:
        iter(dp1)  # Reset all child DataPipes
        self.assertEqual(len(wa), 1)
        self.assertRegex(
            str(wa[0].message), r"Some child DataPipes are not exhausted"
        )
        # Continue reading after the reset: both children restart from 0.
        for n1, n2 in zip(dp1, dp2):
            output1.append(n1)
            output2.append(n2)
    self.assertEqual([0, 2, 4] + list(range(0, 10, 2)), output1)
    self.assertEqual([1, 3, 5] + list(range(1, 10, 2)), output2)
    self.assertEqual(len(wa), 1)
    self.assertRegex(str(wa[0].message), r"child DataPipes are not exhausted")

    # Reset Test: DataPipe reset, even when not all child DataPipes are exhausted
    dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
    output1 = list(dp1)
    self.assertEqual(list(range(0, 10, 2)), output1)
    with warnings.catch_warnings(record=True) as wa:
        self.assertEqual(
            list(range(0, 10, 2)), list(dp1)
        )  # Reset even when dp2 is not read
        self.assertEqual(len(wa), 1)
        self.assertRegex(
            str(wa[0].message), r"Some child DataPipes are not exhausted"
        )
    output2 = []
    for i, n2 in enumerate(dp2):
        output2.append(n2)
        if i == 1:
            self.assertEqual(list(range(1, 5, 2)), output2)
            with warnings.catch_warnings(record=True) as wa:
                self.assertEqual(
                    list(range(0, 10, 2)), list(dp1)
                )  # Can reset even when dp2 is partially read
                self.assertEqual(len(wa), 1)
                self.assertRegex(
                    str(wa[0].message), r"Some child DataPipes are not exhausted"
                )
            break
    output2 = list(dp2)  # output2 has to read from beginning again
    self.assertEqual(list(range(1, 10, 2)), output2)

    # Functional Test: drop_none = True
    dp1, dp2 = input_dp.demux(
        num_instances=2,
        classifier_fn=lambda x: x % 2 if x % 5 != 0 else None,
        drop_none=True,
    )
    self.assertEqual([2, 4, 6, 8], list(dp1))
    self.assertEqual([1, 3, 7, 9], list(dp2))

    # Functional Test: drop_none = False
    dp1, dp2 = input_dp.demux(
        num_instances=2,
        classifier_fn=lambda x: x % 2 if x % 5 != 0 else None,
        drop_none=False,
    )
    it1 = iter(dp1)
    with self.assertRaises(ValueError):
        next(it1)

    # __len__ Test: __len__ not implemented
    dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
    with self.assertRaises(TypeError):
        len(
            dp1
        )  # It is not implemented as we do not know length for each child in advance
    with self.assertRaises(TypeError):
        len(dp2)

    # Pickle Test:
    dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=odd_or_even)
    traverse_dps(dp1)  # This should not raise any error
    for _ in zip(dp1, dp2):
        pass
    traverse_dps(dp2)  # This should not raise any error either
def test_map_iterdatapipe(self):
    """Exercise Mapper: plain/partial functions, __len__ inheritance, reset."""
    target_length = 10
    source = dp.iter.IterableWrapper(range(target_length))

    def fn(item, dtype=torch.float, *, sum=False):
        data = torch.tensor(item, dtype=dtype)
        return data.sum() if sum else data

    # Functional Test: the function is applied to every element
    mapped = source.map(fn)
    self.assertEqual(target_length, len(mapped))
    for actual, expected in zip(mapped, range(target_length)):
        self.assertEqual(actual, torch.tensor(expected, dtype=torch.float))

    # Functional Test: works with a partial function
    mapped = source.map(partial(fn, dtype=torch.int, sum=True))
    for actual, expected in zip(mapped, range(target_length)):
        self.assertEqual(actual, torch.tensor(expected, dtype=torch.int).sum())

    # __len__ Test: inherits length from the source DataPipe
    self.assertEqual(target_length, len(mapped))
    source_no_len = IDP_NoLen(range(target_length))
    mapped_no_len = source_no_len.map(lambda x: x)
    for actual, expected in zip(mapped_no_len, range(target_length)):
        self.assertEqual(actual, torch.tensor(expected, dtype=torch.float))

    # __len__ Test: raises when the source has no valid length
    with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
        len(mapped_no_len)

    # Reset Test: DataPipe resets properly
    n_before = 5
    before, after = reset_after_n_next_calls(mapped, n_before)
    self.assertEqual(list(range(n_before)), before)
    self.assertEqual(list(range(10)), after)
@suppress_warnings  # Suppress warning for lambda fn
def test_map_tuple_list_with_col_iterdatapipe(self):
    """Exercise ``map`` with ``input_col``/``output_col`` over tuple and list
    elements: each case compares against a reference lambda applied to whole
    rows, or asserts the expected error for invalid column/function pairings.
    """
    # Sample functions with varying signatures: 1-in/1-out, 1-in/n-out,
    # n-in/1-out, n-in/n-out, plus defaults, *args, **kwargs, and
    # keyword-only variants used to probe argument matching.
    def fn_11(d):
        return -d

    def fn_1n(d):
        return -d, d

    def fn_n1(d0, d1):
        return d0 + d1

    def fn_nn(d0, d1):
        return -d0, -d1, d0 + d1

    def fn_n1_def(d0, d1=1):
        return d0 + d1

    def fn_n1_kwargs(d0, d1, **kwargs):
        return d0 + d1

    def fn_n1_pos(d0, d1, *args):
        return d0 + d1

    def fn_n1_sep_pos(d0, *args, d1):
        return d0 + d1

    def fn_cmplx(d0, d1=1, *args, d2, **kwargs):
        return d0 + d1

    p_fn_n1 = partial(fn_n1, d1=1)
    p_fn_cmplx = partial(fn_cmplx, d2=2)
    # Large bound argument: exercises serialization/handling of big partials.
    p_fn_cmplx_large_arg = partial(
        fn_cmplx, d2={i: list(range(i)) for i in range(10_000)}
    )

    def _helper(ref_fn, fn, input_col=None, output_col=None, error=None):
        # Runs the case for both list and tuple rows. When ref_fn is None the
        # mapping is expected to raise `error`; otherwise the column-mapped
        # result must equal ref_fn applied to the whole row (checked twice to
        # also cover reset behavior).
        for constr in (list, tuple):
            datapipe = dp.iter.IterableWrapper(
                [constr((0, 1, 2)), constr((3, 4, 5)), constr((6, 7, 8))]
            )
            if ref_fn is None:
                with self.assertRaises(error):
                    res_dp = datapipe.map(fn, input_col, output_col)
                    list(res_dp)
            else:
                res_dp = datapipe.map(fn, input_col, output_col)
                ref_dp = datapipe.map(ref_fn)
                self.assertEqual(list(res_dp), list(ref_dp))
                # Reset
                self.assertEqual(list(res_dp), list(ref_dp))

    _helper(lambda data: data, fn_n1_def, 0, 1)
    _helper(
        lambda data: (data[0], data[1], data[0] + data[1]), fn_n1_def, [0, 1], 2
    )
    _helper(lambda data: data, p_fn_n1, 0, 1)
    _helper(lambda data: data, p_fn_cmplx, 0, 1)
    _helper(lambda data: data, p_fn_cmplx_large_arg, 0, 1)
    _helper(
        lambda data: (data[0], data[1], data[0] + data[1]), p_fn_cmplx, [0, 1], 2
    )
    _helper(lambda data: (data[0] + data[1],), fn_n1_pos, [0, 1, 2])

    # Replacing with one input column and default output column
    _helper(lambda data: (data[0], -data[1], data[2]), fn_11, 1)
    _helper(lambda data: (data[0], (-data[1], data[1]), data[2]), fn_1n, 1)
    # The index of input column is out of range
    _helper(None, fn_1n, 3, error=IndexError)
    # Unmatched input columns with fn arguments
    _helper(None, fn_n1, 1, error=ValueError)
    _helper(None, fn_n1, [0, 1, 2], error=ValueError)
    _helper(None, operator.add, 0, error=ValueError)
    _helper(None, operator.add, [0, 1, 2], error=ValueError)
    _helper(None, fn_cmplx, 0, 1, ValueError)
    _helper(None, fn_n1_pos, 1, error=ValueError)
    _helper(None, fn_n1_def, [0, 1, 2], 1, error=ValueError)
    _helper(None, p_fn_n1, [0, 1], error=ValueError)
    _helper(None, fn_1n, [1, 2], error=ValueError)
    # _helper(None, p_fn_cmplx, [0, 1, 2], error=ValueError)
    _helper(None, fn_n1_sep_pos, [0, 1, 2], error=ValueError)
    # Fn has keyword-only arguments
    _helper(None, fn_n1_kwargs, 1, error=ValueError)
    _helper(None, fn_cmplx, [0, 1], 2, ValueError)

    # Replacing with multiple input columns and default output column (the left-most input column)
    _helper(lambda data: (data[1], data[2] + data[0]), fn_n1, [2, 0])
    _helper(
        lambda data: (data[0], (-data[2], -data[1], data[2] + data[1])),
        fn_nn,
        [2, 1],
    )

    # output_col can only be specified when input_col is not None
    _helper(None, fn_n1, None, 1, error=ValueError)
    # output_col can only be single-element list or tuple
    _helper(None, fn_n1, None, [0, 1], error=ValueError)
    # Single-element list as output_col
    _helper(lambda data: (-data[1], data[1], data[2]), fn_11, 1, [0])
    # Replacing with one input column and single specified output column
    _helper(lambda data: (-data[1], data[1], data[2]), fn_11, 1, 0)
    _helper(lambda data: (data[0], data[1], (-data[1], data[1])), fn_1n, 1, 2)
    # The index of output column is out of range
    _helper(None, fn_1n, 1, 3, error=IndexError)
    _helper(lambda data: (data[0], data[0] + data[2], data[2]), fn_n1, [0, 2], 1)
    _helper(
        lambda data: ((-data[1], -data[2], data[1] + data[2]), data[1], data[2]),
        fn_nn,
        [1, 2],
        0,
    )

    # Appending the output at the end
    _helper(lambda data: (*data, -data[1]), fn_11, 1, -1)
    _helper(lambda data: (*data, (-data[1], data[1])), fn_1n, 1, -1)
    _helper(lambda data: (*data, data[0] + data[2]), fn_n1, [0, 2], -1)
    _helper(
        lambda data: (*data, (-data[1], -data[2], data[1] + data[2])),
        fn_nn,
        [1, 2],
        -1,
    )

    # Handling built-in functions (e.g. `dict`, `iter`, `int`, `str`) whose signatures cannot be inspected
    _helper(lambda data: (str(data[0]), data[1], data[2]), str, 0)
    _helper(lambda data: (data[0], data[1], int(data[2])), int, 2)

    # Handle nn.Module and Callable (without __name__ implemented)
    _helper(lambda data: (data[0] + 1, data[1], data[2]), Add1Module(), 0)
    _helper(lambda data: (data[0] + 1, data[1], data[2]), Add1Callable(), 0)
@suppress_warnings  # Suppress warning for lambda fn
@skipIfTorchDynamo()
def test_map_dict_with_col_iterdatapipe(self):
    """Exercise ``map`` with ``input_col``/``output_col`` over dict elements:
    each case compares against a reference lambda applied to whole dicts, or
    asserts the expected error for invalid key/function pairings.
    """
    # Sample functions with varying signatures (see the tuple/list variant of
    # this test) used to probe argument matching against dict columns.
    def fn_11(d):
        return -d

    def fn_1n(d):
        return -d, d

    def fn_n1(d0, d1):
        return d0 + d1

    def fn_nn(d0, d1):
        return -d0, -d1, d0 + d1

    def fn_n1_def(d0, d1=1):
        return d0 + d1

    p_fn_n1 = partial(fn_n1, d1=1)

    def fn_n1_pos(d0, d1, *args):
        return d0 + d1

    def fn_n1_kwargs(d0, d1, **kwargs):
        return d0 + d1

    def fn_kwonly(*, d0, d1):
        return d0 + d1

    def fn_has_nondefault_kwonly(d0, *, d1):
        return d0 + d1

    def fn_cmplx(d0, d1=1, *args, d2, **kwargs):
        return d0 + d1

    p_fn_cmplx = partial(fn_cmplx, d2=2)
    # Large bound argument: exercises handling of big partials.
    p_fn_cmplx_large_arg = partial(
        fn_cmplx, d2={i: list(range(i)) for i in range(10_000)}
    )

    # Prevent modification in-place to support resetting
    def _dict_update(data, newdata, remove_idx=None):
        _data = dict(data)
        _data.update(newdata)
        if remove_idx:
            for idx in remove_idx:
                del _data[idx]
        return _data

    def _helper(ref_fn, fn, input_col=None, output_col=None, error=None):
        # When ref_fn is None the mapping is expected to raise `error`;
        # otherwise the column-mapped result must equal ref_fn applied to the
        # whole dict (checked twice to also cover reset behavior).
        datapipe = dp.iter.IterableWrapper(
            [
                {"x": 0, "y": 1, "z": 2},
                {"x": 3, "y": 4, "z": 5},
                {"x": 6, "y": 7, "z": 8},
            ]
        )
        if ref_fn is None:
            with self.assertRaises(error):
                res_dp = datapipe.map(fn, input_col, output_col)
                list(res_dp)
        else:
            res_dp = datapipe.map(fn, input_col, output_col)
            ref_dp = datapipe.map(ref_fn)
            self.assertEqual(list(res_dp), list(ref_dp))
            # Reset
            self.assertEqual(list(res_dp), list(ref_dp))

    _helper(lambda data: data, fn_n1_def, "x", "y")
    _helper(lambda data: data, p_fn_n1, "x", "y")
    _helper(lambda data: data, p_fn_cmplx, "x", "y")
    _helper(lambda data: data, p_fn_cmplx_large_arg, "x", "y")
    _helper(
        lambda data: _dict_update(data, {"z": data["x"] + data["y"]}),
        p_fn_cmplx,
        ["x", "y", "z"],
        "z",
    )
    _helper(
        lambda data: _dict_update(data, {"z": data["x"] + data["y"]}),
        fn_n1_def,
        ["x", "y"],
        "z",
    )
    _helper(None, fn_n1_pos, "x", error=ValueError)
    _helper(None, fn_n1_kwargs, "x", error=ValueError)
    # non-default kw-only args
    _helper(None, fn_kwonly, ["x", "y"], error=ValueError)
    _helper(None, fn_has_nondefault_kwonly, ["x", "y"], error=ValueError)
    _helper(None, fn_cmplx, ["x", "y"], error=ValueError)

    # Replacing with one input column and default output column
    _helper(lambda data: _dict_update(data, {"y": -data["y"]}), fn_11, "y")
    _helper(
        lambda data: _dict_update(data, {"y": (-data["y"], data["y"])}), fn_1n, "y"
    )
    # The key of input column is not in dict
    _helper(None, fn_1n, "a", error=KeyError)
    # Unmatched input columns with fn arguments
    _helper(None, fn_n1, "y", error=ValueError)
    _helper(None, fn_1n, ["x", "y"], error=ValueError)
    _helper(None, fn_n1_def, ["x", "y", "z"], error=ValueError)
    _helper(None, p_fn_n1, ["x", "y"], error=ValueError)
    _helper(None, fn_n1_kwargs, ["x", "y", "z"], error=ValueError)

    # Replacing with multiple input columns and default output column (the left-most input column)
    _helper(
        lambda data: _dict_update(data, {"z": data["x"] + data["z"]}, ["x"]),
        fn_n1,
        ["z", "x"],
    )
    _helper(
        lambda data: _dict_update(
            data, {"z": (-data["z"], -data["y"], data["y"] + data["z"])}, ["y"]
        ),
        fn_nn,
        ["z", "y"],
    )

    # output_col can only be specified when input_col is not None
    _helper(None, fn_n1, None, "x", error=ValueError)
    # output_col can only be single-element list or tuple
    _helper(None, fn_n1, None, ["x", "y"], error=ValueError)
    # Single-element list as output_col
    _helper(lambda data: _dict_update(data, {"x": -data["y"]}), fn_11, "y", ["x"])
    # Replacing with one input column and single specified output column
    _helper(lambda data: _dict_update(data, {"x": -data["y"]}), fn_11, "y", "x")
    _helper(
        lambda data: _dict_update(data, {"z": (-data["y"], data["y"])}),
        fn_1n,
        "y",
        "z",
    )
    _helper(
        lambda data: _dict_update(data, {"y": data["x"] + data["z"]}),
        fn_n1,
        ["x", "z"],
        "y",
    )
    _helper(
        lambda data: _dict_update(
            data, {"x": (-data["y"], -data["z"], data["y"] + data["z"])}
        ),
        fn_nn,
        ["y", "z"],
        "x",
    )

    # Adding new key to dict for the output
    _helper(lambda data: _dict_update(data, {"a": -data["y"]}), fn_11, "y", "a")
    _helper(
        lambda data: _dict_update(data, {"a": (-data["y"], data["y"])}),
        fn_1n,
        "y",
        "a",
    )
    _helper(
        lambda data: _dict_update(data, {"a": data["x"] + data["z"]}),
        fn_n1,
        ["x", "z"],
        "a",
    )
    _helper(
        lambda data: _dict_update(
            data, {"a": (-data["y"], -data["z"], data["y"] + data["z"])}
        ),
        fn_nn,
        ["y", "z"],
        "a",
    )
def test_collate_iterdatapipe(self):
    """Exercise Collator: default/custom/partial collate functions, reset, __len__."""
    rows = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    source = dp.iter.IterableWrapper(rows)

    def _collate_fn(batch, default_type=torch.float):
        # Collapse a batch into a single tensor holding its element sum.
        return torch.tensor(sum(batch), dtype=default_type)

    # Functional Test: defaults to the default collate function when a custom one is not specified
    collated = source.collate()
    for row, result in zip(rows, collated):
        self.assertEqual(torch.tensor(row), result)

    # Functional Test: custom collate function
    collated = source.collate(collate_fn=_collate_fn)
    for row, result in zip(rows, collated):
        self.assertEqual(torch.tensor(sum(row), dtype=torch.float), result)

    # Functional Test: custom, partial collate function
    collated = source.collate(partial(_collate_fn, default_type=torch.int))
    for row, result in zip(rows, collated):
        self.assertEqual(torch.tensor(sum(row), dtype=torch.int), result)

    # Reset Test: reset the DataPipe and results are still correct
    n_before = 1
    before, after = reset_after_n_next_calls(collated, n_before)
    self.assertEqual([torch.tensor(6, dtype=torch.int)], before)
    for row, result in zip(rows, after):
        self.assertEqual(torch.tensor(sum(row), dtype=torch.int), result)

    # __len__ Test: __len__ is inherited
    self.assertEqual(len(source), len(collated))

    # __len__ Test: no valid __len__ when the source doesn't have one,
    # but iteration still works
    source_no_len = IDP_NoLen(rows)
    collated_no_len = source_no_len.collate()
    with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
        len(collated_no_len)
    for row, result in zip(rows, collated_no_len):
        self.assertEqual(torch.tensor(row), result)
def test_batch_iterdatapipe(self):
    """Exercise Batcher: batch sizes, drop_last, __len__, and reset."""
    values = list(range(10))
    source = dp.iter.IterableWrapper(values)

    # Functional Test: raise error when input argument `batch_size = 0`
    with self.assertRaises(AssertionError):
        source.batch(batch_size=0)

    # Functional Test: by default the last (partial) batch is kept
    bs = 3
    batched = source.batch(batch_size=bs)
    self.assertEqual(len(batched), 4)
    for i, batch in enumerate(batched):
        self.assertEqual(len(batch), 1 if i == 3 else bs)
        self.assertEqual(batch, values[i * bs : i * bs + len(batch)])

    # Functional Test: Drop the last batch when specified
    bs = 4
    batched = source.batch(batch_size=bs, drop_last=True)
    for i, batch in enumerate(batched):
        self.assertEqual(batch, values[i * bs : i * bs + len(batch)])

    # __len__ test: overall length and per-batch length with drop_last=True
    for i, batch in enumerate(batched):
        self.assertEqual(len(batch), bs)
    self.assertEqual(len(batched), 2)

    # __len__ Test: the length is missing if the source DataPipe doesn't have length
    source_no_len = IDP_NoLen(range(10))
    batched_no_len = source_no_len.batch(batch_size=2)
    with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
        len(batched_no_len)

    # Reset Test: Ensures that the DataPipe can properly reset
    n_before = 1
    before, after = reset_after_n_next_calls(batched, n_before)
    self.assertEqual([[0, 1, 2, 3]], before)
    self.assertEqual([[0, 1, 2, 3], [4, 5, 6, 7]], after)
def test_unbatch_iterdatapipe(self):
    """Exercise UnBatcher: flattening one, several, or all nesting levels; reset."""
    target_length = 6
    flat_source = dp.iter.IterableWrapper(range(target_length))

    # Functional Test: unbatching undoes a batch
    batched = flat_source.batch(3)
    unbatched = batched.unbatch()
    self.assertEqual(len(list(unbatched)), target_length)  # __len__ is as expected
    for expected, actual in zip(range(target_length), unbatched):
        self.assertEqual(expected, actual)

    # Functional Test: unbatch works for an input with nested levels
    nested = dp.iter.IterableWrapper([[0, 1, 2], [3, 4, 5]])
    unbatched = nested.unbatch()
    self.assertEqual(len(list(unbatched)), target_length)
    for expected, actual in zip(range(target_length), unbatched):
        self.assertEqual(expected, actual)

    doubly_nested = dp.iter.IterableWrapper([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])

    # Functional Test: one level of a doubly-nested input is removed
    unbatched = doubly_nested.unbatch()
    expected_pairs = [[0, 1], [2, 3], [4, 5], [6, 7]]
    self.assertEqual(len(list(unbatched)), 4)
    for expected, actual in zip(expected_pairs, unbatched):
        self.assertEqual(expected, actual)

    # Functional Test: unbatching multiple levels at the same time
    unbatched = doubly_nested.unbatch(unbatch_level=2)
    expected_flat = [0, 1, 2, 3, 4, 5, 6, 7]
    self.assertEqual(len(list(unbatched)), 8)
    for expected, actual in zip(expected_flat, unbatched):
        self.assertEqual(expected, actual)

    # Functional Test: unbatching all levels at the same time
    unbatched = doubly_nested.unbatch(unbatch_level=-1)
    self.assertEqual(len(list(unbatched)), 8)
    for expected, actual in zip(expected_flat, unbatched):
        self.assertEqual(expected, actual)

    # Functional Test: raises error when input unbatch_level is less than -1
    nested = dp.iter.IterableWrapper([[0, 1, 2], [3, 4, 5]])
    with self.assertRaises(ValueError):
        unbatched = nested.unbatch(unbatch_level=-2)
        for item in unbatched:
            print(item)

    # Functional Test: raises error when input unbatch_level is too high
    with self.assertRaises(IndexError):
        unbatched = nested.unbatch(unbatch_level=5)
        for item in unbatched:
            print(item)

    # Reset Test: unbatch_dp resets properly
    nested = dp.iter.IterableWrapper([[0, 1, 2], [3, 4, 5]])
    unbatched = nested.unbatch(unbatch_level=-1)
    n_before = 3
    before, after = reset_after_n_next_calls(unbatched, n_before)
    self.assertEqual([0, 1, 2], before)
    self.assertEqual([0, 1, 2, 3, 4, 5], after)
def test_filter_datapipe(self):
input_ds = dp.iter.IterableWrapper(range(10))
def _filter_fn(data, val):
return data >= val
# Functional Test: filter works with partial function
filter_dp = input_ds.filter(partial(_filter_fn, val=5))
self.assertEqual(list(filter_dp), list(range(5, 10)))
def _non_bool_fn(data):
return 1
# Functional Test: filter function must return bool
filter_dp = input_ds.filter(filter_fn=_non_bool_fn)
with self.assertRaises(ValueError):
list(filter_dp)
# Functional Test: Specify input_col
tuple_input_ds = dp.iter.IterableWrapper([(d - 1, d, d + 1) for d in range(10)])
# Single input_col
input_col_1_dp = tuple_input_ds.filter(partial(_filter_fn, val=5), input_col=1)
self.assertEqual(
list(input_col_1_dp), [(d - 1, d, d + 1) for d in range(5, 10)]
)
# Multiple input_col
def _mul_filter_fn(a, b):
return a + b < 10
input_col_2_dp = tuple_input_ds.filter(_mul_filter_fn, input_col=[0, 2])
self.assertEqual(list(input_col_2_dp), [(d - 1, d, d + 1) for d in range(5)])
# invalid input col
with self.assertRaises(ValueError):
tuple_input_ds.filter(_mul_filter_fn, input_col=0)
p_mul_filter_fn = partial(_mul_filter_fn, b=1)
out = tuple_input_ds.filter(p_mul_filter_fn, input_col=0)
self.assertEqual(list(out), [(d - 1, d, d + 1) for d in range(10)])
def _mul_filter_fn_with_defaults(a, b=1):
return a + b < 10
out = tuple_input_ds.filter(_mul_filter_fn_with_defaults, input_col=0)
self.assertEqual(list(out), [(d - 1, d, d + 1) for d in range(10)])
def _mul_filter_fn_with_kw_only(*, a, b):
return a + b < 10
with self.assertRaises(ValueError):
tuple_input_ds.filter(_mul_filter_fn_with_kw_only, input_col=0)
def _mul_filter_fn_with_kw_only_1_default(*, a, b=1):
return a + b < 10
with self.assertRaises(ValueError):
tuple_input_ds.filter(_mul_filter_fn_with_kw_only_1_default, input_col=0)
# __len__ Test: DataPipe has no valid len
with self.assertRaisesRegex(TypeError, r"has no len"):
len(filter_dp)
# Reset Test: DataPipe resets correctly
filter_dp = input_ds.filter(partial(_filter_fn, val=5))
n_elements_before_reset = 3
res_before_reset, res_after_reset = reset_after_n_next_calls(
filter_dp, n_elements_before_reset
)
self.assertEqual(list(range(5, 10))[:n_elements_before_reset], res_before_reset)
self.assertEqual(list(range(5, 10)), res_after_reset)
def test_sampler_iterdatapipe(self):
input_dp = dp.iter.IterableWrapper(range(10))
# Default SequentialSampler
sampled_dp = dp.iter.Sampler(input_dp) # type: ignore[var-annotated]
self.assertEqual(len(sampled_dp), 10)
for i, x in enumerate(sampled_dp):
self.assertEqual(x, i)
# RandomSampler
dp.iter.Sampler(
input_dp, sampler=RandomSampler, sampler_kwargs={"replacement": True}
)
# Requires `__len__` to build SamplerDataPipe
input_dp_nolen = IDP_NoLen(range(10))
with self.assertRaises(AssertionError):
sampled_dp = dp.iter.Sampler(input_dp_nolen)
def test_stream_reader_iterdatapipe(self):
from io import StringIO
input_dp = dp.iter.IterableWrapper(
[("f1", StringIO("abcde")), ("f2", StringIO("bcdef"))]
)
expected_res = ["abcde", "bcdef"]
# Functional Test: Read full chunk
dp1 = input_dp.read_from_stream()
self.assertEqual([d[1] for d in dp1], expected_res)
# Functional Test: Read full chunk
dp2 = input_dp.read_from_stream(chunk=1)
self.assertEqual([d[1] for d in dp2], [c for s in expected_res for c in s])
# `__len__` Test
with self.assertRaises(TypeError):
len(dp1)
def test_shuffler_iterdatapipe(self):
input_dp = dp.iter.IterableWrapper(list(range(10)))
with self.assertRaises(AssertionError):
input_dp.shuffle(buffer_size=0)
# Functional Test: No seed
shuffler_dp = input_dp.shuffle()
self.assertEqual(set(range(10)), set(shuffler_dp))
# Functional Test: With global seed
torch.manual_seed(123)
shuffler_dp = input_dp.shuffle()
res = list(shuffler_dp)
torch.manual_seed(123)
self.assertEqual(list(shuffler_dp), res)
# Functional Test: Set seed
shuffler_dp = input_dp.shuffle().set_seed(123)
res = list(shuffler_dp)
shuffler_dp.set_seed(123)
self.assertEqual(list(shuffler_dp), res)
# Functional Test: deactivate shuffling via set_shuffle
unshuffled_dp = input_dp.shuffle().set_shuffle(False)
self.assertEqual(list(unshuffled_dp), list(input_dp))
# Reset Test:
shuffler_dp = input_dp.shuffle()
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(
shuffler_dp, n_elements_before_reset
)
self.assertEqual(5, len(res_before_reset))
for x in res_before_reset:
self.assertTrue(x in set(range(10)))
self.assertEqual(set(range(10)), set(res_after_reset))
# __len__ Test: returns the length of the input DataPipe
shuffler_dp = input_dp.shuffle()
self.assertEqual(10, len(shuffler_dp))
# Serialization Test
from torch.utils.data.datapipes._hook_iterator import _SnapshotState
def _serialization_helper(bs):
shuffler_dp = input_dp.shuffle(buffer_size=bs)
it = iter(shuffler_dp)
for _ in range(2):
next(it)
shuffler_dp_copy = pickle.loads(pickle.dumps(shuffler_dp))
_simple_graph_snapshot_restoration(
shuffler_dp_copy.datapipe,
shuffler_dp.datapipe._number_of_samples_yielded,
)
exp = list(it)
shuffler_dp_copy._snapshot_state = _SnapshotState.Restored
self.assertEqual(exp, list(shuffler_dp_copy))
buffer_sizes = [2, 5, 15]
for bs in buffer_sizes:
_serialization_helper(bs)
def test_zip_iterdatapipe(self):
# Functional Test: raises TypeError when an input is not of type `IterDataPipe`
with self.assertRaises(TypeError):
dp.iter.Zipper(dp.iter.IterableWrapper(range(10)), list(range(10))) # type: ignore[arg-type]
# Functional Test: raises TypeError when an input does not have valid length
zipped_dp = dp.iter.Zipper(
dp.iter.IterableWrapper(range(10)), IDP_NoLen(range(5))
) # type: ignore[var-annotated]
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(zipped_dp)
# Functional Test: zips the results properly
exp = [(i, i) for i in range(5)]
self.assertEqual(list(zipped_dp), exp)
# Functional Test: zips the inputs properly even when lengths are different (zips to the shortest)
zipped_dp = dp.iter.Zipper(
dp.iter.IterableWrapper(range(10)), dp.iter.IterableWrapper(range(5))
)
# __len__ Test: length matches the length of the shortest input
self.assertEqual(len(zipped_dp), 5)
# Reset Test:
n_elements_before_reset = 3
res_before_reset, res_after_reset = reset_after_n_next_calls(
zipped_dp, n_elements_before_reset
)
expected_res = [(i, i) for i in range(5)]
self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res, res_after_reset)
| TestFunctionalIterDataPipe |
python | mkdocs__mkdocs | mkdocs/tests/plugin_tests.py | {
"start": 761,
"end": 1393
} | class ____(plugins.BasePlugin[_DummyPluginConfig]):
def on_page_content(self, html, **kwargs) -> str:
"""Modify page content by prepending `foo` config value."""
return f'{self.config.foo} {html}'
def on_nav(self, nav, **kwargs) -> None:
"""Do nothing (return None) to not modify item."""
return None
def on_page_read_source(self, **kwargs) -> str:
"""Create new source by prepending `foo` config value to 'source'."""
return f'{self.config.foo} source'
def on_pre_build(self, **kwargs) -> None:
"""Do nothing (return None)."""
return None
| DummyPlugin |
python | plotly__plotly.py | plotly/graph_objs/_cone.py | {
"start": 215,
"end": 83034
} | class ____(_BaseTraceType):
_parent_path_str = ""
_path_str = "cone"
_valid_props = {
"anchor",
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"coloraxis",
"colorbar",
"colorscale",
"customdata",
"customdatasrc",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatefallback",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"legend",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"lighting",
"lightposition",
"meta",
"metasrc",
"name",
"opacity",
"reversescale",
"scene",
"showlegend",
"showscale",
"sizemode",
"sizeref",
"stream",
"text",
"textsrc",
"type",
"u",
"uhoverformat",
"uid",
"uirevision",
"usrc",
"v",
"vhoverformat",
"visible",
"vsrc",
"w",
"whoverformat",
"wsrc",
"x",
"xhoverformat",
"xsrc",
"y",
"yhoverformat",
"ysrc",
"z",
"zhoverformat",
"zsrc",
}
@property
def anchor(self):
"""
Sets the cones' anchor with respect to their x/y/z positions.
Note that "cm" denote the cone's center of mass which
corresponds to 1/4 from the tail to tip.
The 'anchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['tip', 'tail', 'cm', 'center']
Returns
-------
Any
"""
return self["anchor"]
@anchor.setter
def anchor(self, val):
self["anchor"] = val
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here u/v/w norm) or the bounds set
in `cmin` and `cmax` Defaults to `false` when `cmin` and `cmax`
are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Value should have the
same units as u/v/w norm and if set, `cmin` must be set as
well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `cmin` and/or
`cmax` to be equidistant to this point. Value should have the
same units as u/v/w norm. Has no effect when `cauto` is
`false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Value should have the
same units as u/v/w norm and if set, `cmax` must be set as
well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.cone.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Returns
-------
plotly.graph_objs.cone.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
@property
def colorscale(self):
"""
Sets the colorscale. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
bounds of the colorscale in color space, use `cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,Reds,Viridis,
YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'u', 'v', 'w', 'norm', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.cone.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Returns
-------
plotly.graph_objs.cone.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Finally, the template string has access to variable `norm`
Anything contained in tag `<extra>` is displayed in the
secondary box, for example `<extra>%{fullData.name}</extra>`.
To hide the secondary box completely, use an empty tag
`<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
@property
def hovertemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'hovertemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["hovertemplatefallback"]
@hovertemplatefallback.setter
def hovertemplatefallback(self, val):
self["hovertemplatefallback"] = val
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
@property
def legend(self):
"""
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2", "legend3",
etc. Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces and shapes part of
the same legend group hide/show at the same time when toggling
legend items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.cone.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.cone.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items. When
having unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and layout.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def lighting(self):
"""
The 'lighting' property is an instance of Lighting
that may be specified as:
- An instance of :class:`plotly.graph_objs.cone.Lighting`
- A dict of string/value properties that will be passed
to the Lighting constructor
Returns
-------
plotly.graph_objs.cone.Lighting
"""
return self["lighting"]
@lighting.setter
def lighting(self, val):
self["lighting"] = val
@property
def lightposition(self):
"""
The 'lightposition' property is an instance of Lightposition
that may be specified as:
- An instance of :class:`plotly.graph_objs.cone.Lightposition`
- A dict of string/value properties that will be passed
to the Lightposition constructor
Returns
-------
plotly.graph_objs.cone.Lightposition
"""
return self["lightposition"]
@lightposition.setter
def lightposition(self, val):
self["lightposition"] = val
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def opacity(self):
"""
Sets the opacity of the surface. Please note that in the case
of using high `opacity` values for example a value greater than
or equal to 0.5 on two surfaces (and 0.25 with four surfaces),
an overlay of multiple transparent surfaces may not perfectly
be sorted in depth by the webgl API. This behavior may be
improved in the near future and is subject to change.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def reversescale(self):
"""
Reverses the color mapping if true. If true, `cmin` will
correspond to the last color in the array and `cmax` will
correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
@property
def scene(self):
"""
Sets a reference between this trace's 3D coordinate system and
a 3D scene. If "scene" (the default value), the (x,y,z)
coordinates refer to `layout.scene`. If "scene2", the (x,y,z)
coordinates refer to `layout.scene2`, and so on.
The 'scene' property is an identifier of a particular
subplot, of type 'scene', that may be specified as the string 'scene'
optionally followed by an integer >= 1
(e.g. 'scene', 'scene1', 'scene2', 'scene3', etc.)
Returns
-------
str
"""
return self["scene"]
@scene.setter
def scene(self, val):
self["scene"] = val
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
@property
def sizemode(self):
"""
Determines whether `sizeref` is set as a "scaled" (i.e
unitless) scalar (normalized by the max u/v/w norm in the
vector field) or as "absolute" value (in the same units as the
vector field). To display sizes in actual vector length use
"raw".
The 'sizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['scaled', 'absolute', 'raw']
Returns
-------
Any
"""
return self["sizemode"]
@sizemode.setter
def sizemode(self, val):
self["sizemode"] = val
@property
def sizeref(self):
"""
Adjusts the cone size scaling. The size of the cones is
determined by their u/v/w norm multiplied a factor and
`sizeref`. This factor (computed internally) corresponds to the
minimum "time" to travel across two successive x/y/z positions
at the average velocity of those two successive positions. All
cones in a given trace use the same factor. With `sizemode` set
to "raw", its default value is 1. With `sizemode` set to
"scaled", `sizeref` is unitless, its default value is 0.5. With
`sizemode` set to "absolute", `sizeref` has the same units as
the u/v/w vector field, its the default value is half the
sample's maximum vector norm.
The 'sizeref' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["sizeref"]
@sizeref.setter
def sizeref(self, val):
self["sizeref"] = val
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.cone.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Returns
-------
plotly.graph_objs.cone.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
@property
def text(self):
"""
Sets the text elements associated with the cones. If trace
`hoverinfo` contains a "text" flag and "hovertext" is not set,
these elements will be seen in the hover labels.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `text`.
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
@property
def u(self):
"""
Sets the x components of the vector field.
The 'u' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["u"]
@u.setter
def u(self, val):
self["u"] = val
@property
def uhoverformat(self):
"""
Sets the hover text formatting rulefor `u` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.By
default the values are formatted using generic number format.
The 'uhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uhoverformat"]
@uhoverformat.setter
def uhoverformat(self, val):
self["uhoverformat"] = val
@property
def uid(self):
"""
Assign an id to this trace, Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def usrc(self):
"""
Sets the source reference on Chart Studio Cloud for `u`.
The 'usrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["usrc"]
@usrc.setter
def usrc(self, val):
self["usrc"] = val
@property
def v(self):
"""
Sets the y components of the vector field.
The 'v' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["v"]
@v.setter
def v(self, val):
self["v"] = val
@property
def vhoverformat(self):
"""
Sets the hover text formatting rulefor `v` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.By
default the values are formatted using generic number format.
The 'vhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["vhoverformat"]
@vhoverformat.setter
def vhoverformat(self, val):
self["vhoverformat"] = val
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def vsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `v`.
The 'vsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["vsrc"]
@vsrc.setter
def vsrc(self, val):
self["vsrc"] = val
@property
def w(self):
"""
Sets the z components of the vector field.
The 'w' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["w"]
@w.setter
def w(self, val):
self["w"] = val
@property
def whoverformat(self):
"""
Sets the hover text formatting rulefor `w` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.By
default the values are formatted using generic number format.
The 'whoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["whoverformat"]
@whoverformat.setter
def whoverformat(self, val):
self["whoverformat"] = val
@property
def wsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `w`.
The 'wsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["wsrc"]
@wsrc.setter
def wsrc(self, val):
self["wsrc"] = val
@property
def x(self):
"""
Sets the x coordinates of the vector field and of the displayed
cones.
The 'x' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def xhoverformat(self):
"""
Sets the hover text formatting rulefor `x` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display *09~15~23.46*By default the values
are formatted using `xaxis.hoverformat`.
The 'xhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["xhoverformat"]
@xhoverformat.setter
def xhoverformat(self, val):
self["xhoverformat"] = val
@property
def xsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `x`.
The 'xsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["xsrc"]
@xsrc.setter
def xsrc(self, val):
self["xsrc"] = val
@property
def y(self):
"""
Sets the y coordinates of the vector field and of the displayed
cones.
The 'y' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def yhoverformat(self):
"""
Sets the hover text formatting rulefor `y` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display *09~15~23.46*By default the values
are formatted using `yaxis.hoverformat`.
The 'yhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["yhoverformat"]
@yhoverformat.setter
def yhoverformat(self, val):
self["yhoverformat"] = val
@property
def ysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `y`.
The 'ysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ysrc"]
@ysrc.setter
def ysrc(self, val):
self["ysrc"] = val
@property
def z(self):
"""
Sets the z coordinates of the vector field and of the displayed
cones.
The 'z' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
@property
def zhoverformat(self):
"""
Sets the hover text formatting rulefor `z` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display *09~15~23.46*By default the values
are formatted using `zaxis.hoverformat`.
The 'zhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["zhoverformat"]
@zhoverformat.setter
def zhoverformat(self, val):
self["zhoverformat"] = val
@property
def zsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `z`.
The 'zsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["zsrc"]
@zsrc.setter
def zsrc(self, val):
self["zsrc"] = val
@property
def type(self):
return self._props["type"]
    @property
    def _prop_descriptions(self):
        # Plain-text descriptions of every property of this trace; the
        # plotly base classes consume this block when assembling docstrings.
        # NOTE(review): generated text — keep byte-for-byte in sync with the
        # per-property docstrings above.
        return """\
        anchor
            Sets the cones' anchor with respect to their x/y/z
            positions. Note that "cm" denote the cone's center of
            mass which corresponds to 1/4 from the tail to tip.
        autocolorscale
            Determines whether the colorscale is a default palette
            (`autocolorscale: true`) or the palette determined by
            `colorscale`. In case `colorscale` is unspecified or
            `autocolorscale` is true, the default palette will be
            chosen according to whether numbers in the `color`
            array are all positive, all negative or mixed.
        cauto
            Determines whether or not the color domain is computed
            with respect to the input data (here u/v/w norm) or the
            bounds set in `cmin` and `cmax` Defaults to `false`
            when `cmin` and `cmax` are set by the user.
        cmax
            Sets the upper bound of the color domain. Value should
            have the same units as u/v/w norm and if set, `cmin`
            must be set as well.
        cmid
            Sets the mid-point of the color domain by scaling
            `cmin` and/or `cmax` to be equidistant to this point.
            Value should have the same units as u/v/w norm. Has no
            effect when `cauto` is `false`.
        cmin
            Sets the lower bound of the color domain. Value should
            have the same units as u/v/w norm and if set, `cmax`
            must be set as well.
        coloraxis
            Sets a reference to a shared color axis. References to
            these shared color axes are "coloraxis", "coloraxis2",
            "coloraxis3", etc. Settings for these shared color axes
            are set in the layout, under `layout.coloraxis`,
            `layout.coloraxis2`, etc. Note that multiple color
            scales can be linked to the same color axis.
        colorbar
            :class:`plotly.graph_objects.cone.ColorBar` instance or
            dict with compatible properties
        colorscale
            Sets the colorscale. The colorscale must be an array
            containing arrays mapping a normalized value to an rgb,
            rgba, hex, hsl, hsv, or named color string. At minimum,
            a mapping for the lowest (0) and highest (1) values are
            required. For example, `[[0, 'rgb(0,0,255)'], [1,
            'rgb(255,0,0)']]`. To control the bounds of the
            colorscale in color space, use `cmin` and `cmax`.
            Alternatively, `colorscale` may be a palette name
            string of the following list: Blackbody,Bluered,Blues,C
            ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
            and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
        customdata
            Assigns extra data each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.cone.Hoverlabel` instance
            or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. Variables that can't be found
            will be replaced with the specifier. For example, a
            template of "data: %{x}, %{y}" will result in a value
            of "data: 1, %{y}" if x is 1 and y is missing.
            Variables with an undefined value will be replaced with
            the fallback value. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, all attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Finally, the template string has access
            to variable `norm` Anything contained in tag `<extra>`
            is displayed in the secondary box, for example
            `<extra>%{fullData.name}</extra>`. To hide the
            secondary box completely, use an empty tag
            `<extra></extra>`.
        hovertemplatefallback
            Fallback string that's displayed when a variable
            referenced in a template is missing. If the boolean
            value 'false' is passed in, the specifier with the
            missing variable will be displayed.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Same as `text`.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgroup
            Sets the legend group for this trace. Traces and shapes
            part of the same legend group hide/show at the same
            time when toggling legend items.
        legendgrouptitle
            :class:`plotly.graph_objects.cone.Legendgrouptitle`
            instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When having
            unranked or equal rank items shapes would be displayed
            after traces i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        lighting
            :class:`plotly.graph_objects.cone.Lighting` instance or
            dict with compatible properties
        lightposition
            :class:`plotly.graph_objects.cone.Lightposition`
            instance or dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenues` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n[.meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        opacity
            Sets the opacity of the surface. Please note that in
            the case of using high `opacity` values for example a
            value greater than or equal to 0.5 on two surfaces (and
            0.25 with four surfaces), an overlay of multiple
            transparent surfaces may not perfectly be sorted in
            depth by the webgl API. This behavior may be improved
            in the near future and is subject to change.
        reversescale
            Reverses the color mapping if true. If true, `cmin`
            will correspond to the last color in the array and
            `cmax` will correspond to the first color.
        scene
            Sets a reference between this trace's 3D coordinate
            system and a 3D scene. If "scene" (the default value),
            the (x,y,z) coordinates refer to `layout.scene`. If
            "scene2", the (x,y,z) coordinates refer to
            `layout.scene2`, and so on.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        showscale
            Determines whether or not a colorbar is displayed for
            this trace.
        sizemode
            Determines whether `sizeref` is set as a "scaled" (i.e
            unitless) scalar (normalized by the max u/v/w norm in
            the vector field) or as "absolute" value (in the same
            units as the vector field). To display sizes in actual
            vector length use "raw".
        sizeref
            Adjusts the cone size scaling. The size of the cones is
            determined by their u/v/w norm multiplied a factor and
            `sizeref`. This factor (computed internally)
            corresponds to the minimum "time" to travel across two
            successive x/y/z positions at the average velocity of
            those two successive positions. All cones in a given
            trace use the same factor. With `sizemode` set to
            "raw", its default value is 1. With `sizemode` set to
            "scaled", `sizeref` is unitless, its default value is
            0.5. With `sizemode` set to "absolute", `sizeref` has
            the same units as the u/v/w vector field, its the
            default value is half the sample's maximum vector norm.
        stream
            :class:`plotly.graph_objects.cone.Stream` instance or
            dict with compatible properties
        text
            Sets the text elements associated with the cones. If
            trace `hoverinfo` contains a "text" flag and
            "hovertext" is not set, these elements will be seen in
            the hover labels.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        u
            Sets the x components of the vector field.
        uhoverformat
            Sets the hover text formatting rulefor `u` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see: https://github.com/d
            3/d3-format/tree/v1.4.5#d3-format.By default the values
            are formatted using generic number format.
        uid
            Assign an id to this trace, Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        usrc
            Sets the source reference on Chart Studio Cloud for
            `u`.
        v
            Sets the y components of the vector field.
        vhoverformat
            Sets the hover text formatting rulefor `v` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see: https://github.com/d
            3/d3-format/tree/v1.4.5#d3-format.By default the values
            are formatted using generic number format.
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        vsrc
            Sets the source reference on Chart Studio Cloud for
            `v`.
        w
            Sets the z components of the vector field.
        whoverformat
            Sets the hover text formatting rulefor `w` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see: https://github.com/d
            3/d3-format/tree/v1.4.5#d3-format.By default the values
            are formatted using generic number format.
        wsrc
            Sets the source reference on Chart Studio Cloud for
            `w`.
        x
            Sets the x coordinates of the vector field and of the
            displayed cones.
        xhoverformat
            Sets the hover text formatting rulefor `x` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*By default the values are
            formatted using `xaxis.hoverformat`.
        xsrc
            Sets the source reference on Chart Studio Cloud for
            `x`.
        y
            Sets the y coordinates of the vector field and of the
            displayed cones.
        yhoverformat
            Sets the hover text formatting rulefor `y` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*By default the values are
            formatted using `yaxis.hoverformat`.
        ysrc
            Sets the source reference on Chart Studio Cloud for
            `y`.
        z
            Sets the z coordinates of the vector field and of the
            displayed cones.
        zhoverformat
            Sets the hover text formatting rulefor `z` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*By default the values are
            formatted using `zaxis.hoverformat`.
        zsrc
            Sets the source reference on Chart Studio Cloud for
            `z`.
        """
def __init__(
self,
arg=None,
anchor=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
coloraxis=None,
colorbar=None,
colorscale=None,
customdata=None,
customdatasrc=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatefallback=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
lighting=None,
lightposition=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
reversescale=None,
scene=None,
showlegend=None,
showscale=None,
sizemode=None,
sizeref=None,
stream=None,
text=None,
textsrc=None,
u=None,
uhoverformat=None,
uid=None,
uirevision=None,
usrc=None,
v=None,
vhoverformat=None,
visible=None,
vsrc=None,
w=None,
whoverformat=None,
wsrc=None,
x=None,
xhoverformat=None,
xsrc=None,
y=None,
yhoverformat=None,
ysrc=None,
z=None,
zhoverformat=None,
zsrc=None,
**kwargs,
):
"""
Construct a new Cone object
Use cone traces to visualize vector fields. Specify a vector
field using 6 1D arrays, 3 position arrays `x`, `y` and `z` and
3 vector component arrays `u`, `v`, `w`. The cones are drawn
exactly at the positions given by `x`, `y` and `z`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Cone`
anchor
Sets the cones' anchor with respect to their x/y/z
positions. Note that "cm" denote the cone's center of
mass which corresponds to 1/4 from the tail to tip.
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here u/v/w norm) or the
bounds set in `cmin` and `cmax` Defaults to `false`
when `cmin` and `cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Value should
have the same units as u/v/w norm and if set, `cmin`
must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`cmin` and/or `cmax` to be equidistant to this point.
Value should have the same units as u/v/w norm. Has no
effect when `cauto` is `false`.
cmin
Sets the lower bound of the color domain. Value should
have the same units as u/v/w norm and if set, `cmax`
must be set as well.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.cone.ColorBar` instance or
dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.cone.Hoverlabel` instance
or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variable `norm` Anything contained in tag `<extra>`
is displayed in the secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.cone.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
lighting
:class:`plotly.graph_objects.cone.Lighting` instance or
dict with compatible properties
lightposition
:class:`plotly.graph_objects.cone.Lightposition`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the surface. Please note that in
the case of using high `opacity` values for example a
value greater than or equal to 0.5 on two surfaces (and
0.25 with four surfaces), an overlay of multiple
transparent surfaces may not perfectly be sorted in
depth by the webgl API. This behavior may be improved
in the near future and is subject to change.
reversescale
Reverses the color mapping if true. If true, `cmin`
will correspond to the last color in the array and
`cmax` will correspond to the first color.
scene
Sets a reference between this trace's 3D coordinate
system and a 3D scene. If "scene" (the default value),
the (x,y,z) coordinates refer to `layout.scene`. If
"scene2", the (x,y,z) coordinates refer to
`layout.scene2`, and so on.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
sizemode
Determines whether `sizeref` is set as a "scaled" (i.e
unitless) scalar (normalized by the max u/v/w norm in
the vector field) or as "absolute" value (in the same
units as the vector field). To display sizes in actual
vector length use "raw".
sizeref
Adjusts the cone size scaling. The size of the cones is
determined by their u/v/w norm multiplied a factor and
`sizeref`. This factor (computed internally)
corresponds to the minimum "time" to travel across two
successive x/y/z positions at the average velocity of
those two successive positions. All cones in a given
trace use the same factor. With `sizemode` set to
"raw", its default value is 1. With `sizemode` set to
"scaled", `sizeref` is unitless, its default value is
0.5. With `sizemode` set to "absolute", `sizeref` has
the same units as the u/v/w vector field, its the
default value is half the sample's maximum vector norm.
stream
:class:`plotly.graph_objects.cone.Stream` instance or
dict with compatible properties
text
Sets the text elements associated with the cones. If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
u
Sets the x components of the vector field.
uhoverformat
Sets the hover text formatting rulefor `u` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see: https://github.com/d
3/d3-format/tree/v1.4.5#d3-format.By default the values
are formatted using generic number format.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
usrc
Sets the source reference on Chart Studio Cloud for
`u`.
v
Sets the y components of the vector field.
vhoverformat
Sets the hover text formatting rulefor `v` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see: https://github.com/d
3/d3-format/tree/v1.4.5#d3-format.By default the values
are formatted using generic number format.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
vsrc
Sets the source reference on Chart Studio Cloud for
`v`.
w
Sets the z components of the vector field.
whoverformat
Sets the hover text formatting rulefor `w` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see: https://github.com/d
3/d3-format/tree/v1.4.5#d3-format.By default the values
are formatted using generic number format.
wsrc
Sets the source reference on Chart Studio Cloud for
`w`.
x
Sets the x coordinates of the vector field and of the
displayed cones.
xhoverformat
Sets the hover text formatting rulefor `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
Sets the y coordinates of the vector field and of the
displayed cones.
yhoverformat
Sets the hover text formatting rulefor `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
z
Sets the z coordinates of the vector field and of the
displayed cones.
zhoverformat
Sets the hover text formatting rulefor `z` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `zaxis.hoverformat`.
zsrc
Sets the source reference on Chart Studio Cloud for
`z`.
Returns
-------
Cone
"""
super().__init__("cone")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.Cone
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Cone`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("anchor", arg, anchor)
self._set_property("autocolorscale", arg, autocolorscale)
self._set_property("cauto", arg, cauto)
self._set_property("cmax", arg, cmax)
self._set_property("cmid", arg, cmid)
self._set_property("cmin", arg, cmin)
self._set_property("coloraxis", arg, coloraxis)
self._set_property("colorbar", arg, colorbar)
self._set_property("colorscale", arg, colorscale)
self._set_property("customdata", arg, customdata)
self._set_property("customdatasrc", arg, customdatasrc)
self._set_property("hoverinfo", arg, hoverinfo)
self._set_property("hoverinfosrc", arg, hoverinfosrc)
self._set_property("hoverlabel", arg, hoverlabel)
self._set_property("hovertemplate", arg, hovertemplate)
self._set_property("hovertemplatefallback", arg, hovertemplatefallback)
self._set_property("hovertemplatesrc", arg, hovertemplatesrc)
self._set_property("hovertext", arg, hovertext)
self._set_property("hovertextsrc", arg, hovertextsrc)
self._set_property("ids", arg, ids)
self._set_property("idssrc", arg, idssrc)
self._set_property("legend", arg, legend)
self._set_property("legendgroup", arg, legendgroup)
self._set_property("legendgrouptitle", arg, legendgrouptitle)
self._set_property("legendrank", arg, legendrank)
self._set_property("legendwidth", arg, legendwidth)
self._set_property("lighting", arg, lighting)
self._set_property("lightposition", arg, lightposition)
self._set_property("meta", arg, meta)
self._set_property("metasrc", arg, metasrc)
self._set_property("name", arg, name)
self._set_property("opacity", arg, opacity)
self._set_property("reversescale", arg, reversescale)
self._set_property("scene", arg, scene)
self._set_property("showlegend", arg, showlegend)
self._set_property("showscale", arg, showscale)
self._set_property("sizemode", arg, sizemode)
self._set_property("sizeref", arg, sizeref)
self._set_property("stream", arg, stream)
self._set_property("text", arg, text)
self._set_property("textsrc", arg, textsrc)
self._set_property("u", arg, u)
self._set_property("uhoverformat", arg, uhoverformat)
self._set_property("uid", arg, uid)
self._set_property("uirevision", arg, uirevision)
self._set_property("usrc", arg, usrc)
self._set_property("v", arg, v)
self._set_property("vhoverformat", arg, vhoverformat)
self._set_property("visible", arg, visible)
self._set_property("vsrc", arg, vsrc)
self._set_property("w", arg, w)
self._set_property("whoverformat", arg, whoverformat)
self._set_property("wsrc", arg, wsrc)
self._set_property("x", arg, x)
self._set_property("xhoverformat", arg, xhoverformat)
self._set_property("xsrc", arg, xsrc)
self._set_property("y", arg, y)
self._set_property("yhoverformat", arg, yhoverformat)
self._set_property("ysrc", arg, ysrc)
self._set_property("z", arg, z)
self._set_property("zhoverformat", arg, zhoverformat)
self._set_property("zsrc", arg, zsrc)
self._props["type"] = "cone"
arg.pop("type", None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Cone |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-amazon-product-extraction/llama_index/packs/amazon_product_extraction/base.py | {
"start": 854,
"end": 1582
} | class ____(BaseModel):
"""Data model for an Amazon Product."""
title: str = Field(..., description="Title of product")
category: str = Field(..., description="Category of product")
discount: float = Field(..., description="Discount of product")
price: float = Field(..., description="Price of product")
rating: float = Field(..., description="Rating of product")
description: str = Field(..., description="Description of product")
img_description: str = Field(..., description="Description of product image")
inventory: str = Field(..., description="Inventory of product")
DEFAULT_PROMPT_TEMPLATE_STR = """\
Can you extract the following fields from this product, in JSON format?
"""
| Product |
python | huggingface__transformers | src/transformers/models/dpt/modeling_dpt.py | {
"start": 34396,
"end": 36564
} | class ____(nn.Module):
"""
DPTNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as
input and produces another list of tensors as output. For DPT, it includes 2 stages:
* DPTReassembleStage
* DPTFeatureFusionStage.
Args:
config (dict): config dict.
"""
def __init__(self, config: DPTConfig):
super().__init__()
self.config = config
# postprocessing: only required in case of a non-hierarchical backbone (e.g. ViT, BEiT)
if config.backbone_config is not None and config.backbone_config.model_type == "swinv2":
self.reassemble_stage = None
else:
self.reassemble_stage = DPTReassembleStage(config)
self.convs = nn.ModuleList()
for channel in config.neck_hidden_sizes:
self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False))
# fusion
self.fusion_stage = DPTFeatureFusionStage(config)
def forward(
self,
hidden_states: list[torch.Tensor],
patch_height: Optional[int] = None,
patch_width: Optional[int] = None,
) -> list[torch.Tensor]:
"""
Args:
hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):
List of hidden states from the backbone.
"""
if not isinstance(hidden_states, (tuple, list)):
raise TypeError("hidden_states should be a tuple or list of tensors")
if len(hidden_states) != len(self.config.neck_hidden_sizes):
raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.")
# postprocess hidden states
if self.reassemble_stage is not None:
hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)
features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]
# fusion blocks
output = self.fusion_stage(features)
return output
| DPTNeck |
python | PrefectHQ__prefect | tests/cli/test_shell.py | {
"start": 4312,
"end": 5337
} | class ____:
def test_run_shell_process_basic(self, tmp_path):
"""Test basic command execution"""
test_file = tmp_path / "test.txt"
run_shell_process(f"touch {test_file}")
assert test_file.exists()
def test_run_shell_process_with_cwd(self, tmp_path):
"""Test command execution with custom working directory"""
subdir = tmp_path / "subdir"
subdir.mkdir()
test_file = "test.txt"
run_shell_process(f"touch {test_file}", popen_kwargs={"cwd": str(subdir)})
assert (subdir / test_file).exists()
def test_run_shell_process_with_env(self, tmp_path):
"""Test command execution with custom environment variables"""
custom_env = os.environ.copy()
custom_env["TEST_VAR"] = "hello"
run_shell_process(
"echo $TEST_VAR > output.txt",
popen_kwargs={"env": custom_env, "cwd": str(tmp_path)},
)
assert (tmp_path / "output.txt").read_text().strip() == "hello"
| TestRunShellProcess |
python | xlwings__xlwings | tests/test_range.py | {
"start": 31598,
"end": 32252
} | class ____(TestBase):
def test_merge(self):
sheet = self.wb1.sheets[0]
self.assertEqual(sheet["A1"].merge_area, sheet["A1"])
self.assertEqual(sheet["A1"].merge_cells, False)
sheet["A1:A2"].merge()
self.assertEqual(sheet["A1"].merge_area, sheet["A1:A2"])
self.assertEqual(sheet["A1"].merge_cells, True)
sheet["A1:B2"].merge()
self.assertEqual(sheet["A1"].merge_area, sheet["A1:B2"])
sheet["A1:B2"].unmerge()
self.assertEqual(sheet["A1"].merge_area, sheet["A1"])
sheet["A1:B2"].merge(True)
self.assertEqual(sheet["A1"].merge_area, sheet["A1:B1"])
| TestMerging |
python | django__django | tests/model_forms/tests.py | {
"start": 3657,
"end": 3805
} | class ____(forms.Form):
items = forms.ModelMultipleChoiceField(
Inventory.objects.all(), to_field_name="barcode"
)
| SelectInventoryForm |
python | ray-project__ray | rllib/examples/envs/classes/debug_counter_env.py | {
"start": 1007,
"end": 3091
} | class ____(MultiAgentEnv):
def __init__(self, config):
super().__init__()
self.num_agents = config["num_agents"]
self.base_episode_len = config.get("base_episode_len", 103)
# Observation dims:
# 0=agent ID.
# 1=episode ID (0.0 for obs after reset).
# 2=env ID (0.0 for obs after reset).
# 3=ts (of the agent).
self.observation_space = gym.spaces.Dict(
{
aid: gym.spaces.Box(float("-inf"), float("inf"), (4,))
for aid in range(self.num_agents)
}
)
# Actions are always:
# (episodeID, envID) as floats.
self.action_space = gym.spaces.Dict(
{
aid: gym.spaces.Box(-float("inf"), float("inf"), shape=(2,))
for aid in range(self.num_agents)
}
)
self.timesteps = [0] * self.num_agents
self.terminateds = set()
self.truncateds = set()
def reset(self, *, seed=None, options=None):
self.timesteps = [0] * self.num_agents
self.terminateds = set()
self.truncateds = set()
return {
i: np.array([i, 0.0, 0.0, 0.0], dtype=np.float32)
for i in range(self.num_agents)
}, {}
def step(self, action_dict):
obs, rew, terminated, truncated = {}, {}, {}, {}
for i, action in action_dict.items():
self.timesteps[i] += 1
obs[i] = np.array([i, action[0], action[1], self.timesteps[i]])
rew[i] = self.timesteps[i] % 3
terminated[i] = False
truncated[i] = (
True if self.timesteps[i] > self.base_episode_len + i else False
)
if terminated[i]:
self.terminateds.add(i)
if truncated[i]:
self.truncateds.add(i)
terminated["__all__"] = len(self.terminateds) == self.num_agents
truncated["__all__"] = len(self.truncateds) == self.num_agents
return obs, rew, terminated, truncated, {}
| MultiAgentDebugCounterEnv |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/triggers/emr.py | {
"start": 12284,
"end": 13684
} | class ____(AwsBaseWaiterTrigger):
"""
Poll an Emr Serverless job run and wait for it to be completed.
:param application_id: The ID of the application the job in being run on.
:param job_id: The ID of the job run.
:waiter_delay: polling period in seconds to check for the status
:param waiter_max_attempts: The maximum number of attempts to be made
:param aws_conn_id: Reference to AWS connection id
"""
def __init__(
self,
application_id: str,
job_id: str | None,
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
aws_conn_id: str | None = "aws_default",
) -> None:
super().__init__(
serialized_fields={"application_id": application_id, "job_id": job_id},
waiter_name="serverless_job_completed",
waiter_args={"applicationId": application_id, "jobRunId": job_id},
failure_message="Serverless Job failed",
status_message="Serverless Job status is",
status_queries=["jobRun.state", "jobRun.stateDetails"],
return_key="job_id",
return_value=job_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return EmrServerlessHook(self.aws_conn_id)
| EmrServerlessStartJobTrigger |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 24182,
"end": 24341
} | class ____(Structure):
_fields_ = (
("name", lc_str),
("minor_version", mach_version_helper),
("header_addr", p_uint32),
)
| fvmlib |
python | pytorch__pytorch | torch/backends/__init__.py | {
"start": 2996,
"end": 3586
} | class ____(PropModule):
fp32_precision = ContextProp(
_get_fp32_precision_getter("generic", "all"),
_set_fp32_precision_setter("generic", "all"),
)
sys.modules[__name__] = GenericModule(sys.modules[__name__], __name__)
from torch.backends import (
cpu as cpu,
cuda as cuda,
cudnn as cudnn,
cusparselt as cusparselt,
kleidiai as kleidiai,
mha as mha,
miopen as miopen,
mkl as mkl,
mkldnn as mkldnn,
mps as mps,
nnpack as nnpack,
openmp as openmp,
opt_einsum as opt_einsum,
quantized as quantized,
)
| GenericModule |
python | pypa__pipenv | pipenv/patched/pip/_vendor/distlib/version.py | {
"start": 22103,
"end": 22173
} | class ____(Matcher):
version_class = SemanticVersion
| SemanticMatcher |
python | jina-ai__jina | jina/excepts.py | {
"start": 911,
"end": 1040
} | class ____(FileNotFoundError, BaseJinaException):
"""The yaml config file is bad, not loadable or not exist."""
| BadConfigSource |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-dappier/llama_index/tools/dappier/real_time_search/base.py | {
"start": 147,
"end": 2427
} | class ____(BaseToolSpec):
"""Dappier Real Time Search tool spec."""
spec_functions = ["search_real_time_data", "search_stock_market_data"]
def __init__(self, api_key: Optional[str] = None) -> None:
"""
Initialize the Dappier Real Time Search tool spec.
To obtain an API key, visit: https://platform.dappier.com/profile/api-keys
"""
from dappier import Dappier
self.api_key = api_key or os.environ.get("DAPPIER_API_KEY")
if not self.api_key:
raise ValueError(
"API key is required. Provide it as a parameter or set DAPPIER_API_KEY in environment variables.\n"
"To obtain an API key, visit: https://platform.dappier.com/profile/api-keys"
)
self.client = Dappier(api_key=self.api_key)
def search_real_time_data(self, query: str) -> str:
"""
Performs a real-time data search.
Args:
query (str): The user-provided input string for retrieving
real-time google web search results including the latest news,
weather, travel, deals and more.
Returns:
str: A response message containing the real-time data results.
"""
ai_model_id = "am_01j0rzq4tvfscrgzwac7jv1p4c"
response = self.client.search_real_time_data(
query=query, ai_model_id=ai_model_id
)
return response.message if response else "No real-time data found."
def search_stock_market_data(self, query: str) -> str:
"""
Performs a stock market data search.
Args:
query (str): The user-provided input string for retrieving
real-time financial news, stock prices, and trades from polygon.io,
with AI-powered insights and up-to-the-minute updates to keep you
informed on all your financial interests.
Returns:
str: A response message containing the stock market data results.
"""
ai_model_id = "am_01j749h8pbf7ns8r1bq9s2evrh"
response = self.client.search_real_time_data(
query=query, ai_model_id=ai_model_id
)
return response.message if response else "No stock market data found."
| DappierRealTimeSearchToolSpec |
python | davidhalter__jedi | test/test_inference/test_pyc.py | {
"start": 368,
"end": 2394
} | class ____:
pass
"""
@pytest.fixture
def pyc_project_path(tmpdir):
path = tmpdir.strpath
dummy_package_path = os.path.join(path, "dummy_package")
os.mkdir(dummy_package_path)
with open(os.path.join(dummy_package_path, "__init__.py"), 'w', newline=''):
pass
dummy_path = os.path.join(dummy_package_path, 'dummy.py')
with open(dummy_path, 'w', newline='') as f:
f.write(SRC)
import compileall
compileall.compile_file(dummy_path)
os.remove(dummy_path)
# To import pyc modules, we must move them out of the __pycache__
# directory and rename them to remove ".cpython-%s%d"
# see: http://stackoverflow.com/questions/11648440/python-does-not-detect-pyc-files
pycache = os.path.join(dummy_package_path, "__pycache__")
for f in os.listdir(pycache):
dst = f.replace('.cpython-%s%s' % sys.version_info[:2], "")
dst = os.path.join(dummy_package_path, dst)
shutil.copy(os.path.join(pycache, f), dst)
try:
yield path
finally:
shutil.rmtree(path)
@pytest.mark.parametrize('load_unsafe_extensions', [False, True])
def test_pyc(pyc_project_path, environment, load_unsafe_extensions):
"""
The list of completion must be greater than 2.
"""
path = os.path.join(pyc_project_path, 'blub.py')
if not isinstance(environment, InterpreterEnvironment):
# We are using the same version for pyc completions here, because it
# was compiled in that version. However with interpreter environments
# we also have the same version and it's easier to debug.
environment = SameEnvironment()
environment = environment
project = jedi.Project(pyc_project_path, load_unsafe_extensions=load_unsafe_extensions)
s = jedi.Script(
"from dummy_package import dummy; dummy.",
path=path,
environment=environment,
project=project,
)
if load_unsafe_extensions:
assert len(s.complete()) >= 2
else:
assert not s.complete()
| Bar |
python | redis__redis-py | redis/cache.py | {
"start": 260,
"end": 388
} | class ____(Enum):
time_based = "time_based"
frequency_based = "frequency_based"
@dataclass(frozen=True)
| EvictionPolicyType |
python | getsentry__sentry | src/sentry_plugins/twilio/plugin.py | {
"start": 1691,
"end": 3315
} | class ____(forms.Form):
account_sid = forms.CharField(
label=_("Account SID"), required=True, widget=forms.TextInput(attrs={"class": "span6"})
)
auth_token = forms.CharField(
label=_("Auth Token"),
required=True,
widget=forms.PasswordInput(render_value=True, attrs={"class": "span6"}),
)
sms_from = forms.CharField(
label=_("SMS From #"),
required=True,
help_text=_("Digits only"),
widget=forms.TextInput(attrs={"placeholder": "e.g. 3305093095"}),
)
sms_to = forms.CharField(
label=_("SMS To #s"),
required=True,
help_text=_("Recipient(s) phone numbers separated by commas or lines"),
widget=forms.Textarea(attrs={"placeholder": "e.g. 3305093095, 5555555555"}),
)
def clean_sms_from(self):
data = self.cleaned_data["sms_from"]
if not validate_phone(data):
raise forms.ValidationError(f"{data} is not a valid phone number.")
return clean_phone(data)
def clean_sms_to(self):
data = self.cleaned_data["sms_to"]
phones = split_sms_to(data)
if len(phones) > 10:
raise forms.ValidationError(f"Max of 10 phone numbers, {len(phones)} were given.")
for phone in phones:
if not validate_phone(phone):
raise forms.ValidationError(f"{phone} is not a valid phone number.")
return ",".join(sorted(set(map(clean_phone, phones))))
def clean(self) -> dict[str, Any] | None:
# TODO: Ping Twilio and check credentials (?)
return self.cleaned_data
| TwilioConfigurationForm |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 287795,
"end": 288431
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("EnterpriseAdministratorEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(sgqlc.types.list_of("User"), graphql_name="nodes")
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| EnterpriseAdministratorConnection |
python | huggingface__transformers | tests/utils/import_structures/import_structure_register_with_comments.py | {
"start": 707,
"end": 862
} | class ____:
def __init__(self):
pass
@requires()
# That's a statement
def b0():
pass
@requires(backends=("torch",))
# That's a statement
| B0 |
python | kamyu104__LeetCode-Solutions | Python/maximum-frequency-of-an-element-after-performing-operations-i.py | {
"start": 1119,
"end": 1782
} | class ____(object):
def maxFrequency(self, nums, k, numOperations):
"""
:type nums: List[int]
:type k: int
:type numOperations: int
:rtype: int
"""
cnt = collections.defaultdict(int) # defaultdict is much faster than Counter
for x in nums:
cnt[x] += 1
diff = defaultdict(int)
for x in nums:
diff[x] += 0
diff[x-k] += 1
diff[x+k+1] -= 1
result = curr = 0
for x, c in sorted(diff.iteritems()):
curr += c
result = max(result, cnt[x]+min(curr-cnt[x], numOperations))
return result
| Solution2 |
python | weaviate__weaviate-python-client | weaviate/collections/aggregations/near_text/executor.py | {
"start": 622,
"end": 7269
} | class ____(Generic[ConnectionType], _BaseExecutor[ConnectionType]):
@overload
def near_text(
self,
query: Union[List[str], str],
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
move_to: Optional[Move] = None,
move_away: Optional[Move] = None,
object_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
target_vector: Optional[str] = None,
total_count: bool = True,
return_metrics: Optional[PropertiesMetrics] = None,
) -> executor.Result[AggregateReturn]: ...
@overload
def near_text(
self,
query: Union[List[str], str],
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
move_to: Optional[Move] = None,
move_away: Optional[Move] = None,
object_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Union[str, GroupByAggregate],
target_vector: Optional[str] = None,
total_count: bool = True,
return_metrics: Optional[PropertiesMetrics] = None,
) -> executor.Result[AggregateGroupByReturn]: ...
@overload
def near_text(
self,
query: Union[List[str], str],
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
move_to: Optional[Move] = None,
move_away: Optional[Move] = None,
object_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Optional[Union[str, GroupByAggregate]] = None,
target_vector: Optional[str] = None,
total_count: bool = True,
return_metrics: Optional[PropertiesMetrics] = None,
) -> executor.Result[Union[AggregateReturn, AggregateGroupByReturn]]: ...
def near_text(
self,
query: Union[List[str], str],
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
move_to: Optional[Move] = None,
move_away: Optional[Move] = None,
object_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Optional[Union[str, GroupByAggregate]] = None,
target_vector: Optional[str] = None,
total_count: bool = True,
return_metrics: Optional[PropertiesMetrics] = None,
) -> executor.Result[Union[AggregateReturn, AggregateGroupByReturn]]:
"""Aggregate metrics over the objects returned by a near text vector search on this collection.
At least one of `certainty`, `distance`, or `object_limit` must be specified here for the vector search.
This method requires a vectorizer capable of handling text, e.g. `text2vec-contextionary`, `text2vec-openai`, etc.
Args:
query: The text(s) to search on.
certainty: The minimum certainty of the text search.
distance: The maximum distance of the text search.
move_to: The vector to move the search towards.
move_away: The vector to move the search away from.
object_limit: The maximum number of objects to return from the text search prior to the aggregation.
filters: The filters to apply to the search.
group_by: How to group the aggregation by.
total_count: Whether to include the total number of objects that match the query in the response.
return_metrics: A list of property metrics to aggregate together after the text search.
Returns:
Depending on the presence of the `group_by` argument, either a `AggregateReturn` object or a `AggregateGroupByReturn that includes the aggregation objects.
Raises:
weaviate.exceptions.WeaviateQueryError: If an error occurs while performing the query against Weaviate.
weaviate.exceptions.WeaviateInvalidInputError: If any of the input arguments are of the wrong type.
"""
return_metrics = (
return_metrics
if (return_metrics is None or isinstance(return_metrics, list))
else [return_metrics]
)
if isinstance(group_by, str):
group_by = GroupByAggregate(prop=group_by)
if self._connection._weaviate_version.is_lower_than(1, 29, 0):
# use gql, remove once 1.29 is the minimum supported version
def resp(res: dict) -> Union[AggregateReturn, AggregateGroupByReturn]:
return (
self._to_aggregate_result(res, return_metrics)
if group_by is None
else self._to_group_by_result(res, return_metrics)
)
builder = self._base(return_metrics, filters, total_count)
builder = self._add_groupby_to_builder(builder, group_by)
builder = self._add_near_text_to_builder(
builder=builder,
query=query,
certainty=certainty,
distance=distance,
move_to=move_to,
move_away=move_away,
object_limit=object_limit,
target_vector=target_vector,
)
return executor.execute(
response_callback=resp,
method=self._do,
query=builder,
)
else:
# use grpc
request = self._grpc.near_text(
near_text=query,
certainty=certainty,
distance=distance,
move_away=move_away,
move_to=move_to,
target_vector=target_vector,
aggregations=(
[metric.to_grpc() for metric in return_metrics]
if return_metrics is not None
else []
),
filters=_FilterToGRPC.convert(filters) if filters is not None else None,
group_by=group_by._to_grpc() if group_by is not None else None,
limit=group_by.limit if group_by is not None else None,
objects_count=total_count,
object_limit=object_limit,
)
def respGrpc(
res: aggregate_pb2.AggregateReply,
) -> Union[AggregateReturn, AggregateGroupByReturn]:
return self._to_result(group_by is not None, res)
return executor.execute(
response_callback=respGrpc,
method=self._connection.grpc_aggregate,
request=request,
)
| _NearTextExecutor |
python | readthedocs__readthedocs.org | readthedocs/oauth/services/bitbucket.py | {
"start": 622,
"end": 16428
} | class ____(UserService):
"""Provider service for Bitbucket."""
vcs_provider_slug = BITBUCKET
allauth_provider = BitbucketOAuth2Provider
base_api_url = "https://api.bitbucket.org"
# TODO replace this with a less naive check
url_pattern = re.compile(r"bitbucket.org")
https_url_pattern = re.compile(r"^https:\/\/[^@]+@bitbucket.org/")
def sync_repositories(self):
"""Sync repositories from Bitbucket API."""
remote_ids = []
# Get user repos
try:
repos = self.paginate(
"https://bitbucket.org/api/2.0/repositories/",
role="member",
)
for repo in repos:
remote_repository = self.create_repository(repo)
if remote_repository:
remote_ids.append(remote_repository.remote_id)
except (TypeError, ValueError):
log.warning("Error syncing Bitbucket repositories")
raise SyncServiceError(
SyncServiceError.INVALID_OR_REVOKED_ACCESS_TOKEN.format(
provider=self.allauth_provider.name
)
)
# Because privileges aren't returned with repository data, run query
# again for repositories that user has admin role for, and update
# existing repositories.
try:
resp = self.paginate(
"https://bitbucket.org/api/2.0/repositories/",
role="admin",
)
RemoteRepositoryRelation.objects.filter(
user=self.user,
account=self.account,
remote_repository__vcs_provider=self.vcs_provider_slug,
remote_repository__remote_id__in=[r["uuid"] for r in resp],
).update(admin=True)
except (TypeError, ValueError):
log.warning("Error syncing Bitbucket admin repositories")
return remote_ids
def sync_organizations(self):
"""
Sync Bitbucket workspaces (organizations).
This method only creates the relationships between the
organizations and the user, as all the repositories
are already created in the sync_repositories method.
"""
organization_remote_ids = []
try:
workspaces = self.paginate(
f"{self.base_api_url}/2.0/workspaces/",
role="member",
)
for workspace in workspaces:
remote_organization = self.create_organization(workspace)
remote_organization.get_remote_organization_relation(self.user, self.account)
organization_remote_ids.append(remote_organization.remote_id)
except ValueError:
log.warning("Error syncing Bitbucket organizations")
raise SyncServiceError(
SyncServiceError.INVALID_OR_REVOKED_ACCESS_TOKEN.format(
provider=self.allauth_provider.name
)
)
return organization_remote_ids, []
def create_repository(self, fields, privacy=None):
"""
Update or create a repository from Bitbucket API response.
.. note::
The :py:data:`admin` property is not set during creation, as
permissions are not part of the returned repository data from
Bitbucket.
:param fields: dictionary of response data from API
:param privacy: privacy level to support
:param organization: remote organization to associate with
:type organization: RemoteOrganization
:rtype: RemoteRepository
"""
privacy = privacy or settings.DEFAULT_PRIVACY_LEVEL
if any(
[
(privacy == "private"),
(fields["is_private"] is False and privacy == "public"),
]
):
repo, _ = RemoteRepository.objects.get_or_create(
remote_id=fields["uuid"], vcs_provider=self.vcs_provider_slug
)
self._update_repository_from_fields(repo, fields)
# The repositories API doesn't return the admin status of the user,
# so we default to False, and then update it later using another API call.
remote_repository_relation = repo.get_remote_repository_relation(
self.user, self.account
)
remote_repository_relation.admin = False
remote_repository_relation.save()
return repo
log.debug(
"Not importing repository because mismatched type.",
repository=fields["name"],
)
def update_repository(self, remote_repository: RemoteRepository):
# Bitbucket doesn't return the admin status of the user,
# so we need to infer it by filtering the repositories the user has admin/read access to.
repo_from_admin_access = self._get_repository(remote_repository, role="admin")
repo_from_member_access = self._get_repository(remote_repository, role="member")
repo = repo_from_admin_access or repo_from_member_access
relation = remote_repository.get_remote_repository_relation(self.user, self.account)
if not repo:
log.info(
"User no longer has access to the repository, removing remote relationship.",
remote_repository_id=remote_repository.remote_id,
)
relation.delete()
return
self._update_repository_from_fields(remote_repository, repo)
relation.admin = bool(repo_from_admin_access)
relation.save()
def _get_repository(self, remote_repository, role):
"""
Get a single repository by its remote ID where the user has a specific role.
Bitbucket doesn't provide an endpoint to get a single repository by its ID (it requires the group ID as well),
and it also doesn't return the user's role in the repository, so we filter the repositories by role
and then look for the repository with the matching ID.
"""
repos = self.paginate(
f"{self.base_api_url}/2.0/repositories/",
role=role,
q=f'uuid="{remote_repository.remote_id}"',
)
for repo in repos:
if repo["uuid"] == remote_repository.remote_id:
return repo
return None
def _update_repository_from_fields(self, repo, fields):
# All repositories are created under a workspace,
# which we consider an organization.
organization = self.create_organization(fields["workspace"])
repo.organization = organization
repo.name = fields["name"]
repo.full_name = fields["full_name"]
repo.description = fields["description"]
repo.private = fields["is_private"]
# Default to HTTPS, use SSH for private repositories
clone_urls = {u["name"]: u["href"] for u in fields["links"]["clone"]}
repo.clone_url = self.https_url_pattern.sub(
"https://bitbucket.org/",
clone_urls.get("https"),
)
repo.ssh_url = clone_urls.get("ssh")
if repo.private:
repo.clone_url = repo.ssh_url
repo.html_url = fields["links"]["html"]["href"]
repo.vcs = fields["scm"]
mainbranch = fields.get("mainbranch") or {}
repo.default_branch = mainbranch.get("name")
avatar_url = fields["links"]["avatar"]["href"] or ""
repo.avatar_url = re.sub(r"\/16\/$", r"/32/", avatar_url)
if not repo.avatar_url:
repo.avatar_url = self.default_user_avatar_url
repo.save()
def create_organization(self, fields):
"""
Update or create remote organization from Bitbucket API response.
:param fields: dictionary response of data from API
:rtype: RemoteOrganization
.. note::
This method caches organizations by their remote ID to avoid
unnecessary database queries, specially when creating
multiple repositories that belong to the same organization.
"""
organization_id = fields["uuid"]
if organization_id in self._organizations_cache:
return self._organizations_cache[organization_id]
organization, _ = RemoteOrganization.objects.get_or_create(
remote_id=organization_id,
vcs_provider=self.vcs_provider_slug,
)
organization.slug = fields.get("slug")
organization.name = fields.get("name")
organization.url = fields["links"]["html"]["href"]
organization.avatar_url = fields["links"]["avatar"]["href"]
if not organization.avatar_url:
organization.avatar_url = self.default_org_avatar_url
organization.save()
self._organizations_cache[organization_id] = organization
return organization
def get_next_url_to_paginate(self, response):
return response.json().get("next")
def get_paginated_results(self, response):
return response.json().get("values", [])
def get_webhook_data(self, project, integration):
"""Get webhook JSON data to post to the API."""
return json.dumps(
{
"description": "Read the Docs ({domain})".format(
domain=settings.PRODUCTION_DOMAIN,
),
"url": self.get_webhook_url(project, integration),
"active": True,
"secret": integration.secret,
"events": ["repo:push"],
}
)
def get_provider_data(self, project, integration):
"""
Gets provider data from Bitbucket Webhooks API.
:param project: project
:type project: Project
:param integration: Integration for the project
:type integration: Integration
:returns: Dictionary containing provider data from the API or None
:rtype: dict
"""
if integration.provider_data:
return integration.provider_data
owner, repo = build_utils.get_bitbucket_username_repo(url=project.repo)
url = f"{self.base_api_url}/2.0/repositories/{owner}/{repo}/hooks"
rtd_webhook_url = self.get_webhook_url(project, integration)
structlog.contextvars.bind_contextvars(
project_slug=project.slug,
integration_id=integration.pk,
url=url,
)
try:
resp = self.session.get(url)
if resp.status_code == 200:
recv_data = resp.json()
for webhook_data in recv_data["values"]:
if webhook_data["url"] == rtd_webhook_url:
integration.provider_data = webhook_data
integration.save()
log.info(
"Bitbucket integration updated with provider data for project.",
)
break
else:
log.info(
"Bitbucket project does not exist or user does not have permissions.",
)
except Exception:
log.exception(
"Bitbucket webhook Listing failed for project.",
)
return integration.provider_data
def setup_webhook(self, project, integration=None) -> bool:
"""
Set up Bitbucket project webhook for project.
:param project: project to set up webhook for
:type project: Project
:param integration: Integration for the project
:type integration: Integration
:returns: boolean based on webhook set up success, and requests Response object
"""
owner, repo = build_utils.get_bitbucket_username_repo(url=project.repo)
url = f"{self.base_api_url}/2.0/repositories/{owner}/{repo}/hooks"
if not integration:
integration, _ = Integration.objects.get_or_create(
project=project,
integration_type=Integration.BITBUCKET_WEBHOOK,
)
data = self.get_webhook_data(project, integration)
resp = None
structlog.contextvars.bind_contextvars(
project_slug=project.slug,
integration_id=integration.pk,
url=url,
)
try:
resp = self.session.post(
url,
data=data,
headers={"content-type": "application/json"},
)
if resp.status_code == 201:
recv_data = resp.json()
integration.provider_data = recv_data
integration.save()
log.debug(
"Bitbucket webhook creation successful for project.",
)
return True
if resp.status_code in [401, 403, 404]:
log.info(
"Bitbucket project does not exist or user does not have permissions.",
)
else:
try:
debug_data = resp.json()
except ValueError:
debug_data = resp.content
log.warning(
"Bitbucket webhook creation failed.",
debug_data=debug_data,
)
# Catch exceptions with request or deserializing JSON
except (RequestException, ValueError):
log.exception("Bitbucket webhook creation failed for project.")
return False
def update_webhook(self, project, integration) -> bool:
"""
Update webhook integration.
:param project: project to set up webhook for
:type project: Project
:param integration: Webhook integration to update
:type integration: Integration
:returns: boolean based on webhook set up success, and requests Response object
"""
structlog.contextvars.bind_contextvars(project_slug=project.slug)
provider_data = self.get_provider_data(project, integration)
# Handle the case where we don't have a proper provider_data set
# This happens with a user-managed webhook previously
if not provider_data:
return self.setup_webhook(project, integration)
data = self.get_webhook_data(project, integration)
resp = None
try:
# Expect to throw KeyError here if provider_data is invalid
url = provider_data["links"]["self"]["href"]
resp = self.session.put(
url,
data=data,
headers={"content-type": "application/json"},
)
if resp.status_code == 200:
recv_data = resp.json()
integration.provider_data = recv_data
integration.save()
log.info("Bitbucket webhook update successful for project.")
return True
# Bitbucket returns 404 when the webhook doesn't exist. In this
# case, we call ``setup_webhook`` to re-configure it from scratch
if resp.status_code == 404:
return self.setup_webhook(project, integration)
# Response data should always be JSON, still try to log if not though
try:
debug_data = resp.json()
except ValueError:
debug_data = resp.content
log.error(
"Bitbucket webhook update failed.",
debug_data=debug_data,
)
# Catch exceptions with request or deserializing JSON
except (KeyError, RequestException, TypeError, ValueError):
log.exception("Bitbucket webhook update failed for project.")
return False
| BitbucketService |
python | apache__avro | lang/py/avro/errors.py | {
"start": 3666,
"end": 3777
} | class ____(AvroException):
"""Raised when attempting IPC on a closed connection."""
| ConnectionClosedException |
python | optuna__optuna | optuna/visualization/_rank.py | {
"start": 1380,
"end": 1490
} | class ____(NamedTuple):
name: str
range: tuple[float, float]
is_log: bool
is_cat: bool
| _AxisInfo |
python | pytorch__pytorch | test/inductor/test_triton_kernels.py | {
"start": 2783,
"end": 97341
} | class ____(torch._inductor.test_case.TestCase):
def _kernel_launched_in_code(self, kernel_name: str, code: str) -> bool:
if inductor_config.cpp_wrapper:
return f"launchKernel({kernel_name}" in code
return f"{kernel_name}.run(" in code
@requires_gpu
def test_triton_kernel_with_kernel_param(self):
@triton.jit
def pass_kernel(kernel):
pass
@torch.compile(backend="eager")
def f(x):
grid = (x.numel(),)
pass_kernel[grid](kernel=x)
t1 = torch.rand(5, device=GPU_TYPE)
f(t1)
# No need to assert anything, the goal is to make sure dynamo does
# not crash
@requires_gpu
def test_triton_kernel_higher_order_func(self):
from torch._higher_order_ops.triton_kernel_wrap import kernel_side_table
add_kernel_id = kernel_side_table.add_kernel(add_kernel)
t1 = torch.rand(5, device=GPU_TYPE)
t2 = torch.rand(5, device=GPU_TYPE)
torch_add = t1 + t2
# Test higher order function with mutation
output = torch.zeros_like(t1)
n_elements = output.numel()
constant_args_idx = kernel_side_table.add_constant_args(
{"n_elements": n_elements, "BLOCK_SIZE": 16}
)
grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
triton_kernel_wrapper_mutation(
kernel_idx=add_kernel_id,
constant_args_idx=constant_args_idx,
grid=[grid],
tma_descriptor_metadata={},
kwargs={
"in_ptr0": t1,
"in_ptr1": t2,
"out_ptr": output,
},
)
self.assertEqual(output, torch_add)
# Make sure it is modified
self.assertNotEqual(output, torch.zeros_like(t1))
# Test higher order function without mutation
output = torch.zeros_like(t1)
out_dict = triton_kernel_wrapper_functional(
kernel_idx=add_kernel_id,
constant_args_idx=constant_args_idx,
grid=[grid],
tma_descriptor_metadata={},
kwargs={
"in_ptr0": t1,
"in_ptr1": t2,
"out_ptr": output,
},
tensors_to_clone=["in_ptr0", "in_ptr1", "out_ptr"],
)
self.assertEqual(out_dict["out_ptr"], torch_add)
# Make sure it is NOT modified
self.assertEqual(output, torch.zeros_like(t1))
@requires_gpu
def test_triton_kernel_functionalize(self):
from functorch import make_fx
from torch._higher_order_ops.triton_kernel_wrap import kernel_side_table
from torch._subclasses.functional_tensor import (
CppFunctionalizeAPI,
FunctionalTensorMode,
PythonFunctionalizeAPI,
)
kernel_side_table.reset_table()
def f(x, output):
out = triton_kernel_wrapper_functional(
kernel_idx=kernel_side_table.add_kernel(mul2_kernel),
constant_args_idx=kernel_side_table.add_constant_args(
{"n_elements": output.numel(), "BLOCK_SIZE": 16}
),
grid=[(x.numel(),)],
tma_descriptor_metadata={},
kwargs={
"in_ptr0": x,
"out_ptr": output,
},
tensors_to_clone=["in_ptr0", "out_ptr"],
)
return out["out_ptr"]
t1 = torch.rand(5, device=GPU_TYPE)
t2 = torch.rand(5, device=GPU_TYPE)
with FunctionalTensorMode():
gm = make_fx(PythonFunctionalizeAPI().functionalize(f))(t1, t2)
# Make sure t2 was not modified
self.assertNotEqual(gm(t1, t2), t2)
gm = make_fx(CppFunctionalizeAPI().functionalize(f))(t1, t2)
# Make sure t2 was not modified
self.assertNotEqual(gm(t1, t2), t2)
gm = make_fx(torch.func.functionalize(f))(t1, t2)
# Make sure t2 was not modified
self.assertNotEqual(gm(t1, t2), t2)
gm = make_fx(f, tracing_mode="fake")(t1, t2)
self.assertExpectedInline(
gm.code.strip(),
"""\
def forward(self, x_1, output_1):
triton_kernel_wrapper_functional_proxy = torch.ops.higher_order.triton_kernel_wrapper_functional(kernel_idx = 0, constant_args_idx = 3, grid = [(5,)], tma_descriptor_metadata = {}, kwargs = {'in_ptr0': x_1, 'out_ptr': output_1}, tensors_to_clone = ['in_ptr0', 'out_ptr']); x_1 = output_1 = None
getitem = triton_kernel_wrapper_functional_proxy['in_ptr0']; getitem = None
getitem_1 = triton_kernel_wrapper_functional_proxy['out_ptr']; triton_kernel_wrapper_functional_proxy = None
return getitem_1""",
)
@requires_gpu
def test_triton_kernel_mutation_type(self):
from torch._higher_order_ops.triton_kernel_wrap import kernel_side_table
from torch._subclasses.fake_tensor import FakeTensorMode
from torch._subclasses.functional_tensor import (
FunctionalTensor,
FunctionalTensorMode,
)
def prep():
x = torch.ones(4, device=GPU_TYPE, requires_grad=True)
with FunctionalTensorMode():
x_func = FunctionalTensor.to_functional(x)
self.assertTrue(torch._is_functional_tensor(x_func.elem))
return x_func
# normal mutation only
with FakeTensorMode():
x_func = prep()
with FunctionalTensorMode():
x_func.mul_(2)
self.assertFalse(
torch._functionalize_are_all_mutations_hidden_from_autograd(x_func.elem)
)
# triton kernel mutation only
with FakeTensorMode():
x_func = prep()
with FunctionalTensorMode():
triton_kernel_wrapper_mutation(
kernel_idx=kernel_side_table.add_kernel(mul2_inplace_kernel),
constant_args_idx=kernel_side_table.add_constant_args(
{"n_elements": x_func.numel(), "BLOCK_SIZE": 16}
),
grid=[(x_func.numel(),)],
tma_descriptor_metadata={},
kwargs={
"ptr": x_func,
},
)
self.assertTrue(
torch._functionalize_are_all_mutations_hidden_from_autograd(x_func.elem)
)
# normal mutation + triton kernel mutation
with FakeTensorMode():
x_func = prep()
with FunctionalTensorMode():
x_func.mul_(2)
triton_kernel_wrapper_mutation(
kernel_idx=kernel_side_table.add_kernel(mul2_inplace_kernel),
constant_args_idx=kernel_side_table.add_constant_args(
{"n_elements": x_func.numel(), "BLOCK_SIZE": 16}
),
grid=[(x_func.numel(),)],
tma_descriptor_metadata={},
kwargs={
"ptr": x_func,
},
)
self.assertFalse(
torch._functionalize_are_all_mutations_hidden_from_autograd(x_func.elem)
)
@requires_gpu
@common_utils.parametrize("dynamic", [False, True])
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_triton_kernel_with_views(self, dynamic, backend):
def call_triton_take_view(x: torch.Tensor):
output = torch.zeros_like(x)
n_elements = output.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
mul2_kernel[grid](x, output, n_elements, BLOCK_SIZE=16)
return output
def call_triton_return_view(x: torch.Tensor):
output = torch.zeros_like(x)
n_elements = output.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
mul2_kernel[grid](x, output, n_elements, BLOCK_SIZE=16)
return output.view(4, 4)
t = torch.rand(4, 4, device=GPU_TYPE)
t_view = t.view(16)
compiled_func = torch.compile(
call_triton_take_view, backend=backend, fullgraph=True, dynamic=dynamic
)
self.assertEqual(2 * t_view, compiled_func(t_view))
self.assertEqual(2 * t, compiled_func(t_view).view(4, 4))
compiled_func = torch.compile(
call_triton_return_view, backend=backend, fullgraph=True, dynamic=dynamic
)
self.assertEqual(2 * t_view, compiled_func(t).view(16))
self.assertEqual(2 * t, compiled_func(t))
@requires_gpu
def test_no_nan_kernels(self):
@triton.jit
def add_one_kernel(
in_ptr0,
out_ptr,
n_elements,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
output = x + 1
tl.store(out_ptr + offsets, output, mask=mask)
def add_one(x, out):
n_elements = x.numel()
add_one_kernel[(n_elements,)](x, out, n_elements, BLOCK_SIZE=4)
class AddOne(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
out = torch.empty_like(x)
add_one(x, out)
ctx.save_for_backward(out)
return out
@staticmethod
def backward(ctx, grad):
(saved,) = ctx.saved_tensors
out = torch.empty_like(grad)
add_one(saved, out)
return out
@torch.compile
def f(x):
return AddOne.apply(x)
log_stream, ctx = logs_to_string("torch._inductor.codecache", "output_code")
x = torch.randn(3, requires_grad=True, device=GPU_TYPE)
with ctx():
y = f(x)
output_code = "\n".join(log_stream.getvalue().strip().split("\n")[3:]).strip()
self.assertTrue(len(output_code) > 0, msg="output code is not empty")
if inductor_config.cpp_wrapper:
self.assertEqual(
output_code.count("std::numeric_limits<double>::quiet_NaN()"), 0
)
else:
self.assertEqual(output_code.count('float("nan")'), 0)
self.assertEqual(output_code.count("float('nan')"), 0)
@requires_gpu
@common_utils.parametrize("grad_fn", [torch.no_grad, torch.enable_grad])
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_triton_kernel_with_grad_option(self, grad_fn, backend):
def call_triton(x: torch.Tensor):
with grad_fn():
output = torch.zeros_like(x)
n_elements = output.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
mul2_kernel[grid](x, output, n_elements, BLOCK_SIZE=16)
return output
t = torch.rand(5, device=GPU_TYPE)
compiled_func = torch.compile(call_triton, backend=backend, fullgraph=True)
self.assertEqual(2 * t, compiled_func(t))
@requires_gpu
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_triton_kernel_inner_triton_function(self, backend):
def f(x: torch.Tensor):
@triton.jit
def pow2_kernel(
in_ptr0,
out_ptr,
n_elements,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
output = x * x
tl.store(out_ptr + offsets, output, mask=mask)
output = torch.zeros_like(x)
n_elements = output.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
pow2_kernel[grid](x, output, n_elements, BLOCK_SIZE=16)
return output
t = torch.rand(5, device=GPU_TYPE)
compiled_func = torch.compile(f, backend=backend, fullgraph=True)
# TODO(oulgen): NYI - Support this
# self.assertEqual(t * t, compiled_func(t))
@requires_gpu
@common_utils.parametrize("grad", [False, True])
@common_utils.parametrize("dynamic", [False, True])
@inductor_config.patch("implicit_fallbacks", False)
def test_triton_kernel_no_clones(self, grad, dynamic):
from torch._inductor.utils import run_and_get_code
def call_triton(x: torch.Tensor, y: torch.Tensor, output: torch.Tensor):
n_elements = output.numel()
tmp = torch.add(x, 1)
grid = (x.numel(),)
add_kernel.run(
x, y, output, n_elements, warmup=False, grid=grid, BLOCK_SIZE=16
)
return output, tmp
t1 = torch.rand(5, device=GPU_TYPE, requires_grad=grad)
t2 = torch.rand(5, device=GPU_TYPE, requires_grad=grad)
o1 = torch.zeros_like(t1, requires_grad=grad)
torch_add = call_triton(t1, t2, o1)
metrics.reset()
o2 = torch.zeros_like(t1, requires_grad=grad)
test, (code,) = run_and_get_code(
torch.compile(call_triton, dynamic=dynamic), t1, t2, o2
)
if not grad:
self.assertEqual(metrics.generated_kernel_count, 1)
self.assertEqual(torch_add, test)
# These two asserts are not optimal since it requires original aten
# to be in the metadata, so there might be false negatives
self.assertNotIn(
"aoti_torch_copy_" if inductor_config.cpp_wrapper else "aten.copy", code
)
self.assertNotIn(
"aoti_torch_clone" if inductor_config.cpp_wrapper else "aten.clone", code
)
# The following checks that there are only the tensor output is in
# the compiled graph
if dynamic and grad:
if inductor_config.cpp_wrapper:
self.assertIn("output_handles[0] = ", code)
self.assertIn("output_handles[1] = ", code)
else:
self.assertIn("return (buf0, s92, )", code)
else:
self.assertIn(
"output_handles[0] = "
if inductor_config.cpp_wrapper
else "return (buf0, )",
code,
)
@requires_gpu
def test_triton_kernel_caching(self):
from torch._inductor.utils import run_and_get_code
def add_in_loop(
x: torch.Tensor,
y: torch.Tensor,
):
output = torch.zeros_like(x)
n_elements = output.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
add_kernel_autotuned[grid](x, y, output, n_elements)
return output
def call_triton_add(
x: torch.Tensor,
y: torch.Tensor,
):
for _ in range(4):
x = add_in_loop(x, y)
return x
t1 = torch.ones(5, device=GPU_TYPE)
t2 = torch.ones(5, device=GPU_TYPE)
test, (code,) = run_and_get_code(torch.compile(call_triton_add), t1, t2)
self.assertEqual(test, 5 * torch.ones(5, device=GPU_TYPE))
self.assertTrue("add_kernel_autotuned_1.run" not in code)
@requires_gpu
def test_triton_kernel_caching_duplicate(self):
from torch._inductor.utils import run_and_get_code
class C:
@triton.jit
def pass_kernel(
in_ptr0,
out_ptr,
n_elements,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
tl.store(out_ptr + offsets, x, mask=mask)
class D:
@triton.jit
def pass_kernel(
in_ptr0,
out_ptr,
n_elements,
BLOCK_SIZE: "tl.constexpr",
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
tl.store(out_ptr + offsets, x, mask=mask)
def call_triton(x: torch.Tensor):
output1 = torch.zeros_like(x)
output2 = torch.zeros_like(x)
n_elements = output1.numel()
grid = (n_elements,)
C.pass_kernel[grid](x, output1, n_elements, BLOCK_SIZE=16)
D.pass_kernel[grid](x, output2, n_elements, BLOCK_SIZE=16)
return output1 + output2
t = torch.ones(5, device=GPU_TYPE)
test, (code,) = run_and_get_code(torch.compile(call_triton), t)
# Make sure we emitted two kernels here
self.assertTrue(self._kernel_launched_in_code("pass_kernel_0", code))
self.assertTrue(self._kernel_launched_in_code("pass_kernel_1", code))
@requires_gpu
def test_triton_kernel_various_args(self):
@triton.autotune(
configs=[triton.Config({"BLOCK_SIZE": 128})],
key=[],
)
@triton.jit
def pass_kernel(
out_ptr,
n_elements,
dummy_None,
dummy_empty,
dummy_float,
BLOCK_SIZE: "tl.constexpr",
RANDOM_SIZE: "tl.constexpr",
):
pass
@torch.compile
def call_triton(output):
n_elements = output.numel()
grid = (n_elements,)
pass_kernel[grid](
output,
n_elements,
None,
torch.empty_like(output),
3.1415926,
RANDOM_SIZE=0,
)
return output
output = torch.randn(5, device=GPU_TYPE)
# Make sure this does not crash
call_triton(output)
@requires_gpu
def test_triton_kernel_dependancies(self):
def call_triton(
x: torch.Tensor,
y: torch.Tensor,
):
output = torch.zeros_like(x)
n_elements = output.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
add_kernel_autotuned[grid](x, y, output, n_elements)
output2 = torch.zeros_like(output)
add_kernel_autotuned[grid](output, y, output2, n_elements)
output3 = torch.add(output2, 1)
return output3
t1 = torch.rand(5, device=GPU_TYPE)
t2 = torch.rand(5, device=GPU_TYPE)
torch_result = call_triton(t1, t2)
compiled_result = torch.compile(call_triton)(t1, t2)
self.assertEqual(torch_result, compiled_result)
@requires_gpu
def test_triton_kernel_reinplace_inplaceable_pass(self):
def call_triton(
x: torch.Tensor,
y: torch.Tensor,
):
output = torch.zeros_like(x)
n_elements = output.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
add_kernel_autotuned[grid](x, y, output, n_elements)
add_kernel_autotuned[grid](output, x, output, n_elements)
return output
t1 = torch.rand(5, device=GPU_TYPE)
t2 = torch.rand(5, device=GPU_TYPE)
torch_result = call_triton(t1, t2)
compiled_result = torch.compile(call_triton)(t1, t2)
self.assertEqual(torch_result, compiled_result)
@requires_gpu
@common_utils.parametrize("grad", [False, True])
def test_triton_kernel_multi_kernel(self, grad):
    """One compiled function launches a kernel that itself calls another JIT'd
    kernel (indirection_kernel), on both float and int tensors, with a
    string-valued constexpr ACTIVATION selecting extra behavior."""
    @triton.jit
    def mul2_and_add_and_zero_negatives_kernel(
        in_ptr0,
        in_ptr1,
        out_ptr,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
        ACTIVATION: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        # In-place doubling of both inputs via a nested JIT kernel call,
        # dispatched by name through indirection_kernel.
        indirection_kernel(
            in_ptr0,
            in_ptr0,
            n_elements,
            BLOCK_SIZE=BLOCK_SIZE,
            ACTIVATION="mul2_inplace_kernel",
        )
        indirection_kernel(
            in_ptr1,
            in_ptr1,
            n_elements,
            BLOCK_SIZE=BLOCK_SIZE,
            ACTIVATION="mul2_inplace_kernel",
        )
        x = tl.load(in_ptr0 + offsets, mask=mask)
        y = tl.load(in_ptr1 + offsets, mask=mask)
        output = x + y
        if ACTIVATION == "zero_negs":
            output = zero_negs(output)
        tl.store(out_ptr + offsets, output, mask=mask)
    @torch.compile
    def call_triton(
        x: torch.Tensor,
        y: torch.Tensor,
        xi: torch.Tensor,
        yi: torch.Tensor,
        output: torch.Tensor,
        outputi: torch.Tensor,
    ):
        n_elements = output.numel()
        grid = (x.numel(),)
        # Float path: negatives are zeroed; int path: ACTIVATION disabled.
        mul2_and_add_and_zero_negatives_kernel[grid](
            x, y, output, n_elements, BLOCK_SIZE=16, ACTIVATION="zero_negs"
        )
        mul2_and_add_and_zero_negatives_kernel[grid](
            xi, yi, outputi, n_elements, BLOCK_SIZE=16, ACTIVATION=None
        )
        return (output, outputi)
    t1 = torch.tensor(
        [-2.0, -1.0, 0.0, 1.0, 2.0], device=GPU_TYPE, requires_grad=grad
    )
    t2 = torch.tensor(
        [-2.0, -1.0, 0.0, 1.0, 2.0], device=GPU_TYPE, requires_grad=grad
    )
    # Expected float result: 2*t1 + 2*t2, then clamp negatives to 0.
    float_result = 2 * t1 + 2 * t2
    float_result = float_result.where(float_result >= 0, 0.0)
    t1i = torch.randint(-2, 2, (5,), device=GPU_TYPE)
    t2i = torch.randint(-2, 2, (5,), device=GPU_TYPE)
    o = torch.zeros_like(t1, requires_grad=grad)
    oi = torch.zeros_like(t1i)
    int_result = 2 * t1i + 2 * t2i
    (result, resulti) = call_triton(t1, t2, t1i, t2i, o, oi)
    self.assertEqual(float_result, result)
    self.assertEqual(int_result, resulti)
@requires_gpu
def test_triton_kernel_constants(self):
    """Triton kernels capture module-level constants at parse time, not at
    launch time; compiled and eager must agree even after the global is
    rebound at runtime."""
    @triton.jit
    def mulC_kernel(
        in_ptr0,
        out_ptr,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
        CONSTANT_NAME: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(in_ptr0 + offsets, mask=mask)
        # CONSTANT_C / STRING_CONSTANT_C / BOOL_CONSTANT_C are module-level
        # globals captured by the JIT at parse time.
        if CONSTANT_NAME == STRING_CONSTANT_C:
            output = CONSTANT_C * x
        if BOOL_CONSTANT_C:
            output *= CONSTANT_C
        tl.store(out_ptr + offsets, output, mask=mask)
    def call_triton(
        x: torch.Tensor,
    ):
        output = torch.zeros_like(x)
        n_elements = output.numel()
        grid = (x.numel(),)
        mulC_kernel[grid](
            x, output, n_elements, BLOCK_SIZE=16, CONSTANT_NAME="CONSTANT_C"
        )
        return output
    # Triton kernels capture global constants by their parse time value
    # not runtime value
    global CONSTANT_C
    prev_c = CONSTANT_C
    # If the behavior of triton kernels change, this test will fail
    CONSTANT_C = tl.constexpr(10)
    assert CONSTANT_C != prev_c
    t = torch.randn(5, device=GPU_TYPE)
    torch_result = call_triton(t)
    compiled_result = torch.compile(call_triton)(t)
    self.assertEqual(torch_result, compiled_result)
    # reset back
    CONSTANT_C = prev_c
@requires_gpu
@common_utils.parametrize("grad", [False, True])
@common_utils.parametrize("dynamic", [False, True])
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
@common_utils.parametrize("grid_type", [1, 2, 3])
def test_triton_kernel_autotune(self, grad, dynamic, backend, grid_type):
    """An autotuned kernel launched with three grid spellings (tuple, lambda,
    named function) must compile with fullgraph=True and match eager."""
    def call_triton(x: torch.Tensor, y: torch.Tensor, output: torch.Tensor):
        n_elements = output.numel()
        def grid_fn(meta):
            return (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        if grid_type == 1:
            grid = (n_elements,)
        elif grid_type == 2:
            grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        elif grid_type == 3:
            grid = grid_fn
        add_kernel_autotuned[grid](x, y, output, n_elements)
        return output
    t1 = torch.rand(256, device=GPU_TYPE, requires_grad=grad)
    t2 = torch.rand(256, device=GPU_TYPE, requires_grad=grad)
    output = torch.zeros_like(t1, requires_grad=grad)
    torch_add = call_triton(t1, t2, output)
    compiled_func = torch.compile(
        call_triton, backend=backend, fullgraph=True, dynamic=dynamic
    )
    # Fresh output buffer so the eager run's result is not overwritten.
    output2 = torch.zeros_like(t1, requires_grad=grad)
    self.assertEqual(compiled_func(t1, t2, output2), torch_add)
@requires_gpu
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
@inductor_config.patch("unsafe_ignore_unsupported_triton_autotune_args", True)
def test_triton_kernel_autotune_with_unsupported_args(self, backend):
    """With unsafe_ignore_unsupported_triton_autotune_args patched on, a
    kernel whose autotune config uses unsupported args still compiles and
    matches eager."""
    def call_triton(x: torch.Tensor, y: torch.Tensor):
        output = torch.zeros_like(x)
        n_elements = output.numel()
        add_kernel_autotuned_with_unsupported_args[(n_elements,)](
            x, y, output, n_elements
        )
        return output
    t1 = torch.rand(256, device=GPU_TYPE)
    t2 = torch.rand(256, device=GPU_TYPE)
    torch_add = call_triton(t1, t2)
    compiled_func = torch.compile(call_triton, backend=backend, fullgraph=True)
    compiled_add = compiled_func(t1, t2)
    self.assertEqual(compiled_add, torch_add)
@requires_gpu
@common_utils.parametrize("grad", [False, True])
@common_utils.parametrize("dynamic", [False, True])
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
@common_utils.parametrize("grid_type", [1, 2, 3])
@common_utils.parametrize("tdlp", ["0", "1"])
def test_triton_kernel_2d_autotune(self, grad, dynamic, backend, grid_type, tdlp):
    """2D autotuned kernel with the three grid spellings, also toggling
    TORCHINDUCTOR_DUMP_LAUNCH_PARAMS via the env var."""
    import os
    # NOTE(review): the env var is set but never restored, so it leaks into
    # subsequent tests — presumably intentional/ harmless here; confirm.
    os.environ["TORCHINDUCTOR_DUMP_LAUNCH_PARAMS"] = tdlp
    def call_triton(x: torch.Tensor, y: torch.Tensor, output: torch.Tensor):
        x_elements = output.size()[0]
        y_elements = output.size()[1]
        def grid_fn(meta):
            return (
                triton.cdiv(x_elements, meta["BLOCK_SIZE_X"]),
                triton.cdiv(y_elements, meta["BLOCK_SIZE_Y"]),
            )
        if grid_type == 1:
            grid = (x_elements, y_elements)
        elif grid_type == 2:
            grid = lambda meta: (
                triton.cdiv(x_elements, meta["BLOCK_SIZE_X"]),
                triton.cdiv(y_elements, meta["BLOCK_SIZE_Y"]),
            )
        elif grid_type == 3:
            grid = grid_fn
        add_kernel_2d_autotuned[grid](x, y, output, x_elements, y_elements)
        return output
    t1 = torch.rand((512, 256), device=GPU_TYPE, requires_grad=grad)
    t2 = torch.rand((512, 256), device=GPU_TYPE, requires_grad=grad)
    output = torch.zeros_like(t1, requires_grad=grad)
    torch_result = call_triton(t1, t2, output)
    compiled_func = torch.compile(
        call_triton, backend=backend, fullgraph=True, dynamic=dynamic
    )
    output2 = torch.zeros_like(t1, requires_grad=grad)
    self.assertEqual(compiled_func(t1, t2, output2), torch_result)
@requires_gpu
@common_utils.parametrize("dynamic", [False, True])
def test_triton_kernel_tracing(self, dynamic):
    """make_fx tracing (fake/symbolic) of capture_triton-wrapped kernels,
    across grid spellings (tuple, lambda, function, list), positional vs
    keyword BLOCK_SIZE, and autotuned vs plain kernels."""
    def call_triton_add(
        x: torch.Tensor,
        y: torch.Tensor,
        grid_type: int,
        num=1,
        positional=False,
        autotuned=False,
    ):
        output = torch.empty_like(x)
        n_elements = output.numel()
        def grid_fn(meta):
            # Uses `num` (closure) rather than n_elements on purpose.
            return (triton.cdiv(num, meta["BLOCK_SIZE"]),)
        if grid_type == 0:
            grid = (x.numel(),)
        elif grid_type == 1:
            grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        elif grid_type == 2:
            grid = grid_fn
        else:
            # Grid given as a list instead of a tuple.
            grid = [x.numel()]
        if autotuned:
            capture_triton(add_kernel_autotuned)[grid](x, y, output, n_elements)
        else:
            if positional:
                capture_triton(add_kernel)[grid](x, y, output, n_elements, 16)
            else:
                capture_triton(add_kernel)[grid](
                    x, y, output, n_elements, BLOCK_SIZE=16
                )
        return output
    t0 = torch.rand(5, device=GPU_TYPE, requires_grad=True)
    t1 = torch.rand(5, device=GPU_TYPE, requires_grad=True)
    t2 = torch.rand(5, device=GPU_TYPE, requires_grad=True)
    t3 = torch.rand(5, device=GPU_TYPE, requires_grad=True)
    torch_add = t2 + t3
    tests = [
        functools.partial(call_triton_add, grid_type=0),
        functools.partial(call_triton_add, grid_type=1),
        functools.partial(call_triton_add, grid_type=1, num=1, positional=True),
        functools.partial(call_triton_add, grid_type=2, num=200),
        functools.partial(call_triton_add, grid_type=3),
        functools.partial(call_triton_add, grid_type=0, autotuned=True),
        functools.partial(call_triton_add, grid_type=1, num=1, autotuned=True),
        functools.partial(call_triton_add, grid_type=2, num=200, autotuned=True),
        functools.partial(call_triton_add, grid_type=3, autotuned=True),
    ]
    from functorch import make_fx
    tracing_mode = "symbolic" if dynamic else "fake"
    for test in tests:
        # Trace with (t0, t1) to check make_fx succeeds, then execute the
        # original callable eagerly on (t2, t3) and verify numerics.
        gm = make_fx(test, tracing_mode=tracing_mode)(t0, t1)
        result = test(t2, t3)
        self.assertEqual(result, torch_add)
@requires_gpu
@common_utils.parametrize("grad", [False, True])
@common_utils.parametrize("dynamic", [False, True])
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
@inductor_config.patch("implicit_fallbacks", False)
def test_triton_kernel_native(self, grad, dynamic, backend):
    """End-to-end add_kernel launches, first without Dynamo and then compiled,
    covering tuple/lambda/function grids and positional vs keyword BLOCK_SIZE."""
    def call_triton_add(
        x: torch.Tensor,
        y: torch.Tensor,
        output: torch.Tensor,
        grid_type: int,
        num=1,
        positional=False,
    ):
        n_elements = output.numel()
        def grid_fn(meta):
            return (triton.cdiv(num, meta["BLOCK_SIZE"]),)
        if grid_type == 0:
            grid = (x.numel(),)
        elif grid_type == 1:
            grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        else:
            grid = grid_fn
        if positional:
            add_kernel[grid](x, y, output, n_elements, 16)
        else:
            add_kernel[grid](x, y, output, n_elements, BLOCK_SIZE=16)
        return output
    t1 = torch.rand(5, device=GPU_TYPE, requires_grad=grad)
    t2 = torch.rand(5, device=GPU_TYPE, requires_grad=grad)
    o1 = torch.zeros_like(t1, requires_grad=grad)
    torch_add = t1 + t2
    # No Dynamo -- Make sure triton kernel works
    self.assertEqual(call_triton_add(t1, t2, o1, 1), torch_add)
    # No Dynamo -- Make sure triton kernel works (with positional BLOCK_SIZE)
    o2 = torch.zeros_like(t1, requires_grad=grad)
    self.assertEqual(call_triton_add(t1, t2, o2, 1, True), torch_add)
    # With Dynamo
    compiled_func = torch.compile(
        call_triton_add, backend=backend, fullgraph=True, dynamic=dynamic
    )
    # With simple kernel
    o3 = torch.zeros_like(t1, requires_grad=grad)
    self.assertEqual(compiled_func(t1, t2, o3, 0), torch_add)
    # With lambda kernel
    o4 = torch.zeros_like(t1, requires_grad=grad)
    self.assertEqual(compiled_func(t1, t2, o4, 1), torch_add)
    # With lambda kernel (with positional BLOCK_SIZE)
    o5 = torch.zeros_like(t1, requires_grad=grad)
    self.assertEqual(compiled_func(t1, t2, o5, 1, 1, True), torch_add)
    # With user defined function kernel
    o6 = torch.zeros_like(t1, requires_grad=grad)
    self.assertEqual(compiled_func(t1, t2, o6, 2, 200), torch_add)
@requires_gpu
def test_triton_kernel_mutation_not_mark_dirty(self):
    """A kernel mutating a leaf-derived tensor inside torch.compile must not
    mark it dirty for autograd: backward through the pre-mutation graph
    (out = x_cloned.sin()) should still succeed after f(x_cloned)."""
    @torch.compile
    def f(x):
        n_elements = x.numel()
        # x is input and output: an in-place mutation through the kernel.
        add_kernel[(n_elements,)](x, x, x, n_elements, 16)
        return x
    x = torch.randn(5, device=GPU_TYPE, requires_grad=True)
    x_cloned = x.clone()
    out = x_cloned.sin()
    f(x_cloned)
    # Would raise if the kernel mutation had marked x_cloned dirty.
    out.sum().backward()
@requires_gpu
@inductor_config.patch("allow_buffer_reuse", True)
def test_triton_kernel_inputs_buffer_reuse(self):
    """Chained kernel launches should ping-pong between two buffers rather
    than allocating a fresh one per launch; asserts on the generated code
    text (allocation count == 2, reuse count == 3)."""
    def _mul2(x):
        y = torch.empty_like(x)
        mul2_kernel[(10,)](
            in_ptr0=x,
            out_ptr=y,
            n_elements=x.numel(),
            BLOCK_SIZE=1,
        )
        return y
    @torch.compile
    def f(x):
        for _ in range(4):
            # The output of one kernel is the input to the next kernel, but
            # at some point we should reuse buffers not allocate new ones.
            x = _mul2(x)
        return x + 1
    x = torch.randn(10, device=GPU_TYPE, dtype=torch.float32)
    eager_out = f(x)
    compiled_out, (code,) = run_and_get_code(torch.compile(f), x)
    self.assertEqual(compiled_out, eager_out)
    # Check that we're allocating the minimal # of buffers.
    # The allocation spelling differs between cpp_wrapper and Python wrapper.
    code_string = (
        "aoti_torch_empty_strided("
        if inductor_config.cpp_wrapper
        else f"empty_strided_{GPU_TYPE}((10, ), (1, ), torch.float32)"
    )
    num_bufs_allocated = code.count(code_string)
    self.assertEqual(num_bufs_allocated, 2)
    # Check we're reusing buffers if not allocating.
    num_bufs_reused = code.count(
        "// reuse" if inductor_config.cpp_wrapper else "# reuse"
    )
    self.assertEqual(num_bufs_reused, 3)
@requires_gpu
def test_triton_kernel_matmul_tracking(self):
    """A kernel-mutated buffer feeding torch.mm: the mutation (filling `out`
    with ones) must be tracked by the compiler so mm sees the written values."""
    @triton.jit
    def ones_kernel(x_ptr, n_elements, BLOCK_SIZE: "tl.constexpr"):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = 1.0
        tl.store(x_ptr + offsets, x, mask=mask)
    @torch.compile
    def f(x):
        out = torch.zeros_like(x)
        # Fills all 16 elements of the 4x4 `out` with 1.0.
        ones_kernel[(4,)](out, 16, BLOCK_SIZE=16)
        return torch.mm(out, x) + 10
    x = torch.randn(4, 4, device=GPU_TYPE)
    torch_out = f(x)
    python_out = torch.mm(torch.ones(4, 4, device=GPU_TYPE), x) + 10
    self.assertEqual(torch_out, python_out)
@requires_gpu
def test_triton_kernel_strided_input(self):
    """Non-contiguous kernel input: `left` from torch.split keeps the parent's
    row stride (256) while being 128 wide; compiled must match eager."""
    def f(inp):
        # left has strides [256, 1]
        left, right = torch.split(inp, [128, 128], dim=1)
        out = torch.empty_like(left)
        X_BLOCK_SIZE, Y_BLOCK_SIZE = 32, 16
        grid = (left.size(1) // X_BLOCK_SIZE, left.size(0) // Y_BLOCK_SIZE)
        double_strided_kernel[grid](
            in_ptr=left,
            out_ptr=out,
            in_y_stride=left.stride(0),
            out_y_stride=out.stride(0),
            X_BLOCK_SIZE=X_BLOCK_SIZE,
            Y_BLOCK_SIZE=Y_BLOCK_SIZE,
        )
        return out
    inp = torch.randn(64, 256, device=GPU_TYPE)
    eager_out = f(inp)
    compiled_out = torch.compile(f)(inp)
    self.assertEqual(compiled_out, eager_out)
@inductor_config.patch(
    triton_kernel_default_layout_constraint="needs_fixed_stride_order"
)
@requires_gpu
def test_layout_constraint_needs_fixed_stride_order(self):
    """Inductor must coerce a kernel argument back to its trace-time strides
    when the lowering chose different ones, under the
    needs_fixed_stride_order layout constraint."""
    # Construct a custom op whose output strides are (1, 2)
    @torch.library.custom_op("mylib::weird_op_with_lowering", mutates_args={})
    def weird_op_with_lowering(x: torch.Tensor) -> torch.Tensor:
        return torch.empty_strided((2, 2), (1, 2), dtype=x.dtype, device=x.device)
    @weird_op_with_lowering.register_fake
    def _(x):
        return torch.empty_strided((2, 2), (1, 2), dtype=x.dtype, device=x.device)
    # The lowering for the custom op produces output strides (2, 1).
    from torch._inductor.lowering import empty_strided, register_lowering
    @register_lowering(torch.ops.mylib.weird_op_with_lowering)
    def _(x):
        return empty_strided(
            x.shape, (2, 1), dtype=x.dtype, device=torch.device(GPU_TYPE, 0)
        )
    # Triton kernel that has different behavior depending on the input strides.
    @triton.jit
    def kernel(
        in_ptr0,
        out_ptr,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        # Writes linear offsets, so the result depends on out_ptr's layout.
        output = offsets
        tl.store(out_ptr + offsets, output, mask=mask)
    def arange_out(x, out):
        n_elements = x.numel()
        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        kernel[grid](x, out, n_elements, BLOCK_SIZE=4)
    def f(x):
        y = weird_op_with_lowering(x)
        # Inductor lowering will decide that y is better having strides (2, 1).
        # This is different from the strides at tracing time (1, 2).
        # Under the "needs_fixed_stride_order" config, inductor will coerce
        # y to have strides (1, 2) before passing it to arange_out.
        # If it doesn't, then the result will be different from eager mode.
        arange_out(x, y)
        return x + y
    x = torch.randn(2, 2, device=GPU_TYPE)
    eager_out = f(x)
    compiled_inductor_f = torch.compile(f, backend="inductor", fullgraph=True)
    compiled_inductor_out = compiled_inductor_f(x)
    self.assertEqual(compiled_inductor_out, eager_out)
@requires_gpu
def test_triton_kernel_strided_input_nonzero_offset(self):
    """Same as the strided-input test but using the split's second half, whose
    view has a nonzero storage offset (128) on top of strides [256, 1]."""
    def f(inp):
        # right has strides [256, 1] and storage offset 128
        left, right = torch.split(inp, [128, 128], dim=1)
        out = torch.empty_like(right)
        X_BLOCK_SIZE, Y_BLOCK_SIZE = 32, 16
        grid = (right.size(1) // X_BLOCK_SIZE, right.size(0) // Y_BLOCK_SIZE)
        double_strided_kernel[grid](
            in_ptr=right,
            out_ptr=out,
            in_y_stride=right.stride(0),
            out_y_stride=out.stride(0),
            X_BLOCK_SIZE=X_BLOCK_SIZE,
            Y_BLOCK_SIZE=Y_BLOCK_SIZE,
        )
        return out
    inp = torch.randn(64, 256, device=GPU_TYPE)
    eager_out = f(inp)
    compiled_out = torch.compile(f)(inp)
    self.assertEqual(compiled_out, eager_out)
@requires_gpu
def test_triton_kernel_slice_and_view_input(self):
    """Kernel input produced by a slice followed by a view (64,4,32 over
    strided storage); also returns out + left to keep the view live."""
    def f(inp):
        # left has strides [256, 1]
        left = inp[:, :128]
        left = left.view(64, 4, 32)
        out = torch.empty_like(left)
        X_BLOCK_SIZE, Y_BLOCK_SIZE = 32, 16
        grid = (
            (left.size(1) * left.size(2)) // X_BLOCK_SIZE,
            left.size(0) // Y_BLOCK_SIZE,
        )
        double_strided_kernel[grid](
            in_ptr=left,
            out_ptr=out,
            in_y_stride=left.stride(0),
            out_y_stride=out.stride(0),
            X_BLOCK_SIZE=X_BLOCK_SIZE,
            Y_BLOCK_SIZE=Y_BLOCK_SIZE,
        )
        return out + left
    inp = torch.randn(64, 256, device=GPU_TYPE)
    eager_out = f(inp)
    compiled_out = torch.compile(f)(inp)
    self.assertEqual(compiled_out, eager_out)
@requires_gpu
def test_triton_kernel_fallback(self):
    """Kernel inputs produced by an ExternKernelOut (torch.mm) and a fallback
    MultiOutput (torch.sort) must be handled by Inductor."""
    def f(x, y):
        out = torch.zeros_like(x)
        # NOTE(review): out2 is returned but never written — both launches
        # store into `out`, so out2 stays zeros in eager and compiled alike.
        # Possibly the second launch was meant to target out2; confirm intent.
        out2 = torch.zeros_like(x)
        # torch.mm is ExternKernelOut
        add_kernel[(4,)](x, torch.mm(x, y), out, 4, 16)
        # torch.sort creates fallback kernel and hence MultiOutput
        add_kernel[(4,)](x, torch.sort(y).values, out, 4, 16)
        return out, out2
    x = torch.randn(4, 4, device=GPU_TYPE)
    y = torch.randn(4, 4, device=GPU_TYPE)
    eager_out = f(x, y)
    compiled_out = torch.compile(f)(x, y)
    self.assertEqual(compiled_out, eager_out)
@requires_gpu
def test_triton_kernel_to_cpu(self):
    """Kernel output moved to CPU inside the compiled region (device copy
    after a user-defined kernel launch)."""
    def f(x, y):
        out = torch.zeros_like(x)
        add_kernel[(1,)](x, y, out, 16, 16)
        out_cpu = out.cpu() + 1
        return out_cpu
    x = torch.randn(4, 4, device=GPU_TYPE)
    y = torch.randn(4, 4, device=GPU_TYPE)
    eager_out = f(x, y)
    compiled_out = torch.compile(f)(x, y)
    self.assertEqual(compiled_out, eager_out)
@requires_gpu
def test_triton_kernel_out_of_order(self):
    """Kernel whose constexpr parameter (BLOCK_SIZE) appears in the middle of
    the signature rather than last; launched with it positionally."""
    @triton.jit
    def add_kernel(
        in_ptr0,
        in_ptr1,
        BLOCK_SIZE: "tl.constexpr",  # deliberately not the last parameter
        out_ptr,
        n_elements,
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(in_ptr0 + offsets, mask=mask)
        y = tl.load(in_ptr1 + offsets, mask=mask)
        output = x + y
        tl.store(out_ptr + offsets, output, mask=mask)
    def f(x, y):
        out = torch.zeros_like(x)
        n_elements = x.numel()
        add_kernel[(n_elements,)](x, y, 4, out, n_elements)
        return out
    x = torch.randn(4, device=GPU_TYPE)
    y = torch.randn(4, device=GPU_TYPE)
    eager_out = f(x, y)
    compiled_out = torch.compile(f)(x, y)
    self.assertEqual(compiled_out, eager_out)
@requires_gpu
@dynamo_config.patch(capture_dynamic_output_shape_ops=True)
@dynamo_config.patch(capture_scalar_outputs=True)
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_triton_kernel_unbacked_shape_tensor(self, backend):
    """Kernel on a tensor with an unbacked (data-dependent) size, produced by
    boolean-mask indexing; requires the dynamic-output-shape dynamo configs."""
    @triton.jit
    def square(
        in_ptr,
        out_ptr,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(in_ptr + offsets, mask=mask)
        output = x * x
        tl.store(out_ptr + offsets, output, mask=mask)
    def f(x):
        # Data-dependent size: numel is unknown at trace time.
        x = x[x > 2]
        n_elements = x.numel()
        output = torch.zeros_like(x)
        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        square[grid](x, output, n_elements, BLOCK_SIZE=16)
        return output
    x = torch.randn(4, device=GPU_TYPE)
    eager_out = f(x)
    compiled_out = torch.compile(f, fullgraph=True, backend=backend)(x)
    self.assertEqual(compiled_out, eager_out)
@requires_gpu
@common_utils.parametrize("dump_launch_params", ["0", "1"])
@common_utils.parametrize("dynamic", [False, True])
def test_triton_kernel_equal_to_1_arg(self, dynamic, dump_launch_params):
    """equal_to_1 specialization of an int argument that happens to be 1
    (half_n_elements on a 2-element input): checks the generated source for
    the expected specialization, depending on dynamic shapes and the Triton
    version's signature format."""
    # NOTE(review): env var is set and not restored — leaks to later tests.
    os.environ["TORCHINDUCTOR_DUMP_LAUNCH_PARAMS"] = dump_launch_params
    @triton.jit
    def add_kernel_half_n_elements(
        in_ptr0,
        in_ptr1,
        out_ptr,
        half_n_elements,
        BLOCK_SIZE: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < half_n_elements * 2
        x = tl.load(in_ptr0 + offsets, mask=mask)
        y = tl.load(in_ptr1 + offsets, mask=mask)
        output = x + y
        tl.store(out_ptr + offsets, output, mask=mask)
    def f(x, y):
        out = torch.empty_like(x)
        half_n_elements = x.numel() // 2  # == 1 for the 2-element input
        add_kernel_half_n_elements[(half_n_elements,)](
            x, y, out, half_n_elements, BLOCK_SIZE=16
        )
        return out
    x = torch.randn(2, device=GPU_TYPE)
    y = torch.randn(2, device=GPU_TYPE)
    eager_out = f(x, y)
    compiled_out, sources = run_and_get_code(
        torch.compile(f, dynamic=dynamic), x, y
    )
    if triton_version_uses_attrs_dict():
        self.assertFalse("equal_to" in sources[0])
    else:
        if dynamic:
            # when half_n_elements passed to the Triton kernel is
            # dynamic, equal_to_1 specialization can't be enforced
            # also, equal_to_1 specialization doesn't occur (or appear in the signature)
            # for newer versions of triton (i.e. the ones where triton_version_uses_attrs_dict() == True)
            self.assertTrue(_triton_get_ast_equal_to_str(()) in sources[0])
        else:
            self.assertTrue(_triton_get_ast_equal_to_str((3,)) in sources[0])
    self.assertEqual(compiled_out, eager_out)
@requires_gpu
@common_utils.parametrize("dynamic", [False, True])
def test_triton_kernel_equal_to_1_float_arg(self, dynamic):
    """A float argument equal to 1.0 (literal or symbolic) must NOT be put in
    the equal_to_1 specialization set — only ints qualify."""
    def f(x, y):
        out = torch.empty_like(x)
        n_elements = x.numel()
        # Evaluates to 1.0 but arrives as a float expression.
        scaling_factor = (n_elements**0) / 1.0
        add_kernel_with_scaling[(n_elements,)](
            x,
            y,
            out,
            n_elements,
            scaling_factor,
            BLOCK_SIZE=16,
        )
        return out
    x = torch.randn(2, device=GPU_TYPE)
    y = torch.randn(2, device=GPU_TYPE)
    eager_out = f(x, y)
    compiled_out, sources = run_and_get_code(
        torch.compile(f, dynamic=dynamic), x, y
    )
    # float 1.0 (both literal or symbolic)
    # should not be added to equal_to_1
    if not triton_version_uses_attrs_dict():
        self.assertTrue(_triton_get_ast_equal_to_str(()) in sources[0])
    self.assertEqual(compiled_out, eager_out)
@requires_gpu
def test_triton_kernel_with_imported_symbol(self):
    """Kernel body referencing a symbol imported at module level
    (fast_dividef); compilation must resolve it."""
    @triton.jit
    def add_kernel_with_imported_symbol(
        in_ptr,
        out_ptr,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(in_ptr + offsets, mask=mask)
        output = fast_dividef(x, 3.14)
        tl.store(out_ptr + offsets, output, mask=mask)
    def f(x):
        out = torch.empty_like(x)
        n_elements = x.numel()
        add_kernel_with_imported_symbol[(n_elements,)](
            x, out, n_elements, BLOCK_SIZE=16
        )
        return out
    x = torch.randn(4, device=GPU_TYPE)
    eager_out = f(x)
    compiled_out = torch.compile(f)(x)
    self.assertEqual(compiled_out, eager_out)
@unittest.skipIf(
    not HAS_GPU or not hasattr(triton, "constexpr_function"),
    "newer triton version required",
)
def test_triton_kernel_with_constexpr_function(self):
    """Kernel using a triton.constexpr_function (log2) on a constexpr shape;
    the decorator must survive into Inductor's generated Triton source."""
    @triton.jit
    def kernel(x_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
        pid = tl.program_id(axis=0)  # We use a 1D launch grid so axis is 0.
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(x_ptr + offsets, mask=mask)
        # Block-level constexpr derived from the loaded tile's shape.
        FIRST_DIM: tl.constexpr = x.shape[0]
        output = x + log2(FIRST_DIM)
        tl.store(output_ptr + offsets, output, mask=mask)
    def f(x):
        out = torch.zeros_like(x)
        n_elements = x.numel()
        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        kernel[grid](x, out, n_elements, BLOCK_SIZE=16)
        return out
    x = torch.randn(16, device=GPU_TYPE)
    eager_out = f(x)
    compiled_out, (triton_code,) = run_and_get_code(
        torch.compile(f, fullgraph=True), x
    )
    self.assertIn("@triton.constexpr_function", triton_code)
    self.assertEqual(compiled_out, eager_out)
@requires_gpu
def test_triton_kernel_with_imported_symbol_with_custom_name(self):
    """Like the imported-symbol test, but the symbol was imported under an
    alias (my_fast_dividef), which must still resolve."""
    @triton.jit
    def add_kernel_with_imported_symbol(
        in_ptr,
        out_ptr,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(in_ptr + offsets, mask=mask)
        output = my_fast_dividef(x, 3.14)
        tl.store(out_ptr + offsets, output, mask=mask)
    def f(x):
        out = torch.empty_like(x)
        n_elements = x.numel()
        add_kernel_with_imported_symbol[(n_elements,)](
            x, out, n_elements, BLOCK_SIZE=16
        )
        return out
    x = torch.randn(4, device=GPU_TYPE)
    eager_out = f(x)
    compiled_out = torch.compile(f)(x)
    self.assertEqual(compiled_out, eager_out)
@requires_gpu
@common_utils.parametrize("size", [4, 16])
@common_utils.parametrize("dynamic", [False, True])
def test_triton_kernel_different_shapes(self, size, dynamic):
    """Same kernel launched on 1D and 2D inputs of different numel: with
    static shapes and size=4 Inductor specializes into two kernels
    (divisibility hints differ); otherwise a single kernel is emitted."""
    from torch._inductor.utils import run_and_get_code
    def f(x, y, xx, yy):
        n_elements = x.numel()
        output_1 = torch.zeros_like(x)
        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        add_kernel[grid](x, y, output_1, n_elements, BLOCK_SIZE=4)
        n_elements = xx.numel()
        output_2 = torch.zeros_like(xx)
        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        add_kernel[grid](xx, yy, output_2, n_elements, BLOCK_SIZE=4)
        return output_1, output_2
    x = torch.rand(size, device=GPU_TYPE)
    y = torch.rand(size, device=GPU_TYPE)
    xx = torch.rand(size, size, device=GPU_TYPE)
    yy = torch.rand(size, size, device=GPU_TYPE)
    args = [x, y, xx, yy]
    eager_out = f(*args)
    compiled_out, (code,) = run_and_get_code(
        torch.compile(f, fullgraph=True, dynamic=dynamic, backend="inductor"), *args
    )
    if size == 4 and not dynamic:
        # Produce 2 kernels due to divisibility
        self.assertTrue(self._kernel_launched_in_code("add_kernel_0", code))
        self.assertTrue(self._kernel_launched_in_code("add_kernel_1", code))
    else:
        # size == 16 or dynamic
        # Only one kernel
        self.assertTrue(self._kernel_launched_in_code("add_kernel_0", code))
        self.assertFalse(self._kernel_launched_in_code("add_kernel_1", code))
    self.assertEqual(compiled_out, eager_out)
@requires_gpu
@common_utils.parametrize("dynamic", [False, True])
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_triton_kernel_triton_dtype(self, dynamic, backend):
    """A tl.* dtype object passed as a constexpr kernel argument (and note the
    constexpr sits in the middle of the signature); runs float32 always and
    bfloat16 only where genuinely supported."""
    @triton.jit
    def add_kernel_with_dtype(
        in_ptr0,
        in_ptr1,
        out_ptr,
        dtype: "tl.constexpr",
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(in_ptr0 + offsets, mask=mask).to(dtype)
        y = tl.load(in_ptr1 + offsets, mask=mask).to(dtype)
        output = x + y
        tl.store(out_ptr + offsets, output, mask=mask)
    def f(x, y, dtype_torch, dtype_triton):
        output = torch.zeros_like(x).to(dtype=dtype_torch)
        n_elements = output.numel()
        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        add_kernel_with_dtype[grid](
            x, y, output, dtype_triton, n_elements, BLOCK_SIZE=4
        )
        return output
    x = torch.randn(4, device=GPU_TYPE)
    y = torch.randn(4, device=GPU_TYPE)
    args_list = [(x, y, torch.float32, tl.float32)]
    if torch.cuda.is_bf16_supported(including_emulation=False):
        args_list.append((x, y, torch.bfloat16, tl.bfloat16))
    for args in args_list:
        eager_out = f(*args)
        compiled_out = torch.compile(
            f, fullgraph=True, backend=backend, dynamic=dynamic
        )(*args)
        self.assertEqual(compiled_out, eager_out)
@requires_gpu
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_triton_kernel_special_kwargs_with_autotune(self, backend):
    """Launch-site special kwargs (num_warps/num_stages) passed to an
    autotuned kernel inside a compiled region; only checks it runs."""
    @triton.autotune(
        configs=[
            triton.Config({"BLOCK_SIZE": 128}),
            triton.Config({"BLOCK_SIZE": 64}),
        ],
        key=["n_elements"],
    )
    @triton.jit
    def add_kernel(
        in_ptr0,
        in_ptr1,
        out_ptr,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(in_ptr0 + offsets, mask=mask)
        y = tl.load(in_ptr1 + offsets, mask=mask)
        output = x + y
        tl.store(out_ptr + offsets, output, mask=mask)
    @torch.compile(fullgraph=True, backend=backend)
    def f(x, y):
        output = torch.zeros_like(x)
        n_elements = output.numel()
        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        add_kernel[grid](
            x,
            y,
            output,
            n_elements,
            num_warps=8,
            num_stages=3,
        )
        return output
    x = torch.randn(4, device=GPU_TYPE)
    # Smoke test: no numeric assertion, just that compilation + launch work.
    f(x, x)
@requires_gpu
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_triton_kernel_empty_autotune_config_dict(self, backend):
    """Autotune configs with empty kwarg dicts ({} + num_stages only);
    BLOCK_SIZE is instead supplied at the launch site."""
    @triton.autotune(
        configs=[
            triton.Config({}, num_stages=2),
            triton.Config({}, num_stages=3),
        ],
        key=["n_elements"],
    )
    @triton.jit
    def add_kernel(
        in_ptr0,
        in_ptr1,
        out_ptr,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(in_ptr0 + offsets, mask=mask)
        y = tl.load(in_ptr1 + offsets, mask=mask)
        output = x + y
        tl.store(out_ptr + offsets, output, mask=mask)
    @torch.compile(fullgraph=True, backend=backend)
    def f(x, y):
        output = torch.zeros_like(x)
        n_elements = output.numel()
        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        add_kernel[grid](
            x,
            y,
            output,
            n_elements,
            BLOCK_SIZE=128,
        )
        return output
    x = torch.randn(4, device=GPU_TYPE)
    # Smoke test only.
    f(x, x)
@requires_gpu
@common_utils.parametrize("autotune", [False, True])
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_triton_kernel_special_params(self, autotune, backend):
    """Kernel that declares num_warps/num_stages as its OWN constexpr
    parameters and uses their values in the computation, supplied either by
    autotune configs or directly as launch kwargs."""
    @triton.jit
    def special_params_kernel(
        in_ptr,
        out_ptr,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
        num_warps: "tl.constexpr",
        num_stages: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(in_ptr + offsets, mask=mask)
        # The special params participate in the math, so wrong plumbing
        # would change the numeric result.
        output = x * num_stages + num_warps
        tl.store(out_ptr + offsets, output, mask=mask)
    NUM_WARPS = 4
    NUM_STAGES = 3
    if autotune:
        special_params_kernel = triton.autotune(
            configs=[
                triton.Config(
                    {"BLOCK_SIZE": 128},
                    num_stages=NUM_STAGES,
                    num_warps=NUM_WARPS,
                ),
                triton.Config(
                    {"BLOCK_SIZE": 64},
                    num_stages=NUM_STAGES,
                    num_warps=NUM_WARPS,
                ),
            ],
            key=["n_elements"],
        )(special_params_kernel)
        kwargs = {}
    else:
        kwargs = {
            "BLOCK_SIZE": 128,
            "num_stages": NUM_STAGES,
            "num_warps": NUM_WARPS,
        }
    def f(x):
        output = torch.zeros_like(x)
        n_elements = output.numel()
        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        special_params_kernel[grid](
            x,
            output,
            n_elements,
            **kwargs,
        )
        return output
    x = torch.randn(4, device=GPU_TYPE)
    eager_out = f(x)
    compiled_out = torch.compile(f, fullgraph=True, backend=backend)(x)
    expected_out = x * NUM_STAGES + NUM_WARPS
    self.assertEqual(eager_out, expected_out)
    self.assertEqual(compiled_out, expected_out)
@requires_gpu
@common_utils.parametrize("dynamic", [False, True])
@common_utils.parametrize("tma_version", ["new", "old"])
def test_on_device_tma(self, dynamic, tma_version):
    """On-device TMA descriptor creation inside the kernel, for both the new
    (tensor_descriptor) and old (experimental_descriptor) Triton APIs; the
    old API needs an explicit uint8 workspace buffer."""
    if tma_version == "new" and not has_triton_tensor_descriptor_host_tma():
        self.skipTest("requires triton.tools.tensor_descriptor TMA support")
    if tma_version == "old" and not has_triton_experimental_host_tma():
        self.skipTest("requires triton.tools.experimental_descriptor TMA support")
    kernel = (
        add_kernel_on_device_tma_new_api
        if tma_version == "new"
        else add_kernel_on_device_tma_old_api
    )
    def f(a, b):
        BLOCK_SIZE = 32
        out = torch.zeros_like(a)
        m, n = out.size()
        # Allocate workspace for on-device TMA descriptors
        # Need 128 bytes per descriptor, 3 descriptors total
        if tma_version == "old":
            workspace = torch.zeros(3 * 128, dtype=torch.uint8, device=a.device)
        else:
            # New API allocates through triton.set_allocator instead.
            workspace = None
        grid = lambda meta: (
            triton.cdiv(m, meta["BLOCK_SIZE"]),
            triton.cdiv(n, meta["BLOCK_SIZE"]),
        )
        kernel[grid](
            a,
            b,
            out,
            m,
            n,
            workspace,
            BLOCK_SIZE=BLOCK_SIZE,
        )
        return out
    a = torch.randn((32, 32), device=GPU_TYPE)
    b = torch.randn((32, 32), device=GPU_TYPE)
    expected_out = a + b
    # Allocator used by the new TMA API for descriptor storage.
    triton.set_allocator(
        lambda size, align, stream: torch.empty(
            size, dtype=torch.int8, device=GPU_TYPE
        )
    )
    eager_out = f(a, b)
    compiled_out = torch.compile(f, fullgraph=True, dynamic=dynamic)(a, b)
    self.assertEqual(eager_out, expected_out)
    self.assertEqual(compiled_out, expected_out)
@requires_gpu
@common_utils.parametrize("dynamic", [False, True])
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_triton_kernel_multiple_outputs(self, dynamic, backend):
    """Kernel storing into two output pointers in one launch, inside a
    compiled function that also returns z**2 to exercise the autograd path."""
    @triton.jit
    def add_kernel(
        in_ptr0,
        in_ptr1,
        out_ptr,
        out_ptr2,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(in_ptr0 + offsets, mask=mask)
        y = tl.load(in_ptr1 + offsets, mask=mask)
        output = x + y
        # Two stores: out_ptr gets x+y, out_ptr2 gets x+y+1.
        tl.store(out_ptr + offsets, output, mask=mask)
        tl.store(out_ptr2 + offsets, output + 1, mask=mask)
    @torch.compile(fullgraph=True, backend=backend, dynamic=dynamic)
    def f(x, y, z):
        output = torch.empty_like(x)
        output2 = torch.empty_like(x)
        n_elements = output.numel()
        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        add_kernel[grid](x, y, output, output2, n_elements, BLOCK_SIZE=16)
        # The z return is intentional: we're testing training
        return output, output2, z**2
    x = torch.randn(3, requires_grad=True, device=GPU_TYPE)
    y = torch.randn(3, requires_grad=True, device=GPU_TYPE)
    z = torch.randn(3, requires_grad=True, device=GPU_TYPE)
    out, out2, out3 = f(x, y, z)
    self.assertEqual(out, x + y)
    self.assertEqual(out2, x + y + 1)
    self.assertEqual(out3, z**2)
@requires_gpu
@common_utils.parametrize("dynamic", [False, True])
@common_utils.parametrize("tma_version", ["new", "old"])
def test_tma_capture_and_functionalize(self, dynamic, tma_version):
    # Host-side TMA descriptors must be captured into
    # tma_descriptor_metadata ('stable' for the new API, 'experimental'
    # for the old), and the kernel call must be functionalized: the
    # mutated output buffer shows up in tensors_to_clone.
    if tma_version == "new" and not has_triton_tensor_descriptor_host_tma():
        self.skipTest("requires triton.tools.tensor_descriptor TMA support")
    if tma_version == "old" and not has_triton_experimental_host_tma():
        self.skipTest("requires triton.tools.experimental_descriptor TMA support")

    from torch._higher_order_ops.triton_kernel_wrap import kernel_side_table

    # Reset so kernel_idx / constant_args_idx in the expected graphs are 0.
    kernel_side_table.reset_table()

    kernel = (
        add_kernel_with_tma_1d_new_api
        if tma_version == "new"
        else add_kernel_with_tma_1d_old_api
    )

    def f(a, b):
        BLOCK_SIZE = 256
        out = torch.zeros_like(a)
        n_elements = out.numel()

        desc_a, desc_b, desc_out = (
            create_tensor_descriptor_shim(
                t, [BLOCK_SIZE], new_api=(tma_version == "new")
            )
            for t in (a, b, out)
        )

        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        kernel[grid](
            desc_a,
            desc_b,
            desc_out,
            BLOCK_SIZE=BLOCK_SIZE,
        )
        return out

    a = torch.randn(301, device=GPU_TYPE)
    b = torch.randn(301, device=GPU_TYPE)

    backend = torch._dynamo.testing.AotEagerAndRecordGraphs()

    # Warm up eagerly, then compile and record the forward graph.
    _ = f(a, b)
    torch.compile(
        f,
        fullgraph=True,
        backend=backend,
        dynamic=dynamic,
    )(a, b)

    if dynamic:
        if tma_version == "new":
            self.assertExpectedInline(
                backend.fw_graphs[0].code.strip(),
                """\
def forward(self, arg0_1, arg1_1, arg2_1):
    zeros_like = torch.ops.aten.zeros_like.default(arg1_1, pin_memory = False)
    add_2 = arg0_1 + 256; arg0_1 = None
    sub_1 = add_2 - 1; add_2 = None
    floordiv = sub_1 // 256; sub_1 = None
    triton_kernel_wrapper_functional_proxy = torch.ops.higher_order.triton_kernel_wrapper_functional(kernel_idx = 0, constant_args_idx = 0, grid = [(floordiv, 1, 1)], tma_descriptor_metadata = {'in_desc_ptr0': ('stable', ([256],)), 'in_desc_ptr1': ('stable', ([256],)), 'out_desc_ptr': ('stable', ([256],))}, kwargs = {'in_desc_ptr0': arg1_1, 'in_desc_ptr1': arg2_1, 'out_desc_ptr': zeros_like}, tensors_to_clone = ['out_desc_ptr']); floordiv = arg1_1 = arg2_1 = zeros_like = None
    getitem = triton_kernel_wrapper_functional_proxy['out_desc_ptr']; triton_kernel_wrapper_functional_proxy = None
    return (getitem,)""",
            )
        elif tma_version == "old":
            self.assertExpectedInline(
                backend.fw_graphs[0].code.strip(),
                """\
def forward(self, arg0_1, arg1_1, arg2_1):
    zeros_like = torch.ops.aten.zeros_like.default(arg1_1, pin_memory = False)
    add_2 = arg0_1 + 256
    sub_1 = add_2 - 1; add_2 = None
    floordiv = sub_1 // 256; sub_1 = None
    triton_kernel_wrapper_functional_proxy = torch.ops.higher_order.triton_kernel_wrapper_functional(kernel_idx = 0, constant_args_idx = 0, grid = [(floordiv, 1, 1)], tma_descriptor_metadata = {'in_desc_ptr0': ('experimental', ([arg0_1], [256], 4)), 'in_desc_ptr1': ('experimental', ([arg0_1], [256], 4)), 'out_desc_ptr': ('experimental', ([arg0_1], [256], 4))}, kwargs = {'in_desc_ptr0': arg1_1, 'in_desc_ptr1': arg2_1, 'out_desc_ptr': zeros_like}, tensors_to_clone = ['out_desc_ptr']); floordiv = arg0_1 = arg1_1 = arg2_1 = zeros_like = None
    getitem = triton_kernel_wrapper_functional_proxy['out_desc_ptr']; triton_kernel_wrapper_functional_proxy = None
    return (getitem,)""",
            )
    else:
        if tma_version == "new":
            self.assertExpectedInline(
                backend.fw_graphs[0].code.strip(),
                """\
def forward(self, arg0_1, arg1_1):
    zeros_like = torch.ops.aten.zeros_like.default(arg0_1, pin_memory = False)
    triton_kernel_wrapper_functional_proxy = torch.ops.higher_order.triton_kernel_wrapper_functional(kernel_idx = 0, constant_args_idx = 0, grid = [(2, 1, 1)], tma_descriptor_metadata = {'in_desc_ptr0': ('stable', ([256],)), 'in_desc_ptr1': ('stable', ([256],)), 'out_desc_ptr': ('stable', ([256],))}, kwargs = {'in_desc_ptr0': arg0_1, 'in_desc_ptr1': arg1_1, 'out_desc_ptr': zeros_like}, tensors_to_clone = ['out_desc_ptr']); arg0_1 = arg1_1 = zeros_like = None
    getitem = triton_kernel_wrapper_functional_proxy['out_desc_ptr']; triton_kernel_wrapper_functional_proxy = None
    return (getitem,)""",
            )
        elif tma_version == "old":
            self.assertExpectedInline(
                backend.fw_graphs[0].code.strip(),
                """\
def forward(self, arg0_1, arg1_1):
    zeros_like = torch.ops.aten.zeros_like.default(arg0_1, pin_memory = False)
    triton_kernel_wrapper_functional_proxy = torch.ops.higher_order.triton_kernel_wrapper_functional(kernel_idx = 0, constant_args_idx = 0, grid = [(2, 1, 1)], tma_descriptor_metadata = {'in_desc_ptr0': ('experimental', ([301], [256], 4)), 'in_desc_ptr1': ('experimental', ([301], [256], 4)), 'out_desc_ptr': ('experimental', ([301], [256], 4))}, kwargs = {'in_desc_ptr0': arg0_1, 'in_desc_ptr1': arg1_1, 'out_desc_ptr': zeros_like}, tensors_to_clone = ['out_desc_ptr']); arg0_1 = arg1_1 = zeros_like = None
    getitem = triton_kernel_wrapper_functional_proxy['out_desc_ptr']; triton_kernel_wrapper_functional_proxy = None
    return (getitem,)""",
            )
@requires_gpu
@common_utils.parametrize("after_data_ptr", [False, True])
@common_utils.parametrize("after_create_desc", [False, True])
@common_utils.parametrize("tma_version", ["new", "old"])
def test_tma_graph_breaks(self, after_data_ptr, after_create_desc, tma_version):
    # TMA descriptor handling must survive dynamo graph breaks inserted
    # before and/or after descriptor creation (hence fullgraph=False).
    if tma_version == "new" and not has_triton_tensor_descriptor_host_tma():
        self.skipTest("requires triton.tools.tensor_descriptor TMA support")
    if tma_version == "old" and not has_triton_experimental_host_tma():
        self.skipTest("requires triton.tools.experimental_descriptor TMA support")

    kernel = (
        add_kernel_with_tma_1d_new_api
        if tma_version == "new"
        else add_kernel_with_tma_1d_old_api
    )

    def f(a, b):
        BLOCK_SIZE = 256
        out = torch.zeros_like(a)
        n_elements = out.numel()

        if after_data_ptr:
            torch._dynamo.graph_break()

        descs = [
            create_tensor_descriptor_shim(
                t, [BLOCK_SIZE], new_api=(tma_version == "new")
            )
            for t in (a, b, out)
        ]

        if after_create_desc:
            torch._dynamo.graph_break()

        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        kernel[grid](
            *descs,
            BLOCK_SIZE=BLOCK_SIZE,
        )
        return out

    a = torch.randn(301, device=GPU_TYPE)
    b = torch.randn(301, device=GPU_TYPE)

    expected_out = a + b
    eager_out = f(a, b)
    compiled_out = torch.compile(
        f,
        fullgraph=False,
        backend="eager",
        dynamic=False,
    )(a, b)

    self.assertEqual(eager_out, expected_out)
    self.assertEqual(compiled_out, expected_out)
@requires_gpu
@common_utils.parametrize("dynamic", [False, True])
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
@common_utils.parametrize("tma_version", ["new", "old"])
def test_tma_descriptor_1d(self, dynamic, backend, tma_version):
    # End-to-end parity check: a 1D host-side-TMA add kernel must produce
    # the same result eagerly and under torch.compile on every backend.
    if tma_version == "new" and not has_triton_tensor_descriptor_host_tma():
        self.skipTest("requires triton.tools.tensor_descriptor TMA support")
    if tma_version == "old" and not has_triton_experimental_host_tma():
        self.skipTest("requires triton.tools.experimental_descriptor TMA support")

    use_new_api = tma_version == "new"
    if use_new_api:
        kernel = add_kernel_with_tma_1d_new_api
    else:
        kernel = add_kernel_with_tma_1d_old_api

    def f(lhs, rhs):
        BLOCK_SIZE = 256
        result = torch.zeros_like(lhs)
        numel = result.numel()

        # One descriptor per tensor: both inputs and the output.
        descriptors = [
            create_tensor_descriptor_shim(t, [BLOCK_SIZE], new_api=use_new_api)
            for t in (lhs, rhs, result)
        ]

        def grid(meta):
            return (triton.cdiv(numel, meta["BLOCK_SIZE"]),)

        kernel[grid](*descriptors, BLOCK_SIZE=BLOCK_SIZE)
        return result

    a = torch.randn(301, device=GPU_TYPE)
    b = torch.randn(301, device=GPU_TYPE)
    expected_out = a + b

    eager_out = f(a, b)
    compiled_out = torch.compile(
        f, fullgraph=True, backend=backend, dynamic=dynamic
    )(a, b)

    self.assertEqual(eager_out, expected_out)
    self.assertEqual(compiled_out, expected_out)
@requires_gpu
@common_utils.parametrize("tma_version", ["new", "old"])
def test_tma_descriptor_dedup(self, tma_version):
    # Passing the same descriptor for both kernel inputs must be
    # deduplicated by inductor codegen: exactly two descriptor
    # constructions are expected (one shared input + one output).
    if tma_version == "new" and not has_triton_tensor_descriptor_host_tma():
        self.skipTest("requires triton.tools.tensor_descriptor TMA support")
    if tma_version == "old" and not has_triton_experimental_host_tma():
        self.skipTest("requires triton.tools.experimental_descriptor TMA support")

    kernel = (
        add_kernel_with_tma_1d_new_api
        if tma_version == "new"
        else add_kernel_with_tma_1d_old_api
    )

    def f(a):
        BLOCK_SIZE = 256
        out = torch.zeros_like(a)
        n_elements = out.numel()

        desc_a, desc_out = (
            create_tensor_descriptor_shim(
                t, [BLOCK_SIZE], new_api=(tma_version == "new")
            )
            for t in (a, out)
        )

        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        # desc_a is deliberately passed twice.
        kernel[grid](
            desc_a,
            desc_a,
            desc_out,
            BLOCK_SIZE=BLOCK_SIZE,
        )
        return out

    a = torch.randn(301, device=GPU_TYPE)

    expected_out = a + a
    eager_out = f(a)
    compiled_out, (code,) = run_and_get_code(
        torch.compile(
            f,
            fullgraph=True,
            backend="inductor",
            dynamic=True,
        ),
        a,
    )

    self.assertEqual(eager_out, expected_out)
    self.assertEqual(compiled_out, expected_out)

    # 2 calls: one for two inputs (dedupped), one for the output
    if tma_version == "new":
        self.assertEqual(code.count("TensorDescriptor.from_tensor("), 2)
    else:
        self.assertEqual(code.count("create_1d_tma_descriptor("), 2)
@requires_gpu
@common_utils.parametrize("dynamic", [False, True])
@common_utils.parametrize("backend", ["eager", "aot_eager"])
@common_utils.parametrize("tma_version", ["new", "old"])
def test_tma_descriptor_2d(self, dynamic, backend, tma_version):
    # Same parity check as the 1D variant, but with a 2D block shape and a
    # two-dimensional launch grid.
    if tma_version == "new" and not has_triton_tensor_descriptor_host_tma():
        self.skipTest("requires triton.tools.tensor_descriptor TMA support")
    if tma_version == "old" and not has_triton_experimental_host_tma():
        self.skipTest("requires triton.tools.experimental_descriptor TMA support")

    kernel = (
        add_kernel_with_tma_2d_new_api
        if tma_version == "new"
        else add_kernel_with_tma_2d_old_api
    )

    def f(a, b):
        BLOCK_SIZE_X = 16
        BLOCK_SIZE_Y = 32
        out = torch.zeros_like(a)
        x_size, y_size = out.size()

        desc_a, desc_b, desc_out = (
            create_tensor_descriptor_shim(
                t, [BLOCK_SIZE_X, BLOCK_SIZE_Y], new_api=(tma_version == "new")
            )
            for t in (a, b, out)
        )

        grid = lambda meta: (
            triton.cdiv(x_size, meta["BLOCK_SIZE_X"]),
            triton.cdiv(y_size, meta["BLOCK_SIZE_Y"]),
        )
        kernel[grid](
            desc_a,
            desc_b,
            desc_out,
            BLOCK_SIZE_X=BLOCK_SIZE_X,
            BLOCK_SIZE_Y=BLOCK_SIZE_Y,
        )
        return out

    a = torch.randn((25, 16), device=GPU_TYPE)
    b = torch.randn((25, 16), device=GPU_TYPE)

    expected_out = a + b
    eager_out = f(a, b)
    compiled_out = torch.compile(
        f,
        fullgraph=True,
        backend=backend,
        dynamic=dynamic,
    )(a, b)

    self.assertEqual(eager_out, expected_out)
    self.assertEqual(compiled_out, expected_out)
@requires_gpu
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_triton_kernel_num_ctas(self, backend):
    # Passing num_ctas directly at the launch site (via grid launch or
    # kernel.run) is unsupported and must raise a clear Unsupported error.
    @triton.jit
    def kernel(X):
        return

    @torch.compile(fullgraph=True, backend=backend)
    def f(x):
        kernel[(1,)](x, num_ctas=1)
        kernel.run(x, num_ctas=1, grid=(1,), warmup=False)
        return x

    msg = "Passing num_ctas directly to the Triton kernel is not supported. Please use a Config in @triton.autotune instead."
    with self.assertRaisesRegex(torch._dynamo.exc.Unsupported, msg):
        x = torch.randn(4, device=GPU_TYPE)
        f(x)
@requires_gpu
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_triton_kernel_special_kwargs_without_autotune(self, backend):
    # num_warps / num_stages passed directly at the launch site (without
    # @triton.autotune) must be accepted by torch.compile.
    @triton.jit
    def add_kernel(
        in_ptr0,
        in_ptr1,
        out_ptr,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(in_ptr0 + offsets, mask=mask)
        y = tl.load(in_ptr1 + offsets, mask=mask)
        output = x + y
        tl.store(out_ptr + offsets, output, mask=mask)

    @torch.compile(fullgraph=True, backend=backend)
    def f(x, y):
        output = torch.zeros_like(x)
        n_elements = output.numel()
        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        add_kernel[grid](
            x,
            y,
            output,
            n_elements,
            BLOCK_SIZE=128,
            num_warps=8,
            num_stages=3,
        )
        return output

    x = torch.randn(4, device=GPU_TYPE)
    # Smoke test: compiling and launching must not raise.
    f(x, x)
@requires_gpu
@common_utils.parametrize("backend", ["eager", "aot_eager", "inductor"])
@common_utils.parametrize("autotune_at_compile_time", [True, False])
def test_triton_kernel_restore_value(self, backend, autotune_at_compile_time):
    # The kernel increments its input in place; restore_value must reset
    # in_ptr0 between autotune configs so only one net increment lands.
    if autotune_at_compile_time and backend != "inductor":
        raise unittest.SkipTest("compile-time autotuning only exists in inductor")

    @triton.autotune(
        configs=[
            triton.Config({"BLOCK_SIZE": 16}, num_stages=3, num_warps=8),
            triton.Config({"BLOCK_SIZE": 32}, num_stages=3, num_warps=8),
        ],
        key=[],
        restore_value=["in_ptr0"],
    )
    @triton.jit
    def increment_kernel(
        in_ptr0,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(in_ptr0 + offsets, mask=mask)
        output = x + 1
        # In-place store back into the input buffer.
        tl.store(in_ptr0 + offsets, output, mask=mask)

    @torch.compile(fullgraph=True, backend=backend)
    def f(x):
        n_elements = x.numel()
        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        increment_kernel[grid](x, n_elements=n_elements)
        return x

    x = torch.rand(4, device=GPU_TYPE)
    prev = x.clone()

    with inductor_config.patch(
        {"triton.autotune_at_compile_time": autotune_at_compile_time}
    ):
        f(x)

    # make sure x was restored after autotuning
    torch.testing.assert_close(x, prev + 1)
@requires_gpu
@parametrize("dtype", (torch.float16, torch.float32, torch.float64))
def test_triton_kernel_float64_constant(self, dtype):
    # A python-float constant scaled by a dynamic shape must give the same
    # result compiled as eager for each floating-point dtype.
    def f(x):
        scale = 0.12 * x.shape[0]
        return x * scale

    x = torch.ones(200, device=GPU_TYPE, dtype=dtype)
    eager_out = f(x)
    compiled_out = torch.compile(f, dynamic=True)(x)
    self.assertEqual(compiled_out, eager_out)
# TODO enable this test case on XPU.
@requires_gpu_and_triton
@parametrize("cfg", ["normal", "cpp_wrapper"])
def test_triton_kernel_dtype_view(self, cfg):
    # https://github.com/pytorch/pytorch/issues/136159
    # A dtype-reinterpreting view passed to a user Triton kernel must stay
    # a true view (shared storage) after compilation, in both the default
    # and cpp_wrapper codegen paths.
    if cfg == "normal":
        config_kwargs = {"cpp_wrapper": False}
    elif cfg == "cpp_wrapper":
        config_kwargs = {"cpp_wrapper": True}

    with inductor_config.patch(**config_kwargs):

        @triton.jit
        def _triton_kernel(out_ptr, numel, BLOCK_SIZE: tl.constexpr):
            pid = tl.program_id(0)
            offsets = BLOCK_SIZE * pid + tl.arange(0, BLOCK_SIZE)
            mask = offsets < numel
            ones = tl.full((BLOCK_SIZE,), 1, tl.float16)
            tl.store(out_ptr + offsets, ones, mask)

        def fn(x):
            buf = torch.empty(x.shape, device=x.device, dtype=torch.float16)
            # the buf.view() should be a view sharing the same storage as buf.
            bfloat_buf = buf.view(dtype=torch.bfloat16)
            BLOCK_SIZE = 256
            numel = buf.numel()
            grid = (triton.cdiv(numel, BLOCK_SIZE),)
            _triton_kernel[grid](bfloat_buf, numel, BLOCK_SIZE)
            return buf, bfloat_buf

        fn_c = torch.compile(fn)

        x = torch.randn(8, device=GPU_TYPE)
        out_c = fn_c(x)
        out_e = fn(x)

        # expect view() to be an actual view, sharing the same data as the original buffer
        # verify first that this is true in the eager output
        self.assertEqual(out_e[0].data_ptr(), out_e[1].data_ptr())
        # .. and also in the compiled output
        self.assertEqual(out_c[0].data_ptr(), out_c[1].data_ptr())
        self.assertEqual(out_e[0], out_c[0])
        self.assertEqual(out_e[1], out_c[1])
# TODO enable this test case on XPU.
@requires_gpu
def test_i64_input(self):
    # The i64 "seed" input needs to be marked as "i64", not "i32".
    @triton.jit
    def triton_add_noise_(x_ptr, y_ptr, seed, numel, BLOCK_SIZE: tl.constexpr):
        pid = tl.program_id(0)
        offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
        x = tl.load(x_ptr + offsets, mask=(offsets < numel))
        rnd = tl.rand(seed, offsets)
        res = x + rnd
        tl.store(y_ptr + offsets, res, mask=(offsets < numel))

    def add_noise(x, seed):
        y = torch.empty_like(x)
        numel = x.numel()
        BLOCK_SIZE = 256

        def grid(meta):
            return (triton.cdiv(numel, meta["BLOCK_SIZE"]),)

        triton_add_noise_[grid](x, y, seed, numel, BLOCK_SIZE)
        return y

    def fn(x):
        x = x * x
        # Seed drawn above the i32 range so i32 marking would be wrong.
        seed = torch.randint(
            low=2**32, high=2**62, size=(1,), dtype=torch.int64
        ).item()
        return add_noise(x, seed)

    inp = torch.rand(400, device=GPU_TYPE)
    torch._dynamo.mark_dynamic(inp, 0)

    fn_c = torch.compile(fn, fullgraph=True)
    with dynamo_config.patch(capture_scalar_outputs=True):
        res = fn_c(inp)
    # x*x lies in [0, 1) and the noise in [0, 1), so valid results are in [0, 2).
    self.assertTrue(((res < 2) & (res >= 0)).all().item())
@requires_gpu
@parametrize("wrapped", [False, True])
@parametrize("autotune", [False, True])
def test_constexpr_dynamic_shapes(self, wrapped, autotune):
    # https://github.com/pytorch/pytorch/issues/136504
    # constexpr args derived from dynamic shapes (NUMEL, IS_ODD) must be
    # handled correctly, with and without autotuning, and with and without
    # the capture_triton / triton_op wrapping.
    @triton.jit
    def triton_(
        x_ptr,
        y_ptr,
        NUMEL: tl.constexpr,
        IS_ODD: tl.constexpr,
        BLOCK_SIZE: tl.constexpr,
    ):
        pid = tl.program_id(0)
        offsets = BLOCK_SIZE * pid + tl.arange(0, BLOCK_SIZE)
        mask = offsets < NUMEL

        data = tl.load(x_ptr + offsets, mask)
        result = data * data
        if IS_ODD:
            result = result + 1

        tl.store(y_ptr + offsets, result, mask)

    if autotune:
        triton_ = triton.autotune(
            [
                triton.Config(kwargs={"BLOCK_SIZE": 128}),
                triton.Config(kwargs={"BLOCK_SIZE": 256}),
            ],
            key=[],
        )(triton_)

    def triton_kernel_impl(x: torch.Tensor) -> torch.Tensor:
        y = torch.empty_like(x)
        numel = x.numel()

        # NOTE(review): IS_ODD receives numel % 2 == 0, which is False for
        # both (odd-sized) test inputs, so the +1 branch never fires here;
        # the flag name looks inverted — confirm upstream intent.
        args = [x, y, numel, numel % 2 == 0]
        if not autotune:
            args.append(256)  # BLOCK_SIZE

        def grid(meta):
            return (triton.cdiv(numel, meta["BLOCK_SIZE"]),)

        if wrapped:
            capture_triton(triton_)[grid](*args)
        else:
            triton_[grid](*args)
        return y

    if wrapped:
        triton_kernel = torch.library.triton_op(
            "constexpr_test::square", triton_kernel_impl, mutates_args={}
        )
    else:
        triton_kernel = triton_kernel_impl

    def fn(x):
        return triton_kernel(x)

    fn_c = torch.compile(fn, dynamic=True)

    x = torch.randn(512 + 5, device=GPU_TYPE)
    res = fn_c(x)
    self.assertEqual(x * x, res)

    x2 = torch.randn(1024 + 5, device=GPU_TYPE)
    res2 = fn_c(x2)
    self.assertEqual(x2 * x2, res2)
@requires_gpu
def test_triton_kernel_none_args(self):
    # https://github.com/pytorch/pytorch/issues/115344
    # An autotuned kernel must accept None for a pointer argument both
    # eagerly and under torch.compile.
    @triton.autotune(
        configs=[
            triton.Config({"BLOCK_SIZE": 32}, num_stages=5, num_warps=2),
            triton.Config({"BLOCK_SIZE": 64}, num_stages=4, num_warps=4),
        ],
        key=["n_elements"],
    )
    @triton.jit
    def sin_kernel(
        in_ptr0,
        out_ptr,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        # None pointer => use 0.0 as the input value.
        if in_ptr0 is not None:
            x = tl.load(in_ptr0 + offsets, mask=mask)
        else:
            x = 0.0
        output = tl.sin(x)
        tl.store(out_ptr + offsets, output, mask=mask)

    def sin_triton(x, out):
        n_elements = out.numel()
        sin_kernel[(n_elements,)](x, out, n_elements)

    x = torch.randn(65, device=GPU_TYPE)
    out = torch.empty_like(x)
    out_compiled = torch.empty_like(x)
    sin_triton_compiled = torch.compile(fullgraph=True)(sin_triton)

    sin_triton(x, out)
    sin_triton_compiled(x, out_compiled)
    self.assertEqual(out, out_compiled)

    sin_triton(None, out)
    sin_triton_compiled(None, out_compiled)
    self.assertEqual(out, out_compiled)
@requires_gpu
def test_triton_kernel_global_constexpr(self):
    # The kernel reads FLOAT_CONSTANT_C from module globals; compilation
    # must resolve it even though the global has no type annotation.
    @triton.jit
    def triton_(in_ptr, out_ptr, BLOCK_SIZE: tl.constexpr):
        pid = tl.program_id(0)
        offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
        x = tl.load(in_ptr + offsets)
        output = x + FLOAT_CONSTANT_C
        tl.store(out_ptr + offsets, output)

    def fn(x):
        y = torch.empty_like(x)
        BLOCK_SIZE = 256
        grid = (triton.cdiv(x.numel(), BLOCK_SIZE),)
        triton_[grid](x, y, BLOCK_SIZE)
        return y

    # make sure FLOAT_CONSTANT_C is NOT annotated
    self.assertFalse("FLOAT_CONSTANT_C" in globals().get("__annotations__", {}))
    # sanity check: STRING_CONSTANT_C _should_ be annotated
    self.assertTrue("STRING_CONSTANT_C" in globals().get("__annotations__", {}))

    x = torch.randn(512, device=GPU_TYPE)
    # Presumably FLOAT_CONSTANT_C == 3.14 at module scope — confirm
    # against its definition if this expectation ever drifts.
    expected = x + 3.14
    actual = torch.compile(fn)(x)
    self.assertEqual(expected, actual)
@requires_gpu
@unittest.skipIf(
    not triton_version_uses_attrs_dict(),
    "Test is only valid for new triton versions where attrs is represented by a raw dict",
)
def test_triton_attrs_dict_equal_1_None_format(self):
    # With raw-dict attrs, equal-to-1 values and None arguments must be
    # emitted in triton_meta as 'constants' with matching 'constexpr'
    # entries in the 'signature'.
    @triton.jit
    def triton_(in_ptr, out_ptr, numel, add_amount, BLOCK_SIZE: tl.constexpr):
        offsets = tl.arange(0, BLOCK_SIZE)
        x = tl.load(in_ptr + offsets, mask=(offsets < numel))
        output = x * x
        if add_amount is not None:
            output = output + add_amount
        tl.store(out_ptr + offsets, output, mask=(offsets < numel))

    def fn(x):
        y = torch.empty_like(x)
        BLOCK_SIZE = 256
        grid = (1,)
        # numel is 1 (single-element tensor) and add_amount is None.
        triton_[grid](x, y, x.numel(), None, BLOCK_SIZE)
        return y

    x = torch.full((1,), 2.5, device=GPU_TYPE)

    expected = fn(x)

    fn_c = torch.compile(fn)
    res, code = run_and_get_code(fn_c, x)
    self.assertEqual(expected, res)

    FileCheck().check("triton_meta=").check("'constants':").check("'numel': 1").run(
        code[0]
    )
    FileCheck().check("triton_meta=").check("'constants':").check(
        "'add_amount': None"
    ).run(code[0])
    FileCheck().check("triton_meta=").check("'constants':").check(
        "'BLOCK_SIZE': 256"
    ).run(code[0])
    FileCheck().check("triton_meta=").check("'signature':").check(
        "'numel': 'constexpr'"
    ).run(code[0])
    FileCheck().check("triton_meta=").check("'signature':").check(
        "'add_amount': 'constexpr'"
    ).run(code[0])
    FileCheck().check("triton_meta=").check("'signature':").check(
        "'BLOCK_SIZE': 'constexpr'"
    ).run(code[0])
@requires_gpu
@inductor_config.patch({"triton.autotune_at_compile_time": True})
@parametrize("quotes", ["single", "double"])
def test_kernel_with_docstring(self, quotes):
    # https://github.com/pytorch/pytorch/issues/155006
    # Kernels whose source contains a docstring must survive compile-time
    # autotuning codegen regardless of the quote style used.
    if quotes == "single":
        kernel = kernel_with_docstring_single_quotes
    else:
        kernel = kernel_with_docstring_double_quotes

    def fn(sz):
        out = torch.empty(sz, device=GPU_TYPE)
        BLOCK_SIZE = 32
        num_blocks = triton.cdiv(sz, BLOCK_SIZE)
        kernel[(num_blocks,)](out, sz, BLOCK_SIZE)
        return out

    eager_result = fn(345)
    compiled_result = torch.compile(fn, fullgraph=True)(345)
    self.assertEqual(eager_result, compiled_result)
@requires_gpu
@skipIfXpu(
    msg="XPU Triton result in nan, "
    "https://github.com/intel/torch-xpu-ops/issues/2330"
)
@skipIfRocm
@inductor_config.patch({"triton.autotune_at_compile_time": True})
@parametrize("quotes", ["single", "double"])
def test_kernel_inline_asm(self, quotes):
    # https://github.com/pytorch/pytorch/issues/155006
    # Kernels containing inline-asm string literals must survive
    # compile-time autotuning codegen regardless of quote style.
    if quotes == "single":
        kernel = kernel_inline_asm_single_quotes
    else:
        kernel = kernel_inline_asm_double_quotes

    def fn(inp):
        numel = inp.size(0)
        out = torch.empty(numel, device=GPU_TYPE)
        BLOCK_SIZE = 32
        num_blocks = triton.cdiv(numel, BLOCK_SIZE)
        kernel[(num_blocks,)](inp, out, numel, BLOCK_SIZE)
        return out

    inp = torch.randn(345, device=GPU_TYPE)
    eager_result = fn(inp)
    compiled_result = torch.compile(fn, fullgraph=True)(inp)
    self.assertEqual(eager_result, compiled_result)
@requires_gpu
@inductor_config.patch("emulate_precision_casts", True)
def test_triton_kernel_emulate_precision_unaffected(self):
    # emulate_precision_casts must leave user-defined Triton kernels
    # alone: the generated code must not carry the enable_fp_fusion
    # launch option.
    @triton.jit
    def triton_(in_ptr, out_ptr, numel, add_amount, BLOCK_SIZE: tl.constexpr):
        offsets = tl.arange(0, BLOCK_SIZE)
        x = tl.load(in_ptr + offsets, mask=(offsets < numel))
        output = x * x
        if add_amount is not None:
            output = output + add_amount
        tl.store(out_ptr + offsets, output, mask=(offsets < numel))

    def fn(x):
        y = torch.empty_like(x)
        BLOCK_SIZE = 256
        grid = (1,)
        triton_[grid](x, y, x.numel(), None, BLOCK_SIZE)
        return y

    t1 = torch.rand(5, device=GPU_TYPE)
    fn = torch.compile(fn)
    _, (code,) = run_and_get_code(fn, t1)
    self.assertTrue("enable_fp_fusion" not in code)
@requires_gpu
@inductor_config.patch("emulate_precision_casts", True)
@inductor_config.patch("max_autotune_gemm_backends", "TRITON")
def test_triton_kernel_emulate_precision_mm_kernels_do_not_change(self):
    # Inductor's own Triton mm templates must likewise not receive the
    # enable_fp_fusion launch option under emulate_precision_casts.
    from torch._inductor.utils import run_and_get_code

    @torch.compile(mode="max-autotune")
    def fn(a, b):
        return a @ b

    t1 = torch.rand(512, 512, device=GPU_TYPE)
    t2 = torch.rand(512, 512, device=GPU_TYPE)
    try:
        _, (code,) = run_and_get_code(fn, t1, t2)
        self.assertTrue("enable_fp_fusion" not in code)
    except Exception as e:
        # Some platforms offer no Triton mm choices at all; skip there.
        if "NoValidChoicesError" in str(e):
            raise unittest.SkipTest(
                "where inductor has no triton mm kernels available, this test is meaningless"
            ) from e
        raise
def make_mutation_test(fn):
    # Decorator factory: wraps a case function returning
    # (kernel, inputs, tma_descriptor_metadata, expected_outputs) into a
    # GPU test asserting that Triton mutation analysis identifies exactly
    # the expected list of mutated tensor names.
    @requires_gpu
    def test_fn(self):
        from torch._higher_order_ops.triton_kernel_wrap import identify_mutated_tensors

        kernel, inputs, tma_descriptor_metadata, outputs = fn()
        self.assertListEqual(
            identify_mutated_tensors(kernel, inputs, tma_descriptor_metadata),
            outputs,
        )

    return test_fn
# Triton codegen suffers from scoping issues.
# Define helpers here
if HAS_GPU:

    @triton.jit
    def helper_id(p):
        # Identity helper: returns its argument unchanged.
        return p

    @triton.jit
    def helper_add_and_out(p, q, out_ptr):
        # Returns the elementwise sum of the two inputs together with the
        # (unmodified) output pointer.
        return x + y, out_ptr
| KernelTests |
python | django__django | django/forms/widgets.py | {
"start": 27055,
"end": 28167
} | class ____(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = (
("unknown", _("Unknown")),
("true", _("Yes")),
("false", _("No")),
)
super().__init__(attrs, choices)
def format_value(self, value):
try:
return {
True: "true",
False: "false",
"true": "true",
"false": "false",
# For backwards compatibility with Django < 2.2.
"2": "true",
"3": "false",
}[value]
except KeyError:
return "unknown"
def value_from_datadict(self, data, files, name):
value = data.get(name)
return {
True: True,
"True": True,
"False": False,
False: False,
"true": True,
"false": False,
# For backwards compatibility with Django < 2.2.
"2": True,
"3": False,
}.get(value)
| NullBooleanSelect |
python | kubernetes-client__python | kubernetes/client/models/core_v1_event_series.py | {
"start": 383,
"end": 4514
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'count': 'int',
'last_observed_time': 'datetime'
}
attribute_map = {
'count': 'count',
'last_observed_time': 'lastObservedTime'
}
def __init__(self, count=None, last_observed_time=None, local_vars_configuration=None): # noqa: E501
"""CoreV1EventSeries - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._count = None
self._last_observed_time = None
self.discriminator = None
if count is not None:
self.count = count
if last_observed_time is not None:
self.last_observed_time = last_observed_time
@property
def count(self):
"""Gets the count of this CoreV1EventSeries. # noqa: E501
Number of occurrences in this series up to the last heartbeat time # noqa: E501
:return: The count of this CoreV1EventSeries. # noqa: E501
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this CoreV1EventSeries.
Number of occurrences in this series up to the last heartbeat time # noqa: E501
:param count: The count of this CoreV1EventSeries. # noqa: E501
:type: int
"""
self._count = count
@property
def last_observed_time(self):
"""Gets the last_observed_time of this CoreV1EventSeries. # noqa: E501
Time of the last occurrence observed # noqa: E501
:return: The last_observed_time of this CoreV1EventSeries. # noqa: E501
:rtype: datetime
"""
return self._last_observed_time
@last_observed_time.setter
def last_observed_time(self, last_observed_time):
"""Sets the last_observed_time of this CoreV1EventSeries.
Time of the last occurrence observed # noqa: E501
:param last_observed_time: The last_observed_time of this CoreV1EventSeries. # noqa: E501
:type: datetime
"""
self._last_observed_time = last_observed_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CoreV1EventSeries):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CoreV1EventSeries):
return True
return self.to_dict() != other.to_dict()
| CoreV1EventSeries |
python | django__django | tests/admin_inlines/admin.py | {
"start": 9530,
"end": 9620
} | class ____(admin.ModelAdmin):
inlines = [ClassStackedVertical]
| ClassAdminStackedVertical |
python | getsentry__sentry | src/sentry/integrations/msteams/webhook.py | {
"start": 6146,
"end": 6532
} | class ____(Enum):
INSTALLATION_UPDATE = "installationUpdate"
MESSAGE = "message"
CONVERSATION_UPDATE = "conversationUpdate"
UNKNOWN = "unknown"
@classmethod
def get_from_value(cls, value: str) -> MsTeamsEvents:
try:
return MsTeamsEvents(value)
except Exception:
return MsTeamsEvents.UNKNOWN
@all_silo_endpoint
| MsTeamsEvents |
python | mahmoud__glom | glom/matching.py | {
"start": 12606,
"end": 13485
} | class ____:
"""used by MType.__call__ to wrap a sub-spec for comparison"""
__slots__ = ('spec')
def __init__(self, spec):
self.spec = spec
def __eq__(self, other):
return _MExpr(self, '=', other)
def __ne__(self, other):
return _MExpr(self, '!', other)
def __gt__(self, other):
return _MExpr(self, '>', other)
def __lt__(self, other):
return _MExpr(self, '<', other)
def __ge__(self, other):
return _MExpr(self, 'g', other)
def __le__(self, other):
return _MExpr(self, 'l', other)
def __repr__(self):
return f'M({bbrepr(self.spec)})'
def glomit(self, target, scope):
match = scope[glom](target, self.spec, scope)
if match:
return target
raise MatchError('expected truthy value from {0!r}, got {1!r}', self.spec, match)
| _MSubspec |
python | doocs__leetcode | solution/0900-0999/0920.Number of Music Playlists/Solution2.py | {
"start": 0,
"end": 442
} | class ____:
def numMusicPlaylists(self, n: int, goal: int, k: int) -> int:
mod = 10**9 + 7
f = [0] * (goal + 1)
f[0] = 1
for i in range(1, goal + 1):
g = [0] * (goal + 1)
for j in range(1, n + 1):
g[j] = f[j - 1] * (n - j + 1)
if j > k:
g[j] += f[j] * (j - k)
g[j] %= mod
f = g
return f[n]
| Solution |
python | pytorch__pytorch | test/custom_operator/test_custom_ops.py | {
"start": 324,
"end": 5582
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.library_path = get_custom_op_library_path()
ops.load_library(self.library_path)
def test_custom_library_is_loaded(self):
self.assertIn(self.library_path, ops.loaded_libraries)
def test_op_with_no_abstract_impl_pystub(self):
x = torch.randn(3, device="meta")
if utils.requires_set_python_module():
with self.assertRaisesRegex(RuntimeError, "pointwise"):
torch.ops.custom.tan(x)
else:
# Smoketest
torch.ops.custom.tan(x)
def test_op_with_incorrect_abstract_impl_pystub(self):
x = torch.randn(3, device="meta")
with self.assertRaisesRegex(RuntimeError, "pointwise"):
torch.ops.custom.cos(x)
@unittest.skipIf(IS_WINDOWS, "torch.compile not supported on windows")
def test_dynamo_pystub_suggestion(self):
x = torch.randn(3)
@torch.compile(backend="eager", fullgraph=True)
def f(x):
return torch.ops.custom.asin(x)
with self.assertRaisesRegex(
RuntimeError,
r"(?s)Operator does not support running with fake tensors.*you may need to `import nonexistent`",
):
f(x)
def test_abstract_impl_pystub_faketensor(self):
from functorch import make_fx
x = torch.randn(3, device="cpu")
self.assertNotIn("my_custom_ops", sys.modules.keys())
with self.assertRaises(
torch._subclasses.fake_tensor.UnsupportedOperatorException
):
gm = make_fx(torch.ops.custom.nonzero.default, tracing_mode="symbolic")(x)
torch.ops.import_module("my_custom_ops")
gm = make_fx(torch.ops.custom.nonzero.default, tracing_mode="symbolic")(x)
self.assertExpectedInline(
"""\
def forward(self, arg0_1):
nonzero = torch.ops.custom.nonzero.default(arg0_1); arg0_1 = None
return nonzero
""".strip(),
gm.code.strip(),
)
def test_abstract_impl_pystub_meta(self):
x = torch.randn(3, device="meta")
self.assertNotIn("my_custom_ops2", sys.modules.keys())
with self.assertRaisesRegex(NotImplementedError, r"'my_custom_ops2'"):
torch.ops.custom.sin.default(x)
torch.ops.import_module("my_custom_ops2")
torch.ops.custom.sin.default(x)
def test_calling_custom_op_string(self):
output = ops.custom.op2("abc", "def")
self.assertLess(output, 0)
output = ops.custom.op2("abc", "abc")
self.assertEqual(output, 0)
def test_calling_custom_op(self):
output = ops.custom.op(torch.ones(5), 2.0, 3)
self.assertEqual(type(output), list)
self.assertEqual(len(output), 3)
for tensor in output:
self.assertTrue(tensor.allclose(torch.ones(5) * 2))
output = ops.custom.op_with_defaults(torch.ones(5))
self.assertEqual(type(output), list)
self.assertEqual(len(output), 1)
self.assertTrue(output[0].allclose(torch.ones(5)))
def test_calling_custom_op_with_autograd(self):
x = torch.randn((5, 5), requires_grad=True)
y = torch.randn((5, 5), requires_grad=True)
output = ops.custom.op_with_autograd(x, 2, y)
self.assertTrue(output.allclose(x + 2 * y + x * y))
go = torch.ones((), requires_grad=True)
output.sum().backward(go, False, True)
grad = torch.ones(5, 5)
self.assertEqual(x.grad, y + grad)
self.assertEqual(y.grad, x + grad * 2)
# Test with optional arg.
x.grad.zero_()
y.grad.zero_()
z = torch.randn((5, 5), requires_grad=True)
output = ops.custom.op_with_autograd(x, 2, y, z)
self.assertTrue(output.allclose(x + 2 * y + x * y + z))
go = torch.ones((), requires_grad=True)
output.sum().backward(go, False, True)
self.assertEqual(x.grad, y + grad)
self.assertEqual(y.grad, x + grad * 2)
self.assertEqual(z.grad, grad)
def test_calling_custom_op_with_autograd_in_nograd_mode(self):
with torch.no_grad():
x = torch.randn((5, 5), requires_grad=True)
y = torch.randn((5, 5), requires_grad=True)
output = ops.custom.op_with_autograd(x, 2, y)
self.assertTrue(output.allclose(x + 2 * y + x * y))
def test_calling_custom_op_inside_script_module(self):
model = Model()
output = model.forward(torch.ones(5))
self.assertTrue(output.allclose(torch.ones(5) + 1))
def test_saving_and_loading_script_module_with_custom_op(self):
model = Model()
# Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
# opens the file, and it cannot be opened multiple times in Windows. To support Windows,
# close the file after creation and try to remove it manually.
with tempfile.NamedTemporaryFile() as file:
file.close()
model.save(file.name)
loaded = torch.jit.load(file.name)
output = loaded.forward(torch.ones(5))
self.assertTrue(output.allclose(torch.ones(5) + 1))
if __name__ == "__main__":
run_tests()
| TestCustomOperators |
python | pytorch__pytorch | torch/_inductor/runtime/caching/implementations.py | {
"start": 983,
"end": 1224
} | class ____:
"""Sentinel class representing a cache miss.
Used to distinguish between a cached None value and a cache miss
when None is a valid cached value.
"""
# Singleton instance for cache miss sentinel
miss = Miss()
| Miss |
python | dask__distributed | distributed/shuffle/tests/test_shuffle.py | {
"start": 9495,
"end": 15636
} | class ____(SchedulerPlugin):
def __init__(self, prefixes, count, worker):
self.prefixes = prefixes
self.count = count
self.worker = worker
self.counter = defaultdict(int)
self.event = asyncio.Event()
async def start(self, scheduler):
self.scheduler = scheduler
def transition(self, key, start, finish, *args, **kwargs):
if (
finish == "processing"
and key_split(key) in self.prefixes
and self.scheduler.tasks[key].processing_on
and self.scheduler.tasks[key].processing_on.address == self.worker
):
self.counter[key_split(key)] += 1
if self.counter[key_split(key)] == self.count:
self.event.set()
return key, start, finish
@contextlib.asynccontextmanager
async def wait_until_worker_has_tasks(prefix, worker, count, scheduler):
plugin = ObserveTasksPlugin([prefix], count, worker)
scheduler.add_plugin(plugin, name="observe-tasks")
await plugin.start(scheduler)
try:
yield plugin.event
finally:
scheduler.remove_plugin("observe-tasks")
async def wait_for_tasks_in_state(
prefix: str,
state: str,
count: int,
dask_worker: Worker | Scheduler,
interval: float = 0.01,
) -> None:
tasks: Mapping[Key, SchedulerTaskState | WorkerTaskState]
if isinstance(dask_worker, Worker):
tasks = dask_worker.state.tasks
elif isinstance(dask_worker, Scheduler):
tasks = dask_worker.tasks
else:
raise TypeError(dask_worker)
while (
len(
[
key
for key, ts in tasks.items()
if prefix in key_split(key) and ts.state == state
]
)
< count
):
await asyncio.sleep(interval)
async def wait_until_new_shuffle_is_initialized(
scheduler: Scheduler, interval: float = 0.01, timeout: int | None = None
) -> ShuffleId:
deadline = Deadline.after(timeout)
scheduler_plugin = scheduler.plugins["shuffle"]
assert isinstance(scheduler_plugin, ShuffleSchedulerPlugin)
while not scheduler_plugin.shuffle_ids() and not deadline.expired:
await asyncio.sleep(interval)
shuffle_ids = scheduler_plugin.shuffle_ids()
assert len(shuffle_ids) == 1
return next(iter(shuffle_ids))
@gen_cluster(client=True, nthreads=[("", 1)] * 2)
async def test_closed_worker_during_transfer(c, s, a, b):
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-03-01",
dtypes={"x": float, "y": float},
freq="10 s",
)
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
shuffled = df.shuffle("x", force=True)
fut = c.compute([shuffled, df], sync=True)
await wait_for_tasks_in_state("shuffle-transfer", "memory", 1, b)
await assert_worker_cleanup(b, close=True)
result, expected = await fut
dd.assert_eq(result, expected)
await c.close()
await assert_worker_cleanup(a)
await assert_scheduler_cleanup(s)
@gen_cluster(
client=True,
nthreads=[("", 1)] * 2,
config={"distributed.scheduler.allowed-failures": 0},
)
async def test_restarting_during_transfer_raises_killed_worker(c, s, a, b):
await c.register_plugin(BlockedShuffleReceiveShuffleWorkerPlugin(), name="shuffle")
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-02-01",
dtypes={"x": float, "y": float},
freq="10 s",
)
shuffle_extA = a.plugins["shuffle"]
shuffle_extB = b.plugins["shuffle"]
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
out = df.shuffle("x", force=True)
out = c.compute(out.x.size)
await asyncio.gather(
shuffle_extA.in_shuffle_receive.wait(), shuffle_extB.in_shuffle_receive.wait()
)
shuffle_extA.block_shuffle_receive.set()
shuffle_extB.block_shuffle_receive.set()
await assert_worker_cleanup(b, close=True)
with pytest.raises(KilledWorker):
await out
assert sum(event["action"] == "p2p-failed" for _, event in s.get_events("p2p")) == 1
await c.close()
await assert_worker_cleanup(a)
await assert_scheduler_cleanup(s)
@gen_cluster(
client=True,
nthreads=[("", 1), ("", 1)],
config={"distributed.scheduler.allowed-failures": 0},
)
async def test_erred_task_before_p2p_does_not_log_event(c, s, a, b):
def block_and_fail_eventually(df, semaphore, event):
acquired = semaphore.acquire(timeout=0)
if acquired:
return df
event.wait()
raise RuntimeError("test error")
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-02-01",
dtypes={"x": float, "y": float},
freq="10 s",
)
semaphore = await Semaphore(max_leases=s.total_nthreads * 2 + 1)
event = Event()
df = df.map_partitions(block_and_fail_eventually, semaphore, event, meta=df._meta)
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
out = df.shuffle("x", force=True)
shuffle_ext = s.plugins["shuffle"]
out = c.compute(out)
await async_poll_for(lambda: shuffle_ext.active_shuffles, timeout=5)
await event.set()
with pytest.raises(RuntimeError, match="test error"):
await out
assert all(event["action"] != "p2p-failed" for _, event in s.get_events("p2p"))
@gen_cluster(
client=True,
nthreads=[("", 1)] * 2,
config={"distributed.scheduler.allowed-failures": 1},
)
async def test_restarting_does_not_log_p2p_failed(c, s, a, b):
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-03-01",
dtypes={"x": float, "y": float},
freq="10 s",
)
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
out = df.shuffle("x", force=True)
out = c.compute(out.x.size)
await wait_for_tasks_in_state("shuffle-transfer", "memory", 1, b)
await assert_worker_cleanup(b, close=True)
await out
assert not s.get_events("p2p")
await c.close()
await assert_worker_cleanup(a)
await assert_scheduler_cleanup(s)
| ObserveTasksPlugin |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 60347,
"end": 65906
} | class ____:
# Define the array class here, so run this on matrices elsewhere.
array = np.array
def check_qr(self, a):
# This test expects the argument `a` to be an ndarray or
# a subclass of an ndarray of inexact type.
a_type = type(a)
a_dtype = a.dtype
m, n = a.shape
k = min(m, n)
# mode == 'complete'
res = linalg.qr(a, mode='complete')
Q, R = res.Q, res.R
assert_(Q.dtype == a_dtype)
assert_(R.dtype == a_dtype)
assert_(isinstance(Q, a_type))
assert_(isinstance(R, a_type))
assert_(Q.shape == (m, m))
assert_(R.shape == (m, n))
assert_almost_equal(dot(Q, R), a)
assert_almost_equal(dot(Q.T.conj(), Q), np.eye(m))
assert_almost_equal(np.triu(R), R)
# mode == 'reduced'
q1, r1 = linalg.qr(a, mode='reduced')
assert_(q1.dtype == a_dtype)
assert_(r1.dtype == a_dtype)
assert_(isinstance(q1, a_type))
assert_(isinstance(r1, a_type))
assert_(q1.shape == (m, k))
assert_(r1.shape == (k, n))
assert_almost_equal(dot(q1, r1), a)
assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k))
assert_almost_equal(np.triu(r1), r1)
# mode == 'r'
r2 = linalg.qr(a, mode='r')
assert_(r2.dtype == a_dtype)
assert_(isinstance(r2, a_type))
assert_almost_equal(r2, r1)
@pytest.mark.parametrize(["m", "n"], [
(3, 0),
(0, 3),
(0, 0)
])
def test_qr_empty(self, m, n):
k = min(m, n)
a = np.empty((m, n))
self.check_qr(a)
h, tau = np.linalg.qr(a, mode='raw')
assert_equal(h.dtype, np.double)
assert_equal(tau.dtype, np.double)
assert_equal(h.shape, (n, m))
assert_equal(tau.shape, (k,))
def test_mode_raw(self):
# The factorization is not unique and varies between libraries,
# so it is not possible to check against known values. Functional
# testing is a possibility, but awaits the exposure of more
# of the functions in lapack_lite. Consequently, this test is
# very limited in scope. Note that the results are in FORTRAN
# order, hence the h arrays are transposed.
a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
# Test double
h, tau = linalg.qr(a, mode='raw')
assert_(h.dtype == np.double)
assert_(tau.dtype == np.double)
assert_(h.shape == (2, 3))
assert_(tau.shape == (2,))
h, tau = linalg.qr(a.T, mode='raw')
assert_(h.dtype == np.double)
assert_(tau.dtype == np.double)
assert_(h.shape == (3, 2))
assert_(tau.shape == (2,))
def test_mode_all_but_economic(self):
a = self.array([[1, 2], [3, 4]])
b = self.array([[1, 2], [3, 4], [5, 6]])
for dt in "fd":
m1 = a.astype(dt)
m2 = b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
for dt in "fd":
m1 = 1 + 1j * a.astype(dt)
m2 = 1 + 1j * b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
def check_qr_stacked(self, a):
# This test expects the argument `a` to be an ndarray or
# a subclass of an ndarray of inexact type.
a_type = type(a)
a_dtype = a.dtype
m, n = a.shape[-2:]
k = min(m, n)
# mode == 'complete'
q, r = linalg.qr(a, mode='complete')
assert_(q.dtype == a_dtype)
assert_(r.dtype == a_dtype)
assert_(isinstance(q, a_type))
assert_(isinstance(r, a_type))
assert_(q.shape[-2:] == (m, m))
assert_(r.shape[-2:] == (m, n))
assert_almost_equal(matmul(q, r), a)
I_mat = np.identity(q.shape[-1])
stack_I_mat = np.broadcast_to(I_mat,
q.shape[:-2] + (q.shape[-1],) * 2)
assert_almost_equal(matmul(swapaxes(q, -1, -2).conj(), q), stack_I_mat)
assert_almost_equal(np.triu(r[..., :, :]), r)
# mode == 'reduced'
q1, r1 = linalg.qr(a, mode='reduced')
assert_(q1.dtype == a_dtype)
assert_(r1.dtype == a_dtype)
assert_(isinstance(q1, a_type))
assert_(isinstance(r1, a_type))
assert_(q1.shape[-2:] == (m, k))
assert_(r1.shape[-2:] == (k, n))
assert_almost_equal(matmul(q1, r1), a)
I_mat = np.identity(q1.shape[-1])
stack_I_mat = np.broadcast_to(I_mat,
q1.shape[:-2] + (q1.shape[-1],) * 2)
assert_almost_equal(matmul(swapaxes(q1, -1, -2).conj(), q1),
stack_I_mat)
assert_almost_equal(np.triu(r1[..., :, :]), r1)
# mode == 'r'
r2 = linalg.qr(a, mode='r')
assert_(r2.dtype == a_dtype)
assert_(isinstance(r2, a_type))
assert_almost_equal(r2, r1)
@pytest.mark.parametrize("size", [
(3, 4), (4, 3), (4, 4),
(3, 0), (0, 3)])
@pytest.mark.parametrize("outer_size", [
(2, 2), (2,), (2, 3, 4)])
@pytest.mark.parametrize("dt", [
np.single, np.double,
np.csingle, np.cdouble])
def test_stacked_inputs(self, outer_size, size, dt):
rng = np.random.default_rng(123)
A = rng.normal(size=outer_size + size).astype(dt)
B = rng.normal(size=outer_size + size).astype(dt)
self.check_qr_stacked(A)
self.check_qr_stacked(A + 1.j * B)
| TestQR |
python | dagster-io__dagster | python_modules/dagster/dagster/components/lib/shim_components/multi_asset.py | {
"start": 451,
"end": 1678
} | class ____(ShimScaffolder[MultiAssetScaffoldParams]):
@classmethod
def get_scaffold_params(cls) -> type[MultiAssetScaffoldParams]:
return MultiAssetScaffoldParams
def get_text(self, request: ScaffoldRequest[MultiAssetScaffoldParams]) -> str:
asset_keys = (
request.params.asset_key
if request.params and request.params.asset_key
# Default to two sample assets based on the filename
else [
f"{request.target_path.stem}/first_asset",
f"{request.target_path.stem}/second_asset",
]
)
specs_str = textwrap.indent(
",\n".join(
f"dg.AssetSpec(key=dg.AssetKey({AssetKey.from_user_string(key).path!r}))"
for key in asset_keys
),
prefix=" " * 20,
)
return textwrap.dedent(
f"""\
import dagster as dg
@dg.multi_asset(
specs=[
{specs_str}
]
)
def {request.target_path.stem}(context: dg.AssetExecutionContext):
...
"""
)
scaffold_with(MultiAssetScaffolder)(multi_asset)
| MultiAssetScaffolder |
python | chroma-core__chroma | chromadb/segment/impl/distributed/segment_directory.py | {
"start": 8539,
"end": 13393
} | class ____(SegmentDirectory, EnforceOverrides):
_memberlist_provider: MemberlistProvider
_curr_memberlist_mutex: threading.Lock
_curr_memberlist: Optional[Memberlist]
_routing_mode: RoutingMode
def __init__(self, system: System):
super().__init__(system)
self._memberlist_provider = self.require(MemberlistProvider)
memberlist_name = system.settings.require("worker_memberlist_name")
self._memberlist_provider.set_memberlist_name(memberlist_name)
self._routing_mode = system.settings.require(
"chroma_segment_directory_routing_mode"
)
self._curr_memberlist = None
self._curr_memberlist_mutex = threading.Lock()
@override
def start(self) -> None:
self._curr_memberlist = self._memberlist_provider.get_memberlist()
self._memberlist_provider.register_updated_memberlist_callback(
self._update_memberlist
)
return super().start()
@override
def stop(self) -> None:
self._memberlist_provider.unregister_updated_memberlist_callback(
self._update_memberlist
)
return super().stop()
@override
def get_segment_endpoints(self, segment: Segment, n: int) -> List[str]:
if self._curr_memberlist is None or len(self._curr_memberlist) == 0:
raise ValueError("Memberlist is not initialized")
# assign() will throw an error if n is greater than the number of members
# clamp n to the number of members to align with the contract of this method
# which is to return at most n endpoints
n = min(n, len(self._curr_memberlist))
# Check if all members in the memberlist have a node set,
# if so, route using the node
# NOTE(@hammadb) 1/8/2024: This is to handle the migration between routing
# using the member id and routing using the node name
# We want to route using the node name over the member id
# because the node may have a disk cache that we want a
# stable identifier for over deploys.
can_use_node_routing = (
all([m.node != "" and len(m.node) != 0 for m in self._curr_memberlist])
and self._routing_mode == RoutingMode.NODE
)
if can_use_node_routing:
# If we are using node routing and the segments
assignments = assign(
segment["collection"].hex,
[m.node for m in self._curr_memberlist],
murmur3hasher,
n,
)
else:
# Query to the same collection should end up on the same endpoint
assignments = assign(
segment["collection"].hex,
[m.id for m in self._curr_memberlist],
murmur3hasher,
n,
)
assignments_set = set(assignments)
out_endpoints = []
for member in self._curr_memberlist:
is_chosen_with_node_routing = (
can_use_node_routing and member.node in assignments_set
)
is_chosen_with_id_routing = (
not can_use_node_routing and member.id in assignments_set
)
if is_chosen_with_node_routing or is_chosen_with_id_routing:
# If the memberlist has an ip, use it, otherwise use the member id with the headless service
# this is for backwards compatibility with the old memberlist which only had ids
if member.ip is not None and member.ip != "":
endpoint = f"{member.ip}:50051"
out_endpoints.append(endpoint)
else:
service_name = self.extract_service_name(member.id)
endpoint = f"{member.id}.{service_name}.{KUBERNETES_NAMESPACE}.{HEADLESS_SERVICE}:50051"
out_endpoints.append(endpoint)
return out_endpoints
@override
def register_updated_segment_callback(
self, callback: Callable[[Segment], None]
) -> None:
raise NotImplementedError()
@trace_method(
"RendezvousHashSegmentDirectory._update_memberlist",
OpenTelemetryGranularity.ALL,
)
def _update_memberlist(self, memberlist: Memberlist) -> None:
with self._curr_memberlist_mutex:
add_attributes_to_current_span(
{"new_memberlist": [m.id for m in memberlist]}
)
self._curr_memberlist = memberlist
def extract_service_name(self, pod_name: str) -> Optional[str]:
# Split the pod name by the hyphen
parts = pod_name.split("-")
# The service name is expected to be the prefix before the last hyphen
if len(parts) > 1:
return "-".join(parts[:-1])
return None
| RendezvousHashSegmentDirectory |
python | openai__openai-python | src/openai/resources/beta/threads/runs/runs.py | {
"start": 153512,
"end": 154995
} | class ____:
def __init__(self, runs: AsyncRuns) -> None:
self._runs = runs
self.create = ( # pyright: ignore[reportDeprecated]
async_to_streamed_response_wrapper(
runs.create, # pyright: ignore[reportDeprecated],
)
)
self.retrieve = ( # pyright: ignore[reportDeprecated]
async_to_streamed_response_wrapper(
runs.retrieve, # pyright: ignore[reportDeprecated],
)
)
self.update = ( # pyright: ignore[reportDeprecated]
async_to_streamed_response_wrapper(
runs.update, # pyright: ignore[reportDeprecated],
)
)
self.list = ( # pyright: ignore[reportDeprecated]
async_to_streamed_response_wrapper(
runs.list, # pyright: ignore[reportDeprecated],
)
)
self.cancel = ( # pyright: ignore[reportDeprecated]
async_to_streamed_response_wrapper(
runs.cancel, # pyright: ignore[reportDeprecated],
)
)
self.submit_tool_outputs = ( # pyright: ignore[reportDeprecated]
async_to_streamed_response_wrapper(
runs.submit_tool_outputs, # pyright: ignore[reportDeprecated],
)
)
@cached_property
def steps(self) -> AsyncStepsWithStreamingResponse:
return AsyncStepsWithStreamingResponse(self._runs.steps)
| AsyncRunsWithStreamingResponse |
python | getsentry__sentry | tests/sentry/workflow_engine/processors/test_workflow.py | {
"start": 41969,
"end": 44805
} | class ____:
@pytest.fixture(autouse=True)
def setUp(self) -> None:
self.organization = Factories.create_organization()
self.project = Factories.create_project(organization=self.organization)
self.workflow = Factories.create_workflow()
self.workflow_trigger = Factories.create_data_condition_group(
organization=self.organization
)
self.workflow.when_condition_group = self.workflow_trigger
self.workflow.save()
self.action_filter = Factories.create_data_condition_group(organization=self.organization)
self.action = Factories.create_action()
self.action_and_filter = Factories.create_data_condition_group_action(
condition_group=self.action_filter,
action=self.action,
)
self.workflow_actions = Factories.create_workflow_data_condition_group(
workflow=self.workflow,
condition_group=self.action_filter,
)
self.trigger_condition = Factories.create_data_condition(
condition_group=self.workflow_trigger,
comparison=1,
condition_result=True,
)
self.action_condition = Factories.create_data_condition(
condition_group=self.action_filter,
comparison=1,
condition_result=True,
)
@pytest.mark.parametrize(
"instance_attr",
[
"workflow",
"workflow_trigger",
"action_filter",
"action_and_filter",
"workflow_actions",
"trigger_condition",
"action_condition",
],
)
def test_delete_workflow(self, instance_attr: str) -> None:
instance = getattr(self, instance_attr)
instance_id = instance.id
cls = instance.__class__
delete_workflow(self.workflow)
assert not cls.objects.filter(id=instance_id).exists()
def test_delete_workflow__no_actions(self) -> None:
Action.objects.get(id=self.action.id).delete()
assert not DataConditionGroupAction.objects.filter(id=self.action_and_filter.id).exists()
workflow_id = self.workflow.id
delete_workflow(self.workflow)
assert not Workflow.objects.filter(id=workflow_id).exists()
def test_delete_workflow__no_workflow_triggers(self) -> None:
# TODO - when this condition group is deleted, it's removing the workflow
# it's basically inverted from what's expected on the cascade delete
self.workflow.when_condition_group = None
self.workflow.save()
DataConditionGroup.objects.get(id=self.workflow_trigger.id).delete()
workflow_id = self.workflow.id
delete_workflow(self.workflow)
assert not Workflow.objects.filter(id=workflow_id).exists()
| TestDeleteWorkflow |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 60107,
"end": 60380
} | class ____(sgqlc.types.Enum):
"""State of the project; either 'open' or 'closed'
Enumeration Choices:
* `CLOSED`: The project is closed.
* `OPEN`: The project is open.
"""
__schema__ = github_schema
__choices__ = ("CLOSED", "OPEN")
| ProjectState |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 324901,
"end": 325086
} | class ____(CompositeMark):
"""ErrorBand schema wrapper."""
_schema = {"$ref": "#/definitions/ErrorBand"}
def __init__(self, *args):
super().__init__(*args)
| ErrorBand |
python | astropy__astropy | astropy/extern/configobj/validate.py | {
"start": 14553,
"end": 46678
} | class ____(object):
"""
Validator is an object that allows you to register a set of 'checks'.
These checks take input and test that it conforms to the check.
This can also involve converting the value from a string into
the correct datatype.
The ``check`` method takes an input string which configures which
check is to be used and applies that check to a supplied value.
An example input string would be:
'int_range(param1, param2)'
You would then provide something like:
>>> def int_range_check(value, min, max):
... # turn min and max from strings to integers
... min = int(min)
... max = int(max)
... # check that value is of the correct type.
... # possible valid inputs are integers or strings
... # that represent integers
... if not isinstance(value, (int, long, string_type)):
... raise VdtTypeError(value)
... elif isinstance(value, string_type):
... # if we are given a string
... # attempt to convert to an integer
... try:
... value = int(value)
... except ValueError:
... raise VdtValueError(value)
... # check the value is between our constraints
... if not min <= value:
... raise VdtValueTooSmallError(value)
... if not value <= max:
... raise VdtValueTooBigError(value)
... return value
>>> fdict = {'int_range': int_range_check}
>>> vtr1 = Validator(fdict)
>>> vtr1.check('int_range(20, 40)', '30')
30
>>> vtr1.check('int_range(20, 40)', '60')
Traceback (most recent call last):
VdtValueTooBigError: the value "60" is too big.
New functions can be added with : ::
>>> vtr2 = Validator()
>>> vtr2.functions['int_range'] = int_range_check
Or by passing in a dictionary of functions when Validator
is instantiated.
Your functions *can* use keyword arguments,
but the first argument should always be 'value'.
If the function doesn't take additional arguments,
the parentheses are optional in the check.
It can be written with either of : ::
keyword = function_name
keyword = function_name()
The first program to utilise Validator() was Michael Foord's
ConfigObj, an alternative to ConfigParser which supports lists and
can validate a config file using a config schema.
For more details on using Validator with ConfigObj see:
https://configobj.readthedocs.org/en/latest/configobj.html
"""
# this regex does the initial parsing of the checks
_func_re = re.compile(r'([^\(\)]+?)\((.*)\)', re.DOTALL)
# this regex takes apart keyword arguments
_key_arg = re.compile(r'^([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*(.*)$', re.DOTALL)
# this regex finds keyword=list(....) type values
_list_arg = _list_arg
# this regex takes individual values out of lists - in one pass
_list_members = _list_members
# These regexes check a set of arguments for validity
# and then pull the members out
_paramfinder = re.compile(_paramstring, re.VERBOSE | re.DOTALL)
_matchfinder = re.compile(_matchstring, re.VERBOSE | re.DOTALL)
def __init__(self, functions=None):
"""
>>> vtri = Validator()
"""
self.functions = {
'': self._pass,
'integer': is_integer,
'float': is_float,
'boolean': is_boolean,
'ip_addr': is_ip_addr,
'string': is_string,
'list': is_list,
'tuple': is_tuple,
'int_list': is_int_list,
'float_list': is_float_list,
'bool_list': is_bool_list,
'ip_addr_list': is_ip_addr_list,
'string_list': is_string_list,
'mixed_list': is_mixed_list,
'pass': self._pass,
'option': is_option,
'force_list': force_list,
}
if functions is not None:
self.functions.update(functions)
# tekNico: for use by ConfigObj
self.baseErrorClass = ValidateError
self._cache = {}
def check(self, check, value, missing=False):
"""
Usage: check(check, value)
Arguments:
check: string representing check to apply (including arguments)
value: object to be checked
Returns value, converted to correct type if necessary
If the check fails, raises a ``ValidateError`` subclass.
>>> vtor.check('yoda', '')
Traceback (most recent call last):
VdtUnknownCheckError: the check "yoda" is unknown.
>>> vtor.check('yoda()', '')
Traceback (most recent call last):
VdtUnknownCheckError: the check "yoda" is unknown.
>>> vtor.check('string(default="")', '', missing=True)
''
"""
fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
if missing:
if default is None:
# no information needed here - to be handled by caller
raise VdtMissingValue()
value = self._handle_none(default)
if value is None:
return None
return self._check_value(value, fun_name, fun_args, fun_kwargs)
def _handle_none(self, value):
if value == 'None':
return None
elif value in ("'None'", '"None"'):
# Special case a quoted None
value = self._unquote(value)
return value
def _parse_with_caching(self, check):
if check in self._cache:
fun_name, fun_args, fun_kwargs, default = self._cache[check]
# We call list and dict below to work with *copies* of the data
# rather than the original (which are mutable of course)
fun_args = list(fun_args)
fun_kwargs = dict(fun_kwargs)
else:
fun_name, fun_args, fun_kwargs, default = self._parse_check(check)
fun_kwargs = dict([(str(key), value) for (key, value) in list(fun_kwargs.items())])
self._cache[check] = fun_name, list(fun_args), dict(fun_kwargs), default
return fun_name, fun_args, fun_kwargs, default
def _check_value(self, value, fun_name, fun_args, fun_kwargs):
try:
fun = self.functions[fun_name]
except KeyError:
raise VdtUnknownCheckError(fun_name)
else:
return fun(value, *fun_args, **fun_kwargs)
def _parse_check(self, check):
fun_match = self._func_re.match(check)
if fun_match:
fun_name = fun_match.group(1)
arg_string = fun_match.group(2)
arg_match = self._matchfinder.match(arg_string)
if arg_match is None:
# Bad syntax
raise VdtParamError('Bad syntax in check "%s".' % check)
fun_args = []
fun_kwargs = {}
# pull out args of group 2
for arg in self._paramfinder.findall(arg_string):
# args may need whitespace removing (before removing quotes)
arg = arg.strip()
listmatch = self._list_arg.match(arg)
if listmatch:
key, val = self._list_handle(listmatch)
fun_kwargs[key] = val
continue
keymatch = self._key_arg.match(arg)
if keymatch:
val = keymatch.group(2)
if not val in ("'None'", '"None"'):
# Special case a quoted None
val = self._unquote(val)
fun_kwargs[keymatch.group(1)] = val
continue
fun_args.append(self._unquote(arg))
else:
# allows for function names without (args)
return check, (), {}, None
# Default must be deleted if the value is specified too,
# otherwise the check function will get a spurious "default" keyword arg
default = fun_kwargs.pop('default', None)
return fun_name, fun_args, fun_kwargs, default
def _unquote(self, val):
"""Unquote a value if necessary."""
if (len(val) >= 2) and (val[0] in ("'", '"')) and (val[0] == val[-1]):
val = val[1:-1]
return val
def _list_handle(self, listmatch):
"""Take apart a ``keyword=list('val, 'val')`` type string."""
out = []
name = listmatch.group(1)
args = listmatch.group(2)
for arg in self._list_members.findall(args):
out.append(self._unquote(arg))
return name, out
def _pass(self, value):
"""
Dummy check that always passes
>>> vtor.check('', 0)
0
>>> vtor.check('', '0')
'0'
"""
return value
def get_default_value(self, check):
"""
Given a check, return the default value for the check
(converted to the right type).
If the check doesn't specify a default value then a
``KeyError`` will be raised.
"""
fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
if default is None:
raise KeyError('Check "%s" has no default value.' % check)
value = self._handle_none(default)
if value is None:
return value
return self._check_value(value, fun_name, fun_args, fun_kwargs)
def _is_num_param(names, values, to_float=False):
"""
Return numbers from inputs or raise VdtParamError.
Lets ``None`` pass through.
Pass in keyword argument ``to_float=True`` to
use float for the conversion rather than int.
>>> _is_num_param(('', ''), (0, 1.0))
[0, 1]
>>> _is_num_param(('', ''), (0, 1.0), to_float=True)
[0.0, 1.0]
>>> _is_num_param(('a'), ('a'))
Traceback (most recent call last):
VdtParamError: passed an incorrect value "a" for parameter "a".
"""
fun = to_float and float or int
out_params = []
for (name, val) in zip(names, values):
if val is None:
out_params.append(val)
elif isinstance(val, (int, long, float, string_type)):
try:
out_params.append(fun(val))
except ValueError as e:
raise VdtParamError(name, val)
else:
raise VdtParamError(name, val)
return out_params
# built in checks
# you can override these by setting the appropriate name
# in Validator.functions
# note: if the params are specified wrongly in your input string,
# you will also raise errors.
def is_integer(value, min=None, max=None):
"""
A check that tests that a given value is an integer (int, or long)
and optionally, between bounds. A negative value is accepted, while
a float will fail.
If the value is a string, then the conversion is done - if possible.
Otherwise a VdtError is raised.
>>> vtor.check('integer', '-1')
-1
>>> vtor.check('integer', '0')
0
>>> vtor.check('integer', 9)
9
>>> vtor.check('integer', 'a')
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
>>> vtor.check('integer', '2.2')
Traceback (most recent call last):
VdtTypeError: the value "2.2" is of the wrong type.
>>> vtor.check('integer(10)', '20')
20
>>> vtor.check('integer(max=20)', '15')
15
>>> vtor.check('integer(10)', '9')
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(10)', 9)
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(max=20)', '35')
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(max=20)', 35)
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(0, 9)', False)
0
"""
(min_val, max_val) = _is_num_param(('min', 'max'), (min, max))
if not isinstance(value, (int, long, string_type)):
raise VdtTypeError(value)
if isinstance(value, string_type):
# if it's a string - does it represent an integer ?
try:
value = int(value)
except ValueError:
raise VdtTypeError(value)
if (min_val is not None) and (value < min_val):
raise VdtValueTooSmallError(value)
if (max_val is not None) and (value > max_val):
raise VdtValueTooBigError(value)
return value
def is_float(value, min=None, max=None):
"""
A check that tests that a given value is a float
(an integer will be accepted), and optionally - that it is between bounds.
If the value is a string, then the conversion is done - if possible.
Otherwise a VdtError is raised.
This can accept negative values.
>>> vtor.check('float', '2')
2.0
From now on we multiply the value to avoid comparing decimals
>>> vtor.check('float', '-6.8') * 10
-68.0
>>> vtor.check('float', '12.2') * 10
122.0
>>> vtor.check('float', 8.4) * 10
84.0
>>> vtor.check('float', 'a')
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
>>> vtor.check('float(10.1)', '10.2') * 10
102.0
>>> vtor.check('float(max=20.2)', '15.1') * 10
151.0
>>> vtor.check('float(10.0)', '9.0')
Traceback (most recent call last):
VdtValueTooSmallError: the value "9.0" is too small.
>>> vtor.check('float(max=20.0)', '35.0')
Traceback (most recent call last):
VdtValueTooBigError: the value "35.0" is too big.
"""
(min_val, max_val) = _is_num_param(
('min', 'max'), (min, max), to_float=True)
if not isinstance(value, (int, long, float, string_type)):
raise VdtTypeError(value)
if not isinstance(value, float):
# if it's a string - does it represent a float ?
try:
value = float(value)
except ValueError:
raise VdtTypeError(value)
if (min_val is not None) and (value < min_val):
raise VdtValueTooSmallError(value)
if (max_val is not None) and (value > max_val):
raise VdtValueTooBigError(value)
return value
bool_dict = {
True: True, 'on': True, '1': True, 'true': True, 'yes': True,
False: False, 'off': False, '0': False, 'false': False, 'no': False,
}
def is_boolean(value):
"""
Check if the value represents a boolean.
>>> vtor.check('boolean', 0)
0
>>> vtor.check('boolean', False)
0
>>> vtor.check('boolean', '0')
0
>>> vtor.check('boolean', 'off')
0
>>> vtor.check('boolean', 'false')
0
>>> vtor.check('boolean', 'no')
0
>>> vtor.check('boolean', 'nO')
0
>>> vtor.check('boolean', 'NO')
0
>>> vtor.check('boolean', 1)
1
>>> vtor.check('boolean', True)
1
>>> vtor.check('boolean', '1')
1
>>> vtor.check('boolean', 'on')
1
>>> vtor.check('boolean', 'true')
1
>>> vtor.check('boolean', 'yes')
1
>>> vtor.check('boolean', 'Yes')
1
>>> vtor.check('boolean', 'YES')
1
>>> vtor.check('boolean', '')
Traceback (most recent call last):
VdtTypeError: the value "" is of the wrong type.
>>> vtor.check('boolean', 'up')
Traceback (most recent call last):
VdtTypeError: the value "up" is of the wrong type.
"""
if isinstance(value, string_type):
try:
return bool_dict[value.lower()]
except KeyError:
raise VdtTypeError(value)
# we do an equality test rather than an identity test
# this ensures Python 2.2 compatibilty
# and allows 0 and 1 to represent True and False
if value == False:
return False
elif value == True:
return True
else:
raise VdtTypeError(value)
def is_ip_addr(value):
"""
Check that the supplied value is an Internet Protocol address, v.4,
represented by a dotted-quad string, i.e. '1.2.3.4'.
>>> vtor.check('ip_addr', '1 ')
'1'
>>> vtor.check('ip_addr', ' 1.2')
'1.2'
>>> vtor.check('ip_addr', ' 1.2.3 ')
'1.2.3'
>>> vtor.check('ip_addr', '1.2.3.4')
'1.2.3.4'
>>> vtor.check('ip_addr', '0.0.0.0')
'0.0.0.0'
>>> vtor.check('ip_addr', '255.255.255.255')
'255.255.255.255'
>>> vtor.check('ip_addr', '255.255.255.256')
Traceback (most recent call last):
VdtValueError: the value "255.255.255.256" is unacceptable.
>>> vtor.check('ip_addr', '1.2.3.4.5')
Traceback (most recent call last):
VdtValueError: the value "1.2.3.4.5" is unacceptable.
>>> vtor.check('ip_addr', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
"""
if not isinstance(value, string_type):
raise VdtTypeError(value)
value = value.strip()
try:
dottedQuadToNum(value)
except ValueError:
raise VdtValueError(value)
return value
def is_list(value, min=None, max=None):
"""
Check that the value is a list of values.
You can optionally specify the minimum and maximum number of members.
It does no check on list members.
>>> vtor.check('list', ())
[]
>>> vtor.check('list', [])
[]
>>> vtor.check('list', (1, 2))
[1, 2]
>>> vtor.check('list', [1, 2])
[1, 2]
>>> vtor.check('list(3)', (1, 2))
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2)" is too short.
>>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6))
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
>>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4))
[1, 2, 3, 4]
>>> vtor.check('list', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
>>> vtor.check('list', '12')
Traceback (most recent call last):
VdtTypeError: the value "12" is of the wrong type.
"""
(min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
if isinstance(value, string_type):
raise VdtTypeError(value)
try:
num_members = len(value)
except TypeError:
raise VdtTypeError(value)
if min_len is not None and num_members < min_len:
raise VdtValueTooShortError(value)
if max_len is not None and num_members > max_len:
raise VdtValueTooLongError(value)
return list(value)
def is_tuple(value, min=None, max=None):
"""
Check that the value is a tuple of values.
You can optionally specify the minimum and maximum number of members.
It does no check on members.
>>> vtor.check('tuple', ())
()
>>> vtor.check('tuple', [])
()
>>> vtor.check('tuple', (1, 2))
(1, 2)
>>> vtor.check('tuple', [1, 2])
(1, 2)
>>> vtor.check('tuple(3)', (1, 2))
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2)" is too short.
>>> vtor.check('tuple(max=5)', (1, 2, 3, 4, 5, 6))
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
>>> vtor.check('tuple(min=3, max=5)', (1, 2, 3, 4))
(1, 2, 3, 4)
>>> vtor.check('tuple', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
>>> vtor.check('tuple', '12')
Traceback (most recent call last):
VdtTypeError: the value "12" is of the wrong type.
"""
return tuple(is_list(value, min, max))
def is_string(value, min=None, max=None):
"""
Check that the supplied value is a string.
You can optionally specify the minimum and maximum number of members.
>>> vtor.check('string', '0')
'0'
>>> vtor.check('string', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
>>> vtor.check('string(2)', '12')
'12'
>>> vtor.check('string(2)', '1')
Traceback (most recent call last):
VdtValueTooShortError: the value "1" is too short.
>>> vtor.check('string(min=2, max=3)', '123')
'123'
>>> vtor.check('string(min=2, max=3)', '1234')
Traceback (most recent call last):
VdtValueTooLongError: the value "1234" is too long.
"""
if not isinstance(value, string_type):
raise VdtTypeError(value)
(min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
try:
num_members = len(value)
except TypeError:
raise VdtTypeError(value)
if min_len is not None and num_members < min_len:
raise VdtValueTooShortError(value)
if max_len is not None and num_members > max_len:
raise VdtValueTooLongError(value)
return value
def is_int_list(value, min=None, max=None):
"""
Check that the value is a list of integers.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is an integer.
>>> vtor.check('int_list', ())
[]
>>> vtor.check('int_list', [])
[]
>>> vtor.check('int_list', (1, 2))
[1, 2]
>>> vtor.check('int_list', [1, 2])
[1, 2]
>>> vtor.check('int_list', [1, 'a'])
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
"""
return [is_integer(mem) for mem in is_list(value, min, max)]
def is_bool_list(value, min=None, max=None):
"""
Check that the value is a list of booleans.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is a boolean.
>>> vtor.check('bool_list', ())
[]
>>> vtor.check('bool_list', [])
[]
>>> check_res = vtor.check('bool_list', (True, False))
>>> check_res == [True, False]
1
>>> check_res = vtor.check('bool_list', [True, False])
>>> check_res == [True, False]
1
>>> vtor.check('bool_list', [True, 'a'])
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
"""
return [is_boolean(mem) for mem in is_list(value, min, max)]
def is_float_list(value, min=None, max=None):
"""
Check that the value is a list of floats.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is a float.
>>> vtor.check('float_list', ())
[]
>>> vtor.check('float_list', [])
[]
>>> vtor.check('float_list', (1, 2.0))
[1.0, 2.0]
>>> vtor.check('float_list', [1, 2.0])
[1.0, 2.0]
>>> vtor.check('float_list', [1, 'a'])
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
"""
return [is_float(mem) for mem in is_list(value, min, max)]
def is_string_list(value, min=None, max=None):
"""
Check that the value is a list of strings.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is a string.
>>> vtor.check('string_list', ())
[]
>>> vtor.check('string_list', [])
[]
>>> vtor.check('string_list', ('a', 'b'))
['a', 'b']
>>> vtor.check('string_list', ['a', 1])
Traceback (most recent call last):
VdtTypeError: the value "1" is of the wrong type.
>>> vtor.check('string_list', 'hello')
Traceback (most recent call last):
VdtTypeError: the value "hello" is of the wrong type.
"""
if isinstance(value, string_type):
raise VdtTypeError(value)
return [is_string(mem) for mem in is_list(value, min, max)]
def is_ip_addr_list(value, min=None, max=None):
"""
Check that the value is a list of IP addresses.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is an IP address.
>>> vtor.check('ip_addr_list', ())
[]
>>> vtor.check('ip_addr_list', [])
[]
>>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8'))
['1.2.3.4', '5.6.7.8']
>>> vtor.check('ip_addr_list', ['a'])
Traceback (most recent call last):
VdtValueError: the value "a" is unacceptable.
"""
return [is_ip_addr(mem) for mem in is_list(value, min, max)]
def force_list(value, min=None, max=None):
"""
Check that a value is a list, coercing strings into
a list with one member. Useful where users forget the
trailing comma that turns a single value into a list.
You can optionally specify the minimum and maximum number of members.
A minumum of greater than one will fail if the user only supplies a
string.
>>> vtor.check('force_list', ())
[]
>>> vtor.check('force_list', [])
[]
>>> vtor.check('force_list', 'hello')
['hello']
"""
if not isinstance(value, (list, tuple)):
value = [value]
return is_list(value, min, max)
fun_dict = {
'integer': is_integer,
'float': is_float,
'ip_addr': is_ip_addr,
'string': is_string,
'boolean': is_boolean,
}
def is_mixed_list(value, *args):
"""
Check that the value is a list.
Allow specifying the type of each member.
Work on lists of specific lengths.
You specify each member as a positional argument specifying type
Each type should be one of the following strings :
'integer', 'float', 'ip_addr', 'string', 'boolean'
So you can specify a list of two strings, followed by
two integers as :
mixed_list('string', 'string', 'integer', 'integer')
The length of the list must match the number of positional
arguments you supply.
>>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')"
>>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True))
>>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
1
>>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True'))
>>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
1
>>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True))
Traceback (most recent call last):
VdtTypeError: the value "b" is of the wrong type.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a'))
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b'))
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long.
>>> vtor.check(mix_str, 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
>>> vtor.check('mixed_list("yoda")', ('a'))
Traceback (most recent call last):
VdtParamError: passed an incorrect value "KeyError('yoda',)" for parameter "'mixed_list'"
"""
try:
length = len(value)
except TypeError:
raise VdtTypeError(value)
if length < len(args):
raise VdtValueTooShortError(value)
elif length > len(args):
raise VdtValueTooLongError(value)
try:
return [fun_dict[arg](val) for arg, val in zip(args, value)]
except KeyError as e:
raise VdtParamError('mixed_list', e)
def is_option(value, *options):
"""
This check matches the value to any of a set of options.
>>> vtor.check('option("yoda", "jedi")', 'yoda')
'yoda'
>>> vtor.check('option("yoda", "jedi")', 'jed')
Traceback (most recent call last):
VdtValueError: the value "jed" is unacceptable.
>>> vtor.check('option("yoda", "jedi")', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
"""
if not isinstance(value, string_type):
raise VdtTypeError(value)
if not value in options:
raise VdtValueError(value)
return value
def _test(value, *args, **keywargs):
"""
A function that exists for test purposes.
>>> checks = [
... '3, 6, min=1, max=3, test=list(a, b, c)',
... '3',
... '3, 6',
... '3,',
... 'min=1, test="a b c"',
... 'min=5, test="a, b, c"',
... 'min=1, max=3, test="a, b, c"',
... 'min=-100, test=-99',
... 'min=1, max=3',
... '3, 6, test="36"',
... '3, 6, test="a, b, c"',
... '3, max=3, test=list("a", "b", "c")',
... '''3, max=3, test=list("'a'", 'b', "x=(c)")''',
... "test='x=fish(3)'",
... ]
>>> v = Validator({'test': _test})
>>> for entry in checks:
... pprint(v.check(('test(%s)' % entry), 3))
(3, ('3', '6'), {'max': '3', 'min': '1', 'test': ['a', 'b', 'c']})
(3, ('3',), {})
(3, ('3', '6'), {})
(3, ('3',), {})
(3, (), {'min': '1', 'test': 'a b c'})
(3, (), {'min': '5', 'test': 'a, b, c'})
(3, (), {'max': '3', 'min': '1', 'test': 'a, b, c'})
(3, (), {'min': '-100', 'test': '-99'})
(3, (), {'max': '3', 'min': '1'})
(3, ('3', '6'), {'test': '36'})
(3, ('3', '6'), {'test': 'a, b, c'})
(3, ('3',), {'max': '3', 'test': ['a', 'b', 'c']})
(3, ('3',), {'max': '3', 'test': ["'a'", 'b', 'x=(c)']})
(3, (), {'test': 'x=fish(3)'})
>>> v = Validator()
>>> v.check('integer(default=6)', '3')
3
>>> v.check('integer(default=6)', None, True)
6
>>> v.get_default_value('integer(default=6)')
6
>>> v.get_default_value('float(default=6)')
6.0
>>> v.get_default_value('pass(default=None)')
>>> v.get_default_value("string(default='None')")
'None'
>>> v.get_default_value('pass')
Traceback (most recent call last):
KeyError: 'Check "pass" has no default value.'
>>> v.get_default_value('pass(default=list(1, 2, 3, 4))')
['1', '2', '3', '4']
>>> v = Validator()
>>> v.check("pass(default=None)", None, True)
>>> v.check("pass(default='None')", None, True)
'None'
>>> v.check('pass(default="None")', None, True)
'None'
>>> v.check('pass(default=list(1, 2, 3, 4))', None, True)
['1', '2', '3', '4']
Bug test for unicode arguments
>>> v = Validator()
>>> v.check(unicode('string(min=4)'), unicode('test')) == unicode('test')
True
>>> v = Validator()
>>> v.get_default_value(unicode('string(min=4, default="1234")')) == unicode('1234')
True
>>> v.check(unicode('string(min=4, default="1234")'), unicode('test')) == unicode('test')
True
>>> v = Validator()
>>> default = v.get_default_value('string(default=None)')
>>> default == None
1
"""
return (value, args, keywargs)
def _test2():
"""
>>>
>>> v = Validator()
>>> v.get_default_value('string(default="#ff00dd")')
'#ff00dd'
>>> v.get_default_value('integer(default=3) # comment')
3
"""
def _test3():
r"""
>>> vtor.check('string(default="")', '', missing=True)
''
>>> vtor.check('string(default="\n")', '', missing=True)
'\n'
>>> print(vtor.check('string(default="\n")', '', missing=True))
<BLANKLINE>
<BLANKLINE>
>>> vtor.check('string()', '\n')
'\n'
>>> vtor.check('string(default="\n\n\n")', '', missing=True)
'\n\n\n'
>>> vtor.check('string()', 'random \n text goes here\n\n')
'random \n text goes here\n\n'
>>> vtor.check('string(default=" \nrandom text\ngoes \n here\n\n ")',
... '', missing=True)
' \nrandom text\ngoes \n here\n\n '
>>> vtor.check("string(default='\n\n\n')", '', missing=True)
'\n\n\n'
>>> vtor.check("option('\n','a','b',default='\n')", '', missing=True)
'\n'
>>> vtor.check("string_list()", ['foo', '\n', 'bar'])
['foo', '\n', 'bar']
>>> vtor.check("string_list(default=list('\n'))", '', missing=True)
['\n']
"""
if __name__ == '__main__':
# run the code tests in doctest format
import sys
import doctest
m = sys.modules.get('__main__')
globs = m.__dict__.copy()
globs.update({
'vtor': Validator(),
})
failures, tests = doctest.testmod(
m, globs=globs,
optionflags=doctest.IGNORE_EXCEPTION_DETAIL | doctest.ELLIPSIS)
assert not failures, '{} failures out of {} tests'.format(failures, tests)
| Validator |
python | django__django | tests/custom_lookups/tests.py | {
"start": 7397,
"end": 13823
} | class ____(TestCase):
def test_custom_name_lookup(self):
a1 = Author.objects.create(name="a1", birthdate=date(1981, 2, 16))
Author.objects.create(name="a2", birthdate=date(2012, 2, 29))
with (
register_lookup(models.DateField, YearTransform),
register_lookup(models.DateField, YearTransform, lookup_name="justtheyear"),
register_lookup(YearTransform, Exactly),
register_lookup(YearTransform, Exactly, lookup_name="isactually"),
):
qs1 = Author.objects.filter(birthdate__testyear__exactly=1981)
qs2 = Author.objects.filter(birthdate__justtheyear__isactually=1981)
self.assertSequenceEqual(qs1, [a1])
self.assertSequenceEqual(qs2, [a1])
def test_custom_lookup_with_subquery(self):
class NotEqual(models.Lookup):
lookup_name = "ne"
def as_sql(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
# Although combining via (*lhs_params, *rhs_params) would be
# more resilient, the "simple" way works too.
params = lhs_params + rhs_params
return "%s <> %s" % (lhs, rhs), params
author = Author.objects.create(name="Isabella")
with register_lookup(models.Field, NotEqual):
qs = Author.objects.annotate(
unknown_age=models.Subquery(
Author.objects.filter(age__isnull=True)
.order_by("name")
.values("name")[:1]
)
).filter(unknown_age__ne="Plato")
self.assertSequenceEqual(qs, [author])
qs = Author.objects.annotate(
unknown_age=Lower(
Author.objects.filter(age__isnull=True)
.order_by("name")
.values("name")[:1]
)
).filter(unknown_age__ne="plato")
self.assertSequenceEqual(qs, [author])
def test_custom_exact_lookup_none_rhs(self):
"""
__exact=None is transformed to __isnull=True if a custom lookup class
with lookup_name != 'exact' is registered as the `exact` lookup.
"""
field = Author._meta.get_field("birthdate")
OldExactLookup = field.get_lookup("exact")
author = Author.objects.create(name="author", birthdate=None)
try:
field.register_lookup(Exactly, "exact")
self.assertEqual(Author.objects.get(birthdate__exact=None), author)
finally:
field.register_lookup(OldExactLookup, "exact")
def test_basic_lookup(self):
a1 = Author.objects.create(name="a1", age=1)
a2 = Author.objects.create(name="a2", age=2)
a3 = Author.objects.create(name="a3", age=3)
a4 = Author.objects.create(name="a4", age=4)
with register_lookup(models.IntegerField, Div3Lookup):
self.assertSequenceEqual(Author.objects.filter(age__div3=0), [a3])
self.assertSequenceEqual(
Author.objects.filter(age__div3=1).order_by("age"), [a1, a4]
)
self.assertSequenceEqual(Author.objects.filter(age__div3=2), [a2])
self.assertSequenceEqual(Author.objects.filter(age__div3=3), [])
@unittest.skipUnless(
connection.vendor == "postgresql", "PostgreSQL specific SQL used"
)
def test_birthdate_month(self):
a1 = Author.objects.create(name="a1", birthdate=date(1981, 2, 16))
a2 = Author.objects.create(name="a2", birthdate=date(2012, 2, 29))
a3 = Author.objects.create(name="a3", birthdate=date(2012, 1, 31))
a4 = Author.objects.create(name="a4", birthdate=date(2012, 3, 1))
with register_lookup(models.DateField, InMonth):
self.assertSequenceEqual(
Author.objects.filter(birthdate__inmonth=date(2012, 1, 15)), [a3]
)
self.assertSequenceEqual(
Author.objects.filter(birthdate__inmonth=date(2012, 2, 1)), [a2]
)
self.assertSequenceEqual(
Author.objects.filter(birthdate__inmonth=date(1981, 2, 28)), [a1]
)
self.assertSequenceEqual(
Author.objects.filter(birthdate__inmonth=date(2012, 3, 12)), [a4]
)
self.assertSequenceEqual(
Author.objects.filter(birthdate__inmonth=date(2012, 4, 1)), []
)
def test_div3_extract(self):
with register_lookup(models.IntegerField, Div3Transform):
a1 = Author.objects.create(name="a1", age=1)
a2 = Author.objects.create(name="a2", age=2)
a3 = Author.objects.create(name="a3", age=3)
a4 = Author.objects.create(name="a4", age=4)
baseqs = Author.objects.order_by("name")
self.assertSequenceEqual(baseqs.filter(age__div3=2), [a2])
self.assertSequenceEqual(baseqs.filter(age__div3__lte=3), [a1, a2, a3, a4])
self.assertSequenceEqual(baseqs.filter(age__div3__in=[0, 2]), [a2, a3])
self.assertSequenceEqual(baseqs.filter(age__div3__in=[2, 4]), [a2])
self.assertSequenceEqual(baseqs.filter(age__div3__gte=3), [])
self.assertSequenceEqual(
baseqs.filter(age__div3__range=(1, 2)), [a1, a2, a4]
)
def test_foreignobject_lookup_registration(self):
field = Article._meta.get_field("author")
with register_lookup(models.ForeignObject, Exactly):
self.assertIs(field.get_lookup("exactly"), Exactly)
# ForeignObject should ignore regular Field lookups
with register_lookup(models.Field, Exactly):
self.assertIsNone(field.get_lookup("exactly"))
def test_lookups_caching(self):
field = Article._meta.get_field("author")
# clear and re-cache
field.get_class_lookups.cache_clear()
self.assertNotIn("exactly", field.get_lookups())
# registration should bust the cache
with register_lookup(models.ForeignObject, Exactly):
# getting the lookups again should re-cache
self.assertIn("exactly", field.get_lookups())
# Unregistration should bust the cache.
self.assertNotIn("exactly", field.get_lookups())
| LookupTests |
python | numpy__numpy | numpy/typing/tests/data/pass/array_constructors.py | {
"start": 72,
"end": 136
} | class ____:
def __index__(self) -> int:
return 0
| Index |
python | ray-project__ray | ci/ray_ci/bisect/test_generic_validator.py | {
"start": 658,
"end": 1217
} | class ____:
def builds(self):
return MockBuildkiteBuild()
@mock.patch("ci.ray_ci.bisect.generic_validator.GenericValidator._get_buildkite")
@mock.patch("ci.ray_ci.bisect.generic_validator.GenericValidator._get_rayci_select")
def test_run(mock_get_rayci_select, mock_get_buildkite):
mock_get_rayci_select.return_value = "rayci_step_id"
mock_get_buildkite.return_value = MockBuildkite()
assert GenericValidator().run(Test({"name": "test"}), "revision")
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| MockBuildkite |
python | Textualize__textual | src/textual/css/_style_properties.py | {
"start": 3789,
"end": 4057
} | class ____(GenericProperty[int, int]):
def validate_value(self, value: object) -> int:
if isinstance(value, (int, float)):
return int(value)
else:
raise StyleValueError(f"Expected a number here, got {value!r}")
| IntegerProperty |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-seller-partner/unit_tests/integration/config.py | {
"start": 524,
"end": 1389
} | class ____:
def __init__(self) -> None:
self._config: Dict[str, str] = {
"refresh_token": REFRESH_TOKEN,
"lwa_app_id": LWA_APP_ID,
"lwa_client_secret": LWA_CLIENT_SECRET,
"replication_start_date": CONFIG_START_DATE,
"replication_end_date": CONFIG_END_DATE,
"aws_environment": "PRODUCTION",
"region": "US",
"account_type": "Seller",
}
def with_start_date(self, start_date: datetime) -> ConfigBuilder:
self._config["replication_start_date"] = start_date.strftime(TIME_FORMAT)
return self
def with_end_date(self, end_date: datetime) -> ConfigBuilder:
self._config["replication_end_date"] = end_date.strftime(TIME_FORMAT)
return self
def build(self) -> Dict[str, str]:
return self._config
| ConfigBuilder |
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/control_flow_ops_test.py | {
"start": 30019,
"end": 31504
} | class ____(PForTestCase):
# This test currently only tests that the vectorized and non-vectorized
# outputs have same shapes. This is needed since under XLA compilation,
# stateless random numbers can generate different random numbers.
# TODO(agarwal): switch to checking for actual values matching once
# b/149402339 is resolved.
def run_and_assert_equal(self, targets1, targets2, rtol=1e-4, atol=1e-5):
outputs = self._run_targets(targets1, targets2)
n = len(outputs) // 2
for i in range(n):
self.assertAllEqual(outputs[i].shape, outputs[i + n].shape)
# TODO(agarwal): add tests for other random functions
def test_multinomial(self):
seeds = [[1, 2], [3, 4]]
logits = random_ops.random_uniform([2, 3, 4])
def loop_fn(i):
logits_0 = array_ops.gather(logits, 0)
logits_i = array_ops.gather(logits, i)
seeds_0 = array_ops.gather(seeds, 0)
seeds_i = array_ops.gather(seeds, i)
return (stateless_random_ops.stateless_categorical(
logits=logits_i, num_samples=3, seed=seeds_i),
stateless_random_ops.stateless_categorical(
logits=logits_i, num_samples=3, seed=seeds_0),
stateless_random_ops.stateless_categorical(
logits=logits_0, num_samples=3, seed=seeds_i),
stateless_random_ops.stateless_categorical(
logits=logits_0, num_samples=3, seed=seeds_0))
self._test_loop_fn(loop_fn, 2)
| StatelessRandomTest |
python | matplotlib__matplotlib | lib/matplotlib/tri/_triinterpolate.py | {
"start": 316,
"end": 9204
} | class ____:
"""
Abstract base class for classes used to interpolate on a triangular grid.
Derived classes implement the following methods:
- ``__call__(x, y)``,
where x, y are array-like point coordinates of the same shape, and
that returns a masked array of the same shape containing the
interpolated z-values.
- ``gradient(x, y)``,
where x, y are array-like point coordinates of the same
shape, and that returns a list of 2 masked arrays of the same shape
containing the 2 derivatives of the interpolator (derivatives of
interpolated z values with respect to x and y).
"""
def __init__(self, triangulation, z, trifinder=None):
_api.check_isinstance(Triangulation, triangulation=triangulation)
self._triangulation = triangulation
self._z = np.asarray(z)
if self._z.shape != self._triangulation.x.shape:
raise ValueError("z array must have same length as triangulation x"
" and y arrays")
_api.check_isinstance((TriFinder, None), trifinder=trifinder)
self._trifinder = trifinder or self._triangulation.get_trifinder()
# Default scaling factors : 1.0 (= no scaling)
# Scaling may be used for interpolations for which the order of
# magnitude of x, y has an impact on the interpolant definition.
# Please refer to :meth:`_interpolate_multikeys` for details.
self._unit_x = 1.0
self._unit_y = 1.0
# Default triangle renumbering: None (= no renumbering)
# Renumbering may be used to avoid unnecessary computations
# if complex calculations are done inside the Interpolator.
# Please refer to :meth:`_interpolate_multikeys` for details.
self._tri_renum = None
# __call__ and gradient docstrings are shared by all subclasses
# (except, if needed, relevant additions).
# However these methods are only implemented in subclasses to avoid
# confusion in the documentation.
_docstring__call__ = """
Returns a masked array containing interpolated values at the specified
(x, y) points.
Parameters
----------
x, y : array-like
x and y coordinates of the same shape and any number of
dimensions.
Returns
-------
np.ma.array
Masked array of the same shape as *x* and *y*; values corresponding
to (*x*, *y*) points outside of the triangulation are masked out.
"""
_docstringgradient = r"""
Returns a list of 2 masked arrays containing interpolated derivatives
at the specified (x, y) points.
Parameters
----------
x, y : array-like
x and y coordinates of the same shape and any number of
dimensions.
Returns
-------
dzdx, dzdy : np.ma.array
2 masked arrays of the same shape as *x* and *y*; values
corresponding to (x, y) points outside of the triangulation
are masked out.
The first returned array contains the values of
:math:`\frac{\partial z}{\partial x}` and the second those of
:math:`\frac{\partial z}{\partial y}`.
"""
def _interpolate_multikeys(self, x, y, tri_index=None,
return_keys=('z',)):
"""
Versatile (private) method defined for all TriInterpolators.
:meth:`_interpolate_multikeys` is a wrapper around method
:meth:`_interpolate_single_key` (to be defined in the child
subclasses).
:meth:`_interpolate_single_key actually performs the interpolation,
but only for 1-dimensional inputs and at valid locations (inside
unmasked triangles of the triangulation).
The purpose of :meth:`_interpolate_multikeys` is to implement the
following common tasks needed in all subclasses implementations:
- calculation of containing triangles
- dealing with more than one interpolation request at the same
location (e.g., if the 2 derivatives are requested, it is
unnecessary to compute the containing triangles twice)
- scaling according to self._unit_x, self._unit_y
- dealing with points outside of the grid (with fill value np.nan)
- dealing with multi-dimensional *x*, *y* arrays: flattening for
:meth:`_interpolate_params` call and final reshaping.
(Note that np.vectorize could do most of those things very well for
you, but it does it by function evaluations over successive tuples of
the input arrays. Therefore, this tends to be more time-consuming than
using optimized numpy functions - e.g., np.dot - which can be used
easily on the flattened inputs, in the child-subclass methods
:meth:`_interpolate_single_key`.)
It is guaranteed that the calls to :meth:`_interpolate_single_key`
will be done with flattened (1-d) array-like input parameters *x*, *y*
and with flattened, valid `tri_index` arrays (no -1 index allowed).
Parameters
----------
x, y : array-like
x and y coordinates where interpolated values are requested.
tri_index : array-like of int, optional
Array of the containing triangle indices, same shape as
*x* and *y*. Defaults to None. If None, these indices
will be computed by a TriFinder instance.
(Note: For point outside the grid, tri_index[ipt] shall be -1).
return_keys : tuple of keys from {'z', 'dzdx', 'dzdy'}
Defines the interpolation arrays to return, and in which order.
Returns
-------
list of arrays
Each array-like contains the expected interpolated values in the
order defined by *return_keys* parameter.
"""
# Flattening and rescaling inputs arrays x, y
# (initial shape is stored for output)
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
sh_ret = x.shape
if x.shape != y.shape:
raise ValueError("x and y shall have same shapes."
f" Given: {x.shape} and {y.shape}")
x = np.ravel(x)
y = np.ravel(y)
x_scaled = x/self._unit_x
y_scaled = y/self._unit_y
size_ret = np.size(x_scaled)
# Computes & ravels the element indexes, extract the valid ones.
if tri_index is None:
tri_index = self._trifinder(x, y)
else:
if tri_index.shape != sh_ret:
raise ValueError(
"tri_index array is provided and shall"
" have same shape as x and y. Given: "
f"{tri_index.shape} and {sh_ret}")
tri_index = np.ravel(tri_index)
mask_in = (tri_index != -1)
if self._tri_renum is None:
valid_tri_index = tri_index[mask_in]
else:
valid_tri_index = self._tri_renum[tri_index[mask_in]]
valid_x = x_scaled[mask_in]
valid_y = y_scaled[mask_in]
ret = []
for return_key in return_keys:
# Find the return index associated with the key.
try:
return_index = {'z': 0, 'dzdx': 1, 'dzdy': 2}[return_key]
except KeyError as err:
raise ValueError("return_keys items shall take values in"
" {'z', 'dzdx', 'dzdy'}") from err
# Sets the scale factor for f & df components
scale = [1., 1./self._unit_x, 1./self._unit_y][return_index]
# Computes the interpolation
ret_loc = np.empty(size_ret, dtype=np.float64)
ret_loc[~mask_in] = np.nan
ret_loc[mask_in] = self._interpolate_single_key(
return_key, valid_tri_index, valid_x, valid_y) * scale
ret += [np.ma.masked_invalid(ret_loc.reshape(sh_ret), copy=False)]
return ret
def _interpolate_single_key(self, return_key, tri_index, x, y):
"""
Interpolate at points belonging to the triangulation
(inside an unmasked triangles).
Parameters
----------
return_key : {'z', 'dzdx', 'dzdy'}
The requested values (z or its derivatives).
tri_index : 1D int array
Valid triangle index (cannot be -1).
x, y : 1D arrays, same shape as `tri_index`
Valid locations where interpolation is requested.
Returns
-------
1-d array
Returned array of the same size as *tri_index*
"""
raise NotImplementedError("TriInterpolator subclasses" +
"should implement _interpolate_single_key!")
| TriInterpolator |
python | tornadoweb__tornado | tornado/test/httputil_test.py | {
"start": 3394,
"end": 8982
} | class ____(unittest.TestCase):
def test_file_upload(self):
data = b"""\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--""".replace(
b"\n", b"\r\n"
)
args, files = form_data_args()
parse_multipart_form_data(b"1234", data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "ab.txt")
self.assertEqual(file["body"], b"Foo")
def test_unquoted_names(self):
# quotes are optional unless special characters are present
data = b"""\
--1234
Content-Disposition: form-data; name=files; filename=ab.txt
Foo
--1234--""".replace(
b"\n", b"\r\n"
)
args, files = form_data_args()
parse_multipart_form_data(b"1234", data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "ab.txt")
self.assertEqual(file["body"], b"Foo")
def test_special_filenames(self):
filenames = [
"a;b.txt",
'a"b.txt',
'a";b.txt',
'a;"b.txt',
'a";";.txt',
'a\\"b.txt',
"a\\b.txt",
]
for filename in filenames:
logging.debug("trying filename %r", filename)
str_data = """\
--1234
Content-Disposition: form-data; name="files"; filename="%s"
Foo
--1234--""" % filename.replace(
"\\", "\\\\"
).replace(
'"', '\\"'
)
data = utf8(str_data.replace("\n", "\r\n"))
args, files = form_data_args()
parse_multipart_form_data(b"1234", data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], filename)
self.assertEqual(file["body"], b"Foo")
def test_non_ascii_filename_rfc5987(self):
data = b"""\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"; filename*=UTF-8''%C3%A1b.txt
Foo
--1234--""".replace(
b"\n", b"\r\n"
)
args, files = form_data_args()
parse_multipart_form_data(b"1234", data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "áb.txt")
self.assertEqual(file["body"], b"Foo")
def test_non_ascii_filename_raw(self):
data = """\
--1234
Content-Disposition: form-data; name="files"; filename="测试.txt"
Foo
--1234--""".encode(
"utf-8"
).replace(
b"\n", b"\r\n"
)
args, files = form_data_args()
parse_multipart_form_data(b"1234", data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "测试.txt")
self.assertEqual(file["body"], b"Foo")
def test_boundary_starts_and_ends_with_quotes(self):
data = b"""\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--""".replace(
b"\n", b"\r\n"
)
args, files = form_data_args()
parse_multipart_form_data(b'"1234"', data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "ab.txt")
self.assertEqual(file["body"], b"Foo")
def test_missing_headers(self):
data = b"""\
--1234
Foo
--1234--""".replace(
b"\n", b"\r\n"
)
args, files = form_data_args()
with self.assertRaises(
HTTPInputError, msg="multipart/form-data missing headers"
):
parse_multipart_form_data(b"1234", data, args, files)
self.assertEqual(files, {})
def test_invalid_content_disposition(self):
data = b"""\
--1234
Content-Disposition: invalid; name="files"; filename="ab.txt"
Foo
--1234--""".replace(
b"\n", b"\r\n"
)
args, files = form_data_args()
with self.assertRaises(HTTPInputError, msg="Invalid multipart/form-data"):
parse_multipart_form_data(b"1234", data, args, files)
self.assertEqual(files, {})
def test_line_does_not_end_with_correct_line_break(self):
data = b"""\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo--1234--""".replace(
b"\n", b"\r\n"
)
args, files = form_data_args()
with self.assertRaises(HTTPInputError, msg="Invalid multipart/form-data"):
parse_multipart_form_data(b"1234", data, args, files)
self.assertEqual(files, {})
def test_content_disposition_header_without_name_parameter(self):
data = b"""\
--1234
Content-Disposition: form-data; filename="ab.txt"
Foo
--1234--""".replace(
b"\n", b"\r\n"
)
args, files = form_data_args()
with self.assertRaises(
HTTPInputError, msg="multipart/form-data value missing name"
):
parse_multipart_form_data(b"1234", data, args, files)
self.assertEqual(files, {})
def test_data_after_final_boundary(self):
# The spec requires that data after the final boundary be ignored.
# http://www.w3.org/Protocols/rfc1341/7_2_Multipart.html
# In practice, some libraries include an extra CRLF after the boundary.
data = b"""\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--
""".replace(
b"\n", b"\r\n"
)
args, files = form_data_args()
parse_multipart_form_data(b"1234", data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "ab.txt")
self.assertEqual(file["body"], b"Foo")
| MultipartFormDataTest |
python | sympy__sympy | sympy/functions/special/error_functions.py | {
"start": 76488,
"end": 78142
} | class ____(DefinedFunction):
"""
Helper function to make the $\\mathrm{erf}(z)$ function
tractable for the Gruntz algorithm.
"""
@classmethod
def eval(cls, arg):
if arg.is_zero:
return S.One
def _eval_aseries(self, n, args0, x, logx):
from sympy.series.order import Order
point = args0[0]
# Expansion at oo
if point is S.Infinity:
z = self.args[0]
l = [1/sqrt(pi) * factorial(2*k)*(-S(
4))**(-k)/factorial(k) * (1/z)**(2*k + 1) for k in range(n)]
o = Order(1/z**(2*n + 1), x)
# It is very inefficient to first add the order and then do the nseries
return (Add(*l))._eval_nseries(x, n, logx) + o
# Expansion at I*oo
t = point.extract_multiplicatively(I)
if t is S.Infinity:
z = self.args[0]
# TODO: is the series really correct?
l = [1/sqrt(pi) * factorial(2*k)*(-S(
4))**(-k)/factorial(k) * (1/z)**(2*k + 1) for k in range(n)]
o = Order(1/z**(2*n + 1), x)
# It is very inefficient to first add the order and then do the nseries
return (Add(*l))._eval_nseries(x, n, logx) + o
# All other points are not handled
return super()._eval_aseries(n, args0, x, logx)
def fdiff(self, argindex=1):
if argindex == 1:
z = self.args[0]
return -2/sqrt(pi) + 2*z*_erfs(z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_intractable(self, z, **kwargs):
return (S.One - erf(z))*exp(z**2)
| _erfs |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 46385,
"end": 46599
} | class ____:
xlCommentAndIndicator = 1 # from enum XlCommentDisplayMode
xlCommentIndicatorOnly = -1 # from enum XlCommentDisplayMode
xlNoIndicator = 0 # from enum XlCommentDisplayMode
| CommentDisplayMode |
python | ipython__ipython | IPython/core/magics/code.py | {
"start": 5112,
"end": 5294
} | class ____(Exception):
"""Exception for interactively defined variable in magic_edit"""
def __init__(self, index):
self.index = index
@magics_class
| InteractivelyDefined |
python | tensorflow__tensorflow | tensorflow/lite/testing/zip_test_utils.py | {
"start": 2796,
"end": 8018
} | class ____:
"""Additional options for conversion, besides input, output, shape."""
def __init__(self):
# Whether to ignore control dependency nodes.
self.drop_control_dependency = False
# Allow custom ops in the conversion.
self.allow_custom_ops = False
# Rnn states that are used to support rnn / lstm cells.
self.rnn_states = None
# Split the LSTM inputs from 5 inputs to 18 inputs for TFLite.
self.split_tflite_lstm_inputs = None
# The inference input type passed to TFLiteConvert.
self.inference_input_type = None
# The inference output type passed to TFLiteConvert.
self.inference_output_type = None
def create_tensor_data(dtype, shape, min_value=-100, max_value=100):
"""Build tensor data spreading the range [min_value, max_value)."""
if dtype in MAP_TF_TO_NUMPY_TYPE:
dtype = MAP_TF_TO_NUMPY_TYPE[dtype]
if dtype in (tf.float32, tf.float16, tf.float64):
value = (max_value - min_value) * np.random.random_sample(shape) + min_value
elif dtype in (tf.complex64, tf.complex128):
real = (max_value - min_value) * np.random.random_sample(shape) + min_value
imag = (max_value - min_value) * np.random.random_sample(shape) + min_value
value = real + imag * 1j
elif dtype in (tf.uint32, tf.int32, tf.uint8, tf.int8, tf.int64, tf.uint16,
tf.int16):
value = np.random.randint(min_value, max_value + 1, shape)
elif dtype == tf.bool:
value = np.random.choice([True, False], size=shape)
elif dtype == np.bytes_:
# Not the best strings, but they will do for some basic testing.
letters = list(string.ascii_uppercase)
return np.random.choice(letters, size=shape).astype(dtype)
elif dtype == tf.bfloat16:
value = (max_value - min_value) * np.random.random_sample(shape) + min_value
# There is no bfloat16 type in numpy. Uses ml_dtypes.bfloat16 for Eigen.
dtype = ml_dtypes.bfloat16
else:
raise ValueError("Unsupported dtype: %s" % dtype)
return np.dtype(dtype).type(value) if np.isscalar(value) else value.astype(
dtype)
def create_scalar_data(dtype, min_value=-100, max_value=100):
"""Build scalar tensor data range from min_value to max_value exclusively."""
if dtype in MAP_TF_TO_NUMPY_TYPE:
dtype = MAP_TF_TO_NUMPY_TYPE[dtype]
if dtype in (tf.float32, tf.float16, tf.float64):
value = (max_value - min_value) * np.random.random() + min_value
elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):
value = np.random.randint(min_value, max_value + 1)
elif dtype == tf.bool:
value = np.random.choice([True, False])
elif dtype == np.bytes_:
l = np.random.randint(1, 6)
value = "".join(np.random.choice(list(string.ascii_uppercase), size=l))
elif dtype == tf.bfloat16:
value = (max_value - min_value) * np.random.random() + min_value
# There is no bfloat16 type in numpy. Uses ml_dtypes.bfloat16 for Eigen.
dtype = ml_dtypes.bfloat16
else:
raise ValueError("Unsupported dtype: %s" % dtype)
return np.array(value, dtype=dtype)
def freeze_graph(session, outputs):
"""Freeze the current graph.
Args:
session: Tensorflow sessions containing the graph
outputs: List of output tensors
Returns:
The frozen graph_def.
"""
return convert_to_constants.convert_variables_to_constants(
session, session.graph.as_graph_def(), [x.op.name for x in outputs])
def format_result(t):
"""Convert a tensor to a format that can be used in test specs."""
if t.dtype.kind not in [np.dtype(np.bytes_).kind, np.dtype(np.object_).kind]:
# Output 9 digits after the point to ensure the precision is good enough.
# bfloat16 promotes the value to string, not float. so we need to
# convert it to float explicitly.
if t.dtype == ml_dtypes.bfloat16:
values = ["{:.9f}".format(float(value)) for value in list(t.flatten())]
else:
values = ["{:.9f}".format(value) for value in list(t.flatten())]
return ",".join(values)
else:
# SerializeAsHexString returns bytes in PY3, so decode if appropriate.
return _pywrap_string_util.SerializeAsHexString(t.flatten()).decode("utf-8")
def write_examples(fp, examples):
"""Given a list `examples`, write a text format representation.
The file format is csv like with a simple repeated pattern. We would ike
to use proto here, but we can't yet due to interfacing with the Android
team using this format.
Args:
fp: File-like object to write to.
examples: Example dictionary consisting of keys "inputs" and "outputs"
"""
def write_tensor(fp, name, x):
"""Write tensor in file format supported by TFLITE example."""
fp.write("name,%s\n" % name)
fp.write("dtype,%s\n" % x.dtype)
fp.write("shape," + ",".join(map(str, x.shape)) + "\n")
fp.write("values," + format_result(x) + "\n")
fp.write("test_cases,%d\n" % len(examples))
for example in examples:
fp.write("inputs,%d\n" % len(example["inputs"]))
for name, value in example["inputs"].items():
if value is not None:
write_tensor(fp, name, value)
fp.write("outputs,%d\n" % len(example["outputs"]))
for name, value in example["outputs"].items():
write_tensor(fp, name, value)
| ExtraConvertOptions |
python | networkx__networkx | networkx/tests/test_convert_pandas.py | {
"start": 175,
"end": 13361
} | class ____:
def setup_method(self):
self.rng = np.random.RandomState(seed=5)
ints = self.rng.randint(1, 11, size=(3, 2))
a = ["A", "B", "C"]
b = ["D", "A", "E"]
df = pd.DataFrame(ints, columns=["weight", "cost"])
df[0] = a # Column label 0 (int)
df["b"] = b # Column label 'b' (str)
self.df = df
mdf = pd.DataFrame([[4, 16, "A", "D"]], columns=["weight", "cost", 0, "b"])
self.mdf = pd.concat([df, mdf])
def test_exceptions(self):
G = pd.DataFrame(["a"]) # adj
pytest.raises(nx.NetworkXError, nx.to_networkx_graph, G)
G = pd.DataFrame(["a", 0.0]) # elist
pytest.raises(nx.NetworkXError, nx.to_networkx_graph, G)
df = pd.DataFrame([[1, 1], [1, 0]], dtype=int, index=[1, 2], columns=["a", "b"])
pytest.raises(nx.NetworkXError, nx.from_pandas_adjacency, df)
def test_from_edgelist_all_attr(self):
Gtrue = nx.Graph(
[
("E", "C", {"cost": 9, "weight": 10}),
("B", "A", {"cost": 1, "weight": 7}),
("A", "D", {"cost": 7, "weight": 4}),
]
)
G = nx.from_pandas_edgelist(self.df, 0, "b", True)
assert graphs_equal(G, Gtrue)
# MultiGraph
MGtrue = nx.MultiGraph(Gtrue)
MGtrue.add_edge("A", "D", cost=16, weight=4)
MG = nx.from_pandas_edgelist(self.mdf, 0, "b", True, nx.MultiGraph())
assert graphs_equal(MG, MGtrue)
def test_from_edgelist_multi_attr(self):
Gtrue = nx.Graph(
[
("E", "C", {"cost": 9, "weight": 10}),
("B", "A", {"cost": 1, "weight": 7}),
("A", "D", {"cost": 7, "weight": 4}),
]
)
G = nx.from_pandas_edgelist(self.df, 0, "b", ["weight", "cost"])
assert graphs_equal(G, Gtrue)
def test_from_edgelist_multi_attr_incl_target(self):
Gtrue = nx.Graph(
[
("E", "C", {0: "C", "b": "E", "weight": 10}),
("B", "A", {0: "B", "b": "A", "weight": 7}),
("A", "D", {0: "A", "b": "D", "weight": 4}),
]
)
G = nx.from_pandas_edgelist(self.df, 0, "b", [0, "b", "weight"])
assert graphs_equal(G, Gtrue)
def test_from_edgelist_multidigraph_and_edge_attr(self):
# example from issue #2374
edges = [
("X1", "X4", {"Co": "zA", "Mi": 0, "St": "X1"}),
("X1", "X4", {"Co": "zB", "Mi": 54, "St": "X2"}),
("X1", "X4", {"Co": "zB", "Mi": 49, "St": "X3"}),
("X1", "X4", {"Co": "zB", "Mi": 44, "St": "X4"}),
("Y1", "Y3", {"Co": "zC", "Mi": 0, "St": "Y1"}),
("Y1", "Y3", {"Co": "zC", "Mi": 34, "St": "Y2"}),
("Y1", "Y3", {"Co": "zC", "Mi": 29, "St": "X2"}),
("Y1", "Y3", {"Co": "zC", "Mi": 24, "St": "Y3"}),
("Z1", "Z3", {"Co": "zD", "Mi": 0, "St": "Z1"}),
("Z1", "Z3", {"Co": "zD", "Mi": 14, "St": "X3"}),
]
Gtrue = nx.MultiDiGraph(edges)
data = {
"O": ["X1", "X1", "X1", "X1", "Y1", "Y1", "Y1", "Y1", "Z1", "Z1"],
"D": ["X4", "X4", "X4", "X4", "Y3", "Y3", "Y3", "Y3", "Z3", "Z3"],
"St": ["X1", "X2", "X3", "X4", "Y1", "Y2", "X2", "Y3", "Z1", "X3"],
"Co": ["zA", "zB", "zB", "zB", "zC", "zC", "zC", "zC", "zD", "zD"],
"Mi": [0, 54, 49, 44, 0, 34, 29, 24, 0, 14],
}
df = pd.DataFrame.from_dict(data)
G1 = nx.from_pandas_edgelist(
df, source="O", target="D", edge_attr=True, create_using=nx.MultiDiGraph
)
G2 = nx.from_pandas_edgelist(
df,
source="O",
target="D",
edge_attr=["St", "Co", "Mi"],
create_using=nx.MultiDiGraph,
)
assert graphs_equal(G1, Gtrue)
assert graphs_equal(G2, Gtrue)
def test_from_edgelist_one_attr(self):
Gtrue = nx.Graph(
[
("E", "C", {"weight": 10}),
("B", "A", {"weight": 7}),
("A", "D", {"weight": 4}),
]
)
G = nx.from_pandas_edgelist(self.df, 0, "b", "weight")
assert graphs_equal(G, Gtrue)
def test_from_edgelist_int_attr_name(self):
# note: this also tests that edge_attr can be `source`
Gtrue = nx.Graph(
[("E", "C", {0: "C"}), ("B", "A", {0: "B"}), ("A", "D", {0: "A"})]
)
G = nx.from_pandas_edgelist(self.df, 0, "b", 0)
assert graphs_equal(G, Gtrue)
def test_from_edgelist_invalid_attr(self):
pytest.raises(
nx.NetworkXError, nx.from_pandas_edgelist, self.df, 0, "b", "misspell"
)
pytest.raises(nx.NetworkXError, nx.from_pandas_edgelist, self.df, 0, "b", 1)
# see Issue #3562
edgeframe = pd.DataFrame([[0, 1], [1, 2], [2, 0]], columns=["s", "t"])
pytest.raises(
nx.NetworkXError, nx.from_pandas_edgelist, edgeframe, "s", "t", True
)
pytest.raises(
nx.NetworkXError, nx.from_pandas_edgelist, edgeframe, "s", "t", "weight"
)
pytest.raises(
nx.NetworkXError,
nx.from_pandas_edgelist,
edgeframe,
"s",
"t",
["weight", "size"],
)
def test_from_edgelist_no_attr(self):
Gtrue = nx.Graph([("E", "C", {}), ("B", "A", {}), ("A", "D", {})])
G = nx.from_pandas_edgelist(self.df, 0, "b")
assert graphs_equal(G, Gtrue)
def test_from_edgelist(self):
# Pandas DataFrame
G = nx.cycle_graph(10)
G.add_weighted_edges_from((u, v, u) for u, v in list(G.edges))
edgelist = nx.to_edgelist(G)
source = [s for s, t, d in edgelist]
target = [t for s, t, d in edgelist]
weight = [d["weight"] for s, t, d in edgelist]
edges = pd.DataFrame({"source": source, "target": target, "weight": weight})
GG = nx.from_pandas_edgelist(edges, edge_attr="weight")
assert nodes_equal(G.nodes(), GG.nodes())
assert edges_equal(G.edges(), GG.edges())
GW = nx.to_networkx_graph(edges, create_using=nx.Graph)
assert nodes_equal(G.nodes(), GW.nodes())
assert edges_equal(G.edges(), GW.edges())
def test_to_edgelist_default_source_or_target_col_exists(self):
G = nx.path_graph(10)
G.add_weighted_edges_from((u, v, u) for u, v in list(G.edges))
nx.set_edge_attributes(G, 0, name="source")
pytest.raises(nx.NetworkXError, nx.to_pandas_edgelist, G)
# drop source column to test an exception raised for the target column
for u, v, d in G.edges(data=True):
d.pop("source", None)
nx.set_edge_attributes(G, 0, name="target")
pytest.raises(nx.NetworkXError, nx.to_pandas_edgelist, G)
def test_to_edgelist_custom_source_or_target_col_exists(self):
G = nx.path_graph(10)
G.add_weighted_edges_from((u, v, u) for u, v in list(G.edges))
nx.set_edge_attributes(G, 0, name="source_col_name")
pytest.raises(
nx.NetworkXError, nx.to_pandas_edgelist, G, source="source_col_name"
)
# drop source column to test an exception raised for the target column
for u, v, d in G.edges(data=True):
d.pop("source_col_name", None)
nx.set_edge_attributes(G, 0, name="target_col_name")
pytest.raises(
nx.NetworkXError, nx.to_pandas_edgelist, G, target="target_col_name"
)
def test_to_edgelist_edge_key_col_exists(self):
G = nx.path_graph(10, create_using=nx.MultiGraph)
G.add_weighted_edges_from((u, v, u) for u, v in list(G.edges()))
nx.set_edge_attributes(G, 0, name="edge_key_name")
pytest.raises(
nx.NetworkXError, nx.to_pandas_edgelist, G, edge_key="edge_key_name"
)
def test_from_adjacency(self):
nodelist = [1, 2]
dftrue = pd.DataFrame(
[[1, 1], [1, 0]], dtype=int, index=nodelist, columns=nodelist
)
G = nx.Graph([(1, 1), (1, 2)])
df = nx.to_pandas_adjacency(G, dtype=int)
pd.testing.assert_frame_equal(df, dftrue)
@pytest.mark.parametrize("graph", [nx.Graph, nx.MultiGraph])
def test_roundtrip(self, graph):
# edgelist
Gtrue = graph([(1, 1), (1, 2)])
df = nx.to_pandas_edgelist(Gtrue)
G = nx.from_pandas_edgelist(df, create_using=graph)
assert graphs_equal(Gtrue, G)
# adjacency
adj = {1: {1: {"weight": 1}, 2: {"weight": 1}}, 2: {1: {"weight": 1}}}
Gtrue = graph(adj)
df = nx.to_pandas_adjacency(Gtrue, dtype=int)
G = nx.from_pandas_adjacency(df, create_using=graph)
assert graphs_equal(Gtrue, G)
def test_from_adjacency_named(self):
# example from issue #3105
data = {
"A": {"A": 0, "B": 0, "C": 0},
"B": {"A": 1, "B": 0, "C": 0},
"C": {"A": 0, "B": 1, "C": 0},
}
dftrue = pd.DataFrame(data, dtype=np.intp)
df = dftrue[["A", "C", "B"]]
G = nx.from_pandas_adjacency(df, create_using=nx.DiGraph())
df = nx.to_pandas_adjacency(G, dtype=np.intp)
pd.testing.assert_frame_equal(df, dftrue)
@pytest.mark.parametrize("edge_attr", [["attr2", "attr3"], True])
def test_edgekey_with_multigraph(self, edge_attr):
df = pd.DataFrame(
{
"source": {"A": "N1", "B": "N2", "C": "N1", "D": "N1"},
"target": {"A": "N2", "B": "N3", "C": "N1", "D": "N2"},
"attr1": {"A": "F1", "B": "F2", "C": "F3", "D": "F4"},
"attr2": {"A": 1, "B": 0, "C": 0, "D": 0},
"attr3": {"A": 0, "B": 1, "C": 0, "D": 1},
}
)
Gtrue = nx.MultiGraph(
[
("N1", "N2", "F1", {"attr2": 1, "attr3": 0}),
("N2", "N3", "F2", {"attr2": 0, "attr3": 1}),
("N1", "N1", "F3", {"attr2": 0, "attr3": 0}),
("N1", "N2", "F4", {"attr2": 0, "attr3": 1}),
]
)
# example from issue #4065
G = nx.from_pandas_edgelist(
df,
source="source",
target="target",
edge_attr=edge_attr,
edge_key="attr1",
create_using=nx.MultiGraph(),
)
assert graphs_equal(G, Gtrue)
df_roundtrip = nx.to_pandas_edgelist(G, edge_key="attr1")
df_roundtrip = df_roundtrip.sort_values("attr1")
df_roundtrip.index = ["A", "B", "C", "D"]
pd.testing.assert_frame_equal(
df, df_roundtrip[["source", "target", "attr1", "attr2", "attr3"]]
)
def test_edgekey_with_normal_graph_no_action(self):
Gtrue = nx.Graph(
[
("E", "C", {"cost": 9, "weight": 10}),
("B", "A", {"cost": 1, "weight": 7}),
("A", "D", {"cost": 7, "weight": 4}),
]
)
G = nx.from_pandas_edgelist(self.df, 0, "b", True, edge_key="weight")
assert graphs_equal(G, Gtrue)
def test_nonexisting_edgekey_raises(self):
with pytest.raises(nx.exception.NetworkXError):
nx.from_pandas_edgelist(
self.df,
source="source",
target="target",
edge_key="Not_real",
edge_attr=True,
create_using=nx.MultiGraph(),
)
def test_multigraph_with_edgekey_no_edgeattrs(self):
Gtrue = nx.MultiGraph()
Gtrue.add_edge(0, 1, key=0)
Gtrue.add_edge(0, 1, key=3)
df = nx.to_pandas_edgelist(Gtrue, edge_key="key")
expected = pd.DataFrame({"source": [0, 0], "target": [1, 1], "key": [0, 3]})
pd.testing.assert_frame_equal(expected, df)
G = nx.from_pandas_edgelist(df, edge_key="key", create_using=nx.MultiGraph)
assert graphs_equal(Gtrue, G)
def test_to_pandas_adjacency_with_nodelist():
G = nx.complete_graph(5)
nodelist = [1, 4]
expected = pd.DataFrame(
[[0, 1], [1, 0]], dtype=int, index=nodelist, columns=nodelist
)
pd.testing.assert_frame_equal(
expected, nx.to_pandas_adjacency(G, nodelist, dtype=int)
)
def test_to_pandas_edgelist_with_nodelist():
G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2), (1, 3)], weight=2.0)
G.add_edge(0, 5, weight=100)
df = nx.to_pandas_edgelist(G, nodelist=[1, 2])
assert 0 not in df["source"].to_numpy()
assert 100 not in df["weight"].to_numpy()
def test_from_pandas_adjacency_with_index_collisions():
"""See gh-7407"""
df = pd.DataFrame(
[
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 0, 0, 0],
],
index=[1010001, 2, 1, 1010002],
columns=[1010001, 2, 1, 1010002],
)
G = nx.from_pandas_adjacency(df, create_using=nx.DiGraph)
expected = nx.DiGraph([(1010001, 2), (2, 1), (1, 1010002)])
assert nodes_equal(G.nodes, expected.nodes)
assert edges_equal(G.edges, expected.edges, directed=True)
| TestConvertPandas |
python | coleifer__peewee | tests/expressions.py | {
"start": 223,
"end": 440
} | class ____(ModelTestCase):
requires = [Person]
def assertNames(self, exp, x):
query = Person.select().where(exp).order_by(Person.name)
self.assertEqual([p.name for p in query], x)
| BaseNamesTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/slots3.py | {
"start": 500,
"end": 595
} | class ____:
__slots__ = ("bar1",)
foo = MyDescriptor(slot="_foo_descriptor")
| ClassBParent |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 53067,
"end": 53392
} | class ____(Aggregate):
"""
ArgmaxDef schema wrapper.
Parameters
----------
argmax : str, :class:`FieldName`
"""
_schema = {"$ref": "#/definitions/ArgmaxDef"}
def __init__(self, argmax: Optional[str | SchemaBase] = Undefined, **kwds):
super().__init__(argmax=argmax, **kwds)
| ArgmaxDef |
python | pytorch__pytorch | torch/testing/_internal/distributed/distributed_test.py | {
"start": 7242,
"end": 7469
} | class ____(NamedTuple):
name: str
model: nn.Module
inp: Union[torch.tensor, tuple]
sync_interval: int
throw_on_early_termination: bool = False
hook: Callable = None
state: Any = None
| DDPUnevenTestInput |
python | Farama-Foundation__Gymnasium | gymnasium/wrappers/vector/numpy_to_torch.py | {
"start": 324,
"end": 1616
} | class ____(ArrayConversion):
"""Wraps a numpy-based environment so that it can be interacted with through PyTorch Tensors.
Example:
>>> import torch
>>> import gymnasium as gym
>>> from gymnasium.wrappers.vector import NumpyToTorch
>>> envs = gym.make_vec("CartPole-v1", 3)
>>> envs = NumpyToTorch(envs)
>>> obs, _ = envs.reset(seed=123)
>>> type(obs)
<class 'torch.Tensor'>
>>> action = torch.tensor(envs.action_space.sample())
>>> obs, reward, terminated, truncated, info = envs.step(action)
>>> envs.close()
>>> type(obs)
<class 'torch.Tensor'>
>>> type(reward)
<class 'torch.Tensor'>
>>> type(terminated)
<class 'torch.Tensor'>
>>> type(truncated)
<class 'torch.Tensor'>
"""
def __init__(self, env: VectorEnv, device: Device | None = None):
"""Wrapper class to change inputs and outputs of environment to PyTorch tensors.
Args:
env: The NumPy-based vector environment to wrap
device: The device the torch Tensors should be moved to
"""
super().__init__(env, env_xp=np, target_xp=torch, target_device=device)
self.device: Device | None = device
| NumpyToTorch |
python | pytorch__pytorch | torchgen/api/types/types.py | {
"start": 5737,
"end": 6210
} | class ____(CType):
# This template is explicitly specialized, so the only valid
# elems are those we have specializations for (e.g., float, double, ...)
# scalar_t is also a common argument here (when we are codegen in
# a templated context)
elem: BaseCType
def cpp_type(self, *, strip_ref: bool = False) -> str:
return f"at::vec::Vectorized<{self.elem.cpp_type()}>"
def remove_const_ref(self) -> CType:
return self
| VectorizedCType |
python | streamlit__streamlit | lib/streamlit/elements/space.py | {
"start": 997,
"end": 3931
} | class ____:
@gather_metrics("space")
def space(
self,
size: SpaceSize = "small",
) -> DeltaGenerator:
"""Add vertical or horizontal space.
This command adds space in the direction of its parent container. In
a vertical layout, it adds vertical space. In a horizontal layout, it
adds horizontal space.
Parameters
----------
size : "small", "medium", "large", "stretch", or int
The size of the space. This can be one of the following values:
- ``"small"`` (default): 0.75rem, which is the height of a widget
label. This is useful for aligning buttons with labeled widgets.
- ``"medium"``: 2.5rem, which is the height of a button or
(unlabeled) input field.
- ``"large"``: 4.25rem, which is the height of a labeled input
field or unlabeled media widget, like `st.file_uploader`.
- ``"stretch"``: Expands to fill remaining space in the container.
- An integer: Fixed size in pixels.
Examples
--------
**Example 1: Use vertical space to align elements**
Use small spaces to replace label heights. Use medium spaces to replace
two label heights or a button.
>>> import streamlit as st
>>>
>>> left, middle, right = st.columns(3)
>>>
>>> left.space("medium")
>>> left.button("Left button", width="stretch")
>>>
>>> middle.space("small")
>>> middle.text_input("Middle input")
>>>
>>> right.audio_input("Right uploader")
.. output::
https://doc-space-vertical.streamlit.app/
height: 260px
**Example 2: Add horizontal space in a container**
Use stretch space to float elements left and right.
>>> import streamlit as st
>>>
>>> with st.container(horizontal=True):
... st.button("Left")
... st.space("stretch")
... st.button("Right")
.. output::
https://doc-space-horizontal.streamlit.app/
height: 200px
"""
space_proto = SpaceProto()
validate_space_size(size)
# In vertical layouts, size controls height.
# In horizontal layouts, size controls width.
# We set both width and height configs to the same size value.
# The frontend uses FlexContext to determine container direction and
# applies ONLY the relevant dimension (width for horizontal, height for vertical)
# to avoid unintended cross-axis spacing.
layout_config = LayoutConfig(width=size, height=size)
return self.dg._enqueue("space", space_proto, layout_config=layout_config)
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
| SpaceMixin |
python | rq__rq | rq/timeouts.py | {
"start": 47,
"end": 137
} | class ____(Exception):
"""Base exception for timeouts."""
pass
| BaseTimeoutException |
python | wandb__wandb | tests/system_tests/test_functional/metaflow/flow_decoclass.py | {
"start": 487,
"end": 1737
} | class ____(FlowSpec):
# Not obvious how to support metaflow.IncludeFile
seed = Parameter("seed", default=1337)
test_size = Parameter("test_size", default=0.2)
raw_data = Parameter(
"raw_data",
default=pathlib.Path(__file__).parent / "wine.csv",
help="path to the raw data",
)
@step
def start(self):
self.raw_df = pd.read_csv(self.raw_data)
self.next(self.split_data)
@step
def split_data(self):
X = self.raw_df.drop("Wine", axis=1) # noqa: N806
y = self.raw_df[["Wine"]]
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
X, y, test_size=self.test_size, random_state=self.seed
)
self.next(self.train)
@step
def train(self):
self.clf = RandomForestClassifier(
n_estimators=2,
max_depth=2,
random_state=self.seed,
)
self.clf.fit(self.X_train, self.y_train)
self.next(self.end)
@step
def end(self):
self.preds = self.clf.predict(self.X_test)
self.accuracy = accuracy_score(self.y_test, self.preds)
if __name__ == "__main__":
wandb.setup()
WandbExampleFlowDecoClass()
| WandbExampleFlowDecoClass |
python | huggingface__transformers | src/transformers/models/idefics2/modeling_idefics2.py | {
"start": 21526,
"end": 22255
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Idefics2RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
| Idefics2RMSNorm |
python | PyCQA__pylint | tests/functional/a/alternative/alternative_union_syntax_error.py | {
"start": 1865,
"end": 1959
} | class ____(NamedTuple):
my_var: int | str # [unsupported-binary-operation]
| CustomNamedTuple2 |
python | pytorch__pytorch | torch/fx/proxy.py | {
"start": 3541,
"end": 17696
} | class ____:
graph: Graph
record_stack_traces: bool = False
# When record_stack_traces is True, only reocrd stack traces
# with forward function names.
# This helps when we want stack trace back to model code
_record_forward_stack_traces_only: bool = False
# Feature flag for mutable schema checking
# Enableby default in 1.12
check_mutable_operations: bool = False
# Feature flag for assert tracing
trace_asserts: bool = False
# Feature flag for proxying accesses to buffer values
proxy_buffer_attributes: bool = False
# Name of the function to be traced. It will only be used when
# ``root`` is an instance of ``nn.Module``
traced_func_name: str = "forward"
# Maps the containing module's name to the operator name
scope: Scope
# Records the module call stack
module_stack: OrderedDict[str, tuple[str, Any]]
# Mapping of node name to module scope
node_name_to_scope: dict[str, tuple[str, type]]
@compatibility(is_backward_compatible=True)
def create_node(
self,
kind: str,
target: Target,
args: tuple[Argument, ...],
kwargs: dict[str, Argument],
name: Optional[str] = None,
type_expr: Optional[Any] = None,
) -> Node:
"""
Inserts a graph node given target, args, kwargs, and name.
This method can be overridden to do extra checking, validation, or
modification of values used in node creation. For example, one might
want to disallow in-place operations from being recorded.
"""
if kind == "call_function" and self.check_mutable_operations:
check_for_mutable_operation(target, args, kwargs)
node = self.graph.create_node(kind, target, args, kwargs, name, type_expr)
# TODO node_name_to_scope will be depreciated in favor of
# node.meta['nn_module_stack']
self.node_name_to_scope[node.name] = (
self.scope.module_path,
self.scope.module_type,
)
# Optionally set stack trace on the created Node for debugging purposes
if fx_traceback.has_preserved_node_meta():
current_meta: dict[str, Any] = fx_traceback.get_current_meta()
stack_trace = current_meta.get("stack_trace")
if stack_trace:
node.stack_trace = stack_trace
if fx_traceback.GRADIENT_ACC_SPECIAL_STACK in stack_trace:
node.meta["is_gradient_acc"] = True
# Explicitly set the stack_trace, nn_module_stack and source_fn on the node.meta
# If other meta fields are needed, they can be added here
for field in _COPY_META_FIELDS:
if field in current_meta:
node.meta[field] = copy.copy(current_meta[field])
# Here we decrement to account for the sequence_nr having
# just been incremented while tracing this lowered aten op.
new_seq_nr = torch.autograd._get_sequence_nr() - 1
# The sequence_nr increments every time a new autograd Node
# is created. During the FWD pass we store the sequence_nr
# corresponding to the last autograd Node created on this fx
# node's meta. A single aten op can create multiple autograd
# nodes as is the case with in-place foreach ops. During the
# BWD pass we retrieve the sequence_nr stored on the current
# executing autograd Node. See NOTE [ Sequence Number ].
if current_meta.get("in_grad_fn", 0) > 0:
annotation_log.debug("seq_nr from current_meta")
new_seq_nr = current_meta["grad_fn_seq_nr"][-1]
# See Note [Functionalization View Replay Annotation]
# Overriding some node meta with the original node meta of the
# regenerated node.
replay_node: Node = fx_traceback.get_current_replay_node()
if replay_node is not None:
node.meta["is_functional_regenerated"] = True
if "seq_nr" in replay_node.meta:
annotation_log.debug("seq_nr from replay_node")
new_seq_nr = replay_node.meta["seq_nr"]
if "custom" in replay_node.meta:
node.meta["custom"] = replay_node.meta.get("custom")
if "stack_trace" in replay_node.meta:
node.stack_trace = replay_node.meta.get("stack_trace")
annotation_log.debug("Assigning new_seq_nr %s to %s", new_seq_nr, node.name)
node.meta["seq_nr"] = new_seq_nr
elif self.module_stack:
node.meta["nn_module_stack"] = copy.copy(self.module_stack)
if self.record_stack_traces and not node.stack_trace:
user_stack_summary = CapturedTraceback.extract().summary()
if user_stack_summary:
user_stack_summary = self._filter_traceback_frames(user_stack_summary)
if user_stack_summary:
node.stack_trace = "".join(user_stack_summary.format()).strip()
log.debug("create_node %s", node)
return node
def _filter_traceback_frames(
self, user_stack_summary: traceback.StackSummary
) -> traceback.StackSummary:
# This method can be overridden to customize the frame filtering logic
# for the recorded stack trace
user_frames = []
if self._record_forward_stack_traces_only:
user_frames = [
frame
for frame in user_stack_summary
if (
frame.name == "forward"
or frame.filename.endswith("torch/__init__.py")
)
]
else:
first_forward = -1
for i, frame in enumerate(user_stack_summary):
if frame.name == "forward":
user_frames = user_stack_summary[i:]
first_forward = i
break
# Not having a "forward" call in the stacktrace implies the
# stacktrace will probably be irrelevant
if first_forward == -1:
user_frames = []
from torch.fx.experimental.symbolic_shapes import uninteresting_files
user_frames = [
frame
for frame in user_frames
if frame.filename not in uninteresting_files()
]
return traceback.StackSummary.from_list(user_frames)
@compatibility(is_backward_compatible=True)
def proxy(self, node: Node) -> "Proxy":
return Proxy(node, self)
@compatibility(is_backward_compatible=True)
def create_proxy(
self,
kind: str,
target: Target,
args: tuple[Any, ...],
kwargs: dict[str, Any],
name: Optional[str] = None,
type_expr: Optional[Any] = None,
# fix noqa when updating bc tests
proxy_factory_fn: Callable[[Node], "Proxy"] = None, # noqa: RUF013
):
"""
Create a Node from the given arguments, then return the Node
wrapped in a Proxy object.
If kind = 'placeholder', then we're creating a Node that
represents the parameter of a function. If we need to encode
a default parameter, we use the ``args`` tuple. ``args`` is
otherwise empty for ``placeholder`` Nodes.
"""
args_ = self.create_arg(args)
kwargs_ = self.create_arg(kwargs)
assert isinstance(args_, tuple)
assert isinstance(kwargs_, dict)
node = self.create_node(kind, target, args_, kwargs_, name, type_expr)
if not proxy_factory_fn:
proxy = self.proxy(node)
else:
proxy = proxy_factory_fn(node)
return proxy
def _find_user_frame(self):
"""
Find the Python stack frame executing the user code during
symbolic tracing.
"""
# We have to do a little dance here. Basically, walk up the callstack and
# record the first frame not in the pytorch source. This is the frame executing
# the user code during tracing.
frame = inspect.currentframe()
pt_files = [
"torch/fx/proxy.py",
"torch/fx/_symbolic_trace.py",
"torch/fx/experimental/proxy_tensor.py",
"torch/_ops.py",
"torch/_tensor.py",
"torch/utils/_python_dispatch.py",
"torch/_prims_common/wrappers.py",
"torch/_refs/__init__.py",
"torch/_refs/nn/functional/__init__.py",
"torch/utils/_stats.py",
]
while frame:
frame = frame.f_back
if frame and all(
not frame.f_code.co_filename.endswith(file) for file in pt_files
):
break
if not frame:
return None
return frame
@compatibility(is_backward_compatible=True)
def create_arg(self, a: Any) -> Argument:
"""
A method that lowers the objects seen as arguments during symbolic evaluation
into Argument types that can be stored in IR.
Can be override to support more trace-specific types.
"""
# IMPORTANT: Are you here because you are trying to proxy a new type into
# the graph? Please Please Please contact someone on the PyTorch Compiler team;
# the considerations are subtle.
#
# 1) When you add a new type, all of the downstream consumers and pass writers
# need to handle the new type. torch.fx is intended to be easy to write
# passes for, so we will push back against new types.
# 2) In torch.compile's IR, there are only specific operations that go
# into the graph. In particular, Tensor operations should go into the graph,
# but non-Tensor operations shouldn't. What that means is that constructors
# for new types *SHOULD NOT* become nodes in the FX graph.
handler = _create_arg_bypass.get(type(a))
if handler is not None:
# this is just a performance optimization and can be removed if needed
# for common types, we have a fast path to avoid isinstance() overhead
# this doesn't remove the checks below since we need to handle subclasses
return handler(self, a)
if isinstance(a, Proxy):
return a.node # most common arg type goes first
elif hasattr(a, "__fx_create_arg__"):
return a.__fx_create_arg__(self)
# aggregates
elif isinstance(a, tuple):
if hasattr(a, "_fields"):
# NamedTuple constructors don't seem to like getting a generator
# expression as an argument to their constructor, so build this
# intermediate tuple and unpack it into the NamedTuple constructor
args = [self.create_arg(elem) for elem in a]
return type(a)(*args) # type: ignore[arg-type]
return type(a)([self.create_arg(elem) for elem in a])
elif isinstance(a, list):
return [self.create_arg(elem) for elem in a]
elif isinstance(a, dict):
return _create_arg_dict(self, a)
elif isinstance(a, slice):
return slice(
self.create_arg(a.start),
self.create_arg(a.stop),
self.create_arg(a.step),
)
elif isinstance(a, range):
return range(
self.create_arg(a.start),
self.create_arg(a.stop),
self.create_arg(a.step),
)
elif isinstance(a, (torch._ops.OpOverload, torch._ops.HigherOrderOperator)):
return a
elif is_dataclass(a):
kwargs = {
field.name: self.create_arg(getattr(a, field.name))
for field in fields(a)
}
return self.create_node("call_function", a.__class__, (), kwargs)
elif isinstance(a, (*base_types, enum.Enum)) or a is None or a is ...:
return a
raise NotImplementedError(f"argument of type: {type(a)}")
@compatibility(is_backward_compatible=True)
def to_bool(self, obj: "Proxy") -> bool:
"""Called when a proxy object is being converted to a boolean, such as
when used in control flow. Normally we don't know what to do because
we don't know the value of the proxy, but a custom tracer can attach more
information to the graph node using create_node and can choose to return a value.
"""
raise TraceError(
"symbolically traced variables cannot be used as inputs to control flow"
)
@compatibility(is_backward_compatible=True)
def iter(self, obj: "Proxy") -> Iterator:
"""Called when a proxy object is being iterated over, such as
when used in control flow. Normally we don't know what to do because
we don't know the value of the proxy, but a custom tracer can attach more
information to the graph node using create_node and can choose to return an iterator.
"""
raise TraceError(
"Proxy object cannot be iterated. This can be "
"attempted when the Proxy is used in a loop or"
" as a *args or **kwargs function argument. "
"See the torch.fx docs on pytorch.org for a "
"more detailed explanation of what types of "
"control flow can be traced, and check out the"
" Proxy docstring for help troubleshooting "
"Proxy iteration errors"
)
@compatibility(is_backward_compatible=True)
def keys(self, obj: "Proxy") -> Any:
"""Called when a proxy object is has the keys() method called.
This is what happens when ** is called on a proxy. This should return an
iterator it ** is suppose to work in your custom tracer.
"""
return Attribute(obj, "keys")()
# used in Proxy object when just appending to the graph while not tracing.
@compatibility(is_backward_compatible=True)
| TracerBase |
python | getsentry__sentry | tests/sentry/autofix/test_webhooks.py | {
"start": 650,
"end": 7826
} | class ____(APITestCase):
@override_settings(SEER_AUTOFIX_GITHUB_APP_USER_ID="12345")
@patch(
"sentry.seer.autofix.webhooks.get_autofix_state_from_pr_id",
return_value=AutofixState(
run_id=1,
request={
"project_id": 2,
"organization_id": 4,
"issue": {"id": 3, "title": "Test issue"},
"repos": [
{"provider": "github", "owner": "test", "name": "test", "external_id": "123"}
],
},
updated_at=datetime.now(timezone.utc),
status=AutofixStatus.PROCESSING,
steps=[],
),
)
@patch("sentry.seer.autofix.webhooks.analytics.record")
@patch("sentry.seer.autofix.webhooks.metrics.incr")
def test_opened(
self, mock_metrics_incr, mock_analytics_record, mock_get_autofix_state_from_pr_id
):
handle_github_pr_webhook_for_autofix(
self.organization,
"opened",
{"id": 1, "merged": False},
{"id": settings.SEER_AUTOFIX_GITHUB_APP_USER_ID},
)
mock_metrics_incr.assert_called_with("ai.autofix.pr.opened")
assert_last_analytics_event(
mock_analytics_record,
AiAutofixPrOpenedEvent(
organization_id=self.organization.id,
integration="github",
project_id=2,
group_id=3,
run_id=1,
),
)
@override_settings(SEER_AUTOFIX_GITHUB_APP_USER_ID="12345")
@patch(
"sentry.seer.autofix.webhooks.get_autofix_state_from_pr_id",
return_value=AutofixState(
run_id=1,
request={
"project_id": 2,
"organization_id": 4,
"issue": {"id": 3, "title": "Test issue"},
"repos": [
{"provider": "github", "owner": "test", "name": "test", "external_id": "123"}
],
},
updated_at=datetime.now(timezone.utc),
status=AutofixStatus.PROCESSING,
steps=[],
),
)
@patch("sentry.seer.autofix.webhooks.analytics.record")
@patch("sentry.seer.autofix.webhooks.metrics.incr")
def test_closed(
self, mock_metrics_incr, mock_analytics_record, mock_get_autofix_state_from_pr_id
):
handle_github_pr_webhook_for_autofix(
self.organization,
"closed",
{"id": 1, "merged": False},
{"id": settings.SEER_AUTOFIX_GITHUB_APP_USER_ID},
)
mock_metrics_incr.assert_called_with("ai.autofix.pr.closed")
assert_last_analytics_event(
mock_analytics_record,
AiAutofixPrClosedEvent(
organization_id=self.organization.id,
integration="github",
project_id=2,
group_id=3,
run_id=1,
),
)
@override_settings(SEER_AUTOFIX_GITHUB_APP_USER_ID="12345")
@patch(
"sentry.seer.autofix.webhooks.get_autofix_state_from_pr_id",
return_value=AutofixState(
run_id=1,
request={
"project_id": 2,
"organization_id": 4,
"issue": {"id": 3, "title": "Test issue"},
"repos": [
{"provider": "github", "owner": "test", "name": "test", "external_id": "123"}
],
},
updated_at=datetime.now(timezone.utc),
status=AutofixStatus.PROCESSING,
steps=[],
),
)
@patch("sentry.seer.autofix.webhooks.analytics.record")
@patch("sentry.seer.autofix.webhooks.metrics.incr")
def test_merged(
self, mock_metrics_incr, mock_analytics_record, mock_get_autofix_state_from_pr_id
):
handle_github_pr_webhook_for_autofix(
self.organization,
"closed",
{"id": 1, "merged": True},
{"id": settings.SEER_AUTOFIX_GITHUB_APP_USER_ID},
)
mock_metrics_incr.assert_called_with("ai.autofix.pr.merged")
assert_last_analytics_event(
mock_analytics_record,
AiAutofixPrMergedEvent(
organization_id=self.organization.id,
integration="github",
project_id=2,
group_id=3,
run_id=1,
),
)
@override_settings(SEER_AUTOFIX_GITHUB_APP_USER_ID="12345")
@patch(
"sentry.seer.autofix.webhooks.get_autofix_state_from_pr_id",
return_value=None,
)
@patch("sentry.seer.autofix.webhooks.analytics.record")
@patch("sentry.seer.autofix.webhooks.metrics.incr")
def test_no_run(
self, mock_metrics_incr, mock_analytics_record, mock_get_autofix_state_from_pr_id
):
handle_github_pr_webhook_for_autofix(
self.organization,
"closed",
{"id": 1, "merged": True},
{"id": settings.SEER_AUTOFIX_GITHUB_APP_USER_ID},
)
for key in ["ai.autofix.pr.merged", "ai.autofix.pr.closed", "ai.autofix.pr.opened"]:
assert call(key) not in mock_metrics_incr.call_args_list
assert_not_analytics_event(mock_analytics_record, AiAutofixPrClosedEvent)
assert_not_analytics_event(mock_analytics_record, AiAutofixPrMergedEvent)
assert_not_analytics_event(mock_analytics_record, AiAutofixPrOpenedEvent)
@override_settings(SEER_AUTOFIX_GITHUB_APP_USER_ID=None)
@patch(
"sentry.seer.autofix.webhooks.get_autofix_state_from_pr_id",
return_value=None,
)
@patch("sentry.seer.autofix.webhooks.analytics.record")
@patch("sentry.seer.autofix.webhooks.metrics.incr")
def test_no_settings_github_app_id_set(
self, mock_metrics_incr, mock_analytics_record, mock_get_autofix_state_from_pr_id
):
handle_github_pr_webhook_for_autofix(
self.organization,
"closed",
{"id": 1, "merged": True},
{"id": "5655"},
)
for key in ["ai.autofix.pr.merged", "ai.autofix.pr.closed", "ai.autofix.pr.opened"]:
assert call(key) not in mock_metrics_incr.call_args_list
assert call(key) not in mock_analytics_record.call_args_list
@override_settings(SEER_AUTOFIX_GITHUB_APP_USER_ID="12345")
@patch(
"sentry.seer.autofix.webhooks.get_autofix_state_from_pr_id",
return_value=None,
)
@patch("sentry.seer.autofix.webhooks.analytics.record")
@patch("sentry.seer.autofix.webhooks.metrics.incr")
def test_no_different_github_app(
self, mock_metrics_incr, mock_analytics_record, mock_get_autofix_state_from_pr_id
):
handle_github_pr_webhook_for_autofix(
self.organization,
"closed",
{"id": 1, "merged": True},
{"id": "321"},
)
for key in ["ai.autofix.pr.merged", "ai.autofix.pr.closed", "ai.autofix.pr.opened"]:
assert call(key) not in mock_metrics_incr.call_args_list
assert call(key) not in mock_analytics_record.call_args_list
| AutofixPrWebhookTest |
python | astropy__astropy | astropy/modeling/functional_models.py | {
"start": 72915,
"end": 77186
} | class ____(Fittable2DModel):
"""
Two dimensional radial symmetric Ring model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
r_in : float
Inner radius of the ring
width : float
Width of the ring.
r_out : float
Outer Radius of the ring. Can be specified instead of width.
See Also
--------
Disk2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r_{in} \\leq r \\leq r_{out} \\\\
0 & : \\text{else}
\\end{array}
\\right.
Where :math:`r_{out} = r_{in} + r_{width}`.
"""
amplitude = Parameter(default=1, description="Value of the disk function", mag=True)
x_0 = Parameter(default=0, description="X position of center of disc")
y_0 = Parameter(default=0, description="Y position of center of disc")
r_in = Parameter(default=1, description="Inner radius of the ring")
width = Parameter(default=1, description="Width of the ring")
def __init__(
self,
amplitude=amplitude.default,
x_0=x_0.default,
y_0=y_0.default,
r_in=None,
width=None,
r_out=None,
**kwargs,
):
if (r_in is None) and (r_out is None) and (width is None):
r_in = self.r_in.default
width = self.width.default
elif (r_in is not None) and (r_out is None) and (width is None):
width = self.width.default
elif (r_in is None) and (r_out is not None) and (width is None):
r_in = self.r_in.default
width = r_out - r_in
elif (r_in is None) and (r_out is None) and (width is not None):
r_in = self.r_in.default
elif (r_in is not None) and (r_out is not None) and (width is None):
width = r_out - r_in
elif (r_in is None) and (r_out is not None) and (width is not None):
r_in = r_out - width
elif (r_in is not None) and (r_out is not None) and (width is not None):
if np.any(width != (r_out - r_in)):
raise InputParameterError("Width must be r_out - r_in")
if np.any(r_in < 0) or np.any(width < 0):
raise InputParameterError(f"{r_in=} and {width=} must both be >=0")
super().__init__(
amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width, **kwargs
)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width):
"""Two dimensional Ring model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
r_range = np.logical_and(rr >= r_in**2, rr <= (r_in + width) ** 2)
result = np.select([r_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"r_in": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
| Ring2D |
python | apache__airflow | providers/mongo/tests/unit/mongo/sensors/test_mongo.py | {
"start": 1282,
"end": 2494
} | class ____:
def setup_method(self, method):
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
self.context = Context()
@patch("airflow.providers.mongo.sensors.mongo.MongoHook", spec=MongoHook)
def test_execute_operator(self, mock_mongo_hook_constructor):
test_collection = "coll"
test_query = {"test_key": "test"}
test_conn_id = "mongo_default"
test_db = "test_db"
mock_mongo_hook_find = mock_mongo_hook_constructor.return_value.find
mock_mongo_hook_find.return_value = {"document_key": "document_val"}
sensor = MongoSensor(
collection=test_collection,
query=test_query,
mongo_conn_id=test_conn_id,
mongo_db=test_db,
task_id="test_task",
dag=self.dag,
)
result = sensor.poke(self.context)
mock_mongo_hook_constructor.assert_called_once_with(mongo_conn_id=test_conn_id)
assert result is True
mock_mongo_hook_find.assert_called_once_with(
test_collection, test_query, mongo_db=test_db, find_one=True
)
| TestMongoSensor |
python | pytorch__pytorch | torch/fx/experimental/optimization.py | {
"start": 9409,
"end": 17809
} | class ____:
def __init__(self, n):
self.parent: list[Optional[int]] = [None] * n
self.size: list[int] = [0] * n
def make_set(self, v: int):
self.parent[v] = v
self.size[v] = 1
def find(self, v: int) -> int:
par = self.parent[v]
if v == par:
return v
assert par is not None
self.parent[v] = self.find(par)
return cast(int, self.parent[v])
def join(self, a: int, b: int):
a, b = self.find(a), self.find(b)
if a == b:
return a
if self.size[a] < self.size[b]:
a, b = b, a
self.parent[b] = a
self.size[a] += self.size[b]
def optimize_for_inference(
model: torch.nn.Module,
pass_config: Optional[dict[str, Any]] = None,
tracer: type[fx.Tracer] = fx.Tracer,
) -> torch.nn.Module:
"""
Performs a set of optimization passes to optimize a model for the
purposes of inference. Specifically, the passes that are run are:
1. Conv/BN fusion
2. Dropout removal
3. MKL layout optimizations
The third optimization takes a function `use_mkl_heuristic` that's used
to determine whether a subgraph should be explicitly run in MKL layout.
Note: As FX does not currently handle aliasing, this pass currently
assumes nothing aliases. If that isn't true, use at your own risk.
"""
default_pass_config = {
"conv_bn_fuse": True,
"remove_dropout": True,
"mkldnn_layout_optimize": {"heuristic": use_mkl_length},
}
if pass_config is None:
pass_config = {}
default_pass_config.update(pass_config)
if default_pass_config["conv_bn_fuse"]:
model = fuse(model)
if default_pass_config["remove_dropout"]:
model = remove_dropout(model)
if default_pass_config["mkldnn_layout_optimize"] is False:
return model
if not isinstance(default_pass_config["mkldnn_layout_optimize"], dict):
raise RuntimeError("mkldnn_layout_optimize config is not a dict")
if "heuristic" not in default_pass_config["mkldnn_layout_optimize"]:
raise RuntimeError("Heuristic not found in mkldnn_layout_optimize config")
use_mkl_heuristic = default_pass_config["mkldnn_layout_optimize"]["heuristic"]
cur_tracer = tracer()
fx_graph = cur_tracer.trace(copy.deepcopy(model))
fx.GraphModule(cur_tracer.root, fx_graph)
modules: dict[str, nn.Module] = dict(model.named_modules())
class MklSupport(Enum):
NO = 1
YES = 2
UNKNOWN = 3
# Inserts to_mkldnn and to_dense around every node we want to be a MKLDNN node.
# If the op is in `mkldnn_supported` then we always treat it as a MKLDNN node.
# However, if it's in `mkldnn_supported_unknown`, then we only treat it as
# a MKLDNN node if its inputs are MKLDNN nodes.
for node in list(fx_graph.nodes):
supports_mkldnn = MklSupport.NO
if node.op == "call_module":
cur_module = modules[node.target]
if type(cur_module) in mkldnn_supported:
supports_mkldnn = MklSupport.YES
sample_parameter = next(cur_module.parameters(), None)
if sample_parameter is not None:
assert sample_parameter.dtype == torch.float, (
"this pass is only for torch.float modules"
)
assert sample_parameter.device == torch.device("cpu"), (
"this pass is only for CPU modules"
)
elif node.op == "call_function":
if node.target in mkldnn_supported:
supports_mkldnn = MklSupport.YES
elif node.target in mkldnn_supported_unknown:
supports_mkldnn = MklSupport.UNKNOWN
if supports_mkldnn != MklSupport.NO:
if supports_mkldnn == MklSupport.UNKNOWN:
if not any(arg.target == "to_dense" for arg in node.args):
continue
with fx_graph.inserting_before(node):
mkldnn_args = fx.map_arg(
node.args, lambda n: fx_graph.call_method("to_mkldnn", (n,))
)
node.args = cast(tuple[fx.node.Argument], mkldnn_args)
with fx_graph.inserting_after(node):
dense_x = fx_graph.create_node("call_method", "to_dense", (node,))
node.replace_all_uses_with(dense_x)
dense_x.args = (node,)
# Does pre-conversion of all modules into MKLDNN (when possible)
old_modules = modules_to_mkldnn(list(fx_graph.nodes), modules)
fx_graph.old_modules = old_modules # type: ignore[attr-defined]
# optimizes all a -> to_dense -> to_mkldnn -> b patterns into a -> b
for node in fx_graph.nodes:
if node.op == "call_method" and node.target == "to_dense":
prv_node = node.args[0]
users = list(node.users)
for user in users:
if user.op == "call_method" and user.target == "to_mkldnn":
user.replace_all_uses_with(prv_node)
fx_graph.erase_node(user)
if len(node.users) == 0:
fx_graph.erase_node(node)
num_nodes = len(fx_graph.nodes)
uf = UnionFind(num_nodes)
def get_color(n):
if hasattr(n, "color"): # Current node is part of a MKL subgraph
return uf.find(n.color)
if hasattr(n, "start_color"): # Current node is input to MKL subgraph
return uf.find(n.start_color)
return None
# This code is to find each MKLDNN subgraph. Each MKLDNN subgraph consists
# of input nodes (which are only `to_mkldnn` calls), output nodes
# (`to_dense` calls), and intermediate nodes, which are run entirely on
# MKLDNN layout tensors.
#
# Specifically, this code does a flood fill on a directed acyclic graph
# (DAG), starting from each possible "start node" (i.e: `to_mkldnn` nodes).
# If every node only had one input, this would be sufficient. However, in
# the case that a node has multiple inputs coming from different start
# nodes (i.e. colors), we need to join these 2 colors into 1. That's done
# using a Disjoint Set Union.
for cur_idx, node in enumerate(fx_graph.nodes):
if node.op == "call_method" and node.target == "to_mkldnn":
node.start_color = cur_idx
uf.make_set(cur_idx)
elif node.op == "call_method" and node.target == "to_dense":
assert get_color(node.args[0]) is not None
node.end_color = get_color(node.args[0])
else:
cur_colors = [
get_color(i)
for i in node.all_input_nodes
if isinstance(i, fx.Node)
if get_color(i) is not None
]
if len(cur_colors) == 0:
continue
assert not any(i is None for i in cur_colors)
cur_colors = sorted(cur_colors)
node.color = cur_colors[0]
for other_color in cur_colors[1:]:
uf.join(cur_colors[0], other_color)
mkldnn_graphs: dict[int, MklSubgraph] = defaultdict(lambda: MklSubgraph(fx_graph))
for node in fx_graph.nodes:
if hasattr(node, "color"):
mkldnn_graphs[uf.find(node.color)].nodes.append(node)
if hasattr(node, "start_color"):
mkldnn_graphs[uf.find(node.start_color)].start_nodes.append(node)
if hasattr(node, "end_color"):
mkldnn_graphs[uf.find(node.end_color)].end_nodes.append(node)
# Now that we have all the subgraphs, we need to decide which MKLDNN
# subgraphs we actually want to keep in MKLDNN.
for graph in mkldnn_graphs.values():
if not use_mkl_heuristic(graph):
for node in graph.start_nodes + graph.end_nodes:
prv = node.args[0]
node.replace_all_uses_with(prv) # type: ignore[arg-type]
fx_graph.erase_node(node)
reset_modules(graph.nodes, modules, old_modules)
mkldnn_conversions = 0
for node in fx_graph.nodes:
if node.target == "to_mkldnn" or node.target == "to_dense":
mkldnn_conversions += 1
logging.getLogger(__name__).info("mkldnn conversions: %s", mkldnn_conversions)
fx_graph.lint()
result = fx.GraphModule(model, fx_graph)
return result
| UnionFind |
python | pytorch__pytorch | benchmarks/dynamo/pr_time_benchmarks/check_results.py | {
"start": 264,
"end": 8540
} | class ____:
benchmark_name: str
metric_name: str
actual_value: int
def replace_with_zeros(num):
"""
Keeps the first three digits of an integer and replaces the rest with zeros.
Args:
num (int): The number to modify.
Returns:
int: The modified number.
Raises:
ValueError: If the input is not an integer.
"""
# Check if input is an integer
if not isinstance(num, int):
raise ValueError("Input must be an integer")
# Calculate the number of digits to remove
digits_to_remove = len(str(abs(num))) - 4
# Replace digits with zeros
if digits_to_remove > 0:
modified_num = (num // 10**digits_to_remove) * 10**digits_to_remove
else:
modified_num = num
return modified_num
def main():
# Expected file is the file that have the results that we are comparing against.
# Expected has the following format:
# benchmark_name, metric name, expected value, noise margin (as percentage)
# Example:
# add_loop_eager,compile_time_instruction_count,283178305, 0.01 (1% noise margin)
expected_file_path = sys.argv[1]
# Result file is the file that have the results of the current run. It has the following format:
# benchmark_name, metric name, expected value, noise margin (as percentage)
# Example:
# add_loop_eager,compile_time_instruction_count,283178305
result_file_path = sys.argv[2]
# A path where a new expected results file will be written that can be used to replace expected_results.csv
# in case of failure. In case of no failure the content of this file will match expected_file_path.
reference_expected_results_path = sys.argv[3]
# Read expected data file.
expected_data: dict[str, ExpectedFileEntry] = {}
with open(expected_file_path) as f:
reader = csv.reader(f)
for row in reader:
if len(row) == 0:
continue
entry = ExpectedFileEntry(
benchmark_name=row[0].strip(),
metric_name=row[1].strip(),
expected_value=int(row[2]),
noise_margin=float(row[3]),
)
key = (entry.benchmark_name, entry.metric_name)
assert key not in expected_data, f"Duplicate entry for {key}"
expected_data[key] = entry
# Read result data file.
result_data: dict[str, ResultFileEntry] = {}
with open(result_file_path) as f:
reader = csv.reader(f)
for row in reader:
entry = ResultFileEntry(
benchmark_name=row[0].strip(),
metric_name=row[1].strip(),
actual_value=int(row[2]),
)
key = (entry.benchmark_name, entry.metric_name)
assert key not in result_data, f"Duplicate entry for {key}"
result_data[key] = entry
fail = False
new_expected = copy.deepcopy(expected_data)
for key, entry in expected_data.items():
if key not in result_data:
print(f"Missing entry for {key} in result file")
sys.exit(1)
low = entry.expected_value - entry.expected_value * entry.noise_margin
high = entry.expected_value + entry.expected_value * entry.noise_margin
result = result_data[key].actual_value
ratio = float(result - entry.expected_value) * 100 / entry.expected_value
def log(event_name):
scribe.open_source_signpost(
subsystem="pr_time_benchmarks",
name=event_name,
parameters=json.dumps(
{
"benchmark_name": entry.benchmark_name,
"metric_name": entry.metric_name,
"actual_value": result,
"expected_value": entry.expected_value,
"noise_margin": entry.noise_margin,
"change_ratio": ratio,
}
),
)
new_entry = copy.deepcopy(entry)
# only change if abs(ratio) > entry.noise_margin /5.
new_entry.expected_value = (
replace_with_zeros(result)
if abs(ratio) > entry.noise_margin * 100 / 5
else entry.expected_value
)
new_expected[key] = new_entry
if result > high:
fail = True
print(
f"REGRESSION: benchmark {key} failed, actual result {result} "
f"is {ratio:.2f}% higher than expected {entry.expected_value} ±{entry.noise_margin * 100:+.2f}% "
f"if this is an expected regression, please update the expected results.\n"
)
print(
"please update all results that changed significantly, and not only the failed ones"
)
log("fail_regression")
elif result < low:
fail = True
print(
f"WIN: benchmark {key} failed, actual result {result} is {ratio:+.2f}% lower than "
f"expected {entry.expected_value} ±{entry.noise_margin * 100:.2f}% "
f"please OPEN THE TEST RESULTS update ALL BENCHMARKS RESULT with the new printed expected results. ALL ALL ALL\n"
)
print(
"please update all results that changed significantly, and not only the failed ones"
)
log("fail_win")
else:
print(
f"PASS: benchmark {key} pass, actual result {result} {ratio:+.2f}% is within "
f"expected {entry.expected_value} ±{entry.noise_margin * 100:.2f}%\n"
)
log("pass")
# Log all benchmarks that do not have a regression test enabled for them.
for key, entry in result_data.items():
if key not in expected_data:
print(
f"MISSING REGRESSION TEST: benchmark {key} does not have a regression test enabled for it.\n"
)
scribe.open_source_signpost(
subsystem="pr_time_benchmarks",
name="missing_regression_test",
parameters=json.dumps(
{
"benchmark_name": entry.benchmark_name,
"metric_name": entry.metric_name,
}
),
)
with open(reference_expected_results_path, "w", newline="") as csvfile:
writer = csv.writer(csvfile)
for entry in new_expected.values():
# Write the data to the CSV file
# print(f"{entry.benchmark_name},{entry.metric_name,},{round(entry.expected_value)},{entry.noise_margin}")
writer.writerow(
[
entry.benchmark_name,
entry.metric_name,
entry.expected_value,
entry.noise_margin,
]
)
# Three empty rows for merge conflicts.
writer.writerow([])
writer.writerow([])
writer.writerow([])
print("=" * 80)
print("=" * 80)
print("=" * 80)
print("To update expected results, run the following command:")
print()
print("cat > benchmarks/dynamo/pr_time_benchmarks/expected_results.csv << EOF")
with open(reference_expected_results_path) as f:
print(f.read().rstrip())
print("EOF")
print()
print("=" * 80)
print("=" * 80)
print("=" * 80)
if fail:
print(
f"There was some failures you can use the new reference expected result stored at path:"
f"{reference_expected_results_path} and printed above\n"
)
print(
"To reproduce locally follow the following instructions, note that absolute instructions count are going "
"to be different than on the CI, hence you might want to run locally with and without your change:\n"
"cd benchmarks/dynamo/pr_time_benchmarks/ \n"
"python benchmarks/BENCHMARK.py result.csv \n"
"note that BENCHMARK.py is the name of the file containing the failing benchmark."
)
sys.exit(1)
else:
print("All benchmarks passed")
if __name__ == "__main__":
main()
| ResultFileEntry |
python | dagster-io__dagster | python_modules/libraries/dagster-sling/dagster_sling/components/sling_replication_collection/scaffolder.py | {
"start": 224,
"end": 608
} | class ____(Scaffolder):
def scaffold(self, request: ScaffoldRequest) -> None:
scaffold_component(request, {"replications": [{"path": "replication.yaml"}]})
replication_path = request.target_path / "replication.yaml"
with open(replication_path, "w") as f:
yaml.dump({"source": {}, "target": {}, "streams": {}}, f)
| SlingReplicationComponentScaffolder |
python | pytest-dev__pytest | testing/test_pluginmanager.py | {
"start": 539,
"end": 8667
} | class ____:
def test_addhooks_conftestplugin(
self, pytester: Pytester, _config_for_test: Config
) -> None:
pytester.makepyfile(
newhooks="""
def pytest_myhook(xyz):
"new hook"
"""
)
conf = pytester.makeconftest(
"""
import newhooks
def pytest_addhooks(pluginmanager):
pluginmanager.add_hookspecs(newhooks)
def pytest_myhook(xyz):
return xyz + 1
"""
)
config = _config_for_test
pm = config.pluginmanager
pm.hook.pytest_addhooks.call_historic(
kwargs=dict(pluginmanager=config.pluginmanager)
)
config.pluginmanager._importconftest(
conf,
importmode="prepend",
rootpath=pytester.path,
consider_namespace_packages=False,
)
# print(config.pluginmanager.get_plugins())
res = config.hook.pytest_myhook(xyz=10)
assert res == [11]
def test_addhooks_nohooks(self, pytester: Pytester) -> None:
pytester.makeconftest(
"""
import sys
def pytest_addhooks(pluginmanager):
pluginmanager.add_hookspecs(sys)
"""
)
res = pytester.runpytest()
assert res.ret != 0
res.stderr.fnmatch_lines(["*did not find*sys*"])
def test_do_option_postinitialize(self, pytester: Pytester) -> None:
config = pytester.parseconfigure()
assert not hasattr(config.option, "test123")
p = pytester.makepyfile(
"""
def pytest_addoption(parser):
parser.addoption('--test123', action="store_true",
default=True)
"""
)
config.pluginmanager._importconftest(
p,
importmode="prepend",
rootpath=pytester.path,
consider_namespace_packages=False,
)
assert config.option.test123
def test_configure(self, pytester: Pytester) -> None:
config = pytester.parseconfig()
values = []
class A:
def pytest_configure(self):
values.append(self)
config.pluginmanager.register(A())
assert len(values) == 0
config._do_configure()
assert len(values) == 1
config.pluginmanager.register(A()) # leads to a configured() plugin
assert len(values) == 2
assert values[0] != values[1]
config._ensure_unconfigure()
config.pluginmanager.register(A())
assert len(values) == 2
@pytest.mark.skipif(
not sys.platform.startswith("win"),
reason="requires a case-insensitive file system",
)
def test_conftestpath_case_sensitivity(self, pytester: Pytester) -> None:
"""Unit test for issue #9765."""
config = pytester.parseconfig()
pytester.makepyfile(**{"tests/conftest.py": ""})
conftest = pytester.path.joinpath("tests/conftest.py")
conftest_upper_case = pytester.path.joinpath("TESTS/conftest.py")
mod = config.pluginmanager._importconftest(
conftest,
importmode="prepend",
rootpath=pytester.path,
consider_namespace_packages=False,
)
plugin = config.pluginmanager.get_plugin(str(conftest))
assert plugin is mod
mod_uppercase = config.pluginmanager._importconftest(
conftest_upper_case,
importmode="prepend",
rootpath=pytester.path,
consider_namespace_packages=False,
)
plugin_uppercase = config.pluginmanager.get_plugin(str(conftest_upper_case))
assert plugin_uppercase is mod_uppercase
# No str(conftestpath) normalization so conftest should be imported
# twice and modules should be different objects
assert mod is not mod_uppercase
def test_hook_tracing(self, _config_for_test: Config) -> None:
pytestpm = _config_for_test.pluginmanager # fully initialized with plugins
saveindent = []
class api1:
def pytest_plugin_registered(self):
saveindent.append(pytestpm.trace.root.indent)
class api2:
def pytest_plugin_registered(self):
saveindent.append(pytestpm.trace.root.indent)
raise ValueError()
values: list[str] = []
pytestpm.trace.root.setwriter(values.append)
undo = pytestpm.enable_tracing()
try:
indent = pytestpm.trace.root.indent
p = api1()
pytestpm.register(p)
assert pytestpm.trace.root.indent == indent
assert len(values) >= 2
assert "pytest_plugin_registered" in values[0]
assert "finish" in values[1]
values[:] = []
with pytest.raises(ValueError):
pytestpm.register(api2())
assert pytestpm.trace.root.indent == indent
assert saveindent[0] > indent
finally:
undo()
def test_hook_proxy(self, pytester: Pytester) -> None:
"""Test the gethookproxy function(#2016)"""
config = pytester.parseconfig()
session = Session.from_config(config)
pytester.makepyfile(**{"tests/conftest.py": "", "tests/subdir/conftest.py": ""})
conftest1 = pytester.path.joinpath("tests/conftest.py")
conftest2 = pytester.path.joinpath("tests/subdir/conftest.py")
config.pluginmanager._importconftest(
conftest1,
importmode="prepend",
rootpath=pytester.path,
consider_namespace_packages=False,
)
ihook_a = session.gethookproxy(pytester.path / "tests")
assert ihook_a is not None
config.pluginmanager._importconftest(
conftest2,
importmode="prepend",
rootpath=pytester.path,
consider_namespace_packages=False,
)
ihook_b = session.gethookproxy(pytester.path / "tests")
assert ihook_a is not ihook_b
def test_hook_with_addoption(self, pytester: Pytester) -> None:
"""Test that hooks can be used in a call to pytest_addoption"""
pytester.makepyfile(
newhooks="""
import pytest
@pytest.hookspec(firstresult=True)
def pytest_default_value():
pass
"""
)
pytester.makepyfile(
myplugin="""
import newhooks
def pytest_addhooks(pluginmanager):
pluginmanager.add_hookspecs(newhooks)
def pytest_addoption(parser, pluginmanager):
default_value = pluginmanager.hook.pytest_default_value()
parser.addoption("--config", help="Config, defaults to %(default)s", default=default_value)
"""
)
pytester.makeconftest(
"""
pytest_plugins=("myplugin",)
def pytest_default_value():
return "default_value"
"""
)
res = pytester.runpytest("--help")
res.stdout.fnmatch_lines(["*--config=CONFIG*default_value*"])
def test_default_markers(pytester: Pytester) -> None:
result = pytester.runpytest("--markers")
result.stdout.fnmatch_lines(["*tryfirst*first*", "*trylast*last*"])
def test_importplugin_error_message(
pytester: Pytester, pytestpm: PytestPluginManager
) -> None:
"""Don't hide import errors when importing plugins and provide
an easy to debug message.
See #375 and #1998.
"""
pytester.syspathinsert(pytester.path)
pytester.makepyfile(
qwe="""\
def test_traceback():
raise ImportError('Not possible to import: ☺')
test_traceback()
"""
)
with pytest.raises(ImportError) as excinfo:
pytestpm.import_plugin("qwe")
assert str(excinfo.value).endswith(
'Error importing plugin "qwe": Not possible to import: ☺'
)
assert "in test_traceback" in str(excinfo.traceback[-1])
| TestPytestPluginInteractions |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 20431,
"end": 21005
} | class ____(ProjectUsersMixin, FormView):
# We only use this to display the form in the list view.
http_method_names = ["get"]
template_name = "projects/project_users.html"
def _get_invitations(self):
return Invitation.objects.for_object(self.get_project())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["users"] = self.get_queryset()
context["invitations"] = self._get_invitations()
context["is_last_user"] = self._is_last_user()
return context
| ProjectUsersList |
python | ray-project__ray | python/ray/train/tests/test_iter_torch_batches_gpu.py | {
"start": 4073,
"end": 4395
} | class ____(NumpyBatchCollateFn):
"""Collate function that returns id and value as a dictionary of tensors."""
def __call__(self, batch: Dict[str, np.ndarray]) -> Dict[str, torch.Tensor]:
assert isinstance(batch, dict)
return convert_ndarray_batch_to_torch_tensor_batch(batch)
| DictNumpyBatchCollateFn |
python | catalyst-team__catalyst | tests/pipelines/test_vae.py | {
"start": 1594,
"end": 10055
} | class ____(dl.IRunner):
def __init__(self, hid_features, logdir, engine):
super().__init__()
self.hid_features = hid_features
self._logdir = logdir
self._engine = engine
def get_engine(self):
return self._engine
def get_loggers(self):
return {
"console": dl.ConsoleLogger(),
"csv": dl.CSVLogger(logdir=self._logdir),
"tensorboard": dl.TensorboardLogger(logdir=self._logdir),
}
@property
def num_epochs(self) -> int:
return 1
def get_loaders(self):
loaders = {
"train": DataLoader(
MNIST(DATA_ROOT, train=False),
batch_size=32,
),
"valid": DataLoader(
MNIST(DATA_ROOT, train=False),
batch_size=32,
),
}
return loaders
def get_model(self):
model = self.model if self.model is not None else VAE(28 * 28, self.hid_features)
return model
def get_optimizer(self, model):
return optim.Adam(model.parameters(), lr=0.02)
def get_callbacks(self):
return {
"backward": dl.BackwardCallback(metric_key="loss"),
"optimizer": dl.OptimizerCallback(metric_key="loss"),
"checkpoint": dl.CheckpointCallback(
self._logdir,
loader_key="valid",
metric_key="loss",
minimize=True,
topk=3,
),
}
def on_loader_start(self, runner):
super().on_loader_start(runner)
self.meters = {
key: metrics.AdditiveMetric(compute_on_call=False)
for key in ["loss_ae", "loss_kld", "loss"]
}
def handle_batch(self, batch):
x, _ = batch
x = x.view(x.size(0), -1)
x_, loc, log_scale = self.model(x, deterministic=not self.is_train_loader)
loss_ae = F.mse_loss(x_, x)
loss_kld = (
-0.5 * torch.sum(1 + log_scale - loc.pow(2) - log_scale.exp(), dim=1)
).mean()
loss = loss_ae + loss_kld * 0.01
self.batch_metrics = {"loss_ae": loss_ae, "loss_kld": loss_kld, "loss": loss}
for key in ["loss_ae", "loss_kld", "loss"]:
self.meters[key].update(self.batch_metrics[key].item(), self.batch_size)
def on_loader_end(self, runner):
for key in ["loss_ae", "loss_kld", "loss"]:
self.loader_metrics[key] = self.meters[key].compute()[0]
super().on_loader_end(runner)
def predict_batch(self, batch):
random_latent_vectors = torch.randn(1, self.hid_features).to(self.engine.device)
generated_images = self.model.decoder(random_latent_vectors).detach()
return generated_images
def train_experiment(engine=None):
with TemporaryDirectory() as logdir:
runner = CustomRunner(64, logdir, engine)
runner.run()
if isinstance(engine, (dl.CPUEngine, dl.GPUEngine)):
runner.predict_batch(None)[0].cpu().numpy().reshape(28, 28)
def train_experiment_from_configs(*auxiliary_configs: str):
run_experiment_from_configs(
Path(__file__).parent / "configs",
f"{Path(__file__).stem}.yml",
*auxiliary_configs,
)
# Device
@mark.skipif(not IS_CPU_REQUIRED, reason="CUDA device is not available")
def test_run_on_cpu():
train_experiment(dl.CPUEngine())
@mark.skipif(
not IS_CONFIGS_REQUIRED or not IS_CPU_REQUIRED, reason="CPU device is not available"
)
def test_config_run_on_cpu():
train_experiment_from_configs("engine_cpu.yml")
@mark.skipif(
not all([IS_GPU_REQUIRED, IS_CUDA_AVAILABLE]), reason="CUDA device is not available"
)
def test_run_on_torch_cuda0():
train_experiment(dl.GPUEngine())
@mark.skipif(
not IS_CONFIGS_REQUIRED or not all([IS_GPU_REQUIRED, IS_CUDA_AVAILABLE]),
reason="CUDA device is not available",
)
def test_config_run_on_torch_cuda0():
train_experiment_from_configs("engine_gpu.yml")
@mark.skipif(
not all([IS_GPU_AMP_REQUIRED, IS_CUDA_AVAILABLE, SETTINGS.amp_required]),
reason="No CUDA or AMP found",
)
def test_run_on_amp():
train_experiment(dl.GPUEngine(fp16=True))
@mark.skipif(
not IS_CONFIGS_REQUIRED
or not all([IS_GPU_AMP_REQUIRED, IS_CUDA_AVAILABLE, SETTINGS.amp_required]),
reason="No CUDA or AMP found",
)
def test_config_run_on_amp():
train_experiment_from_configs("engine_gpu_amp.yml")
# DP
@mark.skipif(
not all([IS_DP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
reason="No CUDA>=2 found",
)
def test_run_on_torch_dp():
train_experiment(dl.DataParallelEngine())
@mark.skipif(
not IS_CONFIGS_REQUIRED
or not all([IS_DP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
reason="No CUDA>=2 found",
)
def test_config_run_on_torch_dp():
train_experiment_from_configs("engine_dp.yml")
@mark.skipif(
not all(
[
IS_DP_AMP_REQUIRED,
IS_CUDA_AVAILABLE,
NUM_CUDA_DEVICES >= 2,
SETTINGS.amp_required,
]
),
reason="No CUDA>=2 or AMP found",
)
def test_run_on_amp_dp():
train_experiment(dl.DataParallelEngine(fp16=True))
@mark.skipif(
not IS_CONFIGS_REQUIRED
or not all(
[
IS_DP_AMP_REQUIRED,
IS_CUDA_AVAILABLE,
NUM_CUDA_DEVICES >= 2,
SETTINGS.amp_required,
]
),
reason="No CUDA>=2 or AMP found",
)
def test_config_run_on_amp_dp():
train_experiment_from_configs("engine_dp_amp.yml")
# DDP
# @mark.skipif(
# not all([IS_DDP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
# reason="No CUDA>=2 found",
# )
# def test_run_on_torch_ddp():
# train_experiment(dl.DistributedDataParallelEngine())
# @mark.skipif(
# not IS_CONFIGS_REQUIRED
# or not all([IS_DDP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
# reason="No CUDA>=2 found",
# )
# def test_config_run_on_torch_ddp():
# train_experiment_from_configs("engine_ddp.yml")
# @mark.skipif(
# not all(
# [
# IS_DDP_AMP_REQUIRED,
# IS_CUDA_AVAILABLE,
# NUM_CUDA_DEVICES >= 2,
# SETTINGS.amp_required,
# ]
# ),
# reason="No CUDA>=2 or AMP found",
# )
# def test_run_on_amp_ddp():
# train_experiment(dl.DistributedDataParallelEngine(fp16=True))
# @mark.skipif(
# not IS_CONFIGS_REQUIRED
# or not all(
# [
# IS_DDP_AMP_REQUIRED,
# IS_CUDA_AVAILABLE,
# NUM_CUDA_DEVICES >= 2,
# SETTINGS.amp_required,
# ]
# ),
# reason="No CUDA>=2 or AMP found",
# )
# def test_config_run_on_amp_ddp():
# train_experiment_from_configs("engine_ddp_amp.yml")
# def _train_fn(local_rank, world_size):
# process_group_kwargs = {
# "backend": "nccl",
# "world_size": world_size,
# }
# os.environ["WORLD_SIZE"] = str(world_size)
# os.environ["RANK"] = str(local_rank)
# os.environ["LOCAL_RANK"] = str(local_rank)
# dist.init_process_group(**process_group_kwargs)
# train_experiment(dl.Engine())
# dist.destroy_process_group()
# @mark.skipif(
# not all([IS_DDP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
# reason="No CUDA>=2 found",
# )
# def test_run_on_torch_ddp_spawn():
# world_size: int = torch.cuda.device_count()
# mp.spawn(
# _train_fn,
# args=(world_size,),
# nprocs=world_size,
# join=True,
# )
# def _train_fn_amp(local_rank, world_size):
# process_group_kwargs = {
# "backend": "nccl",
# "world_size": world_size,
# }
# os.environ["WORLD_SIZE"] = str(world_size)
# os.environ["RANK"] = str(local_rank)
# os.environ["LOCAL_RANK"] = str(local_rank)
# dist.init_process_group(**process_group_kwargs)
# train_experiment(dl.Engine(fp16=True))
# dist.destroy_process_group()
# @mark.skipif(
# not all(
# [
# IS_DDP_AMP_REQUIRED,
# IS_CUDA_AVAILABLE,
# NUM_CUDA_DEVICES >= 2,
# SETTINGS.amp_required,
# ]
# ),
# reason="No CUDA>=2 or AMP found",
# )
# def test_run_on_torch_ddp_amp_spawn():
# world_size: int = torch.cuda.device_count()
# mp.spawn(
# _train_fn_amp,
# args=(world_size,),
# nprocs=world_size,
# join=True,
# )
# dist.destroy_process_group()
| CustomRunner |
python | scrapy__scrapy | tests/test_stats.py | {
"start": 2134,
"end": 4473
} | class ____:
def test_collector(self, crawler: Crawler) -> None:
stats = StatsCollector(crawler)
assert stats.get_stats() == {}
assert stats.get_value("anything") is None
assert stats.get_value("anything", "default") == "default"
stats.set_value("test", "value")
assert stats.get_stats() == {"test": "value"}
stats.set_value("test2", 23)
assert stats.get_stats() == {"test": "value", "test2": 23}
assert stats.get_value("test2") == 23
stats.inc_value("test2")
assert stats.get_value("test2") == 24
stats.inc_value("test2", 6)
assert stats.get_value("test2") == 30
stats.max_value("test2", 6)
assert stats.get_value("test2") == 30
stats.max_value("test2", 40)
assert stats.get_value("test2") == 40
stats.max_value("test3", 1)
assert stats.get_value("test3") == 1
stats.min_value("test2", 60)
assert stats.get_value("test2") == 40
stats.min_value("test2", 35)
assert stats.get_value("test2") == 35
stats.min_value("test4", 7)
assert stats.get_value("test4") == 7
def test_dummy_collector(self, crawler: Crawler) -> None:
stats = DummyStatsCollector(crawler)
assert stats.get_stats() == {}
assert stats.get_value("anything") is None
assert stats.get_value("anything", "default") == "default"
stats.set_value("test", "value")
stats.inc_value("v1")
stats.max_value("v2", 100)
stats.min_value("v3", 100)
stats.open_spider()
stats.set_value("test", "value")
assert stats.get_stats() == {}
def test_deprecated_spider_arg(self, crawler: Crawler, spider: Spider) -> None:
stats = StatsCollector(crawler)
with pytest.warns(
ScrapyDeprecationWarning,
match=r"Passing a 'spider' argument to StatsCollector.set_value\(\) is deprecated",
):
stats.set_value("test", "value", spider=spider)
assert stats.get_stats() == {"test": "value"}
with pytest.warns(
ScrapyDeprecationWarning,
match=r"Passing a 'spider' argument to StatsCollector.get_stats\(\) is deprecated",
):
assert stats.get_stats(spider) == {"test": "value"}
| TestStatsCollector |
python | getsentry__sentry | src/sentry/models/organizationonboardingtask.py | {
"start": 1315,
"end": 1544
} | class ____(enum.IntEnum):
COMPLETE = 1
# deprecated - no longer used
# PENDING = 2
SKIPPED = 3
@classmethod
def values(cls) -> list[int]:
return [member.value for member in cls]
| OnboardingTaskStatus |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.