language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pandas-dev__pandas | pandas/tests/series/accessors/test_cat_accessor.py | {
"start": 371,
"end": 9646
} | class ____:
@pytest.mark.parametrize(
"method",
[
lambda x: x.cat.set_categories([1, 2, 3]),
lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True),
lambda x: x.cat.rename_categories([1, 2, 3]),
lambda x: x.cat.remove_unused_categories(),
lambda x: x.cat.remove_categories([2]),
lambda x: x.cat.add_categories([4]),
lambda x: x.cat.as_ordered(),
lambda x: x.cat.as_unordered(),
],
)
def test_getname_categorical_accessor(self, method):
# GH#17509
ser = Series([1, 2, 3], name="A").astype("category")
expected = "A"
result = method(ser).name
assert result == expected
def test_cat_accessor(self):
ser = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(ser.cat.categories, Index(["a", "b"]))
assert not ser.cat.ordered
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
res = ser.cat.set_categories(["b", "a"])
tm.assert_categorical_equal(res.values, exp)
ser[:] = "a"
ser = ser.cat.remove_unused_categories()
tm.assert_index_equal(ser.cat.categories, Index(["a"]))
def test_cat_accessor_api(self):
# GH#9322
assert Series.cat is CategoricalAccessor
ser = Series(list("aabbcde")).astype("category")
assert isinstance(ser.cat, CategoricalAccessor)
invalid = Series([1])
with pytest.raises(AttributeError, match="only use .cat accessor"):
invalid.cat
assert not hasattr(invalid, "cat")
def test_cat_accessor_no_new_attributes(self):
# https://github.com/pandas-dev/pandas/issues/10673
cat = Series(list("aabbcde")).astype("category")
with pytest.raises(AttributeError, match="You cannot add any new attribute"):
cat.cat.xlabel = "a"
def test_categorical_delegations(self):
# invalid accessor
msg = r"Can only use \.cat accessor with a 'category' dtype"
with pytest.raises(AttributeError, match=msg):
Series([1, 2, 3]).cat
with pytest.raises(AttributeError, match=msg):
Series([1, 2, 3]).cat()
with pytest.raises(AttributeError, match=msg):
Series(["a", "b", "c"]).cat
with pytest.raises(AttributeError, match=msg):
Series(np.arange(5.0)).cat
with pytest.raises(AttributeError, match=msg):
Series([Timestamp("20130101")]).cat
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = Index(["a", "b", "c"])
tm.assert_index_equal(ser.cat.categories, exp_categories)
ser = ser.cat.rename_categories([1, 2, 3])
exp_categories = Index([1, 2, 3])
tm.assert_index_equal(ser.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype="int8")
tm.assert_series_equal(ser.cat.codes, exp_codes)
assert ser.cat.ordered
ser = ser.cat.as_unordered()
assert not ser.cat.ordered
ser = ser.cat.as_ordered()
assert ser.cat.ordered
# reorder
ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = Index(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
ser = ser.cat.set_categories(["c", "b", "a"])
tm.assert_index_equal(ser.cat.categories, exp_categories)
tm.assert_numpy_array_equal(ser.values.__array__(), exp_values)
tm.assert_numpy_array_equal(ser.__array__(), exp_values)
# remove unused categories
ser = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"]))
exp_categories = Index(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"], dtype=np.object_)
ser = ser.cat.remove_unused_categories()
tm.assert_index_equal(ser.cat.categories, exp_categories)
tm.assert_numpy_array_equal(ser.values.__array__(), exp_values)
tm.assert_numpy_array_equal(ser.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error
# on wrong inputs:
msg = "'Series' object has no attribute 'set_categories'"
with pytest.raises(AttributeError, match=msg):
ser.set_categories([4, 3, 2, 1])
# right: ser.cat.set_categories([4,3,2,1])
# GH#18862 (let Series.cat.rename_categories take callables)
ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))
result = ser.cat.rename_categories(lambda x: x.upper())
expected = Series(
Categorical(["A", "B", "C", "A"], categories=["A", "B", "C"], ordered=True)
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"idx",
[
date_range("1/1/2015", periods=5, unit="ns"),
date_range("1/1/2015", periods=5, tz="MET", unit="ns"),
period_range("1/1/2015", freq="D", periods=5),
timedelta_range("1 days", "10 days"),
],
)
def test_dt_accessor_api_for_categorical(self, idx):
# https://github.com/pandas-dev/pandas/issues/10661
ser = Series(idx)
cat = ser.astype("category")
# only testing field (like .day)
# and bool (is_month_start)
attr_names = type(ser._values)._datetimelike_ops
assert isinstance(cat.dt, Properties)
special_func_defs = [
("strftime", ("%Y-%m-%d",), {}),
("round", ("D",), {}),
("floor", ("D",), {}),
("ceil", ("D",), {}),
("asfreq", ("D",), {}),
("as_unit", ("s"), {}),
]
if idx.dtype == "M8[ns]":
# exclude dt64tz since that is already localized and would raise
tup = ("tz_localize", ("UTC",), {})
special_func_defs.append(tup)
elif idx.dtype.kind == "M":
# exclude dt64 since that is not localized so would raise
tup = ("tz_convert", ("EST",), {})
special_func_defs.append(tup)
_special_func_names = [f[0] for f in special_func_defs]
_ignore_names = ["components", "tz_localize", "tz_convert"]
func_names = [
fname
for fname in dir(ser.dt)
if not (
fname.startswith("_")
or fname in attr_names
or fname in _special_func_names
or fname in _ignore_names
)
]
func_defs = [(fname, (), {}) for fname in func_names]
func_defs.extend(
f_def for f_def in special_func_defs if f_def[0] in dir(ser.dt)
)
for func, args, kwargs in func_defs:
warn_cls = []
if func == "to_period" and getattr(idx, "tz", None) is not None:
# dropping TZ
warn_cls.append(UserWarning)
elif func == "to_pytimedelta":
# GH 57463
warn_cls.append(Pandas4Warning)
if warn_cls:
warn_cls = tuple(warn_cls)
else:
warn_cls = None
with tm.assert_produces_warning(warn_cls):
res = getattr(cat.dt, func)(*args, **kwargs)
exp = getattr(ser.dt, func)(*args, **kwargs)
tm.assert_equal(res, exp)
for attr in attr_names:
res = getattr(cat.dt, attr)
exp = getattr(ser.dt, attr)
tm.assert_equal(res, exp)
def test_dt_accessor_api_for_categorical_invalid(self):
invalid = Series([1, 2, 3]).astype("category")
msg = "Can only use .dt accessor with datetimelike"
with pytest.raises(AttributeError, match=msg):
invalid.dt
assert not hasattr(invalid, "str")
def test_set_categories_setitem(self):
# GH#43334
df = DataFrame({"Survived": [1, 0, 1], "Sex": [0, 1, 1]}, dtype="category")
df["Survived"] = df["Survived"].cat.rename_categories(["No", "Yes"])
df["Sex"] = df["Sex"].cat.rename_categories(["female", "male"])
# values should not be coerced to NaN
assert list(df["Sex"]) == ["female", "male", "male"]
assert list(df["Survived"]) == ["Yes", "No", "Yes"]
df["Sex"] = Categorical(df["Sex"], categories=["female", "male"], ordered=False)
df["Survived"] = Categorical(
df["Survived"], categories=["No", "Yes"], ordered=False
)
# values should not be coerced to NaN
assert list(df["Sex"]) == ["female", "male", "male"]
assert list(df["Survived"]) == ["Yes", "No", "Yes"]
def test_categorical_of_booleans_is_boolean(self):
# https://github.com/pandas-dev/pandas/issues/46313
df = DataFrame(
{"int_cat": [1, 2, 3], "bool_cat": [True, False, False]}, dtype="category"
)
value = df["bool_cat"].cat.categories.dtype
expected = np.dtype(np.bool_)
assert value is expected
| TestCatAccessor |
python | huggingface__transformers | tests/models/csm/test_modeling_csm.py | {
"start": 1411,
"end": 4485
} | class ____:
def __init__(
self,
parent,
ignore_index=-100,
batch_size=3,
seq_length=7,
is_training=True,
depth_decoder_config={
"num_codebooks": 10,
"backbone_hidden_size": 64,
"vocab_size": 6,
"hidden_size": 64,
"intermediate_size": 128,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"num_key_value_heads": 2,
"hidden_act": "silu",
"max_position_embeddings": 10,
},
codec_config={
"model_type": "mimi",
"audio_channels": 1,
"chunk_in_sec": None,
"hidden_size": 32,
"num_filters": 8,
"num_residual_layers": 1,
"upsampling_ratios": [8, 4],
"codebook_size": 64,
"vector_quantization_hidden_dimension": 64,
"upsample_groups": 32,
"num_hidden_layers": 2,
"num_attention_heads": 2,
"num_key_value_heads": 2,
"sliding_window": 4,
"codebook_dim": 64,
"use_cache": False,
},
config={
"num_codebooks": 10,
"vocab_size": 6,
"text_vocab_size": 99,
"hidden_size": 64,
"intermediate_size": 64,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"num_key_value_heads": 2,
"hidden_act": "silu",
"max_position_embeddings": 10,
"bos_token_id": 1,
"pad_token_id": 2,
"eos_token_id": 3,
"codebook_pad_token_id": 2,
"codebook_eos_token_id": 3,
},
):
self.parent = parent
self.is_training = is_training
self.ignore_index = ignore_index
self.depth_decoder_config = depth_decoder_config
self.codec_config = codec_config
self.config = config
self.seq_length = seq_length
self.batch_size = batch_size
self.num_hidden_layers = config["num_hidden_layers"]
self.vocab_size = config["vocab_size"]
self.hidden_size = config["hidden_size"]
self.num_attention_heads = config["num_attention_heads"]
self.pad_token_id = config["pad_token_id"]
def get_config(self):
return CsmConfig(
depth_decoder_config=self.depth_decoder_config,
codec_config=self.codec_config,
**self.config,
)
def prepare_config_and_inputs(self):
config = self.get_config()
input_ids = ids_tensor([self.batch_size, self.seq_length, config.num_codebooks], config.vocab_size - 1) + 1
attention_mask = input_ids[..., -1].ne(1).to(torch_device)
return config, input_ids, attention_mask
def prepare_config_and_inputs_for_common(self):
config, input_ids, attention_mask = self.prepare_config_and_inputs()
inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
| CsmModelTester |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 212433,
"end": 213616
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self, name: str, site: str, site_api_key: str, start_date: str, product_catalog: str
):
"""Airbyte Source for Chargebee.
Documentation can be found at https://apidocs.chargebee.com/docs/api
Args:
name (str): The name of the destination.
site (str): The site prefix for your Chargebee instance.
site_api_key (str): Chargebee API Key. See the docs for more information on how to obtain this key.
start_date (str): UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated.
product_catalog (str): Product Catalog version of your Chargebee site. Instructions on how to find your version you may find here under `API Version` section.
"""
self.site = check.str_param(site, "site")
self.site_api_key = check.str_param(site_api_key, "site_api_key")
self.start_date = check.str_param(start_date, "start_date")
self.product_catalog = check.str_param(product_catalog, "product_catalog")
super().__init__("Chargebee", name)
| ChargebeeSource |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_bootstrap63.py | {
"start": 306,
"end": 1534
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("bootstrap63.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with default title."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [67991424, 68001152]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_title(
{
"name": "Title",
"border": {"color": "yellow"},
"fill": {"color": "red"},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | tests/admin_views/test_actions.py | {
"start": 19377,
"end": 21385
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.s1 = ExternalSubscriber.objects.create(
name="John Doe", email="john@example.org"
)
cls.s2 = Subscriber.objects.create(
name="Max Mustermann", email="max@example.org"
)
cls.user = User.objects.create_user(
username="user",
password="secret",
email="user@example.com",
is_staff=True,
)
permission = Permission.objects.get(codename="change_subscriber")
cls.user.user_permissions.add(permission)
def setUp(self):
self.client.force_login(self.user)
def test_model_admin_no_delete_permission(self):
"""
Permission is denied if the user doesn't have delete permission for the
model (Subscriber).
"""
action_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk],
"action": "delete_selected",
}
url = reverse("admin:admin_views_subscriber_changelist")
response = self.client.post(url, action_data)
self.assertRedirects(response, url, fetch_redirect_response=False)
response = self.client.get(response.url)
self.assertContains(response, "No action selected.")
def test_model_admin_no_delete_permission_externalsubscriber(self):
"""
Permission is denied if the user doesn't have delete permission for a
related model (ExternalSubscriber).
"""
permission = Permission.objects.get(codename="delete_subscriber")
self.user.user_permissions.add(permission)
delete_confirmation_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk, self.s2.pk],
"action": "delete_selected",
"post": "yes",
}
response = self.client.post(
reverse("admin:admin_views_subscriber_changelist"), delete_confirmation_data
)
self.assertEqual(response.status_code, 403)
| AdminActionsPermissionTests |
python | huggingface__transformers | src/transformers/models/glm4v/modular_glm4v.py | {
"start": 62671,
"end": 70726
} | class ____(Qwen2_5_VLForConditionalGeneration):
_checkpoint_conversion_mapping = {}
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.Tensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Glm4vCausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Glm4vForConditionalGeneration
>>> model = Glm4vForConditionalGeneration.from_pretrained("THUDM/GLM-4.1V-9B-Thinking")
>>> processor = AutoProcessor.from_pretrained("THUDM/GLM-4.1V-9B-Thinking")
>>> messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "What is shown in this image?"},
],
},
]
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
>>> inputs = processor(text=[text], images=[image], vision_infos=[vision_infos])
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"The image shows a street scene with a red stop sign in the foreground. In the background, there is a large red gate with Chinese characters ..."
```"""
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
pixel_values_videos=pixel_values_videos,
image_grid_thw=image_grid_thw,
video_grid_thw=video_grid_thw,
position_ids=position_ids,
attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)
return Glm4vCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
rope_deltas=outputs.rope_deltas,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
cache_position=None,
position_ids=None,
use_cache=True,
pixel_values=None,
pixel_values_videos=None,
image_grid_thw=None,
video_grid_thw=None,
**kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
position_ids=position_ids,
pixel_values=pixel_values,
pixel_values_videos=pixel_values_videos,
image_grid_thw=image_grid_thw,
video_grid_thw=video_grid_thw,
use_cache=use_cache,
**kwargs,
)
# GLM-4.1V position_ids are prepareed with rope_deltas in forward
model_inputs["position_ids"] = None
if cache_position[0] != 0:
model_inputs["pixel_values"] = None
model_inputs["pixel_values_videos"] = None
return model_inputs
def _get_image_nums_and_video_nums(
self,
input_ids: Optional[torch.LongTensor],
inputs_embeds: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Get the number of images and videos for each sample to calculate the separation length of the sample tensor.
These parameters are not passed through the processor to avoid unpredictable impacts from interface modifications.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Returns:
image_nums (`torch.LongTensor` of shape `(batch_size, num_images_sample)`)
video_nums (`torch.LongTensor` of shape `(batch_size, num_videos_sample)`)
"""
if inputs_embeds is not None:
is_image = (
inputs_embeds
== self.get_input_embeddings()(
torch.tensor(self.config.image_start_token_id, dtype=torch.long, device=inputs_embeds.device)
)
)[..., 0]
is_video_start = (
inputs_embeds
== self.get_input_embeddings()(
torch.tensor(self.config.video_start_token_id, dtype=torch.long, device=inputs_embeds.device)
)
)[..., 0]
is_video_end = (
inputs_embeds
== self.get_input_embeddings()(
torch.tensor(self.config.video_end_token_id, dtype=torch.long, device=inputs_embeds.device)
)
)[..., 0]
else:
is_image = input_ids == self.config.image_start_token_id
is_video_start = input_ids == self.config.video_start_token_id
is_video_end = input_ids == self.config.video_end_token_id
# Cumulative sum to track if we're inside a video span
# We'll assume well-formed video tags (i.e. matching starts and ends)
video_level = torch.cumsum(is_video_start.int() - is_video_end.int(), dim=1)
inside_video = video_level > 0 # shape (batch_size, seq_length)
# Mask out image tokens that are inside video spans
standalone_images = is_image & (~inside_video)
# Count per batch
image_counts = standalone_images.sum(dim=1)
video_counts = is_video_start.sum(dim=1)
return image_counts, video_counts
| Glm4vForConditionalGeneration |
python | kamyu104__LeetCode-Solutions | Python/diagonal-traverse-ii.py | {
"start": 71,
"end": 688
} | class ____(object):
def findDiagonalOrder(self, nums):
"""
:type nums: List[List[int]]
:rtype: List[int]
"""
result, dq, col = [], collections.deque(), 0
for i in xrange(len(nums)+max(itertools.imap(len, nums))-1):
new_dq = collections.deque()
if i < len(nums):
dq.appendleft((i, 0))
for r, c in dq:
result.append(nums[r][c])
if c+1 < len(nums[r]):
new_dq.append((r, c+1))
dq = new_dq
return result
# Time: O(m * n)
# Space: O(m * n)
| Solution |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/sanity/__init__.py | {
"start": 25577,
"end": 25664
} | class ____(TestMessage):
"""Single sanity test message for one file."""
| SanityMessage |
python | sanic-org__sanic | sanic/mixins/static.py | {
"start": 854,
"end": 6768
} | class ____(BaseMixin, metaclass=SanicMeta):
def __init__(self, *args, **kwargs) -> None:
self._future_statics: set[FutureStatic] = set()
def _apply_static(self, static: FutureStatic) -> Route:
raise NotImplementedError # noqa
def static(
self,
uri: str,
file_or_directory: Union[PathLike, str],
pattern: str = r"/?.+",
use_modified_since: bool = True,
use_content_range: bool = False,
stream_large_files: Union[bool, int] = False,
name: str = "static",
host: Optional[str] = None,
strict_slashes: Optional[bool] = None,
content_type: Optional[str] = None,
apply: bool = True,
resource_type: Optional[str] = None,
index: Optional[Union[str, Sequence[str]]] = None,
directory_view: bool = False,
directory_handler: Optional[DirectoryHandler] = None,
):
"""Register a root to serve files from. The input can either be a file or a directory.
This method provides an easy and simple way to set up the route necessary to serve static files.
Args:
uri (str): URL path to be used for serving static content.
file_or_directory (Union[PathLike, str]): Path to the static file
or directory with static files.
pattern (str, optional): Regex pattern identifying the valid
static files. Defaults to `r"/?.+"`.
use_modified_since (bool, optional): If true, send file modified
time, and return not modified if the browser's matches the
server's. Defaults to `True`.
use_content_range (bool, optional): If true, process header for
range requests and sends the file part that is requested.
Defaults to `False`.
stream_large_files (Union[bool, int], optional): If `True`, use
the `StreamingHTTPResponse.file_stream` handler rather than
the `HTTPResponse.file handler` to send the file. If this
is an integer, it represents the threshold size to switch
to `StreamingHTTPResponse.file_stream`. Defaults to `False`,
which means that the response will not be streamed.
name (str, optional): User-defined name used for url_for.
Defaults to `"static"`.
host (Optional[str], optional): Host IP or FQDN for the
service to use.
strict_slashes (Optional[bool], optional): Instruct Sanic to
check if the request URLs need to terminate with a slash.
content_type (Optional[str], optional): User-defined content type
for header.
apply (bool, optional): If true, will register the route
immediately. Defaults to `True`.
resource_type (Optional[str], optional): Explicitly declare a
resource to be a `"file"` or a `"dir"`.
index (Optional[Union[str, Sequence[str]]], optional): When
exposing against a directory, index is the name that will
be served as the default file. When multiple file names are
passed, then they will be tried in order.
directory_view (bool, optional): Whether to fallback to showing
the directory viewer when exposing a directory. Defaults
to `False`.
directory_handler (Optional[DirectoryHandler], optional): An
instance of DirectoryHandler that can be used for explicitly
controlling and subclassing the behavior of the default
directory handler.
Returns:
List[sanic.router.Route]: Routes registered on the router.
Examples:
Serving a single file:
```python
app.static('/foo', 'path/to/static/file.txt')
```
Serving all files from a directory:
```python
app.static('/static', 'path/to/static/directory')
```
Serving large files with a specific threshold:
```python
app.static('/static', 'path/to/large/files', stream_large_files=1000000)
```
""" # noqa: E501
name = self.generate_name(name)
if strict_slashes is None and self.strict_slashes is not None:
strict_slashes = self.strict_slashes
if not isinstance(file_or_directory, (str, bytes, PurePath)):
raise ValueError(
f"Static route must be a valid path, not {file_or_directory}"
)
try:
file_or_directory = Path(file_or_directory).resolve()
except TypeError:
raise TypeError(
"Static file or directory must be a path-like object or string"
)
if directory_handler and (directory_view or index):
raise ValueError(
"When explicitly setting directory_handler, you cannot "
"set either directory_view or index. Instead, pass "
"these arguments to your DirectoryHandler instance."
)
if not directory_handler:
directory_handler = DirectoryHandler(
uri=uri,
directory=file_or_directory,
directory_view=directory_view,
index=index,
)
static = FutureStatic(
uri,
file_or_directory,
pattern,
use_modified_since,
use_content_range,
stream_large_files,
name,
host,
strict_slashes,
content_type,
resource_type,
directory_handler,
)
self._future_statics.add(static)
if apply:
self._apply_static(static)
| StaticMixin |
python | hynek__structlog | tests/test_twisted.py | {
"start": 1436,
"end": 2520
} | class ____:
def test_msg(self):
"""
log.msg renders correctly.
"""
bl = build_bl()
assert "foo=42 event='event'" == bl.msg("event", foo=42)
def test_errVanilla(self):
"""
log.err renders correctly if no failure is attached.
"""
bl = build_bl()
assert "foo=42 event='event'" == bl.err("event", foo=42)
def test_errWithFailure(self):
"""
Failures are correctly injected into the log entries.
"""
bl = build_bl(
processors=[EventAdapter(dictRenderer=KeyValueRenderer())]
)
try:
raise ValueError
except ValueError:
# Use str() for comparison to avoid tricky
# deep-compares of Failures.
assert str(
(
(),
{
"_stuff": Failure(ValueError()),
"_why": "foo=42 event='event'",
},
)
) == str(bl.err("event", foo=42))
| TestBoundLogger |
python | networkx__networkx | networkx/algorithms/flow/networksimplex.py | {
"start": 243,
"end": 25098
} | class ____:
def __init__(
self, G, multigraph, demand="demand", capacity="capacity", weight="weight"
):
# Number all nodes and edges and hereafter reference them using ONLY their numbers
self.node_list = list(G) # nodes
self.node_indices = {u: i for i, u in enumerate(self.node_list)} # node indices
self.node_demands = [
G.nodes[u].get(demand, 0) for u in self.node_list
] # node demands
self.edge_sources = [] # edge sources
self.edge_targets = [] # edge targets
if multigraph:
self.edge_keys = [] # edge keys
self.edge_indices = {} # edge indices
self.edge_capacities = [] # edge capacities
self.edge_weights = [] # edge weights
if not multigraph:
edges = G.edges(data=True)
else:
edges = G.edges(data=True, keys=True)
inf = float("inf")
edges = (e for e in edges if e[0] != e[1] and e[-1].get(capacity, inf) != 0)
for i, e in enumerate(edges):
self.edge_sources.append(self.node_indices[e[0]])
self.edge_targets.append(self.node_indices[e[1]])
if multigraph:
self.edge_keys.append(e[2])
self.edge_indices[e[:-1]] = i
self.edge_capacities.append(e[-1].get(capacity, inf))
self.edge_weights.append(e[-1].get(weight, 0))
# spanning tree specific data to be initialized
self.edge_count = None # number of edges
self.edge_flow = None # edge flows
self.node_potentials = None # node potentials
self.parent = None # parent nodes
self.parent_edge = None # edges to parents
self.subtree_size = None # subtree sizes
self.next_node_dft = None # next nodes in depth-first thread
self.prev_node_dft = None # previous nodes in depth-first thread
self.last_descendent_dft = None # last descendants in depth-first thread
self._spanning_tree_initialized = (
False # False until initialize_spanning_tree() is called
)
def initialize_spanning_tree(self, n, faux_inf):
self.edge_count = len(self.edge_indices) # number of edges
self.edge_flow = list(
chain(repeat(0, self.edge_count), (abs(d) for d in self.node_demands))
) # edge flows
self.node_potentials = [
faux_inf if d <= 0 else -faux_inf for d in self.node_demands
] # node potentials
self.parent = list(chain(repeat(-1, n), [None])) # parent nodes
self.parent_edge = list(
range(self.edge_count, self.edge_count + n)
) # edges to parents
self.subtree_size = list(chain(repeat(1, n), [n + 1])) # subtree sizes
self.next_node_dft = list(
chain(range(1, n), [-1, 0])
) # next nodes in depth-first thread
self.prev_node_dft = list(range(-1, n)) # previous nodes in depth-first thread
self.last_descendent_dft = list(
chain(range(n), [n - 1])
) # last descendants in depth-first thread
self._spanning_tree_initialized = True # True only if all the assignments pass
def find_apex(self, p, q):
"""
Find the lowest common ancestor of nodes p and q in the spanning tree.
"""
size_p = self.subtree_size[p]
size_q = self.subtree_size[q]
while True:
while size_p < size_q:
p = self.parent[p]
size_p = self.subtree_size[p]
while size_p > size_q:
q = self.parent[q]
size_q = self.subtree_size[q]
if size_p == size_q:
if p != q:
p = self.parent[p]
size_p = self.subtree_size[p]
q = self.parent[q]
size_q = self.subtree_size[q]
else:
return p
def trace_path(self, p, w):
"""
Returns the nodes and edges on the path from node p to its ancestor w.
"""
Wn = [p]
We = []
while p != w:
We.append(self.parent_edge[p])
p = self.parent[p]
Wn.append(p)
return Wn, We
def find_cycle(self, i, p, q):
"""
Returns the nodes and edges on the cycle containing edge i == (p, q)
when the latter is added to the spanning tree.
The cycle is oriented in the direction from p to q.
"""
w = self.find_apex(p, q)
Wn, We = self.trace_path(p, w)
Wn.reverse()
We.reverse()
if We != [i]:
We.append(i)
WnR, WeR = self.trace_path(q, w)
del WnR[-1]
Wn += WnR
We += WeR
return Wn, We
def augment_flow(self, Wn, We, f):
"""
Augment f units of flow along a cycle represented by Wn and We.
"""
for i, p in zip(We, Wn):
if self.edge_sources[i] == p:
self.edge_flow[i] += f
else:
self.edge_flow[i] -= f
def trace_subtree(self, p):
"""
Yield the nodes in the subtree rooted at a node p.
"""
yield p
l = self.last_descendent_dft[p]
while p != l:
p = self.next_node_dft[p]
yield p
def remove_edge(self, s, t):
"""
Remove an edge (s, t) where parent[t] == s from the spanning tree.
"""
size_t = self.subtree_size[t]
prev_t = self.prev_node_dft[t]
last_t = self.last_descendent_dft[t]
next_last_t = self.next_node_dft[last_t]
# Remove (s, t).
self.parent[t] = None
self.parent_edge[t] = None
# Remove the subtree rooted at t from the depth-first thread.
self.next_node_dft[prev_t] = next_last_t
self.prev_node_dft[next_last_t] = prev_t
self.next_node_dft[last_t] = t
self.prev_node_dft[t] = last_t
# Update the subtree sizes and last descendants of the (old) ancestors
# of t.
while s is not None:
self.subtree_size[s] -= size_t
if self.last_descendent_dft[s] == last_t:
self.last_descendent_dft[s] = prev_t
s = self.parent[s]
def make_root(self, q):
    """
    Make a node q the root of its containing subtree.

    Reverses the parent pointers along the path from the current root
    down to q, rebuilding subtree sizes, parent edges, and the
    depth-first thread at every step.
    """
    # Collect the root-to-q path (built bottom-up, then reversed).
    ancestors = []
    while q is not None:
        ancestors.append(q)
        q = self.parent[q]
    ancestors.reverse()
    # Flip each (p, q) parent/child pair along the path, top-down.
    for p, q in zip(ancestors, islice(ancestors, 1, None)):
        size_p = self.subtree_size[p]
        last_p = self.last_descendent_dft[p]
        prev_q = self.prev_node_dft[q]
        last_q = self.last_descendent_dft[q]
        next_last_q = self.next_node_dft[last_q]
        # Make p a child of q.
        self.parent[p] = q
        self.parent[q] = None
        self.parent_edge[p] = self.parent_edge[q]
        self.parent_edge[q] = None
        # q now contains everything p contained; p keeps the remainder.
        self.subtree_size[p] = size_p - self.subtree_size[q]
        self.subtree_size[q] = size_p
        # Remove the subtree rooted at q from the depth-first thread.
        self.next_node_dft[prev_q] = next_last_q
        self.prev_node_dft[next_last_q] = prev_q
        self.next_node_dft[last_q] = q
        self.prev_node_dft[q] = last_q
        if last_p == last_q:
            # p's last descendant was inside q's subtree; fall back to
            # the node that preceded q in the thread.
            self.last_descendent_dft[p] = prev_q
            last_p = prev_q
        # Add the remaining parts of the subtree rooted at p as a subtree
        # of q in the depth-first thread.
        self.prev_node_dft[p] = last_q
        self.next_node_dft[last_q] = p
        self.next_node_dft[last_p] = q
        self.prev_node_dft[q] = last_p
        self.last_descendent_dft[q] = last_p
def add_edge(self, i, p, q):
    """
    Add an edge (p, q) to the spanning tree where q is the root of a subtree.

    Attaches q's subtree under p via tree edge i, splices the subtree's
    depth-first thread immediately after p's current subtree, and updates
    sizes and last-descendant pointers up the tree from p.
    """
    last_p = self.last_descendent_dft[p]
    next_last_p = self.next_node_dft[last_p]
    size_q = self.subtree_size[q]
    last_q = self.last_descendent_dft[q]
    # Make q a child of p.
    self.parent[q] = p
    self.parent_edge[q] = i
    # Insert the subtree rooted at q into the depth-first thread.
    self.next_node_dft[last_p] = q
    self.prev_node_dft[q] = last_p
    self.prev_node_dft[next_last_p] = last_q
    self.next_node_dft[last_q] = next_last_p
    # Update the subtree sizes and last descendants of the (new) ancestors
    # of q.
    while p is not None:
        self.subtree_size[p] += size_q
        # An ancestor whose last descendant was last_p now ends at q's
        # last descendant instead.
        if self.last_descendent_dft[p] == last_p:
            self.last_descendent_dft[p] = last_q
        p = self.parent[p]
def update_potentials(self, i, p, q):
    """
    Update the potentials of the nodes in the subtree rooted at a node
    q connected to its parent p by an edge i.
    """
    pot = self.node_potentials
    # The shift restores the complementary-slackness condition across
    # edge i; its sign depends on the edge's orientation relative to (p, q).
    if q == self.edge_targets[i]:
        delta = pot[p] - self.edge_weights[i] - pot[q]
    else:
        delta = pot[p] + self.edge_weights[i] - pot[q]
    # Apply the same shift to every node below q.
    for node in self.trace_subtree(q):
        pot[node] += delta
def reduced_cost(self, i):
    """Returns the reduced cost of an edge i."""
    cost = (
        self.edge_weights[i]
        - self.node_potentials[self.edge_sources[i]]
        + self.node_potentials[self.edge_targets[i]]
    )
    # A saturated (nonzero-flow) edge can only be profitable in the
    # reverse direction, so its reduced cost is negated.
    if self.edge_flow[i] == 0:
        return cost
    return -cost
def find_entering_edges(self):
    """Yield entering edges until none can be found."""
    if self.edge_count == 0:
        return

    # Entering edges are found by combining Dantzig's rule and Bland's
    # rule: the edges are scanned cyclically in blocks of size
    # block_size; within a block the cheapest reduced cost wins
    # (Dantzig), while the cyclic order across blocks (Bland) prevents
    # cycling. Termination: num_blocks consecutive barren blocks cover
    # every edge, so no eligible edge remains.
    block_size = int(ceil(sqrt(self.edge_count)))  # pivot block size
    num_blocks = (self.edge_count + block_size - 1) // block_size
    barren = 0  # consecutive blocks without an eligible entering edge
    start = 0  # first edge of the current block
    while barren < num_blocks:
        # Determine the next block of edges, wrapping around the end
        # of the edge list when necessary.
        stop = start + block_size
        if stop <= self.edge_count:
            candidates = range(start, stop)
        else:
            stop -= self.edge_count
            candidates = chain(range(start, self.edge_count), range(stop))
        start = stop
        # First edge in the block with the lowest reduced cost.
        best = min(candidates, key=self.reduced_cost)
        if self.reduced_cost(best) >= 0:
            # No entering edge found in the current block.
            barren += 1
            continue
        # Entering edge found; orient it in the direction of flow change.
        if self.edge_flow[best] == 0:
            tail, head = self.edge_sources[best], self.edge_targets[best]
        else:
            tail, head = self.edge_targets[best], self.edge_sources[best]
        yield best, tail, head
        barren = 0
    # All edges have nonnegative reduced costs. The current flow is
    # optimal.
def residual_capacity(self, i, p):
    """Returns the residual capacity of an edge i in the direction away
    from its endpoint p.
    """
    # Leaving the source: remaining headroom. Leaving the target:
    # the flow that could be pushed back.
    if self.edge_sources[i] == p:
        return self.edge_capacities[i] - self.edge_flow[i]
    return self.edge_flow[i]
def find_leaving_edge(self, Wn, We):
    """Returns the leaving edge in a cycle represented by Wn and We."""
    # Scan the cycle backwards so that, among ties, the edge closest to
    # the end of the cycle wins (the leaving-arc rule).
    pairs = zip(reversed(We), reversed(Wn))
    j, s = min(pairs, key=lambda pair: self.residual_capacity(*pair))
    # The other endpoint of the leaving edge.
    if self.edge_sources[j] == s:
        t = self.edge_targets[j]
    else:
        t = self.edge_sources[j]
    return j, s, t
@not_implemented_for("undirected")
@nx._dispatchable(
    node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0}
)
def network_simplex(G, demand="demand", capacity="capacity", weight="weight"):
    r"""Find a minimum cost flow satisfying all demands in digraph G.

    This is a primal network simplex algorithm that uses the leaving
    arc rule to prevent cycling.

    G is a digraph with edge costs and capacities and in which nodes
    have demand, i.e., they want to send or receive some amount of
    flow. A negative demand means that the node wants to send flow, a
    positive demand means that the node want to receive flow. A flow on
    the digraph G satisfies all demand if the net flow into each node
    is equal to the demand of that node.

    Parameters
    ----------
    G : NetworkX graph
        DiGraph on which a minimum cost flow satisfying all demands is
        to be found.

    demand : string
        Nodes of the graph G are expected to have an attribute demand
        that indicates how much flow a node wants to send (negative
        demand) or receive (positive demand). Note that the sum of the
        demands should be 0 otherwise the problem in not feasible. If
        this attribute is not present, a node is considered to have 0
        demand. Default value: 'demand'.

    capacity : string
        Edges of the graph G are expected to have an attribute capacity
        that indicates how much flow the edge can support. If this
        attribute is not present, the edge is considered to have
        infinite capacity. Default value: 'capacity'.

    weight : string
        Edges of the graph G are expected to have an attribute weight
        that indicates the cost incurred by sending one unit of flow on
        that edge. If not present, the weight is considered to be 0.
        Default value: 'weight'.

    Returns
    -------
    flowCost : integer, float
        Cost of a minimum cost flow satisfying all demands.

    flowDict : dictionary
        Dictionary of dictionaries keyed by nodes such that
        flowDict[u][v] is the flow edge (u, v).

    Raises
    ------
    NetworkXError
        This exception is raised if the input graph is not directed or
        not connected.

    NetworkXUnfeasible
        This exception is raised in the following situations:

        * The sum of the demands is not zero. Then, there is no
          flow satisfying all demands.
        * There is no flow satisfying all demand.

    NetworkXUnbounded
        This exception is raised if the digraph G has a cycle of
        negative cost and infinite capacity. Then, the cost of a flow
        satisfying all demands is unbounded below.

    Notes
    -----
    This algorithm is not guaranteed to work if edge weights or demands
    are floating point numbers (overflows and roundoff errors can
    cause problems). As a workaround you can use integer numbers by
    multiplying the relevant edge attributes by a convenient
    constant factor (eg 100).

    See also
    --------
    cost_of_flow, max_flow_min_cost, min_cost_flow, min_cost_flow_cost

    Examples
    --------
    A simple example of a min cost flow problem.

    >>> G = nx.DiGraph()
    >>> G.add_node("a", demand=-5)
    >>> G.add_node("d", demand=5)
    >>> G.add_edge("a", "b", weight=3, capacity=4)
    >>> G.add_edge("a", "c", weight=6, capacity=10)
    >>> G.add_edge("b", "d", weight=1, capacity=9)
    >>> G.add_edge("c", "d", weight=2, capacity=5)
    >>> flowCost, flowDict = nx.network_simplex(G)
    >>> flowCost
    24
    >>> flowDict
    {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}}

    The mincost flow algorithm can also be used to solve shortest path
    problems. To find the shortest path between two nodes u and v,
    give all edges an infinite capacity, give node u a demand of -1 and
    node v a demand a 1. Then run the network simplex. The value of a
    min cost flow will be the distance between u and v and edges
    carrying positive flow will indicate the path.

    >>> G = nx.DiGraph()
    >>> G.add_weighted_edges_from(
    ...     [
    ...         ("s", "u", 10),
    ...         ("s", "x", 5),
    ...         ("u", "v", 1),
    ...         ("u", "x", 2),
    ...         ("v", "y", 1),
    ...         ("x", "u", 3),
    ...         ("x", "v", 5),
    ...         ("x", "y", 2),
    ...         ("y", "s", 7),
    ...         ("y", "v", 6),
    ...     ]
    ... )
    >>> G.add_node("s", demand=-1)
    >>> G.add_node("v", demand=1)
    >>> flowCost, flowDict = nx.network_simplex(G)
    >>> flowCost == nx.shortest_path_length(G, "s", "v", weight="weight")
    True
    >>> sorted([(u, v) for u in flowDict for v in flowDict[u] if flowDict[u][v] > 0])
    [('s', 'x'), ('u', 'v'), ('x', 'u')]
    >>> nx.shortest_path(G, "s", "v", weight="weight")
    ['s', 'x', 'u', 'v']

    It is possible to change the name of the attributes used for the
    algorithm.

    >>> G = nx.DiGraph()
    >>> G.add_node("p", spam=-4)
    >>> G.add_node("q", spam=2)
    >>> G.add_node("a", spam=-2)
    >>> G.add_node("d", spam=-1)
    >>> G.add_node("t", spam=2)
    >>> G.add_node("w", spam=3)
    >>> G.add_edge("p", "q", cost=7, vacancies=5)
    >>> G.add_edge("p", "a", cost=1, vacancies=4)
    >>> G.add_edge("q", "d", cost=2, vacancies=3)
    >>> G.add_edge("t", "q", cost=1, vacancies=2)
    >>> G.add_edge("a", "t", cost=2, vacancies=4)
    >>> G.add_edge("d", "w", cost=3, vacancies=4)
    >>> G.add_edge("t", "w", cost=4, vacancies=1)
    >>> flowCost, flowDict = nx.network_simplex(
    ...     G, demand="spam", capacity="vacancies", weight="cost"
    ... )
    >>> flowCost
    37
    >>> flowDict
    {'p': {'q': 2, 'a': 2}, 'q': {'d': 1}, 'a': {'t': 4}, 'd': {'w': 2}, 't': {'q': 1, 'w': 1}, 'w': {}}

    References
    ----------
    .. [1] Z. Kiraly, P. Kovacs.
           Efficient implementation of minimum-cost flow algorithms.
           Acta Universitatis Sapientiae, Informatica 4(1):67--118. 2012.
    .. [2] R. Barr, F. Glover, D. Klingman.
           Enhancement of spanning tree labeling procedures for network
           optimization.
           INFOR 17(1):16--34. 1979.
    """
    ###########################################################################
    # Problem essentials extraction and sanity check
    ###########################################################################

    if len(G) == 0:
        raise nx.NetworkXError("graph has no nodes")

    multigraph = G.is_multigraph()

    # extracting data essential to problem
    DEAF = _DataEssentialsAndFunctions(
        G, multigraph, demand=demand, capacity=capacity, weight=weight
    )

    ###########################################################################
    # Quick Error Detection
    ###########################################################################

    inf = float("inf")
    for u, d in zip(DEAF.node_list, DEAF.node_demands):
        if abs(d) == inf:
            raise nx.NetworkXError(f"node {u!r} has infinite demand")
    for e, w in zip(DEAF.edge_indices, DEAF.edge_weights):
        if abs(w) == inf:
            raise nx.NetworkXError(f"edge {e!r} has infinite weight")
    # NOTE(review): self-loops appear to be excluded from DEAF's edge
    # arrays (they get their own pass here and again at the end) —
    # confirm in _DataEssentialsAndFunctions.
    if not multigraph:
        edges = nx.selfloop_edges(G, data=True)
    else:
        edges = nx.selfloop_edges(G, data=True, keys=True)
    for e in edges:
        if abs(e[-1].get(weight, 0)) == inf:
            raise nx.NetworkXError(f"edge {e[:-1]!r} has infinite weight")

    ###########################################################################
    # Quick Infeasibility Detection
    ###########################################################################

    if sum(DEAF.node_demands) != 0:
        raise nx.NetworkXUnfeasible("total node demand is not zero")
    for e, c in zip(DEAF.edge_indices, DEAF.edge_capacities):
        if c < 0:
            raise nx.NetworkXUnfeasible(f"edge {e!r} has negative capacity")
    if not multigraph:
        edges = nx.selfloop_edges(G, data=True)
    else:
        edges = nx.selfloop_edges(G, data=True, keys=True)
    for e in edges:
        if e[-1].get(capacity, inf) < 0:
            raise nx.NetworkXUnfeasible(f"edge {e[:-1]!r} has negative capacity")

    ###########################################################################
    # Initialization
    ###########################################################################

    # Add a dummy node -1 and connect all existing nodes to it with infinite-
    # capacity dummy edges. Node -1 will serve as the root of the
    # spanning tree of the network simplex method. The new edges will used to
    # trivially satisfy the node demands and create an initial strongly
    # feasible spanning tree.
    for i, d in enumerate(DEAF.node_demands):
        # Must be greater-than here. Zero-demand nodes must have
        # edges pointing towards the root to ensure strong feasibility.
        if d > 0:
            DEAF.edge_sources.append(-1)
            DEAF.edge_targets.append(i)
        else:
            DEAF.edge_sources.append(i)
            DEAF.edge_targets.append(-1)
    # faux_inf is a finite stand-in for infinity: larger than any cost or
    # flow the real problem can produce, so a dummy edge still carrying
    # flow at the end signals infeasibility. The `or 1` guards against a
    # degenerate all-zero problem.
    faux_inf = (
        3
        * max(
            sum(c for c in DEAF.edge_capacities if c < inf),
            sum(abs(w) for w in DEAF.edge_weights),
            sum(abs(d) for d in DEAF.node_demands),
        )
        or 1
    )

    n = len(DEAF.node_list)  # number of nodes
    # The n dummy edges occupy the last n slots of the edge arrays.
    DEAF.edge_weights.extend(repeat(faux_inf, n))
    DEAF.edge_capacities.extend(repeat(faux_inf, n))

    # Construct the initial spanning tree.
    DEAF.initialize_spanning_tree(n, faux_inf)

    ###########################################################################
    # Pivot loop
    ###########################################################################

    # Standard simplex pivot: find the cycle closed by the entering edge,
    # push as much flow as the bottleneck (leaving) edge allows, then
    # restructure the spanning tree and repair the node potentials.
    for i, p, q in DEAF.find_entering_edges():
        Wn, We = DEAF.find_cycle(i, p, q)
        j, s, t = DEAF.find_leaving_edge(Wn, We)
        DEAF.augment_flow(Wn, We, DEAF.residual_capacity(j, s))
        # Do nothing more if the entering edge is the same as the leaving edge.
        if i != j:
            if DEAF.parent[t] != s:
                # Ensure that s is the parent of t.
                s, t = t, s
            if We.index(i) > We.index(j):
                # Ensure that q is in the subtree rooted at t.
                p, q = q, p
            DEAF.remove_edge(s, t)
            DEAF.make_root(q)
            DEAF.add_edge(i, p, q)
            DEAF.update_potentials(i, p, q)

    ###########################################################################
    # Infeasibility and unboundedness detection
    ###########################################################################

    # Indices -n..-1 address the dummy edges appended above; any residual
    # flow on them means the real network could not satisfy the demands.
    if any(DEAF.edge_flow[i] != 0 for i in range(-n, 0)):
        raise nx.NetworkXUnfeasible("no flow satisfies all node demands")

    if any(DEAF.edge_flow[i] * 2 >= faux_inf for i in range(DEAF.edge_count)) or any(
        e[-1].get(capacity, inf) == inf and e[-1].get(weight, 0) < 0
        for e in nx.selfloop_edges(G, data=True)
    ):
        raise nx.NetworkXUnbounded("negative cycle with infinite capacity found")

    ###########################################################################
    # Flow cost calculation and flow dict construction
    ###########################################################################

    # Drop the flows recorded for the dummy edges before costing.
    del DEAF.edge_flow[DEAF.edge_count :]
    flow_cost = sum(w * x for w, x in zip(DEAF.edge_weights, DEAF.edge_flow))
    flow_dict = {n: {} for n in DEAF.node_list}

    def add_entry(e):
        """Add a flow dict entry."""
        # e is (u, v, flow) or, for multigraphs, (u, v, key, flow); the
        # middle components create nested dicts as needed.
        d = flow_dict[e[0]]
        for k in e[1:-2]:
            try:
                d = d[k]
            except KeyError:
                t = {}
                d[k] = t
                d = t
        d[e[-2]] = e[-1]

    DEAF.edge_sources = (
        DEAF.node_list[s] for s in DEAF.edge_sources
    )  # Use original nodes.
    DEAF.edge_targets = (
        DEAF.node_list[t] for t in DEAF.edge_targets
    )  # Use original nodes.

    if not multigraph:
        for e in zip(DEAF.edge_sources, DEAF.edge_targets, DEAF.edge_flow):
            add_entry(e)
        edges = G.edges(data=True)
    else:
        for e in zip(
            DEAF.edge_sources, DEAF.edge_targets, DEAF.edge_keys, DEAF.edge_flow
        ):
            add_entry(e)
        edges = G.edges(data=True, keys=True)
    # Edges left out of the simplex (presumably zero-capacity edges and
    # self-loops — see NOTE above) still need entries in the flow dict.
    for e in edges:
        if e[0] != e[1]:
            if e[-1].get(capacity, inf) == 0:
                add_entry(e[:-1] + (0,))
        else:
            w = e[-1].get(weight, 0)
            if w >= 0:
                add_entry(e[:-1] + (0,))
            else:
                # A negative-weight self-loop is always saturated; fold
                # its cost in directly.
                c = e[-1][capacity]
                flow_cost += w * c
                add_entry(e[:-1] + (c,))

    return flow_cost, flow_dict
| _DataEssentialsAndFunctions |
python | spyder-ide__spyder | spyder/plugins/updatemanager/workers.py | {
"start": 2151,
"end": 2280
} | class ____:
"""Enum with the different update types."""
Major = "major"
Minor = "minor"
Micro = "micro"
| UpdateType |
python | apache__airflow | providers/common/sql/tests/unit/common/sql/operators/test_sql.py | {
"start": 2654,
"end": 4592
} | class ____:
def _construct_operator(self, **kwargs):
dag = DAG(
"test_dag",
schedule=None,
start_date=datetime.datetime(2017, 1, 1),
render_template_as_native_obj=True,
)
return BaseSQLOperator(
task_id="test_task",
conn_id="{{ conn_id }}",
database="{{ database }}",
hook_params="{{ hook_params }}",
**kwargs,
dag=dag,
)
def test_templated_fields(self):
operator = self._construct_operator()
operator.render_template_fields(
{"conn_id": "my_conn_id", "database": "my_database", "hook_params": {"key": "value"}}
)
assert operator.conn_id == "my_conn_id"
assert operator.database == "my_database"
assert operator.hook_params == {"key": "value"}
def test_when_provider_min_airflow_version_is_3_0_or_higher_remove_obsolete_get_hook_method(self):
"""
Once this test starts failing due to the fact that the minimum Airflow version is now 3.0.0 or higher
for this provider, you should remove the obsolete get_hook method in the BaseSQLOperator operator
and remove this test. This test was added to make sure to not forget to remove the fallback code
for backward compatibility with Airflow 2.8.x which isn't need anymore once this provider depends on
Airflow 3.0.0 or higher.
"""
min_airflow_version = get_provider_min_airflow_version("apache-airflow-providers-common-sql")
# Check if the current Airflow version is 3.0.0 or higher
if min_airflow_version[0] >= 3:
method_source = inspect.getsource(BaseSQLOperator.get_hook)
raise AirflowProviderDeprecationWarning(
f"Check TODO's to remove obsolete get_hook method in BaseSQLOperator:\n\r\n\r\t\t\t{method_source}"
)
| TestBaseSQLOperator |
python | scrapy__scrapy | scrapy/core/http2/protocol.py | {
"start": 1645,
"end": 2026
} | class ____(H2Error):
def __init__(
self,
remote_ip_address: IPv4Address | IPv6Address | None,
event: ConnectionTerminated,
) -> None:
self.remote_ip_address = remote_ip_address
self.terminate_event = event
def __str__(self) -> str:
return f"Received GOAWAY frame from {self.remote_ip_address!r}"
| RemoteTerminatedConnection |
python | PyCQA__pylint | tests/functional/u/unnecessary/unnecessary_dunder_call.py | {
"start": 2924,
"end": 3704
} | class ____:
@classmethod
def get_first_subclass(cls):
for subklass in cls.__subclasses__():
return subklass
return object
# Test no lint raised for attributes.
my_instance_name = x.__class__.__name__
my_pkg_version = pkg.__version__
# Allow use of dunder methods on non instantiated classes
MANUAL_SELF = int.__add__(1, 1)
MY_DICT = {"a": 1, "b": 2}
dict.__setitem__(MY_DICT, "key", "value")
# Still flag instantiated classes
INSTANTIATED_SELF = int("1").__add__(1) # [unnecessary-dunder-call]
{"a": 1, "b": 2}.__setitem__("key", "value") # [unnecessary-dunder-call]
# We also exclude dunder methods called on super
# since we can't apply alternate operators/functions here.
a = [1, 2, 3]
assert super(type(a), a).__str__() == "[1, 2, 3]"
| Base |
python | openai__openai-python | src/openai/types/responses/response_function_web_search_param.py | {
"start": 821,
"end": 1000
} | class ____(TypedDict, total=False):
type: Required[Literal["open_page"]]
"""The action type."""
url: Required[str]
"""The URL opened by the model."""
| ActionOpenPage |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/focus_component_class.py | {
"start": 210,
"end": 680
} | class ____(Widget, can_focus=True):
COMPONENT_CLASSES = {"tester--text"}
DEFAULT_CSS = """
Tester {
height: auto;
}
Tester:focus > .tester--text {
background: red;
}
"""
def __init__(self, n: int) -> None:
self.n = n
super().__init__()
def render(self) -> RenderResult:
return Text(
f"test widget {self.n}", style=self.get_component_rich_style("tester--text")
)
| Tester |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-vectorx/tests/test_vector_stores_vectorx.py | {
"start": 3678,
"end": 5400
} | class ____(VectorXTestSetup):
def setUp(self):
self.embed_model = HuggingFaceEmbedding(
model_name="sentence-transformers/all-MiniLM-L6-v2", device="cpu"
)
def test_create_vector_store_from_params(self):
vector_store = VectorXVectorStore.from_params(
api_token=self.vecx_api_token,
index_name=self.test_index_name,
encryption_key=self.encryption_key,
dimension=self.dimension,
space_type=self.space_type,
)
self.assertIsNotNone(vector_store)
self.assertEqual(vector_store.index_name, self.test_index_name)
def test_create_vector_store_with_documents(self):
vector_store = VectorXVectorStore.from_params(
api_token=self.vecx_api_token,
index_name=self.test_index_name,
encryption_key=self.encryption_key,
dimension=self.dimension,
space_type=self.space_type,
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
self.test_documents,
storage_context=storage_context,
embed_model=self.embed_model,
)
self.assertIsNotNone(index)
def test_invalid_params(self):
with pytest.raises(Exception):
VectorXVectorStore.from_params(
api_token="invalid:invalid:region",
index_name=self.test_index_name,
encryption_key=self.encryption_key,
dimension=self.dimension,
space_type=self.space_type,
)
# ------------------ Custom Retrieval Tests ------------------
| TestVectorXVectorStore |
python | huggingface__transformers | src/transformers/models/dinov2/modeling_dinov2.py | {
"start": 15889,
"end": 16741
} | class ____(nn.Module):
def __init__(self, config: Dinov2Config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([Dinov2Layer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, output_hidden_states: bool = False) -> BaseModelOutput:
all_hidden_states = [hidden_states] if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states)
if all_hidden_states:
all_hidden_states.append(hidden_states)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=tuple(all_hidden_states) if all_hidden_states else None,
)
@auto_docstring
| Dinov2Encoder |
python | facelessuser__soupsieve | tests/test_level2/test_hover.py | {
"start": 49,
"end": 554
} | class ____(util.TestCase):
"""Test hover selector."""
def test_hover(self):
"""Test hover."""
markup = """
<div>
<p>Some text <span id="1" class="foo:bar:foobar"> in a paragraph</span>.
<a id="2" class="bar" href="http://google.com">Link</a>
<a id="3">Placeholder text.</a>
</p>
</div>
"""
self.assert_selector(
markup,
"a:hover",
[],
flags=util.HTML
)
| TestHover |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-accepted-invitations.py | {
"start": 5925,
"end": 6954
} | class ____(object):
def maximumInvitations(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
def augment(adj, u, lookup, match):
for v in adj[u]:
if v in lookup:
continue
lookup.add(v)
if v not in match or augment(adj, match[v], lookup, match):
match[v] = u # greedily match
return True
return False
def hungarian(adj):
match = {}
for i in adj.iterkeys():
augment(adj, i, set(), match)
return len(match)
adj = collections.defaultdict(list)
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
if not grid[i][j]:
continue
if len(grid) < len(grid[0]):
adj[i].append(j)
else:
adj[j].append(i)
return hungarian(adj)
| Solution3 |
python | getsentry__sentry | src/sentry/feedback/endpoints/organization_feedback_categories.py | {
"start": 2046,
"end": 2342
} | class ____(TypedDict):
"""Corresponds to GenerateFeedbackLabelGroupsRequest in Seer."""
labels: list[str]
# Providing the LLM context so it knows what labels are used in the same context and are direct children
feedbacks_context: list[LabelGroupFeedbacksContext]
| LabelGroupsRequest |
python | doocs__leetcode | solution/3100-3199/3155.Maximum Number of Upgradable Servers/Solution.py | {
"start": 0,
"end": 327
} | class ____:
def maxUpgrades(
self, count: List[int], upgrade: List[int], sell: List[int], money: List[int]
) -> List[int]:
ans = []
for cnt, cost, income, cash in zip(count, upgrade, sell, money):
ans.append(min(cnt, (cnt * income + cash) // (cost + income)))
return ans
| Solution |
python | spyder-ide__spyder | spyder/plugins/ipythonconsole/widgets/debugging.py | {
"start": 984,
"end": 1456
} | class ____(IPython3Lexer):
# Detect !cmd command and highlight them
tokens = IPython3Lexer.tokens
spyder_tokens = [
(r'(!)(\w+)(.*\n)', bygroups(Operator, Keyword, using(Python3Lexer))),
(r'(%)(\w+)(.*\n)', bygroups(Operator, Keyword, using(Python3Lexer))),
(r'(?s)(\s*)(%%profile)([^\n]*\n)(.*)', bygroups(
Text, Operator, Text, using(Python3Lexer))),
]
tokens['root'] = spyder_tokens + tokens['root']
| SpyderIPy3Lexer |
python | getsentry__sentry | tests/sentry/sentry_apps/api/endpoints/test_sentry_app_installation_external_issue_actions.py | {
"start": 181,
"end": 2984
} | class ____(APITestCase):
def setUp(self) -> None:
self.superuser = self.create_user(email="a@example.com", is_superuser=True)
self.user = self.create_user(email="boop@example.com")
self.org = self.create_organization(owner=self.user)
self.project = self.create_project(organization=self.org)
self.group = self.create_group(project=self.project)
self.sentry_app = self.create_sentry_app(
name="Testin", organization=self.org, webhook_url="https://example.com"
)
self.install = self.create_sentry_app_installation(
organization=self.org, slug=self.sentry_app.slug, user=self.user
)
self.url = reverse(
"sentry-api-0-sentry-app-installation-external-issue-actions", args=[self.install.uuid]
)
@responses.activate
def test_creates_external_issue(self) -> None:
self.login_as(user=self.user)
data = {
"groupId": self.group.id,
"action": "create",
"fields": {"title": "Hello"},
"uri": "/create-issues",
}
responses.add(
method=responses.POST,
url="https://example.com/create-issues",
json={
"project": "ProjectName",
"webUrl": "https://example.com/project/issue-id",
"identifier": "issue-1",
},
status=200,
content_type="application/json",
)
response = self.client.post(self.url, data=data, format="json")
external_issue = PlatformExternalIssue.objects.get()
assert response.status_code == 200
assert response.data == {
"id": str(external_issue.id),
"issueId": str(self.group.id),
"serviceType": self.sentry_app.slug,
"displayName": "ProjectName#issue-1",
"webUrl": "https://example.com/project/issue-id",
}
@responses.activate
def test_external_issue_doesnt_get_created(self) -> None:
self.login_as(user=self.user)
data = {
"groupId": self.group.id,
"action": "create",
"fields": {"title": "Hello"},
"uri": "/create-issues",
}
responses.add(
method=responses.POST,
url="https://example.com/create-issues",
status=500,
content_type="application/json",
)
response = self.client.post(self.url, data=data, format="json")
assert response.status_code == 500
assert (
response.content
== b'{"detail":"Issue occured while trying to contact testin to link issue"}'
)
assert not PlatformExternalIssue.objects.all()
| SentryAppInstallationExternalIssuesEndpointTest |
python | huggingface__transformers | src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py | {
"start": 51299,
"end": 54992
} | class ____(nn.Module):
def __init__(self, config, seed=None):
super().__init__()
self.config = config
self.seed = seed
self.attention_type = config.attention_type
if self.attention_type == "original_full":
self.self = BigBirdPegasusSelfAttention(config)
elif self.attention_type == "block_sparse":
self.self = BigBirdPegasusBlockSparseAttention(config, seed)
else:
raise ValueError(
f"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}"
)
self.output = nn.Linear(config.hidden_size, config.hidden_size, bias=config.use_bias)
def set_attention_type(self, value: str):
if value not in ["original_full", "block_sparse"]:
raise ValueError(
f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
)
# attention type is already correctly set
if value == self.attention_type:
return
if value == "original_full":
# copy all weights to new full attention class
attn_weights = BigBirdPegasusSelfAttention(self.config)
else:
# copy all weights to new sparse attention class
attn_weights = BigBirdPegasusBlockSparseAttention(self.config, self.seed)
attn_weights.query = self.self.query
attn_weights.value = self.self.value
attn_weights.key = self.self.key
self.self = attn_weights
self.attention_type = value
if not self.training:
self.self.eval()
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
band_mask=None,
from_mask=None,
to_mask=None,
from_blocked_mask=None,
to_blocked_mask=None,
):
if self.attention_type == "original_full":
self_outputs = self.self(
hidden_states,
attention_mask,
output_attentions=output_attentions,
)
else:
self_outputs = self.self(
hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions
)
attention_output = self.output(self_outputs[0])
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.bart.modeling_bart.BartAttention with BartConfig->BigBirdPegasusConfig, Bart->BigBirdPegasusDecoder
| BigBirdPegasusEncoderAttention |
python | dask__dask | dask/dataframe/dask_expr/_groupby.py | {
"start": 40736,
"end": 42123
} | class ____(Expr, GroupByBase):
_parameters = [
"frame",
"cum_raw",
"cum_last",
"meta",
"aggregate",
"initial",
"columns",
]
@functools.cached_property
def _meta(self):
return self.meta
def _divisions(self):
return self.frame.divisions
def _layer(self) -> dict:
dsk = {(self._name, 0): (self.cum_raw._name, 0)}
name_cum = f"cum-last{self._name}"
for i in range(1, self.frame.npartitions):
# store each cumulative step to graph to reduce computation
if i == 1:
dsk[(name_cum, i)] = (self.cum_last._name, i - 1)
else:
# aggregate with previous cumulation results
dsk[(name_cum, i)] = ( # type: ignore[assignment]
_cum_agg_filled,
(name_cum, i - 1),
(self.cum_last._name, i - 1),
self.aggregate,
self.initial,
)
dsk[(self._name, i)] = ( # type: ignore[assignment]
_cum_agg_aligned,
(self.frame._name, i),
(name_cum, i),
self.by,
self.operand("columns"),
self.aggregate,
self.initial,
)
return dsk
| GroupByCumulativeFinalizer |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/beta/beta.py | {
"start": 4032,
"end": 4685
} | class ____:
def __init__(self, beta: AsyncBeta) -> None:
self._beta = beta
@cached_property
def models(self) -> AsyncModelsWithRawResponse:
return AsyncModelsWithRawResponse(self._beta.models)
@cached_property
def messages(self) -> AsyncMessagesWithRawResponse:
return AsyncMessagesWithRawResponse(self._beta.messages)
@cached_property
def files(self) -> AsyncFilesWithRawResponse:
return AsyncFilesWithRawResponse(self._beta.files)
@cached_property
def skills(self) -> AsyncSkillsWithRawResponse:
return AsyncSkillsWithRawResponse(self._beta.skills)
| AsyncBetaWithRawResponse |
python | getsentry__sentry | tests/sentry/seer/explorer/test_tools.py | {
"start": 68980,
"end": 74389
} | class ____(APITransactionTestCase, SnubaTestCase, OurLogTestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.ten_mins_ago = before_now(minutes=10)
self.nine_mins_ago = before_now(minutes=9)
self.trace_id = uuid.uuid4().hex
# Create logs with various attributes
self.logs = [
self.create_ourlog(
{
"body": "User authentication failed",
"severity_text": "ERROR",
"severity_number": 17,
"trace_id": self.trace_id,
},
attributes={
"my-string-attribute": "custom value",
"my-boolean-attribute": True,
"my-double-attribute": 1.23,
"my-integer-attribute": 123,
},
timestamp=self.ten_mins_ago,
),
self.create_ourlog(
{
"body": "Request processed successfully",
"severity_text": "INFO",
"severity_number": 9,
},
timestamp=self.nine_mins_ago,
),
self.create_ourlog(
{
"body": "Database connection timeout",
"severity_text": "WARN",
"severity_number": 13,
"trace_id": self.trace_id,
},
timestamp=self.nine_mins_ago,
),
self.create_ourlog(
{
"body": "Another database connection timeout",
"severity_text": "WARN",
"severity_number": 13,
"trace_id": self.trace_id,
},
timestamp=self.nine_mins_ago,
),
]
self.store_ourlogs(self.logs)
@staticmethod
def get_id_str(item: TraceItem) -> str:
return item.item_id[::-1].hex()
def test_get_log_attributes_for_trace_basic(self) -> None:
result = get_log_attributes_for_trace(
org_id=self.organization.id,
trace_id=self.trace_id,
stats_period="1d",
)
assert result is not None
assert len(result["data"]) == 3
auth_log_expected = self.logs[0]
auth_log = None
for item in result["data"]:
if item["id"] == self.get_id_str(auth_log_expected):
auth_log = item
assert auth_log is not None
ts = datetime.fromisoformat(auth_log["timestamp"]).timestamp()
assert int(ts) == auth_log_expected.timestamp.seconds
for name, value, type in [
("message", "User authentication failed", "str"),
("project", self.project.slug, "str"),
("project.id", self.project.id, "int"),
("severity", "ERROR", "str"),
("my-string-attribute", "custom value", "str"),
("my-boolean-attribute", True, "double"),
("my-double-attribute", 1.23, "double"),
("my-integer-attribute", 123, "double"),
]:
assert auth_log["attributes"][name]["value"] == value, name
assert auth_log["attributes"][name]["type"] == type, f"{name} type mismatch"
def test_get_log_attributes_for_trace_substring_filter(self) -> None:
result = get_log_attributes_for_trace(
org_id=self.organization.id,
trace_id=self.trace_id,
stats_period="1d",
message_substring="database",
substring_case_sensitive=False,
)
assert result is not None
assert len(result["data"]) == 2
ids = [item["id"] for item in result["data"]]
assert self.get_id_str(self.logs[2]) in ids
assert self.get_id_str(self.logs[3]) in ids
result = get_log_attributes_for_trace(
org_id=self.organization.id,
trace_id=self.trace_id,
stats_period="1d",
message_substring="database",
substring_case_sensitive=True,
)
assert result is not None
assert len(result["data"]) == 1
assert result["data"][0]["id"] == self.get_id_str(self.logs[3])
def test_get_log_attributes_for_trace_limit_no_filter(self) -> None:
result = get_log_attributes_for_trace(
org_id=self.organization.id,
trace_id=self.trace_id,
stats_period="1d",
limit=1,
)
assert result is not None
assert len(result["data"]) == 1
assert result["data"][0]["id"] in [
self.get_id_str(self.logs[0]),
self.get_id_str(self.logs[2]),
self.get_id_str(self.logs[3]),
]
def test_get_log_attributes_for_trace_limit_with_filter(self) -> None:
result = get_log_attributes_for_trace(
org_id=self.organization.id,
trace_id=self.trace_id,
stats_period="1d",
message_substring="database",
substring_case_sensitive=False,
limit=2,
)
assert result is not None
assert len(result["data"]) == 2
ids = [item["id"] for item in result["data"]]
assert self.get_id_str(self.logs[2]) in ids
assert self.get_id_str(self.logs[3]) in ids
| TestLogsTraceQuery |
python | pytest-dev__pytest-django | pytest_django_test/app/models.py | {
"start": 168,
"end": 249
} | class ____(models.Model):
name: str = models.CharField(max_length=100)
| SecondItem |
python | ApeWorX__ape | src/ape/managers/project.py | {
"start": 41819,
"end": 62558
} | class ____(BaseManager):
"""
Manage dependencies for an Ape project.
Note: Every project gets its own dependency-set (DependencyManager).
"""
# Class-level cache
_cache: dict[DependencyAPI, Dependency] = {}
def __init__(self, project: Optional["ProjectManager"] = None):
self.project = project or self.local_project
@log_instead_of_fail(default="<DependencyManager>")
def __repr__(self) -> str:
result = "<DependencyManager"
project_id = None
if hasattr(self.project, "path"):
project_id = clean_path(self.project.path)
elif name := self.project.name:
project_id = name
return f"{result} project={project_id}>" if project_id else f"{result}>"
def __iter__(self) -> Iterator[Dependency]:
yield from self.specified
def __len__(self) -> int:
# NOTE: Using the config value keeps use lazy and fast.
return len(self.project.config.dependencies)
def __getitem__(self, name: str) -> DependencyVersionMap:
result = DependencyVersionMap(name)
# Always ensure the specified are included, even if not yet installed.
if versions := {d.version: d.project for d in self.get_project_dependencies(name=name)}:
result.extend(versions)
# Add remaining installed versions.
for dependency in self.get_versions(name):
if dependency.version not in result:
result[dependency.version] = dependency.project
return result
def __contains__(self, name: str) -> bool:
for dependency in self.all:
if name == dependency.name:
return True
return False
def keys(self) -> Iterator[str]:
_ = [x for x in self.specified] # Install specified if needed.
for dependency in self.all:
yield dependency.name
def items(self) -> Iterator[tuple[str, dict[str, "ProjectManager"]]]:
_ = [x for x in self.specified] # Install specified if needed.
for dependency in self.all:
yield dependency.name, {dependency.version: dependency.project}
def values(self) -> Iterator[dict[str, "ProjectManager"]]:
_ = [x for x in self.specified] # Install specified if needed.
for dependency in self.all:
yield {dependency.version: dependency.project}
@property
def config(self) -> ApeConfig:
return self.project.config
@cached_property
def packages_cache(self) -> PackagesCache:
"""
Where all dependency files go.
"""
return PackagesCache()
@cached_property
def types(self) -> dict[str, type[DependencyAPI]]:
dependency_classes: dict[str, type[DependencyAPI]] = {}
for _, (config_key, dependency_class) in self.plugin_manager.dependencies:
assert issubclass(dependency_class, DependencyAPI) # For mypy
if isinstance(config_key, tuple):
for sub_key in config_key:
dependency_classes[sub_key] = dependency_class
else:
# Single str-given.
dependency_classes[config_key] = dependency_class
return dependency_classes
@property
def specified(self) -> Iterator[Dependency]:
"""
All dependencies specified in the config.
"""
yield from self.get_project_dependencies(allow_install=False)
def get_project_dependencies(
self,
use_cache: bool = True,
config_override: Optional[dict] = None,
name: Optional[str] = None,
version: Optional[str] = None,
allow_install: bool = True,
strict: bool = False,
recurse: bool = True,
) -> Iterator[Dependency]:
"""
Get dependencies specified in the project's ``ape-config.yaml`` file.
Args:
use_cache (bool): Set to ``False`` to force-reinstall dependencies.
Defaults to ``True``. Does not work with ``allow_install=False``.
config_override (Optional[dict]): Override shared configuration for each dependency.
name (Optional[str]): Optionally only get dependencies with a certain name.
version (Optional[str]): Optionally only get dependencies with certain version.
allow_install (bool): Set to ``False`` to not allow installing uninstalled specified dependencies.
strict (bool): ``True`` requires the dependency to either be installed or install properly.
recurse (bool): Set to ``False`` to not recursively install dependencies of dependencies.
Returns:
Iterator[:class:`~ape.managers.project.Dependency`]
"""
for api in self.config_apis:
try:
api_version_id = api.version_id
except Exception:
api_version_id = None
if (name is not None and api.name != name and api.package_id != name) or (
version is not None and api_version_id != version
):
continue
# Ensure the dependency API data is known.
if api_version_id is not None:
dependency = self.add(api)
else:
# Errored.
dependency = Dependency(api)
if allow_install:
try:
dependency.install(
use_cache=use_cache, config_override=config_override, recurse=recurse
)
except ProjectError as err:
if strict:
raise # This error.
# This dependency has issues. Let's wait to until the user
# actually requests something before failing, and
# yield an uninstalled version of the specified dependency for
# them to fix.
logger.error(str(err))
yield dependency
@property
def config_apis(self) -> Iterator[DependencyAPI]:
for data in self.config.dependencies:
yield self.decode_dependency(**data)
# TODO: We may want to discern between dependencies where their API files are known
# versus dependencies where their projects are cached, as there is a difference.
@property
def all(self) -> Iterator[Dependency]:
"""
All installed dependencies, regardless of their project
affiliation. NOTE: By "installed" here, we simply
mean the API files are cached and known by Ape.
However, it does not guarantee the project is
installed.
"""
if not self.packages_cache.api_folder.is_dir():
return
for package_versions in self.packages_cache.api_folder.iterdir():
if not package_versions.is_dir():
continue
for api_file in package_versions.iterdir():
if not api_file.is_file():
continue
data = json.loads(api_file.read_text(encoding="utf-8"))
api = self.decode_dependency(**data)
if api.name == self.project.name:
# Don't include self as a dependency
# (happens when compiling a dependency)
continue
yield self._create_dependency(api)
# TODO: Remove this in 0.9.
@property
def installed(self) -> Iterator[Dependency]:
"""
DEPRECATED: Use ``.all``. Deprecated because of confusion
between this and uninstalled dependencies Ape still nows about
but require an extra install step, such as fetching from GitHub.
"""
yield from self.all
@property
def uri_map(self) -> dict[str, Url]:
"""
A map of URIs for filling out the dependencies
field in a package manifest.
NOTE: Only uses specified dependencies! Make sure
you are specifying all the needed dependencies in your
config file instead of only relying on globally-installed
packages.
"""
return {dep.name: Url(dep.api.uri) for dep in self.specified}
def get(
self, name: str, version: str, allow_install: bool = True
) -> Optional["ProjectManager"]:
if dependency := self._get(name, version, allow_install=allow_install, checked=set()):
return dependency.project
return None
def get_dependency_api(self, package_id: str, version: Optional[str] = None) -> DependencyAPI:
"""
Get a dependency API. If not given version and there are multiple,
returns the latest.
Args:
package_id (str): The package ID or name of the dependency.
version (str): The version of the dependency.
Returns:
:class:`~ape.api.projects.DependencyAPI`
"""
# Check by package ID first.
if dependency := self._get_dependency_api_by_package_id(package_id, version=version):
return dependency
elif dependency := self._get_dependency_api_by_package_id(
package_id, version=version, attr="name"
):
return dependency
package_str = f"{package_id}@{version}" if version else package_id
message = f"No matching dependency found with package ID '{package_str}'"
raise ProjectError(message)
def _get_dependency_api_by_package_id(
self, package_id: str, version: Optional[str] = None, attr: str = "package_id"
) -> Optional[DependencyAPI]:
matching = []
# First, only look at local configured packages (to give priority).
for api in self.config_apis:
if getattr(api, attr) != package_id:
continue
if (version and api.version_id == version) or not version:
matching.append(api)
if not matching:
# Nothing found: search in 'all'.
for dependency in self.all:
if getattr(dependency, attr) != package_id:
continue
if (version and dependency.api.version_id == version) or not version:
matching.append(dependency.api)
# else: prioritize the local dependencies, as that is most-likely what the user wants.
return sorted(matching, key=lambda d: d.version_id)[-1] if matching else None
def _get(
self,
name: str,
version: str,
allow_install: bool = True,
checked: Optional[set] = None,
) -> Optional[Dependency]:
checked = checked or set()
# Check already-installed first to prevent having to install anything.
name_matches = []
for dependency in self.all:
if dependency.package_id == name and dependency.version == version:
# If matching package-id, use that no matter what.
return dependency
elif dependency.name == name and dependency.version == version:
name_matches.append(dependency)
if name_matches:
if len(name_matches) == 1:
# Return match-by-name after full loop in case was checking by
# package ID, which is more precise.
return name_matches[0]
if name_matches:
# If one of the matches is in the `.specified` dependencies, use that one.
specified = [d.package_id for d in self.specified]
for dep_match in name_matches:
if dep_match.package_id in specified:
return dep_match
# Just use the first one and hope it is ok.
return name_matches[0]
# Was not found in this project's dependencies.
checked.add(self.project.project_id)
deps = [*self.all]
if allow_install:
deps.extend([*self.specified])
# Still not found - check dependencies of dependencies.
# NOTE: Purposely checking all specified first.
for dependency in deps:
try:
sub_project = dependency.project
except ProjectError:
continue
key = sub_project.project_id
if key in checked:
continue
checked.add(key)
if sub_dependency := sub_project.dependencies._get(
name, version, checked=checked, allow_install=allow_install
):
return sub_dependency
return None
def get_versions(self, name: str, allow_install: bool = True) -> Iterator[Dependency]:
"""
Get all installed versions of a dependency.
Args:
name (str): The name of the dependency.
allow_install (bool): Set to ``False`` to not allow installing.
Returns:
Iterator[:class:`~ape.managers.project.Dependency`]
"""
# First, check specified.
versions_yielded = set()
for dependency in self.get_project_dependencies(name=name, allow_install=allow_install):
if dependency.version in versions_yielded:
continue
yield dependency
versions_yielded.add(dependency.version)
# Yield any remaining installed.
using_package_id = False
for dependency in self.all:
if dependency.package_id != name:
continue
using_package_id = True
if dependency.version in versions_yielded:
continue
yield dependency
versions_yielded.add(dependency.version)
if using_package_id:
# Done.
return
# Never yield. Check if using short-name.
for dependency in self.all:
if dependency.name != name:
continue
elif dependency.version in versions_yielded:
continue
yield dependency
versions_yielded.add(dependency.version)
def _create_dependency(self, api: DependencyAPI) -> Dependency:
attempt_cache = True
try:
is_cached = self._cache.__contains__(api)
except ProjectError:
# Certain kinds of dependencies have no version ID
# when uninstalled, and it will cause the hash error.
is_cached = False
attempt_cache = False
if is_cached:
return self._cache[api]
# Create new instance.
dependency = Dependency(api, project=self.project)
# Only attempt cache if we are not getting an error hitting it.
if attempt_cache:
self._cache[api] = dependency
return dependency
def get_dependency(
self, dependency_id: str, version: str, allow_install: bool = True
) -> Dependency:
"""
Get a dependency.
Args:
dependency_id (str): The package ID of the dependency. You can also
provide the short-name of the dependency.
version (str): The version identifier.
allow_install (bool): If the dependency API is known but the
project is not installed, attempt to install it. Defaults to ``True``.
Raises:
:class:`~ape.exceptions.ProjectError`: When unable to find the
dependency.
Returns:
class:`~ape.managers.project.Dependency`
"""
version_options = _version_to_options(version)
# Also try the lower of the name
# so `OpenZeppelin` would give you `openzeppelin`.
id_options = [dependency_id]
if dependency_id.lower() != dependency_id:
# Ensure we try dependency_id without lower first.
id_options.append(dependency_id.lower())
def try_get():
for dep_id in id_options:
for v in version_options:
# NOTE: `allow_install=False` here because we install
# _after_ exhausting all options.
if dependency := self._get(dep_id, v, allow_install=False):
return dependency
if res := try_get():
return res
if allow_install:
# Try installing first.
self.install()
if res := try_get():
return res
raise ProjectError(f"Dependency '{dependency_id}' with version '{version}' not found.")
def decode_dependency(self, **item: Any) -> DependencyAPI:
"""
Decode data into a :class:`~ape.api.projects.DependencyAPI`.
Args:
**item: The same data you put in your ``dependencies:`` config.
Raises:
:class:`~ape.exceptions.ProjectError`: When unable to handle the
given API data.
Returns:
:class:`~ape.api.projects.DependencyAPI`
"""
for key, cls in self.types.items():
if key in item:
return cls.model_validate(item)
name = item.get("name") or f"{item}" # NOTE: Using 'or' for short-circuit eval
raise ProjectError(
f"No installed dependency API that supports '{name}'. "
f"Keys={', '.join([x for x in item.keys()])}"
)
def add(self, dependency: Union[dict, DependencyAPI]) -> Dependency:
"""
Add the dependency API data. This sets up a dependency such that
it can be fetched.
Args:
dependency (dict | :class:`~ape.api.projects.DependencyAPI`): The
API data necessary for fetching the dependency.
Returns:
class:`~ape.managers.project.Dependency`
"""
api = self.decode_dependency(**dependency) if isinstance(dependency, dict) else dependency
self.packages_cache.cache_api(api)
# Avoid infinite loop where Ape re-tries installing the dependency
# again and again in error situations.
install_if_not_found = False
try:
return self.get_dependency(
api.package_id,
api.version_id,
allow_install=install_if_not_found,
)
except ProjectError:
raise # Avoids bottom except.
except Exception as err:
raise ProjectError(
f"Failed to add dependency {api.name}@{api.version_id}: {err}"
) from err
def install(self, **dependency: Any) -> Union[Dependency, list[Dependency]]:
"""
Install dependencies.
Args:
**dependency: Dependency data, same to what you put in `dependencies:` config.
When excluded, installs all project-specified dependencies. Also, use
``use_cache=False`` to force re-installing and ``recurse=False`` to avoid
installing dependencies of dependencies.
Returns:
:class:`~ape.managers.project.Dependency` when given data else a list
of them, one for each specified.
"""
use_cache: bool = dependency.pop("use_cache", True)
recurse: bool = dependency.pop("recurse", True)
if dependency:
return self.install_dependency(dependency, use_cache=use_cache, recurse=recurse)
# Install all project's.
result: list[Dependency] = []
for dep in self.get_project_dependencies(
use_cache=use_cache, allow_install=True, recurse=recurse
):
result.append(dep)
return result
def install_dependency(
self,
dependency_data: Union[dict, DependencyAPI],
use_cache: bool = True,
config_override: Optional[dict] = None,
recurse: bool = True,
) -> Dependency:
dependency = self.add(dependency_data)
dependency.install(use_cache=use_cache, config_override=config_override, recurse=recurse)
return dependency
def unpack(self, base_path: Path, cache_name: str = ".cache"):
"""
Move dependencies into a .cache folder.
Ideal for isolated, temporary projects.
Args:
base_path (Path): The target path.
cache_name (str): The cache folder name to create
at the target path. Defaults to ``.cache`` because
that is what ``ape-solidity`` uses.
"""
cache_folder = base_path / cache_name
for dependency in self.specified:
dependency.unpack(cache_folder)
def _load_manifest(path: Union[Path, str]) -> PackageManifest:
path = Path(path)
return (
PackageManifest.model_validate_json(path.read_text())
if path.is_file()
else PackageManifest()
)
| DependencyManager |
python | plotly__plotly.py | plotly/graph_objs/layout/_newshape.py | {
"start": 235,
"end": 17830
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout"
_path_str = "layout.newshape"
_valid_props = {
"drawdirection",
"fillcolor",
"fillrule",
"label",
"layer",
"legend",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"line",
"name",
"opacity",
"showlegend",
"visible",
}
@property
def drawdirection(self):
"""
When `dragmode` is set to "drawrect", "drawline" or
"drawcircle" this limits the drag to be horizontal, vertical or
diagonal. Using "diagonal" there is no limit e.g. in drawing
lines in any direction. "ortho" limits the draw to be either
horizontal or vertical. "horizontal" allows horizontal extend.
"vertical" allows vertical extend.
The 'drawdirection' property is an enumeration that may be specified as:
- One of the following enumeration values:
['ortho', 'horizontal', 'vertical', 'diagonal']
Returns
-------
Any
"""
return self["drawdirection"]
@drawdirection.setter
def drawdirection(self, val):
self["drawdirection"] = val
@property
def fillcolor(self):
"""
Sets the color filling new shapes' interior. Please note that
if using a fillcolor with alpha greater than half, drag inside
the active shape starts moving the shape underneath, otherwise
a new shape could be started over.
The 'fillcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["fillcolor"]
@fillcolor.setter
def fillcolor(self, val):
self["fillcolor"] = val
@property
def fillrule(self):
"""
Determines the path's interior. For more info please visit
https://developer.mozilla.org/en-
US/docs/Web/SVG/Attribute/fill-rule
The 'fillrule' property is an enumeration that may be specified as:
- One of the following enumeration values:
['evenodd', 'nonzero']
Returns
-------
Any
"""
return self["fillrule"]
@fillrule.setter
def fillrule(self, val):
self["fillrule"] = val
@property
def label(self):
"""
The 'label' property is an instance of Label
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.newshape.Label`
- A dict of string/value properties that will be passed
to the Label constructor
Returns
-------
plotly.graph_objs.layout.newshape.Label
"""
return self["label"]
@label.setter
def label(self, val):
self["label"] = val
@property
def layer(self):
"""
Specifies whether new shapes are drawn below gridlines
("below"), between gridlines and traces ("between") or above
traces ("above").
The 'layer' property is an enumeration that may be specified as:
- One of the following enumeration values:
['below', 'above', 'between']
Returns
-------
Any
"""
return self["layer"]
@layer.setter
def layer(self, val):
self["layer"] = val
@property
def legend(self):
"""
Sets the reference to a legend to show new shape in. References
to these legends are "legend", "legend2", "legend3", etc.
Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
@property
def legendgroup(self):
"""
Sets the legend group for new shape. Traces and shapes part of
the same legend group hide/show at the same time when toggling
legend items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.newshape.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.layout.newshape.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
"""
Sets the legend rank for new shape. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for new shape.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.newshape.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.layout.newshape.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def name(self):
"""
Sets new shape name. The name appears as the legend item.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def opacity(self):
"""
Sets the opacity of new shapes.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def showlegend(self):
"""
Determines whether or not new shape is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
@property
def visible(self):
"""
Determines whether or not new shape is visible. If
"legendonly", the shape is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def _prop_descriptions(self):
return """\
drawdirection
When `dragmode` is set to "drawrect", "drawline" or
"drawcircle" this limits the drag to be horizontal,
vertical or diagonal. Using "diagonal" there is no
limit e.g. in drawing lines in any direction. "ortho"
limits the draw to be either horizontal or vertical.
"horizontal" allows horizontal extend. "vertical"
allows vertical extend.
fillcolor
Sets the color filling new shapes' interior. Please
note that if using a fillcolor with alpha greater than
half, drag inside the active shape starts moving the
shape underneath, otherwise a new shape could be
started over.
fillrule
Determines the path's interior. For more info please
visit https://developer.mozilla.org/en-
US/docs/Web/SVG/Attribute/fill-rule
label
:class:`plotly.graph_objects.layout.newshape.Label`
instance or dict with compatible properties
layer
Specifies whether new shapes are drawn below gridlines
("below"), between gridlines and traces ("between") or
above traces ("above").
legend
Sets the reference to a legend to show new shape in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for new shape. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.layout.newshape.Legendgrou
ptitle` instance or dict with compatible properties
legendrank
Sets the legend rank for new shape. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
legendwidth
Sets the width (in px or fraction) of the legend for
new shape.
line
:class:`plotly.graph_objects.layout.newshape.Line`
instance or dict with compatible properties
name
Sets new shape name. The name appears as the legend
item.
opacity
Sets the opacity of new shapes.
showlegend
Determines whether or not new shape is shown in the
legend.
visible
Determines whether or not new shape is visible. If
"legendonly", the shape is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
"""
def __init__(
self,
arg=None,
drawdirection=None,
fillcolor=None,
fillrule=None,
label=None,
layer=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
line=None,
name=None,
opacity=None,
showlegend=None,
visible=None,
**kwargs,
):
"""
Construct a new Newshape object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.Newshape`
drawdirection
When `dragmode` is set to "drawrect", "drawline" or
"drawcircle" this limits the drag to be horizontal,
vertical or diagonal. Using "diagonal" there is no
limit e.g. in drawing lines in any direction. "ortho"
limits the draw to be either horizontal or vertical.
"horizontal" allows horizontal extend. "vertical"
allows vertical extend.
fillcolor
Sets the color filling new shapes' interior. Please
note that if using a fillcolor with alpha greater than
half, drag inside the active shape starts moving the
shape underneath, otherwise a new shape could be
started over.
fillrule
Determines the path's interior. For more info please
visit https://developer.mozilla.org/en-
US/docs/Web/SVG/Attribute/fill-rule
label
:class:`plotly.graph_objects.layout.newshape.Label`
instance or dict with compatible properties
layer
Specifies whether new shapes are drawn below gridlines
("below"), between gridlines and traces ("between") or
above traces ("above").
legend
Sets the reference to a legend to show new shape in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for new shape. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.layout.newshape.Legendgrou
ptitle` instance or dict with compatible properties
legendrank
Sets the legend rank for new shape. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
legendwidth
Sets the width (in px or fraction) of the legend for
new shape.
line
:class:`plotly.graph_objects.layout.newshape.Line`
instance or dict with compatible properties
name
Sets new shape name. The name appears as the legend
item.
opacity
Sets the opacity of new shapes.
showlegend
Determines whether or not new shape is shown in the
legend.
visible
Determines whether or not new shape is visible. If
"legendonly", the shape is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Newshape
"""
super().__init__("newshape")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.Newshape
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Newshape`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("drawdirection", arg, drawdirection)
self._set_property("fillcolor", arg, fillcolor)
self._set_property("fillrule", arg, fillrule)
self._set_property("label", arg, label)
self._set_property("layer", arg, layer)
self._set_property("legend", arg, legend)
self._set_property("legendgroup", arg, legendgroup)
self._set_property("legendgrouptitle", arg, legendgrouptitle)
self._set_property("legendrank", arg, legendrank)
self._set_property("legendwidth", arg, legendwidth)
self._set_property("line", arg, line)
self._set_property("name", arg, name)
self._set_property("opacity", arg, opacity)
self._set_property("showlegend", arg, showlegend)
self._set_property("visible", arg, visible)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Newshape |
python | ray-project__ray | python/ray/util/queue.py | {
"start": 262,
"end": 326
} | class ____(queue.Full):
pass
@PublicAPI(stability="beta")
| Full |
python | pyinstaller__pyinstaller | PyInstaller/building/makespec.py | {
"start": 2082,
"end": 4602
} | class ____(argparse.Action):
"""
A command line option which takes multiple source:dest pairs.
"""
def __init__(self, *args, default=None, metavar=None, **kwargs):
super().__init__(*args, default=[], metavar='SOURCE:DEST', **kwargs)
def __call__(self, parser, namespace, value, option_string=None):
try:
# Find the only separator that isn't a Windows drive.
separator, = (m for m in re.finditer(rf"(^\w:[/\\])|[:{os.pathsep}]", value) if not m[1])
except ValueError:
# Split into SRC and DEST failed, wrong syntax
raise argparse.ArgumentError(self, f'Wrong syntax, should be {self.option_strings[0]}=SOURCE:DEST')
src = value[:separator.start()]
dest = value[separator.end():]
if not src or not dest:
# Syntax was correct, but one or both of SRC and DEST was not given
raise argparse.ArgumentError(self, "You have to specify both SOURCE and DEST")
# argparse is not particularly smart with copy by reference typed defaults. If the current list is the default,
# replace it before modifying it to avoid changing the default.
if getattr(namespace, self.dest) is self.default:
setattr(namespace, self.dest, [])
getattr(namespace, self.dest).append((src, dest))
def make_variable_path(filename, conversions=path_conversions):
if not os.path.isabs(filename):
# os.path.commonpath can not compare relative and absolute paths, and if filename is not absolute, none of the
# paths in conversions will match anyway.
return None, filename
for (from_path, to_name) in conversions:
assert os.path.abspath(from_path) == from_path, ("path '%s' should already be absolute" % from_path)
try:
common_path = os.path.commonpath([filename, from_path])
except ValueError:
# Per https://docs.python.org/3/library/os.path.html#os.path.commonpath, this raises ValueError in several
# cases which prevent computing a common path.
common_path = None
if common_path == from_path:
rest = filename[len(from_path):]
if rest.startswith(('\\', '/')):
rest = rest[1:]
return to_name, rest
return None, filename
def removed_key_option(x):
from PyInstaller.exceptions import RemovedCipherFeatureError
raise RemovedCipherFeatureError("Please remove your --key=xxx argument.")
| SourceDestAction |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/validators/metadata_validator.py | {
"start": 546,
"end": 14102
} | class ____:
docs_path: str
prerelease_tag: Optional[str] = None
disable_dockerhub_checks: bool = False
ValidationResult = Tuple[bool, Optional[Union[ValidationError, str]]]
Validator = Callable[[ConnectorMetadataDefinitionV0, ValidatorOptions], ValidationResult]
_SOURCE_DECLARATIVE_MANIFEST_DEFINITION_ID = "64a2f99c-542f-4af8-9a6f-355f1217b436"
def validate_metadata_images_in_dockerhub(
metadata_definition: ConnectorMetadataDefinitionV0, validator_opts: ValidatorOptions
) -> ValidationResult:
if validator_opts.disable_dockerhub_checks:
return True, None
metadata_definition_dict = metadata_definition.dict()
base_docker_image = get(metadata_definition_dict, "data.dockerRepository")
base_docker_version = get(metadata_definition_dict, "data.dockerImageTag")
oss_docker_image = get(metadata_definition_dict, "data.registryOverrides.oss.dockerRepository", base_docker_image)
oss_docker_version = get(metadata_definition_dict, "data.registryOverrides.oss.dockerImageTag", base_docker_version)
cloud_docker_image = get(metadata_definition_dict, "data.registryOverrides.cloud.dockerRepository", base_docker_image)
cloud_docker_version = get(metadata_definition_dict, "data.registryOverrides.cloud.dockerImageTag", base_docker_version)
normalization_docker_image = get(metadata_definition_dict, "data.normalizationConfig.normalizationRepository", None)
normalization_docker_version = get(metadata_definition_dict, "data.normalizationConfig.normalizationTag", None)
breaking_changes = get(metadata_definition_dict, "data.releases.breakingChanges", None)
breaking_change_versions = breaking_changes.keys() if breaking_changes else []
possible_docker_images = [
(base_docker_image, base_docker_version),
(oss_docker_image, oss_docker_version),
(cloud_docker_image, cloud_docker_version),
(normalization_docker_image, normalization_docker_version),
]
if not validator_opts.prerelease_tag:
possible_docker_images.extend([(base_docker_image, version) for version in breaking_change_versions])
# Filter out tuples with None and remove duplicates
images_to_check = list(set(filter(lambda x: None not in x, possible_docker_images)))
print(f"Checking that the following images are on dockerhub: {images_to_check}")
for image, version in images_to_check:
if not is_image_on_docker_hub(image, version, retries=3):
return False, f"Image {image}:{version} does not exist in DockerHub"
return True, None
def validate_at_least_one_language_tag(
metadata_definition: ConnectorMetadataDefinitionV0, _validator_opts: ValidatorOptions
) -> ValidationResult:
"""Ensure that there is at least one tag in the data.tags field that matches language:<LANG>."""
tags = get(metadata_definition, "data.tags", [])
if not any([tag.startswith("language:") for tag in tags]):
return False, "At least one tag must be of the form language:<LANG>"
return True, None
def validate_all_tags_are_keyvalue_pairs(
metadata_definition: ConnectorMetadataDefinitionV0, _validator_opts: ValidatorOptions
) -> ValidationResult:
"""Ensure that all tags are of the form <KEY>:<VALUE>."""
tags = get(metadata_definition, "data.tags", [])
for tag in tags:
if ":" not in tag:
return False, f"Tag {tag} is not of the form <KEY>:<VALUE>"
return True, None
def is_major_version(version: str) -> bool:
"""Check whether the version is of format N.0.0"""
semver_version = semver.Version.parse(version)
return semver_version.minor == 0 and semver_version.patch == 0 and semver_version.prerelease is None
def validate_major_version_bump_has_breaking_change_entry(
metadata_definition: ConnectorMetadataDefinitionV0, _validator_opts: ValidatorOptions
) -> ValidationResult:
"""Ensure that if the major version is incremented, there is a breaking change entry for that version."""
metadata_definition_dict = metadata_definition.dict()
image_tag = get(metadata_definition_dict, "data.dockerImageTag")
if not is_major_version(image_tag):
return True, None
# We are updating the same version since connector builder projects have a different concept of
# versioning.
# We do not check for breaking changes for source-declarative-connector in the metadata because the conenctor isn't directly used by any workspace.
# Breaking changes are instead tracked at the CDK level
if str(metadata_definition.data.definitionId) == _SOURCE_DECLARATIVE_MANIFEST_DEFINITION_ID:
return True, None
releases = get(metadata_definition_dict, "data.releases")
if not releases:
return (
False,
f"When doing a major version bump ({image_tag}), there must be a 'releases' property that contains 'breakingChanges' entries.",
)
breaking_changes = get(metadata_definition_dict, "data.releases.breakingChanges")
if breaking_changes is None or image_tag not in breaking_changes.keys():
return False, f"Major version {image_tag} needs a 'releases.breakingChanges' entry indicating what changed."
return True, None
def validate_docs_path_exists(metadata_definition: ConnectorMetadataDefinitionV0, validator_opts: ValidatorOptions) -> ValidationResult:
"""Ensure that the doc_path exists."""
if not pathlib.Path(validator_opts.docs_path).exists():
return False, f"Could not find {validator_opts.docs_path}."
return True, None
def validate_metadata_base_images_in_dockerhub(
metadata_definition: ConnectorMetadataDefinitionV0, validator_opts: ValidatorOptions
) -> ValidationResult:
if validator_opts.disable_dockerhub_checks:
return True, None
metadata_definition_dict = metadata_definition.dict()
image_address = get(metadata_definition_dict, "data.connectorBuildOptions.baseImage")
if image_address is None:
return True, None
try:
image_name, tag_with_sha_prefix, digest = image_address.split(":")
# As we query the DockerHub API we need to remove the docker.io prefix
image_name = image_name.replace("docker.io/", "")
except ValueError:
return False, f"Image {image_address} is not in the format <image>:<tag>@<sha>"
tag = tag_with_sha_prefix.split("@")[0]
print(f"Checking that the base images is on dockerhub: {image_address}")
if not is_image_on_docker_hub(image_name, tag, digest, retries=3):
return False, f"Image {image_address} does not exist in DockerHub"
return True, None
def validate_pypi_only_for_python(
metadata_definition: ConnectorMetadataDefinitionV0, _validator_opts: ValidatorOptions
) -> ValidationResult:
"""Ensure that if pypi publishing is enabled for a connector, it has a python language tag."""
pypi_enabled = get(metadata_definition, "data.remoteRegistries.pypi.enabled", False)
if not pypi_enabled:
return True, None
tags = get(metadata_definition, "data.tags", [])
if "language:python" not in tags and "language:low-code" not in tags:
return False, "If pypi publishing is enabled, the connector must have a python language tag."
return True, None
def validate_docker_image_tag_is_not_decremented(
metadata_definition: ConnectorMetadataDefinitionV0, _validator_opts: ValidatorOptions
) -> ValidationResult:
if _validator_opts and _validator_opts.disable_dockerhub_checks:
return True, None
if _validator_opts and _validator_opts.prerelease_tag:
return True, None
docker_image_name = get(metadata_definition, "data.dockerRepository")
if not docker_image_name:
return False, "The dockerRepository field is not set"
docker_image_tag = get(metadata_definition, "data.dockerImageTag")
if not docker_image_tag:
return False, "The dockerImageTag field is not set."
latest_released_version = get_latest_version_on_dockerhub(docker_image_name)
# This is happening when the connector has never been released to DockerHub
if not latest_released_version:
return True, None
if docker_image_tag == latest_released_version:
return True, None
current_semver_version = semver.Version.parse(docker_image_tag)
latest_released_semver_version = semver.Version.parse(latest_released_version)
if current_semver_version < latest_released_semver_version:
return (
False,
f"The dockerImageTag value ({current_semver_version}) can't be decremented: it should be equal to or above {latest_released_version}.",
)
return True, None
def check_is_dev_version(version: str) -> bool:
"""Check whether the version is a pre-release version."""
parsed_version = semver.VersionInfo.parse(version)
return parsed_version.prerelease is not None and not "rc" in parsed_version.prerelease
def check_is_release_candidate_version(version: str) -> bool:
"""Check whether the version is a release candidate version."""
parsed_version = semver.VersionInfo.parse(version)
return parsed_version.prerelease is not None and "rc" in parsed_version.prerelease
def check_is_major_release_candidate_version(version: str) -> bool:
"""Check whether the version is a major release candidate version.
Example: 2.0.0-rc.1
"""
if not check_is_release_candidate_version(version):
return False
# The version is a release candidate version
parsed_version = semver.VersionInfo.parse(version)
# No major version exists.
if parsed_version.major == 0:
return False
# The current release candidate is for a major version
if parsed_version.minor == 0 and parsed_version.patch == 0:
return True
def validate_rc_suffix_and_rollout_configuration(
metadata_definition: ConnectorMetadataDefinitionV0, _validator_opts: ValidatorOptions
) -> ValidationResult:
# Bypass validation for pre-releases
if _validator_opts and _validator_opts.prerelease_tag:
return True, None
docker_image_tag = get(metadata_definition, "data.dockerImageTag")
if docker_image_tag is None:
return False, "The dockerImageTag field is not set."
try:
is_major_release_candidate_version = check_is_major_release_candidate_version(docker_image_tag)
is_dev_version = check_is_dev_version(docker_image_tag)
is_rc_version = check_is_release_candidate_version(docker_image_tag)
is_prerelease = is_dev_version or is_rc_version
enabled_progressive_rollout = get(metadata_definition, "data.releases.rolloutConfiguration.enableProgressiveRollout", None)
# Major release candidate versions are not allowed
if is_major_release_candidate_version:
return (
False,
"The dockerImageTag has an -rc.<RC #> suffix for a major version. Release candidates for major version (with breaking changes) are not allowed.",
)
# Release candidates must have progressive rollout set to True or False
if is_rc_version and enabled_progressive_rollout is None:
return (
False,
"The dockerImageTag field has an -rc.<RC #> suffix but the connector is not set to use progressive rollout (releases.rolloutConfiguration.enableProgressiveRollout).",
)
# Progressive rollout can be enabled only for release candidates
if enabled_progressive_rollout is True and not is_prerelease:
return (
False,
"The dockerImageTag field should have an -rc.<RC #> suffix as the connector is set to use progressive rollout (releases.rolloutConfiguration.enableProgressiveRollout). Example: 2.1.0-rc.1",
)
except ValueError:
return False, f"The dockerImageTag field is not a valid semver version: {docker_image_tag}."
return True, None
PRE_UPLOAD_VALIDATORS = [
validate_all_tags_are_keyvalue_pairs,
validate_at_least_one_language_tag,
validate_major_version_bump_has_breaking_change_entry,
validate_docs_path_exists,
validate_metadata_base_images_in_dockerhub,
validate_pypi_only_for_python,
validate_docker_image_tag_is_not_decremented,
validate_rc_suffix_and_rollout_configuration,
]
POST_UPLOAD_VALIDATORS = PRE_UPLOAD_VALIDATORS + [
validate_metadata_images_in_dockerhub,
]
def validate_and_load(
file_path: pathlib.Path,
validators_to_run: List[Validator],
validator_opts: ValidatorOptions,
) -> Tuple[Optional[ConnectorMetadataDefinitionV0], Optional[ValidationError]]:
"""Load a metadata file from a path (runs jsonschema validation) and run optional extra validators.
Returns a tuple of (metadata_model, error_message).
If the metadata file is valid, metadata_model will be populated.
Otherwise, error_message will be populated with a string describing the error.
"""
try:
# Load the metadata file - this implicitly runs jsonschema validation
metadata = yaml.safe_load(file_path.read_text())
metadata_model = ConnectorMetadataDefinitionV0.parse_obj(metadata)
except ValidationError as e:
return None, f"Validation error: {e}"
for validator in validators_to_run:
print(f"Running validator: {validator.__name__}")
is_valid, error = validator(metadata_model, validator_opts)
if not is_valid:
return None, f"Validation error: {error}"
return metadata_model, None
| ValidatorOptions |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/declarative_automation/automation_condition_evaluator.py | {
"start": 1232,
"end": 9622
} | class ____:
def __init__(
self,
*,
entity_keys: AbstractSet[EntityKey],
instance: DagsterInstance,
asset_graph: BaseAssetGraph,
cursor: AssetDaemonCursor,
emit_backfills: bool,
evaluation_id: int,
default_condition: Optional[AutomationCondition] = None,
evaluation_time: Optional[datetime.datetime] = None,
logger: logging.Logger = logging.getLogger("dagster.automation"),
):
self.entity_keys = entity_keys
self.asset_graph_view = AssetGraphView(
temporal_context=TemporalContext(
effective_dt=evaluation_time or get_current_datetime(),
last_event_id=instance.event_log_storage.get_maximum_record_id(),
),
instance=instance,
asset_graph=asset_graph,
)
self.logger = logger
self.cursor = cursor
self.default_condition = default_condition
self.current_results_by_key: dict[EntityKey, AutomationResult] = {}
self.condition_cursors = []
self.expected_data_time_mapping = defaultdict()
_instance = self.asset_graph_view.instance
self.legacy_auto_materialize_run_tags: Mapping[str, str] = (
_instance.auto_materialize_run_tags
)
self.legacy_respect_materialization_data_versions = (
_instance.auto_materialize_respect_materialization_data_versions
)
self.emit_backfills = emit_backfills or _instance.da_request_backfills()
self.legacy_expected_data_time_by_key: dict[AssetKey, Optional[datetime.datetime]] = {}
self.legacy_data_time_resolver = CachingDataTimeResolver(self.instance_queryer)
self.request_subsets_by_key: dict[EntityKey, EntitySubset] = {}
self.evaluation_id = evaluation_id
@property
def instance_queryer(self) -> "CachingInstanceQueryer":
return self.asset_graph_view.get_inner_queryer_for_back_compat()
@property
def evaluation_time(self) -> datetime.datetime:
return self.asset_graph_view.effective_dt
@property
def asset_graph(self) -> "BaseAssetGraph[BaseAssetNode]":
return self.asset_graph_view.asset_graph
@property
def evaluated_asset_keys_and_parents(self) -> AbstractSet[AssetKey]:
asset_keys = {ek for ek in self.entity_keys if isinstance(ek, AssetKey)}
return {
parent for ek in asset_keys for parent in self.asset_graph.get(ek).parent_keys
} | asset_keys
@property
def asset_records_to_prefetch(self) -> Sequence[AssetKey]:
return [key for key in self.evaluated_asset_keys_and_parents if self.asset_graph.has(key)]
def prefetch(self) -> None:
"""Pre-populate the cached values here to avoid situations in which the new latest_storage_id
value is calculated using information that comes in after the set of asset partitions with
new parent materializations is calculated, as this can result in materializations being
ignored if they happen between the two calculations.
"""
self.logger.info(
f"Prefetching asset records for {len(self.asset_records_to_prefetch)} records."
)
self.instance_queryer.prefetch_asset_records(self.asset_records_to_prefetch)
self.logger.info("Done prefetching asset records.")
def evaluate(self) -> tuple[Sequence[AutomationResult], Sequence[EntitySubset[EntityKey]]]:
return asyncio.run(self.async_evaluate())
async def async_evaluate(
self,
) -> tuple[Sequence[AutomationResult], Sequence[EntitySubset[EntityKey]]]:
with partition_loading_context(
effective_dt=self.evaluation_time, dynamic_partitions_store=self.instance_queryer
):
return await self._async_evaluate()
async def _async_evaluate(
self,
) -> tuple[Sequence[AutomationResult], Sequence[EntitySubset[EntityKey]]]:
self.prefetch()
num_conditions = len(self.entity_keys)
num_evaluated = 0
async def _evaluate_entity_async(entity_key: EntityKey, offset: int):
self.logger.debug(
f"Evaluating {entity_key.to_user_string()} ({num_evaluated + offset}/{num_conditions})"
)
try:
await self.evaluate_entity(entity_key)
except Exception as e:
raise Exception(
f"Error while evaluating conditions for {entity_key.to_user_string()}"
) from e
result = self.current_results_by_key[entity_key]
num_requested = result.true_subset.size
if result.true_subset.is_partitioned:
requested_str = ",".join(result.true_subset.expensively_compute_partition_keys())
else:
requested_str = "(no partition)"
log_fn = self.logger.info if num_requested > 0 else self.logger.debug
log_fn(
f"{entity_key.to_user_string()} evaluation result: {num_requested} "
f"requested ({requested_str}) "
f"({format(result.end_timestamp - result.start_timestamp, '.3f')} seconds)"
)
for topo_level in self.asset_graph.toposorted_entity_keys_by_level:
coroutines = [
_evaluate_entity_async(entity_key, offset)
for offset, entity_key in enumerate(topo_level)
if entity_key in self.entity_keys
]
await asyncio.gather(*coroutines)
num_evaluated += len(coroutines)
return list(self.current_results_by_key.values()), [
v for v in self.request_subsets_by_key.values() if not v.is_empty
]
async def evaluate_entity(self, key: EntityKey) -> None:
# evaluate the condition of this asset
result = await AutomationContext.create(key=key, evaluator=self).evaluate_async()
# update dictionaries to keep track of this result
self.current_results_by_key[key] = result
self._add_request_subset(result.true_subset)
if isinstance(key, AssetKey):
self.legacy_expected_data_time_by_key[key] = result.compute_legacy_expected_data_time()
# handle cases where an entity must be materialized with others
self._handle_execution_set(result)
def _add_request_subset(self, subset: EntitySubset) -> None:
"""Adds the provided subset to the dictionary tracking what we will request on this tick."""
if subset.key not in self.request_subsets_by_key:
self.request_subsets_by_key[subset.key] = subset
else:
self.request_subsets_by_key[subset.key] = self.request_subsets_by_key[
subset.key
].compute_union(subset)
def _handle_execution_set(self, result: AutomationResult[AssetKey]) -> None:
# if we need to materialize any partitions of a non-subsettable multi-asset, we need to
# materialize all of them
asset_key = result.key
execution_set_keys = self.asset_graph.get(asset_key).execution_set_entity_keys
if len(execution_set_keys) > 1 and result.true_subset.size > 0:
for neighbor_key in execution_set_keys:
if isinstance(neighbor_key, AssetKey):
self.legacy_expected_data_time_by_key[neighbor_key] = (
self.legacy_expected_data_time_by_key[asset_key]
)
# make sure that the true_subset of the neighbor is accurate -- when it was
# evaluated it may have had a different requested subset. however, because
# all these neighbors must be executed as a unit, we need to union together
# the subset of all required neighbors
neighbor_true_subset = result.true_subset.compute_mapped_subset(
neighbor_key, direction="up"
)
if neighbor_key in self.current_results_by_key:
self.current_results_by_key[
neighbor_key
].set_internal_serializable_subset_override(
neighbor_true_subset.convert_to_serializable_subset()
)
self._add_request_subset(neighbor_true_subset)
| AutomationConditionEvaluator |
python | pydantic__pydantic | pydantic/_internal/_validate_call.py | {
"start": 1862,
"end": 5321
} | class ____:
"""This is a wrapper around a function that validates the arguments passed to it, and optionally the return value."""
__slots__ = (
'function',
'validate_return',
'schema_type',
'module',
'qualname',
'ns_resolver',
'config_wrapper',
'__pydantic_complete__',
'__pydantic_validator__',
'__return_pydantic_validator__',
)
def __init__(
self,
function: ValidateCallSupportedTypes,
config: ConfigDict | None,
validate_return: bool,
parent_namespace: MappingNamespace | None,
) -> None:
self.function = function
self.validate_return = validate_return
if isinstance(function, partial):
self.schema_type = function.func
self.module = function.func.__module__
else:
self.schema_type = function
self.module = function.__module__
self.qualname = extract_function_qualname(function)
self.ns_resolver = NsResolver(
namespaces_tuple=ns_for_function(self.schema_type, parent_namespace=parent_namespace)
)
self.config_wrapper = ConfigWrapper(config)
if not self.config_wrapper.defer_build:
self._create_validators()
else:
self.__pydantic_complete__ = False
def _create_validators(self) -> None:
gen_schema = GenerateSchema(self.config_wrapper, self.ns_resolver)
schema = gen_schema.clean_schema(gen_schema.generate_schema(self.function))
core_config = self.config_wrapper.core_config(title=self.qualname)
self.__pydantic_validator__ = create_schema_validator(
schema,
self.schema_type,
self.module,
self.qualname,
'validate_call',
core_config,
self.config_wrapper.plugin_settings,
)
if self.validate_return:
signature = inspect.signature(self.function)
return_type = signature.return_annotation if signature.return_annotation is not signature.empty else Any
gen_schema = GenerateSchema(self.config_wrapper, self.ns_resolver)
schema = gen_schema.clean_schema(gen_schema.generate_schema(return_type))
validator = create_schema_validator(
schema,
self.schema_type,
self.module,
self.qualname,
'validate_call',
core_config,
self.config_wrapper.plugin_settings,
)
if inspect.iscoroutinefunction(self.function):
async def return_val_wrapper(aw: Awaitable[Any]) -> None:
return validator.validate_python(await aw)
self.__return_pydantic_validator__ = return_val_wrapper
else:
self.__return_pydantic_validator__ = validator.validate_python
else:
self.__return_pydantic_validator__ = None
self.__pydantic_complete__ = True
def __call__(self, *args: Any, **kwargs: Any) -> Any:
if not self.__pydantic_complete__:
self._create_validators()
res = self.__pydantic_validator__.validate_python(pydantic_core.ArgsKwargs(args, kwargs))
if self.__return_pydantic_validator__:
return self.__return_pydantic_validator__(res)
else:
return res
| ValidateCallWrapper |
python | pennersr__django-allauth | allauth/socialaccount/providers/edx/views.py | {
"start": 228,
"end": 1709
} | class ____(OAuth2Adapter):
provider_id = "edx"
provider_default_url = "https://edx.org"
settings = app_settings.PROVIDERS.get(provider_id, {})
provider_base_url = settings.get("EDX_URL", provider_default_url)
access_token_url = "{0}/oauth2/access_token".format(provider_base_url)
authorize_url = "{0}/oauth2/authorize/".format(provider_base_url)
profile_url = "{0}/api/user/v1/me".format(provider_base_url)
account_url = "{0}/api/user/v1/accounts/{1}"
supports_state = False
redirect_uri_protocol = "https"
def complete_login(self, request, app, token, **kwargs):
headers = {"Authorization": "Bearer {0}".format(token.token)}
response = (
get_adapter().get_requests_session().get(self.profile_url, headers=headers)
)
extra_data = response.json()
if extra_data.get("email", None) is None:
response = (
get_adapter()
.get_requests_session()
.get(
self.account_url.format(
self.provider_base_url, extra_data["username"]
),
headers=headers,
)
)
extra_data = response.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(EdxOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(EdxOAuth2Adapter)
| EdxOAuth2Adapter |
python | getsentry__sentry | fixtures/safe_migrations_apps/bad_flow_delete_pending_with_fk_constraints_app/migrations/0002_delete_without_pending.py | {
"start": 190,
"end": 501
} | class ____(CheckedMigration):
atomic = False
dependencies = [
("bad_flow_delete_pending_with_fk_constraints_app", "0001_initial"),
]
operations = [
SafeDeleteModel(
name="TestTable",
deletion_action=DeletionAction.MOVE_TO_PENDING,
),
]
| Migration |
python | pytorch__pytorch | tools/test/test_upload_test_stats.py | {
"start": 134,
"end": 697
} | class ____(unittest.TestCase):
@unittest.skipIf(
IN_CI,
"don't run in CI as this does a lot of network calls and uses up GH API rate limit",
)
def test_existing_job(self) -> None:
"""Run on a known-good job and make sure we don't error and get basically okay results."""
test_cases = get_tests(2561394934, 1)
self.assertEqual(len(test_cases), 609873)
summary = summarize_test_cases(test_cases)
self.assertEqual(len(summary), 5068)
if __name__ == "__main__":
unittest.main()
| TestUploadTestStats |
python | Farama-Foundation__Gymnasium | gymnasium/envs/mujoco/swimmer_v5.py | {
"start": 182,
"end": 15390
} | class ____(MujocoEnv, utils.EzPickle):
r"""
## Description
This environment corresponds to the Swimmer environment described in Rémi Coulom's PhD thesis ["Reinforcement Learning Using Neural Networks, with Applications to Motor Control"](https://tel.archives-ouvertes.fr/tel-00003985/document).
The environment aims to increase the number of independent state and control variables compared to classical control environments.
The swimmers consist of three or more segments ('***links***') and one less articulation joints ('***rotors***') - one rotor joint connects exactly two links to form a linear chain.
The swimmer is suspended in a two-dimensional pool and always starts in the same position (subject to some deviation drawn from a uniform distribution),
and the goal is to move as fast as possible towards the right by applying torque to the rotors and using fluid friction.
## Notes
The problem parameters are:
Problem parameters:
* *n*: number of body parts
* *m<sub>i</sub>*: mass of part *i* (*i* ∈ {1...n})
* *l<sub>i</sub>*: length of part *i* (*i* ∈ {1...n})
* *k*: viscous-friction coefficient
While the default environment has *n* = 3, *l<sub>i</sub>* = 0.1, and *k* = 0.1.
It is possible to pass a custom MuJoCo XML file during construction to increase the number of links, or to tweak any of the parameters.
## Action Space
```{figure} action_space_figures/swimmer.png
:name: swimmer
```
The action space is a `Box(-1, 1, (2,), float32)`. An action represents the torques applied between *links*
| Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Type (Unit) |
|-----|------------------------------------|-------------|-------------|----------------------------------|-------|--------------|
| 0 | Torque applied on the first rotor | -1 | 1 | motor1_rot | hinge | torque (N m) |
| 1 | Torque applied on the second rotor | -1 | 1 | motor2_rot | hinge | torque (N m) |
## Observation Space
The observation space consists of the following parts (in order):
- *qpos (3 elements by default):* Position values of the robot's body parts.
- *qvel (5 elements):* The velocities of these individual body parts (their derivatives).
By default, the observation does not include the x- and y-coordinates of the front tip.
These can be included by passing `exclude_current_positions_from_observation=False` during construction.
In this case, the observation space will be a `Box(-Inf, Inf, (10,), float64)`, where the first two observations are the x- and y-coordinates of the front tip.
Regardless of whether `exclude_current_positions_from_observation` is set to `True` or `False`, the x- and y-coordinates are returned in `info` with the keys `"x_position"` and `"y_position"`, respectively.
By default, however, the observation space is a `Box(-Inf, Inf, (8,), float64)` where the elements are as follows:
| Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Type (Unit) |
| --- | ------------------------------------ | ---- | --- | -------------------------------- | ----- | ------------------------ |
| 0 | angle of the front tip | -Inf | Inf | free_body_rot | hinge | angle (rad) |
| 1 | angle of the first rotor | -Inf | Inf | motor1_rot | hinge | angle (rad) |
| 2 | angle of the second rotor | -Inf | Inf | motor2_rot | hinge | angle (rad) |
| 3 | velocity of the tip along the x-axis | -Inf | Inf | slider1 | slide | velocity (m/s) |
| 4 | velocity of the tip along the y-axis | -Inf | Inf | slider2 | slide | velocity (m/s) |
| 5 | angular velocity of front tip | -Inf | Inf | free_body_rot | hinge | angular velocity (rad/s) |
| 6 | angular velocity of first rotor | -Inf | Inf | motor1_rot | hinge | angular velocity (rad/s) |
| 7 | angular velocity of second rotor | -Inf | Inf | motor2_rot | hinge | angular velocity (rad/s) |
| excluded | position of the tip along the x-axis | -Inf | Inf | slider1 | slide | position (m) |
| excluded | position of the tip along the y-axis | -Inf | Inf | slider2 | slide | position (m) |
## Rewards
The total reward is: ***reward*** *=* *forward_reward - ctrl_cost*.
- *forward_reward*:
A reward for moving forward,
this reward would be positive if the Swimmer moves forward (in the positive $x$ direction / in the right direction).
$w_{forward} \times \frac{dx}{dt}$, where
$dx$ is the displacement of the (front) "tip" ($x_{after-action} - x_{before-action}$),
$dt$ is the time between actions, which depends on the `frame_skip` parameter (default is 4),
and `frametime` which is $0.01$ - so the default is $dt = 4 \times 0.01 = 0.04$,
$w_{forward}$ is the `forward_reward_weight` (default is $1$).
- *ctrl_cost*:
A negative reward to penalize the Swimmer for taking actions that are too large.
$w_{control} \times \|action\|_2^2$,
where $w_{control}$ is `ctrl_cost_weight` (default is $10^{-4}$).
`info` contains the individual reward terms.
## Starting State
The initial position state is $\mathcal{U}_{[-reset\_noise\_scale \times I_{5}, reset\_noise\_scale \times I_{5}]}$.
The initial velocity state is $\mathcal{U}_{[-reset\_noise\_scale \times I_{5}, reset\_noise\_scale \times I_{5}]}$.
where $\mathcal{U}$ is the multivariate uniform continuous distribution.
## Episode End
### Termination
The Swimmer never terminates.
### Truncation
The default duration of an episode is 1000 timesteps.
## Arguments
Swimmer provides a range of parameters to modify the observation space, reward function, initial state, and termination condition.
These parameters can be applied during `gymnasium.make` in the following way:
```python
import gymnasium as gym
env = gym.make('Swimmer-v5', xml_file=...)
```
| Parameter | Type | Default |Description |
|--------------------------------------------| --------- |-------------- |-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|`xml_file` | **str** |`"swimmer.xml"`| Path to a MuJoCo model |
|`forward_reward_weight` | **float** | `1` | Weight for _forward_reward_ term (see `Rewards` section) |
|`ctrl_cost_weight` | **float** | `1e-4` | Weight for _ctrl_cost_ term (see `Rewards` section) |
|`reset_noise_scale` | **float** | `0.1` | Scale of random perturbations of initial position and velocity (see `Starting State` section) |
|`exclude_current_positions_from_observation`| **bool** | `True` | Whether or not to omit the x- and y-coordinates from observations. Excluding the position can serve as an inductive bias to induce position-agnostic behavior in policies (see `Observation Space` section) |
## Version History
* v5:
- Minimum `mujoco` version is now 2.3.3.
- Added support for fully custom/third party `mujoco` models using the `xml_file` argument (previously only a few changes could be made to the existing models).
- Added `default_camera_config` argument, a dictionary for setting the `mj_camera` properties, mainly useful for custom environments.
- Added `env.observation_structure`, a dictionary for specifying the observation space compose (e.g. `qpos`, `qvel`), useful for building tooling and wrappers for the MuJoCo environments.
- Return a non-empty `info` with `reset()`, previously an empty dictionary was returned, the new keys are the same state information as `step()`.
    - Added `frame_skip` argument, used to configure the `dt` (duration of `step()`); the default varies by environment — check the environment documentation pages.
- Restored the `xml_file` argument (was removed in `v4`).
- Added `forward_reward_weight`, `ctrl_cost_weight`, to configure the reward function (defaults are effectively the same as in `v4`).
- Added `reset_noise_scale` argument to set the range of initial states.
- Added `exclude_current_positions_from_observation` argument.
- Replaced `info["reward_fwd"]` and `info["forward_reward"]` with `info["reward_forward"]` to be consistent with the other environments.
* v4: All MuJoCo environments now use the MuJoCo bindings in mujoco >= 2.1.3.
* v3: Support for `gymnasium.make` kwargs such as `xml_file`, `ctrl_cost_weight`, `reset_noise_scale`, etc. rgb rendering comes from tracking camera (so agent does not run away from screen). Moved to the [gymnasium-robotics repo](https://github.com/Farama-Foundation/gymnasium-robotics).
* v2: All continuous control environments now use mujoco-py >= 1.50. Moved to the [gymnasium-robotics repo](https://github.com/Farama-Foundation/gymnasium-robotics).
* v1: max_time_steps raised to 1000 for robot based tasks. Added reward_threshold to environments.
* v0: Initial versions release.
"""
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
"rgbd_tuple",
],
}
    def __init__(
        self,
        xml_file: str = "swimmer.xml",
        frame_skip: int = 4,
        default_camera_config: dict[str, float | int] = {},
        forward_reward_weight: float = 1.0,
        ctrl_cost_weight: float = 1e-4,
        reset_noise_scale: float = 0.1,
        exclude_current_positions_from_observation: bool = True,
        **kwargs,
    ):
        """Initialize the Swimmer environment.

        Args:
            xml_file: Path to the MuJoCo model.
            frame_skip: Number of simulation sub-steps per `step()` call.
            default_camera_config: `mj_camera` property overrides.
            forward_reward_weight: Weight of the forward-progress reward term.
            ctrl_cost_weight: Weight of the quadratic control penalty.
            reset_noise_scale: Scale of the uniform noise added to the initial
                qpos/qvel on reset.
            exclude_current_positions_from_observation: If True, drop the
                global x/y coordinates from observations.
            **kwargs: Forwarded to both `EzPickle` and `MujocoEnv` __init__.
        """
        # NOTE(review): `default_camera_config={}` is a mutable default
        # argument; harmless as long as it is never mutated in place, but
        # worth confirming — changing the default would alter pickled args.
        # EzPickle records the constructor arguments so the env can be
        # pickled and reconstructed with the same configuration.
        utils.EzPickle.__init__(
            self,
            xml_file,
            frame_skip,
            default_camera_config,
            forward_reward_weight,
            ctrl_cost_weight,
            reset_noise_scale,
            exclude_current_positions_from_observation,
            **kwargs,
        )
        self._forward_reward_weight = forward_reward_weight
        self._ctrl_cost_weight = ctrl_cost_weight
        self._reset_noise_scale = reset_noise_scale
        self._exclude_current_positions_from_observation = (
            exclude_current_positions_from_observation
        )
        # observation_space=None here: the real space is constructed below,
        # once the loaded model's qpos/qvel sizes are known.
        MujocoEnv.__init__(
            self,
            xml_file,
            frame_skip,
            observation_space=None,
            default_camera_config=default_camera_config,
            **kwargs,
        )
        # `render_fps` depends on `self.dt`, which only exists after
        # MujocoEnv.__init__ has loaded the model, so metadata is rebuilt.
        self.metadata = {
            "render_modes": [
                "human",
                "rgb_array",
                "depth_array",
                "rgbd_tuple",
            ],
            "render_fps": int(np.round(1.0 / self.dt)),
        }
        # Two entries (global x, y) are dropped from qpos when positions are
        # excluded; the bool acts as 0/1 in the arithmetic below.
        obs_size = (
            self.data.qpos.size
            + self.data.qvel.size
            - 2 * exclude_current_positions_from_observation
        )
        self.observation_space = Box(
            low=-np.inf, high=np.inf, shape=(obs_size,), dtype=np.float64
        )
        # Layout of the flat observation vector, for tooling/wrappers.
        self.observation_structure = {
            "skipped_qpos": 2 * exclude_current_positions_from_observation,
            "qpos": self.data.qpos.size
            - 2 * exclude_current_positions_from_observation,
            "qvel": self.data.qvel.size,
        }
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
    def step(self, action):
        """Advance the simulation by `frame_skip` sub-steps applying `action`.

        Returns:
            ``(observation, reward, terminated, truncated, info)``;
            ``terminated`` is always False — the Swimmer has no failure state.
        """
        # Forward velocity is estimated by finite differences of the global
        # x/y position across the whole simulation step (duration self.dt).
        xy_position_before = self.data.qpos[0:2].copy()
        self.do_simulation(action, self.frame_skip)
        xy_position_after = self.data.qpos[0:2].copy()
        xy_velocity = (xy_position_after - xy_position_before) / self.dt
        x_velocity, y_velocity = xy_velocity
        observation = self._get_obs()
        reward, reward_info = self._get_rew(x_velocity, action)
        # Diagnostics: post-step position/velocity plus individual reward terms.
        info = {
            "x_position": xy_position_after[0],
            "y_position": xy_position_after[1],
            "distance_from_origin": np.linalg.norm(xy_position_after, ord=2),
            "x_velocity": x_velocity,
            "y_velocity": y_velocity,
            **reward_info,
        }
        if self.render_mode == "human":
            self.render()
        # truncation=False as the time limit is handled by the `TimeLimit` wrapper added during `make`
        return observation, reward, False, False, info
def _get_rew(self, x_velocity: float, action):
forward_reward = self._forward_reward_weight * x_velocity
ctrl_cost = self.control_cost(action)
reward = forward_reward - ctrl_cost
reward_info = {
"reward_forward": forward_reward,
"reward_ctrl": -ctrl_cost,
}
return reward, reward_info
def _get_obs(self):
position = self.data.qpos.flatten()
velocity = self.data.qvel.flatten()
if self._exclude_current_positions_from_observation:
position = position[2:]
observation = np.concatenate([position, velocity]).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq
)
qvel = self.init_qvel + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nv
)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def _get_reset_info(self):
return {
"x_position": self.data.qpos[0],
"y_position": self.data.qpos[1],
"distance_from_origin": np.linalg.norm(self.data.qpos[0:2], ord=2),
}
| SwimmerEnv |
python | Textualize__textual | docs/examples/widgets/input.py | {
"start": 79,
"end": 295
} | class ____(App):
    def compose(self) -> ComposeResult:
        """Yield the app's widgets: two single-line text inputs."""
        yield Input(placeholder="First Name")
        yield Input(placeholder="Last Name")
# Run the demo app when executed as a script.
if __name__ == "__main__":
    app = InputApp()
    app.run()
| InputApp |
python | huggingface__transformers | tests/models/vitmatte/test_modeling_vitmatte.py | {
"start": 4357,
"end": 9259
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as VitMatte does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (VitMatteForImageMatting,) if is_torch_available() else ()
pipeline_model_mapping = {}
test_resize_embeddings = False
test_torch_exportable = True
test_torch_exportable_strictly = get_torch_major_and_minor_version() != "2.7"
def setUp(self):
self.model_tester = VitMatteModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=VitMatteConfig,
has_text_modality=False,
hidden_size=37,
common_properties=["hidden_size"],
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="VitMatte does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Training is not yet supported")
def test_training(self):
pass
@unittest.skip(reason="Training is not yet supported")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="ViTMatte does not support input and output embeddings")
def test_model_get_set_embeddings(self):
pass
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "hustvl/vitmatte-small-composition-1k"
model = VitMatteForImageMatting.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip(reason="ViTMatte does not support retaining gradient on attention logits")
def test_retain_grad_hidden_states_attentions(self):
pass
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[2, 2],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
print("Hello we're here")
check_hidden_states_output(inputs_dict, config, model_class)
@require_timm
def test_backbone_selection(self):
def _validate_backbone_init():
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
if model.__class__.__name__ == "VitMatteForImageMatting":
# Confirm out_indices propagated to backbone
self.assertEqual(len(model.backbone.out_indices), 2)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.use_pretrained_backbone = True
config.backbone_config = None
config.backbone_kwargs = {"out_indices": [-2, -1]}
# Force load_backbone path
config.is_hybrid = False
# Load a timm backbone
config.backbone = "resnet18"
config.use_timm_backbone = True
_validate_backbone_init()
# Load a HF backbone
config.backbone = "facebook/dinov2-small"
config.use_timm_backbone = False
_validate_backbone_init()
@require_torch
| VitMatteModelTest |
python | milvus-io__pymilvus | pymilvus/grpc_gen/milvus_pb2_grpc.py | {
"start": 1007,
"end": 35707
} | class ____(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateCollection = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/CreateCollection',
request_serializer=milvus__pb2.CreateCollectionRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.DropCollection = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DropCollection',
request_serializer=milvus__pb2.DropCollectionRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.HasCollection = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/HasCollection',
request_serializer=milvus__pb2.HasCollectionRequest.SerializeToString,
response_deserializer=milvus__pb2.BoolResponse.FromString,
)
self.LoadCollection = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/LoadCollection',
request_serializer=milvus__pb2.LoadCollectionRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.ReleaseCollection = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/ReleaseCollection',
request_serializer=milvus__pb2.ReleaseCollectionRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.DescribeCollection = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DescribeCollection',
request_serializer=milvus__pb2.DescribeCollectionRequest.SerializeToString,
response_deserializer=milvus__pb2.DescribeCollectionResponse.FromString,
)
self.BatchDescribeCollection = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/BatchDescribeCollection',
request_serializer=milvus__pb2.BatchDescribeCollectionRequest.SerializeToString,
response_deserializer=milvus__pb2.BatchDescribeCollectionResponse.FromString,
)
self.GetCollectionStatistics = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetCollectionStatistics',
request_serializer=milvus__pb2.GetCollectionStatisticsRequest.SerializeToString,
response_deserializer=milvus__pb2.GetCollectionStatisticsResponse.FromString,
)
self.ShowCollections = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/ShowCollections',
request_serializer=milvus__pb2.ShowCollectionsRequest.SerializeToString,
response_deserializer=milvus__pb2.ShowCollectionsResponse.FromString,
)
self.AlterCollection = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/AlterCollection',
request_serializer=milvus__pb2.AlterCollectionRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.AlterCollectionField = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/AlterCollectionField',
request_serializer=milvus__pb2.AlterCollectionFieldRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.AddCollectionFunction = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/AddCollectionFunction',
request_serializer=milvus__pb2.AddCollectionFunctionRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.AlterCollectionFunction = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/AlterCollectionFunction',
request_serializer=milvus__pb2.AlterCollectionFunctionRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.DropCollectionFunction = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DropCollectionFunction',
request_serializer=milvus__pb2.DropCollectionFunctionRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.CreatePartition = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/CreatePartition',
request_serializer=milvus__pb2.CreatePartitionRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.DropPartition = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DropPartition',
request_serializer=milvus__pb2.DropPartitionRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.HasPartition = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/HasPartition',
request_serializer=milvus__pb2.HasPartitionRequest.SerializeToString,
response_deserializer=milvus__pb2.BoolResponse.FromString,
)
self.LoadPartitions = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/LoadPartitions',
request_serializer=milvus__pb2.LoadPartitionsRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.ReleasePartitions = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/ReleasePartitions',
request_serializer=milvus__pb2.ReleasePartitionsRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.GetPartitionStatistics = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetPartitionStatistics',
request_serializer=milvus__pb2.GetPartitionStatisticsRequest.SerializeToString,
response_deserializer=milvus__pb2.GetPartitionStatisticsResponse.FromString,
)
self.ShowPartitions = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/ShowPartitions',
request_serializer=milvus__pb2.ShowPartitionsRequest.SerializeToString,
response_deserializer=milvus__pb2.ShowPartitionsResponse.FromString,
)
self.GetLoadingProgress = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetLoadingProgress',
request_serializer=milvus__pb2.GetLoadingProgressRequest.SerializeToString,
response_deserializer=milvus__pb2.GetLoadingProgressResponse.FromString,
)
self.GetLoadState = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetLoadState',
request_serializer=milvus__pb2.GetLoadStateRequest.SerializeToString,
response_deserializer=milvus__pb2.GetLoadStateResponse.FromString,
)
self.CreateAlias = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/CreateAlias',
request_serializer=milvus__pb2.CreateAliasRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.DropAlias = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DropAlias',
request_serializer=milvus__pb2.DropAliasRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.AlterAlias = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/AlterAlias',
request_serializer=milvus__pb2.AlterAliasRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.DescribeAlias = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DescribeAlias',
request_serializer=milvus__pb2.DescribeAliasRequest.SerializeToString,
response_deserializer=milvus__pb2.DescribeAliasResponse.FromString,
)
self.ListAliases = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/ListAliases',
request_serializer=milvus__pb2.ListAliasesRequest.SerializeToString,
response_deserializer=milvus__pb2.ListAliasesResponse.FromString,
)
self.CreateIndex = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/CreateIndex',
request_serializer=milvus__pb2.CreateIndexRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.AlterIndex = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/AlterIndex',
request_serializer=milvus__pb2.AlterIndexRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.DescribeIndex = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DescribeIndex',
request_serializer=milvus__pb2.DescribeIndexRequest.SerializeToString,
response_deserializer=milvus__pb2.DescribeIndexResponse.FromString,
)
self.GetIndexStatistics = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetIndexStatistics',
request_serializer=milvus__pb2.GetIndexStatisticsRequest.SerializeToString,
response_deserializer=milvus__pb2.GetIndexStatisticsResponse.FromString,
)
self.GetIndexState = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetIndexState',
request_serializer=milvus__pb2.GetIndexStateRequest.SerializeToString,
response_deserializer=milvus__pb2.GetIndexStateResponse.FromString,
)
self.GetIndexBuildProgress = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetIndexBuildProgress',
request_serializer=milvus__pb2.GetIndexBuildProgressRequest.SerializeToString,
response_deserializer=milvus__pb2.GetIndexBuildProgressResponse.FromString,
)
self.DropIndex = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DropIndex',
request_serializer=milvus__pb2.DropIndexRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.Insert = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/Insert',
request_serializer=milvus__pb2.InsertRequest.SerializeToString,
response_deserializer=milvus__pb2.MutationResult.FromString,
)
self.Delete = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/Delete',
request_serializer=milvus__pb2.DeleteRequest.SerializeToString,
response_deserializer=milvus__pb2.MutationResult.FromString,
)
self.Upsert = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/Upsert',
request_serializer=milvus__pb2.UpsertRequest.SerializeToString,
response_deserializer=milvus__pb2.MutationResult.FromString,
)
self.Search = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/Search',
request_serializer=milvus__pb2.SearchRequest.SerializeToString,
response_deserializer=milvus__pb2.SearchResults.FromString,
)
self.HybridSearch = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/HybridSearch',
request_serializer=milvus__pb2.HybridSearchRequest.SerializeToString,
response_deserializer=milvus__pb2.SearchResults.FromString,
)
self.Flush = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/Flush',
request_serializer=milvus__pb2.FlushRequest.SerializeToString,
response_deserializer=milvus__pb2.FlushResponse.FromString,
)
self.Query = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/Query',
request_serializer=milvus__pb2.QueryRequest.SerializeToString,
response_deserializer=milvus__pb2.QueryResults.FromString,
)
self.CalcDistance = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/CalcDistance',
request_serializer=milvus__pb2.CalcDistanceRequest.SerializeToString,
response_deserializer=milvus__pb2.CalcDistanceResults.FromString,
)
self.FlushAll = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/FlushAll',
request_serializer=milvus__pb2.FlushAllRequest.SerializeToString,
response_deserializer=milvus__pb2.FlushAllResponse.FromString,
)
self.AddCollectionField = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/AddCollectionField',
request_serializer=milvus__pb2.AddCollectionFieldRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.GetFlushState = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetFlushState',
request_serializer=milvus__pb2.GetFlushStateRequest.SerializeToString,
response_deserializer=milvus__pb2.GetFlushStateResponse.FromString,
)
self.GetFlushAllState = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetFlushAllState',
request_serializer=milvus__pb2.GetFlushAllStateRequest.SerializeToString,
response_deserializer=milvus__pb2.GetFlushAllStateResponse.FromString,
)
self.GetPersistentSegmentInfo = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetPersistentSegmentInfo',
request_serializer=milvus__pb2.GetPersistentSegmentInfoRequest.SerializeToString,
response_deserializer=milvus__pb2.GetPersistentSegmentInfoResponse.FromString,
)
self.GetQuerySegmentInfo = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetQuerySegmentInfo',
request_serializer=milvus__pb2.GetQuerySegmentInfoRequest.SerializeToString,
response_deserializer=milvus__pb2.GetQuerySegmentInfoResponse.FromString,
)
self.GetReplicas = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetReplicas',
request_serializer=milvus__pb2.GetReplicasRequest.SerializeToString,
response_deserializer=milvus__pb2.GetReplicasResponse.FromString,
)
self.Dummy = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/Dummy',
request_serializer=milvus__pb2.DummyRequest.SerializeToString,
response_deserializer=milvus__pb2.DummyResponse.FromString,
)
self.RegisterLink = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/RegisterLink',
request_serializer=milvus__pb2.RegisterLinkRequest.SerializeToString,
response_deserializer=milvus__pb2.RegisterLinkResponse.FromString,
)
self.GetMetrics = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetMetrics',
request_serializer=milvus__pb2.GetMetricsRequest.SerializeToString,
response_deserializer=milvus__pb2.GetMetricsResponse.FromString,
)
self.GetComponentStates = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetComponentStates',
request_serializer=milvus__pb2.GetComponentStatesRequest.SerializeToString,
response_deserializer=milvus__pb2.ComponentStates.FromString,
)
self.LoadBalance = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/LoadBalance',
request_serializer=milvus__pb2.LoadBalanceRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.GetCompactionState = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetCompactionState',
request_serializer=milvus__pb2.GetCompactionStateRequest.SerializeToString,
response_deserializer=milvus__pb2.GetCompactionStateResponse.FromString,
)
self.ManualCompaction = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/ManualCompaction',
request_serializer=milvus__pb2.ManualCompactionRequest.SerializeToString,
response_deserializer=milvus__pb2.ManualCompactionResponse.FromString,
)
self.GetCompactionStateWithPlans = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetCompactionStateWithPlans',
request_serializer=milvus__pb2.GetCompactionPlansRequest.SerializeToString,
response_deserializer=milvus__pb2.GetCompactionPlansResponse.FromString,
)
self.Import = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/Import',
request_serializer=milvus__pb2.ImportRequest.SerializeToString,
response_deserializer=milvus__pb2.ImportResponse.FromString,
)
self.GetImportState = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetImportState',
request_serializer=milvus__pb2.GetImportStateRequest.SerializeToString,
response_deserializer=milvus__pb2.GetImportStateResponse.FromString,
)
self.ListImportTasks = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/ListImportTasks',
request_serializer=milvus__pb2.ListImportTasksRequest.SerializeToString,
response_deserializer=milvus__pb2.ListImportTasksResponse.FromString,
)
self.CreateCredential = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/CreateCredential',
request_serializer=milvus__pb2.CreateCredentialRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.UpdateCredential = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/UpdateCredential',
request_serializer=milvus__pb2.UpdateCredentialRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.DeleteCredential = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DeleteCredential',
request_serializer=milvus__pb2.DeleteCredentialRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.ListCredUsers = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/ListCredUsers',
request_serializer=milvus__pb2.ListCredUsersRequest.SerializeToString,
response_deserializer=milvus__pb2.ListCredUsersResponse.FromString,
)
self.CreateRole = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/CreateRole',
request_serializer=milvus__pb2.CreateRoleRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.DropRole = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DropRole',
request_serializer=milvus__pb2.DropRoleRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.OperateUserRole = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/OperateUserRole',
request_serializer=milvus__pb2.OperateUserRoleRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.SelectRole = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/SelectRole',
request_serializer=milvus__pb2.SelectRoleRequest.SerializeToString,
response_deserializer=milvus__pb2.SelectRoleResponse.FromString,
)
self.SelectUser = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/SelectUser',
request_serializer=milvus__pb2.SelectUserRequest.SerializeToString,
response_deserializer=milvus__pb2.SelectUserResponse.FromString,
)
self.OperatePrivilege = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/OperatePrivilege',
request_serializer=milvus__pb2.OperatePrivilegeRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.OperatePrivilegeV2 = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/OperatePrivilegeV2',
request_serializer=milvus__pb2.OperatePrivilegeV2Request.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.SelectGrant = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/SelectGrant',
request_serializer=milvus__pb2.SelectGrantRequest.SerializeToString,
response_deserializer=milvus__pb2.SelectGrantResponse.FromString,
)
self.GetVersion = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetVersion',
request_serializer=milvus__pb2.GetVersionRequest.SerializeToString,
response_deserializer=milvus__pb2.GetVersionResponse.FromString,
)
self.CheckHealth = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/CheckHealth',
request_serializer=milvus__pb2.CheckHealthRequest.SerializeToString,
response_deserializer=milvus__pb2.CheckHealthResponse.FromString,
)
self.CreateResourceGroup = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/CreateResourceGroup',
request_serializer=milvus__pb2.CreateResourceGroupRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.DropResourceGroup = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DropResourceGroup',
request_serializer=milvus__pb2.DropResourceGroupRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.UpdateResourceGroups = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/UpdateResourceGroups',
request_serializer=milvus__pb2.UpdateResourceGroupsRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.TransferNode = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/TransferNode',
request_serializer=milvus__pb2.TransferNodeRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.TransferReplica = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/TransferReplica',
request_serializer=milvus__pb2.TransferReplicaRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.ListResourceGroups = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/ListResourceGroups',
request_serializer=milvus__pb2.ListResourceGroupsRequest.SerializeToString,
response_deserializer=milvus__pb2.ListResourceGroupsResponse.FromString,
)
self.DescribeResourceGroup = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DescribeResourceGroup',
request_serializer=milvus__pb2.DescribeResourceGroupRequest.SerializeToString,
response_deserializer=milvus__pb2.DescribeResourceGroupResponse.FromString,
)
self.RenameCollection = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/RenameCollection',
request_serializer=milvus__pb2.RenameCollectionRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.ListIndexedSegment = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/ListIndexedSegment',
request_serializer=feder__pb2.ListIndexedSegmentRequest.SerializeToString,
response_deserializer=feder__pb2.ListIndexedSegmentResponse.FromString,
)
self.DescribeSegmentIndexData = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DescribeSegmentIndexData',
request_serializer=feder__pb2.DescribeSegmentIndexDataRequest.SerializeToString,
response_deserializer=feder__pb2.DescribeSegmentIndexDataResponse.FromString,
)
self.Connect = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/Connect',
request_serializer=milvus__pb2.ConnectRequest.SerializeToString,
response_deserializer=milvus__pb2.ConnectResponse.FromString,
)
self.AllocTimestamp = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/AllocTimestamp',
request_serializer=milvus__pb2.AllocTimestampRequest.SerializeToString,
response_deserializer=milvus__pb2.AllocTimestampResponse.FromString,
)
self.CreateDatabase = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/CreateDatabase',
request_serializer=milvus__pb2.CreateDatabaseRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.DropDatabase = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DropDatabase',
request_serializer=milvus__pb2.DropDatabaseRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.ListDatabases = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/ListDatabases',
request_serializer=milvus__pb2.ListDatabasesRequest.SerializeToString,
response_deserializer=milvus__pb2.ListDatabasesResponse.FromString,
)
self.AlterDatabase = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/AlterDatabase',
request_serializer=milvus__pb2.AlterDatabaseRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.DescribeDatabase = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DescribeDatabase',
request_serializer=milvus__pb2.DescribeDatabaseRequest.SerializeToString,
response_deserializer=milvus__pb2.DescribeDatabaseResponse.FromString,
)
self.ReplicateMessage = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/ReplicateMessage',
request_serializer=milvus__pb2.ReplicateMessageRequest.SerializeToString,
response_deserializer=milvus__pb2.ReplicateMessageResponse.FromString,
)
self.BackupRBAC = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/BackupRBAC',
request_serializer=milvus__pb2.BackupRBACMetaRequest.SerializeToString,
response_deserializer=milvus__pb2.BackupRBACMetaResponse.FromString,
)
self.RestoreRBAC = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/RestoreRBAC',
request_serializer=milvus__pb2.RestoreRBACMetaRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.CreatePrivilegeGroup = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/CreatePrivilegeGroup',
request_serializer=milvus__pb2.CreatePrivilegeGroupRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.DropPrivilegeGroup = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DropPrivilegeGroup',
request_serializer=milvus__pb2.DropPrivilegeGroupRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.ListPrivilegeGroups = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/ListPrivilegeGroups',
request_serializer=milvus__pb2.ListPrivilegeGroupsRequest.SerializeToString,
response_deserializer=milvus__pb2.ListPrivilegeGroupsResponse.FromString,
)
self.OperatePrivilegeGroup = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/OperatePrivilegeGroup',
request_serializer=milvus__pb2.OperatePrivilegeGroupRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.RunAnalyzer = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/RunAnalyzer',
request_serializer=milvus__pb2.RunAnalyzerRequest.SerializeToString,
response_deserializer=milvus__pb2.RunAnalyzerResponse.FromString,
)
self.AddFileResource = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/AddFileResource',
request_serializer=milvus__pb2.AddFileResourceRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.RemoveFileResource = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/RemoveFileResource',
request_serializer=milvus__pb2.RemoveFileResourceRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.ListFileResources = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/ListFileResources',
request_serializer=milvus__pb2.ListFileResourcesRequest.SerializeToString,
response_deserializer=milvus__pb2.ListFileResourcesResponse.FromString,
)
self.AddUserTags = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/AddUserTags',
request_serializer=milvus__pb2.AddUserTagsRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.DeleteUserTags = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DeleteUserTags',
request_serializer=milvus__pb2.DeleteUserTagsRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.GetUserTags = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetUserTags',
request_serializer=milvus__pb2.GetUserTagsRequest.SerializeToString,
response_deserializer=milvus__pb2.GetUserTagsResponse.FromString,
)
self.ListUsersWithTag = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/ListUsersWithTag',
request_serializer=milvus__pb2.ListUsersWithTagRequest.SerializeToString,
response_deserializer=milvus__pb2.ListUsersWithTagResponse.FromString,
)
self.CreateRowPolicy = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/CreateRowPolicy',
request_serializer=milvus__pb2.CreateRowPolicyRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.DropRowPolicy = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/DropRowPolicy',
request_serializer=milvus__pb2.DropRowPolicyRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.ListRowPolicies = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/ListRowPolicies',
request_serializer=milvus__pb2.ListRowPoliciesRequest.SerializeToString,
response_deserializer=milvus__pb2.ListRowPoliciesResponse.FromString,
)
self.UpdateReplicateConfiguration = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/UpdateReplicateConfiguration',
request_serializer=milvus__pb2.UpdateReplicateConfigurationRequest.SerializeToString,
response_deserializer=common__pb2.Status.FromString,
)
self.GetReplicateInfo = channel.unary_unary(
'/milvus.proto.milvus.MilvusService/GetReplicateInfo',
request_serializer=milvus__pb2.GetReplicateInfoRequest.SerializeToString,
response_deserializer=milvus__pb2.GetReplicateInfoResponse.FromString,
)
self.CreateReplicateStream = channel.stream_stream(
'/milvus.proto.milvus.MilvusService/CreateReplicateStream',
request_serializer=milvus__pb2.ReplicateRequest.SerializeToString,
response_deserializer=milvus__pb2.ReplicateResponse.FromString,
)
| MilvusServiceStub |
python | kamyu104__LeetCode-Solutions | Python/symmetric-tree.py | {
"start": 154,
"end": 299
} | class ____(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# Iterative solution
| TreeNode |
python | ray-project__ray | rllib/offline/input_reader.py | {
"start": 3815,
"end": 4855
} | class ____(threading.Thread):
"""Thread that feeds a TF queue from a InputReader."""
def __init__(
self,
input_reader: InputReader,
queue: "tf1.FIFOQueue",
keys: List[str],
dtypes: "tf.dtypes.DType",
):
threading.Thread.__init__(self)
self.sess = tf1.get_default_session()
self.daemon = True
self.input_reader = input_reader
self.keys = keys
self.queue = queue
self.placeholders = [tf1.placeholder(dtype) for dtype in dtypes]
self.enqueue_op = queue.enqueue(dict(zip(keys, self.placeholders)))
def enqueue(self, batch: SampleBatchType):
data = {self.placeholders[i]: batch[key] for i, key in enumerate(self.keys)}
self.sess.run(self.enqueue_op, feed_dict=data)
def run(self):
while True:
try:
batch = self.input_reader.next()
self.enqueue(batch)
except Exception:
logger.exception("Error reading from input")
| _QueueRunner |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_image.py | {
"start": 41332,
"end": 63109
} | class ____(np.ndarray):
def __new__(cls, input_array, units):
obj = np.asarray(input_array).view(cls)
obj.units = units
return obj
def __array_finalize__(self, obj):
self.units = getattr(obj, "units", None)
def __getitem__(self, item):
units = getattr(self, "units", None)
ret = super().__getitem__(item)
if isinstance(ret, QuantityND) or units is not None:
ret = QuantityND(ret, units)
return ret
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
func = getattr(ufunc, method)
if "out" in kwargs:
return NotImplemented
if len(inputs) == 1:
i0 = inputs[0]
unit = getattr(i0, "units", "dimensionless")
out_arr = func(np.asarray(i0), **kwargs)
elif len(inputs) == 2:
i0 = inputs[0]
i1 = inputs[1]
u0 = getattr(i0, "units", "dimensionless")
u1 = getattr(i1, "units", "dimensionless")
u0 = u1 if u0 is None else u0
u1 = u0 if u1 is None else u1
if ufunc in [np.add, np.subtract]:
if u0 != u1:
raise ValueError
unit = u0
elif ufunc == np.multiply:
unit = f"{u0}*{u1}"
elif ufunc == np.divide:
unit = f"{u0}/({u1})"
elif ufunc in (np.greater, np.greater_equal,
np.equal, np.not_equal,
np.less, np.less_equal):
# Comparisons produce unitless booleans for output
unit = None
else:
return NotImplemented
out_arr = func(i0.view(np.ndarray), i1.view(np.ndarray), **kwargs)
else:
return NotImplemented
if unit is None:
out_arr = np.array(out_arr)
else:
out_arr = QuantityND(out_arr, unit)
return out_arr
@property
def v(self):
return self.view(np.ndarray)
def test_quantitynd():
q = QuantityND([1, 2], "m")
q0, q1 = q[:]
assert np.all(q.v == np.asarray([1, 2]))
assert q.units == "m"
assert np.all((q0 + q1).v == np.asarray([3]))
assert (q0 * q1).units == "m*m"
assert (q1 / q0).units == "m/(m)"
with pytest.raises(ValueError):
q0 + QuantityND(1, "s")
def test_imshow_quantitynd():
# generate a dummy ndarray subclass
arr = QuantityND(np.ones((2, 2)), "m")
fig, ax = plt.subplots()
ax.imshow(arr)
# executing the draw should not raise an exception
fig.canvas.draw()
@check_figures_equal()
def test_norm_change(fig_test, fig_ref):
# LogNorm should not mask anything invalid permanently.
data = np.full((5, 5), 1, dtype=np.float64)
data[0:2, :] = -1
masked_data = np.ma.array(data, mask=False)
masked_data.mask[0:2, 0:2] = True
cmap = mpl.colormaps['viridis'].with_extremes(under='w')
ax = fig_test.subplots()
im = ax.imshow(data, norm=colors.LogNorm(vmin=0.5, vmax=1),
extent=(0, 5, 0, 5), interpolation='nearest', cmap=cmap)
im.set_norm(colors.Normalize(vmin=-2, vmax=2))
im = ax.imshow(masked_data, norm=colors.LogNorm(vmin=0.5, vmax=1),
extent=(5, 10, 5, 10), interpolation='nearest', cmap=cmap)
im.set_norm(colors.Normalize(vmin=-2, vmax=2))
ax.set(xlim=(0, 10), ylim=(0, 10))
ax = fig_ref.subplots()
ax.imshow(data, norm=colors.Normalize(vmin=-2, vmax=2),
extent=(0, 5, 0, 5), interpolation='nearest', cmap=cmap)
ax.imshow(masked_data, norm=colors.Normalize(vmin=-2, vmax=2),
extent=(5, 10, 5, 10), interpolation='nearest', cmap=cmap)
ax.set(xlim=(0, 10), ylim=(0, 10))
@pytest.mark.parametrize('x', [-1, 1])
@check_figures_equal()
def test_huge_range_log(fig_test, fig_ref, x):
# parametrize over bad lognorm -1 values and large range 1 -> 1e20
data = np.full((5, 5), x, dtype=np.float64)
data[0:2, :] = 1E20
ax = fig_test.subplots()
ax.imshow(data, norm=colors.LogNorm(vmin=1, vmax=data.max()),
interpolation='nearest', cmap='viridis')
data = np.full((5, 5), x, dtype=np.float64)
data[0:2, :] = 1000
ax = fig_ref.subplots()
cmap = mpl.colormaps['viridis'].with_extremes(under='w')
ax.imshow(data, norm=colors.Normalize(vmin=1, vmax=data.max()),
interpolation='nearest', cmap=cmap)
@check_figures_equal()
def test_spy_box(fig_test, fig_ref):
# setting up reference and test
ax_test = fig_test.subplots(1, 3)
ax_ref = fig_ref.subplots(1, 3)
plot_data = (
[[1, 1], [1, 1]],
[[0, 0], [0, 0]],
[[0, 1], [1, 0]],
)
plot_titles = ["ones", "zeros", "mixed"]
for i, (z, title) in enumerate(zip(plot_data, plot_titles)):
ax_test[i].set_title(title)
ax_test[i].spy(z)
ax_ref[i].set_title(title)
ax_ref[i].imshow(z, interpolation='nearest',
aspect='equal', origin='upper', cmap='Greys',
vmin=0, vmax=1)
ax_ref[i].set_xlim(-0.5, 1.5)
ax_ref[i].set_ylim(1.5, -0.5)
ax_ref[i].xaxis.tick_top()
ax_ref[i].title.set_y(1.05)
ax_ref[i].xaxis.set_ticks_position('both')
ax_ref[i].xaxis.set_major_locator(
mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True)
)
ax_ref[i].yaxis.set_major_locator(
mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True)
)
@image_comparison(["nonuniform_and_pcolor.png"], style="mpl20")
def test_nonuniform_and_pcolor():
axs = plt.figure(figsize=(3, 3)).subplots(3, sharex=True, sharey=True)
for ax, interpolation in zip(axs, ["nearest", "bilinear"]):
im = NonUniformImage(ax, interpolation=interpolation)
im.set_data(np.arange(3) ** 2, np.arange(3) ** 2,
np.arange(9).reshape((3, 3)))
ax.add_image(im)
axs[2].pcolorfast( # PcolorImage
np.arange(4) ** 2, np.arange(4) ** 2, np.arange(9).reshape((3, 3)))
for ax in axs:
ax.set_axis_off()
# NonUniformImage "leaks" out of extents, not PColorImage.
ax.set(xlim=(0, 10))
@image_comparison(["nonuniform_logscale.png"], style="mpl20")
def test_nonuniform_logscale():
_, axs = plt.subplots(ncols=3, nrows=1)
for i in range(3):
ax = axs[i]
im = NonUniformImage(ax)
im.set_data(np.arange(1, 4) ** 2, np.arange(1, 4) ** 2,
np.arange(9).reshape((3, 3)))
ax.set_xlim(1, 16)
ax.set_ylim(1, 16)
ax.set_box_aspect(1)
if i == 1:
ax.set_xscale("log", base=2)
ax.set_yscale("log", base=2)
if i == 2:
ax.set_xscale("log", base=4)
ax.set_yscale("log", base=4)
ax.add_image(im)
@image_comparison(['rgba_antialias.png'], style='mpl20', remove_text=True, tol=0.02)
def test_rgba_antialias():
fig, axs = plt.subplots(2, 2, figsize=(3.5, 3.5), sharex=False,
sharey=False, constrained_layout=True)
N = 250
aa = np.ones((N, N))
aa[::2, :] = -1
x = np.arange(N) / N - 0.5
y = np.arange(N) / N - 0.5
X, Y = np.meshgrid(x, y)
R = np.sqrt(X**2 + Y**2)
f0 = 10
k = 75
# aliased concentric circles
a = np.sin(np.pi * 2 * (f0 * R + k * R**2 / 2))
# stripes on lhs
a[:int(N/2), :][R[:int(N/2), :] < 0.4] = -1
a[:int(N/2), :][R[:int(N/2), :] < 0.3] = 1
aa[:, int(N/2):] = a[:, int(N/2):]
# set some over/unders and NaNs
aa[20:50, 20:50] = np.nan
aa[70:90, 70:90] = 1e6
aa[70:90, 20:30] = -1e6
aa[70:90, 195:215] = 1e6
aa[20:30, 195:215] = -1e6
cmap = plt.colormaps["RdBu_r"].with_extremes(over='yellow', under='cyan')
axs = axs.flatten()
# zoom in
axs[0].imshow(aa, interpolation='nearest', cmap=cmap, vmin=-1.2, vmax=1.2)
axs[0].set_xlim(N/2-25, N/2+25)
axs[0].set_ylim(N/2+50, N/2-10)
# no anti-alias
axs[1].imshow(aa, interpolation='nearest', cmap=cmap, vmin=-1.2, vmax=1.2)
# data antialias: Note no purples, and white in circle. Note
# that alternating red and blue stripes become white.
axs[2].imshow(aa, interpolation='auto', interpolation_stage='data',
cmap=cmap, vmin=-1.2, vmax=1.2)
# rgba antialias: Note purples at boundary with circle. Note that
# alternating red and blue stripes become purple
axs[3].imshow(aa, interpolation='auto', interpolation_stage='rgba',
cmap=cmap, vmin=-1.2, vmax=1.2)
@check_figures_equal()
def test_upsample_interpolation_stage(fig_test, fig_ref):
"""
Show that interpolation_stage='auto' gives the same as 'data'
for upsampling.
"""
# Fixing random state for reproducibility. This non-standard seed
# gives red splotches for 'rgba'.
np.random.seed(19680801+9)
grid = np.random.rand(4, 4)
ax = fig_ref.subplots()
ax.imshow(grid, interpolation='bilinear', cmap='viridis',
interpolation_stage='data')
ax = fig_test.subplots()
ax.imshow(grid, interpolation='bilinear', cmap='viridis',
interpolation_stage='auto')
@check_figures_equal()
def test_downsample_interpolation_stage(fig_test, fig_ref):
"""
Show that interpolation_stage='auto' gives the same as 'rgba'
for downsampling.
"""
# Fixing random state for reproducibility
np.random.seed(19680801)
grid = np.random.rand(4000, 4000)
ax = fig_ref.subplots()
ax.imshow(grid, interpolation='auto', cmap='viridis',
interpolation_stage='rgba')
ax = fig_test.subplots()
ax.imshow(grid, interpolation='auto', cmap='viridis',
interpolation_stage='auto')
def test_rc_interpolation_stage():
for val in ["data", "rgba"]:
with mpl.rc_context({"image.interpolation_stage": val}):
assert plt.imshow([[1, 2]]).get_interpolation_stage() == val
for val in ["DATA", "foo", None]:
with pytest.raises(ValueError):
mpl.rcParams["image.interpolation_stage"] = val
# We check for the warning with a draw() in the test, but we also need to
# filter the warning as it is emitted by the figure test decorator
@pytest.mark.filterwarnings(r'ignore:Data with more than .* '
'cannot be accurately displayed')
@pytest.mark.parametrize('origin', ['upper', 'lower'])
@pytest.mark.parametrize(
'dim, size, msg', [['row', 2**23, r'2\*\*23 columns'],
['col', 2**24, r'2\*\*24 rows']])
@check_figures_equal()
def test_large_image(fig_test, fig_ref, dim, size, msg, origin):
# Check that Matplotlib downsamples images that are too big for AGG
# See issue #19276. Currently the fix only works for png output but not
# pdf or svg output.
ax_test = fig_test.subplots()
ax_ref = fig_ref.subplots()
array = np.zeros((1, size + 2))
array[:, array.size // 2:] = 1
if dim == 'col':
array = array.T
im = ax_test.imshow(array, vmin=0, vmax=1,
aspect='auto', extent=(0, 1, 0, 1),
interpolation='none',
origin=origin)
with pytest.warns(UserWarning,
match=f'Data with more than {msg} cannot be '
'accurately displayed.'):
fig_test.canvas.draw()
array = np.zeros((1, 2))
array[:, 1] = 1
if dim == 'col':
array = array.T
im = ax_ref.imshow(array, vmin=0, vmax=1, aspect='auto',
extent=(0, 1, 0, 1),
interpolation='none',
origin=origin)
@check_figures_equal()
def test_str_norms(fig_test, fig_ref):
t = np.random.rand(10, 10) * .8 + .1 # between 0 and 1
axts = fig_test.subplots(1, 5)
axts[0].imshow(t, norm="log")
axts[1].imshow(t, norm="log", vmin=.2)
axts[2].imshow(t, norm="symlog")
axts[3].imshow(t, norm="symlog", vmin=.3, vmax=.7)
axts[4].imshow(t, norm="logit", vmin=.3, vmax=.7)
axrs = fig_ref.subplots(1, 5)
axrs[0].imshow(t, norm=colors.LogNorm())
axrs[1].imshow(t, norm=colors.LogNorm(vmin=.2))
# same linthresh as SymmetricalLogScale's default.
axrs[2].imshow(t, norm=colors.SymLogNorm(linthresh=2))
axrs[3].imshow(t, norm=colors.SymLogNorm(linthresh=2, vmin=.3, vmax=.7))
axrs[4].imshow(t, norm="logit", clim=(.3, .7))
assert type(axts[0].images[0].norm) is colors.LogNorm # Exactly that class
with pytest.raises(ValueError):
axts[0].imshow(t, norm="foobar")
def test__resample_valid_output():
resample = functools.partial(mpl._image.resample, transform=Affine2D())
with pytest.raises(TypeError, match="incompatible function arguments"):
resample(np.zeros((9, 9)), None)
with pytest.raises(ValueError, match="different dimensionalities"):
resample(np.zeros((9, 9)), np.zeros((9, 9, 4)))
with pytest.raises(ValueError, match="different dimensionalities"):
resample(np.zeros((9, 9, 4)), np.zeros((9, 9)))
with pytest.raises(ValueError, match="3D input array must be RGBA"):
resample(np.zeros((9, 9, 3)), np.zeros((9, 9, 4)))
with pytest.raises(ValueError, match="3D output array must be RGBA"):
resample(np.zeros((9, 9, 4)), np.zeros((9, 9, 3)))
with pytest.raises(ValueError, match="mismatched types"):
resample(np.zeros((9, 9), np.uint8), np.zeros((9, 9)))
with pytest.raises(ValueError, match="must be C-contiguous"):
resample(np.zeros((9, 9)), np.zeros((9, 9)).T)
out = np.zeros((9, 9))
out.flags.writeable = False
with pytest.raises(ValueError, match="Output array must be writeable"):
resample(np.zeros((9, 9)), out)
@pytest.mark.parametrize("data, interpolation, expected",
[(np.array([[0.1, 0.3, 0.2]]), mimage.NEAREST,
np.array([[0.1, 0.1, 0.1, 0.3, 0.3, 0.3, 0.3, 0.2, 0.2, 0.2]])),
(np.array([[0.1, 0.3, 0.2]]), mimage.BILINEAR,
np.array([[0.1, 0.1, 0.15078125, 0.21096191, 0.27033691,
0.28476562, 0.2546875, 0.22460938, 0.20002441, 0.20002441]])),
]
)
def test_resample_nonaffine(data, interpolation, expected):
# Test that equivalent affine and nonaffine transforms resample the same
# Create a simple affine transform for scaling the input array
affine_transform = Affine2D().scale(sx=expected.shape[1] / data.shape[1], sy=1)
affine_result = np.empty_like(expected)
mimage.resample(data, affine_result, affine_transform, interpolation=interpolation)
assert_allclose(affine_result, expected)
# Create a nonaffine version of the same transform
# by compositing with a nonaffine identity transform
class NonAffineIdentityTransform(Transform):
input_dims = 2
output_dims = 2
def inverted(self):
return self
nonaffine_transform = NonAffineIdentityTransform() + affine_transform
nonaffine_result = np.empty_like(expected)
mimage.resample(data, nonaffine_result, nonaffine_transform,
interpolation=interpolation)
assert_allclose(nonaffine_result, expected, atol=5e-3)
def test_axesimage_get_shape():
# generate dummy image to test get_shape method
ax = plt.gca()
im = AxesImage(ax)
with pytest.raises(RuntimeError, match="You must first set the image array"):
im.get_shape()
z = np.arange(12, dtype=float).reshape((4, 3))
im.set_data(z)
assert im.get_shape() == (4, 3)
assert im.get_size() == im.get_shape()
def test_non_transdata_image_does_not_touch_aspect():
ax = plt.figure().add_subplot()
im = np.arange(4).reshape((2, 2))
ax.imshow(im, transform=ax.transAxes)
assert ax.get_aspect() == "auto"
ax.imshow(im, transform=Affine2D().scale(2) + ax.transData)
assert ax.get_aspect() == 1
ax.imshow(im, transform=ax.transAxes, aspect=2)
assert ax.get_aspect() == 2
@image_comparison(
['downsampling.png'], style='mpl20', remove_text=True, tol=0.09)
def test_downsampling():
N = 450
x = np.arange(N) / N - 0.5
y = np.arange(N) / N - 0.5
aa = np.ones((N, N))
aa[::2, :] = -1
X, Y = np.meshgrid(x, y)
R = np.sqrt(X**2 + Y**2)
f0 = 5
k = 100
a = np.sin(np.pi * 2 * (f0 * R + k * R**2 / 2))
# make the left hand side of this
a[:int(N / 2), :][R[:int(N / 2), :] < 0.4] = -1
a[:int(N / 2), :][R[:int(N / 2), :] < 0.3] = 1
aa[:, int(N / 3):] = a[:, int(N / 3):]
a = aa
fig, axs = plt.subplots(2, 3, figsize=(7, 6), layout='compressed')
axs[0, 0].imshow(a, interpolation='nearest', interpolation_stage='rgba',
cmap='RdBu_r')
axs[0, 0].set_xlim(125, 175)
axs[0, 0].set_ylim(250, 200)
axs[0, 0].set_title('Zoom')
for ax, interp, space in zip(axs.flat[1:], ['nearest', 'nearest', 'hanning',
'hanning', 'auto'],
['data', 'rgba', 'data', 'rgba', 'auto']):
ax.imshow(a, interpolation=interp, interpolation_stage=space,
cmap='RdBu_r')
ax.set_title(f"interpolation='{interp}'\nspace='{space}'")
@image_comparison(
['downsampling_speckle.png'], style='mpl20', remove_text=True, tol=0.09)
def test_downsampling_speckle():
fig, axs = plt.subplots(1, 2, figsize=(5, 2.7), sharex=True, sharey=True,
layout="compressed")
axs = axs.flatten()
img = ((np.arange(1024).reshape(-1, 1) * np.ones(720)) // 50).T
cm = plt.get_cmap("viridis").with_extremes(over="m")
norm = colors.LogNorm(vmin=3, vmax=11)
# old default cannot be tested because it creates over/under speckles
# in the following that are machine dependent.
axs[0].set_title("interpolation='auto', stage='rgba'")
axs[0].imshow(np.triu(img), cmap=cm, norm=norm, interpolation_stage='rgba')
# Should be same as previous
axs[1].set_title("interpolation='auto', stage='auto'")
axs[1].imshow(np.triu(img), cmap=cm, norm=norm)
@image_comparison(
['upsampling.png'], style='mpl20', remove_text=True)
def test_upsampling():
np.random.seed(19680801+9) # need this seed to get yellow next to blue
a = np.random.rand(4, 4)
fig, axs = plt.subplots(1, 3, figsize=(6.5, 3), layout='compressed')
im = axs[0].imshow(a, cmap='viridis')
axs[0].set_title(
"interpolation='auto'\nstage='antialaised'\n(default for upsampling)")
# probably what people want:
axs[1].imshow(a, cmap='viridis', interpolation='sinc')
axs[1].set_title(
"interpolation='sinc'\nstage='auto'\n(default for upsampling)")
# probably not what people want:
axs[2].imshow(a, cmap='viridis', interpolation='sinc', interpolation_stage='rgba')
axs[2].set_title("interpolation='sinc'\nstage='rgba'")
fig.colorbar(im, ax=axs, shrink=0.7, extend='both')
@pytest.mark.parametrize(
'dtype',
('float64', 'float32', 'int16', 'uint16', 'int8', 'uint8'),
)
@pytest.mark.parametrize('ndim', (2, 3))
def test_resample_dtypes(dtype, ndim):
# Issue 28448, incorrect dtype comparisons in C++ image_resample can raise
# ValueError: arrays must be of dtype byte, short, float32 or float64
rng = np.random.default_rng(4181)
shape = (2, 2) if ndim == 2 else (2, 2, 3)
data = rng.uniform(size=shape).astype(np.dtype(dtype, copy=True))
fig, ax = plt.subplots()
axes_image = ax.imshow(data)
# Before fix the following raises ValueError for some dtypes.
axes_image.make_image(None)[0]
@pytest.mark.parametrize('intp_stage', ('data', 'rgba'))
@check_figures_equal(extensions=['png', 'pdf', 'svg'])
def test_interpolation_stage_rgba_respects_alpha_param(fig_test, fig_ref, intp_stage):
axs_tst = fig_test.subplots(2, 3)
axs_ref = fig_ref.subplots(2, 3)
ny, nx = 3, 3
scalar_alpha = 0.5
array_alpha = np.random.rand(ny, nx)
# When the image does not have an alpha channel, alpha should be specified
# by the user or default to 1.0
im_rgb = np.random.rand(ny, nx, 3)
im_concat_default_a = np.ones((ny, nx, 1)) # alpha defaults to 1.0
im_rgba = np.concatenate( # combine rgb channels with array alpha
(im_rgb, array_alpha.reshape((ny, nx, 1))), axis=-1
)
axs_tst[0][0].imshow(im_rgb)
axs_ref[0][0].imshow(np.concatenate((im_rgb, im_concat_default_a), axis=-1))
axs_tst[0][1].imshow(im_rgb, interpolation_stage=intp_stage, alpha=scalar_alpha)
axs_ref[0][1].imshow(
np.concatenate( # combine rgb channels with broadcasted scalar alpha
(im_rgb, scalar_alpha * im_concat_default_a), axis=-1
), interpolation_stage=intp_stage
)
axs_tst[0][2].imshow(im_rgb, interpolation_stage=intp_stage, alpha=array_alpha)
axs_ref[0][2].imshow(im_rgba, interpolation_stage=intp_stage)
# When the image already has an alpha channel, multiply it by the
# scalar alpha param, or replace it by the array alpha param
axs_tst[1][0].imshow(im_rgba)
axs_ref[1][0].imshow(im_rgb, alpha=array_alpha)
axs_tst[1][1].imshow(im_rgba, interpolation_stage=intp_stage, alpha=scalar_alpha)
axs_ref[1][1].imshow(
np.concatenate( # combine rgb channels with scaled array alpha
(im_rgb, scalar_alpha * array_alpha.reshape((ny, nx, 1))), axis=-1
), interpolation_stage=intp_stage
)
new_array_alpha = np.random.rand(ny, nx)
axs_tst[1][2].imshow(im_rgba, interpolation_stage=intp_stage, alpha=new_array_alpha)
axs_ref[1][2].imshow(
np.concatenate( # combine rgb channels with new array alpha
(im_rgb, new_array_alpha.reshape((ny, nx, 1))), axis=-1
), interpolation_stage=intp_stage
)
| QuantityND |
python | sqlalchemy__sqlalchemy | test/sql/test_compare.py | {
"start": 5242,
"end": 5312
} | class ____(TypeDecorator):
cache_ok = True
impl = String
| MyType1 |
python | django__django | tests/migrations2/test_migrations_2_first/0002_second.py | {
"start": 43,
"end": 433
} | class ____(migrations.Migration):
dependencies = [("migrations2", "0001_initial")]
operations = [
migrations.CreateModel(
"Bookstore",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
],
),
]
| Migration |
python | google__jax | jax/_src/callback.py | {
"start": 15367,
"end": 15427
} | class ____(effects.Effect):
__str__ = lambda _: "IO"
| IOEffect |
python | scipy__scipy | scipy/fft/tests/test_helper.py | {
"start": 14718,
"end": 17814
} | class ____:
def test_definition(self, xp):
x = xp.asarray([0., 1, 2, 3, 4, -4, -3, -2, -1])
y = xp.asarray([-4., -3, -2, -1, 0, 1, 2, 3, 4])
xp_assert_close(fft.fftshift(x), y)
xp_assert_close(fft.ifftshift(y), x)
x = xp.asarray([0., 1, 2, 3, 4, -5, -4, -3, -2, -1])
y = xp.asarray([-5., -4, -3, -2, -1, 0, 1, 2, 3, 4])
xp_assert_close(fft.fftshift(x), y)
xp_assert_close(fft.ifftshift(y), x)
def test_inverse(self, xp):
for n in [1, 4, 9, 100, 211]:
x = xp.asarray(np.random.random((n,)))
xp_assert_close(fft.ifftshift(fft.fftshift(x)), x)
@skip_xp_backends('cupy', reason='cupy/cupy#8393')
def test_axes_keyword(self, xp):
freqs = xp.asarray([[0., 1, 2], [3, 4, -4], [-3, -2, -1]])
shifted = xp.asarray([[-1., -3, -2], [2, 0, 1], [-4, 3, 4]])
xp_assert_close(fft.fftshift(freqs, axes=(0, 1)), shifted)
xp_assert_close(fft.fftshift(freqs, axes=0), fft.fftshift(freqs, axes=(0,)))
xp_assert_close(fft.ifftshift(shifted, axes=(0, 1)), freqs)
xp_assert_close(fft.ifftshift(shifted, axes=0),
fft.ifftshift(shifted, axes=(0,)))
xp_assert_close(fft.fftshift(freqs), shifted)
xp_assert_close(fft.ifftshift(shifted), freqs)
@skip_xp_backends('cupy', reason='cupy/cupy#8393')
def test_uneven_dims(self, xp):
""" Test 2D input, which has uneven dimension sizes """
freqs = xp.asarray([
[0, 1],
[2, 3],
[4, 5]
], dtype=xp.float64)
# shift in dimension 0
shift_dim0 = xp.asarray([
[4, 5],
[0, 1],
[2, 3]
], dtype=xp.float64)
xp_assert_close(fft.fftshift(freqs, axes=0), shift_dim0)
xp_assert_close(fft.ifftshift(shift_dim0, axes=0), freqs)
xp_assert_close(fft.fftshift(freqs, axes=(0,)), shift_dim0)
xp_assert_close(fft.ifftshift(shift_dim0, axes=[0]), freqs)
# shift in dimension 1
shift_dim1 = xp.asarray([
[1, 0],
[3, 2],
[5, 4]
], dtype=xp.float64)
xp_assert_close(fft.fftshift(freqs, axes=1), shift_dim1)
xp_assert_close(fft.ifftshift(shift_dim1, axes=1), freqs)
# shift in both dimensions
shift_dim_both = xp.asarray([
[5, 4],
[1, 0],
[3, 2]
], dtype=xp.float64)
xp_assert_close(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both)
xp_assert_close(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs)
xp_assert_close(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both)
xp_assert_close(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs)
# axes=None (default) shift in all dimensions
xp_assert_close(fft.fftshift(freqs, axes=None), shift_dim_both)
xp_assert_close(fft.ifftshift(shift_dim_both, axes=None), freqs)
xp_assert_close(fft.fftshift(freqs), shift_dim_both)
xp_assert_close(fft.ifftshift(shift_dim_both), freqs)
| TestFFTShift |
python | ansible__ansible | lib/ansible/utils/_junit_xml.py | {
"start": 1721,
"end": 3605
} | class ____:
"""An individual test case."""
name: str
assertions: int | None = None
classname: str | None = None
status: str | None = None
time: decimal.Decimal | None = None
errors: list[TestError] = dataclasses.field(default_factory=list)
failures: list[TestFailure] = dataclasses.field(default_factory=list)
skipped: str | None = None
system_out: str | None = None
system_err: str | None = None
is_disabled: bool = False
@property
def is_failure(self) -> bool:
"""True if the test case contains failure info."""
return bool(self.failures)
@property
def is_error(self) -> bool:
"""True if the test case contains error info."""
return bool(self.errors)
@property
def is_skipped(self) -> bool:
"""True if the test case was skipped."""
return bool(self.skipped)
def get_attributes(self) -> dict[str, str]:
"""Return a dictionary of attributes for this instance."""
return _attributes(
assertions=self.assertions,
classname=self.classname,
name=self.name,
status=self.status,
time=self.time,
)
def get_xml_element(self) -> ET.Element:
"""Return an XML element representing this instance."""
element = ET.Element('testcase', self.get_attributes())
if self.skipped:
ET.SubElement(element, 'skipped').text = self.skipped
element.extend([error.get_xml_element() for error in self.errors])
element.extend([failure.get_xml_element() for failure in self.failures])
if self.system_out:
ET.SubElement(element, 'system-out').text = self.system_out
if self.system_err:
ET.SubElement(element, 'system-err').text = self.system_err
return element
@dataclasses.dataclass
| TestCase |
python | getsentry__sentry | src/sentry/middleware/integrations/tasks.py | {
"start": 875,
"end": 3549
} | class ____(ABC):
request_payload: dict[str, Any]
response_url: str
@property
@abstractmethod
def log_code(self) -> str:
raise NotImplementedError
def log_message(self, tag: str) -> str:
return f"{self.log_code}.{tag}"
def dispatch(self, region_names: Iterable[str]) -> Response | None:
results = [self._dispatch_to_region(name) for name in region_names]
successes = [r for r in results if r.was_successful()]
logger.info(
self.log_message("async_region_response"),
extra={
"regions": [r.region.name for r in successes],
"response_map": {r.region.name: r.response.status_code for r in results},
},
)
if successes:
# Typically we expect only one request to be made or only one successful
# response. If there are multiple, forward one arbitrarily.
return self._forward_response(successes[-1])
else:
return None
@abstractmethod
def unpack_payload(self, response: Response) -> Any:
raise NotImplementedError
def _dispatch_to_region(self, region_name: str) -> _AsyncResult:
region = get_region_by_name(region_name)
client = RegionSiloClient(region=region)
response = client.request(
method=self.request_payload["method"],
path=self.request_payload["path"],
headers=self.request_payload["headers"],
data=self.request_payload["body"].encode("utf-8"),
json=False,
raw_response=True,
)
return _AsyncResult(region, cast(Response, response))
def _forward_response(self, result: _AsyncResult) -> Response | None:
if not result.was_successful():
raise ValueError("Cannot forward a failed result")
try:
response_payload = self.unpack_payload(result.response)
if response_payload is None:
return None
integration_response = requests.post(self.response_url, json=response_payload)
except Exception as exc:
sentry_sdk.capture_exception(exc)
return None
else:
logger.info(
"integration.async_integration_response",
extra={
"path": self.request_payload["path"],
"region": result.region.name,
"region_status_code": result.response.status_code,
"integration_status_code": integration_response.status_code,
},
)
return integration_response
| _AsyncRegionDispatcher |
python | django__django | tests/generic_views/test_base.py | {
"start": 416,
"end": 576
} | class ____(View):
"""
A simple view with a docstring.
"""
def get(self, request):
return HttpResponse("This is a simple view")
| SimpleView |
python | huggingface__transformers | tests/models/fnet/test_modeling_fnet.py | {
"start": 1854,
"end": 8961
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
def get_config(self):
return FNetConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
tpu_short_seq_length=self.seq_length,
)
def create_and_check_model(self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels):
model = FNetModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_pretraining(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
model = FNetForPreTraining(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
token_type_ids=token_type_ids,
labels=token_labels,
next_sentence_label=sequence_labels,
)
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
model = FNetForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
model = FNetForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = FNetForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = FNetForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = FNetForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids}
return config, inputs_dict
@require_torch
| FNetModelTester |
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/xla_control_flow_ops_test.py | {
"start": 4907,
"end": 8015
} | class ____(PForTestCase):
def setUp(self):
self._enabled = control_flow_v2_toggles.control_flow_v2_enabled()
control_flow_v2_toggles.enable_control_flow_v2()
super(WhileV2Test, self).setUp()
def tearDown(self):
if not self._enabled:
control_flow_v2_toggles.disable_control_flow_v2()
super(WhileV2Test, self).tearDown()
def _test_loop_fn(self, loop_fn, iters, force_xla=False):
def f():
return pfor_control_flow_ops.pfor(loop_fn, iters)
@def_function.function
def jit_f():
with jit.experimental_jit_scope():
return f()
out = f()
jit_out = jit_f()
self.run_and_assert_equal(out, jit_out)
# TODO(agarwal): The following may complain about uncompilable nodes. Hence
# these are currently not enabled for all tests.
if force_xla:
out_exp_compile_f = def_function.function(jit_compile=True)(f)()
self.run_and_assert_equal(out, out_exp_compile_f)
out_xla_compile_f = xla.compile(f, inputs=[])
self.run_and_assert_equal(out, out_xla_compile_f)
def test_stateless_while(self):
x = random_ops.random_uniform([3, 5])
lengths = constant_op.constant([4, 0, 2])
def loop_fn(i):
x_i = array_ops.gather(x, i)
lengths_i = array_ops.gather(lengths, i)
return while_loop.while_loop(
lambda j, _: j < lengths_i,
lambda j, t: (j + 1, t + array_ops.gather(x_i, j)),
[0, 0.])
self._test_loop_fn(loop_fn, 3)
def test_while_with_variable(self):
if not context.executing_eagerly():
self.skipTest("Flaky with tf.Session")
v = resource_variable_ops.ResourceVariable(5.)
def loop_fn(_):
_, output = while_loop.while_loop(
lambda j, x: j < 4,
lambda j, x: (j + 1, x + v),
[0, 0.])
return output
self._test_loop_fn(loop_fn, 3)
def test_while_unstacked_condition(self):
def loop_fn(i):
return while_loop.while_loop(
lambda j, x: j < 4,
lambda j, x: (j + 1, x + i), [0, 0])
self._test_loop_fn(loop_fn, 3, force_xla=True)
def test_while_force_unstacked_condition(self):
# The while_loop in this setup is similar to the one in test_stateless_while
# whose condition is loop variant. However here we wrap the cond and body of
# the loop in a way that makes the while_loop condition pfor loop invariant.
# This allows xla compilation to work since the vectorized code no longer
# needs to perform dynamic partitioning of the inputs.
x = random_ops.random_uniform([3, 5])
lengths = constant_op.constant([4, 0, 2])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
lengths_i = array_ops.gather(lengths, i)
def _cond(j, _):
return j < lengths_i
def _body(j, t):
return (j + 1, t + array_ops.gather(x_i, j))
cond, body = _make_unstacked(_cond, _body, pfor_config)
return while_loop.while_loop(
cond,
body,
[True, 0, 0.])
self._test_loop_fn(loop_fn, 3, force_xla=True)
if __name__ == "__main__":
test.main()
| WhileV2Test |
python | PyCQA__pylint | tests/functional/r/regression/regression_issue_4633.py | {
"start": 264,
"end": 341
} | class ____(mock.spam):
def __init__(self):
self.queue = Queue()
| Ham |
python | scipy__scipy | scipy/spatial/tests/test_qhull.py | {
"start": 30398,
"end": 38273
} | class ____:
@pytest.mark.parametrize("qhull_opts, extra_pts", [
# option Qz (default for SciPy) will add
# an extra point at infinity
("Qbb Qc Qz", 1),
("Qbb Qc", 0),
])
@pytest.mark.parametrize("n_pts", [50, 100])
@pytest.mark.parametrize("ndim", [2, 3])
def test_point_region_structure(self,
qhull_opts,
n_pts,
extra_pts,
ndim):
# see gh-16773
rng = np.random.default_rng(7790)
points = rng.random((n_pts, ndim))
vor = Voronoi(points, qhull_options=qhull_opts)
pt_region = vor.point_region
assert pt_region.max() == n_pts - 1 + extra_pts
assert pt_region.size == len(vor.regions) - extra_pts
assert len(vor.regions) == n_pts + extra_pts
assert vor.points.shape[0] == n_pts
# if there is an empty sublist in the Voronoi
# regions data structure, it should never be
# indexed because it corresponds to an internally
# added point at infinity and is not a member of the
# generators (input points)
if extra_pts:
sublens = [len(x) for x in vor.regions]
# only one point at infinity (empty region)
# is allowed
assert sublens.count(0) == 1
assert sublens.index(0) not in pt_region
def test_masked_array_fails(self):
masked_array = np.ma.masked_all(1)
assert_raises(ValueError, qhull.Voronoi, masked_array)
def test_simple(self):
# Simple case with known Voronoi diagram
points = [(0, 0), (0, 1), (0, 2),
(1, 0), (1, 1), (1, 2),
(2, 0), (2, 1), (2, 2)]
# qhull v o Fv Qbb Qc Qz < dat
output = """
2
5 10 1
-10.101 -10.101
0.5 0.5
0.5 1.5
1.5 0.5
1.5 1.5
2 0 1
3 2 0 1
2 0 2
3 3 0 1
4 1 2 4 3
3 4 0 2
2 0 3
3 4 0 3
2 0 4
0
12
4 0 3 0 1
4 0 1 0 1
4 1 4 1 2
4 1 2 0 2
4 2 5 0 2
4 3 4 1 3
4 3 6 0 3
4 4 5 2 4
4 4 7 3 4
4 5 8 0 4
4 6 7 0 3
4 7 8 0 4
"""
self._compare_qvoronoi(points, output)
def _compare_qvoronoi(self, points, output, **kw):
"""Compare to output from 'qvoronoi o Fv < data' to Voronoi()"""
# Parse output
output = [list(map(float, x.split())) for x in output.strip().splitlines()]
nvertex = int(output[1][0])
vertices = list(map(tuple, output[3:2+nvertex])) # exclude inf
nregion = int(output[1][1])
regions = [[int(y)-1 for y in x[1:]]
for x in output[2+nvertex:2+nvertex+nregion]]
ridge_points = [[int(y) for y in x[1:3]]
for x in output[3+nvertex+nregion:]]
ridge_vertices = [[int(y)-1 for y in x[3:]]
for x in output[3+nvertex+nregion:]]
# Compare results
vor = qhull.Voronoi(points, **kw)
def sorttuple(x):
return tuple(sorted(x))
assert_allclose(vor.vertices, vertices)
assert_equal(set(map(tuple, vor.regions)),
set(map(tuple, regions)))
p1 = list(zip(list(map(sorttuple, ridge_points)),
list(map(sorttuple, ridge_vertices))))
p2 = list(zip(list(map(sorttuple, vor.ridge_points.tolist())),
list(map(sorttuple, vor.ridge_vertices))))
p1.sort()
p2.sort()
assert_equal(p1, p2)
@pytest.mark.parametrize("name", sorted(DATASETS))
def test_ridges(self, name):
# Check that the ridges computed by Voronoi indeed separate
# the regions of nearest neighborhood, by comparing the result
# to KDTree.
points = DATASETS[name]
tree = KDTree(points)
vor = qhull.Voronoi(points)
for p, v in vor.ridge_dict.items():
# consider only finite ridges
if not np.all(np.asarray(v) >= 0):
continue
ridge_midpoint = vor.vertices[v].mean(axis=0)
d = 1e-6 * (points[p[0]] - ridge_midpoint)
dist, k = tree.query(ridge_midpoint + d, k=1)
assert_equal(k, p[0])
dist, k = tree.query(ridge_midpoint - d, k=1)
assert_equal(k, p[1])
def test_furthest_site(self):
points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)]
# qhull v o Fv Qbb Qc Qu < dat
output = """
2
3 5 1
-10.101 -10.101
0.6000000000000001 0.5
0.5 0.6000000000000001
3 0 2 1
2 0 1
2 0 2
0
3 0 2 1
5
4 0 2 0 2
4 0 4 1 2
4 0 1 0 1
4 1 4 0 1
4 2 4 0 2
"""
self._compare_qvoronoi(points, output, furthest_site=True)
def test_furthest_site_flag(self):
points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)]
vor = Voronoi(points)
assert_equal(vor.furthest_site,False)
vor = Voronoi(points,furthest_site=True)
assert_equal(vor.furthest_site,True)
@pytest.mark.fail_slow(10)
@pytest.mark.parametrize("name", sorted(INCREMENTAL_DATASETS))
def test_incremental(self, name):
# Test incremental construction of the triangulation
if INCREMENTAL_DATASETS[name][0][0].shape[1] > 3:
# too slow (testing of the result --- qhull is still fast)
return
chunks, opts = INCREMENTAL_DATASETS[name]
points = np.concatenate(chunks, axis=0)
obj = qhull.Voronoi(chunks[0], incremental=True,
qhull_options=opts)
for chunk in chunks[1:]:
obj.add_points(chunk)
obj2 = qhull.Voronoi(points)
obj3 = qhull.Voronoi(chunks[0], incremental=True,
qhull_options=opts)
if len(chunks) > 1:
obj3.add_points(np.concatenate(chunks[1:], axis=0),
restart=True)
# -- Check that the incremental mode agrees with upfront mode
assert_equal(len(obj.point_region), len(obj2.point_region))
assert_equal(len(obj.point_region), len(obj3.point_region))
# The vertices may be in different order or duplicated in
# the incremental map
for objx in obj, obj3:
vertex_map = {-1: -1}
for i, v in enumerate(objx.vertices):
for j, v2 in enumerate(obj2.vertices):
if np.allclose(v, v2):
vertex_map[i] = j
def remap(x):
if hasattr(x, '__len__'):
return tuple({remap(y) for y in x})
try:
return vertex_map[x]
except KeyError as e:
message = (f"incremental result has spurious vertex "
f"at {objx.vertices[x]!r}")
raise AssertionError(message) from e
def simplified(x):
items = set(map(sorted_tuple, x))
if () in items:
items.remove(())
items = [x for x in items if len(x) > 1]
items.sort()
return items
assert_equal(
simplified(remap(objx.regions)),
simplified(obj2.regions)
)
assert_equal(
simplified(remap(objx.ridge_vertices)),
simplified(obj2.ridge_vertices)
)
# XXX: compare ridge_points --- not clear exactly how to do this
| TestVoronoi |
python | python-openxml__python-docx | src/docx/shared.py | {
"start": 3170,
"end": 4194
} | class ____(Tuple[int, int, int]):
"""Immutable value object defining a particular RGB color."""
def __new__(cls, r: int, g: int, b: int):
msg = "RGBColor() takes three integer values 0-255"
for val in (r, g, b):
if not isinstance(val, int): # pyright: ignore[reportUnnecessaryIsInstance]
raise TypeError(msg)
if val < 0 or val > 255:
raise ValueError(msg)
return super(RGBColor, cls).__new__(cls, (r, g, b))
def __repr__(self):
return "RGBColor(0x%02x, 0x%02x, 0x%02x)" % self
def __str__(self):
"""Return a hex string rgb value, like '3C2F80'."""
return "%02X%02X%02X" % self
@classmethod
def from_string(cls, rgb_hex_str: str) -> RGBColor:
"""Return a new instance from an RGB color hex string like ``'3C2F80'``."""
r = int(rgb_hex_str[:2], 16)
g = int(rgb_hex_str[2:4], 16)
b = int(rgb_hex_str[4:], 16)
return cls(r, g, b)
T = TypeVar("T")
| RGBColor |
python | kamyu104__LeetCode-Solutions | Python/find-the-last-marked-nodes-in-tree.py | {
"start": 4611,
"end": 6446
} | class ____(object):
def lastMarkedNodes(self, edges):
"""
:type edges: List[List[int]]
:rtype: List[int]
"""
def increase(x):
return (x[0]+1, x[1])
def iter_dfs1():
dp = [[(0, u)]*2 for u in xrange(len(adj))]
stk = [(1, (0, -1))]
while stk:
step, args = stk.pop()
if step == 1:
u, p = args
stk.append((2, (u, p, 0)))
elif step == 2:
u, p, i = args
if i == len(adj[u]):
continue
stk.append((2, (u, p, i+1)))
v = adj[u][i]
if v == p:
continue
stk.append((3, (v, u)))
stk.append((1, (v, u)))
elif step == 3:
v, u = args
curr = increase(dp[v][0])
for i in xrange(len(dp[u])):
if curr > dp[u][i]:
curr, dp[u][i] = dp[u][i], curr
return dp
def iter_dfs2():
result = [-1]*len(adj)
stk = [(0, -1, (0, -1))]
while stk:
u, p, curr = stk.pop()
result[u] = max(dp[u][0], curr)[1]
for v in reversed(adj[u]):
if v == p:
continue
stk.append((v, u, increase(max(dp[u][dp[u][0][1] == dp[v][0][1]], curr))))
return result
adj = [[] for _ in xrange(len(edges)+1)]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
dp = iter_dfs1()
return iter_dfs2()
# Time: O(n)
# Space: O(n)
# dfs, tree dp
| Solution4 |
python | getsentry__sentry | src/sentry/replays/lib/new_query/conditions.py | {
"start": 5523,
"end": 6772
} | class ____(GenericBase):
"""Non-empty string scalar condition class."""
@staticmethod
def visit_eq(expression: Expression, value: str) -> Condition:
return StringScalar.visit_eq(expression, value)
@staticmethod
def visit_neq(expression: Expression, value: str) -> Condition:
return And(
conditions=[
StringScalar.visit_neq(expression, value),
StringScalar.visit_neq(expression, ""),
]
)
@staticmethod
def visit_match(expression: Expression, value: str) -> Condition:
return StringScalar.visit_match(expression, value)
@staticmethod
def visit_not_match(expression: Expression, value: str) -> Condition:
return And(
conditions=[
StringScalar.visit_not_match(expression, value),
StringScalar.visit_neq(expression, ""),
]
)
@staticmethod
def visit_in(expression: Expression, value: list[str]) -> Condition:
return StringScalar.visit_in(expression, value)
@staticmethod
def visit_not_in(expression: Expression, value: list[str]) -> Condition:
return StringScalar.visit_not_in(expression, value + [""])
| NonEmptyStringScalar |
python | numba__numba | versioneer.py | {
"start": 65126,
"end": 83607
} | class ____(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass(cmdclass=None):
"""Get the custom setuptools subclasses used by Versioneer.
If the package uses a different cmdclass (e.g. one from numpy), it
should be provide as an argument.
"""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to it's pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/python-versioneer/python-versioneer/issues/52
cmds = {} if cmdclass is None else cmdclass.copy()
# we add "version" to setuptools
from setuptools import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# pip install -e . and setuptool/editable_wheel will invoke build_py
# but the build_py command is not expected to copy any files.
# we override different "build_py" commands for both environments
if 'build_py' in cmds:
_build_py = cmds['build_py']
else:
from setuptools.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
if getattr(self, "editable_mode", False):
# During editable installs `.py` and data files are
# not copied to build_lib
return
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if 'build_ext' in cmds:
_build_ext = cmds['build_ext']
else:
from setuptools.command.build_ext import build_ext as _build_ext
class cmd_build_ext(_build_ext):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_ext.run(self)
if self.inplace:
# build_ext --inplace will only build extensions in
# build/lib<..> dir with no _version.py to write to.
# As in place builds will already have a _version.py
# in the module dir, we do not need to write one.
return
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if not cfg.versionfile_build:
return
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
if not os.path.exists(target_versionfile):
print(f"Warning: {target_versionfile} does not exist, skipping "
"version update. This can happen if you are running build_ext "
"without first running build_py.")
return
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_ext"] = cmd_build_ext
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
try:
from py2exe.setuptools_buildexe import py2exe as _py2exe
except ImportError:
from py2exe.distutils_buildexe import py2exe as _py2exe
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["py2exe"] = cmd_py2exe
# sdist farms its file list building out to egg_info
if 'egg_info' in cmds:
_egg_info = cmds['egg_info']
else:
from setuptools.command.egg_info import egg_info as _egg_info
class cmd_egg_info(_egg_info):
def find_sources(self):
# egg_info.find_sources builds the manifest list and writes it
# in one shot
super().find_sources()
# Modify the filelist and normalize it
root = get_root()
cfg = get_config_from_root(root)
self.filelist.append('versioneer.py')
if cfg.versionfile_source:
# There are rare cases where versionfile_source might not be
# included by default, so we must be explicit
self.filelist.append(cfg.versionfile_source)
self.filelist.sort()
self.filelist.remove_duplicates()
# The write method is hidden in the manifest_maker instance that
# generated the filelist and was thrown away
# We will instead replicate their final normalization (to unicode,
# and POSIX-style paths)
from setuptools import unicode_utils
normalized = [unicode_utils.filesys_decode(f).replace(os.sep, '/')
for f in self.filelist.files]
manifest_filename = os.path.join(self.egg_info, 'SOURCES.txt')
with open(manifest_filename, 'w') as fobj:
fobj.write('\n'.join(normalized))
cmds['egg_info'] = cmd_egg_info
# we override different "sdist" commands for both environments
if 'sdist' in cmds:
_sdist = cmds['sdist']
else:
from setuptools.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
OLD_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
INIT_PY_SNIPPET = """
from . import {0}
__version__ = {0}.get_versions()['version']
"""
def do_setup():
"""Do main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (OSError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (OSError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except OSError:
old = ""
module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0]
snippet = INIT_PY_SNIPPET.format(module)
if OLD_SNIPPET in old:
print(" replacing boilerplate in %s" % ipy)
with open(ipy, "w") as f:
f.write(old.replace(OLD_SNIPPET, snippet))
elif snippet not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(snippet)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Returns the number of problems found (0 when setup.py looks correct):
    one error if any of the required markers (``import versioneer``,
    ``versioneer.get_cmdclass()``, ``versioneer.get_version()``) is missing,
    plus one error if obsolete ``versioneer.VCS = `` style setters (which
    now belong in setup.cfg) are still present.
    """
    found = set()
    setters = False
    errors = 0
    with open("setup.py", "r") as f:
        # Iterate the file lazily instead of materializing readlines().
        for line in f:
            if "import versioneer" in line:
                found.add("import")
            if "versioneer.get_cmdclass()" in line:
                found.add("cmdclass")
            if "versioneer.get_version()" in line:
                found.add("get_version")
            # Old-style in-setup.py configuration is deprecated.
            if "versioneer.VCS" in line or "versioneer.versionfile_source" in line:
                setters = True
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print(" cmdclass=versioneer.get_cmdclass(), ...)")
        print("")
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors
def setup_command():
    """Set up Versioneer and exit with appropriate error code."""
    # Non-zero exit status iff either phase reported problems.
    problem_count = do_setup() + scan_setup_py()
    sys.exit(1 if problem_count else 0)
if __name__ == "__main__":
    # Dispatch on the requested sub-command.  Guard against a missing
    # argument so a bare invocation falls through quietly instead of
    # crashing with an IndexError traceback.
    cmd = sys.argv[1] if len(sys.argv) > 1 else None
    if cmd == "setup":
        setup_command()
| VersioneerBadRootError |
python | mlflow__mlflow | tests/pytorch/test_pytorch_model_export.py | {
"start": 4877,
"end": 46375
} | class ____(get_subclassed_model_definition()):
"""
A custom PyTorch model class defined in the test module scope. This is a subclass of
``torch.nn.Module``.
"""
@pytest.fixture(scope="module")
def module_scoped_subclassed_model(data):
    """
    Module-scoped fixture yielding a trained instance of the custom
    ``torch.nn.Module`` subclass defined in this test module's scope.
    """
    net = ModuleScopedSubclassedModel()
    train_model(model=net, data=data)
    return net
@pytest.fixture
def model_path(tmp_path):
    """Per-test destination path (string) for saving a model."""
    return os.path.join(str(tmp_path), "model")
@pytest.fixture
def pytorch_custom_env(tmp_path):
    """Write a conda env YAML with extra pip deps and return its path."""
    env_path = os.path.join(tmp_path, "conda_env.yml")
    _mlflow_conda_env(env_path, additional_pip_deps=["pytorch", "torchvision", "pytest"])
    return env_path
def _predict(model, data):
    """Run ``model`` over ``data`` in fixed-size batches and return a 1-D
    numpy array of predictions, in dataset order."""
    batch_size = 16
    loader = DataLoader(
        get_dataset(data),
        batch_size=batch_size,
        num_workers=4,
        shuffle=False,
        drop_last=False,
    )
    predictions = np.zeros((len(loader.sampler),))
    model.eval()
    with torch.no_grad():
        for batch_index, batch in enumerate(loader):
            start = batch_index * batch_size
            outputs = model(batch[0]).squeeze(dim=1).numpy()
            predictions[start : start + batch_size] = outputs
    return predictions
@pytest.fixture
def sequential_predicted(sequential_model, data):
    """Reference predictions of ``sequential_model`` over ``data``."""
    return _predict(model=sequential_model, data=data)
@pytest.mark.parametrize("scripted_model", [True, False])
def test_signature_and_examples_are_saved_correctly(sequential_model, data, iris_tensor_spec):
    """Saving with any combination of signature / input example round-trips both."""
    model = sequential_model
    example_ = data[0].head(3).values.astype(np.float32)
    # Exercise all four (signature, input example) combinations.
    for signature in (None, iris_tensor_spec):
        for example in (None, example_):
            with TempDir() as tmp:
                path = tmp.path("model")
                mlflow.pytorch.save_model(
                    model, path=path, signature=signature, input_example=example
                )
                mlflow_model = Model.load(path)
                if signature is None and example is None:
                    assert mlflow_model.signature is None
                else:
                    # NOTE(review): with only an example given, the signature
                    # appears to be inferred and must still equal the spec.
                    assert mlflow_model.signature == iris_tensor_spec
                if example is None:
                    assert mlflow_model.saved_input_example_info is None
                else:
                    np.testing.assert_allclose(_read_example(mlflow_model, path), example)
@pytest.mark.parametrize("scripted_model", [True, False])
def test_log_model(sequential_model, data, sequential_predicted):
try:
artifact_path = "pytorch"
model_info = mlflow.pytorch.log_model(sequential_model, name=artifact_path)
sequential_model_loaded = mlflow.pytorch.load_model(model_uri=model_info.model_uri)
test_predictions = _predict(sequential_model_loaded, data)
np.testing.assert_array_equal(test_predictions, sequential_predicted)
finally:
mlflow.end_run()
def test_log_model_calls_register_model(module_scoped_subclassed_model):
custom_pickle_module = pickle
artifact_path = "model"
register_model_patch = mock.patch("mlflow.tracking._model_registry.fluent._register_model")
with mlflow.start_run(), register_model_patch:
model_info = mlflow.pytorch.log_model(
module_scoped_subclassed_model,
name=artifact_path,
pickle_module=custom_pickle_module,
registered_model_name="AdsModel1",
)
assert_register_model_called_with_local_model_path(
register_model_mock=mlflow.tracking._model_registry.fluent._register_model,
model_uri=model_info.model_uri,
registered_model_name="AdsModel1",
)
def test_log_model_no_registered_model_name(module_scoped_subclassed_model):
custom_pickle_module = pickle
artifact_path = "model"
register_model_patch = mock.patch("mlflow.tracking._model_registry.fluent._register_model")
with mlflow.start_run(), register_model_patch:
mlflow.pytorch.log_model(
module_scoped_subclassed_model,
name=artifact_path,
pickle_module=custom_pickle_module,
)
mlflow.tracking._model_registry.fluent._register_model.assert_not_called()
@pytest.mark.parametrize("scripted_model", [True, False])
def test_raise_exception(sequential_model):
    """save_model/load_model error cases raise the expected exception types."""
    with TempDir(chdr=True, remove_on_exit=True) as tmp:
        path = tmp.path("model")
        # Loading a nonexistent path fails with IOError.
        with pytest.raises(IOError, match="No such file or directory"):
            mlflow.pytorch.load_model(path)
        # Saving something that is not a torch.nn.Module is rejected.
        with pytest.raises(TypeError, match="Argument 'pytorch_model' should be a torch.nn.Module"):
            mlflow.pytorch.save_model([1, 2, 3], path)
        mlflow.pytorch.save_model(sequential_model, path)
        # Saving again to the same path is rejected rather than overwritten.
        with pytest.raises(MlflowException, match=f"Path '{os.path.abspath(path)}' already exists"):
            mlflow.pytorch.save_model(sequential_model, path)
        import sklearn.neighbors as knn
        from mlflow import sklearn
        path = tmp.path("knn.pkl")
        # NOTE(review): `knn` is rebound from the module to an estimator
        # instance here; the pickle written to knn.pkl is never read back.
        knn = knn.KNeighborsClassifier()
        with open(path, "wb") as f:
            pickle.dump(knn, f)
        path = tmp.path("knn")
        sklearn.save_model(knn, path=path)
        # Loading a non-pytorch model with the pytorch loader fails.
        with pytest.raises(MlflowException, match='Model does not have the "pytorch" flavor'):
            mlflow.pytorch.load_model(path)
@pytest.mark.parametrize("scripted_model", [True, False])
def test_save_and_load_model(sequential_model, model_path, data, sequential_predicted):
mlflow.pytorch.save_model(sequential_model, model_path)
# Loading pytorch model
sequential_model_loaded = mlflow.pytorch.load_model(model_path)
np.testing.assert_array_equal(_predict(sequential_model_loaded, data), sequential_predicted)
# Loading pyfunc model
pyfunc_loaded = mlflow.pyfunc.load_model(model_path)
np.testing.assert_array_almost_equal(
pyfunc_loaded.predict(data[0]).values[:, 0], sequential_predicted, decimal=4
)
@pytest.mark.parametrize("scripted_model", [True, False])
def test_pyfunc_model_works_with_np_input_type(
sequential_model, model_path, data, sequential_predicted
):
mlflow.pytorch.save_model(sequential_model, model_path)
# Loading pyfunc model
pyfunc_loaded = mlflow.pyfunc.load_model(model_path)
# predict works with dataframes
df_result = pyfunc_loaded.predict(data[0])
assert type(df_result) == pd.DataFrame
np.testing.assert_array_almost_equal(df_result.values[:, 0], sequential_predicted, decimal=4)
# predict works with numpy ndarray
np_result = pyfunc_loaded.predict(data[0].values.astype(np.float32))
assert type(np_result) == np.ndarray
np.testing.assert_array_almost_equal(np_result[:, 0], sequential_predicted, decimal=4)
# predict does not work with lists
with pytest.raises(
TypeError, match="The PyTorch flavor does not support List or Dict input types"
):
pyfunc_loaded.predict([1, 2, 3, 4])
# predict does not work with scalars
with pytest.raises(TypeError, match="Input data should be pandas.DataFrame or numpy.ndarray"):
pyfunc_loaded.predict(4)
@pytest.mark.parametrize("scripted_model", [True, False])
def test_load_model_from_remote_uri_succeeds(
sequential_model, model_path, mock_s3_bucket, data, sequential_predicted
):
mlflow.pytorch.save_model(sequential_model, model_path)
artifact_root = f"s3://{mock_s3_bucket}"
artifact_path = "model"
artifact_repo = S3ArtifactRepository(artifact_root)
artifact_repo.log_artifacts(model_path, artifact_path=artifact_path)
model_uri = artifact_root + "/" + artifact_path
sequential_model_loaded = mlflow.pytorch.load_model(model_uri=model_uri)
np.testing.assert_array_equal(_predict(sequential_model_loaded, data), sequential_predicted)
@pytest.mark.parametrize("scripted_model", [True, False])
def test_model_save_persists_specified_conda_env_in_mlflow_model_directory(
sequential_model, model_path, pytorch_custom_env
):
mlflow.pytorch.save_model(
pytorch_model=sequential_model, path=model_path, conda_env=pytorch_custom_env
)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]["conda"])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != pytorch_custom_env
with open(pytorch_custom_env) as f:
pytorch_custom_env_text = f.read()
with open(saved_conda_env_path) as f:
saved_conda_env_text = f.read()
assert saved_conda_env_text == pytorch_custom_env_text
@pytest.mark.parametrize("scripted_model", [True, False])
def test_model_save_persists_requirements_in_mlflow_model_directory(
sequential_model, model_path, pytorch_custom_env
):
mlflow.pytorch.save_model(
pytorch_model=sequential_model, path=model_path, conda_env=pytorch_custom_env
)
saved_pip_req_path = os.path.join(model_path, "requirements.txt")
_compare_conda_env_requirements(pytorch_custom_env, saved_pip_req_path)
@pytest.mark.parametrize("scripted_model", [False])
def test_save_model_with_pip_requirements(sequential_model, tmp_path):
expected_mlflow_version = _mlflow_major_version_string()
# Path to a requirements file
tmpdir1 = tmp_path.joinpath("1")
req_file = tmp_path.joinpath("requirements.txt")
req_file.write_text("a")
mlflow.pytorch.save_model(sequential_model, tmpdir1, pip_requirements=str(req_file))
_assert_pip_requirements(tmpdir1, [expected_mlflow_version, "a"], strict=True)
# List of requirements
tmpdir2 = tmp_path.joinpath("2")
mlflow.pytorch.save_model(sequential_model, tmpdir2, pip_requirements=[f"-r {req_file}", "b"])
_assert_pip_requirements(tmpdir2, [expected_mlflow_version, "a", "b"], strict=True)
# Constraints file
tmpdir3 = tmp_path.joinpath("3")
mlflow.pytorch.save_model(sequential_model, tmpdir3, pip_requirements=[f"-c {req_file}", "b"])
_assert_pip_requirements(
tmpdir3, [expected_mlflow_version, "b", "-c constraints.txt"], ["a"], strict=True
)
@pytest.mark.parametrize("scripted_model", [False])
def test_save_model_with_extra_pip_requirements(sequential_model, tmp_path):
expected_mlflow_version = _mlflow_major_version_string()
default_reqs = mlflow.pytorch.get_default_pip_requirements()
# Path to a requirements file
tmpdir1 = tmp_path.joinpath("1")
req_file = tmp_path.joinpath("requirements.txt")
req_file.write_text("a")
mlflow.pytorch.save_model(sequential_model, tmpdir1, extra_pip_requirements=str(req_file))
_assert_pip_requirements(tmpdir1, [expected_mlflow_version, *default_reqs, "a"])
# List of requirements
tmpdir2 = tmp_path.joinpath("2")
mlflow.pytorch.save_model(
sequential_model, tmpdir2, extra_pip_requirements=[f"-r {req_file}", "b"]
)
_assert_pip_requirements(tmpdir2, [expected_mlflow_version, *default_reqs, "a", "b"])
# Constraints file
tmpdir3 = tmp_path.joinpath("3")
mlflow.pytorch.save_model(
sequential_model, tmpdir3, extra_pip_requirements=[f"-c {req_file}", "b"]
)
_assert_pip_requirements(
tmpdir3, [expected_mlflow_version, *default_reqs, "b", "-c constraints.txt"], ["a"]
)
@pytest.mark.parametrize("scripted_model", [True, False])
def test_model_save_accepts_conda_env_as_dict(sequential_model, model_path):
conda_env = dict(mlflow.pytorch.get_default_conda_env())
conda_env["dependencies"].append("pytest")
mlflow.pytorch.save_model(pytorch_model=sequential_model, path=model_path, conda_env=conda_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]["conda"])
assert os.path.exists(saved_conda_env_path)
with open(saved_conda_env_path) as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == conda_env
@pytest.mark.parametrize("scripted_model", [True, False])
def test_model_log_persists_specified_conda_env_in_mlflow_model_directory(
sequential_model, pytorch_custom_env
):
artifact_path = "model"
with mlflow.start_run():
model_info = mlflow.pytorch.log_model(
sequential_model,
name=artifact_path,
conda_env=pytorch_custom_env,
)
model_path = _download_artifact_from_uri(model_info.model_uri)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]["conda"])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != pytorch_custom_env
with open(pytorch_custom_env) as f:
pytorch_custom_env_text = f.read()
with open(saved_conda_env_path) as f:
saved_conda_env_text = f.read()
assert saved_conda_env_text == pytorch_custom_env_text
@pytest.mark.parametrize("scripted_model", [True, False])
def test_model_log_persists_requirements_in_mlflow_model_directory(
sequential_model, pytorch_custom_env
):
artifact_path = "model"
with mlflow.start_run():
model_info = mlflow.pytorch.log_model(
sequential_model,
name=artifact_path,
conda_env=pytorch_custom_env,
)
model_path = _download_artifact_from_uri(model_info.model_uri)
saved_pip_req_path = os.path.join(model_path, "requirements.txt")
_compare_conda_env_requirements(pytorch_custom_env, saved_pip_req_path)
@pytest.mark.parametrize("scripted_model", [True, False])
def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies(
sequential_model, model_path
):
mlflow.pytorch.save_model(pytorch_model=sequential_model, path=model_path)
_assert_pip_requirements(model_path, mlflow.pytorch.get_default_pip_requirements())
@pytest.mark.parametrize("scripted_model", [True, False])
def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(
sequential_model,
):
with mlflow.start_run():
model_info = mlflow.pytorch.log_model(sequential_model, name="model")
_assert_pip_requirements(model_info.model_uri, mlflow.pytorch.get_default_pip_requirements())
@pytest.mark.parametrize("scripted_model", [True, False])
def test_load_model_with_differing_pytorch_version_logs_warning(sequential_model, model_path):
mlflow.pytorch.save_model(pytorch_model=sequential_model, path=model_path)
saver_pytorch_version = "1.0"
model_config_path = os.path.join(model_path, "MLmodel")
model_config = Model.load(model_config_path)
model_config.flavors[mlflow.pytorch.FLAVOR_NAME]["pytorch_version"] = saver_pytorch_version
model_config.save(model_config_path)
log_messages = []
def custom_warn(message_text, *args, **kwargs):
log_messages.append(message_text % args % kwargs)
loader_pytorch_version = "0.8.2"
with (
mock.patch("mlflow.pytorch._logger.warning") as warn_mock,
mock.patch("torch.__version__", loader_pytorch_version),
):
warn_mock.side_effect = custom_warn
mlflow.pytorch.load_model(model_uri=model_path)
assert any(
"does not match installed PyTorch version" in log_message
and saver_pytorch_version in log_message
and loader_pytorch_version in log_message
for log_message in log_messages
)
def test_pyfunc_model_serving_with_module_scoped_subclassed_model_and_default_conda_env(
module_scoped_subclassed_model, data
):
with mlflow.start_run():
model_info = mlflow.pytorch.log_model(
module_scoped_subclassed_model,
name="pytorch_model",
code_paths=[__file__],
input_example=data[0],
)
inference_payload = load_serving_example(model_info.model_uri)
scoring_response = pyfunc_serve_and_score_model(
model_uri=model_info.model_uri,
data=inference_payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert scoring_response.status_code == 200
deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content)["predictions"])
np.testing.assert_array_almost_equal(
deployed_model_preds.values[:, 0],
_predict(model=module_scoped_subclassed_model, data=data),
decimal=4,
)
def test_save_model_with_wrong_codepaths_fails_correctly(
    module_scoped_subclassed_model, model_path, data
):
    """Passing a plain string (rather than a list) as `code_paths` raises TypeError."""
    with pytest.raises(TypeError, match="Argument code_paths should be a list, not <class 'str'>"):
        mlflow.pytorch.save_model(
            path=model_path, pytorch_model=module_scoped_subclassed_model, code_paths="some string"
        )
def test_pyfunc_model_serving_with_main_scoped_subclassed_model_and_custom_pickle_module(
main_scoped_subclassed_model, data
):
with mlflow.start_run():
model_info = mlflow.pytorch.log_model(
main_scoped_subclassed_model,
name="pytorch_model",
pickle_module=mlflow_pytorch_pickle_module,
input_example=data[0],
)
inference_payload = load_serving_example(model_info.model_uri)
scoring_response = pyfunc_serve_and_score_model(
model_uri=model_info.model_uri,
data=inference_payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert scoring_response.status_code == 200
deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content)["predictions"])
np.testing.assert_array_almost_equal(
deployed_model_preds.values[:, 0],
_predict(model=main_scoped_subclassed_model, data=data),
decimal=4,
)
def test_load_model_succeeds_with_dependencies_specified_via_code_paths(
module_scoped_subclassed_model, model_path, data
):
# Save a PyTorch model whose class is defined in the current test suite. Because the
# `tests` module is not available when the model is deployed for local scoring, we include
# the test suite file as a code dependency
mlflow.pytorch.save_model(
path=model_path,
pytorch_model=module_scoped_subclassed_model,
code_paths=[__file__],
)
# Define a custom pyfunc model that loads a PyTorch model artifact using
# `mlflow.pytorch.load_model`
class TorchValidatorModel(pyfunc.PythonModel):
def load_context(self, context):
self.pytorch_model = mlflow.pytorch.load_model(context.artifacts["pytorch_model"])
def predict(self, context, model_input, params=None):
with torch.no_grad():
input_tensor = torch.from_numpy(model_input.values.astype(np.float32))
output_tensor = self.pytorch_model(input_tensor)
return pd.DataFrame(output_tensor.numpy())
pyfunc_artifact_path = "pyfunc_model"
with mlflow.start_run():
model_info = pyfunc.log_model(
pyfunc_artifact_path,
python_model=TorchValidatorModel(),
artifacts={"pytorch_model": model_path},
input_example=data[0],
# save file into code_paths, otherwise after first model loading (happens when
# validating input_example) then we can not load the model again
code_paths=[__file__],
)
# Deploy the custom pyfunc model and ensure that it is able to successfully load its
# constituent PyTorch model via `mlflow.pytorch.load_model`
inference_payload = load_serving_example(model_info.model_uri)
scoring_response = pyfunc_serve_and_score_model(
model_uri=model_info.model_uri,
data=inference_payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert scoring_response.status_code == 200
deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content)["predictions"])
np.testing.assert_array_almost_equal(
deployed_model_preds.values[:, 0],
_predict(model=module_scoped_subclassed_model, data=data),
decimal=4,
)
def test_load_pyfunc_loads_torch_model_using_pickle_module_specified_at_save_time(
module_scoped_subclassed_model, model_path
):
custom_pickle_module = pickle
mlflow.pytorch.save_model(
path=model_path,
pytorch_model=module_scoped_subclassed_model,
pickle_module=custom_pickle_module,
)
import_module_fn = importlib.import_module
imported_modules = []
def track_module_imports(module_name):
imported_modules.append(module_name)
return import_module_fn(module_name)
with (
mock.patch("importlib.import_module") as import_mock,
mock.patch("torch.load") as torch_load_mock,
):
import_mock.side_effect = track_module_imports
pyfunc.load_model(model_path)
expected_kwargs = {"pickle_module": custom_pickle_module}
if ENABLE_LEGACY_DESERIALIZATION:
expected_kwargs["weights_only"] = False
torch_load_mock.assert_called_with(mock.ANY, **expected_kwargs)
assert custom_pickle_module.__name__ in imported_modules
def test_load_model_loads_torch_model_using_pickle_module_specified_at_save_time(
module_scoped_subclassed_model,
):
custom_pickle_module = pickle
artifact_path = "pytorch_model"
with mlflow.start_run():
model_info = mlflow.pytorch.log_model(
module_scoped_subclassed_model,
name=artifact_path,
pickle_module=custom_pickle_module,
)
model_uri = model_info.model_uri
import_module_fn = importlib.import_module
imported_modules = []
def track_module_imports(module_name):
imported_modules.append(module_name)
return import_module_fn(module_name)
with (
mock.patch("importlib.import_module") as import_mock,
mock.patch("torch.load") as torch_load_mock,
):
import_mock.side_effect = track_module_imports
pyfunc.load_model(model_uri=model_uri)
expected_kwargs = {"pickle_module": custom_pickle_module}
if ENABLE_LEGACY_DESERIALIZATION:
expected_kwargs["weights_only"] = False
torch_load_mock.assert_called_with(mock.ANY, **expected_kwargs)
assert custom_pickle_module.__name__ in imported_modules
def test_load_pyfunc_succeeds_when_data_is_model_file_instead_of_directory(
module_scoped_subclassed_model, model_path, data
):
"""
This test verifies that PyTorch models saved in older versions of MLflow are loaded successfully
by ``mlflow.pytorch.load_model``. The ``data`` path associated with these older models is
serialized PyTorch model file, as opposed to the current format: a directory containing a
serialized model file and pickle module information.
"""
mlflow.pytorch.save_model(path=model_path, pytorch_model=module_scoped_subclassed_model)
model_conf_path = os.path.join(model_path, "MLmodel")
model_conf = Model.load(model_conf_path)
pyfunc_conf = model_conf.flavors.get(pyfunc.FLAVOR_NAME)
assert pyfunc_conf is not None
model_data_path = os.path.join(model_path, pyfunc_conf[pyfunc.DATA])
assert os.path.exists(model_data_path)
assert mlflow.pytorch._SERIALIZED_TORCH_MODEL_FILE_NAME in os.listdir(model_data_path)
pyfunc_conf[pyfunc.DATA] = os.path.join(
model_data_path, mlflow.pytorch._SERIALIZED_TORCH_MODEL_FILE_NAME
)
model_conf.save(model_conf_path)
loaded_pyfunc = pyfunc.load_model(model_path)
np.testing.assert_array_almost_equal(
loaded_pyfunc.predict(data[0]),
pd.DataFrame(_predict(model=module_scoped_subclassed_model, data=data)),
decimal=4,
)
def test_load_model_succeeds_when_data_is_model_file_instead_of_directory(
module_scoped_subclassed_model, model_path, data
):
"""
This test verifies that PyTorch models saved in older versions of MLflow are loaded successfully
by ``mlflow.pytorch.load_model``. The ``data`` path associated with these older models is
serialized PyTorch model file, as opposed to the current format: a directory containing a
serialized model file and pickle module information.
"""
artifact_path = "pytorch_model"
with mlflow.start_run():
model_info = mlflow.pytorch.log_model(module_scoped_subclassed_model, name=artifact_path)
model_path = _download_artifact_from_uri(model_info.model_uri)
model_conf_path = os.path.join(model_path, "MLmodel")
model_conf = Model.load(model_conf_path)
pyfunc_conf = model_conf.flavors.get(pyfunc.FLAVOR_NAME)
assert pyfunc_conf is not None
model_data_path = os.path.join(model_path, pyfunc_conf[pyfunc.DATA])
assert os.path.exists(model_data_path)
assert mlflow.pytorch._SERIALIZED_TORCH_MODEL_FILE_NAME in os.listdir(model_data_path)
pyfunc_conf[pyfunc.DATA] = os.path.join(
model_data_path, mlflow.pytorch._SERIALIZED_TORCH_MODEL_FILE_NAME
)
model_conf.save(model_conf_path)
loaded_pyfunc = pyfunc.load_model(model_path)
np.testing.assert_array_almost_equal(
loaded_pyfunc.predict(data[0]),
pd.DataFrame(_predict(model=module_scoped_subclassed_model, data=data)),
decimal=4,
)
def test_load_model_allows_user_to_override_pickle_module_via_keyword_argument(
module_scoped_subclassed_model, model_path
):
mlflow.pytorch.save_model(
path=model_path, pytorch_model=module_scoped_subclassed_model, pickle_module=pickle
)
with (
mock.patch("torch.load") as torch_load_mock,
mock.patch("mlflow.pytorch._logger.warning") as warn_mock,
):
mlflow.pytorch.load_model(model_uri=model_path, pickle_module=mlflow_pytorch_pickle_module)
torch_load_mock.assert_called_with(mock.ANY, pickle_module=mlflow_pytorch_pickle_module)
warn_mock.assert_any_call(mock.ANY, mlflow_pytorch_pickle_module.__name__, pickle.__name__)
def test_load_model_raises_exception_when_pickle_module_cannot_be_imported(
main_scoped_subclassed_model, model_path
):
mlflow.pytorch.save_model(path=model_path, pytorch_model=main_scoped_subclassed_model)
bad_pickle_module_name = "not.a.real.module"
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
model_data_path = os.path.join(model_path, pyfunc_conf[pyfunc.DATA])
assert os.path.exists(model_data_path)
assert mlflow.pytorch._PICKLE_MODULE_INFO_FILE_NAME in os.listdir(model_data_path)
with open(
os.path.join(model_data_path, mlflow.pytorch._PICKLE_MODULE_INFO_FILE_NAME), "w"
) as f:
f.write(bad_pickle_module_name)
with pytest.raises(
MlflowException,
match=r"Failed to import the pickle module.+" + re.escape(bad_pickle_module_name),
):
mlflow.pytorch.load_model(model_uri=model_path)
def test_pyfunc_serve_and_score(data):
model = torch.nn.Linear(4, 1)
train_model(model=model, data=data)
with mlflow.start_run():
model_info = mlflow.pytorch.log_model(model, name="model", input_example=data[0])
inference_payload = load_serving_example(model_info.model_uri)
resp = pyfunc_serve_and_score_model(
model_info.model_uri,
inference_payload,
pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
)
from mlflow.deployments import PredictionsResponse
scores = PredictionsResponse.from_json(resp.content).get_predictions()
np.testing.assert_array_almost_equal(scores.values[:, 0], _predict(model=model, data=data))
@pytest.mark.skipif(not _is_importable("transformers"), reason="This test requires transformers")
def test_pyfunc_serve_and_score_transformers():
from transformers import BertConfig, BertModel
from mlflow.deployments import PredictionsResponse
class MyBertModel(BertModel):
def forward(self, *args, **kwargs):
return super().forward(*args, **kwargs).last_hidden_state
model = MyBertModel(
BertConfig(
vocab_size=16,
hidden_size=2,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=2,
)
)
model.eval()
input_ids = model.dummy_inputs["input_ids"]
with mlflow.start_run():
model_info = mlflow.pytorch.log_model(
model, name="model", input_example=np.array(input_ids.tolist())
)
inference_payload = load_serving_example(model_info.model_uri)
resp = pyfunc_serve_and_score_model(
model_info.model_uri,
inference_payload,
pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
)
scores = PredictionsResponse.from_json(resp.content.decode("utf-8")).get_predictions(
predictions_format="ndarray"
)
assert_array_almost_equal(scores, model(input_ids).detach().numpy(), rtol=1e-6)
@pytest.fixture
def create_requirements_file(tmp_path):
    """Write a one-line requirements.txt and return (path, contents)."""
    content = "mlflow"
    req_path = tmp_path.joinpath("requirements.txt")
    req_path.write_text(content)
    return str(req_path), content
@pytest.fixture
def create_extra_files(tmp_path):
    """Create two small extra files; return their paths and expected contents."""
    contents = ["1", "2"]
    paths = []
    for name, text in zip(("extra1.txt", "extra2.txt"), contents):
        extra = tmp_path.joinpath(name)
        extra.write_text(text)
        paths.append(str(extra))
    return paths, contents
@pytest.mark.parametrize("scripted_model", [True, False])
def test_extra_files_log_model(create_extra_files, sequential_model):
extra_files, contents_expected = create_extra_files
with mlflow.start_run():
mlflow.pytorch.log_model(sequential_model, name="models", extra_files=extra_files)
model_uri = "runs:/{run_id}/{model_path}".format(
run_id=mlflow.active_run().info.run_id, model_path="models"
)
with TempDir(remove_on_exit=True) as tmp:
model_path = _download_artifact_from_uri(model_uri, tmp.path())
model_config_path = os.path.join(model_path, "MLmodel")
model_config = Model.load(model_config_path)
flavor_config = model_config.flavors["pytorch"]
assert "extra_files" in flavor_config
loaded_extra_files = flavor_config["extra_files"]
for loaded_extra_file, content_expected in zip(loaded_extra_files, contents_expected):
assert "path" in loaded_extra_file
extra_file_path = os.path.join(model_path, loaded_extra_file["path"])
with open(extra_file_path) as fp:
assert fp.read() == content_expected
@pytest.mark.parametrize("scripted_model", [True, False])
def test_extra_files_save_model(create_extra_files, sequential_model):
extra_files, contents_expected = create_extra_files
with TempDir(remove_on_exit=True) as tmp:
model_path = os.path.join(tmp.path(), "models")
mlflow.pytorch.save_model(
pytorch_model=sequential_model, path=model_path, extra_files=extra_files
)
model_config_path = os.path.join(model_path, "MLmodel")
model_config = Model.load(model_config_path)
flavor_config = model_config.flavors["pytorch"]
assert "extra_files" in flavor_config
loaded_extra_files = flavor_config["extra_files"]
for loaded_extra_file, content_expected in zip(loaded_extra_files, contents_expected):
assert "path" in loaded_extra_file
extra_file_path = os.path.join(model_path, loaded_extra_file["path"])
with open(extra_file_path) as fp:
assert fp.read() == content_expected
@pytest.mark.parametrize("scripted_model", [True, False])
def test_log_model_invalid_extra_file_path(sequential_model):
with (
mlflow.start_run(),
pytest.raises(MlflowException, match="No such file or directory: 'non_existing_file.txt'"),
):
mlflow.pytorch.log_model(
sequential_model,
name="models",
extra_files=["non_existing_file.txt"],
)
@pytest.mark.parametrize("scripted_model", [True, False])
def test_log_model_invalid_extra_file_type(sequential_model):
with (
mlflow.start_run(),
pytest.raises(TypeError, match="Extra files argument should be a list"),
):
mlflow.pytorch.log_model(
sequential_model,
name="models",
extra_files="non_existing_file.txt",
)
def state_dict_equal(state_dict1, state_dict2):
    """Recursively compare two (possibly nested) state dicts for equality.

    Tensor values are compared with ``torch.equal``, dict values recursively,
    and everything else with ``==``.  Values of differing types never compare
    equal.

    Returns True iff both dicts have exactly the same keys and equal values.
    """
    # Symmetry fix: the original only checked that every key of state_dict1
    # exists in state_dict2, so a second dict with extra keys compared equal.
    if set(state_dict1) != set(state_dict2):
        return False
    for key, value1 in state_dict1.items():
        value2 = state_dict2[key]
        if type(value1) != type(value2):
            return False
        if isinstance(value1, dict):
            if not state_dict_equal(value1, value2):
                return False
        elif isinstance(value1, torch.Tensor):
            if not torch.equal(value1, value2):
                return False
        elif value1 != value2:
            return False
    return True
@pytest.mark.parametrize("scripted_model", [True, False])
def test_save_state_dict(sequential_model, model_path, data):
state_dict = sequential_model.state_dict()
mlflow.pytorch.save_state_dict(state_dict, model_path)
loaded_state_dict = mlflow.pytorch.load_state_dict(model_path)
assert state_dict_equal(loaded_state_dict, state_dict)
model = get_sequential_model()
model.load_state_dict(loaded_state_dict)
np.testing.assert_array_almost_equal(
_predict(model, data),
_predict(sequential_model, data),
decimal=4,
)
def test_save_state_dict_can_save_nested_state_dict(model_path):
"""
This test ensures that `save_state_dict` supports a use case described in the page below
where a user bundles multiple objects (e.g., model, optimizer, learning-rate scheduler)
into a single nested state_dict and loads it back later for inference or re-training:
https://pytorch.org/tutorials/recipes/recipes/saving_and_loading_a_general_checkpoint.html
"""
model = get_sequential_model()
optim = torch.optim.Adam(model.parameters())
state_dict = {"model": model.state_dict(), "optim": optim.state_dict()}
mlflow.pytorch.save_state_dict(state_dict, model_path)
loaded_state_dict = mlflow.pytorch.load_state_dict(model_path)
assert state_dict_equal(loaded_state_dict, state_dict)
model.load_state_dict(loaded_state_dict["model"])
optim.load_state_dict(loaded_state_dict["optim"])
@pytest.mark.parametrize("not_state_dict", [0, "", get_sequential_model()])
def test_save_state_dict_throws_for_invalid_object_type(not_state_dict, model_path):
    """Non-dict inputs to save_state_dict (int, str, raw model) raise TypeError."""
    with pytest.raises(TypeError, match="Invalid object type for `state_dict`"):
        mlflow.pytorch.save_state_dict(not_state_dict, model_path)
@pytest.mark.parametrize("scripted_model", [True, False])
def test_log_state_dict(sequential_model, data):
artifact_path = "model"
state_dict = sequential_model.state_dict()
with mlflow.start_run():
mlflow.pytorch.log_state_dict(state_dict, artifact_path)
state_dict_uri = mlflow.get_artifact_uri(artifact_path)
loaded_state_dict = mlflow.pytorch.load_state_dict(state_dict_uri)
assert state_dict_equal(loaded_state_dict, state_dict)
model = get_sequential_model()
model.load_state_dict(loaded_state_dict)
np.testing.assert_array_almost_equal(
_predict(model, data),
_predict(sequential_model, data),
decimal=4,
)
@pytest.mark.parametrize("scripted_model", [True, False])
def test_log_model_with_code_paths(sequential_model):
artifact_path = "model"
with (
mlflow.start_run(),
mock.patch("mlflow.pytorch._add_code_from_conf_to_system_path") as add_mock,
):
model_info = mlflow.pytorch.log_model(
sequential_model, name=artifact_path, code_paths=[__file__]
)
_compare_logged_code_paths(__file__, model_info.model_uri, mlflow.pytorch.FLAVOR_NAME)
mlflow.pytorch.load_model(model_info.model_uri)
add_mock.assert_called()
def test_virtualenv_subfield_points_to_correct_path(model_path):
model = get_sequential_model()
mlflow.pytorch.save_model(model, path=model_path)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
python_env_path = Path(model_path, pyfunc_conf[pyfunc.ENV]["virtualenv"])
assert python_env_path.exists()
assert python_env_path.is_file()
@pytest.mark.parametrize("scripted_model", [True, False])
def test_model_save_load_with_metadata(sequential_model, model_path):
mlflow.pytorch.save_model(
sequential_model, path=model_path, metadata={"metadata_key": "metadata_value"}
)
reloaded_model = mlflow.pyfunc.load_model(model_uri=model_path)
assert reloaded_model.metadata.metadata["metadata_key"] == "metadata_value"
@pytest.mark.parametrize("scripted_model", [True, False])
def test_model_log_with_metadata(sequential_model):
artifact_path = "model"
with mlflow.start_run():
model_info = mlflow.pytorch.log_model(
sequential_model,
name=artifact_path,
metadata={"metadata_key": "metadata_value"},
)
reloaded_model = mlflow.pyfunc.load_model(model_uri=model_info.model_uri)
assert reloaded_model.metadata.metadata["metadata_key"] == "metadata_value"
@pytest.mark.parametrize("scripted_model", [True, False])
def test_model_log_with_signature_inference(sequential_model, data):
artifact_path = "model"
example_ = data[0].head(3).values.astype(np.float32)
with mlflow.start_run():
model_info = mlflow.pytorch.log_model(
sequential_model, name=artifact_path, input_example=example_
)
assert model_info.signature == ModelSignature(
inputs=Schema([TensorSpec(np.dtype("float32"), (-1, 4))]),
outputs=Schema([TensorSpec(np.dtype("float32"), (-1, 1))]),
)
inference_payload = load_serving_example(model_info.model_uri)
response = pyfunc_serve_and_score_model(
model_info.model_uri,
inference_payload,
pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert response.status_code == 200
deployed_model_preds = pd.DataFrame(json.loads(response.content)["predictions"])
np.testing.assert_array_almost_equal(
deployed_model_preds.values[:, 0],
_predict(model=sequential_model, data=(data[0].head(3), data[1].head(3))),
decimal=4,
)
@pytest.mark.parametrize("scripted_model", [False])
def test_load_model_to_device(sequential_model):
with mock.patch("mlflow.pytorch._load_model") as load_model_mock:
with mlflow.start_run():
model_info = mlflow.pytorch.log_model(sequential_model, name="pytorch")
model_config = {"device": "cuda"}
if ENABLE_LEGACY_DESERIALIZATION:
model_config["weights_only"] = False
mlflow.pyfunc.load_model(model_uri=model_info.model_uri, model_config=model_config)
load_model_mock.assert_called_with(mock.ANY, **model_config)
mlflow.pytorch.load_model(model_uri=model_info.model_uri, **model_config)
load_model_mock.assert_called_with(path=mock.ANY, **model_config)
def test_passing_params_to_model(data):
class CustomModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(4, 1)
def forward(self, x, y):
if not torch.is_tensor(x):
x = torch.from_numpy(x)
y = torch.tensor(y)
combined = x * y
return self.linear(combined)
model = CustomModel()
x = np.random.randn(8, 4).astype(np.float32)
signature = mlflow.models.infer_signature(x, None, {"y": 1})
with mlflow.start_run():
model_info = mlflow.pytorch.log_model(model, name="model", signature=signature)
pyfunc_model = mlflow.pyfunc.load_model(model_info.model_uri)
with torch.no_grad():
np.testing.assert_array_almost_equal(pyfunc_model.predict(x), model(x, 1), decimal=4)
np.testing.assert_array_almost_equal(
pyfunc_model.predict(x, {"y": 2}), model(x, 2), decimal=4
)
def test_log_model_with_datetime_input():
df = pd.DataFrame(
{
"datetime": pd.date_range("2022-01-01", periods=5, freq="D"),
"x": np.random.uniform(20, 30, 5),
"y": np.random.uniform(2, 4, 5),
"z": np.random.uniform(0, 10, 5),
}
)
model = get_sequential_model()
model_info = mlflow.pytorch.log_model(model, name="pytorch", input_example=df)
assert model_info.signature.inputs.inputs[0].type == DataType.datetime
pyfunc_model = mlflow.pyfunc.load_model(model_info.model_uri)
with torch.no_grad():
input_tensor = torch.from_numpy(df.to_numpy(dtype=np.float32))
expected_result = model(input_tensor)
with torch.no_grad():
np.testing.assert_array_almost_equal(pyfunc_model.predict(df), expected_result, decimal=4)
| ModuleScopedSubclassedModel |
python | getsentry__sentry | src/sentry/seer/autofix/utils.py | {
"start": 2130,
"end": 2301
} | class ____(BaseModel):
description: str
repo_provider: str
repo_full_name: str
branch_name: str | None = None
pr_url: str | None = None
| CodingAgentResult |
python | astropy__astropy | astropy/utils/masked/tests/test_function_helpers.py | {
"start": 45569,
"end": 46959
} | class ____:
# More elaborate tests done in test_masked.py
@classmethod
def setup_class(cls):
cls.ma = Masked(np.arange(3), mask=[True, False, False])
def test_array2string(self):
out0 = np.array2string(self.ma)
assert out0 == "[— 1 2]"
# Arguments are interpreted as usual.
out1 = np.array2string(self.ma, separator=", ")
assert out1 == "[—, 1, 2]"
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.ma, separator=", ", formatter={"all": hex})
assert out2 == "[———, 0x1, 0x2]"
# Also as positional argument (no, nobody will do this!)
if NUMPY_LT_2_4:
args = (self.ma, None, None, None, ", ", "", np._NoValue, {"int": hex})
out3 = np.array2string(*args)
assert out3 == out2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.ma, separator=", ", formatter={"float": hex})
assert out4 == out1
def test_array_repr(self):
out = np.array_repr(self.ma)
assert out == "MaskedNDArray([—, 1, 2])"
ma2 = self.ma.astype("f4")
out2 = np.array_repr(ma2)
assert out2 == "MaskedNDArray([——, 1., 2.], dtype=float32)"
def test_array_str(self):
out = np.array_str(self.ma)
assert out == "[— 1 2]"
| TestStringFunctions |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_relationship.py | {
"start": 76222,
"end": 82192
} | class ____(
fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
"""test long join paths with a joined-inh in the middle, where we go
multiple times across the same joined-inh to the same target but with
other classes in the middle. E.g. test [ticket:2908]
"""
run_setup_mappers = "once"
__dialect__ = "default"
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Root(Base):
__tablename__ = "root"
id = Column(Integer, primary_key=True)
sub1_id = Column(Integer, ForeignKey("sub1.id"))
intermediate = relationship("Intermediate")
sub1 = relationship("Sub1")
class Intermediate(Base):
__tablename__ = "intermediate"
id = Column(Integer, primary_key=True)
sub1_id = Column(Integer, ForeignKey("sub1.id"))
root_id = Column(Integer, ForeignKey("root.id"))
sub1 = relationship("Sub1")
class Parent(Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
class Sub1(Parent):
__tablename__ = "sub1"
id = Column(Integer, ForeignKey("parent.id"), primary_key=True)
target = relationship("Target")
class Target(Base):
__tablename__ = "target"
id = Column(Integer, primary_key=True)
sub1_id = Column(Integer, ForeignKey("sub1.id"))
def test_join(self):
Root, Intermediate, Sub1, Target = (
self.classes.Root,
self.classes.Intermediate,
self.classes.Sub1,
self.classes.Target,
)
s1_alias = aliased(Sub1)
s2_alias = aliased(Sub1)
t1_alias = aliased(Target)
t2_alias = aliased(Target)
sess = fixture_session()
q = (
sess.query(Root)
.join(s1_alias, Root.sub1)
.join(t1_alias, s1_alias.target)
.join(Root.intermediate)
.join(s2_alias, Intermediate.sub1)
.join(t2_alias, s2_alias.target)
)
self.assert_compile(
q,
"SELECT root.id AS root_id, root.sub1_id AS root_sub1_id "
"FROM root "
"JOIN (SELECT parent.id AS parent_id, sub1.id AS sub1_id "
"FROM parent JOIN sub1 ON parent.id = sub1.id) AS anon_1 "
"ON anon_1.sub1_id = root.sub1_id "
"JOIN target AS target_1 ON anon_1.sub1_id = target_1.sub1_id "
"JOIN intermediate ON root.id = intermediate.root_id "
"JOIN (SELECT parent.id AS parent_id, sub1.id AS sub1_id "
"FROM parent JOIN sub1 ON parent.id = sub1.id) AS anon_2 "
"ON anon_2.sub1_id = intermediate.sub1_id "
"JOIN target AS target_2 ON anon_2.sub1_id = target_2.sub1_id",
)
def test_join_flat(self):
Root, Intermediate, Sub1, Target = (
self.classes.Root,
self.classes.Intermediate,
self.classes.Sub1,
self.classes.Target,
)
s1_alias = aliased(Sub1, flat=True)
s2_alias = aliased(Sub1, flat=True)
t1_alias = aliased(Target)
t2_alias = aliased(Target)
sess = fixture_session()
q = (
sess.query(Root)
.join(s1_alias, Root.sub1)
.join(t1_alias, s1_alias.target)
.join(Root.intermediate)
.join(s2_alias, Intermediate.sub1)
.join(t2_alias, s2_alias.target)
)
self.assert_compile(
q,
"SELECT root.id AS root_id, root.sub1_id AS root_sub1_id "
"FROM root "
"JOIN (parent AS parent_1 JOIN sub1 AS sub1_1 "
"ON parent_1.id = sub1_1.id) "
"ON sub1_1.id = root.sub1_id "
"JOIN target AS target_1 ON sub1_1.id = target_1.sub1_id "
"JOIN intermediate ON root.id = intermediate.root_id "
"JOIN (parent AS parent_2 JOIN sub1 AS sub1_2 "
"ON parent_2.id = sub1_2.id) "
"ON sub1_2.id = intermediate.sub1_id "
"JOIN target AS target_2 ON sub1_2.id = target_2.sub1_id",
)
def test_joinedload(self):
Root, Intermediate, Sub1 = (
self.classes.Root,
self.classes.Intermediate,
self.classes.Sub1,
)
sess = fixture_session()
q = sess.query(Root).options(
joinedload(Root.sub1).joinedload(Sub1.target),
joinedload(Root.intermediate)
.joinedload(Intermediate.sub1)
.joinedload(Sub1.target),
)
self.assert_compile(
q,
"SELECT root.id AS root_id, root.sub1_id AS root_sub1_id, "
"target_1.id AS target_1_id, "
"target_1.sub1_id AS target_1_sub1_id, "
"sub1_1.id AS sub1_1_id, parent_1.id AS parent_1_id, "
"intermediate_1.id AS intermediate_1_id, "
"intermediate_1.sub1_id AS intermediate_1_sub1_id, "
"intermediate_1.root_id AS intermediate_1_root_id, "
"target_2.id AS target_2_id, "
"target_2.sub1_id AS target_2_sub1_id, "
"sub1_2.id AS sub1_2_id, parent_2.id AS parent_2_id "
"FROM root "
"LEFT OUTER JOIN intermediate AS intermediate_1 "
"ON root.id = intermediate_1.root_id "
"LEFT OUTER JOIN (parent AS parent_1 JOIN sub1 AS sub1_1 "
"ON parent_1.id = sub1_1.id) "
"ON sub1_1.id = intermediate_1.sub1_id "
"LEFT OUTER JOIN target AS target_1 "
"ON sub1_1.id = target_1.sub1_id "
"LEFT OUTER JOIN (parent AS parent_2 JOIN sub1 AS sub1_2 "
"ON parent_2.id = sub1_2.id) ON sub1_2.id = root.sub1_id "
"LEFT OUTER JOIN target AS target_2 "
"ON sub1_2.id = target_2.sub1_id",
)
| JoinAcrossJoinedInhMultiPath |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py | {
"start": 11576,
"end": 11796
} | class ____(django.db.models.base.ModelBase):
def __new__(cls, name: str, bases: tuple[Any, ...], attrs: dict[str, Any], **kwargs: Any) -> MetaclassInWhichSelfCannotBeUsed6:
...
| MetaclassInWhichSelfCannotBeUsed6 |
python | PyCQA__pylint | pylint/pyreverse/dot_printer.py | {
"start": 1678,
"end": 6661
} | class ____(Printer):
DEFAULT_COLOR = "black"
def __init__(
self,
title: str,
layout: Layout | None = None,
use_automatic_namespace: bool | None = None,
):
layout = layout or Layout.BOTTOM_TO_TOP
self.charset = "utf-8"
super().__init__(title, layout, use_automatic_namespace)
def _open_graph(self) -> None:
"""Emit the header lines."""
self.emit(f'digraph "{self.title}" {{')
if self.layout:
self.emit(f"rankdir={self.layout.value}")
if self.charset:
assert (
self.charset.lower() in ALLOWED_CHARSETS
), f"unsupported charset {self.charset}"
self.emit(f'charset="{self.charset}"')
def emit_node(
self,
name: str,
type_: NodeType,
properties: NodeProperties | None = None,
) -> None:
"""Create a new node.
Nodes can be classes, packages, participants etc.
"""
if properties is None:
properties = NodeProperties(label=name)
shape = SHAPES[type_]
color = properties.color if properties.color is not None else self.DEFAULT_COLOR
style = "filled" if color != self.DEFAULT_COLOR else "solid"
label = self._build_label_for_node(properties)
label_part = f", label=<{label}>" if label else ""
fontcolor_part = (
f', fontcolor="{properties.fontcolor}"' if properties.fontcolor else ""
)
self.emit(
f'"{name}" [color="{color}"{fontcolor_part}{label_part}, shape="{shape}", style="{style}"];'
)
def _build_label_for_node(self, properties: NodeProperties) -> str:
if not properties.label:
return ""
label: str = properties.label
if properties.attrs is None and properties.methods is None:
# return a "compact" form which only displays the class name in a box
return label
# Add class attributes
attrs: list[str] = properties.attrs or []
attrs_string = rf"{HTMLLabels.LINEBREAK_LEFT.value}".join(
attr.replace("|", r"\|") for attr in attrs
)
label = rf"{{{label}|{attrs_string}{HTMLLabels.LINEBREAK_LEFT.value}|"
# Add class methods
methods: list[nodes.FunctionDef] = properties.methods or []
for func in methods:
args = ", ".join(self._get_method_arguments(func)).replace("|", r"\|")
method_name = (
f"<I>{func.name}</I>" if func.is_abstract() else f"{func.name}"
)
label += rf"{method_name}({args})"
if func.returns:
annotation_label = get_annotation_label(func.returns)
label += ": " + self._escape_annotation_label(annotation_label)
label += rf"{HTMLLabels.LINEBREAK_LEFT.value}"
label += "}"
return label
def _escape_annotation_label(self, annotation_label: str) -> str:
# Escape vertical bar characters to make them appear as a literal characters
# otherwise it gets treated as field separator of record-based nodes
annotation_label = annotation_label.replace("|", r"\|")
return annotation_label
def emit_edge(
self,
from_node: str,
to_node: str,
type_: EdgeType,
label: str | None = None,
) -> None:
"""Create an edge from one node to another to display relationships."""
arrowstyle = ARROWS[type_]
attrs = [f'{prop}="{value}"' for prop, value in arrowstyle.items()]
if label:
attrs.append(f'label="{label}"')
self.emit(f'"{from_node}" -> "{to_node}" [{", ".join(sorted(attrs))}];')
def generate(self, outputfile: str) -> None:
self._close_graph()
graphviz_extensions = ("dot", "gv")
name = self.title
if outputfile is None:
target = "png"
pdot, dot_sourcepath = tempfile.mkstemp(".gv", name)
ppng, outputfile = tempfile.mkstemp(".png", name)
os.close(pdot)
os.close(ppng)
else:
target = Path(outputfile).suffix.lstrip(".")
if not target:
target = "png"
outputfile = outputfile + "." + target
if target not in graphviz_extensions:
pdot, dot_sourcepath = tempfile.mkstemp(".gv", name)
os.close(pdot)
else:
dot_sourcepath = outputfile
with open(dot_sourcepath, "w", encoding="utf8") as outfile:
outfile.writelines(self.lines)
if target not in graphviz_extensions:
subprocess.run(
["dot", "-T", target, dot_sourcepath, "-o", outputfile], check=True
)
os.unlink(dot_sourcepath)
def _close_graph(self) -> None:
"""Emit the lines needed to properly close the graph."""
self.emit("}\n")
| DotPrinter |
python | ray-project__ray | python/ray/train/backend.py | {
"start": 375,
"end": 724
} | class ____:
"""Parent class for configurations of training backend."""
@property
def backend_cls(self):
return Backend
@property
def train_func_context(self):
return nullcontext
def _repr_html_(self) -> str:
return make_table_html_repr(obj=self, title=type(self).__name__)
@DeveloperAPI
| BackendConfig |
python | huggingface__transformers | tests/models/deberta_v2/test_modeling_deberta_v2.py | {
"start": 9433,
"end": 12095
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
DebertaV2Model,
DebertaV2ForMaskedLM,
DebertaV2ForSequenceClassification,
DebertaV2ForTokenClassification,
DebertaV2ForQuestionAnswering,
DebertaV2ForMultipleChoice,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": DebertaV2Model,
"fill-mask": DebertaV2ForMaskedLM,
"question-answering": DebertaV2ForQuestionAnswering,
"text-classification": DebertaV2ForSequenceClassification,
"token-classification": DebertaV2ForTokenClassification,
"zero-shot": DebertaV2ForSequenceClassification,
}
if is_torch_available()
else {}
)
is_encoder_decoder = False
def setUp(self):
self.model_tester = DebertaV2ModelTester(self)
self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_deberta_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "microsoft/deberta-v2-xlarge"
model = DebertaV2Model.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
| DebertaV2ModelTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 37974,
"end": 38370
} | class ____(sgqlc.types.Enum):
"""The possible default commit titles for merges.
Enumeration Choices:
* `MERGE_MESSAGE`: Default to the classic title for a merge
message (e.g., Merge pull request #123 from branch-name).
* `PR_TITLE`: Default to the pull request's title.
"""
__schema__ = github_schema
__choices__ = ("MERGE_MESSAGE", "PR_TITLE")
| MergeCommitTitle |
python | doocs__leetcode | solution/0100-0199/0199.Binary Tree Right Side View/Solution.py | {
"start": 192,
"end": 649
} | class ____:
def rightSideView(self, root: Optional[TreeNode]) -> List[int]:
ans = []
if root is None:
return ans
q = deque([root])
while q:
ans.append(q[0].val)
for _ in range(len(q)):
node = q.popleft()
if node.right:
q.append(node.right)
if node.left:
q.append(node.left)
return ans
| Solution |
python | rq__rq | tests/fixtures.py | {
"start": 3107,
"end": 3185
} | class ____:
def __call__(self):
return "I'm callable"
| CallableObject |
python | numba__numba | numba/core/generators.py | {
"start": 205,
"end": 1489
} | class ____(FunctionDescriptor):
"""
The descriptor for a generator's next function.
"""
__slots__ = ()
@classmethod
def from_generator_fndesc(cls, func_ir, fndesc, gentype, mangler):
"""
Build a GeneratorDescriptor for the generator returned by the
function described by *fndesc*, with type *gentype*.
The generator inherits the env_name from the *fndesc*.
All emitted functions for the generator shares the same Env.
"""
assert isinstance(gentype, types.Generator)
restype = gentype.yield_type
args = ['gen']
argtypes = (gentype,)
qualname = fndesc.qualname + '.next'
unique_name = fndesc.unique_name + '.next'
self = cls(fndesc.native, fndesc.modname, qualname, unique_name,
fndesc.doc, fndesc.typemap, restype, fndesc.calltypes,
args, fndesc.kws, argtypes=argtypes, mangler=mangler,
inline=False, env_name=fndesc.env_name)
return self
@property
def llvm_finalizer_name(self):
"""
The LLVM name of the generator's finalizer function
(if <generator type>.has_finalizer is true).
"""
return 'finalize_' + self.mangled_name
| GeneratorDescriptor |
python | getsentry__sentry | tests/sentry/integrations/slack/test_link_identity.py | {
"start": 611,
"end": 1973
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
self.external_id = "new-slack-id"
self.channel_id = "my-channel"
self.response_url = "http://example.slack.com/response_url"
self.integration = install_slack(self.organization)
self.idp = add_identity(self.integration, self.user, self.external_id)
@pytest.fixture(autouse=True)
def mock_webhook_send(self) -> Generator[None]:
with patch(
"slack_sdk.webhook.WebhookClient.send",
return_value=WebhookResponse(
url="",
body='{"ok": true}',
headers={},
status_code=200,
),
) as self.mock_webhook:
yield
@pytest.fixture(autouse=True)
def mock_chat_postMessage(self) -> Generator[None]:
with patch(
"slack_sdk.web.WebClient.chat_postMessage",
return_value=SlackResponse(
client=None,
http_verb="POST",
api_url="https://slack.com/api/chat.postMessage",
req_args={},
data={"ok": True},
headers={},
status_code=200,
),
) as self.mock_post:
yield
@control_silo_test
| SlackIntegrationLinkIdentityTestBase |
python | doocs__leetcode | solution/0000-0099/0043.Multiply Strings/Solution.py | {
"start": 0,
"end": 564
} | class ____:
def multiply(self, num1: str, num2: str) -> str:
if num1 == "0" or num2 == "0":
return "0"
m, n = len(num1), len(num2)
arr = [0] * (m + n)
for i in range(m - 1, -1, -1):
a = int(num1[i])
for j in range(n - 1, -1, -1):
b = int(num2[j])
arr[i + j + 1] += a * b
for i in range(m + n - 1, 0, -1):
arr[i - 1] += arr[i] // 10
arr[i] %= 10
i = 0 if arr[0] else 1
return "".join(str(x) for x in arr[i:])
| Solution |
python | pytorch__pytorch | torch/cuda/__init__.py | {
"start": 19366,
"end": 19964
} | class ____:
r"""Context-manager that changes the selected device.
Args:
device (torch.device or int): device index to select. It's a no-op if
this argument is a negative integer or ``None``.
"""
def __init__(self, device: Any):
self.idx = _get_device_index(device, optional=True)
self.prev_idx = -1
def __enter__(self):
self.prev_idx = torch.cuda._exchange_device(self.idx)
def __exit__(self, type: Any, value: Any, traceback: Any):
self.idx = torch.cuda._maybe_exchange_device(self.prev_idx)
return False
| device |
python | facebook__pyre-check | client/background_tasks.py | {
"start": 597,
"end": 713
} | class ____(abc.ABC):
@abc.abstractmethod
async def run(self) -> None:
raise NotImplementedError()
| Task |
python | pola-rs__polars | py-polars/tests/unit/io/database/test_read.py | {
"start": 3921,
"end": 44861
} | class ____(NamedTuple):
"""Clarify exception test params."""
read_method: str
query: str | list[str]
protocol: Any
errclass: type[Exception]
errmsg: str
engine: str | None = None
execute_options: dict[str, Any] | None = None
pre_execution_query: str | list[str] | None = None
kwargs: dict[str, Any] | None = None
@pytest.mark.write_disk
@pytest.mark.parametrize(
(
"read_method",
"connect_using",
"expected_dtypes",
"expected_dates",
"schema_overrides",
"batch_size",
),
[
pytest.param(
*DatabaseReadTestParams(
read_method="read_database_uri",
connect_using="connectorx",
expected_dtypes={
"id": pl.UInt8,
"name": pl.String,
"value": pl.Float64,
"date": pl.Date,
},
expected_dates=[date(2020, 1, 1), date(2021, 12, 31)],
schema_overrides={"id": pl.UInt8},
),
id="uri: connectorx",
),
pytest.param(
*DatabaseReadTestParams(
read_method="read_database_uri",
connect_using="adbc",
expected_dtypes={
"id": pl.UInt8,
"name": pl.String,
"value": pl.Float64,
"date": pl.String,
},
expected_dates=["2020-01-01", "2021-12-31"],
schema_overrides={"id": pl.UInt8},
),
marks=pytest.mark.skipif(
sys.platform == "win32",
reason="adbc_driver_sqlite not available on Windows",
),
id="uri: adbc",
),
pytest.param(
*DatabaseReadTestParams(
read_method="read_database",
connect_using=lambda path: sqlite3.connect(path, detect_types=True),
expected_dtypes={
"id": pl.UInt8,
"name": pl.String,
"value": pl.Float32,
"date": pl.Date,
},
expected_dates=[date(2020, 1, 1), date(2021, 12, 31)],
schema_overrides={"id": pl.UInt8, "value": pl.Float32},
),
id="conn: sqlite3",
),
pytest.param(
*DatabaseReadTestParams(
read_method="read_database",
connect_using=lambda path: sqlite3.connect(path, detect_types=True),
expected_dtypes={
"id": pl.Int32,
"name": pl.String,
"value": pl.Float32,
"date": pl.Date,
},
expected_dates=[date(2020, 1, 1), date(2021, 12, 31)],
schema_overrides={"id": pl.Int32, "value": pl.Float32},
batch_size=1,
),
id="conn: sqlite3 (batched)",
),
pytest.param(
*DatabaseReadTestParams(
read_method="read_database",
connect_using=lambda path: create_engine(
f"sqlite:///{path}",
connect_args={"detect_types": sqlite3.PARSE_DECLTYPES},
).connect(),
expected_dtypes={
"id": pl.Int64,
"name": pl.String,
"value": pl.Float64,
"date": pl.Date,
},
expected_dates=[date(2020, 1, 1), date(2021, 12, 31)],
),
id="conn: sqlalchemy",
),
pytest.param(
*DatabaseReadTestParams(
read_method="read_database",
connect_using=adbc_sqlite_connect,
expected_dtypes={
"id": pl.Int64,
"name": pl.String,
"value": pl.Float64,
"date": pl.String,
},
expected_dates=["2020-01-01", "2021-12-31"],
),
marks=pytest.mark.skipif(
sys.platform == "win32",
reason="adbc_driver_sqlite not available on Windows",
),
id="conn: adbc (fetchall)",
),
pytest.param(
*DatabaseReadTestParams(
read_method="read_database",
connect_using=adbc_sqlite_connect,
expected_dtypes={
"id": pl.Int64,
"name": pl.String,
"value": pl.Float64,
"date": pl.String,
},
expected_dates=["2020-01-01", "2021-12-31"],
batch_size=1,
),
marks=pytest.mark.skipif(
sys.platform == "win32",
reason="adbc_driver_sqlite not available on Windows",
),
id="conn: adbc (batched)",
),
],
)
def test_read_database(
read_method: Literal["read_database", "read_database_uri"],
connect_using: Any,
expected_dtypes: dict[str, pl.DataType],
expected_dates: list[date | str],
schema_overrides: SchemaDict | None,
batch_size: int | None,
tmp_sqlite_db: Path,
) -> None:
if read_method == "read_database_uri":
connect_using = cast("DbReadEngine", connect_using)
# instantiate the connection ourselves, using connectorx/adbc
df = pl.read_database_uri(
uri=f"sqlite:///{tmp_sqlite_db}",
query="SELECT * FROM test_data",
engine=connect_using,
schema_overrides=schema_overrides,
)
df_empty = pl.read_database_uri(
uri=f"sqlite:///{tmp_sqlite_db}",
query="SELECT * FROM test_data WHERE name LIKE '%polars%'",
engine=connect_using,
schema_overrides=schema_overrides,
)
elif "adbc" in os.environ["PYTEST_CURRENT_TEST"]:
# externally instantiated adbc connections
with connect_using(tmp_sqlite_db) as conn:
df = pl.read_database(
connection=conn,
query="SELECT * FROM test_data",
schema_overrides=schema_overrides,
batch_size=batch_size,
)
df_empty = pl.read_database(
connection=conn,
query="SELECT * FROM test_data WHERE name LIKE '%polars%'",
schema_overrides=schema_overrides,
batch_size=batch_size,
)
else:
# other user-supplied connections
df = pl.read_database(
connection=connect_using(tmp_sqlite_db),
query="SELECT * FROM test_data WHERE name NOT LIKE '%polars%'",
schema_overrides=schema_overrides,
batch_size=batch_size,
)
df_empty = pl.read_database(
connection=connect_using(tmp_sqlite_db),
query="SELECT * FROM test_data WHERE name LIKE '%polars%'",
schema_overrides=schema_overrides,
batch_size=batch_size,
)
# validate the expected query return (data and schema)
assert df.schema == expected_dtypes
assert df.shape == (2, 4)
assert df["date"].to_list() == expected_dates
# note: 'cursor.description' is not reliable when no query
# data is returned, so no point comparing expected dtypes
assert df_empty.columns == ["id", "name", "value", "date"]
assert df_empty.shape == (0, 4)
assert df_empty["date"].to_list() == []
@pytest.mark.write_disk
@pytest.mark.parametrize(
(
"read_method",
"connect_using",
"expected_dtypes",
"expected_dates",
"schema_overrides",
"batch_size",
),
[
pytest.param(
*DatabaseReadTestParams(
read_method="read_database",
connect_using=lambda path: sqlite3.connect(path, detect_types=True),
expected_dtypes={
"id": pl.Int32,
"name": pl.String,
"value": pl.Float32,
"date": pl.Date,
},
expected_dates=[date(2020, 1, 1), date(2021, 12, 31)],
schema_overrides={"id": pl.Int32, "value": pl.Float32},
batch_size=1,
),
id="conn: sqlite3",
),
pytest.param(
*DatabaseReadTestParams(
read_method="read_database",
connect_using=lambda path: create_engine(
f"sqlite:///{path}",
connect_args={"detect_types": sqlite3.PARSE_DECLTYPES},
).connect(),
expected_dtypes={
"id": pl.Int64,
"name": pl.String,
"value": pl.Float64,
"date": pl.Date,
},
expected_dates=[date(2020, 1, 1), date(2021, 12, 31)],
batch_size=1,
),
id="conn: sqlalchemy",
),
pytest.param(
*DatabaseReadTestParams(
read_method="read_database",
connect_using=adbc_sqlite_connect,
expected_dtypes={
"id": pl.Int64,
"name": pl.String,
"value": pl.Float64,
"date": pl.String,
},
expected_dates=["2020-01-01", "2021-12-31"],
),
marks=pytest.mark.skipif(
sys.platform == "win32",
reason="adbc_driver_sqlite not available on Windows",
),
id="conn: adbc",
),
pytest.param(
*DatabaseReadTestParams(
read_method="read_database",
connect_using=adbc_sqlite_connect,
expected_dtypes={
"id": pl.Int64,
"name": pl.String,
"value": pl.Float64,
"date": pl.String,
},
expected_dates=["2020-01-01", "2021-12-31"],
batch_size=1,
),
marks=pytest.mark.skipif(
sys.platform == "win32",
reason="adbc_driver_sqlite not available on Windows",
),
id="conn: adbc (ignore batch_size)",
),
],
)
def test_read_database_iter_batches(
read_method: Literal["read_database"],
connect_using: Any,
expected_dtypes: dict[str, pl.DataType],
expected_dates: list[date | str],
schema_overrides: SchemaDict | None,
batch_size: int | None,
tmp_sqlite_db: Path,
) -> None:
if "adbc" in os.environ["PYTEST_CURRENT_TEST"]:
# externally instantiated adbc connections
with connect_using(tmp_sqlite_db) as conn:
dfs = pl.read_database(
connection=conn,
query="SELECT * FROM test_data",
schema_overrides=schema_overrides,
iter_batches=True,
batch_size=batch_size,
)
empty_dfs = pl.read_database(
connection=conn,
query="SELECT * FROM test_data WHERE name LIKE '%polars%'",
schema_overrides=schema_overrides,
iter_batches=True,
batch_size=batch_size,
)
# must consume the iterators while the connection is open
dfs = iter(list(dfs))
empty_dfs = iter(list(empty_dfs))
else:
# other user-supplied connections
dfs = pl.read_database(
connection=connect_using(tmp_sqlite_db),
query="SELECT * FROM test_data WHERE name NOT LIKE '%polars%'",
schema_overrides=schema_overrides,
iter_batches=True,
batch_size=batch_size,
)
empty_dfs = pl.read_database(
connection=connect_using(tmp_sqlite_db),
query="SELECT * FROM test_data WHERE name LIKE '%polars%'",
schema_overrides=schema_overrides,
iter_batches=True,
batch_size=batch_size,
)
df: pl.DataFrame = pl.concat(dfs)
# validate the expected query return (data and schema)
assert df.schema == expected_dtypes
assert df.shape == (2, 4)
assert df["date"].to_list() == expected_dates
# some drivers return an empty iterator when there is no result
try:
df_empty: pl.DataFrame = pl.concat(empty_dfs)
except ValueError:
return
# # note: 'cursor.description' is not reliable when no query
# # data is returned, so no point comparing expected dtypes
assert df_empty.columns == ["id", "name", "value", "date"]
assert df_empty.shape == (0, 4)
assert df_empty["date"].to_list() == []
def test_read_database_alchemy_selectable(tmp_sqlite_db: Path) -> None:
# various flavours of alchemy connection
alchemy_engine = create_engine(f"sqlite:///{tmp_sqlite_db}")
alchemy_session: ConnectionOrCursor = sessionmaker(bind=alchemy_engine)()
alchemy_conn: ConnectionOrCursor = alchemy_engine.connect()
t = Table("test_data", MetaData(), autoload_with=alchemy_engine)
# establish sqlalchemy "selectable" and validate usage
selectable_query = select(
alchemy_cast(func.strftime("%Y", t.c.date), Integer).label("year"),
t.c.name,
t.c.value,
).where(t.c.value < 0)
expected = pl.DataFrame({"year": [2021], "name": ["other"], "value": [-99.5]})
for conn in (alchemy_session, alchemy_engine, alchemy_conn):
assert_frame_equal(
pl.read_database(selectable_query, connection=conn),
expected,
)
batches = list(
pl.read_database(
selectable_query,
connection=conn,
iter_batches=True,
batch_size=1,
)
)
assert len(batches) == 1
assert_frame_equal(batches[0], expected)
def test_read_database_alchemy_textclause(tmp_sqlite_db: Path) -> None:
# various flavours of alchemy connection
alchemy_engine = create_engine(f"sqlite:///{tmp_sqlite_db}")
alchemy_session: ConnectionOrCursor = sessionmaker(bind=alchemy_engine)()
alchemy_conn: ConnectionOrCursor = alchemy_engine.connect()
# establish sqlalchemy "textclause" and validate usage
textclause_query = text(
"""
SELECT CAST(STRFTIME('%Y',"date") AS INT) as "year", name, value
FROM test_data
WHERE value < 0
"""
)
expected = pl.DataFrame({"year": [2021], "name": ["other"], "value": [-99.5]})
for conn in (alchemy_session, alchemy_engine, alchemy_conn):
assert_frame_equal(
pl.read_database(textclause_query, connection=conn),
expected,
)
batches = list(
pl.read_database(
textclause_query,
connection=conn,
iter_batches=True,
batch_size=1,
)
)
assert len(batches) == 1
assert_frame_equal(batches[0], expected)
@pytest.mark.parametrize(
("param", "param_value"),
[
(":n", {"n": 0}),
("?", (0,)),
("?", [0]),
],
)
def test_read_database_parameterised(
param: str, param_value: Any, tmp_sqlite_db: Path
) -> None:
# raw cursor "execute" only takes positional params, alchemy cursor takes kwargs
alchemy_engine = create_engine(f"sqlite:///{tmp_sqlite_db}")
alchemy_conn: ConnectionOrCursor = alchemy_engine.connect()
alchemy_session: ConnectionOrCursor = sessionmaker(bind=alchemy_engine)()
raw_conn: ConnectionOrCursor = sqlite3.connect(tmp_sqlite_db)
# establish parameterised queries and validate usage
query = """
SELECT CAST(STRFTIME('%Y',"date") AS INT) as "year", name, value
FROM test_data
WHERE value < {n}
"""
expected_frame = pl.DataFrame({"year": [2021], "name": ["other"], "value": [-99.5]})
for conn in (alchemy_session, alchemy_engine, alchemy_conn, raw_conn):
if conn is alchemy_session and param == "?":
continue # alchemy session.execute() doesn't support positional params
if parse_version(sqlalchemy.__version__) < (2, 0) and param == ":n":
continue # skip for older sqlalchemy versions
assert_frame_equal(
expected_frame,
pl.read_database(
query.format(n=param),
connection=conn,
execute_options={"parameters": param_value},
),
)
@pytest.mark.parametrize(
("param", "param_value"),
[
pytest.param(
":n",
pa.Table.from_pydict({"n": [0]}),
marks=pytest.mark.skip(
reason="Named binding not currently supported. See https://github.com/apache/arrow-adbc/issues/3262"
),
),
pytest.param(
":n",
{"n": 0},
marks=pytest.mark.skip(
reason="Named binding not currently supported. See https://github.com/apache/arrow-adbc/issues/3262",
),
),
("?", pa.Table.from_pydict({"data": [0]})),
("?", pl.DataFrame({"data": [0]})),
("?", pl.Series([{"data": 0}])),
("?", (0,)),
("?", [0]),
],
)
@pytest.mark.skipif(
sys.platform == "win32", reason="adbc_driver_sqlite not available on Windows"
)
def test_read_database_parameterised_adbc(
param: str, param_value: Any, tmp_sqlite_db: Path
) -> None:
# establish parameterised queries and validate usage
query = """
SELECT CAST(STRFTIME('%Y',"date") AS INT) as "year", name, value
FROM test_data
WHERE value < {n}
"""
expected_frame = pl.DataFrame({"year": [2021], "name": ["other"], "value": [-99.5]})
# ADBC will complain in pytest if the connection isn't closed
with adbc_driver_sqlite.dbapi.connect(str(tmp_sqlite_db)) as conn:
assert_frame_equal(
expected_frame,
pl.read_database(
query.format(n=param),
connection=conn,
execute_options={"parameters": param_value},
),
)
@pytest.mark.parametrize(
("params", "param_value"),
[
([":lo", ":hi"], {"lo": 90, "hi": 100}),
(["?", "?"], (90, 100)),
(["?", "?"], [90, 100]),
],
)
def test_read_database_parameterised_multiple(
params: list[str], param_value: Any, tmp_sqlite_db: Path
) -> None:
param_1, param_2 = params
# establish parameterised queries and validate usage
query = """
SELECT CAST(STRFTIME('%Y',"date") AS INT) as "year", name, value
FROM test_data
WHERE value BETWEEN {param_1} AND {param_2}
"""
expected_frame = pl.DataFrame({"year": [2020], "name": ["misc"], "value": [100.0]})
# raw cursor "execute" only takes positional params, alchemy cursor takes kwargs
alchemy_engine = create_engine(f"sqlite:///{tmp_sqlite_db}")
alchemy_conn: ConnectionOrCursor = alchemy_engine.connect()
alchemy_session: ConnectionOrCursor = sessionmaker(bind=alchemy_engine)()
raw_conn: ConnectionOrCursor = sqlite3.connect(tmp_sqlite_db)
for conn in (alchemy_session, alchemy_engine, alchemy_conn, raw_conn):
if alchemy_session is conn and param_1 == "?":
continue # alchemy session.execute() doesn't support positional params
if parse_version(sqlalchemy.__version__) < (2, 0) and isinstance(
param_value, dict
):
continue # skip for older sqlalchemy versions
assert_frame_equal(
expected_frame,
pl.read_database(
query.format(param_1=param_1, param_2=param_2),
connection=conn,
execute_options={"parameters": param_value},
),
)
@pytest.mark.parametrize(
("params", "param_value"),
[
pytest.param(
[":lo", ":hi"],
{"lo": 90, "hi": 100},
marks=pytest.mark.skip(
reason="Named binding not currently supported. See https://github.com/apache/arrow-adbc/issues/3262"
),
),
(["?", "?"], pa.Table.from_pydict({"data_1": [90], "data_2": [100]})),
(["?", "?"], pl.DataFrame({"data_1": [90], "data_2": [100]})),
(["?", "?"], pl.Series([{"data_1": 90, "data_2": 100}])),
(["?", "?"], (90, 100)),
(["?", "?"], [90, 100]),
],
)
@pytest.mark.skipif(
sys.platform == "win32", reason="adbc_driver_sqlite not available on Windows"
)
def test_read_database_parameterised_multiple_adbc(
params: list[str], param_value: Any, tmp_sqlite_db: Path
) -> None:
param_1, param_2 = params
# establish parameterised queries and validate usage
query = """
SELECT CAST(STRFTIME('%Y',"date") AS INT) as "year", name, value
FROM test_data
WHERE value BETWEEN {param_1} AND {param_2}
"""
expected_frame = pl.DataFrame({"year": [2020], "name": ["misc"], "value": [100.0]})
# ADBC will complain in pytest if the connection isn't closed
with adbc_driver_sqlite.dbapi.connect(str(tmp_sqlite_db)) as conn:
assert_frame_equal(
expected_frame,
pl.read_database(
query.format(param_1=param_1, param_2=param_2),
connection=conn,
execute_options={"parameters": param_value},
),
)
@pytest.mark.parametrize(
("param", "param_value"),
[
pytest.param(
":n",
pa.Table.from_pydict({"n": [0]}),
marks=pytest.mark.skip(
reason="Named binding not currently supported. See https://github.com/apache/arrow-adbc/issues/3262"
),
),
pytest.param(
":n",
{"n": 0},
marks=pytest.mark.skip(
reason="Named binding not currently supported. See https://github.com/apache/arrow-adbc/issues/3262",
),
),
("?", pa.Table.from_pydict({"data": [0]})),
("?", pl.DataFrame({"data": [0]})),
("?", pl.Series([{"data": 0}])),
("?", (0,)),
("?", [0]),
],
)
@pytest.mark.skipif(
sys.platform == "win32", reason="adbc_driver_sqlite not available on Windows"
)
def test_read_database_uri_parameterised(
param: str, param_value: Any, tmp_sqlite_db: Path
) -> None:
alchemy_engine = create_engine(f"sqlite:///{tmp_sqlite_db}")
uri = alchemy_engine.url.render_as_string(hide_password=False)
query = """
SELECT CAST(STRFTIME('%Y',"date") AS INT) as "year", name, value
FROM test_data
WHERE value < {n}
"""
expected_frame = pl.DataFrame({"year": [2021], "name": ["other"], "value": [-99.5]})
# test URI read method (adbc only)
assert_frame_equal(
expected_frame,
pl.read_database_uri(
query.format(n=param),
uri=uri,
engine="adbc",
execute_options={"parameters": param_value},
),
)
# no connectorx support for execute_options
with pytest.raises(
ValueError,
match=r"connectorx.*does not support.*execute_options",
):
pl.read_database_uri(
query.format(n=":n"),
uri=uri,
engine="connectorx",
execute_options={"parameters": (":n", {"n": 0})},
)
@pytest.mark.parametrize(
("params", "param_value"),
[
pytest.param(
[":lo", ":hi"],
{"lo": 90, "hi": 100},
marks=pytest.mark.xfail(
reason="Named binding not supported. See https://github.com/apache/arrow-adbc/issues/3262",
strict=True,
),
),
(["?", "?"], pa.Table.from_pydict({"data_1": [90], "data_2": [100]})),
(["?", "?"], pl.DataFrame({"data_1": [90], "data_2": [100]})),
(["?", "?"], pl.Series([{"data_1": 90, "data_2": 100}])),
(["?", "?"], (90, 100)),
(["?", "?"], [90, 100]),
],
)
@pytest.mark.skipif(
sys.platform == "win32", reason="adbc_driver_sqlite not available on Windows"
)
def test_read_database_uri_parameterised_multiple(
params: list[str], param_value: Any, tmp_sqlite_db: Path
) -> None:
param_1, param_2 = params
alchemy_engine = create_engine(f"sqlite:///{tmp_sqlite_db}")
uri = alchemy_engine.url.render_as_string(hide_password=False)
query = """
SELECT CAST(STRFTIME('%Y',"date") AS INT) as "year", name, value
FROM test_data
WHERE value BETWEEN {param_1} AND {param_2}
"""
expected_frame = pl.DataFrame({"year": [2020], "name": ["misc"], "value": [100.0]})
# test URI read method (ADBC only)
assert_frame_equal(
expected_frame,
pl.read_database_uri(
query.format(param_1=param_1, param_2=param_2),
uri=uri,
engine="adbc",
execute_options={"parameters": param_value},
),
)
# no connectorx support for execute_options
with pytest.raises(
ValueError,
match=r"connectorx.*does not support.*execute_options",
):
pl.read_database_uri(
query.format(param_1="?", param_2="?"),
uri=uri,
engine="connectorx",
execute_options={"parameters": (90, 100)},
)
@pytest.mark.parametrize(
("driver", "batch_size", "iter_batches", "expected_call"),
[
("snowflake", None, False, "fetch_arrow_all"),
("snowflake", 10_000, False, "fetch_arrow_all"),
("snowflake", 10_000, True, "fetch_arrow_batches"),
("databricks", None, False, "fetchall_arrow"),
("databricks", 25_000, False, "fetchall_arrow"),
("databricks", 25_000, True, "fetchmany_arrow"),
("turbodbc", None, False, "fetchallarrow"),
("turbodbc", 50_000, False, "fetchallarrow"),
("turbodbc", 50_000, True, "fetcharrowbatches"),
pytest.param(
"adbc_driver_postgresql",
None,
False,
"fetch_arrow",
marks=pytest.mark.skipif(
sys.platform == "win32",
reason="adbc_driver_postgresql not available on Windows",
),
),
pytest.param(
"adbc_driver_postgresql",
75_000,
False,
"fetch_arrow",
marks=pytest.mark.skipif(
sys.platform == "win32",
reason="adbc_driver_postgresql not available on Windows",
),
),
pytest.param(
"adbc_driver_postgresql",
75_000,
True,
"fetch_record_batch",
marks=pytest.mark.skipif(
sys.platform == "win32",
reason="adbc_driver_postgresql not available on Windows",
),
),
],
)
def test_read_database_mocked(
driver: str, batch_size: int | None, iter_batches: bool, expected_call: str
) -> None:
# since we don't have access to snowflake/databricks/etc from CI we
# mock them so we can check that we're calling the expected methods
arrow = pl.DataFrame({"x": [1, 2, 3], "y": ["aa", "bb", "cc"]}).to_arrow()
reg = ARROW_DRIVER_REGISTRY.get(driver, [{}])[0] # type: ignore[var-annotated]
exact_batch_size = reg.get("exact_batch_size", False)
repeat_batch_calls = reg.get("repeat_batch_calls", False)
mc = MockConnection(
driver,
batch_size,
test_data=arrow,
repeat_batch_calls=repeat_batch_calls,
exact_batch_size=exact_batch_size, # type: ignore[arg-type]
)
res = pl.read_database(
query="SELECT * FROM test_data",
connection=mc,
iter_batches=iter_batches,
batch_size=batch_size,
)
if iter_batches:
assert isinstance(res, GeneratorType)
res = pl.concat(res)
res = cast("pl.DataFrame", res)
assert expected_call in mc.cursor().called
assert res.rows() == [(1, "aa"), (2, "bb"), (3, "cc")]
@pytest.mark.parametrize(
(
"read_method",
"query",
"protocol",
"errclass",
"errmsg",
"engine",
"execute_options",
"pre_execution_query",
"kwargs",
),
[
pytest.param(
*ExceptionTestParams(
read_method="read_database_uri",
query="SELECT * FROM test_data",
protocol="sqlite",
errclass=ValueError,
errmsg="engine must be one of {'connectorx', 'adbc'}, got 'not_an_engine'",
engine="not_an_engine",
),
id="Not an available sql engine",
),
pytest.param(
*ExceptionTestParams(
read_method="read_database_uri",
query=["SELECT * FROM test_data", "SELECT * FROM test_data"],
protocol="sqlite",
errclass=ValueError,
errmsg="only a single SQL query string is accepted for adbc, got a 'list' type",
engine="adbc",
),
id="Unavailable list of queries for adbc",
),
pytest.param(
*ExceptionTestParams(
read_method="read_database_uri",
query="SELECT * FROM test_data",
protocol="mysql",
errclass=ModuleNotFoundError,
errmsg="ADBC 'adbc_driver_mysql' driver not detected.",
engine="adbc",
),
id="Unavailable adbc driver",
),
pytest.param(
*ExceptionTestParams(
read_method="read_database_uri",
query="SELECT * FROM test_data",
protocol=sqlite3.connect(":memory:"),
errclass=TypeError,
errmsg="expected connection to be a URI string",
engine="adbc",
),
id="Invalid connection URI",
),
pytest.param(
*ExceptionTestParams(
read_method="read_database",
query="SELECT * FROM imaginary_table",
protocol=sqlite3.connect(":memory:"),
errclass=sqlite3.OperationalError,
errmsg="no such table: imaginary_table",
),
id="Invalid query (unrecognised table name)",
),
pytest.param(
*ExceptionTestParams(
read_method="read_database",
query="SELECT * FROM imaginary_table",
protocol=sys.getsizeof, # not a connection
errclass=TypeError,
errmsg="Unrecognised connection .* no 'execute' or 'cursor' method",
),
id="Invalid read DB kwargs",
),
pytest.param(
*ExceptionTestParams(
read_method="read_database",
query="/* tag: misc */ INSERT INTO xyz VALUES ('polars')",
protocol=sqlite3.connect(":memory:"),
errclass=UnsuitableSQLError,
errmsg="INSERT statements are not valid 'read' queries",
),
id="Invalid statement type",
),
pytest.param(
*ExceptionTestParams(
read_method="read_database",
query="DELETE FROM xyz WHERE id = 'polars'",
protocol=sqlite3.connect(":memory:"),
errclass=UnsuitableSQLError,
errmsg="DELETE statements are not valid 'read' queries",
),
id="Invalid statement type",
),
pytest.param(
*ExceptionTestParams(
read_method="read_database",
query="SELECT * FROM sqlite_master",
protocol=sqlite3.connect(":memory:"),
errclass=ValueError,
kwargs={"iter_batches": True},
errmsg="Cannot set `iter_batches` without also setting a non-zero `batch_size`",
),
id="Invalid batch_size",
),
pytest.param(
*ExceptionTestParams(
read_method="read_database",
engine="adbc",
query="SELECT * FROM test_data",
protocol=sqlite3.connect(":memory:"),
errclass=TypeError,
errmsg=r"unexpected keyword argument 'partition_on'",
kwargs={"partition_on": "id"},
),
id="Invalid kwargs",
),
pytest.param(
*ExceptionTestParams(
read_method="read_database",
engine="adbc",
query="SELECT * FROM test_data",
protocol="{not:a, valid:odbc_string}",
errclass=ValueError,
errmsg=r"unable to identify string connection as valid ODBC",
),
id="Invalid ODBC string",
),
pytest.param(
*ExceptionTestParams(
read_method="read_database_uri",
query="SELECT * FROM test_data",
protocol="sqlite",
errclass=ValueError,
errmsg="the 'adbc' engine does not support use of `pre_execution_query`",
engine="adbc",
pre_execution_query="SET statement_timeout = 2151",
),
id="Unavailable `pre_execution_query` for adbc",
),
],
)
def test_read_database_exceptions(
read_method: str,
query: str,
protocol: Any,
errclass: type[Exception],
errmsg: str,
engine: DbReadEngine | None,
execute_options: dict[str, Any] | None,
pre_execution_query: str | list[str] | None,
kwargs: dict[str, Any] | None,
) -> None:
if read_method == "read_database_uri":
conn = f"{protocol}://test" if isinstance(protocol, str) else protocol
params = {
"uri": conn,
"query": query,
"engine": engine,
"pre_execution_query": pre_execution_query,
}
else:
params = {"connection": protocol, "query": query}
if execute_options:
params["execute_options"] = execute_options
if kwargs is not None:
params.update(kwargs)
read_database = getattr(pl, read_method)
with pytest.raises(errclass, match=errmsg):
read_database(**params)
@pytest.mark.parametrize(
"query",
[
"SELECT 1, 1 FROM test_data",
'SELECT 1 AS "n", 2 AS "n" FROM test_data',
'SELECT name, value AS "name" FROM test_data',
],
)
def test_read_database_duplicate_column_error(tmp_sqlite_db: Path, query: str) -> None:
alchemy_conn = create_engine(f"sqlite:///{tmp_sqlite_db}").connect()
with pytest.raises(
DuplicateError,
match=r"column .+ appears more than once in the query/result cursor",
):
pl.read_database(query, connection=alchemy_conn)
@pytest.mark.parametrize(
"uri",
[
"fakedb://123:456@account/database/schema?warehouse=warehouse&role=role",
"fakedb://my#%us3r:p433w0rd@not_a_real_host:9999/database",
],
)
def test_read_database_cx_credentials(uri: str) -> None:
with pytest.raises(RuntimeError, match=r"Source.*not supported"):
pl.read_database_uri("SELECT * FROM data", uri=uri, engine="connectorx")
@pytest.mark.skipif(
sys.platform == "win32",
reason="kuzu segfaults on windows: https://github.com/pola-rs/polars/actions/runs/12502055945/job/34880479875?pr=20462",
)
@pytest.mark.write_disk
def test_read_kuzu_graph_database(tmp_path: Path, io_files_path: Path) -> None:
import kuzu
tmp_path.mkdir(exist_ok=True)
if (kuzu_test_db := (tmp_path / "kuzu_test.db")).exists():
kuzu_test_db.unlink()
test_db = str(kuzu_test_db).replace("\\", "/")
db = kuzu.Database(test_db)
conn = kuzu.Connection(db)
conn.execute("CREATE NODE TABLE User(name STRING, age UINT64, PRIMARY KEY (name))")
conn.execute("CREATE REL TABLE Follows(FROM User TO User, since INT64)")
users = str(io_files_path / "graph-data" / "user.csv").replace("\\", "/")
follows = str(io_files_path / "graph-data" / "follows.csv").replace("\\", "/")
conn.execute(f'COPY User FROM "{users}"')
conn.execute(f'COPY Follows FROM "{follows}"')
# basic: single relation
df1 = pl.read_database(
query="MATCH (u:User) RETURN u.name, u.age",
connection=conn,
)
assert_frame_equal(
df1,
pl.DataFrame(
{
"u.name": ["Adam", "Karissa", "Zhang", "Noura"],
"u.age": [30, 40, 50, 25],
},
schema={"u.name": pl.Utf8, "u.age": pl.UInt64},
),
)
# join: connected edges/relations
df2 = pl.read_database(
query="MATCH (a:User)-[f:Follows]->(b:User) RETURN a.name, f.since, b.name",
connection=conn,
schema_overrides={"f.since": pl.Int16},
)
assert_frame_equal(
df2,
pl.DataFrame(
{
"a.name": ["Adam", "Adam", "Karissa", "Zhang"],
"f.since": [2020, 2020, 2021, 2022],
"b.name": ["Karissa", "Zhang", "Zhang", "Noura"],
},
schema={"a.name": pl.Utf8, "f.since": pl.Int16, "b.name": pl.Utf8},
),
)
# empty: no results for the given query
df3 = pl.read_database(
query="MATCH (a:User)-[f:Follows]->(b:User) WHERE a.name = '🔎️' RETURN a.name, f.since, b.name",
connection=conn,
)
assert_frame_equal(
df3,
pl.DataFrame(
schema={"a.name": pl.Utf8, "f.since": pl.Int64, "b.name": pl.Utf8}
),
)
def test_sqlalchemy_row_init(tmp_sqlite_db: Path) -> None:
expected_frame = pl.DataFrame(
{
"id": [1, 2],
"name": ["misc", "other"],
"value": [100.0, -99.5],
"date": ["2020-01-01", "2021-12-31"],
}
)
alchemy_engine = create_engine(f"sqlite:///{tmp_sqlite_db}")
query = text("SELECT * FROM test_data ORDER BY name")
with alchemy_engine.connect() as conn:
# note: sqlalchemy `Row` is a NamedTuple-like object; it additionally has
# a `_mapping` attribute that returns a `RowMapping` dict-like object. we
# validate frame/series init from each flavour of query result.
query_result = list(conn.execute(query))
for df in (
pl.DataFrame(query_result),
pl.DataFrame([row._mapping for row in query_result]),
pl.from_records([row._mapping for row in query_result]),
):
assert_frame_equal(expected_frame, df)
expected_series = expected_frame.to_struct()
for s in (
pl.Series(query_result),
pl.Series([row._mapping for row in query_result]),
):
assert_series_equal(expected_series, s)
@patch("polars.io.database._utils.from_arrow")
@patch("polars.io.database._utils.import_optional")
def test_read_database_uri_pre_execution_query_success(
import_mock: Mock, from_arrow_mock: Mock
) -> None:
cx_mock = Mock()
cx_mock.__version__ = "0.4.2"
import_mock.return_value = cx_mock
pre_execution_query = "SET statement_timeout = 2151"
pl.read_database_uri(
query="SELECT 1",
uri="mysql://test",
engine="connectorx",
pre_execution_query=pre_execution_query,
)
assert (
cx_mock.read_sql.call_args.kwargs["pre_execution_query"] == pre_execution_query
)
@patch("polars.io.database._utils.import_optional")
def test_read_database_uri_pre_execution_not_supported_exception(
import_mock: Mock,
) -> None:
cx_mock = Mock()
cx_mock.__version__ = "0.4.0"
import_mock.return_value = cx_mock
with (
pytest.raises(
ValueError,
match=r"'pre_execution_query' is only supported in connectorx version 0\.4\.2 or later",
),
):
pl.read_database_uri(
query="SELECT 1",
uri="mysql://test",
engine="connectorx",
pre_execution_query="SET statement_timeout = 2151",
)
@patch("polars.io.database._utils.from_arrow")
@patch("polars.io.database._utils.import_optional")
def test_read_database_uri_pre_execution_query_not_supported_success(
import_mock: Mock, from_arrow_mock: Mock
) -> None:
cx_mock = Mock()
cx_mock.__version__ = "0.4.0"
import_mock.return_value = cx_mock
pl.read_database_uri(
query="SELECT 1",
uri="mysql://test",
engine="connectorx",
)
assert cx_mock.read_sql.call_args.kwargs.get("pre_execution_query") is None
| ExceptionTestParams |
python | kamyu104__LeetCode-Solutions | Python/my-calendar-iii.py | {
"start": 915,
"end": 1784
} | class ____(object):
def __init__(self):
self.__books = []
def book(self, start, end):
"""
:type start: int
:type end: int
:rtype: int
"""
i = bisect.bisect_left(self.__books, (start, 1))
if i < len(self.__books) and self.__books[i][0] == start:
self.__books[i] = (self.__books[i][0], self.__books[i][1]+1)
else:
self.__books.insert(i, (start, 1))
j = bisect.bisect_left(self.__books, (end, 1))
if j < len(self.__books) and self.__books[j][0] == end:
self.__books[j] = (self.__books[j][0], self.__books[j][1]-1)
else:
self.__books.insert(j, (end, -1))
result, cnt = 0, 0
for book in self.__books:
cnt += book[1]
result = max(result, cnt)
return result
| MyCalendarThree2 |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 66743,
"end": 67883
} | class ____(TestCase):
"""Tests for ``zip_offset()``"""
def test_shortest(self):
a_1 = [0, 1, 2, 3]
a_2 = [0, 1, 2, 3, 4, 5]
a_3 = [0, 1, 2, 3, 4, 5, 6, 7]
actual = list(
mi.zip_offset(a_1, a_2, a_3, offsets=(-1, 0, 1), fillvalue='')
)
expected = [('', 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5)]
self.assertEqual(actual, expected)
def test_longest(self):
a_1 = [0, 1, 2, 3]
a_2 = [0, 1, 2, 3, 4, 5]
a_3 = [0, 1, 2, 3, 4, 5, 6, 7]
actual = list(
mi.zip_offset(a_1, a_2, a_3, offsets=(-1, 0, 1), longest=True)
)
expected = [
(None, 0, 1),
(0, 1, 2),
(1, 2, 3),
(2, 3, 4),
(3, 4, 5),
(None, 5, 6),
(None, None, 7),
]
self.assertEqual(actual, expected)
def test_mismatch(self):
iterables = [0, 1, 2], [2, 3, 4]
offsets = (-1, 0, 1)
self.assertRaises(
ValueError,
lambda: list(mi.zip_offset(*iterables, offsets=offsets)),
)
| ZipOffsetTest |
python | google__pytype | pytype/pytd/pytd.py | {
"start": 11911,
"end": 11994
} | class ____(Node):
"""ParamSpec.kwargs special form."""
name: str
| ParamSpecKwargs |
python | huggingface__transformers | src/transformers/models/grounding_dino/modeling_grounding_dino.py | {
"start": 66393,
"end": 67148
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.max_text_len = config.max_text_len
def forward(
self,
vision_hidden_state: torch.FloatTensor,
text_hidden_state: torch.FloatTensor,
text_token_mask: torch.BoolTensor,
) -> torch.FloatTensor:
output = vision_hidden_state @ text_hidden_state.transpose(-1, -2)
output = output.masked_fill(~text_token_mask[:, None, :], float("-inf"))
# padding to max_text_len
new_output = torch.full((*output.shape[:-1], self.max_text_len), float("-inf"), device=output.device)
new_output[..., : output.shape[-1]] = output
return new_output
@auto_docstring
| GroundingDinoContrastiveEmbedding |
python | ray-project__ray | python/ray/serve/tests/unit/test_constants_utils.py | {
"start": 1154,
"end": 2916
} | class ____:
def test_parse_latency_buckets(self):
# Test valid inputs with different formats
assert parse_latency_buckets("1,2,3", []) == [1.0, 2.0, 3.0]
assert parse_latency_buckets("1,2,3,4 ", []) == [1.0, 2.0, 3.0, 4.0]
assert parse_latency_buckets(" 1,2,3,4,5", []) == [1.0, 2.0, 3.0, 4.0, 5.0]
assert parse_latency_buckets(" 1, 2,3 ,4,5 ,6 ", []) == [
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
]
# Test decimal numbers
assert parse_latency_buckets("0.5,1.5,2.5", []) == [0.5, 1.5, 2.5]
def test_parse_latency_buckets_invalid(self):
# Test negative numbers
with pytest.raises(ValueError, match=".*must be positive.*"):
parse_latency_buckets("-1,1,2,3,4", [])
# Test non-ascending order
with pytest.raises(ValueError, match=".*be in strictly ascending order*"):
parse_latency_buckets("4,3,2,1", [])
# Test duplicate values
with pytest.raises(ValueError, match=".*be in strictly ascending order.*"):
parse_latency_buckets("1,2,2,3,4", [])
# Test invalid number format
with pytest.raises(ValueError, match=".*Invalid.*format.*"):
parse_latency_buckets("1,2,3,4,a", [])
# Test empty list
with pytest.raises(ValueError, match=".*could not convert.*"):
parse_latency_buckets(",,,", [])
# Test invalid separators
with pytest.raises(ValueError, match=".*could not convert.*"):
parse_latency_buckets("1;2;3;4", [])
@pytest.fixture
def mock_environ():
with patch.dict(os.environ, {}, clear=True) as mock_env:
yield mock_env
| TestParseLatencyBuckets |
python | pandas-dev__pandas | pandas/core/arrays/numpy_.py | {
"start": 1352,
"end": 20247
} | class ____(
OpsMixin,
NDArrayBackedExtensionArray,
ObjectStringArrayMixin,
):
"""
A pandas ExtensionArray for NumPy data.
This is mostly for internal compatibility, and is not especially
useful on its own.
Parameters
----------
values : ndarray
The NumPy ndarray to wrap. Must be 1-dimensional.
copy : bool, default False
Whether to copy `values`.
Attributes
----------
None
Methods
-------
None
See Also
--------
array : Create an array.
Series.to_numpy : Convert a Series to a NumPy array.
Examples
--------
>>> pd.arrays.NumpyExtensionArray(np.array([0, 1, 2, 3]))
<NumpyExtensionArray>
[0, 1, 2, 3]
Length: 4, dtype: int64
"""
# If you're wondering why pd.Series(cls) doesn't put the array in an
# ExtensionBlock, search for `ABCNumpyExtensionArray`. We check for
# that _typ to ensure that users don't unnecessarily use EAs inside
# pandas internals, which turns off things like block consolidation.
_typ = "npy_extension"
__array_priority__ = 1000
_ndarray: np.ndarray
_dtype: NumpyEADtype
_internal_fill_value = np.nan
# ------------------------------------------------------------------------
# Constructors
def __init__(
self, values: np.ndarray | NumpyExtensionArray, copy: bool = False
) -> None:
if isinstance(values, type(self)):
values = values._ndarray
if not isinstance(values, np.ndarray):
raise ValueError(
f"'values' must be a NumPy array, not {type(values).__name__}"
)
if values.ndim == 0:
# Technically we support 2, but do not advertise that fact.
raise ValueError("NumpyExtensionArray must be 1-dimensional.")
if copy:
values = values.copy()
dtype = NumpyEADtype(values.dtype)
super().__init__(values, dtype)
@classmethod
def _from_sequence(
cls, scalars, *, dtype: Dtype | None = None, copy: bool = False
) -> NumpyExtensionArray:
if isinstance(dtype, NumpyEADtype):
dtype = dtype._dtype
# error: Argument "dtype" to "asarray" has incompatible type
# "Union[ExtensionDtype, str, dtype[Any], dtype[floating[_64Bit]], Type[object],
# None]"; expected "Union[dtype[Any], None, type, _SupportsDType, str,
# Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any],
# _DTypeDict, Tuple[Any, Any]]]"
result = np.asarray(scalars, dtype=dtype) # type: ignore[arg-type]
if (
result.ndim > 1
and not hasattr(scalars, "dtype")
and (dtype is None or dtype == object)
):
# e.g. list-of-tuples
result = construct_1d_object_array_from_listlike(scalars)
if copy and result is scalars:
result = result.copy()
return cls(result)
def _cast_pointwise_result(self, values) -> ArrayLike:
result = super()._cast_pointwise_result(values)
lkind = self.dtype.kind
rkind = result.dtype.kind
if (
(lkind in "iu" and rkind in "iu")
or (lkind == "f" and rkind == "f")
or (lkind == rkind == "c")
):
result = maybe_downcast_to_dtype(result, self.dtype.numpy_dtype)
elif rkind == "M":
# Ensure potential subsequent .astype(object) doesn't incorrectly
# convert Timestamps to ints
from pandas import array as pd_array
result = pd_array(result, copy=False)
return result
# ------------------------------------------------------------------------
# Data
@property
def dtype(self) -> NumpyEADtype:
return self._dtype
# ------------------------------------------------------------------------
# NumPy Array Interface
def __array__(
self, dtype: np.dtype | None = None, copy: bool | None = None
) -> np.ndarray:
if copy is not None:
# Note: branch avoids `copy=None` for NumPy 1.x support
result = np.array(self._ndarray, dtype=dtype, copy=copy)
else:
result = np.asarray(self._ndarray, dtype=dtype)
if (
self._readonly
and not copy
and (dtype is None or astype_is_view(self.dtype, dtype))
):
result = result.view()
result.flags.writeable = False
return result
def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
# Lightly modified version of
# https://numpy.org/doc/stable/reference/generated/numpy.lib.mixins.NDArrayOperatorsMixin.html
# The primary modification is not boxing scalar return values
# in NumpyExtensionArray, since pandas' ExtensionArrays are 1-d.
out = kwargs.get("out", ())
result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
if "out" in kwargs:
# e.g. test_ufunc_unary
return arraylike.dispatch_ufunc_with_out(
self, ufunc, method, *inputs, **kwargs
)
if method == "reduce":
result = arraylike.dispatch_reduction_ufunc(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
# e.g. tests.series.test_ufunc.TestNumpyReductions
return result
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(
x._ndarray if isinstance(x, NumpyExtensionArray) else x for x in inputs
)
if out:
kwargs["out"] = tuple(
x._ndarray if isinstance(x, NumpyExtensionArray) else x for x in out
)
result = getattr(ufunc, method)(*inputs, **kwargs)
if ufunc.nout > 1:
# multiple return values; re-box array-like results
return tuple(type(self)(x) for x in result)
elif method == "at":
# no return value
return None
elif method == "reduce":
if isinstance(result, np.ndarray):
# e.g. test_np_reduce_2d
return type(self)(result)
# e.g. test_np_max_nested_tuples
return result
else:
if self.dtype.type is str: # type: ignore[comparison-overlap]
# StringDtype
self = cast("StringArray", self)
try:
# specify dtype to preserve storage/na_value
return type(self)(result, dtype=self.dtype)
except ValueError:
# if validation of input fails (no strings)
# -> fallback to returning raw numpy array
return result
# one return value; re-box array-like results
return type(self)(result)
# ------------------------------------------------------------------------
# Pandas ExtensionArray Interface
def astype(self, dtype, copy: bool = True):
dtype = pandas_dtype(dtype)
if dtype == self.dtype:
if copy:
return self.copy()
return self
result = astype_array(self._ndarray, dtype=dtype, copy=copy)
return result
def isna(self) -> np.ndarray:
    # Boolean mask of missing entries, delegated to pandas' top-level
    # isna applied to the backing ndarray.
    values = self._ndarray
    return isna(values)
def _validate_scalar(self, fill_value):
    """Return ``fill_value``, substituting the dtype's NA sentinel for None."""
    if fill_value is not None:
        return fill_value
    # None means "use the default missing value"; subclasses rely on this.
    return self.dtype.na_value
def _values_for_factorize(self) -> tuple[np.ndarray, float | None]:
if self.dtype.kind in "iub":
fv = None
else:
fv = np.nan
return self._ndarray, fv
# Base EA class (and all other EA classes) don't have limit_area keyword
# This can be removed here as well when the interpolate ffill/bfill method
# deprecation is enforced
def _pad_or_backfill(
    self,
    *,
    method: FillnaOptions,
    limit: int | None = None,
    limit_area: Literal["inside", "outside"] | None = None,
    copy: bool = True,
) -> Self:
    """
    ffill or bfill along axis=0.

    Parameters
    ----------
    method : FillnaOptions
        Fill direction, normalized via ``missing.clean_fill_method``.
    limit : int, optional
        Maximum number of consecutive entries to fill.
    limit_area : {"inside", "outside"}, optional
        Restrict which gaps are filled.
    copy : bool, default True
        When False, fill the backing ndarray in place and return ``self``.
    """
    if copy:
        out_data = self._ndarray.copy()
    else:
        # Caller opted into in-place filling of our own buffer.
        out_data = self._ndarray
    meth = missing.clean_fill_method(method)
    # NOTE: the transposed view with axis=0 matches the in-place helper's
    # axis convention — presumably it fills along this array's axis 0.
    missing.pad_or_backfill_inplace(
        out_data.T,
        method=meth,
        axis=0,
        limit=limit,
        limit_area=limit_area,
    )
    if not copy:
        return self
    return type(self)._simple_new(out_data, dtype=self.dtype)
def interpolate(
    self,
    *,
    method: InterpolateOptions,
    axis: int,
    index: Index,
    limit,
    limit_direction,
    limit_area,
    copy: bool,
    **kwargs,
) -> Self:
    """
    See NDFrame.interpolate.__doc__.

    Raises
    ------
    TypeError
        If the dtype is not numeric; interpolation is undefined otherwise.
    """
    # NB: we return type(self) even if copy=False
    if not self.dtype._is_numeric:
        raise TypeError(f"Cannot interpolate with {self.dtype} dtype")
    if not copy:
        # Interpolate directly into our own buffer.
        out_data = self._ndarray
    else:
        out_data = self._ndarray.copy()
    # TODO: assert we have floating dtype?
    missing.interpolate_2d_inplace(
        out_data,
        method=method,
        axis=axis,
        index=index,
        limit=limit,
        limit_direction=limit_direction,
        limit_area=limit_area,
        **kwargs,
    )
    if not copy:
        return self
    return type(self)._simple_new(out_data, dtype=self.dtype)
def take(
    self,
    indices: TakeIndexer,
    *,
    allow_fill: bool = False,
    fill_value: Any = None,
    axis: AxisInt = 0,
) -> Self:
    """
    Take entries from this array at each index in a list of indices,
    producing an array containing only those entries.
    """
    result = super().take(
        indices, allow_fill=allow_fill, fill_value=fill_value, axis=axis
    )
    # See GH#62448.
    if self.dtype.kind in "iub":
        # NOTE(review): re-wrapping discards any extra state carried by
        # ``result`` (GH#62448); ``copy=False`` keeps this a cheap re-box.
        return type(self)(result._ndarray, copy=False)
    return result
# ------------------------------------------------------------------------
# Reductions
def any(
self,
*,
axis: AxisInt | None = None,
out=None,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_any((), {"out": out, "keepdims": keepdims})
result = nanops.nanany(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
def all(
self,
*,
axis: AxisInt | None = None,
out=None,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_all((), {"out": out, "keepdims": keepdims})
result = nanops.nanall(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
def min(
self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs
) -> Scalar:
nv.validate_min((), kwargs)
result = nanops.nanmin(
values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna
)
return self._wrap_reduction_result(axis, result)
def max(
self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs
) -> Scalar:
nv.validate_max((), kwargs)
result = nanops.nanmax(
values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna
)
return self._wrap_reduction_result(axis, result)
def sum(
self,
*,
axis: AxisInt | None = None,
skipna: bool = True,
min_count: int = 0,
**kwargs,
) -> Scalar:
nv.validate_sum((), kwargs)
result = nanops.nansum(
self._ndarray, axis=axis, skipna=skipna, min_count=min_count
)
return self._wrap_reduction_result(axis, result)
def prod(
self,
*,
axis: AxisInt | None = None,
skipna: bool = True,
min_count: int = 0,
**kwargs,
) -> Scalar:
nv.validate_prod((), kwargs)
result = nanops.nanprod(
self._ndarray, axis=axis, skipna=skipna, min_count=min_count
)
return self._wrap_reduction_result(axis, result)
def mean(
self,
*,
axis: AxisInt | None = None,
dtype: NpDtype | None = None,
out=None,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_mean((), {"dtype": dtype, "out": out, "keepdims": keepdims})
result = nanops.nanmean(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
def median(
self,
*,
axis: AxisInt | None = None,
out=None,
overwrite_input: bool = False,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_median(
(), {"out": out, "overwrite_input": overwrite_input, "keepdims": keepdims}
)
result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
def std(
self,
*,
axis: AxisInt | None = None,
dtype: NpDtype | None = None,
out=None,
ddof: int = 1,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="std"
)
result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
return self._wrap_reduction_result(axis, result)
def var(
self,
*,
axis: AxisInt | None = None,
dtype: NpDtype | None = None,
out=None,
ddof: int = 1,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="var"
)
result = nanops.nanvar(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
return self._wrap_reduction_result(axis, result)
def sem(
self,
*,
axis: AxisInt | None = None,
dtype: NpDtype | None = None,
out=None,
ddof: int = 1,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="sem"
)
result = nanops.nansem(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
return self._wrap_reduction_result(axis, result)
def kurt(
self,
*,
axis: AxisInt | None = None,
dtype: NpDtype | None = None,
out=None,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="kurt"
)
result = nanops.nankurt(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
def skew(
self,
*,
axis: AxisInt | None = None,
dtype: NpDtype | None = None,
out=None,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="skew"
)
result = nanops.nanskew(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
# ------------------------------------------------------------------------
# Additional Methods
def to_numpy(
    self,
    dtype: npt.DTypeLike | None = None,
    copy: bool = False,
    na_value: object = lib.no_default,
) -> np.ndarray:
    """
    Convert to a plain ndarray, optionally casting and replacing NA values.
    """
    mask = self.isna()
    if na_value is not lib.no_default and mask.any():
        # Replacing NAs needs a fresh buffer so we never mutate self.
        result = self._ndarray.copy()
        result[mask] = na_value
    else:
        result = self._ndarray
    if not copy and self._readonly:
        # Hand out a read-only view so callers cannot mutate our buffer.
        result = result.view()
        result.flags.writeable = False
    result = np.asarray(result, dtype=dtype)
    if copy and result is self._ndarray:
        # asarray was a no-op returning our own buffer; honor the copy request.
        result = result.copy()
    return result
# ------------------------------------------------------------------------
# Ops
def __invert__(self) -> NumpyExtensionArray:
return type(self)(~self._ndarray)
def __neg__(self) -> NumpyExtensionArray:
return type(self)(-self._ndarray)
def __pos__(self) -> NumpyExtensionArray:
return type(self)(+self._ndarray)
def __abs__(self) -> NumpyExtensionArray:
return type(self)(abs(self._ndarray))
def _cmp_method(self, other, op):
    # Shared implementation for both comparison and arithmetic dunders;
    # see the `_arith_method` alias below.
    if isinstance(other, NumpyExtensionArray):
        other = other._ndarray
    other = ops.maybe_prepare_scalar_for_op(other, (len(self),))
    pd_op = ops.get_array_op(op)
    other = ensure_wrapped_if_datetimelike(other)
    result = pd_op(self._ndarray, other)
    if op is divmod or op is ops.rdivmod:
        # divmod produces a (quotient, remainder) pair; wrap each half.
        a, b = result
        if isinstance(a, np.ndarray):
            # for e.g. op vs TimedeltaArray, we may already
            # have an ExtensionArray, in which case we do not wrap
            return self._wrap_ndarray_result(a), self._wrap_ndarray_result(b)
        return a, b
    if isinstance(result, np.ndarray):
        # for e.g. multiplication vs TimedeltaArray, we may already
        # have an ExtensionArray, in which case we do not wrap
        return self._wrap_ndarray_result(result)
    return result

# Arithmetic operators reuse the same dispatch logic as comparisons.
_arith_method = _cmp_method
def _wrap_ndarray_result(self, result: np.ndarray):
    # If we have timedelta64[ns] result, return a TimedeltaArray instead
    # of a NumpyExtensionArray
    if result.dtype.kind == "m" and is_supported_dtype(result.dtype):
        # Local import — presumably to avoid an import cycle at module load.
        from pandas.core.arrays import TimedeltaArray

        return TimedeltaArray._simple_new(result, dtype=result.dtype)
    return type(self)(result)
def _formatter(self, boxed: bool = False) -> Callable[[Any], str | None]:
# NEP 51: https://github.com/numpy/numpy/pull/22449
if self.dtype.kind in "SU":
return "'{}'".format
elif self.dtype == "object":
return repr
else:
return str
| NumpyExtensionArray |
python | pytorch__pytorch | test/test_binary_ufuncs.py | {
"start": 1844,
"end": 185213
} | class ____(TestCase):
# Generic tests for elementwise binary (AKA binary universal (u) functions (funcs))
# TODO: below contiguous tensor results are compared with a variety of noncontiguous results.
# It would be interesting to have the lhs and rhs have different discontinuities.
# Helper for comparing torch tensors and NumPy arrays
# TODO: should this or assertEqual also validate that strides are equal?
def assertEqualHelper(
    self, actual, expected, msg, *, dtype, exact_dtype=True, **kwargs
):
    """Compare a torch tensor against a NumPy reference value.

    ``expected`` may be a Python Number (some NumPy functions return
    scalars) or an ndarray; anything else falls through to assertEqual.
    """
    assert isinstance(actual, torch.Tensor)
    # Some NumPy functions return scalars, not arrays
    if isinstance(expected, Number):
        self.assertEqual(actual.item(), expected, msg=msg, **kwargs)
    elif isinstance(expected, np.ndarray):
        # Handles exact dtype comparisons between arrays and tensors
        if exact_dtype:
            # Allows array dtype to be float32 when comparing with bfloat16 tensors
            # since NumPy doesn't support the bfloat16 dtype
            # Also ops like scipy.special.erf, scipy.special.erfc, etc, promote float16
            # to float32
            if expected.dtype == np.float32:
                assert actual.dtype in (
                    torch.float16,
                    torch.bfloat16,
                    torch.float32,
                )
            else:
                assert expected.dtype == torch_to_numpy_dtype_dict[actual.dtype]
        self.assertEqual(
            actual,
            torch.from_numpy(expected).to(actual.dtype),
            msg,
            exact_device=False,
            **kwargs,
        )
    else:
        self.assertEqual(actual, expected, msg, exact_device=False, **kwargs)
# Tests that the function and its (array-accepting) reference produce the same
# values on given tensors
def _test_reference_numerics(self, dtype, op, gen, equal_nan=True):
    """Run ``op`` on every sample from ``gen`` and compare against ``op.ref``
    (the NumPy reference implementation), with relaxed tolerances for
    bfloat16.
    """

    def _helper_reference_numerics(
        expected, actual, msg, exact_dtype, equal_nan=True
    ):
        # When the reference's dtype can't be cast to the tested dtype,
        # don't demand an exact-dtype match.
        if not torch.can_cast(
            numpy_to_torch_dtype_dict[expected.dtype.type], dtype
        ):
            exact_dtype = False
        if dtype is torch.bfloat16 and expected.dtype == np.float32:
            # Ref: https://github.com/pytorch/pytorch/blob/master/torch/testing/_internal/common_utils.py#L1149
            self.assertEqualHelper(
                actual,
                expected,
                msg,
                dtype=dtype,
                exact_dtype=exact_dtype,
                rtol=16e-3,
                atol=1e-5,
            )
        else:
            self.assertEqualHelper(
                actual,
                expected,
                msg,
                dtype=dtype,
                equal_nan=equal_nan,
                exact_dtype=exact_dtype,
            )

    for sample in gen:
        # Each sample input acquired from the generator is just one lhs tensor
        # and one rhs tensor
        l = sample.input
        r = sample.args[0]
        numpy_sample = sample.numpy()
        l_numpy = numpy_sample.input
        r_numpy = numpy_sample.args[0]
        actual = op(l, r)
        expected = op.ref(l_numpy, r_numpy)
        # Dtype promo rules have changed since NumPy 2.
        # Specialize the backward-incompatible cases.
        # NOTE(review): lexicographic version compare; fine until a
        # hypothetical NumPy "10.x".
        if (
            np.__version__ > "2"
            and op.name in ("sub", "_refs.sub")
            and isinstance(l_numpy, np.ndarray)
        ):
            expected = expected.astype(l_numpy.dtype)

        # Crafts a custom error message for smaller, printable tensors
        def _numel(x):
            if isinstance(x, torch.Tensor):
                return x.numel()
            # Assumes x is a scalar
            return 1

        if _numel(l) <= 100 and _numel(r) <= 100:
            msg = (
                "Failed to produce expected results! Input lhs tensor was"
                f" {l}, rhs tensor was {r}, torch result is {actual}, and reference result is"
                f" {expected}."
            )
        else:
            msg = None
        exact_dtype = True
        if isinstance(actual, torch.Tensor):
            _helper_reference_numerics(
                expected, actual, msg, exact_dtype, equal_nan
            )
        else:
            for x, y in zip(expected, actual):
                # testing multi-outputs results
                _helper_reference_numerics(x, y, msg, exact_dtype, equal_nan)
# The following tests only apply to elementwise binary operators with references
# Elementwise binary ops that ship a NumPy reference implementation.
# (The original predicate tested `op.ref is not None` twice; one check
# is sufficient.)
binary_ufuncs_with_references = list(
    filter(lambda op: op.ref is not None, binary_ufuncs)
)
@ops(binary_ufuncs_with_references)
def test_reference_numerics(self, device, dtype, op):
    # Compare op against its NumPy reference on ordinary sample tensors.
    gen = generate_elementwise_binary_tensors(op, device=device, dtype=dtype)
    self._test_reference_numerics(dtype, op, gen, equal_nan=True)
@ops(binary_ufuncs_with_references)
def test_reference_numerics_small_values(self, device, dtype, op):
    # Small-magnitude inputs; bool has no meaningful "small" values.
    if dtype is torch.bool:
        self.skipTest("Doesn't support bool!")
    gen = generate_elementwise_binary_small_value_tensors(
        op, device=device, dtype=dtype
    )
    self._test_reference_numerics(dtype, op, gen, equal_nan=True)
@ops(
    binary_ufuncs_with_references,
    allowed_dtypes=(
        torch.int16,
        torch.int32,
        torch.int64,
        torch.float16,
        torch.bfloat16,
        torch.float32,
        torch.float64,
        torch.complex64,
        torch.complex128,
    ),
)
def test_reference_numerics_large_values(self, device, dtype, op):
    # Large-magnitude inputs; 8-bit ints and bool are excluded above.
    gen = generate_elementwise_binary_large_value_tensors(
        op, device=device, dtype=dtype
    )
    self._test_reference_numerics(dtype, op, gen, equal_nan=True)
@ops(
    binary_ufuncs_with_references,
    allowed_dtypes=(
        torch.float16,
        torch.bfloat16,
        torch.float32,
        torch.float64,
        torch.complex64,
        torch.complex128,
    ),
)
def test_reference_numerics_extremal_values(self, device, dtype, op):
    # Extremal inputs (inf/nan etc. come from the generator); float/complex only.
    gen = generate_elementwise_binary_extremal_value_tensors(
        op, device=device, dtype=dtype
    )
    self._test_reference_numerics(dtype, op, gen, equal_nan=True)
# tests broadcasting and noncontiguous broadcasting behavior
@ops(
    binary_ufuncs_with_references,
    allowed_dtypes=(
        torch.long,
        torch.float32,
    ),
)
def test_broadcasting(self, device, dtype, op):
    # Broadcasting (including noncontiguous) inputs vs the NumPy reference.
    gen = generate_elementwise_binary_broadcasting_tensors(
        op, device=device, dtype=dtype
    )
    self._test_reference_numerics(dtype, op, gen, equal_nan=True)
@ops(
    binary_ufuncs_with_references,
    allowed_dtypes=(torch.long, torch.float32, torch.complex64),
)
def test_scalar_support(self, device, dtype, op):
    # Scalar operands, both with and without type promotion.
    gen = generate_elementwise_binary_with_scalar_samples(
        op, device=device, dtype=dtype
    )
    self._test_reference_numerics(dtype, op, gen, equal_nan=True)
    gen = generate_elementwise_binary_with_scalar_and_type_promotion_samples(
        op, device=device, dtype=dtype
    )
    self._test_reference_numerics(dtype, op, gen, equal_nan=True)
@ops(binary_ufuncs)
def test_contig_vs_every_other(self, device, dtype, op):
    # Stride-2 (every-other-element) views must match the contiguous result.
    lhs = make_tensor(
        (1026,), device=device, dtype=dtype, **op.lhs_make_tensor_kwargs
    )
    rhs = make_tensor(
        (1026,), device=device, dtype=dtype, **op.rhs_make_tensor_kwargs
    )
    lhs_non_contig = lhs[::2]
    rhs_non_contig = rhs[::2]
    self.assertTrue(lhs.is_contiguous())
    self.assertTrue(rhs.is_contiguous())
    self.assertFalse(lhs_non_contig.is_contiguous())
    self.assertFalse(rhs_non_contig.is_contiguous())
    expected = op(lhs, rhs)[::2]
    actual = op(lhs_non_contig, rhs_non_contig)
    self.assertEqual(expected, actual)
@ops(binary_ufuncs)
def test_contig_vs_transposed(self, device, dtype, op):
    # Transposed (noncontiguous) operands must match the contiguous result.
    lhs = make_tensor(
        (789, 357), device=device, dtype=dtype, **op.lhs_make_tensor_kwargs
    )
    rhs = make_tensor(
        (789, 357), device=device, dtype=dtype, **op.rhs_make_tensor_kwargs
    )
    lhs_non_contig = lhs.T
    rhs_non_contig = rhs.T
    self.assertTrue(lhs.is_contiguous())
    self.assertTrue(rhs.is_contiguous())
    self.assertFalse(lhs_non_contig.is_contiguous())
    self.assertFalse(rhs_non_contig.is_contiguous())
    expected = op(lhs, rhs).T
    actual = op(lhs_non_contig, rhs_non_contig)
    self.assertEqual(expected, actual)
@ops(binary_ufuncs)
def test_non_contig(self, device, dtype, op):
    # Data copied into stride-2 views must behave like contiguous tensors.
    shapes = ((5, 7), (1024,))
    for shape in shapes:
        lhs = make_tensor(
            shape, dtype=dtype, device=device, **op.lhs_make_tensor_kwargs
        )
        rhs = make_tensor(
            shape, dtype=dtype, device=device, **op.rhs_make_tensor_kwargs
        )
        lhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[
            ..., 0
        ]
        lhs_non_contig.copy_(lhs)
        rhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[
            ..., 0
        ]
        rhs_non_contig.copy_(rhs)
        self.assertTrue(lhs.is_contiguous())
        self.assertTrue(rhs.is_contiguous())
        self.assertFalse(lhs_non_contig.is_contiguous())
        self.assertFalse(rhs_non_contig.is_contiguous())
        expected = op(lhs, rhs)
        actual = op(lhs_non_contig, rhs_non_contig)
        self.assertEqual(expected, actual)
@ops(binary_ufuncs)
def test_non_contig_index(self, device, dtype, op):
    # Indexed (noncontiguous) slices vs their contiguous copies.
    shape = (2, 2, 1, 2)
    lhs = make_tensor(
        shape, dtype=dtype, device=device, **op.lhs_make_tensor_kwargs
    )
    rhs = make_tensor(
        shape, dtype=dtype, device=device, **op.rhs_make_tensor_kwargs
    )
    lhs_non_contig = lhs[:, 1, ...]
    lhs = lhs_non_contig.contiguous()
    rhs_non_contig = rhs[:, 1, ...]
    rhs = rhs_non_contig.contiguous()
    self.assertTrue(lhs.is_contiguous())
    self.assertTrue(rhs.is_contiguous())
    self.assertFalse(lhs_non_contig.is_contiguous())
    self.assertFalse(rhs_non_contig.is_contiguous())
    expected = op(lhs, rhs)
    actual = op(lhs_non_contig, rhs_non_contig)
    self.assertEqual(expected, actual)
@ops(binary_ufuncs)
def test_non_contig_expand(self, device, dtype, op):
    # Expanded (zero-stride, noncontiguous) views vs the contiguous result.
    shapes = [(1, 3), (1, 7), (5, 7)]
    for shape in shapes:
        lhs = make_tensor(
            shape, dtype=dtype, device=device, **op.lhs_make_tensor_kwargs
        )
        rhs = make_tensor(
            shape, dtype=dtype, device=device, **op.rhs_make_tensor_kwargs
        )
        lhs_non_contig = lhs.clone().expand(3, -1, -1)
        rhs_non_contig = rhs.clone().expand(3, -1, -1)
        self.assertTrue(lhs.is_contiguous())
        self.assertTrue(rhs.is_contiguous())
        self.assertFalse(lhs_non_contig.is_contiguous())
        self.assertFalse(rhs_non_contig.is_contiguous())
        expected = op(lhs, rhs)
        actual = op(lhs_non_contig, rhs_non_contig)
        # Each expanded "batch" entry must equal the unexpanded result.
        for i in range(3):
            self.assertEqual(expected, actual[i])
@ops(binary_ufuncs)
def test_contig_size1(self, device, dtype, op):
    # Contiguous slices of a larger buffer vs freshly allocated copies.
    shape = (5, 100)
    lhs = make_tensor(
        shape, dtype=dtype, device=device, **op.lhs_make_tensor_kwargs
    )
    rhs = make_tensor(
        shape, dtype=dtype, device=device, **op.rhs_make_tensor_kwargs
    )
    lhs = lhs[:1, :50]
    lhs_alt = torch.empty(lhs.size(), device=device, dtype=dtype)
    lhs_alt.copy_(lhs)
    rhs = rhs[:1, :50]
    rhs_alt = torch.empty(rhs.size(), device=device, dtype=dtype)
    rhs_alt.copy_(rhs)
    self.assertTrue(lhs.is_contiguous())
    self.assertTrue(rhs.is_contiguous())
    self.assertTrue(lhs_alt.is_contiguous())
    self.assertTrue(rhs_alt.is_contiguous())
    expected = op(lhs, rhs)
    actual = op(lhs_alt, rhs_alt)
    self.assertEqual(expected, actual)
@ops(binary_ufuncs)
def test_contig_size1_large_dim(self, device, dtype, op):
    # Same as test_contig_size1 but on a high-rank tensor.
    shape = (5, 2, 3, 1, 4, 5, 3, 2, 1, 2, 3, 4)
    lhs = make_tensor(
        shape, dtype=dtype, device=device, **op.lhs_make_tensor_kwargs
    )
    rhs = make_tensor(
        shape, dtype=dtype, device=device, **op.rhs_make_tensor_kwargs
    )
    lhs = lhs[:1, :, :, :, :, :, :, :, :, :, :, :]
    lhs_alt = torch.empty(lhs.size(), device=device, dtype=dtype)
    lhs_alt.copy_(lhs)
    rhs = rhs[:1, :, :, :, :, :, :, :, :, :, :, :]
    rhs_alt = torch.empty(rhs.size(), device=device, dtype=dtype)
    rhs_alt.copy_(rhs)
    self.assertTrue(lhs.is_contiguous())
    self.assertTrue(rhs.is_contiguous())
    self.assertTrue(lhs_alt.is_contiguous())
    self.assertTrue(rhs_alt.is_contiguous())
    expected = op(lhs, rhs)
    actual = op(lhs_alt, rhs_alt)
    self.assertEqual(expected, actual)
@ops(binary_ufuncs)
def test_batch_vs_slicing(self, device, dtype, op):
    # A batched call must equal stacking the per-row results.
    shape = (32, 512)
    lhs = make_tensor(
        shape, dtype=dtype, device=device, **op.lhs_make_tensor_kwargs
    )
    rhs = make_tensor(
        shape, dtype=dtype, device=device, **op.rhs_make_tensor_kwargs
    )
    expected = op(lhs, rhs)
    actual = []
    for idx in range(32):
        actual.append(op(lhs[idx], rhs[idx]))
    actual = torch.stack(actual)
    self.assertEqual(expected, actual)
# Tests that elementwise binary operators participate in type promotion properly
# NOTE: because the cross-product of all possible type promotion tests is huge, this
# just spot checks some handwritten cases.
# NOTE: It may be possible to refactor this test into something simpler
@ops(binary_ufuncs_and_refs, dtypes=OpDTypes.none)
def test_type_promotion(self, device, op):
    """Spot-check type promotion for binary ops across int/float/complex
    tensor and scalar combinations (the full cross-product is too large
    to test exhaustively).

    Fixes relative to the previous version:
    - the complex always-returns-bool branch now uses ``rhs_c128``
      (it passed ``lhs_c128``, unlike every parallel branch);
    - the complex "bool outs can be cast to an integer dtype" check now
      uses the complex-section tensors (it referenced ``lhs_f64``/
      ``lhs_f32``/``rhs_f64``, which are undefined when only complex
      dtypes are supported).
    """
    supported_dtypes = op.supported_dtypes(torch.device(device).type)
    make_lhs = partial(
        make_tensor, (5,), device=device, **op.lhs_make_tensor_kwargs
    )
    make_rhs = partial(
        make_tensor, (5,), device=device, **op.rhs_make_tensor_kwargs
    )
    make_rhs_scalar_tensor = partial(
        make_tensor, (), device="cpu", **op.rhs_make_tensor_kwargs
    )

    def _supported(dtypes):
        return all(x in supported_dtypes for x in dtypes)

    # int x int type promotion
    if _supported((torch.int16, torch.int32, torch.int64)):
        lhs_i16 = make_lhs(dtype=torch.int16)
        lhs_i32 = make_lhs(dtype=torch.int32)
        lhs_i64 = make_lhs(dtype=torch.int64)
        rhs_i16 = make_rhs(dtype=torch.int16)
        rhs_i32 = make_rhs(dtype=torch.int32)
        rhs_i64 = make_rhs(dtype=torch.int64)
        if op.promotes_int_to_float:
            default_dtype = torch.get_default_dtype()
            self.assertEqual(op(lhs_i16, rhs_i32).dtype, default_dtype)
            self.assertEqual(
                op(lhs_i16, rhs_i32),
                op(lhs_i16.to(default_dtype), rhs_i32.to(default_dtype)),
            )
            self.assertEqual(op(lhs_i32, rhs_i64).dtype, default_dtype)
            self.assertEqual(
                op(lhs_i32, rhs_i64),
                op(lhs_i32.to(default_dtype), rhs_i64.to(default_dtype)),
            )
        elif op.always_returns_bool:
            self.assertEqual(op(lhs_i16, rhs_i32).dtype, torch.bool)
            self.assertEqual(op(lhs_i32, rhs_i64).dtype, torch.bool)
        else:  # standard type promotion
            self.assertEqual(op(lhs_i16, rhs_i32).dtype, torch.int32)
            self.assertEqual(
                op(lhs_i16, rhs_i32), op(lhs_i16.to(torch.int32), rhs_i32)
            )
            self.assertEqual(op(lhs_i32, rhs_i64).dtype, torch.int64)
            self.assertEqual(
                op(lhs_i32, rhs_i64), op(lhs_i32.to(torch.int64), rhs_i64)
            )
        if op.supports_out:
            if not op.promotes_int_to_float:
                # Integers can be safely cast to other integer types
                out = torch.empty_like(lhs_i64)
                self.assertEqual(op(lhs_i16, rhs_i32, out=out).dtype, torch.int64)
                self.assertEqual(op(lhs_i16, rhs_i32), out, exact_dtype=False)
                out = torch.empty_like(lhs_i16)
                self.assertEqual(op(lhs_i32, rhs_i64, out=out).dtype, torch.int16)
            else:
                # Float outs cannot be safely cast to integer types
                with self.assertRaisesRegex(RuntimeError, "can't be cast"):
                    op(lhs_i16, rhs_i32, out=torch.empty_like(lhs_i64))
            if not op.always_returns_bool:
                # Neither integer nor float outs can be cast to bool
                with self.assertRaisesRegex(RuntimeError, "can't be cast"):
                    op(
                        lhs_i16,
                        rhs_i32,
                        out=torch.empty_like(lhs_i64, dtype=torch.bool),
                    )
            # All these output types can be cast to any float or complex type
            out = torch.empty_like(lhs_i64, dtype=torch.float16)
            self.assertEqual(op(lhs_i16, rhs_i32, out=out).dtype, torch.float16)
            out = torch.empty_like(lhs_i64, dtype=torch.bfloat16)
            self.assertEqual(op(lhs_i16, rhs_i32, out=out).dtype, torch.bfloat16)
            out = torch.empty_like(lhs_i64, dtype=torch.float32)
            self.assertEqual(op(lhs_i16, rhs_i32, out=out).dtype, torch.float32)
            self.assertEqual(op(lhs_i16, rhs_i32), out, exact_dtype=False)
            out = torch.empty_like(lhs_i64, dtype=torch.complex64)
            self.assertEqual(op(lhs_i16, rhs_i32, out=out).dtype, torch.complex64)
            self.assertEqual(op(lhs_i16, rhs_i32), out, exact_dtype=False)

    # float x float type promotion
    if _supported((torch.float32, torch.float64)):
        lhs_f32 = make_lhs(dtype=torch.float32)
        lhs_f64 = make_lhs(dtype=torch.float64)
        rhs_f32 = make_rhs(dtype=torch.float32)
        rhs_f64 = make_rhs(dtype=torch.float64)
        if op.always_returns_bool:
            self.assertEqual(op(lhs_f32, rhs_f64).dtype, torch.bool)
        else:  # normal float type promotion
            self.assertEqual(op(lhs_f32, rhs_f64).dtype, torch.float64)
            self.assertEqual(
                op(lhs_f32, rhs_f64), op(lhs_f32.to(torch.float64), rhs_f64)
            )
        if op.supports_out:
            # All these output types can be cast to any float or complex type
            out = torch.empty_like(lhs_f64, dtype=torch.float16)
            self.assertEqual(op(lhs_f32, rhs_f64, out=out).dtype, torch.float16)
            out = torch.empty_like(lhs_f64, dtype=torch.bfloat16)
            self.assertEqual(op(lhs_f32, rhs_f64, out=out).dtype, torch.bfloat16)
            self.assertEqual(op(lhs_f32, rhs_f64), out, exact_dtype=False)
            out = torch.empty_like(lhs_f64, dtype=torch.float32)
            self.assertEqual(op(lhs_f32, rhs_f64, out=out).dtype, torch.float32)
            self.assertEqual(op(lhs_f32, rhs_f64), out, exact_dtype=False)
            out = torch.empty_like(lhs_f64, dtype=torch.complex64)
            self.assertEqual(op(lhs_f32, rhs_f64, out=out).dtype, torch.complex64)
            self.assertEqual(op(lhs_f32, rhs_f64), out, exact_dtype=False)
            if not op.always_returns_bool:
                # float outs can't be cast to an integer dtype
                with self.assertRaisesRegex(RuntimeError, "can't be cast"):
                    op(
                        lhs_f32,
                        rhs_f64,
                        out=torch.empty_like(lhs_f64, dtype=torch.int64),
                    )
            else:
                # bool outs can be cast to an integer dtype
                out = torch.empty_like(lhs_f64, dtype=torch.int64)
                self.assertEqual(op(lhs_f32, rhs_f64, out=out).dtype, torch.int64)
                self.assertEqual(op(lhs_f32, rhs_f64), out, exact_dtype=False)

    # complex x complex type promotion
    if _supported((torch.complex64, torch.complex128)):
        lhs_c64 = make_lhs(dtype=torch.complex64)
        lhs_c128 = make_lhs(dtype=torch.complex128)
        rhs_c64 = make_rhs(dtype=torch.complex64)
        rhs_c128 = make_rhs(dtype=torch.complex128)
        if op.always_returns_bool:
            # FIX: was op(lhs_c64, lhs_c128); use rhs like all other branches
            self.assertEqual(op(lhs_c64, rhs_c128).dtype, torch.bool)
        else:  # normal complex type promotion
            self.assertEqual(op(lhs_c64, rhs_c128).dtype, torch.complex128)
            self.assertEqual(
                op(lhs_c64, rhs_c128), op(lhs_c64.to(torch.complex128), rhs_c128)
            )
        if op.supports_out:
            # All these output types can be cast to any or complex type
            out = torch.empty_like(lhs_c64, dtype=torch.complex64)
            self.assertEqual(op(lhs_c64, rhs_c128, out=out).dtype, torch.complex64)
            result = op(lhs_c64, rhs_c128)
            self.assertEqual(result, out.to(result.dtype))
            if not op.always_returns_bool:
                # complex outs can't be cast to float types
                with self.assertRaisesRegex(RuntimeError, "can't be cast"):
                    op(
                        lhs_c64,
                        rhs_c128,
                        out=torch.empty_like(lhs_c64, dtype=torch.float64),
                    )
                # complex outs can't be cast to an integer dtype
                with self.assertRaisesRegex(RuntimeError, "can't be cast"):
                    op(
                        lhs_c64,
                        rhs_c128,
                        out=torch.empty_like(lhs_c64, dtype=torch.int64),
                    )
            else:
                # bool outs can be cast to a float type
                out = torch.empty_like(lhs_c64, dtype=torch.float64)
                self.assertEqual(
                    op(lhs_c64, rhs_c128, out=out).dtype, torch.float64
                )
                self.assertEqual(op(lhs_c64, rhs_c128), out, exact_dtype=False)
                # bool outs can be cast to an integer dtype
                # FIX: previously used float-section tensors, which are
                # undefined when only complex dtypes are supported
                out = torch.empty_like(lhs_c64, dtype=torch.int64)
                self.assertEqual(op(lhs_c64, rhs_c128, out=out).dtype, torch.int64)
                self.assertEqual(op(lhs_c64, rhs_c128), out, exact_dtype=False)

    # int x float type promotion
    # Note: float type is the result dtype
    if _supported((torch.long, torch.float32)):
        lhs_i64 = make_lhs(dtype=torch.int64)
        rhs_f32 = make_rhs(dtype=torch.float32)
        result = op(lhs_i64, rhs_f32)
        expected_dtype = torch.float32 if not op.always_returns_bool else torch.bool
        self.assertEqual(result.dtype, expected_dtype)

    # float x complex type promotion
    # Note: complex type with highest "value type" is the result dtype
    if _supported((torch.float64, torch.complex64)):
        lhs_f64 = make_lhs(dtype=torch.float64)
        rhs_c64 = make_rhs(dtype=torch.complex64)
        result = op(lhs_f64, rhs_c64)
        expected_dtype = (
            torch.complex128 if not op.always_returns_bool else torch.bool
        )
        self.assertEqual(result.dtype, expected_dtype)

    # int x float scalar type promotion
    # Note: default float dtype is the result dtype
    if _supported((torch.int64, torch.float32)) and op.supports_rhs_python_scalar:
        lhs_i64 = make_lhs(dtype=torch.int64)
        rhs_f_scalar = 1.0
        result = op(lhs_i64, rhs_f_scalar)
        expected_dtype = (
            torch.get_default_dtype() if not op.always_returns_bool else torch.bool
        )
        self.assertEqual(result.dtype, expected_dtype)
        # repeats with a scalar float tensor, which should set the dtype
        rhs_f32_scalar_tensor = make_rhs_scalar_tensor(dtype=torch.float32)
        result = op(lhs_i64, rhs_f32_scalar_tensor)
        expected_dtype = torch.float32 if not op.always_returns_bool else torch.bool
        self.assertEqual(result.dtype, expected_dtype)
        # Additional test with double
        if _supported((torch.float64,)):
            rhs_f64_scalar_tensor = make_rhs_scalar_tensor(dtype=torch.float64)
            result = op(lhs_i64, rhs_f64_scalar_tensor)
            expected_dtype = (
                torch.float64 if not op.always_returns_bool else torch.bool
            )
            self.assertEqual(result.dtype, expected_dtype)

    # float x complex scalar type promotion
    # Note: result dtype is complex with highest "value type" among all tensors
    if (
        _supported((torch.float32, torch.complex64))
        and op.supports_rhs_python_scalar
    ):
        lhs_f32 = make_lhs(dtype=torch.float32)
        rhs_c_scalar = complex(1, 1)
        result = op(lhs_f32, rhs_c_scalar)
        expected_dtype = (
            torch.complex64 if not op.always_returns_bool else torch.bool
        )
        self.assertEqual(result.dtype, expected_dtype)
        # repeats with a scalar complex tensor
        rhs_c64_scalar_tensor = make_rhs_scalar_tensor(dtype=torch.complex64)
        result = op(lhs_f32, rhs_c64_scalar_tensor)
        expected_dtype = (
            torch.complex64 if not op.always_returns_bool else torch.bool
        )
        self.assertEqual(result.dtype, expected_dtype)
        # Additional test with complexdouble
        if _supported((torch.complex128,)):
            rhs_c128_scalar_tensor = make_rhs_scalar_tensor(dtype=torch.complex128)
            result = op(lhs_f32, rhs_c128_scalar_tensor)
            # Value type of 1D+ Tensor (lhs_f32) takes priority over scalar tensor (rhs_c128).
            expected_dtype = (
                torch.complex64 if not op.always_returns_bool else torch.bool
            )
            self.assertEqual(result.dtype, expected_dtype)

    # float x float scalar tensor
    # Note: result dtype is the type of the float tensor
    if _supported((torch.float32, torch.float64)) and op.supports_rhs_python_scalar:
        lhs_f32 = make_lhs(dtype=torch.float32)
        rhs_f64_scalar_tensor = make_rhs_scalar_tensor(dtype=torch.float64)
        result = op(lhs_f32, rhs_f64_scalar_tensor)
        expected_dtype = torch.float32 if not op.always_returns_bool else torch.bool
        self.assertEqual(result.dtype, expected_dtype)

    # complex x complex scalar tensor
    # Note: result dtype is the type of the complex tensor
    if (
        _supported((torch.complex64, torch.complex128))
        and op.supports_rhs_python_scalar
    ):
        lhs_c64 = make_lhs(dtype=torch.complex64)
        rhs_c128_scalar_tensor = make_rhs_scalar_tensor(dtype=torch.complex128)
        result = op(lhs_c64, rhs_c128_scalar_tensor)
        expected_dtype = (
            torch.complex64 if not op.always_returns_bool else torch.bool
        )
        self.assertEqual(result.dtype, expected_dtype)

    # scalar x scalar
    # Note: result dtype is default float type
    if op.supports_two_python_scalars and _supported((torch.long, torch.float32)):
        rhs_f_scalar = 2.0
        for lhs in (1, 1.0):
            result = op(lhs, rhs_f_scalar)
            expected_dtype = (
                torch.get_default_dtype()
                if not op.always_returns_bool
                else torch.bool
            )
            self.assertEqual(result.dtype, expected_dtype)
# TODO: move to error input test
@ops(binary_ufuncs, allowed_dtypes=(torch.float32,))
def test_not_broadcastable(self, device, dtype, op):
    # Incompatible shape pairs must raise; silently "broadcasting"
    # them is itself the failure we assert against.
    for shape_lhs, shape_rhs in (
        ((2,), (3,)),
        ((3, 1), (2, 1)),
        ((1, 3, 2), (3,)),
        ((3, 1, 2), (2, 1, 2)),
    ):
        lhs = make_tensor(
            shape_lhs, device=device, dtype=dtype, **op.lhs_make_tensor_kwargs
        )
        rhs = make_tensor(
            shape_rhs, device=device, dtype=dtype, **op.rhs_make_tensor_kwargs
        )
        try:
            broadcasted_shape = op(lhs, rhs).shape
        except RuntimeError:
            # Expected: the shapes are not broadcastable.
            continue
        msg = (
            f"On {device}, torch.{op.name} broadcasts inputs shapes {shape_lhs} and {shape_rhs} into "
            f"{broadcasted_shape}, although they are not broadcastable."
        )
        raise AssertionError(msg)
def test_add_broadcast_empty(self, device):
    """Broadcasting rules with zero-sized dimensions: some pairings
    broadcast to empty results, others must raise."""
    # empty + empty
    self.assertRaises(
        RuntimeError,
        lambda: torch.randn(5, 0, device=device) + torch.randn(0, 5, device=device),
    )
    self.assertEqual(
        torch.randn(5, 0, device=device),
        torch.randn(0, device=device) + torch.randn(5, 0, device=device),
    )
    self.assertEqual(
        torch.randn(5, 0, 0, device=device),
        torch.randn(0, device=device) + torch.randn(5, 0, 1, device=device),
    )
    # scalar + empty
    self.assertEqual(
        torch.randn(5, 0, 6, device=device),
        torch.randn((), device=device) + torch.randn(5, 0, 6, device=device),
    )
    # non-empty, empty
    self.assertEqual(
        torch.randn(0, device=device),
        torch.randn(0, device=device) + torch.randn(1, device=device),
    )
    self.assertEqual(
        torch.randn(0, 7, 0, 6, 5, 0, 7, device=device),
        torch.randn(0, 7, 0, 6, 5, 0, 1, device=device)
        + torch.randn(1, 1, 5, 1, 7, device=device),
    )
    self.assertRaises(
        RuntimeError,
        lambda: torch.randn(7, 0, device=device) + torch.randn(2, 1, device=device),
    )
def test_addcmul_scalars_as_floats(self, device):
# zero-dim variables that don't require grad should bind to scalar arguments
x = torch.tensor(2.0)
y = torch.tensor(3.0, device=device)
# 3 + (3 * 3) * 2
self.assertEqual(y.addcmul(y, y, value=x), 21)
x = torch.tensor(2.0, requires_grad=True)
self.assertRaises(Exception, lambda: y.addcmul(y, y, value=x))
    # Tests that the bitwise binary operators and, or, and xor (as well as their
    # reflected and inplace versions) work properly (AKA &, |, ^ and &=, |=, ^=)
    @dtypes(*integral_types_and(torch.bool))
    def test_bitwise_ops(self, device, dtype):
        """Compare torch's bitwise ops against NumPy for tensor x tensor,
        tensor x scalar, and scalar x tensor operand combinations, and verify
        that the inplace variants mutate the lhs the same way NumPy does."""
        # Tensor x Tensor and Tensor x Scalar ops
        ops = (
            operator.and_,
            operator.iand,
            operator.or_,
            operator.ior,
            operator.xor,
            operator.ixor,
        )
        inplace_ops = (operator.iand, operator.ior, operator.ixor)
        shapes = ((5,), (15, 15), (500, 500))
        for op, shape in itertools.product(ops, shapes):
            # Tests tensor x tensor case
            a = make_tensor(shape, device=device, dtype=dtype)
            b = make_tensor(shape, device=device, dtype=dtype)
            a_np = a.cpu().clone().numpy()
            b_np = b.cpu().clone().numpy()
            self.assertEqual(op(a, b), op(a_np, b_np))
            # Tests tensor x scalar case
            a = make_tensor(shape, device=device, dtype=dtype)
            b_scalar = make_tensor((), device="cpu", dtype=dtype).item()
            a_np = a.cpu().clone().numpy()
            self.assertEqual(op(a, b_scalar), op(a_np, b_scalar))
            # Tests scalar x tensor case
            a_scalar = make_tensor((), device="cpu", dtype=dtype).item()
            b = make_tensor(shape, device=device, dtype=dtype)
            b_np = b.cpu().clone().numpy()
            self.assertEqual(op(a_scalar, b), op(a_scalar, b_np))
            # For inplace ops, additionally check that the lhs operand is
            # mutated identically to NumPy's inplace result.
            if op in inplace_ops:
                # Tests tensor x tensor case
                a = make_tensor(shape, device=device, dtype=dtype)
                b = make_tensor(shape, device=device, dtype=dtype)
                a_np = a.cpu().clone().numpy()
                b_np = b.cpu().clone().numpy()
                op(a, b)
                op(a_np, b_np)
                self.assertEqual(a, a_np)
                # Tests tensor x scalar case
                a = make_tensor(shape, device=device, dtype=dtype)
                b_scalar = make_tensor((), device="cpu", dtype=dtype).item()
                a_np = a.cpu().clone().numpy()
                op(a, b_scalar)
                op(a_np, b_scalar)
                self.assertEqual(a, a_np)
def test_inplace_division(self, device):
t = torch.rand(5, 5, device=device)
id_before = id(t)
t /= 2
id_after = id(t)
self.assertEqual(id_before, id_after)
    @dtypes(*all_types_and(torch.half, torch.bfloat16))
    def test_div_rounding_modes(self, device, dtype):
        """Check torch.divide's three rounding modes (None/true, "floor",
        "trunc") against algebraic identities: d_true * b == a and
        d_floor * b + remainder(a, b) == a, plus trunc vs a truncated true
        quotient."""
        if dtype.is_floating_point:
            low, high = -10.0, 10.0
        else:
            info = torch.iinfo(dtype)
            low, high = info.min, info.max
        a = make_tensor((100,), dtype=dtype, device=device, low=low, high=high)
        b = make_tensor((100,), dtype=dtype, device=device, low=low, high=high)
        # Avoid division by zero so we can test (a / b) * b == a
        if dtype.is_floating_point:
            eps = 0.1
            b[(-eps < b) & (b < eps)] = eps
        else:
            b[b == 0] = 1
        if not dtype.is_floating_point:
            # floor(a / b) * b can be < a, so fixup slightly to avoid underflow
            a = torch.where(a < 0, a + b, a)
        # rounding_mode=None performs true division and always returns a float.
        d_true = torch.divide(a, b, rounding_mode=None)
        self.assertTrue(d_true.is_floating_point())
        self.assertEqual(d_true * b, a.to(d_true.dtype))
        d_floor = torch.divide(a, b, rounding_mode="floor")
        if dtype not in (torch.bfloat16, torch.half):
            self.assertEqual(d_floor * b + torch.remainder(a, b), a)
        else:
            # Low-precision dtypes: compute the remainder in float32 and allow
            # a dtype mismatch in the comparison.
            self.assertEqual(
                d_floor * b + torch.remainder(a.float(), b.float()),
                a,
                exact_dtype=False,
            )
        d_trunc = torch.divide(a, b, rounding_mode="trunc")
        # NOTE(review): half on non-CUDA and bfloat16 on non-CPU appear to lack
        # a native rounding path, so the reference is taken in float32 there.
        rounding_unsupported = (
            dtype == torch.half
            and device != "cuda"
            or dtype == torch.bfloat16
            and device != "cpu"
        )
        d_ref = d_true.float() if rounding_unsupported else d_true
        self.assertEqual(d_trunc, d_ref.trunc().to(dtype))
@dtypes(*floating_types_and(torch.bfloat16, torch.float16))
def test_floor_div_extremal(self, device, dtype):
for num, denom, shape in itertools.product(
[torch.finfo(dtype).max * 0.7],
[0.5, -0.5, 0.0],
[(), (32,)], # Scalar and vectorized
):
a = torch.full(shape, num, dtype=dtype, device=device)
b = torch.full(shape, denom, dtype=dtype, device=device)
ref = np.floor_divide(num, denom).item()
if ref > torch.finfo(dtype).max:
ref = np.inf
elif ref < torch.finfo(dtype).min:
ref = -np.inf
expect = torch.full(shape, ref, dtype=dtype, device=device)
actual = torch.div(a, b, rounding_mode="floor")
self.assertEqual(expect, actual)
    @dtypes(torch.bfloat16, torch.half, torch.float32, torch.float64)
    def test_div_rounding_nonfinite(self, device, dtype):
        """Compare division of special floating point values (inf/nan/0/pi)
        against NumPy's true_divide and floor_divide, and check that the
        contiguous and non-contiguous code paths agree."""
        num = torch.tensor(
            [1.0, -1.0, 0, 0.1, -0.1, np.pi, -np.pi, np.inf, -np.inf, np.nan],
            dtype=dtype,
            device=device,
        )
        # Divide by zero is tested separately
        denom = num[num != 0]
        # Build an all-pairs grid via broadcasting: rows are denominators.
        a, b = num[None, :].clone(), denom[:, None].clone()
        # Compare bfloat16 against NumPy float (NumPy has no bfloat16)
        exact_dtype = dtype != torch.bfloat16
        if exact_dtype:
            an, bn = a.cpu().numpy(), b.cpu().numpy()
        else:
            an, bn = a.float().cpu().numpy(), b.float().cpu().numpy()
        for mode, np_ref in ((None, np.true_divide), ("floor", np.floor_divide)):
            expect = np_ref(an, bn)
            kwargs = dict(rounding_mode=mode) if mode is not None else {}
            with set_default_dtype(torch.double):
                actual = torch.divide(a, b, **kwargs)
            self.assertEqual(
                actual,
                torch.from_numpy(expect),
                exact_device=False,
                exact_dtype=exact_dtype,
            )
        # Compare contiguous (likely vectorized) against non-contiguous (not vectorized)
        a_noncontig = torch.empty([2 * i for i in a.shape], dtype=dtype, device=device)[
            ::2, ::2
        ]
        a_noncontig[:] = a
        b_noncontig = torch.empty([2 * i for i in b.shape], dtype=dtype, device=device)[
            ::2, ::2
        ]
        b_noncontig[:] = b
        for rounding_mode in (None, "trunc", "floor"):
            expect = torch.divide(a_noncontig, b_noncontig, rounding_mode=rounding_mode)
            actual = torch.divide(a, b, rounding_mode=rounding_mode)
            self.assertEqual(actual, expect)
@dtypes(torch.bfloat16, torch.half, torch.float32, torch.float64)
def test_divide_by_zero_rounding(self, device, dtype):
a = torch.tensor(
[1.0, -1.0, 0, 0.1, -0.1, np.pi, -np.pi, np.inf, -np.inf, np.nan],
dtype=dtype,
)
exact_dtype = dtype != torch.bfloat16
if exact_dtype:
an = a.cpu().numpy()
else:
an = a.float().cpu().numpy()
zero = torch.zeros_like(a)
# NOTE: NumPy's floor_divide rounding changed in 1.20.0 to be consistent with divide
expect = np.divide(an, 0)
for rounding_mode in (None, "floor"):
# CPU scalar
actual = torch.divide(a, 0, rounding_mode=rounding_mode)
self.assertEqual(actual, expect, exact_dtype=exact_dtype)
# Device tensor
actual = torch.divide(a, zero, rounding_mode=rounding_mode)
self.assertEqual(actual, expect, exact_dtype=exact_dtype)
    @dtypes(*all_types_and(torch.half))
    def test_div_rounding_numpy(self, device, dtype):
        """Compare torch.divide's rounding modes on random values against
        NumPy references, on both contiguous and non-contiguous inputs."""
        info = torch.finfo(dtype) if dtype.is_floating_point else torch.iinfo(dtype)
        low, high = info.min, info.max
        # Compare division of random values against NumPy
        a = make_tensor((4096,), dtype=dtype, device=device, low=low, high=high)
        b = make_tensor((4096,), dtype=dtype, device=device, low=low, high=high)
        # Avoid division by zero which raises for integers and, for floats,
        # NumPy 1.20 changed floor_divide to follow IEEE rules for inf/nan
        # after dividing by zero.
        b[b == 0] = 1
        # Compare bfloat16 against NumPy float
        exact_dtype = dtype != torch.bfloat16
        if exact_dtype:
            an, bn = a.cpu().numpy(), b.cpu().numpy()
        else:
            an, bn = a.float().cpu().numpy(), b.float().cpu().numpy()
        # NumPy reference per rounding mode; "trunc" is emulated by truncating
        # the true quotient and casting back to the input dtype.
        for mode, np_ref in (
            (None, np.true_divide),
            ("floor", np.floor_divide),
            ("trunc", lambda a, b: np.trunc(np.true_divide(a, b)).astype(a.dtype)),
        ):
            expect = torch.from_numpy(np_ref(an, bn))
            kwargs = dict(rounding_mode=mode) if mode is not None else {}
            # Contiguous (likely vectorized)
            with set_default_dtype(torch.double):
                actual = torch.divide(a, b, **kwargs)
            self.assertEqual(
                actual, expect, exact_device=False, exact_dtype=exact_dtype
            )
            # Non-contiguous (not vectorized)
            expect = expect[::2]
            with set_default_dtype(torch.double):
                actual = torch.divide(a[::2], b[::2], **kwargs)
            self.assertEqual(
                actual, expect, exact_device=False, exact_dtype=exact_dtype
            )
@dtypes(*complex_types())
def test_complex_div_underflow_overflow(self, device, dtype):
# test to make sure the complex division does not produce underflow or overflow
# in the intermediate of its calculations
# NOTE: the calculation still produces an error if the number is greater than
# finfo.max / 2, but hopefully people realized that it's a dangerous region to work with
finfo = torch.finfo(dtype)
nom_lst = [
complex(finfo.min / 2, finfo.min / 2),
complex(finfo.max / 2, finfo.max / 2),
complex(finfo.tiny, finfo.tiny),
complex(finfo.tiny, 0.0),
complex(0.0, 0.0),
]
denom_lst = [
complex(finfo.min / 2, finfo.min / 2),
complex(finfo.max / 2, finfo.max / 2),
complex(finfo.tiny, finfo.tiny),
complex(0.0, finfo.tiny),
complex(finfo.tiny, finfo.tiny),
]
expected_lst = [
complex(1.0, 0.0),
complex(1.0, 0.0),
complex(1.0, 0.0),
complex(0.0, -1.0),
complex(0.0, 0.0),
]
nom = torch.tensor(nom_lst, dtype=dtype, device=device)
denom = torch.tensor(denom_lst, dtype=dtype, device=device)
expected = torch.tensor(expected_lst, dtype=dtype, device=device)
res = nom / denom
self.assertEqual(res, expected)
# Tests that trying to add, inplace, a CUDA tensor to a CPU tensor
# throws the correct error message
@onlyCUDA
def test_cross_device_inplace_error_msg(self, device):
a = torch.tensor(2.0)
b = torch.tensor(2.0, device=device)
with self.assertRaisesRegex(
RuntimeError, "Expected all tensors to be on the same device"
):
a += b
    # TODO: refactor this test into a more generic one, it's parked here currently
    @onlyNativeDeviceTypes
    def test_out_resize_warning(self, device):
        """Check that out= arguments of the wrong non-zero size trigger exactly
        one resize warning, while correctly-sized or empty out= tensors do not."""
        a = torch.tensor((1, 2, 3), device=device, dtype=torch.float32)
        b = torch.tensor((4, 5, 6), device=device, dtype=torch.float32)
        unary_inputs = (a,)
        binary_inputs = (a, b)
        unary_ops = (torch.ceil, torch.exp)
        binary_ops = (torch.add, torch.sub)
        for op in unary_ops + binary_ops:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                inputs = unary_inputs if op in unary_ops else binary_inputs
                # No warnings: exact-size and zero-size out= tensors are fine.
                op(*inputs, out=torch.empty(3, device=device))
                op(*inputs, out=torch.empty(0, device=device))
                self.assertEqual(len(w), 0)
                # Cases that throw warnings: wrong non-zero size forces a resize.
                op(*inputs, out=torch.empty(2, device=device))
                self.assertEqual(len(w), 1)
        # test that multi-d out doesn't trigger segfault
        arg1 = (torch.ones(2, 1, device=device), torch.ones(1, device=device))
        arg2 = (torch.ones(2, device=device), torch.ones(1, 1, device=device))
        outs = (
            torch.ones(2, 1, 1, 1, device=device),
            torch.ones(2, 2, 2, 2, device=device),
        )
        # Each multi-dim out= with a mismatched shape should warn exactly once.
        for a1, a2, o in zip(arg1, arg2, outs):
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                torch.mul(a1, a2, out=o)
                self.assertEqual(len(w), 1)
# Verifies that the inplace dunders (like idiv) actually are in place
@expectedFailureMeta # UserWarning not triggered
@onlyNativeDeviceTypes
def test_inplace_dunders(self, device):
t = torch.randn((1,), device=device)
expected = t.data_ptr()
t += 1
t -= 1
t *= 1
t /= 1
t **= 1
t //= 1
t %= 1
self.assertEqual(expected, t.data_ptr())
def check_internal_mem_overlap(
self, inplace_op, num_inputs, dtype, device, expected_failure=False
):
if isinstance(inplace_op, str):
inplace_op = getattr(torch.Tensor, inplace_op)
input = torch.randn(1, dtype=dtype, device=device).expand(3, 3)
inputs = [input] + [torch.randn_like(input) for i in range(num_inputs - 1)]
if not expected_failure:
with self.assertRaisesRegex(RuntimeError, "single memory location"):
inplace_op(*inputs)
else:
with self.assertRaises(AssertionError):
with self.assertRaisesRegex(RuntimeError, "single memory location"):
inplace_op(*inputs)
def unary_check_input_output_mem_overlap(
self, data, sz, op, expected_failure=False
):
def _test(op, output, input):
output_exp = torch.empty_like(output)
op(input, out=output_exp)
self.assertEqual(op(input, out=output), output_exp, msg=op.__name__)
# output is identical to input:
_test(op, output=data[0:sz], input=data[0:sz])
# output and input are independent:
_test(op, output=data[0:sz], input=data[sz : 2 * sz])
# output partially overlaps with input:
if not expected_failure:
with self.assertRaisesRegex(RuntimeError, "unsupported operation"):
_test(op, data[0:sz], data[1 : sz + 1])
else:
with self.assertRaises(AssertionError):
with self.assertRaisesRegex(RuntimeError, "unsupported operation"):
_test(op, data[0:sz], data[1 : sz + 1])
def binary_check_input_output_mem_overlap(self, op, device, expected_failure=False):
sz = 3
data = torch.randn(2 * sz, device=device)
other = torch.randn(sz, device=device)
self.unary_check_input_output_mem_overlap(
data,
sz,
lambda input, out: op(other, input, out=out),
expected_failure=expected_failure,
)
self.unary_check_input_output_mem_overlap(
data,
sz,
lambda input, out: op(input, other, out=out),
expected_failure=expected_failure,
)
    # https://github.com/pytorch/pytorch/issues/126474
    @xfailIfTorchDynamo
    @dtypes(torch.double)
    def test_binary_op_mem_overlap(self, device, dtype):
        """For each listed binary op, verify both internal overlap detection
        (inplace op on a self-overlapping tensor) and input/output overlap
        detection (out= partially overlapping an input).

        Each table row is (op name, has input/output overlap check,
        has internal overlap check, device the row applies to); rows for a
        different device than the current one are skipped.
        """
        ops = [
            ("add", True, True, "cpu"),
            ("add", True, True, "cuda"),
            ("mul", True, True, "cpu"),
            ("mul", True, True, "cuda"),
            ("sub", True, True, "cpu"),
            ("sub", True, True, "cuda"),
            ("div", True, True, "cpu"),
            ("div", True, True, "cuda"),
            ("pow", True, True, "cpu"),
            ("pow", True, True, "cuda"),
            ("fmod", True, True, "cpu"),
            ("fmod", True, True, "cuda"),
            ("atan2", True, True, "cpu"),
            ("atan2", True, True, "cuda"),
            ("hypot", True, True, "cpu"),
            ("hypot", True, True, "cuda"),
            ("igamma", True, True, "cpu"),
            ("igamma", True, True, "cuda"),
            ("igammac", True, True, "cpu"),
            ("igammac", True, True, "cuda"),
            ("nextafter", True, True, "cpu"),
            ("nextafter", True, True, "cuda"),
            ("le", True, True, "cpu"),
            ("le", True, True, "cuda"),
            ("lt", True, True, "cpu"),
            ("lt", True, True, "cuda"),
            ("ge", True, True, "cpu"),
            ("ge", True, True, "cuda"),
            ("gt", True, True, "cpu"),
            ("gt", True, True, "cuda"),
            ("eq", True, True, "cpu"),
            ("eq", True, True, "cuda"),
            ("ne", True, True, "cpu"),
            ("ne", True, True, "cuda"),
            ("logical_and", True, True, "cpu"),
            ("logical_and", True, True, "cuda"),
            ("logical_or", True, True, "cpu"),
            ("logical_or", True, True, "cuda"),
            ("logical_xor", True, True, "cpu"),
            ("logical_xor", True, True, "cuda"),
        ]
        for (
            fn,
            has_input_output_mem_overlap_check,
            has_internal_mem_overlap_check,
            dev,
        ) in ops:
            if dev != device:
                continue
            # Functional variant (torch.fn) and inplace variant (Tensor.fn_).
            out_op = getattr(torch, fn)
            inplace_op = getattr(torch.Tensor, fn + "_")
            self.check_internal_mem_overlap(
                inplace_op,
                2,
                dtype,
                device,
                expected_failure=not has_internal_mem_overlap_check,
            )
            self.binary_check_input_output_mem_overlap(
                out_op, device, expected_failure=not has_input_output_mem_overlap_check
            )
    def _do_pow_for_exponents(self, m1, exponents, pow_fn, atol):
        """Helper for test_pow: check torch.pow of tensor `m1` against an
        element-wise reference computed with `pow_fn` (math.pow or builtin
        pow), for each scalar exponent in `exponents`.

        Also checks scalar ** tensor (__rpow__) dtype promotion. `atol` of
        None means exact comparison; otherwise compare with that absolute
        tolerance and rtol=0.
        """
        for num in exponents:
            # Integer tensors reject negative integer exponents.
            if (
                isinstance(num, int)
                and num < 0
                and not m1.is_floating_point()
                and not m1.is_complex()
            ):
                with self.assertRaisesRegex(
                    RuntimeError,
                    r"Integers to negative integer powers are not allowed\.",
                ):
                    torch.pow(m1[4], num)
            else:
                # base - tensor, exponent - number
                # contiguous
                res1 = torch.pow(m1[4], num)
                res2 = res1.clone().zero_()
                # `math.pow` has issues with complex exponentiation so we need to resort to normal `pow`.
                for i in range(res2.size(0)):
                    res2[i] = pow_fn(m1[4][i], num)
                rtol = 0 if atol is not None else None
                self.assertEqual(res1, res2, atol=atol, rtol=rtol)
                # non-contiguous
                res1 = torch.pow(m1[:, 4], num)
                res2 = res1.clone().zero_()
                for i in range(res2.size(0)):
                    res2[i] = pow_fn(m1[i, 4], num)
                self.assertEqual(res1, res2, atol=atol, rtol=rtol)
                # scalar ** tensor to enforce correct handling of dtypes for __rpow__().
                expected_dtype = torch.result_type(num, m1)
                res1 = num ** m1[4]
                res2 = (
                    torch.tensor(num, dtype=expected_dtype, device=m1.device) ** m1[4]
                )
                self.assertEqual(res1, res2)
                self.assertEqual(res1.dtype, expected_dtype)
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_pow(self, device, dtype):
        """Exercise torch.pow with tensor bases against math.pow / builtin pow
        references, over real and complex exponents, plus number ** tensor."""
        m1 = torch.empty(0, dtype=dtype, device=device)
        if m1.is_floating_point() or m1.is_complex():
            m1 = (
                make_tensor((100, 100), low=0, high=1, dtype=dtype, device=device) + 0.5
            )
        else:
            # math.pow will overflow and throw exceptions for large integers
            range_high = 4 if dtype in (torch.int8, torch.uint8) else 10
            m1 = make_tensor(
                (100, 100), low=1, high=range_high, dtype=dtype, device=device
            )
        exponents = [-2.8, -2, -1, -0.5, 0, 0.5, 1, 2, 3, 4, 3.3, True, False]
        complex_exponents = [
            -2.5j,
            -1.0j,
            0j,
            1.0j,
            2.5j,
            1.0 + 1.0j,
            -1.0 - 1.5j,
            3.3j,
        ]
        if m1.is_complex():
            self._do_pow_for_exponents(m1, exponents + complex_exponents, pow, 10e-4)
        else:
            self._do_pow_for_exponents(m1, exponents, math.pow, None)
            will_raise_error = (
                dtype is torch.half and torch.device(device).type == "cpu"
            )
            if will_raise_error:
                # On CPU,
                # Half Tensor with complex exponents leads to computation dtype
                # of ComplexHalf for which this ops is not supported yet
                with self.assertRaisesRegex(
                    RuntimeError, "not implemented for 'ComplexHalf'"
                ):
                    self._do_pow_for_exponents(m1, complex_exponents, pow, 10e-4)
            else:
                self._do_pow_for_exponents(m1, complex_exponents, pow, 10e-4)
        # base - number, exponent - tensor
        # contiguous
        res1 = torch.pow(3, m1[4])
        res2 = res1.clone().zero_()
        for i in range(res2.size(0)):
            res2[i] = pow(3, m1[4, i])
        self.assertEqual(res1, res2)
        # non-contiguous
        res1 = torch.pow(3, m1[:, 4])
        res2 = res1.clone().zero_()
        for i in range(res2.size(0)):
            res2[i] = pow(3, m1[i][4])
        self.assertEqual(res1, res2)
    # TODO: refactor all these tests using opinfos properly
    def _test_pow(self, base, exponent, np_exponent=None):
        """Shared pow checker: compare pow/pow_/torch.pow/torch.pow(out=)
        against np.power, including the cases where NumPy (and therefore
        torch) must reject the inputs.

        np_exponent lets the caller pass a different exponent to NumPy when
        the torch and NumPy representations differ; it defaults to `exponent`.
        """
        if np_exponent is None:
            np_exponent = exponent

        def to_np(value):
            # Tensors are converted to ndarrays; plain scalars pass through.
            if isinstance(value, torch.Tensor):
                return value.cpu().numpy()
            return value

        try:
            np_res = np.power(to_np(base), to_np(np_exponent))
            expected = (
                torch.from_numpy(np_res)
                if isinstance(np_res, np.ndarray)
                else torch.tensor(np_res, dtype=base.dtype)
            )
        except ValueError as e:
            # NumPy rejected the inputs: every torch variant must raise the
            # same "negative integer powers" error.
            err_msg = "Integers to negative integer powers are not allowed."
            self.assertEqual(str(e), err_msg)
            out = torch.empty_like(base)
            test_cases = [
                lambda: base.pow(exponent),
                lambda: base.pow_(exponent),
                lambda: torch.pow(base, exponent),
                lambda: torch.pow(base, exponent, out=out),
            ]
            for test_case in test_cases:
                self.assertRaisesRegex(RuntimeError, err_msg, test_case)
        else:
            if isinstance(base, torch.Tensor):
                actual = base.pow(exponent)
                self.assertEqual(actual, expected.to(actual))
                actual = base.clone()
                # When base is a 0-dim cpu tensor and exp is a cuda tensor, we exp `pow` to work but `pow_` to fail, since
                # `pow` will try to create the output tensor on a cuda device, but `pow_` needs to use the cpu tensor as the output
                if (
                    isinstance(exponent, torch.Tensor)
                    and base.dim() == 0
                    and base.device.type == "cpu"
                    and exponent.device.type == "cuda"
                ):
                    regex = "Expected all tensors to be on the same device, but found at least two devices, cuda.* and cpu!"
                    self.assertRaisesRegex(RuntimeError, regex, base.pow_, exponent)
                elif torch.can_cast(torch.result_type(base, exponent), base.dtype):
                    # Inplace pow_ is only legal when the result type fits the base.
                    actual2 = actual.pow_(exponent)
                    self.assertEqual(actual, expected.to(actual))
                    self.assertEqual(actual2, expected.to(actual2))
                else:
                    self.assertRaisesRegex(
                        RuntimeError,
                        r"result type \w+ can't be cast to the desired output type \w+",
                        lambda: actual.pow_(exponent),
                    )
            actual = torch.pow(base, exponent)
            self.assertEqual(actual, expected.to(actual))
            actual2 = torch.pow(base, exponent, out=actual)
            self.assertEqual(actual, expected.to(actual))
            self.assertEqual(actual2, expected.to(actual))
# We can potentially merge this into OpInfo, but one blocker is that the
# first input must be a scalar. It is not as simple as just wrapping this in
# a lambada that switches the inputs, because we also want to test samples inputs
# where the second input is a scalar. The wrapper would need some more logic.
def test_pow_scalar_base(self, device):
a = (
torch.arange(1, 13, dtype=torch.double, device=device)
.view(3, 4)
.requires_grad_()
)
gradcheck(lambda a: torch.pow(2, a), (a,))
# Tests pow() for integral, floating-type tensors, with integral, floating-type
# exponents (tensor or scalar), respectively. noncontiguous tensors are also tested.
def test_int_and_float_pow(self, device):
def _test_int_and_float_pow(dt, low, high, dev):
test_cases = (
((4, 4), 0, (4, 1)),
((3, 1), 4, (3, 1)),
((2,), 4, (1,)),
((1,), 2, ()),
((513, 513), 4, (513,)),
((5, 5, 5), 5, (5,)),
((), 2, ()),
)
for base_shape, exp_scalar, exp_shape in test_cases:
base_tensor = make_tensor(
base_shape, dtype=dt, device=dev, low=low, high=high
)
# int tensors don't take negative exponents
if dt in [
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
]:
exp_tensor = make_tensor(
exp_shape, dtype=dt, device=dev, low=0, high=high
)
else:
exp_tensor = make_tensor(
exp_shape, dtype=dt, device=dev, low=low, high=high
)
self._test_pow(base_tensor, exp_scalar)
self._test_pow(base_tensor, exp_tensor)
# test non-contiguous tensors as well
base_tensor = make_tensor(
base_shape,
dtype=dt,
device=dev,
low=low,
high=high,
noncontiguous=True,
)
if dt in [
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
]:
exp_tensor = make_tensor(
exp_shape,
dtype=dt,
device=dev,
low=0,
high=high,
noncontiguous=True,
)
else:
exp_tensor = make_tensor(
exp_shape,
dtype=dt,
device=dev,
low=low,
high=high,
noncontiguous=True,
)
self._test_pow(base_tensor, exp_scalar)
self._test_pow(base_tensor, exp_tensor)
_test_int_and_float_pow(torch.int8, -2, 2, device)
_test_int_and_float_pow(torch.uint8, 0, 3, device)
_test_int_and_float_pow(torch.int16, -5, 5, device)
_test_int_and_float_pow(torch.int64, -10, 10, device)
_test_int_and_float_pow(torch.int32, -10, 10, device)
_test_int_and_float_pow(torch.float16, 0.0, 5.0, device)
_test_int_and_float_pow(torch.float32, 0.0, 10.0, device)
_test_int_and_float_pow(torch.float64, 0.0, 10.0, device)
# pow's output would have some NaNs as well
_test_int_and_float_pow(torch.float32, -10.0, 10.0, device)
_test_int_and_float_pow(torch.float64, -10.0, 10.0, device)
# Tests that a Runtime error occurs when a base tensor cannot be resized
# by pow's inplace variant due to PyTorch's broadcasting semantics.
def test_pow_inplace_resizing_exception(self, device):
test_cases = (
((), (3,)),
((2,), (2, 1)),
((2, 1), (2, 2)),
((2, 2), (2, 1, 1)),
)
test_inputs = [
(
make_tensor(
base_size, dtype=torch.float64, device=device, high=10.0, low=0.0
),
make_tensor(
exp_size, dtype=torch.float64, device=device, high=10.0, low=0.0
),
)
for base_size, exp_size in test_cases
]
for base, exponent in test_inputs:
regex = "doesn't match the broadcast shape"
self.assertRaisesRegex(RuntimeError, regex, base.pow_, exponent)
def test_int_tensor_pow_neg_ints(self, device):
ints = [
torch.iinfo(torch.int32).min,
-3,
-2,
-1,
0,
1,
2,
3,
torch.iinfo(torch.int32).max,
]
neg_ints = [torch.iinfo(torch.int32).min, -3, -2, -1]
tensor = torch.tensor(ints, dtype=torch.int32, device=device)
for pow in neg_ints:
self._test_pow(tensor, pow)
def test_long_tensor_pow_floats(self, device):
ints = [0, 1, 23, 4567]
floats = [0.0, 1 / 3, 1 / 2, 1.0, 3 / 2, 2.0]
tensor = torch.tensor(ints, dtype=torch.int64, device=device)
for pow in floats:
self._test_pow(tensor, pow)
@dtypes(*[torch.float32, torch.float64])
def test_float_scalar_pow_float_tensor(self, device, dtype):
floats = [2.0, -3 / 2, -1.0, -1 / 2, -1 / 3, 0.0, 1 / 3, 1 / 2, 1.0, 3 / 2, 2.0]
exponent_shapes = (
(1,),
(2, 2),
(2, 1),
(2, 2, 2),
)
tensors = [
make_tensor(shape, dtype=dtype, device=device, low=0)
for shape in exponent_shapes
]
floats_tensor = torch.tensor(floats, dtype=dtype, device=device)
for base in floats:
self._test_pow(base, floats_tensor)
for tensor in tensors:
self._test_pow(base, tensor)
@onlyCUDA
def test_cuda_tensor_pow_scalar_tensor(self, device):
cuda_tensors = [
torch.randn((3, 3), device=device),
torch.tensor(3.0, device=device),
]
scalar_tensors = [
torch.tensor(5.0, device="cpu"),
torch.tensor(-3),
torch.tensor(1),
]
for base, exp in product(cuda_tensors, scalar_tensors):
self._test_pow(base, exp)
@onlyCUDA
def test_cpu_tensor_pow_cuda_scalar_tensor(self, device):
cuda_tensors = [
torch.tensor(5.0, device="cuda"),
torch.tensor(-3, device="cuda"),
]
for exp in cuda_tensors:
base = torch.randn((3, 3), device="cpu")
regex = "Expected all tensors to be on the same device, but found at least two devices, cuda.* and cpu!"
self.assertRaisesRegex(RuntimeError, regex, torch.pow, base, exp)
for exp in cuda_tensors:
# Binary ops with a cpu + cuda tensor are allowed if the cpu tensor has 0 dimension
base = torch.tensor(3.0, device="cpu")
self._test_pow(base, exp)
@onlyCUDA
@dtypes(torch.complex64, torch.complex128)
def test_pow_cuda_complex_extremal_passing(self, device, dtype):
t = torch.tensor(complex(-1.0, float("inf")), dtype=dtype, device=device)
cuda_out = t.pow(2)
cpu_out = t.cpu().pow(2)
self.assertEqual(cpu_out, cuda_out)
    @skipIfTorchDynamo()
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half))
    def test_complex_scalar_pow_tensor(self, device, dtype):
        """Complex scalar ** tensor exponent, on contiguous and non-contiguous
        exponents, including zero entries in the exponent."""
        complexes = [0.5j, 1.0 + 1.0j, -1.5j, 2.2 - 1.6j, 1 + 0j]
        first_exp = make_tensor((100,), dtype=dtype, device=device, low=-2, high=2)
        second_exp = make_tensor(
            (100,), dtype=dtype, device=device, low=-2, high=2, noncontiguous=True
        )
        # Force some zero exponents to exercise the x**0 == 1 path.
        first_exp[0] = first_exp[10] = first_exp[20] = 0
        second_exp[0] = second_exp[10] = second_exp[20] = 0
        for base in complexes:
            # On CPU,
            # Half Tensor with complex base leads to computation dtype
            # of ComplexHalf for which this ops is not supported yet
            # NOTE: pow has fast-path when base is 1 which supports
            # ComplexHalf
            will_raise_error = (
                torch.device(device).type == "cpu"
                and dtype is torch.half
                and base != (1 + 0j)
            )
            if will_raise_error:
                with self.assertRaisesRegex(
                    RuntimeError, "not implemented for 'ComplexHalf'"
                ):
                    self._test_pow(base, first_exp)
                    self._test_pow(base, second_exp)
            else:
                self._test_pow(base, first_exp)
                self._test_pow(base, second_exp)
@onlyNativeDeviceTypes
@skipMeta
def test_pow_scalar_type_promotion(self, device):
# Test against a scalar and non-scalar input
inputs = [17, [17]]
for input in inputs:
# We expect the computation to be performed in uint8 (overflowing to 0), and then cast to int64
input_tensor_uint8 = torch.tensor(input, dtype=torch.uint8, device=device)
out_uint8_computation = torch.pow(
2,
input_tensor_uint8,
out=torch.tensor(0, dtype=torch.int64, device=device),
)
# Computation should run in int64, and not overflow
input_tensor_int64 = torch.tensor(input, dtype=torch.int64, device=device)
out_int64_computation = torch.pow(
2,
input_tensor_int64,
out=torch.tensor(0, dtype=torch.int64, device=device),
)
self.assertNotEqual(out_uint8_computation, out_int64_computation)
self.assertEqual(
out_uint8_computation.to(dtype=torch.uint8),
out_int64_computation.to(dtype=torch.uint8),
)
def test_tensor_pow_tensor(self, device):
def rotate(l, n):
return l[-n:] + l[:-n]
def test_tensor_pow_tensor(values, torch_type, numpy_type):
vals_tensor = torch.tensor(values, dtype=torch_type, device=device)
for i in range(len(values)):
pows = rotate(values, i)
pows_tensor = torch.tensor(pows, dtype=torch_type, device=device)
self._test_pow(vals_tensor, pows_tensor)
ints = [0, 1, 2, 3]
test_tensor_pow_tensor(ints, torch.uint8, np.uint8)
test_tensor_pow_tensor(ints, torch.int8, np.int8)
test_tensor_pow_tensor(ints, torch.int16, np.int16)
test_tensor_pow_tensor(ints, torch.int32, np.int32)
test_tensor_pow_tensor(ints, torch.int64, np.int64)
floats = [-3.0, -2.0, -1.0, -1 / 2, -1 / 3, 0.0, 1 / 3, 1 / 2, 1.0, 2.0, 3.0]
test_tensor_pow_tensor(floats, torch.float16, np.float16)
test_tensor_pow_tensor(floats, torch.float32, np.float32)
test_tensor_pow_tensor(floats, torch.float64, np.float64)
def test_logical_xor_with_nontrivial_alignment(self, device):
# test tensor that is not aligned to multiple of 16 bytes
size = 128
a = torch.randn(size, device=device) > 0
b = torch.randn(size, device=device) > 0
c = torch.randn(size, device=device) > 0
non_trivial_alignment = [1, 2, 4, 8, 15]
for i in non_trivial_alignment:
for j in non_trivial_alignment:
for k in non_trivial_alignment:
a_ = a[i : 100 + i]
b_ = b[j : 100 + j]
c_ = c[k : 100 + k]
torch.logical_xor(a_, b_, out=c_)
for x, y, z in zip(a_.tolist(), b_.tolist(), c_.tolist()):
self.assertEqual(x ^ y, z)
@dtypes(torch.float)
def test_add_with_tail(self, device, dtype):
# test tensor where there is a tail which is not a multiple
# of GPU warp size
for tail_size in [1, 63, 67, 130]:
size = 4096 + tail_size
a = torch.randn(size, device=device, dtype=dtype)
b = torch.randn(size, device=device, dtype=dtype)
c = a + b
for x, y, z in zip(a.tolist(), b.tolist(), c.tolist()):
self.assertEqual(x + y, z)
# Tests that CUDA tensors on different devices cannot be used in the same
# binary operation, and that CUDA "scalars" cannot be used in the same
# binary operation as non-scalar CPU tensors.
@deviceCountAtLeast(2)
@onlyCUDA
def test_cross_device_binary_ops(self, devices):
vals = (1.0, (2.0,))
cpu_tensor = torch.randn(2, 2)
def do_test(op, a, b):
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(a, b)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(b, a)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(a, cpu_tensor)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(cpu_tensor, a)
for op in (
operator.add,
torch.add,
operator.sub,
torch.sub,
operator.mul,
torch.mul,
operator.truediv,
torch.true_divide,
operator.floordiv,
torch.floor_divide,
):
for a, b in product(vals, vals):
a = torch.tensor(a, device=devices[0])
b = torch.tensor(b, device=devices[1])
do_test(op, a, b)
# This test ensures that a scalar Tensor can be safely used
# in a binary operation in conjunction with a Tensor on all
# available CUDA devices
@deviceCountAtLeast(2)
@onlyCUDA
def test_binary_op_scalar_device_unspecified(self, devices):
scalar_val = torch.tensor(1.0)
for default_device in devices:
with torch.cuda.device(default_device):
for device in devices:
device_obj = torch.device(device)
x = torch.rand(3, device=device)
y0 = x * scalar_val
self.assertEqual(y0.device, device_obj)
y1 = scalar_val * x
self.assertEqual(y1.device, device_obj)
self.assertEqual(y0, y1)
def test_div_and_floordiv_vs_python(self, device):
# Tests torch division ops which can handle both arguments being
# scalars.
def _scalar_helper(python_op, torch_op):
for a, b in product(range(-10, 10), range(-10, 10)):
for op in (lambda x: x * 0.5, lambda x: math.floor(x)):
a = op(a)
b = op(b)
# Skips zero divisors
if b == 0:
continue
expected = python_op(a, b)
actual_scalar = torch_op(a, b)
a_t = torch.tensor(a, device=device)
b_t = torch.tensor(b, device=device)
actual_tensor = torch_op(a_t, b_t)
actual_first_tensor = torch_op(a_t, b)
actual_second_tensor = torch_op(a, b_t)
self.assertEqual(actual_scalar, expected)
self.assertEqual(actual_tensor.item(), expected)
self.assertEqual(actual_first_tensor, actual_tensor)
self.assertEqual(actual_second_tensor, actual_tensor)
_scalar_helper(operator.truediv, operator.truediv)
_scalar_helper(operator.truediv, torch.true_divide)
_scalar_helper(lambda a, b: math.floor(a / b), operator.floordiv)
_scalar_helper(lambda a, b: math.floor(a / b), torch.floor_divide)
    @onlyNativeDeviceTypes
    @skipIfTorchDynamo("Not a suitable test for TorchDynamo")
    def test_div_and_floordiv_script_vs_python(self, device):
        """Verifies TorchScripted `/` and `//` agree with Python semantics.

        Covers tensor/tensor division over a grid of small integer and
        half-integer operands, then scalar variants (a / 5, 5 / a, a // 5,
        and the known-broken 5 // a case for float tensors).
        """
        # Creates jitted functions of two tensors
        def _wrapped_div(a, b):
            return a / b
        def _wrapped_floordiv(a, b):
            return a // b
        scripted_div = torch.jit.script(_wrapped_div)
        scripted_floordiv = torch.jit.script(_wrapped_floordiv)
        for a, b in product(range(-10, 10), range(-10, 10)):
            # Each operand is exercised both as a half-integer (x * 0.5)
            # and as a plain integer (math.floor leaves ints unchanged).
            for op in (lambda x: x * 0.5, lambda x: math.floor(x)):
                a = op(a)
                b = op(b)
                # Skips zero divisors
                if b == 0:
                    continue
                expected_div = a / b
                expected_floordiv = math.floor(a / b)
                a_t = torch.tensor(a, device=device)
                b_t = torch.tensor(b, device=device)
                self.assertEqual(scripted_div(a_t, b_t), expected_div)
                self.assertEqual(scripted_floordiv(a_t, b_t), expected_floordiv)
        # Creates jitted functions of one tensor
        def _wrapped_div_scalar(a):
            return a / 5
        # NOTE: the JIT implements division as torch.reciprocal(a) * 5
        def _wrapped_rdiv_scalar(a):
            return 5 / a
        def _wrapped_floordiv_scalar(a):
            return a // 5
        # NOTE: this fails if the input is not an integer tensor
        # See https://github.com/pytorch/pytorch/issues/45199
        def _wrapped_rfloordiv_scalar(a):
            return 5 // a
        scripted_div_scalar = torch.jit.script(_wrapped_div_scalar)
        scripted_rdiv_scalar = torch.jit.script(_wrapped_rdiv_scalar)
        scripted_floordiv_scalar = torch.jit.script(_wrapped_floordiv_scalar)
        scripted_rfloordiv_scalar = torch.jit.script(_wrapped_rfloordiv_scalar)
        for a in range(-10, 10):
            for op in (lambda x: x * 0.5, lambda x: math.floor(x)):
                a = op(a)
                a_t = torch.tensor(a, device=device)
                self.assertEqual(a / 5, scripted_div_scalar(a_t))
                # Skips zero divisors
                if a == 0:
                    continue
                self.assertEqual(5 / a, scripted_rdiv_scalar(a_t))
                # Handles Issue 45199 (see comment above)
                if a_t.is_floating_point():
                    with self.assertRaises(RuntimeError):
                        scripted_rfloordiv_scalar(a_t)
                else:
                    # This should emit a UserWarning, why doesn't it?
                    # See issue gh-52387
                    self.assertEqual(5 // a, scripted_rfloordiv_scalar(a_t))
    @onlyNativeDeviceTypes
    @skipIfTorchDynamo("Not a suitable test for TorchDynamo")
    def test_idiv_and_ifloordiv_vs_python(self, device):
        """Verifies in-place division ops (/=, true_divide_, floor_divide_,
        //=) against Python semantics, both eagerly and under TorchScript.

        Also checks that //= augmented assignment is rejected by the JIT
        frontend, and that in-place true division on an integer tensor
        raises (result type can't be cast back).
        """
        def _wrapped_idiv_tensor(a, b):
            a /= b
            return a
        def _wrapped_idiv_scalar(a):
            a /= 5
            return a
        def _wrapped_true_divide__tensor(a, b):
            a.true_divide_(b)
            return a
        def _wrapped_true_divide__scalar(a):
            a.true_divide_(5)
            return a
        def _wrapped_floor_divide__tensor(a, b):
            a.floor_divide_(b)
            return a
        def _wrapped_floor_divide__scalar(a):
            a.floor_divide_(5)
            return a
        # The following functions are unsupported by the JIT
        def _wrapped_ifloordiv_tensor(a, b):
            a //= b
            return a
        def _wrapped_ifloordiv_scalar(a):
            a //= 5
            return a
        with self.assertRaises(torch.jit.frontend.NotSupportedError):
            scripted_ifloordiv_tensor = torch.jit.script(_wrapped_ifloordiv_tensor)
        with self.assertRaises(torch.jit.frontend.NotSupportedError):
            scripted_ifloordiv_scalar = torch.jit.script(_wrapped_ifloordiv_scalar)
        # NOTE(review): scripted_idiv_tensor/scalar are constructed but never
        # called below — successful scripting appears to be the implicit check.
        scripted_idiv_tensor = torch.jit.script(_wrapped_idiv_tensor)
        scripted_idiv_scalar = torch.jit.script(_wrapped_idiv_scalar)
        scripted_true_divide__tensor = torch.jit.script(_wrapped_true_divide__tensor)
        scripted_true_divide__scalar = torch.jit.script(_wrapped_true_divide__scalar)
        scripted_floor_divide__tensor = torch.jit.script(_wrapped_floor_divide__tensor)
        scripted_floor_divide__scalar = torch.jit.script(_wrapped_floor_divide__scalar)
        for a, b in product(range(-10, 10), range(-10, 10)):
            # Each operand is exercised as a half-integer and as an integer.
            for op in (lambda x: x * 0.5, lambda x: math.floor(x)):
                a = op(a)
                b = op(b)
                # Skips zero divisors
                if b == 0:
                    continue
                expected_idiv = a / b
                expected_ifloordiv = a // b
                a_t = torch.tensor(a, device=device)
                b_t = torch.tensor(b, device=device)
                if a_t.is_floating_point():
                    # Float dividend: in-place true division succeeds.
                    tmp0 = a_t.clone()
                    tmp0 /= b
                    tmp1 = a_t.clone()
                    tmp1 /= b_t
                    self.assertEqual(tmp0.item(), expected_idiv)
                    self.assertEqual(tmp1.item(), expected_idiv)
                    self.assertEqual(
                        scripted_true_divide__tensor(a_t.clone(), b_t).item(),
                        expected_idiv,
                    )
                    self.assertEqual(
                        scripted_true_divide__scalar(a_t.clone()).item(), a / 5
                    )
                else:
                    # Integer dividend: true division can't be stored in-place.
                    tmp = a_t.clone()
                    with self.assertRaises(RuntimeError):
                        tmp /= b
                    with self.assertRaises(RuntimeError):
                        tmp /= b_t
                    with self.assertRaises(RuntimeError):
                        scripted_true_divide__tensor(tmp, b_t)
                    with self.assertRaises(RuntimeError):
                        scripted_true_divide__scalar(tmp)
                if not a_t.is_floating_point() and b_t.is_floating_point():
                    # Inplace modification fails because a float tensor is required
                    # if the divisor is a float tensor
                    a_t.clone().floor_divide_(b_t)
                    scripted_floor_divide__tensor(a_t.clone(), b_t)
                    tmp = a_t.clone()
                    tmp //= b_t
                else:
                    # Inplace modification is OK when both or neither tensor is
                    # a float tensor
                    self.assertEqual(
                        a_t.clone().floor_divide_(b_t).item(), expected_ifloordiv
                    )
                    self.assertEqual(
                        scripted_floor_divide__tensor(a_t.clone(), b_t).item(),
                        expected_ifloordiv,
                    )
                    tmp = a_t.clone()
                    tmp //= b_t
                    self.assertEqual(tmp.item(), expected_ifloordiv)
                self.assertEqual(scripted_floor_divide__scalar(a_t), math.floor(a / 5))
    # Tests binary op equivalence with Python builtin ops
    # Also tests that reverse operations are equivalent to forward ops
    # NOTE: division ops are tested separately above
    def test_binary_ops_with_scalars(self, device):
        """For add/sub/mul/div, checks every mix of Python scalar, device
        tensor, and CPU tensor operands against the pure-scalar result."""
        for python_op, torch_op in (
            (operator.add, torch.add),
            (operator.sub, torch.sub),
            (operator.mul, torch.mul),
            (operator.truediv, torch.div),
        ):
            for a, b in product(range(-10, 10), range(-10, 10)):
                # Operands tested as half-integers and as integers.
                for op in (lambda x: x * 0.5, lambda x: math.floor(x)):
                    a = op(a)
                    b = op(b)
                    # Skips zero divisors
                    if b == 0 or a == 0:
                        continue
                    a_tensor = torch.tensor(a, device=device)
                    b_tensor = torch.tensor(b, device=device)
                    a_tensor_cpu = a_tensor.cpu()
                    b_tensor_cpu = b_tensor.cpu()
                    # All 36 ordered pairs of representations are checked,
                    # which also covers the reverse (scalar op tensor) paths.
                    vals = (a, b, a_tensor, b_tensor, a_tensor_cpu, b_tensor_cpu)
                    for args in product(vals, vals):
                        first, second = args
                        first_scalar = (
                            first
                            if not isinstance(first, torch.Tensor)
                            else first.item()
                        )
                        second_scalar = (
                            second
                            if not isinstance(second, torch.Tensor)
                            else second.item()
                        )
                        expected = python_op(first_scalar, second_scalar)
                        self.assertEqual(expected, python_op(first, second))
                        self.assertEqual(expected, torch_op(first, second))
@dtypes(
*product(
all_types_and(torch.half, torch.bfloat16, torch.bool),
all_types_and(torch.half, torch.bfloat16, torch.bool),
)
)
def test_maximum_minimum_type_promotion(self, device, dtypes):
a = torch.tensor((0, 1), device=device, dtype=dtypes[0])
b = torch.tensor((1, 0), device=device, dtype=dtypes[1])
for op in (
torch.maximum,
torch.max,
torch.fmax,
torch.minimum,
torch.min,
torch.fmin,
):
result = op(a, b)
self.assertEqual(result.dtype, torch.result_type(a, b))
    @dtypes(*integral_types_and(torch.bool))
    def test_maximum_minimum_int_and_bool(self, device, dtype):
        """Compares integral/bool maximum/minimum/fmax/fmin (and their out=
        variants and aliases) against the corresponding NumPy ufuncs."""
        # (primary op, optional alias, NumPy reference)
        ops = (
            (torch.maximum, torch.max, np.maximum),
            (torch.minimum, torch.min, np.minimum),
            (torch.fmax, None, np.fmax),
            (torch.fmin, None, np.fmin),
        )
        rng = np.random.default_rng()
        a_np = np.array(
            rng.integers(-100, 100, size=10), dtype=torch_to_numpy_dtype_dict[dtype]
        )
        b_np = np.array(
            rng.integers(-100, 100, size=10), dtype=torch_to_numpy_dtype_dict[dtype]
        )
        for torch_op, alias, numpy_op in ops:
            a_tensor = torch.from_numpy(a_np).to(device=device, dtype=dtype)
            b_tensor = torch.from_numpy(b_np).to(device=device, dtype=dtype)
            tensor_result = torch_op(a_tensor, b_tensor)
            # Also exercise the out= overload.
            out = torch.empty_like(a_tensor)
            torch_op(a_tensor, b_tensor, out=out)
            numpy_result = numpy_op(a_np, b_np)
            if alias is not None:
                alias_result = alias(a_tensor, b_tensor)
                self.assertEqual(alias_result, tensor_result)
            self.assertEqual(tensor_result, numpy_result)
            self.assertEqual(out, numpy_result)
    @precisionOverride({torch.bfloat16: 1e-2})
    @dtypes(*(floating_types_and(torch.half, torch.bfloat16)))
    def test_maximum_minimum_float(self, device, dtype):
        """Compares floating-point maximum/minimum/fmax/fmin (plus aliases
        and out= variants) against NumPy on random finite inputs."""
        ops = (
            (torch.maximum, torch.max, np.maximum),
            (torch.minimum, torch.min, np.minimum),
            (torch.fmax, None, np.fmax),
            (torch.fmin, None, np.fmin),
        )
        if dtype == torch.bfloat16:
            # NumPy has no bfloat16; compute the reference in float64 and
            # compare with exact_dtype=False below.
            a_np = np.random.randn(10).astype(np.float64)
            b_np = np.random.randn(10).astype(np.float64)
        else:
            a_np = np.random.randn(10).astype(torch_to_numpy_dtype_dict[dtype])
            b_np = np.random.randn(10).astype(torch_to_numpy_dtype_dict[dtype])
        for torch_op, alias, numpy_op in ops:
            numpy_result = numpy_op(a_np, b_np)
            a_tensor = torch.from_numpy(a_np).to(device=device, dtype=dtype)
            b_tensor = torch.from_numpy(b_np).to(device=device, dtype=dtype)
            tensor_result = torch_op(a_tensor, b_tensor)
            out = torch.empty_like(a_tensor)
            torch_op(a_tensor, b_tensor, out=out)
            if alias is not None:
                alias_result = alias(a_tensor, b_tensor)
                self.assertEqual(alias_result, tensor_result, exact_dtype=False)
            self.assertEqual(tensor_result, numpy_result, exact_dtype=False)
            self.assertEqual(out, numpy_result, exact_dtype=False)
    @dtypes(*(floating_types_and(torch.half, torch.bfloat16)))
    def test_maximum_minimum_float_nan_and_inf(self, device, dtype):
        """Checks NaN/inf propagation of maximum/minimum (NaN-propagating)
        and fmax/fmin (NaN-ignoring) against the matching NumPy ufuncs."""
        # np.maximum and np.minimum functions compare input arrays element-wisely.
        # if one of the elements being compared is a NaN, then that element is returned.
        ops = (
            (torch.maximum, torch.max, np.maximum),
            (torch.minimum, torch.min, np.minimum),
            (torch.fmax, None, np.fmax),
            (torch.fmin, None, np.fmin),
        )
        # Paired special values: (inf, -inf), (nan, inf), (nan, nan), etc.
        a_vals = (
            float("inf"),
            -float("inf"),
            float("nan"),
            float("inf"),
            float("nan"),
            float("nan"),
            1,
            float("nan"),
        )
        b_vals = (
            -float("inf"),
            float("inf"),
            float("inf"),
            float("nan"),
            float("nan"),
            0,
            float("nan"),
            -5,
        )
        if dtype == torch.bfloat16:
            # NumPy has no bfloat16; use a float64 reference instead.
            a_np = np.array(a_vals, dtype=np.float64)
            b_np = np.array(b_vals, dtype=np.float64)
        else:
            a_np = np.array(a_vals, dtype=torch_to_numpy_dtype_dict[dtype])
            b_np = np.array(b_vals, dtype=torch_to_numpy_dtype_dict[dtype])
        for torch_op, alias, numpy_op in ops:
            numpy_result = numpy_op(a_np, b_np)
            a_tensor = torch.from_numpy(a_np).to(device=device, dtype=dtype)
            b_tensor = torch.from_numpy(b_np).to(device=device, dtype=dtype)
            tensor_result = torch_op(a_tensor, b_tensor)
            out = torch.empty_like(a_tensor)
            torch_op(a_tensor, b_tensor, out=out)
            if alias is not None:
                alias_result = alias(a_tensor, b_tensor)
                self.assertEqual(alias_result, tensor_result)
            if dtype == torch.bfloat16:
                self.assertEqual(tensor_result, numpy_result, exact_dtype=False)
                self.assertEqual(out, numpy_result, exact_dtype=False)
            else:
                self.assertEqual(tensor_result, numpy_result)
                self.assertEqual(out, numpy_result)
@dtypes(
*product(
complex_types(),
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
)
)
def test_maximum_minimum_complex(self, device, dtypes):
for torch_op in (
torch.maximum,
torch.minimum,
torch.max,
torch.min,
torch.fmax,
torch.fmin,
):
with self.assertRaisesRegex(RuntimeError, ".+not implemented for.+"):
torch_op(
torch.ones(1, device=device, dtype=dtypes[0]),
torch.ones(1, device=device, dtype=dtypes[1]),
)
with self.assertRaisesRegex(RuntimeError, ".+not implemented for.+"):
torch_op(
torch.ones(1, device=device, dtype=dtypes[1]),
torch.ones(1, device=device, dtype=dtypes[0]),
)
    @onlyCUDA
    def test_maximum_minimum_cross_device(self, device):
        """Mixed CPU/CUDA tensor args must raise; a zero-dim CPU tensor is
        treated as a scalar and may be combined with a CUDA tensor."""
        a = torch.tensor((1, 2, -1))
        b = torch.tensor((3, 0, 4), device=device)
        ops = (torch.maximum, torch.minimum)
        for torch_op in ops:
            with self.assertRaisesRegex(
                RuntimeError, "Expected all tensors to be on the same device"
            ):
                torch_op(a, b)
            with self.assertRaisesRegex(
                RuntimeError, "Expected all tensors to be on the same device"
            ):
                torch_op(b, a)
        # test cuda tensor and cpu scalar
        ops = ((torch.maximum, np.maximum), (torch.minimum, np.minimum))
        a_np = np.array(1)
        b_np = np.array([3, 0, 4])
        for torch_op, numpy_op in ops:
            # a_tensor is zero-dim, so it stays on CPU and acts as a scalar.
            a_tensor = torch.from_numpy(a_np)
            b_tensor = torch.from_numpy(b_np).to(device=device)
            tensor_result_1 = torch_op(a_tensor, b_tensor)
            numpy_result_1 = numpy_op(a_np, b_np)
            tensor_result_2 = torch_op(b_tensor, a_tensor)
            numpy_result_2 = numpy_op(b_np, a_np)
            self.assertEqual(tensor_result_1, numpy_result_1)
            self.assertEqual(tensor_result_2, numpy_result_2)
    @dtypes(
        *product(
            floating_types_and(torch.half, torch.bfloat16),
            floating_types_and(torch.half, torch.bfloat16),
        )
    )
    def test_maximum_and_minimum_subgradient(self, device, dtypes):
        """At ties, maximum/minimum should split the gradient 0.5/0.5
        between the two inputs (subgradient convention)."""
        def run_test(f, a, b, expected_a_grad, expected_b_grad):
            a = torch.tensor(a, requires_grad=True, device=device, dtype=dtypes[0])
            b = torch.tensor(b, requires_grad=True, device=device, dtype=dtypes[1])
            z = f(a, b)
            z.sum().backward()
            self.assertEqual(a.grad, expected_a_grad)
            self.assertEqual(b.grad, expected_b_grad)
        # Middle element ties (1.0 vs 1.0) -> 0.5 gradient to each input.
        run_test(
            torch.maximum,
            [0.0, 1.0, 2.0],
            [1.0, 1.0, 1.0],
            [0.0, 0.5, 1.0],
            [1.0, 0.5, 0.0],
        )
        run_test(
            torch.minimum,
            [0.0, 1.0, 2.0],
            [1.0, 1.0, 1.0],
            [1.0, 0.5, 0.0],
            [0.0, 0.5, 1.0],
        )
def test_maximum_minimum_forward_ad_float32(self, device):
# TODO: This should really be covered by OpInfo but it isn't. The problem
# is that our gradient tests test using float64 but it should also test
# float32
x = torch.randn(3, device=device, dtype=torch.float32)
y = torch.randn(3, device=device, dtype=torch.float32)
tx = torch.randn(3, device=device, dtype=torch.float32)
ty = torch.randn(3, device=device, dtype=torch.float32)
with fwAD.dual_level():
x_dual = fwAD.make_dual(x, tx)
y_dual = fwAD.make_dual(y, ty)
result = torch.maximum(x_dual, y_dual)
_, result_tangent = fwAD.unpack_dual(result)
expected = torch.where(x > y, tx, ty)
self.assertEqual(result_tangent, expected)
with fwAD.dual_level():
x_dual = fwAD.make_dual(x, tx)
y_dual = fwAD.make_dual(y, ty)
result = torch.minimum(x_dual, y_dual)
_, result_tangent = fwAD.unpack_dual(result)
expected = torch.where(x < y, tx, ty)
self.assertEqual(result_tangent, expected)
    # TODO: tests like this should be generic
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    @dtypes(torch.float, torch.double)
    def test_mul_intertype_scalar(self, device, dtype):
        """Mixed float/int multiplication promotes out-of-place, but
        in-place on the int operand must raise (can't downcast result)."""
        x = torch.tensor(1.5, dtype=dtype, device=device)
        y = torch.tensor(3, dtype=torch.int32, device=device)
        self.assertEqual(x * y, 4.5)
        self.assertEqual(y * x, 4.5)
        # int32 *= float would require casting the float result down.
        with self.assertRaisesRegex(
            RuntimeError, "can't be cast to the desired output type"
        ):
            y *= x
        # float *= int32 is fine: the result stays floating-point.
        x *= y
        self.assertEqual(x, 4.5)
    @onlyCPU
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
    def test_sub(self, device, dtype):
        """Elementwise subtraction across all dtypes: bool raises, low
        precision gets a loose tolerance, everything else is exact."""
        if dtype in integral_types():
            # Before Python 3.10, floats were implicitly converted to ints, but with
            # DeprecationWarning: an integer is required (got type float).
            # Implicit conversion to integers using __int__ is deprecated,
            # and may be removed in a future version of Python.
            # Since Python 3.10, that attempt gives an error.
            m1 = torch.tensor([2, 4], dtype=dtype, device=device)
            m2 = torch.tensor([1, 2], dtype=dtype, device=device)
            diff = torch.tensor([1, 2], dtype=dtype)
        else:
            m1 = torch.tensor([2.34, 4.44], dtype=dtype, device=device)
            m2 = torch.tensor([1.23, 2.33], dtype=dtype, device=device)
            diff = torch.tensor([1.11, 2.11], dtype=dtype)
        if dtype == torch.bool:
            self.assertRaises(RuntimeError, lambda: m1 - m2)
        elif dtype == torch.bfloat16 or dtype == torch.half:
            # bfloat16 has a lower precision so we have to have a separate check for it
            self.assertEqual(m1 - m2, diff, atol=0.01, rtol=0)
        else:
            self.assertEqual(m1 - m2, diff)
# TODO: what is this test testing?
@onlyCPU
@dtypes(torch.float)
def test_csub(self, device, dtype):
# with a tensor
a = torch.randn(100, 90, dtype=dtype, device=device)
b = a.clone().normal_()
res_add = torch.add(a, b, alpha=-1)
res_csub = a.clone()
res_csub.sub_(b)
self.assertEqual(res_add, res_csub)
# with a scalar
a = torch.randn(100, 100, dtype=dtype, device=device)
scalar = 123.5
res_add = torch.add(a, -scalar)
res_csub = a.clone()
res_csub.sub_(scalar)
self.assertEqual(res_add, res_csub)
    # TODO: reconcile with minimum/maximum tests
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    @dtypes(torch.float, torch.double)
    def test_min_max_binary_op_nan(self, device, dtype):
        """Binary torch.min/torch.max propagate NaN whenever either operand
        is NaN, and stay NaN-free when neither is."""
        a = torch.rand(1000, dtype=dtype, device=device)
        b = torch.rand(1000, dtype=dtype, device=device)
        # 0:250: a -- nan, b -- not nan
        a[:250] = float("nan")
        # 250:500: a -- not nan, b -- nan
        b[250:500] = float("nan")
        # 500:750: a and b both nan
        a[500:750] = float("nan")
        b[500:750] = float("nan")
        # 750:1000: neither nan
        ma = torch.max(a, b)
        mi = torch.min(a, b)
        for i in range(750):
            self.assertTrue(
                torch.isnan(ma[i]),
                f"max(a, b): {ma[i]}, a: {a[i]}, b: {b[i]}",
            )
            self.assertTrue(
                torch.isnan(mi[i]),
                f"min(a, b): {mi[i]}, a: {a[i]}, b: {b[i]}",
            )
        for i in range(750, 1000):
            self.assertFalse(
                torch.isnan(ma[i]),
                f"max(a, b): {ma[i]}, a: {a[i]}, b: {b[i]}",
            )
            self.assertFalse(
                torch.isnan(mi[i]),
                f"min(a, b): {mi[i]}, a: {a[i]}, b: {b[i]}",
            )
    @dtypes(
        *product(
            all_types_and(torch.half, torch.bfloat16, torch.bool),
            all_types_and(torch.half, torch.bfloat16, torch.bool),
        )
    )
    def test_copysign(self, device, dtypes):
        """Compares torch.copysign with np.copysign across dtype pairs,
        broadcasting shapes, and special values (±0, ±inf, nan)."""
        def _test_copysign_numpy(a, b):
            # Compare against NumPy; bfloat16 must round-trip through float
            # since NumPy has no bfloat16.
            torch_result = torch.copysign(a, b)
            if a.dtype == torch.bfloat16:
                np_a = a.to(torch.float).cpu().numpy()
            else:
                np_a = a.cpu().numpy()
            if b.dtype == torch.bfloat16:
                np_b = b.to(torch.float).cpu().numpy()
            else:
                np_b = b.cpu().numpy()
            expected = torch.from_numpy(np.copysign(np_a, np_b))
            # To handle inconsistencies of type promotion between PyTorch and Numpy
            # Applied for both arguments having integral precision and bfloat16
            types = integral_types_and(torch.bool, torch.bfloat16)
            if a.dtype in types or b.dtype in types:
                promoted_type = torch.promote_types(torch_result.dtype, expected.dtype)
                torch_result = torch_result.to(promoted_type)
                expected = expected.to(promoted_type)
            # Verify Value
            self.assertEqual(torch_result, expected)
            # Verify Sign
            # Use double copysign to verify the correctness of 0.0 and -0.0, since
            # it always True for self.assertEqual(0.0 == -0.0). So, we use 1 as the
            # magnitude to verify the sign between torch and numpy results, elementwise.
            # Special case: NaN conversions between FP32 and FP16 is not bitwise
            # equivalent to pass this assertion.
            if a.dtype != torch.float16 and b.dtype != torch.float16:
                self.assertEqual(
                    torch.copysign(torch.tensor(1.0), torch_result),
                    torch.copysign(torch.tensor(1.0), expected),
                )
        # Compare Result with NumPy
        # Type promotion
        a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)
        b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)
        _test_copysign_numpy(a, b)
        # Broadcast
        a = make_tensor((10, 1, 10), device=device, dtype=dtypes[0], low=-9, high=9)
        b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)
        _test_copysign_numpy(a, b)
        a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)
        b = make_tensor((10, 1, 10), device=device, dtype=dtypes[1], low=-9, high=9)
        _test_copysign_numpy(a, b)
        # 0.0/-0.0/inf/-inf/nan
        cases = [0.0, -0.0, float("inf"), float("-inf"), float("nan")]
        # torch.bfloat16 can not hold '-nan'
        # torch.half can not hold '-nan' on CUDA
        types = [torch.float32, torch.float64]
        if device == "cpu":
            types.append(torch.float16)
        if dtypes[0] in types:
            b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)
            for case in cases:
                _test_copysign_numpy(
                    torch.tensor([case], device=device, dtype=dtypes[0]), b
                )
        if dtypes[1] in floating_types_and(torch.half, torch.bfloat16):
            a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)
            for case in cases:
                _test_copysign_numpy(
                    a, torch.tensor([case], device=device, dtype=dtypes[1])
                )
@dtypes(
*product(
floating_types_and(torch.half, torch.bfloat16),
floating_types_and(torch.half, torch.bfloat16),
)
)
def test_copysign_subgradient(self, device, dtypes):
# Input is 0.0
x = torch.tensor(
[0.0, 0.0, 0.0], dtype=dtypes[0], device=device, requires_grad=True
)
y = torch.tensor(
[-1.0, 0.0, 1.0], dtype=dtypes[1], device=device, requires_grad=True
)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Input is -0.0
x = torch.tensor(
[-0.0, -0.0, -0.0], dtype=dtypes[0], device=device, requires_grad=True
)
y = torch.tensor(
[-1.0, 0.0, 1.0], dtype=dtypes[1], device=device, requires_grad=True
)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Other is 0.0
x = torch.tensor(
[-1.0, 0.0, 1.0], dtype=dtypes[0], device=device, requires_grad=True
)
y = torch.tensor(
[0.0, 0.0, 0.0], dtype=dtypes[1], device=device, requires_grad=True
)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [-1.0, 0.0, 1.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Other is -0.0
x = torch.tensor(
[-1.0, 0.0, 1.0], dtype=dtypes[0], device=device, requires_grad=True
)
y = torch.tensor(
[-0.0, -0.0, -0.0], dtype=dtypes[1], device=device, requires_grad=True
)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [1.0, 0.0, -1.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
    @dtypes(torch.bfloat16, torch.float)
    def test_div(self, device, dtype):
        """torch.div / true_divide and their method and in-place forms:
        in-place column division matches elementwise scalar division, and
        bfloat16 results are checked with a loose tolerance."""
        for op, method, inplace in (
            (torch.div, torch.Tensor.div, torch.Tensor.div_),
            (torch.true_divide, torch.Tensor.true_divide, torch.Tensor.true_divide_),
        ):
            m1 = torch.randn(10, 10, dtype=torch.float, device=device).to(dtype=dtype)
            res1 = m1.clone()
            # Divide only column 3 in place, then compare with a manual loop.
            inplace(res1[:, 3], 2)
            res2 = m1.clone()
            for i in range(m1.size(0)):
                res2[i, 3] = res2[i, 3] / 2
            self.assertEqual(res1, res2)
            if dtype == torch.bfloat16:
                a1 = torch.tensor([4.2, 6.2], dtype=dtype, device=device)
                a2 = torch.tensor([2.0, 2.0], dtype=dtype, device=device)
                self.assertEqual(
                    op(a1, a2),
                    torch.tensor([2.1, 3.1], dtype=dtype, device=device),
                    atol=0.01,
                    rtol=0,
                )
                self.assertEqual(method(a1, a2), op(a1, a2))
@dtypes(torch.bfloat16, torch.float)
def test_true_divide_out(self, device, dtype):
a1 = torch.tensor([4.2, 6.2], dtype=dtype, device=device)
a2 = torch.tensor([2.0, 2.0], dtype=dtype, device=device)
res = torch.empty_like(a1)
self.assertEqual(
torch.true_divide(a1, a2, out=res),
torch.tensor([2.1, 3.1], dtype=dtype, device=device),
atol=0.01,
rtol=0,
)
@dtypes(torch.half)
def test_divmul_scalar(self, device, dtype):
x = torch.tensor(100.0, device=device, dtype=dtype)
x_ref = x.float()
scale = 1e5
res = x.div(scale)
expected = x_ref.div(scale)
self.assertEqual(res, expected.to(dtype), atol=0.0, rtol=0.0)
x = torch.tensor(1e-5, device=device, dtype=dtype)
x_ref = x.float()
res = x.mul(scale)
expected = x_ref.mul(scale)
self.assertEqual(res, expected.to(dtype), atol=0.0, rtol=0.0)
res = scale * x
self.assertEqual(res, expected.to(dtype), atol=0.0, rtol=0.0)
@dtypesIfCUDA(
*set(get_all_math_dtypes("cuda")) - {torch.complex64, torch.complex128}
)
@dtypes(*set(get_all_math_dtypes("cpu")) - {torch.complex64, torch.complex128})
def test_floor_divide_tensor(self, device, dtype):
x = torch.randn(10, device=device).mul(30).to(dtype)
y = torch.arange(1, 11, dtype=dtype, device=device)
z = x // y
z_alt = torch.floor(x.double() / y.double()).to(dtype)
self.assertEqual(z.dtype, x.dtype)
self.assertEqual(z, z_alt)
@dtypesIfCUDA(
*set(get_all_math_dtypes("cuda")) - {torch.complex64, torch.complex128}
)
@dtypes(*set(get_all_math_dtypes("cpu")) - {torch.complex64, torch.complex128})
def test_floor_divide_scalar(self, device, dtype):
x = torch.randn(100, device=device).mul(10).to(dtype)
z = x // 3
z_alt = torch.tensor(
[math.floor(v.item() / 3.0) for v in x], dtype=x.dtype, device=device
)
self.assertEqual(z.dtype, x.dtype)
self.assertEqual(z, z_alt)
    @onlyCPU
    @dtypes(*get_all_math_dtypes("cpu"))
    def test_rdiv(self, device, dtype):
        """Reverse division (scalar / tensor) matches per-element Python
        division; float16 is skipped."""
        if dtype is torch.float16:
            return
        elif dtype.is_complex:
            x = torch.rand(100, dtype=dtype, device=device).add(1).mul(4)
        else:
            # add(1).mul(4) keeps values in [4, 8): safely away from zero.
            x = torch.rand(100, device=device).add(1).mul(4).to(dtype)
        y = 30 / x
        z = torch.tensor([30 / v.item() for v in x], device=device)
        self.assertEqual(y, z, exact_dtype=False)
@dtypes(*floating_types_and(torch.half))
def test_fmod_remainder_by_zero_float(self, device, dtype):
fn_list = (torch.fmod, torch.remainder)
for fn in fn_list:
# check floating-point tensor fmod/remainder to zero is nan on both CPU and GPU
x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
zero = torch.zeros_like(x)
self.assertTrue(torch.all(fn(x, 0.0).isnan()))
self.assertTrue(torch.all(fn(x, zero).isnan()))
    @onlyNativeDeviceTypes  # Check Issue https://github.com/pytorch/pytorch/issues/48130
    @dtypes(*integral_types())
    def test_fmod_remainder_by_zero_integral(self, device, dtype):
        """Integer fmod/remainder by zero: raises on CPU; on CUDA/ROCm the
        result is the documented undefined-behavior pattern."""
        fn_list = (torch.fmod, torch.remainder)
        for fn in fn_list:
            # check integral tensor fmod/remainder to zero
            x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
            zero = torch.zeros_like(x)
            # RuntimeError on CPU
            if self.device_type == "cpu":
                with self.assertRaisesRegex(RuntimeError, "ZeroDivisionError"):
                    fn(x, zero)
            elif torch.version.hip is not None:
                # ROCm behavior: x % 0 is a no-op; x is returned
                self.assertEqual(fn(x, zero), x)
            else:
                # CUDA behavior: Different value for different dtype
                # Due to it's an undefined behavior, CUDA returns a pattern of all 1s
                # for integral dividend (other than int64) divided by zero. For int64,
                # CUDA returns all 1s for negative dividend, half 1s for positive dividend.
                # uint8: 0xff -> 255
                # int32: 0xffffffff -> -1
                if dtype == torch.int64:
                    self.assertEqual(fn(x, zero) == 4294967295, x >= 0)
                    self.assertEqual(fn(x, zero) == -1, x < 0)
                else:
                    value = 255 if dtype == torch.uint8 else -1
                    self.assertTrue(torch.all(fn(x, zero) == value))
    @onlyNativeDeviceTypes
    @dtypes(*integral_types())
    def test_fmod_remainder_overflow(self, device, dtype):
        """INT_MIN % -1 must not overflow: fmod/remainder return 0 for both
        tensor and scalar divisors (signed dtypes only)."""
        fn_list = (torch.fmod, torch.remainder)
        for fn in fn_list:
            # Unsigned dtypes have no INT_MIN / -1 overflow case.
            if dtype in [torch.uint8, torch.uint16, torch.uint32, torch.uint64]:
                continue
            min_val = torch.iinfo(dtype).min
            dividend = torch.full((2, 3), min_val, dtype=dtype, device=device)
            divisor = torch.full((3,), -1, dtype=dtype, device=device)
            result = fn(dividend, divisor)
            expected = torch.zeros_like(dividend)
            self.assertEqual(result, expected)
            result_scalar = fn(dividend, -1)
            self.assertEqual(result_scalar, expected)
@dtypes(*all_types_and(torch.half))
def test_fmod_remainder(self, device, dtype):
# Use numpy as reference
def _helper(x, mod, fns_list):
for fn, inplace_fn, ref_fn in fns_list:
np_x = x.cpu().numpy() if torch.is_tensor(x) else x
np_mod = mod.cpu().numpy() if torch.is_tensor(mod) else mod
exp = ref_fn(np_x, np_mod)
exp = torch.from_numpy(exp)
res = fn(x, mod)
self.assertEqual(res, exp, exact_dtype=False)
if torch.is_tensor(x):
# out
out = torch.empty(0, device=device, dtype=res.dtype)
fn(x, mod, out=out)
self.assertEqual(out, exp, exact_dtype=False)
self.assertEqual(out.size(), torch.Size([10, 10]))
# in-place (Type cast runtime error)
try:
inplace_fn(x, mod)
self.assertEqual(x, exp, exact_dtype=False)
except RuntimeError as e:
self.assertRegex(
str(e),
"result type (Half|Float|Double) "
"can't be cast to the desired output "
"type (Byte|Char|Short|Int|Long)",
)
x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
# mod with same dtype as x
mod = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
# Exclude 0
mod[mod == 0] = 1
# Mods: Integer, Float, Tensor, Non-contiguous Tensor
mods = [3, 2.3, mod, mod.t()]
# mod with floating-point dtype
if dtype in integral_types():
mod_float = make_tensor(
(10, 10), device=device, dtype=torch.float, low=-9, high=9
)
mod[mod == 0] = 1
mods.append(mod_float)
for dividend, mod in product([x, x.t()], mods):
_helper(
dividend,
mod,
(
(torch.fmod, torch.Tensor.fmod_, np.fmod),
(torch.remainder, torch.Tensor.remainder_, np.remainder),
),
)
# Tests for torch.remainder(scalar, tensor)
for dividend, mod in product([5, 3.14], mods):
if torch.is_tensor(mod):
_helper(
dividend,
mod,
((torch.remainder, torch.Tensor.remainder_, np.remainder),),
)
    @dtypes(torch.float, torch.double)
    def test_remainder_fmod_large_dividend(self, device, dtype):
        """Sign and range invariants of remainder vs fmod with a huge
        dividend (1e9) and a small divisor (pi), all sign combinations."""
        alarge = 1e9
        pi = 3.14159265358979
        for avalue in [alarge, -alarge]:
            for bvalue in [pi, -pi]:
                a = torch.tensor([avalue], dtype=dtype, device=device)
                b = torch.tensor([bvalue], dtype=dtype, device=device)
                c = torch.remainder(a, b)
                d = torch.fmod(a, b)
                self.assertTrue(
                    (b[0] > 0) == (c[0] > 0)
                )  # remainder has same sign as divisor
                self.assertTrue(
                    (a[0] > 0) == (d[0] > 0)
                )  # fmod has same sign as dividend
                self.assertTrue(
                    abs(c[0]) < abs(b[0])
                )  # remainder is within range of divisor
                self.assertTrue(
                    abs(d[0]) < abs(b[0])
                )  # fmod is within range of divisor
                if (a[0] > 0) == (b[0] > 0):
                    self.assertTrue(c[0] == d[0])  # remainder is same as fmod
                else:
                    self.assertTrue(
                        abs(c[0] - d[0]) == abs(b[0])
                    )  # differ by one divisor
    @dtypesIfCPU(torch.bfloat16, torch.half, torch.float32, torch.float64)
    @dtypes(torch.float32, torch.float64)
    def test_hypot(self, device, dtype):
        """torch.hypot on matching shapes, broadcasting, and mixed
        long/float inputs; reference is np.hypot (or sqrt(x^2+y^2) for
        low-precision dtypes NumPy lacks)."""
        inputs = [
            (
                torch.randn(10, device=device).to(dtype),
                torch.randn(10, device=device).to(dtype),
            ),
            (
                torch.randn((3, 3, 3), device=device).to(dtype),
                torch.randn((3, 3, 3), device=device).to(dtype),
            ),
            (
                # (10, 1) against (1, 10): broadcasts to (10, 10).
                torch.randn((10, 1), device=device).to(dtype),
                torch.randn((10, 1), device=device).to(dtype).transpose(0, 1),
            ),
            (
                # Integer first operand: exercises type promotion.
                torch.randint(100, (10,), device=device, dtype=torch.long),
                torch.randn(10, device=device).to(dtype),
            ),
        ]
        for input in inputs:
            actual = torch.hypot(input[0], input[1])
            if dtype in [torch.bfloat16, torch.half]:
                expected = torch.sqrt(input[0] * input[0] + input[1] * input[1])
            else:
                expected = np.hypot(input[0].cpu().numpy(), input[1].cpu().numpy())
            self.assertEqual(actual, expected, exact_dtype=False)
    @onlyNativeDeviceTypes
    @dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
    def test_gcd(self, device, dtype):
        """torch.gcd against np.gcd, including zero operands and uint8
        values above 127 (where a signed interpretation would go wrong)."""
        # Tests gcd(0, 0), gcd(0, a) cases
        t1 = torch.tensor([0, 10, 0], dtype=dtype, device=device)
        t2 = torch.tensor([0, 0, 10], dtype=dtype, device=device)
        actual = torch.gcd(t1, t2)
        expected = np.gcd([0, 10, 0], [0, 0, 10])
        self.assertEqual(actual, expected, exact_dtype=False)
        if dtype == torch.uint8:
            # Test unsigned integers with potential sign issues (i.e., uint8 with value >= 128)
            a = torch.tensor([190, 210], device=device, dtype=dtype)
            b = torch.tensor([190, 220], device=device, dtype=dtype)
            actual = torch.gcd(a, b)
            expected = torch.tensor([190, 10], device=device, dtype=dtype)
            self.assertEqual(actual, expected)
        else:
            # Compares with NumPy
            a = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
            b = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
            actual = torch.gcd(a, b)
            expected = np.gcd(a.cpu().numpy(), b.cpu().numpy())
            self.assertEqual(actual, expected)
@onlyNativeDeviceTypes
@dtypes(torch.int16, torch.int32, torch.int64)
def test_lcm(self, device, dtype):
# Tests lcm(0, 0), lcm(0, a) cases
t1 = torch.tensor([0, 10, 0], dtype=dtype, device=device)
t2 = torch.tensor([0, 0, 10], dtype=dtype, device=device)
actual = torch.lcm(t1, t2)
expected = np.lcm([0, 10, 0], [0, 0, 10])
self.assertEqual(actual, expected, exact_dtype=False)
# Compares with NumPy
a = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
b = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
actual = torch.lcm(a, b)
expected = np.lcm(a.cpu().numpy(), b.cpu().numpy())
self.assertEqual(actual, expected, exact_dtype=False)
    @onlyNativeDeviceTypes
    @dtypesIfCPU(torch.float32, torch.float64, torch.float16)
    @dtypes(torch.float32, torch.float64)
    def test_nextafter(self, device, dtype):
        """torch.nextafter vs np.nextafter: ±inf targets, NaN propagation,
        and random inputs, all compared bit-tight (atol=rtol=0)."""
        # Test special cases
        t1 = torch.tensor([0, 0, 10], device=device, dtype=dtype)
        t2 = torch.tensor([inf, -inf, 10], device=device, dtype=dtype)
        actual = torch.nextafter(t1, t2)
        expected = np.nextafter(t1.cpu().numpy(), t2.cpu().numpy())
        self.assertEqual(actual, expected, atol=0, rtol=0)
        actual = torch.nextafter(t2, t1)
        expected = np.nextafter(t2.cpu().numpy(), t1.cpu().numpy())
        self.assertEqual(actual, expected, atol=0, rtol=0)
        # A NaN in either operand yields NaN.
        t1 = torch.tensor([0, nan], device=device, dtype=dtype)
        t2 = torch.tensor([nan, 0], device=device, dtype=dtype)
        self.assertTrue(torch.nextafter(t1, t2).isnan().all())
        a = torch.randn(100, device=device, dtype=dtype)
        b = torch.randn(100, device=device, dtype=dtype)
        actual = torch.nextafter(a, b)
        expected = np.nextafter(a.cpu().numpy(), b.cpu().numpy())
        self.assertEqual(actual, expected, atol=0, rtol=0)
    @onlyNativeDeviceTypes
    @dtypes(torch.bfloat16)
    def test_nextafter_bfloat16(self, device, dtype):
        """torch.nextafter for bfloat16 against a hand-computed table
        (NumPy has no bfloat16 reference), checked bit-tight."""
        nan = float("nan")
        inf = float("inf")
        # Expected values are the exact bfloat16 neighbors in the `to`
        # direction: e.g. nextafter(1, 2) is 1 + 2^-8 = 1.0078125.
        cases = (
            # (from, to, expected)
            (0, 1, 9.183549615799121e-41),
            (0, -1, -9.183549615799121e-41),
            (1, -2, 0.99609375),
            (1, 0, 0.99609375),
            (1, 2, 1.0078125),
            (-1, -2, -1.0078125),
            (-1, 0, -0.99609375),
            (2, -1, 1.9921875),
            (2, 1, 1.9921875),
            (20, 3000, 20.125),
            (20, -3000, 19.875),
            (3000, -20, 2992.0),
            (-3000, 20, -2992.0),
            (65536, 0, 65280.0),
            (65536, inf, 66048.0),
            (-65536, 0, -65280.0),
            (-65536, -inf, -66048.0),
            (nan, 0, nan),
            (0, nan, nan),
            (nan, nan, nan),
            (nan, inf, nan),
            (inf, nan, nan),
            (inf, -inf, 3.3895313892515355e38),
            (-inf, inf, -3.3895313892515355e38),
            (inf, 0, 3.3895313892515355e38),
            (0, inf, 9.183549615799121e-41),
            (-inf, 0, -3.3895313892515355e38),
            (0, -inf, -9.183549615799121e-41),
        )
        for from_v, to_v, expected in cases:
            from_t = torch.tensor([from_v], device=device, dtype=dtype)
            to_t = torch.tensor([to_v], device=device, dtype=dtype)
            actual = torch.nextafter(from_t, to_t).item()
            self.assertEqual(actual, expected, atol=0, rtol=0)
    def _test_cop(self, torchfn, mathfn, dtype, device):
        """Check a binary torch op against a per-element scalar reference on
        both contiguous and non-contiguous operands.

        `torchfn` is the tensor-level op (e.g. torch.mul) and `mathfn` the
        matching Python scalar function. `sm2` is indexed as a flattened view
        of `sm1`'s shape in the reference loop.
        """

        def reference_implementation(res2):
            # Recompute every element with the scalar function.
            for i, j in iter_indices(sm1):
                idx1d = i * sm1.size(0) + j
                res2[i, j] = mathfn(sm1[i, j], sm2[idx1d])
            return res2

        # contiguous
        m1 = torch.randn(10, 10, 10, dtype=dtype, device=device)
        m2 = torch.randn(10, 10 * 10, dtype=dtype, device=device)
        sm1 = m1[4]
        sm2 = m2[4]
        res1 = torchfn(sm1, sm2.view(10, 10))
        res2 = reference_implementation(res1.clone())
        self.assertEqual(res1, res2)
        # non-contiguous
        m1 = torch.randn(10, 10, 10, dtype=dtype, device=device)
        m2 = torch.randn(10 * 10, 10 * 10, dtype=dtype, device=device)
        sm1 = m1[:, 4]
        sm2 = m2[:, 4]
        # view as sm1.size(): reinterpret the strided column as a 2-D tensor
        # without copying, so torchfn sees a non-contiguous second operand.
        sm2.set_(
            sm2.storage(),
            sm2.storage_offset(),
            sm1.size(),
            (sm2.stride()[0] * 10, sm2.stride()[0]),
        )
        res1 = torchfn(sm1, sm2)
        # reference_implementation assumes 1-d sm2, so restore the original
        # column view before recomputing the expected values.
        sm2.set_(
            sm2.storage(), sm2.storage_offset(), m2[:, 4].size(), m2[:, 4].stride()
        )
        res2 = reference_implementation(res1.clone())
        self.assertEqual(res1, res2)
@onlyCPU
@dtypes(torch.float)
def test_cdiv(self, device, dtype):
self._test_cop(torch.div, operator.truediv, dtype, device)
@onlyCPU
@dtypes(torch.float)
def test_cremainder(self, device, dtype):
self._test_cop(torch.remainder, operator.mod, dtype, device)
@onlyCPU
@dtypes(torch.float)
def test_cmul(self, device, dtype):
self._test_cop(torch.mul, operator.mul, dtype, device)
@onlyCPU
@dtypes(torch.float)
def test_cpow(self, device, dtype):
self._test_cop(
torch.pow, lambda x, y: nan if x < 0 else math.pow(x, y), dtype, device
)
@onlyCPU
@dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_floor_divide_zero(self, device, dtype):
a = torch.tensor([0, 1], dtype=dtype, device=device)
b = torch.tensor([0, 1], dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "ZeroDivisionError"):
with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
a // b
@dtypes(torch.int8, torch.int16, torch.int32, torch.int64)
def test_floor_divide_int_min(self, device, dtype):
int_min = torch.iinfo(dtype).min
a = torch.tensor([int_min], dtype=dtype, device=device)
b = torch.tensor([-1], dtype=dtype, device=device)
result = torch.floor_divide(a, b)
result_ = a // b
self.assertEqual(result, a)
self.assertEqual(result_, a)
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_muldiv_scalar(self, device, dtype):
x = make_tensor((10, 3), dtype=dtype, device=device, low=None, high=None)
s = make_tensor((1,), dtype=dtype, device="cpu", low=None, high=None).item()
y = torch.full_like(x, s)
self.assertEqual(x * s, x * y)
self.assertEqual(s * x, y * x)
self.assertEqual(x / s, x / y)
self.assertEqual(s / x, y / x)
# TODO: update make_tensor to support extremal additions and remove this in favor of make_tensor
def _generate_input(self, shape, dtype, device, with_extremal):
if shape == ():
x = torch.tensor((), dtype=dtype, device=device)
else:
if dtype.is_floating_point or dtype.is_complex:
# work around torch.randn not being implemented for bfloat16
if dtype == torch.bfloat16:
x = torch.randn(*shape, device=device) * random.randint(30, 100)
x = x.to(torch.bfloat16)
else:
x = torch.randn(
*shape, dtype=dtype, device=device
) * random.randint(30, 100)
x[torch.randn(*shape) > 0.5] = 0
if with_extremal and dtype.is_floating_point:
# Use extremal values
x[torch.randn(*shape) > 0.5] = float("nan")
x[torch.randn(*shape) > 0.5] = float("inf")
x[torch.randn(*shape) > 0.5] = float("-inf")
elif with_extremal and dtype.is_complex:
x[torch.randn(*shape) > 0.5] = complex("nan")
x[torch.randn(*shape) > 0.5] = complex("inf")
x[torch.randn(*shape) > 0.5] = complex("-inf")
elif dtype == torch.bool:
x = torch.zeros(shape, dtype=dtype, device=device)
x[torch.randn(*shape) > 0.5] = True
else:
x = torch.randint(15, 100, shape, dtype=dtype, device=device)
return x
    @dtypes(
        *tuple(
            itertools.combinations_with_replacement(
                all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool), 2
            )
        )
    )
    def test_comparison_ops_type_promotion_and_broadcasting(self, device, dtypes):
        # issue #42660
        # testing all combinations of broadcasting and type promotion
        # with a range of dtypes and input shapes, and with extremal values
        def compare_with_numpy_bin_op(torch_fn, np_fn, x, y, out=None):
            # working around the fact that numpy doesn't support bfloat16
            # by letting numpy treat them as float32's
            x_np = x if x.dtype != torch.bfloat16 else x.to(torch.float32)
            y_np = (
                y.cpu().numpy()
                if y.dtype != torch.bfloat16
                else y.to(torch.float32).cpu().numpy()
            )
            # NOTE(review): at the call site below, `out` is a 1-element zeros
            # tensor whose truth value is False, so `if out` never selects the
            # out= branch — confirm whether `out is not None` was intended
            # (note np_fn would then receive a torch tensor as `out`).
            self.compare_with_numpy(
                lambda inp: torch_fn(inp, y, out=out) if out else torch_fn(inp, y),
                lambda inp: np_fn(inp, y_np, out=out) if out else np_fn(inp, y_np),
                x_np,
            )

        # Ordered comparisons are not defined for complex inputs.
        complex_op_denylist = [
            torch.lt,
            torch.le,
            torch.gt,
            torch.ge,
        ]  # complex not supported
        input_sizes = [(1,), (10,), (10, 1), (1, 10), (4, 10), (64, 10), (12, 3)]
        # Pairs of (torch op, equivalent numpy op) to cross-check.
        op_pairs = [
            (torch.lt, np.less),
            (torch.le, np.less_equal),
            (torch.gt, np.greater),
            (torch.ge, np.greater_equal),
            (torch.eq, np.equal),
            (torch.ne, np.not_equal),
            (torch.logical_and, np.logical_and),
            (torch.logical_or, np.logical_or),
            (torch.logical_xor, np.logical_xor),
        ]
        for size1 in input_sizes:
            size2 = (2,) + size1  # perform broadcasting
            for with_extremal in [False, True]:
                a = self._generate_input(size1, dtypes[0], device, with_extremal)
                b = self._generate_input(size2, dtypes[1], device, with_extremal)
                for torch_op, numpy_op in op_pairs:
                    if (
                        dtypes[0].is_complex or dtypes[1].is_complex
                    ) and torch_op in complex_op_denylist:
                        continue
                    # functional version of op
                    compare_with_numpy_bin_op(torch_op, numpy_op, a, b)

                    # functional comparison ops always return bool tensors
                    self.assertEqual(torch_op(a, b).dtype, torch.bool)

                    # out version of op
                    out = torch.zeros(
                        1, dtype=torch.complex128
                    )  # all casts to complex128 are safe
                    compare_with_numpy_bin_op(torch_op, numpy_op, a, b, out=out)
@onlyNativeDeviceTypes
@dtypes(torch.int8, torch.int16, torch.int32, torch.int64)
def test_signed_shift(self, device, dtype):
"Ensure that signed integer bit shifting works as expected."
a = torch.tensor([-10, 10], device=device, dtype=dtype) # [11...1110110, 1010]
expected_l = torch.tensor(
[-40, 40], device=device, dtype=dtype
) # [11...11011000, 101000]
self.assertEqual(a << 2, expected_l)
self.compare_with_numpy(lambda x: x << 2, lambda x: np.left_shift(x, 2), a)
expected_r = torch.tensor(
[-5, 5], device=device, dtype=dtype
) # [1111...111011, 101]
self.assertEqual(a >> 1, expected_r)
self.compare_with_numpy(lambda x: x >> 1, lambda x: np.right_shift(x, 1), a)
    @onlyNativeDeviceTypes
    @dtypes(*get_all_int_dtypes())
    def test_shift_limits(self, device, dtype):
        """Ensure that integer bit shifting works as expected with
        out-of-limits shift values (>= the dtype's bit width, and negative
        shifts where NumPy still accepts them)."""
        # Issue #70904
        iinfo = torch.iinfo(dtype)
        bits = iinfo.bits
        low = iinfo.min
        high = iinfo.max
        exact_dtype = (
            dtype != torch.uint8
        )  # numpy changes dtype from uint8 to int16 for some out-of-limits shift values
        for input in (
            torch.tensor(
                [-1, 0, 1], device=device, dtype=dtype
            ),  # small for non-vectorized operation
            torch.tensor(
                [low, high], device=device, dtype=dtype
            ),  # small for non-vectorized operation
            make_tensor(
                (64, 64, 64), low=low, high=high, device=device, dtype=dtype
            ),  # large for vectorized operation
        ):
            # Expected: out-of-range left shift zeroes everything; out-of-range
            # right shift yields 0 for non-negative and -1 for negative values.
            shift_left_expected = torch.zeros_like(input)
            shift_right_expected = torch.clamp(input, -1, 0)
            # NumPy 2 does not support negative shift values.
            # NOTE(review): this is a lexicographic string compare — fine for
            # 1.x vs 2.x, but it misclassifies a hypothetical numpy "10.x".
            if np.__version__ > "2":
                iterator = range(bits, 100)
            else:
                iterator = chain(range(-100, -1), range(bits, 100))
            for shift in iterator:
                shift_left = input << shift
                self.assertEqual(shift_left, shift_left_expected, msg=f"<< {shift}")
                self.compare_with_numpy(
                    lambda x: x << shift,
                    lambda x: np.left_shift(x, shift),
                    input,
                    exact_dtype=exact_dtype,
                    msg=f"<< {shift}",
                )
                shift_right = input >> shift
                self.assertEqual(shift_right, shift_right_expected, msg=f">> {shift}")
                self.compare_with_numpy(
                    lambda x: x >> shift,
                    lambda x: np.right_shift(x, shift),
                    input,
                    exact_dtype=exact_dtype,
                    msg=f">> {shift}",
                )
    @onlyNativeDeviceTypes
    @dtypes(
        *list(
            product(
                all_types_and(torch.half, torch.bfloat16, torch.bool),
                all_types_and(torch.half, torch.bfloat16, torch.bool),
            )
        )
    )
    def test_heaviside(self, device, dtypes):
        """Check torch.heaviside against np.heaviside for matching dtypes,
        and that every variant rejects mismatched input/values dtypes."""
        input_dtype = dtypes[0]
        values_dtype = dtypes[1]
        rng = np.random.default_rng()
        input = np.array(
            rng.integers(-10, 10, size=10),
            # bfloat16 has no NumPy equivalent, so sample in float64 instead.
            dtype=torch_to_numpy_dtype_dict[
                input_dtype if (input_dtype != torch.bfloat16) else torch.float64
            ],
        )
        # Force exact zeros so the `values` argument is actually exercised.
        input[0] = input[3] = input[7] = 0
        values = np.array(
            rng.integers(-10, 10, size=10),
            dtype=torch_to_numpy_dtype_dict[
                values_dtype if (values_dtype != torch.bfloat16) else torch.float64
            ],
        )
        np_result = torch.from_numpy(np.heaviside(input, values)).to(
            device=device, dtype=input_dtype
        )

        input = torch.from_numpy(input).to(device=device, dtype=input_dtype)
        values = torch.from_numpy(values).to(device=device, dtype=values_dtype)
        out = torch.empty_like(input)

        if input_dtype == values_dtype:
            # Functional, method, out=, and in-place forms must all agree.
            torch_result = torch.heaviside(input, values)
            self.assertEqual(np_result, torch_result)

            torch_result = input.heaviside(values)
            self.assertEqual(np_result, torch_result)

            torch.heaviside(input, values, out=out)
            self.assertEqual(np_result, out)

            input.heaviside_(values)
            self.assertEqual(np_result, input)
        else:
            # Mixed dtypes are rejected by every call form.
            with self.assertRaisesRegex(
                RuntimeError,
                "heaviside is not yet implemented for tensors with different dtypes.",
            ):
                torch.heaviside(input, values)
            with self.assertRaisesRegex(
                RuntimeError,
                "heaviside is not yet implemented for tensors with different dtypes.",
            ):
                input.heaviside(values)
            with self.assertRaisesRegex(
                RuntimeError,
                "heaviside is not yet implemented for tensors with different dtypes.",
            ):
                torch.heaviside(input, values, out=out)
            with self.assertRaisesRegex(
                RuntimeError,
                "heaviside is not yet implemented for tensors with different dtypes.",
            ):
                input.heaviside_(values)
@onlyCUDA
def test_heaviside_cross_device(self, device):
x = torch.tensor([-9, 5, 0, 6, -2, 2], device=device)
y = torch.tensor(0)
result = torch.heaviside(x, y)
expect = torch.tensor([0, 1, 0, 1, 0, 1], device=device)
self.assertEqual(result, expect)
result = torch.heaviside(y, x)
expect = torch.tensor([-9, 5, 0, 6, -2, 2], device=device)
self.assertEqual(result, expect)
x = torch.tensor([-9, 5, 0, 6, -2, 2])
y = torch.tensor(0, device=device)
with self.assertRaisesRegex(
RuntimeError, "Expected all tensors to be on the same device"
):
torch.heaviside(x, y)
with self.assertRaisesRegex(
RuntimeError, "Expected all tensors to be on the same device"
):
torch.heaviside(y, x)
@dtypes(*list(product(complex_types(), complex_types())))
def test_heaviside_complex(self, device, dtypes):
input_dtype = dtypes[0]
values_dtype = dtypes[1]
data = (complex(0, -6), complex(-1, 3), complex(1, 1))
input = torch.tensor(data, device=device, dtype=input_dtype)
values = torch.tensor(data, device=device, dtype=values_dtype)
out = torch.empty_like(input)
real = input.real
with self.assertRaisesRegex(
RuntimeError, "heaviside is not yet implemented for complex tensors."
):
torch.heaviside(input, real)
with self.assertRaisesRegex(
RuntimeError, "heaviside is not yet implemented for complex tensors."
):
real.heaviside(values)
with self.assertRaisesRegex(
RuntimeError, "heaviside is not yet implemented for complex tensors."
):
input.heaviside_(values)
with self.assertRaisesRegex(
RuntimeError, "heaviside is not yet implemented for complex tensors."
):
torch.heaviside(real, real, out=out)
def _test_logical(self, device, dtypes, op, a_, b_, expected_res_):
expected_res = torch.tensor(expected_res_, dtype=dtypes[0], device=device)
a = torch.tensor(a_, dtype=dtypes[0], device=device)
b = torch.tensor(b_, dtype=dtypes[1], device=device)
# new tensor
self.assertEqual(expected_res.bool(), getattr(a, op)(b))
# out
c = torch.empty(0, dtype=torch.bool, device=device)
getattr(torch, op)(a, b, out=c)
self.assertEqual(expected_res.bool(), c)
getattr(a, op + "_")(b)
self.assertEqual(expected_res, a)
@dtypes(
*product(
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
)
)
def test_logical_xor(self, device, dtypes):
self._test_logical(
device, dtypes, "logical_xor", [10, 0, 1, 0], [1, 0, 0, 10], [0, 0, 1, 1]
)
@dtypes(
*product(
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
)
)
def test_logical_and(self, device, dtypes):
self._test_logical(
device, dtypes, "logical_and", [10, 0, 1, 0], [1, 0, 0, 10], [1, 0, 0, 0]
)
@dtypes(
*product(
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
)
)
def test_logical_or(self, device, dtypes):
self._test_logical(
device, dtypes, "logical_or", [10, 0, 1, 0], [1, 0, 0, 10], [1, 0, 1, 1]
)
def test_remainder_overflow(self, device):
# Check Integer Overflows
x = torch.tensor(23500, dtype=torch.int64, device=device)
q = 392486996410368
self.assertEqual(x % q, x)
self.assertEqual(-x % q, q - x)
self.assertEqual(x % -q, x - q)
self.assertEqual(-x % -q, -x)
def test_rpow(self, device):
m = torch.randn(10, 10, device=device)
self.assertEqual(torch.pow(2, m), 2**m)
# test with scalar
m = torch.randn(1, device=device).squeeze()
assert m.dim() == 0, "m is intentionally a scalar"
self.assertEqual(torch.pow(2, m), 2**m)
    def test_ldexp(self, device):
        """torch.ldexp (mantissa * 2**exponent) vs np.ldexp: random values,
        inf/nan propagation, half-dtype results, and a float64-range value."""
        # random values
        mantissas = torch.randn(64, device=device)
        exponents = torch.randint(-31, 31, (64,), device=device, dtype=torch.int32)

        # basic test: functional and method forms agree with NumPy.
        np_outcome = np.ldexp(mantissas.cpu().numpy(), exponents.cpu().numpy())
        pt_outcome_1 = torch.ldexp(mantissas, exponents)
        pt_outcome_2 = mantissas.ldexp(exponents)
        self.assertEqual(np_outcome, pt_outcome_1.cpu())
        self.assertEqual(np_outcome, pt_outcome_2.cpu())
        # In-place form overwrites the mantissa tensor.
        mantissas.ldexp_(exponents)
        self.assertEqual(np_outcome, mantissas.cpu())

        # test bounds: inf/-inf/nan mantissas propagate like NumPy's.
        mantissas = torch.tensor(
            [float("inf"), float("-inf"), float("inf"), float("nan")], device=device
        )
        exponents = torch.randint(0, 31, (4,), device=device, dtype=torch.int32)
        np_outcome = np.ldexp(mantissas.cpu().numpy(), exponents.cpu().numpy())
        pt_outcome = torch.ldexp(mantissas, exponents)
        self.assertEqual(np_outcome, pt_outcome.cpu())

        # test half dtype behavior: the result keeps the mantissa dtype.
        mantissas = torch.randn(64, device=device, dtype=torch.half)
        exponents = torch.randint(-5, 5, (64,), device=device)
        self.assertEqual(torch.ldexp(mantissas, exponents).dtype, torch.half)

        # test float64 computation: 2**128 exceeds the float32 range, so this
        # only works if the computation actually runs in float64.
        mantissas = torch.tensor([1], dtype=torch.float64, device=device)
        exponents = torch.tensor([128], dtype=torch.int64, device=device)
        expected = torch.pow(
            torch.full((1,), 2, device=device, dtype=torch.float64), 128
        )
        self.assertEqual(torch.ldexp(mantissas, exponents), expected)
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_lerp(self, device, dtype):
start_end_weight_shapes = [(), (5,), (5, 5)]
for shapes in product(
start_end_weight_shapes, start_end_weight_shapes, start_end_weight_shapes
):
start = torch.randn(shapes[0], device=device, dtype=dtype)
end = torch.randn(shapes[1], device=device, dtype=dtype)
# Tensor weights
weights = [
torch.randn(shapes[2], device=device, dtype=dtype),
random.random(),
torch.randn([], device="cpu", dtype=dtype),
]
if dtype.is_complex:
weights += [complex(0, 1), complex(0.4, 1.2)]
for weight in weights:
actual = torch.lerp(start, end, weight)
actual_method = start.lerp(end, weight)
self.assertEqual(actual, actual_method)
actual_out = torch.tensor(1.0, dtype=dtype, device=device)
torch.lerp(start, end, weight, out=actual_out)
self.assertEqual(actual, actual_out)
expected = start + weight * (end - start)
self.assertEqual(expected, actual)
@onlyCUDA
@dtypes(torch.half, torch.bfloat16)
def test_lerp_lowp(self, device, dtype):
xvals = (0.0, -30000.0)
yvals = (0.1, -20000.0)
xs = [torch.full((4,), xval, device=device, dtype=dtype) for xval in xvals]
ys = [torch.full((4,), yval, device=device, dtype=dtype) for yval in yvals]
weights = [70000, torch.full((4,), 8, device=device, dtype=dtype)]
for x, y, w in zip(xs, ys, weights):
xref = x.float()
yref = y.float()
wref = w.float() if isinstance(w, torch.Tensor) else w
actual = torch.lerp(x, y, w)
expected = torch.lerp(xref, yref, wref).to(dtype)
self.assertEqual(actual, expected, atol=0.0, rtol=0.0)
@onlyCPU
@dtypes(torch.half, torch.bfloat16)
def test_lerp_lowp_cpu(self, device, dtype):
xvals = (0.0, -30000.0)
yvals = (0.1, -20000.0)
for shape in [(4,), (20,), (3, 10, 10)]:
xs = [torch.full(shape, xval, device=device, dtype=dtype) for xval in xvals]
ys = [torch.full(shape, yval, device=device, dtype=dtype) for yval in yvals]
weights = [70000, torch.full(shape, 8, device=device, dtype=dtype)]
for x, y, w in zip(xs, ys, weights):
xref = x.float()
yref = y.float()
wref = w.float() if isinstance(w, torch.Tensor) else w
actual = torch.lerp(x, y, w)
expected = torch.lerp(xref, yref, wref).to(dtype)
self.assertEqual(actual, expected, atol=0.0, rtol=0.0)
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_lerp_weight_scalar_tensor_promotion(self, device, dtype):
start = make_tensor((5, 5), dtype=dtype, device=device, low=1, high=100)
end = make_tensor((5, 5), dtype=dtype, device=device, low=1, high=100)
weight = torch.rand((), dtype=torch.float, device=device)
actual = torch.lerp(start, end, weight)
expected = start + weight.to(dtype) * (end - start)
self.assertEqual(expected, actual)
@dtypes(torch.double, torch.cfloat, torch.cdouble)
def test_lerp_weight_tensor_promotion_error(self, device, dtype):
start = make_tensor((5, 5), dtype=dtype, device=device, low=1, high=100)
end = make_tensor((5, 5), dtype=dtype, device=device, low=1, high=100)
weight = torch.rand((5, 5), dtype=torch.float, device=device)
with self.assertRaisesRegex(RuntimeError, "expected dtype"):
torch.lerp(start, end, weight)
    def _test_logaddexp(self, device, dtype, base2):
        """Compare torch.logaddexp/logaddexp2 against a NumPy reference for
        random, large-magnitude, and inf/nan inputs.

        NumPy has no complex logaddexp, so for complex dtypes the reference is
        built from the numerically-stable max-shift formulation.
        """
        if base2:
            ref_func = np.logaddexp2
            our_func = torch.logaddexp2
        elif dtype in (torch.complex32, torch.complex64, torch.complex128):
            # numpy has not implemented logaddexp for complex
            def complex_logaddexp(x1, x2):
                x = np.stack((x1, x2))
                amax = np.amax(x, axis=0)
                # Zero out non-finite maxima so the shift stays well-defined.
                amax[~np.isfinite(amax)] = 0
                return np.log(np.sum(np.exp(x - amax), axis=0)) + np.squeeze(amax)

            ref_func = complex_logaddexp
            our_func = torch.logaddexp
        else:
            ref_func = np.logaddexp
            our_func = torch.logaddexp

        def _test_helper(a, b):
            # Low-precision dtypes are compared in a wider dtype with loose
            # tolerances; everything else is compared exactly.
            if dtype == torch.bfloat16:
                ref = ref_func(a.cpu().float().numpy(), b.cpu().float().numpy())
                v = our_func(a, b)
                self.assertEqual(ref, v.float(), atol=0.01, rtol=0.01)
            elif dtype == torch.complex32:
                ref = ref_func(
                    a.cpu().to(torch.complex64).numpy(),
                    b.cpu().to(torch.complex64).numpy(),
                )
                v = our_func(a, b)
                self.assertEqual(ref, v.to(torch.complex64), atol=0.01, rtol=0.01)
            else:
                ref = ref_func(a.cpu().numpy(), b.cpu().numpy())
                v = our_func(a, b)
                self.assertEqual(ref, v)

        # simple test
        a = torch.randn(64, 2, dtype=dtype, device=device) - 0.5
        b = torch.randn(64, 2, dtype=dtype, device=device) - 0.5
        _test_helper(a, b)
        _test_helper(a[:3], b[:3])

        # large value test for numerical stability
        a *= 10000
        b *= 10000
        _test_helper(a, b)
        _test_helper(a[:3], b[:3])

        # inf/nan combinations in both operands
        a = torch.tensor(
            [float("inf"), float("-inf"), float("inf"), float("nan")],
            dtype=dtype,
            device=device,
        )
        b = torch.tensor(
            [float("inf"), float("-inf"), float("-inf"), float("nan")],
            dtype=dtype,
            device=device,
        )
        _test_helper(a, b)
@skipIfTorchDynamo() # complex infs/nans differ under Dynamo/Inductor
@dtypesIfCUDA(
torch.float32,
torch.float64,
torch.bfloat16,
torch.complex32,
torch.complex64,
torch.complex128,
)
@dtypes(
torch.float32, torch.float64, torch.bfloat16, torch.complex64, torch.complex128
)
def test_logaddexp(self, device, dtype):
if sys.version_info >= (3, 12) and dtype in (
torch.complex32,
torch.complex64,
torch.complex128,
):
return self.skipTest("complex flaky in 3.12")
self._test_logaddexp(device, dtype, base2=False)
@dtypes(torch.float32, torch.float64, torch.bfloat16)
def test_logaddexp2(self, device, dtype):
self._test_logaddexp(device, dtype, base2=True)
    def test_add(self, device):
        """Broad check of torch.add / `+`: contiguous and non-contiguous
        operands, scalar addition, type promotion, bool and bfloat16 inputs,
        alpha handling (including complex alpha), and invalid alpha/out cases."""
        dtypes = floating_and_complex_types()
        for dtype in dtypes:
            # [res] torch.add([res,] tensor1, tensor2)
            m1 = torch.randn(100, 100, dtype=dtype, device=device)
            v1 = torch.randn(100, dtype=dtype, device=device)

            # contiguous: compare against a per-element reference loop.
            res1 = torch.add(m1[4], v1)
            res2 = res1.clone().zero_()
            for i in range(m1.size(1)):
                res2[i] = m1[4, i] + v1[i]
            self.assertEqual(res1, res2)

            m1 = torch.randn(100, 100, device=device)
            v1 = torch.randn(100, device=device)

            # non-contiguous (column slice) against the same reference loop.
            res1 = torch.add(m1[:, 4], v1)
            res2 = res1.clone().zero_()
            for i in range(m1.size(0)):
                res2[i] = m1[i, 4] + v1[i]
            self.assertEqual(res1, res2)

            # [res] torch.add([res,] tensor, value)
            m1 = torch.randn(10, 10, device=device)

            # contiguous in-place scalar add on a row.
            res1 = m1.clone()
            res1[3].add_(2)
            res2 = m1.clone()
            for i in range(m1.size(1)):
                res2[3, i] = res2[3, i] + 2
            self.assertEqual(res1, res2)

            # non-contiguous in-place scalar add on a column.
            m1 = torch.randn(10, 10, device=device)
            res1 = m1.clone()
            res1[:, 3].add_(2)
            res2 = m1.clone()
            for i in range(m1.size(0)):
                res2[i, 3] = res2[i, 3] + 2
            self.assertEqual(res1, res2)

            # inter-type: Python int behaves like a zero-dim tensor, both sides.
            m1 = torch.randn(10, 10, dtype=dtype, device=device)
            self.assertEqual(m1 + 3, m1 + torch.tensor(3))
            self.assertEqual(3 + m1, torch.tensor(3) + m1)

            # contiguous + non-contiguous: result is contiguous.
            m1 = torch.randn(10, 10, dtype=dtype, device=device)
            m2 = torch.randn(10, 10, dtype=dtype, device=device).t()
            res = m1 + m2
            self.assertTrue(res.is_contiguous())
            self.assertEqual(res, m1 + m2.contiguous())

            # 1d + empty broadcasts to empty.
            m1 = torch.tensor([1.0], dtype=dtype, device=device)
            m2 = torch.tensor([], dtype=dtype, device=device)
            self.assertEqual(m1 + m2, [])

            # inter-type uint8: result stays uint8.
            one = torch.tensor(1, dtype=torch.uint8, device=device)
            self.assertEqual(torch.add(one, 1), 2)
            self.assertEqual(torch.add(one, 1).dtype, torch.uint8)

            # bool: `+` behaves like logical OR.
            m1 = torch.tensor(
                [True, False, False, True, False, False], dtype=torch.bool, device=device
            )
            m2 = torch.tensor(
                [True, True, False, False, False, True], dtype=torch.bool, device=device
            )
            expected = torch.tensor(
                [True, True, False, True, False, True], dtype=torch.bool, device=device
            )
            self.assertEqual(m1 + m2, expected)

            # fused multiply add with alpha=0 on bool inputs yields all-False.
            a = torch.zeros(2, 3, dtype=torch.bool, device=device)
            res = torch.add(a, a, alpha=0)
            expected = torch.zeros(2, 3, device=device).bool()
            self.assertEqual(res, expected)

            # bfloat16 (CPU tensors regardless of `device` here).
            m1 = torch.tensor([1.0, 2.0], dtype=torch.bfloat16)
            m2 = torch.tensor([3.0, 4.0], dtype=torch.bfloat16)
            self.assertEqual(m1 + m2, torch.tensor([4.0, 6.0], dtype=torch.bfloat16))

            # different alpha types
            m1 = torch.tensor([2 + 3j, 4 + 5j], dtype=torch.complex64, device=device)
            m2 = torch.tensor([4 + 5j, 2 + 3j], dtype=torch.complex64, device=device)
            # add complex numbers with float alpha
            res = torch.add(m1, m2, alpha=0.1)
            expected = torch.tensor(
                [2.4000 + 3.5000j, 4.2000 + 5.3000j], dtype=torch.complex64, device=device
            )
            self.assertEqual(res, expected)

            # add complex numbers with complex alpha
            res = torch.add(m1, m2, alpha=complex(0.1, 0.2))
            expected = torch.tensor(
                [1.4000 + 4.3000j, 3.6000 + 5.7000j], dtype=torch.complex64, device=device
            )
            self.assertEqual(res, expected)

            # add complex numbers with integer alpha
            res = torch.add(m1, m2, alpha=2)
            expected = torch.tensor(
                [10.0 + 13.0j, 8.0 + 11.0j], dtype=torch.complex64, device=device
            )
            self.assertEqual(res, expected)

            # mismatched alpha: bool alpha requires bool result; float alpha
            # is rejected for integral inputs.
            m1 = torch.tensor([1], dtype=torch.int8, device=device)
            m2 = torch.tensor([2], dtype=torch.int8, device=device)
            self.assertRaisesRegex(
                RuntimeError,
                r"Boolean alpha only supported for Boolean results\.",
                lambda: torch.add(m1, m2, alpha=True),
            )
            self.assertRaisesRegex(
                RuntimeError,
                r"For integral input tensors, argument alpha must not be a floating point number\.",
                lambda: torch.add(m1, m2, alpha=1.0),
            )

            # mismatched alpha, float / double tensor and complex alpha
            msg = r"For non-complex input tensors, argument alpha must not be a complex number\."
            m1 = torch.tensor([3.0, 4.0], device=device)
            m2 = torch.tensor([4.0, 3.0], device=device)
            self.assertRaisesRegex(
                RuntimeError, msg, lambda: torch.add(m1, m2, alpha=complex(0.1, 0.2))
            )

            m1 = torch.tensor([3.0, 4.0], dtype=torch.double, device=device)
            m2 = torch.tensor([4.0, 3.0], dtype=torch.double, device=device)
            self.assertRaisesRegex(
                RuntimeError, msg, lambda: torch.add(m1, m2, alpha=complex(0.1, 0.2))
            )

            # complex result cannot be written to a real out tensor.
            m1 = torch.tensor((4.0000 + 4.0000j), dtype=torch.complex64)
            m2 = torch.tensor(4.0, dtype=torch.float64)
            self.assertRaisesRegex(
                RuntimeError,
                r"result type ComplexFloat can't be cast to the desired output type Double",
                lambda: torch.add(m1, m1, out=m2),
            )
@onlyCUDA
def test_addsub_half_tensor(self, device):
x = torch.tensor([60000.0], dtype=torch.half, device=device)
for op, y, alpha in (
(torch.add, torch.tensor([-60000.0], dtype=torch.half, device=device), 2),
(torch.sub, torch.tensor([60000.0], dtype=torch.half, device=device), 2),
(torch.add, -70000.0, 1),
(torch.sub, 70000.0, 1),
):
actual = op(x, y, alpha=alpha)
self.assertTrue(not (actual.isnan() or actual.isinf()))
    def test_sub_typing(self, device):
        """Subtraction involving bool tensors is rejected with an error that
        points at the correct logical operator; invalid alpha values raise."""
        m1 = torch.tensor(
            [True, False, False, True, False, False], dtype=torch.bool, device=device
        )
        m2 = torch.tensor(
            [True, True, False, False, False, True], dtype=torch.bool, device=device
        )
        # bool - bool is rejected outright, suggesting `^` / logical_xor.
        self.assertRaisesRegex(
            RuntimeError,
            r"Subtraction, the `\-` operator, with two bool tensors is not supported. "
            r"Use the `\^` or `logical_xor\(\)` operator instead.",
            lambda: m1 - m2,
        )
        # scalar - bool and bool - scalar suggest `~` / logical_not instead.
        self.assertRaisesRegex(
            RuntimeError,
            r"Subtraction, the `\-` operator, with a bool tensor is not supported. "
            r"If you are trying to invert a mask, use the `\~` or `logical_not\(\)` operator instead.",
            lambda: 1 - m1,
        )
        self.assertRaisesRegex(
            RuntimeError,
            r"Subtraction, the `\-` operator, with a bool tensor is not supported. "
            r"If you are trying to invert a mask, use the `\~` or `logical_not\(\)` operator instead.",
            lambda: m2 - 1,
        )
        # mismatched alpha: bool alpha needs a bool result; float alpha is
        # rejected for integral inputs.
        m1 = torch.tensor([1], dtype=torch.int8, device=device)
        m2 = torch.tensor([2], dtype=torch.int8, device=device)
        self.assertRaisesRegex(
            RuntimeError,
            r"Boolean alpha only supported for Boolean results\.",
            lambda: torch.sub(m1, m2, alpha=True),
        )
        self.assertRaisesRegex(
            RuntimeError,
            r"For integral input tensors, argument alpha must not be a floating point number\.",
            lambda: torch.sub(m1, m2, alpha=1.0),
        )
def test_mul(self, device):
m1 = torch.randn(10, 10, device=device)
res1 = m1.clone()
res1[:, 3].mul_(2)
res2 = m1.clone()
for i in range(res1.size(0)):
res2[i, 3] = res2[i, 3] * 2
self.assertEqual(res1, res2)
a1 = torch.tensor([True, False, False, True], dtype=torch.bool, device=device)
a2 = torch.tensor([True, False, True, False], dtype=torch.bool, device=device)
self.assertEqual(
a1 * a2,
torch.tensor([True, False, False, False], dtype=torch.bool, device=device),
)
if device == "cpu":
a1 = torch.tensor([0.1, 0.1], dtype=torch.bfloat16, device=device)
a2 = torch.tensor([1.1, 0.1], dtype=torch.bfloat16, device=device)
self.assertEqual(
a1 * a2,
torch.tensor([0.11, 0.01], dtype=torch.bfloat16, device=device),
atol=0.01,
rtol=0,
)
self.assertEqual(a1.mul(a2), a1 * a2)
    def test_bool_tensor_comparison_ops(self, device):
        """Comparison operators on bool tensors return bool masks, with the
        ordering False < True; scalar and zero-dim operands broadcast."""
        a = torch.tensor(
            [True, False, True, False, True, False], dtype=torch.bool, device=device
        )
        b = torch.tensor(
            [True, False, True, True, True, True], dtype=torch.bool, device=device
        )
        self.assertEqual(
            a == b, torch.tensor([1, 1, 1, 0, 1, 0], dtype=torch.bool, device=device)
        )
        self.assertEqual(
            a != b, torch.tensor([0, 0, 0, 1, 0, 1], dtype=torch.bool, device=device)
        )
        self.assertEqual(
            a < b, torch.tensor([0, 0, 0, 1, 0, 1], dtype=torch.bool, device=device)
        )
        self.assertEqual(
            a > b, torch.tensor([0, 0, 0, 0, 0, 0], dtype=torch.bool, device=device)
        )
        self.assertEqual(
            a >= b, torch.tensor([1, 1, 1, 0, 1, 0], dtype=torch.bool, device=device)
        )
        self.assertEqual(
            a <= b, torch.tensor([1, 1, 1, 1, 1, 1], dtype=torch.bool, device=device)
        )
        # Python bool and zero-dim bool tensors broadcast over the vector.
        self.assertEqual(
            a > False, torch.tensor([1, 0, 1, 0, 1, 0], dtype=torch.bool, device=device)
        )
        self.assertEqual(
            a == torch.tensor(True, dtype=torch.bool, device=device),
            torch.tensor([1, 0, 1, 0, 1, 0], dtype=torch.bool, device=device),
        )
        self.assertEqual(
            a == torch.tensor(0, dtype=torch.bool, device=device),
            torch.tensor([0, 1, 0, 1, 0, 1], dtype=torch.bool, device=device),
        )
        self.assertFalse(a.equal(b))
    @dtypes(*all_types_and(torch.half, torch.bfloat16, torch.bool))
    def test_logical(self, device, dtype):
        """Comparison methods (lt/le/ge/gt/eq/ne) against scalar and
        1-element tensor operands, with a separate truth table for bool."""
        if dtype != torch.bool:
            x = torch.tensor([1, 2, 3, 4], device=device, dtype=dtype)
            b = torch.tensor([2], device=device, dtype=dtype)
            # Python-scalar right-hand side.
            self.assertEqual(x.lt(2), torch.tensor([True, False, False, False]))
            self.assertEqual(x.le(2), torch.tensor([True, True, False, False]))
            self.assertEqual(x.ge(2), torch.tensor([False, True, True, True]))
            self.assertEqual(x.gt(2), torch.tensor([False, False, True, True]))
            self.assertEqual(x.eq(2), torch.tensor([False, True, False, False]))
            self.assertEqual(x.ne(2), torch.tensor([True, False, True, True]))
            # 1-element tensor right-hand side must broadcast identically.
            self.assertEqual(x.lt(b), torch.tensor([True, False, False, False]))
            self.assertEqual(x.le(b), torch.tensor([True, True, False, False]))
            self.assertEqual(x.ge(b), torch.tensor([False, True, True, True]))
            self.assertEqual(x.gt(b), torch.tensor([False, False, True, True]))
            self.assertEqual(x.eq(b), torch.tensor([False, True, False, False]))
            self.assertEqual(x.ne(b), torch.tensor([True, False, True, True]))
        else:
            # bool dtype: comparisons follow False < True.
            x = torch.tensor([True, False, True, False], device=device)
            self.assertEqual(x.lt(True), torch.tensor([False, True, False, True]))
            self.assertEqual(x.le(True), torch.tensor([True, True, True, True]))
            self.assertEqual(x.ge(True), torch.tensor([True, False, True, False]))
            self.assertEqual(x.gt(True), torch.tensor([False, False, False, False]))
            self.assertEqual(x.eq(True), torch.tensor([True, False, True, False]))
            self.assertEqual(x.ne(True), torch.tensor([False, True, False, True]))
    def test_atan2(self, device):
        """torch.atan2 vs math.atan2 element-wise in double precision, plus
        bfloat16/float16 agreement with the downcast double result."""

        def _test_atan2_with_size(size, device):
            a = torch.rand(size=size, device=device, dtype=torch.double)
            b = torch.rand(size=size, device=device, dtype=torch.double)
            actual = a.atan2(b)
            x = a.view(-1)
            y = b.view(-1)
            # Scalar reference computed element-by-element with math.atan2.
            expected = torch.tensor(
                [math.atan2(x[i].item(), y[i].item()) for i in range(x.numel())],
                device=device,
                dtype=torch.double,
            )
            self.assertEqual(expected, actual.view(-1), rtol=0, atol=0.02)

            # bfloat16/float16: looser tolerance for bfloat16.
            for lowp_dtype in [torch.bfloat16, torch.float16]:
                if lowp_dtype == torch.bfloat16:
                    rtol = 0
                    atol = 0.02
                else:
                    rtol = 0
                    atol = 0.001
                a_16 = a.to(dtype=lowp_dtype)
                b_16 = b.to(dtype=lowp_dtype)
                actual_16 = a_16.atan2(b_16)
                self.assertEqual(actual_16, actual.to(dtype=lowp_dtype))
                self.assertEqual(
                    expected,
                    actual_16.view(-1),
                    exact_dtype=False,
                    rtol=rtol,
                    atol=atol,
                )

        _test_atan2_with_size((2, 2), device)
        _test_atan2_with_size((3, 3), device)
        _test_atan2_with_size((5, 5), device)
def test_atan2_edgecases(self, device):
def _test_atan2(x, y, expected, device, dtype):
expected_tensor = torch.tensor([expected], dtype=dtype, device=device)
x_tensor = torch.tensor([x], dtype=dtype, device=device)
y_tensor = torch.tensor([y], dtype=dtype, device=device)
actual = torch.atan2(y_tensor, x_tensor)
self.assertEqual(expected_tensor, actual, rtol=0, atol=0.02)
for dtype in [torch.float, torch.double]:
_test_atan2(0, 0, 0, device, dtype)
_test_atan2(0, 1, math.pi / 2, device, dtype)
_test_atan2(0, -1, math.pi / -2, device, dtype)
_test_atan2(-1, 0, math.pi, device, dtype)
_test_atan2(1, 0, 0, device, dtype)
_test_atan2(-1, -1, math.pi * -3 / 4, device, dtype)
_test_atan2(1, 1, math.pi / 4, device, dtype)
_test_atan2(1, -1, math.pi / -4, device, dtype)
_test_atan2(-1, 1, math.pi * 3 / 4, device, dtype)
    def test_trapezoid(self, device):
        """torch.trapezoid vs np.trapz for dx- and x-based sample points,
        including zero-size dims, 2-D x broadcasting, and error cases."""

        def test_dx(sizes, dim, dx, device):
            # Scalar spacing along `dim`.
            t = torch.randn(sizes, device=device)
            actual = torch.trapezoid(t, dx=dx, dim=dim)
            expected = np.trapz(t.cpu().numpy(), dx=dx, axis=dim)  # noqa: NPY201
            self.assertEqual(expected.shape, actual.shape)
            self.assertEqual(expected, actual, exact_dtype=False)

        def test_x(sizes, dim, x, device):
            # Explicit sample points along `dim`.
            t = torch.randn(sizes, device=device)
            actual = torch.trapezoid(t, x=torch.tensor(x, device=device), dim=dim)
            expected = np.trapz(t.cpu().numpy(), x=x, axis=dim)  # noqa: NPY201
            self.assertEqual(expected.shape, actual.shape)
            self.assertEqual(expected, actual.cpu(), exact_dtype=False)

        test_dx((2, 3, 4), 1, 1, device)
        test_dx((10, 2), 0, 0.1, device)
        test_dx((1, 10), 0, 2.3, device)
        test_dx((0, 2), 0, 1.0, device)
        test_dx((0, 2), 1, 1.0, device)
        test_x((2, 3, 4), 1, [1.0, 2.0, 3.0], device)
        test_x(
            (10, 2), 0, [2.0, 3.0, 4.0, 7.0, 11.0, 14.0, 22.0, 26.0, 26.1, 30.3], device
        )
        test_x((1, 10), 0, [1.0], device)
        test_x((0, 2), 0, [], device)
        test_x((0, 2), 1, [1.0, 2.0], device)
        test_x((2, 3, 4), -1, [1.0, 2.0, 3.0, 4.0], device)
        test_x((2, 3, 4), 0, [1.0, 2.0], device)
        test_x((2, 3, 4), 1, [1.0, 2.0, 3.0], device)
        test_x((2, 3, 4), 2, [1.0, 2.0, 3.0, 4.0], device)
        test_x((2, 2, 4), -1, [[1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0]], device)
        # NOTE(review): in each `with` block below, only the first call can
        # run — once it raises, the second statement is unreachable. Confirm
        # whether each case was meant to be asserted separately.
        with self.assertRaisesRegex(IndexError, "Dimension out of range"):
            test_x((2, 3), 2, [], device)
            test_dx((2, 3), 2, 1.0, device)
        with self.assertRaisesRegex(
            RuntimeError, "There must be one `x` value for each sample point"
        ):
            test_x((2, 3), 1, [1.0, 2.0], device)
            test_x((2, 3), 1, [1.0, 2.0, 3.0, 4.0], device)
@skipIf(not TEST_SCIPY, "Scipy required for the test.")
def test_cumulative_trapezoid(self, device):
import scipy.integrate
if hasattr(scipy.integrate, "cumulative_trapezoid"):
_scipy_cumulative_trapezoid = scipy.integrate.cumulative_trapezoid
else: # Older version of SciPy uses a different name
_scipy_cumulative_trapezoid = scipy.integrate.cumtrapz
def scipy_cumulative_trapezoid(y, x=None, dx=1.0, axis=-1, initial=None):
if y.shape[axis] == 0:
return np.empty_like(y)
else:
return _scipy_cumulative_trapezoid(y, x, dx, axis, initial)
def test_dx(sizes, dim, dx, device):
t = torch.randn(sizes, device=device)
y = t.cpu().numpy()
actual = torch.cumulative_trapezoid(t, dx=dx, dim=dim)
expected = scipy_cumulative_trapezoid(t.cpu().numpy(), dx=dx, axis=dim)
self.assertEqual(expected.shape, actual.shape)
self.assertEqual(expected, actual, exact_dtype=False, atol=1e-4, rtol=1e-4)
def test_x(sizes, dim, x, device):
t = torch.randn(sizes, device=device)
actual = torch.cumulative_trapezoid(
t, x=torch.tensor(x, device=device), dim=dim
)
expected = scipy_cumulative_trapezoid(t.cpu().numpy(), x=x, axis=dim)
self.assertEqual(expected.shape, actual.shape)
self.assertEqual(
expected, actual.cpu(), exact_dtype=False, atol=1e-4, rtol=1e-4
)
def test_empty_x(sizes, dim, x, device):
t = torch.randn(sizes, device=device)
actual = torch.cumulative_trapezoid(
t, x=torch.tensor(x, device=device), dim=dim
)
self.assertEqual(torch.empty(actual.shape), actual)
test_dx((2,), -1, 1, device)
test_dx((3, 3), -1, 1, device)
test_dx((4, 2), 0, 1, device)
test_dx((2, 3, 4), 1, 1, device)
test_dx((10, 2), 0, 0.1, device)
test_dx((1, 10), 0, 2.3, device)
test_dx((0, 2), 0, 1.0, device)
test_dx((0, 2), 1, 1.0, device)
test_dx((512, 512), 1, 1.0, device)
test_dx((100, 100, 100), 1, 1.0, device)
test_x((2,), -1, [100, 50], device)
test_x((4, 2), 0, [2, 3, 4, 5], device)
test_x((2, 3, 4), 1, [1.0, 2.0, 3.0], device)
test_x(
(10, 2), 0, [2.0, 3.0, 4.0, 7.0, 11.0, 14.0, 22.0, 26.0, 26.1, 30.3], device
)
test_x((1, 10), 0, [1.0], device)
test_x((0, 2), 1, [1, 2], device)
test_x((2, 3, 4), -1, [1.0, 2.0, 3.0, 4.0], device)
test_x((2, 3, 4), 0, [1.0, 2.0], device)
test_x((2, 3, 4), 1, [1.0, 2.0, 3.0], device)
test_x((2, 3, 4), 2, [1.0, 2.0, 3.0, 4.0], device)
test_empty_x(
(0, 2), 0, [], device
) # SciPy failing when x == [], but our version returns empty
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
test_x((2, 3), 2, [], device)
test_dx((2, 3), 2, 1.0, device)
with self.assertRaisesRegex(
RuntimeError, "There must be one `x` value for each sample point"
):
test_x((2, 3), 1, [1.0, 2.0], device)
test_x((0, 2), 0, [1.0, 2.0], device)
test_x((2, 3), 1, [1.0, 2.0, 3.0, 4.0], device)
with self.assertRaisesRegex(
RuntimeError, "Currently, we only support dx as a real number"
):
test_dx((2, 2), -1, complex(1, 1), device)
with self.assertRaisesRegex(
TypeError, "received an invalid combination of arguments"
):
actual = torch.cumulative_trapezoid(
torch.randn((3, 3)), x=torch.randn((3, 3)), dx=3
)
@skipMeta
@dtypes(torch.double)
def test_pow_scalar_overloads_mem_overlap(self, device, dtype):
sz = 3
doubles = torch.randn(2 * sz, dtype=dtype, device=device)
self.check_internal_mem_overlap(lambda t: t.pow_(42), 1, dtype, device)
self.unary_check_input_output_mem_overlap(
doubles, sz, lambda input, out: torch.pow(input, 42, out=out)
)
self.unary_check_input_output_mem_overlap(
doubles, sz, lambda input, out: torch.pow(42, input, out=out)
)
    @dtypes(
        *list(
            product(
                all_types_and_complex_and(torch.half, torch.bfloat16),
                all_types_and_complex_and(torch.half, torch.bfloat16),
            )
        )
    )
    def test_float_power(self, device, dtypes):
        """Validate torch.float_power against np.float_power for every
        (base dtype, exponent dtype) pair: tensor**tensor, tensor**scalar,
        scalar**tensor, the out= variant, and the in-place variant's dtype
        restrictions.
        """
        def to_np(value):
            # NumPy has no bfloat16, so round-trip through float32 first.
            if isinstance(value, torch.Tensor) and value.dtype == torch.bfloat16:
                return value.to(torch.float).cpu().numpy()
            return value.cpu().numpy() if isinstance(value, torch.Tensor) else value
        base_dtype = dtypes[0]
        exp_dtype = dtypes[1]
        # float_power always computes in double precision: complex128 if
        # either operand is complex, float64 otherwise.
        out_dtype = (
            torch.complex128
            if base_dtype.is_complex or exp_dtype.is_complex
            else torch.float64
        )
        base = make_tensor((30,), dtype=base_dtype, device=device, low=1, high=100)
        # Complex and real results do not agree between PyTorch and NumPy when computing negative and zero power of 0
        # Related: https://github.com/pytorch/pytorch/issues/48000
        # base[0] = base[3] = base[7] = 0
        exp = make_tensor((30,), dtype=exp_dtype, device=device, low=-2, high=2)
        exp[0] = exp[4] = exp[6] = 0
        expected = torch.from_numpy(np.float_power(to_np(base), to_np(exp)))
        exponents = [-2.8, -2, -1, -0.5, 0.5, 1, 2]
        complex_exponents = exponents + [
            -2.5j,
            -1.0j,
            1.0j,
            2.5j,
            1.0 + 1.0j,
            -1.0 - 1.5j,
            3.3j,
        ]
        for op in (
            torch.float_power,
            torch.Tensor.float_power,
            torch.Tensor.float_power_,
        ):
            # Case of Tensor x Tensor
            if op is torch.Tensor.float_power_ and base_dtype != out_dtype:
                # The in-place variant cannot widen base's dtype, so it must raise.
                with self.assertRaisesRegex(
                    RuntimeError, "operation's result requires dtype"
                ):
                    op(base.clone(), exp)
            else:
                result = op(base.clone(), exp)
                self.assertEqual(expected, result)
            if op is torch.float_power:
                # Only the functional form accepts out=.
                out = torch.empty_like(base).to(device=device, dtype=out_dtype)
                op(base, exp, out=out)
                self.assertEqual(expected, out)
            # Case of Tensor x Scalar
            for i in complex_exponents if exp_dtype.is_complex else exponents:
                # A complex python scalar also forces a complex128 result.
                out_dtype_scalar_exp = (
                    torch.complex128
                    if base_dtype.is_complex or type(i) is complex
                    else torch.float64
                )
                expected_scalar_exp = torch.from_numpy(np.float_power(to_np(base), i))
                if (
                    op is torch.Tensor.float_power_
                    and base_dtype != out_dtype_scalar_exp
                ):
                    with self.assertRaisesRegex(
                        RuntimeError, "operation's result requires dtype"
                    ):
                        op(base.clone(), i)
                else:
                    result = op(base.clone(), i)
                    self.assertEqual(expected_scalar_exp, result)
                if op is torch.float_power:
                    out = torch.empty_like(base).to(
                        device=device, dtype=out_dtype_scalar_exp
                    )
                    op(base, i, out=out)
                    self.assertEqual(expected_scalar_exp, out)
        # Case of Scalar x Tensor
        for i in complex_exponents if base_dtype.is_complex else exponents:
            out_dtype_scalar_base = (
                torch.complex128
                if exp_dtype.is_complex or type(i) is complex
                else torch.float64
            )
            expected_scalar_base = torch.from_numpy(np.float_power(i, to_np(exp)))
            result = torch.float_power(i, exp)
            self.assertEqual(expected_scalar_base, result)
            out = torch.empty_like(exp).to(device=device, dtype=out_dtype_scalar_base)
            torch.float_power(i, exp, out=out)
            self.assertEqual(expected_scalar_base, out)
def test_float_power_exceptions(self, device):
def _promo_helper(x, y):
for i in (x, y):
if type(i) is complex:
return torch.complex128
elif type(i) is torch.Tensor and i.is_complex():
return torch.complex128
return torch.double
test_cases = (
(torch.tensor([-2, -1, 0, 1, 2], device=device), -0.25),
(
torch.tensor([-1.0j, 0j, 1.0j, 1.0 + 1.0j, -1.0 - 1.5j], device=device),
2.0,
),
)
for base, exp in test_cases:
for out_dtype in (torch.long, torch.float, torch.double, torch.cdouble):
out = torch.empty(1, device=device, dtype=out_dtype)
required_dtype = _promo_helper(base, exp)
if out.dtype == required_dtype:
torch.float_power(base, exp, out=out)
else:
with self.assertRaisesRegex(
RuntimeError, "operation's result requires dtype"
):
torch.float_power(base, exp, out=out)
if base.dtype == required_dtype:
torch.Tensor.float_power_(base.clone(), exp)
else:
with self.assertRaisesRegex(
RuntimeError, "operation's result requires dtype"
):
torch.Tensor.float_power_(base.clone(), exp)
    @skipIf(not TEST_SCIPY, "Scipy required for the test.")
    @dtypes(
        *product(
            all_types_and(torch.half, torch.bool), all_types_and(torch.half, torch.bool)
        )
    )
    def test_xlogy_xlog1py(self, device, dtypes):
        """Check torch.xlogy / torch.special.xlog1py against the SciPy
        references for every (x dtype, y dtype) pair: tensor/tensor with and
        without broadcasting, scalar/tensor, the out= variant, the in-place
        variant, and the x == 0 special-value rows.
        """
        x_dtype, y_dtype = dtypes
        def out_variant_helper(torch_fn, x, y):
            # out= must reproduce the functional result exactly.
            expected = torch_fn(x, y)
            out = torch.empty_like(expected)
            torch_fn(x, y, out=out)
            self.assertEqual(expected, out)
        def xlogy_inplace_variant_helper(x, y):
            if x.dtype in integral_types_and(torch.bool):
                # In-place would need a floating result in an integral tensor.
                with self.assertRaisesRegex(
                    RuntimeError, "can't be cast to the desired output type"
                ):
                    x.clone().xlogy_(y)
            else:
                expected = torch.empty_like(x)
                torch.xlogy(x, y, out=expected)
                inplace_out = x.clone().xlogy_(y)
                self.assertEqual(expected, inplace_out)
        def test_helper(torch_fn, reference_fn, inputs, scalar=None):
            # Compare against SciPy for same-shape and broadcasting operands,
            # then exercise out= (optionally with `scalar` as first argument).
            x, y, z = inputs
            torch_fn_partial = partial(torch_fn, x)
            reference_fn_partial = partial(reference_fn, x.cpu().numpy())
            self.compare_with_numpy(
                torch_fn_partial, reference_fn_partial, x, exact_dtype=False
            )
            self.compare_with_numpy(
                torch_fn_partial, reference_fn_partial, y, exact_dtype=False
            )
            self.compare_with_numpy(
                torch_fn_partial, reference_fn_partial, z, exact_dtype=False
            )
            val = scalar if scalar is not None else x
            out_variant_helper(torch_fn, val, x)
            out_variant_helper(torch_fn, val, y)
            out_variant_helper(torch_fn, val, z)
        # Tensor-Tensor Test (tensor of same and different shape)
        x = make_tensor((3, 2, 4, 5), dtype=x_dtype, device=device, low=0.5, high=1000)
        y = make_tensor((3, 2, 4, 5), dtype=y_dtype, device=device, low=0.5, high=1000)
        z = make_tensor((4, 5), dtype=y_dtype, device=device, low=0.5, high=1000)
        # The *_1p operands sample down to -0.5 so log1p's argument stays
        # above -1 for the xlog1py checks.
        x_1p = make_tensor(
            (3, 2, 4, 5), dtype=x_dtype, device=device, low=-0.5, high=1000
        )
        y_1p = make_tensor(
            (3, 2, 4, 5), dtype=y_dtype, device=device, low=-0.5, high=1000
        )
        z_1p = make_tensor((4, 5), dtype=y_dtype, device=device, low=-0.5, high=1000)
        xlogy_fns = torch.xlogy, scipy.special.xlogy
        xlog1py_fns = torch.special.xlog1py, scipy.special.xlog1py
        test_helper(*xlogy_fns, (x, y, z))
        xlogy_inplace_variant_helper(x, x)
        xlogy_inplace_variant_helper(x, y)
        xlogy_inplace_variant_helper(x, z)
        test_helper(*xlog1py_fns, (x_1p, y_1p, z_1p))
        # Scalar-Tensor Test
        test_helper(*xlogy_fns, (x, y, z), 3.14)
        test_helper(*xlog1py_fns, (x_1p, y_1p, z_1p), 3.14)
        # Special Values Tensor-Tensor
        t = torch.tensor(
            [-1.0, 0.0, 1.0, 2.0, float("inf"), -float("inf"), float("nan")],
            device=device,
        )
        zeros = torch.zeros(7, dtype=y_dtype, device=device)
        def test_zeros_special_helper(torch_fn, reference_fn, scalar=False):
            # x == 0 (tensor or python scalar) must match SciPy for every
            # special value in `t`.
            zeros_t = 0 if scalar else zeros
            zeros_np = 0 if scalar else zeros.cpu().numpy()
            torch_fn_partial = partial(torch_fn, zeros_t)
            reference_fn_partial = partial(reference_fn, zeros_np)
            self.compare_with_numpy(
                torch_fn_partial, reference_fn_partial, t, exact_dtype=False
            )
            out_variant_helper(torch_fn, zeros_t, t)
        test_zeros_special_helper(*xlogy_fns)
        xlogy_inplace_variant_helper(zeros, t)
        test_zeros_special_helper(*xlog1py_fns)
        # Special Values Scalar-Tensor
        test_zeros_special_helper(*xlogy_fns, scalar=True)
        test_zeros_special_helper(*xlog1py_fns, scalar=True)
@dtypes(torch.float64)
def test_xlogy_xlog1py_gradients(self, device, dtype):
make_arg = partial(torch.tensor, dtype=dtype, device=device, requires_grad=True)
zeros = torch.zeros((2,), dtype=dtype, device=device)
x = make_arg([0.0, 0.0])
y = make_arg([-1.5, 0.0])
torch.special.xlogy(x, y).sum().backward()
self.assertEqual(x.grad, zeros)
x = make_arg([0.0, 0.0])
y = make_arg([-2.5, -1.0])
torch.special.xlog1py(x, y).sum().backward()
self.assertEqual(x.grad, zeros)
def test_xlogy_xlog1py_scalar_type_promotion(self, device):
# Test that python numbers don't participate in type promotion at the same
# priority level as 0-dim tensors
t = torch.randn((), dtype=torch.float32, device=device)
self.assertEqual(t.dtype, torch.xlogy(t, 5).dtype)
self.assertEqual(t.dtype, torch.xlogy(t, 5.0).dtype)
self.assertEqual(t.dtype, torch.special.xlog1py(t, 5).dtype)
self.assertEqual(t.dtype, torch.special.xlog1py(t, 5.0).dtype)
self.assertEqual(t.dtype, torch.xlogy(5, t).dtype)
self.assertEqual(t.dtype, torch.xlogy(5.0, t).dtype)
self.assertEqual(t.dtype, torch.special.xlog1py(5, t).dtype)
self.assertEqual(t.dtype, torch.special.xlog1py(5.0, t).dtype)
@skipIf(not TEST_SCIPY, "Scipy required for the test.")
def test_xlogy_xlog1py_bfloat16(self, device):
def _compare_helper(x, y, torch_fn, reference_fn):
x_np = x if isinstance(x, float) else x.cpu().to(torch.float).numpy()
y_np = y if isinstance(y, float) else y.cpu().to(torch.float).numpy()
expected = torch.from_numpy(reference_fn(x_np, y_np))
actual = torch_fn(x, y)
self.assertEqual(expected, actual, exact_dtype=False)
x_dtype, y_dtype = torch.bfloat16, torch.bfloat16
# Tensor-Tensor Test (tensor of same and different shape)
x = make_tensor((3, 2, 4, 5), dtype=x_dtype, device=device, low=0.5, high=1000)
y = make_tensor((3, 2, 4, 5), dtype=y_dtype, device=device, low=0.5, high=1000)
z = make_tensor((4, 5), dtype=y_dtype, device=device, low=0.5, high=1000)
x_1p = make_tensor(
(3, 2, 4, 5), dtype=x_dtype, device=device, low=-0.8, high=1000
)
y_1p = make_tensor(
(3, 2, 4, 5), dtype=y_dtype, device=device, low=-0.8, high=1000
)
z_1p = make_tensor((4, 5), dtype=y_dtype, device=device, low=-0.8, high=1000)
xlogy_fns = torch.xlogy, scipy.special.xlogy
xlog1py_fns = torch.special.xlog1py, scipy.special.xlog1py
_compare_helper(x, x, *xlogy_fns)
_compare_helper(x, y, *xlogy_fns)
_compare_helper(x, z, *xlogy_fns)
_compare_helper(x, 3.14, *xlogy_fns)
_compare_helper(y, 3.14, *xlogy_fns)
_compare_helper(z, 3.14, *xlogy_fns)
_compare_helper(x_1p, x_1p, *xlog1py_fns)
_compare_helper(x_1p, y_1p, *xlog1py_fns)
_compare_helper(x_1p, z_1p, *xlog1py_fns)
_compare_helper(x_1p, 3.14, *xlog1py_fns)
_compare_helper(y_1p, 3.14, *xlog1py_fns)
_compare_helper(z_1p, 3.14, *xlog1py_fns)
# Special Values Tensor-Tensor
t = torch.tensor(
[-1.0, 0.0, 1.0, 2.0, float("inf"), -float("inf"), float("nan")],
device=device,
)
zeros = torch.tensor(7, dtype=y_dtype, device=device)
_compare_helper(t, zeros, *xlogy_fns)
_compare_helper(t, 0.0, *xlogy_fns)
_compare_helper(t, zeros, *xlog1py_fns)
_compare_helper(t, 0.0, *xlog1py_fns)
    @dtypes(*product(all_types_and(torch.bool), all_types_and(torch.bool)))
    @skipIf(not TEST_SCIPY, "Scipy required for the test.")
    @slowTest
    def test_zeta(self, device, dtypes):
        """Compare torch.special.zeta with scipy.special.zeta for
        tensor/tensor inputs (same shape and every broadcasting direction)
        and for scalar/tensor inputs in both argument positions.
        """
        x_dtype, q_dtype = dtypes
        def test_helper(x, q):
            # Either argument may be a python float or a tensor.
            x_np = x if isinstance(x, float) else x.cpu().numpy()
            q_np = q if isinstance(q, float) else q.cpu().numpy()
            expected = torch.from_numpy(scipy.special.zeta(x_np, q_np))
            actual = torch.special.zeta(x, q)
            rtol, atol = None, None
            if self.device_type == "cpu":
                # Tighter tolerances on CPU; other devices keep the defaults.
                rtol, atol = 1e-6, 1e-6
            self.assertEqual(expected, actual, rtol=rtol, atol=atol, exact_dtype=False)
        # x tensor - q tensor same size
        x = make_tensor((2, 3, 4), dtype=x_dtype, device=device)
        q = make_tensor((2, 3, 4), dtype=q_dtype, device=device)
        test_helper(x, q)
        # x tensor - q tensor broadcast lhs
        x = make_tensor((2, 1, 4), dtype=x_dtype, device=device)
        q = make_tensor((2, 3, 4), dtype=q_dtype, device=device)
        test_helper(x, q)
        # x tensor - q tensor broadcast rhs
        x = make_tensor((2, 3, 4), dtype=x_dtype, device=device)
        q = make_tensor((2, 1, 4), dtype=q_dtype, device=device)
        test_helper(x, q)
        # x tensor - q tensor broadcast all
        x = make_tensor((2, 3, 1), dtype=x_dtype, device=device)
        q = make_tensor((2, 1, 4), dtype=q_dtype, device=device)
        test_helper(x, q)
        # x scalar - q tensor
        for x in np.linspace(-5, 5, num=10).tolist():
            # NOTE(review): this rebinds q_dtype for all remaining iterations
            # once a non-floating dtype is seen; harmless here because the
            # replacement is idempotent, but easy to trip over when editing.
            if not q_dtype.is_floating_point:
                q_dtype = torch.get_default_dtype()
            q = make_tensor((2, 3, 4), dtype=q_dtype, device=device)
            test_helper(x, q)
        # x tensor - q scalar
        for q in np.linspace(-5, 5, num=10).tolist():
            # Same rebinding pattern as above, applied to x_dtype.
            if not x_dtype.is_floating_point:
                x_dtype = torch.get_default_dtype()
            x = make_tensor((2, 3, 4), dtype=x_dtype, device=device)
            test_helper(x, q)
@onlyCUDA
@dtypes(torch.chalf)
def test_mul_chalf_tensor_and_cpu_scalar(self, device, dtype):
# Tests that Tensor and CPU Scalar work for `mul` for chalf.
# Ideally, this should be covered by `test_complex_half_reference_testing`
# from test_ops.py by checking reference_samples from the OpInfo.
# But currently that doesn't work as sample generation requires support of
# `index_select` which is not implemented for `complex32` at the
# time of writing this test.
# TODO: Remove this test once above issue is fixed.
# Ref: https://github.com/pytorch/pytorch/pull/76364
x = make_tensor((2, 2), device=device, dtype=dtype)
self.assertEqual(x * 2.5, x * torch.tensor(2.5, device=device, dtype=dtype))
# Dunder names for every binary operator Tensor implements: comparisons,
# arithmetic (with reflected "__r*__" and in-place "__i*__" variants),
# matmul, shifts, and bitwise ops.  Consumed by
# generate_not_implemented_tests below, which stamps out one
# NotImplemented-propagation test per entry.
tensor_binary_ops = [
    "__lt__",
    "__le__",
    "__gt__",
    "__ge__",
    "__eq__",
    "__ne__",
    "__add__",
    "__radd__",
    "__iadd__",
    "__sub__",
    "__rsub__",
    "__isub__",
    "__mul__",
    "__rmul__",
    "__imul__",
    "__matmul__",
    "__rmatmul__",
    "__truediv__",
    "__rtruediv__",
    "__itruediv__",
    "__floordiv__",
    "__rfloordiv__",
    "__ifloordiv__",
    "__mod__",
    "__rmod__",
    "__imod__",
    "__pow__",
    "__rpow__",
    "__ipow__",
    "__lshift__",
    "__rlshift__",
    "__ilshift__",
    "__rshift__",
    "__rrshift__",
    "__irshift__",
    "__and__",
    "__rand__",
    "__iand__",
    "__xor__",
    "__rxor__",
    "__ixor__",
    "__or__",
    "__ror__",
    "__ior__",
    # Unsupported operators
    # '__imatmul__',
    # '__divmod__', '__rdivmod__', '__idivmod__',
]
# Test that binary math operations return NotImplemented for unknown types.
def generate_not_implemented_tests(cls):
    """Attach one test per binary dunder asserting NotImplemented is returned.

    Each generated test calls the dunder on a 0-dim tensor with an argument
    of a type torch knows nothing about and checks that the method answers
    NotImplemented (so Python can fall back to the reflected operation).
    """

    class UnknownType:
        # Deliberately opaque: torch has no conversion rule for it.
        pass

    # TODO: refactor to inline these
    _types = [
        torch.half,
        torch.float,
        torch.double,
        torch.int8,
        torch.short,
        torch.int,
        torch.long,
        torch.uint8,
    ]

    def make_case(op_name):
        @dtypes(*_types)
        def test(self, device, dtype):
            # Generate the input and run the tensor op on the device.
            scalar_tensor = torch.empty((), device=device, dtype=dtype)
            outcome = getattr(scalar_tensor, op_name)(UnknownType())
            self.assertEqual(outcome, NotImplemented)

        return test

    for op_name in tensor_binary_ops:
        test_name = f"test_{op_name}_not_implemented"
        assert not hasattr(cls, test_name), f"{test_name} already in {cls.__name__}"
        setattr(cls, test_name, make_case(op_name))
# Import-time wiring: attach the generated NotImplemented tests to the class,
# then hand it to the device-type framework to register runnable per-device
# variants in this module's globals().
generate_not_implemented_tests(TestBinaryUfuncs)
instantiate_device_type_tests(TestBinaryUfuncs, globals())
if __name__ == "__main__":
    run_tests()
| TestBinaryUfuncs |
python | numba__numba | numba/core/utils.py | {
"start": 13066,
"end": 18981
} | class ____(object):
def __init__(self, func, records, loop):
self.func = func
self.loop = loop
self.records = np.array(records) / loop
self.best = np.min(self.records)
def __repr__(self):
name = getattr(self.func, "__name__", self.func)
args = (name, self.loop, self.records.size, format_time(self.best))
return "%20s: %10d loops, best of %d: %s per loop" % args
def format_time(tm):
units = "s ms us ns ps".split()
base = 1
for unit in units[:-1]:
if tm >= base:
break
base /= 1000
else:
unit = units[-1]
return "%.1f%s" % (tm / base, unit)
def benchmark(func, maxsec=1):
timer = timeit.Timer(func)
number = 1
result = timer.repeat(1, number)
# Too fast to be measured
while min(result) / number == 0:
number *= 10
result = timer.repeat(3, number)
best = min(result) / number
if best >= maxsec:
return BenchmarkResult(func, result, number)
# Scale it up to make it close the maximum time
max_per_run_time = maxsec / 3 / number
number = max(max_per_run_time / best / 3, 1)
# Round to the next power of 10
number = int(10 ** math.ceil(math.log10(number)))
records = timer.repeat(3, number)
return BenchmarkResult(func, records, number)
# A dummy module for dynamically-generated functions
_dynamic_modname = '<dynamic>'
_dynamic_module = ModuleType(_dynamic_modname)
_dynamic_module.__builtins__ = builtins
def chain_exception(new_exc, old_exc):
"""Set the __cause__ attribute on *new_exc* for explicit exception
chaining. Returns the inplace modified *new_exc*.
"""
if DEVELOPER_MODE:
new_exc.__cause__ = old_exc
return new_exc
def get_nargs_range(pyfunc):
"""Return the minimal and maximal number of Python function
positional arguments.
"""
sig = pysignature(pyfunc)
min_nargs = 0
max_nargs = 0
for p in sig.parameters.values():
max_nargs += 1
if p.default == inspect._empty:
min_nargs += 1
return min_nargs, max_nargs
def unify_function_types(numba_types):
"""Return a normalized tuple of Numba function types so that
Tuple(numba_types)
becomes
UniTuple(dtype=<unified function type>, count=len(numba_types))
If the above transformation would be incorrect, return the
original input as given. For instance, if the input tuple contains
types that are not function or dispatcher type, the transformation
is considered incorrect.
"""
dtype = unified_function_type(numba_types)
if dtype is None:
return numba_types
return (dtype,) * len(numba_types)
def unified_function_type(numba_types, require_precise=True):
"""Returns a unified Numba function type if possible.
Parameters
----------
numba_types : Sequence of numba Type instances.
require_precise : bool
If True, the returned Numba function type must be precise.
Returns
-------
typ : {numba.core.types.Type, None}
A unified Numba function type. Or ``None`` when the Numba types
cannot be unified, e.g. when the ``numba_types`` contains at
least two different Numba function type instances.
If ``numba_types`` contains a Numba dispatcher type, the unified
Numba function type will be an imprecise ``UndefinedFunctionType``
instance, or None when ``require_precise=True`` is specified.
Specifying ``require_precise=False`` enables unifying imprecise
Numba dispatcher instances when used in tuples or if-then branches
when the precise Numba function cannot be determined on the first
occurrence that is not a call expression.
"""
from numba.core.errors import NumbaExperimentalFeatureWarning
if not (isinstance(numba_types, Sequence) and
len(numba_types) > 0 and
isinstance(numba_types[0],
(types.Dispatcher, types.FunctionType))):
return
warnings.warn("First-class function type feature is experimental",
category=NumbaExperimentalFeatureWarning)
mnargs, mxargs = None, None
dispatchers = set()
function = None
undefined_function = None
for t in numba_types:
if isinstance(t, types.Dispatcher):
mnargs1, mxargs1 = get_nargs_range(t.dispatcher.py_func)
if mnargs is None:
mnargs, mxargs = mnargs1, mxargs1
elif not (mnargs, mxargs) == (mnargs1, mxargs1):
return
dispatchers.add(t.dispatcher)
t = t.dispatcher.get_function_type()
if t is None:
continue
if isinstance(t, types.FunctionType):
if mnargs is None:
mnargs = mxargs = t.nargs
elif not (mnargs == mxargs == t.nargs):
return
if isinstance(t, types.UndefinedFunctionType):
if undefined_function is None:
undefined_function = t
else:
# Refuse to unify using function type
return
dispatchers.update(t.dispatchers)
else:
if function is None:
function = t
else:
assert function == t
else:
return
if require_precise and (function is None or undefined_function is not None):
return
if function is not None:
if undefined_function is not None:
assert function.nargs == undefined_function.nargs
function = undefined_function
elif undefined_function is not None:
undefined_function.dispatchers.update(dispatchers)
function = undefined_function
else:
function = types.UndefinedFunctionType(mnargs, dispatchers)
return function
| BenchmarkResult |
python | getsentry__sentry | tests/sentry/issues/escalating/test_escalating.py | {
"start": 2906,
"end": 10093
} | class ____(
BaseGroupCounts,
PerformanceIssueTestCase,
SearchIssueTestMixin,
):
"""Test that querying Snuba for the hourly counts for groups works as expected."""
def _create_hourly_bucket(self, count: int, event: Event | GroupEvent) -> GroupsCountResponse:
"""It simplifies writing the expected data structures"""
assert event.group_id is not None
return {
"count()": count,
"group_id": event.group_id,
"hourBucket": str(to_start_of_hour(event.datetime)),
"project_id": event.project_id,
}
def test_query_single_group(self) -> None:
event = self._create_events_for_group()
assert query_groups_past_counts(Group.objects.all()) == [
self._create_hourly_bucket(1, event)
]
@pytest.mark.skip(reason="flaky: #95139")
@freeze_time(TIME_YESTERDAY)
def test_query_different_group_categories(self) -> None:
from django.utils import timezone
timestamp = timezone.now() - timedelta(minutes=1)
# This builds an error group and a profiling group
profile_error_event, _, profile_issue_occurrence = self.store_search_issue(
project_id=self.project.id,
user_id=0,
fingerprints=[f"{ProfileFileIOGroupType.type_id}-group1"],
insert_time=timestamp,
)
self.store_metric(
org_id=profile_error_event.project.organization_id,
project_id=profile_error_event.project.id,
mri=build_mri("event_ingested", "c", UseCaseID.ESCALATING_ISSUES, None),
value=1,
tags={"group": str(profile_error_event.group_id)},
timestamp=profile_error_event.data["timestamp"],
)
assert profile_error_event.group is not None
assert profile_issue_occurrence is not None
assert len(Group.objects.all()) == 2
perf_event = self.create_performance_issue()
error_event = self._create_events_for_group()
assert perf_event.group is not None
assert error_event.group is not None
# store_search_issue created two groups
assert len(Group.objects.all()) == 4
assert profile_error_event.group.issue_category == GroupCategory.ERROR
assert error_event.group.issue_category == GroupCategory.ERROR
assert profile_issue_occurrence.group.issue_category == GroupCategory.PERFORMANCE
assert perf_event.group.issue_category == GroupCategory.PERFORMANCE
profile_issue_occurrence_bucket = {
"count()": 1,
"group_id": profile_issue_occurrence.group.id,
"hourBucket": to_start_of_hour(profile_issue_occurrence.group.first_seen),
"project_id": self.project.id,
}
# Error groups will show up at the beginning of the list even if they
# were created later
assert query_groups_past_counts(Group.objects.all()) == [
self._create_hourly_bucket(1, profile_error_event),
self._create_hourly_bucket(1, error_event),
profile_issue_occurrence_bucket,
self._create_hourly_bucket(1, perf_event),
]
# This forces to test the iteration over the Snuba data
@mock.patch("sentry.issues.escalating.escalating.ELEMENTS_PER_SNUBA_PAGE", new=4)
def test_pagination(self) -> None:
events = []
for i in range(20):
event = self._create_events_for_group(count=1, hours_ago=2, group=f"group-{i}")
events.append(event)
assert query_groups_past_counts(Group.objects.all()) == [
self._create_hourly_bucket(1, event) for event in events
]
def test_query_optimization(self) -> None:
px = self.create_project(organization=self.project.organization)
py = self.create_project(organization=self.project.organization)
pz = self.create_project(organization=self.project.organization)
# Two different groups for proj x, one group for proj y and two groups for proj z
self._create_events_for_group(project_id=px.id)
self._create_events_for_group(project_id=px.id, group="group-b")
self._create_events_for_group(project_id=py.id)
self._create_events_for_group(project_id=pz.id)
self._create_events_for_group(project_id=pz.id, group="group-b")
groups = Group.objects.all()
assert len(groups) == 5
# Force pagination to only three elements per page
# Once we get to Python 3.10+ the formating of this multiple with statement will not be an eye sore
with (
patch("sentry.issues.escalating.escalating._query_with_pagination") as query_mock,
patch("sentry.issues.escalating.escalating.ELEMENTS_PER_SNUBA_PAGE", new=3),
patch("sentry.issues.escalating.escalating.BUCKETS_PER_GROUP", new=2),
):
query_groups_past_counts(groups)
# Proj X will expect potentially 4 elements because it has two groups, thus, no other
# project will be called with it.
# Proj Y and Z will be grouped together
assert query_mock.call_count == 2
def test_query_multiple_projects(self) -> None:
proj_x = self.create_project(organization=self.project.organization)
proj_y = self.create_project(organization=self.project.organization)
event1 = self._create_events_for_group(project_id=proj_x.id)
# This event has the same fingerprint as event1 but
# should be different group IDs since they belong to different projects
event_y_1 = self._create_events_for_group(project_id=proj_y.id, hours_ago=1)
assert event1.group_id != event_y_1.group_id
event_y_2 = self._create_events_for_group(project_id=proj_y.id, group="group-1")
# Increases the count of group-1
self._create_events_for_group(project_id=proj_y.id, group="group-1")
assert query_groups_past_counts(Group.objects.all()) == [
self._create_hourly_bucket(1, event1),
self._create_hourly_bucket(1, event_y_1),
self._create_hourly_bucket(2, event_y_2),
]
def test_query_different_orgs(self) -> None:
proj_a = self.create_project(organization=self.project.organization)
org_b = self.create_organization()
proj_b = self.create_project(organization=org_b)
event1 = self._create_events_for_group(project_id=proj_a.id, hours_ago=1)
event_proj_org_b_1 = self._create_events_for_group(project_id=proj_b.id, hours_ago=1)
# Since proj_org_b is created
assert query_groups_past_counts(Group.objects.all()) == [
self._create_hourly_bucket(1, event1),
self._create_hourly_bucket(1, event_proj_org_b_1),
]
def test_query_no_groups(self) -> None:
assert query_groups_past_counts([]) == []
def test_datetime_number_of_hours() -> None:
start, end = _start_and_end_dates(5)
assert (end - start).seconds / 3600 == 5
def test_datetime_number_of_days() -> None:
start, end = _start_and_end_dates()
assert (end - start).days == 7
| HistoricGroupCounts |
python | wandb__wandb | wandb/automations/_generated/fragments.py | {
"start": 2954,
"end": 3149
} | class ____(GQLResult):
typename__: Typename[Literal["QueueJobTriggeredAction"]] = "QueueJobTriggeredAction"
queue: Optional[QueueJobActionFieldsQueue]
template: str
| QueueJobActionFields |
python | pytorch__pytorch | test/test_fx_passes.py | {
"start": 14246,
"end": 14654
} | class ____:
@staticmethod
def forward(x):
val = torch.neg(x)
return torch.add(val, val)
@staticmethod
def pattern(a):
return torch.neg(a)
test_cases = [
# match_output, match_placeholder, num_matches
TestCase(False, False, 1),
TestCase(True, False, 0),
TestCase(False, True, 1),
TestCase(True, True, 0)
]
| SingleNodePattern |
python | jmcnamara__XlsxWriter | xlsxwriter/exceptions.py | {
"start": 1115,
"end": 1203
} | class ____(XlsxFileError):
"""Unsupported image file format."""
| UnsupportedImageFormat |
python | apache__airflow | task-sdk/tests/task_sdk/bases/test_sensor.py | {
"start": 2660,
"end": 24679
} | class ____:
@pytest.fixture
def make_sensor(self):
"""Create a DummySensor"""
def _make_sensor(return_value, task_id=SENSOR_OP, **kwargs):
poke_interval = "poke_interval"
timeout = "timeout"
if poke_interval not in kwargs:
kwargs[poke_interval] = 0
if timeout not in kwargs:
kwargs[timeout] = 0
with DAG(TEST_DAG_ID):
if "xcom_value" in kwargs:
sensor = DummySensorWithXcomValue(task_id=task_id, return_value=return_value, **kwargs)
else:
sensor = DummySensor(task_id=task_id, return_value=return_value, **kwargs)
dummy_op = EmptyOperator(task_id=DUMMY_OP)
sensor >> dummy_op
return sensor
return _make_sensor
@classmethod
def _run(cls, task, context=None):
if context is None:
context = {}
return task.execute(context)
def test_ok(self, make_sensor):
sensor = make_sensor(True)
self._run(sensor)
def test_fail(self, make_sensor):
sensor = make_sensor(False)
with pytest.raises(AirflowSensorTimeout):
self._run(sensor)
def test_soft_fail(self, make_sensor):
sensor = make_sensor(False, soft_fail=True)
with pytest.raises(AirflowSkipException):
self._run(sensor)
@pytest.mark.parametrize(
"exception_cls",
(ValueError,),
)
def test_soft_fail_with_exception(self, make_sensor, exception_cls):
sensor = make_sensor(False, soft_fail=True)
sensor.poke = Mock(side_effect=[exception_cls(None)])
with pytest.raises(ValueError, match="None"):
self._run(sensor)
@pytest.mark.parametrize(
"exception_cls",
(
AirflowSensorTimeout,
AirflowTaskTimeout,
AirflowFailException,
),
)
def test_soft_fail_with_skip_exception(self, make_sensor, exception_cls):
sensor = make_sensor(False, soft_fail=True)
sensor.poke = Mock(side_effect=[exception_cls(None)])
with pytest.raises(AirflowSkipException):
self._run(sensor)
@pytest.mark.parametrize(
"exception_cls",
(AirflowSensorTimeout, AirflowTaskTimeout, AirflowFailException, Exception),
)
def test_never_fail_with_skip_exception(self, make_sensor, exception_cls):
sensor = make_sensor(False, never_fail=True)
sensor.poke = Mock(side_effect=[exception_cls(None)])
with pytest.raises(AirflowSkipException):
self._run(sensor)
def test_ok_with_reschedule(self, run_task, make_sensor, time_machine):
sensor = make_sensor(return_value=None, poke_interval=10, timeout=25, mode="reschedule")
sensor.poke = Mock(side_effect=[False, False, True])
# first poke returns False and task is re-scheduled
date1 = timezone.utcnow()
time_machine.move_to(date1, tick=False)
state, msg, _ = run_task(task=sensor)
assert state == TaskInstanceState.UP_FOR_RESCHEDULE
assert msg.reschedule_date == date1 + timedelta(seconds=sensor.poke_interval)
# second poke returns False and task is re-scheduled
time_machine.coordinates.shift(sensor.poke_interval)
date2 = date1 + timedelta(seconds=sensor.poke_interval)
state, msg, _ = run_task(task=sensor)
assert state == TaskInstanceState.UP_FOR_RESCHEDULE
assert msg.reschedule_date == date2 + timedelta(seconds=sensor.poke_interval)
# third poke returns True and task succeeds
time_machine.coordinates.shift(sensor.poke_interval)
state, _, _ = run_task(task=sensor)
assert state == TaskInstanceState.SUCCESS
def test_fail_with_reschedule(self, run_task, make_sensor, time_machine, mock_supervisor_comms):
sensor = make_sensor(return_value=False, poke_interval=10, timeout=5, mode="reschedule")
# first poke returns False and task is re-scheduled
date1 = timezone.utcnow()
time_machine.move_to(date1, tick=False)
state, msg, _ = run_task(task=sensor)
assert state == TaskInstanceState.UP_FOR_RESCHEDULE
assert msg.reschedule_date == date1 + timedelta(seconds=sensor.poke_interval)
# second poke returns False, timeout occurs
time_machine.coordinates.shift(sensor.poke_interval)
# Mocking values from DB/API-server
mock_supervisor_comms.send.return_value = TaskRescheduleStartDate(start_date=date1)
state, msg, error = run_task(task=sensor, context_update={"task_reschedule_count": 1})
assert state == TaskInstanceState.FAILED
assert isinstance(error, AirflowSensorTimeout)
def test_soft_fail_with_reschedule(self, run_task, make_sensor, time_machine, mock_supervisor_comms):
sensor = make_sensor(
return_value=False, poke_interval=10, timeout=5, soft_fail=True, mode="reschedule"
)
# first poke returns False and task is re-scheduled
date1 = timezone.utcnow()
time_machine.move_to(date1, tick=False)
state, msg, _ = run_task(task=sensor)
assert state == TaskInstanceState.UP_FOR_RESCHEDULE
# second poke returns False, timeout occurs
time_machine.coordinates.shift(sensor.poke_interval)
# Mocking values from DB/API-server
mock_supervisor_comms.send.return_value = TaskRescheduleStartDate(start_date=date1)
state, msg, _ = run_task(task=sensor, context_update={"task_reschedule_count": 1})
assert state == TaskInstanceState.SKIPPED
def test_ok_with_reschedule_and_exponential_backoff(
self, run_task, make_sensor, time_machine, mock_supervisor_comms
):
sensor = make_sensor(
return_value=None,
poke_interval=10,
timeout=36000,
mode="reschedule",
exponential_backoff=True,
)
false_count = 10
sensor.poke = Mock(side_effect=[False] * false_count + [True])
task_start_date = timezone.utcnow()
time_machine.move_to(task_start_date, tick=False)
curr_date = task_start_date
def run_duration():
return (timezone.utcnow() - task_start_date).total_seconds()
new_interval = 0
mock_supervisor_comms.send.return_value = TaskRescheduleStartDate(start_date=task_start_date)
# loop poke returns false
for _poke_count in range(1, false_count + 1):
curr_date = curr_date + timedelta(seconds=new_interval)
time_machine.coordinates.shift(new_interval)
state, msg, _ = run_task(sensor, context_update={"task_reschedule_count": _poke_count})
assert state == TaskInstanceState.UP_FOR_RESCHEDULE
old_interval = new_interval
new_interval = sensor._get_next_poke_interval(task_start_date, run_duration, _poke_count)
assert old_interval < new_interval # actual test
assert msg.reschedule_date == curr_date + timedelta(seconds=new_interval)
# last poke returns True and task succeeds
curr_date = curr_date + timedelta(seconds=new_interval)
time_machine.coordinates.shift(new_interval)
state, msg, _ = run_task(sensor, context_update={"task_reschedule_count": false_count + 1})
assert state == TaskInstanceState.SUCCESS
def test_invalid_mode(self):
with pytest.raises(ValueError, match="The mode must be one of"):
DummySensor(task_id="a", mode="foo")
def test_ok_with_custom_reschedule_exception(self, make_sensor, run_task):
sensor = make_sensor(return_value=None, mode="reschedule")
date1 = timezone.utcnow()
date2 = date1 + timedelta(seconds=60)
date3 = date1 + timedelta(seconds=120)
sensor.poke = Mock(
side_effect=[AirflowRescheduleException(date2), AirflowRescheduleException(date3), True]
)
# first poke returns False and task is re-scheduled
with time_machine.travel(date1, tick=False):
state, msg, error = run_task(sensor)
assert state == TaskInstanceState.UP_FOR_RESCHEDULE
assert isinstance(msg, RescheduleTask)
assert msg.reschedule_date == date2
# second poke returns False and task is re-scheduled
with time_machine.travel(date2, tick=False):
state, msg, error = run_task(sensor)
assert state == TaskInstanceState.UP_FOR_RESCHEDULE
assert isinstance(msg, RescheduleTask)
assert msg.reschedule_date == date3
# third poke returns True and task succeeds
with time_machine.travel(date3, tick=False):
state, _, _ = run_task(sensor)
assert state == TaskInstanceState.SUCCESS
def test_sensor_with_invalid_poke_interval(self):
negative_poke_interval = -10
non_number_poke_interval = "abcd"
positive_poke_interval = 10
with pytest.raises(
ValueError, match="Operator arg `poke_interval` must be timedelta object or a non-negative number"
):
DummySensor(
task_id="test_sensor_task_1",
return_value=None,
poke_interval=negative_poke_interval,
timeout=25,
)
with pytest.raises(
ValueError, match="Operator arg `poke_interval` must be timedelta object or a non-negative number"
):
DummySensor(
task_id="test_sensor_task_2",
return_value=None,
poke_interval=non_number_poke_interval,
timeout=25,
)
DummySensor(
task_id="test_sensor_task_3", return_value=None, poke_interval=positive_poke_interval, timeout=25
)
def test_sensor_with_invalid_timeout(self):
negative_timeout = -25
non_number_timeout = "abcd"
positive_timeout = 25
with pytest.raises(
ValueError, match="Operator arg `timeout` must be timedelta object or a non-negative number"
):
DummySensor(
task_id="test_sensor_task_1", return_value=None, poke_interval=10, timeout=negative_timeout
)
with pytest.raises(
ValueError, match="Operator arg `timeout` must be timedelta object or a non-negative number"
):
DummySensor(
task_id="test_sensor_task_2", return_value=None, poke_interval=10, timeout=non_number_timeout
)
DummySensor(
task_id="test_sensor_task_3", return_value=None, poke_interval=10, timeout=positive_timeout
)
def test_sensor_with_exponential_backoff_off(self):
sensor = DummySensor(
task_id=SENSOR_OP, return_value=None, poke_interval=5, timeout=60, exponential_backoff=False
)
started_at = timezone.utcnow() - timedelta(seconds=10)
def run_duration():
return (timezone.utcnow() - started_at).total_seconds()
assert sensor._get_next_poke_interval(started_at, run_duration, 1) == sensor.poke_interval
assert sensor._get_next_poke_interval(started_at, run_duration, 2) == sensor.poke_interval
def test_sensor_with_exponential_backoff_on(self):
sensor = DummySensor(
task_id=SENSOR_OP, return_value=None, poke_interval=5, timeout=60, exponential_backoff=True
)
with time_machine.travel(DEFAULT_DATE):
started_at = timezone.utcnow() - timedelta(seconds=10)
def run_duration():
return (timezone.utcnow() - started_at).total_seconds()
interval1 = sensor._get_next_poke_interval(started_at, run_duration, 1)
interval2 = sensor._get_next_poke_interval(started_at, run_duration, 2)
assert interval1 >= 0
assert interval1 <= sensor.poke_interval
assert interval2 >= sensor.poke_interval
assert interval2 > interval1
@pytest.mark.parametrize("poke_interval", [0, 0.1, 0.9, 1, 2, 3])
def test_sensor_with_exponential_backoff_on_and_small_poke_interval(self, poke_interval):
"""Test that sensor works correctly when poke_interval is small and exponential_backoff is on"""
sensor = DummySensor(
task_id=SENSOR_OP,
return_value=None,
poke_interval=poke_interval,
timeout=60,
exponential_backoff=True,
)
with time_machine.travel(DEFAULT_DATE):
started_at = timezone.utcnow() - timedelta(seconds=10)
def run_duration():
return (timezone.utcnow() - started_at).total_seconds()
intervals = [
sensor._get_next_poke_interval(started_at, run_duration, retry_number)
for retry_number in range(1, 10)
]
for interval1, interval2 in zip(intervals, intervals[1:]):
# intervals should be increasing or equals
assert interval1 <= interval2
if poke_interval > 0:
# check if the intervals are increasing after some retries when poke_interval > 0
assert intervals[0] < intervals[-1]
else:
# check if the intervals are equal after some retries when poke_interval == 0
assert intervals[0] == intervals[-1]
def test_sensor_with_exponential_backoff_on_and_max_wait(self):
sensor = DummySensor(
task_id=SENSOR_OP,
return_value=None,
poke_interval=10,
timeout=60,
exponential_backoff=True,
max_wait=timedelta(seconds=30),
)
with time_machine.travel(DEFAULT_DATE):
started_at = timezone.utcnow() - timedelta(seconds=10)
def run_duration():
return (timezone.utcnow() - started_at).total_seconds()
for idx, expected in enumerate([2, 6, 13, 30, 30, 30, 30, 30]):
assert sensor._get_next_poke_interval(started_at, run_duration, idx) == expected
def test_reschedule_and_retry_timeout(self, mock_supervisor_comms, make_sensor, time_machine, run_task):
"""
Test mode="reschedule", retries and timeout configurations interact correctly.
Given a sensor configured like this:
- poke_interval=5
- timeout=10
- retries=2
- retry_delay=timedelta(seconds=3)
The test verifies two phases:
Phase 1: Initial execution until failure
00:00 Returns False try_number=1, max_tries=2, state=up_for_reschedule
00:05 Raises RuntimeError try_number=2, max_tries=2, state=up_for_retry
00:08 Returns False try_number=2, max_tries=2, state=up_for_reschedule
00:13 Raises AirflowSensorTimeout try_number=3, max_tries=2, state=failed
Phase 2: After clearing the failed sensor
00:19 Returns False try_number=3, max_tries=4, state=up_for_reschedule
00:24 Returns False try_number=3, max_tries=4, state=up_for_reschedule
00:26 Returns False try_number=3, max_tries=4, state=up_for_reschedule
00:31 Raises AirflowSensorTimeout try_number=4, max_tries=4, state=failed
"""
# Setup sensor with test configuration
sensor = make_sensor(
return_value=None,
poke_interval=5,
timeout=10,
retries=2,
retry_delay=timedelta(seconds=3),
mode="reschedule",
)
# Configure poke behavior for both phases
sensor.poke = Mock(
side_effect=[
# Phase 1
False, # Initial poke
RuntimeError, # Second poke raises error
False, # Third poke after retry
False, # Fourth poke times out
# Phase 2 (after clearing)
False, # First poke after clear
False, # Second poke after clear
False, # Third poke after clear
False, # Final poke times out
]
)
# To store the state across runs
test_state = {
"task_reschedule_count": 0,
"current_time": timezone.datetime(2025, 1, 1),
"try_number": 1,
"max_tries": sensor.retries, # Initial max_tries
"first_reschedule_date": None, # Track the first reschedule date
}
def _run_task():
"""
Helper function to run the sensor task with consistent state management.
This function:
1. Freezes current time using timemachine
2. Configures the supervisor comms mock to return the appropriate TR start date
3. Runs the task with the current state (try_number, max_tries, task_reschedule_count etc)
4. Updates the state dictionary
We use this helper to ensure consistent state management across all task runs
and to avoid duplicating the setup/teardown code for each run.
"""
time_machine.move_to(test_state["current_time"], tick=False)
# For timeout calculation, we need to use the first reschedule date
# This ensures the timeout is calculated from the start of the task
if test_state["first_reschedule_date"] is None:
mock_supervisor_comms.send.return_value = TaskRescheduleStartDate(start_date=None)
else:
mock_supervisor_comms.send.return_value = TaskRescheduleStartDate(
start_date=test_state["first_reschedule_date"]
)
state, msg, error = run_task(
task=sensor,
try_number=test_state["try_number"],
max_tries=test_state["max_tries"],
context_update={"task_reschedule_count": test_state["task_reschedule_count"]},
)
if state == TaskInstanceState.UP_FOR_RESCHEDULE:
test_state["task_reschedule_count"] += 1
# Only set first_reschedule_date on the first successful reschedule
if test_state["first_reschedule_date"] is None:
test_state["first_reschedule_date"] = test_state["current_time"]
elif state == TaskInstanceState.UP_FOR_RETRY:
test_state["try_number"] += 1
return state, msg, error
# Phase 1: Initial execution until failure
# First poke - should reschedule
state, _, _ = _run_task()
assert state == TaskInstanceState.UP_FOR_RESCHEDULE
# Second poke - should raise RuntimeError and retry
test_state["current_time"] += timedelta(seconds=sensor.poke_interval)
state, _, error = _run_task()
assert state == TaskInstanceState.UP_FOR_RETRY
assert isinstance(error, RuntimeError)
# Third poke - should reschedule again
test_state["current_time"] += sensor.retry_delay + timedelta(seconds=1)
state, _, _ = _run_task()
assert state == TaskInstanceState.UP_FOR_RESCHEDULE
# Fourth poke - should timeout
test_state["current_time"] += timedelta(seconds=sensor.poke_interval)
state, _, error = _run_task()
assert isinstance(error, AirflowSensorTimeout)
assert state == TaskInstanceState.FAILED
# Phase 2: After clearing the failed sensor
# Reset supervisor comms to return None, simulating a fresh start after clearing
test_state["first_reschedule_date"] = None
test_state["max_tries"] = 4 # Original max_tries (2) + retries (2)
test_state["current_time"] += timedelta(seconds=20)
# Test three reschedules after clearing
for _ in range(3):
test_state["current_time"] += timedelta(seconds=sensor.poke_interval)
state, _, _ = _run_task()
assert state == TaskInstanceState.UP_FOR_RESCHEDULE
# Final poke - should timeout
test_state["current_time"] += timedelta(seconds=sensor.poke_interval)
state, _, error = _run_task()
assert isinstance(error, AirflowSensorTimeout)
assert state == TaskInstanceState.FAILED
def test_sensor_with_xcom(self, make_sensor):
xcom_value = "TestValue"
sensor = make_sensor(True, xcom_value=xcom_value)
assert self._run(sensor) == xcom_value
def test_sensor_with_xcom_fails(self, make_sensor):
xcom_value = "TestValue"
sensor = make_sensor(False, xcom_value=xcom_value)
with pytest.raises(AirflowSensorTimeout):
assert self._run(sensor) == xcom_value is None
def test_resume_execution(self):
op = BaseSensorOperator(task_id="hi")
with pytest.raises(AirflowSensorTimeout):
op.resume_execution(
next_method="__fail__",
next_kwargs={"error": TriggerFailureReason.TRIGGER_TIMEOUT},
context={},
)
@pytest.mark.parametrize("mode", ["poke", "reschedule"])
@pytest.mark.parametrize("retries", [0, 1])
def test_sensor_timeout(self, mode, retries, run_task):
"""
Test that AirflowSensorTimeout does not cause sensor to retry.
"""
from airflow.providers.standard.sensors.python import PythonSensor
def timeout():
raise AirflowSensorTimeout
task = PythonSensor(
task_id="test_raise_sensor_timeout",
python_callable=timeout,
retries=retries,
mode=mode,
)
state, _, error = run_task(task=task, dag_id=f"test_sensor_timeout_{mode}_{retries}")
assert isinstance(error, AirflowSensorTimeout)
assert state == TaskInstanceState.FAILED
@poke_mode_only
| TestBaseSensor |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-zephyr-query-engine/llama_index/packs/zephyr_query_engine/base.py | {
"start": 352,
"end": 3279
} | class ____(BaseLlamaPack):
def __init__(self, documents: List[Document]) -> None:
"""Init params."""
try:
import torch
from transformers import BitsAndBytesConfig
except ImportError:
raise ImportError(
"Dependencies missing, run "
"`pip install torch transformers accelerate bitsandbytes`"
)
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_quant_type="nf4",
bnb_4bit_use_double_quant=True,
)
try:
llm = HuggingFaceLLM(
model_name="HuggingFaceH4/zephyr-7b-beta",
tokenizer_name="HuggingFaceH4/zephyr-7b-beta",
query_wrapper_prompt=PromptTemplate(
"<|system|>\n</s>\n<|user|>\n{query_str}</s>\n<|assistant|>\n"
),
context_window=3900,
max_new_tokens=256,
model_kwargs={"quantization_config": quantization_config},
generate_kwargs={
"do_sample": True,
"temperature": 0.7,
"top_k": 50,
"top_p": 0.95,
},
device_map="auto",
)
except Exception:
print(
"Failed to load and quantize model, likely due to CUDA being missing. "
"Loading full precision model instead."
)
llm = HuggingFaceLLM(
model_name="HuggingFaceH4/zephyr-7b-beta",
tokenizer_name="HuggingFaceH4/zephyr-7b-beta",
query_wrapper_prompt=PromptTemplate(
"<|system|>\n</s>\n<|user|>\n{query_str}</s>\n<|assistant|>\n"
),
context_window=3900,
max_new_tokens=256,
generate_kwargs={
"do_sample": True,
"temperature": 0.7,
"top_k": 50,
"top_p": 0.95,
},
device_map="auto",
)
# set tokenizer for proper token counting
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
set_global_tokenizer(tokenizer.encode)
Settings.llm = llm
Settings.embed_model = "local:BAAI/bge-base-en-v1.5"
self.llm = llm
self.index = VectorStoreIndex.from_documents(documents)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {"llm": self.llm, "index": self.index}
def run(self, query_str: str, **kwargs: Any) -> Any:
"""Run the pipeline."""
query_engine = self.index.as_query_engine(**kwargs)
return query_engine.query(query_str)
| ZephyrQueryEnginePack |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocolExplicit1.py | {
"start": 1312,
"end": 1358
} | class ____(Mixin, Protocol6):
pass
| Concrete6 |
python | readthedocs__readthedocs.org | readthedocs/core/admin.py | {
"start": 1119,
"end": 2323
} | class ____(admin.SimpleListFilter):
"""Filter users based on project properties."""
parameter_name = "project_state"
title = _("user projects")
PROJECT_ACTIVE = "active"
PROJECT_BUILT = "built"
PROJECT_RECENT = "recent"
def lookups(self, request, model_admin):
return (
(self.PROJECT_ACTIVE, _("has active project")),
(self.PROJECT_BUILT, _("has built project")),
(self.PROJECT_RECENT, _("has project with recent builds")),
)
def queryset(self, request, queryset):
"""
Add filters to queryset filter.
``PROJECT_ACTIVE`` and ``PROJECT_BUILT`` look for versions on projects,
``PROJECT_RECENT`` looks for projects with builds in the last year
"""
if self.value() == self.PROJECT_ACTIVE:
return queryset.filter(projects__versions__active=True)
if self.value() == self.PROJECT_BUILT:
return queryset.filter(projects__versions__built=True)
if self.value() == self.PROJECT_RECENT:
recent_date = timezone.now() - timedelta(days=365)
return queryset.filter(projects__builds__date__gt=recent_date)
| UserProjectFilter |
python | fluentpython__example-code | 20-descriptor/descriptorkinds_dump.py | {
"start": 4399,
"end": 4669
} | class ____: # <2>
"""a.k.a. data descriptor or enforced descriptor"""
def __get__(self, instance, owner):
print_args('get', self, instance, owner) # <3>
def __set__(self, instance, value):
print_args('set', self, instance, value)
| Overriding |
python | huggingface__transformers | src/transformers/models/qwen3_next/modeling_qwen3_next.py | {
"start": 16087,
"end": 26183
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Qwen3NextConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim * 2, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.rotary_fn = apply_rotary_pos_emb
self.q_norm = Qwen3NextRMSNorm(self.head_dim, eps=config.rms_norm_eps) # unlike olmo, only on the head dim!
self.k_norm = Qwen3NextRMSNorm(
self.head_dim, eps=config.rms_norm_eps
) # thus post q_norm does not need reshape
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states, gate = torch.chunk(
self.q_proj(hidden_states).view(*input_shape, -1, self.head_dim * 2), 2, dim=-1
)
gate = gate.reshape(*input_shape, -1)
query_states = self.q_norm(query_states.view(hidden_shape)).transpose(1, 2)
key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = attn_output * torch.sigmoid(gate)
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
def apply_mask_to_padding_states(hidden_states, attention_mask):
"""
Tunes out the hidden states for padding tokens, see https://github.com/state-spaces/mamba/issues/66
"""
# NOTE: attention mask is a 2D boolean tensor
if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
dtype = hidden_states.dtype
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
return hidden_states
is_fast_path_available = all(
(causal_conv1d_fn, causal_conv1d_update, chunk_gated_delta_rule, fused_recurrent_gated_delta_rule)
)
def torch_causal_conv1d_update(
hidden_states,
conv_state,
weight,
bias=None,
activation=None,
):
_, hidden_size, seq_len = hidden_states.shape
state_len = conv_state.shape[-1]
hidden_states_new = torch.cat([conv_state, hidden_states], dim=-1).to(weight.dtype)
conv_state.copy_(hidden_states_new[:, :, -state_len:])
out = F.conv1d(hidden_states_new, weight.unsqueeze(1), bias, padding=0, groups=hidden_size)
out = F.silu(out[:, :, -seq_len:])
out = out.to(hidden_states.dtype)
return out
def l2norm(x: torch.FloatTensor, dim: int = -1, eps: float = 1e-6):
"""This function is intended to align with the l2norm implementation in the FLA library."""
inv_norm = torch.rsqrt((x * x).sum(dim=dim, keepdim=True) + eps)
return x * inv_norm
def torch_chunk_gated_delta_rule(
query,
key,
value,
g,
beta,
chunk_size=64,
initial_state=None,
output_final_state=False,
use_qk_l2norm_in_kernel=False,
):
initial_dtype = query.dtype
if use_qk_l2norm_in_kernel:
query = l2norm(query, dim=-1, eps=1e-6)
key = l2norm(key, dim=-1, eps=1e-6)
query, key, value, beta, g = [
x.transpose(1, 2).contiguous().to(torch.float32) for x in (query, key, value, beta, g)
]
batch_size, num_heads, sequence_length, k_head_dim = key.shape
v_head_dim = value.shape[-1]
pad_size = (chunk_size - sequence_length % chunk_size) % chunk_size
query = F.pad(query, (0, 0, 0, pad_size))
key = F.pad(key, (0, 0, 0, pad_size))
value = F.pad(value, (0, 0, 0, pad_size))
beta = F.pad(beta, (0, pad_size))
g = F.pad(g, (0, pad_size))
total_sequence_length = sequence_length + pad_size
scale = 1 / (query.shape[-1] ** 0.5)
query = query * scale
v_beta = value * beta.unsqueeze(-1)
k_beta = key * beta.unsqueeze(-1)
# reshape to chunks
query, key, value, k_beta, v_beta = [
x.reshape(x.shape[0], x.shape[1], -1, chunk_size, x.shape[-1]) for x in (query, key, value, k_beta, v_beta)
]
g = g.reshape(g.shape[0], g.shape[1], -1, chunk_size)
mask = torch.triu(torch.ones(chunk_size, chunk_size, dtype=torch.bool, device=query.device), diagonal=0)
# chunk decay
g = g.cumsum(dim=-1)
decay_mask = ((g.unsqueeze(-1) - g.unsqueeze(-2)).tril().exp().float()).tril()
attn = -((k_beta @ key.transpose(-1, -2)) * decay_mask).masked_fill(mask, 0)
for i in range(1, chunk_size):
row = attn[..., i, :i].clone()
sub = attn[..., :i, :i].clone()
attn[..., i, :i] = row + (row.unsqueeze(-1) * sub).sum(-2)
attn = attn + torch.eye(chunk_size, dtype=attn.dtype, device=attn.device)
value = attn @ v_beta
k_cumdecay = attn @ (k_beta * g.exp().unsqueeze(-1))
last_recurrent_state = (
torch.zeros(batch_size, num_heads, k_head_dim, v_head_dim).to(value)
if initial_state is None
else initial_state.to(value)
)
core_attn_out = torch.zeros_like(value)
mask = torch.triu(torch.ones(chunk_size, chunk_size, dtype=torch.bool, device=query.device), diagonal=1)
# for each chunk
for i in range(0, total_sequence_length // chunk_size):
q_i, k_i, v_i = query[:, :, i], key[:, :, i], value[:, :, i]
attn = (q_i @ k_i.transpose(-1, -2) * decay_mask[:, :, i]).masked_fill_(mask, 0)
v_prime = (k_cumdecay[:, :, i]) @ last_recurrent_state
v_new = v_i - v_prime
attn_inter = (q_i * g[:, :, i, :, None].exp()) @ last_recurrent_state
core_attn_out[:, :, i] = attn_inter + attn @ v_new
last_recurrent_state = (
last_recurrent_state * g[:, :, i, -1, None, None].exp()
+ (k_i * (g[:, :, i, -1, None] - g[:, :, i]).exp()[..., None]).transpose(-1, -2) @ v_new
)
if not output_final_state:
last_recurrent_state = None
core_attn_out = core_attn_out.reshape(core_attn_out.shape[0], core_attn_out.shape[1], -1, core_attn_out.shape[-1])
core_attn_out = core_attn_out[:, :, :sequence_length]
core_attn_out = core_attn_out.transpose(1, 2).contiguous().to(initial_dtype)
return core_attn_out, last_recurrent_state
def torch_recurrent_gated_delta_rule(
query, key, value, g, beta, initial_state, output_final_state, use_qk_l2norm_in_kernel=False
):
initial_dtype = query.dtype
if use_qk_l2norm_in_kernel:
query = l2norm(query, dim=-1, eps=1e-6)
key = l2norm(key, dim=-1, eps=1e-6)
query, key, value, beta, g = [
x.transpose(1, 2).contiguous().to(torch.float32) for x in (query, key, value, beta, g)
]
batch_size, num_heads, sequence_length, k_head_dim = key.shape
v_head_dim = value.shape[-1]
scale = 1 / (query.shape[-1] ** 0.5)
query = query * scale
core_attn_out = torch.zeros(batch_size, num_heads, sequence_length, v_head_dim).to(value)
last_recurrent_state = (
torch.zeros(batch_size, num_heads, k_head_dim, v_head_dim).to(value)
if initial_state is None
else initial_state.to(value)
)
for i in range(sequence_length):
q_t = query[:, :, i]
k_t = key[:, :, i]
v_t = value[:, :, i]
g_t = g[:, :, i].exp().unsqueeze(-1).unsqueeze(-1)
beta_t = beta[:, :, i].unsqueeze(-1)
last_recurrent_state = last_recurrent_state * g_t
kv_mem = (last_recurrent_state * k_t.unsqueeze(-1)).sum(dim=-2)
delta = (v_t - kv_mem) * beta_t
last_recurrent_state = last_recurrent_state + k_t.unsqueeze(-1) * delta.unsqueeze(-2)
core_attn_out[:, :, i] = (last_recurrent_state * q_t.unsqueeze(-1)).sum(dim=-2)
if not output_final_state:
last_recurrent_state = None
core_attn_out = core_attn_out.transpose(1, 2).contiguous().to(initial_dtype)
return core_attn_out, last_recurrent_state
| Qwen3NextAttention |
python | walkccc__LeetCode | solutions/2829. Determine the Minimum Sum of a k-avoiding Array/2829.py | {
"start": 0,
"end": 658
} | class ____:
def minimumSum(self, n: int, k: int) -> int:
# These are the unique pairs that sum up to k:
# (1, k - 1), (2, k - 2), ..., (ceil(k // 2), floor(k // 2)).
# Our optimal strategy is to select 1, 2, ..., floor(k // 2), and then
# choose k, k + 1, ... if necessary, as selecting any number in the range
# [ceil(k // 2), k - 1] will result in a pair summing up to k.
def trapezoid(a: int, b: int) -> int:
"""Returns sum(a..b)."""
return (a + b) * (b - a + 1) // 2
mid = k // 2 # floor(k // 2)
if n <= mid:
return trapezoid(1, n)
return trapezoid(1, mid) + trapezoid(k, k + (n - mid - 1))
| Solution |
python | apache__airflow | dev/stats/calculate_statistics_provider_testing_issues.py | {
"start": 2032,
"end": 6850
} | class ____:
issue_number: int
title: str
num_providers: int
num_issues: int
tested_issues: int
url: str
users_involved: set[str]
users_commented: set[str]
def percent_tested(self) -> int:
return 100 * self.tested_issues // self.num_issues
def num_involved_users_who_commented(self) -> int:
return len(self.users_involved.intersection(self.users_commented))
def num_commenting_not_involved(self) -> int:
return len(self.users_commented - self.users_involved)
def percent_commented_among_involved(self) -> int:
return 100 * self.num_involved_users_who_commented() // len(self.users_involved)
def __str__(self):
return (
f"#{self.issue_number}: {self.title}: Num providers: {self.num_providers}, "
f"Issues: {self.num_issues}, Tested {self.tested_issues}, "
f"Percent Tested: {self.percent_tested()}%, "
f"Involved users: {len(self.users_involved)}, Commenting users: {len(self.users_commented)}, "
f"Involved who commented: {self.num_involved_users_who_commented()}, "
f"Extra people: {self.num_commenting_not_involved()}, "
f"Percent commented: {self.percent_commented_among_involved()}%, "
f"URL: {self.url}"
)
def get_users_from_content(content: str) -> set[str]:
users_match = re.findall(r"@\S*", content, re.MULTILINE)
users: set[str] = set()
for user_match in users_match:
users.add(user_match)
return users
def get_users_who_commented(issue: Issue) -> set[str]:
users: set[str] = set()
for comment in issue.get_comments():
users.add("@" + comment.user.login)
return users
def get_stats(issue: Issue) -> Stats:
content = issue.body
return Stats(
issue_number=issue.number,
title=issue.title,
num_providers=content.count("Provider "),
num_issues=content.count("- [") - 1,
tested_issues=content.count("[x]") + content.count("[X]") - 1,
url=issue.html_url,
users_involved=get_users_from_content(content),
users_commented=get_users_who_commented(issue),
)
def stats_to_rows(stats_list: list[Stats]) -> list[tuple]:
total = Stats(
issue_number=0,
title="",
num_providers=0,
num_issues=0,
tested_issues=0,
url="",
users_commented=set(),
users_involved=set(),
)
rows: list[tuple] = []
for stat in stats_list:
total.num_providers += stat.num_providers
total.num_issues += stat.num_issues
total.tested_issues += stat.tested_issues
total.users_involved.update(stat.users_involved)
total.users_commented.update(stat.users_commented)
rows.append(
(
f"[{stat.issue_number}]({stat.url})",
stat.num_providers,
stat.num_issues,
stat.tested_issues,
stat.percent_tested(),
len(stat.users_involved),
len(stat.users_commented),
stat.num_involved_users_who_commented(),
stat.num_commenting_not_involved(),
stat.percent_commented_among_involved(),
)
)
rows.append(
(
"Total",
total.num_providers,
total.num_issues,
total.tested_issues,
total.percent_tested(),
len(total.users_involved),
len(total.users_commented),
total.num_involved_users_who_commented(),
total.num_commenting_not_involved(),
total.percent_commented_among_involved(),
)
)
return rows
@option_github_token
@option_table
@cli.command()
def provide_stats(github_token: str, table: bool):
g = Github(github_token)
repo = g.get_repo("apache/airflow")
issues = repo.get_issues(labels=[PROVIDER_TESTING_LABEL], state="closed", sort="created", direction="asc")
stats_list: list[Stats] = []
for issue in issues:
stat = get_stats(issue)
if not table:
print(stat)
else:
stats_list.append(stat)
if table:
rows = stats_to_rows(stats_list)
print(
tabulate(
rows,
headers=(
"Issue",
"Num Providers",
"Num Issues",
"Tested Issues",
"Tested (%)",
"Involved",
"Commenting",
"Involved who commented",
"Extra people",
"User response (%)",
),
tablefmt="github",
)
)
if __name__ == "__main__":
cli()
| Stats |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP007.py | {
"start": 1084,
"end": 1371
} | class ____(Protocol[*_B0]):
def __iter__(self) -> Iterator[Union[*_B0]]:
...
# Regression test for: https://github.com/astral-sh/ruff/issues/8609
def f(x: Union[int, str, bytes]) -> None:
...
# Regression test for https://github.com/astral-sh/ruff/issues/14132
| Collection |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.