language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1585904,
"end": 1586097
} | class ____(sgqlc.types.Union):
"""An object which can have its data claimed or claim data from
another.
"""
__schema__ = github_schema
__types__ = (Mannequin, User)
| Claimable |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 78454,
"end": 78886
} | class ____(FieldValues):
"""
Values for `ListField` with no `child` argument.
"""
valid_inputs = [
([1, '2', True, [4, 5, 6]], [1, '2', True, [4, 5, 6]]),
]
invalid_inputs = [
('not a list', ['Expected a list of items but got type "str".']),
]
outputs = [
([1, '2', True, [4, 5, 6]], [1, '2', True, [4, 5, 6]]),
]
field = serializers.ListField()
| TestUnvalidatedListField |
python | kubernetes-client__python | kubernetes/client/models/v1_service_account_list.py | {
"start": 383,
"end": 7165
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1ServiceAccount]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1ServiceAccountList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1ServiceAccountList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ServiceAccountList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ServiceAccountList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ServiceAccountList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1ServiceAccountList. # noqa: E501
List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ # noqa: E501
:return: The items of this V1ServiceAccountList. # noqa: E501
:rtype: list[V1ServiceAccount]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1ServiceAccountList.
List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ # noqa: E501
:param items: The items of this V1ServiceAccountList. # noqa: E501
:type: list[V1ServiceAccount]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1ServiceAccountList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ServiceAccountList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ServiceAccountList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ServiceAccountList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ServiceAccountList. # noqa: E501
:return: The metadata of this V1ServiceAccountList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ServiceAccountList.
:param metadata: The metadata of this V1ServiceAccountList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ServiceAccountList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ServiceAccountList):
return True
return self.to_dict() != other.to_dict()
| V1ServiceAccountList |
python | kamyu104__LeetCode-Solutions | Python/sum-multiples.py | {
"start": 74,
"end": 331
} | class ____(object):
def sumOfMultiples(self, n):
"""
:type n: int
:rtype: int
"""
def f(d):
return d*((1+(n//d))*(n//d)//2)
return (f(3)+f(5)+f(7))-(f(3*5)+f(5*7)+f(7*3))+f(3*5*7)
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-looker/dagster_looker/api/dagster_looker_api_translator.py | {
"start": 4485,
"end": 10399
} | class ____:
@deprecated(
breaking_version="1.10",
additional_warn_text="Use `DagsterLookerApiTranslator.get_asset_spec().key` instead",
)
def get_view_asset_key(self, looker_structure: LookerApiTranslatorStructureData) -> AssetKey:
return self.get_asset_spec(looker_structure).key
def get_view_asset_spec(self, looker_structure: LookerApiTranslatorStructureData) -> AssetSpec:
lookml_view = check.inst(looker_structure.data, LookmlView)
return AssetSpec(
key=AssetKey(["view", lookml_view.view_name]),
)
@deprecated(
breaking_version="1.10",
additional_warn_text="Use `DagsterLookerApiTranslator.get_asset_spec().key` instead",
)
def get_explore_asset_key(self, looker_structure: LookerApiTranslatorStructureData) -> AssetKey:
return self.get_explore_asset_spec(looker_structure).key
def get_explore_asset_spec(
self, looker_structure: LookerApiTranslatorStructureData
) -> AssetSpec:
lookml_explore = check.inst(looker_structure.data, (LookmlModelExplore, DashboardFilter))
if isinstance(lookml_explore, LookmlModelExplore):
explore_base_view = LookmlView(
view_name=check.not_none(lookml_explore.view_name),
sql_table_name=check.not_none(lookml_explore.sql_table_name),
)
explore_join_views = [
LookmlView(
view_name=check.not_none(lookml_explore_join.from_ or lookml_explore_join.name),
sql_table_name=lookml_explore_join.sql_table_name,
)
for lookml_explore_join in (lookml_explore.joins or [])
]
return AssetSpec(
key=AssetKey(check.not_none(lookml_explore.id)),
deps=list(
{
self.get_asset_spec(
LookerApiTranslatorStructureData(
structure_data=LookerStructureData(
structure_type=LookerStructureType.VIEW, data=lookml_view
),
instance_data=looker_structure.instance_data,
)
).key
for lookml_view in [explore_base_view, *explore_join_views]
}
),
tags={
"dagster/kind/looker": "",
"dagster/kind/explore": "",
},
metadata={
"dagster-looker/web_url": MetadataValue.url(
f"{looker_structure.base_url}/explore/{check.not_none(lookml_explore.id).replace('::', '/')}"
),
},
)
elif isinstance(lookml_explore, DashboardFilter):
lookml_model_name = check.not_none(lookml_explore.model)
lookml_explore_name = check.not_none(lookml_explore.explore)
return AssetSpec(key=AssetKey(f"{lookml_model_name}::{lookml_explore_name}"))
else:
check.assert_never(lookml_explore)
@deprecated(
breaking_version="1.10",
additional_warn_text="Use `DagsterLookerApiTranslator.get_asset_spec().key` instead",
)
def get_dashboard_asset_key(
self, looker_structure: LookerApiTranslatorStructureData
) -> AssetKey:
return self.get_asset_spec(looker_structure).key
def get_dashboard_asset_spec(
self, looker_structure: LookerApiTranslatorStructureData
) -> AssetSpec:
looker_dashboard = check.inst(looker_structure.data, Dashboard)
user = None
if looker_structure.instance_data and looker_dashboard.user_id:
user = looker_structure.instance_data.users_by_id.get(looker_dashboard.user_id)
return AssetSpec(
key=AssetKey(f"{check.not_none(looker_dashboard.title)}_{looker_dashboard.id}"),
deps=list(
{
self.get_asset_spec(
LookerApiTranslatorStructureData(
structure_data=LookerStructureData(
structure_type=LookerStructureType.EXPLORE, data=dashboard_filter
),
instance_data=looker_structure.instance_data,
)
).key
for dashboard_filter in looker_dashboard.dashboard_filters or []
}
),
tags={
"dagster/kind/looker": "",
"dagster/kind/dashboard": "",
},
metadata={
"dagster-looker/web_url": MetadataValue.url(
f"{looker_structure.base_url}{looker_dashboard.url}"
),
},
owners=[user.email] if user and user.email else None,
)
@public
def get_asset_spec(self, looker_structure: LookerApiTranslatorStructureData) -> AssetSpec:
if looker_structure.structure_type == LookerStructureType.VIEW:
return self.get_view_asset_spec(looker_structure)
if looker_structure.structure_type == LookerStructureType.EXPLORE:
return self.get_explore_asset_spec(looker_structure)
elif looker_structure.structure_type == LookerStructureType.DASHBOARD:
return self.get_dashboard_asset_spec(looker_structure)
else:
check.assert_never(looker_structure.structure_type)
@deprecated(
breaking_version="1.10",
additional_warn_text="Use `DagsterLookerApiTranslator.get_asset_spec().key` instead",
)
@public
def get_asset_key(self, looker_structure: LookerApiTranslatorStructureData) -> AssetKey:
return self.get_asset_spec(looker_structure).key
| DagsterLookerApiTranslator |
python | huggingface__transformers | examples/pytorch/object-detection/run_object_detection.py | {
"start": 10709,
"end": 20847
} | class ____:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
default="facebook/detr-resnet-50",
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
ignore_mismatched_sizes: bool = field(
default=False,
metadata={
"help": "Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels)."
},
)
token: str = field(
default=None,
metadata={
"help": (
"The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
"generated when running `hf auth login` (stored in `~/.huggingface`)."
)
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether to trust the execution of code from datasets/models defined on the Hub."
" This option should only be set to `True` for repositories you trust and in which you have read the"
" code, as it will execute code present on the Hub on your local machine."
)
},
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+ f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# ------------------------------------------------------------------------------------------------
# Load dataset, prepare splits
# ------------------------------------------------------------------------------------------------
dataset = load_dataset(
data_args.dataset_name, cache_dir=model_args.cache_dir, trust_remote_code=model_args.trust_remote_code
)
# If we don't have a validation split, split off a percentage of train as validation
data_args.train_val_split = None if "validation" in dataset else data_args.train_val_split
if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
split = dataset["train"].train_test_split(data_args.train_val_split, seed=training_args.seed)
dataset["train"] = split["train"]
dataset["validation"] = split["test"]
# Get dataset categories and prepare mappings for label_name <-> label_id
if isinstance(dataset["train"].features["objects"], dict):
categories = dataset["train"].features["objects"]["category"].feature.names
else: # (for old versions of `datasets` that used Sequence({...}) of the objects)
categories = dataset["train"].features["objects"].feature["category"].names
id2label = dict(enumerate(categories))
label2id = {v: k for k, v in id2label.items()}
# ------------------------------------------------------------------------------------------------
# Load pretrained config, model and image processor
# ------------------------------------------------------------------------------------------------
common_pretrained_args = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"token": model_args.token,
"trust_remote_code": model_args.trust_remote_code,
}
config = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path,
label2id=label2id,
id2label=id2label,
**common_pretrained_args,
)
model = AutoModelForObjectDetection.from_pretrained(
model_args.model_name_or_path,
config=config,
ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
**common_pretrained_args,
)
image_processor = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path,
do_resize=True,
size={"max_height": data_args.image_square_size, "max_width": data_args.image_square_size},
do_pad=True,
pad_size={"height": data_args.image_square_size, "width": data_args.image_square_size},
use_fast=data_args.use_fast,
**common_pretrained_args,
)
# ------------------------------------------------------------------------------------------------
# Define image augmentations and dataset transforms
# ------------------------------------------------------------------------------------------------
max_size = data_args.image_square_size
train_augment_and_transform = A.Compose(
[
A.Compose(
[
A.SmallestMaxSize(max_size=max_size, p=1.0),
A.RandomSizedBBoxSafeCrop(height=max_size, width=max_size, p=1.0),
],
p=0.2,
),
A.OneOf(
[
A.Blur(blur_limit=7, p=0.5),
A.MotionBlur(blur_limit=7, p=0.5),
A.Defocus(radius=(1, 5), alias_blur=(0.1, 0.25), p=0.1),
],
p=0.1,
),
A.Perspective(p=0.1),
A.HorizontalFlip(p=0.5),
A.RandomBrightnessContrast(p=0.5),
A.HueSaturationValue(p=0.1),
],
bbox_params=A.BboxParams(format="coco", label_fields=["category"], clip=True, min_area=25),
)
validation_transform = A.Compose(
[A.NoOp()],
bbox_params=A.BboxParams(format="coco", label_fields=["category"], clip=True),
)
# Make transform functions for batch and apply for dataset splits
train_transform_batch = partial(
augment_and_transform_batch, transform=train_augment_and_transform, image_processor=image_processor
)
validation_transform_batch = partial(
augment_and_transform_batch, transform=validation_transform, image_processor=image_processor
)
dataset["train"] = dataset["train"].with_transform(train_transform_batch)
dataset["validation"] = dataset["validation"].with_transform(validation_transform_batch)
dataset["test"] = dataset["test"].with_transform(validation_transform_batch)
# ------------------------------------------------------------------------------------------------
# Model training and evaluation with Trainer API
# ------------------------------------------------------------------------------------------------
eval_compute_metrics_fn = partial(
compute_metrics, image_processor=image_processor, id2label=id2label, threshold=0.0
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=dataset["train"] if training_args.do_train else None,
eval_dataset=dataset["validation"] if training_args.do_eval else None,
processing_class=image_processor,
data_collator=collate_fn,
compute_metrics=eval_compute_metrics_fn,
)
# Training
if training_args.do_train:
train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
trainer.save_model()
trainer.log_metrics("train", train_result.metrics)
trainer.save_metrics("train", train_result.metrics)
trainer.save_state()
# Final evaluation
if training_args.do_eval:
metrics = trainer.evaluate(eval_dataset=dataset["test"], metric_key_prefix="test")
trainer.log_metrics("test", metrics)
trainer.save_metrics("test", metrics)
# Write model card and (optionally) push to hub
kwargs = {
"finetuned_from": model_args.model_name_or_path,
"dataset": data_args.dataset_name,
"tags": ["object-detection", "vision"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| ModelArguments |
python | pytorch__pytorch | test/inductor/test_decompose_mem_bound_mm.py | {
"start": 1184,
"end": 1383
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, input1, input2):
output = torch.mm(input1, input2)
return output
| MyModule3 |
python | PyCQA__pycodestyle | tests/test_util.py | {
"start": 69,
"end": 848
} | class ____(unittest.TestCase):
def test_normalize_paths(self):
self.assertEqual(normalize_paths(''), [])
self.assertEqual(normalize_paths([]), [])
self.assertEqual(normalize_paths(None), [])
self.assertEqual(normalize_paths(['foo']), ['foo'])
self.assertEqual(normalize_paths('foo'), ['foo'])
self.assertEqual(normalize_paths('foo,bar'), ['foo', 'bar'])
self.assertEqual(normalize_paths('foo, bar '), ['foo', 'bar'])
self.assertEqual(
normalize_paths('/foo/bar,baz/../bat'),
[os.path.realpath('/foo/bar'), os.path.abspath('bat')],
)
self.assertEqual(
normalize_paths(".pyc,\n build/*"),
['.pyc', os.path.abspath('build/*')],
)
| UtilTestCase |
python | PrefectHQ__prefect | tests/test_logging.py | {
"start": 36459,
"end": 49617
} | class ____:
@pytest.fixture
async def worker(self):
return APILogWorker.instance()
@pytest.fixture
def log_dict(self):
return LogCreate(
flow_run_id=uuid.uuid4(),
task_run_id=uuid.uuid4(),
name="test.logger",
level=10,
timestamp=now("UTC"),
message="hello",
).model_dump(mode="json")
async def test_send_logs_single_record(
self,
log_dict: dict[str, Any],
prefect_client: PrefectClient,
worker: APILogWorker,
):
worker.send(log_dict)
await worker.drain()
logs = await prefect_client.read_logs()
assert len(logs) == 1
assert logs[0].model_dump(include=log_dict.keys(), mode="json") == log_dict
async def test_send_logs_many_records(
self,
log_dict: dict[str, Any],
prefect_client: PrefectClient,
worker: APILogWorker,
):
# Use the read limit as the count since we'd need multiple read calls otherwise
count = prefect.settings.PREFECT_API_DEFAULT_LIMIT.value()
log_dict.pop("message")
for i in range(count):
new_log = log_dict.copy()
new_log["message"] = str(i)
worker.send(new_log)
await worker.drain()
logs = await prefect_client.read_logs()
assert len(logs) == count
for log in logs:
assert (
log.model_dump(
include=log_dict.keys(), exclude={"message"}, mode="json"
)
== log_dict
)
assert len(set(log.message for log in logs)) == count, "Each log is unique"
async def test_send_logs_writes_exceptions_to_stderr(
self,
log_dict: dict[str, Any],
capsys: pytest.CaptureFixture[str],
monkeypatch: pytest.MonkeyPatch,
worker: APILogWorker,
):
monkeypatch.setattr(
"prefect.client.orchestration.PrefectClient.create_logs",
MagicMock(side_effect=ValueError("Test")),
)
worker.send(log_dict)
await worker.drain()
err = capsys.readouterr().err
assert "--- Error logging to API ---" in err
assert "ValueError: Test" in err
async def test_send_logs_batches_by_size(
self, log_dict: dict[str, Any], monkeypatch: pytest.MonkeyPatch
):
mock_create_logs = AsyncMock()
monkeypatch.setattr(
"prefect.client.orchestration.PrefectClient.create_logs", mock_create_logs
)
log_size = APILogHandler()._get_payload_size(log_dict)
with temporary_settings(
updates={
PREFECT_LOGGING_TO_API_BATCH_SIZE: log_size + 1,
PREFECT_LOGGING_TO_API_MAX_LOG_SIZE: log_size,
}
):
worker = APILogWorker.instance()
worker.send(log_dict)
worker.send(log_dict)
worker.send(log_dict)
await worker.drain()
assert mock_create_logs.call_count == 3
async def test_logs_are_sent_immediately_when_stopped(
self, log_dict: dict[str, Any], prefect_client: PrefectClient
):
# Set a long interval
start_time = time.time()
with temporary_settings(updates={PREFECT_LOGGING_TO_API_BATCH_INTERVAL: "10"}):
worker = APILogWorker.instance()
worker.send(log_dict)
worker.send(log_dict)
await worker.drain()
end_time = time.time()
assert (
end_time - start_time
) < 5 # An arbitrary time less than the 10s interval
logs = await prefect_client.read_logs()
assert len(logs) == 2
async def test_logs_are_sent_immediately_when_flushed(
self,
log_dict: dict[str, Any],
prefect_client: PrefectClient,
worker: APILogWorker,
):
# Set a long interval
start_time = time.time()
with temporary_settings(updates={PREFECT_LOGGING_TO_API_BATCH_INTERVAL: "10"}):
worker.send(log_dict)
worker.send(log_dict)
await worker.drain()
end_time = time.time()
assert (
end_time - start_time
) < 5 # An arbitrary time less than the 10s interval
logs = await prefect_client.read_logs()
assert len(logs) == 2
async def test_logs_include_worker_id_if_available(
self, worker: APILogWorker, log_dict: dict[str, Any]
):
worker_id = str(uuid.uuid4())
log_dict["worker_id"] = worker_id
with mock.patch(
"prefect.client.orchestration.PrefectClient.create_logs", autospec=True
) as mock_create_logs:
worker.send(log_dict)
await worker.drain()
assert mock_create_logs.call_count == 1
logs = mock_create_logs.call_args.args[1]
assert len(logs) == 1
assert logs[0]["worker_id"] == worker_id
def test_flow_run_logger(flow_run: "FlowRun"):
logger = flow_run_logger(flow_run)
assert logger.name == "prefect.flow_runs"
assert logger.extra == {
"flow_run_name": flow_run.name,
"flow_run_id": str(flow_run.id),
"flow_name": "<unknown>",
}
def test_flow_run_logger_with_flow(flow_run: "FlowRun"):
@flow(name="foo")
def test_flow():
pass
logger = flow_run_logger(flow_run, test_flow)
assert logger.extra["flow_name"] == "foo"
def test_flow_run_logger_with_kwargs(flow_run: "FlowRun"):
logger = flow_run_logger(flow_run, foo="test", flow_run_name="bar")
assert logger.extra["foo"] == "test"
assert logger.extra["flow_run_name"] == "bar"
def test_task_run_logger(task_run: "TaskRun"):
logger = task_run_logger(task_run)
assert logger.name == "prefect.task_runs"
assert logger.extra == {
"task_run_name": task_run.name,
"task_run_id": str(task_run.id),
"flow_run_id": str(task_run.flow_run_id),
"flow_run_name": "<unknown>",
"flow_name": "<unknown>",
"task_name": "<unknown>",
}
def test_task_run_logger_with_task(task_run: "TaskRun"):
@task(name="task_run_logger_with_task")
def test_task():
pass
logger = task_run_logger(task_run, test_task)
assert logger.extra["task_name"] == "task_run_logger_with_task"
def test_task_run_logger_with_flow_run(task_run: "TaskRun", flow_run: "FlowRun"):
logger = task_run_logger(task_run, flow_run=flow_run)
assert logger.extra["flow_run_id"] == str(task_run.flow_run_id)
assert logger.extra["flow_run_name"] == flow_run.name
def test_task_run_logger_with_flow(task_run: "TaskRun"):
@flow(name="foo")
def test_flow():
pass
logger = task_run_logger(task_run, flow=test_flow)
assert logger.extra["flow_name"] == "foo"
def test_task_run_logger_with_flow_run_from_context(
task_run: "TaskRun", flow_run: "FlowRun"
):
@flow(name="foo")
def test_flow():
pass
with FlowRunContext.model_construct(flow_run=flow_run, flow=test_flow):
logger = task_run_logger(task_run)
assert (
logger.extra["flow_run_id"] == str(task_run.flow_run_id) == str(flow_run.id)
)
assert logger.extra["flow_run_name"] == flow_run.name
assert logger.extra["flow_name"] == test_flow.name == "foo"
def test_run_logger_with_flow_run_context_without_parent_flow_run_id(
caplog: pytest.LogCaptureFixture,
):
"""Test that get_run_logger works when called from a constructed FlowRunContext"""
with FlowRunContext.model_construct(flow_run=None, flow=None):
logger = get_run_logger()
with caplog.at_level(logging.INFO):
logger.info("test3141592")
assert "prefect.flow_runs" in caplog.text
assert "test3141592" in caplog.text
assert logger.extra["flow_run_id"] == "<unknown>"
assert logger.extra["flow_run_name"] == "<unknown>"
assert logger.extra["flow_name"] == "<unknown>"
async def test_run_logger_with_task_run_context_without_parent_flow_run_id(
prefect_client: PrefectClient, caplog: pytest.LogCaptureFixture
):
"""Test that get_run_logger works when passed a constructed TaskRunContext"""
@task
def foo():
pass
task_run = await prefect_client.create_task_run(
foo, flow_run_id=None, dynamic_key=""
)
task_run_context = TaskRunContext.model_construct(
task=foo, task_run=task_run, client=prefect_client
)
logger = get_run_logger(task_run_context)
with caplog.at_level(logging.INFO):
logger.info("test3141592")
assert "prefect.task_runs" in caplog.text
assert "test3141592" in caplog.text
def test_task_run_logger_with_kwargs(task_run: "TaskRun"):
logger = task_run_logger(task_run, foo="test", task_run_name="bar")
assert logger.extra["foo"] == "test"
assert logger.extra["task_run_name"] == "bar"
def test_run_logger_fails_outside_context():
with pytest.raises(MissingContextError, match="no active flow or task run context"):
get_run_logger()
async def test_run_logger_with_explicit_context_of_invalid_type():
with pytest.raises(TypeError, match="Received unexpected type 'str' for context."):
get_run_logger("my man!")
async def test_run_logger_with_explicit_context(
prefect_client: PrefectClient,
flow_run: "FlowRun",
):
@task
def foo():
pass
task_run = await prefect_client.create_task_run(foo, flow_run.id, dynamic_key="")
context = TaskRunContext.model_construct(
task=foo,
task_run=task_run,
client=prefect_client,
)
logger = get_run_logger(context)
assert logger.name == "prefect.task_runs"
assert logger.extra == {
"task_name": foo.name,
"task_run_id": str(task_run.id),
"task_run_name": task_run.name,
"flow_run_id": str(flow_run.id),
"flow_name": "<unknown>",
"flow_run_name": "<unknown>",
}
async def test_run_logger_with_explicit_context_overrides_existing(
prefect_client: PrefectClient,
flow_run: "FlowRun",
):
@task
def foo():
pass
@task
def bar():
pass
task_run = await prefect_client.create_task_run(foo, flow_run.id, dynamic_key="")
# Use `bar` instead of `foo` in context
context = TaskRunContext.model_construct(
task=bar,
task_run=task_run,
client=prefect_client,
)
logger = get_run_logger(context)
assert logger.extra["task_name"] == bar.name
async def test_run_logger_in_flow(prefect_client: PrefectClient):
@flow
def test_flow():
return get_run_logger()
state = test_flow(return_state=True)
flow_run = await prefect_client.read_flow_run(state.state_details.flow_run_id)
logger = await state.result()
assert logger.name == "prefect.flow_runs"
assert logger.extra == {
"flow_name": test_flow.name,
"flow_run_id": str(flow_run.id),
"flow_run_name": flow_run.name,
}
async def test_run_logger_extra_data(prefect_client: PrefectClient):
@flow
def test_flow():
return get_run_logger(foo="test", flow_name="bar")
state = test_flow(return_state=True)
flow_run = await prefect_client.read_flow_run(state.state_details.flow_run_id)
logger = await state.result()
assert logger.name == "prefect.flow_runs"
assert logger.extra == {
"flow_name": "bar",
"foo": "test",
"flow_run_id": str(flow_run.id),
"flow_run_name": flow_run.name,
}
async def test_run_logger_in_nested_flow(prefect_client: PrefectClient):
@flow
def child_flow():
return get_run_logger()
@flow
def test_flow():
return child_flow(return_state=True)
child_state = await test_flow(return_state=True).result()
flow_run = await prefect_client.read_flow_run(child_state.state_details.flow_run_id)
logger = await child_state.result()
assert logger.name == "prefect.flow_runs"
assert logger.extra == {
"flow_name": child_flow.name,
"flow_run_id": str(flow_run.id),
"flow_run_name": flow_run.name,
}
async def test_run_logger_in_task(
prefect_client: PrefectClient, events_pipeline: "EventsPipeline"
):
@task
def test_task():
return get_run_logger()
@flow
def test_flow():
return test_task(return_state=True)
flow_state = test_flow(return_state=True)
flow_run = await prefect_client.read_flow_run(flow_state.state_details.flow_run_id)
task_state = await flow_state.result()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(task_state.state_details.task_run_id)
logger = await task_state.result()
assert logger.name == "prefect.task_runs"
assert logger.extra == {
"task_name": test_task.name,
"task_run_id": str(task_run.id),
"task_run_name": task_run.name,
"flow_name": test_flow.name,
"flow_run_id": str(flow_run.id),
"flow_run_name": flow_run.name,
}
| TestAPILogWorker |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/self1.py | {
"start": 288,
"end": 770
} | class ____(Self): ...
# This should generate an error because Self can't be used in this context.
x: Self
def func1() -> None:
# This should generate an error because Self can't be used in this context.
x: Self
# This should generate an error because Self can't be used in this context.
def func2(a: Self) -> None: ...
# This should generate an error because Self can't be used in this context.
def func3() -> Self: ...
def is_self(t: object):
return t is Self
| A |
python | matplotlib__matplotlib | galleries/examples/misc/demo_agg_filter.py | {
"start": 1399,
"end": 1793
} | class ____(BaseFilter):
def __init__(self, offsets=(0, 0)):
self.offsets = offsets
def get_pad(self, dpi):
return int(max(self.offsets) / 72 * dpi)
def process_image(self, padded_src, dpi):
ox, oy = self.offsets
a1 = np.roll(padded_src, int(ox / 72 * dpi), axis=1)
a2 = np.roll(a1, -int(oy / 72 * dpi), axis=0)
return a2
| OffsetFilter |
python | scipy__scipy | scipy/optimize/tests/test_least_squares.py | {
"start": 929,
"end": 1866
} | class ____:
def __init__(self):
self.nfev = 0
def __call__(self, x, a=0):
self.nfev += 1
return fun_rosenbrock(x)
def jac_rosenbrock(x):
return np.array([
[-20 * x[0], 10],
[-1, 0]
])
def jac_rosenbrock_bad_dim(x):
return np.array([
[-20 * x[0], 10],
[-1, 0],
[0.0, 0.0]
])
def fun_rosenbrock_cropped(x):
return fun_rosenbrock(x)[0]
def jac_rosenbrock_cropped(x):
return jac_rosenbrock(x)[0]
# When x is 1-D array, return is 2-D array.
def fun_wrong_dimensions(x):
return np.array([x, x**2, x**3])
def jac_wrong_dimensions(x, a=0.0):
return np.atleast_3d(jac_trivial(x, a=a))
def fun_bvp(x):
n = int(np.sqrt(x.shape[0]))
u = np.zeros((n + 2, n + 2))
x = x.reshape((n, n))
u[1:-1, 1:-1] = x
y = u[:-2, 1:-1] + u[2:, 1:-1] + u[1:-1, :-2] + u[1:-1, 2:] - 4 * x + x**3
return y.ravel()
| Fun_Rosenbrock |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py | {
"start": 3394,
"end": 3544
} | class ____(ParentD):
def f(self):
if False: __class__ # Python injects __class__ into scope
builtins.super(ChildD1, self).f()
| ChildD1 |
python | Textualize__textual | docs/examples/styles/column_span.py | {
"start": 106,
"end": 511
} | class ____(App):
CSS_PATH = "column_span.tcss"
def compose(self):
yield Grid(
Placeholder(id="p1"),
Placeholder(id="p2"),
Placeholder(id="p3"),
Placeholder(id="p4"),
Placeholder(id="p5"),
Placeholder(id="p6"),
Placeholder(id="p7"),
)
if __name__ == "__main__":
app = MyApp()
app.run()
| MyApp |
python | FactoryBoy__factory_boy | tests/test_transformer.py | {
"start": 3705,
"end": 4596
} | class ____(TestCase):
def test_default_transform(self):
instance = WithMaybeFactory()
self.assertIs(instance.one, True)
self.assertEqual(instance.two, "YES")
self.assertIsNone(instance.three)
def test_yes_transform(self):
instance = WithMaybeFactory(one=True)
self.assertIs(instance.one, True)
self.assertEqual(instance.two, "YES")
self.assertIsNone(instance.three)
def test_no_transform(self):
instance = WithMaybeFactory(one=False)
self.assertIs(instance.one, False)
self.assertEqual(instance.two, "NO")
self.assertEqual(instance.three, "THREE")
def test_override(self):
instance = WithMaybeFactory(one=True, two="NI")
self.assertIs(instance.one, True)
self.assertEqual(instance.two, "NI")
self.assertIsNone(instance.three)
| TransformerMaybeTest |
python | pyodide__pyodide | src/py/pyodide/ffi/wrappers.py | {
"start": 318,
"end": 714
} | class ____(Protocol):
""":meta private:"""
def destroy(self):
pass
# An object with a no-op destroy method so we can do
#
# TIMEOUTS.pop(id, DUMMY_DESTROYABLE).destroy()
#
# and either it gets a real object and calls the real destroy method or it gets
# the fake which does nothing. This is to handle the case where clear_timeout is
# called after the timeout executes.
| Destroyable |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/util.py | {
"start": 19213,
"end": 20123
} | class ____(sql_util.ColumnAdapter):
"""ColumnAdapter which includes a role attribute."""
__slots__ = ("role",)
def __init__(
self,
role: _TraceAdaptRole,
selectable: Selectable,
*,
equivalents: Optional[_EquivalentColumnMap] = None,
adapt_required: bool = False,
allow_label_resolve: bool = True,
anonymize_labels: bool = False,
adapt_on_names: bool = False,
adapt_from_selectables: Optional[AbstractSet[FromClause]] = None,
):
self.role = role
super().__init__(
selectable,
equivalents=equivalents,
adapt_required=adapt_required,
allow_label_resolve=allow_label_resolve,
anonymize_labels=anonymize_labels,
adapt_on_names=adapt_on_names,
adapt_from_selectables=adapt_from_selectables,
)
| ORMStatementAdapter |
python | ray-project__ray | python/ray/data/_internal/logical/interfaces/physical_plan.py | {
"start": 248,
"end": 930
} | class ____(Plan):
"""The plan with a DAG of physical operators."""
def __init__(
self,
dag: "PhysicalOperator",
op_map: Dict["PhysicalOperator", LogicalOperator],
context: "DataContext",
):
super().__init__(context)
self._dag = dag
self._op_map = op_map
@property
def dag(self) -> "PhysicalOperator":
"""Get the DAG of physical operators."""
return self._dag
@property
def op_map(self) -> Dict["PhysicalOperator", LogicalOperator]:
"""
Get a mapping from physical operators to their corresponding logical operator.
"""
return self._op_map
| PhysicalPlan |
python | coleifer__peewee | tests/shortcuts.py | {
"start": 2003,
"end": 2154
} | class ____(TestModel):
name = TextField()
tag = ForeignKeyField(NodeTag)
parent = ForeignKeyField('self', null=True, backref='children')
| Node |
python | openai__openai-python | src/openai/resources/beta/chatkit/chatkit.py | {
"start": 651,
"end": 1646
} | class ____(SyncAPIResource):
@cached_property
def sessions(self) -> Sessions:
return Sessions(self._client)
@cached_property
def threads(self) -> Threads:
return Threads(self._client)
@cached_property
def with_raw_response(self) -> ChatKitWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return ChatKitWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> ChatKitWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return ChatKitWithStreamingResponse(self)
| ChatKit |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 199152,
"end": 199487
} | class ____(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
| NetworkConnectionTest |
python | numba__numba | numba/tests/test_cgutils.py | {
"start": 4109,
"end": 4863
} | class ____(TestCase):
"""Tests for code generation context functionality"""
def test_printf(self):
# Tests the printf() method
value = 123456
code = f"""if 1:
from numba import njit, types
from numba.extending import intrinsic
@intrinsic
def printf(tyctx, int_arg):
sig = types.void(int_arg)
def codegen(cgctx, builder, sig, llargs):
cgctx.printf(builder, \"%d\\n\", *llargs)
return sig, codegen
@njit
def foo():
printf({value})
foo()
"""
out, _ = run_in_subprocess(code)
self.assertIn(str(value), out.decode())
if __name__ == '__main__':
unittest.main()
| TestCGContext |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 240487,
"end": 241348
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of GrantMigratorRole"""
__schema__ = github_schema
__field_names__ = ("organization_id", "actor", "actor_type", "client_mutation_id")
organization_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="organizationId")
"""The ID of the organization that the user/team belongs to."""
actor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="actor")
"""The user login or Team slug to grant the migrator role."""
actor_type = sgqlc.types.Field(sgqlc.types.non_null(ActorType), graphql_name="actorType")
"""Specifies the type of the actor, can be either USER or TEAM."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| GrantMigratorRoleInput |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 219784,
"end": 220043
} | class ____(VegaLiteSchema):
"""ConditionalAxisLabelBaseline schema wrapper."""
_schema = {"$ref": "#/definitions/ConditionalAxisLabelBaseline"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ConditionalAxisLabelBaseline |
python | PyCQA__bandit | tests/unit/formatters/test_csv.py | {
"start": 272,
"end": 2498
} | class ____(testtools.TestCase):
def setUp(self):
super().setUp()
conf = config.BanditConfig()
self.manager = manager.BanditManager(conf, "file")
(tmp_fd, self.tmp_fname) = tempfile.mkstemp()
self.context = {
"filename": self.tmp_fname,
"lineno": 4,
"linerange": [4],
"col_offset": 8,
"end_col_offset": 16,
}
self.check_name = "hardcoded_bind_all_interfaces"
self.issue = issue.Issue(
bandit.MEDIUM,
123,
bandit.MEDIUM,
"Possible binding to all interfaces.",
)
self.manager.out_file = self.tmp_fname
self.issue.fname = self.context["filename"]
self.issue.lineno = self.context["lineno"]
self.issue.linerange = self.context["linerange"]
self.issue.col_offset = self.context["col_offset"]
self.issue.end_col_offset = self.context["end_col_offset"]
self.issue.test = self.check_name
self.manager.results.append(self.issue)
def test_report(self):
with open(self.tmp_fname, "w") as tmp_file:
b_csv.report(
self.manager,
tmp_file,
self.issue.severity,
self.issue.confidence,
)
with open(self.tmp_fname) as f:
reader = csv.DictReader(f)
data = next(reader)
self.assertEqual(self.tmp_fname, data["filename"])
self.assertEqual(self.issue.severity, data["issue_severity"])
self.assertEqual(self.issue.confidence, data["issue_confidence"])
self.assertEqual(self.issue.text, data["issue_text"])
self.assertEqual(str(self.context["lineno"]), data["line_number"])
self.assertEqual(
str(self.context["linerange"]), data["line_range"]
)
self.assertEqual(self.check_name, data["test_name"])
self.assertIsNotNone(data["more_info"])
self.assertEqual(str(self.issue.col_offset), data["col_offset"])
self.assertEqual(
str(self.issue.end_col_offset), data["end_col_offset"]
)
| CsvFormatterTests |
python | django-compressor__django-compressor | compressor/tests/test_storages.py | {
"start": 887,
"end": 1239
} | class ____(Storage):
"""
A dummy storage backend that mimics a remote storage that does not implement
`.path()` e.g. `storages.backends.s3.S3Storage`.
"""
def exists(self, name):
return True
def path(self, name):
raise NotImplementedError
@override_settings(COMPRESS_ENABLED=True)
| DummyPathNotImplementedStorage |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/pg8000.py | {
"start": 5653,
"end": 5917
} | class ____(INTERVAL):
render_bind_cast = True
def get_dbapi_type(self, dbapi):
return dbapi.INTERVAL
@classmethod
def adapt_emulated_to_native(cls, interval, **kw):
return _PGInterval(precision=interval.second_precision)
| _PGInterval |
python | pydantic__pydantic | tests/benchmarks/basemodel_eq_performance.py | {
"start": 8364,
"end": 12698
} | class ____(pydantic.BaseModel, frozen=True):
def __eq__(self, other: Any) -> bool:
if isinstance(other, pydantic.BaseModel):
# When comparing instances of generic types for equality, as long as all field values are equal,
# only require their generic origin types to be equal, rather than exact type equality.
# This prevents headaches like MyGeneric(x=1) != MyGeneric[Any](x=1).
self_type = self.__pydantic_generic_metadata__['origin'] or self.__class__
other_type = other.__pydantic_generic_metadata__['origin'] or other.__class__
# Perform common checks first
if not (
self_type == other_type
and self.__pydantic_private__ == other.__pydantic_private__
and self.__pydantic_extra__ == other.__pydantic_extra__
):
return False
# Fix GH-7444 by comparing only pydantic fields
# We provide a fast-path for performance: __dict__ comparison is *much* faster
# See tests/benchmarks/test_basemodel_eq_performances.py and GH-7825 for benchmarks
if self.__dict__ == other.__dict__:
# If the check above passes, then pydantic fields are equal, we can return early
return True
else:
# Else, we need to perform a more detailed, costlier comparison
model_fields = type(self).model_fields.keys()
getter = operator.itemgetter(*model_fields) if model_fields else lambda _: None
try:
return getter(self.__dict__) == getter(other.__dict__)
except KeyError:
return getter(_SafeGetItemProxy(self.__dict__)) == getter(_SafeGetItemProxy(other.__dict__))
else:
return NotImplemented # delegate to the other item in the comparison
IMPLEMENTATIONS = {
# Commented out because it is too slow for benchmark to complete in reasonable time
# "dict comprehension": DictComprehensionEqModel,
'itemgetter': ItemGetterEqModel,
'itemgetter+fastpath': ItemGetterEqModelFastPath,
# Commented-out because it is too slow to run with run_benchmark_random_unequal
#'itemgetter+safety+fastpath': SafeItemGetterEqModelFastPath,
'itemgetter+fastpath+safe-fallback': ItemGetterEqModelFastPathFallback,
}
# Benchmark running & plotting code
def plot_all_benchmark(
bases: dict[str, type[pydantic.BaseModel]],
sizes: list[int],
) -> figure.Figure:
import matplotlib.pyplot as plt
n_rows, n_cols = len(BENCHMARKS), 2
fig, axes = plt.subplots(n_rows, n_cols, figsize=(n_cols * 6, n_rows * 4))
for row, (name, benchmark) in enumerate(BENCHMARKS.items()):
for col, mimic_cached_property in enumerate([False, True]):
plot_benchmark(
f'{name}, {mimic_cached_property=}',
benchmark,
bases=bases,
sizes=sizes,
mimic_cached_property=mimic_cached_property,
ax=axes[row, col],
)
for ax in axes.ravel():
ax.legend()
fig.suptitle(f'python {PYTHON_VERSION}, pydantic {PYDANTIC_VERSION}')
return fig
def plot_benchmark(
title: str,
benchmark: Callable,
bases: dict[str, type[pydantic.BaseModel]],
sizes: list[int],
mimic_cached_property: bool,
ax: axes.Axes | None = None,
):
import matplotlib.pyplot as plt
import numpy as np
ax = ax or plt.gca()
arr_sizes = np.asarray(sizes)
baseline = benchmark(
title=f'{title}, baseline',
base=OldImplementationModel,
sizes=sizes,
mimic_cached_property=mimic_cached_property,
)
ax.plot(sizes, baseline / baseline, label='baseline')
for name, base in bases.items():
times = benchmark(
title=f'{title}, {name}',
base=base,
sizes=sizes,
mimic_cached_property=mimic_cached_property,
)
mask_valid = ~np.isnan(times)
ax.plot(arr_sizes[mask_valid], times[mask_valid] / baseline[mask_valid], label=name)
ax.set_title(title)
ax.set_xlabel('Number of pydantic fields')
ax.set_ylabel('Average time relative to baseline')
return ax
| ItemGetterEqModelFastPathFallback |
python | kamyu104__LeetCode-Solutions | Python/minimum-size-subarray-sum.py | {
"start": 29,
"end": 565
} | class ____(object):
# @param {integer} s
# @param {integer[]} nums
# @return {integer}
def minSubArrayLen(self, s, nums):
start = 0
sum = 0
min_size = float("inf")
for i in xrange(len(nums)):
sum += nums[i]
while sum >= s:
min_size = min(min_size, i - start + 1)
sum -= nums[start]
start += 1
return min_size if min_size != float("inf") else 0
# Time: O(nlogn)
# Space: O(n)
# Binary search solution.
| Solution |
python | django__django | django/contrib/gis/db/models/functions.py | {
"start": 14979,
"end": 15318
} | class ____(OracleToleranceMixin, GeoFuncMixin, Transform):
lookup_name = "isvalid"
output_field = BooleanField()
def as_oracle(self, compiler, connection, **extra_context):
sql, params = super().as_oracle(compiler, connection, **extra_context)
return "CASE %s WHEN 'TRUE' THEN 1 ELSE 0 END" % sql, params
| IsValid |
python | marshmallow-code__marshmallow | src/marshmallow/fields.py | {
"start": 1566,
"end": 2481
} | class ____(typing.TypedDict, total=False):
load_default: typing.Any
dump_default: typing.Any
data_key: str | None
attribute: str | None
validate: types.Validator | typing.Iterable[types.Validator] | None
required: bool
allow_none: bool | None
load_only: bool
dump_only: bool
error_messages: dict[str, str] | None
metadata: typing.Mapping[str, typing.Any] | None
def _resolve_field_instance(cls_or_instance: Field | type[Field]) -> Field:
"""Return a Field instance from a Field class or instance.
:param cls_or_instance: Field class or instance.
"""
if isinstance(cls_or_instance, type):
if not issubclass(cls_or_instance, Field):
raise _FieldInstanceResolutionError
return cls_or_instance()
if not isinstance(cls_or_instance, Field):
raise _FieldInstanceResolutionError
return cls_or_instance
| _BaseFieldKwargs |
python | pytorch__pytorch | torch/_dynamo/variables/ctx_manager.py | {
"start": 15632,
"end": 17320
} | class ____(ContextWrappingVariable):
"""represents torch.func.grad increment/decrement nesting"""
# A guard is needed as the grad level is baked into the torch FX graph
# This is fine if grad is only called from within the function
# being compiled. But the FX graph may be invalid in the case of a grad
# call from eager that calls the compiled function, as the grad levels
# may be different.
_guards_singleton = Guard(GlobalStateSource(), GuardBuilder.FUNCTORCH_STACK_MATCH) # type: ignore[arg-type]
@staticmethod
def create(
tx: "InstructionTranslator", **kwargs: Any
) -> "GradIncrementNestingCtxManagerVariable":
var = GradIncrementNestingCtxManagerVariable(
target_values=None,
initial_values=None,
**kwargs,
)
return var
def enter(self, tx: "InstructionTranslator") -> VariableTracker:
install_guard(self._guards_singleton)
grad_level = torch._C._functorch._grad_increment_nesting()
self.set_cleanup_hook(tx, lambda: torch._C._functorch._grad_decrement_nesting())
self.proxy = tx.output.create_node(
"call_function",
torch._C._functorch._grad_increment_nesting,
(),
{},
)
return variables.ConstantVariable.create(grad_level)
def exit(
self, tx: "InstructionTranslator", *args: VariableTracker
) -> VariableTracker:
self.cleanup()
tx.output.create_node(
"call_function", torch._C._functorch._grad_decrement_nesting, (), {}
)
return variables.ConstantVariable.create(None)
| GradIncrementNestingCtxManagerVariable |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_tutorial01.py | {
"start": 315,
"end": 1559
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("tutorial01.xlsx")
self.ignore_files = [
"xl/calcChain.xml",
"[Content_Types].xml",
"xl/_rels/workbook.xml.rels",
]
def test_create_file(self):
"""Example spreadsheet used in the tutorial."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
# Some data we want to write to the worksheet.
expenses = (
["Rent", 1000],
["Gas", 100],
["Food", 300],
["Gym", 50],
)
# Start from the first cell. Rows and columns are zero indexed.
row = 0
col = 0
# Iterate over the data and write it out row by row.
for item, cost in expenses:
worksheet.write(row, col, item)
worksheet.write(row, col + 1, cost)
row += 1
# Write a total using a formula.
worksheet.write(row, 0, "Total")
worksheet.write(row, 1, "=SUM(B1:B4)", None, 1450)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver32.py | {
"start": 344,
"end": 480
} | class ____(Protocol[TD]):
def __add__(self, other: TD, /) -> Self: ...
def __sub__(self, other: Self, /) -> TD: ...
| DateTimeProto |
python | huggingface__transformers | tests/trainer/test_trainer.py | {
"start": 21978,
"end": 26861
} | class ____:
def check_saved_checkpoints(
self, output_dir, freq, total, is_pretrained=True, safe_weights=True, use_scaler=False
):
weights_file = WEIGHTS_NAME if not safe_weights else SAFE_WEIGHTS_NAME
file_list = [weights_file, "training_args.bin", "optimizer.pt", "scheduler.pt", "trainer_state.json"]
if is_pretrained:
file_list.append("config.json")
if use_scaler:
file_list.append("scaler.pt")
for step in range(freq, total, freq):
checkpoint = os.path.join(output_dir, f"checkpoint-{step}")
self.assertTrue(os.path.isdir(checkpoint))
for filename in file_list:
self.assertTrue(os.path.isfile(os.path.join(checkpoint, filename)))
def check_best_model_has_been_loaded(
self, output_dir, freq, total, trainer, metric, greater_is_better=False, is_pretrained=True, safe_weights=True
):
checkpoint = os.path.join(output_dir, f"checkpoint-{(total // freq) * freq}")
log_history = TrainerState.load_from_json(os.path.join(checkpoint, "trainer_state.json")).log_history
values = [d[metric] for d in log_history]
best_value = max(values) if greater_is_better else min(values)
best_checkpoint = (values.index(best_value) + 1) * freq
checkpoint = os.path.join(output_dir, f"checkpoint-{best_checkpoint}")
if is_pretrained:
best_model = RegressionPreTrainedModel.from_pretrained(checkpoint)
best_model.to(trainer.args.device)
else:
best_model = RegressionModel()
if not safe_weights:
check_torch_load_is_safe()
state_dict = torch.load(os.path.join(checkpoint, WEIGHTS_NAME), weights_only=True)
else:
state_dict = safetensors.torch.load_file(os.path.join(checkpoint, SAFE_WEIGHTS_NAME))
best_model.load_state_dict(state_dict)
best_model.to(trainer.args.device)
torch.testing.assert_close(best_model.a, trainer.model.a)
torch.testing.assert_close(best_model.b, trainer.model.b)
metrics = trainer.evaluate()
self.assertEqual(metrics[metric], best_value)
def remove_nan_logs(self, log):
for key in list(log.keys()):
if log[key] != log[key]: # Check if the value is NaN
del log[key]
def check_trainer_state_are_the_same(self, trainer_state, trainer_state1):
# We'll pop things so operate on copies.
state = trainer_state.copy()
state1 = trainer_state1.copy()
# Log history main contain different logs for the time metrics (after resuming a training).
log_history = state.pop("log_history", None)
log_history1 = state1.pop("log_history", None)
self.assertEqual(state, state1)
skip_log_keys = ["train_runtime", "train_samples_per_second", "train_steps_per_second", "train_loss"]
for log, log1 in zip(log_history, log_history1):
for key in skip_log_keys:
_ = log.pop(key, None)
_ = log1.pop(key, None)
self.remove_nan_logs(log)
self.remove_nan_logs(log1)
self.assertEqual(log, log1)
def convert_to_sharded_checkpoint(self, folder, save_safe=True, load_safe=True):
# Converts a checkpoint of a regression model to a sharded checkpoint.
if load_safe:
loader = safetensors.torch.load_file
weights_file = os.path.join(folder, SAFE_WEIGHTS_NAME)
else:
check_torch_load_is_safe()
loader = torch.load
weights_file = os.path.join(folder, WEIGHTS_NAME)
if save_safe:
extension = "safetensors"
saver = safetensors.torch.save_file
index_file = os.path.join(folder, SAFE_WEIGHTS_INDEX_NAME)
shard_name = SAFE_WEIGHTS_NAME
else:
extension = "bin"
saver = torch.save
index_file = os.path.join(folder, WEIGHTS_INDEX_NAME)
shard_name = WEIGHTS_NAME
state_dict = loader(weights_file)
os.remove(weights_file)
keys = list(state_dict.keys())
shard_files = [
shard_name.replace(f".{extension}", f"-{idx + 1:05d}-of-{len(keys):05d}.{extension}")
for idx in range(len(keys))
]
index = {"metadata": {}, "weight_map": {key: shard_files[i] for i, key in enumerate(keys)}}
with open(index_file, "w", encoding="utf-8") as f:
content = json.dumps(index, indent=2, sort_keys=True) + "\n"
f.write(content)
for param_name, shard_file in zip(keys, shard_files):
saver({param_name: state_dict[param_name]}, os.path.join(folder, shard_file))
@require_torch
@require_sentencepiece
@require_tokenizers
| TrainerIntegrationCommon |
python | google__jax | tests/pallas/tpu_pallas_pipeline_test.py | {
"start": 71021,
"end": 72505
} | class ____(parameterized.TestCase):
def setUp(self):
super().setUp()
if not jtu.is_device_tpu_at_least(4):
self.skipTest('Only TPU v4+ allowed.')
@parameterized.named_parameters(
('float32', 'float32'), ('bfloat16', 'bfloat16'), ('int8', 'int8')
)
@hp.given(
hps.integers(1, 1024),
hps.integers(1, 1024),
hps.integers(1, 1024),
hps.sampled_from([8, 16, 32, 128, 256, 512]),
hps.sampled_from([128, 256, 512]),
hps.sampled_from([128, 256, 512]),
hps.integers(0, 4),
)
def test_padded_matmul(self, dtype, m, k, n, bm, bk, bn, seed):
if dtype == 'int8' and jtu.is_device_tpu_at_least(6):
self.skipTest('Not implemented for TPU v6.')
hp.assume(bm <= m)
hp.assume(bn <= n)
hp.assume(bk <= k)
if dtype == 'bfloat16':
hp.assume(bm >= 16)
if dtype == 'int8':
if not jtu.is_device_tpu_at_least(5):
self.skipTest('Only TPU v5+ allowed for int8.')
hp.assume(bm >= 32)
k1, k2 = jax.random.split(jax.random.key(seed))
x = jax.random.normal(k1, (m, k), jnp.float32).astype(dtype)
y = jax.random.normal(k2, (k, n), jnp.float32).astype(dtype)
out = matmul(x, y, bm=bm, bk=bk, bn=bn)
expected = x @ y
atol = rtol = 2.3e-5
if dtype == 'bfloat16':
out = out.astype('float32')
expected = expected.astype('float32')
atol = rtol = 1e-2
np.testing.assert_allclose(out, expected, atol=atol, rtol=rtol)
| PaddedPipelineEmitterTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_points06.py | {
"start": 315,
"end": 1668
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_points06.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with point formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [71050368, 71051904]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"marker": {"type": "automatic"},
"points": [{"fill": {"color": "red"}}],
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
"marker": {"type": "automatic"},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | getsentry__sentry | tests/sentry/incidents/test_logic.py | {
"start": 136551,
"end": 138643
} | class ____(TestCase):
def test_none(self) -> None:
assert list(get_available_action_integrations_for_org(self.organization)) == []
def test_unregistered(self) -> None:
integration, _ = self.create_provider_integration_for(
self.organization, user=None, external_id="1", provider="something_random"
)
assert list(get_available_action_integrations_for_org(self.organization)) == []
def test_registered(self) -> None:
integration, _ = self.create_provider_integration_for(
self.organization, user=None, external_id="1", provider="slack"
)
assert list(get_available_action_integrations_for_org(self.organization)) == [
serialize_integration(integration)
]
def test_mixed(self) -> None:
integration, _ = self.create_provider_integration_for(
self.organization, user=None, external_id="1", provider="slack"
)
other_integration, _ = self.create_provider_integration_for(
self.organization, user=None, external_id="12345", provider="random"
)
assert list(get_available_action_integrations_for_org(self.organization)) == [
serialize_integration(integration)
]
def test_disabled_integration(self) -> None:
integration, _ = self.create_provider_integration_for(
self.organization,
user=None,
external_id="1",
provider="slack",
status=ObjectStatus.DISABLED,
)
assert list(get_available_action_integrations_for_org(self.organization)) == []
def test_disabled_org_integration(self) -> None:
integration, org_integration = self.create_provider_integration_for(
self.organization, user=None, external_id="1", provider="slack"
)
with assume_test_silo_mode_of(OrganizationIntegration):
org_integration.update(status=ObjectStatus.DISABLED)
assert list(get_available_action_integrations_for_org(self.organization)) == []
| GetAvailableActionIntegrationsForOrgTest |
python | pytorch__pytorch | test/distributed/test_c10d_functional_native.py | {
"start": 19334,
"end": 21220
} | class ____(dist.ProcessGroup):
"""
This process group discards all data passed to it and returns success. This
is intended for rare cases where we want to discard certain operations
without modifying the underlying library.
This PG only supports world_size of 1.
"""
def __init__(self) -> None:
super().__init__(0, 1)
self._group_name = "dummy:dummy"
self.waits = 0
self.dels = 0
def broadcast(self, tensor_list: list[torch.Tensor], opts: object) -> dist.Work:
return _DummyWork(self)
def allgather_into_tensor_coalesced(
self,
output_lists: list[torch.Tensor],
input_list: list[torch.Tensor],
opts: object,
) -> dist.Work:
return _DummyWork(self)
def allreduce(self, tensors: list[torch.Tensor], opts: object) -> dist.Work:
return _DummyWork(self)
def reduce_scatter_tensor_coalesced(
self,
outputTensors: list[torch.Tensor],
inputTensors: list[torch.Tensor],
opts: object,
) -> dist.Work:
return _DummyWork(self)
@property
def group_name(self) -> str:
if self._group_name is None:
raise ValueError("ProcessGroup name not set")
return self._group_name
def _set_group_name(self, name: str) -> None:
self._group_name = name
def register(self) -> dist.ProcessGroup:
def create_pg(
prefix_store: dist.PrefixStore, rank: int, world_size: int, timeout: float
) -> dist.ProcessGroup:
return self
dist.Backend.register_backend(self.group_name, create_pg, devices=["cpu"])
return dist.new_group(
ranks=[0],
backend=self.group_name,
group_desc=self.group_name,
timeout=timedelta(seconds=60.0), # this timeout isn't used
)
| ProcessGroupDummy |
python | wandb__wandb | wandb/sdk/artifacts/_generated/artifact_by_name.py | {
"start": 221,
"end": 301
} | class ____(GQLResult):
project: Optional[ArtifactByNameProject]
| ArtifactByName |
python | lxml__lxml | src/lxml/html/__init__.py | {
"start": 45093,
"end": 48368
} | class ____(InputMixin, HtmlElement):
"""
``<select>`` element. You can get the name with ``.name``.
``.value`` will be the value of the selected option, unless this
is a multi-select element (``<select multiple>``), in which case
it will be a set-like object. In either case ``.value_options``
gives the possible values.
The boolean attribute ``.multiple`` shows if this is a
multi-select.
"""
@property
def value(self):
"""
Get/set the value of this select (the selected option).
If this is a multi-select, this is a set-like object that
represents all the selected options.
"""
if self.multiple:
return MultipleSelectOptions(self)
options = _options_xpath(self)
try:
selected_option = next(el for el in reversed(options) if el.get('selected') is not None)
except StopIteration:
try:
selected_option = next(el for el in options if el.get('disabled') is None)
except StopIteration:
return None
value = selected_option.get('value')
if value is None:
value = (selected_option.text or '').strip()
return value
@value.setter
def value(self, value):
if self.multiple:
if isinstance(value, str):
raise TypeError("You must pass in a sequence")
values = self.value
values.clear()
values.update(value)
return
checked_option = None
if value is not None:
for el in _options_xpath(self):
opt_value = el.get('value')
if opt_value is None:
opt_value = (el.text or '').strip()
if opt_value == value:
checked_option = el
break
else:
raise ValueError(
"There is no option with the value of %r" % value)
for el in _options_xpath(self):
if 'selected' in el.attrib:
del el.attrib['selected']
if checked_option is not None:
checked_option.set('selected', '')
@value.deleter
def value(self):
# FIXME: should del be allowed at all?
if self.multiple:
self.value.clear()
else:
self.value = None
@property
def value_options(self):
"""
All the possible values this select can have (the ``value``
attribute of all the ``<option>`` elements.
"""
options = []
for el in _options_xpath(self):
value = el.get('value')
if value is None:
value = (el.text or '').strip()
options.append(value)
return options
@property
def multiple(self):
"""
Boolean attribute: is there a ``multiple`` attribute on this element.
"""
return 'multiple' in self.attrib
@multiple.setter
def multiple(self, value):
if value:
self.set('multiple', '')
elif 'multiple' in self.attrib:
del self.attrib['multiple']
HtmlElementClassLookup._default_element_classes['select'] = SelectElement
| SelectElement |
python | django__django | django/db/models/expressions.py | {
"start": 76073,
"end": 76263
} | class ____(WindowFrame):
frame_type = "RANGE"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_range_start_end(start, end)
| ValueRange |
python | pypa__pip | tests/unit/test_req_file.py | {
"start": 24531,
"end": 25705
} | class ____:
# this suite is really just testing optparse, but added it anyway
def test_variant1(
self, line_processor: LineProcessor, finder: PackageFinder
) -> None:
line_processor("-i url", "file", 1, finder=finder)
assert finder.index_urls == ["url"]
def test_variant2(
self, line_processor: LineProcessor, finder: PackageFinder
) -> None:
line_processor("-i 'url'", "file", 1, finder=finder)
assert finder.index_urls == ["url"]
def test_variant3(
self, line_processor: LineProcessor, finder: PackageFinder
) -> None:
line_processor("--index-url=url", "file", 1, finder=finder)
assert finder.index_urls == ["url"]
def test_variant4(
self, line_processor: LineProcessor, finder: PackageFinder
) -> None:
line_processor("--index-url url", "file", 1, finder=finder)
assert finder.index_urls == ["url"]
def test_variant5(
self, line_processor: LineProcessor, finder: PackageFinder
) -> None:
line_processor("--index-url='url'", "file", 1, finder=finder)
assert finder.index_urls == ["url"]
| TestOptionVariants |
python | getsentry__sentry | tests/sentry/models/test_group.py | {
"start": 1269,
"end": 15201
} | class ____(TestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.min_ago = before_now(minutes=1).isoformat()
def test_is_resolved(self) -> None:
group = self.create_group(status=GroupStatus.RESOLVED)
assert group.is_resolved()
group.status = GroupStatus.IGNORED
assert not group.is_resolved()
group.status = GroupStatus.UNRESOLVED
assert not group.is_resolved()
group.last_seen = timezone.now() - timedelta(hours=12)
group.project.update_option("sentry:resolve_age", 24)
assert not group.is_resolved()
group.project.update_option("sentry:resolve_age", 1)
assert group.is_resolved()
def test_is_ignored_with_expired_snooze(self) -> None:
group = self.create_group(status=GroupStatus.IGNORED)
GroupSnooze.objects.create(group=group, until=timezone.now() - timedelta(minutes=1))
assert not group.is_ignored()
def test_status_with_expired_snooze(self) -> None:
group = self.create_group(status=GroupStatus.IGNORED)
GroupSnooze.objects.create(group=group, until=timezone.now() - timedelta(minutes=1))
assert group.get_status() == GroupStatus.UNRESOLVED
def test_deleting_release_does_not_delete_group(self) -> None:
project = self.create_project()
release = Release.objects.create(version="a", organization_id=project.organization_id)
release.add_project(project)
group = self.create_group(project=project, first_release=release)
with pytest.raises(ProtectedError):
release.delete()
group = Group.objects.get(id=group.id)
assert group.first_release == release
def test_save_truncate_message(self) -> None:
assert len(self.create_group(message="x" * 300).message) == 255
assert self.create_group(message="\nfoo\n ").message == "foo"
assert self.create_group(message="foo").message == "foo"
assert self.create_group(message="").message == ""
def test_get_group_with_redirect(self) -> None:
group = self.create_group()
assert get_group_with_redirect(group.id) == (group, False)
duplicate_id = self.create_group().id
Group.objects.filter(id=duplicate_id).delete()
GroupRedirect.objects.create(group_id=group.id, previous_group_id=duplicate_id)
assert get_group_with_redirect(duplicate_id) == (group, True)
# We shouldn't end up in a case where the redirect points to a bad
# reference, but testing this path for completeness.
group.delete()
with pytest.raises(Group.DoesNotExist):
get_group_with_redirect(duplicate_id)
def test_get_group_with_redirect_from_qualified_short_id(self) -> None:
group = self.create_group()
assert group.qualified_short_id
assert get_group_with_redirect(
group.qualified_short_id, organization=group.project.organization
) == (group, False)
duplicate_group = self.create_group()
duplicate_id = duplicate_group.id
GroupRedirect.create_for_group(duplicate_group, group)
Group.objects.filter(id=duplicate_id).delete()
assert get_group_with_redirect(
duplicate_group.qualified_short_id, organization=group.project.organization
) == (group, True)
# We shouldn't end up in a case where the redirect points to a bad
# reference, but testing this path for completeness.
group.delete()
with pytest.raises(Group.DoesNotExist):
get_group_with_redirect(
duplicate_group.qualified_short_id, organization=group.project.organization
)
def test_invalid_shared_id(self) -> None:
with pytest.raises(Group.DoesNotExist):
Group.objects.from_share_id("adc7a5b902184ce3818046302e94f8ec")
def test_qualified_share_id(self) -> None:
project = self.create_project(name="foo bar")
group = self.create_group(project=project, short_id=project.next_short_id())
short_id = group.qualified_short_id
assert short_id.startswith("FOO-BAR-")
group2 = Group.objects.by_qualified_short_id(group.organization.id, short_id)
assert group2 == group
with pytest.raises(Group.DoesNotExist):
Group.objects.by_qualified_short_id(
group.organization.id, "server_name:my-server-with-dashes-0ac14dadda3b428cf"
)
group.update(status=GroupStatus.PENDING_DELETION, substatus=None)
with pytest.raises(Group.DoesNotExist):
Group.objects.by_qualified_short_id(group.organization.id, short_id)
def test_qualified_share_id_bulk(self) -> None:
project = self.create_project(name="foo bar")
group = self.create_group(project=project, short_id=project.next_short_id())
group_2 = self.create_group(project=project, short_id=project.next_short_id())
group_short_id = group.qualified_short_id
group_2_short_id = group_2.qualified_short_id
assert [group] == Group.objects.by_qualified_short_id_bulk(
group.organization.id, [group_short_id]
)
assert {group, group_2} == set(
Group.objects.by_qualified_short_id_bulk(
group.organization.id,
[group_short_id, group_2_short_id],
)
)
group.update(status=GroupStatus.PENDING_DELETION, substatus=None)
with pytest.raises(Group.DoesNotExist):
Group.objects.by_qualified_short_id_bulk(
group.organization.id, [group_short_id, group_2_short_id]
)
def test_by_qualified_short_id_bulk_case_insensitive_project_slug(self) -> None:
project = self.create_project(slug="mixedcaseslug")
group = self.create_group(project=project, short_id=project.next_short_id())
Project.objects.filter(id=project.id).update(slug="MixedCaseSlug")
assert Project.objects.get(id=project.id).slug == "MixedCaseSlug"
# Re-fetch to ensure updated relation is used when computing qualified_short_id
group = Group.objects.get(id=group.id)
short_id = group.qualified_short_id
# Should resolve via case-insensitive slug fallback
resolved = Group.objects.by_qualified_short_id_bulk(group.organization.id, [short_id])
assert resolved == [group]
def test_first_last_release(self) -> None:
project = self.create_project()
release = Release.objects.create(version="a", organization_id=project.organization_id)
event = self.store_event(
data={"release": "a", "timestamp": self.min_ago}, project_id=project.id
)
group = event.group
release = Release.objects.get(version="a")
assert group.first_release == release
assert group.get_first_release() == release.version
cache.delete(_get_cache_key(group.id, group.project_id, True))
assert group.get_last_release() == release.version
def test_first_release_from_tag(self) -> None:
project = self.create_project()
event = self.store_event(
data={"release": "a", "timestamp": self.min_ago}, project_id=project.id
)
group = event.group
assert group.get_first_release() == "a"
cache.delete(_get_cache_key(group.id, group.project_id, True))
assert group.get_last_release() == "a"
def test_first_last_release_miss(self) -> None:
project = self.create_project()
release = Release.objects.create(version="a", organization_id=project.organization_id)
release.add_project(project)
group = self.create_group(project=project)
assert group.first_release is None
assert group.get_first_release() is None
assert group.get_last_release() is None
def test_get_email_subject(self) -> None:
project = self.create_project()
group = self.create_group(project=project)
expect = f"{group.qualified_short_id} - {group.title}"
assert group.get_email_subject() == expect
def test_get_absolute_url(self) -> None:
for org_slug, group_id, params, expected in [
("org1", 23, None, "http://testserver/organizations/org1/issues/23/"),
(
"org2",
42,
{"environment": "dev"},
"http://testserver/organizations/org2/issues/42/?environment=dev",
),
(
"\u00f6rg3",
86,
{"env\u00edronment": "d\u00e9v"},
"http://testserver/organizations/org3/issues/86/?env%C3%ADronment=d%C3%A9v",
),
]:
org = self.create_organization(slug=org_slug)
project = self.create_project(organization=org)
group = self.create_group(id=group_id, project=project)
actual = group.get_absolute_url(params)
assert actual == expected
def test_get_absolute_url_feedback(self) -> None:
org_slug = "org1"
org = self.create_organization(slug=org_slug)
project = self.create_project(organization=org)
group_id = 23
params = None
expected = f"http://testserver/organizations/org1/feedback/?feedbackSlug={project.slug}%3A23&project={project.id}"
group = self.create_group(id=group_id, project=project, type=FeedbackGroup.type_id)
actual = group.get_absolute_url(params)
assert actual == expected
def test_get_absolute_url_event(self) -> None:
project = self.create_project()
event = self.store_event(
data={"fingerprint": ["group1"], "timestamp": self.min_ago}, project_id=project.id
)
group = event.group
url = f"http://testserver/organizations/{project.organization.slug}/issues/{group.id}/events/{event.event_id}/"
assert url == group.get_absolute_url(event_id=event.event_id)
@with_feature("system:multi-region")
def test_get_absolute_url_customer_domains(self) -> None:
project = self.create_project()
event = self.store_event(
data={"fingerprint": ["group1"], "timestamp": self.min_ago}, project_id=project.id
)
org = self.organization
group = event.group
expected = f"http://{org.slug}.testserver/issues/{group.id}/events/{event.event_id}/"
assert expected == group.get_absolute_url(event_id=event.event_id)
expected = f"http://{org.slug}.testserver/issues/{group.id}/"
assert expected == group.get_absolute_url()
def test_get_absolute_api_url(self) -> None:
project = self.create_project()
event = self.store_event(
data={"fingerprint": ["group1"], "timestamp": self.min_ago}, project_id=project.id
)
org = self.organization
group = event.group
assert (
group.get_absolute_api_url()
== f"http://testserver/api/0/organizations/{org.slug}/issues/{group.id}/"
)
def test_get_releases(self) -> None:
now = timezone.now().replace(microsecond=0)
project = self.create_project()
group = self.create_group(project=project)
group2 = self.create_group(project=project)
last_release = Release.objects.create(
organization_id=self.organization.id,
version="100",
date_added=now - timedelta(seconds=10),
)
first_release = Release.objects.create(
organization_id=self.organization.id,
version="200",
date_added=now - timedelta(seconds=100),
)
GroupRelease.objects.create(
project_id=project.id,
group_id=group.id,
release_id=first_release.id,
environment="",
last_seen=first_release.date_added,
first_seen=first_release.date_added,
)
GroupRelease.objects.create(
project_id=project.id,
group_id=group.id,
release_id=last_release.id,
environment="",
last_seen=last_release.date_added,
first_seen=last_release.date_added,
)
assert group.get_first_release() == "200"
cache.delete(_get_cache_key(group2.id, group2.project_id, True))
assert group2.get_first_release() is None
cache.delete(_get_cache_key(group.id, group.project_id, True))
assert group.get_last_release() == "100"
assert group2.get_last_release() is None
@patch("sentry.models.group.logger.error")
def test_group_substatus_defaults(self, mock_logger: MagicMock) -> None:
group = self.create_group(status=GroupStatus.UNRESOLVED)
assert group.substatus is None
assert mock_logger.call_count == 1
for nullable_status in (
GroupStatus.IGNORED,
GroupStatus.MUTED,
GroupStatus.RESOLVED,
GroupStatus.PENDING_DELETION,
GroupStatus.DELETION_IN_PROGRESS,
GroupStatus.REPROCESSING,
):
assert self.create_group(status=nullable_status).substatus is None
def test_group_valid_substatus(self) -> None:
desired_status_substatus_pairs = [
(GroupStatus.UNRESOLVED, GroupSubStatus.ESCALATING),
(GroupStatus.UNRESOLVED, GroupSubStatus.REGRESSED),
(GroupStatus.UNRESOLVED, GroupSubStatus.NEW),
(GroupStatus.IGNORED, GroupSubStatus.FOREVER),
(GroupStatus.IGNORED, GroupSubStatus.UNTIL_CONDITION_MET),
(GroupStatus.IGNORED, GroupSubStatus.UNTIL_ESCALATING),
]
for status, substatus in desired_status_substatus_pairs:
group = self.create_group(status=status, substatus=substatus)
assert group.substatus is substatus
| GroupTest |
python | pytorch__pytorch | test/test_sparse.py | {
"start": 7578,
"end": 189579
} | class ____(TestSparseBase):
def setUp(self):
super().setUp()
self.index_tensor = lambda *args, **kwargs: torch.tensor(*args, **kwargs, dtype=torch.int64)
def sparse_empty_factory(*args, **kwargs):
kwargs['layout'] = kwargs.get('layout', torch.sparse_coo)
return torch.empty(*args, **kwargs)
self.sparse_empty = sparse_empty_factory
def sparse_tensor_factory(*args, **kwargs):
return torch.sparse_coo_tensor(*args, **kwargs)
self.sparse_tensor = sparse_tensor_factory
def _gen_sparse(self, sparse_dim, nnz, with_size, dtype, device, coalesced):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dim
x, i, v = self.genSparseTensor(with_size, sparse_dim, nnz, not coalesced, dtype=dtype, device=device)
if not coalesced:
self.assert_uncoalesced(x)
return x, i, v
def assert_uncoalesced(self, x):
"""
Test if a CPU tensor is uncoalesced. This is used to ensure
correctness of the uncoalesced tensor generation algorithm.
"""
assert not x.is_coalesced()
existing_indices = set()
indices = x._indices()
for i in range(x._nnz()):
index = str(indices[:, i])
if index in existing_indices:
return True
else:
existing_indices.add(index)
def test_negative_indices(self):
indices = torch.tensor([[0, 1, -1], [2, 0, 1]])
values = torch.tensor([1, 2, 3])
shape = torch.Size([3, 3])
self.assertRaisesRegex(RuntimeError, "found negative index", lambda: torch.sparse_coo_tensor(indices, values, shape))
def randn(self, *args, **kwargs):
"""
Variant of torch.randn that also works in the TEST_CUDA case.
"""
# TODO: Put this in torch.cuda.randn
return torch.empty(*args, **kwargs).normal_()
@dtypes(torch.double)
@dtypesIfMPS(torch.float32)
def test_print_coalesced(self, device, dtype):
self._test_print(device, dtype, True)
@dtypes(torch.double)
@dtypesIfMPS(torch.float32)
def test_print_uncoalesced(self, device, dtype):
self._test_print(device, dtype, False)
def _test_print(self, device, dtype, coalesced):
shape_sparse_dim_nnz = [
((), 0, 2),
((0,), 0, 10),
((2,), 0, 3),
((100, 3), 1, 3),
((100, 20, 3), 2, 0),
((10, 0, 3), 0, 3),
((10, 0, 3), 0, 0),
]
printed = []
for shape, sparse_dim, nnz in shape_sparse_dim_nnz:
indices_shape = torch.Size((sparse_dim, nnz))
values_shape = torch.Size((nnz,) + shape[sparse_dim:])
printed.append(f"# shape: {torch.Size(shape)}")
printed.append(f"# nnz: {nnz}")
printed.append(f"# sparse_dim: {sparse_dim}")
printed.append(f"# indices shape: {indices_shape}")
printed.append(f"# values shape: {values_shape}")
indices = torch.arange(indices_shape.numel(), dtype=self.index_tensor(0).dtype,
device=device).view(indices_shape)
for d in range(sparse_dim):
indices[d].clamp_(max=(shape[d] - 1)) # make it valid index
if not coalesced and indices.numel() > 0:
indices[:, -1] = indices[:, 0] # make it uncoalesced
values_numel = values_shape.numel()
values = torch.arange(values_numel, dtype=dtype,
device=device).view(values_shape).div_(values_numel / 2.)
sp_tensor = self.sparse_tensor(indices, values, shape, dtype=dtype, device=device)
dtypes = [torch.int32]
if values.dtype == torch.double:
dtypes.append(torch.float)
else:
dtypes.append(torch.double if values.device != torch.device("mps:0") else torch.float32)
for dtype in dtypes:
printed.append(f"########## {dtype} ##########")
x = sp_tensor.detach().to(dtype)
printed.append("# sparse tensor")
printed.append(str(x))
if x.dtype.is_floating_point:
printed.append("# after requires_grad_")
printed.append(str(x.requires_grad_()))
printed.append("# after addition")
printed.append(str(x + x))
printed.append("# _indices")
printed.append(str(x._indices()))
printed.append("# _values")
printed.append(str(x._values()))
printed.append('')
self.assertExpected('\n'.join(printed))
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_basic(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
@coalescedonoff
@dtypes(torch.double, torch.cdouble, torch.bfloat16)
@dtypesIfMPS(torch.float32, torch.complex64)
@precisionOverride({torch.bfloat16: 1e-2})
def test_coalesce(self, device, dtype, coalesced):
def _test_coalesce(t):
tc = t.coalesce()
self.assertEqual(tc.to_dense(), t.to_dense())
self.assertTrue(tc.is_coalesced())
# Our code below doesn't work when nnz is 0, because
# then it's a 0D tensor, not a 2D tensor.
if t._nnz() == 0:
self.assertEqual(t._indices(), tc._indices())
self.assertEqual(t._values(), tc._values())
return tc
value_map: dict[Any, Any] = {}
for idx, val in zip(t._indices().t(), t._values()):
idx_tup = tuple(idx.tolist())
if idx_tup in value_map:
value_map[idx_tup] += val
else:
value_map[idx_tup] = val.clone() if isinstance(val, torch.Tensor) else val
new_indices = sorted(value_map.keys())
_new_values = [value_map[idx] for idx in new_indices]
if t._values().ndimension() < 2:
new_values = t._values().new(_new_values)
else:
new_values = torch.stack(_new_values)
new_indices = t._indices().new(new_indices).t()
tg = t.new(new_indices, new_values, t.size())
self.assertEqual(tc._indices(), tg._indices())
self.assertEqual(tc._values(), tg._values())
if t.is_coalesced():
self.assertEqual(tc._indices(), t._indices())
self.assertEqual(tc._values(), t._values())
for empty_i, empty_v, empty_nnz in itertools.product([True, False], repeat=3):
sparse_size = [] if empty_i else [2, 1]
dense_size = [1, 0, 2] if empty_v else [1, 2]
nnz = 0 if empty_nnz else 5
t, _, _ = self._gen_sparse(len(sparse_size), nnz, sparse_size + dense_size, dtype, device, coalesced)
_test_coalesce(t) # this tests correctness
@onlyCUDA
@largeTensorTest("30GB", "cuda")
@skipCUDAIf(not SM80OrLater and not TEST_WITH_ROCM, "CUDA capability < SM80 and not ROCM")
@dtypes(torch.float)
def test_coalesce_accepts_large_tensor(self, device, dtype):
N = 22500000
NNZ = 272500000
rows = torch.randint(0, N, (NNZ,), dtype=torch.int64, device=device)
cols = torch.randint(0, N, (NNZ,), dtype=torch.int64, device=device)
indices = torch.stack([rows, cols], dim=0)
values = torch.randn(NNZ, dtype=dtype, device=device)
sparse_matrix = torch.sparse_coo_tensor(indices, values, size=(N, N), dtype=torch.float32, device=device)
sparse_matrix = sparse_matrix.coalesce()
@dtypes(torch.double)
@dtypesIfMPS(torch.float32)
@skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/89395")
def test_coalesce_reference_cycle(self, device, dtype):
# Test coalesce doesn't create autograd graph cycles (gh-52253)
# Sanity check that the helper class works as expected
t = torch.rand(2)
t_ref = torch._C._WeakTensorRef(t)
self.assertFalse(t_ref.expired())
del t
self.assertTrue(t_ref.expired())
def test_sparse_sum():
i = torch.tensor([[0], [4]], dtype=torch.long, device=device)
v = torch.tensor([[[-0.4567, -1.8797, 0.0380, 1.4316]]],
dtype=dtype, device=device)
S = torch.sparse_coo_tensor(i, v)
S = S.coalesce()
S.requires_grad_(True)
S2 = S.coalesce()
self.assertTrue(S2.is_coalesced())
return torch._C._WeakTensorRef(S2)
ref = test_sparse_sum()
self.assertTrue(ref.expired())
@dtypes(torch.double)
@dtypesIfMPS(torch.float32)
def test_ctor_large_sizes(self, device, dtype):
# Test that integer overflow is detected when computing numel
# of a sparse tensor with large dimensions (gh-57416). Notice
# that numel is computed internally when constructing a
# tensor, hence the overflow may appear during the tensor
# construction step.
N = 100000
indices = torch.tensor([[N, N - 1]] * 4, dtype=torch.int64, device=device)
values = torch.tensor([1, 2], dtype=dtype, device=device)
self.assertRaises(RuntimeError,
lambda: torch.sparse_coo_tensor(
indices, values, (N + 1,) * 4, device=device))
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_ctor_size_checks(self, device, dtype):
indices = self.index_tensor([
[0, 0, 0],
[0, 3, 0],
[0, 0, 0],
[0, 0, 0],
], device=device)
values = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
# indices inconsistent with size
self.assertRaises(
RuntimeError,
lambda: self.sparse_tensor(indices, values, torch.Size([2, 1, 1])))
# values inconsistent with size
values = torch.tensor([
[2, 1, 2, 1],
[1, 0, 5, 2],
], dtype=dtype, device=device)
self.assertRaises(
RuntimeError,
lambda: self.sparse_tensor(indices, values, torch.Size([2, 4, 2, 1])))
@expectedFailureMPS
@coalescedonoff
@dtypes(torch.double)
@dtypesIfMPS(torch.float32)
def test_ctor_is_coalesced_with_gradcheck(self, device, dtype, coalesced):
for sparse_size, nnz in (((3, 3), 5), ((2, 3, 1, 5), 11)):
t, _, _ = self._gen_sparse(len(sparse_size), nnz, sparse_size, dtype, device, coalesced)
self.assertEqual(t.is_coalesced(), coalesced)
def func(indices, values, shape, is_coalesced):
if shape is None:
s = torch.sparse_coo_tensor(indices, values, check_invariants=True, is_coalesced=is_coalesced)
else:
s = torch.sparse_coo_tensor(indices, values, shape, check_invariants=True, is_coalesced=is_coalesced)
self.assertEqual(s.is_coalesced(), is_coalesced)
return s.to_dense(masked_grad=False)
for shape in {t.shape, None}:
if coalesced:
torch.autograd.gradcheck(func, (t._indices(), t._values().requires_grad_(True), shape, False))
torch.autograd.gradcheck(func, (t._indices(), t._values().requires_grad_(True), shape, True))
else:
torch.autograd.gradcheck(func, (t._indices(), t._values().requires_grad_(True), shape, False))
with self.assertRaisesRegex(RuntimeError,
"cannot set is_coalesced to true if indices correspond to uncoalesced COO tensor"):
torch.autograd.gradcheck(func, (t._indices(), t._values().requires_grad_(True), shape, True))
@dtypes(*floating_and_complex_types_and(torch.float16, torch.bfloat16))
@dtypesIfMPS(*all_mps_types())
@unittest.skipIf(TEST_WITH_CROSSREF, "generator unsupported triggers assertion error")
@gradcheck_semantics()
def test_to_dense_with_gradcheck(self, device, dtype, gradcheck):
def test_tensor(x, res):
x.to_dense() # Tests triple to_dense for memory corruption
x.to_dense()
x.to_dense()
dense_x = x.to_dense()
safe_dense_x = self.safeToDense(x)
dense_x = dense_x.to(res.dtype)
safe_dense_x = safe_dense_x.to(res.dtype)
self.assertEqual(res, dense_x)
self.assertEqual(res, safe_dense_x)
# Only run autograd test for float64
if x.dtype != torch.float64:
return
def fn(x):
return x.to_dense(masked_grad=gradcheck.masked)
x.requires_grad_(True)
gradcheck(fn, (x,))
values_types = [torch.double, torch.cdouble] if device != "mps:0" else [torch.float32, torch.complex64]
for value_type in values_types:
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
# we don't have to_dense for half types on CPU because it is implemented
# with a slower add_ operation
v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]), dtype=value_type, device=device)
res = torch.tensor([
[[2, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 3, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 4]],
], dtype=dtype, device=device)
test_tensor(x, res)
test_tensor(res, res)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
v = torch.empty(4, 0, dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]), dtype=value_type, device=device)
res = torch.empty((3, 4, 5, 0), dtype=dtype, device=device)
test_tensor(x, res)
@coalescedonoff
@dtypes(torch.float16, torch.bfloat16, torch.float64, torch.int, torch.cfloat, torch.cdouble)
@dtypesIfMPS(torch.float16, torch.bfloat16, torch.float32, torch.int, torch.cfloat)
def test_to_sparse(self, device, dtype, coalesced):
shape = [5, 2, 10, 4]
max_nnz = 1
dtypes = [torch.double, torch.cdouble] if device != "mps:0" else [torch.float32, torch.complex64]
for value_type in dtypes:
for dim, dim_sz in enumerate(shape, 1):
max_nnz *= dim_sz
rnnz = torch.randint(2, max_nnz, (1,)).item()
for nnz in [0, 1, rnnz]:
expected, _, _ = self._gen_sparse(dim, nnz, shape, dtype=value_type, device=device,
coalesced=coalesced)
expected = expected.to(dtype)
d = expected.to_dense()
result = d.to_sparse(dim)
self.assertEqual(d, result.to_dense())
self.assertEqual(expected.size(), result.size())
self.assertEqual(dim, result.sparse_dim())
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_sparse_bool(self, device, dtype):
a = torch.tensor([True, False], dtype=dtype, device=device).to(torch.bool)
b = a.to_sparse().to_dense()
self.assertEqual(a, b)
@skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/108667")
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_scalar(self, device, dtype):
# tensor with value
a = self.sparse_tensor(self.index_tensor([], device=device).unsqueeze(1), 12.3, [], dtype=dtype, device=device)
self.assertEqual(1, a._values().numel())
self.assertEqual(a, a.clone())
a_coalesced = a.coalesce()
self.assertTrue(a_coalesced.is_coalesced())
self.assertEqual(torch.tensor(12.3, dtype=dtype, device=device), a.to_dense())
self.assertEqual(a, a.to_dense().to_sparse())
# tensor with multiple values
a = self.sparse_tensor(self.index_tensor([], device=device).unsqueeze(1).expand(0, 2),
[12.3, 12.3], [], dtype=dtype, device=device)
self.assertEqual(2, a._values().numel())
self.assertEqual(a, a.clone())
a_coalesced = a.coalesce()
self.assertTrue(a_coalesced.is_coalesced())
self.assertEqual(torch.tensor(12.3 * 2, dtype=dtype, device=device), a.to_dense())
self.assertEqual(a.coalesce(), a.coalesce().to_dense().to_sparse())
# tensor without value
a = self.sparse_empty((), dtype=dtype, device=device)
self.assertEqual(0, a._values().numel())
self.assertEqual(a, a.clone())
a_coalesced = a.coalesce()
self.assertTrue(a_coalesced.is_coalesced())
self.assertEqual(torch.tensor(0, dtype=dtype, device=device), a.to_dense())
self.assertEqual(a, a.to_dense().to_sparse())
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_shared(self, device, dtype):
i = self.index_tensor([[2]], device=device)
v = torch.tensor([5], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3]))
v[0] = 6
self.assertEqual(torch.tensor([0, 0, 6], dtype=dtype, device=device), self.safeToDense(x))
i[0][0] = 0
self.assertEqual(torch.tensor([6, 0, 0], dtype=dtype, device=device), self.safeToDense(x))
i = self.index_tensor([[2]], device=device)
v = torch.empty((1, 0), dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 0]))
i[0][0] = 0
self.assertEqual(torch.empty((3, 0), dtype=dtype, device=device), self.safeToDense(x))
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
@unittest.skipIf(TEST_WITH_CROSSREF, "generator unsupported triggers assertion error")
@gradcheck_semantics()
def test_to_dense_hybrid(self, device, dtype, gradcheck):
def test_tensor(x, res):
x.to_dense() # Tests double to_dense for memory corruption
x.to_dense()
x.to_dense()
self.assertEqual(res, x.to_dense())
self.assertEqual(res, self.safeToDense(x))
def fn(x):
return x.to_dense(masked_grad=gradcheck.masked)
x.requires_grad_(True)
kwargs = {"eps": 1e-4} if device == "mps:0" else {}
gradcheck(fn, (x,), **kwargs)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
], device=device)
v = torch.tensor([[2, 3], [1, 2], [3, 4], [4, 5]], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 2]))
res = torch.tensor([
[[2, 3],
[0, 0],
[0, 0],
[0, 0]],
[[1, 2],
[0, 0],
[0, 0],
[0, 0]],
[[3, 4],
[0, 0],
[0, 0],
[4, 5]],
], dtype=dtype, device=device)
test_tensor(x, res)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
], device=device)
v = torch.empty((4, 2, 0), dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 2, 0]))
res = torch.empty((3, 4, 2, 0), dtype=dtype, device=device)
test_tensor(x, res)
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_contig(self, device, dtype):
    """coalesce() sorts indices lexicographically and sums values at
    duplicate indices; checked against hand-computed expectations."""
    def test_tensor(x, exp_i, exp_v):
        x = x.coalesce()
        self.assertEqual(exp_i, x._indices())
        self.assertEqual(exp_v, x._values())
    # 2-D tensor with unsorted, unique indices: coalesce only reorders.
    i = self.index_tensor([
        [1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
        [92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
    ], device=device)
    v = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype, device=device)
    x = self.sparse_tensor(i, v, torch.Size([100, 100]))
    exp_i = self.index_tensor([
        [0, 1, 6, 14, 27, 35, 39, 40, 66, 71],
        [31, 92, 65, 50, 34, 62, 22, 56, 74, 89],
    ], device=device)
    exp_v = torch.tensor([2, 1, 6, 4, 10, 3, 5, 9, 8, 7], dtype=dtype, device=device)
    test_tensor(x, exp_i, exp_v)
    # 3-D tensor, unique indices.
    i = self.index_tensor([
        [2, 0, 2, 1],
        [0, 0, 3, 0],
        [1, 0, 4, 0],
    ], device=device)
    v = torch.tensor([3, 2, 4, 1], dtype=dtype, device=device)
    x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]))
    exp_i = self.index_tensor([
        [0, 1, 2, 2],
        [0, 0, 0, 3],
        [0, 0, 1, 4],
    ], device=device)
    exp_v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
    test_tensor(x, exp_i, exp_v)
    # Same index pattern with an empty dense dimension.
    i = self.index_tensor([
        [2, 0, 2, 1],
        [0, 0, 3, 0],
        [1, 0, 4, 0],
    ], device=device)
    v = torch.empty([4, 0], dtype=dtype, device=device)
    x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]))
    exp_i = self.index_tensor([
        [0, 1, 2, 2],
        [0, 0, 0, 3],
        [0, 0, 1, 4],
    ], device=device)
    exp_v = torch.empty([4, 0], dtype=dtype, device=device)
    test_tensor(x, exp_i, exp_v)
    # Duplicate indices
    # Three entries share index (0,0,0); their values (3+2+1) are summed.
    i = self.index_tensor([
        [0, 0, 2, 0],
        [0, 0, 3, 0],
        [0, 0, 4, 0],
    ], device=device)
    v = torch.tensor([3, 2, 4, 1], dtype=dtype, device=device)
    x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]))
    exp_i = self.index_tensor([
        [0, 2],
        [0, 3],
        [0, 4],
    ], device=device)
    exp_v = torch.tensor([6, 4], dtype=dtype, device=device)
    test_tensor(x, exp_i, exp_v)
    # Duplicates with an empty dense dimension: nnz shrinks, values stay empty.
    i = self.index_tensor([
        [0, 0, 2, 0],
        [0, 0, 3, 0],
        [0, 0, 4, 0],
    ], device=device)
    v = torch.empty([4, 0], dtype=dtype, device=device)
    x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]))
    exp_i = self.index_tensor([
        [0, 2],
        [0, 3],
        [0, 4],
    ], device=device)
    exp_v = torch.empty([2, 0], dtype=dtype, device=device)
    test_tensor(x, exp_i, exp_v)
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_contig_hybrid(self, device, dtype):
    """Same coalesce() checks as test_contig, but on hybrid tensors whose
    values carry extra dense dimensions (rows are summed element-wise)."""
    def test_tensor(x, exp_i, exp_v):
        x = x.coalesce()
        self.assertEqual(exp_i, x._indices())
        self.assertEqual(exp_v, x._values())
    # 2-D sparse dims + one dense dim of size 2; unique indices, reorder only.
    i = self.index_tensor([
        [1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
        [92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
    ], device=device)
    v = torch.tensor([
        [1, 2], [2, 3], [3, 4], [4, 5], [5, 6],
        [6, 7], [7, 8], [8, 9], [9, 10], [10, 11],
    ], dtype=dtype, device=device)
    x = self.sparse_tensor(i, v, torch.Size([100, 100, 2]))
    exp_i = self.index_tensor([
        [0, 1, 6, 14, 27, 35, 39, 40, 66, 71],
        [31, 92, 65, 50, 34, 62, 22, 56, 74, 89],
    ], device=device)
    exp_v = torch.tensor([
        [2, 3], [1, 2], [6, 7], [4, 5], [10, 11],
        [3, 4], [5, 6], [9, 10], [8, 9], [7, 8],
    ], dtype=dtype, device=device)
    test_tensor(x, exp_i, exp_v)
    # 3-D sparse dims + dense dim of size 3, unique indices.
    i = self.index_tensor([
        [2, 0, 2, 1],
        [0, 0, 3, 0],
        [1, 0, 4, 0],
    ], device=device)
    v = torch.tensor([[3, 3, 3], [2, 2, 2], [4, 4, 4], [1, 1, 1]], dtype=dtype, device=device)
    x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 3]))
    exp_i = self.index_tensor([
        [0, 1, 2, 2],
        [0, 0, 0, 3],
        [0, 0, 1, 4],
    ], device=device)
    exp_v = torch.tensor([[2, 2, 2], [1, 1, 1], [3, 3, 3], [4, 4, 4]], dtype=dtype, device=device)
    test_tensor(x, exp_i, exp_v)
    # Same index pattern with a trailing empty dense dimension.
    i = self.index_tensor([
        [2, 0, 2, 1],
        [0, 0, 3, 0],
        [1, 0, 4, 0],
    ], device=device)
    v = torch.empty([4, 3, 0], dtype=dtype, device=device)
    x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 3, 0]))
    exp_i = self.index_tensor([
        [0, 1, 2, 2],
        [0, 0, 0, 3],
        [0, 0, 1, 4],
    ], device=device)
    exp_v = torch.empty([4, 3, 0], dtype=dtype, device=device)
    test_tensor(x, exp_i, exp_v)
    # Duplicate indices
    # Rows at duplicated index (0,0,0) are summed element-wise:
    # [3,2,3] + [2,1,1] + [1,1,1] -> [6,4,5].
    i = self.index_tensor([
        [0, 0, 2, 0],
        [0, 0, 3, 0],
        [0, 0, 4, 0],
    ], device=device)
    v = torch.tensor([[3, 2, 3], [2, 1, 1], [4, 3, 4], [1, 1, 1]], dtype=dtype, device=device)
    x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 3]))
    exp_i = self.index_tensor([
        [0, 2],
        [0, 3],
        [0, 4],
    ], device=device)
    exp_v = torch.tensor([[6, 4, 5], [4, 3, 4]], dtype=dtype, device=device)
    test_tensor(x, exp_i, exp_v)
    # Duplicates with an empty dense dimension: only nnz shrinks.
    i = self.index_tensor([
        [0, 0, 2, 0],
        [0, 0, 3, 0],
        [0, 0, 4, 0],
    ], device=device)
    v = torch.empty([4, 3, 0], dtype=dtype, device=device)
    x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 3, 0]))
    exp_i = self.index_tensor([
        [0, 2],
        [0, 3],
        [0, 4],
    ], device=device)
    exp_v = torch.empty([2, 3, 0], dtype=dtype, device=device)
    test_tensor(x, exp_i, exp_v)
@coalescedonoff
@dtypesIfMPS(torch.float32, torch.complex64)
@dtypes(torch.double, torch.cdouble)
def test_clone(self, device, dtype, coalesced):
    """clone() preserves the coalesced flag of the source sparse tensor."""
    def check(sparse_dims, nnz, shape):
        src = self._gen_sparse(sparse_dims, nnz, shape, dtype, device, coalesced)[0]
        # An uncoalesced source must produce an uncoalesced clone.
        if not coalesced:
            self.assertFalse(src.is_coalesced())
            self.assertFalse(src.clone().is_coalesced())
        # After coalescing, the clone reports coalesced as well.
        src = src.coalesce()
        self.assertTrue(src.is_coalesced())
        self.assertTrue(src.clone().is_coalesced())
    check(4, 20, 5)
    check(3, 10, [100, 100, 100, 5, 5, 5, 0])
    check(3, 0, [0, 0, 100, 5, 5, 5, 0])
@coalescedonoff
@dtypes(torch.double, torch.cdouble, torch.bfloat16)
@dtypesIfMPS(torch.float32, torch.complex64, torch.bfloat16)
@precisionOverride({torch.bfloat16: 2e-2})
def test_Sparse_to_Sparse_copy_(self, device, dtype, coalesced):
    """copy_ between two sparse tensors: value transfer, dtype
    preservation, shape-mismatch / dense-mixing errors, and autograd."""
    # This is for testing torch.copy_(SparseTensor, SparseTensor)
    sparse_dims = 3
    nnz = 10
    sizes = [2, 3, 4, 5]  # hybrid sparse
    x1, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
    x2, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes, dtype, device, coalesced)
    # test copy
    x2_dense = x2.to_dense()
    x1.copy_(x2)
    self.assertEqual(x2_dense, x1.to_dense())
    # test type conversion (when x1.copy_(x2), x1.dtype should stay the same)
    x1 = x1.to(torch.float32)
    x2 = x2.to(torch.float16)
    x1_dtype = x1.dtype
    x1.copy_(x2)
    self.assertEqual(x1_dtype, x1.dtype)
    # float64 is unsupported on MPS, hence the float32 fallback there.
    x2 = x2.to(torch.float64) if device != "mps:0" else x2.to(torch.float32)
    x1_dtype = x1.dtype
    x1.copy_(x2)
    self.assertEqual(x1_dtype, x1.dtype)
    # test no broadcast
    self.assertRaises(RuntimeError, lambda: x1.copy_(x2.narrow_copy(0, 0, 1)))
    # test raise error on copy_() between dense and sparse Tensors
    self.assertRaises(RuntimeError, lambda: x1.copy_(torch.randn(5, 5)))
    # test autograd
    x1, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
    x2, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes, dtype, device, coalesced)
    x2.requires_grad_(True)
    x1.copy_(x2)
    y = x1 * 2
    x2_clone = x2.clone()
    y.backward(x2_clone)
    # Gradient flows back to the copy source x2; the destination x1
    # itself accumulates no grad.
    expected_grad = x2_clone * 2
    self.assertEqual(expected_grad.to_dense(), x2.grad.to_dense())
    self.assertEqual(None, x1.grad)
@coalescedonoff
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
@dtypes(torch.double, torch.cdouble)
def test_Sparse_to_Sparse_copy_multi_gpu(self, device, dtype, coalesced):
    """copy_ between sparse tensors on different devices: the destination
    keeps its device, and autograd routes grads back across devices."""
    # This is for testing torch.copy_(SparseTensor, SparseTensor) across GPU devices
    sparse_dims = 3
    nnz = 10
    sizes = [2, 3, 4, 5]  # hybrid sparse
    x1, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
    x2, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes, dtype, device, coalesced)
    x1 = x1.to('cuda:0')
    def test_cross_device(x1, x2):
        # copy_ must preserve the destination's device while taking the
        # source's values.
        x1_device = x1.device
        x1.copy_(x2)
        self.assertEqual(x2.to('cuda:0').to_dense(), x1.to_dense())
        self.assertEqual(x1_device, x1.device)
    test_cross_device(x1, x2.to('cuda:1'))  # test across gpu devices
    test_cross_device(x1, x2.to('cpu'))  # test between cpu and gpu
    # test autograd
    x2 = x2.to('cuda:1')
    x2.requires_grad_(True)
    x1.copy_(x2)
    y = x1 * 2
    x2_clone = x2.clone().to('cuda:0')
    y.backward(x2_clone)
    expected_grad = x2_clone * 2
    self.assertEqual(expected_grad.to_dense(), x2.grad.to('cuda:0').to_dense())
    self.assertEqual(None, x1.grad)
@onlyCUDA
def test_cuda_empty(self, device):
def test_tensor(x):
y = x.to(device)
self.assertEqual(x.sparse_dim(), y.sparse_dim())
self.assertEqual(x.dense_dim(), y.dense_dim())
x = y.cpu()
self.assertEqual(y.sparse_dim(), x.sparse_dim())
self.assertEqual(y.dense_dim(), x.dense_dim())
x = torch.sparse_coo_tensor((2, 3, 4), dtype=torch.float32)
test_tensor(x)
x = torch.sparse_coo_tensor((2, 3, 4), dtype=torch.float16)
test_tensor(x)
x = torch.sparse_coo_tensor((2, 3, 4), dtype=torch.float16)
test_tensor(x)
x = torch.sparse_coo_tensor((2, 3, 4, 0), dtype=torch.float32)
test_tensor(x)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_transpose(self, device, dtype, coalesced):
    """transpose_/transpose on sparse tensors match dense transpose for
    every pair of the first four dimensions."""
    def test_shape(sparse_dims, nnz, with_size):
        x = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
        y = self.safeToDense(x)
        for i, j in itertools.combinations(range(4), 2):
            # In-place variant (transpose_ returns self).
            x = x.transpose_(i, j)
            y = y.transpose(i, j)
            self.assertEqual(self.safeToDense(x), y)
            # Out-of-place variant; also undoes the in-place swap above.
            x = x.transpose(i, j)
            y = y.transpose(i, j)
            self.assertEqual(self.safeToDense(x), y)
    test_shape(4, 6, 3)
    test_shape(4, 3, [7, 7, 7, 3, 3, 3, 0])
    test_shape(4, 0, [0, 0, 7, 3, 3, 3, 0])
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
@unittest.skipIf(TEST_WITH_CROSSREF, "generator unsupported triggers assertion error")
@gradcheck_semantics()
def test_permute(self, device, dtype, coalesced, gradcheck):
    """permute on sparse tensors: argument validation, correctness vs the
    dense equivalent, coalesced-flag propagation, and gradients. Permuting
    across the sparse/dense dimension boundary must raise."""
    # trivial checks
    s = torch.rand(3, 3, 3, device=device, dtype=dtype).to_sparse()
    with self.assertRaisesRegex(RuntimeError, "does not match the length"):
        s.permute(dims=(1, 0))
    with self.assertRaisesRegex(RuntimeError, "duplicate dims"):
        s.permute(dims=(1, 1, 1))
    # Calling permute on a sparse tensor with an empty tuple used to segfault,
    # see https://github.com/pytorch/pytorch/issues/116325
    x = torch.rand((), device=device, dtype=dtype).to_sparse()
    x.permute(())
    self.assertEqual(len(x.values()), 1)
    def test_shape(sparse_dims, nnz, with_size):
        ndim = len(with_size)
        # A permutation is valid only when it maps sparse dims onto sparse
        # dims and dense dims onto dense dims.
        valid_sparse_dims = torch.arange(-ndim, -ndim + sparse_dims)
        valid_dense_dims = torch.arange(-ndim + sparse_dims, 0)
        for dims in itertools.permutations(range(-ndim, 0)):
            s = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
            d = self.safeToDense(s)
            dims_sparse, _ = torch.tensor(dims[:sparse_dims]).sort()
            dims_dense, _ = torch.tensor(dims[sparse_dims:]).sort()
            if (valid_sparse_dims == dims_sparse).all() and (valid_dense_dims == dims_dense).all():
                # if valid permutation, test for correctness
                s_permuted = s.permute(dims)
                self.assertEqual(s_permuted, d.permute(dims))

                # if s is coalesced, and perm does not touch 0-dim,
                # the result has to be coalesced as well
                if dims[0] == 0:
                    self.assertEqual(s_permuted.is_coalesced(), s.is_coalesced())
                else:
                    self.assertFalse(s_permuted.is_coalesced())
                # NOTE(review): looser eps on MPS, presumably for float32.
                kwargs = {"eps": 1e-4} if device == "mps:0" else {}
                gradcheck(lambda t: t.permute(dims).to_dense(masked_grad=gradcheck.masked), s.requires_grad_(), **kwargs)
            else:
                # otherwise check if exception is thrown
                fail_message = "transpositions between sparse and dense dimensions are not allowed"
                with self.assertRaisesRegex(RuntimeError, fail_message):
                    s.permute(dims)
    test_shape(2, 3, [2, 3, 4, 5])
    test_shape(2, 3, [2, 2, 0])
    # if nnz=0, it is not true that t == t.to_dense().to_sparse()
    # unless t.sparse_dim == t.dim (i.e. t is not hybrid)
    test_shape(3, 0, [0, 0, 2])
@coalescedonoff
@onlyCPU
@dtypes(torch.double)
def test_coalesce_transpose_mm(self, device, dtype, coalesced):
    """mm() of a transposed coalesced sparse matrix with a dense matrix
    matches the dense computation; also checks the coalesced flag after t()."""
    def test_shape(di, dj, dk, nnz):
        x, _, _ = self._gen_sparse(2, nnz, [dj, di], dtype, device, coalesced)
        y = torch.randn(dj, dk, dtype=dtype, device=device)
        x_coalesced = x.coalesce()
        self.assertTrue(x_coalesced.is_coalesced())
        x_coalesced_t = x_coalesced.t()
        # Transpose is `coalesced`-preserving only if the indices tensor is empty.
        self.assertEqual(x_coalesced_t.is_coalesced(), di * nnz == 0)
        res = torch.mm(x_coalesced_t, y)
        expected = torch.mm(self.safeToDense(x_coalesced_t), y)
        self.assertEqual(res, expected)
    test_shape(10, 20, 30, 20)
    # Degenerate shapes: empty rows, columns, or output.
    test_shape(0, 20, 30, 0)
    test_shape(10, 0, 30, 0)
    test_shape(10, 20, 0, 0)
    test_shape(10, 20, 0, 20)
@skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/1166")
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_t_empty(self, device, dtype):
    """t_/t on an empty 2-D sparse tensor swap the size while keeping
    nnz == 0 and the sparse/dense dim counts."""
    def test_in_place(x):
        shape_original = x.shape
        x.t_()
        self.assertEqual(torch.Size([shape_original[1], shape_original[0]]), x.size())
        self.assertEqual(0, x._indices().numel())
        self.assertEqual(0, x._values().numel())
        self.assertEqual(x.sparse_dim(), 2)
        self.assertEqual(x.dense_dim(), 0)
    def test_not_in_place(x):
        # Out-of-place t(): the result is transposed, the input keeps
        # its metadata.
        shape_original = x.shape
        y = x.t()
        self.assertEqual(torch.Size([shape_original[1], shape_original[0]]), y.size())
        self.assertEqual(0, y._indices().numel())
        self.assertEqual(0, y._values().numel())
        self.assertEqual(x.sparse_dim(), 2)
        self.assertEqual(x.dense_dim(), 0)
    x = self.sparse_empty(2, 3, dtype=dtype, device=device)
    test_in_place(x)
    test_not_in_place(x)
    # Zero-sized second dimension.
    x = self.sparse_empty(2, 0, dtype=dtype, device=device)
    test_in_place(x)
    test_not_in_place(x)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_add_zeros(self, device, dtype, coalesced):
    """Adding an all-zero sparse tensor is an identity, on either side."""
    def check(sparse_dims, nnz, sizes):
        x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
        # An empty COO tensor of the same shape acts as the additive zero.
        zero = torch.sparse_coo_tensor(sizes, device=x.device)
        # The identity must hold regardless of operand order.
        self.assertEqual(zero + x, x)
        self.assertEqual(x + zero, x)
    check(1, 20, [1])
    check(4, 20, [3, 17, 19, 5])
    check(2, 20, [3, 17, 19, 5])
    check(2, 20, [3, 17, 19, 0])
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_add_sub_nnz(self, device, dtype):
# nnz should not grow unbounded (gh-34964)
x = torch.randn(10, dtype=dtype, device=device).to_sparse()
x.add_(x)
x.add_(x)
self.assertLessEqual(x._nnz(), 10)
x.sub_(2 * x)
x.sub_(2 * x)
self.assertLessEqual(x._nnz(), 10)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_cat(self, device, dtype, coalesced):
    """torch.cat on sparse tensors matches dense cat along sparse and
    dense dims; mismatched shapes/dims and sparse+dense mixes must raise."""
    # shapes: list of tuples (sparse_dims, nnz, sizes)
    def test_shapes(shapes, dim, fail_message=None):
        inputs = [self._gen_sparse(shape[0], shape[1], shape[2], dtype, device, coalesced)[0]
                  for shape in shapes]
        if fail_message:
            with self.assertRaisesRegex(RuntimeError, fail_message):
                torch.cat(inputs, dim)
        else:
            result = torch.cat(inputs, dim)
            dense_result = torch.cat([t.to_dense() for t in inputs], dim)
            self.assertEqual(dense_result, result.to_dense())
    test_shapes(
        [(3, 10, [2, 3, 4]), (3, 10, [2, 1, 4]), (3, 10, [2, 4, 4])], 1)
    # mismatched sizes
    test_shapes([(3, 10, [2, 3, 4]), (3, 10, [2, 1, 4])], 0,
                "All tensors must have the same shape: \\[2, 3, 4].*\\[2, 1, 4]")
    # hybrid sparse/dense
    test_shapes(
        [(2, 10, [2, 3, 4]), (2, 10, [2, 1, 4]), (2, 10, [2, 4, 4])], 1)
    # cat along dense dim
    test_shapes([(2, 10, [2, 3, 4]), (2, 10, [2, 3, 7])], 2)
    test_shapes([(1, 10, [2, 3, 4]), (1, 10, [2, 3, 4])], 1)
    test_shapes([(1, 10, [2, 3, 4]), (1, 10, [2, 3, 4])], 2)
    # mismatched dimensions
    test_shapes([(2, 10, [2, 3, 4]), (3, 10, [2, 3, 4])], 0,
                "All tensors must have the same.*2, 1, but tensor at position 1 has 3, 0.")
    # wrapped dimension
    test_shapes(
        [(3, 10, [2, 3, 4]), (3, 10, [2, 1, 4]), (3, 10, [2, 4, 4])], -2)
    # sparse with dense
    sp = self._gen_sparse(3, 10, [2, 3, 4], dtype, device, coalesced)[0]
    dn = sp.to_dense()
    with self.assertRaisesRegex(RuntimeError,
                                "Concatenating sparse tensors, but a dense tensor was found at position 1."):
        torch.cat((sp, dn))
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_unsqueeze(self, device, dtype, coalesced):
    """torch.unsqueeze on sparse tensors matches the dense result for
    sparse, dense, and (negatively) wrapped dims; out-of-range dims raise."""
    def test_shape(sparse_dims, nnz, sizes, unsqueeze_dim, fail_message=None):
        x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
        if fail_message:
            with self.assertRaisesRegex(IndexError, fail_message):
                torch.unsqueeze(x, unsqueeze_dim)
        else:
            result = torch.unsqueeze(x, unsqueeze_dim)
            dense_result = torch.unsqueeze(x.to_dense(), unsqueeze_dim)
            self.assertEqual(dense_result, result.to_dense())
    # basic case
    test_shape(3, 10, [5, 7, 11], 0)
    # hybrid sparse/dense, unsqueeze along sparse dim
    test_shape(3, 10, [5, 7, 11, 13, 17], 0)
    test_shape(3, 10, [5, 7, 11, 13, 17], 3)
    # unsqueeze along dense dimensions
    test_shape(3, 10, [5, 7, 11, 13, 17], 4)
    test_shape(3, 10, [5, 7, 11, 13, 17], 5)
    # wrapped dimensions
    test_shape(3, 10, [5, 7, 11, 13, 17], -1)
    test_shape(3, 10, [5, 7, 11, 13, 17], -6)
    # bounds
    test_shape(3, 10, [5, 7, 11, 13, 17], -7, "Dimension out of range")
    test_shape(3, 10, [5, 7, 11, 13, 17], 6, "Dimension out of range")
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_select(self, device, dtype, coalesced):
    """torch.select on sparse tensors matches dense select; covers both
    sparse-result and dense-result cases and out-of-range indices."""
    def test_shape(sparse_dims, nnz, sizes, select_dim, select_index, fail_message=None):
        x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
        if fail_message:
            with self.assertRaisesRegex(IndexError, fail_message):
                torch.select(x, select_dim, select_index)
        else:
            result = torch.select(x, select_dim, select_index)
            if result.is_sparse:
                result = result.to_dense()
            dense_result = torch.select(x.to_dense(), select_dim, select_index)
            self.assertEqual(dense_result, result)
    sizes = [5, 7, 11, 13, 17]
    # hybrid sparse/dense, select sparse dim, result is dense
    for i in range(sizes[0]):
        test_shape(1, 10, sizes, 0, i)
    test_shape(1, 10, sizes, 0, sizes[0] + 1, r'select[(][)][:] index \d out of range.*')
    # hybrid sparse/dense, select sparse dim, result is sparse
    for d in range(3):
        for i in range(sizes[d]):
            test_shape(3, 10, sizes, d, i)
    # hybrid sparse/dense, select dense dim, result is sparse
    for d in range(1, 3):
        for i in range(sizes[d]):
            test_shape(1, 10, sizes, d, i)
@dtypes(*integral_types())
def test_select_no_type_promotion(self, device, dtype):
# see https://github.com/pytorch/pytorch/issues/82150
idx = torch.tensor([[0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1]])
val = torch.ones(6, dtype=dtype)
s = torch.sparse_coo_tensor(idx, val, size=(3, 3))
for t in (s, s * torch.tensor(0, dtype=dtype)):
# empty checks
self.assertEqual(t.dtype, t[2].dtype)
self.assertEqual(t.dtype, t[0, 1].dtype)
# sum should not promote
self.assertEqual(t.dtype, t[0, 0].dtype)
self.assertEqual(t.dtype, t[1, 1].dtype)
@expectedFailureMPS
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_index_select(self, device, dtype, coalesced):
    """torch.index_select on sparse tensors matches dense index_select for
    every dim, with scalar, boundary, and multi-element index lists."""
    def test_shape(sparse_dims, nnz, sizes, select_dim, select_index, fail_message=None):
        # Normalize ints / lists into a LongTensor index.
        if isinstance(select_index, int):
            select_index = [select_index]
        if isinstance(select_index, list):
            select_index = torch.tensor(select_index, device=device, dtype=torch.long)
        x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
        if fail_message:
            with self.assertRaisesRegex(IndexError, fail_message):
                torch.index_select(x, select_dim, select_index)
        else:
            result = torch.index_select(x, select_dim, select_index)
            if result.is_sparse:
                result = result.to_dense()
            dense_result = torch.index_select(x.to_dense(), select_dim, select_index)
            self.assertEqual(dense_result, result)
    sizes = [5, 7, 11, 13, 17]
    for d in range(len(sizes)):
        for index in [0, sizes[d] - 1, [0, sizes[d] // 2, sizes[d] - 1]]:
            # Vary the number of sparse dims so both sparse-dim and
            # dense-dim selection paths are exercised.
            test_shape(1, 10, sizes, d, index)
            test_shape(len(sizes) // 2, 10, sizes, d, index)
            test_shape(len(sizes), 10, sizes, d, index)
def _test_index_select_exhaustive_index(self, sizes, dims, device, dtype, coalesced):
    """Helper: exhaustively index_select along each dim in `dims` with all
    negative index tuples of length sizes[d], comparing sparse vs dense.
    Uses both a dense-nnz tensor and a 2-nnz tensor to hit both internal
    branches (nnz > sizes[d] and nnz <= sizes[d])."""
    t = make_tensor(sizes, dtype=dtype, device=device)
    t_sparse = t.to_sparse().coalesce() if coalesced else t.to_sparse()
    t_small_sparse, _, _ = self._gen_sparse(len(sizes), 2, sizes, dtype, device, coalesced)
    t_small = t_small_sparse.to_dense()
    for d in dims:
        # NOTE: indices are negative
        idx_dim_d_range = list(range(-sizes[d], 0))
        # NOTE(review): this range is a single iteration (idx_len ==
        # sizes[d]); presumably narrowed from a wider range to bound
        # runtime, since the product below grows exponentially in idx_len.
        for idx_len in range(sizes[d], sizes[d] + 1):
            # creates all possible valid indices into dim d of length idx_len
            for idx in itertools.product(*itertools.repeat(idx_dim_d_range, idx_len)):
                t_idx = torch.tensor(idx, dtype=torch.long, device=device)

                # NOTE: index_select for dense does not support negative indices,
                # hence + sizes[d]. See https://github.com/pytorch/pytorch/issues/76347

                # tests the nnz > sizes[d] branch
                dense_result = t.index_select(d, t_idx + sizes[d])
                sparse_result = t_sparse.index_select(d, t_idx)
                self.assertEqual(dense_result, sparse_result)

                # tests the nnz <= sizes[d] branch
                small_dense_result = t_small.index_select(d, t_idx + sizes[d])
                small_sparse_result = t_small_sparse.index_select(d, t_idx)
                self.assertEqual(small_dense_result, small_sparse_result)
@expectedFailureMPS
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_index_select_exhaustive_index_small(self, device, dtype, coalesced):
    """Small sizes exercise the brute-force index_select implementation."""
    # will trigger brute-force algo
    self._test_index_select_exhaustive_index((3, 3, 4), range(3), device, dtype, coalesced)
@expectedFailureMPS
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_index_select_exhaustive_index_large(self, device, dtype, coalesced):
    """Larger sizes route index_select through its optimized code paths;
    only the last two (small) dims are iterated exhaustively."""
    # will trigger more sophisticated algos
    self._test_index_select_exhaustive_index((100, 50, 3, 3), (2, 3), device, dtype, coalesced)
@expectedFailureMPS
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_index_select_empty_and_non_contiguous_index(self, device, dtype, coalesced):
    """index_select with an empty index tensor and with a non-contiguous
    (strided) index tensor matches the dense result."""
    # empty index
    idx_empty = torch.tensor([], dtype=torch.long, device=device)
    t = make_tensor((5, 5), dtype=dtype, device=device)
    res_dense = t.index_select(0, idx_empty)
    res_sparse = t.to_sparse().index_select(0, idx_empty)
    self.assertEqual(res_dense, res_sparse)
    # non-contiguous index
    # Column 0 of a (10, 2) tensor: stride 2, so the index is strided.
    idx = torch.randint(low=0, high=5, size=(10, 2), device=device)[:, 0]
    def run_test(sizes):
        # case nnz > size[d]
        t = make_tensor(sizes, dtype=dtype, device=device)
        res_dense = t.index_select(0, idx)
        res_sparse = t.to_sparse().index_select(0, idx)
        self.assertEqual(res_dense, res_sparse)
        # case nnz <= size[d]
        t_small_sparse, _, _ = self._gen_sparse(len(sizes), 2, sizes, dtype, device, coalesced)
        res_sparse = t_small_sparse.index_select(0, idx)
        res_dense = t_small_sparse.to_dense().index_select(0, idx)
        self.assertEqual(res_dense, res_sparse)
    # brute-force
    run_test((10, 10))
    # more sophisticated algos
    run_test((10, 100, 100))
@onlyCPU
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_index_select_parallelization(self, device, dtype, coalesced):
    """
    Test with sizes that will trigger parallelization (i.e. with sizes
    that are >= at::internal::GRAIN_SIZE)
    """
    def run_test(nnz, size):
        t_sparse, _, _ = self._gen_sparse(1, nnz, (size,), dtype, device, coalesced)
        t_dense = t_sparse.to_dense()
        # idx_small to (sort) and (binary) search into t_sparse
        idx_small = torch.randint(size, (nnz // 2,), device=device)
        # idx_large to (sort) and (binary) search into idx_large
        # NOTE: when coalesced=True, the (binary) search will be
        # done over t_sparse anyway, as it is already sorted.
        idx_large = torch.randint(size, (nnz * 2,), device=device)
        for idx in (idx_small, idx_large):
            res_dense = t_dense.index_select(0, idx)
            res_sparse = t_sparse.index_select(0, idx)
            self.assertEqual(res_dense, res_sparse)
    # NOTE: GRAIN_SIZE = 32768
    # case nnz <= size[d]
    tlen = 70000  # > 2 * GRAIN_SIZE
    run_test(tlen, tlen)
    # case nnz > size[d]
    run_test(tlen, tlen // 2)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_mm(self, device, dtype, coalesced):
    """addmm/mm with a sparse left operand match the dense computation,
    including random alpha/beta and zero-sized shapes."""
    def test_shape(di, dj, dk, nnz):
        x, _, _ = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)
        t = torch.randn(di, dk, dtype=dtype, device=device)
        y = torch.randn(dj, dk, dtype=dtype, device=device)
        alpha = random.random()
        beta = random.random()
        # addmm with explicit scaling factors.
        res = torch.addmm(t, x, y, beta=beta, alpha=alpha)
        expected = torch.addmm(t, self.safeToDense(x), y, beta=beta, alpha=alpha)
        self.assertEqual(res, expected)
        # addmm with default alpha/beta.
        res = torch.addmm(t, x, y)
        expected = torch.addmm(t, self.safeToDense(x), y)
        self.assertEqual(res, expected)
        # plain mm.
        res = torch.mm(x, y)
        expected = torch.mm(self.safeToDense(x), y)
        self.assertEqual(res, expected)
    test_shape(10, 100, 100, 20)
    test_shape(100, 1000, 200, 20)
    test_shape(64, 10000, 300, 20)
    # Zero-sized shapes and empty nnz.
    test_shape(0, 100, 100, 0)
    test_shape(10, 0, 100, 0)
    test_shape(10, 100, 0, 0)
    test_shape(10, 100, 0, 20)
@unittest.skipIf(
IS_WINDOWS and TEST_CUDA,
"bmm sparse-dense CUDA is not yet supported in Windows, at least up to CUDA 10.1"
)
@coalescedonoff
@dtypes(torch.double)
@dtypesIfMPS(torch.float32)
def test_bmm(self, device, dtype, coalesced):
    """Batched sparse-dense matmul (bmm) agrees with per-matrix mm(),
    including zero-sized shapes, all-zero batch slices, and transposes."""
    def test_shape(num_mats, dim_i, dim_j, dim_k, nnz):
        a_list = []
        b_list = []
        for _ in range(num_mats):
            a_mat = self._gen_sparse(2, nnz, [dim_i, dim_j], dtype, device, coalesced)[0]
            b_mat = torch.randn([dim_j, dim_k], dtype=dtype, device=device)
            a_list.append(a_mat)
            b_list.append(b_mat)
        a = torch.stack(a_list)
        b = torch.stack(b_list)
        ab = a.bmm(b)
        # Compare each matrix against result from mm()
        for mat_idx in range(num_mats):
            a_mat = a_list[mat_idx]
            b_mat = b_list[mat_idx]
            ab_mat_bmm = ab[mat_idx]
            ab_mat_mm = a_mat.mm(b_mat)
            self.assertEqual(ab_mat_bmm, ab_mat_mm)
    test_shape(10, 10, 100, 99, 20)
    test_shape(10, 100, 1000, 200, 20)
    test_shape(10, 64, 10000, 300, 20)
    # Zero-sized shapes and empty nnz.
    test_shape(10, 0, 100, 99, 0)
    test_shape(10, 10, 0, 100, 0)
    test_shape(10, 10, 100, 0, 0)
    # NOTE(review): the original called test_shape(10, 10, 100, 0, 20)
    # twice verbatim (copy-paste); the redundant duplicate was removed.
    test_shape(10, 10, 100, 0, 20)
    # Batches containing all-zero matrices on either side.
    a = torch.rand([10, 23, 32], dtype=dtype, device=device)
    a[3] = torch.zeros(23, 32, dtype=dtype, device=device)
    a[6] = torch.zeros(23, 32, dtype=dtype, device=device)
    a = a.to_sparse()
    b = torch.rand([10, 32, 10], dtype=dtype, device=device)
    b[4] = torch.zeros(32, 10, dtype=dtype, device=device)
    b[6] = torch.zeros(32, 10, dtype=dtype, device=device)
    ab = a.bmm(b)
    for mat_idx in range(ab.size(0)):
        ab_mat = ab[mat_idx]
        ab_mat_check = a[mat_idx].mm(b[mat_idx])
        self.assertEqual(ab_mat, ab_mat_check)
    # (A @ B) == (B^T @ A^T)^T, exercising bmm with transposed operands.
    ab_transpose_check = b.transpose(1, 2).to_sparse().bmm(
        a.transpose(1, 2).to_dense()
    ).transpose(1, 2)
    self.assertEqual(ab, ab_transpose_check)
@onlyCUDA
@coalescedonoff
@dtypes(torch.double)
@unittest.skipIf(
IS_WINDOWS,
"bmm sparse-dense CUDA is not yet supported in Windows, at least up to CUDA 10.1"
)
def test_bmm_deterministic(self, device, dtype, coalesced):
    """The deterministic and non-deterministic CUDA sparse bmm paths
    produce (near-)identical results across a range of shapes."""
    def test_shape(num_mats, dim_i, dim_j, dim_k, nnz):
        a_list = []
        b_list = []
        for _ in range(num_mats):
            a_list.append(self._gen_sparse(2, nnz, [dim_i, dim_j], dtype, device, coalesced)[0])
            b_list.append(torch.randn([dim_j, dim_k], dtype=dtype, device=device))
        a = torch.stack(a_list).cuda()
        b = torch.stack(b_list).cuda()
        # Restore the caller's determinism setting on exit.
        with DeterministicGuard(torch.are_deterministic_algorithms_enabled()):
            torch.use_deterministic_algorithms(False)
            ab_nondeterministic = torch.bmm(a, b)
            torch.use_deterministic_algorithms(True)
            ab_deterministic = torch.bmm(a, b)
        diff_abs = (ab_deterministic - ab_nondeterministic).abs()
        diff_rel = diff_abs / ab_deterministic.abs()
        diff_rel[torch.isnan(diff_rel)] = 0
        # deterministic and non-deterministic results should either be
        # equal or within a small relative difference
        equal_abs_or_rel = diff_abs.eq(0).logical_or(diff_rel.lt(0.001))
        self.assertTrue(equal_abs_or_rel.all())
    test_shape(10, 10, 100, 99, 20)
    test_shape(10, 100, 1000, 200, 20)
    test_shape(10, 64, 10000, 300, 20)
    # Zero-sized shapes and empty nnz.
    test_shape(10, 0, 100, 99, 0)
    test_shape(10, 10, 0, 100, 0)
    test_shape(10, 10, 100, 0, 0)
    # NOTE(review): the original called test_shape(10, 10, 100, 0, 20)
    # twice verbatim (copy-paste); the redundant duplicate was removed.
    test_shape(10, 10, 100, 0, 20)
@onlyCUDA
@unittest.skipIf(
IS_WINDOWS and TEST_CUDA,
"bmm sparse-dense CUDA is not yet supported in Windows, at least up to CUDA 10.1"
)
def test_bmm_oob(self, device):
    """Sparse bmm with no nonzeros in the first batch slice must not read
    out of bounds (regression for #131977)."""
    # Targets an out of bounds error when the sparse tensor has no non-zero
    # values in the first batch dimension (#131977).
    # NOTE: This test is separated from the other bmm tests to avoid
    # interference from prior memory allocations on the device. Since CUDA
    # doesn't perform bounds checking, we need the error to cause an
    # illegal memory access (by indexing into unallocated memory) for the
    # test to fail.
    torch.cuda.empty_cache()
    # Single nonzero placed in batch 1; batch 0 is entirely empty.
    indices = torch.tensor([[1], [0], [0]], device=device)
    values = torch.tensor([1.], device=device)
    a = torch.sparse_coo_tensor(indices, values, size=(2, 1, 1))
    b = torch.zeros((2, 1, 1), device=device)
    ab = torch.bmm(a, b)
    self.assertEqual(ab, torch.zeros((2, 1, 1), device=device))
@onlyCUDA
@unittest.skipIf(
not IS_WINDOWS or not TEST_WITH_ROCM,
"this test ensures bmm sparse-dense CUDA gives an error when run on Windows with CUDA < 11.0"
)
@dtypes(torch.double)
def test_bmm_windows_error(self, device, dtype):
    """On Windows with CUDA < 11.0, sparse-dense bmm must raise a clear
    RuntimeError instead of computing silently."""
    a = torch.rand(2, 2, 2, dtype=dtype).to_sparse().cuda()
    b = torch.rand(2, 2, 2, dtype=dtype).cuda()
    with self.assertRaisesRegex(
            RuntimeError,
            "bmm sparse-dense CUDA is not supported on Windows with cuda before 11.0"):
        ab = a.bmm(b)
@onlyCPU
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_saddmm(self, device, dtype, coalesced):
    """saddmm/smm (sparse-output addmm/mm) match the corresponding dense
    addmm/mm once the result is densified."""
    def test_shape(di, dj, dk, nnz):
        x = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)[0]
        t = self._gen_sparse(2, nnz, [di, dk], dtype, device, coalesced)[0]
        y = torch.randn(dj, dk, dtype=dtype, device=device)
        alpha = random.random()
        beta = random.random()
        # saddmm with explicit scaling factors.
        res = torch.saddmm(t, x, y, beta=beta, alpha=alpha)
        expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y, beta=beta, alpha=alpha)
        self.assertEqual(self.safeToDense(res), expected)
        # saddmm with default alpha/beta.
        res = torch.saddmm(t, x, y)
        expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y)
        self.assertEqual(self.safeToDense(res), expected)
        # smm: sparse @ dense with sparse output.
        res = torch.smm(x, y)
        expected = torch.mm(self.safeToDense(x), y)
        self.assertEqual(self.safeToDense(res), expected)
    test_shape(7, 5, 3, 20)
    test_shape(1000, 100, 100, 20)
    test_shape(3000, 64, 300, 20)
    # Zero-sized shapes with empty nnz.
    test_shape(0, 100, 100, 0)
    test_shape(1000, 0, 100, 0)
    test_shape(1000, 100, 0, 0)
@onlyCPU
@coalescedonoff
# adding a graph break before self.assertFalse(weight._indices().is_contiguous())
# makes the test pass so some existent sparse related bug
@skipIfTorchDynamo("skip")
@dtypes(torch.double, torch.cdouble)
def test_sspaddmm(self, device, dtype, coalesced):
    """sspaddmm (sparse += sparse @ dense) matches dense addmm; also
    covers non-contiguous indices (regression for gh-45113)."""
    def test_shape(di, dj, dk, nnz):
        x = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)[0]
        t = self._gen_sparse(2, nnz, [di, dk], dtype, device, coalesced)[0]
        y = torch.randn(dj, dk, dtype=dtype, device=device)
        alpha = random.random()
        beta = random.random()
        # sspaddmm with explicit scaling factors.
        res = t.sspaddmm(x, y, beta=beta, alpha=alpha)
        expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y, beta=beta, alpha=alpha)
        self.assertEqual(self.safeToDense(res), expected)
        # sspaddmm with default alpha/beta.
        res = t.sspaddmm(x, y)
        expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y)
        self.assertEqual(self.safeToDense(res), expected)
    test_shape(7, 5, 3, 20)
    test_shape(1000, 100, 100, 20)
    test_shape(3000, 64, 300, 20)
    test_shape(0, 100, 100, 0)
    test_shape(1000, 0, 100, 0)
    test_shape(1000, 100, 0, 0)
    # Test code from issue https://github.com/pytorch/pytorch/issues/45113
    batch_size, input_size, hidden_size = 5, 3, 7
    # Create coalesced sparse tensor with non-contiguous indices
    weight = torch.randn(hidden_size, input_size, dtype=dtype, device=device).to_sparse()
    self.assertTrue(weight.is_coalesced())
    # Double mT produces a logically-identical but non-contiguous
    # indices tensor.
    non_contig_indices = weight.indices().mT.contiguous().mT
    weight = torch.sparse_coo_tensor(
        indices=non_contig_indices, values=weight.values(), size=weight.shape)
    weight._coalesced_(True)
    self.assertFalse(weight._indices().is_contiguous())
    # Create un/coalesced sparse tensor
    bias = torch.randn((hidden_size, 1), dtype=dtype, device=device).to_sparse()
    bias = torch.cat([bias] * batch_size, dim=1)
    if coalesced:
        bias = bias.coalesce()
    x = torch.randn(input_size, batch_size, dtype=dtype, device=device)
    res = bias.sspaddmm(weight, x)
    true_result = (bias.to_dense() + torch.matmul(weight.to_dense(), x)).to_sparse()
    self.assertEqual(self.safeToDense(res), self.safeToDense(true_result))
@coalescedonoff
@precisionOverride({torch.bfloat16: 5e-2, torch.float16: 5e-2})
@dtypes(torch.double, torch.cdouble, torch.bfloat16, torch.float16)
@dtypesIfMPS(torch.float32, torch.complex64, torch.bfloat16, torch.float16)
def test_sparse_addmm(self, device, dtype, coalesced):
    """torch.sparse.addmm matches dense addmm (including scalar D1
    broadcasting) and passes masked gradcheck for double dtypes."""
    if (dtype is torch.bfloat16 or dtype is torch.float16) and device.startswith("cuda"):
        self.skipTest('addmm_sparse_cuda is not implemented for BFloat16 and Half')
    def test_shape(m, n, p, nnz, broadcast, alpha_beta=None):
        if alpha_beta is None:
            alpha = random.random()
            beta = random.random()
        else:
            alpha, beta = alpha_beta
        # broadcast=True feeds a 0-dim D1 to exercise broadcasting.
        if broadcast:
            D1 = make_tensor((), dtype=dtype, device=device, requires_grad=True)
        else:
            D1 = make_tensor([n, p], dtype=dtype, device=device, requires_grad=True)
        D2 = make_tensor([m, p], dtype=dtype, device=device, requires_grad=True)
        S = self._gen_sparse(2, nnz, [n, m], dtype, device, coalesced)[0]
        S_dense = S.to_dense().requires_grad_(True)
        S.requires_grad_(True)
        Y = torch.sparse.addmm(D1, S, D2, beta=beta, alpha=alpha)
        Y_dense = torch.addmm(D1, S_dense, D2, beta=beta, alpha=alpha)
        self.assertEqual(Y, Y_dense)
        if dtype not in {torch.double, torch.cdouble}:
            # gradcheck will likely fail with low-precision input dtypes.
            return
        def fn(S, D1, D2, beta=beta, alpha=alpha):
            return torch.sparse.addmm(D1, S, D2, beta=beta, alpha=alpha)
        gradcheck(fn, (S, D1, D2), masked=True)
    # Cover broadcast on/off crossed with random and fixed alpha/beta.
    test_shape(7, 8, 9, 20, False, None)
    test_shape(7, 8, 9, 20, True, None)
    test_shape(7, 8, 9, 20, False, (1, 0))
    test_shape(7, 8, 9, 20, True, (1, 0))
    test_shape(7, 8, 9, 20, False, (1, 1))
    test_shape(7, 8, 9, 20, True, (1, 1))
@coalescedonoff
@dtypes(torch.double)
@dtypesIfMPS(torch.float32)
@unittest.skipIf(TEST_WITH_CROSSREF, "generator unsupported triggers assertion error")
def test_sparse_mm(self, device, dtype, coalesced):
    """torch.sparse.mm must match dense torch.mm, forward and via gradcheck."""
    def test_shape(d1, d2, d3, nnz, transposed):
        if transposed:
            # Build the dense operand transposed so it is non-contiguous.
            D = torch.randn(d3, d2, dtype=dtype,
                            device=device).t_().requires_grad_(True)
        else:
            D = torch.randn(d2, d3, dtype=dtype, device=device).requires_grad_(True)
        S = self._gen_sparse(2, nnz, [d1, d2], dtype, device, coalesced)[0]
        S_dense = S.to_dense().requires_grad_(True)
        S.requires_grad_(True)
        self.assertEqual(torch.sparse.mm(S, D), torch.mm(S_dense, D))

        def fn(S, D):
            return torch.sparse.mm(S, D)
        # MPS needs looser gradcheck tolerances.
        kwargs = {"eps": 1e-4, "atol": 2e-5} if device == "mps:0" else {}
        gradcheck(fn, (S, D), masked=True, **kwargs)

    test_shape(7, 8, 9, 20, False)
    test_shape(7, 8, 9, 20, True)
@coalescedonoff
@dtypes(torch.double)
@dtypesIfMPS(torch.float32)
@unittest.skipIf(TEST_WITH_CROSSREF, "generator unsupported triggers assertion error")
@gradcheck_semantics()
def test_sparse_mul(self, device, dtype, coalesced):
    """Elementwise sparse * sparse must match the dense product, with gradients."""
    # https://github.com/pytorch/pytorch/issues/79914
    a = torch.tensor([[0., 1]], dtype=dtype, device=device).to_sparse().requires_grad_(True)
    b = torch.tensor([[0., 1]], dtype=dtype, device=device).to_sparse().requires_grad_(True)
    gradcheck(lambda x, y: torch.sparse.sum(x * y).to_dense(masked_grad=gradcheck.masked), [a, b], eps=1e-4)

    def test_shape(sparse_dims, nnz, with_shape):
        a = self._gen_sparse(sparse_dims, nnz, with_shape, dtype, device, coalesced)[0].requires_grad_(True)
        b = self._gen_sparse(sparse_dims, nnz, with_shape, dtype, device, coalesced)[0].requires_grad_(True)
        self.assertEqual((a * b).to_dense(), a.to_dense() * b.to_dense())
        gradcheck(lambda x, y: (x * y).to_dense(), [a, b], eps=1e-4)
        # Issues with 0-dim indices/values
        gradcheck(lambda x, y: torch.sparse.sum(x * y).to_dense(), [a, b], masked=True, eps=3e-4, atol=5e-5)

    test_shape(2, 3, [2, 3, 4, 5])
    test_shape(2, 3, [2, 2, 0])
    test_shape(2, 3, [4, 5])
@coalescedonoff
@dtypes(torch.double)
@dtypesIfMPS(torch.float32)
def test_dsmm(self, device, dtype, coalesced):
    """torch.dsmm (sparse @ dense -> dense) must match a dense mm reference."""
    def check(rows, inner, cols, nnz):
        sparse_lhs = self._gen_sparse(2, nnz, [rows, inner], dtype, device, coalesced)[0]
        dense_rhs = self.randn(inner, cols, dtype=dtype, device=device)
        reference = torch.mm(self.safeToDense(sparse_lhs), dense_rhs)
        self.assertEqual(torch.dsmm(sparse_lhs, dense_rhs), reference)

    # Shapes cover large inputs, empty dimensions, and nnz == 0 corner cases.
    for args in [(7, 5, 3, 20), (1000, 100, 100, 20), (3000, 64, 300, 20),
                 (0, 100, 100, 0), (1000, 0, 100, 0), (1000, 100, 0, 0),
                 (1000, 100, 0, 20)]:
        check(*args)
@coalescedonoff
@expectedFailureMPS
@dtypes(torch.double)
@dtypesIfMPS(torch.float32)
def test_hsmm(self, device, dtype, coalesced):
    """torch.hsmm (sparse @ dense -> sparse) must match a dense mm reference."""
    def check(rows, inner, cols, nnz):
        sparse_lhs = self._gen_sparse(2, nnz, [rows, inner], dtype, device, coalesced)[0]
        dense_rhs = self.randn(inner, cols, dtype=dtype, device=device)
        reference = torch.mm(self.safeToDense(sparse_lhs), dense_rhs)
        # hsmm returns a sparse result; densify before comparing.
        self.assertEqual(torch.hsmm(sparse_lhs, dense_rhs).to_dense(), reference)

    # Shapes cover large inputs, empty dimensions, and nnz == 0 corner cases.
    for args in [(7, 5, 3, 20), (1000, 100, 100, 20), (3000, 64, 300, 20),
                 (0, 100, 100, 0), (1000, 0, 100, 0), (1000, 100, 0, 0),
                 (1000, 100, 0, 20)]:
        check(*args)
@coalescedonoff
@dtypes(torch.double)
@dtypesIfMPS(torch.float32)
def test_spadd(self, device, dtype, coalesced):
    """torch.add(dense, sparse, alpha=...) must match the dense reference for
    contiguous and non-contiguous dense, index, and value layouts."""
    def _test_spadd_shape(nnz, shape_i, shape_v=None):
        shape = shape_i + (shape_v or [])
        x, _, _ = self._gen_sparse(len(shape_i), nnz, shape, dtype, device, coalesced)
        y = self.randn(*shape, dtype=dtype, device=device)
        r = random.random()

        res = torch.add(y, x, alpha=r)
        expected = y + r * self.safeToDense(x)

        self.assertEqual(res, expected)

        # Non contiguous dense tensor
        s = list(shape)
        s[0] = shape[-1]
        s[-1] = shape[0]
        y = self.randn(*s, dtype=dtype, device=device)
        y.transpose_(0, len(s) - 1)
        r = random.random()

        res = torch.add(y, x, alpha=r)
        expected = y + r * self.safeToDense(x)

        self.assertEqual(res, expected)

        x, i, v = self._gen_sparse(len(shape_i), nnz, shape, dtype, device, coalesced)
        nnz = i.size(1)

        # Non contiguous sparse indices tensor
        x_ = self.sparse_tensor(i[:, ::2], v[:(nnz + 1) // 2], x.shape, dtype=dtype, device=device)
        res = torch.add(y, x_, alpha=r)
        expected = y + r * self.safeToDense(x_)
        self.assertEqual(res, expected)

        # Non contiguous sparse values tensor
        x_ = self.sparse_tensor(i[:, :(nnz + 1) // 2], v[::2], x.shape, dtype=dtype, device=device)
        res = torch.add(y, x_, alpha=r)
        expected = y + r * self.safeToDense(x_)
        self.assertEqual(res, expected)

        # Non contiguous sparse indices and values tensors
        x_ = self.sparse_tensor(i[:, 1::2], v[1::2], x.shape, dtype=dtype, device=device)
        res = torch.add(y, x_, alpha=r)
        expected = y + r * self.safeToDense(x_)
        self.assertEqual(res, expected)

    def _test_spadd():
        # Plain sparse inputs, including empty sparse dimensions.
        _test_spadd_shape(10, [5, 6])
        _test_spadd_shape(10, [10, 10, 10])
        _test_spadd_shape(10, [50, 30, 20])
        _test_spadd_shape(10, [5, 5, 5, 5, 5, 5])
        _test_spadd_shape(0, [0, 30, 20])
        _test_spadd_shape(0, [50, 0, 20])
        _test_spadd_shape(0, [50, 30, 0])

    def _test_spadd_hybrid():
        # Hybrid tensors: trailing dense dimensions on the values.
        _test_spadd_shape(10, [5, 6], [2, 3])
        _test_spadd_shape(10, [10, 10, 10], [3])
        _test_spadd_shape(10, [50, 30, 20], [2])
        _test_spadd_shape(10, [5, 5, 5, 5, 5, 5], [2])
        _test_spadd_shape(0, [0, 30, 20], [2, 0])
        _test_spadd_shape(0, [50, 0, 20], [2, 0])
        _test_spadd_shape(0, [50, 30, 0], [2, 0])
        _test_spadd_shape(10, [50, 30, 20], [2, 0])

    _test_spadd()
    _test_spadd_hybrid()
@coalescedonoff
@dtypes(torch.float)
def test_sparse_add_out_bfloat16(self, device, dtype, coalesced):
    """Adding two sparse bfloat16 tensors should roughly match the fp32 sum."""
    lhs = self._gen_sparse(3, 5, 10, dtype, device, coalesced)[0]
    rhs = self._gen_sparse(3, 5, 10, dtype, device, coalesced)[0]
    reference_fp32 = torch.add(lhs, rhs)
    # Redo the sum in reduced precision and compare with a loose tolerance.
    sum_bf16 = torch.add(lhs.bfloat16(), rhs.bfloat16()).float()
    self.assertEqual(reference_fp32, sum_bf16, atol=1e-2, rtol=0)
@coalescedonoff
@expectedFailureMPSComplex
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_norm(self, device, dtype, coalesced):
    """Full-reduction norm on sparse tensors, plus errors for unsupported args."""
    def test_shape(sparse_dims, nnz, with_size):
        x, _, _ = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
        y = x.coalesce()
        # The norm of the sparse tensor equals the norm of its coalesced values.
        self.assertEqual(x.norm(), y._values().norm())

    test_shape(3, 10, 100)
    test_shape(4, 10, [100, 100, 100, 5, 5, 5, 0])
    test_shape(4, 0, [0, 0, 100, 5, 5, 5, 0])

    # Unsupported arguments should error
    kwarg_error_pairs = [
        ({'keepdim': True},
         RuntimeError, r'norm_sparse currently does not support keepdim=True'),
        ({'dim': 0},
         RuntimeError, r'norm_sparse currently only supports full reductions'),
        ({'dtype': torch.double, 'p': 'fro'},
         ValueError, r'dtype argument is not supported in frobenius norm'),
        ({'dtype': torch.double, 'p': 0},
         RuntimeError, r"norm_sparse currently does not support 'dtype' argument")
    ]
    x = self._gen_sparse(3, 10, 100, dtype, device, coalesced)[0]
    for kwargs, err, msg in kwarg_error_pairs:
        with self.assertRaisesRegex(err, msg):
            x.norm(**kwargs)
@coalescedonoff
@expectedFailureMPS
@dtypes(torch.double)
@dtypesIfMPS(torch.float32)
@unittest.skipIf(TEST_WITH_CROSSREF, "fallback triggers cuda device error")
def test_sparse_sum(self, device, dtype, coalesced):
    """torch.sparse.sum (full and per-dim reductions) against dense sum,
    including gradcheck, error cases, and empty-tensor behavior."""
    def run_tests(S, td=None):
        D = S.coalesce().to_dense().detach().requires_grad_(True)
        if td is None:
            # Full reduction yields a scalar.
            S_sum = torch.sparse.sum(S)
            D_sum = D.sum()
            self.assertEqual(S_sum.item(), D_sum.item())

            def fn(S):
                return torch.sparse.sum(S)
            gradcheck(fn, (S,), masked=True)
        else:
            # Reduction over the given dims may stay sparse or become dense.
            S_sum = torch.sparse.sum(S, td)
            D_sum = D.sum(td)
            self.assertEqual(S_sum.to_dense() if S_sum.is_sparse else S_sum, D_sum)

            def fn(S):
                res = torch.sparse.sum(S, td)
                return res.to_dense(masked_grad=True)
            gradcheck(fn, (S,), masked=True)

    nnz = 10
    sparse_dims = 2
    with_size = [5, 5, 1, 4]  # use a dense dim = 1 to test for squeeze
    test_dims = []
    # All dim subsets of sizes 1..4 are exercised below.
    for i in range(1, 5):
        test_dims += itertools.combinations(range(len(with_size)), i)

    # https://github.com/pytorch/pytorch/issues/16501
    x = torch.tensor([[1., 0., 0., 1.],
                      [0., 1., 0., 0.],
                      [0., 1., 1., 0.],
                      [0., 1., 0., 2.]], dtype=dtype, device=device).to_sparse()
    self.assertEqual(torch.sparse.sum(x, dim=0), torch.sparse.sum(x, dim=-2))
    self.assertEqual(torch.sum(x.to_dense(), dim=0), torch.sparse.sum(x, dim=0).to_dense())

    S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]

    # dim out of range
    self.assertRaises(IndexError, lambda: torch.sparse.sum(S, 5))

    # dim 0 appears multiple times in the list of dims
    self.assertRaises(RuntimeError, lambda: torch.sparse.sum(S, [0, 0]))

    # sum an empty tensor
    empty_S = torch.sparse_coo_tensor(size=with_size, dtype=dtype, device=device)
    self.assertEqual(torch.sparse.sum(empty_S, [0]).to_dense(), torch.sum(empty_S.to_dense(), [0]))
    self.assertEqual(torch.sparse.sum(empty_S), torch.tensor(0, dtype=dtype, device=device))
    empty_S.requires_grad_(True)
    empty_S_sum = torch.sparse.sum(empty_S)
    empty_S_sum.backward()
    self.assertEqual(empty_S.grad.to_dense(), empty_S.detach().clone().to_dense())

    # test values().sum()
    S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
    run_tests(S.requires_grad_(True))

    for test_dim in test_dims:
        S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
        run_tests(S.requires_grad_(True), test_dim)
def _test_basic_ops_shape(self, nnz_x1, nnz_x2, shape_i, shape_v, dtype, device, coalesced):
    """Exercise +, -, *, scalar mul/div/floordiv, pow, zero_() and coalesce
    semantics for one sparse (optionally hybrid) shape, comparing each
    out-of-place and in-place result against the dense reference."""
    shape = shape_i + (shape_v)
    x1, _, _ = self._gen_sparse(len(shape_i), nnz_x1, shape, dtype, device, coalesced)
    x2, _, _ = self._gen_sparse(len(shape_i), nnz_x2, shape, dtype, device, coalesced)

    # sparse + sparse (out-of-place and in-place)
    y1 = x1 + x2
    y2 = x1.clone()
    y2.add_(x2)
    expected = self.safeToDense(x1) + self.safeToDense(x2)
    self.assertEqual(self.safeToDense(y1), expected)
    self.assertEqual(self.safeToDense(y2), expected)

    # sparse - sparse
    y1 = x1 - x2
    y2 = x1.clone()
    y2.sub_(x2)
    expected = self.safeToDense(x1) - self.safeToDense(x2)
    self.assertEqual(self.safeToDense(y1), expected)
    self.assertEqual(self.safeToDense(y2), expected)

    # elementwise sparse * sparse
    y1 = x1 * x2
    y2 = x1.clone()
    y2.mul_(x2)
    expected = self.safeToDense(x1) * self.safeToDense(x2)
    self.assertEqual(self.safeToDense(y1), expected)
    self.assertEqual(self.safeToDense(y2), expected)

    # sparse * scalar
    y1 = x1 * 37.5
    y2 = x1.clone()
    y2.mul_(37.5)
    expected = self.safeToDense(x1) * 37.5
    self.assertEqual(self.safeToDense(y1), expected)
    self.assertEqual(self.safeToDense(y2), expected)

    # sparse / scalar
    y1 = x1 / 37.5
    y2 = x1.clone()
    y2.div_(37.5)
    expected = self.safeToDense(x1) / 37.5
    self.assertEqual(self.safeToDense(y1), expected)
    self.assertEqual(self.safeToDense(y2), expected)

    # sparse // scalar
    y1 = x1 // 37.5
    y2 = x1.clone()
    y2.floor_divide_(37.5)
    expected = self.safeToDense(x1) // 37.5
    self.assertEqual(self.safeToDense(y1), expected)
    self.assertEqual(self.safeToDense(y2), expected)

    # TODO: add back inplace support
    y1 = x1 ** 2
    y2 = x1.clone()
    y2 = y2.pow(2)
    expected = self.safeToDense(x1) ** 2
    self.assertEqual(self.safeToDense(y1), expected)
    self.assertEqual(self.safeToDense(y2), expected)

    # zero_() empties the tensor in place
    y = x1.clone()
    y.zero_()
    expected = torch.zeros(x1.size(), dtype=dtype, device=device)
    self.assertEqual(self.safeToDense(y), expected)

    self.assertEqual(x1.is_coalesced(), coalesced)
    y = x1.coalesce()
    z = x1.coalesce()
    self.assertEqual(x1.is_coalesced(), coalesced)
    self.assertTrue(y.is_coalesced())
    y._values().add_(1)
    if not x1.is_coalesced():
        # check that coalesce is out of place if the original tensor is not
        # coalesced.
        self.assertEqual(z._values() + 1, y._values())
    else:
        # check that coalesce is in-place if the original tensor is
        # coalesced.
        self.assertEqual(z._values(), y._values())
@coalescedonoff
@dtypes(torch.double)
@dtypesIfMPS(torch.float32)
def test_basic_ops(self, device, dtype, coalesced):
    """Run the elementwise-op battery over plain and hybrid sparse shapes."""
    # Each entry: (nnz_x1, nnz_x2, sparse shape, dense-value shape).
    plain_cases = [
        (9, 12, [5, 6], []),
        (9, 12, [10, 10, 10], []),
        (9, 12, [50, 30, 20], []),
        (9, 12, [5, 5, 5, 5, 5, 5], []),
        (0, 12, [10, 10, 10], []),
        (9, 0, [10, 10, 10], []),
        (0, 0, [10, 10, 10], []),
        (0, 0, [10, 10, 0], []),
        (0, 0, [], []),
    ]
    hybrid_cases = [
        (9, 12, [5, 6], [2, 3]),
        (9, 12, [10, 10, 10], [3]),
        (9, 12, [50, 30, 20], [2]),
        (9, 12, [5, 5, 5, 5, 5, 5], [2]),
        (0, 12, [10, 10, 10], [2]),
        (9, 0, [10, 10, 10], [2]),
        (0, 0, [10, 10, 10], [2]),
        (9, 12, [10, 10, 10], [2, 0]),
        (0, 12, [10, 10, 10], [2, 0]),
        (9, 0, [10, 10, 10], [2, 0]),
        (0, 0, [10, 10, 10], [2, 0]),
        (0, 0, [10, 10, 0], [2, 0]),
    ]
    for nnz_x1, nnz_x2, shape_i, shape_v in plain_cases + hybrid_cases:
        self._test_basic_ops_shape(nnz_x1, nnz_x2, shape_i, shape_v, dtype, device, coalesced)
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_add_dense_sparse_mismatch(self, device, dtype):
    """dense + sparse with mismatched sizes must raise a shape error."""
    def check(dense_size, sparse_dims_shape, dense_dims_shape, sparse_size):
        dense = torch.zeros(dense_size, dtype=dtype, device=device)
        mismatched = self.sparse_tensor(
            torch.zeros(sparse_dims_shape, dtype=torch.int64, device=device),
            torch.randn(dense_dims_shape, dtype=dtype, device=device),
            torch.Size(sparse_size))
        with self.assertRaisesRegex(
                RuntimeError,
                "add: expected 'self' and 'other' to have same size"):
            dense + mismatched

    check([3, 4], [1, 4], [4, 4, 4], [3, 4, 4])
    check([3, 4, 0], [1, 4], [4, 4, 4, 0], [3, 4, 4, 0])
@skipIfTorchDynamo("Not a TorchDynamo suitable test")
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_add_noncontiguous(self, device, dtype):
    """Addition must cope with non-contiguous (expanded) sparse values."""
    idx = self.index_tensor([[1, 2], [0, 2]], device=device)
    # expand() yields stride-0 values, i.e. non-contiguous storage.
    vals = torch.tensor([1.], dtype=dtype, device=device).expand(2, 3, 4, 5)
    sp = self.sparse_tensor(idx, vals, dtype=dtype, device=device)
    assert not sp._values().is_contiguous()
    doubled = sp + sp
    reference = self.safeToDense(sp) + self.safeToDense(sp)
    self.assertEqual(self.safeToDense(doubled), reference)
def _test_sparse_mask_shape(self, nnz_x1, nnz_x2, shape_i, shape_v, dtype, device, coalesced):
    """Sanity-check sparse addition (out-of-place and in-place) for one shape.

    Used by the sparse_mask tests as a shape-sweep smoke test.
    """
    full_shape = shape_i + (shape_v or [])
    lhs = self._gen_sparse(len(shape_i), nnz_x1, full_shape, dtype, device, coalesced)[0]
    rhs = self._gen_sparse(len(shape_i), nnz_x2, full_shape, dtype, device, coalesced)[0]
    out_of_place = lhs + rhs
    in_place = lhs.clone()
    in_place.add_(rhs)
    reference = self.safeToDense(lhs) + self.safeToDense(rhs)
    self.assertEqual(self.safeToDense(out_of_place), reference)
    self.assertEqual(self.safeToDense(in_place), reference)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_sparse_mask(self, device, dtype, coalesced):
    """sparse_mask with dense and sparse lhs, including empty dense dims,
    intersection semantics, and preservation of the coalesce flag."""
    def _test_sparse_mask_fixed():
        i = self.index_tensor([
            [1, 3, 0, 4],
            [2, 1, 2, 3],
        ], device=device)
        v = torch.tensor([1, 2, 3, 4], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([5, 4]), dtype=dtype, device=device).coalesce()
        dense = torch.tensor([
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12],
            [13, 14, 15, 16],
            [17, 18, 19, 20],
        ], dtype=dtype, device=device)
        # Values of `dense` picked at x's index positions.
        exp_v = torch.tensor([7, 14, 3, 20], dtype=dtype, device=device)
        res_dense_lhs = dense.sparse_mask(x)
        sparse = dense.to_sparse()
        res_sparse_lhs = sparse.sparse_mask(x)
        expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4]), dtype=dtype, device=device)
        self.assertEqual(res_dense_lhs.coalesce(), expected.coalesce())
        # check no side effects for the coalesce flag.
        self.assertTrue(sparse.is_coalesced())
        self.assertEqual(res_sparse_lhs.coalesce(), expected.coalesce())

        # Same checks with an empty trailing dense dimension.
        i = self.index_tensor([
            [1, 3, 0, 4],
            [2, 1, 2, 3],
        ], device=device)
        v = torch.empty([4, 0], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([5, 4, 0])).coalesce()
        dense = torch.empty([5, 4, 0], dtype=dtype, device=device)
        exp_v = torch.empty([4, 0], dtype=dtype, device=device)
        res_dense_lhs = dense.sparse_mask(x)
        sparse = dense.to_sparse(2)
        res_sparse_lhs = sparse.sparse_mask(x)
        expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 0]), dtype=dtype, device=device)
        self.assertEqual(res_dense_lhs.coalesce(), expected.coalesce())
        # check no side effects for the coalesce flag.
        self.assertTrue(sparse.is_coalesced())
        self.assertEqual(res_sparse_lhs.coalesce(), expected.coalesce())

    _test_sparse_mask_fixed()

    self._test_sparse_mask_shape(9, 12, [5, 6], [], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 12, [10, 10, 10], [], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 12, [50, 30, 20], [], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 12, [5, 5, 5, 5, 5, 5], [], dtype, device, coalesced)
    self._test_sparse_mask_shape(0, 12, [10, 10, 10], [], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 0, [10, 10, 10], [], dtype, device, coalesced)
    self._test_sparse_mask_shape(0, 0, [10, 10, 10], [], dtype, device, coalesced)
    self._test_sparse_mask_shape(0, 0, [10, 10, 0], [], dtype, device, coalesced)

    # check repetitions and matchings in the intersection
    lhs = torch.randint(0, 5, (100,), device=device)
    rhs = torch.randint(0, 5, (100,), device=device).to_sparse()
    self.assertEqual(lhs.to_sparse().sparse_mask(rhs), lhs.sparse_mask(rhs))

    # check coalesce
    sparse_c = torch.rand(3, 3, device=device).to_sparse()
    sparse_unc = torch.rand(3, 3, device=device).to_sparse()._coalesced_(False)
    for lhs, rhs in [(sparse_c, sparse_unc), (sparse_unc, sparse_c)]:
        res_all_sparse = lhs.sparse_mask(rhs)
        res_dense_sparse = lhs.to_dense().sparse_mask(rhs)
        self.assertEqual(res_all_sparse.coalesce(), res_dense_sparse.coalesce())
        self.assertEqual(rhs.is_coalesced(), res_all_sparse.is_coalesced())
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_sparse_mask_hybrid(self, device, dtype, coalesced):
    """sparse_mask on hybrid tensors (trailing dense dims on values).

    NOTE(review): the fixed-value tensors below are built without the
    `device`/`dtype` arguments, so they presumably land on the default
    device in the default dtype — confirm this is intentional/legacy.
    """
    def _test_sparse_mask_hybrid_fixed():
        i = self.index_tensor([
            [1, 3, 0, 4],
            [2, 1, 2, 3],
        ])
        v = torch.tensor([[1, 2], [2, 3], [3, 4], [4, 5]])
        # TODO: This is also testing that, if coalesce is a no-op,
        # the indices don't get permuted. I don't know if we actually
        # want to give this invariant.
        x = self.sparse_tensor(i, v, torch.Size([5, 4, 2])).coalesce()
        dense = torch.tensor([
            [[1, 3], [2, 2], [3, 3], [4, 2]],
            [[5, 7], [6, 7], [7, 9], [8, 9]],
            [[9, 2], [10, 4], [11, 1], [12, 3]],
            [[13, 5], [14, 1], [15, 1], [16, 6]],
            [[17, 7], [18, 2], [19, 7], [20, 1]],
        ])
        res_dense_lhs = dense.sparse_mask(x)
        sparse = dense.to_sparse(2)
        res_sparse_lhs = sparse.sparse_mask(x)
        # Rows of `dense` picked at x's sparse index positions.
        exp_v = torch.tensor([[7, 9], [14, 1], [3, 3], [20, 1]])
        expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 2]))
        self.assertEqual(res_dense_lhs.coalesce(), expected.coalesce())
        # check no side effects for the coalesce flag
        self.assertTrue(sparse.is_coalesced())
        self.assertEqual(res_sparse_lhs.coalesce(), expected.coalesce())

        # Same checks with an empty trailing dense dimension.
        i = self.index_tensor([
            [1, 3, 0, 4],
            [2, 1, 2, 3],
        ])
        v = torch.empty(4, 2, 0)
        x = self.sparse_tensor(i, v, torch.Size([5, 4, 2, 0])).coalesce()
        dense = torch.empty(5, 4, 2, 0)
        res_dense_lhs = dense.sparse_mask(x)
        sparse = dense.to_sparse(2)
        res_sparse_lhs = sparse.sparse_mask(x)
        exp_v = torch.empty(4, 2, 0)
        expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 2, 0]))
        self.assertEqual(res_dense_lhs.coalesce(), expected.coalesce())
        # check no side effects for the coalesce flag
        self.assertTrue(sparse.is_coalesced())
        self.assertEqual(res_sparse_lhs.coalesce(), expected.coalesce())

    _test_sparse_mask_hybrid_fixed()

    self._test_sparse_mask_shape(9, 12, [5, 6], [2, 3], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 12, [10, 10, 10], [3], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 12, [50, 30, 20], [2], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 12, [5, 5, 5, 5, 5, 5], [2], dtype, device, coalesced)
    self._test_sparse_mask_shape(0, 12, [10, 10, 10], [2], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 0, [10, 10, 10], [2], dtype, device, coalesced)
    self._test_sparse_mask_shape(0, 0, [10, 10, 10], [2], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 12, [10, 10, 10], [2, 0], dtype, device, coalesced)
    self._test_sparse_mask_shape(0, 12, [10, 10, 10], [2, 0], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 0, [10, 10, 10], [2, 0], dtype, device, coalesced)
    self._test_sparse_mask_shape(0, 0, [10, 10, 10], [2, 0], dtype, device, coalesced)
    self._test_sparse_mask_shape(0, 0, [10, 10, 0], [2, 0], dtype, device, coalesced)
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
@skipIfCrossRef
def test_sparse_mask_backward(self, device, dtype):
    """Gradcheck sparse_mask across nnz sizes and all coalesced-state combos."""
    from itertools import product, repeat

    shape = (5, 5)
    sparse_dims = len(shape)
    nnzs = (0, 5, 15, 25)

    lhs_data = torch.arange(1, 26, device=device).reshape(shape).to(dtype).to_sparse(sparse_dims)
    for nnz in nnzs:
        for lhs_is_coalesced, rhs_is_coalesced in product(*repeat((True, False), 2)):
            # lhs takes the first nnz entries and rhs the last nnz entries,
            # so their sparsity patterns only partially overlap.
            lhs = torch.sparse_coo_tensor(
                lhs_data._indices()[:, :nnz],
                lhs_data._values()[:nnz],
                lhs_data.shape
            ).clone()._coalesced_(lhs_is_coalesced).requires_grad_(True)

            rhs = torch.sparse_coo_tensor(
                lhs_data._indices()[:, -nnz:],
                lhs_data._values()[-nnz:],
                lhs_data.shape
            ).clone()._coalesced_(rhs_is_coalesced)

            # To test masked semantics we need to make sure that
            # sparsity_pattern(lhs) == sparsity_pattern(lhs.grad).
            # lhs.sparse_mask(lhs_mask) accomplishes that.
            lhs_mask = lhs.detach().clone()
            gradcheck(lambda x: x.sparse_mask(lhs_mask).sparse_mask(rhs).to_dense(masked_grad=True), (lhs,),
                      masked=True, eps=3e-4, atol=5e-5)
            gradcheck(lambda x: x.sparse_mask(rhs).to_dense(masked_grad=False), (lhs,), masked=False, eps=3e-4, atol=5e-5)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_zeros(self, device, dtype, coalesced):
    """torch.zeros(..., out=sparse) must reset the output to an empty sparse
    tensor with the requested shape."""
    def _test_zeros(nnzs, shape, out_shape_i, out_shape_v=None):
        out_shape = out_shape_i + (out_shape_v or [])
        for nnz in nnzs:
            out, _, _ = self._gen_sparse(len(out_shape_i), nnz, out_shape, dtype, device, coalesced)
            torch.zeros(*shape, out=out, dtype=dtype, device=device)
            self.assertEqual(tuple(out.size()), tuple(shape))
            # All explicit entries must be gone after zeroing.
            self.assertTrue(out._indices().numel() == out._values().numel() == 0)
            self.assertEqual(out._nnz(), 0)
            self.assertEqual(out.sparse_dim(), len(shape))
            self.assertEqual(out.dense_dim(), 0)

    def test_shape(i_shapes, v_shapes, shape, nnzs):
        # Sweep every prefix of the sparse/dense shape templates.
        for i_dim in range(1, len(i_shapes) + 1):
            for v_dim in range(len(v_shapes) + 1):
                _test_zeros(nnzs, shape, i_shapes[:i_dim], v_shapes[:v_dim])

    test_shape([2, 3, 4], [3, 4, 5, 6], [2, 3, 4], [9, 12])
    test_shape([0, 3, 4], [3, 4, 5, 6], [2, 3, 4], [0])
    test_shape([2, 3, 4], [0, 4, 5, 6], [2, 3, 4], [9, 12])
    test_shape([2, 3, 4], [3, 4, 5, 6], [2, 3, 0], [9, 12])
    test_shape([0, 3, 4], [3, 4, 5, 6], [2, 3, 0], [0])
    test_shape([2, 3, 4], [0, 4, 5, 6], [2, 3, 0], [9, 12])
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_zeros_like(self, device, dtype, coalesced):
    """zeros_like on sparse input yields an empty (nnz == 0) sparse tensor of
    the same shape and sparse/dense dims, rejects memory-format options for
    sparse layouts, and supports layout overrides in both directions
    (sparse -> strided and strided -> sparse)."""
    def _test_zeros_like(nnzs, template_shape_i, template_shape_v=None):
        template_shape_v = template_shape_v or []
        template_shape = template_shape_i + template_shape_v
        for nnz in nnzs:
            t, _, _ = self._gen_sparse(len(template_shape_i), nnz, template_shape, dtype, device, coalesced)
            res = torch.zeros_like(t)
            self.assertEqual(tuple(res.size()), tuple(template_shape))
            # A fresh zeros tensor stores no explicit entries at all.
            self.assertTrue(res._indices().numel() == res._values().numel() == 0)
            self.assertEqual(res._nnz(), 0)
            self.assertEqual(res.sparse_dim(), len(template_shape_i))
            self.assertEqual(res.dense_dim(), len(template_shape_v))

    def test_shape(i_shapes, v_shapes, nnzs):
        # Sweep every prefix of the sparse/dense shape templates.
        for i_dim in range(1, len(i_shapes) + 1):
            for v_dim in range(len(v_shapes) + 1):
                _test_zeros_like(nnzs, i_shapes[:i_dim], v_shapes[:v_dim])

    # These three calls were previously duplicated verbatim; the duplicates
    # added no coverage and have been removed.
    test_shape([2, 3, 4], [3, 4, 5, 6], [9, 12])
    test_shape([0, 3, 4], [3, 4, 5, 6], [0])
    test_shape([2, 3, 4], [0, 4, 5, 6], [9, 12])

    sparse_tensor, _, _ = self._gen_sparse(len([2, 3]), 9, [2, 3] + [5, 6], dtype, device, coalesced)
    data = (sparse_tensor, sparse_tensor, sparse_tensor, sparse_tensor.unsqueeze(0))
    mem_formats = [torch.channels_last, torch.contiguous_format, torch.preserve_format, torch.channels_last_3d]
    for x, mem_format in zip(data, mem_formats):
        # Sparse layouts carry no memory format...
        with self.assertRaisesRegex(RuntimeError, "memory format option is only supported by strided tensors"):
            result = torch.zeros_like(x, memory_format=mem_format)
        # ...but requesting a strided result together with a format is fine.
        result = torch.zeros_like(x, layout=torch.strided, memory_format=mem_format)
        self.assertTrue(result.layout == torch.strided)

    # strided -> sparse layout override.
    dense_tensor = sparse_tensor.to_dense()
    result = torch.zeros_like(dense_tensor, layout=torch.sparse_coo)
    self.assertEqual(dense_tensor.shape, result.shape)
    self.assertEqual(result.layout, torch.sparse_coo)

    sparse_zeros = torch.sparse_coo_tensor(dense_tensor.shape)
    self.assertEqual(result._indices().shape, sparse_zeros._indices().shape)
    self.assertEqual(result._values().shape, sparse_zeros._values().shape)
def _assert_sparse_invars(self, t):
    """Assert the structural invariants of the COO sparse tensor *t*."""
    # SparseTensor has the following invariants:
    # - sparse_dim + dense_dim = len(SparseTensor.shape)
    # - SparseTensor._indices().shape = (sparse_dim, nnz)
    # - SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
    self.assertEqual(t.sparse_dim() + t.dense_dim(), len(t.shape))
    self.assertEqual(tuple(t._indices().shape), (t.sparse_dim(), t._nnz()))
    self.assertEqual(tuple(t._values().shape), (t._nnz(), ) + t.shape[t.sparse_dim():])
def _test_empty_like(self, sparse_tensor, dtype, device, coalesced):
    """Check empty_like on *sparse_tensor*, then exercise the memory-format
    and layout-override error paths on a freshly generated hybrid tensor."""
    result = torch.empty_like(sparse_tensor)
    self.assertTrue(result.is_sparse)
    self._assert_sparse_invars(result)
    self.assertEqual(result.shape, sparse_tensor.shape)
    self.assertEqual(result.dtype, sparse_tensor.dtype)
    self.assertEqual(result.device, sparse_tensor.device)
    self.assertEqual(result.sparse_dim(), sparse_tensor.sparse_dim())
    self.assertEqual(result.dense_dim(), sparse_tensor.dense_dim())

    # NOTE(review): the parameter is shadowed from here on — the remaining
    # checks operate on a locally generated tensor, not the caller's input.
    sparse_tensor, _, _ = self._gen_sparse(len([2, 3]), 9, [2, 3] + [5, 6], dtype, device, coalesced)
    data = (sparse_tensor, sparse_tensor, sparse_tensor, sparse_tensor.unsqueeze(0))
    mem_formats = [torch.channels_last, torch.contiguous_format, torch.preserve_format, torch.channels_last_3d]
    for x, mem_format in zip(data, mem_formats):
        # Sparse layouts carry no memory format...
        with self.assertRaisesRegex(RuntimeError, "memory format option is only supported by strided tensors"):
            result = torch.empty_like(x, memory_format=mem_format)
        # ...but a strided result with an explicit format is allowed.
        result = torch.empty_like(x, layout=torch.strided, memory_format=mem_format)
        self.assertTrue(result.layout == torch.strided)

    with self.assertRaisesRegex(
        RuntimeError, r"Could not run 'aten::empty_strided' with arguments from the 'Sparse(CPU|CUDA|MPS)' backend"
    ):
        dense_tensor = sparse_tensor.to_dense()
        result = torch.empty_like(dense_tensor, layout=torch.sparse_coo)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_empty_like(self, device, dtype, coalesced):
    """empty_like on coalesced, hybrid, uncoalesced, and empty sparse inputs."""
    # tests https://github.com/pytorch/pytorch/issues/43699
    if coalesced:
        input_coalesced = torch.sparse_coo_tensor(
            indices=torch.tensor([[0, 1, 2]]),
            values=torch.tensor([3.0, -4.0, 5.0]),
            size=[3, ],
            dtype=dtype,
            device=device
        ).coalesce()
        self._test_empty_like(input_coalesced, dtype, device, coalesced)

        # hybrid sparse input
        input_coalesced = torch.sparse_coo_tensor(
            indices=torch.tensor([[1, 3], [2, 4]]),
            values=torch.tensor([[-1.0, 3.0], [-5.0, 7.0]]),
            size=[4, 5, 2],
            dtype=dtype,
            device=device
        ).coalesce()
        self._test_empty_like(input_coalesced, dtype, device, coalesced)

    if not coalesced:
        # test uncoalesced input
        input_uncoalesced = torch.sparse_coo_tensor(
            indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
            values=torch.tensor([2.0, -3.0, -4.0, 1.0, -1.0, 1.5]),
            size=[3, ],
            dtype=dtype,
            device=device
        )
        self._test_empty_like(input_uncoalesced, dtype, device, coalesced)

        # test on empty sparse tensor
        input_uncoalesced = torch.sparse_coo_tensor(
            indices=torch.zeros([2, 0]),
            values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
            size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
            dtype=dtype,
            device=device
        )
        self._test_empty_like(input_uncoalesced, dtype, device, coalesced)
def _test_narrow(self, input, narrow_args):
    """narrow_copy on a sparse tensor must agree with dense narrow."""
    dense_reference = input.to_dense().narrow(*narrow_args)
    self.assertEqual(dense_reference, input.narrow_copy(*narrow_args).to_dense())
def _all_narrow_combs(self, shape):
for dim, dim_sz in enumerate(shape):
for start in range(dim_sz):
for length in range(dim_sz - start):
yield [dim, start, length]
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_narrow(self, device, dtype, coalesced):
    """narrow_copy over every (dim, start, length) combination plus argument
    validation errors, for fully-sparse and hybrid inputs."""
    shape = [3, 3, 4, 2]
    input, _, _ = self._gen_sparse(4, 19, shape, dtype, device, coalesced)
    for narrow_args in self._all_narrow_combs(shape):
        self._test_narrow(input, narrow_args)

    self.assertRaises(RuntimeError, lambda: input.narrow_copy(-1, 0, 3))  # dim < 0
    self.assertRaises(RuntimeError, lambda: input.narrow_copy(10, 0, 3))  # dim > input.dim()
    self.assertRaises(RuntimeError, lambda: input.narrow_copy(0, shape[0] + 1, 3))  # start > size of dim
    self.assertRaises(RuntimeError, lambda: input.narrow_copy(0, 2, shape[0]))  # start+length > size of dim

    # Hybrid tensor: only 2 sparse dims, the remaining dims are dense.
    with_dense, _, _ = self._gen_sparse(2, 7, shape, dtype, device, coalesced)
    for narrow_args in self._all_narrow_combs(shape):
        self._test_narrow(with_dense, narrow_args)

    self.assertRaises(RuntimeError, lambda: with_dense.narrow_copy(10, 0, 3))  # dim > sparseDim + denseDim
def _test_log1p_tensor(self, sparse_tensor, coalesced):
    """Compare sparse log1p / log1p_ against the dense result and check the
    integral-dtype and uncoalesced-input error paths."""
    def is_integral(dtype):
        return dtype in integral_types()

    dense_tensor = sparse_tensor.to_dense()
    expected_output = dense_tensor.log1p()
    is_integral_dtype = is_integral(sparse_tensor.dtype)
    self.assertEqual(expected_output, sparse_tensor.log1p().to_dense())
    if is_integral_dtype:
        # In-place log1p needs a floating result; integral dtypes must fail.
        with self.assertRaisesRegex(RuntimeError, "result type .* can't be cast to"):
            sparse_tensor.coalesce().log1p_()
    else:
        self.assertEqual(expected_output, sparse_tensor.coalesce().log1p_().to_dense())

    if not coalesced:
        # test in-place op on uncoalesced input
        with self.assertRaisesRegex(RuntimeError, "log1p_ requires coalesced input"):
            sparse_tensor.log1p_()

    if is_integral_dtype:
        with self.assertRaisesRegex(RuntimeError, "only Tensors of floating point dtype can require gradients"):
            sparse_tensor.requires_grad_()
@coalescedonoff
@dtypesIfMPS(*all_mps_types())
@dtypes(*all_types())
def test_log1p(self, device, dtype, coalesced):
    """log1p on coalesced, hybrid, uncoalesced, and empty sparse inputs."""
    if coalesced:
        input_coalesced = torch.sparse_coo_tensor(
            indices=torch.tensor([[0], [1], [2]]).transpose(1, 0),
            values=torch.tensor([3.0, 4.0, 5.0]),
            size=[3, ],
            device=device,
            dtype=dtype
        ).coalesce()
        self._test_log1p_tensor(input_coalesced, coalesced)

        # hybrid sparse input
        input_coalesced = torch.sparse_coo_tensor(
            indices=torch.tensor([[1, 3], [2, 4]]),
            values=torch.tensor([[1.0, 3.0], [5.0, 7.0]]),
            size=[4, 5, 2],
            device=device,
            dtype=dtype
        ).coalesce()
        self._test_log1p_tensor(input_coalesced, coalesced)

    if not coalesced:
        # test uncoalesced input
        input_uncoalesced = torch.sparse_coo_tensor(
            indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
            values=torch.tensor([2.0, 3.0, 4.0, 1.0, 1.0, 1.0]),
            size=[3, ],
            device=device,
            dtype=dtype
        )
        self._test_log1p_tensor(input_uncoalesced, coalesced)

        # test on empty sparse tensor
        input_uncoalesced = torch.sparse_coo_tensor(
            indices=torch.zeros([2, 0]),
            values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
            size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
            device=device,
            dtype=dtype
        )
        # empty tensors are coalesced at creation (nnz < 2) we must force the uncoalesced state
        input_uncoalesced._coalesced_(False)
        self._test_log1p_tensor(input_uncoalesced, coalesced)
def _test_neg_negative(self, sparse_tensor):
dense_tensor = sparse_tensor.to_dense()
expected_output = dense_tensor.neg()
ops = (
torch.neg, torch.Tensor.neg, torch.Tensor.neg_,
torch.negative, torch.Tensor.negative, torch.Tensor.negative_,
operator.neg
)
for op in ops:
sparse_tensor_copy = sparse_tensor.clone()
self.assertEqual(expected_output, op(sparse_tensor_copy).to_dense())
if op in (torch.neg, torch.negative):
sparse_tensor_out = torch.zeros_like(sparse_tensor)
op(sparse_tensor, out=sparse_tensor_out)
self.assertEqual(expected_output, sparse_tensor_out.to_dense())
    @coalescedonoff
    @dtypes(torch.double, torch.cdouble)
    @dtypesIfMPS(torch.float32, torch.complex64)
    def test_neg_negative(self, device, dtype, coalesced):
        """Drive _test_neg_negative over coalesced, hybrid, uncoalesced and
        empty sparse inputs."""
        if coalesced:
            input_coalesced = torch.sparse_coo_tensor(
                indices=torch.tensor([[0, 1, 2]]),
                values=torch.tensor([3.0, -4.0, 5.0]),
                size=[3, ],
                dtype=dtype,
                device=device
            ).coalesce()
            self._test_neg_negative(input_coalesced)
            # hybrid sparse input
            input_coalesced = torch.sparse_coo_tensor(
                indices=torch.tensor([[1, 3], [2, 4]]),
                values=torch.tensor([[-1.0, 3.0], [-5.0, 7.0]]),
                size=[4, 5, 2],
                dtype=dtype,
                device=device
            ).coalesce()
            self._test_neg_negative(input_coalesced)
        if not coalesced:
            # test uncoalesced input (duplicate indices are deliberate)
            input_uncoalesced = torch.sparse_coo_tensor(
                indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
                values=torch.tensor([2.0, -3.0, -4.0, 1.0, -1.0, 1.5]),
                size=[3, ],
                dtype=dtype,
                device=device
            )
            self._test_neg_negative(input_uncoalesced)
            # test on empty sparse tensor
            input_uncoalesced = torch.sparse_coo_tensor(
                indices=torch.zeros([2, 0]),
                values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
                size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
                dtype=dtype,
                device=device
            )
            self._test_neg_negative(input_uncoalesced)
    def _test_asin_arcsin(self, sparse_tensor, coalesced):
        """Compare sparse asin/arcsin against the dense reference on one input.

        Functional and method forms must match dense for all dtypes.  The
        ``out=`` and in-place forms must raise for integral dtypes (result
        can't be cast back); in-place on an uncoalesced input must raise.
        """
        def is_integral(dtype):
            return dtype in integral_types()
        is_integral_dtype = is_integral(sparse_tensor.dtype)
        dense_tensor = sparse_tensor.to_dense()
        expected_output = dense_tensor.asin()
        ops = (
            torch.asin, torch.Tensor.asin,
            torch.arcsin, torch.Tensor.arcsin,
        )
        for op in ops:
            self.assertEqual(expected_output, op(sparse_tensor).to_dense())
            if op in (torch.asin, torch.arcsin):
                sparse_tensor_out = torch.zeros_like(sparse_tensor)
                if not is_integral_dtype:
                    op(sparse_tensor, out=sparse_tensor_out)
                    self.assertEqual(expected_output, sparse_tensor_out.to_dense())
                else:
                    with self.assertRaisesRegex(RuntimeError, "result type .* can't be cast to"):
                        op(sparse_tensor, out=sparse_tensor_out)
        for op in (torch.Tensor.asin_, torch.Tensor.arcsin_):
            if is_integral_dtype:
                # test coalesce on integral dtype tensor
                with self.assertRaisesRegex(RuntimeError, "result type .* can't be cast to"):
                    op(sparse_tensor.clone().coalesce()).to_dense()
            else:
                self.assertEqual(expected_output, op(sparse_tensor.clone().coalesce()).to_dense())
                if not coalesced:
                    # test in-place op on uncoalesced input
                    with self.assertRaisesRegex(RuntimeError, "asin_ requires coalesced input"):
                        op(sparse_tensor)
    @coalescedonoff
    @dtypes(*all_types())
    @dtypesIfMPS(*all_mps_types())
    def test_asin_arcsin(self, device, dtype, coalesced):
        """Drive _test_asin_arcsin over coalesced, hybrid, uncoalesced and
        empty sparse inputs; values stay in [-1, 1], asin's domain."""
        if coalesced:
            input_coalesced = torch.sparse_coo_tensor(
                indices=torch.tensor([[0, 1, 2, 3]]),
                values=torch.tensor([0.5, -0.5, 0.7, -0.7]),
                size=[4, ],
                dtype=dtype,
                device=device
            ).coalesce()
            self._test_asin_arcsin(input_coalesced, coalesced)
            # hybrid sparse input
            input_coalesced = torch.sparse_coo_tensor(
                indices=torch.tensor([[1, 3], [2, 4]]),
                values=torch.tensor([[-0.1, 0.24], [-0.44, 0.1]]),
                size=[4, 5, 2],
                dtype=dtype,
                device=device
            ).coalesce()
            self._test_asin_arcsin(input_coalesced, coalesced)
        if not coalesced:
            # test uncoalesced input (duplicate indices are deliberate)
            input_uncoalesced = torch.sparse_coo_tensor(
                indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
                values=torch.tensor([0.3, -0.3, -0.4, 0.3, -0.5, 0.15]),
                size=[3, ],
                dtype=dtype,
                device=device
            )
            self._test_asin_arcsin(input_uncoalesced, coalesced)
            # test on empty sparse tensor
            input_uncoalesced = torch.sparse_coo_tensor(
                indices=torch.zeros([2, 0]),
                values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
                size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
                dtype=dtype,
                device=device
            )
            # empty tensors are coalesced at creation (nnz < 2) we must force the uncoalesced state
            input_uncoalesced._coalesced_(False)
            self._test_asin_arcsin(input_uncoalesced, coalesced)
    @coalescedonoff
    @dtypes(torch.double)
    @dtypesIfMPS(torch.float32)
    def test_mv(self, device, dtype, coalesced):
        """Sparse matrix @ dense vector via Tensor.matmul, checked against
        the dense product, plus the mv shape/dim error paths."""
        def test_shape(di, dj, dk, nnz):
            # x is a (di, dj) sparse matrix; t is a length-dk dense vector,
            # so the matmul only succeeds when dk == dj.
            x, _, _ = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)
            t = torch.randn(dk, dtype=dtype, device=device)
            res = x.matmul(t)
            expected = self.safeToDense(x).matmul(t)
            self.assertEqual(res, expected)
        test_shape(10, 100, 100, 20)
        test_shape(100, 1000, 1000, 20)
        test_shape(64, 10000, 10000, 20)
        test_shape(0, 100, 100, 0)
        test_shape(10, 0, 0, 0)
        test_shape(10, 100, 100, 0)
        test_shape(10, 100, 100, 20)
        # Mismatched vector length (10 != 100) must be rejected.
        with self.assertRaisesRegex(RuntimeError, r"mv: expected self\.size\(-1\) == vec\.size\(-1\)"):
            test_shape(10, 100, 10, 20)
        # mv requires a matrix and a vector, not two matrices.
        with self.assertRaisesRegex(RuntimeError, "mv: two tensor dim should be 2 and 1"):
            x, _, _ = self._gen_sparse(2, 20, [10, 100], dtype, device, coalesced)
            y, _, _ = self._gen_sparse(2, 20, [10, 100], dtype, device, coalesced)
            res = x.mv(y)
    @dtypes(*floating_and_complex_types())
    @dtypesIfMPS(torch.float32, torch.bfloat16, torch.complex64)
    def test_sparse_add_coalesce(self, device, dtype):
        """Adding two sparse tensors with duplicate indices must not falsely
        mark the result as coalesced."""
        i = self.index_tensor([[1, 2, 1]], device=device)
        v = torch.tensor([3, 4, 5], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([3]))
        y = self.sparse_tensor(i, v, torch.Size([3]))
        z = x + y
        # If the result still has more than 2 index entries (i.e. duplicates
        # were not merged), it must not claim the coalesced flag.
        self.assertFalse(z._indices().numel() != 2 and z.is_coalesced())
        # Same check with empty (zero-width dense dim) values.
        i = self.index_tensor([[1, 2, 1]], device=device)
        v = torch.empty([3, 0], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([3, 0]))
        y = self.sparse_tensor(i, v, torch.Size([3, 0]))
        z = x + y
        self.assertFalse(z._indices().numel() != 2 and z.is_coalesced())
@onlyCUDA
def test_storage_not_null(self, device):
x = torch.sparse_coo_tensor((2,), dtype=torch.float32, device=device)
self.assertNotEqual(x.get_device(), -1)
x = torch.sparse_coo_tensor((2, 0), dtype=torch.float32, device=device)
self.assertNotEqual(x.get_device(), -1)
    @onlyCUDA
    @deviceCountAtLeast(2)
    def test_same_gpu(self, devices):
        """Sparse tensors built on a secondary GPU keep their indices and
        values on that same GPU (device index 1 here)."""
        def check_device(x, device_id):
            # The wrapper and both constituent tensors must agree on the device.
            self.assertEqual(x.get_device(), device_id)
            self.assertEqual(x._values().get_device(), device_id)
            self.assertEqual(x._indices().get_device(), device_id)
        dev1, dev2 = devices[0], devices[1]
        i = self.index_tensor([[2]], device=dev2)
        v = torch.tensor([5], device=dev2)
        x = self.sparse_tensor(i, v, torch.Size([3]), device=1)
        check_device(x, 1)
        i = self.index_tensor([[2]], device=dev2)
        v = torch.empty(1, 0, device=dev2)
        x = self.sparse_tensor(i, v, torch.Size([3, 0]), device=1)
        check_device(x, 1)
        x = self.sparse_empty(3, device=1)
        check_device(x, 1)
        x = self.sparse_empty(3, 0, device=1)
        check_device(x, 1)
def _test_new_device(self, size, device=torch.cuda):
with torch.cuda.device(device):
x = torch.sparse_coo_tensor(size, device='cuda', dtype=torch.float64)
self.assertEqual(x.get_device(), device)
x1 = x.new()
x2 = x.new(2, 3)
self.assertEqual(x1.get_device(), device)
self.assertEqual(x2.get_device(), device)
@onlyCUDA
def test_new_device_single_gpu(self):
self._test_new_device((), 0)
self._test_new_device((30, 20), 0)
self._test_new_device((30, 20, 10), 0)
self._test_new_device((30, 20, 10, 0), 0)
@onlyCUDA
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_new_device_multi_gpu(self):
self._test_new_device((), 1)
self._test_new_device((30, 20), 1)
self._test_new_device((30, 20, 10), 1)
self._test_new_device((30, 20, 10, 0), 1)
    @coalescedonoff
    @dtypes(torch.double, torch.cdouble)
    @dtypesIfMPS(torch.float32, torch.complex64)
    def test_new(self, device, dtype, coalesced):
        """Legacy Tensor.new(indices, values[, size]) reconstructs an
        equivalent sparse tensor."""
        def test_shape(sparse_dims, nnz, with_size):
            x, indices, values = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
            if not x.is_cuda:
                # CUDA sparse tensors currently requires the size to be
                # specified if nDimV > 0
                out = x.new(indices, values).coalesce()
                x_c = x.coalesce()
                self.assertEqual((out.indices(), out.values()), (x_c.indices(), x_c.values()))
            self.assertEqual(x.new(indices, values, x.size()), x)
        test_shape(3, 10, 100)
        test_shape(3, 0, [100, 100, 0])
@onlyCPU # not really, but we only really want to run this once
@dtypes(torch.float64, torch.float32, torch.float16, torch.cfloat, torch.cdouble)
def test_factory(self, device, dtype):
for test_empty_tensor in [True, False]:
if test_empty_tensor:
default_size = torch.Size([1, 3, 0])
size = torch.Size([3, 3, 0])
else:
default_size = torch.Size([1, 3])
size = torch.Size([3, 3])
for include_size in [True, False]:
for use_tensor_idx in [True, False]:
for use_tensor_val in [True, False]:
for use_cuda in ([False] if not torch.cuda.is_available() else [True, False]):
# have to include size with cuda sparse tensors
include_size = include_size or use_cuda
long_dtype = torch.int64
device = torch.device('cpu') if not use_cuda else \
torch.device(torch.cuda.device_count() - 1)
indices = torch.tensor(([0], [2]), dtype=long_dtype) if use_tensor_idx else ([0], [2])
if test_empty_tensor:
values = torch.empty(1, 0).to(dtype)
else:
if use_tensor_val:
values = torch.tensor([1.], dtype=dtype)
else:
values = 1.
if include_size:
sparse_tensor = torch.sparse_coo_tensor(indices, values, size, dtype=dtype,
device=device, requires_grad=True)
else:
sparse_tensor = torch.sparse_coo_tensor(indices, values, dtype=dtype,
device=device, requires_grad=True)
self.assertEqual(indices, sparse_tensor._indices())
self.assertEqual(values, sparse_tensor._values())
self.assertEqual(size if include_size else default_size, sparse_tensor.size())
self.assertEqual(dtype, sparse_tensor.dtype)
if use_cuda:
self.assertEqual(device, sparse_tensor._values().device)
self.assertEqual(True, sparse_tensor.requires_grad)
    @dtypes(torch.double, torch.cdouble)
    @dtypesIfMPS(torch.float32, torch.complex64)
    def test_factory_size_check(self, device, dtype):
        """sparse_coo_tensor must reject sizes that contradict the supplied
        indices or values (too small, negative index, wrong dense shape)."""
        # Index 2 does not fit in a dim of size... size [2, 3] vs max index 2.
        indices = self.index_tensor([[1, 2],
                                    [0, 2]], device=device)
        values = torch.tensor([.5, .5], dtype=dtype, device=device)
        sizes = torch.Size([2, 3])
        with self.assertRaisesRegex(RuntimeError, "size is inconsistent with indices"):
            torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
        indices.fill_(-1)
        with self.assertRaisesRegex(RuntimeError, "found negative index"):
            torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
        # Same inconsistency checks with zero-width dense dims.
        indices = self.index_tensor([[1, 2],
                                    [0, 2]], device=device)
        values = torch.empty([2, 1, 0], dtype=dtype, device=device)
        sizes = torch.Size([2, 3, 1, 0])
        with self.assertRaisesRegex(RuntimeError, "size is inconsistent with indices"):
            torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
        indices = self.index_tensor([[1, 2],
                                    [0, 2]], device=device)
        values = torch.empty([2, 2, 2], dtype=dtype, device=device)
        sizes = torch.Size([0, 0, 2, 2])
        with self.assertRaisesRegex(RuntimeError, "size is inconsistent with indices"):
            torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
        # Dense part of `values` does not match the trailing dims of `sizes`.
        indices = self.index_tensor([[1, 2],
                                    [0, 2]], device=device)
        values = torch.tensor([[1, 1, 1], [1, 1, 1]], dtype=dtype, device=device)
        sizes = torch.Size([3, 3, 2])
        with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
            torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
        indices = self.index_tensor([[1, 2],
                                    [0, 2]], device=device)
        values = torch.empty([2, 1, 0], dtype=dtype, device=device)
        sizes = torch.Size([3, 3, 2, 0])
        with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
            torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
def test_factory_empty_indices(self, device):
tensor = torch.sparse_coo_tensor(torch.Size([2, 0]), device=device)
expected_indices = torch.empty((2, 0), dtype=torch.long, device=device)
self.assertEqual(tensor._indices(), expected_indices)
tensor = torch.sparse_coo_tensor(torch.Size([2, 2, 0]), device=device)
expected_indices = torch.empty((3, 0), dtype=torch.long, device=device)
self.assertEqual(tensor._indices(), expected_indices)
tensor = torch.sparse_coo_tensor(torch.Size([2, 2, 0, 0]), device=device)
expected_indices = torch.empty((4, 0), dtype=torch.long, device=device)
self.assertEqual(tensor._indices(), expected_indices)
    @dtypes(torch.double, torch.cdouble)
    @dtypesIfMPS(torch.float32, torch.complex64)
    def test_factory_nnz(self, device, dtype):
        """The leading dim of `values` (nnz) must match the number of index
        columns; a mismatch is rejected even when values are empty."""
        indices = self.index_tensor([[0]], device=device)  # (sparse_dim, nnz): (1, 1)
        values = torch.tensor([[1, 1], [1, 1]], dtype=dtype, device=device)  # (nnz, ...): (2, 2)
        sizes = torch.Size([2, 2])
        with self.assertRaisesRegex(RuntimeError, "indices and values must have same nnz"):
            torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
        indices = self.index_tensor([[0]], device=device)  # (sparse_dim, nnz): (1, 1)
        values = torch.empty([2, 0], dtype=dtype, device=device)  # (nnz, ...): (2, 0)
        sizes = torch.Size([2, 0])
        with self.assertRaisesRegex(RuntimeError, "indices and values must have same nnz"):
            torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
    @dtypes(torch.double, torch.cdouble)
    @dtypesIfMPS(torch.float32, torch.complex64)
    def test_factory_nnz_zero(self, device, dtype):
        """Construction with nnz == 0: size may be inferred (sparse dims
        become 0) or given explicitly; indices/values keep their shapes."""
        def test_shape(i_shape, v_shape, size, expected_size):
            if size:
                t = torch.sparse_coo_tensor(torch.empty(i_shape), torch.empty(v_shape), torch.Size(size),
                                            dtype=dtype, device=device)
            else:
                # No explicit size: it is inferred from the index/value shapes.
                t = torch.sparse_coo_tensor(torch.empty(i_shape), torch.empty(v_shape), dtype=dtype, device=device)
            expected_indices = torch.empty(i_shape, device=device, dtype=torch.int64)
            expected_values = torch.empty(v_shape, device=device, dtype=dtype)
            expected_size = torch.Size(expected_size)
            self.assertEqual(t._indices(), expected_indices)
            self.assertEqual(t._values(), expected_values)
            self.assertEqual(t.size(), expected_size)
        test_shape([1, 0], [0, 2, 4, 0], None, [0, 2, 4, 0])
        test_shape([3, 0], [0, 2, 4, 0], None, [0, 0, 0, 2, 4, 0])
        test_shape([1, 0], [0, 2, 4, 0], [0, 2, 4, 0], [0, 2, 4, 0])
        test_shape([3, 0], [0, 2, 4, 0], [0, 0, 0, 2, 4, 0], [0, 0, 0, 2, 4, 0])
        test_shape([3, 0], [0, 2, 4, 0], [1, 2, 3, 2, 4, 0], [1, 2, 3, 2, 4, 0])
    @dtypes(torch.double, torch.cdouble)
    @dtypesIfMPS(torch.float32, torch.complex64)
    def test_factory_dense_dim(self, device, dtype):
        """Dense dims of `values` that disagree with the trailing entries of
        `sizes` must be rejected."""
        indices = self.index_tensor([[0]], device=device)
        values = torch.tensor([[[1, 1, 1], [1, 1, 1]]], dtype=dtype, device=device)
        sizes = torch.Size([1, 3, 4])
        with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
            torch.sparse_coo_tensor(indices, values, sizes)
        indices = self.index_tensor([[0]], device=device)
        values = torch.empty([1, 2, 3, 0], dtype=dtype, device=device)
        sizes = torch.Size([1, 3, 4, 0])
        with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
            torch.sparse_coo_tensor(indices, values, sizes)
    @onlyCPU
    @dtypes(torch.float16, torch.float32, torch.float64, torch.cfloat, torch.cdouble, torch.int64)
    def test_factory_type_inference(self, device, dtype):
        """When no dtype is given, sparse_coo_tensor infers it from the
        values tensor (including legacy typed-tensor classes)."""
        t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.tensor([1.], dtype=dtype))
        self.assertEqual(dtype, t.dtype)
        t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.tensor([1]))
        self.assertEqual(torch.int64, t.dtype)
        # Legacy typed-tensor constructors must also drive the inference.
        t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.HalfTensor(1, 0))
        self.assertEqual(torch.float16, t.dtype)
        t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.FloatTensor(1, 0))
        self.assertEqual(torch.float32, t.dtype)
        t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.DoubleTensor(1, 0))
        self.assertEqual(torch.float64, t.dtype)
        t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.LongTensor(1, 0))
        self.assertEqual(torch.int64, t.dtype)
    @onlyCUDA
    def test_factory_device_type_inference(self, device):
        """Device inference: mixed-device indices/values without an explicit
        device must raise; otherwise the explicit device wins, falling back
        to the values' device."""
        # both indices/values are CUDA
        cpu_cuda = ('cpu', 'cuda')
        cpu_cuda_none = cpu_cuda + (None,)
        # NOTE: the loop variable `device` shadows the test parameter here.
        for indices_device, values_device, device in itertools.product(cpu_cuda,
                                                                       cpu_cuda,
                                                                       cpu_cuda_none):
            indices = torch.tensor(([0], [2]), device=indices_device)
            values = torch.tensor([1.], device=values_device)
            empty_values = torch.empty(1, 0).to(values_device)
            shape = (1, 3)
            empty_shape = (1, 3, 0)
            if device is None and indices_device != values_device:
                with self.assertRaises(RuntimeError):
                    torch.sparse_coo_tensor(indices, values, shape, device=device)
                with self.assertRaises(RuntimeError):
                    torch.sparse_coo_tensor(indices, empty_values, empty_shape, device=device)
            else:
                t = torch.sparse_coo_tensor(indices, values, shape, device=device)
                t_empty = torch.sparse_coo_tensor(indices, empty_values, empty_shape, device=device)
                should_be_cuda = (device == 'cuda' or (device is None and values_device == 'cuda'))
                self.assertEqual(should_be_cuda, t.is_cuda)
                self.assertEqual(t.is_cuda, t_empty.is_cuda)
    @onlyCPU
    def test_factory_copy(self, device):
        """sparse_coo_tensor aliases the input tensors when their dtype is
        already correct (int64 indices, requested-dtype values) and copies
        them otherwise; aliasing is observed via data_ptr equality."""
        def test_tensor(indices, values, indices_equal, values_equal):
            sparse_tensor = torch.sparse_coo_tensor(indices, values, dtype=torch.float64, device=device)
            if indices_equal:
                self.assertEqual(indices.data_ptr(), sparse_tensor._indices().data_ptr())
            else:
                self.assertNotEqual(indices.data_ptr(), sparse_tensor._indices().data_ptr())
            if values_equal:
                self.assertEqual(values.data_ptr(), sparse_tensor._values().data_ptr())
            else:
                self.assertNotEqual(values.data_ptr(), sparse_tensor._values().data_ptr())
        # both correct
        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = torch.tensor([1.], dtype=torch.float64)
        test_tensor(indices, values, True, True)
        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = torch.DoubleTensor(1, 0)
        test_tensor(indices, values, True, True)
        # only indices correct
        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = torch.tensor([1.], dtype=torch.float32)
        test_tensor(indices, values, True, False)
        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = torch.tensor([1.], dtype=torch.float16)
        test_tensor(indices, values, True, False)
        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = torch.FloatTensor(1, 0)
        test_tensor(indices, values, True, True)  # An empty tensor's data_ptr is always equal to 0
        # only values correct
        indices = torch.tensor(([0], [2]), dtype=torch.int32)
        values = torch.tensor([1.], dtype=torch.float64)
        test_tensor(indices, values, False, True)
        indices = torch.tensor(([0], [2]), dtype=torch.int32)
        values = torch.DoubleTensor(1, 0)
        test_tensor(indices, values, False, True)
        # neither correct
        indices = torch.tensor(([0], [2]), dtype=torch.int32)
        values = torch.tensor([1.], dtype=torch.float32)
        test_tensor(indices, values, False, False)
        indices = torch.tensor(([0], [2]), dtype=torch.int32)
        values = torch.FloatTensor(1, 0)
        test_tensor(indices, values, False, True)  # An empty tensor's data_ptr is always equal to 0
        # complex support
        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = make_tensor([1, ], dtype=torch.cdouble, device=device)
        test_tensor(indices, values, True, False)
        indices = torch.tensor(([0], [2]), dtype=torch.int32)
        values = make_tensor([1, 1], dtype=torch.cdouble, device=device)
        test_tensor(indices, values, False, False)
    @onlyCPU  # just run once, we test both cpu and cuda
    def test_legacy_new_device(self, device):
        """Legacy Tensor.new must reject a `device=` that differs from the
        source tensor's device, in both CPU->CUDA and CUDA->CPU directions."""
        i = torch.tensor([[0, 1, 1], [2, 0, 2]])
        v = torch.tensor([3., 4., 5.])
        size = torch.Size([2, 3])
        x = torch.sparse_coo_tensor(i, v, size, device='cpu')
        self.assertRaises(RuntimeError, lambda: x.new(device='cuda'))
        self.assertRaises(RuntimeError, lambda: x.new(i, v, device='cuda'))
        self.assertRaises(RuntimeError, lambda: x.new(i, v, size, device='cuda'))
        self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cuda'))
        if torch.cuda.is_available():
            x = torch.sparse_coo_tensor(i, v, size, device='cuda')
            self.assertRaises(RuntimeError, lambda: x.new(device='cpu'))
            self.assertRaises(RuntimeError, lambda: x.new(i, v, device='cpu'))
            self.assertRaises(RuntimeError, lambda: x.new(i, v, size, device='cpu'))
            self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cpu'))
def test_legacy_new(self, device):
i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([3., 4., 5.])
size = torch.Size([2, 3])
s = torch.sparse_coo_tensor(i, v, size)
self.assertEqual(torch.sparse_coo, s.new(device='cpu').layout)
self.assertRaises(TypeError, lambda: s.new(v.untyped_storage()))
self.assertRaises(TypeError, lambda: s.new(v))
self.assertEqual(torch.sparse_coo, s.new(torch.Size([2, 3])).layout)
self.assertRaises(TypeError, lambda: s.new([6]))
@onlyCPU # not really, but we only really want to run this once
def test_dtypes(self, device):
all_sparse_dtypes = all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)
do_test_dtypes(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cpu'))
if torch.cuda.is_available():
do_test_dtypes(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cuda:0'))
    def _test_empty_full(self, device, dtype, requires_grad):
        """Check empty-tensor factories (new_empty, empty_like, zeros-into-out)
        preserve shape, dtype, sparse layout, device and requires_grad."""
        shape = (2, 3)
        layout = torch.sparse_coo
        def check_value(tensor, value=None, dtype=dtype, requires_grad=requires_grad):
            self.assertEqual(shape, tensor.shape)
            self.assertIs(dtype, tensor.dtype)
            self.assertIs(layout, tensor.layout)
            self.assertEqual(tensor.requires_grad, requires_grad)
            if tensor.is_cuda and device is not None:
                self.assertEqual(device, tensor.device)
            if value is not None:
                # NOTE(review): dead in practice — check_value is never called
                # with `value`, and `tensor.empty` is not a Tensor method, so
                # this branch would raise if it ever ran.
                fill = tensor.empty(shape, dtype=dtype).fill_(value)
                self.assertEqual(tensor, fill)
        v = torch.sparse_coo_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad)
        check_value(v)
        out = v.new()
        check_value(torch.zeros(shape, out=out, device=device, requires_grad=requires_grad))
        int64_dtype = torch.int64
        # new_empty / empty_like never propagate requires_grad from the source.
        check_value(v.new_empty(shape), requires_grad=False)
        check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),
                    dtype=int64_dtype, requires_grad=False)
        check_value(torch.empty_like(v), requires_grad=False)
        check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
                    dtype=int64_dtype, requires_grad=False)
    @onlyCPU  # not really, but we only really want to run this once
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    @parametrize('requires_grad', (True, False))
    def test_empty_full(self, device, dtype, requires_grad):
        """Run _test_empty_full on CPU and, if available, on CUDA (both with
        an explicit device and with device=None)."""
        if requires_grad and not (dtype.is_floating_point or dtype.is_complex):
            # Autograd only supports floating point / complex dtypes.
            self.skipTest(f'requires_grad==True requires float or complex dtype, got {dtype}')
        self._test_empty_full(device, dtype, requires_grad)
        if torch.cuda.is_available():
            self._test_empty_full(None, dtype, requires_grad)
            self._test_empty_full(torch.device('cuda:0'), dtype, requires_grad)
def test_is_sparse(self, device):
x = torch.randn(3, 3)
self.assertFalse(x.is_sparse)
x = torch.randn(3, 3, 0)
self.assertFalse(x.is_sparse)
x = self.sparse_empty(1, 0, device=device)
self.assertTrue(x.is_sparse)
    def test_resize_as(self, device):
        """resize_as_ on a fresh sparse tensor matches the target's shape and
        stays addable to it."""
        def do_test(t):
            y = t.new().resize_as_(t).zero_()
            self.assertEqual(y.shape, t.shape)
            # Check that y can be added to t. Currently, this requires that
            # sparse_dim and dense_dim match.
            self.assertEqual(t, t + y)
        do_test(self.sparse_empty([3, 0], device=device))
        do_test(self.sparse_empty([3, 3], device=device))
    def _test_resize_shape(self, x_i, x_v, x_size, y_i, y_v, y_size, dtype, device):
        """Resize sparse tensor x to match y via resize_as_ and verify shape,
        sparse/dense dim counts, and that x's original values survive.

        x_i / y_i are index shapes, x_v / y_v value shapes, x_size / y_size
        overall sizes; x gets distinct arange values so data preservation
        after the resize can be checked against a dense mirror.
        """
        x_v_numel = torch.zeros(x_v).numel()
        x = torch.sparse_coo_tensor(torch.zeros(x_i),
                                    torch.arange(x_v_numel).resize_(x_v).to(torch.float),
                                    torch.Size(x_size), dtype=dtype, device=device)
        x_dense = x.to_dense()
        y = torch.sparse_coo_tensor(torch.zeros(y_i),
                                    torch.ones(y_v).to(torch.float),
                                    torch.Size(y_size), dtype=dtype, device=device)
        y_dense = y.to_dense()
        x.resize_as_(y)
        x_dense.resize_as_(y_dense)
        self.assertEqual(x.shape, y.shape)
        self.assertEqual(x.sparse_dim(), y.sparse_dim())
        self.assertEqual(x.dense_dim(), y.dense_dim())
        self.assertEqual(x.shape, x_dense.shape)
        self.assertEqual(y.shape, y_dense.shape)
        # Here we make sure that the original data are preserved after resizing
        self.assertEqual(x.to_dense().view(-1)[0:x_v_numel].view(x_v),
                         x_dense.view(-1)[0:x_v_numel].view(x_v))
    @dtypes(torch.double, torch.cdouble)
    @dtypesIfMPS(torch.float32, torch.complex64)
    def test_resize(self, device, dtype):
        """Enumerate the supported and unsupported resize_as_ transitions for
        sparse tensors (cases 1-3 succeed, 4-8 must raise)."""
        # 1. Expand the size of some dense dimensions [Supported]
        self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                [1, 1], [1, 2, 4], [2, 2, 4],
                                dtype=dtype, device=device)
        self._test_resize_shape([1, 1], [1, 2, 0], [2, 2, 0],
                                [1, 1], [1, 2, 4], [2, 2, 4],
                                dtype=dtype, device=device)
        # 2. Expand the size of some sparse dimensions [Supported]
        self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                [1, 1], [1, 2, 3], [4, 2, 3],
                                dtype=dtype, device=device)
        # 3. Change the shapes of both sparse and dense dimensions when nnz is zero [Supported]
        self._test_resize_shape([1, 0], [0, 2, 3], [2, 2, 3],
                                [2, 0], [0, 2, 4, 5], [1, 1, 2, 4, 5],
                                dtype=dtype, device=device)
        self._test_resize_shape([1, 0], [0, 2, 3], [2, 2, 3],
                                [2, 0], [0, 2, 4, 0], [1, 1, 2, 4, 0],
                                dtype=dtype, device=device)
        # 4. Add dims to dense dimensions [Not Supported]
        with self.assertRaisesRegex(RuntimeError, "changing the number of dense dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2, 3, 4], [2, 2, 3, 4],
                                    dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "changing the number of dense dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2, 3, 0], [2, 2, 3, 0],
                                    dtype=dtype, device=device)
        # 5. Remove dims from dense dimensions [Not Supported]
        with self.assertRaisesRegex(RuntimeError, "changing the number of dense dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2], [2, 2],
                                    dtype=dtype, device=device)
        # 6. Change the number of sparse dimensions on a non-empty sparse tensor [Not Supported]
        with self.assertRaisesRegex(RuntimeError, "changing the number of sparse dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [2, 1], [1, 2, 3], [1, 2, 2, 3],
                                    dtype=dtype, device=device)
        # 7. Shrink the size of some sparse dimensions on a non-empty sparse tensor [Not Supported]
        with self.assertRaisesRegex(RuntimeError, "shrinking the size of sparse dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2, 3], [1, 2, 3],
                                    dtype=dtype, device=device)
        # 8. Shrink the size of some dense dimensions on a non-empty sparse tensor [Not Supported]
        with self.assertRaisesRegex(RuntimeError, "shrinking the size of dense dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2, 2], [2, 2, 2],
                                    dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "shrinking the size of dense dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2, 0], [2, 2, 0],
                                    dtype=dtype, device=device)
    def test_is_nonzero(self, device):
        """is_nonzero on single-element sparse tensors: true iff the (summed)
        value is nonzero; ambiguous when there are no materialized values."""
        self.assertTrue(torch.sparse_coo_tensor(([0],), 1., (1,), device=device).is_nonzero())
        self.assertFalse(torch.sparse_coo_tensor(([0],), 0., (1,), device=device).is_nonzero())
        self.assertFalse(torch.sparse_coo_tensor(([0], [0]), 0., (1, 1), device=device).is_nonzero())
        self.assertFalse(torch.sparse_coo_tensor(([0, 0],), (0., 0.), (1,), device=device).is_nonzero())
        # Duplicate entries -1 and 1 at the same index sum to zero.
        self.assertFalse(torch.sparse_coo_tensor(([0, 0],), (-1., 1.), (1,), device=device).is_nonzero())
        # scalar sparse tensor
        self.assertTrue(torch.sparse_coo_tensor(torch.zeros(0, 1), 12.3, [], device=device).is_nonzero())
        with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
            torch.sparse_coo_tensor(([0, 1],), torch.empty(2, 0), (4, 0), device=device).is_nonzero()
        self.assertTrue(torch.sparse_coo_tensor(([0],), 2.3 - 4.5j, (1,), dtype=torch.cfloat, device=device)
                        .is_nonzero())
        self.assertTrue(torch.sparse_coo_tensor(([0],), 2.3 - 4.5j, (1,), dtype=torch.cdouble, device=device)
                        .is_nonzero())
        self.assertFalse(torch.sparse_coo_tensor(([0],), 0. + 0j, (1,), dtype=torch.cfloat, device=device)
                         .is_nonzero())
        self.assertFalse(torch.sparse_coo_tensor(([0],), 0. + 0j, (1,), dtype=torch.cdouble, device=device)
                         .is_nonzero())
    @dtypes(torch.double, torch.cdouble)
    @dtypesIfMPS(torch.float32, torch.complex64)
    def test_change_tensor_metadata(self, device, dtype):
        """Mutating the shape metadata of the source indices/values tensors
        after construction (resize_, resize_as_, as_strided_, set_,
        transpose_) must not affect the sparse tensor built from them."""
        i = self.index_tensor([[0], [1]], device=device)
        v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
        t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]), dtype=dtype, device=device)
        i.resize_(2, 3)
        v.resize_(4, 5)
        # The sparse tensor still reports the original (2, 1) / (1, 3) shapes.
        self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
        self.assertEqual(list(t.coalesce().values().size()), [1, 3])
        i = self.index_tensor([[0], [1]], device=device)
        v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
        t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]))
        i.resize_as_(self.index_tensor([0, 1], device=device))
        v.resize_as_(torch.tensor([3, 4, 5], dtype=dtype, device=device))
        self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
        self.assertEqual(list(t.coalesce().values().size()), [1, 3])
        i = self.index_tensor([[0], [1]], device=device)
        v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
        t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]))
        i.as_strided_((2, 1), (1, 1))
        v.as_strided_((1, 3), (1, 1))
        self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
        self.assertEqual(list(t.coalesce().values().size()), [1, 3])
        i = self.index_tensor([[0], [1]], device=device)
        v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
        t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]))
        i.set_(self.index_tensor([0, 1], device=device))
        v.set_(torch.tensor([3, 4, 5], dtype=dtype, device=device))
        self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
        self.assertEqual(list(t.coalesce().values().size()), [1, 3])
        i = self.index_tensor([[0], [1]], device=device)
        v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
        t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]))
        i.transpose_(0, 1)
        v.transpose_(0, 1)
        self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
        self.assertEqual(list(t.coalesce().values().size()), [1, 3])
    @coalescedonoff
    @dtypes(torch.double)
    @dtypesIfMPS(torch.float32)
    def test_pickle(self, device, dtype, coalesced):
        """Round-trip sparse tensors through pickle for a spread of shapes,
        sparse-dim counts and nnz (including scalar and zero-sized cases)."""
        import pickle
        shape_sparse_dim_nnz = [
            ((), 0, 2),
            ((0,), 0, 10),
            ((2,), 0, 3),
            ((100, 3), 1, 3),
            ((100, 20, 3), 2, 0),
            ((10, 0, 3), 0, 3),
            ((10, 0, 3), 0, 0),
        ]
        for shape, sparse_dim, nnz in shape_sparse_dim_nnz:
            indices_shape = torch.Size((sparse_dim, nnz))
            values_shape = torch.Size((nnz,) + shape[sparse_dim:])
            indices = torch.arange(indices_shape.numel(), dtype=self.index_tensor(0).dtype,
                                   device=device).view(indices_shape)
            for d in range(sparse_dim):
                indices[d].clamp_(max=(shape[d] - 1))  # make it valid index
            if not coalesced and indices.numel() > 0:
                indices[:, -1] = indices[:, 0]  # make it uncoalesced
            values_numel = values_shape.numel()
            values = torch.arange(values_numel, dtype=dtype,
                                  device=device).view(values_shape).div_(values_numel / 2.)
            sp_tensor = self.sparse_tensor(indices, values, shape)
            serialized = pickle.dumps(sp_tensor)
            sp_tensor_loaded = pickle.loads(serialized)
            self.assertEqual(sp_tensor, sp_tensor_loaded)
def test_any(self, device):
t = torch.sparse_coo_tensor(torch.tensor(([0, 0], [2, 0])), torch.tensor([False, False]), device=device)
t_any = torch.tensor(False)
self.assertEqual(torch.any(t), t_any)
t = torch.sparse_coo_tensor(torch.tensor(([0, 0], [2, 0])), torch.tensor([True, False]), device=device)
t_any = torch.tensor(True)
self.assertEqual(torch.any(t), t_any)
def test_isnan(self, device):
t = torch.sparse_coo_tensor(torch.tensor(([0, 0], [0, 2])), torch.tensor([1, 4]), device=device)
t_nan = torch.sparse_coo_tensor(torch.tensor(([0, 0], [0, 2])), torch.tensor([False, False]), device=device)
self.assertEqual(torch.isnan(t).int(), t_nan.int())
t = torch.sparse_coo_tensor(torch.tensor(([0, 0], [0, 2])), torch.tensor([1, float("nan")]), device=device)
t_nan = torch.sparse_coo_tensor(torch.tensor(([0, 0], [0, 2])), torch.tensor([False, True]), device=device)
self.assertEqual(torch.isnan(t).int(), t_nan.int())
@coalescedonoff
@dtypes(torch.float32, torch.float64)
@dtypesIfMPS(torch.float16, torch.float32)
def test_div_rounding_mode(self, device, dtype, coalesced):
sparse, _, _ = self._gen_sparse(2, 10, (10, 10), dtype,
device, coalesced)
dense = self.safeToDense(sparse)
for mode in (None, 'floor', 'trunc'):
actual = sparse.div(-2, rounding_mode=mode)
expect = dense.div(-2, rounding_mode=mode)
self.assertEqual(self.safeToDense(actual), expect)
# Test inplace
actual = sparse.clone().div_(-2, rounding_mode=mode)
self.assertEqual(self.safeToDense(actual), expect)
# Test out argument
actual.zero_()
torch.div(sparse, -2, rounding_mode=mode, out=actual)
self.assertEqual(self.safeToDense(actual), expect)
def test_div_by_sparse_error(self, device):
self.assertRaisesRegex(RuntimeError, 'Sparse division requires',
lambda: torch.tensor(1., device=device).to_sparse()
/ torch.tensor(1., device=device).to_sparse())
def test_floor_divide_by_sparse_error(self, device):
self.assertRaisesRegex(RuntimeError, 'Sparse floor division requires',
lambda: torch.tensor(1., device=device).to_sparse()
// torch.tensor(1., device=device).to_sparse())
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
@onlyCPU
def test_sparse_to_numpy(self, device):
t = torch.sparse_coo_tensor(torch.tensor(([0, 0], [2, 0])), torch.tensor([1, 4]))
self.assertRaises(TypeError, lambda: t.numpy())
@coalescedonoff
@expectedFailureMPS
@dtypes(torch.double)
@dtypesIfMPS(torch.float32)
def test_softmax(self, device, dtype, coalesced):
import torch.nn.functional as F
def to_dense(sparse, fill_value=None):
"""
Return dense tensor from a sparse tensor using given fill value.
"""
if fill_value is None or fill_value == 0:
return sparse.to_dense()
sparse = sparse.coalesce()
dense = torch.full(sparse.shape, fill_value, dtype=sparse.dtype, device=sparse.device)
for idx, value in zip(sparse._indices().t(), sparse._values()):
dense[tuple(idx)] = value
return dense
def softmax_to_dense(sparse, dim):
"""Dense softmax of a sparse tensor. Useful only for testing softmax
correctness.
When computing softmax of a sparse tensor, the value of
unspecified items is negative infinity rather than zero so
that
softmax(sparse.to_dense(fill_value=-inf), dim) == softmax(sparse, dim).to_dense()
holds for non-empty lines. One empty lines, the softmax
values are defined as 0 in order to preserve the sparsity
of result.
Note that in PyTorch, ``to_dense`` method does not
implement the ``fill_value`` keyword argument.
"""
dtype = sparse.dtype
device = sparse.device
dense = to_dense(sparse, fill_value=-float('inf'))
r = F.softmax(dense, dim)
# softmax on empty lines results nan, replace with zeros to match the definition
r[r != r] = 0
return r
def sparse_softmax(sparse, dim):
"""Pure Python softmax of a sparse tensor. Assuming -inf for
unspecified sparse tensor data. This is a prototype of
sparse softmax algorithm in Python.
"""
dtype = sparse.dtype
device = sparse.device
# softmax is non-linear operation, so sparse tensors must
# be coalesced.
sparse = sparse.coalesce()
inf = float('inf')
indices = sparse._indices()
values = sparse._values()
if dim < sparse.sparse_dim():
nnz = sparse._nnz()
# compute pool indices
size = sparse.size()
strides = torch.ones((sparse.sparse_dim(), 1), dtype=indices.dtype, device=indices.device)
for i in reversed(range(sparse.sparse_dim() - 1)):
strides[i, 0] = strides[i + 1, 0] * size[i + 1]
strides[dim, 0] = 0
pool = (indices * strides).sum(dim=0)
i2p = {}
for i in range(nnz):
c = int(pool[i])
if c not in i2p:
i2p[c] = len(i2p)
pool[i] = i2p[c]
# compute max
dense_size = tuple(size[sparse.sparse_dim():])
mx = torch.empty((pool.max() + 1,) + dense_size, dtype=dtype, device=device)
mx[:] = -inf
for n in range(nnz):
p = pool[n]
mx[p] = torch.max(mx[p], values[n])
# apply exp to (v - mx) and sum the results
exp_values = torch.empty_like(values)
exp_sums = torch.zeros_like(mx)
for n in range(nnz):
p = pool[n]
v = exp_values[n] = (values[n] - mx[p]).exp()
exp_sums[p] = exp_sums[p] + v
# normalize with the sum of exponents
for n in range(nnz):
p = pool[n]
exp_values[n] = exp_values[n] / exp_sums[p]
return torch.sparse_coo_tensor(indices,
exp_values,
sparse.size(),
dtype=dtype, device=device)
elif dim < sparse.sparse_dim() + sparse.dense_dim():
return torch.sparse_coo_tensor(indices,
F.softmax(values, dim - sparse.sparse_dim() + 1),
sparse.size(),
dtype=dtype, device=device)
else:
raise ValueError(
f'`dim(={dim})` must be smaller than `sparse_dim(={sparse.sparse_dim()}) + dense_dim(={sparse.dense_dim()})`')
def softmax_jacobian_analytic(x, dim):
"""Return Jacobian of softmax using analytic formula
D_jS_i = S_i * (1[i==j] - S_j).
where S = softmax(x, dim), x is dense tensor, i,j in
range(x.shape[dim]).
"""
y = F.softmax(x, dim)
y[y != y] = 0 # replace nan-s with zeros
J = torch.zeros((x.shape[dim],) + tuple(x.shape), dtype=x.dtype, device=x.device)
si = [slice(None)] * len(y.shape)
sj = [slice(None)] * len(y.shape)
s = [slice(None)] * len(J.shape)
for i in range(y.shape[dim]):
si[dim] = i
s[dim + 1] = i
yi = y[tuple(si)]
for j in range(y.shape[dim]):
sj[dim] = j
s[0] = j
if i == j:
J[tuple(s)] = yi * (1 - yi)
else:
yj = y[tuple(sj)]
J[tuple(s)] = - yi * yj
sj[dim] = slice(None)
si[dim] = slice(None)
s[dim + 1] = slice(None)
return J
def softmax_jacobian_autograd(x, dim, log=False):
"""Return Jacobian of softmax using PyTorch autograd feature.
x can be dense or sparse tensor.
"""
import itertools
if x.is_sparse:
x = x.coalesce()
dtype = x.dtype
device = x.device
shape = tuple(x.shape)
J = torch.zeros((shape[dim],) + shape, dtype=dtype, device=device)
for i in range(shape[dim]):
if x.is_sparse:
sparse_dim = x.sparse_dim()
dense_dim = x.dense_dim()
if dim < sparse_dim:
ranges = []
for j, sz in enumerate(shape[:sparse_dim]):
if dim == j:
ranges.append([i])
else:
ranges.append(list(range(sz)))
indices = torch.tensor(list(itertools.product(*ranges)), dtype=torch.long, device=device).t()
values = torch.ones((indices.shape[1],) + shape[sparse_dim:], dtype=dtype, device=device)
else:
ranges = []
for sz in shape[:sparse_dim]:
ranges.append(list(range(sz)))
indices = torch.tensor(list(itertools.product(*ranges)), dtype=torch.long, device=device).t()
values = torch.zeros((indices.shape[1],) + shape[sparse_dim:], dtype=dtype, device=device)
sv = [slice(None)] * (dense_dim + 1)
sv[dim - sparse_dim + 1] = i
values[tuple(sv)] = 1
v = torch.sparse_coo_tensor(indices, values, shape, dtype=dtype, device=device)
else:
v = torch.zeros_like(x)
sv = [slice(None)] * len(v.shape)
sv[dim] = i
v[tuple(sv)] = 1
x_ = x.clone()
x_.requires_grad_(True)
if log:
if x_.is_sparse:
y = torch.sparse.log_softmax(x_, dim)
else:
y = F.log_softmax(x_, dim)
else:
if x_.is_sparse:
y = torch.sparse.softmax(x_, dim)
else:
y = F.softmax(x_, dim)
# replace nan-s with zeros
y.data[y != y] = 0
y.backward(v)
g = x_.grad
if not g.is_sparse:
# replace nan-s with zeros
g.data[g != g] = 0
J[i] = g.to_dense() if g.is_sparse else g
return J
@skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/1166")
def test_op(sparse_dims, nnz, with_size, coalesced):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
def sparse_log(x):
return torch.sparse_coo_tensor(x._indices(), x._values().log(),
x.size(), dtype=x.dtype, device=x.device)
# Check dim out of bounds
with self.assertRaisesRegex(IndexError, r"Dimension out of range"):
torch.sparse.softmax(x, x.dim())
with self.assertRaisesRegex(IndexError, r"Dimension out of range"):
torch.sparse.softmax(x, -x.dim() - 1)
for dim in range(x.dim()):
# Check sparse softmax definition
# check Python sparse softmax
y = sparse_softmax(x, dim)
r1 = softmax_to_dense(x, dim)
r2 = y.to_dense()
self.assertEqual(r1, r2)
# check C++ sparse softmax
for d in (dim, dim - x.dim()):
y1 = torch.sparse.softmax(x, d)
self.assertEqual(y, y1)
# check C++ sparse log_softmax
ly1 = torch.sparse.log_softmax(x, d)
self.assertEqual(ly1, sparse_log(y1))
# Check autograd support on sparse softmax
# check softmax Jacobian definition for dense input
x1 = to_dense(x, fill_value=float('-inf'))
J = softmax_jacobian_analytic(x1, dim)
assert J.shape[0] == x.shape[dim]
assert J.shape[dim + 1] == x.shape[dim]
# check softmax Jacobian from autograd, dense input
J2 = softmax_jacobian_autograd(x1, dim)
self.assertEqual(J, J2)
# check softmax Jacobian from autograd, sparse input
J3 = softmax_jacobian_autograd(x, dim)
self.assertEqual(J, J3)
'''
y = softmax(x, dim)
z = log(y) = log_softmax(x, dim)
Dy/Dx = J
Dz/Dx = Dz/Dy Dy/Dx = 1/y * J
=> J = J_log * y
'''
# log_softmax Jacobian from autograd, dense input
J2_log = softmax_jacobian_autograd(x1, dim, log=True)
# log_softmax Jacobian from autograd, sparse input
J3_log = softmax_jacobian_autograd(x, dim, log=True)
J = J.transpose(0, dim + 1)
J2_log = J2_log.transpose(0, dim + 1)
J3_log = J3_log.transpose(0, dim + 1)
self.assertEqual(J, J2_log * r1)
self.assertEqual(J, J3_log * r1)
if dim == 0:
# check dtype argument
other_dtype = torch.float32
y2 = torch.sparse.softmax(x, dim, dtype=other_dtype)
self.assertEqual(y2.dtype, other_dtype)
self.assertEqual(y2, y1.type(other_dtype))
ly2 = torch.sparse.log_softmax(x, dim, dtype=other_dtype)
self.assertEqual(ly2.dtype, other_dtype)
self.assertEqual(ly2, ly1.type(other_dtype))
test_op(1, 10, [3], coalesced)
test_op(1, 10, [2, 3], coalesced)
test_op(1, 10, [3, 2], coalesced)
test_op(2, 10, [2, 3, 4], coalesced)
test_op(2, 10, [3, 4], coalesced)
test_op(2, 5, [5, 4], coalesced)
test_op(2, 10, [3, 4, 2], coalesced)
test_op(3, 10, [3, 4, 2], coalesced)
test_op(3, 100, [3, 4, 2], coalesced)
test_op(3, 100, [3, 4, 2, 3], coalesced)
test_op(3, 100, [3, 4, 2, 3, 5, 2], coalesced)
test_op(4, 100, [3, 4, 2, 3, 5, 2], coalesced)
def _check_zero_nnz_softmax_op(self, func, ndim, device, dtype):
# create a sparse tensor with shape (0,..., 3) it has no materialize values
t = torch.sparse_coo_tensor([[] for _ in range(ndim)], [], (0,) * (ndim - 1) + (3,), device=device, dtype=dtype)
out = func(t, 0)
self.assertEqual(out, torch.zeros_like(t))
# gradient
t = t.requires_grad_()
gradcheck(lambda x: func(x, 0).to_dense(), (t,), masked=True)
@dtypes(torch.double, torch.float)
@dtypesIfMPS(torch.float32)
@expectedFailureMPS
@unittest.skipIf(TEST_WITH_CROSSREF, "generator unsupported triggers assertion error")
def test_softmax_zero_nnz(self, device, dtype):
self._check_zero_nnz_softmax_op(torch.sparse.softmax, 1, device, dtype)
self._check_zero_nnz_softmax_op(torch.sparse.softmax, 10, device, dtype)
@dtypes(torch.double, torch.float)
@dtypesIfMPS(torch.float32)
@expectedFailureMPS
@unittest.skipIf(TEST_WITH_CROSSREF, "generator unsupported triggers assertion error")
def test_log_softmax_zero_nnz(self, device, dtype):
self._check_zero_nnz_softmax_op(torch.sparse.log_softmax, 1, device, dtype)
self._check_zero_nnz_softmax_op(torch.sparse.log_softmax, 10, device, dtype)
@dtypes(torch.float)
@expectedFailureMPS
def test_log_softmax_float(self, device, dtype):
x = (torch.rand(4, 3, dtype=dtype, device=device) - 10000000.0).to_sparse()
out = torch.sparse.log_softmax(x, dim=1).to_dense()
x_double = x.double()
out_double = torch.sparse.log_softmax(x_double, dim=1).to_dense()
self.assertEqual(out, out_double.to(dtype=dtype))
# TODO: Check after why ROCm's cusparseXcsrgemm2Nnz function doesn't return the same nnz value as CUDA
@coalescedonoff
@dtypes(*floating_and_complex_types())
@dtypesIfMPS(*all_mps_types())
@dtypesIfCUDA(*floating_types_and(*[torch.half] if SM53OrLater and not TEST_WITH_ROCM else [],
*[torch.bfloat16] if SM80OrLater and not TEST_WITH_ROCM else [],
torch.complex64,
*[torch.complex128]
if CUSPARSE_SPMM_COMPLEX128_SUPPORTED or HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
else []))
@unittest.skipIf(TEST_WITH_CROSSREF, "not working with fake tensor")
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2, torch.complex64: 1e-2, torch.float32: 1e-2})
def test_sparse_matmul(self, device, dtype, coalesced):
"""
This function test `torch.sparse.mm` when both the mat1 and mat2 are sparse tensors.
"""
def ref_sparse_mm(a, b):
return a.to_dense() @ b.to_dense()
def grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b):
def test_grad_dense(a_s, b_s, g_s):
a = a_s.to_dense().detach()
b = b_s.to_dense().detach()
g = g_s.to_dense().detach()
a.requires_grad_(True)
b.requires_grad_(True)
c = a @ b
c.backward(g)
return a.grad.sparse_mask(a_s.coalesce()), b.grad.sparse_mask(b_s.coalesce())
a, _, _ = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, _, _ = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
a.requires_grad_(True)
b.requires_grad_(True)
c = torch.sparse.mm(a, b)
c2 = c.to_dense().detach()
c2 = torch.rand_like(c2)
g = c2.sparse_mask(c.coalesce())
c.backward(g)
a_grad, b_grad = test_grad_dense(a, b, g)
# We convert grad to dense since dense and sparse mm
# implementations handle materialized zeroes differently.
self.assertEqual(a.grad.to_dense(), a_grad.to_dense())
self.assertEqual(b.grad.to_dense(), b_grad.to_dense())
def test_sparse_matmul(sparse_dims, nnz, shape_a, shape_b):
a, i_a, v_a = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, i_b, v_b = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
# dense implementation
r1 = ref_sparse_mm(a, b)
# cpp implementation
r2 = torch.sparse.mm(a, b)
self.assertEqual(r1, r2.to_dense())
# Check result is truly coalesced
self.assertTrue(r2.is_coalesced() and is_coalesced_indices(r2))
if dtype in [torch.double, torch.cdouble]:
a.requires_grad_(True)
b.requires_grad_(True)
# check autograd support on sparse matmul
def fn(D1, D2):
return torch.sparse.mm(D1, D2).to_dense()
if a.is_cuda:
# For cuda, `nondet_tol` is set with `1e-5`
# This is because cuSparse sometimes returns approximate zero values like `~e-323`
# TODO: Check this cuSparse issue.
# This happens when you do chain multiplication `torch.sparse.mm` operations
gradcheck(fn, (a, b), nondet_tol=1e-5, masked=True)
else:
gradcheck(fn, (a, b), masked=True)
grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b)
def test_error_cases():
def fn(sparse_dims, nnz, shape_a, shape_b):
a, i_a, v_a = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, i_b, v_b = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
r2 = torch.sparse.mm(a, b)
# This is not a matrix
self.assertRaises(RuntimeError, lambda: fn(3, 4, [2, 2, 2], [2, 2, 2]))
# Shapes does not
self.assertRaisesRegex(RuntimeError,
r"mat1 and mat2 shapes cannot be multiplied \(2x3 and 4x2\)",
lambda: fn(2, 10, [2, 3], [4, 2]))
def different_dtypes():
a, i_a, v_a = self._gen_sparse(2, 10, [2, 2], dtype, device, coalesced)
b, i_b, v_b = self._gen_sparse(2, 10, [2, 2], dtype, device, coalesced)
r2 = torch.sparse.mm(a.to(torch.float32), a.to(torch.float16))
self.assertRaisesRegex(RuntimeError, 'mat1 dtype Float does not match mat2 dtype Half', different_dtypes)
def test_backward_noncontiguous():
# Sparse.mm backward used to wrong with non-contiguous grads,
# see https://github.com/pytorch/pytorch/issues/102493.
n_reps = 7
for _ in range(n_reps):
A = torch.eye(5).to_sparse().requires_grad_(True)
B = torch.eye(5).to_sparse()
out = torch.sparse.mm(A, B)
out.coalesce().values().sum().backward()
self.assertEqual(A.grad, A)
for n in range(2, 5):
for m in range(2, 8):
for p in range(2, 8):
test_sparse_matmul(2, 10, [n, m], [m, p])
test_sparse_matmul(2, 0, [0, 0], [0, 0])
test_sparse_matmul(2, 0, [0, 10], [10, 0])
test_error_cases()
test_backward_noncontiguous()
@coalescedonoff
@dtypes(torch.double)
def test_assign(self, device, dtype, coalesced):
def assign_to():
a, i_a, v_a = self._gen_sparse(2, 5, [2, 3], dtype, device, coalesced)
a[0] = 100
self.assertRaises(TypeError, assign_to)
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_full_broadcast_to(self, device, dtype):
def can_broadcast(s0, s1):
s0 = tuple(reversed(s0))
s1 = tuple(reversed(s1))
for i in range(len(s0)):
if s0[i] != 1 and s0[i] != s1[i]:
return False
return True
sizes = (
(), (1,), (2,), (1, 1), (3, 1), (3, 2), (4, 1, 1), (4, 3, 2)
)
for s0, s1 in itertools.combinations(sizes, r=2):
t = make_tensor(s0, dtype=dtype, device=device, low=-9, high=9)
for sparse_dims in range(1, len(s0) + 1):
s = t.to_sparse(sparse_dims)
if can_broadcast(s0, s1):
t_res = torch.broadcast_to(t, s1)
s_res = torch._sparse_broadcast_to(s, s1)
torch._validate_sparse_coo_tensor_args(s_res._indices(), s_res._values(), s_res.shape)
if s_res.is_coalesced():
# ensure that is_coalesced is estimated correctly
self.assertEqual(s_res, torch.sparse_coo_tensor(s_res._indices(), s_res._values(), s_res.shape).coalesce())
self.assertEqual(s_res.to_dense(), t_res)
else:
with self.assertRaisesRegex(RuntimeError,
r"does not broadcast"):
torch._sparse_broadcast_to(s, s1)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@dtypesIfMPS(torch.float32, torch.complex64)
def test_sparse_broadcast_to(self, device, dtype, coalesced):
def test(sparse_dims, nnz, with_size, new_size):
x = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
y = self.safeToDense(x)
x1 = torch._sparse_broadcast_to(x, new_size)
y1 = y.broadcast_to(new_size)
self.assertEqual(self.safeToDense(x1), y1)
test(4, 6, [7, 3, 1, 3, 0], [7, 3, 4, 3, 0])
test(4, 6, [7, 3, 1, 3, 0], [2, 7, 3, 1, 3, 0])
test(4, 6, [7, 3, 1, 3, 1, 3], [7, 3, 1, 3, 2, 3])
test(4, 6, [7, 3, 1, 3, 2, 1], [7, 3, 1, 3, 2, 3])
def _test_mul_skips(self, device, dtype, coalesced):
skipTestIfUncoalesced = False
# This case always coalesce inputs and that could lead to loss of precision,
# hence it is inhibited for float16/bfloat16 by providing already coalesced tensors.
if not coalesced and dtype in {torch.float16, torch.bfloat16}:
skipTestIfUncoalesced = True
# to_dense is problematic for boolean non-coalesced CUDA tensors
# see https://github.com/pytorch/pytorch/issues/81648
if not coalesced and dtype == torch.bool and torch.device(device).type == "cuda":
skipTestIfUncoalesced = True
if skipTestIfUncoalesced:
self.skipTest(f"Test with dtype={dtype}, device={device} runs only with coalesced inputs")
@coalescedonoff
# NOTE: addcmul_out is not implemented for bool.
@dtypes(*all_types_and_complex_and(torch.bfloat16, torch.float16))
@dtypesIfMPS(*all_mps_types())
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
def test_sparse_sparse_mul(self, device, dtype, coalesced):
self._test_mul_skips(device, dtype, coalesced)
shape = (2, 3, 4, 10)
nnz = 10
def check(self, x, y):
res_sparse = x * y
res_dense = x.to_dense() * y.to_dense()
self.assertEqual(res_sparse.to_dense(), res_dense)
def check_empty(sparse_shape, nnz, dense_shape, coalesce):
from itertools import product
for nnz_val, shape_suffix in product((nnz, 0), ((), (0,))):
empty_sparse_shape = sparse_shape + shape_suffix
empty_dense_shape = dense_shape + shape_suffix
x = self._gen_sparse(sparse_dim, nnz_val, empty_sparse_shape, dtype, device, coalesce)[0]
check(self, x, x)
# TODO: uncomment once backward is implemented for sparse tensors that broadcast in dense dims.
# def check_autograd(x, y):
# if dtype in {torch.double, torch.cdouble}:
# xa = x.detach().clone().requires_grad_(True)
# ya = y.detach().clone().requires_grad_(True)
# gradcheck(lambda a, b: (a * b).to_dense(), (xa, ya), masked=True)
# gradcheck(lambda a, b: (a * b).to_dense(), (ya, xa), masked=True)
for dim in range(len(shape) + 1):
sub_shape = shape[dim:]
sparse_dim = len(sub_shape) // 2
check_empty(sub_shape, nnz, shape, coalesced)
x = self._gen_sparse(sparse_dim, nnz, sub_shape, dtype, device, coalesced)[0]
y = self._gen_sparse(sparse_dim, nnz, sub_shape, dtype, device, coalesced)[0]
check(self, x, y)
# TODO: uncomment once supported
# check_autograd(x, y)
# check broadcasting in dense dims
for d in range(sparse_dim, len(sub_shape)):
new_shape = sub_shape[:d] + (1,) + sub_shape[d + 1:]
y = self._gen_sparse(sparse_dim, nnz, new_shape, dtype, device, coalesced)[0]
check(self, x, y)
# TODO: uncomment once supported
# check_autograd(x, y)
@coalescedonoff
@expectedFailureMPS
@dtypes(*all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16))
@dtypesIfMPS(*all_mps_types())
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
def test_sparse_dense_mul(self, device, dtype, coalesced):
self._test_mul_skips(device, dtype, coalesced)
shape = (2, 3, 4, 10)
nnz = 10
def check(self, s, d):
res = d * s
# check commutativity
self.assertEqual(res, s * d)
# check correctness
self.assertEqual(res.to_dense(), s.to_dense() * d)
# check in-placeness for dense
if d.dim() >= s.dim():
dc = d.clone()
self.assertEqual(d.mul_(s), dc.mul_(s.to_dense()))
# check in-placeness for sparse
if s.dim() >= d.dim():
# for sparse
sc = s.clone()
self.assertEqual(s.mul_(d).to_dense(), sc.to_dense().mul_(d))
for dim in range(len(shape) + 1):
sub_shape = shape[dim:]
sparse_dim = len(sub_shape) // 2
def check_empty(sparse_shape, nnz, dense_shape, coalesce):
from itertools import product
for nnz_val, shape_suffix in product((nnz, 0), ((), (0,))):
empty_sparse_shape = sparse_shape + shape_suffix
empty_dense_shape = dense_shape + shape_suffix
s = self._gen_sparse(sparse_dim, nnz_val, empty_sparse_shape, dtype, device, coalesce)[0]
d = make_tensor(empty_dense_shape, dtype=dtype, device=device)
check(self, s, d)
# check scalar multiplication
s = self._gen_sparse(sparse_dim, nnz, sub_shape, dtype, device, coalesced)[0]
for scalar in (True, 1, 1.0):
res_sparse_right = s * scalar
res_sparse_left = scalar * s
res_dense = s.to_dense() * scalar
# check correctness and dtype
self.assertEqual(s.to(res_sparse_right.dtype), res_sparse_right)
self.assertEqual(res_sparse_right, res_sparse_left)
self.assertEqual(res_sparse_right.dtype, res_dense.dtype)
self.assertEqual(res_sparse_left.dtype, res_dense.dtype)
# check scalar as 0-dim sparse tensor
tscalar = torch.tensor(scalar, device=device)
sscalar = tscalar.to_sparse()
res_sparse_right = s * sscalar
res_sparse_left = sscalar * s
self.assertEqual(res_sparse_right, res_sparse_left)
self.assertEqual(s.to(res_sparse_right.dtype), res_sparse_right)
# check non-coalesced 0-dim scalar
# we skip torch.bool because for such tensors
# coalesce.to_dense != to_dense
if dtype == torch.bool:
return
for scalar_dtype in (int, float):
scalar = scalar_dtype(1)
idx = torch.tensor([], device=device).reshape(0, 2)
val = torch.tensor([scalar, scalar], device=device)
sscalar = torch.sparse_coo_tensor(idx, val, ())
res_dense = s.to_dense() * sscalar.to_dense()
self.assertEqual((s * sscalar).to_dense(), res_dense)
self.assertEqual((sscalar * s).to_dense(), res_dense)
# Case 1: sparse broadcasts over dense
s = self._gen_sparse(sparse_dim, nnz, sub_shape, dtype, device, coalesced)[0]
d = make_tensor(shape, dtype=dtype, device=device)
check(self, s, d)
check_empty(sub_shape, nnz, shape, coalesced)
# Case 2: dense broadcasts over sparse
s = self._gen_sparse(3, nnz, shape, dtype, device, coalesced)[0]
d = make_tensor(sub_shape, dtype=dtype, device=device)
check(self, s, d)
check_empty(shape, nnz, sub_shape, coalesced)
@unittest.skipIf(not TEST_NUMPY, "NumPy is not available")
@onlyCPU
@dtypes(*all_types_and_complex_and(torch.bool))
def test_sparse_spdiags(self, device, dtype):
make_diags = functools.partial(make_tensor, dtype=dtype, device=device)
make_offsets = functools.partial(torch.tensor, dtype=torch.long, device=device)
if TEST_SCIPY:
def reference(diags, offsets, shape):
return scipy.sparse.spdiags(diags, offsets, *shape).toarray()
else:
def reference(diags, offsets, shape):
result = torch.zeros(shape, dtype=dtype, device=device)
for i, off in enumerate(offsets):
res_view = result.diagonal(off)
data = diags[i]
if off > 0:
data = data[off:]
m = min(res_view.shape[0], data.shape[0])
res_view[:m] = data[:m]
return result
def check_valid(diags, offsets, shape, layout=None):
ref_out = reference(diags, offsets, shape)
out = torch.sparse.spdiags(diags, offsets, shape, layout=layout)
if layout is None:
ex_layout = torch.sparse_coo
else:
ex_layout = layout
out_dense = out.to_dense()
self.assertTrue(out.layout == ex_layout, f"Output layout {out.layout} expected {ex_layout}")
self.assertEqual(out_dense, ref_out, f"Result:\n{out_dense} does not match reference:\n{ref_out}")
def check_invalid(args, error):
with self.assertRaisesRegex(RuntimeError, error):
torch.sparse.spdiags(*args)
def valid_cases():
# some normal cases
yield (make_diags((1, 5)), make_offsets([0]), (5, 5))
yield (make_diags((3, 3)), make_offsets([-1, 0, 1]), (4, 4))
# non-contiguous diags
yield (make_diags((5, 4), noncontiguous=True), make_offsets([-1, 1, 0, 2, -2]), (5, 5))
# non-contiguous offsets
yield (make_diags((3, 4)), make_offsets([1, -1, 0, -2, 2])[::2], (5, 5))
# non-contiguous diags + offsets
yield (make_diags((3, 4), noncontiguous=True), make_offsets([1, -1, 0, -2, 2])[::2], (5, 5))
# correct dimensionality, 2d, 2d , and shapes match, but the number of diagonals is zero
yield (make_diags((0, 3)), make_offsets([]), (3, 3))
# forward rotation of upper diagonals
yield (make_diags((3, 8)), make_offsets([1, 2, 3]), (4, 4))
# rotation exausts input space to read from
yield (make_diags((2, 3)), make_offsets([2, 1]), (3, 3))
# Simple cases repeated with special output format
yield (make_diags((1, 5)), make_offsets([0]), (5, 5), torch.sparse_csc)
yield (make_diags((3, 3)), make_offsets([-1, 0, 1]), (4, 4), torch.sparse_csr)
# vector diags
yield (make_diags((3, )), make_offsets([1]), (4, 4))
# Scalar offset
yield (make_diags((1, 3)), make_offsets(2), (4, 4))
# offsets out of range
yield (make_diags((1, 3)), make_offsets([3]), (3, 3))
yield (make_diags((1, 3)), make_offsets([-3]), (3, 3))
for case in valid_cases():
check_valid(*case)
def invalid_cases():
yield (make_diags((1, 3)), make_offsets([0]), (3, 2, 3)), "Output shape must be 2d"
yield (make_diags((2, 3)), make_offsets([[1, 2], [0, 3]]), (3, 3)), "Offsets must be scalar or vector"
yield (make_diags((3, 2, 3)), make_offsets([0, 1, 2]), (4, 4)), "Diagonals must be vector or matrix"
yield (make_diags((3, 3)), make_offsets([-1, 0]), (3, 3)), \
r"Number of diagonals \(\d\) does not match the number of offsets \(\d\)"
yield (make_diags((5,)), make_offsets([0, 1, 2, 3, 4]), (3, 3)), \
r"Number of diagonals \(\d\) does not match the number of offsets \(\d\)"
yield (make_diags((2, 2)), make_offsets([-1, 0]), (2, 3), torch.strided), \
r"Only output layouts \(\w+, \w+, \w+\) are supported, got \w+"
yield (make_diags((2, 5)), make_offsets([0, 0]), (5, 5)), "Offset tensor contains duplicate values"
yield (make_diags((1, 5)), make_offsets([0]).to(torch.int32), (5, 5)), r"Offset Tensor must have dtype Long but got \w+"
for case, error_regex in invalid_cases():
check_invalid(case, error_regex)
def test_small_nnz_coalesced(self):
# creating a coo tensor with nnz == 0 is always coalesced
self.assertTrue(torch.sparse_coo_tensor([[], []], [], (2, 2)).is_coalesced())
# same for a coo tensor with only 1 nnz
self.assertTrue(torch.sparse_coo_tensor([[0], [0]], [1], (2, 2)).is_coalesced())
# two or more nnz coalesced is false as it can't be verified without an expensive check
self.assertFalse(torch.sparse_coo_tensor([[0, 0], [0, 0]], [1, 2], (2, 2)).is_coalesced())
# even if there are no duplicates
self.assertFalse(torch.sparse_coo_tensor([[0, 1], [0, 1]], [1, 2], (2, 2)).is_coalesced())
@coalescedonoff
@dtypes(*all_types_and_complex_and(torch.bool))
@dtypesIfMPS(*all_mps_types())
def test_sum(self, device, dtype, coalesced):
def run_test(shape, nnz):
a = self._gen_sparse(2, nnz, shape, dtype, device, coalesced)[0]
self.assertEqual(a.sum(), a._values().sum())
if dtype.is_floating_point or dtype.is_complex:
a.requires_grad_(True)
a_inter = a.sum()
a_inter.abs().backward()
with torch.no_grad():
self.assertEqual(a.grad, torch.ones(shape, dtype=dtype, device=device) * torch.sgn(a_inter))
for shape in [(10, 5), (10, 10)]:
run_test(shape, 0)
run_test(shape, max(shape))
run_test(shape, shape[0] * shape[1])
| TestSparse |
python | python-openxml__python-docx | src/docx/opc/coreprops.py | {
"start": 366,
"end": 3468
} | class ____:
"""Corresponds to part named ``/docProps/core.xml``, containing the core document
properties for this document package."""
def __init__(self, element: CT_CoreProperties):
self._element = element
@property
def author(self):
return self._element.author_text
@author.setter
def author(self, value: str):
self._element.author_text = value
@property
def category(self):
return self._element.category_text
@category.setter
def category(self, value: str):
self._element.category_text = value
@property
def comments(self):
return self._element.comments_text
@comments.setter
def comments(self, value: str):
self._element.comments_text = value
@property
def content_status(self):
return self._element.contentStatus_text
@content_status.setter
def content_status(self, value: str):
self._element.contentStatus_text = value
@property
def created(self):
return self._element.created_datetime
@created.setter
def created(self, value: dt.datetime):
self._element.created_datetime = value
@property
def identifier(self):
return self._element.identifier_text
@identifier.setter
def identifier(self, value: str):
self._element.identifier_text = value
@property
def keywords(self):
return self._element.keywords_text
@keywords.setter
def keywords(self, value: str):
self._element.keywords_text = value
@property
def language(self):
return self._element.language_text
@language.setter
def language(self, value: str):
self._element.language_text = value
@property
def last_modified_by(self):
return self._element.lastModifiedBy_text
@last_modified_by.setter
def last_modified_by(self, value: str):
self._element.lastModifiedBy_text = value
@property
def last_printed(self):
return self._element.lastPrinted_datetime
@last_printed.setter
def last_printed(self, value: dt.datetime):
self._element.lastPrinted_datetime = value
@property
def modified(self):
return self._element.modified_datetime
@modified.setter
def modified(self, value: dt.datetime):
self._element.modified_datetime = value
@property
def revision(self):
return self._element.revision_number
@revision.setter
def revision(self, value: int):
self._element.revision_number = value
@property
def subject(self):
return self._element.subject_text
@subject.setter
def subject(self, value: str):
self._element.subject_text = value
@property
def title(self):
return self._element.title_text
@title.setter
def title(self, value: str):
self._element.title_text = value
@property
def version(self):
return self._element.version_text
@version.setter
def version(self, value: str):
self._element.version_text = value
| CoreProperties |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 10777,
"end": 11332
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = (
"BAD_CERT",
"BAD_EMAIL",
"EXPIRED_KEY",
"GPGVERIFY_ERROR",
"GPGVERIFY_UNAVAILABLE",
"INVALID",
"MALFORMED_SIG",
"NOT_SIGNING_KEY",
"NO_USER",
"OCSP_ERROR",
"OCSP_PENDING",
"OCSP_REVOKED",
"UNKNOWN_KEY",
"UNKNOWN_SIG_TYPE",
"UNSIGNED",
"UNVERIFIED_EMAIL",
"VALID",
)
| GitSignatureState |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 78621,
"end": 81929
} | class ____(WebTestCase):
    """Tests of when ``get_current_user`` is evaluated during template
    rendering (lazy evaluation is the goal -- see the skipped test and #820).
    """
    def get_app_kwargs(self):
        # Two UI modules -- one that touches current_user, one that doesn't --
        # plus matching templates, so each access path can be exercised.
        class WithoutUserModule(UIModule):
            def render(self):
                return ""
        class WithUserModule(UIModule):
            def render(self):
                return str(self.current_user)
        loader = DictLoader(
            {
                "without_user.html": "",
                "with_user.html": "{{ current_user }}",
                "without_user_module.html": "{% module WithoutUserModule() %}",
                "with_user_module.html": "{% module WithUserModule() %}",
            }
        )
        return dict(
            template_loader=loader,
            ui_modules={
                "WithUserModule": WithUserModule,
                "WithoutUserModule": WithoutUserModule,
            },
        )
    def tearDown(self):
        super().tearDown()
        # Template loaders are cached on RequestHandler; clear them so other
        # test cases don't pick up this app's DictLoader.
        RequestHandler._template_loaders.clear()
    def get_handlers(self):
        class CurrentUserHandler(RequestHandler):
            # Records whether get_current_user was ever invoked for a request.
            def prepare(self):
                self.has_loaded_current_user = False
            def get_current_user(self):
                self.has_loaded_current_user = True
                return ""
        class WithoutUserHandler(CurrentUserHandler):
            def get(self):
                self.render_string("without_user.html")
                self.finish(str(self.has_loaded_current_user))
        class WithUserHandler(CurrentUserHandler):
            def get(self):
                self.render_string("with_user.html")
                self.finish(str(self.has_loaded_current_user))
        class CurrentUserModuleHandler(CurrentUserHandler):
            def get_template_namespace(self):
                # If RequestHandler.get_template_namespace is called, then
                # get_current_user is evaluated. Until #820 is fixed, this
                # is a small hack to circumvent the issue.
                return self.ui
        class WithoutUserModuleHandler(CurrentUserModuleHandler):
            def get(self):
                self.render_string("without_user_module.html")
                self.finish(str(self.has_loaded_current_user))
        class WithUserModuleHandler(CurrentUserModuleHandler):
            def get(self):
                self.render_string("with_user_module.html")
                self.finish(str(self.has_loaded_current_user))
        return [
            ("/without_user", WithoutUserHandler),
            ("/with_user", WithUserHandler),
            ("/without_user_module", WithoutUserModuleHandler),
            ("/with_user_module", WithUserModuleHandler),
        ]
    @unittest.skip("needs fix")
    def test_get_current_user_is_lazy(self):
        # TODO: Make this test pass. See #820.
        response = self.fetch("/without_user")
        self.assertEqual(response.body, b"False")
    def test_get_current_user_works(self):
        # Templates that reference current_user must trigger the lookup.
        response = self.fetch("/with_user")
        self.assertEqual(response.body, b"True")
    def test_get_current_user_from_ui_module_is_lazy(self):
        response = self.fetch("/without_user_module")
        self.assertEqual(response.body, b"False")
    def test_get_current_user_from_ui_module_works(self):
        response = self.fetch("/with_user_module")
        self.assertEqual(response.body, b"True")
| GetCurrentUserTest |
python | PyCQA__pylint | doc/data/messages/i/invalid-slots-object/bad.py | {
"start": 0,
"end": 68
} | class ____:
    # Deliberately broken documentation example: every __slots__ item must be
    # a string, and the integer 3 is not.  Do not "fix" -- the trailing marker
    # is the message pylint is expected to emit here.
    __slots__ = ("name", 3) # [invalid-slots-object]
| Person |
python | numpy__numpy | numpy/f2py/tests/test_crackfortran.py | {
"start": 9645,
"end": 9939
} | class ____:
    """Tests for crackfortran's parsing of module declarations."""
    def test_dependencies(self, tmp_path):
        # Crack a fixture file and check that the "=" initializer text of the
        # dependent variable is preserved verbatim.
        fpath = util.getpath("tests", "src", "crackfortran", "foo_deps.f90")
        mod = crackfortran.crackfortran([str(fpath)])
        assert len(mod) == 1
        assert mod[0]["vars"]["abar"]["="] == "bar('abar')"
| TestModuleDeclaration |
python | pypa__setuptools | setuptools/tests/config/test_setupcfg.py | {
"start": 3269,
"end": 15489
} | class ____:
def test_basic(self, tmpdir):
fake_env(
tmpdir,
'[metadata]\n'
'version = 10.1.1\n'
'description = Some description\n'
'long_description_content_type = text/something\n'
'long_description = file: README\n'
'name = fake_name\n'
'keywords = one, two\n'
'provides = package, package.sub\n'
'license = otherlic\n'
'download_url = http://test.test.com/test/\n'
'maintainer_email = test@test.com\n',
)
tmpdir.join('README').write('readme contents\nline2')
meta_initial = {
# This will be used so `otherlic` won't replace it.
'license': 'BSD 3-Clause License',
}
with get_dist(tmpdir, meta_initial) as dist:
metadata = dist.metadata
assert metadata.version == '10.1.1'
assert metadata.description == 'Some description'
assert metadata.long_description_content_type == 'text/something'
assert metadata.long_description == 'readme contents\nline2'
assert metadata.provides == ['package', 'package.sub']
assert metadata.license == 'BSD 3-Clause License'
assert metadata.name == 'fake_name'
assert metadata.keywords == ['one', 'two']
assert metadata.download_url == 'http://test.test.com/test/'
assert metadata.maintainer_email == 'test@test.com'
def test_license_cfg(self, tmpdir):
fake_env(
tmpdir,
DALS(
"""
[metadata]
name=foo
version=0.0.1
license=Apache 2.0
"""
),
)
with get_dist(tmpdir) as dist:
metadata = dist.metadata
assert metadata.name == "foo"
assert metadata.version == "0.0.1"
assert metadata.license == "Apache 2.0"
def test_file_mixed(self, tmpdir):
fake_env(
tmpdir,
'[metadata]\nlong_description = file: README.rst, CHANGES.rst\n\n',
)
tmpdir.join('README.rst').write('readme contents\nline2')
tmpdir.join('CHANGES.rst').write('changelog contents\nand stuff')
with get_dist(tmpdir) as dist:
assert dist.metadata.long_description == (
'readme contents\nline2\nchangelog contents\nand stuff'
)
def test_file_sandboxed(self, tmpdir):
tmpdir.ensure("README")
project = tmpdir.join('depth1', 'depth2')
project.ensure(dir=True)
fake_env(project, '[metadata]\nlong_description = file: ../../README\n')
with get_dist(project, parse=False) as dist:
with pytest.raises(DistutilsOptionError):
dist.parse_config_files() # file: out of sandbox
def test_aliases(self, tmpdir):
fake_env(
tmpdir,
'[metadata]\n'
'author_email = test@test.com\n'
'home_page = http://test.test.com/test/\n'
'summary = Short summary\n'
'platform = a, b\n'
'classifier =\n'
' Framework :: Django\n'
' Programming Language :: Python :: 3.5\n',
)
with get_dist(tmpdir) as dist:
metadata = dist.metadata
assert metadata.author_email == 'test@test.com'
assert metadata.url == 'http://test.test.com/test/'
assert metadata.description == 'Short summary'
assert metadata.platforms == ['a', 'b']
assert metadata.classifiers == [
'Framework :: Django',
'Programming Language :: Python :: 3.5',
]
def test_multiline(self, tmpdir):
fake_env(
tmpdir,
'[metadata]\n'
'name = fake_name\n'
'keywords =\n'
' one\n'
' two\n'
'classifiers =\n'
' Framework :: Django\n'
' Programming Language :: Python :: 3.5\n',
)
with get_dist(tmpdir) as dist:
metadata = dist.metadata
assert metadata.keywords == ['one', 'two']
assert metadata.classifiers == [
'Framework :: Django',
'Programming Language :: Python :: 3.5',
]
def test_dict(self, tmpdir):
fake_env(
tmpdir,
'[metadata]\n'
'project_urls =\n'
' Link One = https://example.com/one/\n'
' Link Two = https://example.com/two/\n',
)
with get_dist(tmpdir) as dist:
metadata = dist.metadata
assert metadata.project_urls == {
'Link One': 'https://example.com/one/',
'Link Two': 'https://example.com/two/',
}
def test_version(self, tmpdir):
package_dir, config = fake_env(
tmpdir, '[metadata]\nversion = attr: fake_package.VERSION\n'
)
sub_a = package_dir.mkdir('subpkg_a')
sub_a.join('__init__.py').write('')
sub_a.join('mod.py').write('VERSION = (2016, 11, 26)')
sub_b = package_dir.mkdir('subpkg_b')
sub_b.join('__init__.py').write('')
sub_b.join('mod.py').write(
'import third_party_module\nVERSION = (2016, 11, 26)'
)
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '1.2.3'
config.write('[metadata]\nversion = attr: fake_package.get_version\n')
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '3.4.5.dev'
config.write('[metadata]\nversion = attr: fake_package.VERSION_MAJOR\n')
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '1'
config.write('[metadata]\nversion = attr: fake_package.subpkg_a.mod.VERSION\n')
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '2016.11.26'
config.write('[metadata]\nversion = attr: fake_package.subpkg_b.mod.VERSION\n')
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '2016.11.26'
def test_version_file(self, tmpdir):
fake_env(tmpdir, '[metadata]\nversion = file: fake_package/version.txt\n')
tmpdir.join('fake_package', 'version.txt').write('1.2.3\n')
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '1.2.3'
tmpdir.join('fake_package', 'version.txt').write('1.2.3\n4.5.6\n')
with pytest.raises(DistutilsOptionError):
with get_dist(tmpdir) as dist:
dist.metadata.version
def test_version_with_package_dir_simple(self, tmpdir):
fake_env(
tmpdir,
'[metadata]\n'
'version = attr: fake_package_simple.VERSION\n'
'[options]\n'
'package_dir =\n'
' = src\n',
package_path='src/fake_package_simple',
)
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '1.2.3'
def test_version_with_package_dir_rename(self, tmpdir):
fake_env(
tmpdir,
'[metadata]\n'
'version = attr: fake_package_rename.VERSION\n'
'[options]\n'
'package_dir =\n'
' fake_package_rename = fake_dir\n',
package_path='fake_dir',
)
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '1.2.3'
def test_version_with_package_dir_complex(self, tmpdir):
fake_env(
tmpdir,
'[metadata]\n'
'version = attr: fake_package_complex.VERSION\n'
'[options]\n'
'package_dir =\n'
' fake_package_complex = src/fake_dir\n',
package_path='src/fake_dir',
)
with get_dist(tmpdir) as dist:
assert dist.metadata.version == '1.2.3'
def test_unknown_meta_item(self, tmpdir):
fake_env(tmpdir, '[metadata]\nname = fake_name\nunknown = some\n')
with get_dist(tmpdir, parse=False) as dist:
dist.parse_config_files() # Skip unknown.
def test_usupported_section(self, tmpdir):
fake_env(tmpdir, '[metadata.some]\nkey = val\n')
with get_dist(tmpdir, parse=False) as dist:
with pytest.raises(DistutilsOptionError):
dist.parse_config_files()
def test_classifiers(self, tmpdir):
expected = set([
'Framework :: Django',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
])
# From file.
_, config = fake_env(tmpdir, '[metadata]\nclassifiers = file: classifiers\n')
tmpdir.join('classifiers').write(
'Framework :: Django\n'
'Programming Language :: Python :: 3\n'
'Programming Language :: Python :: 3.5\n'
)
with get_dist(tmpdir) as dist:
assert set(dist.metadata.classifiers) == expected
# From list notation
config.write(
'[metadata]\n'
'classifiers =\n'
' Framework :: Django\n'
' Programming Language :: Python :: 3\n'
' Programming Language :: Python :: 3.5\n'
)
with get_dist(tmpdir) as dist:
assert set(dist.metadata.classifiers) == expected
def test_interpolation(self, tmpdir):
fake_env(tmpdir, '[metadata]\ndescription = %(message)s\n')
with pytest.raises(configparser.InterpolationMissingOptionError):
with get_dist(tmpdir):
pass
def test_non_ascii_1(self, tmpdir):
fake_env(tmpdir, '[metadata]\ndescription = éàïôñ\n', encoding='utf-8')
with get_dist(tmpdir):
pass
def test_non_ascii_3(self, tmpdir):
fake_env(tmpdir, '\n# -*- coding: invalid\n')
with get_dist(tmpdir):
pass
def test_non_ascii_4(self, tmpdir):
fake_env(
tmpdir,
'# -*- coding: utf-8\n[metadata]\ndescription = éàïôñ\n',
encoding='utf-8',
)
with get_dist(tmpdir) as dist:
assert dist.metadata.description == 'éàïôñ'
def test_not_utf8(self, tmpdir):
"""
Config files encoded not in UTF-8 will fail
"""
fake_env(
tmpdir,
'# vim: set fileencoding=iso-8859-15 :\n[metadata]\ndescription = éàïôñ\n',
encoding='iso-8859-15',
)
with pytest.raises(UnicodeDecodeError):
with get_dist(tmpdir):
pass
@pytest.mark.parametrize(
("error_msg", "config", "invalid"),
[
(
"Invalid dash-separated key 'author-email' in 'metadata' (setup.cfg)",
DALS(
"""
[metadata]
author-email = test@test.com
maintainer_email = foo@foo.com
"""
),
{"author-email": "test@test.com"},
),
(
"Invalid uppercase key 'Name' in 'metadata' (setup.cfg)",
DALS(
"""
[metadata]
Name = foo
description = Some description
"""
),
{"Name": "foo"},
),
],
)
def test_invalid_options_previously_deprecated(
self, tmpdir, error_msg, config, invalid
):
# This test and related methods can be removed when no longer needed.
# Deprecation postponed due to push-back from the community in
# https://github.com/pypa/setuptools/issues/4910
fake_env(tmpdir, config)
with pytest.warns(SetuptoolsDeprecationWarning, match=re.escape(error_msg)):
dist = get_dist(tmpdir).__enter__()
tmpdir.join('setup.cfg').remove()
for field, value in invalid.items():
attr = field.replace("-", "_").lower()
assert getattr(dist.metadata, attr) == value
| TestMetadata |
python | mlflow__mlflow | dev/clint/src/clint/rules/mlflow_class_name.py | {
"start": 36,
"end": 174
} | class ____(Rule):
    # Lint rule: flags class names spelled "MLflow"/"MLFlow" instead of "Mlflow".
    def _message(self) -> str:
        """Return the user-facing message shown when this rule fires."""
        return "Should use `Mlflow` in class name, not `MLflow` or `MLFlow`."
| MlflowClassName |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 26546,
"end": 26914
} | class ____(BaseModel):
    """
    TimeDelta can be used to interact with datetime.timedelta objects.
    """
    # Type discriminator serialized as "__type"; aliased because a
    # leading-dunder name is not a usable Python field identifier.
    field__type: Annotated[str | None, Field(alias="__type", title="Type")] = "TimeDelta"
    # The three components mirror datetime.timedelta's normalized fields.
    days: Annotated[int, Field(title="Days")]
    seconds: Annotated[int, Field(title="Seconds")]
    microseconds: Annotated[int, Field(title="Microseconds")]
python | sympy__sympy | sympy/vector/orienters.py | {
"start": 371,
"end": 622
} | class ____(Basic):
"""
Super-class for all orienter classes.
"""
def rotation_matrix(self):
"""
The rotation matrix corresponding to this orienter
instance.
"""
return self._parent_orient
| Orienter |
python | apache__airflow | providers/databricks/tests/unit/databricks/hooks/test_databricks.py | {
"start": 52730,
"end": 53276
} | class ____(TestDatabricksHookToken):
    # Variant of the token-auth hook tests: the connection's "extra" carries
    # only the token, so the host must come from the Connection.host field.
    @pytest.fixture(autouse=True)
    def setup_connections(self, create_connection_without_db):
        """Register a Databricks connection whose extras hold only a token."""
        create_connection_without_db(
            Connection(
                conn_id=DEFAULT_CONN_ID,
                conn_type="databricks",
                host=HOST,
                login=None,
                password=None,
                extra=json.dumps({"token": TOKEN}),
            )
        )
        self.hook = DatabricksHook()
@pytest.mark.db_test
| TestDatabricksHookTokenWhenNoHostIsProvidedInExtra |
python | pallets__flask | tests/type_check/typing_route.py | {
"start": 2154,
"end": 2532
} | class ____(View):
    """Pluggable Flask view that renders a fixed template."""
    def __init__(self: RenderTemplateView, template_name: str) -> None:
        # The template path is bound at registration time via View.as_view().
        self.template_name = template_name
    def dispatch_request(self: RenderTemplateView) -> str:
        """Render the configured template and return the resulting string."""
        return render_template(self.template_name)
# Register the class-based view; template_name is forwarded through
# as_view() to the view's constructor.
app.add_url_rule(
    "/about",
    view_func=RenderTemplateView.as_view("about_page", template_name="about.html"),
)
| RenderTemplateView |
python | astropy__astropy | astropy/modeling/fitting.py | {
"start": 4247,
"end": 5654
} | class ____:
"""Class for fitting uncertainties."""
def __init__(self, cov_matrix, param_names):
self.param_names = param_names
self.stds = self._calc_stds(cov_matrix)
def _calc_stds(self, cov_matrix):
# sometimes scipy lstsq returns a non-sensical negative vals in the
# diagonals of the cov_x it computes.
stds = [np.sqrt(x) if x > 0 else None for x in np.diag(cov_matrix)]
return stds
def pprint(self, max_lines, round_val):
longest_name = max(len(x) for x in self.param_names)
ret_str = "standard deviations\n"
for i, std in enumerate(self.stds):
if i <= max_lines - 1:
param = self.param_names[i]
ret_str += (
f"{param}{' ' * (longest_name - len(param))}| "
f"{np.round(std, round_val)}\n"
)
else:
ret_str += "..."
return ret_str.rstrip()
def __repr__(self):
return self.pprint(max_lines=10, round_val=3)
def __getitem__(self, param):
if isinstance(param, str):
i = self.param_names.index(param)
elif isinstance(param, int):
i = param
else:
raise TypeError(
"Standard deviation can be indexed by parameter name or integer."
)
return self.stds[i]
| StandardDeviations |
python | kamyu104__LeetCode-Solutions | Python/step-by-step-directions-from-a-binary-tree-node-to-another.py | {
"start": 221,
"end": 1515
} | class ____(object):
    def getDirections(self, root, startValue, destValue):
        """
        :type root: Optional[TreeNode]
        :type startValue: int
        :type destValue: int
        :rtype: str
        """
        def iter_dfs(root, val):
            # Iterative DFS emulating recursion with an explicit stack of
            # (step, args) frames:
            #   step 1 -- visit a node (stop if it is the target),
            #   step 2 -- record the direction ('L'/'R') taken into a child,
            #   step 3 -- un-record it when backtracking out of that child.
            # On success the accumulated root-to-node direction list is
            # reversed, so the returned list is ordered node -> root.
            path = []
            stk = [(1, (root,))]
            while stk:
                step, args = stk.pop()
                if step == 1:
                    node = args[0]
                    if node.val == val:
                        path.reverse()
                        return path
                    for i, child in enumerate((node.left, node.right)):
                        if not child:
                            continue
                        # Pushed in reverse execution order: the direction
                        # record (2) is popped first, then the child visit
                        # (1), and finally the backtrack marker (3).
                        stk.append((3, None))
                        stk.append((1, (child,)))
                        stk.append((2, ("LR"[i],)))
                elif step == 2:
                    path.append(args[0])
                elif step == 3:
                    path.pop()
            return []
        # Both paths run node -> root, so their common tail is the shared
        # root-side segment down to the lowest common ancestor; strip it.
        src = iter_dfs(root, startValue)
        dst = iter_dfs(root, destValue)
        while len(src) and len(dst) and src[-1] == dst[-1]:
            src.pop()
            dst.pop()
        dst.reverse()
        # Go up from start to the LCA ('U' per remaining src step), then
        # follow dst's moves down to the destination.
        return "".join(['U']*len(src) + dst)
# Time: O(n)
# Space: O(h)
| Solution |
python | huggingface__transformers | src/transformers/generation/stopping_criteria.py | {
"start": 2236,
"end": 3801
} | class ____(StoppingCriteria):
    """
    This class can be used to stop generation whenever the full generated number of tokens exceeds `max_length`. Keep
    in mind for decoder-only type of transformers, this will include the initial prompted tokens.
    Args:
        max_length (`int`):
            The maximum length that the output sequence can have in number of tokens.
        max_position_embeddings (`int`, *optional*):
            The maximum model length, as defined by the model's `config.max_position_embeddings` attribute.
    """
    def __init__(self, max_length: int, max_position_embeddings: int | None = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
        cur_len = input_ids.shape[1]
        is_done = cur_len >= self.max_length
        # Warn (once) when generation sails past the model's positional range:
        # behavior beyond it is model-dependent and may degrade silently.
        if self.max_position_embeddings is not None and not is_done and cur_len > self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call has exceeded the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        # One shared stop decision broadcast to every sequence in the batch.
        return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool)
| MaxLengthCriteria |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_tagkey_values.py | {
"start": 23908,
"end": 34076
} | class ____(OrganizationTagKeyTestCase, ReplaysSnubaTestCase):
def setUp(self) -> None:
super().setUp()
replay1_id = uuid.uuid4().hex
replay2_id = uuid.uuid4().hex
replay3_id = uuid.uuid4().hex
date_now = datetime.datetime.now(tz=timezone.utc).replace(
hour=0, minute=0, second=0, microsecond=0
)
self.r1_seq1_timestamp = date_now - datetime.timedelta(seconds=22)
self.r1_seq2_timestamp = date_now - datetime.timedelta(seconds=15)
self.r2_seq1_timestamp = date_now - datetime.timedelta(seconds=10)
self.r3_seq1_timestamp = date_now - datetime.timedelta(seconds=10)
self.r4_seq1_timestamp = date_now - datetime.timedelta(seconds=5)
self.store_replays(
mock_replay(
self.r1_seq1_timestamp,
self.project.id,
replay1_id,
urls=[
"http://localhost:3000/",
"http://localhost:3000/test123",
"http://localhost:3000/test123",
],
tags={"fruit": "orange"},
segment_id=0,
),
)
self.store_replays(
mock_replay(
self.r1_seq2_timestamp,
self.project.id,
replay1_id,
urls=[
"http://localhost:3000/",
"http://localhost:3000/login",
"http://localhost:3000/test456",
],
tags={"fruit": "orange"},
segment_id=1,
),
)
self.store_replays(
mock_replay(
self.r2_seq1_timestamp,
self.project.id,
replay2_id,
urls=[
"http://localhost:3000/",
"http://localhost:3000/otherpage",
],
tags={"fruit": "orange"},
)
)
self.store_replays(
mock_replay(
self.r3_seq1_timestamp,
self.project.id,
replay3_id,
urls=[
"http://localhost:3000/",
"http://localhost:3000/login",
],
tags={"fruit": "apple", "drink": "water"},
)
)
self.store_replays(
mock_replay(
self.r4_seq1_timestamp,
self.project.id,
uuid.uuid4().hex,
platform="python",
replay_type="error",
environment="development",
dist="def456",
release="1.0.0",
user_id="456",
user_name="test",
user_email="test@bacon.com",
ipv4="10.0.0.1",
browser_name="Firefox",
browser_version="99.0.0",
sdk_name="sentry.javascript.browser",
sdk_version="5.15.5",
os_name="SuseLinux",
os_version="1.0.0",
device_name="Microwave",
device_brand="Samsung",
device_model="123",
device_family="Sears",
)
)
def get_replays_response(self, key, kwargs):
qs_params = kwargs.get("qs_params", {})
qs_params["includeReplays"] = "1"
kwargs["qs_params"] = qs_params
response = self.get_success_response(key, **kwargs)
return sorted(response.data, key=lambda x: x["value"])
def run_test(self, key, expected, **kwargs):
# all tests here require that we search in replays so make that the default here
res = self.get_replays_response(key, kwargs)
assert [(val["value"], val["count"]) for val in res] == expected
def run_test_and_check_seen(self, key, expected, **kwargs):
res = self.get_replays_response(key, kwargs)
assert [
(val["value"], val["count"], val["firstSeen"], val["lastSeen"]) for val in res
] == expected
def test_replays_tags_values(self) -> None:
# 3 orange values were mocked, but we only return 2 because two of them
# were in the same replay
self.run_test("fruit", expected=[("apple", 1), ("orange", 2)])
self.run_test("replay_type", expected=[("error", 1), ("session", 3)])
self.run_test("environment", expected=[("development", 1), ("production", 3)])
self.run_test("dist", expected=[("abc123", 3), ("def456", 1)])
self.run_test("platform", expected=[("javascript", 3), ("python", 1)])
self.run_test("release", expected=[("1.0.0", 1), ("version@1.3", 3)])
self.run_test("user.id", expected=[("123", 3), ("456", 1)])
self.run_test("user.username", expected=[("test", 1), ("username", 3)])
self.run_test("user.email", expected=[("test@bacon.com", 1), ("username@example.com", 3)])
self.run_test("user.ip", expected=[("10.0.0.1", 1), ("127.0.0.1", 3)])
self.run_test(
"sdk.name", expected=[("sentry.javascript.browser", 1), ("sentry.javascript.react", 3)]
)
self.run_test("sdk.version", expected=[("5.15.5", 1), ("6.18.1", 3)])
self.run_test("os.name", expected=[("SuseLinux", 1), ("iOS", 3)])
self.run_test("os.version", expected=[("1.0.0", 1), ("16.2", 3)])
self.run_test(
"browser.name",
expected=[("Chrome", 3), ("Firefox", 1)],
)
self.run_test("browser.version", expected=[("103.0.38", 3), ("99.0.0", 1)])
self.run_test("device.name", expected=[("Microwave", 1), ("iPhone 13 Pro", 3)])
self.run_test("device.brand", expected=[("Apple", 3), ("Samsung", 1)])
self.run_test("device.family", expected=[("Sears", 1), ("iPhone", 3)])
# check firstSeen/lastSeen for some of the tags
self.run_test_and_check_seen(
"device.model_id",
expected=[
("123", 1, self.r4_seq1_timestamp, self.r4_seq1_timestamp),
("13 Pro", 3, self.r1_seq1_timestamp, self.r3_seq1_timestamp),
],
)
self.run_test_and_check_seen(
"url",
expected=[
("http://localhost:3000/", 3, self.r1_seq1_timestamp, self.r3_seq1_timestamp),
("http://localhost:3000/login", 2, self.r1_seq2_timestamp, self.r3_seq1_timestamp),
(
"http://localhost:3000/otherpage",
1,
self.r2_seq1_timestamp,
self.r2_seq1_timestamp,
),
(
"http://localhost:3000/test123",
1,
self.r1_seq1_timestamp,
self.r1_seq1_timestamp,
),
(
"http://localhost:3000/test456",
1,
self.r1_seq2_timestamp,
self.r1_seq2_timestamp,
),
],
)
def test_replays_tags_values_query(self) -> None:
# requests may pass in a "query" param to filter the return values with a substring
# custom tag
self.run_test("fruit", expected=[("orange", 2)], qs_params={"query": "ora"})
self.run_test("fruit", expected=[("apple", 1), ("orange", 2)], qs_params={"query": "e"})
self.run_test("fruit", expected=[], qs_params={"query": "zz"})
# column aliases
self.run_test("replay_type", expected=[("error", 1)], qs_params={"query": "err"})
self.run_test(
"environment",
expected=[("development", 1), ("production", 3)],
qs_params={"query": "d"},
)
self.run_test("dist", expected=[], qs_params={"query": "z"})
self.run_test("platform", expected=[("python", 1)], qs_params={"query": "o"})
self.run_test(
"release", expected=[("1.0.0", 1), ("version@1.3", 3)], qs_params={"query": "1."}
)
self.run_test("user.id", expected=[("123", 3)], qs_params={"query": "1"})
self.run_test("user.username", expected=[("username", 3)], qs_params={"query": "a"})
self.run_test(
"user.email",
expected=[("test@bacon.com", 1), ("username@example.com", 3)],
qs_params={"query": "@"},
)
self.run_test("user.ip", expected=[], qs_params={"query": "!^"})
self.run_test("sdk.name", expected=[], qs_params={"query": "sentry-javascript"})
self.run_test(
"sdk.version", expected=[("5.15.5", 1), ("6.18.1", 3)], qs_params={"query": ".1"}
)
self.run_test("os.name", expected=[("SuseLinux", 1)], qs_params={"query": "Linux"})
self.run_test("os.version", expected=[("1.0.0", 1)], qs_params={"query": "0.0"})
self.run_test("browser.name", expected=[("Chrome", 3)], qs_params={"query": "Chrome"})
self.run_test("browser.version", expected=[("99.0.0", 1)], qs_params={"query": "99"})
self.run_test(
"device.name",
expected=[("Microwave", 1), ("iPhone 13 Pro", 3)],
qs_params={"query": "i"},
)
self.run_test("device.brand", expected=[("Samsung", 1)], qs_params={"query": "S"})
self.run_test("device.family", expected=[], qs_params={"query": "$$$"})
def test_replays_tags_values_query_case_insensitive(self) -> None:
# custom tag
self.run_test("fruit", expected=[("orange", 2)], qs_params={"query": "OrA"})
# some column aliases
self.run_test("browser.name", expected=[("Chrome", 3)], qs_params={"query": "chrom"})
self.run_test(
"environment",
expected=[("development", 1), ("production", 3)],
qs_params={"query": "D"},
)
def test_schema(self) -> None:
res = self.get_replays_response("fruit", {})
assert sorted(res[0].keys()) == [
"count",
"firstSeen",
"key",
"lastSeen",
"name",
"value",
]
| ReplayOrganizationTagKeyValuesTest |
python | getsentry__sentry | src/sentry/issue_detection/detectors/n_plus_one_db_span_detector.py | {
"start": 647,
"end": 10599
} | class ____(PerformanceDetector):
"""
Detector goals:
- identify a database N+1 query with high accuracy
- collect enough information to create a good fingerprint (see below)
- only return issues with good fingerprints
A good fingerprint is one that gives us confidence that, if two fingerprints
match, then they correspond to the same issue location in code (and
therefore, the same fix).
To do this we look for a specific structure:
[-------- transaction span -----------]
[-------- parent span -----------]
[source query]
[n0]
[n1]
[n2]
...
If we detect two different N+1 problems, and both have matching parents,
source queries, and repeated (n) queries, then we can be fairly confident
they are the same issue.
"""
__slots__ = (
"potential_parents",
"source_span",
"n_hash",
"n_spans",
"transaction",
)
type = DetectorType.N_PLUS_ONE_DB_QUERIES
settings_key = DetectorType.N_PLUS_ONE_DB_QUERIES
def __init__(self, settings: dict[DetectorType, Any], event: dict[str, Any]) -> None:
super().__init__(settings, event)
self.potential_parents = {}
self.n_hash: str | None = None
self.n_spans: list[Span] = []
self.source_span: Span | None = None
root_span = get_path(self._event, "contexts", "trace")
if root_span:
self.potential_parents[root_span.get("span_id")] = root_span
def is_creation_allowed_for_organization(self, organization: Organization | None) -> bool:
return not features.has(
"organizations:experimental-n-plus-one-db-detector-rollout", organization
)
def is_creation_allowed_for_project(self, project: Project | None) -> bool:
return self.settings["detection_enabled"]
def visit_span(self, span: Span) -> None:
span_id = span.get("span_id", None)
op = span.get("op", None)
if not span_id or not op:
return
if not self._is_db_op(op):
# This breaks up the N+1 we're currently tracking.
self._maybe_store_problem()
self._reset_detection()
# Treat it as a potential parent as long as it isn't the root span.
if span.get("parent_span_id", None):
self.potential_parents[span_id] = span
return
if not self.source_span:
# We aren't currently tracking an N+1. Maybe this span triggers one!
self._maybe_use_as_source(span)
return
# If we got this far, we know we're a DB span and we're looking for a
# sequence of N identical DB spans.
if self._continues_n_plus_1(span):
self.n_spans.append(span)
else:
previous_span = self.n_spans[-1] if self.n_spans else None
self._maybe_store_problem()
self._reset_detection()
# Maybe this DB span starts a whole new N+1!
if previous_span:
self._maybe_use_as_source(previous_span)
if self.source_span and self._continues_n_plus_1(span):
self.n_spans.append(span)
else:
self.source_span = None
self._maybe_use_as_source(span)
def on_complete(self) -> None:
self._maybe_store_problem()
def _is_db_op(self, op: str) -> bool:
return op.startswith("db") and not op.startswith("db.redis")
def _maybe_use_as_source(self, span: Span) -> None:
parent_span_id = span.get("parent_span_id", None)
if not parent_span_id or parent_span_id not in self.potential_parents:
return
self.source_span = span
def _continues_n_plus_1(self, span: Span) -> bool:
if self.source_span is None:
return False
expected_parent_id = self.source_span.get("parent_span_id", None)
parent_id = span.get("parent_span_id", None)
if not parent_id or parent_id != expected_parent_id:
return False
span_hash = span.get("hash", None)
if not span_hash:
return False
if span_hash == self.source_span.get("hash", None):
# The source span and n repeating spans must have different queries.
return False
if not self.n_hash:
self.n_hash = span_hash
return True
return span_hash == self.n_hash
def _maybe_store_problem(self) -> None:
if not self.source_span or not self.n_spans:
return
# Do we have enough spans?
count = self.settings.get("count")
if len(self.n_spans) < count:
return
# Do the spans take enough total time?
if not self._is_slower_than_threshold():
return
# We require a parent span in order to improve our fingerprint accuracy.
parent_span_id = self.source_span.get("parent_span_id", None)
if not parent_span_id:
return
parent_span = self.potential_parents[parent_span_id]
if not parent_span:
return
# Track how many N+1-looking problems we found but dropped because we
# couldn't be sure (maybe the truncated part of the query differs).
if not contains_complete_query(
self.source_span, is_source=True
) or not contains_complete_query(self.n_spans[0]):
metrics.incr("performance.performance_issue.truncated_np1_db")
return
if not self._contains_valid_repeating_query(self.n_spans[0]):
metrics.incr("performance.performance_issue.unparametrized_first_span")
return
fingerprint = self._fingerprint(
parent_span.get("op", None),
parent_span.get("hash", None),
self.source_span.get("hash", None),
self.n_spans[0].get("hash", None),
)
if fingerprint not in self.stored_problems:
self._metrics_for_extra_matching_spans()
offender_span_ids = [span["span_id"] for span in self.n_spans]
self.stored_problems[fingerprint] = PerformanceProblem(
fingerprint=fingerprint,
op="db",
desc=self.n_spans[0].get("description", ""),
type=PerformanceNPlusOneGroupType,
parent_span_ids=[parent_span_id],
cause_span_ids=[self.source_span["span_id"]],
offender_span_ids=offender_span_ids,
evidence_display=[
IssueEvidence(
name="Offending Spans",
value=get_notification_attachment_body(
"db",
self.n_spans[0].get("description", ""),
),
# Has to be marked important to be displayed in the notifications
important=True,
)
],
evidence_data={
"transaction_name": self._event.get("transaction", ""),
"op": "db",
"parent_span_ids": [parent_span_id],
"parent_span": get_span_evidence_value(parent_span),
"cause_span_ids": [self.source_span.get("span_id", None)],
"offender_span_ids": offender_span_ids,
"repeating_spans": get_span_evidence_value(self.n_spans[0]),
"repeating_spans_compact": get_span_evidence_value(
self.n_spans[0], include_op=False
),
"num_repeating_spans": str(len(offender_span_ids)),
},
)
def _is_slower_than_threshold(self) -> bool:
duration_threshold = self.settings.get("duration_threshold")
return total_span_time(self.n_spans) >= duration_threshold
def _contains_valid_repeating_query(self, span: Span) -> bool:
# Make sure we at least have a space, to exclude e.g. MongoDB and
# Prisma's `rawQuery`.
query = span.get("description")
return bool(query and " " in query)
def _metrics_for_extra_matching_spans(self) -> None:
# Checks for any extra spans that match the detected problem but are not part of affected spans.
# Temporary check since we eventually want to capture extra perf problems on the initial pass while walking spans.
n_count = len(self.n_spans)
all_matching_spans = [
span
for span in self._event.get("spans", [])
if self.n_hash is not None and span["span_id"] == self.n_hash
]
all_count = len(all_matching_spans)
if n_count > 0 and n_count != all_count:
metrics.incr("performance.performance_issue.np1_db.extra_spans")
def _reset_detection(self) -> None:
    # Clear all per-sequence detection state so the next candidate span
    # sequence is evaluated from scratch.
    self.source_span = None
    self.n_hash = None
    self.n_spans = []
def _fingerprint(
self, parent_op: str, parent_hash: str, source_hash: str | None, n_hash: str | None
) -> str:
# XXX: this has to be a hardcoded string otherwise grouping will break
problem_class = "GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES"
full_fingerprint = hashlib.sha1(
(str(parent_op) + str(parent_hash) + str(source_hash) + str(n_hash)).encode("utf8"),
).hexdigest()
return f"1-{problem_class}-{full_fingerprint}"
def contains_complete_query(span: Span, is_source: bool | None = False) -> bool:
    """Return True when the span's query text is present and not truncated.

    Source spans are exempt from the truncation check: any non-empty
    description is accepted for them.
    """
    description = span.get("description")
    if not description:
        return False
    if is_source:
        return True
    return not description.endswith("...")
| NPlusOneDBSpanDetector |
python | wandb__wandb | wandb/jupyter.py | {
"start": 2642,
"end": 9088
} | class ____(Magics):
def __init__(self, shell):
    super().__init__(shell)

@magic_arguments()
@argument(
    "path",
    default=None,
    nargs="?",
    help="The path to a resource you want to display.",
)
@argument(
    "-h",
    "--height",
    default=420,
    type=int,
    help="The height of the iframe in pixels.",
)
@line_cell_magic
def wandb(self, line: str, cell: str | None = None) -> None:
    """Display wandb resources in Jupyter.

    This can be used as a line magic:

        %wandb USERNAME/PROJECT/runs/RUN_ID

    Or as a cell magic:

        %%wandb -h 1024
        with wandb.init() as run:
            run.log({"loss": 1})
    """
    global _current_cell_wandb_magic
    args = parse_argstring(self.wandb, line)
    path: str | None = args.path
    height: int = args.height
    # Prefer an explicitly supplied path; otherwise fall back to the most
    # recently active run, if there is one.
    if path:
        _display_by_wandb_path(path, height=height)
        displayed = True
    elif run := wandb_setup.singleton().most_recent_active_run:
        _display_wandb_run(run, height=height)
        displayed = True
    else:
        displayed = False
    # If this is being used as a line magic ("%wandb"), we are done.
    # When used as a cell magic ("%%wandb"), we must run the cell.
    if cell is None:
        return
    # Nothing displayed yet: record the requested height so a run created
    # inside the cell can render itself, then always clear the state.
    if not displayed:
        _current_cell_wandb_magic = _WandbCellMagicState(height=height)
    try:
        IPython.get_ipython().run_cell(cell)
    finally:
        _current_cell_wandb_magic = None
def notebook_metadata_from_jupyter_servers_and_kernel_id():
    """Return {"root", "path", "name"} for the current notebook, or None.

    Raises:
        ValueError: if a running server is password protected.
    """
    # When running in VS Code's notebook extension,
    # the extension creates a temporary file to start the kernel.
    # This file is not actually the same as the notebook file.
    #
    # The real notebook path is stored in the user namespace
    # under the key "__vsc_ipynb_file__"
    try:
        from IPython import get_ipython

        ipython = get_ipython()
        if ipython is not None:
            notebook_path = ipython.kernel.shell.user_ns.get("__vsc_ipynb_file__")
            if notebook_path:
                return {
                    "root": os.path.dirname(notebook_path),
                    "path": notebook_path,
                    "name": os.path.basename(notebook_path),
                }
    except ModuleNotFoundError:
        return None
    # Otherwise, ask each running Jupyter server which session owns our kernel.
    servers, kernel_id = jupyter_servers_and_kernel_id()
    for s in servers:
        if s.get("password"):
            raise ValueError("Can't query password protected kernel")
        res = requests.get(
            urljoin(s["url"], "api/sessions"), params={"token": s.get("token", "")}
        ).json()
        for nn in res:
            if isinstance(nn, dict) and nn.get("kernel") and "notebook" in nn:
                if nn["kernel"]["id"] == kernel_id:
                    return {
                        "root": s.get("root_dir", s.get("notebook_dir", os.getcwd())),
                        "path": nn["notebook"]["path"],
                        "name": nn["notebook"]["name"],
                    }
    # No session matched; falls through to an implicit None when kernel_id
    # is set but unmatched.
    if not kernel_id:
        return None
def notebook_metadata(silent: bool) -> dict[str, str]:
    """Attempt to query jupyter for the path and name of the notebook file.

    This can handle different jupyter environments, specifically:

    1. Colab
    2. Kaggle
    3. JupyterLab
    4. Notebooks
    5. Other?

    Returns an empty dict when detection fails.
    """
    # NOTE(review): `silent` is not referenced anywhere in this body --
    # confirm whether callers still rely on it or it can be deprecated.
    error_message = (
        "Failed to detect the name of this notebook. You can set it manually"
        " with the WANDB_NOTEBOOK_NAME environment variable to enable code"
        " saving."
    )
    try:
        jupyter_metadata = notebook_metadata_from_jupyter_servers_and_kernel_id()

        # Colab:
        # request the most recent contents
        ipynb = attempt_colab_load_ipynb()
        if ipynb is not None and jupyter_metadata is not None:
            return {
                "root": "/content",
                "path": jupyter_metadata["path"],
                "name": jupyter_metadata["name"],
            }

        # Kaggle:
        if wandb.util._is_kaggle():
            # request the most recent contents
            ipynb = attempt_kaggle_load_ipynb()
            if ipynb:
                return {
                    "root": "/kaggle/working",
                    "path": ipynb["metadata"]["name"],
                    "name": ipynb["metadata"]["name"],
                }

        if jupyter_metadata:
            return jupyter_metadata
    except Exception:
        logger.exception(error_message)
        wandb.termerror(error_message)
    return {}
def jupyter_servers_and_kernel_id():
    """Return a list of servers and the current kernel_id.

    Used to query for the name of the notebook. Returns ([], None) when
    not running under a Jupyter kernel.
    """
    try:
        import ipykernel  # type: ignore

        # The kernel id is embedded in the connection file name,
        # e.g. ".../kernel-<id>.json".
        kernel_id = re.search(
            "kernel-(.*).json", ipykernel.connect.get_connection_file()
        ).group(1)
        # We're either in jupyterlab or a notebook, lets prefer the newer jupyter_server package
        serverapp = wandb.util.get_module("jupyter_server.serverapp")
        notebookapp = wandb.util.get_module("notebook.notebookapp")
        servers = []
        if serverapp is not None:
            servers.extend(list(serverapp.list_running_servers()))
        if notebookapp is not None:
            servers.extend(list(notebookapp.list_running_servers()))
    except (AttributeError, ValueError, ImportError):
        # AttributeError: regex did not match (`.group` on None);
        # ImportError/ValueError: no usable kernel environment.
        return [], None
    return servers, kernel_id
def attempt_colab_load_ipynb():
    """Fetch the current notebook contents from Colab, or None."""
    colab = wandb.util.get_module("google.colab")
    if not colab:
        return None
    # This isn't thread safe, never call in a thread
    response = colab._message.blocking_request("get_ipynb", timeout_sec=5)
    return response["ipynb"] if response else None
def attempt_kaggle_load_ipynb():
    """Fetch the current notebook contents from Kaggle, or None on failure."""
    kaggle = wandb.util.get_module("kaggle_session")
    if not kaggle:
        return None
    try:
        session = kaggle.UserSessionClient()
        notebook = json.loads(session.get_exportable_ipynb()["source"])
        # TODO: couldn't find a way to get the name of the notebook...
        notebook["metadata"]["name"] = "kaggle.ipynb"
    except Exception:
        wandb.termerror("Unable to load kaggle notebook.")
        logger.exception("Unable to load kaggle notebook.")
        return None
    return notebook
| WandBMagics |
python | pennersr__django-allauth | allauth/headless/mfa/inputs.py | {
"start": 1956,
"end": 2024
} | class ____(BaseSignupForm, inputs.Input):
    # Headless input for WebAuthn-based signup; all fields and validation
    # come from BaseSignupForm -- nothing extra is added here.
    pass
| SignupWebAuthnInput |
python | ansible__ansible | lib/ansible/plugins/inventory/host_list.py | {
"start": 1052,
"end": 2348
} | class ____(BaseInventoryPlugin):
NAME = 'host_list'
# host_list does not set vars, so needs no special trust assistance from the inventory API
def verify_file(self, host_list):
    """Accept the "file" only when it is a comma-separated host string
    rather than a path that exists on disk."""
    b_path = to_bytes(host_list, errors='surrogate_or_strict')
    return bool(not os.path.exists(b_path) and ',' in host_list)
def parse(self, inventory, loader, host_list, cache=True):
    """ parses the inventory file """
    super(InventoryModule, self).parse(inventory, loader, host_list)
    try:
        # Split the comma-separated string and add each non-empty entry
        # as a host in the "ungrouped" group.
        for h in host_list.split(','):
            h = h.strip()
            if h:
                try:
                    (host, port) = parse_address(h, allow_ranges=False)
                except AnsibleError as e:
                    # Not host:port syntax -- treat the whole token as a
                    # hostname and fall back to the default port.
                    self.display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_text(e))
                    host = h
                    port = None
                if host not in self.inventory.hosts:
                    self.inventory.add_host(host, group='ungrouped', port=port)
    except Exception as e:
        raise AnsibleParserError("Invalid data from string, could not parse: %s" % to_native(e))
| InventoryModule |
python | openai__openai-python | src/openai/types/responses/web_search_tool.py | {
"start": 1218,
"end": 1821
} | class ____(BaseModel):
# Model fields for the web-search tool configuration; all optional fields
# default to None (omitted from serialized payloads when unset).
type: Literal["web_search", "web_search_2025_08_26"]
"""The type of the web search tool.

One of `web_search` or `web_search_2025_08_26`.
"""

filters: Optional[Filters] = None
"""Filters for the search."""

search_context_size: Optional[Literal["low", "medium", "high"]] = None
"""High level guidance for the amount of context window space to use for the
search.

One of `low`, `medium`, or `high`. `medium` is the default.
"""

user_location: Optional[UserLocation] = None
"""The approximate location of the user."""
python | huggingface__transformers | src/transformers/integrations/deepspeed.py | {
"start": 1934,
"end": 3076
} | class ____(DeepSpeedConfig):
"""
This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage.
A `weakref` of this object is stored in the module's globals to be able to access the config from areas where
things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore
it's important that this object remains alive while the program is still running.
[`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration
with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic
the DeepSpeed configuration is not modified in any way.
Args:
config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict.
"""
def __init__(self, config_file_or_dict):
    # set global weakref object
    set_hf_deepspeed_config(self)
    # Both accelerate and deepspeed must be importable before the base
    # class parses the configuration.
    dep_version_check("accelerate")
    dep_version_check("deepspeed")
    super().__init__(config_file_or_dict)
| HfDeepSpeedConfig |
python | docker__docker-py | docker/models/secrets.py | {
"start": 70,
"end": 537
} | class ____(Model):
"""A secret."""
id_attribute = 'ID'
def __repr__(self):
return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
return self.attrs['Spec']['Name']
def remove(self):
"""
Remove this secret.
Raises:
:py:class:`docker.errors.APIError`
If secret failed to remove.
"""
return self.client.api.remove_secret(self.id)
| Secret |
python | kamyu104__LeetCode-Solutions | Python/smallest-range-ii.py | {
"start": 33,
"end": 418
} | class ____(object):
def smallestRangeII(self, A, K):
    """
    :type A: List[int]
    :type K: int
    :rtype: int

    Each element must be shifted by +K or -K; return the minimum possible
    difference between the max and min of the resulting array.
    """
    # Sort so that for each split point i we may assume A[0..i] are shifted
    # up by K and A[i+1..] are shifted down by K -- an optimal assignment
    # always has this structure.  Note: sorts A in place (as the original did).
    A.sort()
    # Baseline: shift every element in the same direction.
    result = A[-1] - A[0]
    # Fix: use range() instead of the Python-2-only xrange(), which raises
    # NameError on Python 3; range() behaves identically here on both.
    for i in range(len(A) - 1):
        high = max(A[-1] - K, A[i] + K)      # largest value after shifting
        low = min(A[0] + K, A[i + 1] - K)    # smallest value after shifting
        result = min(result, high - low)
    return result
| Solution |
python | coleifer__peewee | peewee.py | {
"start": 7145,
"end": 11502
} | class ____(dict):
def __getattr__(self, attr):
    # Fall back to dict lookup so `d.key` behaves like `d['key']`,
    # raising AttributeError (not KeyError) for missing keys.
    try:
        return self[attr]
    except KeyError:
        raise AttributeError(attr)
# Attribute assignment writes through to the underlying dict.
def __setattr__(self, attr, value): self[attr] = value
# `d += other` updates in place; `d + other` returns a merged copy.
def __iadd__(self, rhs): self.update(rhs); return self
def __add__(self, rhs): d = attrdict(self); d.update(rhs); return d
# Unique placeholder used to distinguish "no value supplied" from None.
SENTINEL = object()
#: Operations for use in SQL expressions.
OP = attrdict(
    AND='AND',
    OR='OR',
    ADD='+',
    SUB='-',
    MUL='*',
    DIV='/',
    BIN_AND='&',
    BIN_OR='|',
    XOR='#',
    MOD='%',
    EQ='=',
    LT='<',
    LTE='<=',
    GT='>',
    GTE='>=',
    NE='!=',
    IN='IN',
    NOT_IN='NOT IN',
    IS='IS',
    IS_NOT='IS NOT',
    LIKE='LIKE',
    ILIKE='ILIKE',
    BETWEEN='BETWEEN',
    REGEXP='REGEXP',
    IREGEXP='IREGEXP',
    CONCAT='||',
    BITWISE_NEGATION='~')
# To support "django-style" double-underscore filters, create a mapping between
# operation name and operation code, e.g. "__eq" == OP.EQ.
DJANGO_MAP = attrdict({
    'eq': operator.eq,
    'lt': operator.lt,
    'lte': operator.le,
    'gt': operator.gt,
    'gte': operator.ge,
    'ne': operator.ne,
    'in': operator.lshift,
    'is': lambda l, r: Expression(l, OP.IS, r),
    'like': lambda l, r: Expression(l, OP.LIKE, r),
    'ilike': lambda l, r: Expression(l, OP.ILIKE, r),
    'regexp': lambda l, r: Expression(l, OP.REGEXP, r),
})
#: Mapping of field type to the data-type supported by the database. Databases
#: may override or add to this list.
FIELD = attrdict(
    AUTO='INTEGER',
    BIGAUTO='BIGINT',
    BIGINT='BIGINT',
    BLOB='BLOB',
    BOOL='SMALLINT',
    CHAR='CHAR',
    DATE='DATE',
    DATETIME='DATETIME',
    DECIMAL='DECIMAL',
    DEFAULT='',
    DOUBLE='REAL',
    FLOAT='REAL',
    INT='INTEGER',
    SMALLINT='SMALLINT',
    TEXT='TEXT',
    TIME='TIME',
    UUID='TEXT',
    UUIDB='BLOB',
    VARCHAR='VARCHAR')
#: Join helpers (for convenience) -- all join types are supported, this object
#: is just to help avoid introducing errors by using strings everywhere.
JOIN = attrdict(
    INNER='INNER JOIN',
    LEFT_OUTER='LEFT OUTER JOIN',
    RIGHT_OUTER='RIGHT OUTER JOIN',
    FULL='FULL JOIN',
    FULL_OUTER='FULL OUTER JOIN',
    CROSS='CROSS JOIN',
    NATURAL='NATURAL JOIN',
    LATERAL='LATERAL',
    LEFT_LATERAL='LEFT JOIN LATERAL')
# Row representations.
ROW = attrdict(
    TUPLE=1,
    DICT=2,
    NAMED_TUPLE=3,
    CONSTRUCTOR=4,
    MODEL=5)
# Query type to use with prefetch
PREFETCH_TYPE = attrdict(
    WHERE=1,
    JOIN=2)
# SQL-generation scopes: bit flags describing the context a node is being
# rendered in (may be combined with bitwise OR).
SCOPE_NORMAL = 1
SCOPE_SOURCE = 2
SCOPE_VALUES = 4
SCOPE_CTE = 8
SCOPE_COLUMN = 16
# Rules for parentheses around subqueries in compound select.
CSQ_PARENTHESES_NEVER = 0
CSQ_PARENTHESES_ALWAYS = 1
CSQ_PARENTHESES_UNNESTED = 2
# Regular expressions used to convert class names to snake-case table names.
# First regex handles acronym followed by word or initial lower-word followed
# by a capitalized word. e.g. APIResponse -> API_Response / fooBar -> foo_Bar.
# Second regex handles the normal case of two title-cased words.
SNAKE_CASE_STEP1 = re.compile('(.)_*([A-Z][a-z]+)')
SNAKE_CASE_STEP2 = re.compile('([a-z0-9])_*([A-Z])')
# Helper functions that are used in various parts of the codebase.
MODEL_BASE = '_metaclass_helper_'
def with_metaclass(meta, base=object):
    # Build a throwaway intermediate class so user code can inherit from a
    # metaclass-backed base on both Python 2 and 3.
    return meta(MODEL_BASE, (base,), {})
def merge_dict(source, overrides):
    """Return a copy of *source* updated with *overrides* (which may be falsy)."""
    merged = source.copy()
    if overrides:
        merged.update(overrides)
    return merged
def quote(path, quote_chars):
    """Quote each component of *path* and join components with '.'."""
    quoted = [part.join(quote_chars) for part in path]
    return quoted[0] if len(quoted) == 1 else '.'.join(quoted)
def is_model(o):
    # True only for Model subclasses (classes, not instances).
    return isclass(o) and issubclass(o, Model)
def ensure_tuple(value):
    """Wrap a scalar in a 1-tuple; pass lists/tuples through; None stays None."""
    if value is None:
        return None
    if isinstance(value, (list, tuple)):
        return value
    return (value,)
def ensure_entity(value):
    """Coerce a non-Node value into an Entity; None stays None."""
    if value is None:
        return None
    return value if isinstance(value, Node) else Entity(value)
def make_snake_case(s):
    """Convert a CamelCase identifier into snake_case."""
    step1 = SNAKE_CASE_STEP1.sub(r'\1_\2', s)
    return SNAKE_CASE_STEP2.sub(r'\1_\2', step1).lower()
def chunked(it, n):
    """Yield successive lists of up to *n* items from iterable *it*."""
    marker = object()
    for padded in izip_longest(*[iter(it)] * n, fillvalue=marker):
        group = list(padded)
        if group[-1] is marker:
            # Final chunk was padded; trim from the first marker onward.
            del group[group.index(marker):]
        yield group
| attrdict |
python | coleifer__peewee | tests/models.py | {
"start": 126775,
"end": 128059
} | class ____(BaseTestCase):
database = get_in_memory_db()

def test_subclass_aware_metadata(self):
    # Verify that a SubclassAwareMetadata subclass can propagate a
    # property change (here: schema) to every model sharing the metadata.
    class SchemaPropagateMetadata(SubclassAwareMetadata):
        @property
        def schema(self):
            return self._schema
        @schema.setter
        def schema(self, value):
            # self.models is a singleton, essentially, shared among all
            # classes that use this metadata implementation.
            for model in self.models:
                model._meta._schema = value
    class Base(Model):
        class Meta:
            database = self.database
            model_metadata_class = SchemaPropagateMetadata
    class User(Base):
        username = TextField()
    class Tweet(Base):
        user = ForeignKeyField(User, backref='tweets')
        content = TextField()
    self.assertTrue(User._meta.schema is None)
    self.assertTrue(Tweet._meta.schema is None)
    # Setting the schema on any one class updates all registered models.
    Base._meta.schema = 'temp'
    self.assertEqual(User._meta.schema, 'temp')
    self.assertEqual(Tweet._meta.schema, 'temp')
    User._meta.schema = None
    for model in (Base, User, Tweet):
        self.assertTrue(model._meta.schema is None)
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_write_row.py | {
"start": 347,
"end": 2931
} | class ____(unittest.TestCase):
"""
Test the Worksheet _write_row() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_row_1(self):
"""Test the _write_row() method"""
self.worksheet._write_row(0, None)
exp = """<row r="1">"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_row_2(self):
"""Test the _write_row() method"""
self.worksheet._write_row(2, "2:2")
exp = """<row r="3" spans="2:2">"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_row_3(self):
"""Test the _write_row() method"""
row_info = RowInfo(height=40)
self.worksheet._write_row(1, None, row_info)
exp = """<row r="2" ht="30" customHeight="1">"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_row_4(self):
"""Test the _write_row() method"""
row_info = RowInfo(height=20, hidden=True)
self.worksheet._write_row(3, None, row_info)
exp = """<row r="4" hidden="1">"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_row_5(self):
"""Test the _write_row() method"""
row_format = Format({"xf_index": 1})
row_info = RowInfo(height=20, row_format=row_format)
self.worksheet._write_row(6, None, row_info)
exp = """<row r="7" s="1" customFormat="1">"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_row_6(self):
"""Test the _write_row() method"""
row_info = RowInfo(height=4)
self.worksheet._write_row(9, None, row_info)
exp = """<row r="10" ht="3" customHeight="1">"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_row_7(self):
"""Test the _write_row() method"""
row_info = RowInfo(height=32, hidden=True)
self.worksheet._write_row(12, None, row_info)
exp = """<row r="13" ht="24" hidden="1" customHeight="1">"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_row_8(self):
"""Test the _write_row() method"""
row_info = RowInfo(height=32, hidden=True)
self.worksheet._write_row(12, None, row_info, 1)
exp = """<row r="13" ht="24" hidden="1" customHeight="1"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteRow |
python | numba__numba | numba/tests/test_listobject.py | {
"start": 39513,
"end": 39963
} | class ____(MemoryLeakMixin, TestCase):
"""Test list can take strings as items. """
def test_string_item(self):
@njit
def foo():
l = listobject.new_list(types.unicode_type)
l.append('a')
l.append('b')
l.append('c')
l.append('d')
return l[0], l[1], l[2], l[3]
items = foo()
self.assertEqual(['a', 'b', 'c', 'd'], list(items))
| TestStringItem |
python | ApeWorX__ape | src/ape/types/private_mempool.py | {
"start": 5980,
"end": 6318
} | class ____(BaseModel):
"""
Logs returned by `mev_simBundle`.
"""
tx_logs: Optional[list[dict]] = Field(None, alias="txLogs")
"""
Logs for transactions in bundle.
"""
bundle_logs: Optional[list["SimBundleLogs"]] = Field(None, alias="bundleLogs")
"""
Logs for bundles in bundle.
"""
| SimBundleLogs |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 902062,
"end": 920739
} | class ____(FieldChannelMixin, core.StringFieldDefWithCondition):
r"""
Url schema wrapper.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : bool, dict, Literal['binned'], :class:`BinParams`, None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : dict, :class:`ConditionalValueDefstringExprRef`, :class:`ConditionalParameterValueDefstringExprRef`, :class:`ConditionalPredicateValueDefstringExprRef`, Sequence[dict, :class:`ConditionalValueDefstringExprRef`, :class:`ConditionalParameterValueDefstringExprRef`, :class:`ConditionalPredicateValueDefstringExprRef`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
format : str, dict, :class:`Dict`, :class:`Format`, :class:`TimeFormatSpecifier`
The text format specifier for formatting number and date/time in labels of guides
(axes, legends, headers) and text marks.
If the format type is ``"number"`` (e.g., for quantitative fields), this is a D3's
`number format pattern string <https://github.com/d3/d3-format#locale_format>`__.
If the format type is ``"time"`` (e.g., for temporal fields), this is either: a)
D3's `time format pattern <https://d3js.org/d3-time-format#locale_format>`__ if you
desire to set a static time format.
b) `dynamic time format specifier object
<https://vega.github.io/vega-lite/docs/format.html#dynamic-time-format>`__ if you
desire to set a dynamic time format that uses different formats depending on the
granularity of the input date (e.g., if the date lies on a year, month, date, hour,
etc. boundary).
When used with a `custom formatType
<https://vega.github.io/vega-lite/docs/config.html#custom-format-type>`__, this
value will be passed as ``format`` alongside ``datum.value`` to the registered
function.
**Default value:** Derived from `numberFormat
<https://vega.github.io/vega-lite/docs/config.html#format>`__ config for number
format and from `timeFormat
<https://vega.github.io/vega-lite/docs/config.html#format>`__ config for time
format.
formatType : str
The format type for labels. One of ``"number"``, ``"time"``, or a `registered custom
format type
<https://vega.github.io/vega-lite/docs/config.html#custom-format-type>`__.
**Default value:**
* ``"time"`` for temporal fields and ordinal and nominal fields with ``timeUnit``.
* ``"number"`` for quantitative fields as well as ordinal and nominal fields without
``timeUnit``.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "url"
@overload
def aggregate(self, _: NonArgAggregateOp_T, /) -> Url: ...
@overload
def aggregate(self, *, argmax: Optional[str | SchemaBase] = Undefined) -> Url: ...
@overload
def aggregate(self, *, argmin: Optional[str | SchemaBase] = Undefined) -> Url: ...
@overload
def bandPosition(self, _: float, /) -> Url: ...
@overload
def bin(self, _: bool | Bin | Literal["binned"] | None, /) -> Url: ...
@overload
def bin(
self,
*,
anchor: Optional[float] = Undefined,
base: Optional[float] = Undefined,
binned: Optional[bool] = Undefined,
divide: Optional[Sequence[float]] = Undefined,
extent: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
maxbins: Optional[float] = Undefined,
minstep: Optional[float] = Undefined,
nice: Optional[bool] = Undefined,
step: Optional[float] = Undefined,
steps: Optional[Sequence[float]] = Undefined,
) -> Url: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map] = Undefined,
) -> Url: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map] = Undefined,
) -> Url: ...
@overload
def condition(self, _: list[core.ConditionalValueDefstringExprRef], /) -> Url: ...
@overload
def field(self, _: str | RepeatRef, /) -> Url: ...
@overload
def field(
self,
*,
repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined,
) -> Url: ...
@overload
def format(self, _: str, /) -> Url: ...
@overload
def format(
self,
*,
date: Optional[str] = Undefined,
day: Optional[str] = Undefined,
hours: Optional[str] = Undefined,
milliseconds: Optional[str] = Undefined,
minutes: Optional[str] = Undefined,
month: Optional[str] = Undefined,
quarter: Optional[str] = Undefined,
seconds: Optional[str] = Undefined,
week: Optional[str] = Undefined,
year: Optional[str] = Undefined,
) -> Url: ...
@overload
def format(self, _: Map, /) -> Url: ...
@overload
def formatType(self, _: str, /) -> Url: ...
@overload
def timeUnit(
self,
_: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T,
/,
) -> Url: ...
@overload
def timeUnit(
self,
*,
binned: Optional[bool] = Undefined,
maxbins: Optional[float] = Undefined,
step: Optional[float] = Undefined,
unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined,
utc: Optional[bool] = Undefined,
) -> Url: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> Url: ...
@overload
def type(self, _: StandardType_T, /) -> Url: ...
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Literal["binned"] | Map | None] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
format: Optional[str | SchemaBase | Map] = Undefined,
formatType: Optional[str] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
condition=condition,
field=field,
format=format,
formatType=formatType,
timeUnit=timeUnit,
title=title,
type=type,
**kwds,
)
@with_property_setters
| Url |
python | ijl__orjson | test/test_dataclass.py | {
"start": 1521,
"end": 1641
} | class ____(AbstractBase):
__slots__ = ("attr",)
attr: float
def key(self):
return "dkjf"
| ConcreteAbc |
python | jd__tenacity | tenacity/stop.py | {
"start": 3350,
"end": 4111
} | class ____(stop_base):
"""
Stop right before the next attempt would take place after the time from the first attempt >= limit.
Most useful when you are using with a `wait` function like wait_random_exponential, but need to make
sure that the max_delay is not exceeded.
"""
def __init__(self, max_delay: _utils.time_unit_type) -> None:
self.max_delay = _utils.to_seconds(max_delay)
def __call__(self, retry_state: "RetryCallState") -> bool:
if retry_state.seconds_since_start is None:
raise RuntimeError("__call__() called but seconds_since_start is not set")
return (
retry_state.seconds_since_start + retry_state.upcoming_sleep
>= self.max_delay
)
| stop_before_delay |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/experiment_service.py | {
"start": 1275,
"end": 4334
} | class ____(GoogleCloudBaseOperator):
"""
Use the Vertex AI SDK to create experiment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud location that the service belongs to.
:param experiment_name: Required. The name of the evaluation experiment.
:param experiment_description: Optional. Description of the evaluation experiment.
:param experiment_tensorboard: Optional. The Vertex TensorBoard instance to use as a backing
TensorBoard for the provided experiment. If no TensorBoard is provided, a default TensorBoard
instance is created and used by this experiment.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"location",
"project_id",
"impersonation_chain",
"experiment_name",
)
def __init__(
self,
*,
project_id: str,
location: str,
experiment_name: str,
experiment_description: str = "",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
experiment_tensorboard: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.location = location
self.experiment_name = experiment_name
self.experiment_description = experiment_description
self.experiment_tensorboard = experiment_tensorboard
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
self.hook = ExperimentHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.hook.create_experiment(
project_id=self.project_id,
location=self.location,
experiment_name=self.experiment_name,
experiment_description=self.experiment_description,
experiment_tensorboard=self.experiment_tensorboard,
)
except exceptions.AlreadyExists:
raise AirflowException(f"Experiment with name {self.experiment_name} already exist")
self.log.info("Created experiment: %s", self.experiment_name)
| CreateExperimentOperator |
python | openai__openai-python | src/openai/types/realtime/realtime_response_create_mcp_tool.py | {
"start": 2324,
"end": 4512
} | class ____(BaseModel):
server_label: str
"""A label for this MCP server, used to identify it in tool calls."""
type: Literal["mcp"]
"""The type of the MCP tool. Always `mcp`."""
allowed_tools: Optional[AllowedTools] = None
"""List of allowed tool names or a filter object."""
authorization: Optional[str] = None
"""
An OAuth access token that can be used with a remote MCP server, either with a
custom MCP server URL or a service connector. Your application must handle the
OAuth authorization flow and provide the token here.
"""
connector_id: Optional[
Literal[
"connector_dropbox",
"connector_gmail",
"connector_googlecalendar",
"connector_googledrive",
"connector_microsoftteams",
"connector_outlookcalendar",
"connector_outlookemail",
"connector_sharepoint",
]
] = None
"""Identifier for service connectors, like those available in ChatGPT.
One of `server_url` or `connector_id` must be provided. Learn more about service
connectors
[here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
Currently supported `connector_id` values are:
- Dropbox: `connector_dropbox`
- Gmail: `connector_gmail`
- Google Calendar: `connector_googlecalendar`
- Google Drive: `connector_googledrive`
- Microsoft Teams: `connector_microsoftteams`
- Outlook Calendar: `connector_outlookcalendar`
- Outlook Email: `connector_outlookemail`
- SharePoint: `connector_sharepoint`
"""
headers: Optional[Dict[str, str]] = None
"""Optional HTTP headers to send to the MCP server.
Use for authentication or other purposes.
"""
require_approval: Optional[RequireApproval] = None
"""Specify which of the MCP server's tools require approval."""
server_description: Optional[str] = None
"""Optional description of the MCP server, used to provide more context."""
server_url: Optional[str] = None
"""The URL for the MCP server.
One of `server_url` or `connector_id` must be provided.
"""
| RealtimeResponseCreateMcpTool |
python | huggingface__transformers | src/transformers/models/glm46v/modular_glm46v.py | {
"start": 5550,
"end": 5618
} | class ____(Glm4vImageProcessorFast):
pass
| Glm46VImageProcessorFast |
python | walkccc__LeetCode | solutions/754. Reach a Number/754.py | {
"start": 0,
"end": 251
} | class ____:
def reachNumber(self, target: int) -> int:
ans = 0
pos = 0
target = abs(target)
while pos < target:
ans += 1
pos += ans
while (pos - target) % 2 == 1:
ans += 1
pos += ans
return ans
| Solution |
python | cherrypy__cherrypy | cherrypy/test/test_xmlrpc.py | {
"start": 1967,
"end": 4685
} | class ____(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def testXmlRpc(self):
scheme = self.scheme
if scheme == 'https':
url = 'https://%s:%s/xmlrpc/' % (self.interface(), self.PORT)
proxy = ServerProxy(url, transport=HTTPSTransport())
else:
url = 'http://%s:%s/xmlrpc/' % (self.interface(), self.PORT)
proxy = ServerProxy(url)
# begin the tests ...
self.getPage('/xmlrpc/foo')
self.assertBody('Hello world!')
self.assertEqual(proxy.return_single_item_list(), [42])
self.assertNotEqual(proxy.return_single_item_list(), 'one bazillion')
self.assertEqual(proxy.return_string(), 'here is a string')
self.assertEqual(
proxy.return_tuple(),
list(('here', 'is', 1, 'tuple')),
)
self.assertEqual(proxy.return_dict(), {'a': 1, 'c': 3, 'b': 2})
self.assertEqual(
proxy.return_composite(),
[{'a': 1, 'z': 26}, 'hi', ['welcome', 'friend']],
)
self.assertEqual(proxy.return_int(), 42)
self.assertEqual(proxy.return_float(), 3.14)
self.assertEqual(
proxy.return_datetime(),
DateTime((2003, 10, 7, 8, 1, 0, 1, 280, -1)),
)
self.assertEqual(proxy.return_boolean(), True)
self.assertEqual(proxy.test_argument_passing(22), 22 * 2)
# Test an error in the page handler (should raise an xmlrpclib.Fault)
try:
proxy.test_argument_passing({})
except Exception:
x = sys.exc_info()[1]
self.assertEqual(x.__class__, Fault)
self.assertEqual(
x.faultString,
("unsupported operand type(s) for *: 'dict' and 'int'"),
)
else:
self.fail('Expected xmlrpclib.Fault')
# https://github.com/cherrypy/cherrypy/issues/533
# if a method is not found, an xmlrpclib.Fault should be raised
try:
proxy.non_method()
except Exception:
x = sys.exc_info()[1]
self.assertEqual(x.__class__, Fault)
self.assertEqual(
x.faultString,
'method "non_method" is not supported',
)
else:
self.fail('Expected xmlrpclib.Fault')
# Test returning a Fault from the page handler.
try:
proxy.test_returning_Fault()
except Exception:
x = sys.exc_info()[1]
self.assertEqual(x.__class__, Fault)
self.assertEqual(x.faultString, ('custom Fault response'))
else:
self.fail('Expected xmlrpclib.Fault')
| XmlRpcTest |
python | vyperlang__vyper | vyper/exceptions.py | {
"start": 8642,
"end": 8739
} | class ____(VyperException):
"""Invalid reference to an existing definition."""
| InvalidReference |
python | numpy__numpy | benchmarks/benchmarks/bench_function_base.py | {
"start": 233,
"end": 458
} | class ____(Benchmark):
def setup(self):
self.d = np.array([1, 2, 3])
def time_linspace_scalar(self):
np.linspace(0, 10, 2)
def time_linspace_array(self):
np.linspace(self.d, 10, 10)
| Linspace |
python | doocs__leetcode | solution/1800-1899/1840.Maximum Building Height/Solution.py | {
"start": 0,
"end": 611
} | class ____:
def maxBuilding(self, n: int, restrictions: List[List[int]]) -> int:
r = restrictions
r.append([1, 0])
r.sort()
if r[-1][0] != n:
r.append([n, n - 1])
m = len(r)
for i in range(1, m):
r[i][1] = min(r[i][1], r[i - 1][1] + r[i][0] - r[i - 1][0])
for i in range(m - 2, 0, -1):
r[i][1] = min(r[i][1], r[i + 1][1] + r[i + 1][0] - r[i][0])
ans = 0
for i in range(m - 1):
t = (r[i][1] + r[i + 1][1] + r[i + 1][0] - r[i][0]) // 2
ans = max(ans, t)
return ans
| Solution |
python | spack__spack | lib/spack/spack/vendor/pyrsistent/_pbag.py | {
"start": 249,
"end": 6743
} | class ____(object):
"""
A persistent bag/multiset type.
Requires elements to be hashable, and allows duplicates, but has no
ordering. Bags are hashable.
Do not instantiate directly, instead use the factory functions :py:func:`b`
or :py:func:`pbag` to create an instance.
Some examples:
>>> s = pbag([1, 2, 3, 1])
>>> s2 = s.add(4)
>>> s3 = s2.remove(1)
>>> s
pbag([1, 1, 2, 3])
>>> s2
pbag([1, 1, 2, 3, 4])
>>> s3
pbag([1, 2, 3, 4])
"""
__slots__ = ('_counts', '__weakref__')
def __init__(self, counts):
self._counts = counts
def add(self, element):
"""
Add an element to the bag.
>>> s = pbag([1])
>>> s2 = s.add(1)
>>> s3 = s.add(2)
>>> s2
pbag([1, 1])
>>> s3
pbag([1, 2])
"""
return PBag(_add_to_counters(self._counts, element))
def update(self, iterable):
"""
Update bag with all elements in iterable.
>>> s = pbag([1])
>>> s.update([1, 2])
pbag([1, 1, 2])
"""
if iterable:
return PBag(reduce(_add_to_counters, iterable, self._counts))
return self
def remove(self, element):
"""
Remove an element from the bag.
>>> s = pbag([1, 1, 2])
>>> s2 = s.remove(1)
>>> s3 = s.remove(2)
>>> s2
pbag([1, 2])
>>> s3
pbag([1, 1])
"""
if element not in self._counts:
raise KeyError(element)
elif self._counts[element] == 1:
newc = self._counts.remove(element)
else:
newc = self._counts.set(element, self._counts[element] - 1)
return PBag(newc)
def count(self, element):
"""
Return the number of times an element appears.
>>> pbag([]).count('non-existent')
0
>>> pbag([1, 1, 2]).count(1)
2
"""
return self._counts.get(element, 0)
def __len__(self):
"""
Return the length including duplicates.
>>> len(pbag([1, 1, 2]))
3
"""
return sum(self._counts.itervalues())
def __iter__(self):
"""
Return an iterator of all elements, including duplicates.
>>> list(pbag([1, 1, 2]))
[1, 1, 2]
>>> list(pbag([1, 2]))
[1, 2]
"""
for elt, count in self._counts.iteritems():
for i in range(count):
yield elt
def __contains__(self, elt):
"""
Check if an element is in the bag.
>>> 1 in pbag([1, 1, 2])
True
>>> 0 in pbag([1, 2])
False
"""
return elt in self._counts
def __repr__(self):
return "pbag({0})".format(list(self))
def __eq__(self, other):
"""
Check if two bags are equivalent, honoring the number of duplicates,
and ignoring insertion order.
>>> pbag([1, 1, 2]) == pbag([1, 2])
False
>>> pbag([2, 1, 0]) == pbag([0, 1, 2])
True
"""
if type(other) is not PBag:
raise TypeError("Can only compare PBag with PBags")
return self._counts == other._counts
def __lt__(self, other):
raise TypeError('PBags are not orderable')
__le__ = __lt__
__gt__ = __lt__
__ge__ = __lt__
# Multiset-style operations similar to collections.Counter
def __add__(self, other):
"""
Combine elements from two PBags.
>>> pbag([1, 2, 2]) + pbag([2, 3, 3])
pbag([1, 2, 2, 2, 3, 3])
"""
if not isinstance(other, PBag):
return NotImplemented
result = self._counts.evolver()
for elem, other_count in other._counts.iteritems():
result[elem] = self.count(elem) + other_count
return PBag(result.persistent())
def __sub__(self, other):
"""
Remove elements from one PBag that are present in another.
>>> pbag([1, 2, 2, 2, 3]) - pbag([2, 3, 3, 4])
pbag([1, 2, 2])
"""
if not isinstance(other, PBag):
return NotImplemented
result = self._counts.evolver()
for elem, other_count in other._counts.iteritems():
newcount = self.count(elem) - other_count
if newcount > 0:
result[elem] = newcount
elif elem in self:
result.remove(elem)
return PBag(result.persistent())
def __or__(self, other):
"""
Union: Keep elements that are present in either of two PBags.
>>> pbag([1, 2, 2, 2]) | pbag([2, 3, 3])
pbag([1, 2, 2, 2, 3, 3])
"""
if not isinstance(other, PBag):
return NotImplemented
result = self._counts.evolver()
for elem, other_count in other._counts.iteritems():
count = self.count(elem)
newcount = max(count, other_count)
result[elem] = newcount
return PBag(result.persistent())
def __and__(self, other):
"""
Intersection: Only keep elements that are present in both PBags.
>>> pbag([1, 2, 2, 2]) & pbag([2, 3, 3])
pbag([2])
"""
if not isinstance(other, PBag):
return NotImplemented
result = pmap().evolver()
for elem, count in self._counts.iteritems():
newcount = min(count, other.count(elem))
if newcount > 0:
result[elem] = newcount
return PBag(result.persistent())
def __hash__(self):
"""
Hash based on value of elements.
>>> m = pmap({pbag([1, 2]): "it's here!"})
>>> m[pbag([2, 1])]
"it's here!"
>>> pbag([1, 1, 2]) in m
False
"""
return hash(self._counts)
Container.register(PBag)
Iterable.register(PBag)
Sized.register(PBag)
Hashable.register(PBag)
def b(*elements):
"""
Construct a persistent bag.
Takes an arbitrary number of arguments to insert into the new persistent
bag.
>>> b(1, 2, 3, 2)
pbag([1, 2, 2, 3])
"""
return pbag(elements)
def pbag(elements):
"""
Convert an iterable to a persistent bag.
Takes an iterable with elements to insert.
>>> pbag([1, 2, 3, 2])
pbag([1, 2, 2, 3])
"""
if not elements:
return _EMPTY_PBAG
return PBag(reduce(_add_to_counters, elements, pmap()))
_EMPTY_PBAG = PBag(pmap())
| PBag |
python | tensorflow__tensorflow | tensorflow/python/util/deprecation_test.py | {
"start": 39139,
"end": 40271
} | class ____(test.TestCase):
def testDeprecatedArgumentLookup(self):
good_value = 3
self.assertEqual(
deprecation.deprecated_argument_lookup("val_new", good_value, "val_old",
None), good_value)
self.assertEqual(
deprecation.deprecated_argument_lookup("val_new", None, "val_old",
good_value), good_value)
with self.assertRaisesRegex(ValueError,
"Cannot specify both 'val_old' and 'val_new'"):
deprecation.deprecated_argument_lookup("val_new", good_value,
"val_old", good_value)
def testRewriteArgumentDocstring(self):
docs = """Add `a` and `b`
Args:
a: first arg
b: second arg
"""
new_docs = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(docs, "a", "left"), "b", "right")
new_docs_ref = """Add `left` and `right`
Args:
left: first arg
right: second arg
"""
self.assertEqual(new_docs, new_docs_ref)
| DeprecationArgumentsTest |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/events.py | {
"start": 3269,
"end": 3344
} | class ____(Event):
__slots__ = ()
# Implementations.
| CollectionEndEvent |
python | ray-project__ray | python/ray/tune/schedulers/hyperband.py | {
"start": 17812,
"end": 24762
} | class ____:
"""Logical object for tracking Hyperband bracket progress. Keeps track
of proper parameters as designated by HyperBand.
Also keeps track of progress to ensure good scheduling.
"""
def __init__(
self,
time_attr: str,
max_trials: int,
init_t_attr: int,
max_t_attr: int,
eta: float,
s: int,
stop_last_trials: bool = True,
):
self._live_trials = {} # maps trial -> current result
self._all_trials = []
self._time_attr = time_attr # attribute to
self._n = self._n0 = max_trials
self._r = self._r0 = init_t_attr
self._max_t_attr = max_t_attr
self._cumul_r = self._r0
self._eta = eta
self._halves = s
self._total_work = self._calculate_total_work(self._n0, self._r0, s)
self._completed_progress = 0
self.stop_last_trials = stop_last_trials
self.is_being_processed = False
self.trials_to_unpause = set()
def add_trial(self, trial: Trial):
"""Add trial to bracket assuming bracket is not filled.
At a later iteration, a newly added trial will be given equal
opportunity to catch up."""
assert not self.filled(), "Cannot add trial to filled bracket!"
self._live_trials[trial] = None
self._all_trials.append(trial)
def cur_iter_done(self) -> bool:
"""Checks if all iterations have completed.
TODO(rliaw): also check that `t.iterations == self._r`"""
return all(
self._get_result_time(result) >= self._cumul_r
for result in self._live_trials.values()
)
def finished(self) -> bool:
if not self.stop_last_trials:
return False
return self._halves == 0 and self.cur_iter_done()
def current_trials(self) -> List[Trial]:
return list(self._live_trials)
def continue_trial(self, trial: Trial) -> bool:
result = self._live_trials[trial]
if not self.stop_last_trials and self._halves == 0:
return True
elif self._get_result_time(result) < self._cumul_r:
logger.debug(
f"Continuing trial {trial} as it hasn't reached the time threshold "
f"{self._cumul_r}, yet."
)
return True
return False
def filled(self) -> bool:
"""Checks if bracket is filled.
Only let new trials be added at current level minimizing the need
to backtrack and bookkeep previous medians."""
return len(self._live_trials) == self._n
def successive_halving(
self, metric: str, metric_op: float
) -> Tuple[List[Trial], List[Trial]]:
if self._halves == 0 and not self.stop_last_trials:
return self._live_trials, []
assert self._halves > 0
# "Halving" is a misnomer. We're actually reducing by factor `eta`.
self._halves -= 1
# If we had 8 trials in the bracket and eta=2, we will keep 4.
# If we had 9 trials in the bracket and eta=3, we will keep 3.
self._n = int(np.ceil(self._n / self._eta))
# Likewise, we increase the number of iterations until we process the bracket
# again.
# Remember r0 = max_t * self._eta ** (-s)
# Let max_t=16, eta=2, s=1. Then r0=8, and we calculate r1=16.
# Let max_t=16, eta=2, s=2. Then r0=4, and we calculate r1=8, r2=16.
# Let max_t=81, eta=3, s=1. Then r0=27, and we calculate r1=81.
# Let max_t=81, eta=3, s=2. Then r0=9, and we calculate r1=27, r2=81.
self._r *= self._eta
self._r = int(min(self._r, self._max_t_attr))
self._cumul_r = self._r
sorted_trials = sorted(
self._live_trials, key=lambda t: metric_op * self._live_trials[t][metric]
)
good, bad = sorted_trials[-self._n :], sorted_trials[: -self._n]
return good, bad
def update_trial_stats(self, trial: Trial, result: Dict):
"""Update result for trial. Called after trial has finished
an iteration - will decrement iteration count.
TODO(rliaw): The other alternative is to keep the trials
in and make sure they're not set as pending later."""
assert trial in self._live_trials
assert self._get_result_time(result) >= 0
observed_time = self._get_result_time(result)
last_observed = self._get_result_time(self._live_trials[trial])
delta = observed_time - last_observed
if delta <= 0:
logger.info(
"Restoring from a previous point in time. "
"Previous={}; Now={}".format(last_observed, observed_time)
)
self._completed_progress += delta
self._live_trials[trial] = result
self.trials_to_unpause.discard(trial)
def cleanup_trial(self, trial: Trial):
"""Clean up statistics tracking for terminated trials (either by force
or otherwise).
This may cause bad trials to continue for a long time, in the case
where all the good trials finish early and there are only bad trials
left in a bracket with a large max-iteration."""
self._live_trials.pop(trial, None)
def cleanup_full(self, tune_controller: "TuneController"):
"""Cleans up bracket after bracket is completely finished.
Lets the last trial continue to run until termination condition
kicks in."""
for trial in self.current_trials():
if trial.status == Trial.PAUSED:
tune_controller.stop_trial(trial)
def completion_percentage(self) -> float:
"""Returns a progress metric.
This will not be always finish with 100 since dead trials
are dropped."""
if self.finished():
return 1.0
return min(self._completed_progress / self._total_work, 1.0)
def _get_result_time(self, result: Dict) -> float:
if result is None:
return 0
return result[self._time_attr]
def _calculate_total_work(self, n: int, r: float, s: int):
work = 0
cumulative_r = r
for _ in range(s + 1):
work += int(n) * int(r)
n /= self._eta
n = int(np.ceil(n))
r *= self._eta
r = int(min(r, self._max_t_attr - cumulative_r))
return work
def __repr__(self) -> str:
status = ", ".join(
[
"Max Size (n)={}".format(self._n),
"Milestone (r)={}".format(self._cumul_r),
"completed={:.1%}".format(self.completion_percentage()),
]
)
counts = collections.Counter([t.status for t in self._all_trials])
trial_statuses = ", ".join(
sorted("{}: {}".format(k, v) for k, v in counts.items())
)
return "Bracket({}): {{{}}} ".format(status, trial_statuses)
| _Bracket |
python | django-crispy-forms__django-crispy-forms | crispy_forms/helper.py | {
"start": 339,
"end": 3660
} | class ____:
def _check_layout(self):
if self.layout is None:
raise FormHelpersException("You need to set a layout in your FormHelper")
def _check_layout_and_form(self):
self._check_layout()
if self.form is None:
raise FormHelpersException("You need to pass a form instance to your FormHelper")
def all(self):
"""
Returns all layout objects of first level of depth
"""
self._check_layout()
return LayoutSlice(self.layout, slice(0, len(self.layout.fields), 1))
def filter(self, *LayoutClasses, max_level=0, greedy=False):
"""
Returns a LayoutSlice pointing to layout objects of type `LayoutClass`
"""
self._check_layout()
filtered_layout_objects = self.layout.get_layout_objects(LayoutClasses, max_level=max_level, greedy=greedy)
return LayoutSlice(self.layout, filtered_layout_objects)
def filter_by_widget(self, widget_type):
"""
Returns a LayoutSlice pointing to fields with widgets of `widget_type`
"""
self._check_layout_and_form()
layout_field_names = self.layout.get_field_names()
# Let's filter all fields with widgets like widget_type
filtered_fields = []
for pointer in layout_field_names:
if isinstance(self.form.fields[pointer.name].widget, widget_type):
filtered_fields.append(pointer)
return LayoutSlice(self.layout, filtered_fields)
def exclude_by_widget(self, widget_type):
"""
Returns a LayoutSlice pointing to fields with widgets NOT matching `widget_type`
"""
self._check_layout_and_form()
layout_field_names = self.layout.get_field_names()
# Let's exclude all fields with widgets like widget_type
filtered_fields = []
for pointer in layout_field_names:
if not isinstance(self.form.fields[pointer.name].widget, widget_type):
filtered_fields.append(pointer)
return LayoutSlice(self.layout, filtered_fields)
def __getitem__(self, key):
"""
Return a LayoutSlice that makes changes affect the current instance of the layout
and not a copy.
"""
# when key is a string containing the field name
if isinstance(key, str):
# Django templates access FormHelper attributes using dictionary [] operator
# This could be a helper['form_id'] access, not looking for a field
if hasattr(self, key):
return getattr(self, key)
self._check_layout()
layout_field_names = self.layout.get_field_names()
filtered_field = []
for pointer in layout_field_names:
# There can be an empty pointer
if pointer.name == key:
filtered_field.append(pointer)
return LayoutSlice(self.layout, filtered_field)
return LayoutSlice(self.layout, key)
def __setitem__(self, key, value):
self.layout[key] = value
def __delitem__(self, key):
del self.layout.fields[key]
def __len__(self):
if self.layout is not None:
return len(self.layout.fields)
else:
return 0
| DynamicLayoutHandler |
python | Textualize__textual | docs/examples/guide/command_palette/command01.py | {
"start": 108,
"end": 459
} | class ____(App):
"""An app with a 'bell' command."""
def get_system_commands(self, screen: Screen) -> Iterable[SystemCommand]:
yield from super().get_system_commands(screen) # (1)!
yield SystemCommand("Bell", "Ring the bell", self.bell) # (2)!
if __name__ == "__main__":
app = BellCommandApp()
app.run()
| BellCommandApp |
python | spyder-ide__spyder | spyder/plugins/run/widgets.py | {
"start": 19310,
"end": 41818
} | class ____(BaseRunConfigDialog, SpyderFontsMixin):
"""Run dialog used to configure run executors."""
sig_delete_config_requested = Signal(str, str, str, str)
def __init__(
self,
parent=None,
run_conf_model=None,
executors_model=None,
parameter_model=None,
disable_run_btn=False
):
super().__init__(parent, disable_run_btn=disable_run_btn)
self.run_conf_model = run_conf_model
self.executors_model = executors_model
self.parameter_model = parameter_model
self.current_widget = None
self.status = RunDialogStatus.Close
self._is_shown = False
# ---- Public methods
# -------------------------------------------------------------------------
def setup(self):
# --- Header
self.header_label = QLabel(self)
self.header_label.setObjectName("run-header-label")
# --- File combobox
# It's hidden by default to decrease the complexity of this dialog
self.configuration_combo = SpyderComboBox(self)
self.configuration_combo.hide()
# --- Executor and parameters widgets
executor_label = QLabel(_("Runner:"))
self.executor_combo = SpyderComboBox(self)
self.executor_combo.setMinimumWidth(250)
executor_tip = TipWidget(
_("Configure the selected runner for this file"),
icon=ima.icon('question_tip'),
hover_icon=ima.icon('question_tip_hover'),
size=23,
wrap_text=True
)
parameters_label = QLabel(_("Preset configuration:"))
self.parameters_combo = SpyderComboBox(self)
self.parameters_combo.setMinimumWidth(250)
parameters_tip = TipWidget(
_(
"Select between global or local (i.e. for this file) "
"configuration presets. You can set the latter below"
),
icon=ima.icon('question_tip'),
hover_icon=ima.icon('question_tip_hover'),
size=23,
wrap_text=True
)
executor_g_layout = QGridLayout()
executor_g_layout.addWidget(executor_label, 0, 0)
executor_g_layout.addWidget(self.executor_combo, 0, 1)
executor_g_layout.addWidget(executor_tip, 0, 2)
executor_g_layout.addWidget(parameters_label, 1, 0)
executor_g_layout.addWidget(self.parameters_combo, 1, 1)
executor_g_layout.addWidget(parameters_tip, 1, 2)
executor_layout = QHBoxLayout()
executor_layout.addLayout(executor_g_layout)
executor_layout.addStretch()
# --- Configuration properties
config_props_group = QGroupBox(_("Configuration properties"))
config_props_layout = QGridLayout(config_props_group)
# Increase margin between title and line edit below so this looks good
config_props_margins = config_props_layout.contentsMargins()
config_props_margins.setTop(12)
config_props_layout.setContentsMargins(config_props_margins)
# Name to save custom configuration
name_params_label = QLabel(_("Name:"))
self.name_params_text = QLineEdit(self)
self.name_params_text.setPlaceholderText(
_("Set a name for this configuration")
)
name_params_tip = TipWidget(
_(
"You can set as many configuration presets as you want by "
"providing different names. Each one will be saved after "
"clicking the Ok button below"
),
icon=ima.icon('question_tip'),
hover_icon=ima.icon('question_tip_hover'),
size=23,
wrap_text=True
)
# This action needs to be added before setting an icon for it so that
# it doesn't show up in the line edit (despite being set as not visible
# below). That's probably a Qt bug.
status_action = QAction(self)
self.name_params_text.addAction(
status_action, QLineEdit.TrailingPosition
)
self.name_params_text.status_action = status_action
status_action.setIcon(ima.icon("error"))
status_action.setVisible(False)
config_props_layout.addWidget(name_params_label, 0, 0)
config_props_layout.addWidget(self.name_params_text, 0, 1)
config_props_layout.addWidget(name_params_tip, 0, 2)
# --- Runner settings
self.stack = QStackedWidget(self)
# --- Working directory settings
self.wdir_group = QGroupBox(_("Working directory settings"))
wdir_layout = QVBoxLayout(self.wdir_group)
self.file_dir_radio = QRadioButton(FILE_DIR)
wdir_layout.addWidget(self.file_dir_radio)
self.cwd_radio = QRadioButton(CW_DIR)
wdir_layout.addWidget(self.cwd_radio)
self.fixed_dir_radio = QRadioButton(FIXED_DIR)
self.wd_edit = QLineEdit(self)
self.fixed_dir_radio.toggled.connect(self.wd_edit.setEnabled)
self.wd_edit.setEnabled(False)
browse_btn = QPushButton(ima.icon('DirOpenIcon'), '', self)
browse_btn.setToolTip(_("Select directory"))
browse_btn.clicked.connect(self.select_directory)
browse_btn.setIconSize(
QSize(AppStyle.ConfigPageIconSize, AppStyle.ConfigPageIconSize)
)
fixed_dir_layout = QHBoxLayout()
fixed_dir_layout.addWidget(self.fixed_dir_radio)
fixed_dir_layout.addWidget(self.wd_edit)
fixed_dir_layout.addWidget(browse_btn)
wdir_layout.addLayout(fixed_dir_layout)
# --- Group all customization widgets into a collapsible one
custom_config = CollapsibleWidget(self, _("Custom configuration"))
custom_config.addWidget(config_props_group)
custom_config.addWidget(self.stack)
custom_config.addWidget(self.wdir_group)
# Fix bottom and left margins.
custom_config.set_content_bottom_margin(0)
custom_config.set_content_right_margin(AppStyle.MarginSize)
# Center dialog after custom_config is expanded/collapsed
custom_config._animation.finished.connect(self._center_dialog)
# --- Final layout
layout = self.add_widgets(
self.header_label,
self.configuration_combo, # Hidden for simplicity
executor_layout,
custom_config,
(-2 if MAC else 1) * AppStyle.MarginSize,
)
layout.setContentsMargins(
AppStyle.InnerContentPadding,
# This needs to be bigger to make the layout look better
AppStyle.InnerContentPadding + AppStyle.MarginSize,
# This makes the left and right padding be the same
AppStyle.InnerContentPadding + 4,
AppStyle.InnerContentPadding,
)
self.add_button_box(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
self.delete_button = QPushButton(_("Delete"))
self.delete_button.clicked.connect(self.delete_btn_clicked)
self.bbox.addButton(self.delete_button, QDialogButtonBox.ActionRole)
# --- Settings
self.executor_combo.currentIndexChanged.connect(
self.display_executor_configuration)
self.executor_combo.setModel(self.executors_model)
# This signal needs to be connected after
# executor_combo.currentIndexChanged and before
# configuration_combo.currentIndexChanged for parameters_combo to be
# updated as expected when opening the dialog.
self.parameters_combo.currentIndexChanged.connect(
self.update_parameter_set
)
self.parameters_combo.setModel(self.parameter_model)
self.configuration_combo.currentIndexChanged.connect(
self.update_configuration_run_index)
self.configuration_combo.setModel(self.run_conf_model)
self.configuration_combo.setCurrentIndex(
self.run_conf_model.get_initial_index())
self.configuration_combo.setMaxVisibleItems(1)
self.executor_combo.setMaxVisibleItems(20)
self.executor_combo.view().setVerticalScrollBarPolicy(
Qt.ScrollBarAsNeeded)
self.setWindowTitle(_("Run configuration per file"))
self.layout().setSizeConstraint(QLayout.SetFixedSize)
self.setStyleSheet(self._stylesheet)
def select_directory(self):
"""Select directory"""
basedir = str(self.wd_edit.text())
if not osp.isdir(basedir):
basedir = getcwd_or_home()
directory = getexistingdirectory(self, _("Select directory"), basedir)
if directory:
self.wd_edit.setText(directory)
self.dir = directory
def update_configuration_run_index(self, index: int):
self.executor_combo.setCurrentIndex(-1)
self.run_conf_model.update_index(index)
self.executor_combo.setCurrentIndex(
self.executors_model.get_initial_index()
)
def update_parameter_set(self, index: int):
if index < 0:
return
# Get parameters
stored_params = self.parameter_model.get_parameters(index)
global_params = stored_params["file_uuid"] is None
# Set parameters name
if global_params:
# We set this name for global params so users don't have to think
# about selecting one when customizing them
custom_name = self._get_auto_custom_name(stored_params["name"])
self.name_params_text.setText(custom_name)
else:
# We show the actual name for file params
self.name_params_text.setText(stored_params["name"])
# Disable delete button for global configs
if global_params:
self.delete_button.setEnabled(False)
else:
self.delete_button.setEnabled(True)
# Set parameters in their corresponding graphical elements
params = stored_params["params"]
working_dir_params = params['working_dir']
exec_params = params['executor_params']
self.current_widget.set_configuration(exec_params)
source = working_dir_params['source']
path = working_dir_params['path']
if source == WorkingDirSource.ConfigurationDirectory:
self.file_dir_radio.setChecked(True)
self.cwd_radio.setChecked(False)
self.fixed_dir_radio.setChecked(False)
self.wd_edit.setText('')
elif source == WorkingDirSource.CurrentDirectory:
self.file_dir_radio.setChecked(False)
self.cwd_radio.setChecked(True)
self.fixed_dir_radio.setChecked(False)
self.wd_edit.setText('')
elif source == WorkingDirSource.CustomDirectory:
self.file_dir_radio.setChecked(False)
self.cwd_radio.setChecked(False)
self.fixed_dir_radio.setChecked(True)
self.wd_edit.setText(path)
def display_executor_configuration(self, index: int):
if index == -1:
return
# Clear the QStackWidget contents
self.current_widget = None
while self.stack.count() > 0:
widget = self.stack.widget(0)
self.stack.removeWidget(widget)
exec_tuple = self.executors_model.get_selected_run_executor(index)
executor_name, executor_info = exec_tuple
enable_cwd = executor_info['requires_cwd']
self.wdir_group.setEnabled(enable_cwd)
ConfigWidget = (executor_info['configuration_widget'] or
RunExecutorConfigurationGroup)
if executor_info['configuration_widget'] is None:
self.stack.setVisible(False)
else:
self.stack.setVisible(True)
metadata = self.run_conf_model.get_selected_metadata()
context = metadata['context']
input_extension = metadata['input_extension']
uuid = metadata['uuid']
self.current_widget = ConfigWidget(
self, context, input_extension, metadata)
self.stack.addWidget(self.current_widget)
if uuid not in self.run_conf_model:
return
stored_params = self.run_conf_model.get_run_configuration_parameters(
uuid, executor_name)['params']
# Only show global parameters (i.e. those with file_uuid = None) or
# those that correspond to the current file.
stored_params = {
k: v for (k, v) in stored_params.items()
if v.get("file_uuid") in [None, uuid]
}
self.parameter_model.set_parameters(stored_params)
selected_params = self.run_conf_model.get_last_used_execution_params(
uuid, executor_name)
params_index = self.parameter_model.get_parameters_index_by_uuid(
selected_params
)
self.parameters_combo.setCurrentIndex(params_index)
self.adjustSize()
def select_executor(self, executor_name: str):
self.executor_combo.setCurrentIndex(
self.executors_model.get_run_executor_index(executor_name))
def reset_btn_clicked(self):
self.parameters_combo.setCurrentIndex(0)
def run_btn_clicked(self):
self.status |= RunDialogStatus.Run
self.accept()
def delete_btn_clicked(self):
answer = QMessageBox.question(
self,
_("Delete"),
_("Do you want to delete the current configuration?"),
)
if answer == QMessageBox.Yes:
# Get executor name
executor_name, __ = self.executors_model.get_selected_run_executor(
self.executor_combo.currentIndex()
)
# Get extension and context_id
metadata = self.run_conf_model.get_selected_metadata()
extension = metadata["input_extension"]
context_id = metadata["context"]["identifier"]
# Get index associated with config
idx = self.parameters_combo.currentIndex()
# Get config uuid
uuid, __ = self.parameter_model.get_parameters_uuid_name(idx)
self.sig_delete_config_requested.emit(
executor_name, extension, context_id, uuid
)
# Close dialog to not have to deal with the difficult case of
# updating its contents after this config is deleted
self.reject()
def get_configuration(
self
) -> Tuple[str, str, ExtendedRunExecutionParameters, bool]:
return self.saved_conf
# ---- Qt methods
# -------------------------------------------------------------------------
def accept(self) -> None:
self.status |= RunDialogStatus.Save
# Configuration to save/execute
widget_conf = self.current_widget.get_configuration()
# Hide status action in case users fix the problem reported through it
# on a successive try
self.name_params_text.status_action.setVisible(False)
# Get index of current params
current_index = self.parameters_combo.currentIndex()
if running_in_ci() and current_index == -1:
# This error seems to happen only on CIs
self.status = RunDialogStatus.Close
return
# Detect if the current params are global
params = self.parameter_model.get_parameters(current_index)
global_params = params["file_uuid"] is None
if global_params:
custom_name = self._get_auto_custom_name(params["name"])
else:
custom_name = ""
# Working directory params
path = None
source = None
if self.file_dir_radio.isChecked():
source = WorkingDirSource.ConfigurationDirectory
elif self.cwd_radio.isChecked():
source = WorkingDirSource.CurrentDirectory
else:
source = WorkingDirSource.CustomDirectory
path = self.wd_edit.text()
cwd_opts = WorkingDirOpts(source=source, path=path)
# Execution params
exec_params = RunExecutionParameters(
working_dir=cwd_opts, executor_params=widget_conf
)
# Different validations for the params name
params_name = self.name_params_text.text()
if self.isVisible():
allow_to_close = True
if not params_name:
# Don't allow to save params without a name
self.name_params_text.status_action.setVisible(True)
self.name_params_text.status_action.setToolTip(
'\n'.join(textwrap.wrap(EMPTY_NAME, 50))
)
allow_to_close = False
elif global_params and params_name == custom_name:
# We don't need to perform a validation in this case because we
# set the params name on behalf of users
pass
elif params_name != self.parameters_combo.lineEdit().text():
if params_name in self.parameter_model.get_parameter_names():
# Don't allow to save params with the same name of an
# existing one because it doesn't make sense.
allow_to_close = False
self.name_params_text.status_action.setVisible(True)
self.name_params_text.status_action.setToolTip(
'\n'.join(textwrap.wrap(REPEATED_NAME, 50))
)
elif params["params"] == exec_params:
# Don't allow to save params that are exactly the same as
# the current ones.
allow_to_close = False
self.name_params_text.status_action.setVisible(True)
self.name_params_text.status_action.setToolTip(
'\n'.join(textwrap.wrap(SAME_PARAMETERS, 50))
)
if not allow_to_close:
# With this the dialog can be closed when clicking the Cancel
# button
self.status = RunDialogStatus.Close
return
# Get index associated with config
if params["params"] == exec_params:
# This avoids saving an unnecessary custom config when the current
# parameters haven't been modified with respect to the selected
# config
idx = current_index
else:
idx = self.parameter_model.get_parameters_index_by_name(
params_name
)
# Get uuid and name from index
if idx == -1:
# This means that there are no saved parameters for params_name, so
# we need to generate a new uuid for them.
uuid = str(uuid4())
name = params_name
else:
# Retrieve uuid and name from our config system
uuid, name = self.parameter_model.get_parameters_uuid_name(idx)
# Build configuration to be saved or executed
metadata_info = self.run_conf_model.get_metadata(
self.configuration_combo.currentIndex()
)
ext_exec_params = ExtendedRunExecutionParameters(
uuid=uuid,
name=name,
params=exec_params,
file_uuid=None
if (global_params and idx >= 0)
else metadata_info["uuid"],
default=True
if (global_params and params["default"] and idx >= 0)
else False,
)
executor_name, __ = self.executors_model.get_selected_run_executor(
self.executor_combo.currentIndex()
)
self.saved_conf = (
metadata_info["uuid"],
executor_name,
ext_exec_params,
)
return super().accept()
def showEvent(self, event):
"""Adjustments when the widget is shown."""
if not self._is_shown:
# Set file name as the header
fname = self.configuration_combo.currentText()
header_font = (
self.get_font(SpyderFontType.Interface, font_size_delta=1)
)
# Elide fname in case fname is too long
fm = QFontMetrics(header_font)
text = fm.elidedText(
fname, Qt.ElideLeft, self.header_label.width()
)
self.header_label.setFont(header_font)
self.header_label.setAlignment(Qt.AlignCenter)
self.header_label.setText(text)
if text != fname:
self.header_label.setToolTip(fname)
self._is_shown = True
super().showEvent(event)
# ---- Private methods
# -------------------------------------------------------------------------
@property
def _stylesheet(self):
# --- Style for the header
self._css["QLabel#run-header-label"].setValues(
# Add good enough margin with the widgets below it.
marginBottom=f"{3 * AppStyle.MarginSize}px",
# This is necessary to align the label to the widgets below it.
marginLeft="4px",
)
# --- Style for the collapsible
self._css["CollapsibleWidget"].setValues(
# Separate it from the widgets above it
marginTop=f"{3 * AppStyle.MarginSize}px"
)
return self._css.toString()
def _center_dialog(self):
"""
Center dialog relative to the main window after collapsing/expanding
the custom configuration widget.
"""
# This doesn't work in our tests because the main window is usually
# not available in them.
if running_under_pytest():
return
qapp = qapplication()
main_window_pos = qapp.get_mainwindow_position()
main_window_height = qapp.get_mainwindow_height()
# We only center the dialog vertically because there's no need to
# do it horizontally.
x = self.x()
y = main_window_pos.y() + ((main_window_height - self.height()) // 2)
self.move(x, y)
def _get_auto_custom_name(self, global_params_name: str) -> str:
"""
Get an auto-generated custom name given the a global parameters one.
"""
n_custom = self.parameter_model.get_number_of_custom_params(
global_params_name
)
return (
global_params_name
+ " ("
+ _("custom")
+ (")" if n_custom == 0 else f" {n_custom})")
)
| RunDialog |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_collections.py | {
"start": 56,
"end": 5338
} | class ____:
def collection_view_url(self, view):
return (
"https://raw.githubusercontent.com/"
"PrefectHQ/prefect-collection-registry/main/"
f"views/aggregate-{view}-metadata.json"
)
@pytest.fixture
def mock_flow_response(self):
return {
"collection-name": {
"flow-name": {
"name": "flow-name",
},
}
}
@pytest.fixture
def mock_block_response(self):
return {
"collection-name": {
"block_types": {
"block-name": {
"name": "block-name",
},
},
}
}
@pytest.fixture
def mock_collection_response(self):
return {
"collection-name": {
"name": "collection-name",
},
}
@pytest.fixture
def mock_worker_response(self):
return {
"prefect": {
"prefect-agent": {},
"process": {},
}
}
@pytest.fixture
def mock_get_view(
self,
mock_flow_response,
mock_block_response,
mock_worker_response,
):
with respx.mock(
using="httpx", assert_all_mocked=False, assert_all_called=False
) as respx_mock:
flow_route = respx_mock.get(self.collection_view_url("flow")).mock(
return_value=Response(200, json=mock_flow_response)
)
block_route = respx_mock.get(self.collection_view_url("block")).mock(
return_value=Response(200, json=mock_block_response)
)
worker_route = respx_mock.get(self.collection_view_url("worker")).mock(
return_value=Response(200, json=mock_worker_response)
)
respx_mock.route(host="test").pass_through()
yield respx_mock, flow_route, block_route, worker_route
@pytest.fixture
def mock_get_missing_view(
self,
mock_flow_response,
mock_block_response,
mock_collection_response,
):
with respx.mock(
using="httpx",
assert_all_mocked=False,
assert_all_called=False,
base_url="https://raw.githubusercontent.com",
) as respx_mock:
respx_mock.get(self.collection_view_url("flow")).mock(
return_value=Response(404, json=mock_flow_response)
)
respx_mock.get(self.collection_view_url("block")).mock(
return_value=Response(404, json=mock_block_response)
)
respx_mock.get(self.collection_view_url("worker")).mock(
return_value=Response(404, json=mock_collection_response)
)
respx_mock.route(host="test").pass_through()
yield respx_mock
@pytest.mark.parametrize(
"view", ["aggregate-flow-metadata", "aggregate-block-metadata"]
)
async def test_read_view(self, client, view, mock_get_view):
res = await client.get(f"/collections/views/{view}")
assert res.status_code == 200
assert isinstance(res.json(), dict)
async def test_read_collection_view_when_missing(
self, client, mock_get_missing_view
):
res = await client.get("/collections/views/aggregate-flow-metadata")
detail = res.json()["detail"]
assert res.status_code == 404
assert detail == "Requested content missing for view aggregate-flow-metadata"
async def test_read_collection_view_invalid(self, client):
res = await client.get("/collections/views/invalid")
detail = res.json()["detail"]
assert res.status_code == 404
assert detail == "View invalid not found in registry"
@pytest.mark.parametrize(
"view", ["aggregate-flow-metadata", "aggregate-block-metadata"]
)
async def test_collection_view_cached(self, client, mock_get_view, view):
respx_mock, flow_route, block_route, worker_route = mock_get_view
res1 = await client.get(f"/collections/views/{view}")
assert res1.status_code == 200
assert isinstance(res1.json(), dict)
res2 = await client.get(f"/collections/views/{view}")
assert res2.status_code == 200
assert isinstance(res2.json(), dict)
assert res1.json() == res2.json()
if view == "aggregate-flow-metadata":
flow_route.calls.assert_called_once()
elif view == "aggregate-block-metadata":
block_route.calls.assert_called_once()
async def test_read_worker_view_failed_fetch(self, client, mock_get_missing_view):
res = await client.get("/collections/views/aggregate-worker-metadata")
assert res.status_code == 200
# check for expected key to ensure it isn't an error
assert isinstance(res.json()["prefect"], dict)
async def test_prefect_agent_excluded_from_worker_metadata(
self, client, mock_get_view
):
res = await client.get("/collections/views/aggregate-worker-metadata")
assert res.status_code == 200
assert "prefect-agent" not in res.json()["prefect"]
| TestReadCollectionViews |
python | getsentry__sentry | src/sentry/grouping/api.py | {
"start": 2274,
"end": 4526
} | class ____:
"""Load a grouping config based on global or project options"""
cache_prefix: str # Set in subclasses
def get_config_dict(self, project: Project) -> GroupingConfig:
return {
"id": self._get_config_id(project),
"enhancements": self._get_base64_enhancements(project),
}
def _get_base64_enhancements(self, project: Project) -> str:
derived_enhancements = project.get_option(DERIVED_ENHANCEMENTS_OPTION_KEY)
project_enhancements = project.get_option("sentry:grouping_enhancements")
config_id = self._get_config_id(project)
enhancements_base = GROUPING_CONFIG_CLASSES[config_id].enhancements_base
enhancements_version = get_enhancements_version(project, config_id)
cache_prefix = self.cache_prefix
cache_prefix += f"{enhancements_version}:"
cache_key = (
cache_prefix
+ md5_text(
f"{enhancements_base}|{derived_enhancements}|{project_enhancements}"
).hexdigest()
)
base64_enhancements = cache.get(cache_key)
if base64_enhancements is not None:
return base64_enhancements
try:
# Automatic enhancements are always applied first, so they can be overridden by
# project-specific enhancements.
enhancements_string = project_enhancements or ""
if derived_enhancements:
enhancements_string = (
f"{derived_enhancements}\n{enhancements_string}"
if enhancements_string
else derived_enhancements
)
base64_enhancements = EnhancementsConfig.from_rules_text(
enhancements_string,
bases=[enhancements_base] if enhancements_base else [],
version=enhancements_version,
referrer="project_rules",
).base64_string
except InvalidEnhancerConfig:
base64_enhancements = _get_default_base64_enhancements()
cache.set(cache_key, base64_enhancements)
return base64_enhancements
def _get_config_id(self, project: Project) -> str:
raise NotImplementedError
| GroupingConfigLoader |
python | pytorch__pytorch | torchgen/model.py | {
"start": 42363,
"end": 47398
} | class ____:
functional: NativeFunction
inplace: NativeFunction | None
mutable: NativeFunction | None
out: NativeFunction
@property
def structured(self) -> bool:
# Whether or not the operator has a meta() function. This information is backend-agnostic.
return self.out.structured
def __post_init__(self) -> None:
test_sig: FunctionSchema = self.functional.func.signature()
for f in self.functions():
if test_sig != f.func.signature():
raise AssertionError(
"NativeFunctionsGroup constructed from two NativeFunctions "
f"that don't have matching signatures: {test_sig} != {f.func.signature()}"
)
if self.structured != f.part_of_structured_group:
raise AssertionError(
"NativeFunctionsGroup constructed from structured and unstructured "
f"functions: {self.out.func.name} and {f.func.name}"
)
assert self.functional.func.kind() == SchemaKind.functional
assert self.out.func.kind() == SchemaKind.out
assert self.functional.namespace == self.out.namespace
if self.inplace is not None:
assert self.inplace.func.kind() == SchemaKind.inplace
assert self.inplace.namespace == self.functional.namespace
if self.mutable is not None:
assert self.mutable.func.kind() == SchemaKind.mutable
assert self.mutable.namespace == self.functional.namespace
# See Note [Overload Ambiguity With Functional Variants]
assert self.functional.func.name.name.functional_overload
if self.structured:
# For now, structured composite kernels are not supported (need some
# design work to figure out how to make the composite case work)
assert (
not self.out.has_composite_implicit_autograd_kernel
and not self.out.has_composite_implicit_autograd_nested_tensor_kernel
)
assert self.functional.structured_delegate == self.out.func.name, (
f"{self.functional.func.name} delegates to {self.functional.structured_delegate} "
f"but its actual delegate is {self.out.func.name}"
)
if self.inplace is not None:
assert self.inplace.structured_delegate == self.out.func.name
generated_fns = sorted(
[str(f.func.name) for f in self.functions() if "generated" in f.tags]
)
generated_fns_str = ", ".join(str(x) for x in generated_fns)
expected_generated_fns: set[str] = set()
for f in self.functions():
expected_generated_fns.update(str(op) for op in f.autogen)
expected_generated_fns_str = ", ".join(
str(x) for x in sorted(expected_generated_fns)
)
if len(expected_generated_fns) == 0 and len(generated_fns) > 0:
raise RuntimeError(
f"The codegen expects to be able to generate '{generated_fns_str}'."
" In order to generate them however, we expect them to be called out explicitly in the yaml."
f" Please add an 'autogen: {generated_fns_str}' line to the entry for {str(f.func.name)}"
)
if expected_generated_fns_str != generated_fns_str:
raise RuntimeError(
f"The codegen expects to be able to generate '{generated_fns_str}'."
f" To do so, it expects a line: 'autogen: {generated_fns_str}'."
f" Instead, it found 'autogen: {expected_generated_fns_str}'"
)
def signature(self) -> FunctionSchema:
return self.out.func.signature()
def functions(self) -> Iterator[NativeFunction]:
yield self.functional
yield self.out
if self.inplace is not None:
yield self.inplace
if self.mutable is not None:
yield self.mutable
@property
def root_name(self) -> str:
return self.functional.root_name
@staticmethod
def from_dict(d: dict[SchemaKind, NativeFunction]) -> NativeFunctionsGroup | None:
assert d
if len(d) == 1:
return None
d = dict(d) # non-destructive updates please
functional = d.pop(SchemaKind.functional, None)
inplace = d.pop(SchemaKind.inplace, None)
mutable = d.pop(SchemaKind.mutable, None)
out = d.pop(SchemaKind.out, None)
assert not d
assert functional is not None
# There are a few operators which only have functional/inplace variants;
# these don't count as structured for our purposes here
if out is None:
return None
# assuming all variants have the same namespace
return NativeFunctionsGroup(
functional=functional,
inplace=inplace,
mutable=mutable,
out=out,
)
@dataclass(frozen=True)
| NativeFunctionsGroup |
python | aio-libs__aiohttp | aiohttp/web_response.py | {
"start": 18648,
"end": 27253
} | class ____(StreamResponse):
_compressed_body: bytes | None = None
_send_headers_immediately = False
def __init__(
self,
*,
body: Any = None,
status: int = 200,
reason: str | None = None,
text: str | None = None,
headers: LooseHeaders | None = None,
content_type: str | None = None,
charset: str | None = None,
zlib_executor_size: int | None = None,
zlib_executor: Executor | None = None,
) -> None:
if body is not None and text is not None:
raise ValueError("body and text are not allowed together")
if headers is None:
real_headers: CIMultiDict[str] = CIMultiDict()
else:
real_headers = CIMultiDict(headers)
if content_type is not None and "charset" in content_type:
raise ValueError("charset must not be in content_type argument")
if text is not None:
if hdrs.CONTENT_TYPE in real_headers:
if content_type or charset:
raise ValueError(
"passing both Content-Type header and "
"content_type or charset params "
"is forbidden"
)
else:
# fast path for filling headers
if not isinstance(text, str):
raise TypeError("text argument must be str (%r)" % type(text))
if content_type is None:
content_type = "text/plain"
if charset is None:
charset = "utf-8"
real_headers[hdrs.CONTENT_TYPE] = content_type + "; charset=" + charset
body = text.encode(charset)
text = None
elif hdrs.CONTENT_TYPE in real_headers:
if content_type is not None or charset is not None:
raise ValueError(
"passing both Content-Type header and "
"content_type or charset params "
"is forbidden"
)
elif content_type is not None:
if charset is not None:
content_type += "; charset=" + charset
real_headers[hdrs.CONTENT_TYPE] = content_type
super().__init__(status=status, reason=reason, _real_headers=real_headers)
if text is not None:
self.text = text
else:
self.body = body
self._zlib_executor_size = zlib_executor_size
self._zlib_executor = zlib_executor
@property
def body(self) -> bytes | bytearray | Payload | None:
return self._body
@body.setter
def body(self, body: Any) -> None:
if body is None:
self._body = None
elif isinstance(body, (bytes, bytearray)):
self._body = body
else:
try:
self._body = body = payload.PAYLOAD_REGISTRY.get(body)
except payload.LookupError:
raise ValueError("Unsupported body type %r" % type(body))
headers = self._headers
# set content-type
if hdrs.CONTENT_TYPE not in headers:
headers[hdrs.CONTENT_TYPE] = body.content_type
# copy payload headers
if body.headers:
for key, value in body.headers.items():
if key not in headers:
headers[key] = value
self._compressed_body = None
@property
def text(self) -> str | None:
if self._body is None:
return None
# Note: When _body is a Payload (e.g. FilePayload), this may do blocking I/O
# This is generally safe as most common payloads (BytesPayload, StringPayload)
# don't do blocking I/O, but be careful with file-based payloads
return self._body.decode(self.charset or "utf-8")
@text.setter
def text(self, text: str) -> None:
assert isinstance(text, str), "text argument must be str (%r)" % type(text)
if self.content_type == "application/octet-stream":
self.content_type = "text/plain"
if self.charset is None:
self.charset = "utf-8"
self._body = text.encode(self.charset)
self._compressed_body = None
@property
def content_length(self) -> int | None:
if self._chunked:
return None
if hdrs.CONTENT_LENGTH in self._headers:
return int(self._headers[hdrs.CONTENT_LENGTH])
if self._compressed_body is not None:
# Return length of the compressed body
return len(self._compressed_body)
elif isinstance(self._body, Payload):
# A payload without content length, or a compressed payload
return None
elif self._body is not None:
return len(self._body)
else:
return 0
@content_length.setter
def content_length(self, value: int | None) -> None:
raise RuntimeError("Content length is set automatically")
async def write_eof(self, data: bytes = b"") -> None:
if self._eof_sent:
return
if self._compressed_body is None:
body = self._body
else:
body = self._compressed_body
assert not data, f"data arg is not supported, got {data!r}"
assert self._req is not None
assert self._payload_writer is not None
if body is None or self._must_be_empty_body:
await super().write_eof()
elif isinstance(self._body, Payload):
await self._body.write(self._payload_writer)
await self._body.close()
await super().write_eof()
else:
await super().write_eof(cast(bytes, body))
async def _start(self, request: "BaseRequest") -> AbstractStreamWriter:
if hdrs.CONTENT_LENGTH in self._headers:
if should_remove_content_length(request.method, self.status):
del self._headers[hdrs.CONTENT_LENGTH]
elif not self._chunked:
if isinstance(self._body, Payload):
if (size := self._body.size) is not None:
self._headers[hdrs.CONTENT_LENGTH] = str(size)
else:
body_len = len(self._body) if self._body else "0"
# https://www.rfc-editor.org/rfc/rfc9110.html#section-8.6-7
if body_len != "0" or (
self.status != 304 and request.method not in hdrs.METH_HEAD_ALL
):
self._headers[hdrs.CONTENT_LENGTH] = str(body_len)
return await super()._start(request)
async def _do_start_compression(self, coding: ContentCoding) -> None:
if self._chunked or isinstance(self._body, Payload):
return await super()._do_start_compression(coding)
if coding is ContentCoding.identity:
return
# Instead of using _payload_writer.enable_compression,
# compress the whole body
compressor = ZLibCompressor(
encoding=coding.value,
max_sync_chunk_size=self._zlib_executor_size,
executor=self._zlib_executor,
)
assert self._body is not None
if self._zlib_executor_size is None and len(self._body) > LARGE_BODY_SIZE:
warnings.warn(
"Synchronous compression of large response bodies "
f"({len(self._body)} bytes) might block the async event loop. "
"Consider providing a custom value to zlib_executor_size/"
"zlib_executor response properties or disabling compression on it."
)
self._compressed_body = (
await compressor.compress(self._body) + compressor.flush()
)
self._headers[hdrs.CONTENT_ENCODING] = coding.value
self._headers[hdrs.CONTENT_LENGTH] = str(len(self._compressed_body))
def json_response(
data: Any = sentinel,
*,
text: str | None = None,
body: bytes | None = None,
status: int = 200,
reason: str | None = None,
headers: LooseHeaders | None = None,
content_type: str = "application/json",
dumps: JSONEncoder = json.dumps,
) -> Response:
if data is not sentinel:
if text or body:
raise ValueError("only one of data, text, or body should be specified")
else:
text = dumps(data)
return Response(
text=text,
body=body,
status=status,
reason=reason,
headers=headers,
content_type=content_type,
)
| Response |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_emr_terminate_job_flow.py | {
"start": 1383,
"end": 2654
} | class ____:
def test_execute_terminates_the_job_flow_and_does_not_error(self, mocked_hook_client):
mocked_hook_client.terminate_job_flows.return_value = TERMINATE_SUCCESS_RETURN
operator = EmrTerminateJobFlowOperator(
task_id="test_task", job_flow_id="j-8989898989", aws_conn_id="aws_default"
)
operator.execute(MagicMock())
def test_create_job_flow_deferrable(self, mocked_hook_client):
mocked_hook_client.terminate_job_flows.return_value = TERMINATE_SUCCESS_RETURN
operator = EmrTerminateJobFlowOperator(
task_id="test_task",
job_flow_id="j-8989898989",
aws_conn_id="aws_default",
deferrable=True,
)
with pytest.raises(TaskDeferred) as exc:
operator.execute(MagicMock())
assert isinstance(exc.value.trigger, EmrTerminateJobFlowTrigger), (
"Trigger is not a EmrTerminateJobFlowTrigger"
)
def test_template_fields(self):
operator = EmrTerminateJobFlowOperator(
task_id="test_task",
job_flow_id="j-8989898989",
aws_conn_id="aws_default",
deferrable=True,
)
validate_template_fields(operator)
| TestEmrTerminateJobFlowOperator |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_secrets_manager.py | {
"start": 971,
"end": 2660
} | class ____:
def test_get_conn_returns_a_boto3_connection(self):
hook = SecretsManagerHook(aws_conn_id="aws_default")
assert hook.get_conn() is not None
def test_get_secret_string(self):
secret_name = "arn:aws:secretsmanager:us-east-2:999999999999:secret:db_cluster-YYYYYYY"
secret_value = "test"
hook = SecretsManagerHook(aws_conn_id="aws_default")
create_param = {
"Name": secret_name,
"SecretString": secret_value,
}
hook.get_conn().create_secret(**create_param)
secret = hook.get_secret(secret_name)
assert secret == secret_value
def test_get_secret_dict(self):
secret_name = "arn:aws:secretsmanager:us-east-2:999999999999:secret:db_cluster-YYYYYYY"
secret_value = '{"user": "test"}'
hook = SecretsManagerHook(aws_conn_id="aws_default")
create_param = {
"Name": secret_name,
"SecretString": secret_value,
}
hook.get_conn().create_secret(**create_param)
secret = hook.get_secret_as_dict(secret_name)
assert secret == json.loads(secret_value)
def test_get_secret_binary(self):
secret_name = "arn:aws:secretsmanager:us-east-2:999999999999:secret:db_cluster-YYYYYYY"
secret_value_binary = base64.b64encode(b'{"username": "test"}')
hook = SecretsManagerHook(aws_conn_id="aws_default")
create_param = {"Name": secret_name, "SecretBinary": secret_value_binary}
hook.get_conn().create_secret(**create_param)
secret = hook.get_secret(secret_name)
assert secret == base64.b64decode(secret_value_binary)
| TestSecretsManagerHook |
python | celery__celery | t/unit/app/test_loaders.py | {
"start": 7744,
"end": 11599
} | class ____:
def test_autodiscover_tasks(self):
base._RACE_PROTECTION = True
try:
base.autodiscover_tasks(['foo'])
finally:
base._RACE_PROTECTION = False
with patch('celery.loaders.base.find_related_module') as frm:
base.autodiscover_tasks(['foo'])
frm.assert_called()
# Happy - get something back
def test_find_related_module__when_existent_package_alone(self):
with patch('importlib.import_module') as imp:
imp.return_value = Mock()
imp.return_value.__path__ = 'foo'
assert base.find_related_module('foo', None).__path__ == 'foo'
imp.assert_called_once_with('foo')
def test_find_related_module__when_existent_package_and_related_name(self):
with patch('importlib.import_module') as imp:
first_import = Mock()
first_import.__path__ = 'foo'
second_import = Mock()
second_import.__path__ = 'foo/tasks'
imp.side_effect = [first_import, second_import]
assert base.find_related_module('foo', 'tasks').__path__ == 'foo/tasks'
imp.assert_any_call('foo')
imp.assert_any_call('foo.tasks')
def test_find_related_module__when_existent_package_parent_and_related_name(self):
with patch('importlib.import_module') as imp:
first_import = ModuleNotFoundError(name='foo.BarApp') # Ref issue #2248
second_import = Mock()
second_import.__path__ = 'foo/tasks'
imp.side_effect = [first_import, second_import]
assert base.find_related_module('foo.BarApp', 'tasks').__path__ == 'foo/tasks'
imp.assert_any_call('foo.BarApp')
imp.assert_any_call('foo.tasks')
# Sad - nothing returned
def test_find_related_module__when_package_exists_but_related_name_does_not(self):
with patch('importlib.import_module') as imp:
first_import = Mock()
first_import.__path__ = 'foo'
second_import = ModuleNotFoundError(name='foo.tasks')
imp.side_effect = [first_import, second_import]
assert base.find_related_module('foo', 'tasks') is None
imp.assert_any_call('foo')
imp.assert_any_call('foo.tasks')
def test_find_related_module__when_existent_package_parent_but_no_related_name(self):
with patch('importlib.import_module') as imp:
first_import = ModuleNotFoundError(name='foo.bar')
second_import = ModuleNotFoundError(name='foo.tasks')
imp.side_effect = [first_import, second_import]
assert base.find_related_module('foo.bar', 'tasks') is None
imp.assert_any_call('foo.bar')
imp.assert_any_call('foo.tasks')
# Sad - errors
def test_find_related_module__when_no_package_parent(self):
with patch('importlib.import_module') as imp:
non_existent_import = ModuleNotFoundError(name='foo')
imp.side_effect = non_existent_import
with pytest.raises(ModuleNotFoundError) as exc:
base.find_related_module('foo', 'tasks')
assert exc.value.name == 'foo'
imp.assert_called_once_with('foo')
def test_find_related_module__when_nested_import_missing(self):
expected_error = 'dummy import error - e.g. missing nested package'
with patch('importlib.import_module') as imp:
first_import = Mock()
first_import.__path__ = 'foo'
second_import = ModuleNotFoundError(expected_error)
imp.side_effect = [first_import, second_import]
with pytest.raises(ModuleNotFoundError) as exc:
base.find_related_module('foo', 'tasks')
assert exc.value.msg == expected_error
| test_autodiscovery |
python | google__pytype | pytype/abstract/_instances.py | {
"start": 9407,
"end": 10493
} | class ____(BaseGenerator):
# NOTE(review): dataset payload — embedded source from pytype
# pytype/abstract/_instances.py with the class name masked as ____
# (target label: "Generator"). Whitespace was flattened by extraction;
# code bytes are left untouched.
"""A representation of instances of generators."""
def __init__(
self, generator_frame: "state.Frame", ctx: "context.Context"
) -> None:
# The trailing True presumably marks this as a true generator (vs. other
# BaseGenerator subclasses) — confirm against BaseGenerator.__init__.
super().__init__(ctx.convert.generator_type, generator_frame, ctx, True)
def get_special_attribute(
self, node: cfg.CFGNode, name: str, valself: cfg.Variable
):
# Intercept the iterator protocol: __iter__ is exposed as a native
# function, while __next__ (and throw, see below) resolve to the
# generator instance itself.
if name == "__iter__":
f = _abstract.NativeFunction(name, self.__iter__, self.ctx)
return f.to_variable(node)
elif name == "__next__":
return self.to_variable(node)
elif name == "throw":
# We don't model exceptions in a way that would allow us to induce one
# inside a coroutine. So just return ourself, mapping the call of
# throw() to a next() (which won't be executed).
return self.to_variable(node)
else:
return super().get_special_attribute(node, name, valself)
def __iter__(self, node: cfg.CFGNode) -> tuple[cfg.CFGNode, cfg.Variable]: # pylint: disable=non-iterator-returned,unexpected-special-method-signature
# Non-standard signature on purpose: pytype threads the CFG node through
# and returns (node, variable) rather than a Python iterator.
return node, self.to_variable(node)
| Generator |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 145893,
"end": 148100
} | class ____(ASTBase):
# NOTE(review): dataset payload — embedded source from sphinx
# sphinx/domains/cpp/_ast.py with the class name masked as ____
# (target label: "ASTTemplateIntroductionParameter"). Whitespace was
# flattened by extraction; code bytes are left untouched.
# Models one parameter of a C++ concept "template introduction"
# (e.g. the "T" or "...Ts" in "Concept{T, ...Ts}").
def __init__(self, identifier: ASTIdentifier, parameterPack: bool) -> None:
self.identifier = identifier
self.parameterPack = parameterPack
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTTemplateIntroductionParameter):
return NotImplemented
return (
self.identifier == other.identifier
and self.parameterPack == other.parameterPack
)
def __hash__(self) -> int:
# Paired with __eq__ above over the same two fields.
return hash((self.identifier, self.parameterPack))
@property
def name(self) -> ASTNestedName:
# Wrap the bare identifier as an unrooted, non-template nested name.
id = self.get_identifier()
return ASTNestedName([ASTNestedNameElement(id, None)], [False], rooted=False)
@property
def isPack(self) -> bool:
return self.parameterPack
def get_identifier(self) -> ASTIdentifier:
return self.identifier
def get_id(
self, version: int, objectType: str | None = None, symbol: Symbol | None = None
) -> str:
assert version >= 2
# this is not part of the normal name mangling in C++
if symbol:
# the anchor will be our parent
return symbol.parent.declaration.get_id(version, prefixed=None)
else:
if self.parameterPack:
return 'Dp'
else:
return '0' # we need to put something
def get_id_as_arg(self, version: int) -> str:
assert version >= 2
# used for the implicit requires clause
res = self.identifier.get_id(version)
if self.parameterPack:
# 'sp' marks a pack expansion in the Itanium-style id scheme.
return 'sp' + res
else:
return res
def _stringify(self, transform: StringifyTransform) -> str:
# Render as "...ident" for packs, plain "ident" otherwise.
res = []
if self.parameterPack:
res.append('...')
res.append(transform(self.identifier))
return ''.join(res)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
# Emit docutils nodes: the '...' punctuation (if a pack) then the name.
if self.parameterPack:
signode += addnodes.desc_sig_punctuation('...', '...')
self.identifier.describe_signature(signode, mode, env, '', '', symbol)
| ASTTemplateIntroductionParameter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.