language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | falconry__falcon | tests/test_sinks.py | {
"start": 339,
"end": 650
} | class ____(Sink):
async def __call__(self, req, resp, **kwargs):
super().__call__(req, resp, **kwargs)
def kitchen_sink(req, resp, **kwargs):
resp.set_header('X-Missing-Feature', 'kitchen-sink')
async def async_kitchen_sink(req, resp, **kwargs):
kitchen_sink(req, resp, **kwargs)
| SinkAsync |
python | walkccc__LeetCode | solutions/3129. Find All Possible Stable Binary Arrays I/3129.py | {
"start": 0,
"end": 988
} | class ____:
# Same as 3129. Find All Possible Stable Binary Arrays I
def numberOfStableArrays(self, zero: int, one: int, limit: int) -> int:
MOD = 1_000_000_007
# dp[i][j][k] := the number of stable arrays, where the number of
# occurrences of 0 is i and the number of occurrences of 1 is j and the last
# number is k (0/1)
dp = [[[0] * 2
for _ in range(one + 1)]
for _ in range(zero + 1)]
for i in range(min(zero, limit) + 1):
dp[i][0][0] = 1
for j in range(min(one, limit) + 1):
dp[0][j][1] = 1
for i in range(1, zero + 1):
for j in range(1, one + 1):
dp[i][j][0] = (
dp[i - 1][j][0] + dp[i - 1][j][1] -
(dp[i - limit - 1][j][1] if i - limit >= 1 else 0) + MOD) % MOD
dp[i][j][1] = (
dp[i][j - 1][0] + dp[i][j - 1][1] -
(dp[i][j - limit - 1][0] if j - limit >= 1 else 0) + MOD) % MOD
return (dp[zero][one][0] + dp[zero][one][1]) % MOD
| Solution |
python | kamyu104__LeetCode-Solutions | Python/smallest-palindromic-rearrangement-i.py | {
"start": 71,
"end": 534
} | class ____(object):
def smallestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
cnt = [0]*26
for i in xrange(len(s)//2):
cnt[ord(s[i])-ord('a')] += 1
result = [chr(ord('a')+i)*c for i, c in enumerate(cnt)]
if len(s)%2:
result.append(s[len(s)//2])
result.extend((result[i] for i in reversed(xrange(len(result)-len(s)%2))))
return "".join(result)
| Solution |
python | kamyu104__LeetCode-Solutions | Python/lucky-numbers-in-a-matrix.py | {
"start": 56,
"end": 474
} | class ____(object):
def luckyNumbers (self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[int]
"""
rows = map(min, matrix)
cols = map(max, itertools.izip(*matrix))
return [cell for i, row in enumerate(matrix)
for j, cell in enumerate(row) if rows[i] == cols[j]]
# Time: O(m * n)
# Space: O(m + n)
import itertools
| Solution |
python | fluentpython__example-code | 19-dyn-attr-prop/oscon/schedule2.py | {
"start": 814,
"end": 1125
} | class ____:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __eq__(self, other): # <3>
if isinstance(other, Record):
return self.__dict__ == other.__dict__
else:
return NotImplemented
# END SCHEDULE2_RECORD
# BEGIN SCHEDULE2_DBRECORD
| Record |
python | jazzband__django-pipeline | tests/tests/test_collector.py | {
"start": 347,
"end": 2281
} | class ____(TestCase):
def tearDown(self):
super().tearDown()
default_collector.clear()
def test_collect(self):
self.assertEqual(
set(default_collector.collect()), set(self._get_collectable_files())
)
def test_collect_with_files(self):
self.assertEqual(
set(
default_collector.collect(
files=[
"pipeline/js/first.js",
"pipeline/js/second.js",
]
)
),
{
"pipeline/js/first.js",
"pipeline/js/second.js",
},
)
def test_delete_file_with_modified(self):
list(default_collector.collect())
storage = FileSystemStorage(local_path("assets"))
new_mtime = os.path.getmtime(storage.path("js/first.js")) - 1000
os.utime(
default_collector.storage.path("pipeline/js/first.js"),
(new_mtime, new_mtime),
)
self.assertTrue(
default_collector.delete_file(
"js/first.js", "pipeline/js/first.js", storage
)
)
def test_delete_file_with_unmodified(self):
list(default_collector.collect(files=["pipeline/js/first.js"]))
self.assertFalse(
default_collector.delete_file(
"js/first.js",
"pipeline/js/first.js",
FileSystemStorage(local_path("assets")),
)
)
def _get_collectable_files(self):
for finder in finders.get_finders():
if not isinstance(finder, PipelineFinder):
for path, storage in finder.list(["CVS", ".*", "*~"]):
if getattr(storage, "prefix", None):
yield os.path.join(storage.prefix, path)
else:
yield path
| CollectorTest |
python | sqlalchemy__sqlalchemy | test/engine/test_processors.py | {
"start": 4263,
"end": 8763
} | class ____(fixtures.TestBase):
def test_distill_20_none(self):
eq_(self.module._distill_params_20(None), ())
def test_distill_20_empty_sequence(self):
with expect_deprecated(
r"Empty parameter sequence passed to execute\(\). "
"This use is deprecated and will raise an exception in a "
"future SQLAlchemy release"
):
eq_(self.module._distill_params_20(()), ())
eq_(self.module._distill_params_20([]), [])
def test_distill_20_sequence_dict(self):
eq_(self.module._distill_params_20(({"a": 1},)), ({"a": 1},))
eq_(
self.module._distill_params_20([{"a": 1}, {"a": 2}]),
[{"a": 1}, {"a": 2}],
)
eq_(
self.module._distill_params_20((MappingProxyType({"a": 1}),)),
(MappingProxyType({"a": 1}),),
)
@combinations(
[(1, 2, 3)],
[([1, 2, 3],)],
[[1, 2, 3]],
[["a", "b"]],
[((1, 2, 3),)],
[[(1, 2, 3)]],
[((1, 2), (2, 3))],
[[(1, 2), (2, 3)]],
argnames="arg",
)
def test_distill_20_sequence_error(self, arg):
with expect_raises_message(
exc.ArgumentError,
"List argument must consist only of dictionaries",
):
self.module._distill_params_20(arg)
def test_distill_20_dict(self):
eq_(self.module._distill_params_20({"foo": "bar"}), [{"foo": "bar"}])
eq_(
self.module._distill_params_20(immutabledict({"foo": "bar"})),
[immutabledict({"foo": "bar"})],
)
eq_(
self.module._distill_params_20(MappingProxyType({"foo": "bar"})),
[MappingProxyType({"foo": "bar"})],
)
def test_distill_20_error(self):
with expect_raises_message(
exc.ArgumentError, "mapping or list expected for parameters"
):
self.module._distill_params_20("foo")
with expect_raises_message(
exc.ArgumentError, "mapping or list expected for parameters"
):
self.module._distill_params_20(1)
def test_distill_raw_none(self):
eq_(self.module._distill_raw_params(None), ())
def test_distill_raw_empty_list(self):
eq_(self.module._distill_raw_params([]), [])
def test_distill_raw_list_sequence(self):
eq_(self.module._distill_raw_params([(1, 2, 3)]), [(1, 2, 3)])
eq_(
self.module._distill_raw_params([(1, 2), (2, 3)]), [(1, 2), (2, 3)]
)
def test_distill_raw_list_dict(self):
eq_(
self.module._distill_raw_params([{"a": 1}, {"a": 2}]),
[{"a": 1}, {"a": 2}],
)
eq_(
self.module._distill_raw_params([MappingProxyType({"a": 1})]),
[MappingProxyType({"a": 1})],
)
def test_distill_raw_sequence_error(self):
with expect_raises_message(
exc.ArgumentError,
"List argument must consist only of tuples or dictionaries",
):
self.module._distill_raw_params([1, 2, 3])
with expect_raises_message(
exc.ArgumentError,
"List argument must consist only of tuples or dictionaries",
):
self.module._distill_raw_params([[1, 2, 3]])
with expect_raises_message(
exc.ArgumentError,
"List argument must consist only of tuples or dictionaries",
):
self.module._distill_raw_params(["a", "b"])
def test_distill_raw_tuple(self):
eq_(self.module._distill_raw_params(()), [()])
eq_(self.module._distill_raw_params((1, 2, 3)), [(1, 2, 3)])
def test_distill_raw_dict(self):
eq_(self.module._distill_raw_params({"foo": "bar"}), [{"foo": "bar"}])
eq_(
self.module._distill_raw_params(immutabledict({"foo": "bar"})),
[immutabledict({"foo": "bar"})],
)
eq_(
self.module._distill_raw_params(MappingProxyType({"foo": "bar"})),
[MappingProxyType({"foo": "bar"})],
)
def test_distill_raw_error(self):
with expect_raises_message(
exc.ArgumentError, "mapping or sequence expected for parameters"
):
self.module._distill_raw_params("foo")
with expect_raises_message(
exc.ArgumentError, "mapping or sequence expected for parameters"
):
self.module._distill_raw_params(1)
| _DistillArgsTest |
python | huggingface__transformers | src/transformers/models/convnextv2/modeling_convnextv2.py | {
"start": 2524,
"end": 3359
} | class ____(nn.Module):
"""GRN (Global Response Normalization) layer"""
def __init__(self, dim: int):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1, 1, 1, dim))
self.bias = nn.Parameter(torch.zeros(1, 1, 1, dim))
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
# Compute and normalize global spatial feature maps
global_features = torch.linalg.vector_norm(hidden_states, ord=2, dim=(1, 2), keepdim=True)
norm_features = global_features / (global_features.mean(dim=-1, keepdim=True) + 1e-6)
hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states
return hidden_states
# Copied from transformers.models.convnext.modeling_convnext.ConvNextLayerNorm with ConvNext->ConvNextV2
| ConvNextV2GRN |
python | kubernetes-client__python | kubernetes/client/models/v1_network_policy_spec.py | {
"start": 383,
"end": 9596
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'egress': 'list[V1NetworkPolicyEgressRule]',
'ingress': 'list[V1NetworkPolicyIngressRule]',
'pod_selector': 'V1LabelSelector',
'policy_types': 'list[str]'
}
attribute_map = {
'egress': 'egress',
'ingress': 'ingress',
'pod_selector': 'podSelector',
'policy_types': 'policyTypes'
}
def __init__(self, egress=None, ingress=None, pod_selector=None, policy_types=None, local_vars_configuration=None): # noqa: E501
"""V1NetworkPolicySpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._egress = None
self._ingress = None
self._pod_selector = None
self._policy_types = None
self.discriminator = None
if egress is not None:
self.egress = egress
if ingress is not None:
self.ingress = ingress
if pod_selector is not None:
self.pod_selector = pod_selector
if policy_types is not None:
self.policy_types = policy_types
@property
def egress(self):
"""Gets the egress of this V1NetworkPolicySpec. # noqa: E501
egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8 # noqa: E501
:return: The egress of this V1NetworkPolicySpec. # noqa: E501
:rtype: list[V1NetworkPolicyEgressRule]
"""
return self._egress
@egress.setter
def egress(self, egress):
"""Sets the egress of this V1NetworkPolicySpec.
egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8 # noqa: E501
:param egress: The egress of this V1NetworkPolicySpec. # noqa: E501
:type: list[V1NetworkPolicyEgressRule]
"""
self._egress = egress
@property
def ingress(self):
"""Gets the ingress of this V1NetworkPolicySpec. # noqa: E501
ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default) # noqa: E501
:return: The ingress of this V1NetworkPolicySpec. # noqa: E501
:rtype: list[V1NetworkPolicyIngressRule]
"""
return self._ingress
@ingress.setter
def ingress(self, ingress):
"""Sets the ingress of this V1NetworkPolicySpec.
ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default) # noqa: E501
:param ingress: The ingress of this V1NetworkPolicySpec. # noqa: E501
:type: list[V1NetworkPolicyIngressRule]
"""
self._ingress = ingress
@property
def pod_selector(self):
"""Gets the pod_selector of this V1NetworkPolicySpec. # noqa: E501
:return: The pod_selector of this V1NetworkPolicySpec. # noqa: E501
:rtype: V1LabelSelector
"""
return self._pod_selector
@pod_selector.setter
def pod_selector(self, pod_selector):
"""Sets the pod_selector of this V1NetworkPolicySpec.
:param pod_selector: The pod_selector of this V1NetworkPolicySpec. # noqa: E501
:type: V1LabelSelector
"""
self._pod_selector = pod_selector
@property
def policy_types(self):
"""Gets the policy_types of this V1NetworkPolicySpec. # noqa: E501
policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8 # noqa: E501
:return: The policy_types of this V1NetworkPolicySpec. # noqa: E501
:rtype: list[str]
"""
return self._policy_types
@policy_types.setter
def policy_types(self, policy_types):
"""Sets the policy_types of this V1NetworkPolicySpec.
policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8 # noqa: E501
:param policy_types: The policy_types of this V1NetworkPolicySpec. # noqa: E501
:type: list[str]
"""
self._policy_types = policy_types
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NetworkPolicySpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1NetworkPolicySpec):
return True
return self.to_dict() != other.to_dict()
| V1NetworkPolicySpec |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/bedrock.py | {
"start": 12256,
"end": 15905
} | class ____(BedrockBaseSensor[BedrockAgentHook]):
"""
Poll the ingestion job status until it reaches a terminal state; fails if creation fails.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:BedrockIngestionJobSensor`
:param knowledge_base_id: The unique identifier of the knowledge base for which to get information. (templated)
:param data_source_id: The unique identifier of the data source in the ingestion job. (templated)
:param ingestion_job_id: The unique identifier of the ingestion job. (templated)
:param deferrable: If True, the sensor will operate in deferrable more. This mode requires aiobotocore
module to be installed.
(default: False, but can be overridden in config file by setting default_deferrable to True)
:param poke_interval: Polling period in seconds to check for the status of the job. (default: 60)
:param max_retries: Number of times before returning the current state (default: 10)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
INTERMEDIATE_STATES: tuple[str, ...] = ("STARTING", "IN_PROGRESS")
FAILURE_STATES: tuple[str, ...] = ("FAILED",)
SUCCESS_STATES: tuple[str, ...] = ("COMPLETE",)
FAILURE_MESSAGE = "Bedrock ingestion job sensor failed."
aws_hook_class = BedrockAgentHook
template_fields: Sequence[str] = aws_template_fields(
"knowledge_base_id", "data_source_id", "ingestion_job_id"
)
def __init__(
self,
*,
knowledge_base_id: str,
data_source_id: str,
ingestion_job_id: str,
poke_interval: int = 60,
max_retries: int = 10,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.poke_interval = poke_interval
self.max_retries = max_retries
self.knowledge_base_id = knowledge_base_id
self.data_source_id = data_source_id
self.ingestion_job_id = ingestion_job_id
def get_state(self) -> str:
return self.hook.conn.get_ingestion_job(
knowledgeBaseId=self.knowledge_base_id,
ingestionJobId=self.ingestion_job_id,
dataSourceId=self.data_source_id,
)["ingestionJob"]["status"]
def execute(self, context: Context) -> Any:
if self.deferrable:
self.defer(
trigger=BedrockIngestionJobTrigger(
knowledge_base_id=self.knowledge_base_id,
ingestion_job_id=self.ingestion_job_id,
data_source_id=self.data_source_id,
waiter_delay=int(self.poke_interval),
waiter_max_attempts=self.max_retries,
aws_conn_id=self.aws_conn_id,
),
method_name="poke",
)
else:
super().execute(context=context)
| BedrockIngestionJobSensor |
python | django-import-export__django-import-export | tests/core/admin.py | {
"start": 2497,
"end": 2579
} | class ____(ModelResource):
class Meta:
model = UUIDBook
| UUIDBookResource |
python | kamyu104__LeetCode-Solutions | Python/number-of-ways-to-split-array.py | {
"start": 42,
"end": 364
} | class ____(object):
def waysToSplitArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
total = sum(nums)
result = curr = 0
for i in xrange(len(nums)-1):
curr += nums[i]
result += int(curr >= total-curr)
return result
| Solution |
python | allegroai__clearml | clearml/automation/job.py | {
"start": 19432,
"end": 27530
} | class ____(BaseJob):
def __init__(
self,
base_task_id: str,
parameter_override: Optional[Mapping[str, str]] = None,
task_overrides: Optional[Mapping[str, str]] = None,
configuration_overrides: Optional[Mapping[str, Union[str, Mapping]]] = None,
tags: Optional[Sequence[str]] = None,
parent: Optional[str] = None,
disable_clone_task: bool = False,
allow_caching: bool = False,
target_project: Optional[str] = None,
output_uri: Optional[Union[str, bool]] = None,
enable_local_imports: bool = True,
**kwargs: Any,
) -> None:
"""
Create a new Task based on a base_task_id with a different set of parameters
:param str base_task_id: base task ID to clone from
:param dict parameter_override: dictionary of parameters and values to set fo the cloned task
:param dict task_overrides: Task object specific overrides.
for example ``{'script.version_num': None, 'script.branch': 'main'}``
:param configuration_overrides: Optional, override Task configuration objects.
Expected dictionary of configuration object name and configuration object content.
Examples:
``{'config_section': dict(key='value')}``
``{'config_file': 'configuration file content'}``
``{'OmegaConf': YAML.dumps(full_hydra_dict)}``
:param list tags: additional tags to add to the newly cloned task
:param str parent: Set newly created Task parent task field, default: base_tak_id.
:param dict kwargs: additional Task creation parameters
:param bool disable_clone_task: if False (default), clone base task id.
If True, use the base_task_id directly (base-task must be in draft-mode / created),
:param bool allow_caching: If True, check if we have a previously executed Task with the same specification.
If we do, use it and set internal is_cached flag. Default False (always create new Task).
:param output_uri: The storage / output url for this job. This is the default location for
output models and other artifacts. Check Task.init reference docs for more info (output_uri is a parameter).
:param str target_project: Optional, Set the target project name to create the cloned Task in.
:param enable_local_imports: If True, allow jobs to import from local files
by appending PYTHONPATH sys.path[0].
If False, the current path directory won't be appended to PYTHONPATH. Default is True.
Ignored while running remotely.
"""
super(ClearmlJob, self).__init__()
base_temp_task = Task.get_task(task_id=base_task_id)
self._enable_local_imports = enable_local_imports
if disable_clone_task:
self.task = base_temp_task
task_status = self.task.status
if task_status != Task.TaskStatusEnum.created:
logger.warning(
"Task cloning disabled but requested Task [{}] status={}. "
"Reverting to clone Task".format(base_task_id, task_status)
)
disable_clone_task = False
self.task = None
elif parent:
self.task.set_parent(parent)
else:
self.task = None
self.task_parameter_override = None
task_params = None
if parameter_override:
task_params = base_temp_task.get_parameters(backwards_compatibility=False)
task_params.update(parameter_override)
self.task_parameter_override = dict(**parameter_override)
task_configurations = None
if configuration_overrides:
task_configurations = deepcopy(base_temp_task.data.configuration or {})
for k, v in configuration_overrides.items():
if not isinstance(v, (str, dict)):
raise ValueError(
"Configuration override dictionary value must be wither str or dict, "
"got {} instead".format(type(v))
)
value = v if isinstance(v, str) else json.dumps(v)
if k in task_configurations:
task_configurations[k].value = value
else:
task_configurations[k] = tasks_service.ConfigurationItem(
name=str(k),
value=value,
description=None,
type="json" if isinstance(v, dict) else None,
)
configuration_overrides = {k: v.value for k, v in task_configurations.items()}
sections = {}
if task_overrides:
# set values inside the Task
for k, v in task_overrides.items():
# notice we can allow ourselves to change the base-task object as we will not use it any further
# noinspection PyProtectedMember
base_temp_task._set_task_property(k, v, raise_on_error=False, log_on_error=True)
section = k.split(".")[0]
sections[section] = getattr(base_temp_task.data, section, None)
# check cached task
self._is_cached_task = False
task_hash = None
if allow_caching:
# look for a cached copy of the Task
# get parameters + task_overrides + as dict and hash it.
task_hash = self._create_task_hash(
base_temp_task,
section_overrides=sections,
params_override=task_params,
configurations_override=configuration_overrides or None,
explicit_docker_image=kwargs.get("explicit_docker_image"),
)
task = self._get_cached_task(task_hash)
# if we found a task, just use
if task:
if disable_clone_task and self.task and self.task.status == self.task.TaskStatusEnum.created:
# if the base task at is in draft mode, and we are using cached task
# we assume the base Task was created adhoc and we can delete it.
pass # self.task.delete()
self._is_cached_task = True
self.task = task
self.task_started = True
self._worker = None
return
# if we have target_project, remove project from kwargs if we have it.
if target_project and "project" in kwargs:
logger.info(
"target_project={} and project={} passed, using target_project.".format(
target_project, kwargs["project"]
)
)
kwargs.pop("project", None)
# check again if we need to clone the Task
if not disable_clone_task:
# noinspection PyProtectedMember
self.task = Task.clone(
base_task_id,
parent=parent or base_task_id,
project=get_or_create_project(session=Task._get_default_session(), project_name=target_project)
if target_project
else kwargs.pop("project", None),
**kwargs,
)
if tags:
self.task.set_tags(list(set(self.task.get_tags()) | set(tags)))
if parameter_override:
self.task.update_parameters(parameter_override)
# store back Task configuration object into backend
if task_configurations:
# noinspection PyProtectedMember
self.task._edit(configuration=task_configurations)
if task_overrides and sections:
# store back Task parameters into backend
# noinspection PyProtectedMember
self.task._edit(**sections)
if output_uri is not None:
self.task.output_uri = output_uri
self._set_task_cache_hash(self.task, task_hash)
self.task_started = False
self._worker = None
| ClearmlJob |
python | allegroai__clearml | clearml/utilities/process/mp.py | {
"start": 5714,
"end": 6498
} | class ____(_ForkSafeThreadSyncObject):
def __init__(self) -> None:
super(ForkQueue, self).__init__(TrQueue)
def get(self, *args: Any, **kwargs: Any) -> Any:
self._create()
return self._sync.get(*args, **kwargs)
def put(self, *args: Any, **kwargs: Any) -> None:
self._create()
return self._sync.put(*args, **kwargs)
def empty(self) -> bool:
if not self._sync:
return True
self._create()
return self._sync.empty()
def full(self) -> bool:
if not self._sync:
return False
self._create()
return self._sync.full()
def close(self) -> None:
if not self._sync:
return
self._create()
return self._sync.close()
| ForkQueue |
python | apache__airflow | airflow-core/tests/unit/always/test_project_structure.py | {
"start": 22451,
"end": 37720
} | class ____(ExampleCoverageTest, AssetsCoverageTest):
PROVIDER = "google"
CLASS_DIRS = ProjectStructureTest.CLASS_DIRS | {"operators/vertex_ai"}
DEPRECATED_CLASSES = {
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service"
".CloudDataTransferServiceS3ToGCSOperator",
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service"
".CloudDataTransferServiceGCSToGCSOperator",
"airflow.providers.google.cloud.operators.automl.AutoMLTablesListColumnSpecsOperator",
"airflow.providers.google.cloud.operators.automl.AutoMLTablesListTableSpecsOperator",
"airflow.providers.google.cloud.operators.automl.AutoMLTablesUpdateDatasetOperator",
"airflow.providers.google.cloud.operators.automl.AutoMLDeployModelOperator",
"airflow.providers.google.cloud.operators.automl.AutoMLTrainModelOperator",
"airflow.providers.google.cloud.operators.automl.AutoMLPredictOperator",
"airflow.providers.google.cloud.operators.automl.AutoMLCreateDatasetOperator",
"airflow.providers.google.cloud.operators.automl.AutoMLImportDataOperator",
"airflow.providers.google.cloud.operators.automl.AutoMLGetModelOperator",
"airflow.providers.google.cloud.operators.automl.AutoMLDeleteModelOperator",
"airflow.providers.google.cloud.operators.automl.AutoMLListDatasetOperator",
"airflow.providers.google.cloud.operators.automl.AutoMLDeleteDatasetOperator",
"airflow.providers.google.cloud.operators.vertex_ai.auto_ml.CreateAutoMLVideoTrainingJobOperator",
"airflow.providers.google.cloud.operators.bigquery.BigQueryCreateEmptyTableOperator",
"airflow.providers.google.cloud.operators.bigquery.BigQueryCreateExternalTableOperator",
"airflow.providers.google.cloud.operators.datapipeline.CreateDataPipelineOperator",
"airflow.providers.google.cloud.operators.datapipeline.RunDataPipelineOperator",
"airflow.providers.google.cloud.operators.mlengine.MLEngineCreateModelOperator",
"airflow.providers.google.cloud.operators.vertex_ai.generative_model.TextGenerationModelPredictOperator",
"airflow.providers.google.marketing_platform.operators.GoogleDisplayVideo360CreateQueryOperator",
"airflow.providers.google.marketing_platform.operators.GoogleDisplayVideo360DeleteReportOperator",
"airflow.providers.google.marketing_platform.operators.GoogleDisplayVideo360RunQueryOperator",
"airflow.providers.google.marketing_platform.operators.GoogleDisplayVideo360DownloadReportV2Operator",
"airflow.providers.google.marketing_platform.operators.GoogleDisplayVideo360UploadLineItemsOperator",
"airflow.providers.google.marketing_platform.operators.GoogleDisplayVideo360DownloadLineItemsOperator",
"airflow.providers.google.marketing_platform.sensors.GoogleDisplayVideo360RunQuerySensor",
"airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook",
"airflow.providers.google.cloud.links.datacatalog.DataCatalogEntryGroupLink",
"airflow.providers.google.cloud.links.datacatalog.DataCatalogEntryLink",
"airflow.providers.google.cloud.links.datacatalog.DataCatalogTagTemplateLink",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogCreateEntryOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogCreateEntryGroupOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogCreateTagOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogCreateTagTemplateOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogCreateTagTemplateFieldOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogDeleteEntryGroupOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogDeleteTagOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogDeleteTagTemplateOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogDeleteTagTemplateFieldOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogGetEntryOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogGetEntryGroupOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogGetTagTemplateOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogListTagsOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogLookupEntryOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogRenameTagTemplateFieldOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogSearchCatalogOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogUpdateEntryOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogUpdateTagOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogUpdateTagTemplateOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogCreateEntryOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogUpdateTagTemplateFieldOperator",
"airflow.providers.google.cloud.operators.vertex_ai.generative_model.GenerateFromCachedContentOperator",
"airflow.providers.google.cloud.operators.vertex_ai.generative_model.CreateCachedContentOperator",
"airflow.providers.google.cloud.operators.vertex_ai.generative_model.CountTokensOperator",
"airflow.providers.google.cloud.operators.vertex_ai.generative_model.SupervisedFineTuningTrainOperator",
"airflow.providers.google.cloud.operators.vertex_ai.generative_model.GenerativeModelGenerateContentOperator",
"airflow.providers.google.cloud.operators.vertex_ai.generative_model.TextEmbeddingModelGetEmbeddingsOperator",
}
BASE_CLASSES = {
"airflow.providers.google.cloud.operators.alloy_db.AlloyDBBaseOperator",
"airflow.providers.google.cloud.operators.alloy_db.AlloyDBWriteBaseOperator",
"airflow.providers.google.cloud.operators.compute.ComputeEngineBaseOperator",
"airflow.providers.google.cloud.transfers.bigquery_to_sql.BigQueryToSqlBaseOperator",
"airflow.providers.google.cloud.operators.cloud_sql.CloudSQLBaseOperator",
"airflow.providers.google.cloud.operators.dataproc.DataprocJobBaseOperator",
"airflow.providers.google.cloud.operators.dataproc._DataprocStartStopClusterBaseOperator",
"airflow.providers.google.cloud.operators.dataplex.DataplexCatalogBaseOperator",
"airflow.providers.google.cloud.operators.managed_kafka.ManagedKafkaBaseOperator",
"airflow.providers.google.cloud.operators.vertex_ai.custom_job.CustomTrainingJobBaseOperator",
"airflow.providers.google.cloud.operators.vertex_ai.ray.RayBaseOperator",
"airflow.providers.google.cloud.operators.cloud_base.GoogleCloudBaseOperator",
"airflow.providers.google.marketing_platform.operators.search_ads._GoogleSearchAdsBaseOperator",
}
MISSING_EXAMPLES_FOR_CLASSES = {
"airflow.providers.google.cloud.operators.dlp.CloudDLPRedactImageOperator",
"airflow.providers.google.cloud.transfers.cassandra_to_gcs.CassandraToGCSOperator",
"airflow.providers.google.cloud.transfers.adls_to_gcs.ADLSToGCSOperator",
"airflow.providers.google.cloud.transfers.sql_to_gcs.BaseSQLToGCSOperator",
"airflow.providers.google.cloud.operators.vertex_ai.endpoint_service.GetEndpointOperator",
"airflow.providers.google.cloud.operators.vertex_ai.auto_ml.AutoMLTrainingJobBaseOperator",
"airflow.providers.google.cloud.operators.vertex_ai.endpoint_service.UpdateEndpointOperator",
"airflow.providers.google.cloud.operators.vertex_ai.batch_prediction_job.GetBatchPredictionJobOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogDeleteEntryOperator",
"airflow.providers.google.cloud.operators.vertex_ai.generative_model.DeleteExperimentRunOperator",
}
ASSETS_NOT_REQUIRED = {
"airflow.providers.google.cloud.operators.automl.AutoMLDeleteDatasetOperator",
"airflow.providers.google.cloud.operators.automl.AutoMLDeleteModelOperator",
"airflow.providers.google.cloud.operators.bigquery.BigQueryCheckOperator",
"airflow.providers.google.cloud.operators.bigquery.BigQueryDeleteDatasetOperator",
"airflow.providers.google.cloud.operators.bigquery.BigQueryDeleteTableOperator",
"airflow.providers.google.cloud.operators.bigquery.BigQueryIntervalCheckOperator",
"airflow.providers.google.cloud.operators.bigquery.BigQueryValueCheckOperator",
"airflow.providers.google.cloud.operators.bigquery_dts.BigQueryDeleteDataTransferConfigOperator",
"airflow.providers.google.cloud.operators.bigtable.BigtableDeleteInstanceOperator",
"airflow.providers.google.cloud.operators.bigtable.BigtableDeleteTableOperator",
"airflow.providers.google.cloud.operators.cloud_build.CloudBuildDeleteBuildTriggerOperator",
"airflow.providers.google.cloud.operators.cloud_memorystore.CloudMemorystoreDeleteInstanceOperator",
"airflow.providers.google.cloud.operators.cloud_memorystore."
"CloudMemorystoreMemcachedDeleteInstanceOperator",
"airflow.providers.google.cloud.operators.cloud_sql.CloudSQLBaseOperator",
"airflow.providers.google.cloud.operators.cloud_sql.CloudSQLDeleteInstanceDatabaseOperator",
"airflow.providers.google.cloud.operators.cloud_sql.CloudSQLDeleteInstanceOperator",
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service."
"CloudDataTransferServiceDeleteJobOperator",
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service."
"CloudDataTransferServiceGetOperationOperator",
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service."
"CloudDataTransferServiceListOperationsOperator",
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service."
"CloudDataTransferServicePauseOperationOperator",
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service."
"CloudDataTransferServiceResumeOperationOperator",
"airflow.providers.google.cloud.operators.compute.ComputeEngineBaseOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogDeleteEntryGroupOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogDeleteEntryOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogDeleteTagOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogDeleteTagTemplateFieldOperator",
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogDeleteTagTemplateOperator",
"airflow.providers.google.cloud.operators.datafusion.CloudDataFusionDeleteInstanceOperator",
"airflow.providers.google.cloud.operators.datafusion.CloudDataFusionDeletePipelineOperator",
"airflow.providers.google.cloud.operators.dataproc.DataprocDeleteBatchOperator",
"airflow.providers.google.cloud.operators.dataproc.DataprocDeleteClusterOperator",
"airflow.providers.google.cloud.operators.dataproc_metastore.DataprocMetastoreDeleteBackupOperator",
"airflow.providers.google.cloud.operators.dataproc_metastore.DataprocMetastoreDeleteServiceOperator",
"airflow.providers.google.cloud.operators.datastore.CloudDatastoreBeginTransactionOperator",
"airflow.providers.google.cloud.operators.datastore.CloudDatastoreDeleteOperationOperator",
"airflow.providers.google.cloud.operators.datastore.CloudDatastoreGetOperationOperator",
"airflow.providers.google.cloud.operators.datastore.CloudDatastoreRollbackOperator",
"airflow.providers.google.cloud.operators.datastore.CloudDatastoreRunQueryOperator",
"airflow.providers.google.cloud.operators.dlp.CloudDLPDeidentifyContentOperator",
"airflow.providers.google.cloud.operators.dlp.CloudDLPDeleteDLPJobOperator",
"airflow.providers.google.cloud.operators.dlp.CloudDLPDeleteDeidentifyTemplateOperator",
"airflow.providers.google.cloud.operators.dlp.CloudDLPDeleteInspectTemplateOperator",
"airflow.providers.google.cloud.operators.dlp.CloudDLPDeleteJobTriggerOperator",
"airflow.providers.google.cloud.operators.dlp.CloudDLPDeleteStoredInfoTypeOperator",
"airflow.providers.google.cloud.operators.dlp.CloudDLPInspectContentOperator",
"airflow.providers.google.cloud.operators.dlp.CloudDLPRedactImageOperator",
"airflow.providers.google.cloud.operators.dlp.CloudDLPReidentifyContentOperator",
"airflow.providers.google.cloud.operators.functions.CloudFunctionDeleteFunctionOperator",
"airflow.providers.google.cloud.operators.gcs.GCSDeleteBucketOperator",
"airflow.providers.google.cloud.operators.gcs.GCSDeleteObjectsOperator",
"airflow.providers.google.cloud.operators.kubernetes_engine.GKEDeleteClusterOperator",
"airflow.providers.google.cloud.operators.pubsub.PubSubDeleteSubscriptionOperator",
"airflow.providers.google.cloud.operators.pubsub.PubSubDeleteTopicOperator",
"airflow.providers.google.cloud.operators.spanner.SpannerDeleteDatabaseInstanceOperator",
"airflow.providers.google.cloud.operators.spanner.SpannerDeleteInstanceOperator",
"airflow.providers.google.cloud.operators.stackdriver.StackdriverDeleteAlertOperator",
"airflow.providers.google.cloud.operators.stackdriver.StackdriverDeleteNotificationChannelOperator",
"airflow.providers.google.cloud.operators.tasks.CloudTasksQueueDeleteOperator",
"airflow.providers.google.cloud.operators.tasks.CloudTasksTaskDeleteOperator",
"airflow.providers.google.cloud.operators.translate.CloudTranslateTextOperator",
"airflow.providers.google.cloud.operators.translate_speech.CloudTranslateSpeechOperator",
"airflow.providers.google.cloud.operators.vision.CloudVisionDeleteProductOperator",
"airflow.providers.google.cloud.operators.vision.CloudVisionDeleteProductSetOperator",
"airflow.providers.google.cloud.operators.vision.CloudVisionDeleteReferenceImageOperator",
"airflow.providers.google.cloud.operators.workflows.WorkflowsDeleteWorkflowOperator",
"airflow.providers.google.marketing_platform.sensors.campaign_manager."
"GoogleCampaignManagerReportSensor",
"airflow.providers.google.marketing_platform.sensors.display_video."
"GoogleDisplayVideo360GetSDFDownloadOperationSensor",
"airflow.providers.google.marketing_platform.sensors.display_video.GoogleDisplayVideo360ReportSensor",
}
@pytest.mark.xfail(reason="We did not reach full coverage yet")
def test_missing_assets(self):
super().test_missing_assets()
| TestGoogleProviderProjectStructure |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP046_0.py | {
"start": 2449,
"end": 2613
} | class ____[U]:
def more_generic(u: U, t: T) -> tuple[U, T]:
return (u, t)
# default requires 3.13
V = TypeVar("V", default=Any, bound=str)
| MixedGenerics |
python | ray-project__ray | python/ray/experimental/channel/torch_tensor_accelerator_channel.py | {
"start": 1552,
"end": 15266
} | class ____(ChannelInterface):
def __init__(
self,
writer: ray.actor.ActorHandle,
reader_and_node_list: List[Tuple["ray.actor.ActorHandle", str]],
typ: "TorchTensorType",
driver_actor_id: str,
tensor_metadata_channel: Optional["Channel"] = None,
_cpu_data_channel: Optional["Channel"] = None,
_gpu_data_channel: Optional["_TorchTensorAcceleratorChannel"] = None,
_local_channel: Optional["IntraProcessChannel"] = None,
):
"""
Can be used to send accelerator tensors nested inside other data. The data is
sent via shared memory while the accelerator tensors are sent through a P2P
transport (e.g., NCCL for GPU).
NOTE: This class is currently not thread-safe because it reads and
writes the worker-local
ray.experimental.channel.serialization_context._SerializationContext
when serializing data.
Args:
writer: The actor that may write to the channel. None signifies the
driver.
reader_and_node_list: A list of tuples, where each tuple contains a reader
actor handle and the node ID where the actor is located.
typ: Type information about the values passed through the channel.
driver_actor_id: The actor ID of the DAGDriverProxyActor.
tensor_metadata_channel: A shared-memory channel for sending tensor
metadata.
_cpu_data_channel: A shared-memory channel for sending
non-tensor data. Its writer and readers should match the given
writer and readers. If None is provided, then we assume that
there is no CPU-specific data, i.e. the task directly returned
a CUDA torch.Tensor.
_gpu_data_channel: A channel for sending torch.Tensors via accelerator.
_local_channel: A channel for sending data between the writer and
local readers.
NOTE: `tensor_metadata_channel` will be set only for testing purposes.
`_cpu_data_channel` is set for testing purposes and for deserialization.
`_gpu_data_channel` and `_local_channel` are set only during deserialization.
"""
self._writer = writer
self._reader_and_node_list = reader_and_node_list
self._typ = typ
(
remote_reader_and_node_list,
local_reader_and_node_list,
) = utils.split_readers_by_locality(self._writer, self._reader_and_node_list)
num_local_readers = len(local_reader_and_node_list)
self._local_channel = _local_channel
if self._local_channel is None and num_local_readers > 0:
# There are some local readers which are the same worker process as
# the writer. Create a local channel for the writer and the local readers.
#
# Use num_readers = 1 when creating the local channel,
# because we have channel cache to support reading
# from the same channel multiple times.
self._local_channel = IntraProcessChannel(num_readers=1)
assert len(remote_reader_and_node_list) > 0, (
"All readers are from the same actor. "
"The TorchTensorType type hint is not needed. "
"No accelerator channel will be created."
)
self._gpu_data_channel = _gpu_data_channel
if self._gpu_data_channel is None:
self._gpu_data_channel: _TorchTensorAcceleratorChannel = (
_TorchTensorAcceleratorChannel(
writer,
remote_reader_and_node_list,
typ,
_meta_channel=tensor_metadata_channel,
)
)
self._cpu_data_channel: Optional["Channel"] = _cpu_data_channel
if self._cpu_data_channel is not None:
assert (
not self._typ.direct_return
), "CPU channel should be None if direct return is enabled"
if self._cpu_data_channel is None and not self._typ.direct_return:
# Create a CPU channel to send non-tensor data.
self._cpu_data_channel = SharedMemoryType().create_channel(
writer, remote_reader_and_node_list, driver_actor_id
)
# Used for serialization.
self._worker = ray._private.worker.global_worker
self._worker.check_connected()
ctx = ChannelContext.get_current()
self.serialization_ctx = ctx.serialization_context
assert self.serialization_ctx is not None
def __reduce__(self):
return (
TorchTensorAcceleratorChannel,
(
self._writer,
self._reader_and_node_list,
self._typ,
# driver_actor_id and tensor_metadata_channel are used to initialize
# the _cpu_data_channel and _gpu_data_channel, so we don't need to
# pass them in here.
None,
None,
self._cpu_data_channel,
self._gpu_data_channel,
self._local_channel,
),
)
def ensure_registered_as_writer(self):
if self._local_channel is not None:
self._local_channel.ensure_registered_as_writer()
self._gpu_data_channel.ensure_registered_as_writer()
if self._cpu_data_channel is not None:
self._cpu_data_channel.ensure_registered_as_writer()
def ensure_registered_as_reader(self):
reader = utils.get_self_actor()
if reader == self._writer:
self._local_channel.ensure_registered_as_reader()
return
self._gpu_data_channel.ensure_registered_as_reader()
if self._cpu_data_channel is not None:
self._cpu_data_channel.ensure_registered_as_reader()
def _send_cpu_and_gpu_data(self, value: Any, timeout: Optional[float]):
self.serialization_ctx.reset_out_of_band_tensors([])
# All tensors found in `value` will be transferred via accelerator.
self.serialization_ctx.set_use_external_transport(True)
try:
# Serialize the data. All tensors that match our current device
# will be extracted into the serialization context and replaced
# with a placeholder.
cpu_data = self._worker.get_serialization_context().serialize(value)
except TypeError as e:
sio = io.StringIO()
ray.util.inspect_serializability(value, print_file=sio)
msg = (
"Could not serialize the put value "
f"{repr(value)}:\n"
f"{sio.getvalue()}"
)
raise TypeError(msg) from e
finally:
# Pop the tensors that were found during serialization of `value`.
gpu_tensors, _ = self.serialization_ctx.reset_out_of_band_tensors([])
# Reset the serialization method to now serialize torch.Tensors
# normally.
self.serialization_ctx.set_use_external_transport(False)
# First send the extracted tensors through a GPU-specific channel.
self._gpu_data_channel.write(gpu_tensors)
# Next send the non-tensor data through a CPU-specific channel. The
# data contains placeholders for the extracted tensors.
self._cpu_data_channel.write(cpu_data)
def write(self, value: Any, timeout: Optional[float] = None) -> None:
"""
Send a value that may contain torch.Tensors that should be sent via
external transport.
Case 1: Use `_local_channel` to send the data to local readers.
Case 2: Otherwise, use the following method to send the data to remote readers.
1) Serializes `value`. During serialization, all torch.Tensors that are
on the default device are extracted and replaced with a unique
placeholder. Thus, the serialized value will contain all non-tensor
data, and any tensors that were not on the default device (e.g., CPU
tensor returned by a GPU actor).
2) Sends extracted torch.Tensors via the tensor data channel (e.g.,
NCCL).
3) Sends the non-tensor data via the non-tensor data channel.
If static_non_tensor_data=True was specified, then we only perform step
(3) on the first `write` call. The reader is expected to reuse the sent
data for subsequent messages.
"""
self.ensure_registered_as_writer()
if self._local_channel is not None:
self._local_channel.write(value)
if isinstance(value, ray.exceptions.RayTaskError):
if self._typ.static_shape or self._typ.direct_return:
# Raise a fatal error to teardown the DAG.
# This error will also be caught from `CompiledDAGRef.get()`
# and raised to the user
# TODO(swang): Write exceptions to the tensor metadata or
# non-tensor data channel if it is available to make these
# exceptions recoverable.
raise value
if self._cpu_data_channel is None:
# Handle the case where _direct_return=True. In this case, we check
# that the task returned a CUDA torch.Tensor and just send it
# directly without trying to serialize it first.
import torch
# These ValueErrors will also be caught from `CompiledDAGRef.get()`
# and raised to the user
if not isinstance(value, torch.Tensor):
# TODO(swang): These errors are currently fatal for the DAG.
# This could be improved by sending the exception through the
# gpu_data_channel's CPU-based metadata channel, if one exists.
raise ValueError(
"Task annotated with _direct_return=True must "
"return a CUDA torch.Tensor, instead found value "
f"`{value}`. DAG will shut down."
)
elif not value.is_cuda:
raise ValueError(
"Task annotated with _direct_return=True must "
"return a CUDA torch.Tensor, instead found CPU tensor. "
"DAG will shut down."
)
self._gpu_data_channel.write([value], timeout=timeout)
else:
self._send_cpu_and_gpu_data(value, timeout)
def _recv_cpu_and_gpu_data(
self, tensors: List["torch.Tensor"], timeout: Optional[float] = None
) -> Any:
"""
Helper method to receive data that contains a mix of CPU and GPU data.
Args:
tensors: The GPU data. This is a list of the torch.Tensors that
were found in the sent data.
timeout: Timeout for channel receive.
"""
self.serialization_ctx.reset_out_of_band_tensors(tensors)
# Next, read and deserialize the non-tensor data. The registered custom
# deserializer will replace the found tensor placeholders with
# `tensors`.
data = self._cpu_data_channel.read(
timeout=timeout,
)
# Check that all placeholders had a corresponding tensor.
(
_,
deserialized_tensor_placeholders,
) = self.serialization_ctx.reset_out_of_band_tensors([])
assert deserialized_tensor_placeholders == set(range(len(tensors)))
return data
def read(self, timeout: Optional[float] = None) -> Any:
"""
Read a value that may contain torch.Tensors sent via external
transport.
Case 1: If the reader is a local reader and is the same actor as the writer,
then use the `_local_channel` to read the data.
Case 2: Otherwise, use the following method to read data from remote readers.
1) Receives torch.Tensors via the tensor data channel (e.g., NCCL).
2) Reads the serialized non-tensor data.
3) Deserializes the non-tensor data. During deserialization, replaces
all found placeholders with the received torch.Tensors.
If _direct_return=True was specified, then we skip step (2) and (3) and
directly return the data received in (1).
"""
self.ensure_registered_as_reader()
# If the reader is the same actor as the writer, then we can use the
# local channel to read the data.
reader = utils.get_self_actor()
if reader == self._writer:
assert self._local_channel is not None
return self._local_channel.read()
# First, read the tensor data.
tensors = self._gpu_data_channel.read(timeout)
if self._cpu_data_channel is None:
# Handle _direct_return=True. In this case, we expect to receive
# only one tensor, and we return it directly.
assert len(tensors) == 1
data = tensors[0]
else:
data = self._recv_cpu_and_gpu_data(tensors, timeout)
return data
def close(self) -> None:
self._gpu_data_channel.close()
if self._cpu_data_channel is not None:
self._cpu_data_channel.close()
if self._local_channel is not None:
self._local_channel.close()
def _torch_tensor_allocator(
shape: Union[int, Tuple[int]],
dtype: "torch.dtype",
):
"""
Allocate a tensor buffer matching the given metadata.
"""
import torch
ctx = ChannelContext.get_current()
return torch.empty(shape, dtype=dtype, device=ctx.torch_device)
| TorchTensorAcceleratorChannel |
python | huggingface__transformers | src/transformers/models/instructblip/modeling_instructblip.py | {
"start": 12480,
"end": 13665
} | class ____(PreTrainedModel):
config: InstructBlipConfig
base_model_prefix = "blip"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_supports_attention_backend = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_no_split_modules = [
"InstructBlipQFormerEmbeddings",
"InstructBlipAttention",
"InstructBlipQFormerMultiHeadAttention",
"InstructBlipQFormerSelfOutput",
]
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
factor = self.config.initializer_range
if isinstance(module, InstructBlipVisionEmbeddings):
init.trunc_normal_(module.position_embedding, mean=0.0, std=factor)
init.trunc_normal_(module.class_embedding, mean=0.0, std=factor)
elif isinstance(module, (InstructBlipForConditionalGeneration, InstructBlipModel)):
init.zeros_(module.query_tokens)
# Copied from transformers.models.blip.modeling_blip.BlipEncoder with Blip->InstructBlip
| InstructBlipPreTrainedModel |
python | bokeh__bokeh | src/bokeh/models/glyphs.py | {
"start": 29626,
"end": 30439
} | class ____(Glyph, LineGlyph):
''' Render several lines.
The data for the ``MultiLine`` glyph is different in that the vector of
values is not a vector of scalars. Rather, it is a "list of lists".
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
__example__ = "examples/reference/models/MultiLine.py"
_args = ('xs', 'ys')
xs = NumberSpec(default=field("xs"), help="""
The x-coordinates for all the lines, given as a "list of lists".
""")
ys = NumberSpec(default=field("ys"), help="""
The y-coordinates for all the lines, given as a "list of lists".
""")
line_props = Include(LineProps, help="""
The {prop} values for the lines.
""")
| MultiLine |
python | scrapy__scrapy | tests/test_utils_spider.py | {
"start": 214,
"end": 807
} | class ____(Spider):
name = "myspider2"
def test_iterate_spider_output():
i = Item()
r = Request("http://scrapytest.org")
o = object()
assert list(iterate_spider_output(i)) == [i]
assert list(iterate_spider_output(r)) == [r]
assert list(iterate_spider_output(o)) == [o]
assert list(iterate_spider_output([r, i, o])) == [r, i, o]
def test_iter_spider_classes():
import tests.test_utils_spider # noqa: PLW0406,PLC0415 # pylint: disable=import-self
it = iter_spider_classes(tests.test_utils_spider)
assert set(it) == {MySpider1, MySpider2}
| MySpider2 |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 31962,
"end": 32867
} | class ____(ObjectBaseModel):
"""An ORM representation of a block type"""
name: Name = Field(default=..., description="A block type's name")
slug: str = Field(default=..., description="A block type's slug")
logo_url: Optional[HttpUrl] = Field(
default=None, description="Web URL for the block type's logo"
)
documentation_url: Optional[HttpUrl] = Field(
default=None, description="Web URL for the block type's documentation"
)
description: Optional[str] = Field(
default=None,
description="A short blurb about the corresponding block's intended use",
)
code_example: Optional[str] = Field(
default=None,
description="A code snippet demonstrating use of the corresponding block",
)
is_protected: bool = Field(
default=False, description="Protected block types cannot be modified via API."
)
| BlockType |
python | scipy__scipy | benchmarks/benchmarks/sparse.py | {
"start": 18507,
"end": 19199
} | class ____(Benchmark):
param_names = ['sparse_type', 'density', 'format', 'explicit']
params = [
['spmatrix', 'sparray'],
[0.01, 0.1, 0.5],
['csr', 'csc', 'coo'],
[True, False],
]
def setup(self, sparse_type, density, format, explicit):
n = 1000
warnings.simplefilter('ignore', sparse.SparseEfficiencyWarning)
if sparse_type == "sparray":
self.X = sparse.random_array((n, n), format=format, density=density)
else:
self.X = sparse.random(n, n, format=format, density=density)
def time_argmax(self, sparse_type, density, format, explicit):
self.X.argmax(explicit=explicit)
| Argmax |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 13556,
"end": 14001
} | class ____(sgqlc.types.Enum):
"""Properties by which discussion poll option connections can be
ordered.
Enumeration Choices:
* `AUTHORED_ORDER`: Order poll options by the order that the poll
author specified when creating the poll.
* `VOTE_COUNT`: Order poll options by the number of votes it has.
"""
__schema__ = github_schema
__choices__ = ("AUTHORED_ORDER", "VOTE_COUNT")
| DiscussionPollOptionOrderField |
python | run-llama__llama_index | llama-index-integrations/sparse_embeddings/llama-index-sparse-embeddings-fastembed/llama_index/sparse_embeddings/fastembed/base.py | {
"start": 360,
"end": 3931
} | class ____(BaseSparseEmbedding):
"""
Qdrant FastEmbedding Sparse models.
FastEmbed is a lightweight, fast, Python library built for embedding generation.
See more documentation at:
* https://github.com/qdrant/fastembed/
* https://qdrant.github.io/fastembed/.
To use this class, you must install the `fastembed` Python package.
`pip install fastembed`
Example:
from llama_index.sparse_embeddings.fastembed import FastEmbedSparseEmbedding
fastembed = FastEmbedSparseEmbedding()
"""
model_name: str = Field(
"prithivida/Splade_PP_en_v1",
description="Name of the FastEmbedding sparse model to use.\n"
"Defaults to 'prithivida/Splade_PP_en_v1'.\n"
"Find the list of supported models at "
"https://qdrant.github.io/fastembed/examples/Supported_Models/",
)
max_length: int = Field(
512,
description="The maximum number of tokens. Defaults to 512.\n"
"Unknown behavior for values > 512.",
)
cache_dir: Optional[str] = Field(
None,
description="The path to the cache directory.\n"
"Defaults to `local_cache` in the parent directory",
)
threads: Optional[int] = Field(
None,
description="The number of threads single onnxruntime session can use.\n"
"Defaults to None",
)
_model: SparseTextEmbedding = PrivateAttr()
@classmethod
def class_name(self) -> str:
return "FastEmbedSparseEmbedding"
def __init__(
self,
model_name: Optional[str] = "prithivida/Splade_PP_en_v1",
max_length: Optional[int] = 512,
cache_dir: Optional[str] = None,
threads: Optional[int] = None,
providers: Optional[List[Any]] = None,
):
super().__init__(
model_name=model_name,
max_length=max_length,
cache_dir=cache_dir,
threads=threads,
)
self._model = SparseTextEmbedding(
model_name=model_name,
max_length=max_length,
cache_dir=cache_dir,
threads=threads,
providers=providers,
)
def _fastembed_to_dict(
self, fastembed_results: List[FastEmbedSparseEmbedding]
) -> List[SparseEmbedding]:
"""Convert FastEmbedSparseEmbedding to SparseEmbedding dict."""
results = []
for embedding in fastembed_results:
result_dict = {}
for indice, value in zip(embedding.indices, embedding.values):
result_dict[int(indice)] = float(value)
results.append(result_dict)
return results
def _get_text_embedding(self, text: str) -> SparseEmbedding:
results = self._model.passage_embed([text])
return self._fastembed_to_dict(results)[0]
async def _aget_text_embedding(self, text: str) -> SparseEmbedding:
return self._get_text_embedding(text)
def _get_text_embeddings(self, texts: List[str]) -> List[SparseEmbedding]:
results = self._model.passage_embed(texts)
return self._fastembed_to_dict(results)
async def _aget_text_embeddings(self, texts: List[str]) -> List[SparseEmbedding]:
return self._get_text_embeddings(texts)
def _get_query_embedding(self, query: str) -> SparseEmbedding:
results = self._model.query_embed(query)
return self._fastembed_to_dict(results)[0]
async def _aget_query_embedding(self, query: str) -> SparseEmbedding:
return self._get_query_embedding(query)
| FastEmbedSparseEmbedding |
python | scipy__scipy | scipy/stats/tests/test_stats.py | {
"start": 283595,
"end": 288089
} | class ____:
@pytest.mark.filterwarnings(
"ignore:divide by zero encountered in log:RuntimeWarning:dask"
)
def test_0(self, xp):
a = [1, 0, 2]
desired = 0
check_equal_gmean(a, desired, xp=xp)
def test_1d(self, xp):
# Test a 1d case
a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
desired = 45.2872868812
check_equal_gmean(a, desired, xp=xp)
a = [1, 2, 3, 4]
desired = power(1 * 2 * 3 * 4, 1. / 4.)
check_equal_gmean(a, desired, rtol=1e-14, xp=xp)
a = array([1, 2, 3, 4], float32)
desired = power(1 * 2 * 3 * 4, 1. / 4.)
check_equal_gmean(a, desired, dtype=xp.float32, xp=xp)
# Note the next tests use axis=None as default, not axis=0
def test_2d(self, xp):
# Test a 2d case
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = 52.8885199
check_equal_gmean(a, desired, xp=xp)
def test_2d_axis0(self, xp):
# Test a 2d case with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371])
check_equal_gmean(a, desired, axis=0, xp=xp)
a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
desired = array([1, 2, 3, 4])
check_equal_gmean(a, desired, axis=0, rtol=1e-14, xp=xp)
def test_2d_axis1(self, xp):
# Test a 2d case with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = np.array([22.13363839, 64.02171746, 104.40086817])
check_equal_gmean(a, desired, axis=1, xp=xp)
a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
v = power(1 * 2 * 3 * 4, 1. / 4.)
desired = array([v, v, v])
check_equal_gmean(a, desired, axis=1, rtol=1e-14, xp=xp)
def test_large_values(self, xp):
a = array([1e100, 1e200, 1e300])
desired = 1e200
check_equal_gmean(a, desired, rtol=1e-13, xp=xp)
@pytest.mark.filterwarnings(
"ignore:divide by zero encountered in log:RuntimeWarning:dask"
)
def test_1d_with_0(self, xp):
# Test a 1d case with zero element
a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 0]
desired = 0.0 # due to exp(-inf)=0
with np.errstate(all='ignore'):
check_equal_gmean(a, desired, xp=xp)
@pytest.mark.filterwarnings(
"ignore:invalid value encountered in log:RuntimeWarning:dask"
)
def test_1d_neg(self, xp):
# Test a 1d case with negative element
a = [10, 20, 30, 40, 50, 60, 70, 80, 90, -1]
desired = np.nan # due to log(-1) = nan
with np.errstate(invalid='ignore'):
check_equal_gmean(a, desired, xp=xp)
@skip_xp_backends(
np_only=True,
reason='array-likes only supported for NumPy backend',
)
def test_weights_1d_list(self, xp):
# Desired result from:
# https://www.dummies.com/education/math/business-statistics/how-to-find-the-weighted-geometric-mean-of-a-data-set/
a = [1, 2, 3, 4, 5]
weights = [2, 5, 6, 4, 3]
desired = 2.77748
# all the other tests use `check_equal_gmean`, which now converts
# the input to an xp-array before calling `gmean`. This time, check
# that the function still accepts the lists of ints.
res = stats.gmean(a, weights=weights)
xp_assert_close(res, np.asarray(desired), rtol=1e-5)
def test_weights_1d(self, xp):
# Desired result from:
# https://www.dummies.com/education/math/business-statistics/how-to-find-the-weighted-geometric-mean-of-a-data-set/
a = np.array([1, 2, 3, 4, 5])
weights = np.array([2, 5, 6, 4, 3])
desired = 2.77748
check_equal_gmean(a, desired, weights=weights, rtol=1e-5, xp=xp)
@skip_xp_invalid_arg
def test_weights_masked_1d_array(self, xp):
# Desired result from:
# https://www.dummies.com/education/math/business-statistics/how-to-find-the-weighted-geometric-mean-of-a-data-set/
a = np.array([1, 2, 3, 4, 5, 6])
weights = np.ma.array([2, 5, 6, 4, 3, 5], mask=[0, 0, 0, 0, 0, 1])
desired = 2.77748
xp = np.ma # check_equal_gmean uses xp.asarray; this will preserve the mask
check_equal_gmean(a, desired, weights=weights, rtol=1e-5,
dtype=np.float64, xp=xp)
@make_xp_test_case(stats.pmean)
| TestGMean |
python | gevent__gevent | src/greentest/3.14/test_signal.py | {
"start": 43781,
"end": 51424
} | class ____(unittest.TestCase):
"""
Stress signal delivery, especially when a signal arrives in
the middle of recomputing the signal state or executing
previously tripped signal handlers.
"""
def setsig(self, signum, handler):
old_handler = signal.signal(signum, handler)
self.addCleanup(signal.signal, signum, old_handler)
def measure_itimer_resolution(self):
N = 20
times = []
def handler(signum=None, frame=None):
if len(times) < N:
times.append(time.perf_counter())
# 1 µs is the smallest possible timer interval,
# we want to measure what the concrete duration
# will be on this platform
signal.setitimer(signal.ITIMER_REAL, 1e-6)
self.addCleanup(signal.setitimer, signal.ITIMER_REAL, 0)
self.setsig(signal.SIGALRM, handler)
handler()
while len(times) < N:
time.sleep(1e-3)
durations = [times[i+1] - times[i] for i in range(len(times) - 1)]
med = statistics.median(durations)
if support.verbose:
print("detected median itimer() resolution: %.6f s." % (med,))
return med
def decide_itimer_count(self):
# Some systems have poor setitimer() resolution (for example
# measured around 20 ms. on FreeBSD 9), so decide on a reasonable
# number of sequential timers based on that.
reso = self.measure_itimer_resolution()
if reso <= 1e-4:
return 10000
elif reso <= 1e-2:
return 100
else:
self.skipTest("detected itimer resolution (%.3f s.) too high "
"(> 10 ms.) on this platform (or system too busy)"
% (reso,))
@unittest.skipUnless(hasattr(signal, "setitimer"),
"test needs setitimer()")
def test_stress_delivery_dependent(self):
"""
This test uses dependent signal handlers.
"""
N = self.decide_itimer_count()
sigs = []
def first_handler(signum, frame):
# 1e-6 is the minimum non-zero value for `setitimer()`.
# Choose a random delay so as to improve chances of
# triggering a race condition. Ideally the signal is received
# when inside critical signal-handling routines such as
# Py_MakePendingCalls().
signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5)
def second_handler(signum=None, frame=None):
sigs.append(signum)
# Here on Linux, SIGPROF > SIGALRM > SIGUSR1. By using both
# ascending and descending sequences (SIGUSR1 then SIGALRM,
# SIGPROF then SIGALRM), we maximize chances of hitting a bug.
self.setsig(signal.SIGPROF, first_handler)
self.setsig(signal.SIGUSR1, first_handler)
self.setsig(signal.SIGALRM, second_handler) # for ITIMER_REAL
expected_sigs = 0
deadline = time.monotonic() + support.SHORT_TIMEOUT
while expected_sigs < N:
os.kill(os.getpid(), signal.SIGPROF)
expected_sigs += 1
# Wait for handlers to run to avoid signal coalescing
while len(sigs) < expected_sigs and time.monotonic() < deadline:
time.sleep(1e-5)
os.kill(os.getpid(), signal.SIGUSR1)
expected_sigs += 1
while len(sigs) < expected_sigs and time.monotonic() < deadline:
time.sleep(1e-5)
# All ITIMER_REAL signals should have been delivered to the
# Python handler
self.assertEqual(len(sigs), N, "Some signals were lost")
@unittest.skipUnless(hasattr(signal, "setitimer"),
"test needs setitimer()")
def test_stress_delivery_simultaneous(self):
"""
This test uses simultaneous signal handlers.
"""
N = self.decide_itimer_count()
sigs = []
def handler(signum, frame):
sigs.append(signum)
# On Android, SIGUSR1 is unreliable when used in close proximity to
# another signal – see Android/testbed/app/src/main/python/main.py.
# So we use a different signal.
self.setsig(signal.SIGUSR2, handler)
self.setsig(signal.SIGALRM, handler) # for ITIMER_REAL
expected_sigs = 0
while expected_sigs < N:
# Hopefully the SIGALRM will be received somewhere during
# initial processing of SIGUSR2.
signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5)
os.kill(os.getpid(), signal.SIGUSR2)
expected_sigs += 2
# Wait for handlers to run to avoid signal coalescing
for _ in support.sleeping_retry(support.SHORT_TIMEOUT):
if len(sigs) >= expected_sigs:
break
# All ITIMER_REAL signals should have been delivered to the
# Python handler
self.assertEqual(len(sigs), N, "Some signals were lost")
@support.requires_gil_enabled("gh-121065: test is flaky on free-threaded build")
@unittest.skipIf(is_apple, "crashes due to system bug (FB13453490)")
@unittest.skipUnless(hasattr(signal, "SIGUSR1"),
"test needs SIGUSR1")
@threading_helper.requires_working_threading()
def test_stress_modifying_handlers(self):
# bpo-43406: race condition between trip_signal() and signal.signal
signum = signal.SIGUSR1
num_sent_signals = 0
num_received_signals = 0
do_stop = False
def custom_handler(signum, frame):
nonlocal num_received_signals
num_received_signals += 1
def set_interrupts():
nonlocal num_sent_signals
while not do_stop:
signal.raise_signal(signum)
num_sent_signals += 1
def cycle_handlers():
while num_sent_signals < 100 or num_received_signals < 1:
for i in range(20000):
# Cycle between a Python-defined and a non-Python handler
for handler in [custom_handler, signal.SIG_IGN]:
signal.signal(signum, handler)
old_handler = signal.signal(signum, custom_handler)
self.addCleanup(signal.signal, signum, old_handler)
t = threading.Thread(target=set_interrupts)
try:
ignored = False
with support.catch_unraisable_exception() as cm:
t.start()
cycle_handlers()
do_stop = True
t.join()
if cm.unraisable is not None:
# An unraisable exception may be printed out when
# a signal is ignored due to the aforementioned
# race condition, check it.
self.assertIsInstance(cm.unraisable.exc_value, OSError)
self.assertIn(
f"Signal {signum:d} ignored due to race condition",
str(cm.unraisable.exc_value))
ignored = True
# bpo-43406: Even if it is unlikely, it's technically possible that
# all signals were ignored because of race conditions.
if not ignored:
# Sanity check that some signals were received, but not all
self.assertGreater(num_received_signals, 0)
self.assertLessEqual(num_received_signals, num_sent_signals)
finally:
do_stop = True
t.join()
| StressTest |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/beta/messages/batches.py | {
"start": 35507,
"end": 36116
} | class ____:
def __init__(self, batches: AsyncBatches) -> None:
self._batches = batches
self.create = async_to_streamed_response_wrapper(
batches.create,
)
self.retrieve = async_to_streamed_response_wrapper(
batches.retrieve,
)
self.list = async_to_streamed_response_wrapper(
batches.list,
)
self.delete = async_to_streamed_response_wrapper(
batches.delete,
)
self.cancel = async_to_streamed_response_wrapper(
batches.cancel,
)
| AsyncBatchesWithStreamingResponse |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/passthrough.py | {
"start": 10210,
"end": 20852
} | class ____(RunnableSerializable[dict[str, Any], dict[str, Any]]):
"""Runnable that assigns key-value pairs to `dict[str, Any]` inputs.
The `RunnableAssign` class takes input dictionaries and, through a
`RunnableParallel` instance, applies transformations, then combines
these with the original data, introducing new key-value pairs based
on the mapper's logic.
Examples:
```python
# This is a RunnableAssign
from langchain_core.runnables.passthrough import (
RunnableAssign,
RunnableParallel,
)
from langchain_core.runnables.base import RunnableLambda
def add_ten(x: dict[str, int]) -> dict[str, int]:
return {"added": x["input"] + 10}
mapper = RunnableParallel(
{
"add_step": RunnableLambda(add_ten),
}
)
runnable_assign = RunnableAssign(mapper)
# Synchronous example
runnable_assign.invoke({"input": 5})
# returns {'input': 5, 'add_step': {'added': 15}}
# Asynchronous example
await runnable_assign.ainvoke({"input": 5})
# returns {'input': 5, 'add_step': {'added': 15}}
```
"""
mapper: RunnableParallel
def __init__(self, mapper: RunnableParallel[dict[str, Any]], **kwargs: Any) -> None:
"""Create a `RunnableAssign`.
Args:
mapper: A `RunnableParallel` instance that will be used to transform the
input dictionary.
"""
super().__init__(mapper=mapper, **kwargs)
@classmethod
@override
def is_lc_serializable(cls) -> bool:
"""Return `True` as this class is serializable."""
return True
@classmethod
@override
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "schema", "runnable"]`
"""
return ["langchain", "schema", "runnable"]
@override
def get_name(self, suffix: str | None = None, *, name: str | None = None) -> str:
name = (
name
or self.name
or f"RunnableAssign<{','.join(self.mapper.steps__.keys())}>"
)
return super().get_name(suffix, name=name)
@override
def get_input_schema(self, config: RunnableConfig | None = None) -> type[BaseModel]:
map_input_schema = self.mapper.get_input_schema(config)
if not issubclass(map_input_schema, RootModel):
# ie. it's a dict
return map_input_schema
return super().get_input_schema(config)
@override
def get_output_schema(
self, config: RunnableConfig | None = None
) -> type[BaseModel]:
map_input_schema = self.mapper.get_input_schema(config)
map_output_schema = self.mapper.get_output_schema(config)
if not issubclass(map_input_schema, RootModel) and not issubclass(
map_output_schema, RootModel
):
fields = {}
for name, field_info in map_input_schema.model_fields.items():
fields[name] = (field_info.annotation, field_info.default)
for name, field_info in map_output_schema.model_fields.items():
fields[name] = (field_info.annotation, field_info.default)
return create_model_v2("RunnableAssignOutput", field_definitions=fields)
if not issubclass(map_output_schema, RootModel):
# ie. only map output is a dict
# ie. input type is either unknown or inferred incorrectly
return map_output_schema
return super().get_output_schema(config)
@property
@override
def config_specs(self) -> list[ConfigurableFieldSpec]:
return self.mapper.config_specs
@override
def get_graph(self, config: RunnableConfig | None = None) -> Graph:
# get graph from mapper
graph = self.mapper.get_graph(config)
# add passthrough node and edges
input_node = graph.first_node()
output_node = graph.last_node()
if input_node is not None and output_node is not None:
passthrough_node = graph.add_node(_graph_passthrough)
graph.add_edge(input_node, passthrough_node)
graph.add_edge(passthrough_node, output_node)
return graph
def _invoke(
self,
value: dict[str, Any],
run_manager: CallbackManagerForChainRun,
config: RunnableConfig,
**kwargs: Any,
) -> dict[str, Any]:
if not isinstance(value, dict):
msg = "The input to RunnablePassthrough.assign() must be a dict."
raise ValueError(msg) # noqa: TRY004
return {
**value,
**self.mapper.invoke(
value,
patch_config(config, callbacks=run_manager.get_child()),
**kwargs,
),
}
@override
def invoke(
self,
input: dict[str, Any],
config: RunnableConfig | None = None,
**kwargs: Any,
) -> dict[str, Any]:
return self._call_with_config(self._invoke, input, config, **kwargs)
async def _ainvoke(
self,
value: dict[str, Any],
run_manager: AsyncCallbackManagerForChainRun,
config: RunnableConfig,
**kwargs: Any,
) -> dict[str, Any]:
if not isinstance(value, dict):
msg = "The input to RunnablePassthrough.assign() must be a dict."
raise ValueError(msg) # noqa: TRY004
return {
**value,
**await self.mapper.ainvoke(
value,
patch_config(config, callbacks=run_manager.get_child()),
**kwargs,
),
}
@override
async def ainvoke(
self,
input: dict[str, Any],
config: RunnableConfig | None = None,
**kwargs: Any,
) -> dict[str, Any]:
return await self._acall_with_config(self._ainvoke, input, config, **kwargs)
def _transform(
self,
values: Iterator[dict[str, Any]],
run_manager: CallbackManagerForChainRun,
config: RunnableConfig,
**kwargs: Any,
) -> Iterator[dict[str, Any]]:
# collect mapper keys
mapper_keys = set(self.mapper.steps__.keys())
# create two streams, one for the map and one for the passthrough
for_passthrough, for_map = safetee(values, 2, lock=threading.Lock())
# create map output stream
map_output = self.mapper.transform(
for_map,
patch_config(
config,
callbacks=run_manager.get_child(),
),
**kwargs,
)
# get executor to start map output stream in background
with get_executor_for_config(config) as executor:
# start map output stream
first_map_chunk_future = executor.submit(
next,
map_output,
None,
)
# consume passthrough stream
for chunk in for_passthrough:
if not isinstance(chunk, dict):
msg = "The input to RunnablePassthrough.assign() must be a dict."
raise ValueError(msg) # noqa: TRY004
# remove mapper keys from passthrough chunk, to be overwritten by map
filtered = AddableDict(
{k: v for k, v in chunk.items() if k not in mapper_keys}
)
if filtered:
yield filtered
# yield map output
yield cast("dict[str, Any]", first_map_chunk_future.result())
for chunk in map_output:
yield chunk
@override
def transform(
self,
input: Iterator[dict[str, Any]],
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> Iterator[dict[str, Any]]:
yield from self._transform_stream_with_config(
input, self._transform, config, **kwargs
)
async def _atransform(
self,
values: AsyncIterator[dict[str, Any]],
run_manager: AsyncCallbackManagerForChainRun,
config: RunnableConfig,
**kwargs: Any,
) -> AsyncIterator[dict[str, Any]]:
# collect mapper keys
mapper_keys = set(self.mapper.steps__.keys())
# create two streams, one for the map and one for the passthrough
for_passthrough, for_map = atee(values, 2, lock=asyncio.Lock())
# create map output stream
map_output = self.mapper.atransform(
for_map,
patch_config(
config,
callbacks=run_manager.get_child(),
),
**kwargs,
)
# start map output stream
first_map_chunk_task: asyncio.Task = asyncio.create_task(
py_anext(map_output, None), # type: ignore[arg-type]
)
# consume passthrough stream
async for chunk in for_passthrough:
if not isinstance(chunk, dict):
msg = "The input to RunnablePassthrough.assign() must be a dict."
raise ValueError(msg) # noqa: TRY004
# remove mapper keys from passthrough chunk, to be overwritten by map output
filtered = AddableDict(
{k: v for k, v in chunk.items() if k not in mapper_keys}
)
if filtered:
yield filtered
# yield map output
yield await first_map_chunk_task
async for chunk in map_output:
yield chunk
@override
async def atransform(
self,
input: AsyncIterator[dict[str, Any]],
config: RunnableConfig | None = None,
**kwargs: Any,
) -> AsyncIterator[dict[str, Any]]:
async for chunk in self._atransform_stream_with_config(
input, self._atransform, config, **kwargs
):
yield chunk
@override
def stream(
self,
input: dict[str, Any],
config: RunnableConfig | None = None,
**kwargs: Any,
) -> Iterator[dict[str, Any]]:
return self.transform(iter([input]), config, **kwargs)
@override
async def astream(
self,
input: dict[str, Any],
config: RunnableConfig | None = None,
**kwargs: Any,
) -> AsyncIterator[dict[str, Any]]:
async def input_aiter() -> AsyncIterator[dict[str, Any]]:
yield input
async for chunk in self.atransform(input_aiter(), config, **kwargs):
yield chunk
| RunnableAssign |
python | dagster-io__dagster | examples/docs_projects/project_mini/src/project_mini/defs/dynamic_fanout/dynamic_fanout.py | {
"start": 280,
"end": 9773
} | class ____(dg.Config):
input_data_path: str
processing_threshold: float = 0.5
# Helper functions for your domain logic
def extract_processing_units(record: dict[str, Any]) -> list[dict[str, Any]]:
"""Extract 10-30 processing units from a single data record."""
# Replace with your actual unit extraction logic
num_units = min(30, max(10, hash(record["id"]) % 20 + 10))
units = [
{"unit_data": f"unit_{i}", "properties": [i * 10, i * 10, 50, 50]} for i in range(num_units)
]
return units
def process_single_unit(unit_data: dict[str, Any]) -> dict[str, Any]:
"""Process a single data unit (your expensive computation)."""
# Replace with your actual unit processing logic
return {"processed": True, "result": f"processed_{unit_data}"}
def aggregate_unit_results(
record: dict[str, Any], unit_results: list[dict[str, Any]]
) -> dict[str, Any]:
"""Combine unit processing results into final record output."""
# Replace with your actual aggregation logic
return {
"record_id": record["id"],
"aggregated_output": f"combined_result_from_{len(unit_results)}_units",
"output_data": unit_results,
}
# =============================================================================
# MAIN PIPELINE IMPLEMENTATION
# =============================================================================
@dg.asset
def input_records(config: DataProcessingConfig) -> list[dict[str, Any]]:
"""MAIN PIPELINE: Load multiple related data records for processing."""
# Replace with your actual data loading logic
records = [
{"id": f"record_{i}", "data_path": f"path/to/data_{i}.dat"}
for i in range(5) # Your multiple related records
]
return records
# =============================================================================
# SUB-PIPELINE OPERATIONS
# =============================================================================
@dg.op(out=dg.DynamicOut())
def trigger_sub_pipelines(context: dg.OpExecutionContext, input_records: list[dict[str, Any]]):
"""MAIN PIPELINE: Launch sub-pipeline for each record individually
(First layer of parallelization).
"The sub_pipelines are triggered based on the number of inputs"
"""
context.log.info(f"Launching {len(input_records)} sub-pipelines based on input count")
# Each record triggers one sub-pipeline
for record in input_records:
yield dg.DynamicOutput(record, mapping_key=record["id"])
# start_option_a
@dg.op
def sub_pipeline_process_record_option_a(
context: dg.OpExecutionContext, record: dict[str, Any]
) -> dict[str, Any]:
"""SUB-PIPELINE: Complete processing workflow for a single data record.
1. "Extract processing units from the record (10-30 units per record)"
2. "Each unit goes through processing (Second layer of parallelization)"
[Currently sequential as specified]
3. "Results are aggregated to create final record output"
"""
context.log.info(f"Sub-pipeline processing record: {record['id']}")
# Step 1: Extract processing units from record (10-30 units)
processing_units = extract_processing_units(record)
context.log.info(f"Extracted {len(processing_units)} units from {record['id']}")
# Step 2: Process each unit (Second layer of parallelization)
# Currently sequential as specified, but can be parallelized when ready
unit_results = []
# Sequential processing (current implementation)
for i, unit in enumerate(processing_units):
context.log.info(f"Processing unit {i + 1}/{len(processing_units)} for {record['id']}")
result = process_single_unit(unit)
unit_results.append(result)
# Step 3: Aggregate results to create final record output
aggregated_output = aggregate_unit_results(record, unit_results)
context.log.info(
f"Sub-pipeline completed for {record['id']}: aggregated {len(unit_results)} unit results"
)
return {
"record_id": record["id"],
"sub_pipeline_result": aggregated_output,
"units_processed": len(unit_results),
"original_record": record,
}
# end_option_a
# start_option_b
@dg.op
def sub_pipeline_process_record_option_b(
context: dg.OpExecutionContext, record: dict[str, Any]
) -> dict[str, Any]:
"""SUB-PIPELINE: Complete processing workflow for a single data record.
1. "Extract processing units from the record (10-30 units per record)"
2. "Each unit goes through processing (Second layer of parallelization)"
[Implemented using multiprocessing pool]
3. "Results are aggregated to create final record output"
"""
context.log.info(f"Sub-pipeline processing record: {record['id']}")
# Step 1: Extract processing units from record (10-30 units)
processing_units = extract_processing_units(record)
context.log.info(f"Extracted {len(processing_units)} units from {record['id']}")
# Step 2: Process each unit (Second layer of parallelization)
# Currently sequential as specified, but can be parallelized when ready
unit_results = []
# Parallel processing (enable when ready for second layer)
def process_unit_worker(unit):
return process_single_unit(unit)
if len(processing_units) > 1:
num_processes = min(multiprocessing.cpu_count() - 1, len(processing_units))
with multiprocessing.Pool(processes=num_processes) as pool:
unit_results = pool.map(process_unit_worker, processing_units)
else:
unit_results = [process_unit_worker(processing_units[0])] if processing_units else []
# Step 3: Aggregate results to create final record output
aggregated_output = aggregate_unit_results(record, unit_results)
context.log.info(
f"Sub-pipeline completed for {record['id']}: aggregated {len(unit_results)} unit results"
)
return {
"record_id": record["id"],
"sub_pipeline_result": aggregated_output,
"units_processed": len(unit_results),
"original_record": record,
}
# end_option_b
@dg.op
def collect_sub_pipeline_results(
context: dg.OpExecutionContext, sub_pipeline_results: list[dict[str, Any]]
) -> list[dict[str, Any]]:
"""MAIN PIPELINE: "The individual results are then collected for additional processing".
"The main pipeline's goal is to collect the results of the sub pipelines,
so it cannot be completed before all sub_pipelines return their results"
"""
context.log.info(f"Collecting results from {len(sub_pipeline_results)} sub-pipelines")
context.log.info("Main pipeline waited for ALL sub-pipelines to complete before proceeding")
# Additional processing on collected results
processed_results = []
for result in sub_pipeline_results:
# Example: additional processing on each sub-pipeline result
enhanced_result = {
**result,
"collection_timestamp": "2025-08-25T10:00:00Z",
"processed_by_main_pipeline": True,
}
processed_results.append(enhanced_result)
return processed_results
# =============================================================================
# MAIN PIPELINE AS GRAPH-BACKED ASSET
# =============================================================================
# start_graph_backed_asset
@dg.graph_asset
def main_pipeline_results(input_records: list[dict[str, Any]]) -> list[dict[str, Any]]:
"""COMPLETE MAIN PIPELINE.
1. ✅ Receives multiple related data records.
2. ✅ sub_pipeline processes each record individually (First layer of parallelization)
3. ✅ Individual results are collected for additional processing
4. ✅ Generate final results (used by downstream assets for final report)
The graph-backed asset ensures:
- Sub-pipelines are triggered based on number of inputs
- Main pipeline cannot complete before ALL sub-pipelines return results
- Full visibility into parallel execution
- Proper asset lineage and dependencies
"""
# Launch sub-pipelines (one per input record)
sub_pipeline_triggers = trigger_sub_pipelines(input_records)
# Execute sub-pipelines in parallel (first layer of parallelization)
sub_pipeline_results = sub_pipeline_triggers.map(sub_pipeline_process_record_option_a)
# Collect ALL results before proceeding (fan-in / synchronization barrier)
collected_results = sub_pipeline_results.collect() # This waits for all to complete
# Perform additional processing and return final results
return collect_sub_pipeline_results(collected_results)
@dg.asset
def final_report(main_pipeline_results: list[dict[str, Any]]) -> dict[str, Any]:
"""MAIN PIPELINE: "generate a comprehensive final report".
This asset depends on main_pipeline_results, ensuring the entire pipeline
completes before the final report is generated.
"""
total_units = sum(result["units_processed"] for result in main_pipeline_results)
return {
"pipeline_summary": {
"total_records_processed": len(main_pipeline_results),
"total_units_processed": total_units,
"average_units_per_record": total_units / len(main_pipeline_results)
if main_pipeline_results
else 0,
},
"detailed_results": main_pipeline_results,
"report_generated_at": "2025-08-25T10:00:00Z",
"pipeline_status": "completed_successfully",
}
# end_graph_backed_asset
| DataProcessingConfig |
python | kamyu104__LeetCode-Solutions | Python/kth-largest-sum-in-a-binary-tree.py | {
"start": 158,
"end": 1881
} | class ____(object):
def kthLargestLevelSum(self, root, k):
"""
:type root: Optional[TreeNode]
:type k: int
:rtype: int
"""
def nth_element(nums, n, left=0, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target, compare):
mid = left
while mid <= right:
if nums[mid] == target:
mid += 1
elif compare(nums[mid], target):
nums[left], nums[mid] = nums[mid], nums[left]
left += 1
mid += 1
else:
nums[mid], nums[right] = nums[right], nums[mid]
right -= 1
return left, right
right = len(nums)-1
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx], compare)
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
arr = []
q = [root]
while q:
new_q = []
for u in q:
if u.left:
new_q.append(u.left)
if u.right:
new_q.append(u.right)
arr.append(sum(x.val for x in q))
q = new_q
if k-1 >= len(arr):
return -1
nth_element(arr, k-1, compare=lambda a, b: a > b)
return arr[k-1]
| Solution |
python | ray-project__ray | python/ray/train/tests/test_iter_torch_batches_gpu.py | {
"start": 6681,
"end": 6930
} | class ____(TuplePandasBatchCollateFn):
"""Collate function that returns id and value as a list of tensors."""
def __call__(self, batch: pd.DataFrame) -> List[torch.Tensor]:
return list(super().__call__(batch))
| ListPandasBatchCollateFn |
python | pytorch__pytorch | torch/distributed/pipelining/_utils.py | {
"start": 1114,
"end": 4505
} | class ____(RuntimeError):
"""Shape mismatch between configured and runtime values."""
def validate_tensor_metadata(desc, expected, given):
if not expected.shape == given.shape:
raise PipeliningShapeError(
f"{desc} has a shape mismatch: expected {expected.shape} actual {given.shape}"
)
if not expected.dtype == given.dtype:
raise PipeliningShapeError(
f"{desc} has a dtype mismatch: expected {expected.dtype} actual {given.dtype}"
)
if not expected.stride() == given.stride():
raise PipeliningShapeError(
f"{desc} has a stride mismatch: expected {expected.stride()} actual {given.stride()}"
)
def validate_tensors_metadata(
desc,
expected_tensors: list[torch.Tensor] | tuple[torch.Tensor, ...],
actual_tensors: list[torch.Tensor] | tuple[torch.Tensor, ...],
):
if len(expected_tensors) != len(actual_tensors):
raise PipeliningShapeError(
f"{desc}: Number of values ({len(actual_tensors)}) does not match expected number ({len(expected_tensors)})"
)
for i in range(len(expected_tensors)):
validate_tensor_metadata(
f"{desc}: value {i}", expected_tensors[i], actual_tensors[i]
)
def generate_stage_to_rank_mapping(
pp_size: int, num_stages: int, style: str = "loop"
) -> dict[int, int]:
"""
Compute the stage id to rank mapping for either a looped or V-style schedule.
Most commonly num_stages == pp_size * 2, but this function can be used to
compute the mapping for any number of stages per rank.
"""
mapping = {}
if style == "loop":
for stage_index in range(num_stages):
mapping[stage_index] = stage_index % pp_size
elif style == "v":
if num_stages % pp_size != 0:
raise ValueError(
f"num_stages {num_stages} must be evenly divisible by pp_size {pp_size} for V schedules"
)
rank_index = 0
for stage_index in range(num_stages):
mapping[stage_index] = rank_index
# dont change rank if we are on the border (to keep v shape)
if (stage_index + 1) % pp_size == 0:
continue
if (stage_index // pp_size) % 2 == 0:
rank_index += 1
else:
rank_index -= 1
else:
raise ValueError(f"Style {style} is not supported.")
return mapping
def generate_rank_to_stage_mapping(
pp_size: int, num_stages: int, style: str = "loop"
) -> dict[int, list[int]]:
"""
Compute the rank to stage id mapping for either a looped or V-style schedule.
This function inverts the stage_to_rank_mapping to get which stages are assigned to each rank.
Returns a dictionary mapping rank -> list of stage indices assigned to that rank.
"""
stage_to_rank = generate_stage_to_rank_mapping(pp_size, num_stages, style)
# Invert the mapping: rank -> list of stages
rank_to_stages: dict[int, list[int]] = {}
for stage_id, rank in stage_to_rank.items():
if rank not in rank_to_stages:
rank_to_stages[rank] = []
rank_to_stages[rank].append(stage_id)
# Sort the stage lists for each rank to ensure consistent ordering
for stages in rank_to_stages.values():
stages.sort()
return rank_to_stages
@dataclass
| PipeliningShapeError |
python | django__django | tests/builtin_server/tests.py | {
"start": 1668,
"end": 2263
} | class ____(ServerHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.request_handler = DummyHandler()
self._used_sendfile = False
def sendfile(self):
self._used_sendfile = True
return True
def wsgi_app(environ, start_response):
start_response("200 OK", [("Content-Type", "text/plain")])
return [b"Hello World!"]
def wsgi_app_file_wrapper(environ, start_response):
start_response("200 OK", [("Content-Type", "text/plain")])
return environ["wsgi.file_wrapper"](BytesIO(b"foo"))
| FileWrapperHandler |
python | pypa__installer | tests/test_records.py | {
"start": 6741,
"end": 9568
} | class ____:
def test_accepts_empty_iterable(self):
list(parse_record_file([]))
@pytest.mark.parametrize(
"record_input",
["record_simple_list", "record_simple_iter", "record_simple_file"],
indirect=True,
)
def test_accepts_all_kinds_of_iterables(self, record_input):
"""Should accepts any iterable, e.g. container, iterator, or file object."""
records = list(parse_record_file(record_input))
assert len(records) == 2
assert records == [
(
"file.py",
"sha256=AVTFPZpEKzuHr7OvQZmhaU3LvwKz06AJw8mT\\_pNh2yI",
"3144",
),
("distribution-1.0.dist-info/RECORD", "", ""),
]
@pytest.mark.parametrize(
"line, element_count",
[
pytest.param(
"file.py,sha256=AVTFPZpEKzuHr7OvQZmhaU3LvwKz06AJw8mT\\_pNh2yI,3144,",
4,
id="four",
),
pytest.param(
"distribution-1.0.dist-info/RECORD,,,,",
5,
id="five",
),
],
)
def test_rejects_wrong_element_count(self, line, element_count):
with pytest.raises(InvalidRecordEntry) as exc_info:
list(parse_record_file([line]))
message = f"expected 3 elements, got {element_count}"
assert message in str(exc_info.value)
def test_shows_correct_row_number(self):
record_lines = [
"file1.py,sha256=AVTFPZpEKzuHr7OvQZmhaU3LvwKz06AJw8mT\\_pNh2yI,3144",
"file2.py,sha256=AVTFPZpEKzuHr7OvQZmhaU3LvwKz06AJw8mT\\_pNh2yI,3144",
"file3.py,sha256=AVTFPZpEKzuHr7OvQZmhaU3LvwKz06AJw8mT\\_pNh2yI,3144",
"distribution-1.0.dist-info/RECORD,,,,",
]
with pytest.raises(InvalidRecordEntry) as exc_info:
list(parse_record_file(record_lines))
assert "Row Index 3" in str(exc_info.value)
def test_parse_record_entry_with_comma(self):
record_lines = [
'"file1,file2.txt",sha256=AVTFPZpEKzuHr7OvQZmhaU3LvwKz06AJw8mT\\_pNh2yI,3144',
"distribution-1.0.dist-info/RECORD,,",
]
records = list(parse_record_file(record_lines))
assert records == [
(
"file1,file2.txt",
"sha256=AVTFPZpEKzuHr7OvQZmhaU3LvwKz06AJw8mT\\_pNh2yI",
"3144",
),
("distribution-1.0.dist-info/RECORD", "", ""),
]
def test_parse_record_entry_with_backslash_path(self):
record_lines = [
"distribution-1.0.dist-info\\RECORD,,",
]
records = list(parse_record_file(record_lines))
assert records == [
("distribution-1.0.dist-info/RECORD", "", ""),
]
| TestParseRecordFile |
python | facebook__pyre-check | client/coverage_data.py | {
"start": 2281,
"end": 2422
} | class ____(str, Enum):
UNSAFE = "UNSAFE"
STRICT = "STRICT"
IGNORE_ALL = "IGNORE_ALL"
@dataclasses.dataclass(frozen=True)
| ModuleMode |
python | Textualize__rich | rich/markdown.py | {
"start": 5983,
"end": 6297
} | class ____(MarkdownElement):
"""A horizontal rule to divide sections."""
new_line = False
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
style = console.get_style("markdown.hr", default="none")
yield Rule(style=style)
| HorizontalRule |
python | pytorch__pytorch | torch/onnx/_internal/fx/passes/type_promotion.py | {
"start": 8954,
"end": 11193
} | class ____(TypePromotionRule):
def __init__(
self,
namespace: str,
op_name: str,
promotion_kind: _prims_common.REDUCTION_OUTPUT_TYPE_KIND,
) -> None:
"""Constructs a TypePromotionRule for reduction operators.
Args:
namespace: Namespace of the op. E.g. 'aten' in 'torch.ops.aten.sum'.
op_name: Name of the op. E.g. 'sum' in 'torch.ops.aten.sum'.
promotion_kind: Type promotion kind. Refer to [_prims_common.reduction_dtypes]((https://github.com/pytorch/pytorch/blob/main/torch/_prims_common/__init__.py)) for detail. # noqa: B950
"""
super().__init__(namespace, op_name)
self.promotion_kind = promotion_kind
def __repr__(self) -> str:
return f"ReductionTypePromotionRule('{self.namespace}', '{self.op_name}', {self.promotion_kind})"
# pyrefly: ignore [bad-override]
def __eq__(self, other: object, /) -> bool:
if not isinstance(other, ElementwiseTypePromotionRule):
return False
return (
self.namespace == other.namespace
and self.op_name == other.op_name
and self.promotion_kind == other.promotion_kind
)
def __hash__(self) -> int:
return f"{type(self)}:{self.namespace}.{self.op_name}".__hash__()
def preview_type_promotion(
self, args: tuple, kwargs: dict
) -> TypePromotionSnapshot:
assert len(args) >= 1, (
f"Reduction op torch.ops.{self.namespace}.{self.op_name} expects at least one argument"
)
arg = args[0]
assert isinstance(arg, torch.Tensor), f"{type(arg)=} is not torch.Tensor"
dtype: torch.dtype | None = kwargs.get("dtype")
computation_dtype, result_dtype = _prims_common.reduction_dtypes(
arg, self.promotion_kind, dtype
)
if result_dtype is None:
# Inspecting code, this can only happen when `promotion_kind` is `KEEP_PROMOTED_TYPE`.
# Hence set same as computation_dtype.
result_dtype = computation_dtype
return TypePromotionSnapshot(
{0: computation_dtype},
{},
result_dtype,
)
| ReductionTypePromotionRule |
python | doocs__leetcode | solution/0300-0399/0307.Range Sum Query - Mutable/Solution.py | {
"start": 0,
"end": 397
} | class ____:
__slots__ = ["n", "c"]
def __init__(self, n):
self.n = n
self.c = [0] * (n + 1)
def update(self, x: int, delta: int):
while x <= self.n:
self.c[x] += delta
x += x & -x
def query(self, x: int) -> int:
s = 0
while x > 0:
s += self.c[x]
x -= x & -x
return s
| BinaryIndexedTree |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 311965,
"end": 312732
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of TransferEnterpriseOrganization"""
__schema__ = github_schema
__field_names__ = ("organization_id", "destination_enterprise_id", "client_mutation_id")
organization_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="organizationId")
"""The ID of the organization to transfer."""
destination_enterprise_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="destinationEnterpriseId")
"""The ID of the enterprise where the organization should be
transferred.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| TransferEnterpriseOrganizationInput |
python | dagster-io__dagster | python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/core/pex_builder/deps.py | {
"start": 1728,
"end": 6550
} | class ____:
local_package_paths: list[str]
def local_path_for(line: str, relative_to: str) -> Optional[str]:
# Return the abspath for a local package, iff this line points to a local package,
# otherwise return None.
# This handles relative or absolute paths specified in requirements.txt,
# eg "../some/other/dir" or "./subdir/" or "/abs/dir". For these directories we
# include the local package as part of the source pex.
# The "file://" urls (direct references) are correctly handled by the underlying pex build
# anyway and do not need special treatment here.
# Use a very specific match here to avoid accidentally matching URLs or other lines with slashes
path = None
if line.startswith("./") or line.startswith("../") or line.startswith("/"):
path = os.path.abspath(os.path.join(relative_to, line.strip()))
if path:
if not os.path.exists(path):
raise ValueError(
f"Could not find local directory {path!r} referenced in requirement {line!r}"
)
return path
return None
def get_requirements_lines(local_dir, python_interpreter: str) -> list[str]:
# Combine dependencies specified in requirements.txt, setup.py, and pyproject.toml
lines = get_requirements_txt_deps(local_dir)
lines.extend(get_setup_py_deps(local_dir, python_interpreter))
lines.extend(get_pyproject_toml_deps(local_dir))
return lines
def collect_requirements(code_directory, python_interpreter: str) -> tuple[list[str], list[str]]:
if not os.path.exists(code_directory):
raise Exception(
f"Specified a build directory that does not exist: {os.path.abspath(code_directory)}."
)
required_files = [
"setup.py",
"requirements.txt",
"pyproject.toml",
]
if not any(os.path.exists(os.path.join(code_directory, file)) for file in required_files):
raise Exception(
f"Could not find a setup.py, requirements.txt, or pyproject.toml in build directory {os.path.abspath(code_directory)}."
)
# traverse all local packages and return the list of local packages and other requirements
pending = [os.path.abspath(code_directory)] # local packages to be processed
seen = set()
local_package_paths = []
deps_lines = []
while pending:
local_dir = pending.pop()
if local_dir in seen:
continue
seen.add(local_dir)
lines = get_requirements_lines(local_dir, python_interpreter)
# Separate out the local packages from other requirements
for line in lines:
local_package_path = local_path_for(line, relative_to=local_dir)
if local_package_path:
if local_package_path not in local_package_paths:
local_package_paths.append(local_package_path)
pending.append(local_package_path)
else:
deps_lines.append(line)
return local_package_paths, deps_lines
def get_deps_requirements(
code_directory, python_version: version.Version
) -> tuple[LocalPackages, DepsRequirements]:
python_interpreter = util.python_interpreter_for(python_version)
ui.print(f"Finding dependencies using build directory {os.path.abspath(code_directory)}")
local_package_paths, deps_lines = collect_requirements(code_directory, python_interpreter)
deps_requirements_text = "\n".join(
sorted(set(deps_lines)) + [""]
) # empty string adds trailing newline
ui.print(f"List of local packages: {local_package_paths}")
ui.print(f"List of dependencies: {deps_requirements_text}")
local_packages = LocalPackages(local_package_paths=local_package_paths)
deps_requirements = DepsRequirements(
requirements_txt=deps_requirements_text,
python_version=python_version,
pex_flags=util.get_pex_flags(python_version, build_sdists=True),
)
ui.print(f"deps_requirements_hash: {deps_requirements.hash}")
return local_packages, deps_requirements
def build_deps_pex(code_directory, output_directory, python_version) -> tuple[str, str]:
_, requirements = get_deps_requirements(code_directory, python_version)
return build_deps_from_requirements(
requirements, output_directory, build_method=BuildMethod.DOCKER_FALLBACK
)
# Resolving dependencies can be flaky - depends on the version of pip and the resolver algorithm.
# These flags allow trying multiple ways of building the deps.
# This also allows us to try new flags safely, by having automatic fallback.
TRY_FLAGS = [
["--resolver-version=pip-2020-resolver"], # new resolver as recommended by pex team
# disabled but left here for easy revert
# [], # default set of flags defined in util.py
]
| LocalPackages |
python | kamyu104__LeetCode-Solutions | Python/kth-smallest-path-xor-sum.py | {
"start": 1939,
"end": 3185
} | class ____(object):
def kthSmallest(self, par, vals, queries):
"""
:type par: List[int]
:type vals: List[int]
:type queries: List[List[int]]
:rtype: List[int]
"""
def small_to_large_merge(sl1, sl2): # Total Time: O(n * (logn)^2)
if len(sl1) < len(sl2):
sl1, sl2 = sl2, sl1
for x in sl2: # each node is merged at most O(logn) times
if x not in sl1:
sl1.add(x) # each add costs O(logn)
return sl1
def dfs(u, curr):
curr ^= vals[u]
sl = SortedList([curr])
for v in adj[u]:
sl = small_to_large_merge(sl, dfs(v, curr))
for i in lookup[u]: # Total Time: O(qlogn)
if queries[i][1]-1 < len(sl):
result[i] = sl[queries[i][1]-1]
return sl
adj = [[] for _ in xrange(len(par))]
for u, p in enumerate(par):
if p != -1:
adj[p].append(u)
lookup = [[] for _ in xrange(len(adj))]
for i, (u, _) in enumerate(queries):
lookup[u].append(i)
result = [-1]*len(queries)
dfs(0, 0)
return result
| Solution2 |
python | django__django | tests/csrf_tests/tests.py | {
"start": 58130,
"end": 58761
} | class ____(CsrfFunctionTestMixin, SimpleTestCase):
def test_csrf_token_on_404_stays_constant(self):
response = self.client.get("/does not exist/")
# The error handler returns status code 599.
self.assertEqual(response.status_code, 599)
response.charset = "ascii"
token1 = response.text
response = self.client.get("/does not exist/")
self.assertEqual(response.status_code, 599)
response.charset = "ascii"
token2 = response.text
secret2 = _unmask_cipher_token(token2)
self.assertMaskedSecretCorrect(token1, secret2)
| CsrfInErrorHandlingViewsTests |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/type1.py | {
"start": 3156,
"end": 3298
} | class ____:
x1: type
x2: type[Any]
reveal_type(Class2.x1, expected_text="type")
reveal_type(Class2.x2, expected_text="type[Any]")
| Class2 |
python | milvus-io__pymilvus | pymilvus/client/types.py | {
"start": 13034,
"end": 18543
} | class ____:
"""enum states of bulk insert task"""
ImportPending = 0
ImportFailed = 1
ImportStarted = 2
ImportPersisted = 5
ImportCompleted = 6
ImportFailedAndCleaned = 7
ImportUnknownState = 100
"""pre-defined keys of bulk insert task info"""
FAILED_REASON = "failed_reason"
IMPORT_FILES = "files"
IMPORT_COLLECTION = "collection"
IMPORT_PARTITION = "partition"
IMPORT_PROGRESS = "progress_percent"
"""
Bulk insert state example:
- taskID : 44353845454358,
- state : "BulkLoadPersisted",
- row_count : 1000,
- infos : {"files": "rows.json",
"collection": "c1",
"partition": "",
"failed_reason": ""},
- id_list : [44353845455401, 44353845456401]
- create_ts : 1661398759,
"""
state_2_state: ClassVar[Dict] = {
common_pb2.ImportPending: ImportPending,
common_pb2.ImportFailed: ImportFailed,
common_pb2.ImportStarted: ImportStarted,
common_pb2.ImportPersisted: ImportPersisted,
common_pb2.ImportCompleted: ImportCompleted,
common_pb2.ImportFailedAndCleaned: ImportFailedAndCleaned,
}
state_2_name: ClassVar[Dict] = {
ImportPending: "Pending",
ImportFailed: "Failed",
ImportStarted: "Started",
ImportPersisted: "Persisted",
ImportCompleted: "Completed",
ImportFailedAndCleaned: "Failed and cleaned",
ImportUnknownState: "Unknown",
}
def __init__(
self,
task_id: int,
state: State,
row_count: int,
id_ranges: list,
infos: Dict,
create_ts: int,
):
self._task_id = task_id
self._state = state
self._row_count = row_count
self._id_ranges = id_ranges
self._create_ts = create_ts
self._infos = {kv.key: kv.value for kv in infos}
def __repr__(self) -> str:
fmt = """<Bulk insert state:
- taskID : {},
- state : {},
- row_count : {},
- infos : {},
- id_ranges : {},
- create_ts : {}
>"""
return fmt.format(
self._task_id,
self.state_name,
self.row_count,
self.infos,
self.id_ranges,
self.create_time_str,
)
@property
def task_id(self):
"""
Return unique id of this task.
"""
return self._task_id
@property
def row_count(self):
"""
If the task is finished, this value is the number of rows imported.
If the task is not finished, this value is the number of rows parsed.
"""
return self._row_count
@property
def state(self):
return self.state_2_state.get(self._state, BulkInsertState.ImportUnknownState)
@property
def state_name(self) -> str:
return self.state_2_name.get(self._state, "unknown state")
@property
def id_ranges(self):
"""
auto generated id ranges if the primary key is auto generated
the id list of response is id ranges
for example, if the response return [1, 100, 200, 250]
the full id list should be [1, 2, 3 ... , 99, 100, 200, 201, 202 ... , 249, 250]
"""
return self._id_ranges
@property
def ids(self):
"""
auto generated ids if the primary key is auto generated
the id list of response is id ranges
for example, if the response return [1, 100, 200, 250], the id ranges: [1,100),[200,250)
the full id list should be [1, 2, 3 ... , 99, 200, 201, 202 ... , 249]
"""
if len(self._id_ranges) % 2 != 0:
raise AutoIDException(message=ExceptionsMessage.AutoIDIllegalRanges)
ids = []
for i in range(int(len(self._id_ranges) / 2)):
begin = self._id_ranges[i * 2]
end = self._id_ranges[i * 2 + 1]
for j in range(begin, end):
ids.append(j)
return ids
@property
def infos(self):
"""more informations about the task, progress percentage, file path, failed reason, etc."""
return self._infos
@property
def failed_reason(self):
"""failed reason of the bulk insert task."""
return self._infos.get(BulkInsertState.FAILED_REASON, "")
@property
def files(self):
"""data files of the bulk insert task."""
return self._infos.get(BulkInsertState.IMPORT_FILES, "")
@property
def collection_name(self):
"""target collection's name of the bulk insert task."""
return self._infos.get(BulkInsertState.IMPORT_COLLECTION, "")
@property
def partition_name(self):
"""target partition's name of the bulk insert task."""
return self._infos.get(BulkInsertState.IMPORT_PARTITION, "")
@property
def create_timestamp(self):
"""the integer timestamp when this task is created."""
return self._create_ts
@property
def create_time_str(self):
"""A readable string converted from the timestamp when this task is created."""
ts = time.localtime(self._create_ts)
return time.strftime("%Y-%m-%d %H:%M:%S", ts)
@property
def progress(self):
"""working progress percent value."""
percent = self._infos.get(BulkInsertState.IMPORT_PROGRESS, "0")
return int(percent)
| BulkInsertState |
python | PyCQA__pylint | pylint/checkers/symilar.py | {
"start": 27262,
"end": 33937
} | class ____(BaseRawFileChecker, Symilar):
"""Checks for similarities and duplicated code.
This computation may be memory / CPU intensive, so you
should disable it if you experience some problems.
"""
name = "similarities"
msgs = MSGS
MIN_SIMILARITY_HELP = "Minimum lines number of a similarity."
IGNORE_COMMENTS_HELP = "Comments are removed from the similarity computation"
IGNORE_DOCSTRINGS_HELP = "Docstrings are removed from the similarity computation"
IGNORE_IMPORTS_HELP = "Imports are removed from the similarity computation"
IGNORE_SIGNATURES_HELP = "Signatures are removed from the similarity computation"
# for available dict keys/values see the option parser 'add_option' method
options: Options = (
(
"min-similarity-lines",
{
"default": DEFAULT_MIN_SIMILARITY_LINE,
"type": "int",
"metavar": "<int>",
"help": MIN_SIMILARITY_HELP,
},
),
(
"ignore-comments",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": IGNORE_COMMENTS_HELP,
},
),
(
"ignore-docstrings",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": IGNORE_DOCSTRINGS_HELP,
},
),
(
"ignore-imports",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": IGNORE_IMPORTS_HELP,
},
),
(
"ignore-signatures",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": IGNORE_SIGNATURES_HELP,
},
),
)
reports = (("RP0801", "Duplication", report_similarities),)
def __init__(self, linter: PyLinter) -> None:
BaseRawFileChecker.__init__(self, linter)
Symilar.__init__(
self,
min_lines=self.linter.config.min_similarity_lines,
ignore_comments=self.linter.config.ignore_comments,
ignore_docstrings=self.linter.config.ignore_docstrings,
ignore_imports=self.linter.config.ignore_imports,
ignore_signatures=self.linter.config.ignore_signatures,
)
def open(self) -> None:
"""Init the checkers: reset linesets and statistics information."""
self.linesets = []
self.linter.stats.reset_duplicated_lines()
def process_module(self, node: nodes.Module) -> None:
"""Process a module.
the module's content is accessible via the stream object
stream must implement the readlines method
"""
if self.linter.current_name is None:
# TODO: 4.0 Fix current_name
warnings.warn(
(
"In pylint 3.0 the current_name attribute of the linter object should be a string. "
"If unknown it should be initialized as an empty string."
),
DeprecationWarning,
stacklevel=2,
)
with node.stream() as stream:
self.append_stream(self.linter.current_name, stream, node.file_encoding)
def close(self) -> None:
"""Compute and display similarities on closing (i.e. end of parsing)."""
total = sum(len(lineset) for lineset in self.linesets)
duplicated = 0
stats = self.linter.stats
for num, couples in self._compute_sims():
msg = []
lineset = start_line = end_line = None
for lineset, start_line, end_line in couples:
msg.append(f"=={lineset.name}:[{start_line}:{end_line}]")
msg.sort()
if lineset:
for line in lineset.real_lines[start_line:end_line]:
msg.append(line.rstrip())
self.add_message("R0801", args=(len(couples), "\n".join(msg)))
duplicated += num * (len(couples) - 1)
stats.nb_duplicated_lines += int(duplicated)
stats.percent_duplicated_lines += float(total and duplicated * 100.0 / total)
def get_map_data(self) -> list[LineSet]:
"""Passthru override."""
return Symilar.get_map_data(self)
def reduce_map_data(self, linter: PyLinter, data: list[list[LineSet]]) -> None:
"""Reduces and recombines data into a format that we can report on.
The partner function of get_map_data()
Calls self.close() to actually calculate and report duplicate code.
"""
Symilar.combine_mapreduce_data(self, linesets_collection=data)
self.close()
def register(linter: PyLinter) -> None:
linter.register_checker(SimilaritiesChecker(linter))
def Run(argv: Sequence[str] | None = None) -> NoReturn:
"""Standalone command line access point."""
parser = argparse.ArgumentParser(
prog="symilar", description="Finds copy pasted blocks in a set of files."
)
parser.add_argument("files", nargs="+")
parser.add_argument(
"-d",
"--duplicates",
type=int,
default=DEFAULT_MIN_SIMILARITY_LINE,
help=SimilaritiesChecker.MIN_SIMILARITY_HELP,
)
parser.add_argument(
"-i",
"--ignore-comments",
action="store_true",
help=SimilaritiesChecker.IGNORE_COMMENTS_HELP,
)
parser.add_argument(
"--ignore-docstrings",
action="store_true",
help=SimilaritiesChecker.IGNORE_DOCSTRINGS_HELP,
)
parser.add_argument(
"--ignore-imports",
action="store_true",
help=SimilaritiesChecker.IGNORE_IMPORTS_HELP,
)
parser.add_argument(
"--ignore-signatures",
action="store_true",
help=SimilaritiesChecker.IGNORE_SIGNATURES_HELP,
)
parsed_args = parser.parse_args(args=argv)
similar_runner = Symilar(
min_lines=parsed_args.duplicates,
ignore_comments=parsed_args.ignore_comments,
ignore_docstrings=parsed_args.ignore_docstrings,
ignore_imports=parsed_args.ignore_imports,
ignore_signatures=parsed_args.ignore_signatures,
)
for filename in parsed_args.files:
with open(filename, encoding="utf-8") as stream:
similar_runner.append_stream(filename, stream)
similar_runner.run()
# the sys exit must be kept because of the unit tests that rely on it
sys.exit(0)
if __name__ == "__main__":
Run()
| SimilaritiesChecker |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-code-hierarchy/llama_index/packs/code_hierarchy/code_hierarchy.py | {
"start": 604,
"end": 1295
} | class ____(BaseModel):
"""
Unfortunately some languages need special options for how to make a signature.
For example, html element signatures should include their closing >, there is no
easy way to include this using an always-exclusive system.
However, using an always-inclusive system, python decorators don't work,
as there isn't an easy to define terminator for decorators that is inclusive
to their signature.
"""
type: str = Field(description="The type string to match on.")
inclusive: bool = Field(
description=(
"Whether to include the text of the node matched by this type or not."
),
)
| _SignatureCaptureType |
python | dagster-io__dagster | python_modules/dagster/dagster/_daemon/run_coordinator/queued_run_coordinator_daemon.py | {
"start": 1423,
"end": 20211
} | class ____(IntervalDaemon):
"""Used with the QueuedRunCoordinator on the instance. This process finds queued runs from the run
store and launches them.
"""
def __init__(self, interval_seconds, page_size=PAGE_SIZE) -> None:
self._exit_stack = ExitStack()
self._executor: Optional[ThreadPoolExecutor] = None
self._location_timeouts_lock = threading.Lock()
self._location_timeouts: dict[str, float] = {}
self._page_size = page_size
self._global_concurrency_blocked_runs_lock = threading.Lock()
self._global_concurrency_blocked_runs = set()
super().__init__(interval_seconds)
def _get_executor(self, max_workers) -> ThreadPoolExecutor:
if self._executor is None:
# assumes max_workers wont change
self._executor = self._exit_stack.enter_context(
InheritContextThreadPoolExecutor(
max_workers=max_workers,
thread_name_prefix="run_dequeue_worker",
)
)
return self._executor
def __exit__(self, _exception_type, _exception_value, _traceback):
self._executor = None
self._exit_stack.close()
super().__exit__(_exception_type, _exception_value, _traceback)
@classmethod
def daemon_type(cls) -> str:
return "QUEUED_RUN_COORDINATOR"
def run_iteration(
self,
workspace_process_context: IWorkspaceProcessContext,
fixed_iteration_time: Optional[float] = None, # used for tests
) -> DaemonIterator:
if not isinstance(workspace_process_context.instance.run_coordinator, QueuedRunCoordinator):
check.failed(
f"Expected QueuedRunCoordinator, got {workspace_process_context.instance.run_coordinator}"
)
run_coordinator = check.inst(
workspace_process_context.instance.run_coordinator, QueuedRunCoordinator
)
concurrency_config = workspace_process_context.instance.get_concurrency_config()
if not concurrency_config.run_queue_config:
check.failed("Got invalid run queue config")
instance = workspace_process_context.instance
runs_to_dequeue = self._get_runs_to_dequeue(
instance, concurrency_config, fixed_iteration_time=fixed_iteration_time
)
yield from self._dequeue_runs_iter(
workspace_process_context,
run_coordinator,
runs_to_dequeue,
concurrency_config,
fixed_iteration_time=fixed_iteration_time,
)
def _dequeue_runs_iter(
self,
workspace_process_context: IWorkspaceProcessContext,
run_coordinator: QueuedRunCoordinator,
runs_to_dequeue: list[DagsterRun],
concurrency_config: ConcurrencyConfig,
fixed_iteration_time: Optional[float],
) -> Iterator[None]:
if run_coordinator.dequeue_use_threads:
yield from self._dequeue_runs_iter_threaded(
workspace_process_context,
runs_to_dequeue,
run_coordinator.dequeue_num_workers,
concurrency_config,
fixed_iteration_time=fixed_iteration_time,
)
else:
yield from self._dequeue_runs_iter_loop(
workspace_process_context,
runs_to_dequeue,
concurrency_config,
fixed_iteration_time=fixed_iteration_time,
)
def _dequeue_run_thread(
self,
workspace_process_context: IWorkspaceProcessContext,
run: DagsterRun,
concurrency_config: ConcurrencyConfig,
fixed_iteration_time: Optional[float],
) -> bool:
return self._dequeue_run(
workspace_process_context.instance,
workspace_process_context.create_request_context(),
run,
concurrency_config,
fixed_iteration_time,
)
def _dequeue_runs_iter_threaded(
self,
workspace_process_context: IWorkspaceProcessContext,
runs_to_dequeue: list[DagsterRun],
max_workers: Optional[int],
concurrency_config: ConcurrencyConfig,
fixed_iteration_time: Optional[float],
) -> Iterator[None]:
num_dequeued_runs = 0
for future in as_completed(
self._get_executor(max_workers).submit(
self._dequeue_run_thread,
workspace_process_context,
run,
concurrency_config,
fixed_iteration_time=fixed_iteration_time,
)
for run in runs_to_dequeue
):
run_launched = future.result()
yield None
if run_launched:
num_dequeued_runs += 1
if num_dequeued_runs > 0:
self._logger.info("Launched %d runs.", num_dequeued_runs)
def _dequeue_runs_iter_loop(
self,
workspace_process_context: IWorkspaceProcessContext,
runs_to_dequeue: list[DagsterRun],
concurrency_config: ConcurrencyConfig,
fixed_iteration_time: Optional[float],
) -> Iterator[None]:
num_dequeued_runs = 0
for run in runs_to_dequeue:
run_launched = self._dequeue_run(
workspace_process_context.instance,
workspace_process_context.create_request_context(),
run,
concurrency_config,
fixed_iteration_time=fixed_iteration_time,
)
yield None
if run_launched:
num_dequeued_runs += 1
if num_dequeued_runs > 0:
self._logger.info("Launched %d runs.", num_dequeued_runs)
def _get_runs_to_dequeue(
self,
instance: DagsterInstance,
concurrency_config: ConcurrencyConfig,
fixed_iteration_time: Optional[float],
) -> list[DagsterRun]:
if not isinstance(instance.run_coordinator, QueuedRunCoordinator):
check.failed(f"Expected QueuedRunCoordinator, got {instance.run_coordinator}")
run_queue_config = concurrency_config.run_queue_config
assert run_queue_config
max_concurrent_runs = run_queue_config.max_concurrent_runs
tag_concurrency_limits = run_queue_config.tag_concurrency_limits
in_progress_run_records = self._get_in_progress_run_records(instance)
in_progress_runs = [record.dagster_run for record in in_progress_run_records]
max_concurrent_runs_enabled = max_concurrent_runs != -1 # setting to -1 disables the limit
max_runs_to_launch = max_concurrent_runs - len(in_progress_run_records)
if max_concurrent_runs_enabled:
# Possibly under 0 if runs were launched without queuing
if max_runs_to_launch <= 0:
self._logger.info(
f"{len(in_progress_run_records)} runs are currently in progress. Maximum is {max_concurrent_runs}, won't launch more."
)
return []
cursor = None
has_more = True
batch: list[DagsterRun] = []
now = fixed_iteration_time or time.time()
with self._location_timeouts_lock:
paused_location_names = {
location_name
for location_name in self._location_timeouts
if self._location_timeouts[location_name] > now
}
locations_clause = ""
if paused_location_names:
locations_clause = (
" Temporarily skipping runs from the following locations due to a user code error: "
+ ",".join(list(paused_location_names))
)
logged_this_iteration = False
# Paginate through our runs list so we don't need to hold every run
# in memory at once. The maximum number of runs we'll hold in memory is
# max_runs_to_launch + page_size.
concurrency_keys = None
pool_limits = None
while has_more:
queued_runs = instance.get_runs(
RunsFilter(statuses=[DagsterRunStatus.QUEUED]),
cursor=cursor,
limit=self._page_size,
ascending=True,
)
has_more = len(queued_runs) >= self._page_size
if not queued_runs:
has_more = False
return batch
if not logged_this_iteration:
logged_this_iteration = True
self._logger.info(
"Priority sorting and checking tag concurrency limits for queued runs."
+ locations_clause
)
cursor = queued_runs[-1].run_id
tag_concurrency_limits_counter = TagConcurrencyLimitsCounter(
tag_concurrency_limits, in_progress_runs
)
batch += queued_runs
batch = self._priority_sort(batch)
if run_queue_config.should_block_op_concurrency_limited_runs:
try:
# fetch global concurrency information at most once per iteration
if concurrency_keys is None:
concurrency_keys = instance.event_log_storage.get_concurrency_keys()
if pool_limits is None:
pool_limits = instance.event_log_storage.get_pool_limits()
global_concurrency_limits_counter = GlobalOpConcurrencyLimitsCounter(
instance,
batch,
in_progress_run_records,
concurrency_keys=concurrency_keys,
pool_limits=pool_limits,
slot_count_offset=run_queue_config.op_concurrency_slot_buffer,
pool_granularity=concurrency_config.pool_config.pool_granularity,
)
except:
self._logger.exception("Failed to initialize op concurrency counter")
# when we cannot initialize the global concurrency counter, we should fall back
# to not blocking any runs based on op concurrency limits
global_concurrency_limits_counter = None
else:
global_concurrency_limits_counter = None
to_remove = []
for run in batch:
if tag_concurrency_limits_counter.is_blocked(run):
to_remove.append(run)
continue
else:
tag_concurrency_limits_counter.update_counters_with_launched_item(run)
if (
global_concurrency_limits_counter
and global_concurrency_limits_counter.is_blocked(run)
):
to_remove.append(run)
if run.run_id not in self._global_concurrency_blocked_runs:
with self._global_concurrency_blocked_runs_lock:
self._global_concurrency_blocked_runs.add(run.run_id)
concurrency_blocked_info = json.dumps(
global_concurrency_limits_counter.get_blocked_run_debug_info(run)
)
self._logger.info(
f"Run {run.run_id} is blocked by global concurrency limits: {concurrency_blocked_info}"
)
continue
elif global_concurrency_limits_counter:
global_concurrency_limits_counter.update_counters_with_launched_item(run)
location_name = (
run.remote_job_origin.location_name if run.remote_job_origin else None
)
if location_name and location_name in paused_location_names:
to_remove.append(run)
continue
for run in to_remove:
batch.remove(run)
if max_runs_to_launch >= 1:
batch = batch[:max_runs_to_launch]
return batch
def _get_in_progress_run_records(self, instance: DagsterInstance) -> Sequence[RunRecord]:
return instance.get_run_records(filters=RunsFilter(statuses=IN_PROGRESS_RUN_STATUSES))
def _priority_sort(self, runs: Iterable[DagsterRun]) -> list[DagsterRun]:
def get_priority(run: DagsterRun) -> int:
priority_tag_value = run.tags.get(PRIORITY_TAG, "0")
try:
return int(priority_tag_value)
except ValueError:
return 0
# sorted is stable, so fifo is maintained
return sorted(runs, key=get_priority, reverse=True)
def _is_location_pausing_dequeues(self, location_name: str, now: float) -> bool:
with self._location_timeouts_lock:
return (
location_name in self._location_timeouts
and self._location_timeouts[location_name] > now
)
def _dequeue_run(
self,
instance: DagsterInstance,
workspace: BaseWorkspaceRequestContext,
run: DagsterRun,
concurrency_config: ConcurrencyConfig,
fixed_iteration_time: Optional[float],
) -> bool:
assert concurrency_config.run_queue_config
# double check that the run is still queued before dequeing
run = check.not_none(instance.get_run_by_id(run.run_id))
with self._global_concurrency_blocked_runs_lock:
if run.run_id in self._global_concurrency_blocked_runs:
self._global_concurrency_blocked_runs.remove(run.run_id)
now = fixed_iteration_time or time.time()
if run.status != DagsterRunStatus.QUEUED:
self._logger.info(
"Run %s is now %s instead of QUEUED, skipping",
run.run_id,
run.status,
)
return False
# Very old (pre 0.10.0) runs and programatically submitted runs may not have an
# attached code location name
location_name = run.remote_job_origin.location_name if run.remote_job_origin else None
if location_name and self._is_location_pausing_dequeues(location_name, now):
self._logger.info(
"Pausing dequeues for runs from code location %s to give its code server time"
" to recover",
location_name,
)
return False
launch_started_event = DagsterEvent(
event_type_value=DagsterEventType.PIPELINE_STARTING.value,
job_name=run.job_name,
)
instance.report_dagster_event(launch_started_event, run_id=run.run_id)
run = check.not_none(instance.get_run_by_id(run.run_id))
try:
instance.run_launcher.launch_run(LaunchRunContext(dagster_run=run, workspace=workspace))
except Exception as e:
error = DaemonErrorCapture.process_exception(
exc_info=sys.exc_info(),
logger=self._logger,
log_message=f"Caught an error dequeuing run {run.run_id}",
)
run = check.not_none(instance.get_run_by_id(run.run_id))
# Make sure we don't re-enqueue a run if it has already finished or moved into STARTED:
if run.status not in (DagsterRunStatus.QUEUED, DagsterRunStatus.STARTING):
self._logger.info(
f"Run {run.run_id} failed while being dequeued, but has already advanced to"
f" {run.status} - moving on. Error: {error.to_string()}"
)
return False
elif concurrency_config.run_queue_config.max_user_code_failure_retries and isinstance(
e, (DagsterUserCodeUnreachableError, DagsterCodeLocationLoadError)
):
if location_name:
with self._location_timeouts_lock:
# Don't try to dequeue runs from this location for another N seconds
self._location_timeouts[location_name] = (
now + concurrency_config.run_queue_config.user_code_failure_retry_delay
)
enqueue_event_records = instance.get_records_for_run(
run_id=run.run_id, of_type=DagsterEventType.PIPELINE_ENQUEUED
).records
check.invariant(len(enqueue_event_records), "Could not find enqueue event for run")
num_retries_so_far = len(enqueue_event_records) - 1
if (
num_retries_so_far
>= concurrency_config.run_queue_config.max_user_code_failure_retries
):
message = (
"Run dequeue failed to reach the user code server after"
f" {concurrency_config.run_queue_config.max_user_code_failure_retries} attempts, failing run"
)
instance.report_engine_event(
message,
run,
EngineEventData.engine_error(error),
)
instance.report_run_failed(run)
return False
else:
retries_left = (
concurrency_config.run_queue_config.max_user_code_failure_retries
- num_retries_so_far
)
retries_str = "retr" + ("y" if retries_left == 1 else "ies")
message = (
"Run dequeue failed to reach the user code server, re-submitting the run"
f" into the queue ({retries_left} {retries_str} remaining)"
)
instance.report_engine_event(
message,
run,
EngineEventData.engine_error(error),
)
# Re-submit the run into the queue
enqueued_event = DagsterEvent.job_enqueue(run)
instance.report_dagster_event(enqueued_event, run_id=run.run_id)
return False
else:
message = (
"Caught an unrecoverable error while dequeuing the run. Marking the run as"
" failed and dropping it from the queue"
)
instance.report_engine_event(
message,
run,
EngineEventData.engine_error(error),
)
instance.report_run_failed(run)
return False
return True
| QueuedRunCoordinatorDaemon |
python | matplotlib__matplotlib | lib/matplotlib/backends/_backend_tk.py | {
"start": 44284,
"end": 44486
} | class ____(backend_tools.ConfigureSubplotsBase):
def trigger(self, *args):
NavigationToolbar2Tk.configure_subplots(self)
@backend_tools._register_tool_class(FigureCanvasTk)
| ConfigureSubplotsTk |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py | {
"start": 1292,
"end": 4930
} | class ____(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(build_info.shape)
matrix = linear_operator_test_util.random_positive_definite_matrix(
shape, dtype)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
# Set the hints to none to test non-symmetric PD code paths.
operator = linalg.LinearOperatorFullMatrix(
lin_op_matrix,
is_square=True,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
return operator, matrix
def test_is_x_flags(self):
# Matrix with two positive eigenvalues.
matrix = [[1., 0.], [1., 11.]]
operator = linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
# Auto-detected.
self.assertTrue(operator.is_square)
def test_assert_non_singular_raises_if_cond_too_big_but_finite(self):
with self.cached_session():
tril = linear_operator_test_util.random_tril_matrix(
shape=(50, 50), dtype=np.float32)
diag = np.logspace(-2, 2, 50).astype(np.float32)
tril = array_ops.matrix_set_diag(tril, diag)
matrix = self.evaluate(math_ops.matmul(tril, tril, transpose_b=True))
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.assertRaisesOpError("Singular matrix"):
# Ensure that we have finite condition number...just HUGE.
cond = np.linalg.cond(matrix)
self.assertTrue(np.isfinite(cond))
self.assertGreater(cond, 1e12)
operator.assert_non_singular().run()
def test_assert_non_singular_raises_if_cond_infinite(self):
with self.cached_session():
matrix = [[1., 1.], [1., 1.]]
# We don't pass the is_self_adjoint hint here, which means we take the
# generic code path.
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.assertRaisesOpError("Singular matrix"):
operator.assert_non_singular().run()
def test_assert_self_adjoint(self):
matrix = [[0., 1.], [0., 1.]]
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.cached_session():
with self.assertRaisesOpError("not equal to its adjoint"):
operator.assert_self_adjoint().run()
@test_util.disable_xla("Assert statements in kernels not supported in XLA")
def test_assert_positive_definite(self):
matrix = [[1., 1.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(matrix, is_self_adjoint=True)
with self.cached_session():
with self.assertRaises(errors.InvalidArgumentError):
operator.assert_positive_definite().run()
def test_tape_safe(self):
matrix = variables_module.Variable([[2.]])
operator = linalg.LinearOperatorFullMatrix(matrix)
self.check_tape_safe(operator)
def test_convert_variables_to_tensors(self):
matrix = variables_module.Variable([[3.]])
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.cached_session() as sess:
sess.run([matrix.initializer])
self.check_convert_variables_to_tensors(operator)
@test_util.run_all_in_graph_and_eager_modes
| SquareLinearOperatorFullMatrixTest |
python | kamyu104__LeetCode-Solutions | Python/minimum-space-wasted-from-packaging.py | {
"start": 65,
"end": 752
} | class ____(object):
def minWastedSpace(self, packages, boxes):
"""
:type packages: List[int]
:type boxes: List[List[int]]
:rtype: int
"""
MOD = 10**9+7
INF = float("inf")
packages.sort()
result = INF
for box in boxes:
box.sort()
if box[-1] < packages[-1]:
continue
curr = left = 0
for b in box:
right = bisect.bisect_right(packages, b, left)
curr += b * (right-left)
left = right
result = min(result, curr)
return (result-sum(packages))%MOD if result != INF else -1
| Solution |
python | google__jax | tests/mosaic/gpu_dialect_test.py | {
"start": 3348,
"end": 38737
} | class ____(MosaicGpuTest):
def test_dialect_module_is_loaded(self):
self.assertTrue(_cext.globals._check_dialect_module_loaded("mosaic_gpu"))
def test_initialize_barrier_op_arrival_count_must_be_strictly_positive(self):
with ir.InsertionPoint(self.module.body):
mgpu.dialect.initialize_barrier(
llvm.UndefOp(workgroup_ptr_ty()),
arrival_count=0,
num_barriers=2,
)
with self.assertRaisesRegex(ir.MLIRError, "value is positive"):
self.module.operation.verify()
def test_initialize_barrier_op_with_a_non_shared_base_pointer_fails(self):
with ir.InsertionPoint(self.module.body):
mgpu.dialect.initialize_barrier(
llvm.UndefOp(ir.Type.parse(f"!llvm.ptr<{0}>")),
arrival_count=1,
num_barriers=2,
)
with self.assertRaisesRegex(ir.MLIRError, "pointer in address space 3"):
self.module.operation.verify()
def test_initialize_barrier_op_with_a_positive_arrival_count_passes(self):
with ir.InsertionPoint(self.module.body):
mgpu.dialect.initialize_barrier(
llvm.UndefOp(workgroup_ptr_ty()),
arrival_count=1,
num_barriers=2,
)
self.assertTrue(self.module.operation.verify())
def test_async_load_op_dest_must_be_contiguous(self):
with ir.InsertionPoint(self.module.body):
source, destination, barrier, *indices = undefs(
ir.MemRefType.get([4, 8], ir.F32Type.get()),
ir.MemRefType.get(
[4, 8],
ir.F32Type.get(),
layout=ir.Attribute.parse("strided<[16, 1]>"),
),
ir.MemRefType.get([], ir.Type.parse("!mosaic_gpu.barrier")),
ir.IntegerType.get_signless(32),
ir.IntegerType.get_signless(32),
)
mgpu.dialect.async_load(
source,
destination,
barrier,
indices,
slice_lengths=[4, 8],
collective=ir.ArrayAttr.get([]),
)
with self.assertRaisesRegex(
ir.MLIRError,
"The `destination` memref must be contiguous",
):
self.module.operation.verify()
def test_async_load_op_source_and_dest_must_have_same_element_type(self):
with ir.InsertionPoint(self.module.body):
source, destination, barrier, *indices = undefs(
ir.MemRefType.get([4, 8], ir.F32Type.get()),
ir.MemRefType.get([4, 8], ir.F64Type.get()),
ir.MemRefType.get([], ir.Type.parse("!mosaic_gpu.barrier")),
ir.IntegerType.get_signless(32),
ir.IntegerType.get_signless(32),
)
mgpu.dialect.async_load(
source,
destination,
barrier,
indices,
slice_lengths=[4, 8],
collective=ir.ArrayAttr.get([]),
)
with self.assertRaisesRegex(
ir.MLIRError,
"`source` and `destination` memrefs must have the same element",
):
self.module.operation.verify()
def test_async_load_op_slice_lengths_must_be_larger_than_minus_two(self):
with ir.InsertionPoint(self.module.body):
source, destination, barrier, *indices = undefs(
ir.MemRefType.get([4, 8], ir.F32Type.get()),
ir.MemRefType.get([4, 8], ir.F32Type.get()),
ir.MemRefType.get([], ir.Type.parse("!mosaic_gpu.barrier")),
ir.IntegerType.get_signless(32),
ir.IntegerType.get_signless(32),
)
mgpu.dialect.async_load(
source,
destination,
barrier,
indices,
slice_lengths=[-2, 8],
collective=ir.ArrayAttr.get([]),
)
with self.assertRaisesRegex(
ir.MLIRError,
"The `slice_lengths` attribute must not contain values less than -1",
):
self.module.operation.verify()
def test_async_load_op_source_and_dest_ranks_must_match_with_collapse(self):
with ir.InsertionPoint(self.module.body):
source, destination, barrier, *indices = undefs(
ir.MemRefType.get([1, 4, 8], ir.F32Type.get()),
ir.MemRefType.get([4], ir.F32Type.get()),
ir.MemRefType.get([], ir.Type.parse("!mosaic_gpu.barrier")),
ir.IntegerType.get_signless(32),
ir.IntegerType.get_signless(32),
ir.IntegerType.get_signless(32),
)
mgpu.dialect.async_load(
source,
destination,
barrier,
indices,
slice_lengths=[-1, 4, 8],
collective=ir.ArrayAttr.get([]),
)
with self.assertRaisesRegex(
ir.MLIRError,
"`destination` plus the number of collapsed dimensions as indicated",
):
self.module.operation.verify()
def test_async_load_op_indices_size_must_match_source_rank(self):
with ir.InsertionPoint(self.module.body):
source, destination, barrier, *indices = undefs(
ir.MemRefType.get([4, 8], ir.F32Type.get()),
ir.MemRefType.get([4, 8], ir.F32Type.get()),
ir.MemRefType.get([], ir.Type.parse("!mosaic_gpu.barrier")),
ir.IntegerType.get_signless(32),
)
mgpu.dialect.async_load(
source,
destination,
barrier,
indices,
slice_lengths=[4, 8],
collective=ir.ArrayAttr.get([]),
)
with self.assertRaisesRegex(
ir.MLIRError,
"The size of `indices` must be equal to the rank of `source`",
):
self.module.operation.verify()
def test_async_load_op_slice_lengths_size_must_match_source_rank(self):
with ir.InsertionPoint(self.module.body):
source, destination, barrier, *indices = undefs(
ir.MemRefType.get([4], ir.F32Type.get()),
ir.MemRefType.get([4], ir.F32Type.get()),
ir.MemRefType.get([], ir.Type.parse("!mosaic_gpu.barrier")),
ir.IntegerType.get_signless(32),
)
mgpu.dialect.async_load(
source,
destination,
barrier,
indices,
slice_lengths=[4, 8],
collective=ir.ArrayAttr.get([]),
)
with self.assertRaisesRegex(
ir.MLIRError,
"The size of `slice_lengths` must be equal to the rank of `source`",
):
self.module.operation.verify()
def test_async_load_op_slice_collective_must_be_unique(self):
with ir.InsertionPoint(self.module.body):
i32 = ir.IntegerType.get_signless(32)
source, destination, barrier, *indices = undefs(
ir.MemRefType.get([4], ir.F32Type.get()),
ir.MemRefType.get([4], ir.F32Type.get()),
ir.MemRefType.get([], ir.Type.parse("!mosaic_gpu.barrier")),
i32,
)
mgpu.dialect.async_load(
source,
destination,
barrier,
indices,
slice_lengths=[4],
collective=ir.ArrayAttr.get([
ir.IntegerAttr.get(i32, mgpu.dialect.Dimension.x),
ir.IntegerAttr.get(i32, mgpu.dialect.Dimension.x),
]),
)
with self.assertRaisesRegex(
ir.MLIRError,
"The `collective` attribute must not contain duplicate dimensions",
):
self.module.operation.verify()
def test_async_store_op_source_must_be_contiguous(self):
with ir.InsertionPoint(self.module.body):
source, destination, *indices = undefs(
ir.MemRefType.get(
[4, 8],
ir.F32Type.get(),
layout=ir.Attribute.parse("strided<[16, 1]>"),
),
ir.MemRefType.get([4, 8], ir.F32Type.get()),
ir.IntegerType.get_signless(32),
ir.IntegerType.get_signless(32),
)
mgpu.dialect.async_store(
source,
destination,
indices,
slice_lengths=[4, 8],
)
with self.assertRaisesRegex(
ir.MLIRError,
"The `source` memref must be contiguous",
):
self.module.operation.verify()
def test_async_store_op_source_and_dest_must_have_same_element_type(self):
with ir.InsertionPoint(self.module.body):
source, destination, *indices = undefs(
ir.MemRefType.get([4, 8], ir.F32Type.get()),
ir.MemRefType.get([4, 8], ir.F64Type.get()),
ir.IntegerType.get_signless(32),
ir.IntegerType.get_signless(32),
)
mgpu.dialect.async_store(
source,
destination,
indices,
slice_lengths=[4, 8],
)
with self.assertRaisesRegex(
ir.MLIRError,
"`source` and `destination` memrefs must have the same element",
):
self.module.operation.verify()
def test_async_store_op_slice_lengths_must_be_larger_than_minus_two(self):
with ir.InsertionPoint(self.module.body):
source, destination, *indices = undefs(
ir.MemRefType.get([4, 8], ir.F32Type.get()),
ir.MemRefType.get([4, 8], ir.F32Type.get()),
ir.IntegerType.get_signless(32),
ir.IntegerType.get_signless(32),
)
mgpu.dialect.async_store(
source,
destination,
indices,
slice_lengths=[-2, 8],
)
with self.assertRaisesRegex(
ir.MLIRError,
"The `slice_lengths` attribute must not contain values less than -1",
):
self.module.operation.verify()
def test_async_store_op_source_and_dest_ranks_must_match_with_collapse(self):
with ir.InsertionPoint(self.module.body):
source, destination, *indices = undefs(
ir.MemRefType.get([4], ir.F32Type.get()),
ir.MemRefType.get([1, 4, 8], ir.F32Type.get()),
ir.IntegerType.get_signless(32),
ir.IntegerType.get_signless(32),
ir.IntegerType.get_signless(32),
)
mgpu.dialect.async_store(
source,
destination,
indices,
slice_lengths=[-1, 4, 8],
)
with self.assertRaisesRegex(
ir.MLIRError,
"`source` plus the number of collapsed dimensions as indicated",
):
self.module.operation.verify()
def test_async_store_op_indices_size_must_match_destination_rank(self):
with ir.InsertionPoint(self.module.body):
source, destination, *indices = undefs(
ir.MemRefType.get([4, 8], ir.F32Type.get()),
ir.MemRefType.get([4, 8], ir.F32Type.get()),
ir.IntegerType.get_signless(32),
)
mgpu.dialect.async_store(
source,
destination,
indices,
slice_lengths=[4, 8],
)
with self.assertRaisesRegex(
ir.MLIRError,
"The size of `indices` must be equal to the rank of `destination`",
):
self.module.operation.verify()
def test_async_store_op_slice_lengths_size_must_match_source_rank(self):
with ir.InsertionPoint(self.module.body):
source, destination, *indices = undefs(
ir.MemRefType.get([4], ir.F32Type.get()),
ir.MemRefType.get([4], ir.F32Type.get()),
ir.IntegerType.get_signless(32),
)
mgpu.dialect.async_store(
source,
destination,
indices,
slice_lengths=[4, 8],
)
with self.assertRaisesRegex(
ir.MLIRError,
"The size of `slice_lengths` must be equal to the rank of"
" `destination`",
):
self.module.operation.verify()
def test_wgmma_types_match(self):
with ir.InsertionPoint(self.module.body):
acc, a, b = undefs(
ir.VectorType.get([128, 160], ir.F32Type.get()),
ir.MemRefType.get([128, 128], ir.F16Type.get()),
ir.MemRefType.get([128, 160], ir.BF16Type.get()),
)
mgpu.dialect.wgmma(acc, a, b)
with self.assertRaisesRegex(
ir.MLIRError,
"The `a` and `b` inputs must have the same element type.",
):
self.module.operation.verify()
def test_wgmma_acc_m_dim_not_multiple_of_64(self):
with ir.InsertionPoint(self.module.body):
acc, a, b = undefs(
ir.VectorType.get([127, 160], ir.F32Type.get()),
ir.MemRefType.get([127, 128], ir.BF16Type.get()),
ir.MemRefType.get([128, 160], ir.BF16Type.get()),
)
mgpu.dialect.wgmma(acc, a, b)
with self.assertRaisesRegex(
ir.MLIRError,
r"accumulator.*must be a multiple of 64",
):
self.module.operation.verify()
def test_wgmma_acc_m_not_equal_to_a_m_dim(self):
with ir.InsertionPoint(self.module.body):
acc, a, b = undefs(
ir.VectorType.get([256, 160], ir.F32Type.get()),
ir.MemRefType.get([512, 128], ir.BF16Type.get()),
ir.MemRefType.get([128, 160], ir.BF16Type.get()),
)
mgpu.dialect.wgmma(acc, a, b)
with self.assertRaisesRegex(
ir.MLIRError,
r"accumulator's first dimension 256 must be equal to.*`a`",
):
self.module.operation.verify()
def test_wgmma_a_k_dim_not_equal_to_b_k_dim(self):
with ir.InsertionPoint(self.module.body):
acc, a, b = undefs(
ir.VectorType.get([128, 160], ir.F32Type.get()),
ir.MemRefType.get([128, 128], ir.BF16Type.get()),
ir.MemRefType.get([160, 160], ir.BF16Type.get()),
)
mgpu.dialect.wgmma(acc, a, b)
with self.assertRaisesRegex(
ir.MLIRError,
"`a`'s contracting dimension 128 must be equal to the first dimension"
" of `b`",
):
self.module.operation.verify()
def test_wgmma_b_n_dim_not_equal_to_acc_n_dim(self):
with ir.InsertionPoint(self.module.body):
acc, a, b = undefs(
ir.VectorType.get([128, 160], ir.F32Type.get()),
ir.MemRefType.get([128, 128], ir.BF16Type.get()),
ir.MemRefType.get([128, 192], ir.BF16Type.get()),
)
mgpu.dialect.wgmma(acc, a, b)
with self.assertRaisesRegex(
ir.MLIRError,
r"`b`'s non-contracting dimension 192 must be equal to the",
):
self.module.operation.verify()
def test_tcgen05_mma_types_match(self):
with ir.InsertionPoint(self.module.body):
acc, a, b, accumulate = undefs(
ir.MemRefType.get([128, 160], ir.F16Type.get()),
ir.MemRefType.get([128, 128], ir.F16Type.get()),
ir.MemRefType.get([128, 160], ir.BF16Type.get()),
ir.IntegerType.get_signless(1),
)
mgpu.dialect.tcgen05_mma(acc, a, b, accumulate)
with self.assertRaisesRegex(
ir.MLIRError,
"The `a` and `b` inputs must have the same element type.",
):
self.module.operation.verify()
def test_tcgen05_mma_acc_m_dim_not_multiple_of_128(self):
with ir.InsertionPoint(self.module.body):
acc, a, b, accumulate = undefs(
ir.MemRefType.get([127, 160], ir.F16Type.get()),
ir.MemRefType.get([127, 128], ir.F16Type.get()),
ir.MemRefType.get([128, 160], ir.F16Type.get()),
ir.IntegerType.get_signless(1),
)
mgpu.dialect.tcgen05_mma(acc, a, b, accumulate)
with self.assertRaisesRegex(
ir.MLIRError,
r"accumulator.*must be a multiple of 32",
):
self.module.operation.verify()
def test_tcgen05_mma_acc_m_not_equal_to_a_m_dim(self):
with ir.InsertionPoint(self.module.body):
acc, a, b, accumulate = undefs(
ir.MemRefType.get([256, 160], ir.F16Type.get()),
ir.MemRefType.get([512, 128], ir.F16Type.get()),
ir.MemRefType.get([128, 160], ir.F16Type.get()),
ir.IntegerType.get_signless(1),
)
mgpu.dialect.tcgen05_mma(acc, a, b, accumulate)
with self.assertRaisesRegex(
ir.MLIRError,
r"accumulator's first dimension 256 must be equal to.*`a`",
):
self.module.operation.verify()
def test_tcgen05_mma_a_k_dim_not_equal_to_b_k_dim(self):
with ir.InsertionPoint(self.module.body):
acc, a, b, accumulate = undefs(
ir.MemRefType.get([128, 160], ir.F16Type.get()),
ir.MemRefType.get([128, 128], ir.F16Type.get()),
ir.MemRefType.get([160, 160], ir.F16Type.get()),
ir.IntegerType.get_signless(1),
)
mgpu.dialect.tcgen05_mma(acc, a, b, accumulate)
with self.assertRaisesRegex(
ir.MLIRError,
"`a`'s contracting dimension 128 must be equal to the first dimension"
" of `b`",
):
self.module.operation.verify()
def test_tcgen05_mma_b_n_dim_not_equal_to_acc_n_dim(self):
with ir.InsertionPoint(self.module.body):
acc, a, b, accumulate = undefs(
ir.MemRefType.get([128, 160], ir.F16Type.get()),
ir.MemRefType.get([128, 128], ir.F16Type.get()),
ir.MemRefType.get([128, 192], ir.F16Type.get()),
ir.IntegerType.get_signless(1),
)
mgpu.dialect.tcgen05_mma(acc, a, b, accumulate)
with self.assertRaisesRegex(
ir.MLIRError,
r"`b`'s non-contracting dimension 192 must be equal to the",
):
self.module.operation.verify()
def test_tcgen05_mma_b_n_dim_not_equal_to_half_acc_n_dim(self):
with ir.InsertionPoint(self.module.body):
acc, a, b, accumulate = undefs(
ir.MemRefType.get([128, 160], ir.F16Type.get()),
ir.MemRefType.get([128, 128], ir.F16Type.get()),
ir.MemRefType.get([128, 160], ir.F16Type.get()),
ir.IntegerType.get_signless(1),
)
mgpu.dialect.tcgen05_mma(acc, a, b, accumulate, collective=True)
with self.assertRaisesRegex(
ir.MLIRError,
r"`b`'s non-contracting dimension 160 must be half",
):
self.module.operation.verify()
def test_tcgen05_mma_acc_mem_space_is_tmem(self):
smem = mgpu_utils.smem()
with ir.InsertionPoint(self.module.body):
acc, a, b, accumulate = undefs(
ir.MemRefType.get([128, 160], ir.F16Type.get(), memory_space=smem),
ir.MemRefType.get([128, 128], ir.F16Type.get()),
ir.MemRefType.get([128, 160], ir.F16Type.get()),
ir.IntegerType.get_signless(1),
)
mgpu.dialect.tcgen05_mma(acc, a, b, accumulate)
with self.assertRaisesRegex(
ir.MLIRError,
r"The accumulator must be in TMEM",
):
self.module.operation.verify()
def test_tcgen05_mma_a_mem_space_is_smem_or_tmem(self):
tmem = mgpu_utils.tmem()
with ir.InsertionPoint(self.module.body):
acc, a, b, accumulate = undefs(
ir.MemRefType.get([128, 160], ir.F16Type.get(), memory_space=tmem),
ir.MemRefType.get([128, 128], ir.F16Type.get()),
ir.MemRefType.get([128, 160], ir.F16Type.get()),
ir.IntegerType.get_signless(1),
)
mgpu.dialect.tcgen05_mma(acc, a, b, accumulate)
with self.assertRaisesRegex(
ir.MLIRError,
r"The `a` input must be in TMEM or SMEM",
):
self.module.operation.verify()
def test_tcgen05_mma_b_mem_space_is_smem(self):
smem, tmem = mgpu_utils.smem(), mgpu_utils.tmem()
with ir.InsertionPoint(self.module.body):
acc, a, b, accumulate = undefs(
ir.MemRefType.get([128, 160], ir.F16Type.get(), memory_space=tmem),
ir.MemRefType.get([128, 128], ir.F16Type.get(), memory_space=smem),
ir.MemRefType.get([128, 160], ir.F16Type.get(), memory_space=tmem),
ir.IntegerType.get_signless(1),
)
mgpu.dialect.tcgen05_mma(acc, a, b, accumulate)
with self.assertRaisesRegex(
ir.MLIRError,
r"The `b` input must be in SMEM",
):
self.module.operation.verify()
def test_tcgen05_mma_scale_arg_missing(self):
smem, tmem = mgpu_utils.smem(), mgpu_utils.tmem()
f8e0m0 = ir.Float8E8M0FNUType.get()
with ir.InsertionPoint(self.module.body):
acc, a, b, accumulate, a_scale = undefs(
ir.MemRefType.get([128, 160], ir.F16Type.get(), memory_space=tmem),
ir.MemRefType.get([128, 128], ir.F16Type.get(), memory_space=smem),
ir.MemRefType.get([128, 160], ir.F16Type.get(), memory_space=smem),
ir.IntegerType.get_signless(1),
ir.MemRefType.get([128, 4], f8e0m0, memory_space=tmem),
)
mgpu.dialect.tcgen05_mma(acc, a, b, accumulate, a_scale=a_scale)
with self.assertRaisesRegex(
ir.MLIRError,
r"Either none or both scales should be provided.",
):
self.module.operation.verify()
def test_tcgen05_mma_a_scale_mem_space_is_tmem(self):
smem, tmem = mgpu_utils.smem(), mgpu_utils.tmem()
f8e0m0 = ir.Float8E8M0FNUType.get()
with ir.InsertionPoint(self.module.body):
acc, a, b, accumulate, a_scale, b_scale = undefs(
ir.MemRefType.get([128, 160], ir.F16Type.get(), memory_space=tmem),
ir.MemRefType.get([128, 128], ir.F16Type.get(), memory_space=smem),
ir.MemRefType.get([128, 160], ir.F16Type.get(), memory_space=smem),
ir.IntegerType.get_signless(1),
ir.MemRefType.get([128, 4], f8e0m0, memory_space=smem),
ir.MemRefType.get([160, 4], f8e0m0, memory_space=tmem),
)
mgpu.dialect.tcgen05_mma(
acc, a, b, accumulate, a_scale=a_scale, b_scale=b_scale
)
with self.assertRaisesRegex(
ir.MLIRError,
r"The `a_scale` input must be in TMEM",
):
self.module.operation.verify()
def test_tcgen05_mma_b_scale_mem_space_is_tmem(self):
smem, tmem = mgpu_utils.smem(), mgpu_utils.tmem()
f8e0m0 = ir.Float8E8M0FNUType.get()
with ir.InsertionPoint(self.module.body):
acc, a, b, accumulate, a_scale, b_scale = undefs(
ir.MemRefType.get([128, 160], ir.F16Type.get(), memory_space=tmem),
ir.MemRefType.get([128, 128], ir.F16Type.get(), memory_space=smem),
ir.MemRefType.get([128, 160], ir.F16Type.get(), memory_space=smem),
ir.IntegerType.get_signless(1),
ir.MemRefType.get([128, 4], f8e0m0, memory_space=tmem),
ir.MemRefType.get([160, 4], f8e0m0, memory_space=smem),
)
mgpu.dialect.tcgen05_mma(
acc, a, b, accumulate, a_scale=a_scale, b_scale=b_scale
)
with self.assertRaisesRegex(
ir.MLIRError,
r"The `b_scale` input must be in TMEM",
):
self.module.operation.verify()
def test_tiled_layout_attr_parsing(self):
with ir.InsertionPoint(self.module.body):
for layout in (
mgpu.WGMMA_LAYOUT,
mgpu.WGMMA_ROW_LAYOUT,
mgpu.WGMMA_COL_LAYOUT,
mgpu.WGMMA_TRANSPOSED_LAYOUT,
):
attr = layouts.to_tiled_layout_attr(layout)
parsed_layout = layouts.from_tiled_layout_attr(attr)
self.assertEqual(layout, parsed_layout)
def test_broadcast_in_dim_ok(self):
with ir.InsertionPoint(self.module.body):
(operand,) = undefs(ir.VectorType.get([64], ir.F32Type.get()))
mgpu.dialect.broadcast_in_dim(
ir.VectorType.get([64, 64], ir.F32Type.get()),
operand,
broadcast_dimensions=[0],
)
self.assertTrue(self.module.operation.verify())
def test_broadcast_in_dim_no_0d(self):
with ir.InsertionPoint(self.module.body):
(operand,) = undefs(ir.VectorType.get([], ir.F32Type.get()))
mgpu.dialect.broadcast_in_dim(
ir.VectorType.get([64], ir.F32Type.get()),
operand,
broadcast_dimensions=[],
)
with self.assertRaisesRegex(
ir.MLIRError,
r"The input vector must have rank > 0",
):
self.module.operation.verify()
def test_broadcast_in_dim_no_input_larger_than_output(self):
with ir.InsertionPoint(self.module.body):
(operand,) = undefs(ir.VectorType.get([64, 64], ir.F32Type.get()))
mgpu.dialect.broadcast_in_dim(
ir.VectorType.get([64], ir.F32Type.get()),
operand,
broadcast_dimensions=[],
)
with self.assertRaisesRegex(
ir.MLIRError,
r"rank of the input vector must be smaller",
):
self.module.operation.verify()
def test_broadcast_in_dim_too_many_dims(self):
with ir.InsertionPoint(self.module.body):
(operand,) = undefs(ir.VectorType.get([64], ir.F32Type.get()))
mgpu.dialect.broadcast_in_dim(
ir.VectorType.get([64, 64], ir.F32Type.get()),
operand,
broadcast_dimensions=[0, 1],
)
with self.assertRaisesRegex(
ir.MLIRError,
r"size of the `broadcast_dimensions` attribute must be",
):
self.module.operation.verify()
def test_broadcast_in_dim_dim_oob(self):
with ir.InsertionPoint(self.module.body):
(operand,) = undefs(ir.VectorType.get([64], ir.F32Type.get()))
mgpu.dialect.broadcast_in_dim(
ir.VectorType.get([64, 64], ir.F32Type.get()),
operand,
broadcast_dimensions=[2],
)
with self.assertRaisesRegex(
ir.MLIRError,
r"must be in the range \[0, result.shape.rank",
):
self.module.operation.verify()
def test_broadcast_in_dim_dim_transpose(self):
with ir.InsertionPoint(self.module.body):
(operand,) = undefs(ir.VectorType.get([64, 64, 64, 64], ir.F32Type.get()))
mgpu.dialect.broadcast_in_dim(
ir.VectorType.get([64, 64, 64, 64], ir.F32Type.get()),
operand,
broadcast_dimensions=[0, 1, 3, 2],
)
with self.assertRaisesRegex(
ir.MLIRError,
r"`broadcast_dimensions` attribute must be strictly increasing",
):
self.module.operation.verify()
def test_custom_primitive_op_args_must_match_args_of_terminator(self):
with ir.InsertionPoint(self.module.body):
shape = (128,)
elt_ty = ir.F32Type.get()
ty = ir.VectorType.get(shape, elt_ty)
strided_layout = mgpu.WGStridedFragLayout.from_shaped_type(ty)
assert strided_layout is not None
out_layouts = ir.ArrayAttr.get([layouts.to_layout_attr(strided_layout)])
op = mgpu.dialect.CustomPrimitiveOp(
result=[ty],
operands_=[],
in_layouts=[],
in_transforms=[],
out_layouts=out_layouts,
)
block = op.body.blocks.append()
with ir.InsertionPoint(block):
v = llvm.mlir_undef(ir.VectorType.get([256], ir.F32Type.get()))
mgpu.dialect.ReturnOp(operands_=[v])
with self.assertRaisesRegex(
ir.MLIRError,
r"type of return operand 0 \('vector<256xf32>'\) doesn't match the"
r" result type \('vector<128xf32>'\) in custom_primitive",
):
self.module.operation.verify()
def test_custom_primitive_op_results_must_be_scalar_or_vector(self):
with ir.InsertionPoint(self.module.body):
ref_ty = ir.MemRefType.get((128, 128), ir.F32Type.get())
op = mgpu.dialect.CustomPrimitiveOp(
result=[ref_ty],
operands_=[],
in_layouts=[],
in_transforms=[],
out_layouts=[],
)
block = op.body.blocks.append()
with ir.InsertionPoint(block):
[ref] = undefs(ref_ty)
mgpu.dialect.ReturnOp(operands_=[ref])
with self.assertRaisesRegex(
ir.MLIRError,
r"Custom primitive can only return scalars or vectors.",
):
self.module.operation.verify()
def test_tmem_alloc_op_must_have_smem_ref_input(self):
with ir.InsertionPoint(self.module.body):
(smem_ptr,) = undefs(
ir.MemRefType.get([], ir.IntegerType.get_signless(32))
)
mgpu.dialect.tmem_alloc(
result=ir.MemRefType.get(
[128, 32],
ir.BF16Type.get(),
memory_space=mgpu_utils.tmem(),
),
smem_ptr=smem_ptr,
collective=False,
packing=1,
)
with self.assertRaisesRegex(
ir.MLIRError,
"The `smem_ptr` memref must have the Workgroup address space",
):
self.module.operation.verify()
def test_tmem_alloc_op_result_must_have_tmem_memory_space(self):
with ir.InsertionPoint(self.module.body):
(smem_ptr,) = undefs(
ir.MemRefType.get(
[],
ir.IntegerType.get_signless(32),
memory_space=mgpu_utils.smem(),
)
)
mgpu.dialect.tmem_alloc(
result=ir.MemRefType.get(
[128, 32],
ir.BF16Type.get(),
),
smem_ptr=smem_ptr,
collective=False,
packing=1,
)
with self.assertRaisesRegex(
ir.MLIRError,
"The tmem memref must have a mosaic_gpu.tmem memory space",
):
self.module.operation.verify()
def test_tmem_alloc_op_exact_column_count_must_be_at_most_512(self):
with ir.InsertionPoint(self.module.body):
(smem_ptr,) = undefs(
ir.MemRefType.get(
[],
ir.IntegerType.get_signless(32),
memory_space=mgpu_utils.smem(),
)
)
mgpu.dialect.tmem_alloc(
result=ir.MemRefType.get(
[128, 1024],
ir.BF16Type.get(),
memory_space=mgpu_utils.tmem(),
),
smem_ptr=smem_ptr,
collective=False,
packing=1,
)
with self.assertRaisesRegex(
ir.MLIRError,
"The number of allocated columns must be less than or equal to 512 but"
" got: 1024",
):
self.module.operation.verify()
def test_tmem_alloc_op_bad_packing(self):
with ir.InsertionPoint(self.module.body):
(smem_ptr,) = undefs(
ir.MemRefType.get(
[],
ir.IntegerType.get_signless(32),
memory_space=mgpu_utils.smem(),
)
)
mgpu.dialect.tmem_alloc(
result=ir.MemRefType.get(
[128, 128],
ir.BF16Type.get(),
memory_space=mgpu_utils.tmem(),
),
smem_ptr=smem_ptr,
collective=False,
packing=4,
)
with self.assertRaisesRegex(
ir.MLIRError,
"Only unpacked, or fully packed allocations are supported.",
):
self.module.operation.verify()
def test_tmem_alloc_op_exact_false_column_count_15_ok(self):
with ir.InsertionPoint(self.module.body):
(smem_ptr,) = undefs(
ir.MemRefType.get(
[],
ir.IntegerType.get_signless(32),
memory_space=mgpu_utils.smem(),
)
)
mgpu.dialect.tmem_alloc(
result=ir.MemRefType.get(
[128, 15],
ir.BF16Type.get(),
memory_space=mgpu_utils.tmem(),
),
smem_ptr=smem_ptr,
collective=False,
packing=1,
)
self.assertTrue(self.module.operation.verify())
def test_tmem_alloc_op_exact_false_column_count_100_ok(self):
with ir.InsertionPoint(self.module.body):
(smem_ptr,) = undefs(
ir.MemRefType.get(
[],
ir.IntegerType.get_signless(32),
memory_space=mgpu_utils.smem(),
)
)
mgpu.dialect.tmem_alloc(
result=ir.MemRefType.get(
[128, 100],
ir.BF16Type.get(),
memory_space=mgpu_utils.tmem(),
),
smem_ptr=smem_ptr,
collective=False,
packing=1,
)
self.assertTrue(self.module.operation.verify())
def test_tmem_alloc_op_exact_false_column_count_777_packed_not_ok(self):
with ir.InsertionPoint(self.module.body):
(smem_ptr,) = undefs(
ir.MemRefType.get(
[],
ir.IntegerType.get_signless(32),
memory_space=mgpu_utils.smem(),
)
)
mgpu.dialect.tmem_alloc(
result=ir.MemRefType.get(
[128, 777],
ir.BF16Type.get(),
memory_space=mgpu_utils.tmem(),
),
smem_ptr=smem_ptr,
collective=False,
packing=2,
)
with self.assertRaisesRegex(
ir.MLIRError,
"The number of unpacked columns must be divisible by the packing",
):
self.module.operation.verify()
def test_tmem_alloc_op_exact_false_column_count_778_packed_ok(self):
with ir.InsertionPoint(self.module.body):
(smem_ptr,) = undefs(
ir.MemRefType.get(
[],
ir.IntegerType.get_signless(32),
memory_space=mgpu_utils.smem(),
)
)
mgpu.dialect.tmem_alloc(
result=ir.MemRefType.get(
[128, 778],
ir.BF16Type.get(),
memory_space=mgpu_utils.tmem(),
),
smem_ptr=smem_ptr,
collective=False,
packing=2,
)
self.assertTrue(self.module.operation.verify())
def test_tmem_alloc_dealloc_packed_large_shape_ok(self):
with ir.InsertionPoint(self.module.body):
ref_ty = ir.MemRefType.get(
[128, 1024],
ir.BF16Type.get(),
memory_space=mgpu_utils.tmem(),
)
(smem_ptr,) = undefs(
ir.MemRefType.get(
[],
ir.IntegerType.get_signless(32),
memory_space=mgpu_utils.smem(),
)
)
# This allocation would exceed the 512 columns limit if it were not packed.
ref = mgpu.dialect.tmem_alloc(
result=ref_ty,
smem_ptr=smem_ptr,
collective=False,
packing=2,
)
mgpu.dialect.tmem_dealloc(ref)
self.assertTrue(self.module.operation.verify())
def test_tmem_layout_cast_invalid_tmem_ref(self):
with ir.InsertionPoint(self.module.body):
(tmem_ref,) = undefs(
ir.MemRefType.get(
[128, 128],
ir.BF16Type.get(),
memory_space=mgpu_utils.smem(),
)
)
mgpu.dialect.tmem_layout_cast(
tmem_ref, layouts.to_layout_attr(tcgen05.TMEM_NATIVE_LAYOUT)
)
with self.assertRaisesRegex(
ir.MLIRError,
"The tmem memref must have a mosaic_gpu.tmem memory space",
):
self.module.operation.verify()
def test_vector_store_op_src_dst_shape_mismatch(self):
with ir.InsertionPoint(self.module.body):
src_ty = ir.VectorType.get((8,), ir.BF16Type.get())
dst_ty = ir.MemRefType.get((4,), ir.BF16Type.get())
(src, dst) = undefs(src_ty, dst_ty)
mgpu.dialect.vector_store(src, dst)
with self.assertRaisesRegex(
ir.MLIRError,
"The source and destination must have the same shape",
):
self.module.operation.verify()
def test_vector_store_op_src_dst_dtype_mismatch(self):
with ir.InsertionPoint(self.module.body):
src_ty = ir.VectorType.get((8,), ir.BF16Type.get())
dst_ty = ir.MemRefType.get((8,), ir.F32Type.get())
(src, dst) = undefs(src_ty, dst_ty)
mgpu.dialect.vector_store(src, dst)
with self.assertRaisesRegex(
ir.MLIRError,
"The source and destination must have the same element type",
):
self.module.operation.verify()
def test_broadcasted_iota_op_invalid_dimension(self):
with ir.InsertionPoint(self.module.body):
ty = ir.VectorType.get((2,), ir.F32Type.get())
mgpu.dialect.broadcasted_iota(ty, dimension=2)
with self.assertRaisesRegex(
ir.MLIRError,
"dimension=2 must be smaller than the rank=1 of the result.",
):
self.module.operation.verify()
def test_print_layout_op_invalid_ref(self):
with ir.InsertionPoint(self.module.body):
ref_ty = ir.MemRefType.get(
(2,), ir.F32Type.get(), memory_space=mgpu_utils.smem()
)
(ref,) = undefs(ref_ty)
mgpu.dialect.print_layout("tmem: {}", ref)
with self.assertRaisesRegex(
ir.MLIRError,
"The tmem memref must have a mosaic_gpu.tmem memory space",
):
self.module.operation.verify()
| DialectTest |
python | readthedocs__readthedocs.org | readthedocs/core/unresolver.py | {
"start": 1890,
"end": 2054
} | class ____(UnresolverError):
def __init__(self, project, language):
self.project = project
self.language = language
| TranslationWithoutVersionError |
python | TheAlgorithms__Python | linear_programming/simplex.py | {
"start": 493,
"end": 11830
} | class ____:
"""Operate on simplex tableaus
>>> Tableau(np.array([[-1,-1,0,0,1],[1,3,1,0,4],[3,1,0,1,4]]), 2, 2)
Traceback (most recent call last):
...
TypeError: Tableau must have type float64
>>> Tableau(np.array([[-1,-1,0,0,-1],[1,3,1,0,4],[3,1,0,1,4.]]), 2, 2)
Traceback (most recent call last):
...
ValueError: RHS must be > 0
>>> Tableau(np.array([[-1,-1,0,0,1],[1,3,1,0,4],[3,1,0,1,4.]]), -2, 2)
Traceback (most recent call last):
...
ValueError: number of (artificial) variables must be a natural number
"""
# Max iteration number to prevent cycling
maxiter = 100
def __init__(
self, tableau: np.ndarray, n_vars: int, n_artificial_vars: int
) -> None:
if tableau.dtype != "float64":
raise TypeError("Tableau must have type float64")
# Check if RHS is negative
if not (tableau[:, -1] >= 0).all():
raise ValueError("RHS must be > 0")
if n_vars < 2 or n_artificial_vars < 0:
raise ValueError(
"number of (artificial) variables must be a natural number"
)
self.tableau = tableau
self.n_rows, n_cols = tableau.shape
# Number of decision variables x1, x2, x3...
self.n_vars, self.n_artificial_vars = n_vars, n_artificial_vars
# 2 if there are >= or == constraints (nonstandard), 1 otherwise (std)
self.n_stages = (self.n_artificial_vars > 0) + 1
# Number of slack variables added to make inequalities into equalities
self.n_slack = n_cols - self.n_vars - self.n_artificial_vars - 1
# Objectives for each stage
self.objectives = ["max"]
# In two stage simplex, first minimise then maximise
if self.n_artificial_vars:
self.objectives.append("min")
self.col_titles = self.generate_col_titles()
# Index of current pivot row and column
self.row_idx = None
self.col_idx = None
# Does objective row only contain (non)-negative values?
self.stop_iter = False
def generate_col_titles(self) -> list[str]:
"""Generate column titles for tableau of specific dimensions
>>> Tableau(np.array([[-1,-1,0,0,1],[1,3,1,0,4],[3,1,0,1,4.]]),
... 2, 0).generate_col_titles()
['x1', 'x2', 's1', 's2', 'RHS']
>>> Tableau(np.array([[-1,-1,0,0,1],[1,3,1,0,4],[3,1,0,1,4.]]),
... 2, 2).generate_col_titles()
['x1', 'x2', 'RHS']
"""
args = (self.n_vars, self.n_slack)
# decision | slack
string_starts = ["x", "s"]
titles = []
for i in range(2):
for j in range(args[i]):
titles.append(string_starts[i] + str(j + 1))
titles.append("RHS")
return titles
def find_pivot(self) -> tuple[Any, Any]:
"""Finds the pivot row and column.
>>> tuple(int(x) for x in Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6],
... [1,2,0,1,7.]]), 2, 0).find_pivot())
(1, 0)
"""
objective = self.objectives[-1]
# Find entries of highest magnitude in objective rows
sign = (objective == "min") - (objective == "max")
col_idx = np.argmax(sign * self.tableau[0, :-1])
# Choice is only valid if below 0 for maximise, and above for minimise
if sign * self.tableau[0, col_idx] <= 0:
self.stop_iter = True
return 0, 0
# Pivot row is chosen as having the lowest quotient when elements of
# the pivot column divide the right-hand side
# Slice excluding the objective rows
s = slice(self.n_stages, self.n_rows)
# RHS
dividend = self.tableau[s, -1]
# Elements of pivot column within slice
divisor = self.tableau[s, col_idx]
# Array filled with nans
nans = np.full(self.n_rows - self.n_stages, np.nan)
# If element in pivot column is greater than zero, return
# quotient or nan otherwise
quotients = np.divide(dividend, divisor, out=nans, where=divisor > 0)
# Arg of minimum quotient excluding the nan values. n_stages is added
# to compensate for earlier exclusion of objective columns
row_idx = np.nanargmin(quotients) + self.n_stages
return row_idx, col_idx
def pivot(self, row_idx: int, col_idx: int) -> np.ndarray:
"""Pivots on value on the intersection of pivot row and column.
>>> Tableau(np.array([[-2,-3,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]),
... 2, 2).pivot(1, 0).tolist()
... # doctest: +NORMALIZE_WHITESPACE
[[0.0, 3.0, 2.0, 0.0, 8.0],
[1.0, 3.0, 1.0, 0.0, 4.0],
[0.0, -8.0, -3.0, 1.0, -8.0]]
"""
# Avoid changes to original tableau
piv_row = self.tableau[row_idx].copy()
piv_val = piv_row[col_idx]
# Entry becomes 1
piv_row *= 1 / piv_val
# Variable in pivot column becomes basic, ie the only non-zero entry
for idx, coeff in enumerate(self.tableau[:, col_idx]):
self.tableau[idx] += -coeff * piv_row
self.tableau[row_idx] = piv_row
return self.tableau
def change_stage(self) -> np.ndarray:
"""Exits first phase of the two-stage method by deleting artificial
rows and columns, or completes the algorithm if exiting the standard
case.
>>> Tableau(np.array([
... [3, 3, -1, -1, 0, 0, 4],
... [2, 1, 0, 0, 0, 0, 0.],
... [1, 2, -1, 0, 1, 0, 2],
... [2, 1, 0, -1, 0, 1, 2]
... ]), 2, 2).change_stage().tolist()
... # doctest: +NORMALIZE_WHITESPACE
[[2.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 2.0, -1.0, 0.0, 2.0],
[2.0, 1.0, 0.0, -1.0, 2.0]]
"""
# Objective of original objective row remains
self.objectives.pop()
if not self.objectives:
return self.tableau
# Slice containing ids for artificial columns
s = slice(-self.n_artificial_vars - 1, -1)
# Delete the artificial variable columns
self.tableau = np.delete(self.tableau, s, axis=1)
# Delete the objective row of the first stage
self.tableau = np.delete(self.tableau, 0, axis=0)
self.n_stages = 1
self.n_rows -= 1
self.n_artificial_vars = 0
self.stop_iter = False
return self.tableau
def run_simplex(self) -> dict[Any, Any]:
"""Operate on tableau until objective function cannot be
improved further.
# Standard linear program:
Max: x1 + x2
ST: x1 + 3x2 <= 4
3x1 + x2 <= 4
>>> {key: float(value) for key, value in Tableau(np.array([[-1,-1,0,0,0],
... [1,3,1,0,4],[3,1,0,1,4.]]), 2, 0).run_simplex().items()}
{'P': 2.0, 'x1': 1.0, 'x2': 1.0}
# Standard linear program with 3 variables:
Max: 3x1 + x2 + 3x3
ST: 2x1 + x2 + x3 ≤ 2
x1 + 2x2 + 3x3 ≤ 5
2x1 + 2x2 + x3 ≤ 6
>>> {key: float(value) for key, value in Tableau(np.array([
... [-3,-1,-3,0,0,0,0],
... [2,1,1,1,0,0,2],
... [1,2,3,0,1,0,5],
... [2,2,1,0,0,1,6.]
... ]),3,0).run_simplex().items()} # doctest: +ELLIPSIS
{'P': 5.4, 'x1': 0.199..., 'x3': 1.6}
# Optimal tableau input:
>>> {key: float(value) for key, value in Tableau(np.array([
... [0, 0, 0.25, 0.25, 2],
... [0, 1, 0.375, -0.125, 1],
... [1, 0, -0.125, 0.375, 1]
... ]), 2, 0).run_simplex().items()}
{'P': 2.0, 'x1': 1.0, 'x2': 1.0}
# Non-standard: >= constraints
Max: 2x1 + 3x2 + x3
ST: x1 + x2 + x3 <= 40
2x1 + x2 - x3 >= 10
- x2 + x3 >= 10
>>> {key: float(value) for key, value in Tableau(np.array([
... [2, 0, 0, 0, -1, -1, 0, 0, 20],
... [-2, -3, -1, 0, 0, 0, 0, 0, 0],
... [1, 1, 1, 1, 0, 0, 0, 0, 40],
... [2, 1, -1, 0, -1, 0, 1, 0, 10],
... [0, -1, 1, 0, 0, -1, 0, 1, 10.]
... ]), 3, 2).run_simplex().items()}
{'P': 70.0, 'x1': 10.0, 'x2': 10.0, 'x3': 20.0}
# Non standard: minimisation and equalities
Min: x1 + x2
ST: 2x1 + x2 = 12
6x1 + 5x2 = 40
>>> {key: float(value) for key, value in Tableau(np.array([
... [8, 6, 0, 0, 52],
... [1, 1, 0, 0, 0],
... [2, 1, 1, 0, 12],
... [6, 5, 0, 1, 40.],
... ]), 2, 2).run_simplex().items()}
{'P': 7.0, 'x1': 5.0, 'x2': 2.0}
# Pivot on slack variables
Max: 8x1 + 6x2
ST: x1 + 3x2 <= 33
4x1 + 2x2 <= 48
2x1 + 4x2 <= 48
x1 + x2 >= 10
x1 >= 2
>>> {key: float(value) for key, value in Tableau(np.array([
... [2, 1, 0, 0, 0, -1, -1, 0, 0, 12.0],
... [-8, -6, 0, 0, 0, 0, 0, 0, 0, 0.0],
... [1, 3, 1, 0, 0, 0, 0, 0, 0, 33.0],
... [4, 2, 0, 1, 0, 0, 0, 0, 0, 60.0],
... [2, 4, 0, 0, 1, 0, 0, 0, 0, 48.0],
... [1, 1, 0, 0, 0, -1, 0, 1, 0, 10.0],
... [1, 0, 0, 0, 0, 0, -1, 0, 1, 2.0]
... ]), 2, 2).run_simplex().items()} # doctest: +ELLIPSIS
{'P': 132.0, 'x1': 12.000... 'x2': 5.999...}
"""
# Stop simplex algorithm from cycling.
for _ in range(Tableau.maxiter):
# Completion of each stage removes an objective. If both stages
# are complete, then no objectives are left
if not self.objectives:
# Find the values of each variable at optimal solution
return self.interpret_tableau()
row_idx, col_idx = self.find_pivot()
# If there are no more negative values in objective row
if self.stop_iter:
# Delete artificial variable columns and rows. Update attributes
self.tableau = self.change_stage()
else:
self.tableau = self.pivot(row_idx, col_idx)
return {}
def interpret_tableau(self) -> dict[str, float]:
"""Given the final tableau, add the corresponding values of the basic
decision variables to the `output_dict`
>>> {key: float(value) for key, value in Tableau(np.array([
... [0,0,0.875,0.375,5],
... [0,1,0.375,-0.125,1],
... [1,0,-0.125,0.375,1]
... ]),2, 0).interpret_tableau().items()}
{'P': 5.0, 'x1': 1.0, 'x2': 1.0}
"""
# P = RHS of final tableau
output_dict = {"P": abs(self.tableau[0, -1])}
for i in range(self.n_vars):
# Gives indices of nonzero entries in the ith column
nonzero = np.nonzero(self.tableau[:, i])
n_nonzero = len(nonzero[0])
# First entry in the nonzero indices
nonzero_rowidx = nonzero[0][0]
nonzero_val = self.tableau[nonzero_rowidx, i]
# If there is only one nonzero value in column, which is one
if n_nonzero == 1 and nonzero_val == 1:
rhs_val = self.tableau[nonzero_rowidx, -1]
output_dict[self.col_titles[i]] = rhs_val
return output_dict
if __name__ == "__main__":
import doctest
doctest.testmod()
| Tableau |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_ip_is_not_blacklisted.py | {
"start": 859,
"end": 1861
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.ip_is_not_blacklisted"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_not_blacklisted(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesIpIsNotBlacklisted |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/partitions/utils/multi.py | {
"start": 3502,
"end": 3997
} | class ____(
NamedTuple("_PartitionDimensionKey", [("dimension_name", str), ("partition_key", str)])
):
"""Representation of a single dimension of a multi-dimensional partition key."""
def __new__(cls, dimension_name: str, partition_key: str):
return super().__new__(
cls,
dimension_name=check.str_param(dimension_name, "dimension_name"),
partition_key=check.str_param(partition_key, "partition_key"),
)
@public
| PartitionDimensionKey |
python | tensorflow__tensorflow | tensorflow/python/keras/callbacks.py | {
"start": 32875,
"end": 34420
} | class ____(Callback):
"""Callback that accumulates epoch averages of metrics.
This callback is automatically applied to every Keras model.
Args:
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over an epoch.
Metrics in this list will be logged as-is in `on_epoch_end`.
All others will be averaged in `on_epoch_end`.
"""
def __init__(self, stateful_metrics=None):
super(BaseLogger, self).__init__()
self.stateful_metrics = set(stateful_metrics or [])
def on_epoch_begin(self, epoch, logs=None):
self.seen = 0
self.totals = {}
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
# In case of distribution strategy we can potentially run multiple steps
# at the same time, we should account for that in the `seen` calculation.
num_steps = logs.get('num_steps', 1)
self.seen += batch_size * num_steps
for k, v in logs.items():
if k in self.stateful_metrics:
self.totals[k] = v
else:
if k in self.totals:
self.totals[k] += v * batch_size
else:
self.totals[k] = v * batch_size
def on_epoch_end(self, epoch, logs=None):
if logs is not None:
for k in self.params['metrics']:
if k in self.totals:
# Make value available to next callbacks.
if k in self.stateful_metrics:
logs[k] = self.totals[k]
else:
logs[k] = self.totals[k] / self.seen
| BaseLogger |
python | apache__airflow | providers/standard/tests/unit/standard/triggers/test_file.py | {
"start": 1009,
"end": 2381
} | class ____:
FILE_PATH = "/files/dags/example_async_file.py"
def test_serialization(self):
"""Asserts that the trigger correctly serializes its arguments and classpath."""
trigger = FileTrigger(filepath=self.FILE_PATH, poll_interval=5)
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.standard.triggers.file.FileTrigger"
assert kwargs == {
"filepath": self.FILE_PATH,
"poke_interval": 5,
"recursive": False,
}
@pytest.mark.asyncio
async def test_task_file_trigger(self, tmp_path):
"""Asserts that the trigger only goes off on or after file is found"""
tmp_dir = tmp_path / "test_dir"
tmp_dir.mkdir()
p = tmp_dir / "hello.txt"
trigger = FileTrigger(
filepath=str(p.resolve()),
poke_interval=0.2,
)
task = asyncio.create_task(trigger.run().__anext__())
await asyncio.sleep(0.5)
# It should not have produced a result
assert task.done() is False
p.touch()
await asyncio.sleep(0.5)
assert task.done() is True
# Prevents error when task is destroyed while in "pending" state
asyncio.get_event_loop().stop()
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Skip on Airflow < 3.0")
| TestFileTrigger |
python | tensorflow__tensorflow | tensorflow/python/distribute/failure_handling/failure_handling.py | {
"start": 3219,
"end": 8307
} | class ____(object):
"""Customization of `PreemptionCheckpointHandler` for various platforms.
A `TerminationConfig` can be created and passed to a
`tf.distribute.experimental.PreemptionCheckpointHandler` to provide
customization based on the platform. It can deliver three pieces of
information:
* How to decide if there is a termination event soon
The form of termination notification and how to fetch it vary across
platforms. Thus `PreemptionCheckpointHandler` may take a user-defined
function, `termination_watcher_fn`, and execute it repeatedly to check for
termination notification. `termination_watcher_fn` should be a function
that returns `True` if a termination notification is available and
`False` otherwise. The function should be lightweight and non-blocking so that
resources can be cleaned up properly if no termination signal is ever raised
until training finishes.
* How to exit the program
A user can configure this through the `exit_fn`, which
`PreemptionCheckpointHandler` executes after saving the checkpoint to exit the
training program gracefully. For `tf.distribute.MultiWorkerMirroredStrategy`,
a restart is necessary to reset the program's state. However, having a
customized `exit_fn` may facilitate the restart and smoothen the training
experience. How so? Maybe the platform has an agreement to a `RESTART_CODE`
recognized as a program auto-restart signal, or maybe the user has a
coordinating script that starts up the training, in which they can configure
the program to auto-restart if it ever exits with this `RESTART_CODE`. In both
cases, configuring the `exit_fn` to be `sys.exit(RESTART_CODE)` makes the
training seamless.
* How long does `PreemptionCheckpointHandler` have from receiving a
termination event notice till the actual termination
Some platforms have a gap time as long as one hour or so. In these cases,
there is the option to utilize this gap time for training as much as possible
before saving a checkpoint and exiting. This can be achieved by passing the
`grace_period` argument a nonzero value. Note, for a user with a grace period
that is not multiple times longer than their checkpoint writing time (e.g.,
three times or more), we advise not to configure this argument, in which case
`PreemptionCheckpointHandler` will directly save a checkpoint and exit.
**The default behavior**:
* For Google Borg Platform:
* Automatically know how to detect preemption signal
* Exit with a platform-recognized restart code
* Save a checkpoint and exit immediately
* For Google Cloud Platform:
* Automatically know how to detect maintenance signal.
* Exit with a code (User may configure this)
* Automatically utilized the extended training period before save and exit
* For Other platform:
* If `termination_watcher_fn` is `None`, we will treat `signal.SIGTERM` as
a termination signal.
* If `exit_fn` is not configured, we exit the program with an arbitrary
code.
* If `grace_period` is not configured, we will wrap up the current
training step, save a checkpoint, and exit the program as soon as we
receive the termination signal.
"""
def __init__(self,
termination_watcher_fn=None,
exit_fn=None,
grace_period=None,
save_fn=None):
"""Creates a `TerminationConfig` object.
Args:
termination_watcher_fn: a function to execute repeatedly that returns
`True` if a preemption signal is available and False otherwise. The
function cannot block until a preemption signal is available, which
prevents proper cleanup of the program. A change is **NOT** recommended
for users on Google Borg or Google Cloud Platform.
exit_fn: a function to execute after a checkpoint is saved and before the
preemption happens. Usually, it should be in the form of
`lambda: sys.exit(RESTART_CODE)`, where `RESTART_CODE` varies by
platform. A change is **NOT** recommended for users on Google Borg.
Users on Google Cloud Platform may configure it to use a customized
`RESTART_CODE`.
grace_period: the length of time between receiving a preemption signal and
the actual preemption. A change is **NOT** recommended for users on
Google Borg, Google Cloud Platform, or users with a short grace period.
save_fn: an optional function letting you configure how to save a
checkpoint. This is useful if you'd like to pass extra argument to
`tf.train.CheckpointManager.save` or `tf.train.Checkpoint.save`. By
default, if not configured, the API will save checkpoint without extra
arguments.
"""
self.termination_watcher_fn = termination_watcher_fn
self.exit_fn = exit_fn
self.grace_period = grace_period
self.save_fn = save_fn
# TODO(wxinyi): add some tests for TerminationConfig.
# TODO(wxinyi): configure the exit function based on device type (GPU or TPU).
| TerminationConfig |
python | kamyu104__LeetCode-Solutions | Python/number-of-steps-to-reduce-a-number-in-binary-representation-to-one.py | {
"start": 29,
"end": 438
} | class ____(object):
def numSteps(self, s):
"""
:type s: str
:rtype: int
"""
result, carry = 0, 0
for i in reversed(xrange(1, len(s))):
if int(s[i]) + carry == 1:
carry = 1 # once it was set, it would keep carrying forever
result += 2
else:
result += 1
return result+carry
| Solution |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 22843,
"end": 24478
} | class ____(TestCase):
"""Tests for intersperse()"""
def test_even(self):
iterable = (x for x in '01')
self.assertEqual(
list(mi.intersperse(None, iterable)), ['0', None, '1']
)
def test_odd(self):
iterable = (x for x in '012')
self.assertEqual(
list(mi.intersperse(None, iterable)), ['0', None, '1', None, '2']
)
def test_nested(self):
element = ('a', 'b')
iterable = (x for x in '012')
actual = list(mi.intersperse(element, iterable))
expected = ['0', ('a', 'b'), '1', ('a', 'b'), '2']
self.assertEqual(actual, expected)
def test_not_iterable(self):
self.assertRaises(TypeError, lambda: mi.intersperse('x', 1))
def test_n(self):
for n, element, expected in [
(1, '_', ['0', '_', '1', '_', '2', '_', '3', '_', '4', '_', '5']),
(2, '_', ['0', '1', '_', '2', '3', '_', '4', '5']),
(3, '_', ['0', '1', '2', '_', '3', '4', '5']),
(4, '_', ['0', '1', '2', '3', '_', '4', '5']),
(5, '_', ['0', '1', '2', '3', '4', '_', '5']),
(6, '_', ['0', '1', '2', '3', '4', '5']),
(7, '_', ['0', '1', '2', '3', '4', '5']),
(3, ['a', 'b'], ['0', '1', '2', ['a', 'b'], '3', '4', '5']),
]:
iterable = (x for x in '012345')
actual = list(mi.intersperse(element, iterable, n=n))
self.assertEqual(actual, expected)
def test_n_zero(self):
self.assertRaises(
ValueError, lambda: list(mi.intersperse('x', '012', n=0))
)
| IntersperseTest |
python | getsentry__sentry | tests/sentry/integrations/msteams/webhook/test_ms_teams_webhook_endpoint.py | {
"start": 2978,
"end": 3435
} | class ____(TestCase):
def test_has_all_handlers(self) -> None:
instance = MsTeamsWebhookEndpoint()
assert len(instance._event_handlers) == 4
assert MsTeamsEvents.INSTALLATION_UPDATE in instance._event_handlers
assert MsTeamsEvents.UNKNOWN in instance._event_handlers
assert MsTeamsEvents.MESSAGE in instance._event_handlers
assert MsTeamsEvents.CONVERSATION_UPDATE in instance._event_handlers
| TestEventHandler |
python | spyder-ide__spyder | spyder/plugins/preferences/tests/conftest.py | {
"start": 843,
"end": 2368
} | class ____(QMainWindow):
register_shortcut = Mock()
def __init__(self, parent):
super().__init__(parent)
self.default_style = None
self.widgetlist = []
self.thirdparty_plugins = []
self.shortcut_data = []
self.prefs_dialog_instance = None
self._APPLICATION_TOOLBARS = MagicMock()
self.console = Mock()
# To provide command line options for plugins that need them
sys_argv = [sys.argv[0]] # Avoid options passed to pytest
self._cli_options = get_options(sys_argv)[0]
PLUGIN_REGISTRY.reset()
PLUGIN_REGISTRY.sig_plugin_ready.connect(self.register_plugin)
PLUGIN_REGISTRY.register_plugin(self, Preferences)
# Load shortcuts for tests
for context, name, __ in CONF.iter_shortcuts():
self.shortcut_data.append((None, context, name, None, None))
for attr in ['mem_status', 'cpu_status']:
mock_attr = Mock()
setattr(mock_attr, 'toolTip', lambda: '')
setattr(mock_attr, 'setToolTip', lambda x: '')
setattr(mock_attr, 'prefs_dialog_instance', lambda: '')
setattr(self, attr, mock_attr)
def register_plugin(self, plugin_name, external=False):
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
plugin._register(omit_conf=True)
def get_plugin(self, plugin_name, error=True):
if plugin_name in PLUGIN_REGISTRY:
return PLUGIN_REGISTRY.get_plugin(plugin_name)
| MainWindowMock |
python | django-haystack__django-haystack | test_haystack/test_loading.py | {
"start": 8376,
"end": 15245
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.ui = loading.UnifiedIndex()
self.ui.build([])
def test_get_index(self):
self.assertRaises(NotHandled, self.ui.get_index, MockModel)
try:
self.ui.get_index(MockModel)
except NotHandled as e:
self.assertTrue(MockModel.__name__ in str(e))
self.ui.build(indexes=[BasicMockModelSearchIndex()])
self.assertTrue(
isinstance(self.ui.get_index(MockModel), indexes.BasicSearchIndex)
)
def test_get_indexed_models(self):
self.assertEqual(self.ui.get_indexed_models(), [])
self.ui.build(indexes=[ValidSearchIndex()])
indexed_models = self.ui.get_indexed_models()
self.assertEqual(len(indexed_models), 1)
self.assertTrue(MockModel in indexed_models)
def test_get_indexes(self):
self.assertEqual(self.ui.get_indexes(), {})
index = ValidSearchIndex()
self.ui.build(indexes=[index])
results = self.ui.get_indexes()
self.assertEqual(len(results), 1)
self.assertTrue(MockModel in results)
self.assertEqual(results[MockModel], index)
def test_all_searchfields(self):
self.ui.build(indexes=[BasicMockModelSearchIndex()])
fields = self.ui.all_searchfields()
self.assertEqual(len(fields), 1)
self.assertTrue("text" in fields)
self.assertTrue(isinstance(fields["text"], indexes.CharField))
self.assertEqual(fields["text"].document, True)
self.assertEqual(fields["text"].use_template, True)
self.ui.build(
indexes=[BasicMockModelSearchIndex(), AlternateValidSearchIndex()]
)
fields = self.ui.all_searchfields()
self.assertEqual(len(fields), 5)
self.assertEqual(
sorted(fields.keys()),
["author", "author_exact", "text", "title", "title_exact"],
)
self.assertTrue("text" in fields)
self.assertTrue(isinstance(fields["text"], indexes.CharField))
self.assertEqual(fields["text"].document, True)
self.assertEqual(fields["text"].use_template, True)
self.assertTrue("title" in fields)
self.assertTrue(isinstance(fields["title"], indexes.CharField))
self.assertEqual(fields["title"].document, False)
self.assertEqual(fields["title"].use_template, False)
self.assertEqual(fields["title"].faceted, True)
self.assertEqual(fields["title"].indexed, True)
self.assertTrue("author" in fields)
self.assertTrue(isinstance(fields["author"], indexes.CharField))
self.assertEqual(fields["author"].document, False)
self.assertEqual(fields["author"].use_template, False)
self.assertEqual(fields["author"].faceted, True)
self.assertEqual(fields["author"].stored, True)
self.assertEqual(fields["author"].index_fieldname, "author")
self.ui.build(
indexes=[AlternateValidSearchIndex(), MultiValueValidSearchIndex()]
)
fields = self.ui.all_searchfields()
self.assertEqual(len(fields), 5)
self.assertEqual(
sorted(fields.keys()),
["author", "author_exact", "text", "title", "title_exact"],
)
self.assertTrue("text" in fields)
self.assertTrue(isinstance(fields["text"], indexes.CharField))
self.assertEqual(fields["text"].document, True)
self.assertEqual(fields["text"].use_template, False)
self.assertTrue("title" in fields)
self.assertTrue(isinstance(fields["title"], indexes.CharField))
self.assertEqual(fields["title"].document, False)
self.assertEqual(fields["title"].use_template, False)
self.assertEqual(fields["title"].faceted, True)
self.assertEqual(fields["title"].indexed, True)
self.assertTrue("author" in fields)
self.assertTrue(isinstance(fields["author"], indexes.MultiValueField))
self.assertEqual(fields["author"].document, False)
self.assertEqual(fields["author"].use_template, False)
self.assertEqual(fields["author"].stored, True)
self.assertEqual(fields["author"].faceted, True)
self.assertEqual(fields["author"].index_fieldname, "author")
try:
self.ui.build(indexes=[AlternateValidSearchIndex(), InvalidSearchIndex()])
self.fail()
except SearchFieldError:
pass
def test_get_index_fieldname(self):
self.assertEqual(self.ui._fieldnames, {})
self.ui.build(indexes=[ValidSearchIndex(), BasicAnotherMockModelSearchIndex()])
self.ui.get_index_fieldname("text")
self.assertEqual(
self.ui._fieldnames, {"text": "text", "title": "title", "author": "name"}
)
self.assertEqual(self.ui.get_index_fieldname("text"), "text")
self.assertEqual(self.ui.get_index_fieldname("author"), "name")
self.assertEqual(self.ui.get_index_fieldname("title"), "title")
# Reset the internal state to test the invalid case.
self.ui.reset()
self.assertEqual(self.ui._fieldnames, {})
try:
self.ui.build(indexes=[ValidSearchIndex(), AlternateValidSearchIndex()])
self.fail()
except SearchFieldError:
pass
def test_basic_get_facet_field_name(self):
self.assertEqual(self.ui._facet_fieldnames, {})
self.ui.build(
indexes=[BasicMockModelSearchIndex(), AlternateValidSearchIndex()]
)
self.ui.get_facet_fieldname("text")
self.assertEqual(
self.ui._facet_fieldnames,
{"title": "title_exact", "author": "author_exact"},
)
self.assertEqual(self.ui.get_index_fieldname("text"), "text")
self.assertEqual(self.ui.get_index_fieldname("author"), "author")
self.assertEqual(self.ui.get_index_fieldname("title"), "title")
self.assertEqual(self.ui.get_facet_fieldname("text"), "text")
self.assertEqual(self.ui.get_facet_fieldname("author"), "author_exact")
self.assertEqual(self.ui.get_facet_fieldname("title"), "title_exact")
def test_more_advanced_get_facet_field_name(self):
self.assertEqual(self.ui._facet_fieldnames, {})
self.ui.build(
indexes=[BasicAnotherMockModelSearchIndex(), ExplicitFacetSearchIndex()]
)
self.ui.get_facet_fieldname("text")
self.assertEqual(
self.ui._facet_fieldnames,
{
"bare_facet": "bare_facet",
"title": "title_facet",
"author": "author_exact",
},
)
self.assertEqual(self.ui.get_facet_fieldname("title"), "title_facet")
self.assertEqual(self.ui.get_facet_fieldname("bare_facet"), "bare_facet")
| UnifiedIndexTestCase |
python | huggingface__transformers | src/transformers/modeling_outputs.py | {
"start": 60574,
"end": 62286
class ____(ModelOutput):
    """
    Base class for outputs of models predicting if two sentences are consecutive or not.
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `next_sentence_label` is provided):
            Next sequence prediction (classification) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """
    # NOTE(review): presumably decorated with @dataclass at the definition site
    # (confirm in the full file); keep the field order stable — it defines the
    # positional layout ModelOutput exposes.
    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
| NextSentencePredictorOutput |
python | huggingface__transformers | src/transformers/models/granite_speech/modeling_granite_speech.py | {
"start": 10543,
"end": 11566
class ____(nn.Module):
    """Conformer block, consisting largely of linear layers, attention, and convolutional layers."""
    def __init__(self, config: GraniteSpeechEncoderConfig):
        super().__init__()
        # Submodules are registered in execution order; keep this order so the
        # parameter / state-dict ordering stays stable.
        self.ff1 = GraniteSpeechConformerFeedForward(config)
        self.attn = GraniteSpeechConformerAttention(config)
        self.conv = GraniteSpeechConformerConvModule(config)
        self.ff2 = GraniteSpeechConformerFeedForward(config)
        self.post_norm = nn.LayerNorm(config.hidden_dim)
    def forward(self, hidden_states: torch.Tensor, attention_dists: torch.Tensor) -> torch.Tensor:
        """Half-weighted feed-forward, attention, convolution, and a second
        half-weighted feed-forward — each added residually — then LayerNorm."""
        hidden_states = hidden_states + 0.5 * self.ff1(hidden_states)
        hidden_states = hidden_states + self.attn(hidden_states, attention_dists=attention_dists)
        hidden_states = hidden_states + self.conv(hidden_states)
        hidden_states = hidden_states + 0.5 * self.ff2(hidden_states)
        return self.post_norm(hidden_states)
| GraniteSpeechConformerBlock |
python | pandas-dev__pandas | pandas/tests/arrays/test_datetimelike.py | {
"start": 35927,
"end": 46924
class ____(SharedTests):
    """PeriodArray-specific specializations of the SharedTests suite for
    datetime-like extension arrays."""
    index_cls = PeriodIndex
    array_cls = PeriodArray
    scalar_type = Period
    example_dtype = PeriodIndex([], freq="W").dtype
    @pytest.fixture
    def arr1d(self, period_index):
        """
        Fixture returning DatetimeArray from parametrized PeriodIndex objects
        """
        return period_index._data
    def test_from_pi(self, arr1d):
        # Array -> Index construction should round-trip element-for-element.
        pi = self.index_cls(arr1d)
        arr = arr1d
        assert list(arr) == list(pi)
        # Check that Index.__new__ knows what to do with PeriodArray
        pi2 = pd.Index(arr)
        assert isinstance(pi2, PeriodIndex)
        assert list(pi2) == list(arr)
    def test_astype_object(self, arr1d):
        # astype("O") yields an object ndarray of Period scalars.
        pi = self.index_cls(arr1d)
        arr = arr1d
        asobj = arr.astype("O")
        assert isinstance(asobj, np.ndarray)
        assert asobj.dtype == "O"
        assert list(asobj) == list(pi)
    def test_take_fill_valid(self, arr1d):
        arr = arr1d
        value = NaT._value
        msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
        with pytest.raises(TypeError, match=msg):
            # require NaT, not iNaT, as it could be confused with an integer
            arr.take([-1, 1], allow_fill=True, fill_value=value)
        value = np.timedelta64("NaT", "ns")
        with pytest.raises(TypeError, match=msg):
            # require appropriate-dtype if we have a NA value
            arr.take([-1, 1], allow_fill=True, fill_value=value)
    @pytest.mark.parametrize("how", ["S", "E"])
    def test_to_timestamp(self, how, arr1d):
        # Array-level to_timestamp should match the Index-level conversion
        # for both period start ("S") and end ("E") anchors.
        pi = self.index_cls(arr1d)
        arr = arr1d
        expected = DatetimeIndex(pi.to_timestamp(how=how))._data
        result = arr.to_timestamp(how=how)
        assert isinstance(result, DatetimeArray)
        tm.assert_equal(result, expected)
    def test_to_timestamp_roundtrip_bday(self):
        # Case where infer_freq inside would choose "D" instead of "B"
        dta = pd.date_range("2021-10-18", periods=3, freq="B", unit="ns")._data
        parr = dta.to_period()
        result = parr.to_timestamp()
        assert result.freq == "B"
        tm.assert_extension_array_equal(result, dta)
        dta2 = dta[::2]
        parr2 = dta2.to_period()
        result2 = parr2.to_timestamp()
        assert result2.freq == "2B"
        tm.assert_extension_array_equal(result2, dta2)
        parr3 = dta.to_period("2B")
        result3 = parr3.to_timestamp()
        assert result3.freq == "B"
        tm.assert_extension_array_equal(result3, dta)
    def test_to_timestamp_out_of_bounds(self):
        # GH#19643 previously overflowed silently
        pi = pd.period_range("1500", freq="Y", periods=3)
        msg = "Out of bounds nanosecond timestamp: 1500-01-01 00:00:00"
        with pytest.raises(OutOfBoundsDatetime, match=msg):
            pi.to_timestamp()
        with pytest.raises(OutOfBoundsDatetime, match=msg):
            pi._data.to_timestamp()
    @pytest.mark.parametrize("propname", PeriodArray._bool_ops)
    def test_bool_properties(self, arr1d, propname):
        # in this case _bool_ops is just `is_leap_year`
        pi = self.index_cls(arr1d)
        arr = arr1d
        result = getattr(arr, propname)
        expected = np.array(getattr(pi, propname))
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize("propname", PeriodArray._field_ops)
    def test_int_properties(self, arr1d, propname):
        # Integer field accessors (year, month, ...) must agree with the Index.
        pi = self.index_cls(arr1d)
        arr = arr1d
        result = getattr(arr, propname)
        expected = np.array(getattr(pi, propname))
        tm.assert_numpy_array_equal(result, expected)
    def test_array_interface(self, arr1d):
        arr = arr1d
        # default asarray gives objects
        result = np.asarray(arr)
        expected = np.array(list(arr), dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        # to object dtype (same as default)
        result = np.asarray(arr, dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        # to int64 gives the underlying representation
        result = np.asarray(arr, dtype="int64")
        tm.assert_numpy_array_equal(result, arr.asi8)
        result2 = np.asarray(arr, dtype="int64")
        assert np.may_share_memory(result, result2)
        result_copy1 = np.array(arr, dtype="int64", copy=True)
        result_copy2 = np.array(arr, dtype="int64", copy=True)
        assert not np.may_share_memory(result_copy1, result_copy2)
        # to other dtypes
        msg = r"float\(\) argument must be a string or a( real)? number, not 'Period'"
        with pytest.raises(TypeError, match=msg):
            np.asarray(arr, dtype="float64")
        result = np.asarray(arr, dtype="S20")
        expected = np.asarray(arr).astype("S20")
        tm.assert_numpy_array_equal(result, expected)
    def test_strftime(self, arr1d, using_infer_string):
        arr = arr1d
        result = arr.strftime("%Y")
        expected = np.array([per.strftime("%Y") for per in arr], dtype=object)
        if using_infer_string:
            expected = pd.array(expected, dtype=pd.StringDtype(na_value=np.nan))
        tm.assert_equal(result, expected)
    def test_strftime_nat(self, using_infer_string):
        # GH 29578
        arr = PeriodArray(PeriodIndex(["2019-01-01", NaT], dtype="period[D]"))
        result = arr.strftime("%Y-%m-%d")
        expected = np.array(["2019-01-01", np.nan], dtype=object)
        if using_infer_string:
            expected = pd.array(expected, dtype=pd.StringDtype(na_value=np.nan))
        tm.assert_equal(result, expected)
@pytest.mark.parametrize(
    "arr,casting_nats",
    [
        (
            TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data,
            (NaT, np.timedelta64("NaT", "ns")),
        ),
        (
            pd.date_range("2000-01-01", periods=3, freq="D")._data,
            (NaT, np.datetime64("NaT", "ns")),
        ),
        (pd.period_range("2000-01-01", periods=3, freq="D")._data, (NaT,)),
    ],
    ids=lambda x: type(x).__name__,
)
def test_casting_nat_setitem_array(arr, casting_nats):
    # Each NaT flavor compatible with the array's dtype must be accepted by
    # __setitem__ and stored as the array's own NA value.
    expected = type(arr)._from_sequence([NaT, arr[1], arr[2]], dtype=arr.dtype)
    for nat in casting_nats:
        arr = arr.copy()
        arr[0] = nat
        tm.assert_equal(arr, expected)
@pytest.mark.parametrize(
    "arr,non_casting_nats",
    [
        (
            TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data,
            (np.datetime64("NaT", "ns"), NaT._value),
        ),
        (
            pd.date_range("2000-01-01", periods=3, freq="D")._data,
            (np.timedelta64("NaT", "ns"), NaT._value),
        ),
        (
            pd.period_range("2000-01-01", periods=3, freq="D")._data,
            (np.datetime64("NaT", "ns"), np.timedelta64("NaT", "ns"), NaT._value),
        ),
    ],
    ids=lambda x: type(x).__name__,
)
def test_invalid_nat_setitem_array(arr, non_casting_nats):
    # NaT flavors of the *wrong* kind — and the raw integer sentinel — must be
    # rejected by __setitem__ with a TypeError, not silently cast.
    msg = (
        "value should be a '(Timestamp|Timedelta|Period)', 'NaT', or array of those. "
        "Got '(timedelta64|datetime64|int)' instead."
    )
    for nat in non_casting_nats:
        with pytest.raises(TypeError, match=msg):
            arr[0] = nat
@pytest.mark.parametrize(
    "arr",
    [
        pd.date_range("2000", periods=4)._values,
        pd.timedelta_range("2000", periods=4)._values,
    ],
)
def test_to_numpy_extra(arr):
    # to_numpy must honor dtype/na_value arguments and must not mutate the
    # source array (checked at the end against `original`).
    arr[0] = NaT
    original = arr.copy()
    result = arr.to_numpy()
    assert np.isnan(result[0])
    # NaT as raw int64 is the minimum int64 value, per the assertion below.
    result = arr.to_numpy(dtype="int64")
    assert result[0] == -9223372036854775808
    result = arr.to_numpy(dtype="int64", na_value=0)
    assert result[0] == 0
    result = arr.to_numpy(na_value=arr[1].to_numpy())
    assert result[0] == result[1]
    result = arr.to_numpy(na_value=arr[1].to_numpy(copy=False))
    assert result[0] == result[1]
    tm.assert_equal(arr, original)
@pytest.mark.parametrize(
    "arr",
    [
        pd.date_range("2000", periods=4)._values,
        pd.timedelta_range("2000", periods=4)._values,
    ],
)
def test_to_numpy_extra_readonly(arr):
    # to_numpy results must always be writeable, even when the source array
    # is flagged read-only.
    arr[0] = NaT
    original = arr.copy()
    arr._readonly = True
    result = arr.to_numpy(dtype=object)
    assert result.flags.writeable
    # numpy does not do zero-copy conversion from M8 to i8
    result = arr.to_numpy(dtype="int64")
    assert result.flags.writeable
    tm.assert_equal(arr, original)
@pytest.mark.parametrize("as_index", [True, False])
@pytest.mark.parametrize(
    "values",
    [
        pd.to_datetime(["2020-01-01", "2020-02-01"]),
        pd.to_timedelta([1, 2], unit="D"),
        PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"),
    ],
)
@pytest.mark.parametrize(
    "klass",
    [
        list,
        np.array,
        pd.array,
        pd.Series,
        pd.Index,
        pd.Categorical,
        pd.CategoricalIndex,
    ],
)
def test_searchsorted_datetimelike_with_listlike(values, klass, as_index):
    # https://github.com/pandas-dev/pandas/issues/32762
    # searchsorted should accept any listlike of compatible values, both on
    # the Index (as_index=True) and on the underlying extension array.
    if not as_index:
        values = values._data
    result = values.searchsorted(klass(values))
    expected = np.array([0, 1], dtype=result.dtype)
    tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
    "values",
    [
        pd.to_datetime(["2020-01-01", "2020-02-01"]),
        pd.to_timedelta([1, 2], unit="D"),
        PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"),
    ],
)
@pytest.mark.parametrize(
    "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2]
)
def test_searchsorted_datetimelike_with_listlike_invalid_dtype(values, arg):
    # https://github.com/pandas-dev/pandas/issues/32762
    # Incompatible element types must raise TypeError rather than returning
    # meaningless insertion positions.
    msg = "[Unexpected type|Cannot compare]"
    with pytest.raises(TypeError, match=msg):
        values.searchsorted(arg)
@pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series])
def test_period_index_construction_from_strings(klass):
    # https://github.com/pandas-dev/pandas/issues/26109
    # Building a PeriodIndex from a container of strings must match
    # element-wise Period parsing.
    quarters = ["2020Q1", "2020Q2"] * 2
    container = klass(quarters)
    result = PeriodIndex(container, freq="Q")
    expected = PeriodIndex([Period(q) for q in quarters])
    tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])
def test_from_pandas_array(dtype):
    # GH#24615
    # A NumpyExtensionArray wrapping datetime64/timedelta64 data must be
    # accepted everywhere raw numpy data is: _from_sequence, the
    # to_datetime/to_timedelta converters, and the Index constructors.
    data = np.array([1, 2, 3], dtype=dtype)
    arr = NumpyExtensionArray(data)
    cls = {"M8[ns]": DatetimeArray, "m8[ns]": TimedeltaArray}[dtype]
    result = cls._from_sequence(arr, dtype=dtype)
    expected = cls._from_sequence(data, dtype=dtype)
    tm.assert_extension_array_equal(result, expected)
    func = {"M8[ns]": pd.to_datetime, "m8[ns]": pd.to_timedelta}[dtype]
    result = func(arr).array
    expected = func(data).array
    tm.assert_equal(result, expected)
    # Let's check the Indexes while we're here
    idx_cls = {"M8[ns]": DatetimeIndex, "m8[ns]": TimedeltaIndex}[dtype]
    result = idx_cls(arr)
    expected = idx_cls(data)
    tm.assert_index_equal(result, expected)
python | getlogbook__logbook | tests/conftest.py | {
"start": 1401,
"end": 2455
class ____:
    """Minimal ``os.PathLike`` implementation used to exercise APIs that
    accept arbitrary path-like objects."""
    def __init__(self, path):
        # Keep the raw value; __fspath__ hands it back verbatim.
        self.path = path
    def __fspath__(self):
        return self.path
@pytest.fixture(params=[Path, str, CustomPathLike])
def logfile(tmp_path, request):
    # Exercise every path flavor handlers should accept: pathlib.Path, plain
    # strings, and arbitrary os.PathLike objects.
    path = str(tmp_path / "logfile.log")
    return request.param(path)
@pytest.fixture
def default_handler(request):
    # Push a stderr handler for the duration of the test; the finalizer pops
    # it again even when the test fails.
    returned = logbook.StderrHandler()
    returned.push_application()
    request.addfinalizer(returned.pop_application)
    return returned
if importlib.util.find_spec("gevent") is not None:
    # When gevent is installed, run every module twice: once normally and
    # once with logbook's gevent-aware concurrency enabled.
    @pytest.fixture(
        scope="module", autouse=True, params=[False, True], ids=["nogevent", "gevent"]
    )
    def gevent(request):
        module_name = getattr(request.module, "__name__", "")
        # NOTE(review): queue/processor test modules are deliberately excluded
        # from the gevent parametrization — presumably incompatible with the
        # gevent concurrency switch; confirm before changing.
        if (
            not any(s in module_name for s in ("queues", "processors"))
            and request.param
        ):
            from logbook.concurrency import _disable_gevent, enable_gevent
            enable_gevent()
            @request.addfinalizer
            def fin():
                _disable_gevent()
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 65305,
"end": 65391
class ____(legend_key_spacing):
    # Backwards-compatible alias: inherits all behaviour from
    # legend_key_spacing unchanged.
    pass
@deprecated_themeable_name
| legend_entry_spacing |
python | scipy__scipy | benchmarks/benchmarks/test_functions.py | {
"start": 4366,
"end": 5494
class ____:
    """Hölder-table benchmark function for global optimizers.
    The known global minimum value is ``target_E`` at ``solution`` within the
    ``[-10, 10] x [-10, 10]`` search box.
    """
    target_E = -19.2085
    solution = [8.05502, 9.66459]
    xmin = np.array([-10, -10])
    xmax = np.array([10, 10])
    stepsize = 2.
    temperature = 2.
    def fun(self, x):
        """Evaluate the (negated) Hölder-table function at point ``x``."""
        radial = sqrt(x[0] ** 2 + x[1] ** 2)
        envelope = exp(abs(1. - radial / pi))
        return -abs(sin(x[0]) * cos(x[1]) * envelope)
    def dabs(self, x):
        """derivative of absolute value"""
        # Sign of x; defined as 0 at the origin.
        if x < 0:
            return -1.
        if x > 0:
            return 1.
        return 0.
    # An analytic gradient (`der`) used to live here, built from `dabs` and the
    # chain rule, but it was removed because it caused FloatingPointError in
    # basinhopping.
python | pypa__setuptools | setuptools/_vendor/more_itertools/more.py | {
"start": 128554,
"end": 148370
class ____:
    """Wrap *iterable* and keep a count of how many items have been consumed.
    The ``items_seen`` attribute starts at ``0`` and increments as the iterable
    is consumed:
    >>> iterable = map(str, range(10))
    >>> it = countable(iterable)
    >>> it.items_seen
    0
    >>> next(it), next(it)
    ('0', '1')
    >>> list(it)
    ['2', '3', '4', '5', '6', '7', '8', '9']
    >>> it.items_seen
    10
    """
    def __init__(self, iterable):
        # Hold a single iterator so iteration always resumes where it stopped.
        self._it = iter(iterable)
        self.items_seen = 0
    def __iter__(self):
        # The wrapper is its own iterator.
        return self
    def __next__(self):
        # Pull first, count second: an exhausted source raises StopIteration
        # before the counter is touched.
        produced = next(self._it)
        self.items_seen += 1
        return produced
def chunked_even(iterable, n):
    """Break *iterable* into lists of approximately length *n*.
    Items are distributed such the lengths of the lists differ by at most
    1 item.
    >>> iterable = [1, 2, 3, 4, 5, 6, 7]
    >>> n = 3
    >>> list(chunked_even(iterable, n))  # List lengths: 3, 2, 2
    [[1, 2, 3], [4, 5], [6, 7]]
    >>> list(chunked(iterable, n))  # List lengths: 3, 3, 1
    [[1, 2, 3], [4, 5, 6], [7]]
    """
    source = iter(iterable)
    # Hold back enough items so the trailing chunks can be evened out.
    holdback = (n - 1) * (n - 2)
    pending = list(islice(source, holdback))
    # Emit full-size chunks while the source keeps delivering complete
    # batches; a short batch means the tail has been reached.
    while True:
        batch = list(islice(source, n))
        pending.extend(batch)
        if len(batch) < n:
            break
        yield pending[:n]
        del pending[:n]
    if not pending:
        return
    remaining = len(pending)
    # Trailing chunks are either `chunk_len <= n` or `chunk_len - 1` long.
    whole, part = divmod(remaining, n)
    chunk_count = whole + (1 if part else 0)
    whole, part = divmod(remaining, chunk_count)
    chunk_len = whole + (1 if part else 0)
    short_len = chunk_len - 1
    long_count = remaining - short_len * chunk_count
    cut = long_count * chunk_len
    # Yield the longer chunks first, then the shorter ones.
    if chunk_len > 0:
        for start in range(0, cut, chunk_len):
            yield pending[start : start + chunk_len]
    if short_len > 0:
        for start in range(cut, remaining, short_len):
            yield pending[start : start + short_len]
def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False):
    """A version of :func:`zip` that "broadcasts" any scalar
    (i.e., non-iterable) items into output tuples.
    >>> iterable_1 = [1, 2, 3]
    >>> iterable_2 = ['a', 'b', 'c']
    >>> scalar = '_'
    >>> list(zip_broadcast(iterable_1, iterable_2, scalar))
    [(1, 'a', '_'), (2, 'b', '_'), (3, 'c', '_')]
    The *scalar_types* keyword argument determines what types are considered
    scalar. It is set to ``(str, bytes)`` by default. Set it to ``None`` to
    treat strings and byte strings as iterable:
    >>> list(zip_broadcast('abc', 0, 'xyz', scalar_types=None))
    [('a', 0, 'x'), ('b', 0, 'y'), ('c', 0, 'z')]
    If the *strict* keyword argument is ``True``, then
    ``UnequalIterablesError`` will be raised if any of the iterables have
    different lengths.
    """
    def is_scalar(obj):
        # Scalar if explicitly listed in *scalar_types*, or simply not
        # iterable at all.
        if scalar_types and isinstance(obj, scalar_types):
            return True
        try:
            iter(obj)
        except TypeError:
            return True
        else:
            return False
    size = len(objects)
    if not size:
        return
    # Scalars are written into their output slot once, up front; iterable
    # slots are refreshed on every zip step.
    new_item = [None] * size
    iterables, iterable_positions = [], []
    for i, obj in enumerate(objects):
        if is_scalar(obj):
            new_item[i] = obj
        else:
            iterables.append(iter(obj))
            iterable_positions.append(i)
    # All-scalar input: emit the single broadcast tuple.
    if not iterables:
        yield tuple(objects)
        return
    zipper = _zip_equal if strict else zip
    for item in zipper(*iterables):
        # Assign each iterable's current value into its slot in place; the
        # loop target `new_item[i]` does the work, so the body is empty.
        for i, new_item[i] in zip(iterable_positions, item):
            pass
        yield tuple(new_item)
def unique_in_window(iterable, n, key=None):
"""Yield the items from *iterable* that haven't been seen recently.
*n* is the size of the lookback window.
>>> iterable = [0, 1, 0, 2, 3, 0]
>>> n = 3
>>> list(unique_in_window(iterable, n))
[0, 1, 2, 3, 0]
The *key* function, if provided, will be used to determine uniqueness:
>>> list(unique_in_window('abAcda', 3, key=lambda x: x.lower()))
['a', 'b', 'c', 'd', 'a']
The items in *iterable* must be hashable.
"""
if n <= 0:
raise ValueError('n must be greater than 0')
window = deque(maxlen=n)
counts = defaultdict(int)
use_key = key is not None
for item in iterable:
if len(window) == n:
to_discard = window[0]
if counts[to_discard] == 1:
del counts[to_discard]
else:
counts[to_discard] -= 1
k = key(item) if use_key else item
if k not in counts:
yield item
counts[k] += 1
window.append(k)
def duplicates_everseen(iterable, key=None):
    """Yield duplicate elements after their first appearance.
    >>> list(duplicates_everseen('mississippi'))
    ['s', 'i', 's', 's', 'i', 'p', 'i']
    >>> list(duplicates_everseen('AaaBbbCccAaa', str.lower))
    ['a', 'a', 'b', 'b', 'c', 'c', 'A', 'a', 'a']
    This function is analogous to :func:`unique_everseen` and is subject to
    the same performance considerations.
    """
    # Hashable keys go in a set (O(1)); unhashable keys fall back to a
    # linear scan over a list, mirroring unique_everseen.
    hashed = set()
    unhashed = []
    for element in iterable:
        marker = element if key is None else key(element)
        try:
            first_time = marker not in hashed
            if first_time:
                hashed.add(marker)
        except TypeError:
            first_time = marker not in unhashed
            if first_time:
                unhashed.append(marker)
        if not first_time:
            yield element
def duplicates_justseen(iterable, key=None):
    """Yields serially-duplicate elements after their first appearance.
    >>> list(duplicates_justseen('mississippi'))
    ['s', 's', 'p']
    >>> list(duplicates_justseen('AaaBbbCccAaa', str.lower))
    ['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a']
    This function is analogous to :func:`unique_justseen`.
    """
    # For each run of equal-keyed elements, discard the first and emit the
    # rest. groupby guarantees every group has at least one element.
    for _, run in groupby(iterable, key):
        next(run)
        yield from run
def classify_unique(iterable, key=None):
    """Classify each element in terms of its uniqueness.
    For each element in the input iterable, return a 3-tuple consisting of:
    1. The element itself
    2. ``False`` if the element is equal to the one preceding it in the input,
       ``True`` otherwise (i.e. the equivalent of :func:`unique_justseen`)
    3. ``False`` if this element has been seen anywhere in the input before,
       ``True`` otherwise (i.e. the equivalent of :func:`unique_everseen`)
    >>> list(classify_unique('otto'))  # doctest: +NORMALIZE_WHITESPACE
    [('o', True, True),
     ('t', True, True),
     ('t', False, False),
     ('o', True, False)]
    This function is analogous to :func:`unique_everseen` and is subject to
    the same performance considerations.
    """
    seen_set = set()
    seen_list = []
    use_key = key is not None
    previous = None
    for i, element in enumerate(iterable):
        k = key(element) if use_key else element
        # Just-seen: first element, or key differs from the immediately
        # preceding one.
        is_unique_justseen = not i or previous != k
        previous = k
        is_unique_everseen = False
        try:
            if k not in seen_set:
                seen_set.add(k)
                is_unique_everseen = True
        except TypeError:
            # Unhashable keys fall back to a (slower) list-membership scan.
            if k not in seen_list:
                seen_list.append(k)
                is_unique_everseen = True
        yield element, is_unique_justseen, is_unique_everseen
def minmax(iterable_or_value, *others, key=None, default=_marker):
    """Returns both the smallest and largest items in an iterable
    or the largest of two or more arguments.
    >>> minmax([3, 1, 5])
    (1, 5)
    >>> minmax(4, 2, 6)
    (2, 6)
    If a *key* function is provided, it will be used to transform the input
    items for comparison.
    >>> minmax([5, 30], key=str)  # '30' sorts before '5'
    (30, 5)
    If a *default* value is provided, it will be returned if there are no
    input items.
    >>> minmax([], default=(0, 0))
    (0, 0)
    Otherwise ``ValueError`` is raised.
    This function is based on the
    `recipe <http://code.activestate.com/recipes/577916/>`__ by
    Raymond Hettinger and takes care to minimize the number of comparisons
    performed.
    """
    iterable = (iterable_or_value, *others) if others else iterable_or_value
    it = iter(iterable)
    try:
        lo = hi = next(it)
    except StopIteration as exc:
        if default is _marker:
            raise ValueError(
                '`minmax()` argument is an empty iterable. '
                'Provide a `default` value to suppress this error.'
            ) from exc
        return default
    # Different branches depending on the presence of key. This saves a lot
    # of unimportant copies which would slow the "key=None" branch
    # significantly down.
    if key is None:
        # Consume the iterator two at a time; when the count is odd, the
        # fillvalue reuses the first element, which is already accounted for.
        for x, y in zip_longest(it, it, fillvalue=lo):
            if y < x:
                x, y = y, x
            if x < lo:
                lo = x
            if hi < y:
                hi = y
    else:
        lo_key = hi_key = key(lo)
        for x, y in zip_longest(it, it, fillvalue=lo):
            x_key, y_key = key(x), key(y)
            if y_key < x_key:
                x, y, x_key, y_key = y, x, y_key, x_key
            if x_key < lo_key:
                lo, lo_key = x, x_key
            if hi_key < y_key:
                hi, hi_key = y, y_key
    return lo, hi
def constrained_batches(
    iterable, max_size, max_count=None, get_len=len, strict=True
):
    """Yield batches of items from *iterable* with a combined size limited by
    *max_size*.
    >>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
    >>> list(constrained_batches(iterable, 10))
    [(b'12345', b'123'), (b'12345678', b'1', b'1'), (b'12', b'1')]
    If a *max_count* is supplied, the number of items per batch is also
    limited:
    >>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
    >>> list(constrained_batches(iterable, 10, max_count = 2))
    [(b'12345', b'123'), (b'12345678', b'1'), (b'1', b'12'), (b'1',)]
    If a *get_len* function is supplied, use that instead of :func:`len` to
    determine item size.
    If *strict* is ``True``, raise ``ValueError`` if any single item is bigger
    than *max_size*. Otherwise, allow single items to exceed *max_size*.
    """
    if max_size <= 0:
        raise ValueError('maximum size must be greater than zero')
    batch = []
    batch_size = 0
    batch_count = 0
    for item in iterable:
        item_len = get_len(item)
        if strict and item_len > max_size:
            raise ValueError('item size exceeds maximum size')
        # When max_count is None this comparison is always False, so only
        # the size limit applies.
        reached_count = batch_count == max_count
        reached_size = item_len + batch_size > max_size
        # Flush the current batch before adding an item that would overflow
        # it; never flush an empty batch.
        if batch_count and (reached_size or reached_count):
            yield tuple(batch)
            batch.clear()
            batch_size = 0
            batch_count = 0
        batch.append(item)
        batch_size += item_len
        batch_count += 1
    if batch:
        yield tuple(batch)
def gray_product(*iterables):
    """Like :func:`itertools.product`, but return tuples in an order such
    that only one element in the generated tuple changes from one iteration
    to the next.
    >>> list(gray_product('AB','CD'))
    [('A', 'C'), ('B', 'C'), ('B', 'D'), ('A', 'D')]
    This function consumes all of the input iterables before producing output.
    If any of the input iterables have fewer than two items, ``ValueError``
    is raised.
    For information on the algorithm, see
    `this section <https://www-cs-faculty.stanford.edu/~knuth/fasc2a.ps.gz>`__
    of Donald Knuth's *The Art of Computer Programming*.
    """
    # All inputs are materialized up front so elements can be re-visited as
    # the direction vector sweeps back and forth.
    all_iterables = tuple(tuple(x) for x in iterables)
    iterable_count = len(all_iterables)
    for iterable in all_iterables:
        if len(iterable) < 2:
            raise ValueError("each iterable must have two or more items")
    # This is based on "Algorithm H" from section 7.2.1.1, page 20.
    # a holds the indexes of the source iterables for the n-tuple to be yielded
    # f is the array of "focus pointers"
    # o is the array of "directions"
    a = [0] * iterable_count
    f = list(range(iterable_count + 1))
    o = [1] * iterable_count
    while True:
        yield tuple(all_iterables[i][a[i]] for i in range(iterable_count))
        j = f[0]
        f[0] = 0
        if j == iterable_count:
            break
        a[j] = a[j] + o[j]
        # Reverse direction when an end of iterable j is reached.
        if a[j] == 0 or a[j] == len(all_iterables[j]) - 1:
            o[j] = -o[j]
            f[j] = f[j + 1]
            f[j + 1] = j + 1
def partial_product(*iterables):
    """Yields tuples containing one item from each iterator, with subsequent
    tuples changing a single item at a time by advancing each iterator until it
    is exhausted. This sequence guarantees every value in each iterable is
    output at least once without generating all possible combinations.
    This may be useful, for example, when testing an expensive function.
    >>> list(partial_product('AB', 'C', 'DEF'))
    [('A', 'C', 'D'), ('B', 'C', 'D'), ('B', 'C', 'E'), ('B', 'C', 'F')]
    """
    iterators = [iter(obj) for obj in iterables]
    # Seed with the first item of every iterable; any empty iterable means
    # there is nothing to yield at all.
    current = []
    for iterator in iterators:
        try:
            current.append(next(iterator))
        except StopIteration:
            return
    yield tuple(current)
    # Advance one position at a time while the others stay fixed.
    for position, iterator in enumerate(iterators):
        for current[position] in iterator:
            yield tuple(current)
def takewhile_inclusive(predicate, iterable):
    """A variant of :func:`takewhile` that yields one additional element.
    >>> list(takewhile_inclusive(lambda x: x < 5, [1, 4, 6, 4, 1]))
    [1, 4, 6]
    :func:`takewhile` would return ``[1, 4]``.
    """
    # Unlike itertools.takewhile, the first failing element is emitted before
    # iteration stops.
    for item in iterable:
        yield item
        if not predicate(item):
            return
def outer_product(func, xs, ys, *args, **kwargs):
    """A generalized outer product that applies a binary function to all
    pairs of items. Returns a 2D matrix with ``len(xs)`` rows and ``len(ys)``
    columns.
    Also accepts ``*args`` and ``**kwargs`` that are passed to ``func``.
    Multiplication table:
    >>> list(outer_product(mul, range(1, 4), range(1, 6)))
    [(1, 2, 3, 4, 5), (2, 4, 6, 8, 10), (3, 6, 9, 12, 15)]
    Cross tabulation:
    >>> xs = ['A', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B']
    >>> ys = ['X', 'X', 'X', 'Y', 'Z', 'Z', 'Y', 'Y', 'Z', 'Z']
    >>> rows = list(zip(xs, ys))
    >>> count_rows = lambda x, y: rows.count((x, y))
    >>> list(outer_product(count_rows, sorted(set(xs)), sorted(set(ys))))
    [(2, 3, 0), (1, 0, 4)]
    Usage with ``*args`` and ``**kwargs``:
    >>> animals = ['cat', 'wolf', 'mouse']
    >>> list(outer_product(min, animals, animals, key=len))
    [('cat', 'cat', 'cat'), ('cat', 'wolf', 'wolf'), ('cat', 'wolf', 'mouse')]
    """
    # ys must be reusable for every x, so it is materialized once.
    ys = tuple(ys)
    # Evaluate func over the cartesian product row-major, then regroup the
    # flat stream into rows of len(ys) — one row per element of xs.
    return batched(
        starmap(lambda x, y: func(x, y, *args, **kwargs), product(xs, ys)),
        n=len(ys),
    )
def iter_suppress(iterable, *exceptions):
    """Yield each of the items from *iterable*. If the iteration raises one of
    the specified *exceptions*, that exception will be suppressed and iteration
    will stop.
    >>> from itertools import chain
    >>> def breaks_at_five(x):
    ...     while True:
    ...         if x >= 5:
    ...             raise RuntimeError
    ...         yield x
    ...         x += 1
    >>> it_1 = iter_suppress(breaks_at_five(1), RuntimeError)
    >>> it_2 = iter_suppress(breaks_at_five(2), RuntimeError)
    >>> list(chain(it_1, it_2))
    [1, 2, 3, 4, 2, 3, 4]
    """
    try:
        # `yield from` preserves full generator delegation (send/throw/close);
        # only the listed exception types end the stream early — and silently.
        yield from iterable
    except exceptions:
        return
def filter_map(func, iterable):
    """Apply *func* to every element of *iterable*, yielding only those which
    are not ``None``.
    >>> elems = ['1', 'a', '2', 'b', '3']
    >>> list(filter_map(lambda s: int(s) if s.isnumeric() else None, elems))
    [1, 2, 3]
    """
    # Lazily map, then drop the None results. ``is not None`` (rather than
    # truthiness) keeps falsy-but-valid values such as 0 and ''.
    for transformed in map(func, iterable):
        if transformed is not None:
            yield transformed
def powerset_of_sets(iterable):
    """Yields all possible subsets of the iterable.
    >>> list(powerset_of_sets([1, 2, 3]))  # doctest: +SKIP
    [set(), {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}]
    >>> list(powerset_of_sets([1, 1, 0]))  # doctest: +SKIP
    [set(), {1}, {0}, {0, 1}]
    :func:`powerset_of_sets` takes care to minimize the number
    of hash operations performed.
    """
    # zip(iterable) wraps each element in a 1-tuple; frozenset of each
    # 1-tuple is hashable, so dict.fromkeys deduplicates the elements while
    # preserving first-seen order with a single hash per element.
    sets = tuple(map(set, dict.fromkeys(map(frozenset, zip(iterable)))))
    # Unioning every r-combination of the distinct singletons enumerates
    # each subset exactly once, smallest subsets first.
    for r in range(len(sets) + 1):
        yield from starmap(set().union, combinations(sets, r))
def join_mappings(**field_to_map):
    """
    Joins multiple mappings together using their common keys.
    >>> user_scores = {'elliot': 50, 'claris': 60}
    >>> user_times = {'elliot': 30, 'claris': 40}
    >>> join_mappings(score=user_scores, time=user_times)
    {'elliot': {'score': 50, 'time': 30}, 'claris': {'score': 60, 'time': 40}}
    """
    joined = {}
    for field_name, mapping in field_to_map.items():
        for key, value in mapping.items():
            # Create the per-key record on first sight, then attach the field.
            joined.setdefault(key, {})[field_name] = value
    return joined
def _complex_sumprod(v1, v2):
    """High precision sumprod() for complex numbers.
    Used by :func:`dft` and :func:`idft`.
    """
    # Real part: sum(re*re' - im*im'); imaginary part: sum(re*im' + im*re').
    # Each is computed as one high-precision sum-of-products via _fsumprod
    # (NOTE(review): presumably a math.fsum/sumprod-based alias — confirm the
    # import at the top of the module).
    r1 = chain((p.real for p in v1), (-p.imag for p in v1))
    r2 = chain((q.real for q in v2), (q.imag for q in v2))
    i1 = chain((p.real for p in v1), (p.imag for p in v1))
    i2 = chain((q.imag for q in v2), (q.real for q in v2))
    return complex(_fsumprod(r1, r2), _fsumprod(i1, i2))
def dft(xarr):
    """Discrete Fourier Tranform. *xarr* is a sequence of complex numbers.
    Yields the components of the corresponding transformed output vector.
    >>> import cmath
    >>> xarr = [1, 2-1j, -1j, -1+2j]
    >>> Xarr = [2, -2-2j, -2j, 4+4j]
    >>> all(map(cmath.isclose, dft(xarr), Xarr))
    True
    See :func:`idft` for the inverse Discrete Fourier Transform.
    """
    N = len(xarr)
    # Direct O(N^2) evaluation (not an FFT). The N-th roots of unity are
    # precomputed once and indexed modulo N instead of re-exponentiating.
    roots_of_unity = [e ** (n / N * tau * -1j) for n in range(N)]
    for k in range(N):
        coeffs = [roots_of_unity[k * n % N] for n in range(N)]
        yield _complex_sumprod(xarr, coeffs)
def idft(Xarr):
"""Inverse Discrete Fourier Tranform. *Xarr* is a sequence of
complex numbers. Yields the components of the corresponding
inverse-transformed output vector.
>>> import cmath
>>> xarr = [1, 2-1j, -1j, -1+2j]
>>> Xarr = [2, -2-2j, -2j, 4+4j]
>>> all(map(cmath.isclose, idft(Xarr), xarr))
True
See :func:`dft` for the Discrete Fourier Transform.
"""
N = len(Xarr)
roots_of_unity = [e ** (n / N * tau * 1j) for n in range(N)]
for k in range(N):
coeffs = [roots_of_unity[k * n % N] for n in range(N)]
yield _complex_sumprod(Xarr, coeffs) / N
def doublestarmap(func, iterable):
"""Apply *func* to every item of *iterable* by dictionary unpacking
the item into *func*.
The difference between :func:`itertools.starmap` and :func:`doublestarmap`
parallels the distinction between ``func(*a)`` and ``func(**a)``.
>>> iterable = [{'a': 1, 'b': 2}, {'a': 40, 'b': 60}]
>>> list(doublestarmap(lambda a, b: a + b, iterable))
[3, 100]
``TypeError`` will be raised if *func*'s signature doesn't match the
mapping contained in *iterable* or if *iterable* does not contain mappings.
"""
for item in iterable:
yield func(**item)
| countable |
python | pydata__xarray | xarray/core/utils.py | {
"start": 17388,
"end": 18617
} | class ____(Mapping[K, V]):
"""Implements the Mapping interface. Uses the wrapped mapping for item lookup
and a separate wrapped keys collection for iteration.
Can be used to construct a mapping object from another dict-like object without
eagerly accessing its items or when a mapping object is expected but only
iteration over keys is actually used.
Note: keys should be a subset of mapping, but FilteredMapping does not
validate consistency of the provided `keys` and `mapping`. It is the
caller's responsibility to ensure that they are suitable for the task at
hand.
"""
__slots__ = ("keys_", "mapping")
def __init__(self, keys: Collection[K], mapping: Mapping[K, V]):
self.keys_ = keys # .keys is already a property on Mapping
self.mapping = mapping
def __getitem__(self, key: K) -> V:
if key not in self.keys_:
raise KeyError(key)
return self.mapping[key]
def __iter__(self) -> Iterator[K]:
return iter(self.keys_)
def __len__(self) -> int:
return len(self.keys_)
def __repr__(self) -> str:
return f"{type(self).__name__}(keys={self.keys_!r}, mapping={self.mapping!r})"
| FilteredMapping |
python | redis__redis-py | redis/commands/json/__init__.py | {
"start": 4757,
"end": 4845
} | class ____(JSONCommands, redis.client.Pipeline):
"""Pipeline for the module."""
| Pipeline |
python | scrapy__scrapy | tests/test_utils_curl.py | {
"start": 178,
"end": 10182
} | class ____:
@staticmethod
def _test_command(curl_command: str, expected_result: dict[str, Any]) -> None:
result = curl_to_request_kwargs(curl_command)
assert result == expected_result
try:
Request(**result)
except TypeError as e:
pytest.fail(f"Request kwargs are not correct {e}")
def test_get(self):
curl_command = "curl http://example.org/"
expected_result = {"method": "GET", "url": "http://example.org/"}
self._test_command(curl_command, expected_result)
def test_get_without_scheme(self):
curl_command = "curl www.example.org"
expected_result = {"method": "GET", "url": "http://www.example.org"}
self._test_command(curl_command, expected_result)
def test_get_basic_auth(self):
curl_command = 'curl "https://api.test.com/" -u "some_username:some_password"'
expected_result = {
"method": "GET",
"url": "https://api.test.com/",
"headers": [
("Authorization", basic_auth_header("some_username", "some_password"))
],
}
self._test_command(curl_command, expected_result)
def test_get_complex(self):
curl_command = (
"curl 'http://httpbin.org/get' -H 'Accept-Encoding: gzip, deflate'"
" -H 'Accept-Language: en-US,en;q=0.9,ru;q=0.8,es;q=0.7' -H 'Upgra"
"de-Insecure-Requests: 1' -H 'User-Agent: Mozilla/5.0 (X11; Linux "
"x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/62"
".0.3202.75 Chrome/62.0.3202.75 Safari/537.36' -H 'Accept: text/ht"
"ml,application/xhtml+xml,application/xml;q=0.9,image/webp,image/a"
"png,*/*;q=0.8' -H 'Referer: http://httpbin.org/' -H 'Cookie: _gau"
"ges_unique_year=1; _gauges_unique=1; _gauges_unique_month=1; _gau"
"ges_unique_hour=1' -H 'Connection: keep-alive' --compressed -b '_"
"gauges_unique_day=1'"
)
expected_result = {
"method": "GET",
"url": "http://httpbin.org/get",
"headers": [
("Accept-Encoding", "gzip, deflate"),
("Accept-Language", "en-US,en;q=0.9,ru;q=0.8,es;q=0.7"),
("Upgrade-Insecure-Requests", "1"),
(
"User-Agent",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML"
", like Gecko) Ubuntu Chromium/62.0.3202.75 Chrome/62.0.32"
"02.75 Safari/537.36",
),
(
"Accept",
"text/html,application/xhtml+xml,application/xml;q=0.9,ima"
"ge/webp,image/apng,*/*;q=0.8",
),
("Referer", "http://httpbin.org/"),
("Connection", "keep-alive"),
],
"cookies": {
"_gauges_unique_year": "1",
"_gauges_unique_hour": "1",
"_gauges_unique_day": "1",
"_gauges_unique": "1",
"_gauges_unique_month": "1",
},
}
self._test_command(curl_command, expected_result)
def test_post(self):
curl_command = (
"curl 'http://httpbin.org/post' -X POST -H 'Cookie: _gauges_unique"
"_year=1; _gauges_unique=1; _gauges_unique_month=1; _gauges_unique"
"_hour=1; _gauges_unique_day=1' -H 'Origin: http://httpbin.org' -H"
" 'Accept-Encoding: gzip, deflate' -H 'Accept-Language: en-US,en;q"
"=0.9,ru;q=0.8,es;q=0.7' -H 'Upgrade-Insecure-Requests: 1' -H 'Use"
"r-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTM"
"L, like Gecko) Ubuntu Chromium/62.0.3202.75 Chrome/62.0.3202.75 S"
"afari/537.36' -H 'Content-Type: application/x-www-form-urlencoded"
"' -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0"
".9,image/webp,image/apng,*/*;q=0.8' -H 'Cache-Control: max-age=0'"
" -H 'Referer: http://httpbin.org/forms/post' -H 'Connection: keep"
"-alive' --data 'custname=John+Smith&custtel=500&custemail=jsmith%"
"40example.org&size=small&topping=cheese&topping=onion&delivery=12"
"%3A15&comments=' --compressed"
)
expected_result = {
"method": "POST",
"url": "http://httpbin.org/post",
"body": "custname=John+Smith&custtel=500&custemail=jsmith%40exampl"
"e.org&size=small&topping=cheese&topping=onion&delivery=12"
"%3A15&comments=",
"cookies": {
"_gauges_unique_year": "1",
"_gauges_unique_hour": "1",
"_gauges_unique_day": "1",
"_gauges_unique": "1",
"_gauges_unique_month": "1",
},
"headers": [
("Origin", "http://httpbin.org"),
("Accept-Encoding", "gzip, deflate"),
("Accept-Language", "en-US,en;q=0.9,ru;q=0.8,es;q=0.7"),
("Upgrade-Insecure-Requests", "1"),
(
"User-Agent",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML"
", like Gecko) Ubuntu Chromium/62.0.3202.75 Chrome/62.0.32"
"02.75 Safari/537.36",
),
("Content-Type", "application/x-www-form-urlencoded"),
(
"Accept",
"text/html,application/xhtml+xml,application/xml;q=0.9,ima"
"ge/webp,image/apng,*/*;q=0.8",
),
("Cache-Control", "max-age=0"),
("Referer", "http://httpbin.org/forms/post"),
("Connection", "keep-alive"),
],
}
self._test_command(curl_command, expected_result)
def test_post_data_raw(self):
curl_command = (
"curl 'https://www.example.org/' --data-raw 'excerptLength=200&ena"
"bleDidYouMean=true&sortCriteria=ffirstz32xnamez32x201740686%20asc"
"ending&queryFunctions=%5B%5D&rankingFunctions=%5B%5D'"
)
expected_result = {
"method": "POST",
"url": "https://www.example.org/",
"body": (
"excerptLength=200&enableDidYouMean=true&sortCriteria=ffirstz3"
"2xnamez32x201740686%20ascending&queryFunctions=%5B%5D&ranking"
"Functions=%5B%5D"
),
}
self._test_command(curl_command, expected_result)
def test_post_data_raw_with_string_prefix(self):
curl_command = "curl 'https://www.example.org/' --data-raw $'{\"$filters\":\"Filter\u0021\"}'"
expected_result = {
"method": "POST",
"url": "https://www.example.org/",
"body": '{"$filters":"Filter!"}',
}
self._test_command(curl_command, expected_result)
def test_explicit_get_with_data(self):
curl_command = "curl httpbin.org/anything -X GET --data asdf"
expected_result = {
"method": "GET",
"url": "http://httpbin.org/anything",
"body": "asdf",
}
self._test_command(curl_command, expected_result)
def test_patch(self):
curl_command = (
'curl "https://example.com/api/fake" -u "username:password" -H "Ac'
'cept: application/vnd.go.cd.v4+json" -H "Content-Type: applicatio'
'n/json" -X PATCH -d \'{"hostname": "agent02.example.com", "agent'
'_config_state": "Enabled", "resources": ["Java","Linux"], "enviro'
'nments": ["Dev"]}\''
)
expected_result = {
"method": "PATCH",
"url": "https://example.com/api/fake",
"headers": [
("Accept", "application/vnd.go.cd.v4+json"),
("Content-Type", "application/json"),
("Authorization", basic_auth_header("username", "password")),
],
"body": '{"hostname": "agent02.example.com", "agent_config_state"'
': "Enabled", "resources": ["Java","Linux"], "environments'
'": ["Dev"]}',
}
self._test_command(curl_command, expected_result)
def test_delete(self):
curl_command = 'curl -X "DELETE" https://www.url.com/page'
expected_result = {"method": "DELETE", "url": "https://www.url.com/page"}
self._test_command(curl_command, expected_result)
def test_get_silent(self):
curl_command = 'curl --silent "www.example.com"'
expected_result = {"method": "GET", "url": "http://www.example.com"}
assert curl_to_request_kwargs(curl_command) == expected_result
def test_too_few_arguments_error(self):
with pytest.raises(
ValueError,
match=r"too few arguments|the following arguments are required:\s*url",
):
curl_to_request_kwargs("curl")
def test_ignore_unknown_options(self):
# case 1: ignore_unknown_options=True:
with warnings.catch_warnings(): # avoid warning when executing tests
warnings.simplefilter("ignore")
curl_command = "curl --bar --baz http://www.example.com"
expected_result = {"method": "GET", "url": "http://www.example.com"}
assert curl_to_request_kwargs(curl_command) == expected_result
# case 2: ignore_unknown_options=False (raise exception):
with pytest.raises(ValueError, match=r"Unrecognized options:.*--bar.*--baz"):
curl_to_request_kwargs(
"curl --bar --baz http://www.example.com", ignore_unknown_options=False
)
def test_must_start_with_curl_error(self):
with pytest.raises(ValueError, match="A curl command must start"):
curl_to_request_kwargs("carl -X POST http://example.org")
| TestCurlToRequestKwargs |
python | django__django | tests/admin_views/models.py | {
"start": 17308,
"end": 17430
} | class ____(models.Model):
datum = models.DateField()
employee = models.ForeignKey(Employee, models.CASCADE)
| WorkHour |
python | openai__openai-python | src/openai/types/responses/response_input_item.py | {
"start": 2877,
"end": 3779
} | class ____(BaseModel):
call_id: str
"""The ID of the computer tool call that produced the output."""
output: ResponseComputerToolCallOutputScreenshot
"""A computer screenshot image used with the computer use tool."""
type: Literal["computer_call_output"]
"""The type of the computer tool call output. Always `computer_call_output`."""
id: Optional[str] = None
"""The ID of the computer tool call output."""
acknowledged_safety_checks: Optional[List[ComputerCallOutputAcknowledgedSafetyCheck]] = None
"""
The safety checks reported by the API that have been acknowledged by the
developer.
"""
status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
"""The status of the message input.
One of `in_progress`, `completed`, or `incomplete`. Populated when input items
are returned via API.
"""
| ComputerCallOutput |
python | PrefectHQ__prefect | tests/server/utilities/test_text_search_parser.py | {
"start": 18206,
"end": 19364
} | class ____:
"""Test the TextSearchQuery dataclass itself"""
def test_dataclass_creation(self):
query = TextSearchQuery(
include=["term1", "term2"], exclude=["excluded"], required=["required"]
)
assert query.include == ["term1", "term2"]
assert query.exclude == ["excluded"]
assert query.required == ["required"]
def test_dataclass_defaults(self):
query = TextSearchQuery()
assert query.include == []
assert query.exclude == []
assert query.required == []
def test_dataclass_equality(self):
query1 = TextSearchQuery(include=["test"], exclude=[], required=[])
query2 = TextSearchQuery(include=["test"], exclude=[], required=[])
assert query1 == query2
def test_dataclass_repr(self):
query = TextSearchQuery(include=["test"], exclude=["debug"], required=["error"])
repr_str = repr(query)
assert "include=['test']" in repr_str
assert "exclude=['debug']" in repr_str
assert "required=['error']" in repr_str
# Integration-style tests that verify the complete parsing flow
| TestDataclassStructure |
python | cython__cython | Cython/Debugger/libpython.py | {
"start": 83748,
"end": 83883
} | class ____(ExecutionControlCommandBase):
"Run the program."
invoke = dont_suppress_errors(ExecutionControlCommandBase.run)
| PyRun |
python | pytorch__pytorch | test/quantization/fx/test_numeric_suite_fx.py | {
"start": 30867,
"end": 32474
} | class ____(QuantizationTestCase):
@skipIfTorchDynamo("too slow")
@skipIfNoFBGEMM
@skip_if_no_torchvision
def test_mobilenet_v2(self):
# verify that mobilenetv2 graph is able to be matched
import torchvision
m = torchvision.models.__dict__['mobilenet_v2'](pretrained=False).eval().float()
example_inputs = (torch.randn(1, 3, 224, 224),)
mp = prepare_fx(copy.deepcopy(m), {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
# assume success if no exceptions
results_m_mp = get_matching_subgraph_pairs(torch.fx.symbolic_trace(m), mp)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
# assume success if no exceptions
results_mp_mq = get_matching_subgraph_pairs(mp, mq)
@skipIfNoFBGEMM
@skip_if_no_torchvision
def test_mobilenet_v2_qat(self):
# verify that mobilenetv2 graph is able to be matched
import torchvision
m = torchvision.models.__dict__['mobilenet_v2'](pretrained=False).float()
example_inputs = (torch.randn(1, 3, 224, 224),)
mp = prepare_qat_fx(
copy.deepcopy(m),
{'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')},
example_inputs=example_inputs)
# assume success if no exceptions
results_m_mp = get_matching_subgraph_pairs(torch.fx.symbolic_trace(m), mp)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
# assume success if no exceptions
results_mp_mq = get_matching_subgraph_pairs(mp, mq)
| TestFXGraphMatcherModels |
python | pytorch__pytorch | torch/testing/_internal/distributed/common_state_dict.py | {
"start": 457,
"end": 4665
} | class ____:
def _compare_tensor(self, orig_tensor, dist_tensor, offload_to_cpu=False):
if isinstance(dist_tensor, (DTensor, ShardedTensor)):
dist_tensor = _gather_state_dict({"mykey": dist_tensor}).pop("mykey")
if offload_to_cpu:
orig_tensor = orig_tensor.cpu()
dist_tensor = dist_tensor.cpu()
self.assertTrue(isinstance(dist_tensor, torch.Tensor))
self.assertTrue(torch.allclose(orig_tensor, dist_tensor))
def _verify_msd(
self,
msd: dict[str, Any],
dist_msd: dict[str, Any],
options: StateDictOptions = StateDictOptions(),
offload_to_cpu=False,
) -> None:
if not options.ignore_frozen_params:
self.assertEqual(len(msd), len(dist_msd))
for fqn, param in msd.items():
dist_param = dist_msd.get(fqn)
if not options.ignore_frozen_params:
self.assertIsNotNone(dist_param, f"{fqn=}")
try:
self._compare_tensor(param, dist_param, offload_to_cpu)
except AssertionError as e:
raise AssertionError(
f"{fqn} has mismatched value {param} {dist_param}"
) from e
elif dist_param is None:
self.assertFalse(param.requires_grad, f"{fqn=}")
def _verify_osd(
self,
model: nn.Module,
optim: torch.optim.Optimizer,
osd: dict[str, Any],
dist_osd: dict[str, Any],
) -> None:
params = list(chain.from_iterable(g["params"] for g in optim.param_groups))
param_pid_mapping = dict(zip(params, range(len(params)), strict=True))
fqn_pid_mapping = {}
for fqn, param in model.named_parameters():
pid = param_pid_mapping[param]
fqn_pid_mapping[fqn] = pid
fqn_pid_mapping[pid] = fqn
# Check optimizer_state_dict state
self.assertEqual(len(osd[_STATE]), len(dist_osd[_STATE]))
for pid, states in osd[_STATE].items():
fqn = fqn_pid_mapping[pid]
dist_states = dist_osd[_STATE].get(fqn, None)
self.assertIsNotNone(dist_states, fqn)
self.assertEqual(len(states), len(dist_states))
for key, state in states.items():
dist_state = states.get(key, None)
self.assertIsNotNone(dist_state)
self._compare_tensor(state, dist_state)
# Check optimizer_state_dict param_group
old_dist_osd_pg = dist_osd[_PG]
if len(osd[_PG]) != len(dist_osd[_PG]):
self.assertTrue(len(dist_osd[_PG]) > len(osd[_PG]))
new_pg = copy.deepcopy(dist_osd[_PG][0])
new_pg["params"] = []
for dist_group in dist_osd[_PG]:
new_pg["params"].extend(dist_group["params"])
dist_osd[_PG] = [new_pg]
self.assertEqual(len(osd[_PG]), len(dist_osd[_PG]))
for group, dist_group in zip(osd[_PG], dist_osd[_PG], strict=True):
self.assertEqual(len(group), len(dist_group))
for key, value in group.items():
# Below doesn't work because param_groups can have None
# values.
# dist_value = dist_group.get(key, None)
# self.assertIsNotNone(dist_value, (dist_group, group))
dist_value = dist_group[key]
if key == "params":
fqns = [fqn_pid_mapping[pid] for pid in value]
self.assertEqual(sorted(fqns), sorted(dist_value))
else:
self.assertEqual(value, dist_value)
dist_osd[_PG] = old_dist_osd_pg
def _verify_osd_by_load(
self,
model: nn.Module,
optim: torch.optim.Optimizer,
new_optim: torch.optim.Optimizer,
dist_osd: dict[str, Any],
) -> None:
new_dist_osd = _gather_state_dict(dist_osd)
set_state_dict(
model,
optimizers=new_optim,
model_state_dict={},
optim_state_dict=new_dist_osd,
)
self.assertEqual(optim.state_dict(), new_optim.state_dict())
| VerifyStateDictMixin |
python | pypa__setuptools | setuptools/_vendor/typing_extensions.py | {
"start": 2602,
"end": 4660
} | class ____:
def __repr__(self):
return "<sentinel>"
_marker = _Sentinel()
if sys.version_info >= (3, 10):
def _should_collect_from_parameters(t):
return isinstance(
t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType)
)
elif sys.version_info >= (3, 9):
def _should_collect_from_parameters(t):
return isinstance(t, (typing._GenericAlias, _types.GenericAlias))
else:
def _should_collect_from_parameters(t):
return isinstance(t, typing._GenericAlias) and not t._special
NoReturn = typing.NoReturn
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = typing.TypeVar('T') # Any type.
KT = typing.TypeVar('KT') # Key type.
VT = typing.TypeVar('VT') # Value type.
T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
if sys.version_info >= (3, 11):
from typing import Any
else:
class _AnyMeta(type):
def __instancecheck__(self, obj):
if self is Any:
raise TypeError("typing_extensions.Any cannot be used with isinstance()")
return super().__instancecheck__(obj)
def __repr__(self):
if self is Any:
return "typing_extensions.Any"
return super().__repr__()
class Any(metaclass=_AnyMeta):
"""Special type indicating an unconstrained type.
- Any is compatible with every type.
- Any assumed to have all methods.
- All values assumed to be instances of Any.
Note that all the above statements are true from the point of view of
static type checkers. At runtime, Any should not be used with instance
checks.
"""
def __new__(cls, *args, **kwargs):
if cls is Any:
raise TypeError("Any cannot be instantiated")
return super().__new__(cls, *args, **kwargs)
ClassVar = typing.ClassVar
| _Sentinel |
python | pandas-dev__pandas | asv_bench/benchmarks/frame_methods.py | {
"start": 13510,
"end": 13915
} | class ____:
params = ["float64", "Float64", "float64[pyarrow]"]
param_names = ["dtype"]
def setup(self, dtype):
data = np.random.randn(10000, 1000)
# all-na columns
data[:, 600:800] = np.nan
# partial-na columns
data[800:1000, 4000:5000] = np.nan
self.df = DataFrame(data, dtype=dtype)
def time_isna(self, dtype):
self.df.isna()
| Isna |
python | EpistasisLab__tpot | tpot/search_spaces/base.py | {
"start": 3640,
"end": 6634
} | class ____():
def __init__(self,):
pass
def generate(self, rng=None) -> SklearnIndividual:
pass
def flatten_graphpipeline(est):
flattened_full_graph = est.graph.copy()
#put ests into the node label from the attributes
flattened_full_graph = nx.relabel_nodes(flattened_full_graph, {n: flattened_full_graph.nodes[n]['instance'] for n in flattened_full_graph.nodes})
remove_list = []
for node in flattened_full_graph.nodes:
if isinstance(node, nx.DiGraph):
flattened = flatten_any(node)
roots = graph_utils.get_roots(flattened)
leaves = graph_utils.get_leaves(flattened)
n1_s = flattened_full_graph.successors(node)
n1_p = flattened_full_graph.predecessors(node)
remove_list.append(node)
flattened_full_graph = nx.compose(flattened_full_graph, flattened)
flattened_full_graph.add_edges_from([ (n2, n) for n in n1_s for n2 in leaves])
flattened_full_graph.add_edges_from([ (n, n2) for n in n1_p for n2 in roots])
for node in remove_list:
flattened_full_graph.remove_node(node)
return flattened_full_graph
def flatten_pipeline(est):
graph = nx.DiGraph()
steps = [flatten_any(s[1]) for s in est.steps]
#add steps to graph and connect them
for s in steps:
graph = nx.compose(graph, s)
#connect leaves of each step to the roots of the next step
for i in range(len(steps)-1):
roots = graph_utils.get_roots(steps[i])
leaves = graph_utils.get_leaves(steps[i+1])
graph.add_edges_from([ (l,r) for l in leaves for r in roots])
return graph
def flatten_estimator(est):
graph = nx.DiGraph()
graph.add_node(est)
return graph
def flatten_any(est):
if isinstance(est, tpot.GraphPipeline):
return flatten_graphpipeline(est)
elif isinstance(est, sklearn.pipeline.Pipeline):
return flatten_pipeline(est)
else:
return flatten_estimator(est)
def flatten_to_graphpipeline(est, **graphpipeline_kwargs):
#rename nodes to string representation of the instance and put the instance in the node attributes
flattened_full_graph = flatten_any(est)
instance_to_label = {}
label_to_instance = {}
for node in flattened_full_graph.nodes:
found_unique_label = False
i=1
while not found_unique_label:
new_label = f"{node.__class__.__name__}_{i}"
if new_label not in label_to_instance:
found_unique_label = True
i+=1
label_to_instance[new_label] = node
instance_to_label[node] = new_label
flattened_full_graph = nx.relabel_nodes(flattened_full_graph, instance_to_label)
for label, instance in label_to_instance.items():
flattened_full_graph.nodes[label]["instance"] = instance
return tpot.GraphPipeline(flattened_full_graph, **graphpipeline_kwargs) | SearchSpace |
python | huggingface__transformers | src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py | {
"start": 1964,
"end": 3552
} | class ____(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = config.image_size, config.patch_size
num_channels, hidden_size = config.num_channels, config.hidden_size
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
f" Expected {self.num_channels} but got {num_channels}."
)
embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
return embeddings
| Dinov2WithRegistersPatchEmbeddings |
python | sympy__sympy | sympy/assumptions/predicates/ntheory.py | {
"start": 1999,
"end": 2546
} | class ____(Predicate):
"""
Odd number predicate.
Explanation
===========
``ask(Q.odd(x))`` is true iff ``x`` belongs to the set of odd numbers.
Examples
========
>>> from sympy import Q, ask, pi
>>> ask(Q.odd(0))
False
>>> ask(Q.odd(2))
False
>>> ask(Q.odd(3))
True
>>> ask(Q.odd(pi))
False
"""
name = 'odd'
handler = Dispatcher(
"OddHandler",
doc=("Handler for key 'odd'. Test that an expression represents an odd"
" number.")
)
| OddPredicate |
python | redis__redis-py | tests/test_asyncio/test_pubsub.py | {
"start": 22777,
"end": 24411
} | class ____:
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("2.8.0")
async def test_pubsub_channels(self, r: redis.Redis, pubsub):
p = pubsub
await p.subscribe("foo", "bar", "baz", "quux")
for i in range(4):
assert (await wait_for_message(p))["type"] == "subscribe"
expected = [b"bar", b"baz", b"foo", b"quux"]
assert all([channel in await r.pubsub_channels() for channel in expected])
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("2.8.0")
async def test_pubsub_numsub(self, r: redis.Redis):
p1 = r.pubsub()
await p1.subscribe("foo", "bar", "baz")
for i in range(3):
assert (await wait_for_message(p1))["type"] == "subscribe"
p2 = r.pubsub()
await p2.subscribe("bar", "baz")
for i in range(2):
assert (await wait_for_message(p2))["type"] == "subscribe"
p3 = r.pubsub()
await p3.subscribe("baz")
assert (await wait_for_message(p3))["type"] == "subscribe"
channels = [(b"foo", 1), (b"bar", 2), (b"baz", 3)]
assert await r.pubsub_numsub("foo", "bar", "baz") == channels
await p1.aclose()
await p2.aclose()
await p3.aclose()
@skip_if_server_version_lt("2.8.0")
async def test_pubsub_numpat(self, r: redis.Redis):
p = r.pubsub()
await p.psubscribe("*oo", "*ar", "b*z")
for i in range(3):
assert (await wait_for_message(p))["type"] == "psubscribe"
assert await r.pubsub_numpat() == 3
await p.aclose()
@pytest.mark.onlynoncluster
| TestPubSubSubcommands |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor32.py | {
"start": 468,
"end": 546
} | class ____(type):
def __call__(cls: type[T], x: int, y: str) -> T: ...
| BMeta |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/battlenet/tests.py | {
"start": 457,
"end": 2697
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = BattleNetProvider.id
_uid = 123456789
_battletag = "LuckyDragon#1953"
def get_mocked_response(self):
data = {"battletag": self._battletag, "id": self._uid}
return MockedResponse(HTTPStatus.OK, json.dumps(data))
def get_expected_to_str(self):
return self._battletag
def test_valid_response_no_battletag(self):
data = {"id": 12345}
response = MockedResponse(HTTPStatus.OK, json.dumps(data))
self.assertEqual(_check_errors(response), data)
def test_invalid_data(self):
response = MockedResponse(HTTPStatus.OK, json.dumps({}))
with self.assertRaises(OAuth2Error):
# No id, raises
_check_errors(response)
def test_profile_invalid_response(self):
data = {
"code": HTTPStatus.FORBIDDEN,
"type": "Forbidden",
"detail": "Account Inactive",
}
response = MockedResponse(HTTPStatus.UNAUTHORIZED, json.dumps(data))
with self.assertRaises(OAuth2Error):
# no id, 4xx code, raises
_check_errors(response)
def test_error_response(self):
body = json.dumps({"error": "invalid_token"})
response = MockedResponse(HTTPStatus.BAD_REQUEST, body)
with self.assertRaises(OAuth2Error):
# no id, 4xx code, raises
_check_errors(response)
def test_service_not_found(self):
response = MockedResponse(596, "<h1>596 Service Not Found</h1>")
with self.assertRaises(OAuth2Error):
# bad json, 5xx code, raises
_check_errors(response)
def test_invalid_response(self):
response = MockedResponse(HTTPStatus.OK, "invalid json data")
with self.assertRaises(OAuth2Error):
# bad json, raises
_check_errors(response)
def test_extra_data(self):
self.login(self.get_mocked_response())
account = SocialAccount.objects.get(uid=str(self._uid))
self.assertEqual(account.extra_data["battletag"], self._battletag)
self.assertEqual(account.extra_data["id"], self._uid)
self.assertEqual(account.extra_data["region"], "us")
| BattleNetTests |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-cart/source_cart/streams.py | {
"start": 5297,
"end": 5436
} | class ____(IncrementalCartStream):
"""
Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1orders/get
"""
| Orders |
python | simonw__datasette | tests/test_plugins.py | {
"start": 47462,
"end": 63484
} | class ____:
__name__ = "SlotPlugin"
@hookimpl
def top_homepage(self, request):
return "Xtop_homepage:" + request.args["z"]
@hookimpl
def top_database(self, request, database):
async def inner():
return "Xtop_database:{}:{}".format(database, request.args["z"])
return inner
@hookimpl
def top_table(self, request, database, table):
return "Xtop_table:{}:{}:{}".format(database, table, request.args["z"])
@hookimpl
def top_row(self, request, database, table, row):
return "Xtop_row:{}:{}:{}:{}".format(
database, table, row["name"], request.args["z"]
)
@hookimpl
def top_query(self, request, database, sql):
return "Xtop_query:{}:{}:{}".format(database, sql, request.args["z"])
@hookimpl
def top_canned_query(self, request, database, query_name):
return "Xtop_query:{}:{}:{}".format(database, query_name, request.args["z"])
@pytest.mark.asyncio
async def test_hook_top_homepage():
datasette = Datasette(memory=True)
try:
datasette.pm.register(SlotPlugin(), name="SlotPlugin")
response = await datasette.client.get("/?z=foo")
assert response.status_code == 200
assert "Xtop_homepage:foo" in response.text
finally:
datasette.pm.unregister(name="SlotPlugin")
@pytest.mark.asyncio
async def test_hook_top_database():
datasette = Datasette(memory=True)
try:
datasette.pm.register(SlotPlugin(), name="SlotPlugin")
response = await datasette.client.get("/_memory?z=bar")
assert response.status_code == 200
assert "Xtop_database:_memory:bar" in response.text
finally:
datasette.pm.unregister(name="SlotPlugin")
@pytest.mark.asyncio
async def test_hook_top_table(ds_client):
try:
ds_client.ds.pm.register(SlotPlugin(), name="SlotPlugin")
response = await ds_client.get("/fixtures/facetable?z=baz")
assert response.status_code == 200
assert "Xtop_table:fixtures:facetable:baz" in response.text
finally:
ds_client.ds.pm.unregister(name="SlotPlugin")
@pytest.mark.asyncio
async def test_hook_top_row(ds_client):
try:
ds_client.ds.pm.register(SlotPlugin(), name="SlotPlugin")
response = await ds_client.get("/fixtures/facet_cities/1?z=bax")
assert response.status_code == 200
assert "Xtop_row:fixtures:facet_cities:San Francisco:bax" in response.text
finally:
ds_client.ds.pm.unregister(name="SlotPlugin")
@pytest.mark.asyncio
async def test_hook_top_query(ds_client):
try:
pm.register(SlotPlugin(), name="SlotPlugin")
response = await ds_client.get("/fixtures/-/query?sql=select+1&z=x")
assert response.status_code == 200
assert "Xtop_query:fixtures:select 1:x" in response.text
finally:
pm.unregister(name="SlotPlugin")
@pytest.mark.asyncio
async def test_hook_top_canned_query(ds_client):
try:
pm.register(SlotPlugin(), name="SlotPlugin")
response = await ds_client.get("/fixtures/from_hook?z=xyz")
assert response.status_code == 200
assert "Xtop_query:fixtures:from_hook:xyz" in response.text
finally:
pm.unregister(name="SlotPlugin")
@pytest.mark.asyncio
async def test_hook_track_event():
datasette = Datasette(memory=True)
from .conftest import TrackEventPlugin
await datasette.invoke_startup()
await datasette.track_event(
TrackEventPlugin.OneEvent(actor=None, extra="extra extra")
)
assert len(datasette._tracked_events) == 1
assert isinstance(datasette._tracked_events[0], TrackEventPlugin.OneEvent)
event = datasette._tracked_events[0]
assert event.name == "one"
assert event.properties() == {"extra": "extra extra"}
# Should have a recent created as well
created = event.created
assert isinstance(created, datetime.datetime)
assert created.tzinfo == datetime.timezone.utc
@pytest.mark.asyncio
async def test_hook_register_events():
datasette = Datasette(memory=True)
await datasette.invoke_startup()
assert any(k.__name__ == "OneEvent" for k in datasette.event_classes)
@pytest.mark.asyncio
async def test_hook_register_actions():
datasette = Datasette(memory=True, plugins_dir=PLUGINS_DIR)
await datasette.invoke_startup()
# Check that the custom action from my_plugin.py is registered
assert "view-collection" in datasette.actions
action = datasette.actions["view-collection"]
assert action.abbr == "vc"
assert action.description == "View a collection"
@pytest.mark.asyncio
async def test_hook_register_actions_with_custom_resources():
"""
Test registering actions with custom Resource classes:
- A global action (no resource)
- A parent-level action (DocumentCollectionResource)
- A child-level action (DocumentResource)
"""
from datasette.permissions import Resource, Action
# Define custom Resource classes
class DocumentCollectionResource(Resource):
"""A collection of documents."""
name = "document_collection"
parent_class = None # Top-level resource
def __init__(self, collection: str):
super().__init__(parent=collection, child=None)
@classmethod
async def resources_sql(cls, datasette) -> str:
return """
SELECT 'collection1' AS parent, NULL AS child
UNION ALL
SELECT 'collection2' AS parent, NULL AS child
"""
class DocumentResource(Resource):
"""A document in a collection."""
name = "document"
parent_class = DocumentCollectionResource # Child of DocumentCollectionResource
def __init__(self, collection: str, document: str):
super().__init__(parent=collection, child=document)
@classmethod
async def resources_sql(cls, datasette) -> str:
return """
SELECT 'collection1' AS parent, 'doc1' AS child
UNION ALL
SELECT 'collection1' AS parent, 'doc2' AS child
UNION ALL
SELECT 'collection2' AS parent, 'doc3' AS child
"""
# Define a test plugin that registers these actions
class TestPlugin:
__name__ = "test_custom_resources_plugin"
@hookimpl
def register_actions(self, datasette):
return [
# Global action - no resource_class
Action(
name="manage-documents",
abbr="md",
description="Manage the document system",
),
# Parent-level action - collection only
Action(
name="view-document-collection",
description="View a document collection",
resource_class=DocumentCollectionResource,
),
# Child-level action - collection + document
Action(
name="view-document",
abbr="vdoc",
description="View a document",
resource_class=DocumentResource,
),
]
@hookimpl
def permission_resources_sql(self, datasette, actor, action):
from datasette.permissions import PermissionSQL
# Grant user2 access to manage-documents globally
if actor and actor.get("id") == "user2" and action == "manage-documents":
return PermissionSQL.allow(reason="user2 granted manage-documents")
# Grant user2 access to view-document-collection globally
if (
actor
and actor.get("id") == "user2"
and action == "view-document-collection"
):
return PermissionSQL.allow(
reason="user2 granted view-document-collection"
)
# Default allow for view-document-collection (like other view-* actions)
if action == "view-document-collection":
return PermissionSQL.allow(
reason="default allow for view-document-collection"
)
# Default allow for view-document (like other view-* actions)
if action == "view-document":
return PermissionSQL.allow(reason="default allow for view-document")
# Register the plugin temporarily
plugin = TestPlugin()
pm.register(plugin, name="test_custom_resources_plugin")
try:
# Create datasette instance and invoke startup
datasette = Datasette(memory=True)
await datasette.invoke_startup()
# Test global action
manage_docs = datasette.actions["manage-documents"]
assert manage_docs.name == "manage-documents"
assert manage_docs.abbr == "md"
assert manage_docs.resource_class is None
assert manage_docs.takes_parent is False
assert manage_docs.takes_child is False
# Test parent-level action
view_collection = datasette.actions["view-document-collection"]
assert view_collection.name == "view-document-collection"
assert view_collection.abbr is None
assert view_collection.resource_class is DocumentCollectionResource
assert view_collection.takes_parent is True
assert view_collection.takes_child is False
# Test child-level action
view_doc = datasette.actions["view-document"]
assert view_doc.name == "view-document"
assert view_doc.abbr == "vdoc"
assert view_doc.resource_class is DocumentResource
assert view_doc.takes_parent is True
assert view_doc.takes_child is True
# Verify the resource classes have correct hierarchy
assert DocumentCollectionResource.parent_class is None
assert DocumentResource.parent_class is DocumentCollectionResource
# Test that resources can be instantiated correctly
collection_resource = DocumentCollectionResource(collection="collection1")
assert collection_resource.parent == "collection1"
assert collection_resource.child is None
doc_resource = DocumentResource(collection="collection1", document="doc1")
assert doc_resource.parent == "collection1"
assert doc_resource.child == "doc1"
# Test permission checks with restricted actors
# Test 1: Global action - no restrictions (custom actions default to deny)
unrestricted_actor = {"id": "user1"}
allowed = await datasette.allowed(
action="manage-documents",
actor=unrestricted_actor,
)
assert allowed is False # Custom actions have no default allow
# Test 2: Global action - user2 has explicit permission via plugin hook
restricted_global = {"id": "user2", "_r": {"a": ["md"]}}
allowed = await datasette.allowed(
action="manage-documents",
actor=restricted_global,
)
assert allowed is True # Granted by plugin hook for user2
# Test 3: Global action - restricted but not in allowlist
restricted_no_access = {"id": "user3", "_r": {"a": ["vdc"]}}
allowed = await datasette.allowed(
action="manage-documents",
actor=restricted_no_access,
)
assert allowed is False # Not in allowlist
# Test 4: Collection-level action - allowed for specific collection
collection_resource = DocumentCollectionResource(collection="collection1")
# This one does not have an abbreviation:
restricted_collection = {
"id": "user4",
"_r": {"d": {"collection1": ["view-document-collection"]}},
}
allowed = await datasette.allowed(
action="view-document-collection",
resource=collection_resource,
actor=restricted_collection,
)
assert allowed is True # Allowed for collection1
# Test 5: Collection-level action - denied for different collection
collection2_resource = DocumentCollectionResource(collection="collection2")
allowed = await datasette.allowed(
action="view-document-collection",
resource=collection2_resource,
actor=restricted_collection,
)
assert allowed is False # Not allowed for collection2
# Test 6: Document-level action - allowed for specific document
doc1_resource = DocumentResource(collection="collection1", document="doc1")
restricted_document = {
"id": "user5",
"_r": {"r": {"collection1": {"doc1": ["vdoc"]}}},
}
allowed = await datasette.allowed(
action="view-document",
resource=doc1_resource,
actor=restricted_document,
)
assert allowed is True # Allowed for collection1/doc1
# Test 7: Document-level action - denied for different document
doc2_resource = DocumentResource(collection="collection1", document="doc2")
allowed = await datasette.allowed(
action="view-document",
resource=doc2_resource,
actor=restricted_document,
)
assert allowed is False # Not allowed for collection1/doc2
# Test 8: Document-level action - globally allowed
doc_resource = DocumentResource(collection="collection2", document="doc3")
restricted_all_docs = {"id": "user6", "_r": {"a": ["vdoc"]}}
allowed = await datasette.allowed(
action="view-document",
resource=doc_resource,
actor=restricted_all_docs,
)
assert allowed is True # Globally allowed for all documents
# Test 9: Verify hierarchy - collection access doesn't grant document access
collection_only_actor = {"id": "user7", "_r": {"d": {"collection1": ["vdc"]}}}
doc_resource = DocumentResource(collection="collection1", document="doc1")
allowed = await datasette.allowed(
action="view-document",
resource=doc_resource,
actor=collection_only_actor,
)
assert (
allowed is False
) # Collection permission doesn't grant document permission
finally:
# Unregister the plugin
pm.unregister(plugin)
@pytest.mark.skip(reason="TODO")
@pytest.mark.parametrize(
"metadata,config,expected_metadata,expected_config",
(
(
# Instance level
{"plugins": {"datasette-foo": "bar"}},
{},
{},
{"plugins": {"datasette-foo": "bar"}},
),
(
# Database level
{"databases": {"foo": {"plugins": {"datasette-foo": "bar"}}}},
{},
{},
{"databases": {"foo": {"plugins": {"datasette-foo": "bar"}}}},
),
(
# Table level
{
"databases": {
"foo": {"tables": {"bar": {"plugins": {"datasette-foo": "bar"}}}}
}
},
{},
{},
{
"databases": {
"foo": {"tables": {"bar": {"plugins": {"datasette-foo": "bar"}}}}
}
},
),
(
# Keep other keys
{"plugins": {"datasette-foo": "bar"}, "other": "key"},
{"original_config": "original"},
{"other": "key"},
{"original_config": "original", "plugins": {"datasette-foo": "bar"}},
),
),
)
def test_metadata_plugin_config_treated_as_config(
metadata, config, expected_metadata, expected_config
):
ds = Datasette(metadata=metadata, config=config)
actual_metadata = ds.metadata()
assert "plugins" not in actual_metadata
assert actual_metadata == expected_metadata
assert ds.config == expected_config
| SlotPlugin |
python | google__jax | tests/lax_numpy_test.py | {
"start": 238540,
"end": 243038
} | class ____(jtu.JaxTestCase):
@parameterized.parameters(itertools.chain.from_iterable(
jtu.sample_product_testcases(
[dict(op=rec.op, rng_factory=rec.rng_factory, tol=rec.tol,
order=rec.order)],
shapes=itertools.combinations_with_replacement(nonempty_shapes, rec.nargs),
dtype=rec.dtypes)
for rec in GRAD_TEST_RECORDS))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
@jax.numpy_dtype_promotion('standard') # This test explicitly exercises mixed type promotion
def testOpGrad(self, op, rng_factory, shapes, dtype, order, tol):
rng = rng_factory(self.rng())
tol = jtu.join_tolerance(tol, {np.float32: 1e-1, np.float64: 1e-3,
np.complex64: 1e-1, np.complex128: 1e-3})
if jtu.test_device_matches(["tpu"]) and op == jnp.arctanh:
tol = jtu.join_tolerance(tol, {np.float32: 2e-1})
args = tuple(rng(shape, dtype) for shape in shapes)
check_grads(op, args, order, ["fwd", "rev"], tol, tol)
@parameterized.parameters(itertools.chain.from_iterable(
jtu.sample_product_testcases(
[dict(op=rec.op, order=rec.order)],
special_value=rec.values
)
for rec in GRAD_SPECIAL_VALUE_TEST_RECORDS))
def testOpGradSpecialValue(self, op, special_value, order):
check_grads(
op, (special_value,), order, ['fwd', 'rev'], atol={np.float32: 3.4e-3}
)
def testSincAtZero(self):
# Some manual tests for sinc at zero, since it doesn't have well-behaved
# numerical derivatives at zero
def deriv(f):
return lambda x: jax.jvp(f, (x,), (1.,))[1]
def apply_all(fns, x):
for f in fns:
x = f(x)
return x
d1 = 0.
for ops in itertools.combinations_with_replacement([deriv, jax.grad], 1):
self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d1)
d2 = -np.pi ** 2 / 3
for ops in itertools.combinations_with_replacement([deriv, jax.grad], 2):
self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d2)
d3 = 0.
for ops in itertools.combinations_with_replacement([deriv, jax.grad], 3):
self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d3)
d4 = np.pi ** 4 / 5
for ops in itertools.combinations_with_replacement([deriv, jax.grad], 4):
self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d4)
def testSincGradArrayInput(self):
# tests for a bug almost introduced in #5077
jax.grad(lambda x: jnp.sinc(x).sum())(jnp.arange(10.)) # doesn't crash
def testTakeAlongAxisIssue1521(self):
# https://github.com/jax-ml/jax/issues/1521
idx = jnp.repeat(jnp.arange(3), 10).reshape((30, 1))
def f(x):
y = x * jnp.arange(3.).reshape((1, 3))
return jnp.take_along_axis(y, idx, -1).sum()
check_grads(f, (1.,), order=1)
@jtu.sample_product(
shapes=filter(_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(nonempty_shapes, 2)),
dtype=(np.complex128,),
)
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testGradLogaddexpComplex(self, shapes, dtype):
rng = jtu.rand_default(self.rng())
args = tuple(jnp.array(rng(shape, dtype)) for shape in shapes)
if jtu.test_device_matches(["tpu"]):
tol = 5e-2
else:
tol = 3e-2
check_grads(jnp.logaddexp, args, 1, ["fwd", "rev"], tol, tol)
@jtu.sample_product(
shapes=filter(_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(nonempty_shapes, 2)),
dtype=(np.complex128,),
)
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testGradLogaddexp2Complex(self, shapes, dtype):
rng = jtu.rand_default(self.rng())
args = tuple(jnp.array(rng(shape, dtype)) for shape in shapes)
if jtu.test_device_matches(["tpu"]):
tol = 5e-2
else:
tol = 3e-2
check_grads(jnp.logaddexp2, args, 1, ["fwd", "rev"], tol, tol)
@jtu.sample_product(
n=range(-4, 5),
dtype=[jnp.float32, jnp.float64],
)
def testGradLdexp(self, n, dtype):
rng = jtu.rand_default(self.rng())
x = rng((10,), dtype)
check_grads(lambda x: jnp.ldexp(x, n), (x,), 1)
@jtu.sample_product(
n=range(-4, 5),
dtype=[jnp.float32, jnp.float64],
)
def testGradFrexp(self, n, dtype):
rng = jtu.rand_default(self.rng())
x = rng((10,), dtype) * 2 ** n
check_grads(lambda x: jnp.frexp(x)[0], (x,), 1)
| NumpyGradTests |
python | django__django | tests/user_commands/management/commands/mutually_exclusive_required.py | {
"start": 54,
"end": 910
} | class ____(BaseCommand):
def add_arguments(self, parser):
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--foo-id", type=int, nargs="?", default=None)
group.add_argument("--foo-name", type=str, nargs="?", default=None)
group.add_argument("--foo-list", type=int, nargs="+")
group.add_argument("--append_const", action="append_const", const=42)
group.add_argument("--const", action="store_const", const=31)
group.add_argument("--count", action="count")
group.add_argument("--flag_false", action="store_false")
group.add_argument("--flag_true", action="store_true")
def handle(self, *args, **options):
for option, value in options.items():
if value is not None:
self.stdout.write("%s=%s" % (option, value))
| Command |
python | dagster-io__dagster | python_modules/libraries/dagster-managed-elements/dagster_managed_elements/types.py | {
"start": 220,
"end": 967
} | class ____(enum.Enum):
CANNOT_CONNECT = "cannot_connect"
SANITIZE_KEY_KEYWORDS = [
"password",
"token",
"secret",
"ssh_key",
"credentials_json",
"access_key_id",
]
SANITIZE_KEY_EXACT_MATCHES = ["pat"]
SECRET_MASK_VALUE = "**********"
def is_key_secret(key: str):
"""Rudamentary check to see if a config key is a secret value."""
return any(keyword in key for keyword in SANITIZE_KEY_KEYWORDS) or any(
match == key for match in SANITIZE_KEY_EXACT_MATCHES
)
def _sanitize(key: str, value: str):
"""Rudamentary sanitization of values so we can avoid printing passwords
to the console.
"""
if is_key_secret(key):
return SECRET_MASK_VALUE
return value
| ManagedElementError |
python | google__jax | tests/tree_util_test.py | {
"start": 7999,
"end": 36238
} | class ____(jtu.JaxTestCase):
@parameterized.parameters(*(TREES + LEAVES))
def testRoundtrip(self, inputs):
xs, tree = tree_util.tree_flatten(inputs)
actual = tree_util.tree_unflatten(tree, xs)
self.assertEqual(actual, inputs)
@parameterized.parameters(*(TREES + LEAVES))
def testRoundtripWithFlattenUpTo(self, inputs):
_, tree = tree_util.tree_flatten(inputs)
xs = tree.flatten_up_to(inputs)
actual = tree_util.tree_unflatten(tree, xs)
self.assertEqual(actual, inputs)
@parameterized.parameters(
(tree_util.Partial(_dummy_func),),
(tree_util.Partial(_dummy_func, 1, 2),),
(tree_util.Partial(_dummy_func, x="a"),),
(tree_util.Partial(_dummy_func, 1, 2, 3, x=4, y=5),),
)
def testRoundtripPartial(self, inputs):
xs, tree = tree_util.tree_flatten(inputs)
actual = tree_util.tree_unflatten(tree, xs)
# functools.partial does not support equality comparisons:
# https://stackoverflow.com/a/32786109/809705
self.assertEqual(actual.func, inputs.func)
self.assertEqual(actual.args, inputs.args)
self.assertEqual(actual.keywords, inputs.keywords)
def testPartialDoesNotMergeWithOtherPartials(self):
def f(a, b, c): pass
g = functools.partial(f, 2)
h = tree_util.Partial(g, 3)
self.assertEqual(h.args, (3,))
def testPartialFuncAttributeHasStableHash(self):
# https://github.com/jax-ml/jax/issues/9429
fun = functools.partial(print, 1)
p1 = tree_util.Partial(fun, 2)
p2 = tree_util.Partial(fun, 2)
self.assertEqual(fun, p1.func)
self.assertEqual(p1.func, fun)
self.assertEqual(p1.func, p2.func)
self.assertEqual(hash(p1.func), hash(p2.func))
def testChildren(self):
_, tree = tree_util.tree_flatten(((1, 2, 3), (4,)))
_, c0 = tree_util.tree_flatten((0, 0, 0))
_, c1 = tree_util.tree_flatten((7,))
self.assertEqual([c0, c1], tree.children())
def testTreedefTupleFromChildren(self):
# https://github.com/jax-ml/jax/issues/7377
tree = ((1, 2, (3, 4)), (5,))
leaves, treedef1 = tree_util.tree_flatten(tree)
treedef2 = tree_util.treedef_tuple(treedef1.children())
self.assertEqual(treedef1.num_leaves, len(leaves))
self.assertEqual(treedef1.num_leaves, treedef2.num_leaves)
self.assertEqual(treedef1.num_nodes, treedef2.num_nodes)
def testTreedefTupleComparesEqual(self):
# https://github.com/jax-ml/jax/issues/9066
self.assertEqual(tree_util.tree_structure((3,)),
tree_util.treedef_tuple((tree_util.tree_structure(3),)))
def testFlattenOrder(self):
flat1, _ = tree_util.tree_flatten([0, ((1, 2), 3, (4, (5, 6, 7))), 8, 9])
flat2, _ = tree_util.tree_flatten([0, ((1, 2), 3, (4, (5, 6, 7))), 8, 9])
flat3, _ = tree_util.tree_flatten([0, ((1, (2, 3)), (4, (5, 6, 7))), 8, 9])
self.assertEqual(flat1, list(range(10)))
self.assertEqual(flat2, list(range(10)))
self.assertEqual(flat3, list(range(10)))
@parameterized.parameters(
(
[(1, 2), None, ATuple(foo=3, bar=7)],
[({"foo": 7}, (3, 4)), None, ATuple(foo=(11, 9), bar=None)],
[{"foo": 7}, (3, 4), (11, 9), None],
),
([1], [{"a": 7}], [{"a": 7}]),
([1], [[7]], [[7]]),
([1], [(7,)], [(7,)]),
((1, 2), ({"a": 7}, {"a": 8}), [{"a": 7}, {"a": 8}]),
((1,), ([7],), [[7]]),
((1,), ((7,),), [(7,)]),
({"a": 1, "b": (2, 3)}, {"a": [7], "b": ([8], (9,))}, [[7], [8], (9,)]),
({"a": 1}, {"a": (7,)}, [(7,)]),
({"a": 1}, {"a": {"a": 7}}, [{"a": 7}]),
(None, None, [])
)
def testFlattenUpTo(self, tree, xs, expected):
_, tree_def = tree_util.tree_flatten(tree)
out = tree_def.flatten_up_to(xs)
self.assertEqual(out, expected)
@parameterized.parameters(
([1, 2], [7], re.escape("List arity mismatch: 1 != 2; list: [7].")),
((1,), (7, 8), re.escape("Tuple arity mismatch: 2 != 1; tuple: (7, 8).")),
(
{"a": 1},
{"a": 7, "b": 8},
re.escape("Dict key mismatch; expected keys: ['a'];"),
),
(
{"a": 1},
{"b": 7},
re.escape("Dict key mismatch; expected keys: ['a'];"),
),
([1], {"a": 7}, re.escape("Expected list, got {'a': 7}.")),
([1], (7,), re.escape("Expected list, got (7,).")),
((1,), [7], re.escape("Expected tuple, got [7].")),
((1,), {"b": 7}, re.escape("Expected tuple, got {'b': 7}.")),
({"a": 1}, (7,), re.escape("Expected dict, got (7,).")),
({"a": 1}, [7], re.escape("Expected dict, got [7].")),
([[1]], [7], re.escape("Expected list, got 7.")),
([[1]], [(7,)], re.escape("Expected list, got (7,).")),
([[1]], [{"a": 7}], re.escape("Expected list, got {'a': 7}.")),
([(1,)], [7], re.escape("Expected tuple, got 7.")),
([(1,)], [[7]], re.escape("Expected tuple, got [7].")),
([(1,)], [{"a": 7}], re.escape("Expected tuple, got {'a': 7}.")),
([{"a": 1}], [7], re.escape("Expected dict, got 7.")),
([{"a": 1}], [[7]], re.escape("Expected dict, got [7].")),
([{"a": 1}], [(7,)], re.escape("Expected dict, got (7,).")),
(
[{"a": 1}],
[{"b": 7}],
re.escape("Dict key mismatch; expected keys: ['a'];"),
),
(([1],), (7,), re.escape("Expected list, got 7.")),
(([1],), ((7,),), re.escape("Expected list, got (7,).")),
(([1],), ({"a": 7},), re.escape("Expected list, got {'a': 7}.")),
(((1,),), (7,), re.escape("Expected tuple, got 7.")),
(((1,),), ([7],), re.escape("Expected tuple, got [7].")),
(((1,),), ({"a": 7},), re.escape("Expected tuple, got {'a': 7}.")),
(({"a": 1},), (7,), re.escape("Expected dict, got 7.")),
(({"a": 1},), ([7],), re.escape("Expected dict, got [7].")),
(({"a": 1},), ((7,),), re.escape("Expected dict, got (7,).")),
(
({"a": 1},),
({"b": 7},),
re.escape("Dict key mismatch; expected keys: ['a'];"),
),
({"a": [1]}, {"a": 7}, re.escape("Expected list, got 7.")),
({"a": [1]}, {"a": (7,)}, re.escape("Expected list, got (7,).")),
({"a": [1]}, {"a": {"a": 7}}, re.escape("Expected list, got {'a': 7}.")),
({"a": (1,)}, {"a": 7}, re.escape("Expected tuple, got 7.")),
({"a": (1,)}, {"a": [7]}, re.escape("Expected tuple, got [7].")),
(
{"a": (1,)},
{"a": {"a": 7}},
re.escape("Expected tuple, got {'a': 7}."),
),
({"a": {"a": 1}}, {"a": 7}, re.escape("Expected dict, got 7.")),
({"a": {"a": 1}}, {"a": [7]}, re.escape("Expected dict, got [7].")),
({"a": {"a": 1}}, {"a": (7,)}, re.escape("Expected dict, got (7,).")),
(
{"a": {"a": 1}},
{"a": {"b": 7}},
re.escape("Dict key mismatch; expected keys: ['a'];"),
),
(
[ATuple(foo=1, bar=2)],
[(1, 2)],
re.escape("Expected named tuple, got (1, 2)."),
),
(
[ATuple(foo=1, bar=2)],
[ATuple2(foo=1, bar=2)],
re.escape("Named tuple type mismatch"),
),
(
[AnObject(x=[1], y=(2,), z={"a": [1]})],
[([1], (2,), {"a": [1]})],
re.escape("Custom node type mismatch"),
),
((None, [2], re.escape("Expected None, got [2]."))),
)
def testFlattenUpToErrors(self, tree, xs, error):
_, tree_def = tree_util.tree_flatten(tree)
with self.assertRaisesRegex(ValueError, error):
tree_def.flatten_up_to(xs)
def testTreeMap(self):
x = ((1, 2), (3, 4, 5))
y = (([3], None), ({"foo": "bar"}, 7, [5, 6]))
out = tree_util.tree_map(lambda *xs: tuple(xs), x, y)
self.assertEqual(out, (((1, [3]), (2, None)),
((3, {"foo": "bar"}), (4, 7), (5, [5, 6]))))
def testTreeMapWithIsLeafArgument(self):
x = ((1, 2), [3, 4, 5])
y = (([3], None), ({"foo": "bar"}, 7, [5, 6]))
out = tree_util.tree_map(lambda *xs: tuple(xs), x, y,
is_leaf=lambda n: isinstance(n, list))
self.assertEqual(out, (((1, [3]), (2, None)),
(([3, 4, 5], ({"foo": "bar"}, 7, [5, 6])))))
def testTreeReduceWithIsLeafArgument(self):
out = tree_util.tree_reduce(lambda x, y: x + y, [(1, 2), [(3, 4), (5, 6)]],
is_leaf=lambda l: isinstance(l, tuple))
self.assertEqual(out, (1, 2, 3, 4, 5, 6))
def testTreeReduceAssociativeWithIsLeafArgument(self):
out = tree_util.tree_reduce_associative(
lambda x, y: x + y, [(1, 2), [(3, 4), (5, 6)]],
is_leaf=lambda l: isinstance(l, tuple),
)
self.assertEqual(out, (1, 2, 3, 4, 5, 6))
@parameterized.parameters(
tree_util.tree_leaves,
lambda tree, is_leaf: tree_util.tree_flatten(tree, is_leaf)[0])
def testFlattenIsLeaf(self, leaf_fn):
x = [(1, 2), (3, 4), (5, 6)]
leaves = leaf_fn(x, is_leaf=lambda t: False)
self.assertEqual(leaves, [1, 2, 3, 4, 5, 6])
leaves = leaf_fn(x, is_leaf=lambda t: isinstance(t, tuple))
self.assertEqual(leaves, x)
leaves = leaf_fn(x, is_leaf=lambda t: isinstance(t, list))
self.assertEqual(leaves, [x])
leaves = leaf_fn(x, is_leaf=lambda t: True)
self.assertEqual(leaves, [x])
y = [[[(1,)], [[(2,)], {"a": (3,)}]]]
leaves = leaf_fn(y, is_leaf=lambda t: isinstance(t, tuple))
self.assertEqual(leaves, [(1,), (2,), (3,)])
@parameterized.parameters(
tree_util.tree_structure,
lambda tree, is_leaf: tree_util.tree_flatten(tree, is_leaf)[1])
def testStructureIsLeaf(self, structure_fn):
x = [(1, 2), (3, 4), (5, 6)]
treedef = structure_fn(x, is_leaf=lambda t: False)
self.assertEqual(treedef.num_leaves, 6)
treedef = structure_fn(x, is_leaf=lambda t: isinstance(t, tuple))
self.assertEqual(treedef.num_leaves, 3)
treedef = structure_fn(x, is_leaf=lambda t: isinstance(t, list))
self.assertEqual(treedef.num_leaves, 1)
treedef = structure_fn(x, is_leaf=lambda t: True)
self.assertEqual(treedef.num_leaves, 1)
y = [[[(1,)], [[(2,)], {"a": (3,)}]]]
treedef = structure_fn(y, is_leaf=lambda t: isinstance(t, tuple))
self.assertEqual(treedef.num_leaves, 3)
@parameterized.parameters(*TREES)
def testRoundtripIsLeaf(self, tree):
xs, treedef = tree_util.tree_flatten(
tree, is_leaf=lambda t: isinstance(t, tuple))
recon_tree = tree_util.tree_unflatten(treedef, xs)
self.assertEqual(recon_tree, tree)
@parameterized.parameters(*TREES)
def testAllLeavesWithTrees(self, tree):
leaves = tree_util.tree_leaves(tree)
self.assertTrue(tree_util.all_leaves(leaves))
self.assertFalse(tree_util.all_leaves([tree]))
@parameterized.parameters(*LEAVES)
def testAllLeavesWithLeaves(self, leaf):
self.assertTrue(tree_util.all_leaves([leaf]))
@parameterized.parameters(*TREES)
def testAllLeavesWithTreesAndCustomIsLeaf(self, tree):
def is_leaf(t):
return tree_util.all_leaves([t])
self.assertFalse(tree_util.all_leaves([tree], is_leaf=is_leaf))
@parameterized.parameters(*LEAVES)
def testAllLeavesWithLeavesAndCustomIsLeaf(self, leaf):
def is_leaf(t):
return tree_util.all_leaves([t])
self.assertTrue(tree_util.all_leaves([leaf], is_leaf=is_leaf))
@parameterized.parameters(*TREES)
def testCompose(self, tree):
treedef = tree_util.tree_structure(tree)
inner_treedef = tree_util.tree_structure(["*", "*", "*"])
composed_treedef = treedef.compose(inner_treedef)
expected_leaves = treedef.num_leaves * inner_treedef.num_leaves
self.assertEqual(composed_treedef.num_leaves, expected_leaves)
expected_nodes = ((treedef.num_nodes - treedef.num_leaves) +
(inner_treedef.num_nodes * treedef.num_leaves))
self.assertEqual(composed_treedef.num_nodes, expected_nodes)
leaves = [1] * expected_leaves
composed = tree_util.tree_unflatten(composed_treedef, leaves)
self.assertEqual(leaves, tree_util.tree_leaves(composed))
@parameterized.parameters(*TREES)
def testTranspose(self, tree):
outer_treedef = tree_util.tree_structure(tree)
if not outer_treedef.num_leaves:
self.skipTest("Skipping empty tree")
def make_inner(x):
return [x, x, x]
inner_treedef = tree_util.tree_structure(make_inner(1))
nested = tree_util.tree_map(make_inner, tree)
actual = tree_util.tree_transpose(outer_treedef, inner_treedef, nested)
self.assertEqual(actual, make_inner(tree))
@parameterized.parameters(*TREES)
def testTransposeInferInnerTreedef(self, tree):
if isinstance(tree, FlatCache):
# The tree_map construction below fails for FlatCache, because
# the cached metadata becomes out of sync.
self.skipTest("Test does not work properly for FlatCache.")
outer_treedef = tree_util.tree_structure(tree)
if not outer_treedef.num_leaves:
self.skipTest("Skipping empty tree")
def make_inner(x):
return [x, {'a': x}, (x,)]
nested = tree_util.tree_map(make_inner, tree)
actual = tree_util.tree_transpose(outer_treedef, None, nested)
self.assertEqual(actual, make_inner(tree))
def testTransposeMismatchOuter(self):
tree = {"a": [1, 2], "b": [3, 4]}
outer_treedef = tree_util.tree_structure({"a": 1, "b": 2, "c": 3})
inner_treedef = tree_util.tree_structure([1, 2])
with self.assertRaisesRegex(TypeError, "Mismatch"):
tree_util.tree_transpose(outer_treedef, inner_treedef, tree)
def testTransposeMismatchInner(self):
tree = {"a": [1, 2], "b": [3, 4]}
outer_treedef = tree_util.tree_structure({"a": 1, "b": 2})
inner_treedef = tree_util.tree_structure([1, 2, 3])
with self.assertRaisesRegex(TypeError, "Mismatch"):
tree_util.tree_transpose(outer_treedef, inner_treedef, tree)
def testTransposeWithCustomObject(self):
outer_treedef = tree_util.tree_structure(FlatCache({"a": 1, "b": 2}))
inner_treedef = tree_util.tree_structure([1, 2])
expected = [FlatCache({"a": 3, "b": 5}), FlatCache({"a": 4, "b": 6})]
actual = tree_util.tree_transpose(outer_treedef, inner_treedef,
FlatCache({"a": [3, 4], "b": [5, 6]}))
self.assertEqual(expected, actual)
@parameterized.parameters(*TREES)
def testBroadcast(self, tree):
if isinstance(tree, FlatCache):
# The tree_map construction below fails for FlatCache, because
# the cached metadata becomes out of sync.
self.skipTest("Test does not work properly for FlatCache.")
def make_inner(x):
return [x, x, x]
nested = tree_util.tree_map(make_inner, tree)
actual = tree_util.tree_broadcast(tree, nested)
self.assertEqual(actual, nested)
actual_flat = broadcast_flattened_prefix_with_treedef(
*tree_util.tree_flatten(tree), tree_util.tree_structure(nested))
actual = tree_util.tree_structure(nested).unflatten(actual_flat)
self.assertEqual(actual, nested)
def testBroadcastSimple(self):
prefix = (1, 2, 3)
full = (0, {'a': 0, 'b': 0}, (0, 0))
actual = tree_util.tree_broadcast(prefix, full)
expected = (1, {'a': 2, 'b': 2}, (3, 3))
self.assertEqual(actual, expected)
def testBroadcastError(self):
prefix = (1, 2, 3)
full = (0, {'a': 0, 'b': 0})
with self.assertRaisesRegex(ValueError, "pytree structure error"):
tree_util.tree_broadcast(prefix, full)
with self.assertRaises(Exception):
broadcast_flattened_prefix_with_treedef(
*tree_util.tree_flatten(prefix), tree_util.tree_structure(full))
prefix = (1, 2)
full = (0, {'a': 0, 'b': 0}, (0, 0))
with self.assertRaisesRegex(ValueError, "pytree structure error"):
tree_util.tree_broadcast(prefix, full)
with self.assertRaises(Exception):
broadcast_flattened_prefix_with_treedef(
*tree_util.tree_flatten(prefix), tree_util.tree_structure(full))
prefix = (1, {'a': 0})
full = (0, {'a': 0, 'b': 0})
with self.assertRaisesRegex(ValueError, "pytree structure error"):
tree_util.tree_broadcast(prefix, full)
with self.assertRaises(Exception):
broadcast_flattened_prefix_with_treedef(
*tree_util.tree_flatten(prefix), tree_util.tree_structure(full))
@parameterized.parameters([(*t, s) for t, s in zip(TREES, TREE_STRINGS)])
def testStringRepresentation(self, tree, correct_string):
"""Checks that the string representation of a tree works."""
treedef = tree_util.tree_structure(tree)
self.assertRegex(str(treedef), correct_string)
def testTreeDefWithEmptyDictStringRepresentation(self):
self.assertEqual(str(tree_util.tree_structure({})), "PyTreeDef({})")
@parameterized.parameters(*TREES)
def testPickleRoundTrip(self, tree):
leaves, treedef = tree_util.tree_flatten(tree)
treedef_restored = pickle.loads(pickle.dumps(treedef))
self.assertEqual(treedef, treedef_restored)
reconstituted = treedef_restored.unflatten(leaves)
self.assertEqual(tree, reconstituted)
def testDictKeysSortable(self):
d = {"a": 1, 2: "b"}
with self.assertRaisesRegex(
(TypeError, ValueError),
"('<' not supported|Comparator raised exception).*"):
_, _ = tree_util.tree_flatten(d)
def testFlattenDictKeyOrder(self):
d = {"b": 2, "a": 1, "c": {"b": 2, "a": 1}}
leaves, treedef = tree_util.tree_flatten(d)
self.assertEqual(leaves, [1, 2, 1, 2])
self.assertEqual(
str(treedef), "PyTreeDef({'a': *, 'b': *, 'c': {'a': *, 'b': *}})"
)
restored_d = tree_util.tree_unflatten(treedef, leaves)
self.assertEqual(list(restored_d.keys()), ["a", "b", "c"])
def testFlattenDefaultDictKeyOrder(self):
d = collections.defaultdict(int,
{"b": 2, "a": 1, "c": {"b": 2, "a": 1}})
leaves, treedef = tree_util.tree_flatten(d)
self.assertEqual(leaves, [1, 2, 1, 2])
restored_d = tree_util.tree_unflatten(treedef, leaves)
self.assertEqual(list(restored_d.keys()), ["a", "b", "c"])
def testWalk(self):
d = {"b": 2, "a": 1, "c": {"b": 2, "a": 1}}
leaves, treedef = tree_util.tree_flatten(d)
nodes_visited = []
node_data_visited = []
leaves_visited = []
def f_node(node, node_data):
nodes_visited.append(node)
node_data_visited.append(node_data)
def f_leaf(leaf):
leaves_visited.append(leaf)
treedef.walk(f_node, f_leaf, leaves)
self.assertEqual(leaves_visited, [1, 2, 1, 2])
self.assertEqual(nodes_visited, [(None, None), (None, None, None)])
self.assertEqual(node_data_visited, [["a", "b"], ["a", "b", "c"]])
@parameterized.parameters(*(TREES_WITH_KEYPATH + LEAVES))
def testRoundtripWithPath(self, inputs):
key_leaves, treedef = tree_util.tree_flatten_with_path(inputs)
actual = tree_util.tree_unflatten(treedef, [leaf for _, leaf in key_leaves])
self.assertEqual(actual, inputs)
def testTreeMapWithPath(self):
tree = [{i: i for i in range(10)}]
all_zeros = tree_util.tree_map_with_path(
lambda kp, val: val - kp[1].key + kp[0].idx, tree
)
self.assertEqual(all_zeros, [{i: 0 for i in range(10)}])
def testTreeMapWithPathMultipleTrees(self):
tree1 = [AnObject2(x=12,
y={'cin': [1, 4, 10], 'bar': None},
z='constantdef'),
5]
tree2 = [AnObject2(x=2,
y={'cin': [2, 2, 2], 'bar': None},
z='constantdef'),
2]
from_two_trees = tree_util.tree_map_with_path(
lambda kp, a, b: a + b, tree1, tree2
)
from_one_tree = tree_util.tree_map(lambda a: a + 2, tree1)
self.assertEqual(from_two_trees, from_one_tree)
def testTreeLeavesWithPath(self):
tree = [{i: i for i in range(10)}]
actual = tree_util.tree_leaves_with_path(tree)
expected = [((tree_util.SequenceKey(0), tree_util.DictKey(i)), i)
for i in range(10)]
self.assertEqual(actual, expected)
def testKeyStr(self):
tree1 = [ATuple(12, {'cin': [1, 4, 10], 'bar': None}), jnp.arange(5)]
flattened, _ = tree_util.tree_flatten_with_path(tree1)
strs = [f"{tree_util.keystr(kp)}: {x}" for kp, x in flattened]
self.assertEqual(
strs,
[
"[0].foo: 12",
"[0].bar['cin'][0]: 1",
"[0].bar['cin'][1]: 4",
"[0].bar['cin'][2]: 10",
"[1]: [0 1 2 3 4]",
],
)
strs = [f"{tree_util.keystr(kp, simple=True, separator='/')}: {x}"
for kp, x in flattened]
self.assertEqual(
strs,
[
"0/foo: 12",
"0/bar/cin/0: 1",
"0/bar/cin/1: 4",
"0/bar/cin/2: 10",
"1: [0 1 2 3 4]",
],
)
def testTreeMapWithPathWithIsLeafArgument(self):
x = ((1, 2), [3, 4, 5])
y = (([3], jnp.array(0)), ([0], 7, [5, 6]))
out = tree_util.tree_map_with_path(
lambda kp, *xs: (kp[0].idx, *xs), x, y,
is_leaf=lambda _, n: isinstance(n, list), is_leaf_takes_path=True)
self.assertEqual(out, (((0, 1, [3]),
(0, 2, jnp.array(0))),
(1, [3, 4, 5], ([0], 7, [5, 6]))))
def testFlattenWithPathWithIsLeafArgument(self):
def is_empty(x):
try:
children, _ = flatten_one_level(x)
except ValueError:
return True # Cannot flatten x; means it must be a leaf
return len(children) == 0
EmptyTuple = collections.namedtuple("EmptyTuple", ())
tree1 = {'a': 1,
'sub': [jnp.array((1, 2)), ATuple(foo=(), bar=[None])],
'obj': AnObject2(x=EmptyTuple(), y=0, z='constantdef')}
is_empty_new = lambda kp, x: is_empty(x)
flattened, _ = tree_util.tree_flatten_with_path(
tree1, is_empty_new, is_leaf_takes_path=True
)
strs = [f"{tree_util.keystr(kp)}: {x}" for kp, x in flattened]
self.assertEqual(
strs,
[
"['a']: 1",
"['obj']x: EmptyTuple()",
"['obj']y: 0",
"['sub'][0]: [1 2]",
"['sub'][1].foo: ()",
"['sub'][1].bar[0]: None",
],
)
def testTreeFlattenWithPathWithIsLeafWithPathArgument(self):
x = ((1, 2), [3, {4: 4, 5: 5}])
check_max_depth = lambda kp, _: len(kp) >= 2
flattened, _ = tree_util.tree_flatten_with_path(
x, is_leaf=check_max_depth, is_leaf_takes_path=True
)
self.assertEqual(
flattened,
[
((SequenceKey(0), SequenceKey(0),), 1),
((SequenceKey(0), SequenceKey(1),), 2),
((SequenceKey(1), SequenceKey(0),), 3),
((SequenceKey(1), SequenceKey(1)), {4: 4, 5: 5}),
],
)
def testTreeMapWithPathWithIsLeafWithPathArgument(self):
x = ((1, 2), [3, 4, 5])
y = (([3], jnp.array(0)), ([0], 7, [5, 6]))
out = tree_util.tree_map_with_path(
lambda kp, *xs: (kp[0].idx, *xs), x, y,
is_leaf=lambda kp, n: isinstance(n, list), is_leaf_takes_path=True)
self.assertEqual(out, (((0, 1, [3]),
(0, 2, jnp.array(0))),
(1, [3, 4, 5], ([0], 7, [5, 6]))))
def testTreeFlattenWithPathBuiltin(self):
x = (1, {"a": 2, "b": 3})
flattened = tree_util.tree_flatten_with_path(x)
_, tdef = tree_util.tree_flatten(x)
self.assertEqual(
flattened[0],
[
((SequenceKey(0),), 1),
((SequenceKey(1), DictKey("a")), 2),
((SequenceKey(1), DictKey("b")), 3),
],
)
self.assertEqual(flattened[1], tdef)
def testTreeFlattenWithPathCustom(self):
x = [
AnObject2(
x=12,
y={"foo": SpecialWithKeys(x=2, y=3), "bar": None},
z="constantdef",
),
5,
]
flattened, _ = tree_util.tree_flatten_with_path(x)
self.assertEqual(
flattened,
[
((SequenceKey(0), "x"), 12),
((SequenceKey(0), "y", DictKey("foo"), GetAttrKey("x")), 2),
((SequenceKey(0), "y", DictKey("foo"), GetAttrKey("y")), 3),
((SequenceKey(1),), 5),
],
)
def testFlattenWithPathDefaultDict(self):
d = collections.defaultdict(int, {"b": 2, "a": 1, "c": {"b": 2, "a": 1}})
leaves, treedef = tree_util.tree_flatten_with_path(d)
self.assertEqual(
leaves,
[
((DictKey("a"),), 1),
((DictKey("b"),), 2),
((DictKey("c"), DictKey("a")), 1),
((DictKey("c"), DictKey("b")), 2),
],
)
restored_d = tree_util.tree_unflatten(treedef, [l for _, l in leaves])
self.assertEqual(list(restored_d.keys()), ["a", "b", "c"])
_, from_flatten = tree_util.tree_flatten(d)
self.assertEqual(treedef, from_flatten)
def testFlattenWithPathOrderedDict(self):
d = collections.OrderedDict({"b": 2, "a": 1, "c": {"b": 2, "a": 1}})
leaves, treedef = tree_util.tree_flatten_with_path(d)
self.assertEqual(
leaves,
[
((DictKey("b"),), 2),
((DictKey("a"),), 1),
((DictKey("c"), DictKey("a")), 1),
((DictKey("c"), DictKey("b")), 2),
],
)
restored_d = tree_util.tree_unflatten(treedef, [l for _, l in leaves])
self.assertEqual(list(restored_d.keys()), ["b", "a", "c"])
_, from_flatten = tree_util.tree_flatten(d)
self.assertEqual(treedef, from_flatten)
def testFlattenOneLevel(self):
EmptyTuple = collections.namedtuple("EmptyTuple", ())
tree1 = {'a': 1,
'sub': [jnp.array((1, 2)), ATuple(foo=(), bar=[None])],
'obj': AnObject2(x=EmptyTuple(), y=0, z='constantdef')}
self.assertEqual(flatten_one_level(tree1["sub"])[0],
tree1["sub"])
self.assertEqual(flatten_one_level(tree1["sub"][1])[0],
[(), [None]])
self.assertEqual(flatten_one_level(tree1["obj"])[0],
[EmptyTuple(), 0])
with self.assertRaisesRegex(ValueError, "can't tree-flatten type"):
flatten_one_level(1)
with self.assertRaisesRegex(ValueError, "can't tree-flatten type"):
flatten_one_level(jnp.array((1, 2)))
def testOptionalFlatten(self):
@tree_util.register_pytree_with_keys_class
class FooClass:
def __init__(self, x, y):
self.x = x
self.y = y
def tree_flatten(self):
return ((self.x, self.y), 'treedef')
def tree_flatten_with_keys(self):
return (((tree_util.GetAttrKey('x'), self.x),
(tree_util.GetAttrKey('x'), self.y)), 'treedef')
@classmethod
def tree_unflatten(cls, _, children):
return cls(*children)
tree = FooClass(x=1, y=2)
self.assertEqual(
str(tree_util.tree_flatten(tree)[1]),
"PyTreeDef(CustomNode(FooClass[treedef], [*, *]))",
)
self.assertEqual(
str(tree_util.tree_flatten_with_path(tree)[1]),
"PyTreeDef(CustomNode(FooClass[treedef], [*, *]))",
)
self.assertEqual(tree_util.tree_flatten(tree)[0],
[l for _, l in tree_util.tree_flatten_with_path(tree)[0]])
def testPyTreeWithoutKeysIsntTreatedAsLeaf(self):
leaves, _ = tree_util.tree_flatten_with_path(Special([1, 2], [3, 4]))
self.assertLen(leaves, 4)
def testNamedTupleRegisteredWithoutKeysIsntTreatedAsLeaf(self):
leaves, _ = tree_util.tree_flatten_with_path(ATuple2(1, 'hi'))
self.assertLen(leaves, 1)
def testBadFlattenNonTuple(self):
t = BadFlattenNonTuple(3, 4)
with self.assertRaisesRegex(
ValueError,
"The to_iterable function for a custom PyTree node should return a"
r" \(children, aux_data\) tuple, got 'hello'",
):
tree_util.tree_flatten(t)
def testBadFlattenBadArityTuple(self):
t = BadFlattenBadArityTuple(3, 4)
with self.assertRaisesRegex(
ValueError,
"The to_iterable function for a custom PyTree node should return a"
r" \(children, aux_data\) tuple, got \(2, 3, 4\)",
):
tree_util.tree_flatten(t)
def testBadFlattenNonIterableLeaves(self):
t = BadFlattenNonIterableLeaves(3, 4)
with self.assertRaisesRegex(
ValueError,
"The to_iterable function for a custom PyTree node should return a"
r" \(children, aux_data\) tuple where 'children' is iterable, got "
r"\(7, 7\)",
):
tree_util.tree_flatten(t)
| TreeTest |
python | getsentry__sentry | src/sentry/utils/services.py | {
"start": 1379,
"end": 14414
} | class ____:
"""
The delegator is a class that coordinates and delegates method execution to
multiple named backends that share a common API. It can be used to route
requests to different backends based on method arguments, as well as execute
the same request against multiple backends in parallel for testing backend
performance and data consistency.
The backends used for a method call are determined by a selector function
which is provided with the current ``Context``, the method name (as a
string) and arguments (in the form returned by ``inspect.getcallargs``) and
expected to return a list of strings which correspond to names in the
backend mapping. (This list should contain at least one member.) The first
item in the result list is considered the "primary backend". The remainder
of the items in the result list are considered "secondary backends". The
result value of the primary backend will be the result value of the
delegated method (to callers, this appears as a synchronous method call.)
The secondary backends are called asynchronously in the background when
using threaded executors (the default.) To receive the result values of
these method calls, provide a callback, described below. If the primary
backend name returned by the selector function doesn't correspond to any
registered backend, the function will raise a ``InvalidBackend`` exception.
If any referenced secondary backends are not registered names, they will be
discarded and logged.
The members and ordering of the selector function result (and thus the
primary and secondary backends for a method call) may vary from call to
call based on the calling arguments or some other state. For example, some
calls may use a different primary backend based on some piece of global
state (e.g. some property of a web request), or a secondary backend
undergoing testing may be included based on the result of a random number
generator (essentially calling it in the background for a sample of calls.)
If provided, the callback is called after all futures have completed, either
successfully or unsuccessfully. The function parameters are:
- the context,
- the method name (as a string),
- the calling arguments (as returned by ``inspect.getcallargs``),
- the backend names (as returned by the selector function),
- a list of results (as either a ``Future``, or ``None`` if the backend
was invalid) of the same length and ordering as the backend names.
Implementation notes:
- Only method access is delegated to the individual backends. Attribute
values are returned from the base backend. Only methods that are defined
on the base backend are eligible for delegation (since these methods are
considered the public API.) Ideally, backend classes are concrete classes
of the base abstract class, but this is not strictly enforced at runtime
with instance checks.
- The backend makes no attempt to synchronize common backend option values
between backends (e.g. TSDB rollup configuration) to ensure equivalency
of request parameters based on configuration.
- Each backend is associated with an executor pool which defaults to a
thread pool implementation unless otherwise specified in the backend
configuration. If the backend itself is not thread safe (due to socket
access, etc.), it's recommended to specify a pool size of 1 to ensure
exclusive access to resources. Each executor is started when the first
task is submitted.
- The threaded executor does not use a bounded queue by default. If there
are large throughput differences between the primary and secondary
backend(s), a significant backlog may accumulate. In extreme cases, this can
lead to memory exhaustion.
- The request is added to the request queue of the primary backend using a
blocking put. The request is added to the request queue(s) of the
secondary backend(s) as a non-blocking put (if these queues are full, the
request is rejected and the future will raise ``Queue.Full`` when
attempting to retrieve the result.)
"""
def __init__(
self,
base: type[Service],
backends: Mapping[str, tuple[Service, Executor]],
selector: Selector,
callback: Callback | None = None,
) -> None:
self.base = base
self.backends = backends
self.selector = selector
self.callback = callback
class InvalidBackend(Exception):
"""\
Exception raised when an invalid backend is returned by a selector
function.
"""
class State(threading.local):
def __init__(self) -> None:
self.context: Context | None = None
__state = State()
def __getattr__(self, attribute_name: str) -> Any:
# When deciding how to handle attribute accesses, we have three
# different possible outcomes:
# 1. If this is defined as a method on the base implementation, we are
# able delegate it to the backends based on the selector function.
# 2. If this is defined as an attribute on the base implementation, we
# are able to (immediately) return that as the value. (This also
# mirrors the behavior of ``LazyServiceWrapper``, which will cache
# any attribute access during ``expose``, so we can't delegate
# attribute access anyway when using this as a service interface.)
# 3. If this isn't defined at all on the base implementation, we let
# the ``AttributeError`` raised by ``getattr`` propagate (mirroring
# normal attribute access behavior for a missing/invalid name.)
base_value = getattr(self.base, attribute_name)
if not inspect.isroutine(base_value):
return base_value
def execute(*args: Any, **kwargs: Any) -> Any:
context = type(self).__state.context
# If there is no context object already set in the thread local
# state, we are entering the delegator for the first time and need
# to create a new context.
if context is None:
context = Context({})
# If this thread already has an active backend for this base class,
# we can safely call that backend synchronously without delegating.
if self.base in context.backends:
backend = context.backends[self.base]
return getattr(backend, attribute_name)(*args, **kwargs)
# Binding the call arguments to named arguments has two benefits:
# 1. These values always be passed in the same form to the selector
# function and callback, regardless of how they were passed to
# the method itself (as positional arguments, keyword arguments,
# etc.)
# 2. This ensures that the given arguments are those supported by
# the base backend itself, which should be a common subset of
# arguments that are supported by all backends.
callargs = inspect.getcallargs(base_value, None, *args, **kwargs)
selected_backend_names = list(self.selector(context, attribute_name, callargs))
if not len(selected_backend_names) > 0:
raise self.InvalidBackend("No backends returned by selector!")
# Ensure that the primary backend is actually registered -- we
# don't want to schedule any work on the secondaries if the primary
# request is going to fail anyway.
if selected_backend_names[0] not in self.backends:
raise self.InvalidBackend(
f"{selected_backend_names[0]!r} is not a registered backend."
)
def call_backend_method(context: Context, backend: Service, is_primary: bool) -> Any:
# Update the thread local state in the executor to the provided
# context object. This allows the context to be propagated
# across different threads.
assert type(self).__state.context is None
type(self).__state.context = context
# Ensure that we haven't somehow accidentally entered a context
# where the backend we're calling has already been marked as
# active (or worse, some other backend is already active.)
base = self.base
assert base not in context.backends
# Mark the backend as active.
context.backends[base] = backend
try:
return getattr(backend, attribute_name)(*args, **kwargs)
except Exception as e:
# If this isn't the primary backend, we log any
# exceptions so that they don't pass by unnoticed. (Any
# exceptions raised by the primary backend aren't logged
# here, since it's assumed that the caller will log them
# from the calling thread.)
if not is_primary:
logger.warning(
"%s caught in executor while calling %r on %s.",
type(e).__name__,
attribute_name,
type(backend).__name__,
exc_info=True,
)
raise
finally:
type(self).__state.context = None
# Enqueue all of the secondary backend requests first since these
# are non-blocking queue insertions. (Since the primary backend
# executor queue insertion can block, if that queue was full the
# secondary requests would have to wait unnecessarily to be queued
# until the after the primary request can be enqueued.)
# NOTE: If the same backend is both the primary backend *and* in
# the secondary backend list -- this is unlikely, but possible --
# this means that one of the secondary requests will be queued and
# executed before the primary request is queued. This is such a
# strange usage pattern that I don't think it's worth optimizing
# for.)
results: list[TimedFuture[Any] | None] = [None] * len(selected_backend_names)
for i, backend_name in enumerate(selected_backend_names[1:], 1):
try:
backend, executor = self.backends[backend_name]
except KeyError:
logger.warning(
"%r is not a registered backend and will be ignored.",
backend_name,
exc_info=True,
)
else:
results[i] = executor.submit(
functools.partial(
call_backend_method, context.copy(), backend, is_primary=False
),
priority=1,
block=False,
)
# The primary backend is scheduled last since it may block the
# calling thread. (We don't have to protect this from ``KeyError``
# since we already ensured that the primary backend exists.)
backend, executor = self.backends[selected_backend_names[0]]
result = results[0] = executor.submit(
functools.partial(call_backend_method, context.copy(), backend, is_primary=True),
priority=0,
block=True,
)
if self.callback is not None:
FutureSet([_f for _f in results if _f]).add_done_callback(
lambda *a, **k: self.callback(
context, attribute_name, callargs, selected_backend_names, results
)
)
return result.result()
return execute
def build_instance_from_options(
options: ServiceOptions,
*,
default_constructor: Callable[..., object] | None = None,
) -> object:
try:
path = options["path"]
except KeyError:
if default_constructor:
constructor = default_constructor
else:
raise
else:
constructor = resolve_callable(path)
return constructor(**options.get("options", {}))
def build_instance_from_options_of_type[T](
tp: type[T],
options: ServiceOptions,
*,
default_constructor: Callable[..., T] | None = None,
) -> T:
ret = build_instance_from_options(options, default_constructor=default_constructor)
if isinstance(ret, tp):
return ret
else:
raise TypeError(f"expected built object of type {tp}, got {type(ret)}")
| Delegator |
python | celery__celery | celery/exceptions.py | {
"start": 4231,
"end": 4346
} | class ____(CeleryWarning):
"""Celery hasn't been configured, as no config module has been found."""
| NotConfigured |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 10080,
"end": 10156
} | class ____(PydanticTypeError):
msg_template = 'str type expected'
| StrError |
python | catalyst-team__catalyst | examples/detection/models/yolo_x.py | {
"start": 2536,
"end": 2995
} | class ____(nn.Module):
"Residual layer with `in_channels` inputs."
def __init__(self, in_channels: int):
super().__init__()
mid_channels = in_channels // 2
self.layer1 = BaseConv(in_channels, mid_channels, ksize=1, stride=1, act="lrelu")
self.layer2 = BaseConv(mid_channels, in_channels, ksize=3, stride=1, act="lrelu")
def forward(self, x):
out = self.layer2(self.layer1(x))
return x + out
| ResLayer |
python | tiangolo__fastapi | tests/test_security_oauth2_optional_description.py | {
"start": 547,
"end": 11487
} | class ____(BaseModel):
username: str
def get_current_user(oauth_header: Optional[str] = Security(reusable_oauth2)):
if oauth_header is None:
return None
user = User(username=oauth_header)
return user
@app.post("/login")
def login(form_data: OAuth2PasswordRequestFormStrict = Depends()):
return form_data
@app.get("/users/me")
def read_users_me(current_user: Optional[User] = Depends(get_current_user)):
if current_user is None:
return {"msg": "Create an account first"}
return current_user
client = TestClient(app)
def test_security_oauth2():
response = client.get("/users/me", headers={"Authorization": "Bearer footokenbar"})
assert response.status_code == 200, response.text
assert response.json() == {"username": "Bearer footokenbar"}
def test_security_oauth2_password_other_header():
response = client.get("/users/me", headers={"Authorization": "Other footokenbar"})
assert response.status_code == 200, response.text
assert response.json() == {"username": "Other footokenbar"}
def test_security_oauth2_password_bearer_no_header():
response = client.get("/users/me")
assert response.status_code == 200, response.text
assert response.json() == {"msg": "Create an account first"}
def test_strict_login_None():
response = client.post("/login", data=None)
assert response.status_code == 422
assert response.json() == IsDict(
{
"detail": [
{
"type": "missing",
"loc": ["body", "grant_type"],
"msg": "Field required",
"input": None,
},
{
"type": "missing",
"loc": ["body", "username"],
"msg": "Field required",
"input": None,
},
{
"type": "missing",
"loc": ["body", "password"],
"msg": "Field required",
"input": None,
},
]
}
) | IsDict(
# TODO: remove when deprecating Pydantic v1
{
"detail": [
{
"loc": ["body", "grant_type"],
"msg": "field required",
"type": "value_error.missing",
},
{
"loc": ["body", "username"],
"msg": "field required",
"type": "value_error.missing",
},
{
"loc": ["body", "password"],
"msg": "field required",
"type": "value_error.missing",
},
]
}
)
def test_strict_login_no_grant_type():
response = client.post("/login", data={"username": "johndoe", "password": "secret"})
assert response.status_code == 422
assert response.json() == IsDict(
{
"detail": [
{
"type": "missing",
"loc": ["body", "grant_type"],
"msg": "Field required",
"input": None,
}
]
}
) | IsDict(
# TODO: remove when deprecating Pydantic v1
{
"detail": [
{
"loc": ["body", "grant_type"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
)
@pytest.mark.parametrize(
argnames=["grant_type"],
argvalues=[
pytest.param("incorrect", id="incorrect value"),
pytest.param("passwordblah", id="password with suffix"),
pytest.param("blahpassword", id="password with prefix"),
],
)
def test_strict_login_incorrect_grant_type(grant_type: str):
response = client.post(
"/login",
data={"username": "johndoe", "password": "secret", "grant_type": grant_type},
)
assert response.status_code == 422
assert response.json() == IsDict(
{
"detail": [
{
"type": "string_pattern_mismatch",
"loc": ["body", "grant_type"],
"msg": "String should match pattern '^password$'",
"input": grant_type,
"ctx": {"pattern": "^password$"},
}
]
}
) | IsDict(
# TODO: remove when deprecating Pydantic v1
{
"detail": [
{
"loc": ["body", "grant_type"],
"msg": 'string does not match regex "^password$"',
"type": "value_error.str.regex",
"ctx": {"pattern": "^password$"},
}
]
}
)
def test_strict_login_correct_correct_grant_type():
response = client.post(
"/login",
data={"username": "johndoe", "password": "secret", "grant_type": "password"},
)
assert response.status_code == 200, response.text
assert response.json() == {
"grant_type": "password",
"username": "johndoe",
"password": "secret",
"scopes": [],
"client_id": None,
"client_secret": None,
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/login": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Login",
"operationId": "login_login_post",
"requestBody": {
"content": {
"application/x-www-form-urlencoded": {
"schema": {
"$ref": "#/components/schemas/Body_login_login_post"
}
}
},
"required": True,
},
}
},
"/users/me": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read Users Me",
"operationId": "read_users_me_users_me_get",
"security": [{"OAuth2": []}],
}
},
},
"components": {
"schemas": {
"Body_login_login_post": {
"title": "Body_login_login_post",
"required": ["grant_type", "username", "password"],
"type": "object",
"properties": {
"grant_type": {
"title": "Grant Type",
"pattern": "^password$",
"type": "string",
},
"username": {"title": "Username", "type": "string"},
"password": {"title": "Password", "type": "string"},
"scope": {"title": "Scope", "type": "string", "default": ""},
"client_id": IsDict(
{
"title": "Client Id",
"anyOf": [{"type": "string"}, {"type": "null"}],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Client Id", "type": "string"}
),
"client_secret": IsDict(
{
"title": "Client Secret",
"anyOf": [{"type": "string"}, {"type": "null"}],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Client Secret", "type": "string"}
),
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
},
"securitySchemes": {
"OAuth2": {
"type": "oauth2",
"flows": {
"password": {
"scopes": {
"read:users": "Read the users",
"write:users": "Create users",
},
"tokenUrl": "token",
}
},
"description": "OAuth2 security scheme",
}
},
},
}
| User |
python | walkccc__LeetCode | solutions/900. RLE Iterator/900.py | {
"start": 0,
"end": 415
} | class ____:
def __init__(self, encoding: list[int]):
self.encoding = encoding
self.index = 0
def next(self, n: int) -> int:
while self.index < len(self.encoding) and self.encoding[self.index] < n:
n -= self.encoding[self.index]
self.index += 2
if self.index == len(self.encoding):
return -1
self.encoding[self.index] -= n
return self.encoding[self.index + 1]
| RLEIterator |
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"start": 6989,
"end": 7132
} | class ____(TrigRule):
"""integrate(sin(x), x) -> -cos(x)"""
def eval(self) -> Expr:
return -cos(self.variable)
@dataclass
| SinRule |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.