language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ansible__ansible | test/units/_internal/_errors/test_error_utils.py | {
"start": 722,
"end": 2032
} | class ____(Exception, _error_utils.ContributesToTaskResult):
@property
def result_contribution(self) -> c.Mapping[str, object]:
return dict(msg="contributed msg")
@pytest.mark.parametrize("exceptions,expected", (
(
(Exception("e0"), _TestContributesError("e1"), ValueError("e2")),
dict(failed=True, some_flag=True, msg="e0: e1: e2"),
),
(
(Exception("e0"), ValueError("e1"), _TestContributesError("e2")),
dict(failed=True, some_flag=True, msg="e0: e1: e2"),
),
(
(Exception("e0"), _TestContributesUnreachable("e1")),
dict(unreachable=True, msg="e0: e1"),
),
(
(Exception("e0"), _TestContributesMsg()),
dict(failed=True, msg="contributed msg"),
),
))
def test_exception_result_contribution(exceptions: t.Sequence[BaseException], expected: dict[str, t.Any]) -> None:
"""Validate result dict augmentation by exceptions conforming to the ContributeToTaskResult protocol."""
with pytest.raises(Exception) as error:
raise_exceptions(exceptions)
result = _error_utils.result_dict_from_exception(error.value, accept_result_contribution=True)
summary = result.pop('exception')
assert isinstance(summary, _messages.ErrorSummary)
assert result == expected
| _TestContributesMsg |
python | kubernetes-client__python | kubernetes/client/models/v1_object_meta.py | {
"start": 383,
"end": 28280
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'annotations': 'dict(str, str)',
'creation_timestamp': 'datetime',
'deletion_grace_period_seconds': 'int',
'deletion_timestamp': 'datetime',
'finalizers': 'list[str]',
'generate_name': 'str',
'generation': 'int',
'labels': 'dict(str, str)',
'managed_fields': 'list[V1ManagedFieldsEntry]',
'name': 'str',
'namespace': 'str',
'owner_references': 'list[V1OwnerReference]',
'resource_version': 'str',
'self_link': 'str',
'uid': 'str'
}
attribute_map = {
'annotations': 'annotations',
'creation_timestamp': 'creationTimestamp',
'deletion_grace_period_seconds': 'deletionGracePeriodSeconds',
'deletion_timestamp': 'deletionTimestamp',
'finalizers': 'finalizers',
'generate_name': 'generateName',
'generation': 'generation',
'labels': 'labels',
'managed_fields': 'managedFields',
'name': 'name',
'namespace': 'namespace',
'owner_references': 'ownerReferences',
'resource_version': 'resourceVersion',
'self_link': 'selfLink',
'uid': 'uid'
}
def __init__(self, annotations=None, creation_timestamp=None, deletion_grace_period_seconds=None, deletion_timestamp=None, finalizers=None, generate_name=None, generation=None, labels=None, managed_fields=None, name=None, namespace=None, owner_references=None, resource_version=None, self_link=None, uid=None, local_vars_configuration=None): # noqa: E501
"""V1ObjectMeta - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._annotations = None
self._creation_timestamp = None
self._deletion_grace_period_seconds = None
self._deletion_timestamp = None
self._finalizers = None
self._generate_name = None
self._generation = None
self._labels = None
self._managed_fields = None
self._name = None
self._namespace = None
self._owner_references = None
self._resource_version = None
self._self_link = None
self._uid = None
self.discriminator = None
if annotations is not None:
self.annotations = annotations
if creation_timestamp is not None:
self.creation_timestamp = creation_timestamp
if deletion_grace_period_seconds is not None:
self.deletion_grace_period_seconds = deletion_grace_period_seconds
if deletion_timestamp is not None:
self.deletion_timestamp = deletion_timestamp
if finalizers is not None:
self.finalizers = finalizers
if generate_name is not None:
self.generate_name = generate_name
if generation is not None:
self.generation = generation
if labels is not None:
self.labels = labels
if managed_fields is not None:
self.managed_fields = managed_fields
if name is not None:
self.name = name
if namespace is not None:
self.namespace = namespace
if owner_references is not None:
self.owner_references = owner_references
if resource_version is not None:
self.resource_version = resource_version
if self_link is not None:
self.self_link = self_link
if uid is not None:
self.uid = uid
@property
def annotations(self):
"""Gets the annotations of this V1ObjectMeta. # noqa: E501
Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations # noqa: E501
:return: The annotations of this V1ObjectMeta. # noqa: E501
:rtype: dict(str, str)
"""
return self._annotations
@annotations.setter
def annotations(self, annotations):
"""Sets the annotations of this V1ObjectMeta.
Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations # noqa: E501
:param annotations: The annotations of this V1ObjectMeta. # noqa: E501
:type: dict(str, str)
"""
self._annotations = annotations
@property
def creation_timestamp(self):
"""Gets the creation_timestamp of this V1ObjectMeta. # noqa: E501
CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata # noqa: E501
:return: The creation_timestamp of this V1ObjectMeta. # noqa: E501
:rtype: datetime
"""
return self._creation_timestamp
@creation_timestamp.setter
def creation_timestamp(self, creation_timestamp):
"""Sets the creation_timestamp of this V1ObjectMeta.
CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata # noqa: E501
:param creation_timestamp: The creation_timestamp of this V1ObjectMeta. # noqa: E501
:type: datetime
"""
self._creation_timestamp = creation_timestamp
@property
def deletion_grace_period_seconds(self):
"""Gets the deletion_grace_period_seconds of this V1ObjectMeta. # noqa: E501
Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. # noqa: E501
:return: The deletion_grace_period_seconds of this V1ObjectMeta. # noqa: E501
:rtype: int
"""
return self._deletion_grace_period_seconds
@deletion_grace_period_seconds.setter
def deletion_grace_period_seconds(self, deletion_grace_period_seconds):
"""Sets the deletion_grace_period_seconds of this V1ObjectMeta.
Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. # noqa: E501
:param deletion_grace_period_seconds: The deletion_grace_period_seconds of this V1ObjectMeta. # noqa: E501
:type: int
"""
self._deletion_grace_period_seconds = deletion_grace_period_seconds
@property
def deletion_timestamp(self):
"""Gets the deletion_timestamp of this V1ObjectMeta. # noqa: E501
DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata # noqa: E501
:return: The deletion_timestamp of this V1ObjectMeta. # noqa: E501
:rtype: datetime
"""
return self._deletion_timestamp
@deletion_timestamp.setter
def deletion_timestamp(self, deletion_timestamp):
"""Sets the deletion_timestamp of this V1ObjectMeta.
DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata # noqa: E501
:param deletion_timestamp: The deletion_timestamp of this V1ObjectMeta. # noqa: E501
:type: datetime
"""
self._deletion_timestamp = deletion_timestamp
@property
def finalizers(self):
"""Gets the finalizers of this V1ObjectMeta. # noqa: E501
Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list. # noqa: E501
:return: The finalizers of this V1ObjectMeta. # noqa: E501
:rtype: list[str]
"""
return self._finalizers
@finalizers.setter
def finalizers(self, finalizers):
"""Sets the finalizers of this V1ObjectMeta.
Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list. # noqa: E501
:param finalizers: The finalizers of this V1ObjectMeta. # noqa: E501
:type: list[str]
"""
self._finalizers = finalizers
@property
def generate_name(self):
"""Gets the generate_name of this V1ObjectMeta. # noqa: E501
GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will return a 409. Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency # noqa: E501
:return: The generate_name of this V1ObjectMeta. # noqa: E501
:rtype: str
"""
return self._generate_name
@generate_name.setter
def generate_name(self, generate_name):
"""Sets the generate_name of this V1ObjectMeta.
GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will return a 409. Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency # noqa: E501
:param generate_name: The generate_name of this V1ObjectMeta. # noqa: E501
:type: str
"""
self._generate_name = generate_name
@property
def generation(self):
"""Gets the generation of this V1ObjectMeta. # noqa: E501
A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. # noqa: E501
:return: The generation of this V1ObjectMeta. # noqa: E501
:rtype: int
"""
return self._generation
@generation.setter
def generation(self, generation):
"""Sets the generation of this V1ObjectMeta.
A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. # noqa: E501
:param generation: The generation of this V1ObjectMeta. # noqa: E501
:type: int
"""
self._generation = generation
@property
def labels(self):
"""Gets the labels of this V1ObjectMeta. # noqa: E501
Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels # noqa: E501
:return: The labels of this V1ObjectMeta. # noqa: E501
:rtype: dict(str, str)
"""
return self._labels
@labels.setter
def labels(self, labels):
"""Sets the labels of this V1ObjectMeta.
Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels # noqa: E501
:param labels: The labels of this V1ObjectMeta. # noqa: E501
:type: dict(str, str)
"""
self._labels = labels
@property
def managed_fields(self):
"""Gets the managed_fields of this V1ObjectMeta. # noqa: E501
ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object. # noqa: E501
:return: The managed_fields of this V1ObjectMeta. # noqa: E501
:rtype: list[V1ManagedFieldsEntry]
"""
return self._managed_fields
@managed_fields.setter
def managed_fields(self, managed_fields):
"""Sets the managed_fields of this V1ObjectMeta.
ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object. # noqa: E501
:param managed_fields: The managed_fields of this V1ObjectMeta. # noqa: E501
:type: list[V1ManagedFieldsEntry]
"""
self._managed_fields = managed_fields
@property
def name(self):
"""Gets the name of this V1ObjectMeta. # noqa: E501
Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names # noqa: E501
:return: The name of this V1ObjectMeta. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1ObjectMeta.
Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names # noqa: E501
:param name: The name of this V1ObjectMeta. # noqa: E501
:type: str
"""
self._name = name
@property
def namespace(self):
"""Gets the namespace of this V1ObjectMeta. # noqa: E501
Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces # noqa: E501
:return: The namespace of this V1ObjectMeta. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this V1ObjectMeta.
Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces # noqa: E501
:param namespace: The namespace of this V1ObjectMeta. # noqa: E501
:type: str
"""
self._namespace = namespace
@property
def owner_references(self):
"""Gets the owner_references of this V1ObjectMeta. # noqa: E501
List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. # noqa: E501
:return: The owner_references of this V1ObjectMeta. # noqa: E501
:rtype: list[V1OwnerReference]
"""
return self._owner_references
@owner_references.setter
def owner_references(self, owner_references):
"""Sets the owner_references of this V1ObjectMeta.
List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. # noqa: E501
:param owner_references: The owner_references of this V1ObjectMeta. # noqa: E501
:type: list[V1OwnerReference]
"""
self._owner_references = owner_references
@property
def resource_version(self):
"""Gets the resource_version of this V1ObjectMeta. # noqa: E501
An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency # noqa: E501
:return: The resource_version of this V1ObjectMeta. # noqa: E501
:rtype: str
"""
return self._resource_version
@resource_version.setter
def resource_version(self, resource_version):
"""Sets the resource_version of this V1ObjectMeta.
An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency # noqa: E501
:param resource_version: The resource_version of this V1ObjectMeta. # noqa: E501
:type: str
"""
self._resource_version = resource_version
@property
def self_link(self):
"""Gets the self_link of this V1ObjectMeta. # noqa: E501
Deprecated: selfLink is a legacy read-only field that is no longer populated by the system. # noqa: E501
:return: The self_link of this V1ObjectMeta. # noqa: E501
:rtype: str
"""
return self._self_link
@self_link.setter
def self_link(self, self_link):
"""Sets the self_link of this V1ObjectMeta.
Deprecated: selfLink is a legacy read-only field that is no longer populated by the system. # noqa: E501
:param self_link: The self_link of this V1ObjectMeta. # noqa: E501
:type: str
"""
self._self_link = self_link
@property
def uid(self):
"""Gets the uid of this V1ObjectMeta. # noqa: E501
UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids # noqa: E501
:return: The uid of this V1ObjectMeta. # noqa: E501
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this V1ObjectMeta.
UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids # noqa: E501
:param uid: The uid of this V1ObjectMeta. # noqa: E501
:type: str
"""
self._uid = uid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ObjectMeta):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ObjectMeta):
return True
return self.to_dict() != other.to_dict()
| V1ObjectMeta |
python | numba__numba | numba/core/types/misc.py | {
"start": 12881,
"end": 13336
} | class ____(Type):
"""
Internal only.
Represents the data of the instance. The representation of
ClassInstanceType contains a pointer to a ClassDataType which represents
a C structure that contains all the data fields of the class instance.
"""
def __init__(self, classtyp):
self.class_type = classtyp
name = "data.{0}".format(self.class_type.name)
super(ClassDataType, self).__init__(name)
| ClassDataType |
python | crytic__slither | slither/detectors/functions/suicidal.py | {
"start": 432,
"end": 2582
} | class ____(AbstractDetector):
"""
Unprotected function detector
"""
ARGUMENT = "suicidal"
HELP = "Functions allowing anyone to destruct the contract"
IMPACT = DetectorClassification.HIGH
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#suicidal"
WIKI_TITLE = "Suicidal"
WIKI_DESCRIPTION = "Unprotected call to a function executing `selfdestruct`/`suicide`."
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract Suicidal{
function kill() public{
selfdestruct(msg.sender);
}
}
```
Bob calls `kill` and destructs the contract."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = "Protect access to all sensitive functions."
@staticmethod
def detect_suicidal_func(func: FunctionContract) -> bool:
"""Detect if the function is suicidal
Detect the public functions calling suicide/selfdestruct without protection
Returns:
(bool): True if the function is suicidal
"""
if func.is_constructor:
return False
if func.visibility not in ["public", "external"]:
return False
calls = [ir.function.name for ir in func.all_internal_calls()]
if not ("suicide(address)" in calls or "selfdestruct(address)" in calls):
return False
if func.is_protected():
return False
return True
def detect_suicidal(self, contract: Contract) -> List[FunctionContract]:
ret = []
for f in contract.functions_declared:
if self.detect_suicidal_func(f):
ret.append(f)
return ret
def _detect(self) -> List[Output]:
"""Detect the suicidal functions"""
results = []
for c in self.contracts:
functions = self.detect_suicidal(c)
for func in functions:
info: DETECTOR_INFO = [func, " allows anyone to destruct the contract\n"]
res = self.generate_result(info)
results.append(res)
return results
| Suicidal |
python | PrefectHQ__prefect | tests/runtime/test_flow_run.py | {
"start": 5754,
"end": 6812
} | class ____:
async def test_run_count_is_attribute(self):
assert "run_count" in dir(flow_run)
async def test_run_count_is_zero_when_not_set(self):
assert flow_run.run_count == 0
async def test_run_count_returns_run_count_when_present_dynamically(self):
assert flow_run.run_count == 0
with FlowRunContext.model_construct(
flow_run=FlowRun.model_construct(id="foo", run_count=10)
):
assert flow_run.run_count == 10
assert flow_run.run_count == 0
async def test_run_count_from_api(
self, monkeypatch: pytest.MonkeyPatch, prefect_client: PrefectClient
):
run = await prefect_client.create_flow_run(
flow=flow(lambda: None, name="test", retries=5)
)
assert flow_run.run_count == 0
await prefect_client.set_flow_run_state(
flow_run_id=run.id, state=states.Retrying()
)
monkeypatch.setenv(name="PREFECT__FLOW_RUN_ID", value=str(run.id))
assert flow_run.run_count == 1
| TestRunCount |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/gelu_test.py | {
"start": 236,
"end": 603
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, N, C, H, W, device):
self.inputs = {"input": torch.rand(N, C, H, W, device=device)}
def forward(self, input):
return torch.nn.functional.gelu(input)
op_bench.generate_pt_test(gelu_configs_long, GeluBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| GeluBenchmark |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_reload_repository_location.py | {
"start": 15955,
"end": 17374
} | class ____(ManagedTestSuite):
def test_managed_grpc_reload_location(self, graphql_context):
result = execute_dagster_graphql(
graphql_context,
RELOAD_REPOSITORY_LOCATION_QUERY,
{"repositoryLocationName": main_repo_location_name()},
)
assert result
assert result.data
assert result.data["reloadRepositoryLocation"]
assert (
result.data["reloadRepositoryLocation"]["locationOrLoadError"]["__typename"]
== "RepositoryLocation"
)
assert result.data["reloadRepositoryLocation"]["name"] == main_repo_location_name()
assert result.data["reloadRepositoryLocation"]["loadStatus"] == "LOADED"
repositories = result.data["reloadRepositoryLocation"]["locationOrLoadError"][
"repositories"
]
assert len(repositories) == 1
assert repositories[0]["name"] == "test_repo"
metadatas = repositories[0]["displayMetadata"]
metadata_dict = {metadata["key"]: metadata["value"] for metadata in metadatas}
assert (
"python_file" in metadata_dict
or "module_name" in metadata_dict
or "package_name" in metadata_dict
)
assert (
result.data["reloadRepositoryLocation"]["locationOrLoadError"]["isReloadSupported"]
is True
)
| TestReloadRepositoriesManagedGrpc |
python | ray-project__ray | python/ray/serve/_private/deployment_scheduler.py | {
"start": 25036,
"end": 32015
} | class ____(DeploymentScheduler):
def schedule(
self,
upscales: Dict[DeploymentID, List[ReplicaSchedulingRequest]],
downscales: Dict[DeploymentID, DeploymentDownscaleRequest],
) -> Dict[DeploymentID, Set[ReplicaID]]:
"""Called for each update cycle to do batch scheduling.
Args:
upscales: a dict of deployment name to a list of replicas to schedule.
downscales: a dict of deployment name to a downscale request.
Returns:
The IDs of replicas to stop for each deployment.
"""
for upscale in upscales.values():
for scheduling_request in upscale:
replica_id = scheduling_request.replica_id
deployment_id = replica_id.deployment_id
self._pending_replicas[deployment_id][replica_id] = scheduling_request
non_strict_pack_pgs_exist = any(
d.is_non_strict_pack_pg() for d in self._deployments.values()
)
# Schedule replicas using compact strategy.
if RAY_SERVE_USE_COMPACT_SCHEDULING_STRATEGY and not non_strict_pack_pgs_exist:
# Flatten dict of deployment replicas into all replicas,
# then sort by decreasing resource size
all_scheduling_requests = sorted(
_flatten(self._pending_replicas).values(),
key=lambda r: r.required_resources,
reverse=True,
)
# Schedule each replica
for scheduling_request in all_scheduling_requests:
target_node = self._find_best_available_node(
scheduling_request.required_resources,
self._get_available_resources_per_node(),
)
self._schedule_replica(
scheduling_request,
default_scheduling_strategy="DEFAULT",
target_node_id=target_node,
)
else:
for pending_replicas in self._pending_replicas.values():
if not pending_replicas:
continue
for scheduling_request in list(pending_replicas.values()):
self._schedule_replica(
scheduling_request=scheduling_request,
default_scheduling_strategy="SPREAD",
)
deployment_to_replicas_to_stop = {}
for downscale in downscales.values():
deployment_to_replicas_to_stop[
downscale.deployment_id
] = self._get_replicas_to_stop(
downscale.deployment_id, downscale.num_to_stop
)
return deployment_to_replicas_to_stop
def _get_replicas_to_stop(
self, deployment_id: DeploymentID, max_num_to_stop: int
) -> Set[ReplicaID]:
"""Prioritize replicas running on a node with fewest replicas of
all deployments.
This algorithm helps to scale down more intelligently because it can
relinquish nodes faster. Note that this algorithm doesn't consider
other non-serve actors on the same node. See more at
https://github.com/ray-project/ray/issues/20599.
"""
replicas_to_stop = set()
# Replicas not in running state don't have node id.
# We will prioritize those first.
pending_launching_recovering_replicas = set().union(
self._pending_replicas[deployment_id].keys(),
self._launching_replicas[deployment_id].keys(),
self._recovering_replicas[deployment_id],
)
for (
pending_launching_recovering_replica
) in pending_launching_recovering_replicas:
replicas_to_stop.add(pending_launching_recovering_replica)
if len(replicas_to_stop) == max_num_to_stop:
return replicas_to_stop
node_to_running_replicas_of_all_deployments = (
self._get_node_to_running_replicas()
)
# _running_replicas preserves insertion order (oldest → newest).
# Reverse once so we have newest → oldest, then bucket by node.
ordered_running_replicas = list(self._running_replicas[deployment_id].items())
ordered_running_replicas.reverse()
ordered_running_replicas_of_target_deployment: Dict[
str, List[ReplicaID]
] = defaultdict(list)
for replica_id, replica_node_id in ordered_running_replicas:
ordered_running_replicas_of_target_deployment[replica_node_id].append(
replica_id
)
# Replicas on the head node has the lowest priority for downscaling
# since we cannot relinquish the head node.
def key(node_and_num_running_replicas_of_all_deployments):
return (
len(node_and_num_running_replicas_of_all_deployments[1])
if node_and_num_running_replicas_of_all_deployments[0]
!= self._head_node_id
else sys.maxsize
)
for node_id, _ in sorted(
node_to_running_replicas_of_all_deployments.items(), key=key
):
if node_id not in ordered_running_replicas_of_target_deployment:
continue
# Newest-first list for this node.
for replica_id in ordered_running_replicas_of_target_deployment[node_id]:
replicas_to_stop.add(replica_id)
if len(replicas_to_stop) == max_num_to_stop:
return replicas_to_stop
return replicas_to_stop
def _find_best_available_node(
self,
required_resources: Resources,
available_resources_per_node: Dict[str, Resources],
) -> Optional[str]:
"""Chooses best available node to schedule the required resources.
If there are available nodes, returns the node ID of the best
available node, minimizing fragmentation. Prefers non-idle nodes
over idle nodes.
"""
node_to_running_replicas = self._get_node_to_running_replicas()
non_idle_nodes = {
node_id: res
for node_id, res in available_resources_per_node.items()
if len(node_to_running_replicas.get(node_id, set())) > 0
}
idle_nodes = {
node_id: res
for node_id, res in available_resources_per_node.items()
if len(node_to_running_replicas.get(node_id, set())) == 0
}
# 1. Prefer non-idle nodes
chosen_node = self._best_fit_node(required_resources, non_idle_nodes)
if chosen_node:
return chosen_node
# 2. Consider idle nodes last
chosen_node = self._best_fit_node(required_resources, idle_nodes)
if chosen_node:
return chosen_node
def get_node_to_compact(
self, allow_new_compaction: bool
) -> Optional[Tuple[str, float]]:
return None
| DefaultDeploymentScheduler |
python | spyder-ide__spyder | spyder/plugins/variableexplorer/widgets/arrayeditor.py | {
"start": 1864,
"end": 2061
} | class ____:
Close = 'close'
Copy = 'copy_action'
Edit = 'edit_action'
Preferences = 'preferences_action'
Refresh = 'refresh_action'
Resize = 'resize_action'
| ArrayEditorActions |
python | altair-viz__altair | tools/schemapi/utils.py | {
"start": 11628,
"end": 32248
} | class ____:
"""A wrapper for inspecting a JSON schema."""
_remap_title: ClassVar[dict[str, Sequence[str]]] = {}
def __init__(
self, schema: Mapping[str, Any], rootschema: Mapping[str, Any] | None = None
) -> None:
if not rootschema:
rootschema = schema
self.raw_schema: Mapping[str, Any]
self.rootschema: Mapping[str, Any]
self.schema: Mapping[str, Any]
object.__setattr__(self, "raw_schema", schema)
object.__setattr__(self, "rootschema", rootschema)
object.__setattr__(self, "schema", resolve_references(schema, rootschema)) # type: ignore
@classmethod
def from_refname(cls, refname: str, /, rootschema: Mapping[str, Any]) -> SchemaInfo:
return cls({"$ref": f"#/definitions/{refname}"}, rootschema)
def __setattr__(self, name: str, value: Any) -> Never:
msg = f"{type(self).__name__!r} is immutable.\nCould not assign self.{name} = {value}"
raise TypeError(msg)
def __hash__(self) -> int:
return hash(_HASH_ENCODER.encode(self.schema))
def __eq__(self, value: object) -> bool:
if isinstance(value, SchemaInfo):
if self.ref:
return self.ref == value.ref
return self.schema == value.schema
return False
def child(self, schema: dict[str, Any]) -> SchemaInfo:
return self.__class__(schema, rootschema=self.rootschema)
def iter_descendants(self) -> Iterator[SchemaInfo]:
"""Yields `properties`, `anyOf`, `items`."""
if "properties" in self.schema:
yield from self.properties.values()
if "anyOf" in self.schema:
yield from self.anyOf
if self.items:
yield self.child(self.items)
def __repr__(self) -> str:
keys = []
for key in sorted(self.schema.keys()):
val = self.schema[key]
rval = repr(val).replace("\n", "")
if len(rval) > 30:
rval = rval[:30] + "..."
if key == "definitions":
rval = "{...}"
elif key == "properties":
rval = "{\n " + "\n ".join(sorted(map(repr, val))) + "\n }"
keys.append(f'"{key}": {rval}')
return "SchemaInfo({\n " + "\n ".join(keys) + "\n})"
@property
def title(self) -> str:
if self.is_reference():
return get_valid_identifier(self.refname)
else:
return ""
@overload
def to_type_repr(
self,
*,
as_str: Literal[True] = ...,
target: TargetType = ...,
use_concrete: bool = ...,
use_undefined: bool = ...,
) -> str: ...
@overload
def to_type_repr(
self,
*,
as_str: Literal[False],
target: TargetType = ...,
use_concrete: bool = ...,
use_undefined: bool = ...,
) -> list[str]: ...
def to_type_repr( # noqa: C901
self,
*,
as_str: bool = True,
target: TargetType = "doc",
use_concrete: bool = False,
use_undefined: bool = False,
) -> str | list[str]:
"""
Return the python type representation of ``SchemaInfo``.
Includes `altair` classes, standard `python` types, etc.
Parameters
----------
as_str
Return as a string.
Should only be ``False`` during internal recursive calls.
target: {"annotation", "doc"}
Where the representation will be used.
use_concrete
Avoid base classes/wrappers that don't provide type info.
use_undefined
Wrap the result in ``altair.typing.Optional``.
"""
tps: set[str] = set()
FOR_TYPE_HINTS: bool = target == "annotation"
if self.title:
if target == "annotation":
tps.update(self.title_to_type_reprs(use_concrete=use_concrete))
elif target == "doc":
tps.add(rst_syntax_for_class(self.title))
if self.is_empty():
tps.add("Any")
elif self.is_literal():
tp_str = spell_literal(self.literal)
if FOR_TYPE_HINTS:
tp_str = TypeAliasTracer.add_literal(self, tp_str, replace=True)
tps.add(tp_str)
elif FOR_TYPE_HINTS and self.is_union_literal():
it: Iterator[str] = chain.from_iterable(el.literal for el in self.anyOf)
tp_str = TypeAliasTracer.add_literal(self, spell_literal(it), replace=True)
tps.add(tp_str)
elif self.is_anyOf():
it_nest = (
s.to_type_repr(target=target, as_str=False, use_concrete=use_concrete)
for s in self.anyOf
)
tps.update(maybe_rewrap_literal(chain.from_iterable(it_nest)))
elif FOR_TYPE_HINTS and self.is_type_alias_union():
it = (
SchemaInfo(dict(self.schema, type=tp)).to_type_repr(
target=target, use_concrete=use_concrete
)
for tp in self.type
)
tps.add(TypeAliasTracer.add_union(self, it, replace=True))
elif isinstance(self.type, list):
# We always use title if possible for nested objects
tps.update(
SchemaInfo(dict(self.schema, type=tp)).to_type_repr(
target=target, use_concrete=use_concrete
)
for tp in self.type
)
elif self.is_array():
tps.add(
spell_nested_sequence(self, target=target, use_concrete=use_concrete)
)
elif self.type in jsonschema_to_python_types:
if self.is_object() and use_concrete:
... # HACK: Fall-through case to avoid `dict` added to `TypedDict`
elif self.is_object() and target == "doc":
tps.add("dict")
else:
tps.add(jsonschema_to_python_types[self.type])
else:
msg = "No Python type representation available for this schema"
raise ValueError(msg)
if use_concrete:
if tps >= {"ColorHex", TypeAliasTracer.fmt.format("ColorName"), "str"}:
# HACK: Remove regular `str` if HEX & CSS color codes are present as well
tps.discard("str")
elif len(tps) == 0 and as_str:
# HACK: There is a single case that ends up empty here
# See: https://github.com/vega/altair/pull/3536#discussion_r1714344162
tps = {"Map"}
return (
finalize_type_reprs(tps, target=target, use_undefined=use_undefined)
if as_str
else sort_type_reprs(tps)
)
@classmethod
def to_type_repr_batched(
cls,
infos: Iterable[SchemaInfo],
/,
*,
target: TargetType = "doc",
use_concrete: bool = False,
use_undefined: bool = False,
) -> str:
"""
Return the python type representation of multiple ``SchemaInfo``.
Intended to handle a subset of a ``Union``.
Parameters
----------
infos
Schemas to collapse into a single representation.
target: {"annotation", "doc"}
Where the representation will be used.
use_concrete
Avoid base classes/wrappers that don't provide type info.
use_undefined
Wrap the result in ``altair.typing.Optional``.
See Also
--------
- ``SchemaInfo.to_type_repr``
"""
it: Iterator[str] = chain.from_iterable(
info.to_type_repr(
as_str=False,
target=target,
use_concrete=use_concrete,
use_undefined=False,
)
for info in infos
)
return finalize_type_reprs(it, target=target, use_undefined=use_undefined)
def title_to_type_reprs(self, *, use_concrete: bool) -> set[str]:
"""
Possibly use ``self.title`` as a type, or provide alternative(s).
Parameters
----------
use_concrete
Avoid base classes/wrappers that don't provide type info.
"""
tp_param: set[str] = {"ExprRef", "ParameterExtent"}
# In these cases, a `VariableParameter` is also always accepted.
# It could be difficult to differentiate `(Variable|Selection)Parameter`, with typing.
# TODO: A solution could be defining `Parameter` as generic over either `param` or `param_type`.
# - Rewriting the init logic to not use an `Undefined` default.
# - Any narrowing logic could be factored-out into `is_(selection|variable)_parameter` guards.
EXCLUDE_TITLE: set[str] = tp_param | {"RelativeBandSize"}
"""
`RelativeBandSize` excluded as it has a single property `band`,
but all instances also accept `float`.
"""
REMAP_TITLE = SchemaInfo._remap_title
title: str = self.title
tps: set[str] = set()
if not use_concrete:
tps.add("SchemaBase")
# NOTE: To keep type hints simple, we annotate with `SchemaBase` for all subclasses.
if title in tp_param:
tps.add("Parameter")
if self.is_datetime():
tps.add("Temporal")
if self.is_top_level_spec_data():
tps.add("ChartDataType")
elif self.is_value():
value = self.properties["value"]
t = value.to_type_repr(target="annotation", use_concrete=use_concrete)
tps.add(f"Value[{t}]")
elif self.is_rowcol():
row = self.properties["row"]
t = row.to_type_repr(target="annotation", use_concrete=use_concrete)
tps.add(f"RowColKwds[{t}]")
elif title in REMAP_TITLE:
tps.update(REMAP_TITLE[title])
elif (
(title not in EXCLUDE_TITLE)
and not TypeAliasTracer.is_cached(title, include_concrete=use_concrete)
and not self.is_union()
and not self.is_format()
and not self.is_array()
and not self.is_type_alias()
and not self.additionalProperties
):
tps.add(title)
return tps
@property
def properties(self) -> SchemaProperties:
return SchemaProperties(
self.schema.get("properties", {}), self.schema, self.rootschema
)
@property
def definitions(self) -> SchemaProperties:
return SchemaProperties(
self.schema.get("definitions", {}), self.schema, self.rootschema
)
@property
def required(self) -> list[str]:
return self.schema.get("required", [])
@property
def patternProperties(self) -> dict[str, Any]:
return self.schema.get("patternProperties", {})
@property
def additionalProperties(self) -> bool:
return self.schema.get("additionalProperties", True)
@property
def type(self) -> str | list[Any]:
return self.schema.get("type", "")
@property
def anyOf(self) -> Iterator[SchemaInfo]:
for s in self.schema.get("anyOf", []):
yield self.child(s)
@property
def oneOf(self) -> Iterator[SchemaInfo]:
for s in self.schema.get("oneOf", []):
yield self.child(s)
@property
def allOf(self) -> Iterator[SchemaInfo]:
for s in self.schema.get("allOf", []):
yield self.child(s)
@property
def not_(self) -> SchemaInfo:
return self.child(self.schema.get("not", {}))
@property
def items(self) -> dict[str, Any]:
return self.schema.get("items", {})
@property
def enum(self) -> list[str]:
return self.schema.get("enum", [])
@property
def const(self) -> str:
return self.schema.get("const", "")
@property
def literal(self) -> list[str]:
return self.schema.get("enum", [self.const])
@property
def refname(self) -> str:
return self.raw_schema.get("$ref", "#/").split("/")[-1]
@property
def ref(self) -> str:
return self.raw_schema.get("$ref", "")
@property
def description(self) -> str:
return self._get_description(include_sublevels=False)
@property
def deep_description(self) -> str:
return process_description(self._get_description(include_sublevels=True))
def _get_description(self, include_sublevels: bool = False) -> str:
desc = self.raw_schema.get("description", self.schema.get("description", ""))
if not desc and include_sublevels:
for item in self.anyOf:
sub_desc = item._get_description(include_sublevels=False)
if desc and sub_desc:
raise ValueError(
"There are multiple potential descriptions which could"
+ " be used for the currently inspected schema. You'll need to"
+ " clarify which one is the correct one.\n"
+ str(self.schema)
)
if sub_desc:
desc = sub_desc
return desc
def is_reference(self) -> bool:
return "$ref" in self.raw_schema
def is_enum(self) -> bool:
return "enum" in self.schema
def is_const(self) -> bool:
return "const" in self.schema
def is_literal(self) -> bool:
"""
Return True for `const`_ or `enum`_ values.
JSON Schema distinguishes between singular/multiple values.
But we annotate them both the same way:
ConstInfo = Literal["single value"]
EnumInfo = Literal["value 1", "value 2", "value 3"]
.. _const:
https://json-schema.org/understanding-json-schema/reference/const
.. _enum:
https://json-schema.org/understanding-json-schema/reference/enum
"""
return not ({"enum", "const"}.isdisjoint(self.schema))
def is_empty(self) -> bool:
return not (self.schema.keys() - EXCLUDE_KEYS)
def is_compound(self) -> bool:
return any(key in self.schema for key in COMPOUND_KEYS)
def is_anyOf(self) -> bool:
return "anyOf" in self.schema
def is_allOf(self) -> bool:
return "allOf" in self.schema
def is_oneOf(self) -> bool:
return "oneOf" in self.schema
def is_not(self) -> bool:
return "not" in self.schema
def is_object(self) -> bool:
if self.type == "object":
return True
elif self.type:
return False
elif (
self.properties
or self.required
or self.additionalProperties
or self.patternProperties
):
return True
else:
msg = "Unclear whether schema.is_object() is True"
raise ValueError(msg)
def is_value(self) -> bool:
return self.is_object() and self.properties.keys() == {"value"}
def is_rowcol(self) -> bool:
props = self.properties
return (
self.is_object()
and props.keys() == {"column", "row"}
and props["column"] == props["row"]
)
def is_array(self) -> bool:
return self.type == "array"
def is_union(self) -> bool:
"""
Candidate for ``Union`` type alias.
Not a real class.
"""
return self.is_anyOf() and not self.type
def is_union_literal(self) -> bool:
"""
Candidate for reducing to a single ``Literal`` alias.
E.g. `BinnedTimeUnit`
"""
return self.is_union() and all(el.is_literal() for el in self.anyOf)
def is_primitive(self) -> bool:
"""
A basic JSON Schema `type`_ or an array of **only** basic types.
.. _type:
https://json-schema.org/understanding-json-schema/reference/type
"""
TP = "type"
return (self.schema.keys() == {TP}) or (
self.is_array() and self.child(self.items).is_primitive()
)
def is_flattenable(self) -> bool:
"""
Represents a range of cases we want to annotate in ``@overload``(s).
Examples
--------
The following are non-exhaustive examples, using ``python`` types.
Base cases look like:
Literal["left", "center", "right"]
float
Sequence[str]
We also include compound cases, but only when **every** member meets these criteria:
Literal["pad", "none", "fit"] | None
float | Sequence[float]
Sequence[str] | str | bool | float | None
"""
return self.is_literal() or self.is_primitive() or self.is_union_flattenable()
def is_union_flattenable(self) -> bool:
"""
Represents a fully flattenable ``Union``.
Used to prevent ``@overload`` explosion in ``channels.py``
Requires **every** member of the ``Union`` satisfies *at least* **one** the criteria.
See Also
--------
- ``SchemaInfo.is_literal``
- ``SchemaInfo.is_array``
- ``SchemaInfo.is_primitive``
- ``SchemaInfo.is_flattenable``
"""
if not self.is_union():
return False
else:
fns = (
SchemaInfo.is_literal,
SchemaInfo.is_array,
SchemaInfo.is_primitive,
SchemaInfo.is_union_flattenable,
)
return all(any(fn(el) for fn in fns) for el in self.anyOf)
def is_format(self) -> bool:
"""
Represents a string format specifier.
These do not currently produce useful classes (e.g. ``HexColor``, ``URI``).
See Also
--------
[python-jsonschema](https://python-jsonschema.readthedocs.io/en/latest/faq/#my-schema-specifies-format-validation-why-do-invalid-instances-seem-valid)
"""
return (self.schema.keys() == {"format", "type"}) and self.type == "string"
def is_type_alias(self) -> bool:
"""
Represents a name assigned to a literal type.
At the time of writing, most of these are:
SchemaInfo.schema = {"type": "string"}
The resulting annotation then becomes, e.g. ``FieldName``:
arg: str | FieldName
Where both of the above represent:
arg = "name 1"
arg = FieldName("name 1")
The latter is not useful and adds noise.
``Dict`` is very similar case, with a *slightly* different schema:
SchemaInfo.schema = {"additionalProperties": {}, "type": "object"}
"""
TP = "type"
ADDITIONAL = "additionalProperties"
keys = self.schema.keys()
return (
(
(keys == {TP})
or (keys == {TP, ADDITIONAL} and self.schema[ADDITIONAL] == {})
)
and isinstance(self.type, str)
and self.type in jsonschema_to_python_types
)
def is_type_alias_union(self) -> bool:
"""
Represents a name assigned to a list of literal types.
Example:
{"PrimitiveValue": {"type": ["number", "string", "boolean", "null"]}}
Translating from JSON -> Python, this is the same as an ``"anyOf"`` -> ``Union``.
The distinction in the schema is purely due to these types being defined in the draft, rather than definitions.
"""
TP = "type"
return (
self.schema.keys() == {TP}
and isinstance(self.type, list)
and bool(self.title)
)
def is_theme_config_target(self) -> bool:
"""
Return `True` for candidates classes in ``ThemeConfig`` hierarchy of ``TypedDict``(s).
Satisfying these rules ensures:
- we generate meaningful annotations
- they improve autocompletion, without overwhelming the UX
"""
EXCLUDE = {"ExprRef", "ParameterPredicate", "RelativeBandSize"}
return bool(
self.ref
and self.refname not in EXCLUDE
and self.properties
and self.type == "object"
and not self.is_value()
and "field" not in self.required
and not (iskeyword(next(iter(self.required), "")))
)
def is_datetime(self) -> bool:
return self.refname == "DateTime"
def is_top_level_spec_data(self) -> bool:
return self.refname == "Data"
| SchemaInfo |
python | Netflix__metaflow | metaflow/plugins/argo/argo_workflows.py | {
"start": 199860,
"end": 205171
} | class ____(object):
# https://argoproj.github.io/argo-workflows/fields/#template
def __init__(self, name):
tree = lambda: defaultdict(tree)
self.payload = tree()
self.payload["name"] = name
def active_deadline_seconds(self, active_deadline_seconds):
# Overall duration of a pod in seconds, only obeyed for container templates
# Used for implementing @timeout.
self.payload["activeDeadlineSeconds"] = int(active_deadline_seconds)
return self
def dag(self, dag_template):
self.payload["dag"] = dag_template.to_json()
return self
def steps(self, steps):
if "steps" not in self.payload:
self.payload["steps"] = []
# steps is a list of lists.
# hence we go over every item in the incoming list
# serialize it and then append the list to the payload
step_list = []
for step in steps:
step_list.append(step.to_json())
self.payload["steps"].append(step_list)
return self
def container(self, container):
# Luckily this can simply be V1Container and we are spared from writing more
# boilerplate - https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Container.md.
self.payload["container"] = container
return self
def http(self, http):
self.payload["http"] = http.to_json()
return self
def inputs(self, inputs):
self.payload["inputs"] = inputs.to_json()
return self
def outputs(self, outputs):
self.payload["outputs"] = outputs.to_json()
return self
def fail_fast(self, fail_fast=True):
# https://github.com/argoproj/argo-workflows/issues/1442
self.payload["failFast"] = fail_fast
return self
def metadata(self, metadata):
self.payload["metadata"] = metadata.to_json()
return self
def service_account_name(self, service_account_name):
self.payload["serviceAccountName"] = service_account_name
return self
def retry_strategy(self, times, minutes_between_retries):
if times > 0:
self.payload["retryStrategy"] = {
"retryPolicy": "Always",
"limit": times,
"backoff": {"duration": "%sm" % minutes_between_retries},
}
return self
def empty_dir_volume(self, name, medium=None, size_limit=None):
"""
Create and attach an emptyDir volume for Kubernetes.
Parameters:
-----------
name: str
name for the volume
size_limit: int (optional)
sizeLimit (in MiB) for the volume
medium: str (optional)
storage medium of the emptyDir
"""
# Do not add volume if size is zero. Enables conditional chaining.
if size_limit == 0:
return self
# Attach an emptyDir volume
# https://argoproj.github.io/argo-workflows/empty-dir/
if "volumes" not in self.payload:
self.payload["volumes"] = []
self.payload["volumes"].append(
{
"name": name,
"emptyDir": {
# Add default unit as ours differs from Kubernetes default.
**({"sizeLimit": "{}Mi".format(size_limit)} if size_limit else {}),
**({"medium": medium} if medium else {}),
},
}
)
return self
def pvc_volumes(self, pvcs=None):
"""
Create and attach Persistent Volume Claims as volumes.
Parameters:
-----------
pvcs: Optional[Dict]
a dictionary of pvc's and the paths they should be mounted to. e.g.
{"pv-claim-1": "/mnt/path1", "pv-claim-2": "/mnt/path2"}
"""
if pvcs is None:
return self
if "volumes" not in self.payload:
self.payload["volumes"] = []
for claim in pvcs.keys():
self.payload["volumes"].append(
{"name": claim, "persistentVolumeClaim": {"claimName": claim}}
)
return self
def pod_spec_patch(self, pod_spec_patch=None):
if pod_spec_patch is None:
return self
self.payload["podSpecPatch"] = json.dumps(pod_spec_patch)
return self
def node_selectors(self, node_selectors):
if "nodeSelector" not in self.payload:
self.payload["nodeSelector"] = {}
if node_selectors:
self.payload["nodeSelector"].update(node_selectors)
return self
def tolerations(self, tolerations):
self.payload["tolerations"] = tolerations
return self
def to_json(self):
return self.payload
def resource(self, action, manifest, success_criteria, failure_criteria):
self.payload["resource"] = {}
self.payload["resource"]["action"] = action
self.payload["resource"]["setOwnerReference"] = True
self.payload["resource"]["successCondition"] = success_criteria
self.payload["resource"]["failureCondition"] = failure_criteria
self.payload["resource"]["manifest"] = manifest
return self
def __str__(self):
return json.dumps(self.payload, indent=4)
| Template |
python | arrow-py__arrow | arrow/locales.py | {
"start": 128039,
"end": 129403
} | class ____(Locale):
names = ["sq", "sq-al"]
past = "{0} më parë"
future = "në {0}"
and_word = "dhe"
timeframes = {
"now": "tani",
"second": "sekondë",
"seconds": "{0} sekonda",
"minute": "minutë",
"minutes": "{0} minuta",
"hour": "orë",
"hours": "{0} orë",
"day": "ditë",
"days": "{0} ditë",
"week": "javë",
"weeks": "{0} javë",
"month": "muaj",
"months": "{0} muaj",
"year": "vit",
"years": "{0} vjet",
}
month_names = [
"",
"janar",
"shkurt",
"mars",
"prill",
"maj",
"qershor",
"korrik",
"gusht",
"shtator",
"tetor",
"nëntor",
"dhjetor",
]
month_abbreviations = [
"",
"jan",
"shk",
"mar",
"pri",
"maj",
"qer",
"korr",
"gush",
"sht",
"tet",
"nën",
"dhj",
]
day_names = [
"",
"e hënë",
"e martë",
"e mërkurë",
"e enjte",
"e premte",
"e shtunë",
"e diel",
]
day_abbreviations = [
"",
"hën",
"mar",
"mër",
"enj",
"pre",
"sht",
"die",
]
| AlbanianLocale |
python | pytorch__pytorch | torch/_export/verifier.py | {
"start": 3760,
"end": 12091
} | class ____(metaclass=_VerifierMeta):
dialect = "ATEN"
def allowed_builtin_ops(self) -> list:
return [
operator.getitem,
operator.add,
operator.mul,
operator.sub,
operator.truediv,
operator.ge,
operator.le,
operator.gt,
operator.lt,
operator.eq,
operator.ne,
operator.floordiv,
operator.mod,
operator.and_,
operator.or_,
operator.not_,
operator.pow,
operator.neg,
operator.abs,
operator.lshift,
operator.rshift,
math.ceil,
math.floor,
math.trunc,
round,
]
def allowed_op_types(self) -> tuple[type[Any], ...]:
return (OpOverload, HigherOrderOperator)
def allowed_getattr_types(self) -> tuple[type[Any], ...]:
return (torch.fx.GraphModule, torch.utils._pytree.TreeSpec)
def allowed_getattr_types_for_subgm(self) -> tuple[type[Any], ...]:
# subgm in HOP's argument could has have getattr(weight) nodes, thus stateful
return (
torch.fx.GraphModule,
torch.nn.parameter.Parameter,
torch.Tensor, # for buffer and constant tensor
torch.utils._pytree.TreeSpec,
)
def check_valid_op(self, op):
pass
def check_additional(self, gm: GraphModule) -> None:
"""
Additional checks that are specific to some dialects.
"""
@final
def check(self, ep: "ExportedProgram") -> None:
self._check_graph_module(ep.graph_module)
_verify_exported_program_module_call_graph(ep)
_verify_exported_program_signature(ep)
@final
def _check_graph_module(self, gm: torch.fx.GraphModule) -> None:
def _allowed_getattr_types(is_toplevel_gm) -> tuple[type[Any], ...]:
if is_toplevel_gm:
ret = self.allowed_getattr_types()
else:
ret = self.allowed_getattr_types_for_subgm()
assert not any(t is object for t in ret)
return ret
def _check_valid_op(op) -> None:
def _allowed_builtin_ops() -> list:
ret = self.allowed_builtin_ops()
assert all(inspect.isbuiltin(op) for op in ret)
return ret
def _allowed_op_types() -> tuple[type[Any], ...]:
ret = self.allowed_op_types()
assert not any(t is object for t in ret)
return ret
# TODO Remove this allowlist.
_allowed_torch_functions = (
torch.autograd.grad_mode.set_grad_enabled,
torch.sym_int,
torch.sym_float,
torch.sym_ite,
torch.sym_max,
torch.sym_min,
torch.sym_not,
torch.sym_sqrt,
torch.sym_sum,
torch.export.custom_ops._call_custom_autograd_function_in_pre_dispatch,
# TODO (tmanlaibaatar)
# Predispatch export is able to contain autograd ops.
# These will be modeled as HOO later
torch._C._set_grad_enabled,
torch.amp.autocast_mode._enter_autocast,
torch.amp.autocast_mode._exit_autocast,
torch.fx.experimental.symbolic_shapes.cast_symbool_to_symint_guardless,
torch._functorch.predispatch._add_batch_dim,
torch._functorch.predispatch._remove_batch_dim,
torch._functorch.predispatch._vmap_increment_nesting,
torch._functorch.predispatch._vmap_decrement_nesting,
torch._functorch.predispatch.lazy_load_decompositions,
)
if not isinstance(op, _allowed_op_types()):
if (
op not in _allowed_builtin_ops()
and op not in _allowed_torch_functions
):
raise SpecViolationError(
f"Operator '{op}' is not an allowed operator type: {_allowed_op_types()}\n"
f"Valid builtin ops: {_allowed_builtin_ops()}"
f"Valid torch functions: {_allowed_torch_functions}"
)
if isinstance(op, OpOverload):
# All ops functional
# TODO (tmanlaibaatar) more proper way is needed here
if self.dialect != "TRAINING" and not is_functional(op):
raise SpecViolationError(f"operator '{op}' is not functional")
self.check_valid_op(op)
for mod in gm.modules():
is_toplevel_gm = mod is gm
if not isinstance(mod, torch.fx.GraphModule):
continue
mod.graph.lint()
for node in mod.graph.nodes:
# TODO(T140410192): should have fake tensor for all dialects
if node.op in {"call_module", "call_method"}:
raise SpecViolationError(
f"call_module is not valid: got a class '{node.target}' ",
)
elif node.op == "call_function":
_check_val(node)
_check_valid_op(node.target)
elif node.op == "get_attr":
if not isinstance(node.target, str):
raise SpecViolationError(
f"Expected get_attr target to be string, but got {type(node.target)}"
)
attr = getattr_recursive(mod, node.target)
if isinstance(attr, torch.nn.Module):
def _is_type(name, ty):
return isinstance(getattr(attr, name, None), ty)
if type(attr).__name__ == "LoweredBackendModule":
if (
_is_type("backend_id", str)
and hasattr(attr, "original_module")
and hasattr(attr, "module_name")
and getattr(attr, "backend_id", None) == "aoti"
):
continue
if (
_is_type("backend_id", str)
and _is_type("processed_bytes", bytes)
and _is_type("compile_specs", list)
and hasattr(attr, "original_module")
):
continue
else:
backend_id = getattr(attr, "backend_id", None)
processed_bytes = getattr(attr, "processed_bytes", None)
compile_specs = getattr(attr, "compile_specs", None)
raise SpecViolationError(
f"Invalid get_attr type {type(attr)}. \n"
f"LoweredBackendModule fields: "
f"backend_id(str) : {type(backend_id)}, "
f"processed_bytes(bytes) : {type(processed_bytes)}, "
f"compile_specs(list) : {type(compile_specs)}"
)
elif type(attr).__name__ == "AOTInductorEPModule":
continue
elif type(attr).__name__ == "AOTInductorRunnerWrapper":
continue
if not isinstance(attr, _allowed_getattr_types(is_toplevel_gm)):
raise SpecViolationError(
f"Invalid get_attr type {type(attr)} on target {node.target}. \n"
f"Valid get_attr types: {_allowed_getattr_types(is_toplevel_gm)}"
)
elif node.op == "placeholder":
_check_val(node)
# TODO(zhxchen17)
# elif node.op == "output":
# _check_flattened_outputs()
self.check_additional(gm)
| Verifier |
python | getsentry__sentry | tests/sentry/snuba/test_discover_query.py | {
"start": 116651,
"end": 128654
} | class ____(SnubaTestCase, TestCase):
def setUp(self) -> None:
super().setUp()
self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
self.now = before_now()
event_data = load_data("transaction")
# Half of duration so we don't get weird rounding differences when comparing the results
event_data["breakdowns"]["span_ops"]["ops.http"]["value"] = 1500
event_data["start_timestamp"] = (self.day_ago + timedelta(minutes=30)).isoformat()
event_data["timestamp"] = (self.day_ago + timedelta(minutes=30, seconds=3)).isoformat()
self.store_event(data=event_data, project_id=self.project.id)
self.params = SnubaParams(
projects=[self.project],
start=self.day_ago,
end=self.now,
)
self.query = "event.type:transaction"
def test_simple(self) -> None:
results = discover.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=["spans.http / transaction.duration"],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
assert len(results["data"]) == 1
result = results["data"][0]
assert result["equation[0]"] == result["spans.http"] / result["transaction.duration"]
def test_multiple_equations(self) -> None:
results = discover.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=[
"spans.http / transaction.duration",
"transaction.duration / spans.http",
"1500 + transaction.duration",
],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
assert len(results["data"]) == 1
result = results["data"][0]
assert result["equation[0]"] == result["spans.http"] / result["transaction.duration"]
assert result["equation[1]"] == result["transaction.duration"] / result["spans.http"]
assert result["equation[2]"] == 1500 + result["transaction.duration"]
def test_invalid_field(self) -> None:
with pytest.raises(ArithmeticValidationError):
discover.query(
selected_columns=[
"spans.http",
"transaction.status",
],
# while transaction_status is a uint8, there's no reason we should allow arith on it
equations=["spans.http / transaction.status"],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
def test_invalid_function(self) -> None:
with pytest.raises(ArithmeticValidationError):
discover.query(
selected_columns=[
"p50(transaction.duration)",
"last_seen()",
],
equations=["p50(transaction.duration) / last_seen()"],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
def test_unselected_field(self) -> None:
with pytest.raises(InvalidSearchQuery):
discover.query(
selected_columns=[
"spans.http",
],
equations=["spans.http / transaction.duration"],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
def test_unselected_function(self) -> None:
with pytest.raises(InvalidSearchQuery):
discover.query(
selected_columns=[
"p50(transaction.duration)",
],
equations=["p50(transaction.duration) / p100(transaction.duration)"],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
def test_orderby_equation(self) -> None:
for i in range(1, 3):
event_data = load_data("transaction")
# Half of duration so we don't get weird rounding differences when comparing the results
event_data["breakdowns"]["span_ops"]["ops.http"]["value"] = 300 * i
event_data["start_timestamp"] = (self.day_ago + timedelta(minutes=30)).isoformat()
event_data["timestamp"] = (self.day_ago + timedelta(minutes=30, seconds=3)).isoformat()
self.store_event(data=event_data, project_id=self.project.id)
results = discover.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=[
"spans.http / transaction.duration",
"transaction.duration / spans.http",
"1500 + transaction.duration",
],
orderby=["equation[0]"],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
assert len(results["data"]) == 3
assert [result["equation[0]"] for result in results["data"]] == [0.1, 0.2, 0.5]
results = discover.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=[
"spans.http / transaction.duration",
"transaction.duration / spans.http",
"1500 + transaction.duration",
],
orderby=["equation[1]"],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
assert len(results["data"]) == 3
assert [result["equation[1]"] for result in results["data"]] == [2, 5, 10]
results = discover.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=[
"spans.http / transaction.duration",
"transaction.duration / spans.http",
"1500 + transaction.duration",
],
orderby=["-equation[0]"],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
assert len(results["data"]) == 3
assert [result["equation[0]"] for result in results["data"]] == [0.5, 0.2, 0.1]
def test_orderby_nonexistent_equation(self) -> None:
with pytest.raises(InvalidSearchQuery):
discover.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
orderby=["equation[1]"],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
def test_equation_without_field_or_function(self) -> None:
with pytest.raises(InvalidSearchQuery):
discover.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=[
"5 + 5",
],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
def test_aggregate_equation(self) -> None:
results = discover.query(
selected_columns=[
"p50(transaction.duration)",
],
equations=["p50(transaction.duration) / 2"],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
assert len(results["data"]) == 1
result = results["data"][0]
assert result["equation[0]"] == result["p50_transaction_duration"] / 2
def test_multiple_aggregate_equation(self) -> None:
results = discover.query(
selected_columns=[
"p50(transaction.duration)",
"count()",
],
equations=["p50(transaction.duration) + 2", "p50(transaction.duration) / count()"],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
assert len(results["data"]) == 1
result = results["data"][0]
assert result["equation[0]"] == result["p50_transaction_duration"] + 2
assert result["equation[1]"] == result["p50_transaction_duration"] / result["count"]
def test_multiple_operators(self) -> None:
results = discover.query(
selected_columns=[
"p50(transaction.duration)",
"p100(transaction.duration)",
"count()",
],
equations=[
"p50(transaction.duration) / p100(transaction.duration) * 100",
"100 + count() * 5 - 3 / 5",
"count() + count() / count() * count() - count()",
],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
assert len(results["data"]) == 1
result = results["data"][0]
assert (
result["equation[0]"]
== result["p50_transaction_duration"] / result["p100_transaction_duration"] * 100
)
assert result["equation[1]"] == 100 + result["count"] * 5 - 3 / 5
assert (
result["equation[2]"]
== result["count"]
+ result["count"] / result["count"] * result["count"]
- result["count"]
)
def test_nan_equation_results(self) -> None:
for i in range(1, 3):
event_data = load_data("transaction")
# Half of duration so we don't get weird rounding differences when comparing the results
event_data["breakdowns"]["span_ops"]["ops.http"]["value"] = 0
event_data["start_timestamp"] = (self.day_ago + timedelta(minutes=30)).isoformat()
event_data["timestamp"] = (self.day_ago + timedelta(minutes=30, seconds=3)).isoformat()
self.store_event(data=event_data, project_id=self.project.id)
results = discover.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=[
"transaction.duration / spans.http", # inf
"spans.http / spans.http", # nan
],
orderby=["equation[0]"],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
assert len(results["data"]) == 3
assert [result["equation[0]"] for result in results["data"]] == [2, None, None]
results = discover.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=[
"transaction.duration / spans.http", # inf
"spans.http / spans.http", # nan
],
orderby=["equation[1]"],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
assert len(results["data"]) == 3
assert [result["equation[1]"] for result in results["data"]] == [1, None, None]
results = discover.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=[
"transaction.duration / spans.http", # inf
"spans.http / spans.http", # nan
],
orderby=["-equation[0]"],
query=self.query,
snuba_params=self.params,
referrer="discover",
)
assert len(results["data"]) == 3
assert [result["equation[0]"] for result in results["data"]] == [2, None, None]
| ArithmeticTest |
python | kamyu104__LeetCode-Solutions | Python/android-unlock-patterns.py | {
"start": 3682,
"end": 5137
} | class ____(object):
def numberOfPatterns(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
def merge(used, i):
return used | (1 << i)
def contain(used, i):
return bool(used & (1 << i))
def convert(i, j):
return 3 * i + j
def numberOfPatternsHelper(m, n, level, used, i):
number = 0
if level > n:
return number
if m <= level <= n:
number += 1
x1, y1 = divmod(i, 3)
for j in xrange(9):
if contain(used, j):
continue
x2, y2 = divmod(j, 3)
if ((x1 == x2 and abs(y1 - y2) == 2) or
(y1 == y2 and abs(x1 - x2) == 2) or
(abs(x1 - x2) == 2 and abs(y1 - y2) == 2)) and \
not contain(used,
convert((x1 + x2) // 2, (y1 + y2) // 2)):
continue
number += numberOfPatternsHelper(m, n, level + 1, merge(used, j), j)
return number
number = 0
# 1, 3, 7, 9
number += 4 * numberOfPatternsHelper(m, n, 1, merge(0, 0), 0)
# 2, 4, 6, 8
number += 4 * numberOfPatternsHelper(m, n, 1, merge(0, 1), 1)
# 5
number += numberOfPatternsHelper(m, n, 1, merge(0, 4), 4)
return number
| Solution_TLE |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/dagster/subschema/run_launcher.py | {
"start": 3144,
"end": 3627
} | class ____(BaseModel):
type: RunLauncherType
config: RunLauncherConfig
model_config = ConfigDict(
extra="forbid",
json_schema_extra={
"allOf": create_json_schema_conditionals(
{
RunLauncherType.CELERY: "celeryK8sRunLauncher",
RunLauncherType.K8S: "k8sRunLauncher",
RunLauncherType.CUSTOM: "customRunLauncher",
}
)
},
)
| RunLauncher |
python | huggingface__transformers | tests/trainer/test_trainer.py | {
"start": 10469,
"end": 11197
} | class ____:
def __init__(self, thresh=0.25):
self.thresh = thresh
self.batch_acc = []
def __call__(self, eval_pred, compute_result):
predictions, labels = eval_pred
if isinstance(predictions, tuple):
predictions = predictions[0]
if isinstance(labels, tuple):
labels = labels[0]
batch_size = len(predictions)
true = torch.abs(predictions - labels) <= self.thresh
acc = true.type(torch.FloatTensor).mean().item()
self.batch_acc.extend([acc] * batch_size)
if compute_result:
result = {"accuracy": np.mean(self.batch_acc).item()}
self.batch_acc = []
return result
| AlmostAccuracyBatched |
python | great-expectations__great_expectations | tests/integration/data_sources_and_expectations/expectations/test_expect_column_values_to_not_match_like_pattern_list.py | {
"start": 863,
"end": 4065
} | class ____:
@pytest.mark.parametrize(
"expectation",
[
pytest.param(
gxe.ExpectColumnValuesToNotMatchLikePatternList(
column=COL_NAME, like_pattern_list=["bc"]
),
id="one_pattern",
),
pytest.param(
gxe.ExpectColumnValuesToNotMatchLikePatternList(
column=COL_NAME, like_pattern_list=["bc", "%de%"]
),
id="multiple_patterns",
),
],
)
@parameterize_batch_for_data_sources(data_source_configs=REGULAR_DATA_SOURCES, data=DATA)
def test_success(
self,
batch_for_datasource: Batch,
expectation: gxe.ExpectColumnValuesToNotMatchLikePatternList,
) -> None:
result = batch_for_datasource.validate(expectation)
assert result.success
@pytest.mark.parametrize(
"expectation",
[
pytest.param(
gxe.ExpectColumnValuesToNotMatchLikePatternList(
column=COL_NAME, like_pattern_list=["%a%"]
),
id="one_pattern",
),
pytest.param(
gxe.ExpectColumnValuesToNotMatchLikePatternList(
column=COL_NAME, like_pattern_list=["%a%", "not_this"]
),
id="multiple_patterns",
),
],
)
@parameterize_batch_for_data_sources(data_source_configs=REGULAR_DATA_SOURCES, data=DATA)
def test_failure(
self,
batch_for_datasource: Batch,
expectation: gxe.ExpectColumnValuesToNotMatchLikePatternList,
) -> None:
result = batch_for_datasource.validate(expectation)
assert not result.success
@parameterize_batch_for_data_sources(
data_source_configs=[PostgreSQLDatasourceTestConfig()], data=DATA
)
def test_include_unexpected_rows_postgres(self, batch_for_datasource: Batch) -> None:
"""Test include_unexpected_rows for ExpectColumnValuesToNotMatchLikePatternList."""
expectation = gxe.ExpectColumnValuesToNotMatchLikePatternList(
column=COL_NAME, like_pattern_list=["%a%"]
)
result = batch_for_datasource.validate(
expectation, result_format={"result_format": "BASIC", "include_unexpected_rows": True}
)
assert not result.success
result_dict = result["result"]
# Verify that unexpected_rows is present and contains the expected data
assert "unexpected_rows" in result_dict
assert result_dict["unexpected_rows"] is not None
unexpected_rows_data = result_dict["unexpected_rows"]
assert isinstance(unexpected_rows_data, list)
# Should contain 3 rows where COL_NAME matches like_pattern_list ["%a%"]
# ("aa", "ab", "ac" all contain 'a')
assert len(unexpected_rows_data) == 3
# Check that "aa", "ab", and "ac" appear in the unexpected rows data
unexpected_rows_str = str(unexpected_rows_data)
assert "aa" in unexpected_rows_str
assert "ab" in unexpected_rows_str
assert "ac" in unexpected_rows_str
| TestNormalSql |
python | getsentry__sentry | src/sentry/deletions/tasks/hybrid_cloud.py | {
"start": 1586,
"end": 16417
} | class ____:
low: int
up: int
has_more: bool
transaction_id: str
def _get_redis_client() -> RedisCluster[str] | StrictRedis[str]:
return redis.redis_clusters.get(settings.SENTRY_HYBRIDCLOUD_DELETIONS_REDIS_CLUSTER)
def get_watermark_key(prefix: str, field: HybridCloudForeignKey[Any, Any]) -> str:
return f"{prefix}.{field.model._meta.db_table}.{field.name}"
def get_watermark(prefix: str, field: HybridCloudForeignKey[Any, Any]) -> tuple[int, str]:
client = _get_redis_client()
key = get_watermark_key(prefix, field)
v = client.get(key)
if v is None:
result = (0, uuid4().hex)
client.set(key, json.dumps(result))
return result
lower, transaction_id = json.loads(v)
if not (isinstance(lower, int) and isinstance(transaction_id, str)):
raise TypeError("Expected watermarks data to be a tuple of (int, str)")
return lower, transaction_id
def set_watermark(
prefix: str, field: HybridCloudForeignKey[Any, Any], value: int, prev_transaction_id: str
) -> None:
_get_redis_client().set(
get_watermark_key(prefix, field),
json.dumps((value, sha1(prev_transaction_id.encode("utf8")).hexdigest())),
)
metrics.gauge(
"deletion.hybrid_cloud.low_bound",
value,
tags=dict(
field_name=f"{field.model._meta.db_table}.{field.name}",
watermark=prefix,
),
)
def _chunk_watermark_batch(
prefix: str,
field: HybridCloudForeignKey[Any, Any],
manager: Manager[Any],
*,
batch_size: int,
model: type[Model],
) -> WatermarkBatch:
lower, transaction_id = get_watermark(prefix, field)
agg = manager.aggregate(Min("id"), Max("id"))
lower = lower or ((agg["id__min"] or 1) - 1)
upper = agg["id__max"] or 0
batch_upper = min(upper, lower + batch_size)
# cap to batch size so that query timeouts don't get us.
capped = upper
if upper >= batch_upper:
capped = batch_upper
watermark_delta = max(upper - lower, 0)
metric_field_name = f"{model._meta.db_table}:{field.name}"
metric_tags = dict(field_name=metric_field_name, watermark_type=prefix)
metrics.gauge(
"deletion.hybrid_cloud.watermark_delta",
value=watermark_delta,
tags=metric_tags,
sample_rate=1.0,
)
return WatermarkBatch(
low=lower, up=capped, has_more=batch_upper < upper, transaction_id=transaction_id
)
@instrumented_task(
name="sentry.deletions.tasks.hybrid_cloud.schedule_hybrid_cloud_foreign_key_jobs_control",
namespace=deletion_control_tasks,
silo_mode=SiloMode.CONTROL,
)
def schedule_hybrid_cloud_foreign_key_jobs_control() -> None:
if options.get("hybrid_cloud.disable_tombstone_cleanup"):
return
_schedule_hybrid_cloud_foreign_key(
SiloMode.CONTROL, process_hybrid_cloud_foreign_key_cascade_batch_control
)
@instrumented_task(
name="sentry.deletions.tasks.hybrid_cloud.schedule_hybrid_cloud_foreign_key_jobs",
namespace=deletion_tasks,
silo_mode=SiloMode.REGION,
)
def schedule_hybrid_cloud_foreign_key_jobs() -> None:
if options.get("hybrid_cloud.disable_tombstone_cleanup"):
return
_schedule_hybrid_cloud_foreign_key(
SiloMode.REGION, process_hybrid_cloud_foreign_key_cascade_batch
)
def _schedule_hybrid_cloud_foreign_key(silo_mode: SiloMode, cascade_task: Task[Any, Any]) -> None:
for app, app_models in apps.all_models.items():
for model in app_models.values():
if not hasattr(model._meta, "silo_limit"):
continue
# Only process models local this operational mode.
if silo_mode not in model._meta.silo_limit.modes:
continue
for field in model._meta.fields:
if not isinstance(field, HybridCloudForeignKey):
continue
cascade_task.delay(
app_name=app,
model_name=model.__name__,
field_name=field.name,
silo_mode=silo_mode.name,
)
@instrumented_task(
name="sentry.deletions.tasks.hybrid_cloud.process_hybrid_cloud_foreign_key_cascade_batch_control",
namespace=deletion_control_tasks,
silo_mode=SiloMode.CONTROL,
)
def process_hybrid_cloud_foreign_key_cascade_batch_control(
app_name: str, model_name: str, field_name: str, **kwargs: Any
) -> None:
if options.get("hybrid_cloud.disable_tombstone_cleanup"):
return
_process_hybrid_cloud_foreign_key_cascade(
app_name=app_name,
model_name=model_name,
field_name=field_name,
process_task=process_hybrid_cloud_foreign_key_cascade_batch_control,
silo_mode=SiloMode.CONTROL,
)
@instrumented_task(
name="sentry.deletions.tasks.process_hybrid_cloud_foreign_key_cascade_batch",
namespace=deletion_tasks,
silo_mode=SiloMode.REGION,
)
def process_hybrid_cloud_foreign_key_cascade_batch(
app_name: str, model_name: str, field_name: str, **kwargs: Any
) -> None:
if options.get("hybrid_cloud.disable_tombstone_cleanup"):
return
_process_hybrid_cloud_foreign_key_cascade(
app_name=app_name,
model_name=model_name,
field_name=field_name,
process_task=process_hybrid_cloud_foreign_key_cascade_batch,
silo_mode=SiloMode.REGION,
)
def _process_hybrid_cloud_foreign_key_cascade(
app_name: str,
model_name: str,
field_name: str,
process_task: Task[Any, Any],
silo_mode: SiloMode,
) -> None:
"""
Called by the silo bound tasks above.
"""
try:
model = apps.get_model(app_label=app_name, model_name=model_name)
try:
field = model._meta.get_field(field_name)
if not isinstance(field, HybridCloudForeignKey):
raise Exception(f"The {field_name} field is not a HybridCloudForeignKey")
except Exception as err:
sentry_sdk.capture_exception(err)
raise LookupError(f"Could not find field {field_name} on model {app_name}.{model_name}")
tombstone_cls = TombstoneBase.class_for_silo_mode(silo_mode)
assert tombstone_cls, "A tombstone class is required"
# We rely on the return value of _process_tombstone_reconciliation
# to short circuit the second half of this `or` so that the terminal batch
# also updates the tombstone watermark.
if _process_tombstone_reconciliation(
field, model, tombstone_cls, True
) or _process_tombstone_reconciliation(field, model, tombstone_cls, False):
process_task.delay(
app_name=app_name,
model_name=model_name,
field_name=field_name,
silo_mode=silo_mode.name,
)
except Exception as err:
sentry_sdk.set_context(
"deletion.hybrid_cloud",
dict(
app_name=app_name,
model_name=model_name,
field_name=field_name,
silo_mode=silo_mode,
),
)
sentry_sdk.capture_exception(err)
raise
# Convenience wrapper for mocking in tests
def get_batch_size() -> int:
return 500
def _process_tombstone_reconciliation(
field: HybridCloudForeignKey[Any, Any],
model: Any,
tombstone_cls: type[TombstoneBase],
row_after_tombstone: bool,
) -> bool:
from sentry import deletions
prefix = "tombstone"
watermark_manager: Manager[Any] = tombstone_cls.objects
if row_after_tombstone:
prefix = "row"
watermark_manager = field.model.objects
watermark_batch = _chunk_watermark_batch(
prefix, field, watermark_manager, batch_size=get_batch_size(), model=model
)
has_more = watermark_batch.has_more
if watermark_batch.low < watermark_batch.up:
to_delete_ids, oldest_seen = _get_model_ids_for_tombstone_cascade(
tombstone_cls=tombstone_cls,
model=model,
field=field,
row_after_tombstone=row_after_tombstone,
watermark_batch=watermark_batch,
)
if field.on_delete == "CASCADE":
task = deletions.get(
model=model,
query={"id__in": to_delete_ids},
transaction_id=watermark_batch.transaction_id,
)
if task.chunk():
has_more = True # The current batch is not complete, rerun this task again
else:
set_watermark(prefix, field, watermark_batch.up, watermark_batch.transaction_id)
elif field.on_delete == "SET_NULL":
model.objects.filter(id__in=to_delete_ids).update(**{field.name: None})
set_watermark(prefix, field, watermark_batch.up, watermark_batch.transaction_id)
elif field.on_delete == "DO_NOTHING":
set_watermark(prefix, field, watermark_batch.up, watermark_batch.transaction_id)
else:
raise ValueError(
f"{field.model.__name__}.{field.name} has unexpected on_delete={field.on_delete}, could not process delete!"
)
metrics.timing(
"deletion.hybrid_cloud.processing_lag",
datetime.datetime.now().timestamp() - oldest_seen.timestamp(),
tags=dict(
field_name=f"{model._meta.db_table}.{field.name}",
watermark=prefix,
),
)
return has_more
def _get_model_ids_for_tombstone_cascade(
tombstone_cls: type[TombstoneBase],
model: type[Model],
field: HybridCloudForeignKey[Any, Any],
row_after_tombstone: bool,
watermark_batch: WatermarkBatch,
) -> tuple[list[int], datetime.datetime]:
"""
Queries the database or databases if spanning multiple, and returns
a tuple with a list of row IDs to delete, and the oldest
tombstone timestamp for the batch.
:param tombstone_cls: Either a RegionTombstone or ControlTombstone, depending on
which silo the tombstone process is running.
:param model: The model with a HybridCloudForeignKey to process.
:param field: The HybridCloudForeignKey field from the model to process.
:param row_after_tombstone: Determines which table is bound by the
watermark batch. When set to true, the model's IDs are used as the
bounds, otherwise, the tombstone's IDs are used.
:param watermark_batch: The batch information containing ID bounds for the
watermark query.
:return:
"""
to_delete_ids = []
oldest_seen = timezone.now()
tombstone_and_model_in_same_db = router.db_for_read(model) == router.db_for_read(tombstone_cls)
watermark_target = "t"
if row_after_tombstone:
watermark_target = "r"
if tombstone_and_model_in_same_db:
with connections[router.db_for_read(model)].cursor() as conn:
conn.execute(
f"""
SELECT r.id, t.created_at
FROM {model._meta.db_table} r
JOIN {tombstone_cls._meta.db_table} t
ON t.table_name = %(table_name)s AND t.object_identifier = r.{field.name}
WHERE {watermark_target}.id > %(low)s AND {watermark_target}.id <= %(up)s
""",
{
"table_name": field.foreign_table_name,
"low": watermark_batch.low,
"up": watermark_batch.up,
},
)
for row_id, tomb_created in conn.fetchall():
to_delete_ids.append(row_id)
oldest_seen = min(oldest_seen, tomb_created)
return to_delete_ids, oldest_seen
# Because tombstones can span multiple databases, we can't always rely on
# the join code above. Instead, we have to manually query IDs from the
# watermark target table, querying the intersection of IDs manually.
# The implementation of this varies depending on whether we are
# processing row or tombstone watermarks.
if row_after_tombstone:
return get_ids_cross_db_for_row_watermark(
tombstone_cls=tombstone_cls,
model=model,
field=field,
row_watermark_batch=watermark_batch,
)
return get_ids_cross_db_for_tombstone_watermark(
tombstone_cls=tombstone_cls,
model=model,
field=field,
tombstone_watermark_batch=watermark_batch,
)
def get_ids_cross_db_for_row_watermark(
tombstone_cls: type[TombstoneBase],
model: type[Model],
field: HybridCloudForeignKey[Any, Any],
row_watermark_batch: WatermarkBatch,
) -> tuple[list[int], datetime.datetime]:
oldest_seen = timezone.now()
model_object_id_pairs = model.objects.filter(
id__lte=row_watermark_batch.up, id__gt=row_watermark_batch.low
).values_list("id", f"{field.name}")
# Construct a map of foreign key IDs to model IDs, which gives us the
# minimal set of foreign key values to lookup in the tombstones table.
fk_to_model_id_map: defaultdict[int, set[int]] = defaultdict(set)
for m_id, o_id in model_object_id_pairs:
fk_to_model_id_map[o_id].add(m_id)
object_ids_to_check = fk_to_model_id_map.keys()
tombstone_entries = tombstone_cls.objects.filter(
object_identifier__in=object_ids_to_check,
table_name=field.foreign_table_name,
).values_list("object_identifier", "created_at")
affected_rows: list[int] = []
# Once we have the intersecting tombstones, use the dictionary we
# created before to construct the minimal set of model IDs we need to
# update with cascade behavior.
for object_id, created_at in tombstone_entries:
affected_rows.extend(fk_to_model_id_map[object_id])
oldest_seen = min(oldest_seen, created_at)
return affected_rows, oldest_seen
def get_ids_cross_db_for_tombstone_watermark(
tombstone_cls: type[TombstoneBase],
model: type[Model],
field: HybridCloudForeignKey[Any, Any],
tombstone_watermark_batch: WatermarkBatch,
) -> tuple[list[int], datetime.datetime]:
oldest_seen = timezone.now()
tombstone_entries = tombstone_cls.objects.filter(
id__lte=tombstone_watermark_batch.up,
id__gt=tombstone_watermark_batch.low,
table_name=field.foreign_table_name,
).values_list("object_identifier", "created_at")
ids_to_check = []
for object_id, created_at in tombstone_entries:
ids_to_check.append(object_id)
oldest_seen = min(oldest_seen, created_at)
field_name = f"{field.name}__in"
query_kwargs = {field_name: ids_to_check}
affected_rows = list(model.objects.filter(**query_kwargs).values_list("id", flat=True))
return affected_rows, oldest_seen
| WatermarkBatch |
python | sqlalchemy__sqlalchemy | test/dialect/sqlite/test_reflection.py | {
"start": 1050,
"end": 1814
} | class ____(fixtures.TestBase):
__only_on__ = "sqlite"
__backend__ = True
def setup_test(self):
exec_sql(testing.db, "CREATE TABLE a (id INTEGER PRIMARY KEY)")
# this syntax actually works on other DBs perhaps we'd want to add
# tests to test_reflection
exec_sql(
testing.db, "CREATE TABLE b (id INTEGER PRIMARY KEY REFERENCES a)"
)
def teardown_test(self):
exec_sql(testing.db, "drop table b")
exec_sql(testing.db, "drop table a")
def test_reflect_tables_fk_no_colref(self):
meta = MetaData()
a = Table("a", meta, autoload_with=testing.db)
b = Table("b", meta, autoload_with=testing.db)
assert b.c.id.references(a.c.id)
| ReflectHeadlessFKsTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyflakes/F722.py | {
"start": 0,
"end": 409
} | class ____:
pass
def f() -> "A":
pass
def g() -> "///":
pass
X: """List[int]"""'☃' = []
# Type annotations with triple quotes can contain newlines and indentation
# https://github.com/python/typing-council/issues/9
y: """
int |
str
"""
z: """(
int |
str
)
"""
invalid1: """
int |
str)
"""
invalid2: """
int) |
str
"""
invalid3: """
((int)
"""
invalid4: """
(int
"""
| A |
python | altair-viz__altair | altair/expr/core.py | {
"start": 7694,
"end": 7948
} | class ____(Expression):
def __init__(self, name, args) -> None:
super().__init__(name=name, args=args)
def __repr__(self):
args = ",".join(_js_repr(arg) for arg in self.args)
return f"{self.name}({args})"
| FunctionExpression |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver12.py | {
"start": 248,
"end": 350
} | class ____:
def chain(self: _T1) -> _T1: ...
def func1(p1: _T2) -> _T2:
return p1.chain()
| ClassA |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_ordered_dict.py | {
"start": 40983,
"end": 41647
} | class ____:
def __init__(self, size):
super().__init__()
self.size = size
self.counts = dict.fromkeys(('get', 'set', 'del'), 0)
def __getitem__(self, item):
self.counts['get'] += 1
value = super().__getitem__(item)
self.move_to_end(item)
return value
def __setitem__(self, key, value):
self.counts['set'] += 1
while key not in self and len(self) >= self.size:
self.popitem(last=False)
super().__setitem__(key, value)
self.move_to_end(key)
def __delitem__(self, key):
self.counts['del'] += 1
super().__delitem__(key)
| SimpleLRUCache |
python | vyperlang__vyper | vyper/builtins/functions.py | {
"start": 70591,
"end": 73262
} | class ____(BuiltinFunctionT):
_id = "uint2str"
_inputs = [("x", IntegerT.unsigneds())]
def fetch_call_return(self, node):
arg_t = self.infer_arg_types(node)[0]
bits = arg_t.bits
len_needed = math.ceil(bits * math.log(2) / math.log(10))
return StringT(len_needed)
def _try_fold(self, node):
validate_call_args(node, 1)
value = node.args[0].get_folded_value()
if not isinstance(value, vy_ast.Int):
raise UnfoldableNode
value = value.value
if value < 0:
raise InvalidType("Only unsigned ints allowed", node)
value = str(value)
return vy_ast.Str.from_node(node, value=value)
def infer_arg_types(self, node, expected_return_typ=None):
self._validate_arg_types(node)
input_type = get_possible_types_from_node(node.args[0]).pop()
return [input_type]
@process_inputs
def build_IR(self, expr, args, kwargs, context):
return_t = self.fetch_call_return(expr)
n_digits = return_t.maxlen
with args[0].cache_when_complex("val") as (b1, val):
buf = context.new_internal_variable(return_t)
i = IRnode.from_list(context.fresh_varname("uint2str_i"), typ=UINT256_T)
ret = ["repeat", i, 0, n_digits + 1, n_digits + 1]
body = [
"seq",
[
"if",
["eq", val, 0],
# clobber val, and return it as a pointer
[
"seq",
["mstore", ["sub", add_ofst(buf, n_digits), i], i],
["set", val, ["sub", add_ofst(buf, n_digits), i]],
"break",
],
[
"seq",
[
"mstore",
["sub", add_ofst(buf, n_digits), i],
["add", 48, ["mod", val, 10]],
],
["set", val, ["div", val, 10]],
],
],
]
ret.append(body)
# "0" has hex representation 0x00..0130..00
# if (val == 0) {
# return "0"
# } else {
# do the loop
# }
ret = [
"if",
["eq", val, 0],
["seq", ["mstore", add_ofst(buf, 1), ord("0")], ["mstore", buf, 1], buf],
["seq", ret, val],
]
return b1.resolve(IRnode.from_list(ret, location=MEMORY, typ=return_t))
| Uint2Str |
python | openai__openai-python | src/openai/types/responses/response_conversation_param.py | {
"start": 216,
"end": 340
} | class ____(TypedDict, total=False):
id: Required[str]
"""The unique ID of the conversation."""
| ResponseConversationParam |
python | pytorch__pytorch | torch/_dynamo/source.py | {
"start": 7723,
"end": 8270
} | class ____(Source):
random_call_index: int
def guard_source(self) -> GuardSource:
return GuardSource.RANDOM_VALUE
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.append_output(codegen.create_load(codegen.tx.output.random_values_var))
codegen.append_output(codegen.create_load_const(self.random_call_index))
codegen.append_output(create_binary_subscr())
def name(self) -> str:
return f"random_value_{self.random_call_index}"
@dataclasses.dataclass(frozen=True)
| RandomValueSource |
python | doocs__leetcode | solution/0700-0799/0799.Champagne Tower/Solution2.py | {
"start": 0,
"end": 424
} | class ____:
def champagneTower(self, poured: int, query_row: int, query_glass: int) -> float:
f = [poured]
for i in range(1, query_row + 1):
g = [0] * (i + 1)
for j, v in enumerate(f):
if v > 1:
half = (v - 1) / 2
g[j] += half
g[j + 1] += half
f = g
return min(1, f[query_glass])
| Solution |
python | getsentry__sentry | tests/sentry/snuba/test_validators.py | {
"start": 460,
"end": 11447
} | class ____(TestCase):
def setUp(self) -> None:
self.valid_data = {
"queryType": SnubaQuery.Type.ERROR.value,
"dataset": Dataset.Events.value,
"query": "test query",
"aggregate": "count()",
"timeWindow": 60,
"environment": self.environment.name,
"eventTypes": [SnubaQueryEventType.EventType.ERROR.name.lower()],
}
self.context = {
"organization": self.project.organization,
"project": self.project,
"request": self.make_request(),
}
def test_simple(self) -> None:
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert validator.is_valid()
assert validator.validated_data["query_type"] == SnubaQuery.Type.ERROR
assert validator.validated_data["dataset"] == Dataset.Events
assert validator.validated_data["query"] == "test query"
assert validator.validated_data["aggregate"] == "count()"
assert validator.validated_data["time_window"] == 60
assert validator.validated_data["environment"] == self.environment
assert validator.validated_data["event_types"] == [SnubaQueryEventType.EventType.ERROR]
assert isinstance(validator.validated_data["_creator"], DataSourceCreator)
def test_invalid_query(self) -> None:
unsupported_query = "release:latest"
self.valid_data["query"] = unsupported_query
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert not validator.is_valid()
assert validator.errors.get("query") == [
ErrorDetail(
string=f"Unsupported Query: We do not currently support the {unsupported_query} query",
code="invalid",
)
]
def test_invalid_query_type(self) -> None:
invalid_query_type = 666
self.valid_data["queryType"] = invalid_query_type
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert not validator.is_valid()
assert validator.errors.get("queryType") == [
ErrorDetail(string=f"Invalid query type {invalid_query_type}", code="invalid")
]
def test_validated_create_source_limits(self) -> None:
with self.settings(MAX_QUERY_SUBSCRIPTIONS_PER_ORG=2):
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert validator.is_valid()
validator.validated_create_source(validator.validated_data)
validator.validated_create_source(validator.validated_data)
with pytest.raises(serializers.ValidationError) as e:
validator.validated_create_source(validator.validated_data)
assert e.value.detail == [
ErrorDetail(
string="You may not exceed 2 data sources of this type.",
code="invalid",
)
]
def test_validated_create_source_limits_with_override(self) -> None:
with self.settings(MAX_QUERY_SUBSCRIPTIONS_PER_ORG=2):
with self.options(
{
"metric_alerts.extended_max_subscriptions_orgs": [self.organization.id],
"metric_alerts.extended_max_subscriptions": 4,
}
):
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert validator.is_valid()
validator.validated_create_source(validator.validated_data)
validator.validated_create_source(validator.validated_data)
validator.validated_create_source(validator.validated_data)
validator.validated_create_source(validator.validated_data)
with pytest.raises(serializers.ValidationError) as e:
validator.validated_create_source(validator.validated_data)
assert e.value.detail == [
ErrorDetail(
string="You may not exceed 4 data sources of this type.",
code="invalid",
)
]
@with_feature("organizations:workflow-engine-metric-alert-group-by-creation")
def test_valid_group_by(self) -> None:
"""Test that valid group_by data is accepted."""
self.valid_data["group_by"] = ["project", "environment"]
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert validator.is_valid()
assert validator.validated_data["group_by"] == ["project", "environment"]
@with_feature("organizations:workflow-engine-metric-alert-group-by-creation")
def test_empty_group_by(self) -> None:
"""Test that empty group_by list is rejected."""
self.valid_data["group_by"] = []
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert not validator.is_valid()
# The serializer catches this before our custom validation
assert "groupBy" in validator.errors
@with_feature("organizations:workflow-engine-metric-alert-group-by-creation")
def test_empty_group_by_string(self) -> None:
"""Test that empty group_by list is rejected."""
self.valid_data["group_by"] = [""]
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert not validator.is_valid()
assert "groupBy" in validator.errors
@with_feature("organizations:workflow-engine-metric-alert-group-by-creation")
def test_none_group_by(self) -> None:
"""Test that None group_by is handled correctly."""
self.valid_data["group_by"] = None
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert not validator.is_valid()
assert "groupBy" in validator.errors
@with_feature("organizations:workflow-engine-metric-alert-group-by-creation")
def test_invalid_group_by_not_list(self) -> None:
"""Test that non-list group_by raises validation error."""
self.valid_data["group_by"] = "not_a_list"
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert not validator.is_valid()
assert "groupBy" in validator.errors
@with_feature("organizations:workflow-engine-metric-alert-group-by-creation")
def test_group_by_too_many_items(self) -> None:
"""Test that group_by with more than 100 items raises validation error."""
self.valid_data["group_by"] = [f"field_{i}" for i in range(101)]
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert not validator.is_valid()
assert validator.errors.get("nonFieldErrors") == [
ErrorDetail(string="Group by must be 100 or fewer items", code="invalid")
]
@with_feature("organizations:workflow-engine-metric-alert-group-by-creation")
def test_group_by_duplicate_items(self) -> None:
"""Test that group_by with duplicate items raises validation error."""
self.valid_data["group_by"] = ["project", "environment", "project"]
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert not validator.is_valid()
assert validator.errors.get("nonFieldErrors") == [
ErrorDetail(string="Group by must be a unique list of strings", code="invalid")
]
def test_group_by_no_feature(self) -> None:
"""Test group_by with performance dataset."""
self.valid_data.update(
{
"queryType": SnubaQuery.Type.ERROR.value,
"dataset": Dataset.Events.value,
"eventTypes": [SnubaQueryEventType.EventType.ERROR.name.lower()],
"group_by": ["project", "environment"],
}
)
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert not validator.is_valid()
assert validator.errors.get("nonFieldErrors") == [
ErrorDetail(
string="Group by Metric Alerts feature must be enabled to use this field",
code="invalid",
)
]
@with_feature("organizations:workflow-engine-metric-alert-group-by-creation")
def test_group_by_string_too_long(self) -> None:
"""Test that group_by with strings longer than 200 characters is rejected."""
self.valid_data["group_by"] = ["project", "a" * 201, "environment"]
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert not validator.is_valid()
assert "groupBy" in validator.errors
@with_feature("organizations:workflow-engine-metric-alert-group-by-creation")
def test_group_by_mixed_types(self) -> None:
"""Test that group_by with non-string items is converted to strings."""
self.valid_data["group_by"] = ["project", 123, "environment"]
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert validator.is_valid()
assert validator.validated_data["group_by"] == ["project", "123", "environment"]
@with_feature("organizations:workflow-engine-metric-alert-group-by-creation")
def test_group_by_with_valid_fields(self) -> None:
"""Test that common valid group_by fields are accepted."""
valid_group_by_fields = [
"project",
"environment",
"release",
"user",
"transaction",
"message",
"level",
"type",
"mechanism",
"handled",
"unhandled",
"culprit",
"title",
"location",
"function",
"package",
"sdk_name",
"sdk_version",
"device_name",
"device_family",
"device_model",
"os_name",
"os_version",
"browser_name",
"browser_version",
"geo_country_code",
"geo_region",
"geo_city",
]
for field in valid_group_by_fields:
self.valid_data["group_by"] = [field]
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert validator.is_valid(), f"Failed for field: {field}"
assert validator.validated_data["group_by"] == [field]
@with_feature("organizations:workflow-engine-metric-alert-group-by-creation")
def test_group_by_multiple_valid_fields(self) -> None:
self.valid_data["group_by"] = ["project", "environment", "release", "user"]
validator = SnubaQueryValidator(data=self.valid_data, context=self.context)
assert validator.is_valid()
assert validator.validated_data["group_by"] == ["project", "environment", "release", "user"]
| SnubaQueryValidatorTest |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 74958,
"end": 75482
} | class ____(PrefectFilterBaseModel):
"""Filter by `Artifact.flow_run_id`."""
any_: Optional[list[UUID]] = Field(
default=None, description="A list of flow run IDs to include"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.Artifact.flow_run_id.in_(self.any_))
return filters
| ArtifactFilterFlowRunId |
python | plotly__plotly.py | plotly/graph_objs/layout/newselection/_line.py | {
"start": 235,
"end": 4471
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.newselection"
_path_str = "layout.newselection.line"
_valid_props = {"color", "dash", "width"}
@property
def color(self):
"""
Sets the line color. By default uses either dark grey or white
to increase contrast with background color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def dash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the line color. By default uses either dark grey
or white to increase contrast with background color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
width
Sets the line width (in px).
"""
def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.newselection.Line`
color
Sets the line color. By default uses either dark grey
or white to increase contrast with background color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
width
Sets the line width (in px).
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.newselection.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.newselection.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("dash", arg, dash)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/control_flow_ops_test.py | {
"start": 80448,
"end": 84858
} | class ____(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_var_loop_len(self):
num_iters = array_ops.placeholder(dtypes.int32)
def loop_fn(_):
return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
[3]) # [0, 2, 0]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
with self.cached_session() as sess:
sess.run(pfor, feed_dict={num_iters: 3})
@test_util.run_v1_only("b/122612051")
def test_sparse_result_none_stacked(self):
num_iters = 10
def loop_fn(_):
return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
[3]) # [0, 2, 0]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
indices = [[i, j] for i in range(num_iters) for j in range(3)]
values = [4, 5, 6] * num_iters
dense_shapes = [num_iters, 3]
# Expected result: [[4, 5, 6], [4, 5, 6], [4, 5, 6], ...]
manual = sparse_tensor.SparseTensor(indices, values, dense_shapes)
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_all_stacked(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
indices = array_ops.expand_dims(i, 0)
return sparse_tensor.SparseTensor(indices, i, i + 1) # [0, ..., 0, i]
# Expected result: [[0], [0, 1], [0, 0, 2], [0, 0, 0, 3], ...]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
list(range(num_iters)),
(num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_indices_stacked(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
indices = array_ops.expand_dims(i, 0)
return sparse_tensor.SparseTensor(indices, [1], [num_iters])
# Expected result: identity matrix size num_iters * num_iters
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
[1] * num_iters, (num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_values_stacked(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
return sparse_tensor.SparseTensor([[0]], i, [num_iters]) # [i, 0, ..., 0]
# Expected result: [[1, 0, ...], [2, 0, ...], [3, 0, ...], ...]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
list(range(num_iters)),
(num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_shapes_stacked(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
return sparse_tensor.SparseTensor([[0]], [1], i + 1) # [1, 0, ..., 0]
# Expected result: [[1, 0, 0, ...], [1, 0, 0, ...], ...]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
[1] * num_iters, (num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_shapes_stacked_2D(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i + 1, dtypes.int64), 0)
shape = array_ops.concat([i, i], 0)
return sparse_tensor.SparseTensor([[0, 0]], [1], shape) # [1, 0, ..., 0]
# Expected result: [[[1, 0, ...], [0, ..., 0], [0, ..., 0], ...], ...]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, 0, 0] for i in range(num_iters)],
[1] * num_iters,
(num_iters, num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
# Dummy CompositeTensor to test CompositeTensor support.
| SparseTest |
python | tensorflow__tensorflow | tensorflow/python/ops/math_grad_test.py | {
"start": 21892,
"end": 23974
} | class ____(test.TestCase):
def _xlog1py_gradients(self, x, y):
xlog1py_xgrad = self.evaluate(
gradients.gradients(math_ops.xlog1py(x, y), x)[0])
xlog1py_ygrad = self.evaluate(
gradients.gradients(math_ops.xlog1py(x, y), y)[0])
return xlog1py_xgrad, xlog1py_ygrad
@test_util.run_deprecated_v1
def testNonZeroValuesGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0.1, dtype=dtype)
y = constant_op.constant(3.1, dtype=dtype)
xlog1py_xgrad, xlog1py_ygrad = self._xlog1py_gradients(x, y)
xlog1py_expected_xgrad = self.evaluate(math_ops.log1p(y))
xlog1py_expected_ygrad = self.evaluate(x / (1. + y))
self.assertAllClose(xlog1py_expected_xgrad, xlog1py_xgrad)
self.assertAllClose(xlog1py_expected_ygrad, xlog1py_ygrad)
@test_util.run_deprecated_v1
def testZeroXGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0., dtype=dtype)
y = constant_op.constant(3.1, dtype=dtype)
xlog1py_xgrad, xlog1py_ygrad = self._xlog1py_gradients(x, y)
zero = self.evaluate(x)
self.assertAllClose(zero, xlog1py_xgrad)
self.assertAllClose(zero, xlog1py_ygrad)
@test_util.run_deprecated_v1
def testNegOneYGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0.1, dtype=dtype)
y = constant_op.constant(-1., dtype=dtype)
xlog1py_xgrad, xlog1py_ygrad = self._xlog1py_gradients(x, y)
self.assertAllClose(-np.inf, xlog1py_xgrad)
self.assertAllClose(np.inf, xlog1py_ygrad)
@test_util.run_deprecated_v1
def testZeroXNegOneYGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0., dtype=dtype)
y = constant_op.constant(-1., dtype=dtype)
xlog1py_xgrad, xlog1py_ygrad = self._xlog1py_gradients(x, y)
zero = self.evaluate(x)
self.assertAllClose(zero, xlog1py_xgrad)
self.assertAllClose(zero, xlog1py_ygrad)
| Xlog1pyTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pycodestyle/E70.py | {
"start": 1056,
"end": 1193
} | class ____:
match: Optional[Match] = None
#: E702:2:4
while 1:
1;...
#: E703:2:1
0\
;
#: E701:2:3
a = \
5;
#:
with x(y) as z: ...
| Foo |
python | celery__celery | t/unit/backends/test_base.py | {
"start": 1958,
"end": 7330
} | class ____:
def setup_method(self):
self.app.conf.accept_content = ['json']
def test_accept_precedence(self):
# default is app.conf.accept_content
accept_content = self.app.conf.accept_content
b1 = BaseBackend(self.app)
assert prepare_accept_content(accept_content) == b1.accept
# accept parameter
b2 = BaseBackend(self.app, accept=['yaml'])
assert len(b2.accept) == 1
assert list(b2.accept)[0] == 'application/x-yaml'
assert prepare_accept_content(['yaml']) == b2.accept
# accept parameter over result_accept_content
self.app.conf.result_accept_content = ['json']
b3 = BaseBackend(self.app, accept=['yaml'])
assert len(b3.accept) == 1
assert list(b3.accept)[0] == 'application/x-yaml'
assert prepare_accept_content(['yaml']) == b3.accept
# conf.result_accept_content if specified
self.app.conf.result_accept_content = ['yaml']
b4 = BaseBackend(self.app)
assert len(b4.accept) == 1
assert list(b4.accept)[0] == 'application/x-yaml'
assert prepare_accept_content(['yaml']) == b4.accept
def test_get_result_meta(self):
b1 = BaseBackend(self.app)
meta = b1._get_result_meta(result={'fizz': 'buzz'},
state=states.SUCCESS, traceback=None,
request=None)
assert meta['status'] == states.SUCCESS
assert meta['result'] == {'fizz': 'buzz'}
assert meta['traceback'] is None
self.app.conf.result_extended = True
args = ['a', 'b']
kwargs = {'foo': 'bar'}
task_name = 'mytask'
b2 = BaseBackend(self.app)
request = Context(args=args, kwargs=kwargs,
task=task_name,
delivery_info={'routing_key': 'celery'})
meta = b2._get_result_meta(result={'fizz': 'buzz'},
state=states.SUCCESS, traceback=None,
request=request, encode=False)
assert meta['name'] == task_name
assert meta['args'] == args
assert meta['kwargs'] == kwargs
assert meta['queue'] == 'celery'
def test_get_result_meta_stamps_attribute_error(self):
class Request:
pass
self.app.conf.result_extended = True
b1 = BaseBackend(self.app)
meta = b1._get_result_meta(result={'fizz': 'buzz'},
state=states.SUCCESS, traceback=None,
request=Request())
assert meta['status'] == states.SUCCESS
assert meta['result'] == {'fizz': 'buzz'}
assert meta['traceback'] is None
def test_get_result_meta_encoded(self):
self.app.conf.result_extended = True
b1 = BaseBackend(self.app)
args = ['a', 'b']
kwargs = {'foo': 'bar'}
request = Context(args=args, kwargs=kwargs)
meta = b1._get_result_meta(result={'fizz': 'buzz'},
state=states.SUCCESS, traceback=None,
request=request, encode=True)
assert meta['args'] == ensure_bytes(b1.encode(args))
assert meta['kwargs'] == ensure_bytes(b1.encode(kwargs))
def test_get_result_meta_with_none(self):
b1 = BaseBackend(self.app)
meta = b1._get_result_meta(result=None,
state=states.SUCCESS, traceback=None,
request=None)
assert meta['status'] == states.SUCCESS
assert meta['result'] is None
assert meta['traceback'] is None
self.app.conf.result_extended = True
args = ['a', 'b']
kwargs = {'foo': 'bar'}
task_name = 'mytask'
b2 = BaseBackend(self.app)
request = Context(args=args, kwargs=kwargs,
task=task_name,
delivery_info={'routing_key': 'celery'})
meta = b2._get_result_meta(result=None,
state=states.SUCCESS, traceback=None,
request=request, encode=False)
assert meta['name'] == task_name
assert meta['args'] == args
assert meta['kwargs'] == kwargs
assert meta['queue'] == 'celery'
def test_get_result_meta_format_date(self):
import datetime
self.app.conf.result_extended = True
b1 = BaseBackend(self.app)
args = ['a', 'b']
kwargs = {'foo': 'bar'}
request = Context(args=args, kwargs=kwargs)
meta = b1._get_result_meta(result={'fizz': 'buzz'},
state=states.SUCCESS, traceback=None,
request=request, format_date=True)
assert isinstance(meta['date_done'], str)
self.app.conf.result_extended = True
b2 = BaseBackend(self.app)
args = ['a', 'b']
kwargs = {'foo': 'bar'}
request = Context(args=args, kwargs=kwargs)
meta = b2._get_result_meta(result={'fizz': 'buzz'},
state=states.SUCCESS, traceback=None,
request=request, format_date=False)
assert isinstance(meta['date_done'], datetime.datetime)
| test_Backend_interface |
python | falconry__falcon | tests/test_wsgi_interface.py | {
"start": 71,
"end": 1480
} | class ____:
def test_srmock(self):
mock = testing.StartResponseMock()
mock(falcon.HTTP_200, ())
assert mock.status == falcon.HTTP_200
assert mock.exc_info is None
mock = testing.StartResponseMock()
exc_info = sys.exc_info()
mock(falcon.HTTP_200, (), exc_info)
assert mock.exc_info == exc_info
def test_pep3333(self):
api = falcon.App()
mock = testing.StartResponseMock()
# Simulate a web request (normally done though a WSGI server)
response = api(testing.create_environ(), mock)
# Verify that the response is iterable
assert _is_iterable(response)
# Make sure start_response was passed a valid status string
assert mock.call_count == 1
assert isinstance(mock.status, str)
assert re.match(r'^\d+[a-zA-Z\s]+$', mock.status)
# Verify headers is a list of tuples, each containing a pair of strings
assert isinstance(mock.headers, list)
if len(mock.headers) != 0:
header = mock.headers[0]
assert isinstance(header, tuple)
assert len(header) == 2
assert isinstance(header[0], str)
assert isinstance(header[1], str)
def _is_iterable(thing):
try:
for i in thing:
break
return True
except TypeError:
return False
| TestWSGIInterface |
python | keras-team__keras | keras/src/metrics/iou_metrics_test.py | {
"start": 8641,
"end": 17737
} | class ____(testing.TestCase):
def test_config(self):
m_obj = metrics.MeanIoU(num_classes=2, name="mean_iou")
self.assertEqual(m_obj.name, "mean_iou")
self.assertEqual(m_obj.num_classes, 2)
m_obj2 = metrics.MeanIoU.from_config(m_obj.get_config())
self.assertEqual(m_obj2.name, "mean_iou")
self.assertEqual(m_obj2.num_classes, 2)
def test_unweighted(self):
y_pred = [0, 1, 0, 1]
y_true = [0, 0, 1, 1]
m_obj = metrics.MeanIoU(num_classes=2)
result = m_obj(y_true, y_pred)
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
self.assertAllClose(result, expected_result, atol=1e-3)
def test_unweighted_ignore_class_255(self):
y_pred = [0, 1, 1, 1]
y_true = [0, 1, 2, 255]
m_obj = metrics.MeanIoU(num_classes=3, ignore_class=255)
result = m_obj(y_true, y_pred)
# cm = [[1, 0, 0],
# [0, 1, 0],
# [0, 1, 0]]
# sum_row = [1, 1, 1], sum_col = [1, 2, 0], true_positives = [1, 1, 0]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (
1 / (1 + 1 - 1) + 1 / (2 + 1 - 1) + 0 / (0 + 1 - 0)
) / 3
self.assertAllClose(result, expected_result, atol=1e-3)
def test_unweighted_ignore_class_1(self):
y_pred = [0, 1, 1, 1]
y_true = [0, 1, 2, -1]
m_obj = metrics.MeanIoU(num_classes=3, ignore_class=-1)
result = m_obj(y_true, y_pred)
# cm = [[1, 0, 0],
# [0, 1, 0],
# [0, 1, 0]]
# sum_row = [1, 1, 1], sum_col = [1, 2, 0], true_positives = [1, 1, 0]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (
1 / (1 + 1 - 1) + 1 / (2 + 1 - 1) + 0 / (0 + 1 - 0)
) / 3
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
y_pred = np.array([0, 1, 0, 1], dtype=np.float32)
y_true = np.array([0, 0, 1, 1])
sample_weight = np.array([0.2, 0.3, 0.4, 0.1])
m_obj = metrics.MeanIoU(num_classes=2, dtype="float32")
result = m_obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2,
# 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (
0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)
) / 2
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted_ignore_class_1(self):
y_pred = np.array([0, 1, 0, 1], dtype=np.float32)
y_true = np.array([0, 0, 1, -1])
sample_weight = np.array([0.2, 0.3, 0.4, 0.1])
m_obj = metrics.MeanIoU(num_classes=2, ignore_class=-1, dtype="float32")
result = m_obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.0]]
# sum_row = [0.6, 0.3], sum_col = [0.5, 0.4], true_positives = [0.2,
# 0.0]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (
0.2 / (0.6 + 0.5 - 0.2) + 0.0 / (0.3 + 0.4 - 0.0)
) / 2
self.assertAllClose(result, expected_result, atol=1e-3)
def test_multi_dim_input(self):
y_pred = np.array([[0, 1], [0, 1]], dtype=np.float32)
y_true = np.array([[0, 0], [1, 1]])
sample_weight = np.array([[0.2, 0.3], [0.4, 0.1]])
m_obj = metrics.MeanIoU(num_classes=2, dtype="float32")
result = m_obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2,
# 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (
0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)
) / 2
self.assertAllClose(result, expected_result, atol=1e-3)
def test_zero_valid_entries(self):
m_obj = metrics.MeanIoU(num_classes=2)
self.assertAllClose(m_obj.result(), 0, atol=1e-3)
def test_zero_and_non_zero_entries(self):
y_pred = np.array([1], dtype=np.float32)
y_true = np.array([1])
m_obj = metrics.MeanIoU(num_classes=2)
result = m_obj(y_true, y_pred)
# cm = [[0, 0],
# [0, 1]]
# sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0 + 1 / (1 + 1 - 1)) / 1
self.assertAllClose(result, expected_result, atol=1e-3)
@staticmethod
def _confusion_matrix(y_true, y_pred, num_classes):
"""
Creates a confusion matrix as a numpy array using vectorized operations.
Parameters:
- y_true: array-like, true class labels.
- y_pred: array-like, predicted class labels.
- num_classes: int, number of classes.
Returns:
- conf_matrix: np.ndarray, confusion matrix of shape (num_classes,
num_classes).
"""
# Map pairs of (y_true, y_pred) to indices in the confusion matrix
indices = y_true * num_classes + y_pred
# Count occurrences of each index
conf_matrix = np.bincount(indices, minlength=num_classes * num_classes)
# Reshape the flat array into a 2D confusion matrix
conf_matrix = conf_matrix.reshape((num_classes, num_classes))
return conf_matrix
@staticmethod
def _get_big_chunk(dtype):
np.random.seed(14)
all_y_true = np.random.choice([0, 1, 2], size=(10, 530, 530))
# Generate random probabilities for each channel
random_probs = np.random.rand(10, 530, 530, 3)
# Normalize to ensure the last dimension sums to 1
all_y_pred = random_probs / random_probs.sum(axis=-1, keepdims=True)
# Convert predictions to class indices
all_y_pred_arg = np.argmax(all_y_pred, axis=-1)
mean_iou_metric = metrics.MeanIoU(num_classes=3, dtype=dtype)
conf_matrix_start_point = np.array(
[
[18729664, 18728760, 18731196],
[18727297, 18726105, 18728071],
[18727917, 18717835, 18723155],
]
)
mean_iou_metric.total_cm = mean_iou_metric.add_variable(
name="total_confusion_matrix",
shape=(3, 3),
initializer=convert_to_tensor(conf_matrix_start_point),
dtype=dtype or "int",
)
mean_iou_metric.update_state(all_y_true, all_y_pred_arg)
tmp_true = np.reshape(all_y_true, -1)
tmp_pred = np.reshape(all_y_pred_arg, -1)
return (
all_y_true,
all_y_pred_arg,
mean_iou_metric,
tmp_true,
tmp_pred,
conf_matrix_start_point,
)
def test_big_chunk(self):
# Init. process with dtype=None which will default to int
(
all_y_true,
all_y_pred_arg,
mean_iou_metric_all,
tmp_true,
tmp_pred,
conf_matrix_start_point,
) = self._get_big_chunk(dtype=None)
conf_matrix_from_keras = np.array(mean_iou_metric_all.total_cm)
# Validate confusion matrices and results
conf_matrix_manual = (
self._confusion_matrix(tmp_true, tmp_pred, 3)
+ conf_matrix_start_point
)
self.assertTrue(
np.array_equal(conf_matrix_from_keras, conf_matrix_manual),
msg="Confusion matrices do not match!",
)
# Now same but with float32 dtype, in here the confusion matrix
# should not match. Likely this can be removed
(
all_y_true,
all_y_pred_arg,
mean_iou_metric_all,
tmp_true,
tmp_pred,
conf_matrix_start_point,
) = self._get_big_chunk(dtype="float32")
conf_matrix_from_keras = np.array(mean_iou_metric_all.total_cm)
# Validate confusion matrices and results
conf_matrix_manual = (
self._confusion_matrix(tmp_true, tmp_pred, 3)
+ conf_matrix_start_point
)
self.assertFalse(
np.array_equal(conf_matrix_from_keras, conf_matrix_manual),
msg="Confusion matrices match, but they should not!",
)
def test_user_warning_float_weight(self):
y_pred = [0, 1, 1, 1]
y_true = [0, 1, 1, 0]
m_obj = metrics.MeanIoU(num_classes=3)
with pytest.warns(Warning, match=r"weight.*float.*int.*casting"):
m_obj(y_true, y_pred, sample_weight=np.array([0.2, 0.3, 0.4, 0.1]))
| MeanIoUTest |
python | dateutil__dateutil | src/dateutil/parser/_parser.py | {
"start": 19356,
"end": 49747
} | class ____(object):
def __init__(self, info=None):
self.info = info or parserinfo()
    def parse(self, timestr, default=None,
              ignoretz=False, tzinfos=None, **kwargs):
        """
        Parse the date/time string into a :class:`datetime.datetime` object.

        :param timestr:
            Any date/time string using the supported formats.

        :param default:
            The default datetime object, if this is a datetime object and not
            ``None``, elements specified in ``timestr`` replace elements in the
            default object.

        :param ignoretz:
            If set ``True``, time zones in parsed strings are ignored and a
            naive :class:`datetime.datetime` object is returned.

        :param tzinfos:
            Additional time zone names / aliases which may be present in the
            string. This argument maps time zone names (and optionally offsets
            from those time zones) to time zones. This parameter can be a
            dictionary with timezone aliases mapping time zone names to time
            zones or a function taking two parameters (``tzname`` and
            ``tzoffset``) and returning a time zone.

            The timezones to which the names are mapped can be an integer
            offset from UTC in seconds or a :class:`tzinfo` object.

            .. doctest::
               :options: +NORMALIZE_WHITESPACE

                >>> from dateutil.parser import parse
                >>> from dateutil.tz import gettz
                >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
                >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
                datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
                >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
                datetime.datetime(2012, 1, 19, 17, 21,
                                  tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))

            This parameter is ignored if ``ignoretz`` is set.

        :param \\*\\*kwargs:
            Keyword arguments as passed to ``_parse()``.

        :return:
            Returns a :class:`datetime.datetime` object or, if the
            ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
            first element being a :class:`datetime.datetime` object, the second
            a tuple containing the fuzzy tokens.

        :raises ParserError:
            Raised for invalid or unknown string format, if the provided
            :class:`tzinfo` is not in a valid format, or if an invalid date
            would be created.

        :raises TypeError:
            Raised for non-string or character stream input.

        :raises OverflowError:
            Raised if the parsed date exceeds the largest valid C integer on
            your system.
        """
        # Unspecified fields are filled from "today at midnight" unless the
        # caller supplies an explicit default datetime.
        if default is None:
            default = datetime.datetime.now().replace(hour=0, minute=0,
                                                      second=0, microsecond=0)

        res, skipped_tokens = self._parse(timestr, **kwargs)

        if res is None:
            raise ParserError("Unknown string format: %s", timestr)

        if len(res) == 0:
            raise ParserError("String does not contain a date: %s", timestr)

        try:
            ret = self._build_naive(res, default)
        except ValueError as e:
            # Invalid calendar dates surface as ParserError, keeping the cause.
            six.raise_from(ParserError(str(e) + ": %s", timestr), e)

        if not ignoretz:
            ret = self._build_tzaware(ret, res, tzinfos)

        if kwargs.get('fuzzy_with_tokens', False):
            return ret, skipped_tokens
        else:
            return ret
    class _result(_resultbase):
        """Mutable container for the individual fields recovered by ``_parse``."""
        __slots__ = ["year", "month", "day", "weekday",
                     "hour", "minute", "second", "microsecond",
                     "tzname", "tzoffset", "ampm","any_unused_tokens"]
    def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
               fuzzy_with_tokens=False):
        """
        Private method which performs the heavy lifting of parsing, called from
        ``parse()``, which passes on its ``kwargs`` to this function.

        :param timestr:
            The string to parse.

        :param dayfirst:
            Whether to interpret the first value in an ambiguous 3-integer date
            (e.g. 01/05/09) as the day (``True``) or month (``False``). If
            ``yearfirst`` is set to ``True``, this distinguishes between YDM
            and YMD. If set to ``None``, this value is retrieved from the
            current :class:`parserinfo` object (which itself defaults to
            ``False``).

        :param yearfirst:
            Whether to interpret the first value in an ambiguous 3-integer date
            (e.g. 01/05/09) as the year. If ``True``, the first number is taken
            to be the year, otherwise the last number is taken to be the year.
            If this is set to ``None``, the value is retrieved from the current
            :class:`parserinfo` object (which itself defaults to ``False``).

        :param fuzzy:
            Whether to allow fuzzy parsing, allowing for string like "Today is
            January 1, 2047 at 8:21:00AM".

        :param fuzzy_with_tokens:
            If ``True``, ``fuzzy`` is automatically set to True, and the parser
            will return a tuple where the first element is the parsed
            :class:`datetime.datetime` datetimestamp and the second element is
            a tuple containing the portions of the string which were ignored:

            .. doctest::

                >>> from dateutil.parser import parse
                >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
                (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))

        """
        if fuzzy_with_tokens:
            fuzzy = True

        info = self.info

        if dayfirst is None:
            dayfirst = info.dayfirst

        if yearfirst is None:
            yearfirst = info.yearfirst

        res = self._result()
        l = _timelex.split(timestr)         # Splits the timestr into tokens

        skipped_idxs = []

        # year/month/day list
        ymd = _ymd()

        len_l = len(l)
        i = 0
        # Token-by-token scan; any IndexError/ValueError below aborts the
        # whole parse (caught at the bottom and reported as failure).
        try:
            while i < len_l:

                # Check if it's a number
                value_repr = l[i]
                try:
                    value = float(value_repr)
                except ValueError:
                    value = None

                if value is not None:
                    # Numeric token
                    i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy)

                # Check weekday
                elif info.weekday(l[i]) is not None:
                    value = info.weekday(l[i])
                    res.weekday = value

                # Check month name
                elif info.month(l[i]) is not None:
                    value = info.month(l[i])
                    ymd.append(value, 'M')

                    if i + 1 < len_l:
                        if l[i + 1] in ('-', '/'):
                            # Jan-01[-99]
                            sep = l[i + 1]
                            ymd.append(l[i + 2])

                            if i + 3 < len_l and l[i + 3] == sep:
                                # Jan-01-99
                                ymd.append(l[i + 4])
                                i += 2

                            i += 2

                        elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and
                              info.pertain(l[i + 2])):
                            # Jan of 01
                            # In this case, 01 is clearly year
                            if l[i + 4].isdigit():
                                # Convert it here to become unambiguous
                                value = int(l[i + 4])
                                year = str(info.convertyear(value))
                                ymd.append(year, 'Y')
                            else:
                                # Wrong guess
                                pass
                                # TODO: not hit in tests
                            i += 4

                # Check am/pm
                elif info.ampm(l[i]) is not None:
                    value = info.ampm(l[i])
                    val_is_ampm = self._ampm_valid(res.hour, res.ampm, fuzzy)

                    if val_is_ampm:
                        res.hour = self._adjust_ampm(res.hour, value)
                        res.ampm = value

                    elif fuzzy:
                        skipped_idxs.append(i)

                # Check for a timezone name
                elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]):
                    res.tzname = l[i]
                    res.tzoffset = info.tzoffset(res.tzname)

                    # Check for something like GMT+3, or BRST+3. Notice
                    # that it doesn't mean "I am 3 hours after GMT", but
                    # "my time +3 is GMT". If found, we reverse the
                    # logic so that timezone parsing code will get it
                    # right.
                    if i + 1 < len_l and l[i + 1] in ('+', '-'):
                        l[i + 1] = ('+', '-')[l[i + 1] == '+']
                        res.tzoffset = None
                        if info.utczone(res.tzname):
                            # With something like GMT+3, the timezone
                            # is *not* GMT.
                            res.tzname = None

                # Check for a numbered timezone
                elif res.hour is not None and l[i] in ('+', '-'):
                    signal = (-1, 1)[l[i] == '+']
                    len_li = len(l[i + 1])

                    # TODO: check that l[i + 1] is integer?
                    if len_li == 4:
                        # -0300
                        hour_offset = int(l[i + 1][:2])
                        min_offset = int(l[i + 1][2:])
                    elif i + 2 < len_l and l[i + 2] == ':':
                        # -03:00
                        hour_offset = int(l[i + 1])
                        min_offset = int(l[i + 3])  # TODO: Check that l[i+3] is minute-like?
                        i += 2
                    elif len_li <= 2:
                        # -[0]3
                        hour_offset = int(l[i + 1][:2])
                        min_offset = 0
                    else:
                        raise ValueError(timestr)

                    res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60)

                    # Look for a timezone name between parenthesis
                    if (i + 5 < len_l and
                            info.jump(l[i + 2]) and l[i + 3] == '(' and
                            l[i + 5] == ')' and
                            3 <= len(l[i + 4]) and
                            self._could_be_tzname(res.hour, res.tzname,
                                                  None, l[i + 4])):
                        # -0300 (BRST)
                        res.tzname = l[i + 4]
                        i += 4

                    i += 1

                # Check jumps
                elif not (info.jump(l[i]) or fuzzy):
                    raise ValueError(timestr)

                else:
                    skipped_idxs.append(i)
                i += 1

            # Process year/month/day
            year, month, day = ymd.resolve_ymd(yearfirst, dayfirst)

            res.century_specified = ymd.century_specified
            res.year = year
            res.month = month
            res.day = day

        except (IndexError, ValueError):
            return None, None

        if not info.validate(res):
            return None, None

        if fuzzy_with_tokens:
            skipped_tokens = self._recombine_skipped(l, skipped_idxs)
            return res, tuple(skipped_tokens)
        else:
            return res, None
    def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy):
        """Consume the numeric token at *idx*, updating *ymd* and *res* in place.

        Returns the index of the last token consumed, so the caller's scan
        resumes after any compound form (HH:MM, 01-01-99, "12 am", ...).
        """
        # Token is a number
        value_repr = tokens[idx]
        try:
            value = self._to_decimal(value_repr)
        except Exception as e:
            six.raise_from(ValueError('Unknown numeric token'), e)

        len_li = len(value_repr)

        len_l = len(tokens)

        if (len(ymd) == 3 and len_li in (2, 4) and
            res.hour is None and
                (idx + 1 >= len_l or
                 (tokens[idx + 1] != ':' and
                  info.hms(tokens[idx + 1]) is None))):
            # 19990101T23[59]
            s = tokens[idx]
            res.hour = int(s[:2])

            if len_li == 4:
                res.minute = int(s[2:])

        elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6):
            # YYMMDD or HHMMSS[.ss]
            s = tokens[idx]

            if not ymd and '.' not in tokens[idx]:
                ymd.append(s[:2])
                ymd.append(s[2:4])
                ymd.append(s[4:])
            else:
                # 19990101T235959[.59]

                # TODO: Check if res attributes already set.
                res.hour = int(s[:2])
                res.minute = int(s[2:4])
                res.second, res.microsecond = self._parsems(s[4:])

        elif len_li in (8, 12, 14):
            # YYYYMMDD
            s = tokens[idx]
            ymd.append(s[:4], 'Y')
            ymd.append(s[4:6])
            ymd.append(s[6:8])

            if len_li > 8:
                res.hour = int(s[8:10])
                res.minute = int(s[10:12])

                if len_li > 12:
                    res.second = int(s[12:])

        elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None:
            # HH[ ]h or MM[ ]m or SS[.ss][ ]s
            hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True)
            (idx, hms) = self._parse_hms(idx, tokens, info, hms_idx)
            if hms is not None:
                # TODO: checking that hour/minute/second are not
                # already set?
                self._assign_hms(res, value_repr, hms)

        elif idx + 2 < len_l and tokens[idx + 1] == ':':
            # HH:MM[:SS[.ss]]
            res.hour = int(value)
            value = self._to_decimal(tokens[idx + 2])  # TODO: try/except for this?
            (res.minute, res.second) = self._parse_min_sec(value)

            if idx + 4 < len_l and tokens[idx + 3] == ':':
                res.second, res.microsecond = self._parsems(tokens[idx + 4])

                idx += 2

            idx += 2

        elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'):
            # Separated date: 01-01[-01], 01/Jan[/01], ...
            sep = tokens[idx + 1]
            ymd.append(value_repr)

            if idx + 2 < len_l and not info.jump(tokens[idx + 2]):
                if tokens[idx + 2].isdigit():
                    # 01-01[-01]
                    ymd.append(tokens[idx + 2])
                else:
                    # 01-Jan[-01]
                    value = info.month(tokens[idx + 2])

                    if value is not None:
                        ymd.append(value, 'M')
                    else:
                        raise ValueError()

                if idx + 3 < len_l and tokens[idx + 3] == sep:
                    # We have three members
                    value = info.month(tokens[idx + 4])

                    if value is not None:
                        ymd.append(value, 'M')
                    else:
                        ymd.append(tokens[idx + 4])
                    idx += 2

                idx += 1
            idx += 1

        elif idx + 1 >= len_l or info.jump(tokens[idx + 1]):
            if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None:
                # 12 am
                hour = int(value)
                res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2]))
                idx += 1
            else:
                # Year, month or day
                ymd.append(value)
            idx += 1

        elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24):
            # 12am
            hour = int(value)
            res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1]))
            idx += 1

        elif ymd.could_be_day(value):
            ymd.append(value)

        elif not fuzzy:
            raise ValueError()

        return idx
    def _find_hms_idx(self, idx, tokens, info, allow_jump):
        """Return the index of the h/m/s label token tied to the number at
        *idx*, or ``None`` if no such label is adjacent.
        """
        len_l = len(tokens)

        if idx+1 < len_l and info.hms(tokens[idx+1]) is not None:
            # There is an "h", "m", or "s" label following this token.  We take
            # assign the upcoming label to the current token.
            # e.g. the "12" in 12h"
            hms_idx = idx + 1

        elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and
              info.hms(tokens[idx+2]) is not None):
            # There is a space and then an "h", "m", or "s" label.
            # e.g. the "12" in "12 h"
            hms_idx = idx + 2

        elif idx > 0 and info.hms(tokens[idx-1]) is not None:
            # There is a "h", "m", or "s" preceding this token.  Since neither
            # of the previous cases was hit, there is no label following this
            # token, so we use the previous label.
            # e.g. the "04" in "12h04"
            hms_idx = idx-1

        elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and
              info.hms(tokens[idx-2]) is not None):
            # If we are looking at the final token, we allow for a
            # backward-looking check to skip over a space.
            # TODO: Are we sure this is the right condition here?
            hms_idx = idx - 2

        else:
            hms_idx = None

        return hms_idx
def _assign_hms(self, res, value_repr, hms):
# See GH issue #427, fixing float rounding
value = self._to_decimal(value_repr)
if hms == 0:
# Hour
res.hour = int(value)
if value % 1:
res.minute = int(60*(value % 1))
elif hms == 1:
(res.minute, res.second) = self._parse_min_sec(value)
elif hms == 2:
(res.second, res.microsecond) = self._parsems(value_repr)
def _could_be_tzname(self, hour, tzname, tzoffset, token):
return (hour is not None and
tzname is None and
tzoffset is None and
len(token) <= 5 and
(all(x in string.ascii_uppercase for x in token)
or token in self.info.UTCZONE))
def _ampm_valid(self, hour, ampm, fuzzy):
"""
For fuzzy parsing, 'a' or 'am' (both valid English words)
may erroneously trigger the AM/PM flag. Deal with that
here.
"""
val_is_ampm = True
# If there's already an AM/PM flag, this one isn't one.
if fuzzy and ampm is not None:
val_is_ampm = False
# If AM/PM is found and hour is not, raise a ValueError
if hour is None:
if fuzzy:
val_is_ampm = False
else:
raise ValueError('No hour specified with AM or PM flag.')
elif not 0 <= hour <= 12:
# If AM/PM is found, it's a 12 hour clock, so raise
# an error for invalid range
if fuzzy:
val_is_ampm = False
else:
raise ValueError('Invalid hour specified for 12-hour clock.')
return val_is_ampm
def _adjust_ampm(self, hour, ampm):
if hour < 12 and ampm == 1:
hour += 12
elif hour == 12 and ampm == 0:
hour = 0
return hour
def _parse_min_sec(self, value):
# TODO: Every usage of this function sets res.second to the return
# value. Are there any cases where second will be returned as None and
# we *don't* want to set res.second = None?
minute = int(value)
second = None
sec_remainder = value % 1
if sec_remainder:
second = int(60 * sec_remainder)
return (minute, second)
def _parse_hms(self, idx, tokens, info, hms_idx):
# TODO: Is this going to admit a lot of false-positives for when we
# just happen to have digits and "h", "m" or "s" characters in non-date
# text? I guess hex hashes won't have that problem, but there's plenty
# of random junk out there.
if hms_idx is None:
hms = None
new_idx = idx
elif hms_idx > idx:
hms = info.hms(tokens[hms_idx])
new_idx = hms_idx
else:
# Looking backwards, increment one.
hms = info.hms(tokens[hms_idx]) + 1
new_idx = idx
return (new_idx, hms)
# ------------------------------------------------------------------
# Handling for individual tokens. These are kept as methods instead
# of functions for the sake of customizability via subclassing.
def _parsems(self, value):
"""Parse a I[.F] seconds value into (seconds, microseconds)."""
if "." not in value:
return int(value), 0
else:
i, f = value.split(".")
return int(i), int(f.ljust(6, "0")[:6])
def _to_decimal(self, val):
try:
decimal_value = Decimal(val)
# See GH 662, edge case, infinite value should not be converted
# via `_to_decimal`
if not decimal_value.is_finite():
raise ValueError("Converted decimal value is infinite or NaN")
except Exception as e:
msg = "Could not convert %s to decimal" % val
six.raise_from(ValueError(msg), e)
else:
return decimal_value
# ------------------------------------------------------------------
# Post-Parsing construction of datetime output. These are kept as
# methods instead of functions for the sake of customizability via
# subclassing.
def _build_tzinfo(self, tzinfos, tzname, tzoffset):
if callable(tzinfos):
tzdata = tzinfos(tzname, tzoffset)
else:
tzdata = tzinfos.get(tzname)
# handle case where tzinfo is paased an options that returns None
# eg tzinfos = {'BRST' : None}
if isinstance(tzdata, datetime.tzinfo) or tzdata is None:
tzinfo = tzdata
elif isinstance(tzdata, text_type):
tzinfo = tz.tzstr(tzdata)
elif isinstance(tzdata, integer_types):
tzinfo = tz.tzoffset(tzname, tzdata)
else:
raise TypeError("Offset must be tzinfo subclass, tz string, "
"or int offset.")
return tzinfo
    def _build_tzaware(self, naive, res, tzinfos):
        """Attach timezone information from *res*/*tzinfos* to *naive*.

        Branch order matters: an explicit ``tzinfos`` mapping wins, then
        system timezone names, then numeric offsets, then no zone at all.
        """
        if (callable(tzinfos) or (tzinfos and res.tzname in tzinfos)):
            tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset)
            aware = naive.replace(tzinfo=tzinfo)
            aware = self._assign_tzname(aware, res.tzname)

        elif res.tzname and res.tzname in time.tzname:
            aware = naive.replace(tzinfo=tz.tzlocal())

            # Handle ambiguous local datetime
            aware = self._assign_tzname(aware, res.tzname)

            # This is mostly relevant for winter GMT zones parsed in the UK
            if (aware.tzname() != res.tzname and
                    res.tzname in self.info.UTCZONE):
                aware = aware.replace(tzinfo=tz.UTC)

        elif res.tzoffset == 0:
            aware = naive.replace(tzinfo=tz.UTC)

        elif res.tzoffset:
            aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))

        elif not res.tzname and not res.tzoffset:
            # i.e. no timezone information was found.
            aware = naive

        elif res.tzname:
            # tz-like string was parsed but we don't know what to do
            # with it
            warnings.warn("tzname {tzname} identified but not understood. "
                          "Pass `tzinfos` argument in order to correctly "
                          "return a timezone-aware datetime. In a future "
                          "version, this will raise an "
                          "exception.".format(tzname=res.tzname),
                          category=UnknownTimezoneWarning)
            aware = naive

        return aware
def _build_naive(self, res, default):
repl = {}
for attr in ("year", "month", "day", "hour",
"minute", "second", "microsecond"):
value = getattr(res, attr)
if value is not None:
repl[attr] = value
if 'day' not in repl:
# If the default day exceeds the last day of the month, fall back
# to the end of the month.
cyear = default.year if res.year is None else res.year
cmonth = default.month if res.month is None else res.month
cday = default.day if res.day is None else res.day
if cday > monthrange(cyear, cmonth)[1]:
repl['day'] = monthrange(cyear, cmonth)[1]
naive = default.replace(**repl)
if res.weekday is not None and not res.day:
naive = naive + relativedelta.relativedelta(weekday=res.weekday)
return naive
def _assign_tzname(self, dt, tzname):
if dt.tzname() != tzname:
new_dt = tz.enfold(dt, fold=1)
if new_dt.tzname() == tzname:
return new_dt
return dt
def _recombine_skipped(self, tokens, skipped_idxs):
"""
>>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"]
>>> skipped_idxs = [0, 1, 2, 5]
>>> _recombine_skipped(tokens, skipped_idxs)
["foo bar", "baz"]
"""
skipped_tokens = []
for i, idx in enumerate(sorted(skipped_idxs)):
if i > 0 and idx - 1 == skipped_idxs[i - 1]:
skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx]
else:
skipped_tokens.append(tokens[idx])
return skipped_tokens
# Shared module-level parser instance, used whenever no custom parserinfo
# is supplied to parse().
DEFAULTPARSER = parser()


def parse(timestr, parserinfo=None, **kwargs):
    """
    Parse a string in one of the supported formats, using the
    ``parserinfo`` parameters.

    :param timestr:
        A string containing a date/time stamp.

    :param parserinfo:
        A :class:`parserinfo` object containing parameters for the parser.
        If ``None``, the default arguments to the :class:`parserinfo`
        constructor are used.

    The ``**kwargs`` parameter takes the following keyword arguments:

    :param default:
        The default datetime object, if this is a datetime object and not
        ``None``, elements specified in ``timestr`` replace elements in the
        default object.

    :param ignoretz:
        If set ``True``, time zones in parsed strings are ignored and a naive
        :class:`datetime` object is returned.

    :param tzinfos:
        Additional time zone names / aliases which may be present in the
        string. This argument maps time zone names (and optionally offsets
        from those time zones) to time zones. This parameter can be a
        dictionary with timezone aliases mapping time zone names to time
        zones or a function taking two parameters (``tzname`` and
        ``tzoffset``) and returning a time zone.

        The timezones to which the names are mapped can be an integer
        offset from UTC in seconds or a :class:`tzinfo` object.

        .. doctest::
           :options: +NORMALIZE_WHITESPACE

            >>> from dateutil.parser import parse
            >>> from dateutil.tz import gettz
            >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
            >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
            datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
            >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
            datetime.datetime(2012, 1, 19, 17, 21,
                              tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))

        This parameter is ignored if ``ignoretz`` is set.

    :param dayfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the day (``True``) or month (``False``). If
        ``yearfirst`` is set to ``True``, this distinguishes between YDM and
        YMD. If set to ``None``, this value is retrieved from the current
        :class:`parserinfo` object (which itself defaults to ``False``).

    :param yearfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the year. If ``True``, the first number is taken to
        be the year, otherwise the last number is taken to be the year. If
        this is set to ``None``, the value is retrieved from the current
        :class:`parserinfo` object (which itself defaults to ``False``).

    :param fuzzy:
        Whether to allow fuzzy parsing, allowing for string like "Today is
        January 1, 2047 at 8:21:00AM".

    :param fuzzy_with_tokens:
        If ``True``, ``fuzzy`` is automatically set to True, and the parser
        will return a tuple where the first element is the parsed
        :class:`datetime.datetime` datetimestamp and the second element is
        a tuple containing the portions of the string which were ignored:

        .. doctest::

            >>> from dateutil.parser import parse
            >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
            (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))

    :return:
        Returns a :class:`datetime.datetime` object or, if the
        ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
        first element being a :class:`datetime.datetime` object, the second
        a tuple containing the fuzzy tokens.

    :raises ParserError:
        Raised for invalid or unknown string formats, if the provided
        :class:`tzinfo` is not in a valid format, or if an invalid date would
        be created.

    :raises OverflowError:
        Raised if the parsed date exceeds the largest valid C integer on
        your system.
    """
    # A custom parserinfo needs a throwaway parser; otherwise reuse the
    # shared default instance.
    if parserinfo:
        return parser(parserinfo).parse(timestr, **kwargs)
    else:
        return DEFAULTPARSER.parse(timestr, **kwargs)
| parser |
python | ray-project__ray | python/ray/data/tests/preprocessors/test_chain.py | {
"start": 4909,
"end": 5800
class ____(Preprocessor):
    # Intentionally implements no transform methods, so that
    # _determine_transform_to_use() raises NotImplementedError in the test
    # below.
    pass
def test_determine_transform_to_use():
    """_determine_transform_to_use selects the underlying preprocessor's
    batch format, handles nested Chains, and propagates errors."""
    # A preprocessor without any transform implementation must raise.
    with pytest.raises(NotImplementedError):
        broken = Chain(PreprocessorWithoutTransform())
        broken._determine_transform_to_use()

    # A pandas-based preprocessor resolves to the pandas batch format...
    inner = Chain(SimpleImputer(["A"]))
    inner_format = inner._determine_transform_to_use()
    assert inner_format == BatchFormat.PANDAS

    # ...and wrapping that Chain in another Chain keeps the same format.
    outer = Chain(inner)
    assert outer._determine_transform_to_use() == inner_format
if __name__ == "__main__":
    import sys

    # Allow running this test file directly: `python test_chain.py`.
    sys.exit(pytest.main(["-sv", __file__]))
| PreprocessorWithoutTransform |
python | ray-project__ray | python/ray/air/_internal/util.py | {
"start": 1788,
"end": 3876
} | class ____(threading.Thread):
"""Supervisor thread that runs your script."""
def __init__(self, *args, error_queue, **kwargs):
threading.Thread.__init__(self, *args, **kwargs)
self._error_queue = error_queue
self._ret = None
def _propagate_exception(self, e: BaseException):
try:
# report the error but avoid indefinite blocking which would
# prevent the exception from being propagated in the unlikely
# case that something went terribly wrong
self._error_queue.put(e, block=True, timeout=_ERROR_REPORT_TIMEOUT)
except queue.Full:
logger.critical(
(
"Runner Thread was unable to report error to main "
"function runner thread. This means a previous error "
"was not processed. This should never happen."
)
)
def run(self):
try:
self._ret = self._target(*self._args, **self._kwargs)
except StopIteration:
logger.debug(
(
"Thread runner raised StopIteration. Interpreting it as a "
"signal to terminate the thread without error."
)
)
except SystemExit as e:
# Do not propagate up for graceful termination.
if e.code == 0:
logger.debug(
(
"Thread runner raised SystemExit with error code 0. "
"Interpreting it as a signal to terminate the thread "
"without error."
)
)
else:
# If non-zero exit code, then raise exception to main thread.
self._propagate_exception(e)
except BaseException as e:
# Propagate all other exceptions to the main thread.
self._propagate_exception(e)
def join(self, timeout=None):
super(RunnerThread, self).join(timeout)
return self._ret
| RunnerThread |
python | gevent__gevent | src/gevent/tests/test__pywsgi.py | {
"start": 17682,
"end": 17828
class ____(TestNoChunks):
    # Re-run the TestNoChunks suite against an HTTP/1.0 client.  HTTP/1.0 has
    # no chunked transfer encoding, and per the flags below pipelining is not
    # supported, so the server is expected to close the connection after each
    # response.
    HTTP_CLIENT_VERSION = '1.0'
    PIPELINE_NOT_SUPPORTED_EXS = (ConnectionClosed,)
    EXPECT_CLOSE = True
| TestNoChunks10 |
python | getsentry__sentry | src/sentry/integrations/jira/integration.py | {
"start": 4946,
"end": 5015
class ____(TypedDict):
    """Select-option shape for a Jira project.

    ``value`` is the option value submitted back (presumably the Jira
    project id/key -- confirm at call sites); ``label`` is the
    human-readable text displayed for the option.
    """
    value: str
    label: str
| JiraProjectMapping |
python | matplotlib__matplotlib | lib/matplotlib/patheffects.py | {
"start": 12178,
"end": 13321
class ____(AbstractPathEffect):
    """
    Draws a `.PathPatch` instance whose Path comes from the original
    PathEffect artist.
    """

    def __init__(self, offset=(0, 0), **kwargs):
        """
        Parameters
        ----------
        offset : (float, float), default: (0, 0)
            The (x, y) offset to apply to the path, in points.
        **kwargs
            All keyword arguments are passed through to the
            :class:`~matplotlib.patches.PathPatch` constructor. The
            properties which cannot be overridden are "path", "clip_box"
            "transform" and "clip_path".
        """
        super().__init__(offset=offset)
        # Placeholder patch; its path is overwritten on every draw_path call.
        self.patch = mpatches.PathPatch([], **kwargs)

    def draw_path(self, renderer, gc, tpath, affine, rgbFace):
        # Adopt the incoming path and position it with the configured offset.
        self.patch._path = tpath
        self.patch.set_transform(affine + self._offset_transform(renderer))
        self.patch.set_clip_box(gc.get_clip_rectangle())
        # Respect the GC's clip path unless the user already set one on the
        # patch via kwargs.
        clip_path = gc.get_clip_path()
        if clip_path and self.patch.get_clip_path() is None:
            self.patch.set_clip_path(*clip_path)
        self.patch.draw(renderer)
| PathPatchEffect |
python | apache__airflow | providers/common/sql/tests/unit/common/sql/operators/test_sql_execute.py | {
"start": 1549,
"end": 14751
class ____(NamedTuple):
    """Row shape returned by the second dummy query (``id2``/``value2`` columns)."""
    id2: str
    value2: str
@pytest.mark.parametrize(
    ("sql", "return_last", "split_statement", "hook_results", "hook_descriptions", "expected_results"),
    [
        pytest.param(
            "select * from dummy",
            True,
            True,
            [Row(id="1", value="value1"), Row(id="2", value="value2")],
            [[("id",), ("value",)]],
            [Row(id="1", value="value1"), Row(id="2", value="value2")],
            id="Scalar: Single SQL statement, return_last, split statement",
        ),
        pytest.param(
            "select * from dummy;select * from dummy2",
            True,
            True,
            [Row(id="1", value="value1"), Row(id="2", value="value2")],
            [[("id",), ("value",)]],
            [Row(id="1", value="value1"), Row(id="2", value="value2")],
            id="Scalar: Multiple SQL statements, return_last, split statement",
        ),
        pytest.param(
            "select * from dummy",
            False,
            False,
            [Row(id="1", value="value1"), Row(id="2", value="value2")],
            [[("id",), ("value",)]],
            [Row(id="1", value="value1"), Row(id="2", value="value2")],
            id="Scalar: Single SQL statements, no return_last (doesn't matter), no split statement",
        ),
        pytest.param(
            "select * from dummy",
            True,
            False,
            [Row(id="1", value="value1"), Row(id="2", value="value2")],
            [[("id",), ("value",)]],
            [Row(id="1", value="value1"), Row(id="2", value="value2")],
            id="Scalar: Single SQL statements, return_last (doesn't matter), no split statement",
        ),
        pytest.param(
            ["select * from dummy"],
            False,
            False,
            [[Row(id="1", value="value1"), Row(id="2", value="value2")]],
            [[("id",), ("value",)]],
            [[Row(id="1", value="value1"), Row(id="2", value="value2")]],
            id="Non-Scalar: Single SQL statements in list, no return_last, no split statement",
        ),
        pytest.param(
            ["select * from dummy", "select * from dummy2"],
            False,
            False,
            [
                [Row(id="1", value="value1"), Row(id="2", value="value2")],
                [Row2(id2="1", value2="value1"), Row2(id2="2", value2="value2")],
            ],
            [[("id",), ("value",)], [("id2",), ("value2",)]],
            [
                [Row(id="1", value="value1"), Row(id="2", value="value2")],
                [Row2(id2="1", value2="value1"), Row2(id2="2", value2="value2")],
            ],
            id="Non-Scalar: Multiple SQL statements in list, no return_last (no matter), no split statement",
        ),
        pytest.param(
            ["select * from dummy", "select * from dummy2"],
            True,
            False,
            [
                [Row(id="1", value="value1"), Row(id="2", value="value2")],
                [Row2(id2="1", value2="value1"), Row2(id2="2", value2="value2")],
            ],
            [[("id",), ("value",)], [("id2",), ("value2",)]],
            [
                [Row(id="1", value="value1"), Row(id="2", value="value2")],
                [Row2(id2="1", value2="value1"), Row2(id2="2", value2="value2")],
            ],
            id="Non-Scalar: Multiple SQL statements in list, return_last (no matter), no split statement",
        ),
    ],
)
def test_exec_success(sql, return_last, split_statement, hook_results, hook_descriptions, expected_results):
    """
    Test the execute function in case where SQL query was successful.
    """

    class SQLExecuteQueryOperatorForTest(SQLExecuteQueryOperator):
        # Shared mock hook so the scripted run() results and descriptions can
        # be injected and later asserted against.
        _mock_db_api_hook = MagicMock()

        def get_db_hook(self):
            return self._mock_db_api_hook

    op = SQLExecuteQueryOperatorForTest(
        task_id=TASK_ID,
        sql=sql,
        do_xcom_push=True,
        return_last=return_last,
        split_statements=split_statement,
    )
    op._mock_db_api_hook.run.return_value = hook_results
    op._mock_db_api_hook.descriptions = hook_descriptions

    execute_results = op.execute(None)

    assert execute_results == expected_results
    # The hook must receive exactly the operator's configuration.
    op._mock_db_api_hook.run.assert_called_once_with(
        sql=sql,
        parameters=None,
        handler=fetch_all_handler,
        autocommit=False,
        return_last=return_last,
        split_statements=split_statement,
    )
@pytest.mark.parametrize(
    ("sql", "return_last", "split_statement", "hook_results", "hook_descriptions", "expected_results"),
    [
        pytest.param(
            "select * from dummy",
            True,
            True,
            [Row(id="1", value="value1"), Row(id="2", value="value2")],
            [[("id",), ("value",)]],
            ([("id",), ("value",)], [Row(id="1", value="value1"), Row(id="2", value="value2")]),
            id="Scalar: Single SQL statement, return_last, split statement",
        ),
        pytest.param(
            "select * from dummy;select * from dummy2",
            True,
            True,
            [Row(id="1", value="value1"), Row(id="2", value="value2")],
            [[("id",), ("value",)]],
            ([("id",), ("value",)], [Row(id="1", value="value1"), Row(id="2", value="value2")]),
            id="Scalar: Multiple SQL statements, return_last, split statement",
        ),
        pytest.param(
            "select * from dummy",
            False,
            False,
            [Row(id="1", value="value1"), Row(id="2", value="value2")],
            [[("id",), ("value",)]],
            ([("id",), ("value",)], [Row(id="1", value="value1"), Row(id="2", value="value2")]),
            id="Scalar: Single SQL statements, no return_last (doesn't matter), no split statement",
        ),
        pytest.param(
            "select * from dummy",
            True,
            False,
            [Row(id="1", value="value1"), Row(id="2", value="value2")],
            [[("id",), ("value",)]],
            ([("id",), ("value",)], [Row(id="1", value="value1"), Row(id="2", value="value2")]),
            id="Scalar: Single SQL statements, return_last (doesn't matter), no split statement",
        ),
        pytest.param(
            ["select * from dummy"],
            False,
            False,
            [[Row(id="1", value="value1"), Row(id="2", value="value2")]],
            [[("id",), ("value",)]],
            [([("id",), ("value",)], [Row(id="1", value="value1"), Row(id="2", value="value2")])],
            id="Non-Scalar: Single SQL statements in list, no return_last, no split statement",
        ),
        pytest.param(
            ["select * from dummy", "select * from dummy2"],
            False,
            False,
            [
                [Row(id="1", value="value1"), Row(id="2", value="value2")],
                [Row2(id2="1", value2="value1"), Row2(id2="2", value2="value2")],
            ],
            [[("id",), ("value",)], [("id2",), ("value2",)]],
            [
                ([("id",), ("value",)], [Row(id="1", value="value1"), Row(id="2", value="value2")]),
                ([("id2",), ("value2",)], [Row2(id2="1", value2="value1"), Row2(id2="2", value2="value2")]),
            ],
            id="Non-Scalar: Multiple SQL statements in list, no return_last (no matter), no split statement",
        ),
        pytest.param(
            ["select * from dummy", "select * from dummy2"],
            True,
            False,
            [
                [Row(id="1", value="value1"), Row(id="2", value="value2")],
                [Row2(id2="1", value2="value1"), Row2(id2="2", value2="value2")],
            ],
            [[("id",), ("value",)], [("id2",), ("value2",)]],
            [
                ([("id",), ("value",)], [Row(id="1", value="value1"), Row(id="2", value="value2")]),
                ([("id2",), ("value2",)], [Row2(id2="1", value2="value1"), Row2(id2="2", value2="value2")]),
            ],
            id="Non-Scalar: Multiple SQL statements in list, return_last (no matter), no split statement",
        ),
    ],
)
def test_exec_success_with_process_output(
    sql, return_last, split_statement, hook_results, hook_descriptions, expected_results
):
    """
    Test the execute function in case where SQL query was successful.
    """

    class SQLExecuteQueryOperatorForTestWithProcessOutput(SQLExecuteQueryOperator):
        # Shared mock hook, as above, plus a _process_output override that
        # zips each result set with its column description so the pairing can
        # be asserted.
        _mock_db_api_hook = MagicMock()

        def get_db_hook(self):
            return self._mock_db_api_hook

        def _process_output(
            self, results: list[Any], descriptions: list[Sequence[Sequence] | None]
        ) -> list[Any]:
            return list(zip(descriptions, results))

    op = SQLExecuteQueryOperatorForTestWithProcessOutput(
        task_id=TASK_ID,
        sql=sql,
        do_xcom_push=True,
        return_last=return_last,
        split_statements=split_statement,
    )
    op._mock_db_api_hook.run.return_value = hook_results
    op._mock_db_api_hook.descriptions = hook_descriptions

    execute_results = op.execute(None)

    assert execute_results == expected_results
    # The hook must receive exactly the operator's configuration.
    op._mock_db_api_hook.run.assert_called_once_with(
        sql=sql,
        parameters=None,
        handler=fetch_all_handler,
        autocommit=False,
        return_last=return_last,
        split_statements=split_statement,
    )
@pytest.mark.parametrize(
("connection_port", "default_port", "expected_port"),
[(None, 4321, 4321), (1234, None, 1234), (1234, 4321, 1234)],
)
def test_execute_openlineage_events(connection_port, default_port, expected_port):
class DBApiHookForTests(DbApiHook):
conn_name_attr = "sql_default"
get_conn = MagicMock(name="conn")
get_connection = MagicMock()
def get_openlineage_database_info(self, connection):
from airflow.providers.openlineage.sqlparser import DatabaseInfo
return DatabaseInfo(
scheme="sqlscheme",
authority=DbApiHook.get_openlineage_authority_part(connection, default_port=default_port),
)
def get_openlineage_database_specific_lineage(self, task_instance):
return OperatorLineage(run_facets={"completed": True})
dbapi_hook = DBApiHookForTests()
class SQLExecuteQueryOperatorForTest(SQLExecuteQueryOperator):
def get_db_hook(self):
return dbapi_hook
sql = """CREATE TABLE IF NOT EXISTS popular_orders_day_of_week (
order_day_of_week VARCHAR(64) NOT NULL,
order_placed_on TIMESTAMP NOT NULL,
orders_placed INTEGER NOT NULL
);
FORGOT TO COMMENT"""
op = SQLExecuteQueryOperatorForTest(task_id=TASK_ID, sql=sql)
DB_SCHEMA_NAME = "PUBLIC"
rows = [
(DB_SCHEMA_NAME, "popular_orders_day_of_week", "order_day_of_week", 1, "varchar"),
(DB_SCHEMA_NAME, "popular_orders_day_of_week", "order_placed_on", 2, "timestamp"),
(DB_SCHEMA_NAME, "popular_orders_day_of_week", "orders_placed", 3, "int4"),
]
dbapi_hook.get_connection.return_value = Connection(
conn_id="sql_default", conn_type="postgresql", host="host", port=connection_port
)
dbapi_hook.get_conn.return_value.cursor.return_value.fetchall.side_effect = [rows, []]
lineage = op.get_openlineage_facets_on_start()
assert len(lineage.inputs) == 0
assert lineage.outputs == [
Dataset(
namespace=f"sqlscheme://host:{expected_port}",
name="PUBLIC.popular_orders_day_of_week",
facets={
"schema": SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(name="order_day_of_week", type="varchar"),
SchemaDatasetFacetFields(name="order_placed_on", type="timestamp"),
SchemaDatasetFacetFields(name="orders_placed", type="int4"),
]
)
},
)
]
assert lineage.job_facets == {"sql": SQLJobFacet(query=sql)}
assert lineage.run_facets["extractionError"].failedTasks == 1
dbapi_hook.get_conn.return_value.cursor.return_value.fetchall.side_effect = [rows, []]
lineage_on_complete = op.get_openlineage_facets_on_complete(None)
assert (
OperatorLineage(
inputs=lineage.inputs,
outputs=lineage.outputs,
run_facets={**lineage.run_facets, **{"completed": True}},
job_facets=lineage.job_facets,
)
== lineage_on_complete
)
def test_with_no_openlineage_provider():
import importlib
def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
if level == 0 and name.startswith("airflow.providers.openlineage"):
raise ImportError("No provider 'apache-airflow-providers-openlineage'")
return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)
with mock.patch("builtins.__import__", side_effect=mock__import__):
op = SQLExecuteQueryOperator(task_id=TASK_ID, sql="SELECT 1;")
assert op.get_openlineage_facets_on_start() is None
assert op.get_openlineage_facets_on_complete(None) is None
| Row2 |
python | donnemartin__interactive-coding-challenges | online_judges/maximizing_xor/test_maximizing_xor.py | {
"start": 18,
"end": 344
} | class ____(unittest.TestCase):
def test_maximizing_xor(self):
solution = Solution()
self.assertEqual(solution.max_xor(10, 15), 7)
print('Success: test_maximizing_xor')
def main():
test = TestMaximizingXor()
test.test_maximizing_xor()
if __name__ == '__main__':
main()
| TestMaximizingXor |
python | redis__redis-py | tests/test_cache.py | {
"start": 24272,
"end": 27932
} | class ____:
@pytest.mark.parametrize(
"sentinel_setup",
[
{
"cache": DefaultCache(CacheConfig(max_size=128)),
"force_master_ip": "localhost",
},
{
"cache": DefaultCache(CacheConfig(max_size=128)),
"force_master_ip": "localhost",
"decode_responses": True,
},
],
indirect=True,
)
@pytest.mark.onlynoncluster
def test_get_from_cache(self, master):
cache = master.get_cache()
master.set("foo", "bar")
# get key from redis and save in local cache_data
assert master.get("foo") in [b"bar", "bar"]
# get key from local cache_data
assert cache.get(
CacheKey(command="GET", redis_keys=("foo",), redis_args=("GET", "foo"))
).cache_value in [
b"bar",
"bar",
]
# change key in redis (cause invalidation)
master.set("foo", "barbar")
# get key from redis
assert master.get("foo") in [b"barbar", "barbar"]
# Make sure that new value was cached
assert cache.get(
CacheKey(command="GET", redis_keys=("foo",), redis_args=("GET", "foo"))
).cache_value in [
b"barbar",
"barbar",
]
@pytest.mark.parametrize(
"r",
[
{
"cache_config": CacheConfig(max_size=128),
},
{
"cache_config": CacheConfig(max_size=128),
"decode_responses": True,
},
],
indirect=True,
)
def test_get_from_default_cache(self, r, r2):
cache = r.get_cache()
assert isinstance(cache.eviction_policy, LRUPolicy)
# add key to redis
r.set("foo", "bar")
# get key from redis and save in local cache_data
assert r.get("foo") in [b"bar", "bar"]
# get key from local cache_data
assert cache.get(
CacheKey(command="GET", redis_keys=("foo",), redis_args=("GET", "foo"))
).cache_value in [
b"bar",
"bar",
]
# change key in redis (cause invalidation)
r2.set("foo", "barbar")
time.sleep(0.1)
# Retrieves a new value from server and cache_data it
assert r.get("foo") in [b"barbar", "barbar"]
# Make sure that new value was cached
assert cache.get(
CacheKey(command="GET", redis_keys=("foo",), redis_args=("GET", "foo"))
).cache_value in [
b"barbar",
"barbar",
]
@pytest.mark.parametrize(
"sentinel_setup",
[
{
"cache_config": CacheConfig(max_size=128),
"force_master_ip": "localhost",
}
],
indirect=True,
)
@pytest.mark.onlynoncluster
def test_cache_clears_on_disconnect(self, master, cache):
cache = master.get_cache()
# add key to redis
master.set("foo", "bar")
# get key from redis and save in local cache_data
assert master.get("foo") == b"bar"
# get key from local cache_data
assert (
cache.get(
CacheKey(command="GET", redis_keys=("foo",), redis_args=("GET", "foo"))
).cache_value
== b"bar"
)
# Force disconnection
master.connection_pool.get_connection().disconnect()
# Make sure cache_data is empty
assert cache.size == 0
@pytest.mark.onlynoncluster
@skip_if_resp_version(2)
@skip_if_server_version_lt("7.4.0")
| TestSentinelCache |
python | huggingface__transformers | src/transformers/models/mobilevit/configuration_mobilevit.py | {
"start": 786,
"end": 6789
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MobileViTModel`]. It is used to instantiate a
MobileViT model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MobileViT
[apple/mobilevit-small](https://huggingface.co/apple/mobilevit-small) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 256):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 2):
The size (resolution) of each patch.
hidden_sizes (`list[int]`, *optional*, defaults to `[144, 192, 240]`):
Dimensionality (hidden size) of the Transformer encoders at each stage.
neck_hidden_sizes (`list[int]`, *optional*, defaults to `[16, 32, 64, 96, 128, 160, 640]`):
The number of channels for the feature maps of the backbone.
num_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
mlp_ratio (`float`, *optional*, defaults to 2.0):
The ratio of the number of channels in the output of the MLP to the number of channels in the input.
expand_ratio (`float`, *optional*, defaults to 4.0):
Expansion factor for the MobileNetv2 layers.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
conv_kernel_size (`int`, *optional*, defaults to 3):
The size of the convolutional kernel in the MobileViT layer.
output_stride (`int`, *optional*, defaults to 32):
The ratio of the spatial resolution of the output to the resolution of the input image.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the Transformer encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for attached classifiers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
aspp_out_channels (`int`, *optional*, defaults to 256):
Number of output channels used in the ASPP layer for semantic segmentation.
atrous_rates (`list[int]`, *optional*, defaults to `[6, 12, 18]`):
Dilation (atrous) factors used in the ASPP layer for semantic segmentation.
aspp_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the ASPP layer for semantic segmentation.
semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
The index that is ignored by the loss function of the semantic segmentation model.
Example:
```python
>>> from transformers import MobileViTConfig, MobileViTModel
>>> # Initializing a mobilevit-small style configuration
>>> configuration = MobileViTConfig()
>>> # Initializing a model from the mobilevit-small style configuration
>>> model = MobileViTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "mobilevit"
def __init__(
self,
num_channels=3,
image_size=256,
patch_size=2,
hidden_sizes=[144, 192, 240],
neck_hidden_sizes=[16, 32, 64, 96, 128, 160, 640],
num_attention_heads=4,
mlp_ratio=2.0,
expand_ratio=4.0,
hidden_act="silu",
conv_kernel_size=3,
output_stride=32,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.0,
classifier_dropout_prob=0.1,
initializer_range=0.02,
layer_norm_eps=1e-5,
qkv_bias=True,
aspp_out_channels=256,
atrous_rates=[6, 12, 18],
aspp_dropout_prob=0.1,
semantic_loss_ignore_index=255,
**kwargs,
):
super().__init__(**kwargs)
self.num_channels = num_channels
self.image_size = image_size
self.patch_size = patch_size
self.hidden_sizes = hidden_sizes
self.neck_hidden_sizes = neck_hidden_sizes
self.num_attention_heads = num_attention_heads
self.mlp_ratio = mlp_ratio
self.expand_ratio = expand_ratio
self.hidden_act = hidden_act
self.conv_kernel_size = conv_kernel_size
self.output_stride = output_stride
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.classifier_dropout_prob = classifier_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.qkv_bias = qkv_bias
# decode head attributes for semantic segmentation
self.aspp_out_channels = aspp_out_channels
self.atrous_rates = atrous_rates
self.aspp_dropout_prob = aspp_dropout_prob
self.semantic_loss_ignore_index = semantic_loss_ignore_index
__all__ = ["MobileViTConfig"]
| MobileViTConfig |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/strategies/_internal/deferred.py | {
"start": 860,
"end": 3683
} | class ____(SearchStrategy[Ex]):
"""A strategy which may be used before it is fully defined."""
def __init__(self, definition: Callable[[], SearchStrategy[Ex]]):
super().__init__()
self.__wrapped_strategy: SearchStrategy[Ex] | None = None
self.__in_repr: bool = False
self.__definition: Callable[[], SearchStrategy[Ex]] | None = definition
@property
def wrapped_strategy(self) -> SearchStrategy[Ex]:
# we assign this before entering the condition to avoid a race condition
# under threading. See issue #4523.
definition = self.__definition
if self.__wrapped_strategy is None:
check_sideeffect_during_initialization("deferred evaluation of {!r}", self)
if not inspect.isfunction(definition):
raise InvalidArgument(
f"Expected definition to be a function but got {definition!r} "
f"of type {type(definition).__name__} instead."
)
result = definition()
if result is self:
raise InvalidArgument("Cannot define a deferred strategy to be itself")
check_strategy(result, "definition()")
self.__wrapped_strategy = result
self.__definition = None
return self.__wrapped_strategy
@property
def branches(self) -> Sequence[SearchStrategy[Ex]]:
return self.wrapped_strategy.branches
def calc_label(self) -> int:
"""Deferred strategies don't have a calculated label, because we would
end up having to calculate the fixed point of some hash function in
order to calculate it when they recursively refer to themself!
The label for the wrapped strategy will still appear because it
will be passed to draw.
"""
# This is actually the same as the parent class implementation, but we
# include it explicitly here in order to document that this is a
# deliberate decision.
return self.class_label
def calc_is_empty(self, recur: RecurT) -> bool:
return recur(self.wrapped_strategy)
def calc_has_reusable_values(self, recur: RecurT) -> bool:
return recur(self.wrapped_strategy)
def __repr__(self) -> str:
if self.__wrapped_strategy is not None:
if self.__in_repr:
return f"(deferred@{id(self)!r})"
try:
self.__in_repr = True
return repr(self.__wrapped_strategy)
finally:
self.__in_repr = False
else:
description = get_pretty_function_description(self.__definition)
return f"deferred({description})"
def do_draw(self, data: ConjectureData) -> Ex:
return data.draw(self.wrapped_strategy)
| DeferredStrategy |
python | astropy__astropy | astropy/io/fits/tests/test_util.py | {
"start": 488,
"end": 2449
} | class ____(FitsTestCase):
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Cannot test on Windows")
def test_ignore_sigint(self):
if threading.active_count() > 1: # Only check when test starts.
pytest.skip("Cannot test when multiple threads are active")
@ignore_sigint
def runme():
with pytest.warns(AstropyUserWarning) as w:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
# One more time, for good measure
os.kill(pid, signal.SIGINT)
assert len(w) == 2
assert (
str(w[0].message) == "KeyboardInterrupt ignored until test is complete!"
)
pytest.raises(KeyboardInterrupt, runme)
def test_realign_dtype(self):
"""
Tests a few corner-cases for numpy dtype creation.
These originally were the reason for having a realign_dtype hack.
"""
dt = np.dtype([("a", np.int32), ("b", np.int16)])
names = dt.names
formats = [dt.fields[name][0] for name in names]
dt2 = np.dtype({"names": names, "formats": formats, "offsets": [0, 0]})
assert dt2.itemsize == 4
dt2 = np.dtype({"names": names, "formats": formats, "offsets": [0, 1]})
assert dt2.itemsize == 4
dt2 = np.dtype({"names": names, "formats": formats, "offsets": [1, 0]})
assert dt2.itemsize == 5
dt = np.dtype([("a", np.float64), ("b", np.int8), ("c", np.int8)])
names = dt.names
formats = [dt.fields[name][0] for name in names]
dt2 = np.dtype({"names": names, "formats": formats, "offsets": [0, 0, 0]})
assert dt2.itemsize == 8
dt2 = np.dtype({"names": names, "formats": formats, "offsets": [0, 0, 1]})
assert dt2.itemsize == 8
dt2 = np.dtype({"names": names, "formats": formats, "offsets": [0, 0, 27]})
assert dt2.itemsize == 28
| TestUtils |
python | getsentry__sentry | src/sentry/incidents/serializers/alert_rule_trigger_action.py | {
"start": 1202,
"end": 10579
} | class ____(CamelSnakeModelSerializer):
"""
Serializer for creating/updating a trigger action. Required context:
- `trigger`: The trigger related to this action.
- `alert_rule`: The alert_rule related to this action.
- `organization`: The organization related to this action.
- `access`: An access object (from `request.access`)
- `user`: The user from `request.user`
"""
id = serializers.IntegerField(required=False)
type = serializers.CharField()
target_type = serializers.CharField()
sentry_app_config = serializers.JSONField(required=False) # array of dicts
sentry_app_installation_uuid = serializers.CharField(required=False)
integration = serializers.IntegerField(source="integration_id", required=False, allow_null=True)
sentry_app = serializers.IntegerField(source="sentry_app_id", required=False, allow_null=True)
priority = serializers.CharField(required=False, allow_null=True)
class Meta:
model = AlertRuleTriggerAction
fields = [
"id",
"type",
"target_type",
"target_identifier",
"integration",
"sentry_app",
"sentry_app_config",
"sentry_app_installation_uuid",
"priority",
]
extra_kwargs = {
"target_identifier": {"required": True},
"target_display": {"required": False},
"integration": {"required": False, "allow_null": True},
"sentry_app": {"required": False, "allow_null": True},
"sentry_app_config": {"required": False, "allow_null": True},
"sentry_app_installation_uuid": {"required": False, "allow_null": True},
}
def validate_type(self, type: str) -> ActionService:
factory = AlertRuleTriggerAction.look_up_factory_by_slug(type)
if factory is None:
valid_slugs = AlertRuleTriggerAction.get_all_slugs()
raise serializers.ValidationError(f"Invalid type, valid values are {valid_slugs!r}")
return factory.service_type
def validate_target_type(self, target_type):
if target_type not in STRING_TO_ACTION_TARGET_TYPE:
raise serializers.ValidationError(
"Invalid targetType, valid values are [%s]"
% ", ".join(STRING_TO_ACTION_TARGET_TYPE.keys())
)
return STRING_TO_ACTION_TARGET_TYPE[target_type]
def validate(self, attrs):
if ("type" in attrs) != ("target_type" in attrs) != ("target_identifier" in attrs):
raise serializers.ValidationError(
"type, targetType and targetIdentifier must be passed together"
)
type = attrs.get("type")
target_type = attrs.get("target_type")
access: Access = self.context["access"]
identifier = attrs.get("target_identifier")
if type is not None:
type_info = AlertRuleTriggerAction.get_registered_factory(type)
if target_type not in type_info.supported_target_types:
allowed_target_types = ",".join(
ACTION_TARGET_TYPE_TO_STRING[type_name]
for type_name in type_info.supported_target_types
)
raise serializers.ValidationError(
{
"target_type": "Invalid target type for %s. Valid types are [%s]"
% (type_info.slug, allowed_target_types)
}
)
action_type = attrs.get("type")
if action_type == AlertRuleTriggerAction.Type.EMAIL:
if target_type == AlertRuleTriggerAction.TargetType.TEAM:
try:
team = Team.objects.get(id=identifier)
except Team.DoesNotExist:
raise serializers.ValidationError("Team does not exist")
if not access.has_team_access(team):
raise serializers.ValidationError("Team does not exist")
elif target_type == AlertRuleTriggerAction.TargetType.USER:
if not OrganizationMember.objects.filter(
organization=self.context["organization"], user_id=identifier
).exists():
raise serializers.ValidationError("User does not belong to this organization")
elif action_type == AlertRuleTriggerAction.Type.SLACK:
if not attrs.get("integration_id"):
raise serializers.ValidationError(
{"integration": "Integration must be provided for slack"}
)
elif action_type == AlertRuleTriggerAction.Type.DISCORD:
if not attrs.get("integration_id"):
raise serializers.ValidationError(
{"integration": "Integration must be provided for discord"}
)
elif action_type == AlertRuleTriggerAction.Type.SENTRY_APP:
sentry_app_installation_uuid = attrs.get("sentry_app_installation_uuid")
if not attrs.get("sentry_app_id"):
raise serializers.ValidationError(
{"sentry_app": "SentryApp must be provided for sentry_app"}
)
if attrs.get("sentry_app_config"):
if sentry_app_installation_uuid is None:
raise serializers.ValidationError(
{"sentry_app": "Missing parameter: sentry_app_installation_uuid"}
)
installations = self.context.get("installations")
if installations and sentry_app_installation_uuid not in {
i.uuid for i in installations
}:
raise serializers.ValidationError(
{"sentry_app": "The installation does not exist."}
)
if attrs.get("priority"):
if action_type not in [
AlertRuleTriggerAction.Type.PAGERDUTY,
AlertRuleTriggerAction.Type.OPSGENIE,
]:
raise serializers.ValidationError(
{"priority": "Can only be set for Pagerduty or Opsgenie"}
)
priority: str = attrs["priority"]
if (
action_type == AlertRuleTriggerAction.Type.PAGERDUTY
and priority not in PAGERDUTY_CUSTOM_PRIORITIES
):
raise serializers.ValidationError(
{
"priority": f"Allowed priorities for Pagerduty are {str(PAGERDUTY_CUSTOM_PRIORITIES)}"
}
)
if (
action_type == AlertRuleTriggerAction.Type.OPSGENIE
and priority not in OPSGENIE_CUSTOM_PRIORITIES
):
raise serializers.ValidationError(
{
"priority": f"Allowed priorities for Opsgenie are {str(OPSGENIE_CUSTOM_PRIORITIES)}"
}
)
# TODO(Ecosystem): Validate fields on schema config if alert-rule-action component exists
# See NotifyEventSentryAppAction::self_validate for more details
attrs["use_async_lookup"] = self.context.get("use_async_lookup")
attrs["input_channel_id"] = self.context.get("input_channel_id")
attrs["installations"] = self.context.get("installations")
attrs["integrations"] = self.context.get("integrations")
should_validate_channel_id = self.context.get("validate_channel_id", True)
# validate_channel_id is assumed to be true unless explicitly passed as false
if attrs["input_channel_id"] and should_validate_channel_id:
validate_slack_entity_id(
integration_id=attrs["integration_id"],
input_name=identifier,
input_id=attrs["input_channel_id"],
)
return attrs
def create(self, validated_data):
for key in ("id", "sentry_app_installation_uuid"):
validated_data.pop(key, None)
try:
action = create_alert_rule_trigger_action(
trigger=self.context["trigger"], **validated_data
)
except (ApiRateLimitedError, InvalidTriggerActionError) as e:
raise serializers.ValidationError(force_str(e))
except ValidationError as e:
# invalid action type
raise serializers.ValidationError(str(e))
try:
analytics.record(
MetricAlertWithUiComponentCreatedEvent(
user_id=getattr(self.context["user"], "id", None),
alert_rule_id=getattr(self.context["alert_rule"], "id"),
organization_id=getattr(self.context["organization"], "id"),
)
)
except Exception as e:
sentry_sdk.capture_exception(e)
return action
def update(self, instance, validated_data):
for key in ("id", "sentry_app_installation_uuid"):
validated_data.pop(key, None)
try:
action = update_alert_rule_trigger_action(instance, **validated_data)
except (ApiRateLimitedError, InvalidTriggerActionError) as e:
raise serializers.ValidationError(force_str(e))
return action
| AlertRuleTriggerActionSerializer |
python | jina-ai__jina | tests/unit/serve/executors/test_bad_executor_constructor.py | {
"start": 59,
"end": 217
} | class ____(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests
def foo(self, **kwargs):
pass
| GoodExecutor |
python | pandas-dev__pandas | pandas/tests/arrays/period/test_reductions.py | {
"start": 66,
"end": 981
} | class ____:
def test_min_max(self):
arr = period_array(
[
"2000-01-03",
"2000-01-03",
"NaT",
"2000-01-02",
"2000-01-05",
"2000-01-04",
],
freq="D",
)
result = arr.min()
expected = pd.Period("2000-01-02", freq="D")
assert result == expected
result = arr.max()
expected = pd.Period("2000-01-05", freq="D")
assert result == expected
result = arr.min(skipna=False)
assert result is pd.NaT
result = arr.max(skipna=False)
assert result is pd.NaT
def test_min_max_empty(self, skipna):
arr = period_array([], freq="D")
result = arr.min(skipna=skipna)
assert result is pd.NaT
result = arr.max(skipna=skipna)
assert result is pd.NaT
| TestReductions |
python | google__pytype | pytype/overlays/abc_overlay.py | {
"start": 446,
"end": 991
} | class ____(overlay.Overlay):
"""A custom overlay for the 'abc' module."""
def __init__(self, ctx):
member_map = {
"abstractclassmethod": AbstractClassMethod.make,
"abstractmethod": AbstractMethod.make,
"abstractproperty": AbstractProperty.make,
"abstractstaticmethod": AbstractStaticMethod.make,
"ABCMeta": overlay.add_name(
"ABCMeta", special_builtins.Type.make_alias
),
}
ast = ctx.loader.import_name("abc")
super().__init__(ctx, "abc", member_map, ast)
| ABCOverlay |
python | RaRe-Technologies__gensim | gensim/test/test_similarities.py | {
"start": 19852,
"end": 21254
} | class ____(_TestSimilarityABC):
def setUp(self):
self.cls = similarities.SparseMatrixSimilarity
def test_maintain_sparsity(self):
"""Sparsity is correctly maintained when maintain_sparsity=True"""
num_features = len(DICTIONARY)
index = self.cls(CORPUS, num_features=num_features)
dense_sims = index[CORPUS]
index = self.cls(CORPUS, num_features=num_features, maintain_sparsity=True)
sparse_sims = index[CORPUS]
self.assertFalse(scipy.sparse.issparse(dense_sims))
self.assertTrue(scipy.sparse.issparse(sparse_sims))
numpy.testing.assert_array_equal(dense_sims, sparse_sims.todense())
def test_maintain_sparsity_with_num_best(self):
"""Tests that sparsity is correctly maintained when maintain_sparsity=True and num_best is not None"""
num_features = len(DICTIONARY)
index = self.cls(CORPUS, num_features=num_features, maintain_sparsity=False, num_best=3)
dense_topn_sims = index[CORPUS]
index = self.cls(CORPUS, num_features=num_features, maintain_sparsity=True, num_best=3)
scipy_topn_sims = index[CORPUS]
self.assertFalse(scipy.sparse.issparse(dense_topn_sims))
self.assertTrue(scipy.sparse.issparse(scipy_topn_sims))
self.assertEqual(dense_topn_sims, [matutils.scipy2sparse(v) for v in scipy_topn_sims])
| TestSparseMatrixSimilarity |
python | google__pytype | pytype/vm_utils.py | {
"start": 1273,
"end": 1550
} | class ____(enum.Enum):
"""Ways in which a JUMP_IF opcode may pop a value off the stack."""
NONE = enum.auto() # does not pop
OR = enum.auto() # pops when the jump is not taken
ALWAYS = enum.auto() # always pops
@dataclasses.dataclass(eq=True, frozen=True)
| PopBehavior |
python | kubernetes-client__python | kubernetes/client/models/v1_custom_resource_definition_version.py | {
"start": 383,
"end": 13909
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'additional_printer_columns': 'list[V1CustomResourceColumnDefinition]',
'deprecated': 'bool',
'deprecation_warning': 'str',
'name': 'str',
'schema': 'V1CustomResourceValidation',
'selectable_fields': 'list[V1SelectableField]',
'served': 'bool',
'storage': 'bool',
'subresources': 'V1CustomResourceSubresources'
}
attribute_map = {
'additional_printer_columns': 'additionalPrinterColumns',
'deprecated': 'deprecated',
'deprecation_warning': 'deprecationWarning',
'name': 'name',
'schema': 'schema',
'selectable_fields': 'selectableFields',
'served': 'served',
'storage': 'storage',
'subresources': 'subresources'
}
def __init__(self, additional_printer_columns=None, deprecated=None, deprecation_warning=None, name=None, schema=None, selectable_fields=None, served=None, storage=None, subresources=None, local_vars_configuration=None): # noqa: E501
"""V1CustomResourceDefinitionVersion - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._additional_printer_columns = None
self._deprecated = None
self._deprecation_warning = None
self._name = None
self._schema = None
self._selectable_fields = None
self._served = None
self._storage = None
self._subresources = None
self.discriminator = None
if additional_printer_columns is not None:
self.additional_printer_columns = additional_printer_columns
if deprecated is not None:
self.deprecated = deprecated
if deprecation_warning is not None:
self.deprecation_warning = deprecation_warning
self.name = name
if schema is not None:
self.schema = schema
if selectable_fields is not None:
self.selectable_fields = selectable_fields
self.served = served
self.storage = storage
if subresources is not None:
self.subresources = subresources
@property
def additional_printer_columns(self):
"""Gets the additional_printer_columns of this V1CustomResourceDefinitionVersion. # noqa: E501
additionalPrinterColumns specifies additional columns returned in Table output. See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. If no columns are specified, a single column displaying the age of the custom resource is used. # noqa: E501
:return: The additional_printer_columns of this V1CustomResourceDefinitionVersion. # noqa: E501
:rtype: list[V1CustomResourceColumnDefinition]
"""
return self._additional_printer_columns
@additional_printer_columns.setter
def additional_printer_columns(self, additional_printer_columns):
"""Sets the additional_printer_columns of this V1CustomResourceDefinitionVersion.
additionalPrinterColumns specifies additional columns returned in Table output. See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. If no columns are specified, a single column displaying the age of the custom resource is used. # noqa: E501
:param additional_printer_columns: The additional_printer_columns of this V1CustomResourceDefinitionVersion. # noqa: E501
:type: list[V1CustomResourceColumnDefinition]
"""
self._additional_printer_columns = additional_printer_columns
@property
def deprecated(self):
"""Gets the deprecated of this V1CustomResourceDefinitionVersion. # noqa: E501
deprecated indicates this version of the custom resource API is deprecated. When set to true, API requests to this version receive a warning header in the server response. Defaults to false. # noqa: E501
:return: The deprecated of this V1CustomResourceDefinitionVersion. # noqa: E501
:rtype: bool
"""
return self._deprecated
@deprecated.setter
def deprecated(self, deprecated):
"""Sets the deprecated of this V1CustomResourceDefinitionVersion.
deprecated indicates this version of the custom resource API is deprecated. When set to true, API requests to this version receive a warning header in the server response. Defaults to false. # noqa: E501
:param deprecated: The deprecated of this V1CustomResourceDefinitionVersion. # noqa: E501
:type: bool
"""
self._deprecated = deprecated
@property
def deprecation_warning(self):
"""Gets the deprecation_warning of this V1CustomResourceDefinitionVersion. # noqa: E501
deprecationWarning overrides the default warning returned to API clients. May only be set when `deprecated` is true. The default warning indicates this version is deprecated and recommends use of the newest served version of equal or greater stability, if one exists. # noqa: E501
:return: The deprecation_warning of this V1CustomResourceDefinitionVersion. # noqa: E501
:rtype: str
"""
return self._deprecation_warning
@deprecation_warning.setter
def deprecation_warning(self, deprecation_warning):
"""Sets the deprecation_warning of this V1CustomResourceDefinitionVersion.
deprecationWarning overrides the default warning returned to API clients. May only be set when `deprecated` is true. The default warning indicates this version is deprecated and recommends use of the newest served version of equal or greater stability, if one exists. # noqa: E501
:param deprecation_warning: The deprecation_warning of this V1CustomResourceDefinitionVersion. # noqa: E501
:type: str
"""
self._deprecation_warning = deprecation_warning
@property
def name(self):
"""Gets the name of this V1CustomResourceDefinitionVersion. # noqa: E501
name is the version name, e.g. “v1”, “v2beta1”, etc. The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true. # noqa: E501
:return: The name of this V1CustomResourceDefinitionVersion. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1CustomResourceDefinitionVersion.
name is the version name, e.g. “v1”, “v2beta1”, etc. The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true. # noqa: E501
:param name: The name of this V1CustomResourceDefinitionVersion. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def schema(self):
"""Gets the schema of this V1CustomResourceDefinitionVersion. # noqa: E501
:return: The schema of this V1CustomResourceDefinitionVersion. # noqa: E501
:rtype: V1CustomResourceValidation
"""
return self._schema
@schema.setter
def schema(self, schema):
"""Sets the schema of this V1CustomResourceDefinitionVersion.
:param schema: The schema of this V1CustomResourceDefinitionVersion. # noqa: E501
:type: V1CustomResourceValidation
"""
self._schema = schema
@property
def selectable_fields(self):
"""Gets the selectable_fields of this V1CustomResourceDefinitionVersion. # noqa: E501
selectableFields specifies paths to fields that may be used as field selectors. A maximum of 8 selectable fields are allowed. See https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors # noqa: E501
:return: The selectable_fields of this V1CustomResourceDefinitionVersion. # noqa: E501
:rtype: list[V1SelectableField]
"""
return self._selectable_fields
@selectable_fields.setter
def selectable_fields(self, selectable_fields):
"""Sets the selectable_fields of this V1CustomResourceDefinitionVersion.
selectableFields specifies paths to fields that may be used as field selectors. A maximum of 8 selectable fields are allowed. See https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors # noqa: E501
:param selectable_fields: The selectable_fields of this V1CustomResourceDefinitionVersion. # noqa: E501
:type: list[V1SelectableField]
"""
self._selectable_fields = selectable_fields
@property
def served(self):
"""Gets the served of this V1CustomResourceDefinitionVersion. # noqa: E501
served is a flag enabling/disabling this version from being served via REST APIs # noqa: E501
:return: The served of this V1CustomResourceDefinitionVersion. # noqa: E501
:rtype: bool
"""
return self._served
@served.setter
def served(self, served):
"""Sets the served of this V1CustomResourceDefinitionVersion.
served is a flag enabling/disabling this version from being served via REST APIs # noqa: E501
:param served: The served of this V1CustomResourceDefinitionVersion. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and served is None: # noqa: E501
raise ValueError("Invalid value for `served`, must not be `None`") # noqa: E501
self._served = served
@property
def storage(self):
"""Gets the storage of this V1CustomResourceDefinitionVersion. # noqa: E501
storage indicates this version should be used when persisting custom resources to storage. There must be exactly one version with storage=true. # noqa: E501
:return: The storage of this V1CustomResourceDefinitionVersion. # noqa: E501
:rtype: bool
"""
return self._storage
@storage.setter
def storage(self, storage):
"""Sets the storage of this V1CustomResourceDefinitionVersion.
storage indicates this version should be used when persisting custom resources to storage. There must be exactly one version with storage=true. # noqa: E501
:param storage: The storage of this V1CustomResourceDefinitionVersion. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and storage is None: # noqa: E501
raise ValueError("Invalid value for `storage`, must not be `None`") # noqa: E501
self._storage = storage
@property
def subresources(self):
"""Gets the subresources of this V1CustomResourceDefinitionVersion. # noqa: E501
:return: The subresources of this V1CustomResourceDefinitionVersion. # noqa: E501
:rtype: V1CustomResourceSubresources
"""
return self._subresources
@subresources.setter
def subresources(self, subresources):
"""Sets the subresources of this V1CustomResourceDefinitionVersion.
:param subresources: The subresources of this V1CustomResourceDefinitionVersion. # noqa: E501
:type: V1CustomResourceSubresources
"""
self._subresources = subresources
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CustomResourceDefinitionVersion):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1CustomResourceDefinitionVersion):
return True
return self.to_dict() != other.to_dict()
| V1CustomResourceDefinitionVersion |
python | kamyu104__LeetCode-Solutions | Python/find-maximal-uncovered-ranges.py | {
"start": 52,
"end": 665
} | class ____(object):
def findMaximalUncoveredRanges(self, n, ranges):
"""
:type n: int
:type ranges: List[List[int]]
:rtype: List[List[int]]
"""
ranges.sort()
covered = [[-1, -1]]
for left, right in ranges:
if covered[-1][1] < left:
covered.append([left, right])
continue
covered[-1][1] = max(covered[-1][1], right)
covered.append([n, n])
return [[covered[i-1][1]+1, covered[i][0]-1] for i in xrange(1, len(covered)) if covered[i-1][1]+1 <= covered[i][0]-1]
| Solution |
python | getsentry__sentry | tests/sentry/api/test_authentication.py | {
"start": 21442,
"end": 25331
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
# Create a concrete implementation for testing
class TestServiceAuth(ServiceRpcSignatureAuthentication):
shared_secret_setting_name = "TEST_SERVICE_RPC_SHARED_SECRET"
service_name = "TestService"
sdk_tag_name = "test_service_rpc_auth"
self.auth = TestServiceAuth()
@override_settings(TEST_SERVICE_RPC_SHARED_SECRET=["test-secret-key"])
def test_authenticate_success(self) -> None:
data = b'{"test": "data"}'
request = drf_request_from_request(
RequestFactory().post("/test", data=data, content_type="application/json")
)
signature = generate_service_request_signature(
request.path_info, request.body, ["test-secret-key"], "TestService"
)
request.META["HTTP_AUTHORIZATION"] = f"rpcsignature {signature}"
user, token = self.auth.authenticate(request)
assert user.is_anonymous
assert token == signature
@override_settings(TEST_SERVICE_RPC_SHARED_SECRET=["new-key", "old-key"])
def test_authenticate_old_key_validates(self) -> None:
data = b'{"test": "data"}'
request = drf_request_from_request(
RequestFactory().post("/test", data=data, content_type="application/json")
)
# Sign with old key
signature = generate_service_request_signature(
request.path_info, request.body, ["old-key"], "TestService"
)
request.META["HTTP_AUTHORIZATION"] = f"rpcsignature {signature}"
user, token = self.auth.authenticate(request)
assert user.is_anonymous
assert token == signature
def test_authenticate_without_signature(self) -> None:
request = drf_request_from_request(
RequestFactory().post(
"/test", data=b'{"test": "data"}', content_type="application/json"
)
)
request.META["HTTP_AUTHORIZATION"] = "Bearer abcdef"
assert self.auth.authenticate(request) is None
@override_settings(TEST_SERVICE_RPC_SHARED_SECRET=["test-secret-key"])
def test_authenticate_invalid_signature(self) -> None:
request = drf_request_from_request(
RequestFactory().post(
"/test", data=b'{"test": "data"}', content_type="application/json"
)
)
request.META["HTTP_AUTHORIZATION"] = "rpcsignature invalid_signature"
with pytest.raises(AuthenticationFailed):
self.auth.authenticate(request)
def test_authenticate_no_shared_secret(self) -> None:
request = drf_request_from_request(
RequestFactory().post(
"/test", data=b'{"test": "data"}', content_type="application/json"
)
)
request.META["HTTP_AUTHORIZATION"] = "rpcsignature test_signature"
with override_settings(TEST_SERVICE_RPC_SHARED_SECRET=None):
with pytest.raises(RpcAuthenticationSetupException):
self.auth.authenticate(request)
def test_authenticate_empty_shared_secret(self) -> None:
request = drf_request_from_request(
RequestFactory().post(
"/test", data=b'{"test": "data"}', content_type="application/json"
)
)
request.META["HTTP_AUTHORIZATION"] = "rpcsignature test_signature"
# Test with empty string secret
with override_settings(TEST_SERVICE_RPC_SHARED_SECRET=[""]):
with pytest.raises(RpcAuthenticationSetupException):
self.auth.authenticate(request)
# Test with whitespace-only secret
with override_settings(TEST_SERVICE_RPC_SHARED_SECRET=[" "]):
with pytest.raises(RpcAuthenticationSetupException):
self.auth.authenticate(request)
| TestServiceRpcSignatureAuthentication |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_reflection.py | {
"start": 108572,
"end": 109859
} | class ____(fixtures.TablesTest):
__requires__ = ("denormalized_names",)
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
quoted_name("t1", quote=True),
metadata,
Column("id", Integer, primary_key=True),
)
Table(
quoted_name("t2", quote=True),
metadata,
Column("id", Integer, primary_key=True),
Column("t1id", ForeignKey("t1.id")),
)
def test_reflect_lowercase_forced_tables(self):
m2 = MetaData()
t2_ref = Table(
quoted_name("t2", quote=True), m2, autoload_with=config.db
)
t1_ref = m2.tables["t1"]
assert t2_ref.c.t1id.references(t1_ref.c.id)
m3 = MetaData()
m3.reflect(
config.db, only=lambda name, m: name.lower() in ("t1", "t2")
)
assert m3.tables["t2"].c.t1id.references(m3.tables["t1"].c.id)
def test_get_table_names(self):
tablenames = [
t
for t in inspect(config.db).get_table_names()
if t.lower() in ("t1", "t2")
]
eq_(tablenames[0].upper(), tablenames[0].lower())
eq_(tablenames[1].upper(), tablenames[1].lower())
| NormalizedNameTest |
python | PrefectHQ__prefect | src/prefect/exceptions.py | {
"start": 7467,
"end": 7662
} | class ____(PrefectException, RuntimeError):
"""
Raised when a method is called that requires a task or flow run context to be
active but one cannot be found.
"""
| MissingContextError |
python | Netflix__metaflow | test/test_config/hellodecos_base.py | {
"start": 301,
"end": 342
} | class ____(FlowSpec):
pass
| MyBaseFlowSpec |
python | great-expectations__great_expectations | tests/data_context/abstract_data_context/test_data_docs_config_crud.py | {
"start": 2486,
"end": 2768
} | class ____:
@pytest.mark.unit
def test_list_data_docs_sites(self, ephemeral_context_with_defaults: EphemeralDataContext):
site_names = [d for d in ephemeral_context_with_defaults.list_data_docs_sites()]
assert site_names == ["local_site"]
| TestListDataDocsSites |
python | astropy__astropy | astropy/coordinates/earth.py | {
"start": 716,
"end": 3311
} | class ____(NamedTuple):
"""A namedtuple for geodetic coordinates.
The longitude is increasing to the east, so west longitudes are negative.
"""
lon: Longitude
"""The longitude, increasting to the east."""
lat: Latitude
"""The latitude."""
height: u.Quantity
"""The height above the reference ellipsoid."""
OMEGA_EARTH = (1.002_737_811_911_354_48 * u.cycle / u.day).to(
1 / u.s, u.dimensionless_angles()
)
"""
Rotational velocity of Earth, following SOFA's pvtob.
In UT1 seconds, this would be 2 pi / (24 * 3600), but we need the value
in SI seconds, so multiply by the ratio of stellar to solar day.
See Explanatory Supplement to the Astronomical Almanac, ed. P. Kenneth
Seidelmann (1992), University Science Books. The constant is the
conventional, exact one (IERS conventions 2003); see
http://hpiers.obspm.fr/eop-pc/index.php?index=constants.
"""
def _check_ellipsoid(ellipsoid=None, default="WGS84"):
if ellipsoid is None:
ellipsoid = default
if ellipsoid not in ELLIPSOIDS:
raise ValueError(f"Ellipsoid {ellipsoid} not among known ones ({ELLIPSOIDS})")
return ellipsoid
def _get_json_result(url, err_str, use_google):
# need to do this here to prevent a series of complicated circular imports
from .name_resolve import NameResolveError
try:
# Retrieve JSON response from Google maps API
resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout)
resp_data = json.loads(resp.read().decode("utf8"))
except urllib.error.URLError as e:
# This catches a timeout error, see:
# http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python
msg = (
"connection timed out" if isinstance(e.reason, socket.timeout) else e.reason
)
raise NameResolveError(err_str.format(msg=msg)) from e
except TimeoutError:
# There are some cases where urllib2 does not catch socket.timeout
# especially while receiving response data on an already previously
# working request
raise NameResolveError(err_str.format(msg="connection timed out"))
if use_google:
results = resp_data.get("results", [])
if resp_data.get("status", None) != "OK":
raise NameResolveError(
err_str.format(msg="unknown failure with Google API")
)
else: # OpenStreetMap returns a list
results = resp_data
if not results:
raise NameResolveError(err_str.format(msg="no results returned"))
return results
| GeodeticLocation |
python | huggingface__transformers | src/transformers/models/perceiver/modeling_perceiver.py | {
"start": 109100,
"end": 110381
} | class ____(AbstractPreprocessor):
"""
Text preprocessing for Perceiver Encoder. Can be used to embed `inputs` and add positional encodings.
The dimensionality of the embeddings is determined by the `d_model` attribute of the configuration.
Args:
config ([`PerceiverConfig`]):
Model configuration.
"""
def __init__(self, config: PerceiverConfig) -> None:
super().__init__()
self.config = config
self.embeddings = nn.Embedding(num_embeddings=config.vocab_size, embedding_dim=config.d_model)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)
@property
def num_channels(self) -> int:
return self.config.d_model
def forward(
self,
inputs: torch.LongTensor,
pos: Optional[torch.Tensor] = None,
network_input_is_1d: bool = True,
interpolate_pos_encoding: bool = False,
):
embeddings_without_pos = self.embeddings(inputs)
seq_length = inputs.shape[1]
position_ids = torch.arange(0, seq_length, device=inputs.device)
embeddings = embeddings_without_pos + self.position_embeddings(position_ids)
return embeddings, None, embeddings_without_pos
| PerceiverTextPreprocessor |
python | sanic-org__sanic | sanic/pages/error.py | {
"start": 634,
"end": 3939
} | class ____(BasePage):
"""Page for displaying an error."""
STYLE_APPEND = tracerite.html.style
def __init__(
self,
debug: bool,
title: str,
text: str,
request: Request,
exc: Exception,
) -> None:
super().__init__(debug)
name = request.app.name.replace("_", " ").strip()
if name.islower():
name = name.title()
self.TITLE = f"Application {name} cannot handle your request"
self.HEADING = E("Application ").strong(name)(
" cannot handle your request"
)
self.title = title
self.text = text
self.request = request
self.exc = exc
self.details_open = not getattr(exc, "quiet", False)
def _head(self) -> None:
self.doc._script(tracerite.html.javascript)
super()._head()
def _body(self) -> None:
debug = self.request.app.debug
route_name = self.request.name or "[route not found]"
with self.doc.main:
self.doc.h1(f"⚠️ {self.title}").p(self.text)
# Show context details if available on the exception
context = getattr(self.exc, "context", None)
if context:
self._key_value_table(
"Issue context", "exception-context", context
)
if not debug:
with self.doc.div(id="enduser"):
self.doc.p(ENDUSER_TEXT).p.a("Front Page", href="/")
return
# Show additional details in debug mode,
# open by default for 500 errors
with self.doc.details(open=self.details_open, class_="smalltext"):
# Show extra details if available on the exception
extra = getattr(self.exc, "extra", None)
if extra:
self._key_value_table(
"Issue extra data", "exception-extra", extra
)
self.doc.summary(
"Details for developers (Sanic debug mode only)"
)
if self.exc:
with self.doc.div(class_="exception-wrapper"):
self.doc.h2(f"Exception in {route_name}:")
self.doc(
html_traceback(self.exc, include_js_css=False)
)
self._key_value_table(
f"{self.request.method} {self.request.path}",
"request-headers",
self.request.headers,
)
def _key_value_table(
self, title: str, table_id: str, data: Mapping[str, Any]
) -> None:
with self.doc.div(class_="key-value-display"):
self.doc.h2(title)
with self.doc.dl(id=table_id, class_="key-value-table smalltext"):
for key, value in data.items():
# Reading values may cause a new exception, so suppress it
try:
value = str(value)
except Exception:
value = E.em("Unable to display value")
self.doc.dt.span(key, class_="nobr key").span(": ").dd(
value
)
| ErrorPage |
python | pytorch__pytorch | torch/testing/_internal/common_device_type.py | {
"start": 38397,
"end": 41370
} | class ____(Enum):
supported = 0 # Test all supported dtypes (default)
unsupported = 1 # Test only unsupported dtypes
supported_backward = 2 # Test all supported backward dtypes
unsupported_backward = 3 # Test only unsupported backward dtypes
any_one = 4 # Test precisely one supported dtype
none = 5 # Instantiate no dtype variants (no dtype kwarg needed)
any_common_cpu_cuda_one = (
6 # Test precisely one supported dtype that is common to both cuda and cpu
)
# Arbitrary order
ANY_DTYPE_ORDER = (
torch.float32,
torch.float64,
torch.complex64,
torch.complex128,
torch.float16,
torch.bfloat16,
torch.long,
torch.int32,
torch.int16,
torch.int8,
torch.uint8,
torch.bool,
torch.float8_e4m3fn,
torch.float8_e5m2,
)
def _serialize_sample(sample_input):
# NB: For OpInfos, SampleInput.summary() prints in a cleaner way.
if getattr(sample_input, "summary", None) is not None:
return sample_input.summary()
return str(sample_input)
# Decorator that defines the OpInfos a test template should be instantiated for.
#
# Example usage:
#
# @ops(unary_ufuncs)
# def test_numerics(self, device, dtype, op):
# <test_code>
#
# This will instantiate variants of test_numerics for each given OpInfo,
# on each device the OpInfo's operator supports, and for every dtype supported by
# that operator. There are a few caveats to the dtype rule, explained below.
#
# The @ops decorator can accept two
# additional arguments, "dtypes" and "allowed_dtypes". If "dtypes" is specified
# then the test variants are instantiated for those dtypes, regardless of
# what the operator supports. If given "allowed_dtypes" then test variants
# are instantiated only for the intersection of allowed_dtypes and the dtypes
# they would otherwise be instantiated with. That is, allowed_dtypes composes
# with the options listed above and below.
#
# The "dtypes" argument can also accept additional values (see OpDTypes above):
# OpDTypes.supported - the test is instantiated for all dtypes the operator
# supports
# OpDTypes.unsupported - the test is instantiated for all dtypes the operator
# doesn't support
# OpDTypes.supported_backward - the test is instantiated for all dtypes the
# operator's gradient formula supports
# OpDTypes.unsupported_backward - the test is instantiated for all dtypes the
# operator's gradient formula doesn't support
# OpDTypes.any_one - the test is instantiated for one dtype the
# operator supports. The dtype supports forward and backward if possible.
# OpDTypes.none - the test is instantiated without any dtype. The test signature
# should not include a dtype kwarg in this case.
# OpDTypes.any_common_cpu_cuda_one - the test is instantiated for a dtype
# that supports both CPU and CUDA.
#
# These options allow tests to have considerable control over the dtypes
# they're instantiated for.
| OpDTypes |
python | pytorch__pytorch | torch/_higher_order_ops/triton_kernel_wrap.py | {
"start": 6931,
"end": 7059
} | class ____:
idx: int
def fake(self) -> bool:
return self.idx < 0
@dataclasses.dataclass(frozen=True)
| Intermediate |
python | plotly__plotly.py | plotly/graph_objs/histogram/_xbins.py | {
"start": 233,
"end": 8985
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram"
_path_str = "histogram.xbins"
_valid_props = {"end", "size", "start"}
@property
def end(self):
"""
Sets the end value for the x axis bins. The last bin may not
end exactly at this value, we increment the bin edge by `size`
from `start` until we reach or exceed `end`. Defaults to the
maximum data value. Like `start`, for dates use a date string,
and for category data `end` is based on the category serial
numbers.
The 'end' property accepts values of any type
Returns
-------
Any
"""
return self["end"]
@end.setter
def end(self, val):
self["end"] = val
@property
def size(self):
"""
Sets the size of each x axis bin. Default behavior: If `nbinsx`
is 0 or omitted, we choose a nice round bin size such that the
number of bins is about the same as the typical number of
samples in each bin. If `nbinsx` is provided, we choose a nice
round bin size giving no more than that many bins. For date
data, use milliseconds or "M<n>" for months, as in
`axis.dtick`. For category data, the number of categories to
bin together (always defaults to 1). If multiple non-overlaying
histograms share a subplot, the first explicit `size` is used
and all others discarded. If no `size` is provided,the sample
data from all traces is combined to determine `size` as
described above.
The 'size' property accepts values of any type
Returns
-------
Any
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def start(self):
"""
Sets the starting value for the x axis bins. Defaults to the
minimum data value, shifted down if necessary to make nice
round values and to remove ambiguous bin edges. For example, if
most of the data is integers we shift the bin edges 0.5 down,
so a `size` of 5 would have a default `start` of -0.5, so it is
clear that 0-4 are in the first bin, 5-9 in the second, but
continuous data gets a start of 0 and bins [0,5), [5,10) etc.
Dates behave similarly, and `start` should be a date string.
For category data, `start` is based on the category serial
numbers, and defaults to -0.5. If multiple non-overlaying
histograms share a subplot, the first explicit `start` is used
exactly and all others are shifted down (if necessary) to
differ from that one by an integer number of bins.
The 'start' property accepts values of any type
Returns
-------
Any
"""
return self["start"]
@start.setter
def start(self, val):
self["start"] = val
@property
def _prop_descriptions(self):
return """\
end
Sets the end value for the x axis bins. The last bin
may not end exactly at this value, we increment the bin
edge by `size` from `start` until we reach or exceed
`end`. Defaults to the maximum data value. Like
`start`, for dates use a date string, and for category
data `end` is based on the category serial numbers.
size
Sets the size of each x axis bin. Default behavior: If
`nbinsx` is 0 or omitted, we choose a nice round bin
size such that the number of bins is about the same as
the typical number of samples in each bin. If `nbinsx`
is provided, we choose a nice round bin size giving no
more than that many bins. For date data, use
milliseconds or "M<n>" for months, as in `axis.dtick`.
For category data, the number of categories to bin
together (always defaults to 1). If multiple non-
overlaying histograms share a subplot, the first
explicit `size` is used and all others discarded. If no
`size` is provided,the sample data from all traces is
combined to determine `size` as described above.
start
Sets the starting value for the x axis bins. Defaults
to the minimum data value, shifted down if necessary to
make nice round values and to remove ambiguous bin
edges. For example, if most of the data is integers we
shift the bin edges 0.5 down, so a `size` of 5 would
have a default `start` of -0.5, so it is clear that 0-4
are in the first bin, 5-9 in the second, but continuous
data gets a start of 0 and bins [0,5), [5,10) etc.
Dates behave similarly, and `start` should be a date
string. For category data, `start` is based on the
category serial numbers, and defaults to -0.5. If
multiple non-overlaying histograms share a subplot, the
first explicit `start` is used exactly and all others
are shifted down (if necessary) to differ from that one
by an integer number of bins.
"""
def __init__(self, arg=None, end=None, size=None, start=None, **kwargs):
"""
Construct a new XBins object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram.XBins`
end
Sets the end value for the x axis bins. The last bin
may not end exactly at this value, we increment the bin
edge by `size` from `start` until we reach or exceed
`end`. Defaults to the maximum data value. Like
`start`, for dates use a date string, and for category
data `end` is based on the category serial numbers.
size
Sets the size of each x axis bin. Default behavior: If
`nbinsx` is 0 or omitted, we choose a nice round bin
size such that the number of bins is about the same as
the typical number of samples in each bin. If `nbinsx`
is provided, we choose a nice round bin size giving no
more than that many bins. For date data, use
milliseconds or "M<n>" for months, as in `axis.dtick`.
For category data, the number of categories to bin
together (always defaults to 1). If multiple non-
overlaying histograms share a subplot, the first
explicit `size` is used and all others discarded. If no
`size` is provided,the sample data from all traces is
combined to determine `size` as described above.
start
Sets the starting value for the x axis bins. Defaults
to the minimum data value, shifted down if necessary to
make nice round values and to remove ambiguous bin
edges. For example, if most of the data is integers we
shift the bin edges 0.5 down, so a `size` of 5 would
have a default `start` of -0.5, so it is clear that 0-4
are in the first bin, 5-9 in the second, but continuous
data gets a start of 0 and bins [0,5), [5,10) etc.
Dates behave similarly, and `start` should be a date
string. For category data, `start` is based on the
category serial numbers, and defaults to -0.5. If
multiple non-overlaying histograms share a subplot, the
first explicit `start` is used exactly and all others
are shifted down (if necessary) to differ from that one
by an integer number of bins.
Returns
-------
XBins
"""
super().__init__("xbins")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram.XBins
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.XBins`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("end", arg, end)
self._set_property("size", arg, size)
self._set_property("start", arg, start)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| XBins |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_json_patch.py | {
"start": 383,
"end": 10016
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'expression': 'str'
}
attribute_map = {
'expression': 'expression'
}
def __init__(self, expression=None, local_vars_configuration=None): # noqa: E501
"""V1beta1JSONPatch - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._expression = None
self.discriminator = None
if expression is not None:
self.expression = expression
@property
def expression(self):
"""Gets the expression of this V1beta1JSONPatch. # noqa: E501
expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). ref: https://github.com/google/cel-spec expression must return an array of JSONPatch values. For example, this CEL expression returns a JSON patch to conditionally modify a value: [ JSONPatch{op: \"test\", path: \"/spec/example\", value: \"Red\"}, JSONPatch{op: \"replace\", path: \"/spec/example\", value: \"Green\"} ] To define an object for the patch value, use Object types. For example: [ JSONPatch{ op: \"add\", path: \"/spec/selector\", value: Object.spec.selector{matchLabels: {\"environment\": \"test\"}} } ] To use strings containing '/' and '~' as JSONPatch path keys, use \"jsonpatch.escapeKey\". For example: [ JSONPatch{ op: \"add\", path: \"/metadata/labels/\" + jsonpatch.escapeKey(\"example.com/environment\"), value: \"test\" }, ] CEL expressions have access to the types needed to create JSON patches and objects: - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'. See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string, integer, array, map or object. If set, the 'path' and 'from' fields must be set to a [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL function may be used to escape path keys containing '/' and '~'. - 'Object' - CEL type of the resource object. - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec') - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>` - CEL type of nested field (such as 'Object.spec.containers') CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: - 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. 
- 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value. For example, a variable named 'foo' can be accessed as 'variables.foo'. - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) as well as: - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively). Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required. # noqa: E501
:return: The expression of this V1beta1JSONPatch. # noqa: E501
:rtype: str
"""
return self._expression
@expression.setter
def expression(self, expression):
"""Sets the expression of this V1beta1JSONPatch.
expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). ref: https://github.com/google/cel-spec expression must return an array of JSONPatch values. For example, this CEL expression returns a JSON patch to conditionally modify a value: [ JSONPatch{op: \"test\", path: \"/spec/example\", value: \"Red\"}, JSONPatch{op: \"replace\", path: \"/spec/example\", value: \"Green\"} ] To define an object for the patch value, use Object types. For example: [ JSONPatch{ op: \"add\", path: \"/spec/selector\", value: Object.spec.selector{matchLabels: {\"environment\": \"test\"}} } ] To use strings containing '/' and '~' as JSONPatch path keys, use \"jsonpatch.escapeKey\". For example: [ JSONPatch{ op: \"add\", path: \"/metadata/labels/\" + jsonpatch.escapeKey(\"example.com/environment\"), value: \"test\" }, ] CEL expressions have access to the types needed to create JSON patches and objects: - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'. See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string, integer, array, map or object. If set, the 'path' and 'from' fields must be set to a [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL function may be used to escape path keys containing '/' and '~'. - 'Object' - CEL type of the resource object. - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec') - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>` - CEL type of nested field (such as 'Object.spec.containers') CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: - 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. 
- 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value. For example, a variable named 'foo' can be accessed as 'variables.foo'. - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) as well as: - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively). Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required. # noqa: E501
:param expression: The expression of this V1beta1JSONPatch. # noqa: E501
:type: str
"""
self._expression = expression
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1JSONPatch):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1JSONPatch):
return True
return self.to_dict() != other.to_dict()
| V1beta1JSONPatch |
python | pytorch__pytorch | test/test_dataloader.py | {
"start": 35206,
"end": 35437
} | class ____(IterableDataset):
def __iter__(self):
raise RuntimeError("Error in __iter__")
# used with test_error_in_init
def error_worker_init_fn(_):
raise RuntimeError("Error in worker_init_fn")
| ErrorIterableDataset |
python | pytorch__pytorch | torch/fx/experimental/accelerator_partitioner.py | {
"start": 1751,
"end": 9846
} | class ____(NamedTuple):
"""NameTuple used for returning DAG and a new fx module"""
dag: DAG
module_with_submodules: GraphModule
"""Followings are some helper functions for partition manipulation"""
def reset_partition_device(partitions):
for partition in partitions:
partition.logical_device_ids = []
def combine_two_partitions(
partition_0: Partition, partition_1: Partition, partitions: list[Partition]
) -> None:
"""Given a list of partitions and its two partitions,
combine these two partitions into a new one appending to the partitions
and remove the previous two partitions from the list of partitions
"""
partition = Partition(len(partitions))
partition.nodes = partition_0.nodes.union(partition_1.nodes)
partition.recalculate_mem_size()
partitions.append(partition)
partitions.remove(partition_0)
partitions.remove(partition_1)
reorganize_partitions(partitions)
return
def set_parents_and_children(partitions: list[Partition]) -> None:
"""Given a list of partitions, mark parents and children for each partition"""
# Go through all nodes in a partition.
# If a node's user is in other partition,
# then the other partition is this partition's children.
# This partition is the other partition's parent
for partition in partitions:
partition.children = set()
partition.parents = set()
for partition in partitions:
for node in partition.nodes:
# For each node in the current partition, find its users
users = node.users
for n in users:
# Find which the partition the user node belongs to.
# Note that if the node itself is also belongs to that partition,
# that partition is not the child of the current partition
for p in partitions:
if p != partition and n in p.nodes and node not in p.nodes:
partition.children.add(p)
p.parents.add(partition)
return
def reorganize_partitions(partitions: list[Partition]) -> None:
"""Given a list of partitions, reorganize partition id,
its parents and its children for each partition
"""
# Rearrange partition ids
for i, partition in enumerate(partitions):
partition.partition_id = i
set_parents_and_children(partitions)
return
def get_bfs_level_partition(partitions: list[Partition]) -> None:
"""Given a list of partitions,
mark the bfs level for each partition
"""
current_level: set[Partition] = set()
visited: set[Partition] = set()
for partition in partitions:
# If a partition has no parent, it should be in root level
if len(partition.parents) == 0:
current_level.add(partition)
next_level: set[Partition] = set()
level = 0
# bfs
while current_level:
partition = current_level.pop()
partition.bfs_level = level
visited.add(partition)
children = partition.children
for child in children:
if child not in next_level:
next_level.add(child)
if not current_level:
current_level = next_level.copy()
next_level = set()
level += 1
return
def get_node_to_partition_mapping(partitions: list[Partition]) -> dict[Node, int]:
"""Given a list of partitions,return node to partition mapping"""
node_to_partition: dict[Node, int] = {}
for partition in partitions:
for node in partition.nodes:
node_to_partition[node] = partition.partition_id
return node_to_partition
def get_logical_id_to_device(devices: list[Device]) -> dict[int, Device]:
"""Get a mapping from device logical ID to Device object."""
logical_id_to_device: dict[int, Device] = {}
for d in devices:
logical_id_to_device[d.logical_id] = d
return logical_id_to_device
def get_device_partition_stats(
partitions: list[Partition], devices: list[Device]
) -> tuple[dict[Device, list[Partition]], dict[Device, int], list[Partition]]:
"""Given a list of partitions and a list of devices, returns:
1. A mapping from device to partitions on it;
2. A mapping from device to its remaining memory size;
3. A list of partitions that do not have a device.
"""
# logical id to device
logical_id_to_device = get_logical_id_to_device(devices)
# Track partitions on device
device_to_partitions: dict[Device, list[Partition]] = {}
# Track device's left mem size
device_to_left_mem_bytes: dict[Device, int] = {}
for d in devices:
device_to_partitions[d] = []
device_to_left_mem_bytes[d] = d.available_mem_bytes
# Deal with the partitions that already have a device
# and also collect all partitions without a device (no_device_partitions)
no_device_partitions = []
for partition in partitions:
if partition.logical_device_ids != []:
for logical_id in partition.logical_device_ids:
device = logical_id_to_device[logical_id]
device_to_partitions[device].append(partition)
device_to_left_mem_bytes[device] -= partition.used_mem_bytes
else:
no_device_partitions.append(partition)
return (
device_to_partitions,
device_to_left_mem_bytes,
no_device_partitions,
)
def get_device_to_partitions_mapping(
partitions: list[Partition], devices: list[Device]
):
"""Given a list of partitions and a list of devices,
map each partition into a device.
"""
def calculate_extra_mem_bytes_needed_for(
partition: Partition, partitions: list[Partition]
):
all_nodes: set[Node] = set()
for p in partitions:
all_nodes = all_nodes.union(p.nodes)
if len(all_nodes) == 0:
return partition.used_mem_bytes
all_nodes = all_nodes.union(partition.nodes)
extra_size_needed = 0
for node in partition.nodes:
extra_size_needed += get_extra_size_of(node, all_nodes)
return extra_size_needed
def find_device_for(partition: Partition):
"""Given a partition, find a logical device for the partition
The algorithm is to put the partition on the device
that has just enough mem left for that partition.
device_to_left_mem_bytes is a dictionary between device and its left mem size
sorted by its left mem size
"""
for d in device_to_left_mem_bytes:
extra_size_needed = calculate_extra_mem_bytes_needed_for(
partition, device_to_partitions[d]
)
if extra_size_needed < device_to_left_mem_bytes[d]:
device_to_partitions[d].append(partition)
partition.logical_device_ids.append(d.logical_id)
device_to_left_mem_bytes[d] -= extra_size_needed
return True
return False
(
device_to_partitions,
device_to_left_mem_bytes,
no_device_partitions,
) = get_device_partition_stats(partitions, devices)
# Find devices for all the partitions without a device
found_device = True
for partition in no_device_partitions:
device_to_left_mem_bytes = dict(
sorted(device_to_left_mem_bytes.items(), key=operator.itemgetter(1))
)
found_device = find_device_for(partition)
if not found_device:
break
return found_device
def check_dependency(partition):
"""Given a partition,check if there is a circular dependency on
this partition using bfs
"""
visited: set[Partition] = {partition}
queue: deque[Partition] = deque([partition])
while queue:
p = queue.popleft()
for child in p.children:
if child == partition:
return True
else:
if child not in visited:
visited.add(child)
queue.append(child)
return False
| PartitionResult |
python | getsentry__sentry | src/sentry/dynamic_sampling/tasks/common.py | {
"start": 6695,
"end": 14847
} | class ____:
"""
Fetch organizations volumes in batches.
A batch will return at max max_orgs elements
"""
def __init__(
self,
max_orgs: int = MAX_ORGS_PER_QUERY,
time_interval: timedelta = ACTIVE_ORGS_VOLUMES_DEFAULT_TIME_INTERVAL,
granularity: Granularity = ACTIVE_ORGS_VOLUMES_DEFAULT_GRANULARITY,
include_keep: bool = True,
orgs: list[int] | None = None,
) -> None:
self.include_keep = include_keep
self.orgs = orgs
self.metric_id = indexer.resolve_shared_org(
str(TransactionMRI.COUNT_PER_ROOT_PROJECT.value)
)
if self.include_keep:
decision_string_id = indexer.resolve_shared_org("decision")
decision_tag = f"tags_raw[{decision_string_id}]"
self.keep_count_column = Function(
"sumIf",
[
Column("value"),
Function(
"equals",
[Column(decision_tag), "keep"],
),
],
alias="keep_count",
)
else:
self.keep_count_column = None
self.offset = 0
self.last_result: list[OrganizationDataVolume] = []
self.has_more_results = True
self.max_orgs = max_orgs
self.granularity = granularity
self.time_interval = time_interval
def __iter__(self) -> GetActiveOrgsVolumes:
return self
def __next__(self) -> list[OrganizationDataVolume]:
if self._enough_results_cached():
# we have enough in the cache to satisfy the current iteration
return self._get_from_cache()
select = [
Function("sum", [Column("value")], "total_count"),
Column("org_id"),
]
where = [
Condition(Column("timestamp"), Op.GTE, datetime.utcnow() - self.time_interval),
Condition(Column("timestamp"), Op.LT, datetime.utcnow()),
Condition(Column("metric_id"), Op.EQ, self.metric_id),
]
if self.orgs:
where.append(Condition(Column("org_id"), Op.IN, self.orgs))
if self.include_keep:
select.append(self.keep_count_column)
if self.has_more_results:
# not enough for the current iteration and data still in the db top it up from db
query = (
Query(
match=Entity(EntityKey.GenericOrgMetricsCounters.value),
select=select,
groupby=[
Column("org_id"),
],
where=where,
granularity=self.granularity,
)
.set_limit(CHUNK_SIZE + 1)
.set_offset(self.offset)
)
request = Request(
dataset=Dataset.PerformanceMetrics.value,
app_id="dynamic_sampling",
query=query,
tenant_ids={
"use_case_id": UseCaseID.TRANSACTIONS.value,
"cross_org_query": 1,
},
)
data = raw_snql_query(
request,
referrer=Referrer.DYNAMIC_SAMPLING_COUNTERS_GET_ORG_TRANSACTION_VOLUMES.value,
)["data"]
count = len(data)
self.has_more_results = count > CHUNK_SIZE
self.offset += CHUNK_SIZE
if self.has_more_results:
data = data[:-1]
for row in data:
keep_count = row["keep_count"] if self.include_keep else None
self.last_result.append(
OrganizationDataVolume(
org_id=row["org_id"],
total=row["total_count"],
indexed=keep_count,
)
)
if len(self.last_result) > 0:
# we have some data left return up to the max amount
return self._get_from_cache() # we still have something left in cache
else:
# nothing left in the DB or cache
raise StopIteration()
def _enough_results_cached(self) -> bool:
"""
Return true if we have enough data to return a full batch in the cache (i.e. last_result)
"""
return len(self.last_result) >= self.max_orgs
def _get_from_cache(self) -> list[OrganizationDataVolume]:
"""
Returns a batch from cache and removes the elements returned from the cache
"""
if len(self.last_result) >= self.max_orgs:
ret_val = self.last_result[: self.max_orgs]
self.last_result = self.last_result[self.max_orgs :]
else:
ret_val = self.last_result
self.last_result = []
return ret_val
def get_organization_volume(
org_id: int,
time_interval: timedelta = ACTIVE_ORGS_VOLUMES_DEFAULT_TIME_INTERVAL,
granularity: Granularity = ACTIVE_ORGS_VOLUMES_DEFAULT_GRANULARITY,
) -> OrganizationDataVolume | None:
"""
Specialized version of GetActiveOrgsVolumes that returns a single org
"""
for org_volumes in GetActiveOrgsVolumes(
max_orgs=1,
time_interval=time_interval,
granularity=granularity,
orgs=[org_id],
):
if org_volumes:
return org_volumes[0]
return None
def sample_rate_to_float(sample_rate: str | None) -> float | None:
"""
Converts a sample rate to a float or returns None in case the conversion failed.
"""
if sample_rate is None:
return None
try:
return float(sample_rate)
except (TypeError, ValueError):
return None
def are_equal_with_epsilon(a: float | None, b: float | None) -> bool:
"""
Checks if two floating point numbers are equal within an error boundary.
"""
if a is None and b is None:
return True
if a is None or b is None:
return False
return math.isclose(a, b)
def compute_guarded_sliding_window_sample_rate(
org_id: int,
project_id: int | None,
total_root_count: int,
window_size: int,
) -> float | None:
"""
Computes the actual sliding window sample rate by guarding any exceptions and returning None in case
any problem would arise.
"""
try:
# We want to compute the sliding window sample rate by considering a window of time.
# This piece of code is very delicate, thus we want to guard it properly and capture any errors.
return compute_sliding_window_sample_rate(org_id, project_id, total_root_count, window_size)
except Exception as e:
sentry_sdk.capture_exception(
e,
extras={
"org_id": org_id,
"project_id": project_id,
"total_root_count": total_root_count,
"window_size": window_size,
},
)
return None
def compute_sliding_window_sample_rate(
org_id: int,
project_id: int | None,
total_root_count: int,
window_size: int,
) -> float | None:
"""
Computes the actual sample rate for the sliding window given the total root count and the size of the
window that was used for computing the root count.
The org_id is used only because it is required on the quotas side to determine whether dynamic sampling is
enabled in the first place for that project.
"""
extrapolated_volume = extrapolate_monthly_volume(volume=total_root_count, hours=window_size)
if extrapolated_volume is None:
with sentry_sdk.isolation_scope() as scope:
scope.set_extra("org_id", org_id)
scope.set_extra("window_size", window_size)
sentry_sdk.capture_message("The volume of the current month can't be extrapolated.")
return None
sampling_tier = quotas.backend.get_transaction_sampling_tier_for_volume(
org_id, extrapolated_volume
)
if sampling_tier is None:
return None
_, sample_rate = sampling_tier
return float(sample_rate)
| GetActiveOrgsVolumes |
python | getsentry__sentry | tests/sentry/incidents/test_logic.py | {
"start": 9820,
"end": 10801
} | class ____(BaseIncidentsTest, BaseIncidentsValidation):
@cached_property
def project_incident(self):
self.create_event(self.now - timedelta(minutes=2))
self.create_event(self.now - timedelta(minutes=2))
self.create_event(self.now - timedelta(minutes=1))
return self.create_incident(
date_started=self.now - timedelta(minutes=5), query="", projects=[self.project]
)
@cached_property
def group_incident(self):
fingerprint = "group-1"
event = self.create_event(self.now - timedelta(minutes=2), fingerprint=fingerprint)
self.create_event(self.now - timedelta(minutes=2), fingerprint="other-group")
self.create_event(self.now - timedelta(minutes=1), fingerprint=fingerprint)
return self.create_incident(
date_started=self.now - timedelta(minutes=5),
query="",
projects=[],
groups=[event.group],
)
| BaseIncidentEventStatsTest |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/metrics_test.py | {
"start": 27742,
"end": 35587
} | class ____(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.precision(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self, ('precision/false_positives/count:0',
'precision/true_positives/count:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
precision, update_op = metrics.precision(labels, predictions)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_precision = self.evaluate(precision)
for _ in range(10):
self.assertEqual(initial_precision, self.evaluate(precision))
@test_util.run_deprecated_v1
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
precision, update_op = metrics.precision(labels, predictions)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, self.evaluate(update_op), 6)
self.assertAlmostEqual(1.0, self.evaluate(precision), 6)
@test_util.run_deprecated_v1
def testSomeCorrect_multipleInputDtypes(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant([1, 0, 1, 0], shape=(1, 4)), dtype=dtype)
labels = math_ops.cast(
constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=dtype)
precision, update_op = metrics.precision(labels, predictions)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, self.evaluate(update_op))
self.assertAlmostEqual(0.5, self.evaluate(precision))
@test_util.run_deprecated_v1
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.precision(
labels, predictions, weights=constant_op.constant([[2], [5]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, self.evaluate(update_op))
self.assertAlmostEqual(expected_precision, self.evaluate(precision))
@test_util.run_deprecated_v1
def testWeightedScalar_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.precision(labels, predictions, weights=2)
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 2.0
weighted_positives = (2.0 + 2.0) + (2.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testWeighted1d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.precision(
labels, predictions, weights=constant_op.constant([[2], [5]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.precision(
labels,
predictions,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, self.evaluate(update_op))
self.assertAlmostEqual(expected_precision, self.evaluate(precision))
@test_util.run_deprecated_v1
def testWeighted2d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.precision(
labels,
predictions,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(1 - inputs)
precision, update_op = metrics.precision(labels, predictions)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.evaluate(update_op)
self.assertAlmostEqual(0, self.evaluate(precision))
@test_util.run_deprecated_v1
def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
predictions = constant_op.constant([0, 0, 0, 0])
labels = constant_op.constant([0, 0, 0, 0])
precision, update_op = metrics.precision(labels, predictions)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.evaluate(update_op)
self.assertEqual(0.0, self.evaluate(precision))
| PrecisionTest |
python | pytorch__pytorch | torch/distributions/constraints.py | {
"start": 14034,
"end": 14616
} | class ____(Constraint):
"""
Constrain to a real interval `[lower_bound, upper_bound]`.
"""
def __init__(self, lower_bound, upper_bound):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
super().__init__()
def check(self, value):
return (self.lower_bound <= value) & (value <= self.upper_bound)
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += (
f"(lower_bound={self.lower_bound}, upper_bound={self.upper_bound})"
)
return fmt_string
| _Interval |
python | getsentry__sentry | src/sentry/sentry_metrics/indexer/mock.py | {
"start": 4127,
"end": 4240
} | class ____(SimpleIndexer):
"""
Mock string indexer. Comes with a prepared set of strings.
"""
| MockIndexer |
python | falconry__falcon | falcon/stream.py | {
"start": 853,
"end": 5690
} | class ____(io.IOBase):
"""Wrap *wsgi.input* streams to make them more robust.
``socket._fileobject`` and ``io.BufferedReader`` are sometimes used
to implement *wsgi.input*. However, app developers are often burned
by the fact that the `read()` method for these objects block
indefinitely if either no size is passed, or a size greater than
the request's content length is passed to the method.
This class normalizes *wsgi.input* behavior between WSGI servers
by implementing non-blocking behavior for the cases mentioned
above. The caller is not allowed to read more than the number of
bytes specified by the Content-Length header in the request.
Args:
stream: Instance of ``socket._fileobject`` from
``environ['wsgi.input']``
stream_len: Expected content length of the stream.
"""
def __init__(self, stream: BinaryIO, stream_len: int) -> None:
self.stream = stream
self.stream_len = stream_len
self._bytes_remaining = self.stream_len
def __iter__(self) -> BoundedStream:
return self
def __next__(self) -> bytes:
return next(self.stream)
next = __next__
def _read(self, size: int | None, target: Callable[[int], Result]) -> Result:
"""Proxy reads to the underlying stream.
Args:
size (int): Maximum number of bytes to read. Will be
coerced, if None or -1, to the number of remaining bytes
in the stream. Will likewise be coerced if greater than
the number of remaining bytes, to avoid making a
blocking call to the wrapped stream.
target (callable): Once `size` has been fixed up, this function
will be called to actually do the work.
Returns:
bytes: Data read from the stream, as returned by `target`.
"""
# NOTE(kgriffs): Default to reading all remaining bytes if the
# size is not specified or is out of bounds. This behaves
# similarly to the IO streams passed in by non-wsgiref servers.
if size is None or size == -1 or size > self._bytes_remaining:
size = self._bytes_remaining
self._bytes_remaining -= size
return target(size)
def readable(self) -> bool:
"""Return ``True`` always."""
return True
def seekable(self) -> bool:
"""Return ``False`` always."""
return False
def writable(self) -> bool:
"""Return ``False`` always."""
return False
def read(self, size: int | None = None) -> bytes:
"""Read from the stream.
Args:
size (int): Maximum number of bytes/characters to read.
Defaults to reading until EOF.
Returns:
bytes: Data read from the stream.
"""
return self._read(size, self.stream.read)
def readline(self, limit: int | None = None) -> bytes:
"""Read a line from the stream.
Args:
limit (int): Maximum number of bytes/characters to read.
Defaults to reading until EOF.
Returns:
bytes: Data read from the stream.
"""
return self._read(limit, self.stream.readline)
def readlines(self, hint: int | None = None) -> list[bytes]:
"""Read lines from the stream.
Args:
hint (int): Maximum number of bytes/characters to read.
Defaults to reading until EOF.
Returns:
bytes: Data read from the stream.
"""
return self._read(hint, self.stream.readlines)
def write(self, data: bytes) -> None:
"""Raise OSError always; writing is not supported."""
raise OSError('Stream is not writeable')
def exhaust(self, chunk_size: int = 64 * 1024) -> None:
"""Exhaust the stream.
This consumes all the data left until the limit is reached.
Args:
chunk_size (int): The size for a chunk (default: 64 KB).
It will read the chunk until the stream is exhausted.
"""
while True:
chunk = self.read(chunk_size)
if not chunk:
break
@property
def eof(self) -> bool:
"""``True`` if there is no more data to read from the stream,
otherwise ``False``.
""" # noqa: D205
return self._bytes_remaining <= 0
@property
# NOTE(caselit): Deprecated long ago. Warns since 4.0.
@deprecated(
'Use `eof` instead. (This compatibility alias will be removed in Falcon 5.0.)',
is_property=True,
)
def is_exhausted(self) -> bool:
"""Deprecated alias for `eof`."""
return self.eof
# NOTE(kgriffs): Alias for backwards-compat
Body = BoundedStream
| BoundedStream |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-notion/llama_index/readers/notion/base.py | {
"start": 525,
"end": 9091
} | class ____(BasePydanticReader):
"""
Notion Page reader.
Reads a set of Notion pages.
Args:
integration_token (str): Notion integration token.
"""
is_remote: bool = True
token: str
headers: Dict[str, str]
def __init__(self, integration_token: Optional[str] = None) -> None:
"""Initialize with parameters."""
if integration_token is None:
integration_token = os.getenv(INTEGRATION_TOKEN_NAME)
if integration_token is None:
raise ValueError(
"Must specify `integration_token` or set environment "
"variable `NOTION_INTEGRATION_TOKEN`."
)
token = integration_token
headers = {
"Authorization": "Bearer " + token,
"Content-Type": "application/json",
"Notion-Version": "2022-06-28",
}
super().__init__(token=token, headers=headers)
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "NotionPageReader"
def _read_block(self, block_id: str, num_tabs: int = 0) -> str:
"""Read a block."""
done = False
result_lines_arr = []
cur_block_id = block_id
while not done:
block_url = BLOCK_CHILD_URL_TMPL.format(block_id=cur_block_id)
query_dict: Dict[str, Any] = {}
res = self._request_with_retry(
"GET", block_url, headers=self.headers, json=query_dict
)
data = res.json()
for result in data["results"]:
result_type = result["type"]
result_obj = result[result_type]
cur_result_text_arr = []
if "rich_text" in result_obj:
for rich_text in result_obj["rich_text"]:
# skip if doesn't have text object
if "text" in rich_text:
text = rich_text["text"]["content"]
prefix = "\t" * num_tabs
cur_result_text_arr.append(prefix + text)
result_block_id = result["id"]
has_children = result["has_children"]
if has_children:
children_text = self._read_block(
result_block_id, num_tabs=num_tabs + 1
)
cur_result_text_arr.append(children_text)
cur_result_text = "\n".join(cur_result_text_arr)
result_lines_arr.append(cur_result_text)
if data["next_cursor"] is None:
done = True
break
else:
cur_block_id = data["next_cursor"]
return "\n".join(result_lines_arr)
def _request_with_retry(
self,
method: str,
url: str,
headers: Dict[str, str],
json: Optional[Dict[str, Any]] = None,
) -> requests.Response:
"""Make a request with retry and rate limit handling."""
max_retries = 5
backoff_factor = 1
for attempt in range(max_retries):
try:
response = requests.request(method, url, headers=headers, json=json)
response.raise_for_status()
return response
except requests.exceptions.HTTPError:
if response.status_code == 429:
# Rate limit exceeded
retry_after = int(response.headers.get("Retry-After", 1))
time.sleep(backoff_factor * (2**attempt) + retry_after)
else:
raise requests.exceptions.HTTPError(
f"Request failed: {response.text}"
)
except requests.exceptions.RequestException as err:
raise requests.exceptions.RequestException(f"Request failed: {err}")
raise Exception("Maximum retries exceeded")
def read_page(self, page_id: str) -> str:
"""Read a page."""
return self._read_block(page_id)
def query_database(
self, database_id: str, query_dict: Dict[str, Any] = {"page_size": 100}
) -> List[str]:
"""Get all the pages from a Notion database."""
pages = []
res = self._request_with_retry(
"POST",
DATABASE_URL_TMPL.format(database_id=database_id),
headers=self.headers,
json=query_dict,
)
res.raise_for_status()
data = res.json()
pages.extend(data.get("results"))
while data.get("has_more"):
query_dict["start_cursor"] = data.get("next_cursor")
res = self._request_with_retry(
"POST",
DATABASE_URL_TMPL.format(database_id=database_id),
headers=self.headers,
json=query_dict,
)
res.raise_for_status()
data = res.json()
pages.extend(data.get("results"))
return [page["id"] for page in pages]
def search(self, query: str) -> List[str]:
"""Search Notion page given a text query."""
done = False
next_cursor: Optional[str] = None
page_ids = []
while not done:
query_dict = {
"query": query,
}
if next_cursor is not None:
query_dict["start_cursor"] = next_cursor
res = self._request_with_retry(
"POST", SEARCH_URL, headers=self.headers, json=query_dict
)
data = res.json()
for result in data["results"]:
page_id = result["id"]
page_ids.append(page_id)
if data["next_cursor"] is None:
done = True
break
else:
next_cursor = data["next_cursor"]
return page_ids
def load_data(
self,
page_ids: List[str] = [],
database_ids: Optional[List[str]] = None,
load_all_if_empty: bool = False,
) -> List[Document]:
"""
Load data from the input directory.
Args:
page_ids (List[str]): List of page ids to load.
database_ids Optional (List[str]): List database ids from which to load page ids.
load_all_if_empty (bool): If True, load all pages and dbs if no page_ids or database_ids are provided.
Returns:
List[Document]: List of documents.
"""
if not page_ids and not database_ids:
if not load_all_if_empty:
raise ValueError(
"Must specify either `page_ids` or `database_ids` if "
"`load_all_if_empty` is False."
)
else:
database_ids = self.list_databases()
page_ids = self.list_pages()
docs = []
all_page_ids = set(page_ids) if page_ids is not None else set()
# TODO: in the future add special logic for database_ids
if database_ids is not None:
for database_id in database_ids:
# get all the pages in the database
db_page_ids = self.query_database(database_id)
all_page_ids.update(db_page_ids)
for page_id in all_page_ids:
page_text = self.read_page(page_id)
docs.append(
Document(text=page_text, id_=page_id, extra_info={"page_id": page_id})
)
return docs
def list_databases(self) -> List[str]:
"""List all databases in the Notion workspace."""
query_dict = {"filter": {"property": "object", "value": "database"}}
res = self._request_with_retry(
"POST", SEARCH_URL, headers=self.headers, json=query_dict
)
res.raise_for_status()
data = res.json()
return [db["id"] for db in data["results"]]
def list_pages(self) -> List[str]:
"""List all pages in the Notion workspace."""
query_dict = {"filter": {"property": "object", "value": "page"}}
res = self._request_with_retry(
"POST", SEARCH_URL, headers=self.headers, json=query_dict
)
res.raise_for_status()
data = res.json()
return [page["id"] for page in data["results"]]
if __name__ == "__main__":
reader = NotionPageReader()
print(reader.search("What I"))
# get list of database from notion
databases = reader.list_databases()
| NotionPageReader |
python | dagster-io__dagster | python_modules/dagster-pipes/dagster_pipes/__init__.py | {
"start": 38678,
"end": 39219
} | class ____(PipesStdioLogWriterChannel):
"""A log writer channel that writes stdout or stderr to a given file."""
def __init__(
self, output_path: str, stream: Literal["stdout", "stderr"], name: str, interval: float
):
self.output_path = output_path
super().__init__(interval=interval, stream=stream, name=name)
def write_chunk(self, chunk: str) -> None:
# write the chunk to a file
with open(self.output_path, "a") as file:
file.write(chunk)
| PipesStdioFileLogWriterChannel |
python | huggingface__transformers | src/transformers/models/instructblip/configuration_instructblip.py | {
"start": 9877,
"end": 14704
} | class ____(PreTrainedConfig):
r"""
[`InstructBlipConfig`] is the configuration class to store the configuration of a
[`InstructBlipForConditionalGeneration`]. It is used to instantiate a InstructBLIP model according to the specified
arguments, defining the vision model, Q-Former model and language model configs. Instantiating a configuration with
the defaults will yield a similar configuration to that of the InstructBLIP
[Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`InstructBlipVisionConfig`].
qformer_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`InstructBlipQFormerConfig`].
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize any [`PreTrainedConfig`].
num_query_tokens (`int`, *optional*, defaults to 32):
The number of query tokens passed through the Transformer.
image_token_index (`int`, *optional*):
Token index of special image token.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import (
... InstructBlipVisionConfig,
... InstructBlipQFormerConfig,
... OPTConfig,
... InstructBlipConfig,
... InstructBlipForConditionalGeneration,
... )
>>> # Initializing a InstructBlipConfig with Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipConfig()
>>> # Initializing a InstructBlipForConditionalGeneration (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a InstructBlipConfig from a InstructBlipVisionConfig, InstructBlipQFormerConfig and any PreTrainedConfig
>>> # Initializing InstructBLIP vision, InstructBLIP Q-Former and language model configurations
>>> vision_config = InstructBlipVisionConfig()
>>> qformer_config = InstructBlipQFormerConfig()
>>> text_config = OPTConfig()
>>> config = InstructBlipConfig(vision_config=vision_config, qformer_config=qformer_config, text_config=text_config)
```"""
model_type = "instructblip"
attribute_map = {
"image_token_id": "image_token_index",
}
sub_configs = {
"text_config": AutoConfig,
"qformer_config": InstructBlipQFormerConfig,
"vision_config": InstructBlipVisionConfig,
}
def __init__(
self,
vision_config=None,
qformer_config=None,
text_config=None,
num_query_tokens=32,
image_token_index=None,
**kwargs,
):
if text_config is None:
text_config = CONFIG_MAPPING["opt"]()
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
elif isinstance(text_config, dict):
text_model_type = text_config.get("model_type", "opt")
text_config = CONFIG_MAPPING[text_model_type](**text_config)
if qformer_config is None:
qformer_config = InstructBlipQFormerConfig()
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
elif isinstance(qformer_config, dict):
qformer_config = InstructBlipQFormerConfig(**qformer_config)
if vision_config is None:
vision_config = InstructBlipVisionConfig()
logger.info("`vision_config` is `None`. initializing the `InstructBlipVisionConfig` with default values.")
elif isinstance(vision_config, dict):
vision_config = InstructBlipVisionConfig(**vision_config)
self.text_config = text_config
self.vision_config = vision_config
self.qformer_config = qformer_config
self.num_query_tokens = num_query_tokens
self.image_token_index = image_token_index
self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
self.initializer_factor = 1.0
self.initializer_range = 0.02
super().__init__(**kwargs)
__all__ = ["InstructBlipConfig", "InstructBlipQFormerConfig", "InstructBlipVisionConfig"]
| InstructBlipConfig |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/matrix_exponential_op_test.py | {
"start": 6076,
"end": 9364
} | class ____(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(513, 4, 4),
(513, 16, 16),
(513, 256, 256),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (2.0 * n) + np.diag(
np.ones(n).astype(np.float32))
return variables.Variable(np.tile(matrix, batch_shape + (1, 1)))
def benchmarkMatrixExponentialOp(self):
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = self._GenerateMatrix(shape)
expm = linalg_impl.matrix_exponential(matrix)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(expm),
min_iters=25,
name="matrix_exponential_cpu_{shape}".format(shape=shape))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/gpu:0"):
matrix = self._GenerateMatrix(shape)
expm = linalg_impl.matrix_exponential(matrix)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(expm),
min_iters=25,
name="matrix_exponential_gpu_{shape}".format(shape=shape))
def _TestRandomSmall(dtype, batch_dims, size):
def Test(self):
np.random.seed(42)
shape = batch_dims + (size, size)
matrix = np.random.uniform(low=-1.0, high=1.0, size=shape).astype(dtype)
self._verifyExponentialReal(matrix)
return Test
def _TestL1Norms(dtype, shape, scale):
def Test(self):
np.random.seed(42)
matrix = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype)
l1_norm = np.max(np.sum(np.abs(matrix), axis=matrix.ndim - 2))
matrix /= l1_norm
self._verifyExponentialReal(scale * matrix)
return Test
if __name__ == "__main__":
for dtype_ in [np.float32, np.float64, np.complex64, np.complex128]:
for batch_ in [(), (2,), (2, 2)]:
for size_ in [4, 7]:
name = "%s_%d_%d" % (dtype_.__name__, len(batch_), size_)
setattr(ExponentialOpTest, "testL1Norms_" + name,
_TestRandomSmall(dtype_, batch_, size_))
for shape_ in [(3, 3), (2, 3, 3)]:
for dtype_ in [np.float32, np.complex64]:
for scale_ in [0.1, 1.5, 5.0, 20.0]:
name = "%s_%d_%d" % (dtype_.__name__, len(shape_), int(scale_ * 10))
setattr(ExponentialOpTest, "testL1Norms_" + name,
_TestL1Norms(dtype_, shape_, scale_))
for dtype_ in [np.float64, np.complex128]:
for scale_ in [0.01, 0.2, 0.5, 1.5, 6.0, 25.0]:
name = "%s_%d_%d" % (dtype_.__name__, len(shape_), int(scale_ * 100))
setattr(ExponentialOpTest, "testL1Norms_" + name,
_TestL1Norms(dtype_, shape_, scale_))
test.main()
| MatrixExponentialBenchmark |
python | crytic__slither | slither/slithir/operations/new_elementary_type.py | {
"start": 244,
"end": 790
} | class ____(Call, OperationWithLValue):
def __init__(self, new_type, lvalue):
assert isinstance(new_type, ElementaryType)
assert is_valid_lvalue(lvalue)
super().__init__()
self._type = new_type
self._lvalue = lvalue
@property
def type(self):
return self._type
@property
def read(self):
return list(self.arguments)
def __str__(self):
args = [str(a) for a in self.arguments]
return f"{self.lvalue} = new {self._type}({','.join(args)})"
| NewElementaryType |
python | sympy__sympy | sympy/polys/orderings.py | {
"start": 908,
"end": 1107
} | class ____(MonomialOrder):
"""Lexicographic order of monomials. """
alias = 'lex'
is_global = True
is_default = True
def __call__(self, monomial):
return monomial
| LexOrder |
python | getsentry__sentry | tests/sentry/integrations/discord/test_integration.py | {
"start": 1146,
"end": 7717
} | class ____(IntegrationTestCase):
provider = DiscordIntegrationProvider
def setUp(self) -> None:
super().setUp()
self.application_id = "application-id"
self.public_key = "public-key"
self.bot_token = "bot-token"
self.client_secret = "client-secret"
options.set("discord.application-id", self.application_id)
options.set("discord.public-key", self.public_key)
options.set("discord.bot-token", self.bot_token)
options.set("discord.client-secret", self.client_secret)
self.token_url = f"{DISCORD_BASE_URL}/oauth2/token"
@mock.patch("sentry.integrations.discord.client.DiscordClient.set_application_command")
def assert_setup_flow(
self,
mock_set_application_command: mock.MagicMock,
guild_id: str = "1234567890",
server_name: str = "Cool server",
auth_code: str = "auth_code",
command_response_empty: bool = True,
) -> None:
responses.reset()
resp = self.client.get(self.init_path)
assert resp.status_code == 302
redirect = urlparse(resp["Location"])
assert redirect.scheme == "https"
assert redirect.netloc == "discord.com"
assert redirect.path == "/api/oauth2/authorize"
params = parse_qs(redirect.query)
assert params["client_id"] == [self.application_id]
assert params["permissions"] == [str(self.provider.bot_permissions)]
assert params["redirect_uri"] == ["http://testserver/extensions/discord/setup/"]
assert params["response_type"] == ["code"]
scopes = self.provider.oauth_scopes
assert params["scope"] == [" ".join(scopes)]
responses.add(
responses.GET,
url=f"{DiscordClient.base_url}{GUILD_URL.format(guild_id=guild_id)}",
match=[header_matcher({"Authorization": f"Bot {self.bot_token}"})],
json={
"id": guild_id,
"name": server_name,
},
)
responses.add(
responses.GET,
url=f"{DiscordClient.base_url}{APPLICATION_COMMANDS_URL.format(application_id=self.application_id)}",
match=[header_matcher({"Authorization": f"Bot {self.bot_token}"})],
json=[] if command_response_empty else COMMANDS,
)
responses.add(
responses.GET,
url=f"{DiscordClient.base_url}/users/@me/guilds/{guild_id}/member",
json={},
)
if command_response_empty:
for command in COMMANDS:
responses.add(
responses.POST,
url=f"{DiscordClient.base_url}{APPLICATION_COMMANDS_URL.format(application_id=self.application_id)}",
match=[
header_matcher({"Authorization": f"Bot {self.bot_token}"}),
json_params_matcher({"data": command}),
],
)
responses.add(
responses.POST,
url=self.token_url,
json={
"access_token": "access_token",
},
)
responses.add(
responses.GET, url=f"{DiscordClient.base_url}/users/@me", json={"id": "user_1234"}
)
resp = self.client.get(
"{}?{}".format(
self.setup_path,
urlencode({"guild_id": guild_id, "code": auth_code}),
)
)
call_list = responses.calls
assert call_list[0].request.headers["Authorization"] == f"Bot {self.bot_token}"
assert f"code={auth_code}" in call_list[1].request.body
assert call_list[2].request.headers["Authorization"] == "Bearer access_token"
assert resp.status_code == 200
self.assertDialogSuccess(resp)
if command_response_empty:
assert mock_set_application_command.call_count == 3
else:
assert mock_set_application_command.call_count == 0
def assert_setup_flow_from_discord(
self,
guild_id: str = "1234567890",
server_name: str = "Cool server",
auth_code: str = "auth_code",
) -> None:
responses.reset()
resp = self.client.get(self.configure_path)
assert resp.status_code == 302
redirect = urlparse(resp["Location"])
assert redirect.scheme == "https"
assert redirect.netloc == "discord.com"
assert redirect.path == "/api/oauth2/authorize"
params = parse_qs(redirect.query)
assert params["client_id"] == [self.application_id]
assert params["permissions"] == [str(self.provider.bot_permissions)]
assert params["redirect_uri"] == ["http://testserver/extensions/discord/configure/"]
assert params["response_type"] == ["code"]
scopes = self.provider.oauth_scopes
assert params["scope"] == [" ".join(scopes)]
responses.add(
responses.GET,
url=f"{DiscordClient.base_url}{GUILD_URL.format(guild_id=guild_id)}",
match=[header_matcher({"Authorization": f"Bot {self.bot_token}"})],
json={
"id": guild_id,
"name": server_name,
},
)
responses.add(
responses.GET,
url=f"{DiscordClient.base_url}{APPLICATION_COMMANDS_URL.format(application_id=self.application_id)}",
match=[header_matcher({"Authorization": f"Bot {self.bot_token}"})],
json=COMMANDS,
)
responses.add(
responses.POST,
url=self.token_url,
json={
"access_token": "access_token",
},
)
responses.add(
responses.GET,
url=f"{DiscordClient.base_url}/users/@me/guilds/{guild_id}/member",
json={},
)
responses.add(
responses.GET, url=f"{DiscordClient.base_url}/users/@me", json={"id": "user_1234"}
)
resp = self.client.get(
"{}?{}".format(
self.setup_path,
urlencode({"guild_id": guild_id, "code": auth_code}),
)
)
call_list = responses.calls
assert call_list[0].request.headers["Authorization"] == f"Bot {self.bot_token}"
assert f"code={auth_code}" in call_list[1].request.body
assert call_list[2].request.headers["Authorization"] == "Bearer access_token"
assert resp.status_code == 200
self.assertDialogSuccess(resp)
@control_silo_test
| DiscordSetupTestCase |
python | ray-project__ray | python/ray/tests/test_output.py | {
"start": 17586,
"end": 18939
} | class ____:
def __init__(self, *, num_threads: int = 5):
self._num_threads = num_threads
self._done_count = 0
self._done_lock = threading.Lock()
self._done_event = threading.Event()
def _spin():
for _ in range(300000000):
pass
for _ in range(5):
threading.Thread(target=self._spin, daemon=True).start()
def _spin(self):
for _ in range(300000000):
pass
with self._done_lock:
self._done_count += 1
if self._done_count == self._num_threads:
self._done_event.set()
def ready(self):
self._done_event.wait()
def __repr__(self):
return "{actor_repr}"
a = A.remote()
ray.get(a.ready.remote())
"""
out = run_string_as_driver(script)
assert actor_repr not in out
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "_ray_instance":
# Set object store memory very low so that it won't complain
# about low shm memory in Linux environment.
# The test failures currently complain it only has 2 GB memory,
# so let's set it much lower than that.
MB = 1000**2
ray.init(num_cpus=1, object_store_memory=(100 * MB))
ray.shutdown()
else:
sys.exit(pytest.main(["-sv", __file__]))
| A |
python | encode__django-rest-framework | rest_framework/throttling.py | {
"start": 5234,
"end": 5757
} | class ____(SimpleRateThrottle):
"""
Limits the rate of API calls that may be made by a anonymous users.
The IP address of the request will be used as the unique cache key.
"""
scope = 'anon'
def get_cache_key(self, request, view):
if request.user and request.user.is_authenticated:
return None # Only throttle unauthenticated requests.
return self.cache_format % {
'scope': self.scope,
'ident': self.get_ident(request)
}
| AnonRateThrottle |
python | automl__auto-sklearn | test/test_util/test_logging.py | {
"start": 134,
"end": 1667
} | class ____(unittest.TestCase):
def test_setup_logger(self):
# Test that setup_logger function correctly configures the logger
# according to the given dictionary, and uses the default
# logging.yaml file if logging_config is not specified.
with open(
os.path.join(os.path.dirname(__file__), "example_config.yaml"), "r"
) as fh:
example_config = yaml.safe_load(fh)
# Configure logger with example_config.yaml.
logging_.setup_logger(
logging_config=example_config, output_dir=tempfile.gettempdir()
)
# example_config sets the root logger's level to CRITICAL,
# which corresponds to 50.
self.assertEqual(logging.getLogger().getEffectiveLevel(), 50)
# This time use the default configuration.
logging_.setup_logger(logging_config=None, output_dir=tempfile.gettempdir())
# default config sets the root logger's level to DEBUG,
# which corresponds to 10.
self.assertEqual(logging.getLogger().getEffectiveLevel(), 10)
# Make sure we log to the desired directory
logging_.setup_logger(output_dir=os.path.dirname(__file__), filename="test.log")
logger = logging.getLogger()
logger.info("test_setup_logger")
with open(os.path.join(os.path.dirname(__file__), "test.log")) as fh:
self.assertIn("test_setup_logger", "".join(fh.readlines()))
os.remove(os.path.join(os.path.dirname(__file__), "test.log"))
| LoggingTest |
python | allegroai__clearml | clearml/backend_api/services/v2_23/workers.py | {
"start": 13886,
"end": 25261
} | class ____(NonStrictDataModel):
"""
:param id: Worker ID
:type id: str
:param user: Associated user (under whose credentials are used by the worker
daemon)
:type user: IdNameEntry
:param company: Associated company
:type company: IdNameEntry
:param ip: IP of the worker
:type ip: str
:param register_time: Registration time
:type register_time: datetime.datetime
:param last_activity_time: Last activity time (even if an error occurred)
:type last_activity_time: datetime.datetime
:param last_report_time: Last successful report time
:type last_report_time: datetime.datetime
:param task: Task currently being run by the worker
:type task: CurrentTaskEntry
:param project: Project in which currently executing task resides
:type project: IdNameEntry
:param queue: Queue from which running task was taken
:type queue: QueueEntry
:param queues: List of queues on which the worker is listening
:type queues: Sequence[QueueEntry]
:param tags: User tags for the worker
:type tags: Sequence[str]
:param system_tags: System tags for the worker
:type system_tags: Sequence[str]
:param key: Worker entry key
:type key: str
"""
_schema = {
"properties": {
"company": {
"description": "Associated company",
"oneOf": [{"$ref": "#/definitions/id_name_entry"}, {"type": "null"}],
},
"id": {"description": "Worker ID", "type": ["string", "null"]},
"ip": {"description": "IP of the worker", "type": ["string", "null"]},
"key": {"description": "Worker entry key", "type": ["string", "null"]},
"last_activity_time": {
"description": "Last activity time (even if an error occurred)",
"format": "date-time",
"type": ["string", "null"],
},
"last_report_time": {
"description": "Last successful report time",
"format": "date-time",
"type": ["string", "null"],
},
"project": {
"description": "Project in which currently executing task resides",
"oneOf": [{"$ref": "#/definitions/id_name_entry"}, {"type": "null"}],
},
"queue": {
"description": "Queue from which running task was taken",
"oneOf": [{"$ref": "#/definitions/queue_entry"}, {"type": "null"}],
},
"queues": {
"description": "List of queues on which the worker is listening",
"items": {"$ref": "#/definitions/queue_entry"},
"type": ["array", "null"],
},
"register_time": {
"description": "Registration time",
"format": "date-time",
"type": ["string", "null"],
},
"system_tags": {
"description": "System tags for the worker",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User tags for the worker",
"items": {"type": "string"},
"type": ["array", "null"],
},
"task": {
"description": "Task currently being run by the worker",
"oneOf": [
{"$ref": "#/definitions/current_task_entry"},
{"type": "null"},
],
},
"user": {
"description": "Associated user (under whose credentials are used by the worker daemon)",
"oneOf": [{"$ref": "#/definitions/id_name_entry"}, {"type": "null"}],
},
},
"type": "object",
}
def __init__(
self,
id: Optional[str] = None,
user: Any = None,
company: Any = None,
ip: Optional[str] = None,
register_time: Optional[str] = None,
last_activity_time: Optional[str] = None,
last_report_time: Optional[str] = None,
task: Any = None,
project: Any = None,
queue: Any = None,
queues: Optional[List[Any]] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
key: Optional[str] = None,
**kwargs: Any
) -> None:
super(Worker, self).__init__(**kwargs)
self.id = id
self.user = user
self.company = company
self.ip = ip
self.register_time = register_time
self.last_activity_time = last_activity_time
self.last_report_time = last_report_time
self.task = task
self.project = project
self.queue = queue
self.queues = queues
self.tags = tags
self.system_tags = system_tags
self.key = key
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("user")
def user(self) -> Any:
return self._property_user
@user.setter
def user(self, value: Any) -> None:
if value is None:
self._property_user = None
return
if isinstance(value, dict):
value = IdNameEntry.from_dict(value)
else:
self.assert_isinstance(value, "user", IdNameEntry)
self._property_user = value
@schema_property("company")
def company(self) -> Any:
return self._property_company
@company.setter
def company(self, value: Any) -> None:
if value is None:
self._property_company = None
return
if isinstance(value, dict):
value = IdNameEntry.from_dict(value)
else:
self.assert_isinstance(value, "company", IdNameEntry)
self._property_company = value
@schema_property("ip")
def ip(self) -> Optional[str]:
return self._property_ip
@ip.setter
def ip(self, value: Optional[str]) -> None:
if value is None:
self._property_ip = None
return
self.assert_isinstance(value, "ip", six.string_types)
self._property_ip = value
@schema_property("register_time")
def register_time(self) -> Optional[str]:
return self._property_register_time
@register_time.setter
def register_time(self, value: Optional[str]) -> None:
if value is None:
self._property_register_time = None
return
self.assert_isinstance(value, "register_time", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_register_time = value
@schema_property("last_activity_time")
def last_activity_time(self) -> Optional[str]:
return self._property_last_activity_time
@last_activity_time.setter
def last_activity_time(self, value: Optional[str]) -> None:
if value is None:
self._property_last_activity_time = None
return
self.assert_isinstance(value, "last_activity_time", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_last_activity_time = value
@schema_property("last_report_time")
def last_report_time(self) -> Optional[str]:
return self._property_last_report_time
@last_report_time.setter
def last_report_time(self, value: Optional[str]) -> None:
if value is None:
self._property_last_report_time = None
return
self.assert_isinstance(value, "last_report_time", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_last_report_time = value
@schema_property("task")
def task(self) -> Any:
return self._property_task
@task.setter
def task(self, value: Any) -> None:
if value is None:
self._property_task = None
return
if isinstance(value, dict):
value = CurrentTaskEntry.from_dict(value)
else:
self.assert_isinstance(value, "task", CurrentTaskEntry)
self._property_task = value
@schema_property("project")
def project(self) -> Any:
return self._property_project
@project.setter
def project(self, value: Any) -> None:
if value is None:
self._property_project = None
return
if isinstance(value, dict):
value = IdNameEntry.from_dict(value)
else:
self.assert_isinstance(value, "project", IdNameEntry)
self._property_project = value
@schema_property("queue")
def queue(self) -> Any:
return self._property_queue
@queue.setter
def queue(self, value: Any) -> None:
if value is None:
self._property_queue = None
return
if isinstance(value, dict):
value = QueueEntry.from_dict(value)
else:
self.assert_isinstance(value, "queue", QueueEntry)
self._property_queue = value
@schema_property("queues")
def queues(self) -> Optional[List[Any]]:
return self._property_queues
@queues.setter
def queues(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_queues = None
return
self.assert_isinstance(value, "queues", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [QueueEntry.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "queues", QueueEntry, is_array=True)
self._property_queues = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("key")
def key(self) -> Optional[str]:
return self._property_key
@key.setter
def key(self, value: Optional[str]) -> None:
if value is None:
self._property_key = None
return
self.assert_isinstance(value, "key", six.string_types)
self._property_key = value
| Worker |
python | mlflow__mlflow | mlflow/gateway/providers/base.py | {
"start": 432,
"end": 2918
} | class ____(ABC):
"""
Base class for MLflow Gateway providers.
"""
NAME: str = ""
SUPPORTED_ROUTE_TYPES: tuple[str, ...]
CONFIG_TYPE: type[ConfigModel]
def __init__(self, config: EndpointConfig):
if self.NAME == "":
raise ValueError(
f"{self.__class__.__name__} is a subclass of BaseProvider and must "
f"override 'NAME' attribute as a non-empty string."
)
if not hasattr(self, "CONFIG_TYPE") or not issubclass(self.CONFIG_TYPE, ConfigModel):
raise ValueError(
f"{self.__class__.__name__} is a subclass of BaseProvider and must "
f"override 'CONFIG_TYPE' attribute as a subclass of ConfigModel."
)
self.config = config
async def chat_stream(
self, payload: chat.RequestPayload
) -> AsyncIterable[chat.StreamResponsePayload]:
raise AIGatewayException(
status_code=501,
detail=f"The chat streaming route is not implemented for {self.NAME} models.",
)
async def chat(self, payload: chat.RequestPayload) -> chat.ResponsePayload:
raise AIGatewayException(
status_code=501,
detail=f"The chat route is not implemented for {self.NAME} models.",
)
async def completions_stream(
self, payload: completions.RequestPayload
) -> AsyncIterable[completions.StreamResponsePayload]:
raise AIGatewayException(
status_code=501,
detail=f"The completions streaming route is not implemented for {self.NAME} models.",
)
async def completions(self, payload: completions.RequestPayload) -> completions.ResponsePayload:
raise AIGatewayException(
status_code=501,
detail=f"The completions route is not implemented for {self.NAME} models.",
)
async def embeddings(self, payload: embeddings.RequestPayload) -> embeddings.ResponsePayload:
raise AIGatewayException(
status_code=501,
detail=f"The embeddings route is not implemented for {self.NAME} models.",
)
@staticmethod
def check_for_model_field(payload):
if "model" in payload:
raise AIGatewayException(
status_code=422,
detail="The parameter 'model' is not permitted to be passed. The route being "
"queried already defines a model instance.",
)
| BaseProvider |
python | spack__spack | lib/spack/spack/util/compression.py | {
"start": 16640,
"end": 16987
} | class ____(CompressedFileTypeInterface):
_MAGIC_NUMBER = b"\x42\x5a\x68"
extension = "bz2"
name = "bzip2 compressed data"
def peek(self, stream: BinaryIO, num_bytes: int) -> Optional[io.BytesIO]:
if BZ2_SUPPORTED:
return _decompressed_peek(bz2.BZ2File(stream), stream, num_bytes)
return None
| BZipFileType |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 33461,
"end": 33693
} | class ____(PrefectBaseModel):
"""Filter by `ArtifactCollection.task_run_id`."""
any_: Optional[List[UUID]] = Field(
default=None, description="A list of task run IDs to include"
)
| ArtifactCollectionFilterTaskRunId |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol10.py | {
"start": 133,
"end": 224
} | class ____(Protocol):
def a(self) -> None: ...
def b(self) -> None: ...
| ProtocolBase |
python | tensorflow__tensorflow | tensorflow/python/data/benchmarks/from_tensor_slices_benchmark.py | {
"start": 2353,
"end": 5958
} | class ____(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for `tf.data.Dataset.from_tensor_slices()`."""
def benchmark_slice_repeat_batch(self):
input_size = 10000
batch_size = 100
num_epochs = 100
num_elements = input_size * num_epochs // batch_size
input_data = np.random.randn(input_size)
dataset = dataset_ops.Dataset.from_tensor_slices(input_data)
dataset = dataset.repeat(num_epochs).batch(batch_size)
self.run_and_report_benchmark(
dataset,
num_elements=num_elements,
extras={
"model_name": "from_tensor_slices.benchmark.1",
"parameters": "%d.%d" % (input_size, batch_size),
},
name="slice_repeat_batch_input_%d_batch_%d" % (input_size, batch_size))
def benchmark_reshape_slice_repeat(self):
input_size = 10000
reshape_dim = [100, 100]
num_epochs = 100
num_elements = num_epochs * reshape_dim[0]
data = np.random.randn(input_size).reshape(*reshape_dim)
dataset = dataset_ops.Dataset.from_tensor_slices(data).repeat(num_epochs)
self.run_and_report_benchmark(
dataset,
num_elements=num_elements,
extras={
"model_name": "from_tensor_slices.benchmark.2",
"parameters": "%d" % input_size,
},
name="reshape_slice_repeat_input_%d" % input_size,
)
def benchmark_slice_repeat_sparse(self):
non_zeros_per_row_values = [0, 1, 5, 10, 100]
num_rows_values = [32, 64, 128, 1024]
for non_zeros_per_row in non_zeros_per_row_values:
tensor = sparse_tensor.SparseTensor(
indices=np.arange(non_zeros_per_row, dtype=np.int64)[:, np.newaxis],
values=np.arange(non_zeros_per_row, dtype=np.int64),
dense_shape=[1000])
for num_rows in num_rows_values:
# TODO(b/147153744): Function-valued attributes with their own
# attributes are currently only supported in graph mode.
@def_function.function
def make_dataset():
# pylint: disable=cell-var-from-loop
dataset = dataset_ops.Dataset.from_tensors(tensor)
dataset = dataset.repeat(num_rows).batch(num_rows)
batched_tensor = get_single_element.get_single_element(dataset)
dataset = dataset_ops.Dataset.from_tensors(batched_tensor).repeat()
return SingleThreadedFlatMapDataset(
dataset, dataset_ops.Dataset.from_tensor_slices)
self.run_and_report_benchmark(
make_dataset(),
num_elements=100000,
iters=5,
extras={
"model_name": "from_tensor_slices.benchmark.3",
"parameters": "%d.%d" % (non_zeros_per_row, num_rows),
},
name="slice_repeat_sparse_elements_per_row_%d_num_rows_%d" %
(non_zeros_per_row, num_rows))
def benchmark_slice_batch_cache_repeat(self):
input_size = 10000
batch_size = 100
num_epochs = 100
num_elements = input_size * num_epochs // batch_size
input_data = np.random.randn(input_size)
dataset = (
dataset_ops.Dataset.from_tensor_slices(input_data).batch(
batch_size).cache().repeat(num_epochs))
self.run_and_report_benchmark(
dataset,
num_elements=num_elements,
extras={
"model_name": "from_tensor_slices.benchmark.4",
"parameters": "%d.%d" % (input_size, batch_size),
},
name="slice_batch_cache_repeat_input_%d_batch_%d" %
(input_size, batch_size))
if __name__ == "__main__":
benchmark_base.test.main()
| FromTensorSlicesBenchmark |
python | getsentry__sentry | src/sentry/integrations/msteams/webhook.py | {
"start": 3255,
"end": 3382
} | class ____(MsTeamsIntegrationAnalytics):
pass
@analytics.eventclass("integrations.msteams.archive")
| MsTeamsIntegrationResolve |
python | huggingface__transformers | src/transformers/models/pop2piano/modeling_pop2piano.py | {
"start": 23858,
"end": 27674
} | class ____(PreTrainedModel):
config: Pop2PianoConfig
base_model_prefix = "transformer"
output_modalities = ("audio",)
supports_gradient_checkpointing = True
_can_compile_fullgraph = False
_no_split_modules = ["Pop2PianoBlock"]
_keep_in_fp32_modules = ["wo"]
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor # Used for testing weights initialization
if isinstance(module, Pop2PianoLayerNorm):
init.constant_(module.weight, factor * 1.0)
elif isinstance(module, Pop2PianoConcatEmbeddingToMel):
init.normal_(module.embedding.weight, mean=0.0, std=factor * 1.0)
elif isinstance(module, Pop2PianoForConditionalGeneration):
init.normal_(module.shared.weight, mean=0.0, std=factor * 1.0)
if hasattr(module, "lm_head"):
init.normal_(module.lm_head.weight, mean=0.0, std=factor * 1.0)
elif isinstance(module, Pop2PianoDenseActDense):
init.normal_(module.wi.weight, mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi, "bias") and module.wi.bias is not None:
init.zeros_(module.wi.bias)
init.normal_(module.wo.weight, mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
init.zeros_(module.wo.bias)
elif isinstance(module, Pop2PianoDenseGatedActDense):
init.normal_(module.wi_0.weight, mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
init.zeros_(module.wi_0.bias)
init.normal_(module.wi_1.weight, mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
init.zeros_(module.wi_1.bias)
init.normal_(module.wo.weight, mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
init.zeros_(module.wo.bias)
elif isinstance(module, Pop2PianoAttention):
d_model = self.config.d_model
key_value_proj_dim = self.config.d_kv
n_heads = self.config.num_heads
init.normal_(module.q.weight, mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
init.normal_(module.k.weight, mean=0.0, std=factor * (d_model**-0.5))
init.normal_(module.v.weight, mean=0.0, std=factor * (d_model**-0.5))
init.normal_(module.o.weight, mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
if module.has_relative_attention_bias:
init.normal_(module.relative_attention_bias.weight, mean=0.0, std=factor * ((d_model) ** -0.5))
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
if decoder_start_token_id is None:
raise ValueError(
"self.model.config.decoder_start_token_id has to be defined. In Pop2Piano it is usually set to the pad_token_id."
)
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("self.model.config.pad_token_id has to be defined.")
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
| Pop2PianoPreTrainedModel |
python | pdm-project__pdm | src/pdm/resolver/base.py | {
"start": 887,
"end": 2154
} | class ____(abc.ABC):
"""The resolver class."""
environment: BaseEnvironment
"""The environment instance."""
requirements: list[Requirement]
"""The list of requirements to resolve."""
update_strategy: str
"""The update strategy to use [all|reuse|eager|reuse-installed]."""
strategies: set[str]
"""The list of strategies to use."""
target: EnvSpec
"""The target environment specification."""
tracked_names: t.Collection[str] = ()
"""The list of tracked names."""
keep_self: bool = False
"""Whether to keep self dependencies."""
locked_repository: LockedRepository | None = None
"""The repository with all locked dependencies."""
reporter: BaseReporter = field(default_factory=BaseReporter)
"""The reporter to use."""
requested_groups: set[str] = field(default_factory=set, init=False)
"""The list of requested groups."""
def __post_init__(self) -> None:
self.requested_groups = {g for r in self.requirements for g in r.groups}
@abc.abstractmethod
def resolve(self) -> Resolution:
"""Resolve the requirements."""
pass
@property
def project(self) -> Project:
"""The project instance."""
return self.environment.project
| Resolver |
python | scrapy__scrapy | tests/spiders.py | {
"start": 11753,
"end": 12780
} | class ____(MockServerSpider, CrawlSpider):
"""
A CrawlSpider which overrides the 'parse' method
"""
name = "crawl_spider_with_parse_method"
custom_settings: dict = {
"RETRY_HTTP_CODES": [], # no need to retry
}
rules = (Rule(LinkExtractor(), callback="parse", follow=True),)
async def start(self):
test_body = b"""
<html>
<head><title>Page title</title></head>
<body>
<p><a href="/status?n=200">Item 200</a></p> <!-- callback -->
<p><a href="/status?n=201">Item 201</a></p> <!-- callback -->
</body>
</html>
"""
url = self.mockserver.url("/alpayload")
yield Request(url, method="POST", body=test_body)
def parse(self, response, foo=None):
self.logger.info("[parse] status %i (foo: %s)", response.status, foo)
yield Request(
self.mockserver.url("/status?n=202"), self.parse, cb_kwargs={"foo": "bar"}
)
| CrawlSpiderWithParseMethod |
python | openai__openai-python | src/openai/types/responses/response_computer_tool_call.py | {
"start": 3092,
"end": 3296
} | class ____(BaseModel):
text: str
"""The text to type."""
type: Literal["type"]
"""Specifies the event type.
For a type action, this property is always set to `type`.
"""
| ActionType |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_P.py | {
"start": 1375,
"end": 2780
} | class ____(Benchmark):
r"""
Pathological objective function.
This class defines the Pathological [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Pathological}}(x) = \sum_{i=1}^{n -1} \frac{\sin^{2}\left(
\sqrt{100 x_{i+1}^{2} + x_{i}^{2}}\right) -0.5}{0.001 \left(x_{i}^{2}
- 2x_{i}x_{i+1} + x_{i+1}^{2}\right)^{2} + 0.50}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-100, 100]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0.` for :math:`x = [0, 0]` for
:math:`i = 1, 2`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-100.0] * self.N,
[100.0] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.
def fun(self, x, *args):
self.nfev += 1
vec = (0.5 + (sin(sqrt(100 * x[: -1] ** 2 + x[1:] ** 2)) ** 2 - 0.5) /
(1. + 0.001 * (x[: -1] ** 2 - 2 * x[: -1] * x[1:]
+ x[1:] ** 2) ** 2))
return sum(vec)
| Pathological |
python | anthropics__anthropic-sdk-python | src/anthropic/types/message_create_params.py | {
"start": 10675,
"end": 11036
} | class ____(MessageCreateParamsBase):
stream: Required[Literal[True]]
"""Whether to incrementally stream the response using server-sent events.
See [streaming](https://docs.claude.com/en/api/messages-streaming) for details.
"""
MessageCreateParams = Union[MessageCreateParamsNonStreaming, MessageCreateParamsStreaming]
| MessageCreateParamsStreaming |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.