language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
apache__airflow
providers/google/src/airflow/providers/google/marketing_platform/operators/analytics_admin.py
{ "start": 1667, "end": 5130 }
class ____(GoogleCloudBaseOperator): """ Lists all accounts to which the user has access. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:GoogleAnalyticsAdminListAccountsOperator` :param page_size: Optional, number of results to return in the list. :param page_token: Optional. The next_page_token value returned from a previous List request, if any. :param show_deleted: Optional. Whether to include soft-deleted (ie: "trashed") Accounts in the results. :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests will not be retried. :param timeout: Optional. The timeout for this request. :param metadata: Optional. Strings which should be sent along with the request as metadata. :param gcp_conn_id: The connection ID to use when fetching connection info. :param impersonation_chain: Optional. Service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). 
""" template_fields: Sequence[str] = ( "gcp_conn_id", "impersonation_chain", "page_size", "page_token", ) def __init__( self, *, page_size: int | None = None, page_token: str | None = None, show_deleted: bool | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.page_size = page_size self.page_token = page_token self.show_deleted = show_deleted self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute( self, context: Context, ) -> Sequence[Message]: hook = GoogleAnalyticsAdminHook( gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain, ) self.log.info( "Requesting list of Google Analytics accounts. Page size: %s, page token: %s", self.page_size, self.page_token, ) accounts = hook.list_accounts( page_size=self.page_size, page_token=self.page_token, show_deleted=self.show_deleted, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) accounts_list: Sequence[Message] = [Account.to_dict(item) for item in accounts] n = len(accounts_list) self.log.info("Successful request. Retrieved %s item%s.", n, "s" if n > 1 else "") return accounts_list
GoogleAnalyticsAdminListAccountsOperator
python
pandas-dev__pandas
pandas/tests/io/parser/conftest.py
{ "start": 2222, "end": 2279 }
class ____(CParser): low_memory = True
CParserLowMemory
python
django-extensions__django-extensions
tests/testapp/jobs/hourly/test_hourly_job.py
{ "start": 122, "end": 229 }
class ____(HourlyJob): help = "My sample hourly job." def execute(self): HOURLY_JOB_MOCK()
Job
python
pypa__pip
src/pip/_internal/cli/parser.py
{ "start": 4589, "end": 5239 }
class ____(optparse.OptionParser): def insert_option_group( self, idx: int, *args: Any, **kwargs: Any ) -> optparse.OptionGroup: """Insert an OptionGroup at a given position.""" group = self.add_option_group(*args, **kwargs) self.option_groups.pop() self.option_groups.insert(idx, group) return group @property def option_list_all(self) -> list[optparse.Option]: """Get a list of all options, including those in option groups.""" res = self.option_list[:] for i in self.option_groups: res.extend(i.option_list) return res
CustomOptionParser
python
great-expectations__great_expectations
tests/scripts/test_public_api_report.py
{ "start": 11670, "end": 12835 }
class ____: @public_api def example_public_api_method(): pass @staticmethod @public_api def example_public_api_staticmethod(): pass @classmethod @public_api def example_public_api_classmethod(cls): pass @some_other_decorator @public_api @another_decorator def example_multiple_decorator_public_api_method(self): pass """ test_path = tmp_path / "test_path.py" ast_definitions = self._class_and_function_definitions(tree=ast.parse(file_string)) definitions = [ Definition( name="test_name", filepath=pathlib.Path(test_path), ast_definition=ast_definition, ) for ast_definition in ast_definitions ] assert all( public_api_checker.is_definition_marked_public_api(definition) for definition in definitions ) def test_is_definition_marked_public_api_no( self, public_api_checker: PublicAPIChecker, tmp_path: pathlib.Path ): file_string = """ def example_module_level_function(): pass
ExamplePublicAPIClass
python
doocs__leetcode
solution/0100-0199/0102.Binary Tree Level Order Traversal/Solution.py
{ "start": 192, "end": 699 }
class ____: def levelOrder(self, root: Optional[TreeNode]) -> List[List[int]]: ans = [] if root is None: return ans q = deque([root]) while q: t = [] for _ in range(len(q)): node = q.popleft() t.append(node.val) if node.left: q.append(node.left) if node.right: q.append(node.right) ans.append(t) return ans
Solution
python
falconry__falcon
falcon/bench/queues/queues.py
{ "start": 586, "end": 820 }
class ____: def on_put(self, req, resp, tenant_id, queue_name): pass def on_get(self, req, resp, tenant_id, queue_name): pass def on_delete(self, req, resp, tenant_id, queue_name): pass
ItemResource
python
cython__cython
Cython/Debugger/libpython.py
{ "start": 38593, "end": 38884 }
class ____(PyObjectPtr): _typename = 'PyTypeObject' def _unichr_is_printable(char): # Logic adapted from Python 3's Tools/unicode/makeunicodedata.py if char == " ": return True import unicodedata return unicodedata.category(char) not in ("C", "Z")
PyTypeObjectPtr
python
apache__airflow
task-sdk/src/airflow/sdk/execution_time/comms.py
{ "start": 25082, "end": 25284 }
class ____(BaseModel): key: str dag_id: str run_id: str task_id: str map_index: int | None = None include_prior_dates: bool = False type: Literal["GetXCom"] = "GetXCom"
GetXCom
python
airbytehq__airbyte
airbyte-integrations/connectors/source-rki-covid/source_rki_covid/source.py
{ "start": 22383, "end": 24291 }
class ____(AbstractSource): def check_connection(self, logger, config) -> Tuple[bool, any]: """ Testing connection availability for the connector. :param config: the user-input config object conforming to the connector's spec.json :param logger: logger object :return Tuple[bool, any]: (True, None) if the input config can be used to connect to the API successfully, (False, error) otherwise. """ try: req = requests.get(RkiCovidStream.url_base + "germany") if req.status_code == 200: return True, None return False, req.reason except Exception: return False, "There is a problem in source check connection." def streams(self, config: Mapping[str, Any]) -> List[Stream]: """ :param config: A Mapping of the user input configuration as defined in the connector spec. """ # Streams For Germany streams = [ Germany(), GermanyAgeGroups(), GermanyHistoryCases(config=config), GermanHistoryIncidence(config=config), GermanHistoryDeaths(config=config), GermanHistoryRecovered(config=config), GermanHistoryFrozenIncidence(config=config), GermanHistoryHospitalization(config=config), ] # Streams For States Of Germany streams.extend( [ GermanyStates(), GermanyStatesAgeGroups(), StatesHistoryCases(config=config), StatesHistoryIncidence(config=config), StatesHistoryFrozenIncidence(config=config), StatesHistoryDeaths(config=config), StatesHistoryRecovered(config=config), StatesHistoryHospitalization(config=config), ] ) return streams
SourceRkiCovid
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_searchstrategy.py
{ "start": 3256, "end": 4373 }
class ____: x: defaultdict def test_jsonable_defaultdict(): obj = HasDefaultDict(defaultdict(list)) obj.x["a"] = [42] assert to_jsonable(obj, avoid_realization=False) == {"x": {"a": [42]}} def test_jsonable_namedtuple(): Obj = namedtuple("Obj", ("x")) obj = Obj(10) assert to_jsonable(obj, avoid_realization=False) == {"x": 10} def test_jsonable_small_ints_are_ints(): n = 2**62 for avoid in (True, False): assert isinstance(to_jsonable(n, avoid_realization=avoid), int) assert to_jsonable(n, avoid_realization=avoid) == n def test_jsonable_large_ints_are_floats(): n = 2**63 assert isinstance(to_jsonable(n, avoid_realization=False), float) assert to_jsonable(n, avoid_realization=False) == float(n) assert to_jsonable(n, avoid_realization=True) == "<symbolic>" def test_jsonable_very_large_ints(): # previously caused OverflowError when casting to float. n = 2**1024 assert to_jsonable(n, avoid_realization=False) == sys.float_info.max assert to_jsonable(n, avoid_realization=True) == "<symbolic>" @dataclass
HasDefaultDict
python
pytorch__pytorch
torch/distributed/algorithms/_comm_hooks/default_hooks.py
{ "start": 93, "end": 1353 }
class ____: r""" Stores state needed to perform the default communication algorithm within a communication hook. Args: process_group (ProcessGroup): The process group to be used. """ __slots__ = [ "process_group", "world_size", "gradient_predivide_factor", "gradient_postdivide_factor", ] def __init__(self, process_group: dist.ProcessGroup): if process_group is None: raise ValueError(f"Expected to pass in an explicit ProcessGroup to {self}.") self.process_group = process_group self.world_size = dist.get_world_size(process_group) # Setting two factors `self.gradient_predivide_factor` # and `self.gradient_postdivide_factor` to avoid underflow and overflow self.gradient_predivide_factor = self._get_gradient_predivide_factor( self.world_size ) self.gradient_postdivide_factor = ( self.world_size / self.gradient_predivide_factor ) @staticmethod def _get_gradient_predivide_factor(world_size: int) -> float: factor: int = 1 while world_size % factor == 0 and world_size / factor > factor: factor *= 2 return float(factor)
DefaultState
python
Textualize__textual
tests/snapshot_tests/snapshot_apps/modal_screen_bindings.py
{ "start": 557, "end": 893 }
class ____(App): BINDINGS = [("enter", "open_dialog", "Open Dialog")] def compose(self) -> ComposeResult: yield Header() yield Label("Hello") yield Footer() def action_open_dialog(self) -> None: self.push_screen(Dialog()) if __name__ == "__main__": app = ModalApp() app.run()
ModalApp
python
google__jax
jax/_src/numpy/error.py
{ "start": 5050, "end": 6519 }
class ____: """A context manager to set the error checking behavior. If both `all` and a category are provided, the category will override the `all` setting. When the error checking behavior is set to "ignore", all errors will be ignored. When set to "raise", errors will be detected and recorded, but an exception will not be raised immediately. Users must call :func:`raise_if_error` to at the end of the computation to raise the exception. """ def __init__( self, *, all: Behavior | None = None, nan: Behavior | None = None, divide: Behavior | None = None, oob: Behavior | None = None, ) -> None: new_settings = {} if all is not None: new_settings["nan"] = new_settings["divide"] = new_settings["oob"] = all if nan is not None: new_settings["nan"] = nan if divide is not None: new_settings["divide"] = divide if oob is not None: new_settings["oob"] = oob self.new_settings = new_settings self.stack = contextlib.ExitStack() def __enter__(self): config_flags = { "nan": config.error_checking_behavior_nan, "divide": config.error_checking_behavior_divide, "oob": config.error_checking_behavior_oob, } for key, value in self.new_settings.items(): self.stack.enter_context(config_flags[key](value)) return self def __exit__(self, exc_type, exc_value, traceback): self.stack.close()
error_checking_behavior
python
dagster-io__dagster
python_modules/libraries/dagster-aws/dagster_aws/athena/resources.py
{ "start": 7973, "end": 8093 }
class ____(FakeAthenaClient): """This class was used by the function-style fake Athena resource."""
FakeAthenaResource
python
kubernetes-client__python
kubernetes/client/models/v1_csi_driver_spec.py
{ "start": 383, "end": 24338 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'attach_required': 'bool', 'fs_group_policy': 'str', 'node_allocatable_update_period_seconds': 'int', 'pod_info_on_mount': 'bool', 'requires_republish': 'bool', 'se_linux_mount': 'bool', 'storage_capacity': 'bool', 'token_requests': 'list[StorageV1TokenRequest]', 'volume_lifecycle_modes': 'list[str]' } attribute_map = { 'attach_required': 'attachRequired', 'fs_group_policy': 'fsGroupPolicy', 'node_allocatable_update_period_seconds': 'nodeAllocatableUpdatePeriodSeconds', 'pod_info_on_mount': 'podInfoOnMount', 'requires_republish': 'requiresRepublish', 'se_linux_mount': 'seLinuxMount', 'storage_capacity': 'storageCapacity', 'token_requests': 'tokenRequests', 'volume_lifecycle_modes': 'volumeLifecycleModes' } def __init__(self, attach_required=None, fs_group_policy=None, node_allocatable_update_period_seconds=None, pod_info_on_mount=None, requires_republish=None, se_linux_mount=None, storage_capacity=None, token_requests=None, volume_lifecycle_modes=None, local_vars_configuration=None): # noqa: E501 """V1CSIDriverSpec - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._attach_required = None self._fs_group_policy = None self._node_allocatable_update_period_seconds = None self._pod_info_on_mount = None self._requires_republish = None self._se_linux_mount = None self._storage_capacity = None self._token_requests = None self._volume_lifecycle_modes = None self.discriminator = None if attach_required is not None: self.attach_required = attach_required if fs_group_policy is not None: 
self.fs_group_policy = fs_group_policy if node_allocatable_update_period_seconds is not None: self.node_allocatable_update_period_seconds = node_allocatable_update_period_seconds if pod_info_on_mount is not None: self.pod_info_on_mount = pod_info_on_mount if requires_republish is not None: self.requires_republish = requires_republish if se_linux_mount is not None: self.se_linux_mount = se_linux_mount if storage_capacity is not None: self.storage_capacity = storage_capacity if token_requests is not None: self.token_requests = token_requests if volume_lifecycle_modes is not None: self.volume_lifecycle_modes = volume_lifecycle_modes @property def attach_required(self): """Gets the attach_required of this V1CSIDriverSpec. # noqa: E501 attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called. This field is immutable. # noqa: E501 :return: The attach_required of this V1CSIDriverSpec. # noqa: E501 :rtype: bool """ return self._attach_required @attach_required.setter def attach_required(self, attach_required): """Sets the attach_required of this V1CSIDriverSpec. attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. 
The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called. This field is immutable. # noqa: E501 :param attach_required: The attach_required of this V1CSIDriverSpec. # noqa: E501 :type: bool """ self._attach_required = attach_required @property def fs_group_policy(self): """Gets the fs_group_policy of this V1CSIDriverSpec. # noqa: E501 fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field was immutable in Kubernetes < 1.29 and now is mutable. Defaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce. # noqa: E501 :return: The fs_group_policy of this V1CSIDriverSpec. # noqa: E501 :rtype: str """ return self._fs_group_policy @fs_group_policy.setter def fs_group_policy(self, fs_group_policy): """Sets the fs_group_policy of this V1CSIDriverSpec. fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field was immutable in Kubernetes < 1.29 and now is mutable. Defaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce. # noqa: E501 :param fs_group_policy: The fs_group_policy of this V1CSIDriverSpec. 
# noqa: E501 :type: str """ self._fs_group_policy = fs_group_policy @property def node_allocatable_update_period_seconds(self): """Gets the node_allocatable_update_period_seconds of this V1CSIDriverSpec. # noqa: E501 nodeAllocatableUpdatePeriodSeconds specifies the interval between periodic updates of the CSINode allocatable capacity for this driver. When set, both periodic updates and updates triggered by capacity-related failures are enabled. If not set, no updates occur (neither periodic nor upon detecting capacity-related failures), and the allocatable.count remains static. The minimum allowed value for this field is 10 seconds. This is a beta feature and requires the MutableCSINodeAllocatableCount feature gate to be enabled. This field is mutable. # noqa: E501 :return: The node_allocatable_update_period_seconds of this V1CSIDriverSpec. # noqa: E501 :rtype: int """ return self._node_allocatable_update_period_seconds @node_allocatable_update_period_seconds.setter def node_allocatable_update_period_seconds(self, node_allocatable_update_period_seconds): """Sets the node_allocatable_update_period_seconds of this V1CSIDriverSpec. nodeAllocatableUpdatePeriodSeconds specifies the interval between periodic updates of the CSINode allocatable capacity for this driver. When set, both periodic updates and updates triggered by capacity-related failures are enabled. If not set, no updates occur (neither periodic nor upon detecting capacity-related failures), and the allocatable.count remains static. The minimum allowed value for this field is 10 seconds. This is a beta feature and requires the MutableCSINodeAllocatableCount feature gate to be enabled. This field is mutable. # noqa: E501 :param node_allocatable_update_period_seconds: The node_allocatable_update_period_seconds of this V1CSIDriverSpec. 
# noqa: E501 :type: int """ self._node_allocatable_update_period_seconds = node_allocatable_update_period_seconds @property def pod_info_on_mount(self): """Gets the pod_info_on_mount of this V1CSIDriverSpec. # noqa: E501 podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume defined by a CSIVolumeSource, otherwise \"false\" \"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver. This field was immutable in Kubernetes < 1.29 and now is mutable. # noqa: E501 :return: The pod_info_on_mount of this V1CSIDriverSpec. # noqa: E501 :rtype: bool """ return self._pod_info_on_mount @pod_info_on_mount.setter def pod_info_on_mount(self, pod_info_on_mount): """Sets the pod_info_on_mount of this V1CSIDriverSpec. 
podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume defined by a CSIVolumeSource, otherwise \"false\" \"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver. This field was immutable in Kubernetes < 1.29 and now is mutable. # noqa: E501 :param pod_info_on_mount: The pod_info_on_mount of this V1CSIDriverSpec. # noqa: E501 :type: bool """ self._pod_info_on_mount = pod_info_on_mount @property def requires_republish(self): """Gets the requires_republish of this V1CSIDriverSpec. # noqa: E501 requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false. 
Note: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container. # noqa: E501 :return: The requires_republish of this V1CSIDriverSpec. # noqa: E501 :rtype: bool """ return self._requires_republish @requires_republish.setter def requires_republish(self, requires_republish): """Sets the requires_republish of this V1CSIDriverSpec. requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false. Note: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container. # noqa: E501 :param requires_republish: The requires_republish of this V1CSIDriverSpec. # noqa: E501 :type: bool """ self._requires_republish = requires_republish @property def se_linux_mount(self): """Gets the se_linux_mount of this V1CSIDriverSpec. # noqa: E501 seLinuxMount specifies if the CSI driver supports \"-o context\" mount option. When \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context. When \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem. Default is \"false\". 
# noqa: E501 :return: The se_linux_mount of this V1CSIDriverSpec. # noqa: E501 :rtype: bool """ return self._se_linux_mount @se_linux_mount.setter def se_linux_mount(self, se_linux_mount): """Sets the se_linux_mount of this V1CSIDriverSpec. seLinuxMount specifies if the CSI driver supports \"-o context\" mount option. When \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context. When \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem. Default is \"false\". # noqa: E501 :param se_linux_mount: The se_linux_mount of this V1CSIDriverSpec. # noqa: E501 :type: bool """ self._se_linux_mount = se_linux_mount @property def storage_capacity(self): """Gets the storage_capacity of this V1CSIDriverSpec. # noqa: E501 storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true. The check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object. Alternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published. 
This field was immutable in Kubernetes <= 1.22 and now is mutable. # noqa: E501 :return: The storage_capacity of this V1CSIDriverSpec. # noqa: E501 :rtype: bool """ return self._storage_capacity @storage_capacity.setter def storage_capacity(self, storage_capacity): """Sets the storage_capacity of this V1CSIDriverSpec. storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true. The check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object. Alternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published. This field was immutable in Kubernetes <= 1.22 and now is mutable. # noqa: E501 :param storage_capacity: The storage_capacity of this V1CSIDriverSpec. # noqa: E501 :type: bool """ self._storage_capacity = storage_capacity @property def token_requests(self): """Gets the token_requests of this V1CSIDriverSpec. # noqa: E501 tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": { \"<audience>\": { \"token\": <token>, \"expirationTimestamp\": <expiration timestamp in RFC3339>, }, ... } Note: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically. # noqa: E501 :return: The token_requests of this V1CSIDriverSpec. 
# noqa: E501 :rtype: list[StorageV1TokenRequest] """ return self._token_requests @token_requests.setter def token_requests(self, token_requests): """Sets the token_requests of this V1CSIDriverSpec. tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": { \"<audience>\": { \"token\": <token>, \"expirationTimestamp\": <expiration timestamp in RFC3339>, }, ... } Note: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically. # noqa: E501 :param token_requests: The token_requests of this V1CSIDriverSpec. # noqa: E501 :type: list[StorageV1TokenRequest] """ self._token_requests = token_requests @property def volume_lifecycle_modes(self): """Gets the volume_lifecycle_modes of this V1CSIDriverSpec. # noqa: E501 volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta. This field is immutable. # noqa: E501 :return: The volume_lifecycle_modes of this V1CSIDriverSpec. 
# noqa: E501 :rtype: list[str] """ return self._volume_lifecycle_modes @volume_lifecycle_modes.setter def volume_lifecycle_modes(self, volume_lifecycle_modes): """Sets the volume_lifecycle_modes of this V1CSIDriverSpec. volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta. This field is immutable. # noqa: E501 :param volume_lifecycle_modes: The volume_lifecycle_modes of this V1CSIDriverSpec. 
# noqa: E501 :type: list[str] """ self._volume_lifecycle_modes = volume_lifecycle_modes def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1CSIDriverSpec): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1CSIDriverSpec): return True return self.to_dict() != other.to_dict()
V1CSIDriverSpec
python
sqlalchemy__sqlalchemy
test/ext/test_horizontal_shard.py
{ "start": 36456, "end": 39134 }
class ____(fixtures.DeclarativeMappedTest): """illustrate the test case for #4376""" @classmethod def setup_classes(cls): Base = cls.DeclarativeBasic from sqlalchemy.ext.associationproxy import association_proxy class Book(Base): __tablename__ = "book" id = Column(Integer, primary_key=True) authors = association_proxy( "book_authors", "author", creator=lambda author: BookAuthor(author=author), ) book_authors = relationship("BookAuthor", back_populates="book") class BookAuthor(Base): __tablename__ = "book_author" authorid = Column(ForeignKey("author.id"), primary_key=True) bookid = Column(ForeignKey("book.id"), primary_key=True) book = relationship("Book", back_populates="book_authors") author = relationship("Author", back_populates="book_authors") class Author(Base): __tablename__ = "author" id = Column(Integer, primary_key=True) books = association_proxy( "book_authors", "book", creator=lambda book: BookAuthor(book=book), ) book_authors = relationship(BookAuthor, back_populates="author") def test_update_many_to_many_sharded(self): session = ShardedSession( shards={"test": testing.db}, shard_chooser=self.shard_chooser, identity_chooser=lambda *args: None, execute_chooser=lambda *args: ["test"], ) Book, Author = self.classes("Book", "Author") book = Book() book.authors.append(Author()) session.add(book) session.commit() def test_update_many_to_many_sharded__save_junction_table_directly(self): session = ShardedSession( shards={"test": testing.db}, shard_chooser=self.shard_chooser, identity_chooser=lambda *args: None, execute_chooser=lambda *args: ["test"], ) Book, Author, BookAuthor = self.classes("Book", "Author", "BookAuthor") book = Book() author = Author() session.add(book) session.add(author) session.commit() book_author = BookAuthor() book_author.bookid = book.id book_author.authorid = author.id session.add(book_author) session.commit() def shard_chooser(self, mapper, instance, clause=None): if not instance and not clause: raise Exception("Cannot determine 
shard") return "test"
UseAssocProxForM2MTest
python
readthedocs__readthedocs.org
readthedocs/core/forms.py
{ "start": 1811, "end": 2255 }
class ____(forms.ModelForm): username = CharField( label=_("Username"), help_text=_("Please type your username to confirm."), ) class Meta: model = User fields = ["username"] def clean_username(self): data = self.cleaned_data["username"] if self.instance.username != data: raise forms.ValidationError(_("Username does not match!")) return data
UserDeleteForm
python
getsentry__sentry
src/sentry/notifications/notification_action/group_type_notification_registry/handlers/issue_alert_registry_handler.py
{ "start": 570, "end": 1330 }
class ____(LegacyRegistryHandler): @staticmethod def handle_workflow_action(job: WorkflowEventData, action: Action, detector: Detector) -> None: try: handler = issue_alert_handler_registry.get(action.type) handler.invoke_legacy_registry(job, action, detector) except NoRegistrationExistsError: logger.exception( "No issue alert handler found for action type: %s", action.type, extra={"action_id": action.id}, ) raise except Exception: logger.exception( "Error invoking issue alert handler", extra={"action_id": action.id}, ) raise
IssueAlertRegistryHandler
python
numpy__numpy
benchmarks/benchmarks/bench_io.py
{ "start": 5312, "end": 6153 }
class ____(Benchmark): # pandas has a similar CSV reading benchmark # modified to suit np.loadtxt params = [550, 1000, 10000] param_names = ['size'] def setup(self, size): arr = np.arange(size).astype('uint64') + 2**63 self.data1 = StringIO('\n'.join(arr.astype(str).tolist())) arr = arr.astype(object) arr[500] = -1 self.data2 = StringIO('\n'.join(arr.astype(str).tolist())) def time_read_uint64(self, size): # mandatory rewind of StringIO object # between iterations of a repeat: np.loadtxt(self.data1) self.data1.seek(0) def time_read_uint64_neg_values(self, size): # mandatory rewind of StringIO object # between iterations of a repeat: np.loadtxt(self.data2) self.data2.seek(0)
LoadtxtReadUint64Integers
python
scipy__scipy
scipy/spatial/tests/test_kdtree.py
{ "start": 7398, "end": 9107 }
class ____: def setup_method(self): self.data = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]) self.kdtree = cKDTree(self.data) def test_single_query(self): d, i = self.kdtree.query([0, 0, 0]) assert_(isinstance(d, float)) assert_(isinstance(i, int)) def test_vectorized_query(self): d, i = self.kdtree.query(np.zeros((2, 4, 3))) assert_equal(np.shape(d), (2, 4)) assert_equal(np.shape(i), (2, 4)) def test_vectorized_query_noncontiguous_values(self): np.random.seed(1234) qs = np.random.randn(3, 1000).T ds, i_s = self.kdtree.query(qs) for q, d, i in zip(qs, ds, i_s): assert_equal(self.kdtree.query(q), (d, i)) def test_single_query_multiple_neighbors(self): s = 23 kk = self.kdtree.n+s d, i = self.kdtree.query([0, 0, 0], k=kk) assert_equal(np.shape(d), (kk,)) assert_equal(np.shape(i), (kk,)) assert_(np.all(~np.isfinite(d[-s:]))) assert_(np.all(i[-s:] == self.kdtree.n)) def test_vectorized_query_multiple_neighbors(self): s = 23 kk = self.kdtree.n+s d, i = self.kdtree.query(np.zeros((2, 4, 3)), k=kk) assert_equal(np.shape(d), (2, 4, kk)) assert_equal(np.shape(i), (2, 4, kk)) assert_(np.all(~np.isfinite(d[:, :, -s:]))) assert_(np.all(i[:, :, -s:] == self.kdtree.n))
Test_vectorization_cKDTree
python
matplotlib__matplotlib
lib/matplotlib/dates.py
{ "start": 59985, "end": 62303 }
class ____(DateLocator): """ Make ticks on regular intervals of one or more microsecond(s). .. note:: By default, Matplotlib uses a floating point representation of time in days since the epoch, so plotting data with microsecond time resolution does not work well for dates that are far (about 70 years) from the epoch (check with `~.dates.get_epoch`). If you want sub-microsecond resolution time plots, it is strongly recommended to use floating point seconds, not datetime-like time representation. If you really must use datetime.datetime() or similar and still need microsecond precision, change the time origin via `.dates.set_epoch` to something closer to the dates being plotted. See :doc:`/gallery/ticks/date_precision_and_epochs`. """ def __init__(self, interval=1, tz=None): """ Parameters ---------- interval : int, default: 1 The interval between each iteration. For example, if ``interval=2``, mark every second occurrence. tz : str or `~datetime.tzinfo`, default: :rc:`timezone` Ticks timezone. If a string, *tz* is passed to `dateutil.tz`. """ super().__init__(tz=tz) self._interval = interval self._wrapped_locator = ticker.MultipleLocator(interval) def set_axis(self, axis): self._wrapped_locator.set_axis(axis) return super().set_axis(axis) def __call__(self): # if no data have been set, this will tank with a ValueError try: dmin, dmax = self.viewlim_to_dt() except ValueError: return [] return self.tick_values(dmin, dmax) def tick_values(self, vmin, vmax): nmin, nmax = date2num((vmin, vmax)) t0 = np.floor(nmin) nmax = nmax - t0 nmin = nmin - t0 nmin *= MUSECONDS_PER_DAY nmax *= MUSECONDS_PER_DAY ticks = self._wrapped_locator.tick_values(nmin, nmax) ticks = ticks / MUSECONDS_PER_DAY + t0 return ticks def _get_unit(self): # docstring inherited return 1. / MUSECONDS_PER_DAY def _get_interval(self): # docstring inherited return self._interval
MicrosecondLocator
python
dagster-io__dagster
python_modules/libraries/dagster-dg-cli/dagster_dg_cli_tests/cli_tests/defs_state_tests/sample_state_backed_component.py
{ "start": 289, "end": 1422 }
class ____(StateBackedComponent, dg.Model, dg.Resolvable): fail_write: bool = False defs_state_key_id: Optional[str] = None defs_state: ResolvedDefsStateConfig = DefsStateConfigArgs.versioned_state_storage() @property def defs_state_config(self) -> DefsStateConfig: default_key = self.__class__.__name__ if self.defs_state_key_id is not None: default_key = f"{default_key}[{self.defs_state_key_id}]" return DefsStateConfig.from_args(self.defs_state, default_key=default_key) def build_defs_from_state( self, context: dg.ComponentLoadContext, state_path: Optional[Path] ) -> dg.Definitions: if state_path is None: return dg.Definitions() with open(state_path) as f: state = f.read() @dg.asset(name=state) def the_asset(): ... return dg.Definitions(assets=[the_asset]) async def write_state_to_path(self, state_path: Path) -> None: if self.fail_write: raise Exception("Failed to write state") else: state_path.write_text("hi")
SampleStateBackedComponent
python
getsentry__sentry
src/sentry/taskworker/registry.py
{ "start": 7877, "end": 9996 }
class ____: """ Registry of all namespaces. The TaskRegistry is responsible for handling namespace -> topic resolution during startup. """ def __init__(self) -> None: self._namespaces: dict[str, TaskNamespace] = {} self._router = self._build_router() def _build_router(self) -> TaskRouter: router_name: str = settings.TASKWORKER_ROUTER router_class = import_string(router_name) router = router_class() assert hasattr(router, "route_namespace") return router def contains(self, name: str) -> bool: return name in self._namespaces def get(self, name: str) -> TaskNamespace: """Fetch a namespace by name.""" if name not in self._namespaces: raise KeyError(f"No task namespace with the name {name}") return self._namespaces[name] def get_task(self, namespace: str, task: str) -> Task[Any, Any]: """Fetch a task by namespace and name.""" return self.get(namespace).get(task) def create_namespace( self, name: str, *, retry: Retry | None = None, expires: int | datetime.timedelta | None = None, processing_deadline_duration: int = DEFAULT_PROCESSING_DEADLINE, app_feature: str | None = None, ) -> TaskNamespace: """ Create a task namespace. Namespaces are mapped onto topics through the configured router allowing infrastructure to be scaled based on a region's requirements. Namespaces can define default behavior for tasks defined within a namespace. """ if name in self._namespaces: raise ValueError(f"Task namespace with name {name} already exists.") namespace = TaskNamespace( name=name, router=self._router, retry=retry, expires=expires, processing_deadline_duration=processing_deadline_duration, app_feature=app_feature, ) self._namespaces[name] = namespace return namespace taskregistry = TaskRegistry()
TaskRegistry
python
facebook__pyre-check
client/commands/infer.py
{ "start": 14230, "end": 14582 }
class ____: name: str annotation: TypeAnnotation has_default: bool def to_stub(self) -> str: delimiter = "=" if self.annotation.missing else " = " value = f"{delimiter}..." if self.has_default else "" return f"{self.name}{self.annotation.to_stub(prefix=': ')}{value}" @dataclasses.dataclass(frozen=True)
Parameter
python
getsentry__sentry
src/sentry/hybridcloud/services/organizationmember_mapping/service.py
{ "start": 517, "end": 1710 }
class ____(RpcService): key = "organizationmember_mapping" local_mode = SiloMode.CONTROL @classmethod def get_local_implementation(cls) -> RpcService: from sentry.hybridcloud.services.organizationmember_mapping.impl import ( DatabaseBackedOrganizationMemberMappingService, ) return DatabaseBackedOrganizationMemberMappingService() @rpc_method @abstractmethod def upsert_mapping( self, *, organization_id: int, organizationmember_id: int, mapping: RpcOrganizationMemberMappingUpdate, ) -> RpcOrganizationMemberMapping: pass @rpc_method @abstractmethod def delete( self, *, organization_id: int, organizationmember_id: int, ) -> None: pass def impl_with_db() -> OrganizationMemberMappingService: from sentry.hybridcloud.services.organizationmember_mapping.impl import ( DatabaseBackedOrganizationMemberMappingService, ) return DatabaseBackedOrganizationMemberMappingService() organizationmember_mapping_service = OrganizationMemberMappingService.create_delegation()
OrganizationMemberMappingService
python
getsentry__sentry
tests/sentry/workflow_engine/endpoints/test_organization_workflow_details.py
{ "start": 1452, "end": 2206 }
class ____(OrganizationWorkflowDetailsBaseTest): def test_simple(self) -> None: workflow = self.create_workflow(organization_id=self.organization.id) response = self.get_success_response(self.organization.slug, workflow.id) assert response.data == serialize(workflow) def test_does_not_exist(self) -> None: self.get_error_response(self.organization.slug, 3, status_code=404) def test_pending_deletion(self) -> None: workflow = self.create_workflow(organization_id=self.organization.id) workflow.status = ObjectStatus.PENDING_DELETION workflow.save() self.get_error_response(self.organization.slug, workflow.id, status_code=404) @region_silo_test
OrganizationWorkflowIndexGetTest
python
django__django
django/contrib/admindocs/views.py
{ "start": 2137, "end": 3735 }
class ____(BaseAdminDocsView): template_name = "admin_doc/template_tag_index.html" def get_context_data(self, **kwargs): tags = [] try: engine = Engine.get_default() except ImproperlyConfigured: # Non-trivial TEMPLATES settings aren't supported (#24125). pass else: app_libs = sorted(engine.template_libraries.items()) builtin_libs = [("", lib) for lib in engine.template_builtins] for module_name, library in builtin_libs + app_libs: for tag_name, tag_func in library.tags.items(): title, body, metadata = utils.parse_docstring(tag_func.__doc__) title = title and utils.parse_rst( title, "tag", _("tag:") + tag_name ) body = body and utils.parse_rst(body, "tag", _("tag:") + tag_name) for key in metadata: metadata[key] = utils.parse_rst( metadata[key], "tag", _("tag:") + tag_name ) tag_library = module_name.split(".")[-1] tags.append( { "name": tag_name, "title": title, "body": body, "meta": metadata, "library": tag_library, } ) return super().get_context_data(**{**kwargs, "tags": tags})
TemplateTagIndexView
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/constructors.py
{ "start": 4546, "end": 4639 }
class ____: def __init__(self): self.foo = _test_source()
SanitizeSingleTraceSource
python
getsentry__sentry
src/sentry/search/events/fields.py
{ "start": 81955, "end": 83015 }
class ____(DiscoverFunction): def __init__(self, *args, **kwargs) -> None: self.snql_aggregate = kwargs.pop("snql_aggregate", None) self.snql_column = kwargs.pop("snql_column", None) self.requires_other_aggregates = kwargs.pop("requires_other_aggregates", False) super().__init__(*args, **kwargs) def validate(self) -> None: # assert that all optional args have defaults available for i, arg in enumerate(self.optional_args): assert ( arg.has_default ), f"{self.name}: optional argument at index {i} does not have default" assert sum([self.snql_aggregate is not None, self.snql_column is not None]) == 1 # assert that no duplicate argument names are used names = set() for arg in self.args: assert ( arg.name not in names ), f"{self.name}: argument {arg.name} specified more than once" names.add(arg.name) self.validate_result_type(self.default_result_type)
SnQLFunction
python
getsentry__sentry
tests/sentry/test_no_create_or_update_usage.py
{ "start": 1925, "end": 2013 }
class ____: file_path: str line: int col: int qualified_context: str
Usage
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 36627, "end": 36834 }
class ____(sgqlc.types.Enum): """ See source code for more info. """ __schema__ = graphql_schema __choices__ = ("RSA_SHA1", "RSA_SHA256", "RSA_SHA384", "RSA_SHA512")
SamlSignatureAlgorithm
python
readthedocs__readthedocs.org
readthedocs/core/migrations/0002_make_userprofile_user_a_onetoonefield.py
{ "start": 133, "end": 614 }
class ____(migrations.Migration): safe = Safe.after_deploy() dependencies = [ ("core", "0001_initial"), ] operations = [ migrations.AlterField( model_name="userprofile", name="user", field=models.OneToOneField( related_name="profile", verbose_name="User", to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE, ), ), ]
Migration
python
google__jax
tests/array_test.py
{ "start": 1838, "end": 34966 }
class ____(jtu.JaxTestCase): def test_array_impl_name(self): self.assertEqual(array.ArrayImpl.__name__, "ArrayImpl") @parameterized.named_parameters( ("mesh_x_y", P("x", "y")), ("mesh_x", P("x")), ("mesh_y", P("y")), ("mesh_none_y", P(None, "y")), ("mesh_xy", P(("x", "y"))), ("mesh_fully_replicated", P()), ) def test_jax_array_value(self, mesh_axes): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) input_shape = (8, 2) arr, global_data = create_array( input_shape, jax.sharding.NamedSharding(global_mesh, mesh_axes)) for s in arr.addressable_shards: self.assertTrue(dispatch.is_single_device_sharding(s.data.sharding)) self.assertArraysEqual(s.data, global_data[s.index]) self.assertArraysEqual(arr._value, global_data) if arr._npy_value is not None: self.assertArraysEqual(arr._npy_value, global_data) @parameterized.named_parameters( ("mesh_x_y", P("x", "y"), # There are more slices but for convenient purposes, checking for only # 2. The indices + shard_shape + replica_id should be unique enough. 
((slice(0, 2), slice(0, 1)), (slice(0, 2), slice(1, 2))), (2, 1), [0, 0, 0, 0, 0, 0, 0, 0], False), ("mesh_x", P("x"), ((slice(0, 2), slice(None)), (slice(0, 2), slice(None))), (2, 2), [0, 1, 0, 1, 0, 1, 0, 1], False), ("mesh_y", P("y"), ((slice(0, 4), slice(None)), (slice(4, 8), slice(None))), (4, 2), [0, 0, 1, 1, 2, 2, 3, 3], False), ("mesh_none_y", P(None, "y"), ((slice(None), slice(0, 1)), (slice(None), slice(1, 2))), (8, 1), [0, 0, 1, 1, 2, 2, 3, 3], False), ("mesh_xy", P(("x", "y")), ((slice(0, 1), slice(None)), (slice(1, 2), slice(None))), (1, 2), [0, 0, 0, 0, 0, 0, 0, 0], False), ("mesh_fully_replicated", P(), ((slice(None), slice(None)), (slice(None), slice(None))), (8, 2), [0, 1, 2, 3, 4, 5, 6, 7], True), ) def test_array_2d_shard(self, mesh_axes, expected_index, expected_shard_shape, expected_replica_ids, expected_is_fully_replicated): global_mesh = jtu.create_mesh((4, 2), ('x', 'y'), iota_order=True) global_input_shape = (8, 2) s = jax.sharding.NamedSharding(global_mesh, mesh_axes) arr, global_input_data = create_array(global_input_shape, s) self.assertEqual(arr.ndim, 2) self.assertEqual(arr.size, 16) self.assertEqual(arr.addressable_shards[0].index, expected_index[0]) self.assertEqual(arr.addressable_shards[1].index, expected_index[1]) replica_ids = [i.replica_id for i in arr.addressable_shards] self.assertListEqual(replica_ids, expected_replica_ids) self.assertListEqual([i.device.id for i in arr.addressable_shards], [0, 1, 2, 3, 4, 5, 6, 7]) self.assertEqual(arr.is_fully_replicated, expected_is_fully_replicated) for i, s in enumerate(arr.addressable_shards): self.assertEqual(s.data.aval, core.ShapedArray(expected_shard_shape, s.data.dtype)) self.assertArraysEqual(s.data, global_input_data[s.index]) self.assertArraysEqual(s.data, arr.addressable_data(i)) for g, l in safe_zip(arr.global_shards, arr.addressable_shards): self.assertEqual(g.device, l.device) self.assertEqual(g.index, l.index) self.assertEqual(g.replica_id, l.replica_id) 
self.assertEqual(g.data.aval, l.data.aval) self.assertArraysEqual(g.data, l.data) def test_addressable_data(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) shape = (8, 2) s = jax.sharding.NamedSharding(global_mesh, P(None)) arr, inp_data = create_array(shape, s) for i in range(len(arr)): self.assertArraysEqual(inp_data, arr.addressable_data(i)) def test_array_delete(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) input_shape = (8, 2) arr, _ = create_array( input_shape, jax.sharding.NamedSharding(global_mesh, P('x', 'y'))) arr.delete() with self.assertRaisesRegex(RuntimeError, 'Array has been deleted.'): arr._check_if_deleted() self.assertIsNone(arr._npy_value) self.assertIsNone(arr._arrays) def test_single_device_array_usage_after_delete(self): x = jnp.array([1, 2, 3]) x.delete() with self.assertRaisesRegex(RuntimeError, 'Array has been deleted.'): _ = x + 1 @parameterized.named_parameters( ('no_global_shape', np.arange(10).reshape(2, 5), None), ('global_shape_prefix', {'a': np.arange(10).reshape(2, 5)}, (2, 5)), ('global_shape_full', {'a': np.arange(10).reshape(2, 5)}, {'a': (2, 5)}), ) def test_array_from_local_data_single_host(self, data, global_shape): jnp_data = jax.make_array_from_process_local_data( jax.devices()[0], data, global_shape ) jax.tree.map(self.assertArraysEqual, data, jnp_data) @parameterized.named_parameters( ('global_shape_prefix', {'a': np.arange(10).reshape(2, 5)}, (2, 8)), ('global_shape_full', {'a': np.arange(10).reshape(2, 5)}, {'a': (2, 6)}), ( 'global_shape_extra', {'a': np.arange(10).reshape(2, 5)}, {'a': (2, 5), 'b': (3, 5)}, ), ) def test_array_from_local_data_single_host_invalid_global_shape( self, data, global_shape ): with self.assertRaises(ValueError): jax.make_array_from_process_local_data( jax.devices()[0], data, global_shape ) def test_multi_device_array_usage_after_delete(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) shape = (8, 2) arr = jax.device_put(np.arange(math.prod(shape), dtype=np.int32), 
jax.sharding.NamedSharding(global_mesh, P('x'))) arr.delete() with self.assertRaisesRegex( RuntimeError, r'Array has been deleted with shape=int32\[16\].'): _ = arr + 1 def test_device_put(self): numpy_array = np.array([1, 2, 3]) arr = jax.device_put(numpy_array, jax.devices()[0]) self.assertIsInstance(arr.sharding, jax.sharding.SingleDeviceSharding) self.assertArraysEqual(arr, numpy_array) self.assertEqual(arr._committed, True) for i in arr.addressable_shards: self.assertArraysEqual(i.data, numpy_array) self.assertEqual(i.device, jax.devices()[0]) self.assertEqual(i.index, (slice(None),)) self.assertEqual(i.replica_id, 0) def test_device_put_array_delete(self): arr = jax.device_put(np.array([1, 2, 3]), jax.devices()[0]) arr.delete() with self.assertRaisesRegex(RuntimeError, 'Array has been deleted.'): arr._check_if_deleted() self.assertIsNone(arr._npy_value) self.assertIsNone(arr._arrays) def test_device_put_to_cpu(self): mesh = Mesh(jax.devices(), 'x') mesh_cpu = Mesh(jax.devices('cpu'), 'x') x = np.zeros(16) y = jax.device_put(x, NamedSharding(mesh, P('x'))) z = jax.device_put(y, NamedSharding(mesh_cpu, P('x'))) for z_s in z.addressable_shards: self.assertArraysEqual(z_s.data, x[z_s.index]) def test_array_device_get(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) input_shape = (8, 2) arr, input_data = create_array( input_shape, jax.sharding.NamedSharding(global_mesh, P('x', 'y'))) self.assertArraysEqual(jax.device_get(arr), input_data) def test_repr(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) input_shape = (8, 2) arr, _ = create_array( input_shape, jax.sharding.NamedSharding(global_mesh, P('x', 'y'))) self.assertStartsWith(repr(arr), "Array(") def test_empty_repr(self): shape = (0, 5) dtype = 'float32' x = jnp.empty(shape, dtype) self.assertEqual(repr(x), f"Array([], shape={shape}, dtype={dtype})") def test_jnp_array(self): arr = jnp.array([1, 2, 3]) self.assertIsInstance(arr, array.ArrayImpl) 
self.assertTrue(dispatch.is_single_device_sharding(arr.sharding)) self.assertEqual(arr._committed, False) self.assertFalse(arr.weak_type) def test_jnp_array_jit_add(self): a = jnp.array([1, 2, 3]) b = jnp.array([4, 5, 6]) arr = jax.jit(lambda x, y: x + y)(a, b) self.assertIsInstance(arr, array.ArrayImpl) self.assertArraysEqual(arr, np.array([5, 7, 9])) self.assertIsInstance(arr.sharding, jax.sharding.SingleDeviceSharding) def test_jnp_array_jnp_add(self): arr = jnp.add(jnp.array([1, 2, 3]), jnp.array([4, 5, 6])) self.assertIsInstance(arr, array.ArrayImpl) self.assertArraysEqual(arr, np.array([5, 7, 9])) self.assertIsInstance(arr.sharding, jax.sharding.SingleDeviceSharding) def test_jnp_array_normal_add(self): a = jnp.array([1, 2, 3]) b = jnp.array([4, 5, 6]) arr = a + b self.assertIsInstance(arr, array.ArrayImpl) self.assertArraysEqual(arr, np.array([5, 7, 9])) self.assertIsInstance(arr.sharding, jax.sharding.SingleDeviceSharding) def test_array_sharded_astype(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) input_shape = (8, 2) arr, input_data = create_array( input_shape, jax.sharding.NamedSharding(global_mesh, P('x', 'y'))) arr_float32 = arr.astype(jnp.float32) self.assertEqual(arr_float32.dtype, np.float32) self.assertArraysEqual(arr_float32, input_data.astype(np.float32)) self.assertLen(arr_float32.addressable_shards, 8) for i in arr_float32.addressable_shards: self.assertArraysEqual(i.data, input_data[i.index].astype(np.float32)) def test_jnp_array_astype(self): arr = jnp.array([1, 2, 3]) arr_float32 = arr.astype(jnp.float32) self.assertEqual(arr_float32.dtype, np.float32) self.assertArraysEqual(arr_float32, arr.astype(np.float32)) def test_array_delete_idempotent(self): mesh = jtu.create_mesh((2,), ('x',)) arr = jax.device_put(np.arange(8), jax.sharding.NamedSharding(mesh, P('x'))) arr.delete() self.assertTrue(arr.is_deleted()) arr.delete() # Run delete again to check if it's idempotent. 
self.assertTrue(arr.is_deleted()) def test_sharded_add(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) input_shape = (8, 2) a, input_data = create_array( input_shape, jax.sharding.NamedSharding(global_mesh, P('x', 'y'))) b, _ = create_array( input_shape, jax.sharding.NamedSharding(global_mesh, P('x'))) out = a + b expected = input_data + input_data self.assertArraysEqual(out, expected) self.assertLen(out.addressable_shards, 8) for i in out.addressable_shards: self.assertArraysEqual(i.data, expected[i.index]) def test_sharded_zeros_like(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) input_shape = (8, 2) a, input_data = create_array( input_shape, jax.sharding.NamedSharding(global_mesh, P('x', 'y'))) out = jnp.zeros_like(a) expected = jnp.zeros(input_data.shape, dtype=a.dtype) self.assertArraysEqual(out, expected) self.assertLen(out.addressable_shards, 8) for i in out.addressable_shards: self.assertArraysEqual(i.data, expected[i.index]) def test_zeros_like(self): a = jnp.array([1, 2, 3], dtype=np.int32) out = jnp.zeros_like(a) expected = np.zeros(a.shape, dtype=np.int32) self.assertArraysEqual(out, expected) self.assertTrue(dispatch.is_single_device_sharding(out.sharding)) def test_wrong_num_arrays(self): if jax.device_count() < 4: self.skipTest('Requires more than 4 devices') shape = (8, 2) mesh = jtu.create_mesh((1, 2), ('x', 'y')) devices = jax.local_devices()[:2] # Taking up to 2 devices s = jax.sharding.NamedSharding(mesh, P('x', 'y')) inp_data = np.arange(math.prod(shape), dtype=np.float32).reshape(shape) di_map = s.devices_indices_map(shape) bufs = [jax.device_put(inp_data[di_map[d]], d) for d in devices] with self.assertRaisesRegex( ValueError, r'Expected 2 per-device arrays \(this is how many devices are addressable ' r'by the sharding\), but got 1'): array.ArrayImpl(core.ShapedArray(shape, np.float32), s, bufs[:1], committed=True) for buf, d in zip(list(bufs), jax.local_devices()[2:4]): bufs.append(jax.device_put(buf, d)) with 
self.assertRaisesRegex( ValueError, r'Expected 2 per-device arrays \(this is how many devices are addressable ' r'by the sharding\), but got 4'): array.ArrayImpl(core.ShapedArray(shape, np.float32), s, bufs, committed=True) def test_arrays_not_in_device_assignment(self): if jax.device_count() < 4: self.skipTest('Requires more than 4 devices') shape = (8, 2) mesh = jtu.create_mesh((1, 2), ('x', 'y')) # sharding device ids = {0, 1} s = jax.sharding.NamedSharding(mesh, P('x')) inp_data = np.arange(math.prod(shape), dtype=np.float32).reshape(shape) # _arrays device ids = {2, 3} bufs = [jax.device_put(inp_data, d) for d in jax.devices()[2:4]] with self.assertRaisesRegex( ValueError, "Addressable devices and per-device arrays devices do not match. " "Sharding contains devices {0, 1} that are not present in per-device " "arrays. Per-device arrays contain devices {2, 3} that are not present " "in the sharding."): array.ArrayImpl(core.ShapedArray(shape, np.float32), s, bufs, committed=True) def test_different_devices_in_arrays_than_sharding(self): if jax.device_count() < 3: self.skipTest('Requires more than 3 devices') shape = (8, 2) mesh = jax.sharding.Mesh(np.array([jax.devices()[1], jax.devices()[2]]), ('x')) # sharding device ids = {1, 2} s = jax.sharding.NamedSharding(mesh, P('x')) inp_data = np.arange(math.prod(shape), dtype=np.float32).reshape(shape) # _arrays device ids = {0, 1} bufs = [jax.device_put(inp_data, d) for d in jax.devices()[:2]] with self.assertRaisesRegex( ValueError, "Addressable devices and per-device arrays devices do not match. " r"Sharding contains devices \{2\} that are not present in per-device " r"arrays. 
Per-device arrays contain devices \{0\} that are not present " "in the sharding."): array.ArrayImpl(core.ShapedArray(shape, np.float32), s, bufs, committed=True) def test_duplicated_devices_in_arrays(self): shape = (8, 2) mesh = jtu.create_mesh((1, 2), ('x', 'y')) # Sharding device ids = {0, 1} s = jax.sharding.NamedSharding(mesh, P('x')) inp_data = np.arange(math.prod(shape), dtype=np.float32).reshape(shape) # _arrays device ids = {0, 0} bufs = [jax.device_put(inp_data, jax.devices()[0]) for _ in range(2)] with self.assertRaisesRegex( ValueError, 'When making an array from single-device arrays, the input arrays must' ' be from distinct devices'): array.ArrayImpl(core.ShapedArray(shape, np.float32), s, bufs, committed=True) @parameterized.named_parameters( ("mesh_x_y", P("x", "y"), (2, 2)), ("mesh_x", P("x"), (2, 4)), ("mesh_y", P("y"), (4, 4)), ("mesh_none_y", P(None, "y"), (8, 2)), ("mesh_none_x", P(None, "x"), (8, 1)), ("mesh_xy", P(("x", "y")), (1, 4)), ("mesh_replicated", P(()), (8, 4)), ) def test_shard_shape_mismatch_with_buffer_shape(self, pspec, expected_shard_shape): shape = (8, 4) mesh = jtu.create_mesh((4, 2), ('x', 'y')) mps = jax.sharding.NamedSharding(mesh, pspec) inp_data = np.arange(5) str_expected_shard_shape = str(expected_shard_shape).replace( r"(", r"\(").replace(r")", r"\)") with self.assertRaisesRegex( ValueError, f"Expected shard shape {str_expected_shard_shape} doesn't match the " "single device array shape"): array.make_array_from_callback(shape, mps, lambda idx: inp_data) def test_mismatch_dtype(self): shape = (8, 2) mesh = jtu.create_mesh((1, 2), ('x', 'y')) s = jax.sharding.NamedSharding(mesh, P('x', 'y')) inp_data = np.arange(math.prod(shape), dtype=np.int32).reshape(shape) indices = s.devices_indices_map(shape) bufs = [jax.device_put(inp_data[indices[d]], d) for d in mesh.local_devices] with self.assertRaisesRegex( ValueError, "Input buffers to `Array` must have matching dtypes. 
" "Got int32, expected float32"): array.ArrayImpl(core.ShapedArray(shape, np.float32), s, bufs, committed=True) def test_array_iter_pmap_sharding(self): if jax.device_count() < 2: self.skipTest('Test requires >= 2 devices.') if config.pmap_shmap_merge.value: self.skipTest( 'Under `pmap_shmap_merge=True`, `y[0]` of sharded `y` will replicate' ' because of the indexing operation. ' ) x = jnp.array([[1., 0., 0.], [0., 2., 3.]]) y = jax.pmap(jnp.sin)(x) self.assertArraysEqual([list(a.devices())[0] for a in y], y.sharding._device_assignment, allow_object_dtype=True) sin_x = iter(np.sin(x)) for i, j in zip(iter(y), sin_x): self.assertIsInstance(i, array.ArrayImpl) self.assertArraysAllClose(i, j) def test_array_iter_pmap_sharding_last_dim_sharded(self): if jax.device_count() < 2: self.skipTest('Test requires >= 2 devices.') x = jnp.array([[1., 0., 0.], [0., 2., 3.]]) y = jax.pmap(jnp.sin, out_axes=1)(x) for i, j in zip(iter(y), iter(np.sin(x).T)): self.assertArraysAllClose(i, j) def test_array_iter_mesh_pspec_sharding_multi_device(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) input_shape = (8, 2) arr, input_data = create_array( input_shape, jax.sharding.NamedSharding(global_mesh, P('x', 'y'))) for i, j in zip(iter(arr), iter(input_data)): self.assertIsInstance(i, array.ArrayImpl) self.assertArraysEqual(i, j) def test_array_iter_replicated_multi_device(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) input_shape = (8, 2) arr, input_data = create_array( input_shape, jax.sharding.NamedSharding(global_mesh, P(None))) for i, j in zip(iter(arr), iter(input_data)): self.assertIsInstance(i, array.ArrayImpl) self.assertArraysEqual(i, j) self.assertLen(i.sharding.device_set, 8) self.assertTrue( op_shardings.are_hlo_shardings_equal( arr.sharding._to_xla_hlo_sharding(arr.ndim), i.sharding._to_xla_hlo_sharding(i.ndim))) def test_array_getitem_mesh_pspec_sharding_multi_device(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) input_shape = (8, 2) arr, 
input_data = create_array( input_shape, jax.sharding.NamedSharding(global_mesh, P('x', 'y'))) s = arr[2:4, 0:1] self.assertIsInstance(s, array.ArrayImpl) self.assertArraysEqual(s, input_data[2:4, 0:1]) p = arr[:2] self.assertIsInstance(p, array.ArrayImpl) self.assertArraysEqual(p, input_data[:2]) def test_array_getitem_compile_multi_device_sharding(self): def _check(out, inp, shard_shape): self.assertArraysEqual(out, inp) self.assertEqual(out.sharding.shard_shape(out.shape), shard_shape) self.assertNotIsInstance(out.sharding, jax.sharding.SingleDeviceSharding) global_mesh = jtu.create_mesh((2, 2, 2), ('x', 'y', 'z')) input_shape = (4, 4, 2) arr, np_inp = create_array( input_shape, jax.sharding.NamedSharding(global_mesh, P('x', 'y', 'z'))) _check(arr[:, -1, :], np_inp[:, -1, :], (2, 1)) _check(arr[0, 0, 0], np_inp[0, 0, 0], ()) _check(arr[-1, -1, :], np_inp[-1, -1, :], (1,)) _check(arr[:, 1, 0], np_inp[:, 1, 0], (2,)) _check(arr[:, :, :], np_inp[:, :, :], (2, 2, 1)) _check(arr[3, :, :], np_inp[3, :, :], (2, 1)) _check(arr[-1, -1, -1], np_inp[-1, -1, -1], ()) _check(arr[2, -1, :], np_inp[2, -1, :], (1,)) _check(arr[2, 3, 1], np_inp[2, 3, 1], ()) _check(arr[-1], np_inp[-1], (2, 1)) _check(arr[:], np_inp[:], (2, 2, 1)) _check(arr[np.array(0), :, :], np_inp[np.array(0), :, :], (2, 1)) _check(arr[jnp.array(0), :, :], np_inp[jnp.array(0), :, :], (2, 1)) _check(arr[0, :2, 1], np_inp[0, :2, 1], (2,)) _check(arr[:, 1::2], np_inp[:, 1::2], (2, 2, 1)) _check(arr[:, -1:, :], np_inp[:, -1:, :], (2, 1, 1)) _check(arr[0:6:1], np_inp[0:6:1], (2, 2, 1)) _check(arr[:4], np_inp[:4], (2, 2, 1)) _check(arr[::-1], np_inp[::-1], (2, 2, 1)) _check(arr[1], np_inp[1], (2, 1)) def test_array_getitem_replicated_multi_device(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) input_shape = (8, 2) arr, input_data = create_array( input_shape, jax.sharding.NamedSharding(global_mesh, P(None))) s = arr[2:4, 0:1] self.assertIsInstance(s, array.ArrayImpl) self.assertArraysEqual(s, np.array([[4], 
[6]])) self.assertLen(s.sharding.device_set, 8) self.assertTrue( op_shardings.are_hlo_shardings_equal( arr.sharding._to_xla_hlo_sharding(arr.ndim), s.sharding._to_xla_hlo_sharding(s.ndim))) p = arr[:2] self.assertIsInstance(p, array.ArrayImpl) self.assertArraysEqual(p, input_data[:2]) self.assertLen(s.sharding.device_set, 8) self.assertTrue( op_shardings.are_hlo_shardings_equal( arr.sharding._to_xla_hlo_sharding(arr.ndim), s.sharding._to_xla_hlo_sharding(s.ndim))) def test_array_iter_mesh_pspec_sharding_single_device(self): if jax.device_count() < 2: self.skipTest('Test requires >= 2 devices.') single_dev = jax.devices()[1:2] mesh = jax.sharding.Mesh(np.array(single_dev), ('x')) input_shape = (8, 2) arr, input_data = create_array( input_shape, jax.sharding.NamedSharding(mesh, P('x'))) for i, j in zip(arr, iter(input_data)): self.assertArraysEqual(i, j) self.assertEqual(i.devices(), {single_dev[0]}) def test_array_shards_committed(self): if jax.device_count() < 2: self.skipTest('Test requires >= 2 devices.') x = jnp.array([1, 2, 3]) for s in x.addressable_shards: self.assertEqual(s.data._committed, x._committed) self.assertFalse(s.data._committed) y = jax.device_put(x, jax.devices()[1]) for s in y.addressable_shards: self.assertEqual(s.data._committed, y._committed) self.assertTrue(s.data._committed) def test_array_jnp_array_copy_multi_device(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) input_shape = (8, 2) arr, _ = create_array( input_shape, jax.sharding.NamedSharding(global_mesh, P('x', 'y'))) c_arr = jnp.array(arr, copy=True) self.assertArraysEqual(arr, c_arr) self.assertEqual(arr._committed, c_arr._committed) for a, c in safe_zip(arr.addressable_shards, c_arr.addressable_shards): self.assertArraysEqual(a.data, c.data) self.assertEqual(a.index, c.index) self.assertEqual(a.replica_id, c.replica_id) self.assertEqual(a.device, c.device) self.assertNotEqual(a.data.unsafe_buffer_pointer(), c.data.unsafe_buffer_pointer()) def 
test_array_addressable_shards(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) input_shape = (8, 2) arr, _ = create_array( input_shape, jax.sharding.NamedSharding(global_mesh, P('x', 'y'))) for a in arr.addressable_shards: self.assertIsInstance(a.data, array.ArrayImpl) x = jnp.array([1, 2, 3]) self.assertIsInstance(x.addressable_data(0), array.ArrayImpl) def test_array_not_hashable(self): x = jnp.arange(4) with self.assertRaisesRegex(TypeError, "unhashable type"): hash(x) with self.assertRaisesRegex(TypeError, "unhashable type"): jax.jit(hash)(x) with self.assertRaisesRegex(TypeError, "unhashable type"): jax.vmap(hash)(x) def test_shape_dtype_struct_sharding_jit(self): mesh = jtu.create_mesh((8,), ('x')) s = jax.sharding.NamedSharding(mesh, P('x')) x_dummy = jax.ShapeDtypeStruct( shape=(16,), dtype=jnp.dtype('float32'), sharding=s) def f(x): return x * 2 c = jax.jit(f).lower(x_dummy).compile() input_shardings, output_shardings = c.input_shardings, c.output_shardings self.assertLen(input_shardings, 2) self.assertEqual(input_shardings[1], {}) self.assertEqual(input_shardings[1], {}) self.assertTrue( op_shardings.are_hlo_shardings_equal( input_shardings[0][0]._to_xla_hlo_sharding(x_dummy.ndim), s._to_xla_hlo_sharding(x_dummy.ndim))) self.assertTrue( op_shardings.are_hlo_shardings_equal( output_shardings._to_xla_hlo_sharding(x_dummy.ndim), s._to_xla_hlo_sharding(x_dummy.ndim))) # TODO(b/399879011): GPU is the only platform that has an implementation for # this, which exists in py_client.cc. Ideally, this would be replaced with # some kind of auto-defrag-on-OOM. @jtu.run_on_devices('gpu') def test_defragment(self): # Since the GPU implementation is in py_client.cc, it cannot be exposed via # the PjRt C API. 
if xb.using_pjrt_c_api(): self.skipTest('Manual defragment not exposed via PJRT C API') # Create a few arrays global_mesh = jtu.create_mesh((jax.local_device_count(),), ('x',)) shape = (8, 2) mpsharding = jax.sharding.NamedSharding(global_mesh, P('x',)) arr1, data = create_array(shape, mpsharding) arr2, _ = create_array(shape, mpsharding, data) arr3, _ = create_array(shape, mpsharding, data) # Delete one of them arr2.delete() # Defragment. xb.get_backend().defragment() # Sanity check remaining arrays self.assertArraysEqual(arr1, data) self.assertArraysEqual(arr1 + arr3, data * 2) # TODO(skyewm): check that defragmentation actually happened. I originally # thought to do this with unsafe_buffer_pointer(), but that's not always the # device memory address. Other ideas include causing enough fragmentation to # OOM, and exposing allocator stats in Python. def test_on_device_size_in_bytes(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) a, _ = create_array( (8, 2), jax.sharding.NamedSharding(global_mesh, P('x', 'y'))) shard_size = a.addressable_shards[0].data.on_device_size_in_bytes() self.assertGreaterEqual(shard_size, 4 * 2) self.assertEqual(shard_size * len(a.global_shards), a.on_device_size_in_bytes()) def test_array_is_ready(self): x = jax.device_put(jnp.arange(8.), jax.devices()[0]) x.is_ready() # doesn't crash def test_process_allgather_single_host(self): x = jnp.arange(8.) 
out = multihost_utils.process_allgather(x, tiled=True) self.assertEqual(out.shape, x.shape) self.assertArraysEqual(out, x) out = multihost_utils.process_allgather(x) self.assertEqual(out.shape, (1, x.shape[0])) self.assertArraysEqual(out, np.expand_dims(x, axis=0)) def test_broadcast_one_to_all_single_host(self): x = jnp.arange(8, dtype=jnp.uint8) out = multihost_utils.broadcast_one_to_all(x) self.assertEqual(out.shape, x.shape) self.assertEqual(out.dtype, x.dtype) self.assertArraysEqual(out, x) @jtu.sample_product( dtype=jtu.dtypes.all, shape=[(), (10), (2, 3)], ) @jtu.run_on_devices("cpu") def test_buffer_protocol(self, dtype, shape): rng = jtu.rand_default(self.rng()) x = rng(shape, dtype) y = jax.device_put(x) if dtype == jax.dtypes.bfloat16: with self.assertRaisesRegex( BufferError, 'Buffers of type BF16 are not supported by the Python buffer ' 'protocol.' ): memoryview(y) return x_bytes = memoryview(x).tobytes() y_bytes = memoryview(y).tobytes() self.assertEqual(x_bytes, y_bytes) @jtu.run_on_devices("cpu") def test_buffer_protocol_deletion(self): rng = jtu.rand_default(self.rng()) x = rng((3, 4), np.float32) y = jax.device_put(x) x_bytes = memoryview(x).tobytes() y_view = memoryview(y) # The array does not actually get deleted until any external reference is # dropped. Arguably we should make calling delete() in these circumstances # return an error instead, but that would be a behavior change for existing # users. 
y.delete() y_bytes = y_view.tobytes() self.assertEqual(x_bytes, y_bytes) def test_array_copy_to_host_async(self): global_mesh = jtu.create_mesh((2, 2), ('x', 'y')) x = jax.jit(lambda: jnp.arange(8.), out_shardings=jax.NamedSharding(global_mesh, P(None)))() self.assertLen(x.sharding.device_set, 4) x.copy_to_host_async() # doesn't crash self.assertArraysEqual(np.arange(8.), x) def test_array_fully_replicated_shard(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) inp_shape = (8, 2) arr, inp_data = create_array( inp_shape, jax.sharding.NamedSharding(global_mesh, P())) fs = arr._fully_replicated_shard() self.assertEqual(fs.shape, inp_shape) self.assertTrue(dispatch.is_single_device_sharding(fs.sharding)) self.assertArraysEqual(fs, inp_data) self.assertArraysEqual(arr.addressable_data(0), inp_data) def test_shard_array_to_fully_replicated(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) sharding = jax.sharding.NamedSharding(global_mesh, P()) arr = jnp.arange(16) self.assertFalse(arr._committed) self.assertIsInstance(arr.sharding, jax.sharding.SingleDeviceSharding) out = jax.jit(lambda x: x * 2, in_shardings=sharding)(arr) self.assertTrue(out.sharding.is_fully_replicated) self.assertArraysEqual(out, arr * 2) def test_fully_replicated_donated_array_is_deleted(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) sharding = jax.sharding.NamedSharding(global_mesh, P()) arr = jnp.arange(16) arr_copy = arr.copy() self.assertFalse(arr._committed) self.assertIsInstance(arr.sharding, jax.sharding.SingleDeviceSharding) out = jax.jit(lambda x: x * 2, in_shardings=sharding, donate_argnums=0)(arr) self.assertTrue(out.sharding.is_fully_replicated) self.assertArraysEqual(out, arr_copy * 2) self.assertTrue(arr.is_deleted()) @parameterized.product(dtype=jtu.dtypes.all + jtu.dtypes.custom_floats) def test_shards_have_correct_dtype(self, dtype): x = jnp.ones((), dtype=dtype) for shard in x.addressable_shards: self.assertEqual(shard.data.dtype, dtype) def 
test_make_array_from_callback_global_array(self): mesh = jtu.create_mesh((4, 2), ('x', 'y')) sharding = jax.sharding.NamedSharding(mesh, P()) np_inp = np.arange(16).reshape(8, 2) arr = jax.device_put(np_inp, sharding) out = jax.make_array_from_callback(np_inp.shape, sharding, lambda idx: arr[idx]) self.assertArraysEqual(out, arr) self.assertEqual(out.sharding, sharding) sharding2 = NamedSharding(mesh, P('x', 'y')) arr2 = jax.device_put(np_inp, sharding2) out2 = jax.make_array_from_callback(np_inp.shape, sharding2, lambda idx: arr2[idx]) self.assertArraysEqual(out2, arr2) self.assertEqual(out2.sharding, sharding2) def test_make_array_from_process_data_single_host_data_sharding(self): mesh = jtu.create_mesh((2, 1), ('x', 'y')) data = np.ones((256, 512)) s = jax.NamedSharding(mesh, P('x')) result = jax.make_array_from_process_local_data(s, data) self.assertArraysEqual(result, data) self.assertEqual(result.sharding, s) with jax.set_mesh(mesh): result = jax.make_array_from_process_local_data(P('x'), data) self.assertArraysEqual(result, data) self.assertEqual(result.sharding, s) @parameterized.product(dtype=jtu.dtypes.all + jtu.dtypes.custom_floats) @jtu.run_on_devices("gpu") def test_pinned_host_npy_value_doesnt_cache(self, dtype): # see https://github.com/jax-ml/jax/issues/26216 d_tensor = jnp.array(0, dtype=dtype) d_sharding = d_tensor.sharding h_sharding = d_sharding.with_memory_kind("pinned_host") h_tensor = jax.device_put(d_tensor, h_sharding) np.array(h_tensor) self.assertIsNone(h_tensor._npy_value) def test_make_array_from_single_device_arrays_no_dtype_error(self): mesh = jtu.create_mesh((4, 2), ('x', 'y')) s = jax.sharding.NamedSharding(mesh, P('x', 'y')) with self.assertRaisesRegex( ValueError, 'If the Array has no addressable shards, `dtype` must be provided via ' 'the `dtype` argument to `jax.make_array_from_single_device_arrays`.'): jax.make_array_from_single_device_arrays((8, 2), s, []) def test_make_array_from_single_device_arrays_bad_dtype_error(self): s 
= jax.sharding.SingleDeviceSharding(jax.devices()[0]) shape = (8, 2) np_inp = np.arange(math.prod(shape)).reshape(shape) arr = jax.device_put(np_inp, s) with self.assertRaisesRegex( ValueError, 'If `dtype` is provided to `jax.make_array_from_single_device_arrays`, ' 'it must match the dtype of the addressable shards.'): jax.make_array_from_single_device_arrays( shape, s, [arr], dtype=jnp.float32)
JaxArrayTest
python
huggingface__transformers
src/transformers/models/sam_hq/modular_sam_hq.py
{ "start": 19997, "end": 20223 }
class ____(SamVisionModel): pass @auto_docstring( custom_intro=""" Segment Anything Model HQ (SAM-HQ) for generating masks, given an input image and optional 2D location and bounding boxes. """ )
SamHQVisionModel
python
bokeh__bokeh
src/bokeh/core/serialization.py
{ "start": 5437, "end": 14973 }
class ____: """ Convert built-in and custom types into serializable representations. Not all built-in types are supported (e.g., decimal.Decimal due to lacking support for fixed point arithmetic in JavaScript). """ _encoders: ClassVar[dict[type[Any], Encoder]] = {} @classmethod def register(cls, type: type[Any], encoder: Encoder) -> None: assert type not in cls._encoders, f"'{type} is already registered" cls._encoders[type] = encoder _references: dict[ObjID, Ref] _deferred: bool _check_circular: bool _circular: dict[ObjID, Any] _buffers: list[Buffer] def __init__(self, *, references: set[Model] = set(), deferred: bool = True, check_circular: bool = False) -> None: self._references = {id(obj): obj.ref for obj in references} self._deferred = deferred self._check_circular = check_circular self._circular = {} self._buffers = [] def has_ref(self, obj: Any) -> bool: return id(obj) in self._references def add_ref(self, obj: Any, ref: Ref) -> None: assert id(obj) not in self._references self._references[id(obj)] = ref def get_ref(self, obj: Any) -> Ref | None: return self._references.get(id(obj)) @property def buffers(self) -> list[Buffer]: return list(self._buffers) def serialize(self, obj: Any) -> Serialized[Any]: return Serialized(self.encode(obj), self.buffers) def encode(self, obj: Any) -> AnyRep: ref = self.get_ref(obj) if ref is not None: return ref ident = id(obj) if self._check_circular and ident in self._circular: self.error("circular reference") self._circular[ident] = obj try: return self._encode(obj) finally: if ident in self._circular: del self._circular[ident] def encode_struct(self, **fields: Any) -> dict[str, AnyRep]: return {key: self.encode(val) for key, val in fields.items() if val is not Unspecified} def _encode(self, obj: Any) -> AnyRep: if isinstance(obj, Serializable): return obj.to_serializable(self) elif (encoder := self._encoders.get(type(obj))) is not None: return encoder(obj, self) elif obj is None: return None elif isinstance(obj, bool): 
return self._encode_bool(obj) elif isinstance(obj, str): return self._encode_str(obj) elif isinstance(obj, int): return self._encode_int(obj) elif isinstance(obj, float): return self._encode_float(obj) elif isinstance(obj, tuple): return self._encode_tuple(obj) elif isinstance(obj, list): return self._encode_list(obj) elif isinstance(obj, set): return self._encode_set(obj) elif isinstance(obj, dict): return self._encode_dict(obj) elif isinstance(obj, SimpleNamespace): return self._encode_struct(obj) elif isinstance(obj, bytes): return self._encode_bytes(obj) elif isinstance(obj, slice): return self._encode_slice(obj) elif isinstance(obj, TypedArray): return self._encode_typed_array(obj) elif isinstance(obj, np.ndarray): if obj.shape != (): return self._encode_ndarray(obj) else: return self._encode(obj.item()) elif is_dataclass(obj): return self._encode_dataclass(obj) else: return self._encode_other(obj) def _encode_bool(self, obj: bool) -> AnyRep: return obj def _encode_str(self, obj: str) -> AnyRep: return obj def _encode_int(self, obj: int) -> AnyRep: if -_MAX_SAFE_INT < obj <= _MAX_SAFE_INT: return obj else: from ..util.warnings import BokehUserWarning, warn warn("out of range integer may result in loss of precision", BokehUserWarning) return self._encode_float(float(obj)) def _encode_float(self, obj: float) -> NumberRep | float: if isnan(obj): return NumberRep(type="number", value="nan") elif isinf(obj): return NumberRep(type="number", value="-inf" if obj < 0 else "+inf") else: return obj def _encode_tuple(self, obj: tuple[Any, ...]) -> ArrayRepLike: return self._encode_list(list(obj)) def _encode_list(self, obj: list[Any]) -> ArrayRepLike: return [self.encode(item) for item in obj] def _encode_set(self, obj: set[Any]) -> SetRep: if len(obj) == 0: return SetRep(type="set") else: return SetRep( type="set", entries=[self.encode(entry) for entry in obj], ) def _encode_dict(self, obj: dict[Any, Any]) -> MapRep: if len(obj) == 0: result = MapRep(type="map") else: 
result = MapRep( type="map", entries=[(self.encode(key), self.encode(val)) for key, val in obj.items()], ) return result def _encode_struct(self, obj: SimpleNamespace) -> MapRep: return self._encode_dict(obj.__dict__) def _encode_dataclass(self, obj: Any) -> ObjectRep: cls = type(obj) module = cls.__module__ name = cls.__qualname__.replace("<locals>.", "") rep = ObjectRep( type="object", name=f"{module}.{name}", ) attributes = list(entries(obj)) if attributes: rep["attributes"] = {key: self.encode(val) for key, val in attributes} return rep def _encode_bytes(self, obj: bytes | memoryview) -> BytesRep: buffer = Buffer(make_id(), obj) data: Buffer | str if self._deferred: self._buffers.append(buffer) data = buffer else: data = buffer.to_base64() return BytesRep(type="bytes", data=data) def _encode_slice(self, obj: slice) -> SliceRep: return SliceRep( type="slice", start=self.encode(obj.start), stop=self.encode(obj.stop), step=self.encode(obj.step), ) def _encode_typed_array(self, obj: TypedArray[Any]) -> TypedArrayRep: array = self._encode_bytes(memoryview(obj)) typecode = obj.typecode itemsize = obj.itemsize def dtype() -> DataType: match typecode: case "f": return "float32" case "d": return "float64" case "B" | "H" | "I" | "L" | "Q": match obj.itemsize: case 1: return "uint8" case 2: return "uint16" case 4: return "uint32" #case 8: return "uint64" case "b" | "h" | "i" | "l" | "q": match obj.itemsize: case 1: return "int8" case 2: return "int16" case 4: return "int32" #case 8: return "int64" self.error(f"can't serialize array with items of type '{typecode}@{itemsize}'") return TypedArrayRep( type="typed_array", array=array, order=sys.byteorder, dtype=dtype(), ) def _encode_ndarray(self, obj: npt.NDArray[Any]) -> NDArrayRep: array = transform_array(obj) data: ArrayRepLike | BytesRep dtype: NDDataType if array_encoding_disabled(array): data = self._encode_list(array.flatten().tolist()) dtype = "object" else: data = self._encode_bytes(array.data) dtype = 
cast(NDDataType, array.dtype.name) return NDArrayRep( type="ndarray", array=data, shape=list(array.shape), dtype=dtype, order=sys.byteorder, ) def _encode_other(self, obj: Any) -> AnyRep: # date/time values that get serialized as milliseconds if is_datetime_type(obj): return convert_datetime_type(obj) if is_timedelta_type(obj): return convert_timedelta_type(obj) if isinstance(obj, dt.date): return obj.isoformat() # NumPy scalars if np.issubdtype(type(obj), np.floating): return self._encode_float(float(obj)) if np.issubdtype(type(obj), np.integer): return self._encode_int(int(obj)) if np.issubdtype(type(obj), np.bool_): return self._encode_bool(bool(obj)) # avoid importing pandas here unless it is actually in use if uses_pandas(obj): import pandas as pd if isinstance(obj, (pd.Series, pd.Index, pd.api.extensions.ExtensionArray)): return self._encode_ndarray(transform_series(obj)) elif obj is pd.NA: return None # handle array libraries that support conversion to a numpy array (e.g. polars, PyTorch) if hasattr(obj, "__array__") and isinstance(arr := obj.__array__(), np.ndarray): return self._encode_ndarray(arr) self.error(f"can't serialize {type(obj)}") def error(self, message: str) -> NoReturn: raise SerializationError(message)
Serializer
python
tensorflow__tensorflow
tensorflow/python/feature_column/feature_column_test.py
{ "start": 113823, "end": 136179 }
class ____(test.TestCase): def test_raises_if_empty_feature_columns(self): with self.assertRaisesRegex(ValueError, 'feature_columns must not be empty'): fc.input_layer(features={}, feature_columns=[]) def test_should_be_dense_column(self): with self.assertRaisesRegex(ValueError, 'must be a _DenseColumn'): fc.input_layer( features={'a': [[0]]}, feature_columns=[ fc._categorical_column_with_hash_bucket('wire_cast', 4) ]) def test_does_not_support_dict_columns(self): with self.assertRaisesRegex( ValueError, 'Expected feature_columns to be iterable, found dict.'): fc.input_layer( features={'a': [[0]]}, feature_columns={'a': fc._numeric_column('a')}) def test_bare_column(self): with ops.Graph().as_default(): features = features = {'a': [0.]} net = fc.input_layer(features, fc._numeric_column('a')) with _initialized_session(): self.assertAllClose([[0.]], self.evaluate(net)) def test_column_generator(self): with ops.Graph().as_default(): features = features = {'a': [0.], 'b': [1.]} columns = (fc._numeric_column(key) for key in features) net = fc.input_layer(features, columns) with _initialized_session(): self.assertAllClose([[0., 1.]], self.evaluate(net)) def test_raises_if_duplicate_name(self): with self.assertRaisesRegex( ValueError, 'Duplicate feature column name found for columns'): fc.input_layer( features={'a': [[0]]}, feature_columns=[fc._numeric_column('a'), fc._numeric_column('a')]) def test_one_column(self): price = fc._numeric_column('price') with ops.Graph().as_default(): features = {'price': [[1.], [5.]]} net = fc.input_layer(features, [price]) with _initialized_session(): self.assertAllClose([[1.], [5.]], self.evaluate(net)) def test_multi_dimension(self): price = fc._numeric_column('price', shape=2) with ops.Graph().as_default(): features = {'price': [[1., 2.], [5., 6.]]} net = fc.input_layer(features, [price]) with _initialized_session(): self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net)) def test_raises_if_shape_mismatch(self): price = 
fc._numeric_column('price', shape=2) with ops.Graph().as_default(): features = {'price': [[1.], [5.]]} with self.assertRaisesRegex( Exception, r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'): fc.input_layer(features, [price]) def test_reshaping(self): price = fc._numeric_column('price', shape=[1, 2]) with ops.Graph().as_default(): features = {'price': [[[1., 2.]], [[5., 6.]]]} net = fc.input_layer(features, [price]) with _initialized_session(): self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net)) def test_multi_column(self): price1 = fc._numeric_column('price1', shape=2) price2 = fc._numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]] } net = fc.input_layer(features, [price1, price2]) with _initialized_session(): self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net)) def test_fills_cols_to_vars(self): # Provide three _DenseColumn's to input_layer: a _NumericColumn, a # _BucketizedColumn, and an _EmbeddingColumn. Only the _EmbeddingColumn # creates a Variable. 
price1 = fc._numeric_column('price1') dense_feature = fc._numeric_column('dense_feature') dense_feature_bucketized = fc._bucketized_column( dense_feature, boundaries=[0.]) some_sparse_column = fc._categorical_column_with_hash_bucket( 'sparse_feature', hash_bucket_size=5) some_embedding_column = fc._embedding_column( some_sparse_column, dimension=10) with ops.Graph().as_default(): features = { 'price1': [[3.], [4.]], 'dense_feature': [[-1.], [4.]], 'sparse_feature': [['a'], ['x']], } cols_to_vars = {} all_cols = [price1, dense_feature_bucketized, some_embedding_column] fc.input_layer(features, all_cols, cols_to_vars=cols_to_vars) self.assertCountEqual(list(cols_to_vars.keys()), all_cols) self.assertEqual(0, len(cols_to_vars[price1])) self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized])) self.assertEqual(1, len(cols_to_vars[some_embedding_column])) self.assertIsInstance(cols_to_vars[some_embedding_column][0], variables_lib.Variable) self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [5, 10]) def test_fills_cols_to_vars_shared_embedding(self): # Provide 5 DenseColumn's to input_layer: a NumericColumn, a # BucketizedColumn, an EmbeddingColumn, two SharedEmbeddingColumns. The # EmbeddingColumn creates a Variable and the two SharedEmbeddingColumns # shared one variable. 
with ops.Graph().as_default(): price1 = fc._numeric_column('price1') dense_feature = fc._numeric_column('dense_feature') dense_feature_bucketized = fc._bucketized_column( dense_feature, boundaries=[0.]) some_sparse_column = fc._categorical_column_with_hash_bucket( 'sparse_feature', hash_bucket_size=5) some_embedding_column = fc._embedding_column( some_sparse_column, dimension=10) categorical_column_a = fc._categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc._categorical_column_with_identity( key='bbb', num_buckets=3) shared_embedding_a, shared_embedding_b = fc_new.shared_embedding_columns( [categorical_column_a, categorical_column_b], dimension=2) features = { 'price1': [[3.], [4.]], 'dense_feature': [[-1.], [4.]], 'sparse_feature': [['a'], ['x']], 'aaa': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)), 'bbb': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(1, 2, 1), dense_shape=(2, 2)), } cols_to_vars = {} all_cols = [ price1, dense_feature_bucketized, some_embedding_column, shared_embedding_a, shared_embedding_b ] fc.input_layer(features, all_cols, cols_to_vars=cols_to_vars) self.assertCountEqual(list(cols_to_vars.keys()), all_cols) self.assertEqual(0, len(cols_to_vars[price1])) self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized])) self.assertEqual(1, len(cols_to_vars[some_embedding_column])) self.assertEqual(1, len(cols_to_vars[shared_embedding_a])) # This is a bug in the current implementation and should be fixed in the # new one. 
self.assertEqual(0, len(cols_to_vars[shared_embedding_b])) self.assertIsInstance(cols_to_vars[some_embedding_column][0], variables_lib.Variable) self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [5, 10]) self.assertIsInstance(cols_to_vars[shared_embedding_a][0], variables_lib.Variable) self.assertAllEqual(cols_to_vars[shared_embedding_a][0].shape, [3, 2]) def test_fills_cols_to_vars_partitioned_variables(self): price1 = fc._numeric_column('price1') dense_feature = fc._numeric_column('dense_feature') dense_feature_bucketized = fc._bucketized_column( dense_feature, boundaries=[0.]) some_sparse_column = fc._categorical_column_with_hash_bucket( 'sparse_feature', hash_bucket_size=5) some_embedding_column = fc._embedding_column( some_sparse_column, dimension=10) with ops.Graph().as_default(): features = { 'price1': [[3.], [4.]], 'dense_feature': [[-1.], [4.]], 'sparse_feature': [['a'], ['x']], } cols_to_vars = {} all_cols = [price1, dense_feature_bucketized, some_embedding_column] with variable_scope.variable_scope( 'input_from_feature_columns', partitioner=partitioned_variables.fixed_size_partitioner(3, axis=0)): fc.input_layer(features, all_cols, cols_to_vars=cols_to_vars) self.assertCountEqual(list(cols_to_vars.keys()), all_cols) self.assertEqual(0, len(cols_to_vars[price1])) self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized])) self.assertEqual(3, len(cols_to_vars[some_embedding_column])) self.assertEqual( 'input_from_feature_columns/input_layer/sparse_feature_embedding/' 'embedding_weights/part_0:0', cols_to_vars[some_embedding_column][0].name) self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [2, 10]) self.assertAllEqual(cols_to_vars[some_embedding_column][1].shape, [2, 10]) self.assertAllEqual(cols_to_vars[some_embedding_column][2].shape, [1, 10]) def test_column_order(self): price_a = fc._numeric_column('price_a') price_b = fc._numeric_column('price_b') with ops.Graph().as_default(): features = { 'price_a': [[1.]], 
'price_b': [[3.]], } net1 = fc.input_layer(features, [price_a, price_b]) net2 = fc.input_layer(features, [price_b, price_a]) with _initialized_session(): self.assertAllClose([[1., 3.]], self.evaluate(net1)) self.assertAllClose([[1., 3.]], self.evaluate(net2)) def test_fails_for_categorical_column(self): animal = fc._categorical_column_with_identity('animal', num_buckets=4) with ops.Graph().as_default(): features = { 'animal': sparse_tensor.SparseTensor( indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2]) } with self.assertRaisesRegex(Exception, 'must be a _DenseColumn'): fc.input_layer(features, [animal]) def test_static_batch_size_mismatch(self): price1 = fc._numeric_column('price1') price2 = fc._numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': [[1.], [5.], [7.]], # batchsize = 3 'price2': [[3.], [4.]] # batchsize = 2 } with self.assertRaisesRegex( ValueError, r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string fc.input_layer(features, [price1, price2]) def test_subset_of_static_batch_size_mismatch(self): price1 = fc._numeric_column('price1') price2 = fc._numeric_column('price2') price3 = fc._numeric_column('price3') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3 'price2': [[3.], [4.]], # batchsize = 2 'price3': [[3.], [4.], [5.]] # batchsize = 3 } with self.assertRaisesRegex( ValueError, r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string fc.input_layer(features, [price1, price2, price3]) def test_runtime_batch_size_mismatch(self): price1 = fc._numeric_column('price1') price2 = fc._numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3 'price2': [[3.], [4.]] # batchsize = 2 } net = fc.input_layer(features, [price1, price2]) with _initialized_session() as 
sess: with self.assertRaisesRegex(errors.OpError, 'Dimension 0 in both shapes must be equal'): sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]}) def test_runtime_batch_size_matches(self): price1 = fc._numeric_column('price1') price2 = fc._numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2 'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2 } net = fc.input_layer(features, [price1, price2]) with _initialized_session() as sess: sess.run( net, feed_dict={ features['price1']: [[1.], [5.]], features['price2']: [[1.], [5.]], }) def test_multiple_layers_with_same_embedding_column(self): some_sparse_column = fc._categorical_column_with_hash_bucket( 'sparse_feature', hash_bucket_size=5) some_embedding_column = fc._embedding_column( some_sparse_column, dimension=10) with ops.Graph().as_default(): features = { 'sparse_feature': [['a'], ['x']], } all_cols = [some_embedding_column] fc.input_layer(features, all_cols) fc.input_layer(features, all_cols) # Make sure that 2 variables get created in this case. 
self.assertEqual(2, len( ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))) expected_var_names = [ 'input_layer/sparse_feature_embedding/embedding_weights:0', 'input_layer_1/sparse_feature_embedding/embedding_weights:0' ] self.assertCountEqual( expected_var_names, [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)]) def test_multiple_layers_with_same_shared_embedding_column(self): with ops.Graph().as_default(): categorical_column_a = fc._categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc._categorical_column_with_identity( key='bbb', num_buckets=3) embedding_dimension = 2 embedding_column_b, embedding_column_a = fc_new.shared_embedding_columns( [categorical_column_b, categorical_column_a], dimension=embedding_dimension) features = { 'aaa': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)), 'bbb': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(1, 2, 1), dense_shape=(2, 2)), } all_cols = [embedding_column_a, embedding_column_b] fc.input_layer(features, all_cols) fc.input_layer(features, all_cols) # Make sure that only 1 variable gets created in this case. 
self.assertEqual(1, len( ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))) self.assertCountEqual( ['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'], [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)]) def test_multiple_layers_with_same_shared_embedding_column_diff_graphs(self): with ops.Graph().as_default(): categorical_column_a = fc._categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc._categorical_column_with_identity( key='bbb', num_buckets=3) embedding_dimension = 2 embedding_column_b, embedding_column_a = fc_new.shared_embedding_columns( [categorical_column_b, categorical_column_a], dimension=embedding_dimension) all_cols = [embedding_column_a, embedding_column_b] features = { 'aaa': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)), 'bbb': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(1, 2, 1), dense_shape=(2, 2)), } fc.input_layer(features, all_cols) # Make sure that only 1 variable gets created in this case. self.assertEqual(1, len( ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))) with ops.Graph().as_default(): features1 = { 'aaa': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)), 'bbb': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(1, 2, 1), dense_shape=(2, 2)), } fc.input_layer(features1, all_cols) # Make sure that only 1 variable gets created in this case. self.assertEqual(1, len( ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))) self.assertCountEqual( ['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'], [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)]) def test_with_1d_sparse_tensor(self): with ops.Graph().as_default(): embedding_values = ( (1., 2., 3., 4., 5.), # id 0 (6., 7., 8., 9., 10.), # id 1 (11., 12., 13., 14., 15.) 
# id 2 ) def _initializer(shape, dtype, partition_info): del shape, dtype, partition_info return embedding_values # price has 1 dimension in input_layer price = fc._numeric_column('price') # one_hot_body_style has 3 dims in input_layer. body_style = fc._categorical_column_with_vocabulary_list( 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan']) one_hot_body_style = fc._indicator_column(body_style) # embedded_body_style has 5 dims in input_layer. country = fc._categorical_column_with_vocabulary_list( 'country', vocabulary_list=['US', 'JP', 'CA']) embedded_country = fc._embedding_column( country, dimension=5, initializer=_initializer) # Provides 1-dim tensor and dense tensor. features = { 'price': constant_op.constant([11., 12.,]), 'body-style': sparse_tensor.SparseTensor( indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,)), # This is dense tensor for the categorical_column. 'country': constant_op.constant(['CA', 'US']), } self.assertEqual(1, features['price'].shape.ndims) self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0]) self.assertEqual(1, features['country'].shape.ndims) net = fc.input_layer(features, [price, one_hot_body_style, embedded_country]) self.assertEqual(1 + 3 + 5, net.shape[1]) with _initialized_session(): # Each row is formed by concatenating `embedded_body_style`, # `one_hot_body_style`, and `price` in order. self.assertAllEqual( [[0., 0., 1., 11., 12., 13., 14., 15., 11.], [1., 0., 0., 1., 2., 3., 4., 5., 12.]], self.evaluate(net)) @test_util.run_deprecated_v1 # Placeholders are TF1. Replacing with tf.function not feasible because of V1 # variable creation. def test_with_1d_unknown_shape_sparse_tensor(self): embedding_values = ( (1., 2.), # id 0 (6., 7.), # id 1 (11., 12.) 
# id 2 ) def _initializer(shape, dtype, partition_info): del shape, dtype, partition_info return embedding_values # price has 1 dimension in input_layer price = fc._numeric_column('price') # one_hot_body_style has 3 dims in input_layer. body_style = fc._categorical_column_with_vocabulary_list( 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan']) one_hot_body_style = fc._indicator_column(body_style) # embedded_body_style has 5 dims in input_layer. country = fc._categorical_column_with_vocabulary_list( 'country', vocabulary_list=['US', 'JP', 'CA']) embedded_country = fc._embedding_column( country, dimension=2, initializer=_initializer) # Provides 1-dim tensor and dense tensor. features = { 'price': array_ops.placeholder(dtypes.float32), 'body-style': array_ops.sparse_placeholder(dtypes.string), # This is dense tensor for the categorical_column. 'country': array_ops.placeholder(dtypes.string), } self.assertIsNone(features['price'].shape.ndims) self.assertIsNone(features['body-style'].get_shape().ndims) self.assertIsNone(features['country'].shape.ndims) price_data = np.array([11., 12.]) body_style_data = sparse_tensor.SparseTensorValue( indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,)) country_data = np.array([['US'], ['CA']]) net = fc.input_layer(features, [price, one_hot_body_style, embedded_country]) self.assertEqual(1 + 3 + 2, net.shape[1]) with _initialized_session() as sess: # Each row is formed by concatenating `embedded_body_style`, # `one_hot_body_style`, and `price` in order. self.assertAllEqual( [[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]], sess.run( net, feed_dict={ features['price']: price_data, features['body-style']: body_style_data, features['country']: country_data })) @test_util.run_deprecated_v1 # Placeholders are TF1. Replacing with tf.function not feasible because of V1 # variable creation. 
def test_with_rank_0_feature(self): # price has 1 dimension in input_layer price = fc._numeric_column('price') features = { 'price': constant_op.constant(0), } self.assertEqual(0, features['price'].shape.ndims) # Static rank 0 should fail with self.assertRaisesRegex(ValueError, 'Feature .* cannot have rank 0'): fc.input_layer(features, [price]) # Dynamic rank 0 should fail features = { 'price': array_ops.placeholder(dtypes.float32), } net = fc.input_layer(features, [price]) self.assertEqual(1, net.shape[1]) with _initialized_session() as sess: with self.assertRaisesOpError('Feature .* cannot have rank 0'): sess.run(net, feed_dict={features['price']: np.array(1)})
FunctionalInputLayerTest
python
lxml__lxml
src/lxml/tests/test_etree.py
{ "start": 203833, "end": 209627 }
class ____(unittest.TestCase): etree = etree def assert_event_tags(self, events, expected): self.assertEqual([(action, elem.tag) for action, elem in events], expected) def test_pull_from_simple_target(self): class Target: def start(self, tag, attrib): return 'start(%s)' % tag def end(self, tag): return 'end(%s)' % tag def close(self): return 'close()' parser = self.etree.XMLPullParser(target=Target()) events = parser.read_events() parser.feed('<root><element>') self.assertFalse(list(events)) self.assertFalse(list(events)) parser.feed('</element><child>') self.assertEqual([('end', 'end(element)')], list(events)) parser.feed('</child>') self.assertEqual([('end', 'end(child)')], list(events)) parser.feed('</root>') self.assertEqual([('end', 'end(root)')], list(events)) self.assertFalse(list(events)) self.assertEqual('close()', parser.close()) def test_pull_from_simple_target_start_end(self): class Target: def start(self, tag, attrib): return 'start(%s)' % tag def end(self, tag): return 'end(%s)' % tag def close(self): return 'close()' parser = self.etree.XMLPullParser( ['start', 'end'], target=Target()) events = parser.read_events() parser.feed('<root><element>') self.assertEqual( [('start', 'start(root)'), ('start', 'start(element)')], list(events)) self.assertFalse(list(events)) parser.feed('</element><child>') self.assertEqual( [('end', 'end(element)'), ('start', 'start(child)')], list(events)) parser.feed('</child>') self.assertEqual( [('end', 'end(child)')], list(events)) parser.feed('</root>') self.assertEqual( [('end', 'end(root)')], list(events)) self.assertFalse(list(events)) self.assertEqual('close()', parser.close()) def test_pull_from_tree_builder(self): parser = self.etree.XMLPullParser( ['start', 'end'], target=etree.TreeBuilder()) events = parser.read_events() parser.feed('<root><element>') self.assert_event_tags( events, [('start', 'root'), ('start', 'element')]) self.assertFalse(list(events)) parser.feed('</element><child>') self.assert_event_tags( 
events, [('end', 'element'), ('start', 'child')]) parser.feed('</child>') self.assert_event_tags( events, [('end', 'child')]) parser.feed('</root>') self.assert_event_tags( events, [('end', 'root')]) self.assertFalse(list(events)) root = parser.close() self.assertEqual('root', root.tag) def test_pull_from_tree_builder_subclass(self): class Target(etree.TreeBuilder): def end(self, tag): el = super().end(tag) el.tag += '-huhu' return el parser = self.etree.XMLPullParser( ['start', 'end'], target=Target()) events = parser.read_events() parser.feed('<root><element>') self.assert_event_tags( events, [('start', 'root'), ('start', 'element')]) self.assertFalse(list(events)) parser.feed('</element><child>') self.assert_event_tags( events, [('end', 'element-huhu'), ('start', 'child')]) parser.feed('</child>') self.assert_event_tags( events, [('end', 'child-huhu')]) parser.feed('</root>') self.assert_event_tags( events, [('end', 'root-huhu')]) self.assertFalse(list(events)) root = parser.close() self.assertEqual('root-huhu', root.tag) def test_suite(): suite = unittest.TestSuite() suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(ETreeOnlyTestCase)]) suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(ETreeXIncludeTestCase)]) suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(ElementIncludeTestCase)]) suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(ETreeC14NTestCase)]) suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(ETreeWriteTestCase)]) suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(ETreeErrorLogTest)]) suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(XMLPullParserTest)]) # add original doctests from ElementTree selftest modules from . 
import selftest, selftest2 suite.addTests(doctest.DocTestSuite(selftest)) suite.addTests(doctest.DocTestSuite(selftest2)) # add doctests doctest_stubs = {} if 'schematron' not in etree.LIBXML_COMPILED_FEATURES: # See doctest of class "lxml.etree.Schematron". class FakeSchematron: def __init__(self, schema): self._results = iter([0, 1]) def validate(self, xml): return next(self._results) doctest_stubs['Schematron'] = FakeSchematron suite.addTests(doctest.DocTestSuite(etree, extraglobs=doctest_stubs)) suite.addTests( [make_doctest('tutorial.txt')]) suite.addTests( [make_doctest('api.txt')]) suite.addTests( [make_doctest('FAQ.txt')]) suite.addTests( [make_doctest('parsing.txt')]) suite.addTests( [make_doctest('resolvers.txt')]) return suite if __name__ == '__main__': print('to test use test.py %s' % __file__)
XMLPullParserTest
python
wandb__wandb
wandb/vendor/graphql-core-1.1/wandb_graphql/language/lexer.py
{ "start": 1202, "end": 11467 }
class ____(object):
    # Enumeration of every token kind the GraphQL lexer can produce.
    EOF = 1
    BANG = 2
    DOLLAR = 3
    PAREN_L = 4
    PAREN_R = 5
    SPREAD = 6
    COLON = 7
    EQUALS = 8
    AT = 9
    BRACKET_L = 10
    BRACKET_R = 11
    BRACE_L = 12
    PIPE = 13
    BRACE_R = 14
    NAME = 15
    VARIABLE = 16
    INT = 17
    FLOAT = 18
    STRING = 19


def get_token_desc(token):
    """Return a human-readable description of a token, e.g. 'Name "foo"'."""
    if token.value:
        return u'{} "{}"'.format(
            get_token_kind_desc(token.kind),
            token.value
        )
    else:
        return get_token_kind_desc(token.kind)


def get_token_kind_desc(kind):
    """Return the printable description for a token-kind constant."""
    return TOKEN_DESCRIPTION[kind]


# Maps each TokenKind constant to the text shown in error messages.
TOKEN_DESCRIPTION = {
    TokenKind.EOF: 'EOF',
    TokenKind.BANG: '!',
    TokenKind.DOLLAR: '$',
    TokenKind.PAREN_L: '(',
    TokenKind.PAREN_R: ')',
    TokenKind.SPREAD: '...',
    TokenKind.COLON: ':',
    TokenKind.EQUALS: '=',
    TokenKind.AT: '@',
    TokenKind.BRACKET_L: '[',
    TokenKind.BRACKET_R: ']',
    TokenKind.BRACE_L: '{',
    TokenKind.PIPE: '|',
    TokenKind.BRACE_R: '}',
    TokenKind.NAME: 'Name',
    TokenKind.VARIABLE: 'Variable',
    TokenKind.INT: 'Int',
    TokenKind.FLOAT: 'Float',
    TokenKind.STRING: 'String',
}


def char_code_at(s, pos):
    """Return the code point of ``s[pos]``, or None when ``pos`` is out of range."""
    if 0 <= pos < len(s):
        return ord(s[pos])
    return None


# Single-character punctuators that map directly to a token kind.
PUNCT_CODE_TO_KIND = {
    ord('!'): TokenKind.BANG,
    ord('$'): TokenKind.DOLLAR,
    ord('('): TokenKind.PAREN_L,
    ord(')'): TokenKind.PAREN_R,
    ord(':'): TokenKind.COLON,
    ord('='): TokenKind.EQUALS,
    ord('@'): TokenKind.AT,
    ord('['): TokenKind.BRACKET_L,
    ord(']'): TokenKind.BRACKET_R,
    ord('{'): TokenKind.BRACE_L,
    ord('|'): TokenKind.PIPE,
    ord('}'): TokenKind.BRACE_R,
}


def print_char_code(code):
    """Render a code point for error messages (quoted char or \\uXXXX form)."""
    if code is None:
        return '<EOF>'
    if code < 0x007F:
        # Printable ASCII: show the quoted character itself.
        return json.dumps(chr(code))
    return '"\\u%04X"' % code


def read_token(source, from_position):
    """Gets the next token from the source starting at the given position.

    This skips over whitespace and comments until it finds the next lexable
    token, then lexes punctuators immediately or calls the appropriate
    helper function for more complicated tokens."""
    body = source.body
    body_length = len(body)

    position = position_after_whitespace(body, from_position)

    if position >= body_length:
        return Token(TokenKind.EOF, position, position)

    code = char_code_at(body, position)
    # Only tab/newline/carriage-return are allowed below U+0020.
    if code < 0x0020 and code not in (0x0009, 0x000A, 0x000D):
        raise GraphQLSyntaxError(
            source, position,
            u'Invalid character {}.'.format(print_char_code(code))
        )

    kind = PUNCT_CODE_TO_KIND.get(code)
    if kind is not None:
        return Token(kind, position, position + 1)

    if code == 46:  # .
        # A spread token is exactly three consecutive dots.
        if char_code_at(body, position + 1) == char_code_at(body, position + 2) == 46:
            return Token(TokenKind.SPREAD, position, position + 3)
    elif 65 <= code <= 90 or code == 95 or 97 <= code <= 122:  # A-Z, _, a-z
        return read_name(source, position)
    elif code == 45 or 48 <= code <= 57:  # -, 0-9
        return read_number(source, position, code)
    elif code == 34:  # "
        return read_string(source, position)

    raise GraphQLSyntaxError(
        source, position,
        u'Unexpected character {}.'.format(print_char_code(code)))


ignored_whitespace_characters = frozenset([
    # BOM
    0xFEFF,
    # White Space
    0x0009,  # tab
    0x0020,  # space
    # Line Terminator
    0x000A,  # new line
    0x000D,  # carriage return
    # Comma
    0x002C
])


def position_after_whitespace(body, start_position):
    """Reads from body starting at start_position until it finds a
    non-whitespace or commented character, then returns the position of
    that character for lexing."""
    body_length = len(body)
    position = start_position
    while position < body_length:
        code = char_code_at(body, position)
        if code in ignored_whitespace_characters:
            position += 1

        elif code == 35:  # #, skip comments
            position += 1
            while position < body_length:
                code = char_code_at(body, position)
                # A comment runs to the end of the line (or EOF).
                if not (code is not None and (code > 0x001F or code == 0x0009) and code not in (0x000A, 0x000D)):
                    break

                position += 1
        else:
            break
    return position


def read_number(source, start, first_code):
    """Reads a number token from the source file, either a float
    or an int depending on whether a decimal point appears.
    """
    code = first_code
    body = source.body
    position = start
    is_float = False

    if code == 45:  # -
        position += 1
        code = char_code_at(body, position)

    if code == 48:  # 0
        # A leading zero may not be followed by another digit.
        position += 1
        code = char_code_at(body, position)

        if code is not None and 48 <= code <= 57:
            raise GraphQLSyntaxError(
                source, position,
                u'Invalid number, unexpected digit after 0: {}.'.format(print_char_code(code))
            )
    else:
        position = read_digits(source, position, code)
        code = char_code_at(body, position)

    if code == 46:  # .
        is_float = True
        position += 1
        code = char_code_at(body, position)
        position = read_digits(source, position, code)
        code = char_code_at(body, position)

    if code in (69, 101):  # E e
        is_float = True
        position += 1
        code = char_code_at(body, position)
        if code in (43, 45):  # + -
            position += 1
            code = char_code_at(body, position)

        position = read_digits(source, position, code)

    return Token(
        TokenKind.FLOAT if is_float else TokenKind.INT,
        start,
        position,
        body[start:position]
    )


def read_digits(source, start, first_code):
    """Consume a run of ASCII digits; return the position just past them.

    Raises GraphQLSyntaxError if the first character is not a digit."""
    body = source.body
    position = start
    code = first_code

    if code is not None and 48 <= code <= 57:  # 0 - 9
        while True:
            position += 1
            code = char_code_at(body, position)

            if not (code is not None and 48 <= code <= 57):
                break

        return position

    raise GraphQLSyntaxError(
        source, position,
        u'Invalid number, expected digit but got: {}.'.format(print_char_code(code))
    )


# Maps the code point after a backslash to the character it escapes.
ESCAPED_CHAR_CODES = {
    34: '"',
    47: '/',
    92: '\\',
    98: '\b',
    102: '\f',
    110: '\n',
    114: '\r',
    116: '\t',
}


def read_string(source, start):
    r"""Reads a string token from the source file.

    "([^"\\\u000A\u000D\u2028\u2029]|(\\(u[0-9a-fA-F]{4}|["\\/bfnrt])))*"
    """
    body = source.body
    body_length = len(body)

    position = start + 1
    chunk_start = position
    code = 0
    value = []
    append = value.append

    while position < body_length:
        code = char_code_at(body, position)
        if not (
            code is not None and
            code not in (
                # LineTerminator
                0x000A, 0x000D,
                # Quote
                34
            )
        ):
            break

        if code < 0x0020 and code != 0x0009:
            raise GraphQLSyntaxError(
                source, position,
                u'Invalid character within String: {}.'.format(print_char_code(code))
            )

        position += 1
        if code == 92:  # \
            # Flush the literal chunk before the backslash, then decode the escape.
            append(body[chunk_start:position - 1])

            code = char_code_at(body, position)
            escaped = ESCAPED_CHAR_CODES.get(code)

            if escaped is not None:
                append(escaped)

            elif code == 117:  # u
                char_code = uni_char_code(
                    char_code_at(body, position + 1) or 0,
                    char_code_at(body, position + 2) or 0,
                    char_code_at(body, position + 3) or 0,
                    char_code_at(body, position + 4) or 0,
                )

                if char_code < 0:
                    raise GraphQLSyntaxError(
                        source, position,
                        u'Invalid character escape sequence: \\u{}.'.format(body[position + 1: position + 5])
                    )

                append(chr(char_code))
                position += 4
            else:
                raise GraphQLSyntaxError(
                    source, position,
                    u'Invalid character escape sequence: \\{}.'.format(chr(code))
                )

            position += 1
            chunk_start = position

    if code != 34:  # Quote (")
        raise GraphQLSyntaxError(source, position, 'Unterminated string')

    append(body[chunk_start:position])
    return Token(TokenKind.STRING, start, position + 1, u''.join(value))


def uni_char_code(a, b, c, d):
    """Converts four hexadecimal chars to the integer that the
    string represents. For example, uniCharCode('0','0','0','f')
    will return 15, and uniCharCode('0','0','f','f') returns 255.

    Returns a negative number on error, if a char was invalid.

    This is implemented by noting that char2hex() returns -1 on error,
    which means the result of ORing the char2hex() will also be negative.
    """
    return (char2hex(a) << 12 | char2hex(b) << 8 |
            char2hex(c) << 4 | char2hex(d))


def char2hex(a):
    """Converts a hex character to its integer value.
    '0' becomes 0, '9' becomes 9
    'A' becomes 10, 'F' becomes 15
    'a' becomes 10, 'f' becomes 15

    Returns -1 on error."""
    if 48 <= a <= 57:  # 0-9
        return a - 48
    elif 65 <= a <= 70:  # A-F
        return a - 55
    elif 97 <= a <= 102:  # a-f
        return a - 87
    return -1


def read_name(source, position):
    """Reads an alphanumeric + underscore name from the source.

    [_A-Za-z][_0-9A-Za-z]*"""
    body = source.body
    body_length = len(body)
    end = position + 1

    while end != body_length:
        code = char_code_at(body, end)
        if not (code is not None and (
            code == 95 or  # _
            48 <= code <= 57 or  # 0-9
            65 <= code <= 90 or  # A-Z
            97 <= code <= 122  # a-z
        )):
            break

        end += 1

    return Token(TokenKind.NAME, position, end, body[position:end])
TokenKind
python
pytorch__pytorch
torch/_lobpcg.py
{ "start": 10492, "end": 26514 }
class ____(torch.autograd.Function):
    # Autograd wrapper for the LOBPCG eigensolver: forward delegates to
    # `_lobpcg`, backward implements gradients for the standard (B is None)
    # symmetric eigenproblem only.

    @staticmethod
    def forward(  # type: ignore[override]
        ctx,
        A: Tensor,
        k: Optional[int] = None,
        B: Optional[Tensor] = None,
        X: Optional[Tensor] = None,
        n: Optional[int] = None,
        iK: Optional[Tensor] = None,
        niter: Optional[int] = None,
        tol: Optional[float] = None,
        largest: Optional[bool] = None,
        method: Optional[str] = None,
        tracker: None = None,
        ortho_iparams: Optional[dict[str, int]] = None,
        ortho_fparams: Optional[dict[str, float]] = None,
        ortho_bparams: Optional[dict[str, bool]] = None,
    ) -> tuple[Tensor, Tensor]:
        # makes sure that input is contiguous for efficiency.
        # Note: autograd does not support dense gradients for sparse input yet.
        A = A.contiguous() if (not A.is_sparse) else A
        if B is not None:
            B = B.contiguous() if (not B.is_sparse) else B

        D, U = _lobpcg(
            A,
            k,
            B,
            X,
            n,
            iK,
            niter,
            tol,
            largest,
            method,
            tracker,
            ortho_iparams,
            ortho_fparams,
            ortho_bparams,
        )

        # Save tensors needed by `_symeig_backward` in the backward pass.
        ctx.save_for_backward(A, B, D, U)
        ctx.largest = largest

        return D, U

    @staticmethod
    def backward(ctx, D_grad, U_grad):  # pyrefly: ignore  # bad-override
        A_grad = B_grad = None
        # One gradient slot per forward() argument (14 of them); only A's
        # (index 0) and B's (index 2) slots may be filled.
        grads = [None] * 14

        A, B, D, U = ctx.saved_tensors
        largest = ctx.largest

        # lobpcg.backward has some limitations. Checks for unsupported input
        if A.is_sparse or (B is not None and B.is_sparse and ctx.needs_input_grad[2]):
            raise ValueError(
                "lobpcg.backward does not support sparse input yet."
                "Note that lobpcg.forward does though."
            )
        if (
            A.dtype in (torch.complex64, torch.complex128)
            or B is not None
            and B.dtype in (torch.complex64, torch.complex128)
        ):
            raise ValueError(
                "lobpcg.backward does not support complex input yet."
                "Note that lobpcg.forward does though."
            )
        if B is not None:
            raise ValueError(
                "lobpcg.backward does not support backward with B != I yet."
            )

        if largest is None:
            largest = True

        # symeig backward
        if B is None:
            A_grad = _symeig_backward(D_grad, U_grad, A, D, U, largest)

        # A has index 0
        grads[0] = A_grad
        # B has index 2
        grads[2] = B_grad
        return tuple(grads)


def lobpcg(
    A: Tensor,
    k: Optional[int] = None,
    B: Optional[Tensor] = None,
    X: Optional[Tensor] = None,
    n: Optional[int] = None,
    iK: Optional[Tensor] = None,
    niter: Optional[int] = None,
    tol: Optional[float] = None,
    largest: Optional[bool] = None,
    method: Optional[str] = None,
    tracker: None = None,
    ortho_iparams: Optional[dict[str, int]] = None,
    ortho_fparams: Optional[dict[str, float]] = None,
    ortho_bparams: Optional[dict[str, bool]] = None,
) -> tuple[Tensor, Tensor]:
    """Find the k largest (or smallest) eigenvalues and the corresponding
    eigenvectors of a symmetric positive definite generalized
    eigenvalue problem using matrix-free LOBPCG methods.

    This function is a front-end to the following LOBPCG algorithms
    selectable via `method` argument:

      `method="basic"` - the LOBPCG method introduced by Andrew
      Knyazev, see [Knyazev2001]. A less robust method, may fail when
      Cholesky is applied to singular input.

      `method="ortho"` - the LOBPCG method with orthogonal basis
      selection [StathopoulosEtal2002]. A robust method.

    Supported inputs are dense, sparse, and batches of dense matrices.

    .. note:: In general, the basic method spends least time per
      iteration. However, the robust methods converge much faster and
      are more stable. So, the usage of the basic method is generally
      not recommended but there exist cases where the usage of the
      basic method may be preferred.

    .. warning:: The backward method does not support sparse and complex inputs.
      It works only when `B` is not provided (i.e. `B == None`).
      We are actively working on extensions, and the details of
      the algorithms are going to be published promptly.

    .. warning:: While it is assumed that `A` is symmetric, `A.grad` is not.
      To make sure that `A.grad` is symmetric, so that `A - t * A.grad` is
      symmetric in first-order optimization routines, prior to running
      `lobpcg` we do the following symmetrization map: `A -> (A + A.t()) / 2`.
      The map is performed only when the `A` requires gradients.

    .. warning:: LOBPCG algorithm is not applicable when the number of `A`'s
      rows is smaller than 3 x the number of requested eigenpairs `n`.

    Args:

      A (Tensor): the input tensor of size :math:`(*, m, m)`

      k (integer, optional): the number of requested
                  eigenpairs. Default is the number of :math:`X`
                  columns (when specified) or `1`.

      B (Tensor, optional): the input tensor of size :math:`(*, m,
                  m)`. When not specified, `B` is interpreted as
                  identity matrix.

      X (tensor, optional): the input tensor of size :math:`(*, m, n)`
                  where `k <= n <= m`. When specified, it is used as
                  initial approximation of eigenvectors. X must be a
                  dense tensor.

      n (integer, optional): if :math:`X` is not specified then `n`
                  specifies the size of the generated random
                  approximation of eigenvectors. Default value for `n`
                  is `k`. If :math:`X` is specified, any provided value
                  of `n` is ignored and `n` is automatically set to the
                  number of columns in :math:`X`.

      iK (tensor, optional): the input tensor of size :math:`(*, m,
                  m)`. When specified, it will be used as preconditioner.

      niter (int, optional): maximum number of iterations. When
                  reached, the iteration process is hard-stopped and
                  the current approximation of eigenpairs is returned.
                  For infinite iteration but until convergence criteria
                  is met, use `-1`.

      tol (float, optional): residual tolerance for stopping
                 criterion. Default is `feps ** 0.5` where `feps` is
                 smallest non-zero floating-point number of the given
                 input tensor `A` data type.

      largest (bool, optional): when True, solve the eigenproblem for
                 the largest eigenvalues. Otherwise, solve the
                 eigenproblem for smallest eigenvalues. Default is
                 `True`.

      method (str, optional): select LOBPCG method. See the
                 description of the function above. Default is "ortho".

      tracker (callable, optional) : a function for tracing the
                 iteration process. When specified, it is called at
                 each iteration step with LOBPCG instance as an
                 argument. The LOBPCG instance holds the full state of
                 the iteration process in the following attributes:

                   `iparams`, `fparams`, `bparams` - dictionaries of
                   integer, float, and boolean valued input
                   parameters, respectively

                   `ivars`, `fvars`, `bvars`, `tvars` - dictionaries
                   of integer, float, boolean, and Tensor valued
                   iteration variables, respectively.

                   `A`, `B`, `iK` - input Tensor arguments.

                   `E`, `X`, `S`, `R` - iteration Tensor variables.

                 For instance:

                   `ivars["istep"]` - the current iteration step
                   `X` - the current approximation of eigenvectors
                   `E` - the current approximation of eigenvalues
                   `R` - the current residual
                   `ivars["converged_count"]` - the current number of
                   converged eigenpairs
                   `tvars["rerr"]` - the current state of convergence
                   criteria

                 Note that when `tracker` stores Tensor objects from
                 the LOBPCG instance, it must make copies of these.

                 If `tracker` sets `bvars["force_stop"] = True`, the
                 iteration process will be hard-stopped.

      ortho_iparams, ortho_fparams, ortho_bparams (dict, optional):
                 various parameters to LOBPCG algorithm when using
                 `method="ortho"`.

    Returns:

      E (Tensor): tensor of eigenvalues of size :math:`(*, k)`

      X (Tensor): tensor of eigenvectors of size :math:`(*, m, k)`

    References:

      [Knyazev2001] Andrew V. Knyazev. (2001) Toward the Optimal
      Preconditioned Eigensolver: Locally Optimal Block Preconditioned
      Conjugate Gradient Method. SIAM J. Sci. Comput., 23(2),
      517-541. (25 pages)
      https://epubs.siam.org/doi/abs/10.1137/S1064827500366124

      [StathopoulosEtal2002] Andreas Stathopoulos and Kesheng
      Wu. (2002) A Block Orthogonalization Procedure with Constant
      Synchronization Requirements. SIAM J. Sci. Comput., 23(6),
      2165-2182. (18 pages)
      https://epubs.siam.org/doi/10.1137/S1064827500370883

      [DuerschEtal2018] Jed A. Duersch, Meiyue Shao, Chao Yang, Ming
      Gu. (2018) A Robust and Efficient Implementation of LOBPCG.
      SIAM J. Sci. Comput., 40(5), C655-C676. (22 pages)
      https://arxiv.org/abs/1704.07458
    """
    if not torch.jit.is_scripting():
        tensor_ops = (A, B, X, iK)
        # __torch_function__ dispatch for tensor-like subclasses.
        if not set(map(type, tensor_ops)).issubset(
            (torch.Tensor, type(None))
        ) and has_torch_function(tensor_ops):
            return handle_torch_function(
                lobpcg,
                tensor_ops,
                A,
                k=k,
                B=B,
                X=X,
                n=n,
                iK=iK,
                niter=niter,
                tol=tol,
                largest=largest,
                method=method,
                tracker=tracker,
                ortho_iparams=ortho_iparams,
                ortho_fparams=ortho_fparams,
                ortho_bparams=ortho_bparams,
            )

    if not torch._jit_internal.is_scripting():
        if A.requires_grad or (B is not None and B.requires_grad):
            # While it is expected that `A` is symmetric,
            # the `A_grad` might be not. Therefore we perform the trick below,
            # so that `A_grad` becomes symmetric.
            # The symmetrization is important for first-order optimization methods,
            # so that (A - alpha * A_grad) is still a symmetric matrix.
            # Same holds for `B`.
            A_sym = (A + A.mT) / 2
            B_sym = (B + B.mT) / 2 if (B is not None) else None

            return LOBPCGAutogradFunction.apply(
                A_sym,
                k,
                B_sym,
                X,
                n,
                iK,
                niter,
                tol,
                largest,
                method,
                tracker,
                ortho_iparams,
                ortho_fparams,
                ortho_bparams,
            )
    else:
        if A.requires_grad or (B is not None and B.requires_grad):
            raise RuntimeError(
                "Script and require grads is not supported atm."
                "If you just want to do the forward, use .detach()"
                "on A and B before calling into lobpcg"
            )

    return _lobpcg(
        A,
        k,
        B,
        X,
        n,
        iK,
        niter,
        tol,
        largest,
        method,
        tracker,
        ortho_iparams,
        ortho_fparams,
        ortho_bparams,
    )


def _lobpcg(
    A: Tensor,
    k: Optional[int] = None,
    B: Optional[Tensor] = None,
    X: Optional[Tensor] = None,
    n: Optional[int] = None,
    iK: Optional[Tensor] = None,
    niter: Optional[int] = None,
    tol: Optional[float] = None,
    largest: Optional[bool] = None,
    method: Optional[str] = None,
    tracker: None = None,
    ortho_iparams: Optional[dict[str, int]] = None,
    ortho_fparams: Optional[dict[str, float]] = None,
    ortho_bparams: Optional[dict[str, bool]] = None,
) -> tuple[Tensor, Tensor]:
    # TorchScript-compatible implementation; handles argument defaulting,
    # batching, and drives the LOBPCG worker class.

    # A must be square:
    assert A.shape[-2] == A.shape[-1], A.shape
    if B is not None:
        # A and B must have the same shapes:
        assert A.shape == B.shape, (A.shape, B.shape)

    dtype = _utils.get_floating_dtype(A)
    device = A.device
    if tol is None:
        # Default tolerance: sqrt of the dtype's machine epsilon.
        feps = {torch.float32: 1.2e-07, torch.float64: 2.23e-16}[dtype]
        tol = feps**0.5

    m = A.shape[-1]
    k = (1 if X is None else X.shape[-1]) if k is None else k
    n = (k if n is None else n) if X is None else X.shape[-1]

    if m < 3 * n:
        raise ValueError(
            f"LPBPCG algorithm is not applicable when the number of A rows (={m})"
            f" is smaller than 3 x the number of requested eigenpairs (={n})"
        )

    method = "ortho" if method is None else method

    iparams = {
        "m": m,
        "n": n,
        "k": k,
        "niter": 1000 if niter is None else niter,
    }

    fparams = {
        "tol": tol,
    }

    bparams = {"largest": True if largest is None else largest}

    if method == "ortho":
        # Merge user overrides for the "ortho" method, then fill defaults.
        if ortho_iparams is not None:
            iparams.update(ortho_iparams)
        if ortho_fparams is not None:
            fparams.update(ortho_fparams)
        if ortho_bparams is not None:
            bparams.update(ortho_bparams)
        iparams["ortho_i_max"] = iparams.get("ortho_i_max", 3)
        iparams["ortho_j_max"] = iparams.get("ortho_j_max", 3)
        fparams["ortho_tol"] = fparams.get("ortho_tol", tol)
        fparams["ortho_tol_drop"] = fparams.get("ortho_tol_drop", tol)
        fparams["ortho_tol_replace"] = fparams.get("ortho_tol_replace", tol)
        bparams["ortho_use_drop"] = bparams.get("ortho_use_drop", False)

    if not torch.jit.is_scripting():
        LOBPCG.call_tracker = LOBPCG_call_tracker  # type: ignore[method-assign]

    if len(A.shape) > 2:
        # Batched input: flatten the batch dims and solve each problem in turn.
        N = int(torch.prod(torch.tensor(A.shape[:-2])))
        bA = A.reshape((N,) + A.shape[-2:])
        bB = B.reshape((N,) + A.shape[-2:]) if B is not None else None
        bX = X.reshape((N,) + X.shape[-2:]) if X is not None else None
        bE = torch.empty((N, k), dtype=dtype, device=device)
        bXret = torch.empty((N, m, k), dtype=dtype, device=device)

        for i in range(N):
            A_ = bA[i]
            B_ = bB[i] if bB is not None else None
            X_ = (
                torch.randn((m, n), dtype=dtype, device=device)
                if bX is None
                else bX[i]
            )
            assert len(X_.shape) == 2 and X_.shape == (m, n), (X_.shape, (m, n))
            iparams["batch_index"] = i
            worker = LOBPCG(A_, B_, X_, iK, iparams, fparams, bparams, method, tracker)
            worker.run()
            bE[i] = worker.E[:k]
            bXret[i] = worker.X[:, :k]

        if not torch.jit.is_scripting():
            LOBPCG.call_tracker = LOBPCG_call_tracker_orig  # type: ignore[method-assign]

        return bE.reshape(A.shape[:-2] + (k,)), bXret.reshape(A.shape[:-2] + (m, k))

    # Unbatched path: generate a random initial subspace when X is absent.
    X = torch.randn((m, n), dtype=dtype, device=device) if X is None else X
    assert len(X.shape) == 2 and X.shape == (m, n), (X.shape, (m, n))

    worker = LOBPCG(A, B, X, iK, iparams, fparams, bparams, method, tracker)

    worker.run()

    if not torch.jit.is_scripting():
        LOBPCG.call_tracker = LOBPCG_call_tracker_orig  # type: ignore[method-assign]

    return worker.E[:k], worker.X[:, :k]
LOBPCGAutogradFunction
python
keras-team__keras
keras/src/ops/math.py
{ "start": 11293, "end": 13478 }
class ____(Operation):
    """Symbolic 1D FFT over the last axis of a (real, imaginary) tensor pair."""

    def compute_output_spec(self, x):
        # `x` must be a two-element (real, imaginary) pair.
        if not isinstance(x, (tuple, list)) or len(x) != 2:
            raise ValueError(
                "Input `x` should be a tuple of two tensors - real and "
                f"imaginary. Received: x={x}"
            )

        real_part, imag_part = x

        # The two components must agree on shape.
        if real_part.shape != imag_part.shape:
            raise ValueError(
                "Input `x` should be a tuple of two tensors - real and "
                "imaginary. Both the real and imaginary parts should have the "
                f"same shape. Received: x[0].shape = {real_part.shape}, "
                f"x[1].shape = {imag_part.shape}"
            )

        # A 1D FFT needs at least one axis to transform.
        if len(real_part.shape) < 1:
            raise ValueError(
                f"Input should have rank >= 1. "
                f"Received: input.shape = {real_part.shape}"
            )

        # The transformed (last) axis must have a statically known size.
        last_dim = real_part.shape[-1]
        if last_dim is None:
            raise ValueError(
                f"Input should have its last dimension fully-defined. "
                f"Received: input.shape = {real_part.shape}"
            )

        real_spec = KerasTensor(shape=real_part.shape, dtype=real_part.dtype)
        imag_spec = KerasTensor(shape=imag_part.shape, dtype=imag_part.dtype)
        return (real_spec, imag_spec)

    def call(self, x):
        return backend.math.fft(x)


@keras_export("keras.ops.fft")
def fft(x):
    """Computes the Fast Fourier Transform along last axis of input.

    Args:
        x: Tuple of the real and imaginary parts of the input tensor. Both
            tensors in the tuple should be of floating type.

    Returns:
        A tuple containing two tensors - the real and imaginary parts of the
        output tensor.

    Example:

    >>> x = (
    ...     keras.ops.convert_to_tensor([1., 2.]),
    ...     keras.ops.convert_to_tensor([0., 1.]),
    ... )
    >>> fft(x)
    (array([ 3., -1.], dtype=float32), array([ 1., -1.], dtype=float32))
    """
    if any_symbolic_tensors(x):
        return FFT().symbolic_call(x)
    return backend.math.fft(x)
FFT
python
getsentry__sentry
src/sentry/uptime/migrations/0049_cleanup_failed_safe_deletes.py
{ "start": 207, "end": 1704 }
class ____(CheckedMigration):
    # This flag is used to mark that a migration shouldn't be automatically run in production.
    # This should only be used for operations where it's safe to run the migration after your
    # code has deployed. So this should not be used for most operations that alter the schema
    # of a table.
    # Here are some things that make sense to mark as post deployment:
    # - Large data migrations. Typically we want these to be run manually so that they can be
    #   monitored and not block the deploy for a long period of time while they run.
    # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
    #   run this outside deployments so that we don't block them. Note that while adding an index
    #   is a schema change, it's completely safe to run the operation after the code has deployed.
    # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment

    is_post_deployment = False

    dependencies = [
        ("uptime", "0048_delete_uptime_status_columns"),
    ]

    operations = [
        # Clean up table that may not have been deleted due to missing
        # historical_silo_assignments entry before the fix
        SafeRunSQL(
            sql="DROP TABLE IF EXISTS uptime_projectuptimesubscription CASCADE;",
            # The drop is a one-way cleanup, so the reverse migration is a no-op.
            reverse_sql=migrations.RunSQL.noop,
            hints={"tables": ["uptime_projectuptimesubscription"]},
        ),
    ]
Migration
python
numba__numba
numba/cuda/tests/cudadrv/test_context_stack.py
{ "start": 190, "end": 679 }
class ____(CUDATestCase):
    """Exercise the `cuda.gpus` context-stack API (current/len/iter)."""

    def setUp(self):
        super().setUp()
        # Start every test from a clean slate: tear down any active context.
        cuda.close()

    def test_gpus_current(self):
        # No context has been pushed yet, so there is no current GPU.
        self.assertIsNone(cuda.gpus.current)
        with cuda.gpus[0]:
            self.assertEqual(int(cuda.gpus.current.id), 0)

    def test_gpus_len(self):
        self.assertGreater(len(cuda.gpus), 0)

    def test_gpus_iter(self):
        devices = list(cuda.gpus)
        self.assertGreater(len(devices), 0)
TestContextStack
python
run-llama__llama_index
llama-index-finetuning/llama_index/finetuning/types.py
{ "start": 614, "end": 922 }
class ____(ABC):
    """Base Embedding finetuning engine.

    Abstract interface: concrete engines implement `finetune` to run the
    training job and `get_finetuned_model` to expose the result.
    """

    @abstractmethod
    def finetune(self) -> None:
        """Run the finetuning job for the underlying embedding model."""

    @abstractmethod
    def get_finetuned_model(self, **model_kwargs: Any) -> BaseEmbedding:
        """Return the finetuned model as a ``BaseEmbedding`` instance.

        Args:
            model_kwargs: Implementation-specific keyword arguments forwarded
                to the model constructor/loader.
        """
BaseEmbeddingFinetuneEngine
python
doocs__leetcode
solution/1500-1599/1535.Find the Winner of an Array Game/Solution.py
{ "start": 0, "end": 312 }
class ____: def getWinner(self, arr: List[int], k: int) -> int: mx = arr[0] cnt = 0 for x in arr[1:]: if mx < x: mx = x cnt = 1 else: cnt += 1 if cnt == k: break return mx
Solution
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_type_checking/runtime_evaluated_base_classes_5.py
{ "start": 133, "end": 173 }
class ____(Parent):
    # Annotation-only attribute; presumably kept at runtime because the base
    # class is runtime-evaluated (lint fixture for flake8-type-checking) —
    # confirm against the rule this fixture exercises.
    baz: DataFrame
Child
python
django__django
tests/utils_tests/test_duration.py
{ "start": 196, "end": 941 }
class ____(unittest.TestCase):
    """Check `duration_string` formatting across day/microsecond/sign cases."""

    def test_simple(self):
        delta = datetime.timedelta(hours=1, minutes=3, seconds=5)
        self.assertEqual(duration_string(delta), "01:03:05")

    def test_days(self):
        delta = datetime.timedelta(days=1, hours=1, minutes=3, seconds=5)
        self.assertEqual(duration_string(delta), "1 01:03:05")

    def test_microseconds(self):
        delta = datetime.timedelta(
            hours=1, minutes=3, seconds=5, microseconds=12345
        )
        self.assertEqual(duration_string(delta), "01:03:05.012345")

    def test_negative(self):
        delta = datetime.timedelta(days=-1, hours=1, minutes=3, seconds=5)
        self.assertEqual(duration_string(delta), "-1 01:03:05")
TestDurationString
python
django__django
django/db/models/lookups.py
{ "start": 17132, "end": 17312 }
class ____( IntegerFieldOverflow, IntegerFieldFloatRounding, GreaterThanOrEqual ): underflow_exception = FullResultSet @IntegerField.register_lookup
IntegerGreaterThanOrEqual
python
giampaolo__psutil
tests/test_unicode.py
{ "start": 9388, "end": 9755 }
class ____(TestFSAPIs):
    """Test FS APIs with a funky, invalid path name."""

    # Reuse the whole TestFSAPIs suite, but with an invalid-unicode suffix.
    funky_suffix = INVALID_UNICODE_SUFFIX

    def expect_exact_path_match(self):
        # NOTE(review): presumably macOS rewrites/normalizes invalid unicode
        # in paths, so an exact match can't be expected there — confirm.
        return not MACOS


# ===================================================================
# Non fs APIs
# ===================================================================
TestFSAPIsWithInvalidPath
python
tensorflow__tensorflow
tensorflow/tools/test/run_and_gather_logs_lib.py
{ "start": 1030, "end": 6823 }
class ____(Exception):
  """Raised when a test ran but produced no benchmark log files."""
  pass


def get_git_commit_sha():
  """Get git commit SHA for this build.

  Attempt to get the SHA from environment variable GIT_COMMIT, which should be
  available on Jenkins build agents.

  Returns:
    SHA hash of the git commit used for the build, if available
  """
  return os.getenv("GIT_COMMIT")


def process_test_logs(name, test_name, test_args, benchmark_type,
                      start_time, run_time, log_files):
  """Gather test information and put it in a TestResults proto.

  Args:
    name: Benchmark target identifier.
    test_name: A unique bazel target, e.g. "//path/to:test"
    test_args: A string containing all arguments to run the target with.
    benchmark_type: A string representing the BenchmarkType enum; the
      benchmark type for this target.
    start_time: Test starting time (epoch)
    run_time: Wall time that the test ran for
    log_files: Paths to the log files

  Returns:
    A TestResults proto
  """
  results = test_log_pb2.TestResults()
  results.name = name
  results.target = test_name
  results.start_time = start_time
  results.run_time = run_time
  results.benchmark_type = test_log_pb2.TestResults.BenchmarkType.Value(
      benchmark_type.upper())

  # Gather source code information
  git_sha = get_git_commit_sha()
  if git_sha:
    results.commit_id.hash = git_sha

  results.entries.CopyFrom(process_benchmarks(log_files))
  results.run_configuration.argument.extend(test_args)
  results.machine_configuration.CopyFrom(
      system_info_lib.gather_machine_configuration())
  return results


def process_benchmarks(log_files):
  """Merge every serialized BenchmarkEntries file into a single proto."""
  benchmarks = test_log_pb2.BenchmarkEntries()
  for f in log_files:
    content = gfile.GFile(f, "rb").read()
    # MergeFromString returns the number of bytes consumed; a short read
    # means the file did not parse as a BenchmarkEntries proto.
    if benchmarks.MergeFromString(content) != len(content):
      raise Exception("Failed parsing benchmark entry from %s" % f)
  return benchmarks


def run_and_gather_logs(name, test_name, test_args, benchmark_type,
                        skip_processing_logs=False):
  """Run the bazel test given by test_name. Gather and return the logs.

  Args:
    name: Benchmark target identifier.
    test_name: A unique bazel target, e.g. "//path/to:test"
    test_args: A string containing all arguments to run the target with.
    benchmark_type: A string representing the BenchmarkType enum; the
      benchmark type for this target.
    skip_processing_logs: Whether to skip processing test results from log
      files.

  Returns:
    A tuple (test_results, mangled_test_name), where
    test_results: A test_log_pb2.TestResults proto, or None if log processing
      is skipped.
    test_adjusted_name: Unique benchmark name that consists of
      benchmark name optionally followed by GPU type.

  Raises:
    ValueError: If the test_name is not a valid target.
    subprocess.CalledProcessError: If the target itself fails.
    IOError: If there are problems gathering test log output from the test.
    MissingLogsError: If we couldn't find benchmark logs.
  """
  # Require a fully-qualified, single bazel target ("//path/to:test").
  if not (test_name and test_name.startswith("//") and ".." not in test_name and
          not test_name.endswith(":") and not test_name.endswith(":all") and
          not test_name.endswith("...") and len(test_name.split(":")) == 2):
    raise ValueError("Expected test_name parameter with a unique test, e.g.: "
                     "--test_name=//path/to:test")
  test_executable = test_name.rstrip().strip("/").replace(":", "/")

  if gfile.Exists(os.path.join("bazel-bin", test_executable)):
    # Running in standalone mode from core of the repository
    test_executable = os.path.join("bazel-bin", test_executable)
  else:
    # Hopefully running in sandboxed mode
    test_executable = os.path.join(".", test_executable)

  # Append a short GPU model suffix (e.g. "|Tesla_K80") when one is detected,
  # so results from different hardware don't collide.
  test_adjusted_name = name
  gpu_config = gpu_info_lib.gather_gpu_devices()
  if gpu_config:
    gpu_name = gpu_config[0].model
    gpu_short_name_match = re.search(
        r"(Tesla|NVIDIA) (K40|K80|P100|V100|A100)", gpu_name
    )
    if gpu_short_name_match:
      gpu_short_name = gpu_short_name_match.group(0)
      test_adjusted_name = name + "|" + gpu_short_name.replace(" ", "_")

  temp_directory = tempfile.mkdtemp(prefix="run_and_gather_logs")
  mangled_test_name = (
      test_adjusted_name.strip("/").replace("|", "_").replace("/", "_").replace(":", "_"))
  test_file_prefix = os.path.join(temp_directory, mangled_test_name)
  test_file_prefix = "%s." % test_file_prefix

  try:
    if not gfile.Exists(test_executable):
      # Fall back to the ".python3" variant of the executable if present.
      test_executable_py3 = test_executable + ".python3"
      if not gfile.Exists(test_executable_py3):
        raise ValueError("Executable does not exist: %s" % test_executable)
      test_executable = test_executable_py3
    test_args = shlex.split(test_args)

    # This key is defined in tf/core/util/reporter.h as
    # TestReporter::kTestReporterEnv.
    os.environ["TEST_REPORT_FILE_PREFIX"] = test_file_prefix
    start_time = time.time()
    subprocess.check_call([test_executable] + test_args)
    if skip_processing_logs:
      return None, test_adjusted_name
    run_time = time.time() - start_time
    log_files = gfile.Glob("{}*".format(test_file_prefix))
    if not log_files:
      raise MissingLogsError("No log files found at %s." % test_file_prefix)

    return (process_test_logs(
        test_adjusted_name,
        test_name=test_name,
        test_args=test_args,
        benchmark_type=benchmark_type,
        start_time=int(start_time),
        run_time=run_time,
        log_files=log_files), test_adjusted_name)

  finally:
    # Best-effort cleanup of the temporary log directory.
    try:
      gfile.DeleteRecursively(temp_directory)
    except OSError:
      pass
MissingLogsError
python
sqlalchemy__sqlalchemy
test/dialect/postgresql/test_types.py
{ "start": 202624, "end": 202724 }
class ____(
    _Int4MultiRangeTests, _MultiRangeTypeRoundTrip
):
    # Concrete test class: all behavior comes from the two mixins
    # (int4 multirange fixtures + the generic round-trip test body).
    pass
Int4MultiRangeRoundTripTest
python
huggingface__transformers
src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
{ "start": 12114, "end": 15492 }
class ____(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[UniSpeechSatConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, # TODO: we need a refactor so that the different attention modules can get their specific kwargs # ATM, we have mixed things encoder, decoder, and encoder-decoder attn **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None # determine input shapes bsz, tgt_len = hidden_states.shape[:-1] src_len = key_value_states.shape[1] if is_cross_attention else tgt_len q_input_shape = (bsz, tgt_len, -1, self.head_dim) kv_input_shape = (bsz, src_len, -1, self.head_dim) # get query proj query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2) current_states = key_value_states if is_cross_attention 
else hidden_states key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2) value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, **kwargs, ) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return attn_output, attn_weights, None
UniSpeechSatAttention
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chartsheet02.py
{ "start": 315, "end": 1507 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chartsheet02.xlsx") def test_create_file(self): """Test the worksheet properties of an XlsxWriter chartsheet file.""" workbook = Workbook(self.got_filename) worksheet1 = workbook.add_worksheet() chartsheet = workbook.add_chartsheet() worksheet2 = workbook.add_worksheet() chart = workbook.add_chart({"type": "bar"}) chart.axis_ids = [79858304, 79860096] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet1.write_column("A1", data[0]) worksheet1.write_column("B1", data[1]) worksheet1.write_column("C1", data[2]) chart.add_series({"values": "=Sheet1!$A$1:$A$5"}) chart.add_series({"values": "=Sheet1!$B$1:$B$5"}) chart.add_series({"values": "=Sheet1!$C$1:$C$5"}) chartsheet.set_chart(chart) chartsheet.activate() worksheet2.select() workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
PrefectHQ__prefect
src/prefect/workers/process.py
{ "start": 3527, "end": 11902 }
class ____( BaseWorker[ProcessJobConfiguration, ProcessVariables, ProcessWorkerResult] ): type = "process" job_configuration: type[ProcessJobConfiguration] = ProcessJobConfiguration job_configuration_variables: type[ProcessVariables] | None = ProcessVariables _description = ( "Execute flow runs as subprocesses on a worker. Works well for local execution" " when first getting started." ) _display_name = "Process" _documentation_url = "https://docs.prefect.io/latest/get-started/quickstart" _logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/356e6766a91baf20e1d08bbe16e8b5aaef4d8643-48x48.png" async def start( self, run_once: bool = False, with_healthcheck: bool = False, printer: Callable[..., None] = print, ) -> None: """ Starts the worker and runs the main worker loops. By default, the worker will run loops to poll for scheduled/cancelled flow runs and sync with the Prefect API server. If `run_once` is set, the worker will only run each loop once and then return. If `with_healthcheck` is set, the worker will start a healthcheck server which can be used to determine if the worker is still polling for flow runs and restart the worker if necessary. Args: run_once: If set, the worker will only run each loop once then return. with_healthcheck: If set, the worker will start a healthcheck server. printer: A `print`-like function where logs will be reported. 
""" healthcheck_server = None healthcheck_thread = None try: async with self as worker: # wait for an initial heartbeat to configure the worker await worker.sync_with_backend() # schedule the scheduled flow run polling loop async with anyio.create_task_group() as loops_task_group: loops_task_group.start_soon( partial( critical_service_loop, workload=self.get_and_submit_flow_runs, interval=PREFECT_WORKER_QUERY_SECONDS.value(), run_once=run_once, jitter_range=0.3, backoff=4, # Up to ~1 minute interval during backoff ) ) # schedule the sync loop loops_task_group.start_soon( partial( critical_service_loop, workload=self.sync_with_backend, interval=self.heartbeat_interval_seconds, run_once=run_once, jitter_range=0.3, backoff=4, ) ) self._started_event = await self._emit_worker_started_event() start_client_metrics_server() if with_healthcheck: from prefect.workers.server import build_healthcheck_server # we'll start the ASGI server in a separate thread so that # uvicorn does not block the main thread healthcheck_server = build_healthcheck_server( worker=worker, query_interval_seconds=PREFECT_WORKER_QUERY_SECONDS.value(), ) healthcheck_thread = threading.Thread( name="healthcheck-server-thread", target=healthcheck_server.run, daemon=True, ) healthcheck_thread.start() printer(f"Worker {worker.name!r} started!") # If running once, wait for active runs to complete before exiting if run_once and self._limiter: while self.limiter.borrowed_tokens > 0: self._logger.debug( "Waiting for %s active run(s) to finish before shutdown...", self.limiter.borrowed_tokens, ) await anyio.sleep(0.1) finally: stop_client_metrics_server() if healthcheck_server and healthcheck_thread: self._logger.debug("Stopping healthcheck server...") healthcheck_server.should_exit = True healthcheck_thread.join() self._logger.debug("Healthcheck server stopped.") printer(f"Worker {worker.name!r} stopped!") async def run( self, flow_run: "FlowRun", configuration: ProcessJobConfiguration, task_status: 
Optional[anyio.abc.TaskStatus[int]] = None, ) -> ProcessWorkerResult: if task_status is None: task_status = anyio.TASK_STATUS_IGNORED working_dir_ctx = ( tempfile.TemporaryDirectory(suffix="prefect") if not configuration.working_dir else contextlib.nullcontext(configuration.working_dir) ) with working_dir_ctx as working_dir: process = await self._runner.execute_flow_run( flow_run_id=flow_run.id, command=configuration.command, cwd=working_dir, env=configuration.env, stream_output=configuration.stream_output, task_status=task_status, ) status_code = ( getattr(process, "returncode", None) if getattr(process, "returncode", None) is not None else getattr(process, "exitcode", None) ) if process is None or status_code is None: raise RuntimeError("Failed to start flow run process.") return ProcessWorkerResult(status_code=status_code, identifier=str(process.pid)) async def _submit_adhoc_run( self, flow: "Flow[..., FR]", parameters: dict[str, Any] | None = None, job_variables: dict[str, Any] | None = None, task_status: anyio.abc.TaskStatus["FlowRun"] | None = None, ): from prefect._experimental.bundles import ( create_bundle_for_flow_run, ) flow_run = await self.client.create_flow_run( flow, parameters=parameters, state=Pending(), job_variables=job_variables, work_pool_name=self.work_pool.name, ) if task_status is not None: # Emit the flow run object to .submit to allow it to return a future as soon as possible task_status.started(flow_run) api_flow = APIFlow(id=flow_run.flow_id, name=flow.name, labels={}) logger = self.get_flow_run_logger(flow_run) configuration = await self.job_configuration.from_template_and_values( base_job_template=self.work_pool.base_job_template, values=job_variables or {}, client=self._client, ) configuration.prepare_for_flow_run( flow_run=flow_run, flow=api_flow, work_pool=self.work_pool, worker_name=self.name, ) bundle = create_bundle_for_flow_run(flow=flow, flow_run=flow_run) logger.debug("Executing flow run bundle in subprocess...") try: await 
self._runner.execute_bundle( bundle=bundle, cwd=configuration.working_dir, env=configuration.env, ) except Exception: logger.exception("Error executing flow run bundle in subprocess") await self._propose_crashed_state(flow_run, "Flow run execution failed") finally: logger.debug("Flow run bundle execution complete") async def __aenter__(self) -> ProcessWorker: await super().__aenter__() self._runner = await self._exit_stack.enter_async_context( Runner(pause_on_shutdown=False, limit=None) ) return self async def __aexit__(self, *exc_info: Any) -> None: await super().__aexit__(*exc_info)
ProcessWorker
python
getsentry__sentry
tests/sentry/uptime/endpoints/test_project_uptime_alert_details.py
{ "start": 1079, "end": 9220 }
class ____(ProjectUptimeAlertDetailsBaseEndpointTest): method = "put" def test_all(self) -> None: detector = self.create_uptime_detector() uptime_sub = get_uptime_subscription(detector) resp = self.get_success_response( self.organization.slug, detector.project.slug, detector.id, environment="uptime-prod", name="test", owner=f"user:{self.user.id}", url="https://santry.io", interval_seconds=300, timeout_ms=1500, headers=[["hello", "world"]], body="something", ) detector.refresh_from_db() assert resp.data == serialize(detector, self.user, UptimeDetectorSerializer()) # Verify the detector config was updated assert detector.config.get("environment") == "uptime-prod" assert detector.name == "test" assert detector.owner assert detector.owner.identifier == f"user:{self.user.id}" uptime_sub = get_uptime_subscription(detector) assert uptime_sub.url == "https://santry.io" assert uptime_sub.interval_seconds == 300 assert uptime_sub.timeout_ms == 1500 assert uptime_sub.headers == [["hello", "world"]] assert uptime_sub.body == "something" assert uptime_sub.trace_sampling is False resp = self.get_success_response( self.organization.slug, detector.project.slug, detector.id, name="test", owner=f"user:{self.user.id}", url="https://santry.io", interval_seconds=300, timeout_ms=1500, headers=[["hello", "world"]], body=None, ) detector.refresh_from_db() assert resp.data == serialize(detector, self.user, UptimeDetectorSerializer()) assert detector.name == "test" assert detector.owner assert detector.owner.identifier == f"user:{self.user.id}" uptime_sub = get_uptime_subscription(detector) assert uptime_sub.url == "https://santry.io" assert uptime_sub.interval_seconds == 300 assert uptime_sub.timeout_ms == 1500 assert uptime_sub.headers == [["hello", "world"]] assert uptime_sub.body is None assert uptime_sub.trace_sampling is False def test_enviroment(self) -> None: detector = self.create_uptime_detector() resp = self.get_success_response( self.organization.slug, detector.project.slug, 
detector.id, name="test", environment="uptime-prod", ) detector.refresh_from_db() assert resp.data == serialize(detector, self.user, UptimeDetectorSerializer()) assert detector.name == "test" assert detector.config.get("environment") == "uptime-prod" def test_user(self) -> None: detector = self.create_uptime_detector() resp = self.get_success_response( self.organization.slug, detector.project.slug, detector.id, name="test", owner=f"user:{self.user.id}", ) detector.refresh_from_db() assert resp.data == serialize(detector, self.user, UptimeDetectorSerializer()) assert detector.name == "test" assert detector.owner assert detector.owner.identifier == f"user:{self.user.id}" def test_team(self) -> None: detector = self.create_uptime_detector() resp = self.get_success_response( self.organization.slug, detector.project.slug, detector.id, name="test_2", owner=f"team:{self.team.id}", ) detector.refresh_from_db() assert resp.data == serialize(detector, self.user, UptimeDetectorSerializer()) assert detector.name == "test_2" assert detector.owner assert detector.owner.identifier == f"team:{self.team.id}" def test_invalid_owner(self) -> None: detector = self.create_uptime_detector() bad_user = self.create_user() resp = self.get_error_response( self.organization.slug, detector.project.slug, detector.id, owner=f"user:{bad_user.id}", ) assert resp.data == { "owner": [ ErrorDetail(string="User is not a member of this organization", code="invalid") ] } bad_team = self.create_team(organization=self.create_organization()) resp = self.get_error_response( self.organization.slug, detector.project.slug, detector.id, owner=f"team:{bad_team.id}", ) assert resp.data == { "owner": [ ErrorDetail(string="Team is not a member of this organization", code="invalid") ] } def test_not_found(self) -> None: resp = self.get_error_response(self.organization.slug, self.project.slug, 3) assert resp.status_code == 404 @mock.patch("sentry.uptime.subscriptions.subscriptions.MAX_MONITORS_PER_DOMAIN", 1) def 
test_domain_limit(self) -> None: # First monitor is for test-one.example.com self.create_uptime_detector( uptime_subscription=self.create_uptime_subscription( url="test-one.example.com", url_domain="example", url_domain_suffix="com", ) ) # Update second monitor to use the same domain. This will fail with a # validation error detector = self.create_uptime_detector() resp = self.get_error_response( self.organization.slug, detector.project.slug, detector.id, status_code=400, url="https://test-two.example.com", ) assert ( resp.data["url"][0] == "The domain *.example.com has already been used in 1 uptime monitoring alerts, which is the limit. You cannot create any additional alerts for this domain." ) def test_status_disable(self) -> None: detector = self.create_uptime_detector() resp = self.get_success_response( self.organization.slug, detector.project.slug, detector.id, name="test_2", status="disabled", ) detector.refresh_from_db() assert resp.data == serialize(detector, self.user, UptimeDetectorSerializer()) assert detector.enabled is False assert get_uptime_subscription(detector).status == UptimeSubscription.Status.DISABLED.value def test_status_enable(self) -> None: detector = self.create_uptime_detector(enabled=False) resp = self.get_success_response( self.organization.slug, detector.project.slug, detector.id, name="test_2", status="active", ) detector.refresh_from_db() assert resp.data == serialize(detector, self.user, UptimeDetectorSerializer()) assert detector.enabled is True @mock.patch( "sentry.quotas.backend.assign_seat", return_value=1, # Outcome.RATE_LIMITED (anything != Outcome.ACCEPTED) ) @mock.patch( "sentry.quotas.backend.check_assign_seat", return_value=SeatAssignmentResult(assignable=False, reason="Assignment failed in test"), ) def test_status_enable_no_seat_assignment( self, _mock_check_assign_seat: mock.MagicMock, _mock_assign_seat: mock.MagicMock ) -> None: detector = self.create_uptime_detector(enabled=False) resp = self.get_error_response( 
self.organization.slug, detector.project.slug, detector.id, name="test_2", status="active", ) # The request should have failed with a 400 error # Check that we got an error response about seat assignment assert "status" in resp.data or "non_field_errors" in resp.data
ProjectUptimeAlertDetailsPutEndpointTest
python
getsentry__sentry
src/sentry/runner/commands/presenters/presenterdelegator.py
{ "start": 186, "end": 1182 }
class ____: def __init__(self, source: str, dry_run: bool, timestamp: float | None = None) -> None: from sentry.runner.commands.presenters.audit_log_presenter import AuditLogPresenter self._consolepresenter = ConsolePresenter() self._slackpresenter = None if WebhookPresenter.is_webhook_enabled(): self._slackpresenter = WebhookPresenter(source, timestamp) self._auditlogpresenter = None if AuditLogPresenter.is_webhook_enabled(): self._auditlogpresenter = AuditLogPresenter(source, dry_run) def __getattr__(self, attr: str) -> Any: def wrapper(*args: Any, **kwargs: Any) -> None: getattr(self._consolepresenter, attr)(*args, **kwargs) if self._slackpresenter: getattr(self._slackpresenter, attr)(*args, **kwargs) if self._auditlogpresenter: getattr(self._auditlogpresenter, attr)(*args, **kwargs) return wrapper
PresenterDelegator
python
pytorch__pytorch
torch/_export/serde/schema.py
{ "start": 6270, "end": 6466 }
class ____: # Argument name from the operator schema name: Annotated[str, 10] arg: Annotated[Argument, 20] kind: Annotated[Optional[ArgumentKind], 30] = None @dataclass
NamedArgument
python
pennersr__django-allauth
tests/apps/socialaccount/providers/vk/tests.py
{ "start": 232, "end": 1065 }
class ____(OAuth2TestsMixin, TestCase): provider_id = VKProvider.id def get_mocked_response(self, verified_email=True): return MockedResponse( HTTPStatus.OK, """ { "user": { "user_id": "1234567890", "first_name": "Ivan", "last_name": "I.", "phone": "79991234567", "avatar": "http://avatar.com/12345678", "email": "ivan_i123@vk.ru", "sex": 2, "verified": false, "birthday": "01.01.2000" } } """, ) def get_expected_to_str(self): return "ivan_i123@vk.ru" def get_login_response_json(self, with_refresh_token=True): return """ { "access_token": "testac", "refresh_token": "XXXXX", "expires_in": 0, "user_id": 1234567890, "state": "XXX", "scope": "email phone" } """
VKTests
python
google__jax
tests/pallas/pallas_test.py
{ "start": 72454, "end": 76611 }
class ____(PallasBaseTest): def setUp(self): super().setUp() if jtu.test_device_matches(["tpu"]): # TODO: most tests fail on TPU in non-interpret mode self.skipTest("On TPU the test works only in interpret mode") # TODO: improve tolerance setting self.tol = 1e-5 self.grad_tol = jtu.default_gradient_tolerance[np.dtype(jnp.float32)] @parameterized.named_parameters(*AD_TEST_CASES) def test_jvp(self, impl): grad_tol = self.grad_tol if jtu.test_device_matches(["tpu"]) and "recip_exp_sq" in self._testMethodName: grad_tol = 1e-1 @functools.partial( self.pallas_call, out_shape=jax.ShapeDtypeStruct((), floatx), ) def pallas_impl(x_ref, o_ref): x = x_ref[()] o_ref[()] = impl(x) k1, k2 = random.split(random.key(0)) x = random.normal(k1) t = random.normal(k2) out_primal, out_tangent = jax.jvp(pallas_impl, (x,), (t,)) out_primal_ref, out_tangent_ref = jax.jvp(impl, (x,), (t,)) np.testing.assert_allclose(out_primal, out_primal_ref, atol=self.tol, rtol=self.tol) np.testing.assert_allclose(out_tangent, out_tangent_ref, atol=self.tol, rtol=self.tol) jtu.check_grads(pallas_impl, (x,), modes=["fwd"], order=2, atol=grad_tol, rtol=grad_tol) @parameterized.named_parameters(*AD_TEST_CASES) def test_pallas_around_grad(self, impl): @functools.partial( self.pallas_call, out_shape=jax.ShapeDtypeStruct((), floatx), name=self.id().split(".")[-1], ) def pallas_impl(x_ref, o_ref): x = x_ref[()] o_ref[()] = jax.grad(impl)(x) x = random.normal(random.key(0)) out_grad = pallas_impl(x) out_grad_ref = jax.grad(impl)(x) np.testing.assert_allclose(out_grad, out_grad_ref, atol=1e-5, rtol=1e-5) @parameterized.named_parameters(*AD_TEST_CASES) def test_jvp_slice(self, impl): grad_tol = self.grad_tol if jtu.test_device_matches(["tpu"]) and "tanh" in self._testMethodName: grad_tol = 1e-1 @functools.partial( self.pallas_call, out_shape=jax.ShapeDtypeStruct((4,), floatx), ) def pallas_impl(x_ref, o_ref): x = x_ref[jnp.arange(2)] o_ref[jnp.arange(2)] = jnp.zeros(2) o_ref[2 + jnp.arange(2)] = impl(x) k1, k2 = 
random.split(random.key(0)) x = random.normal(k1, (8,)) t = random.normal(k2, (8,)) out_primal, out_tangent = jax.jvp(pallas_impl, (x,), (t,)) out_primal_ref, out_tangent_ref = jax.jvp( lambda x: jnp.concatenate([jnp.zeros(2), impl(x[:2])]), (x,), (t,)) np.testing.assert_allclose(out_primal, out_primal_ref, atol=self.tol, rtol=self.tol) np.testing.assert_allclose(out_tangent, out_tangent_ref, atol=self.tol, rtol=self.tol) jtu.check_grads(pallas_impl, (x,), modes=["fwd"], order=2, atol=grad_tol, rtol=grad_tol) def test_custom_jvp_call(self): @functools.partial(jax.custom_jvp, nondiff_argnums=(1,)) def softmax(x, axis=-1): unnormalized = jnp.exp(x - jnp.max(x, axis, keepdims=True)) return unnormalized / jnp.sum(unnormalized, axis, keepdims=True) @softmax.defjvp def softmax_jvp(axis, primals, tangents): (x,), (x_dot,) = primals, tangents y = softmax(x, axis) return y, y * (x_dot - (y * x_dot).sum(axis, keepdims=True)) m, n = 16, 32 x = random.normal(random.key(0), (m, n)) @functools.partial(self.pallas_call, out_shape=x) def softmax_kernel(x_ref, y_ref): y_ref[:] = softmax(x_ref[:]) np.testing.assert_allclose(softmax_kernel(x), jax.nn.softmax(x), atol=1e-7) # TODO(sharadmv): enable this when we update Triton # def test_jvp_matmul(self): # k1, k2 = random.split(random.key(0)) # x = random.normal(k1, (256, 128)) # y = random.normal(k2, (128, 64)) # bm, bn, bk, gm = 64, 128, 32, 8 # mm = functools.partial(matmul, bm=bm, bn=bn, bk=bk, gm=gm, # interpret=self.INTERPRET) # jtu.check_grads(mm, (x, y), modes=["fwd"], order=1)
PallasCallAutodifferentiationTest
python
scipy__scipy
scipy/stats/tests/test_stats.py
{ "start": 242319, "end": 247074 }
class ____: # indices in order [alternative={two-sided, less, greater}, # equal_var={False, True}, trim={0, 0.2}] # reference values in order `statistic, df, pvalue, low, high` # equal_var=False reference values computed with R PairedData yuen.t.test: # # library(PairedData) # options(digits=16) # a < - c(0.88236329, 0.97318744, 0.4549262, 0.97893335, 0.0606677, # 0.44013366, 0.55806018, 0.40151434, 0.14453315, 0.25860601, # 0.20202162) # b < - c(0.93455277, 0.42680603, 0.49751939, 0.14152846, 0.711435, # 0.77669667, 0.20507578, 0.78702772, 0.94691855, 0.32464958, # 0.3873582, 0.35187468, 0.21731811) # yuen.t.test(a, b, tr=0, conf.level = 0.9, alternative = 'l') # # equal_var=True reference values computed with R multicon yuenContrast: # # library(multicon) # options(digits=16) # a < - c(0.88236329, 0.97318744, 0.4549262, 0.97893335, 0.0606677, # 0.44013366, 0.55806018, 0.40151434, 0.14453315, 0.25860601, # 0.20202162) # b < - c(0.93455277, 0.42680603, 0.49751939, 0.14152846, 0.711435, # 0.77669667, 0.20507578, 0.78702772, 0.94691855, 0.32464958, # 0.3873582, 0.35187468, 0.21731811) # dv = c(a, b) # iv = c(rep('a', length(a)), rep('b', length(b))) # yuenContrast(dv~iv, EQVAR = FALSE, alternative = 'unequal', tr = 0.2) r = np.empty(shape=(3, 2, 2, 5)) r[0, 0, 0] = [-0.2314607, 19.894435, 0.8193209, -0.247220294, 0.188729943] r[1, 0, 0] = [-0.2314607, 19.894435, 0.40966045, -np.inf, 0.1382426469] r[2, 0, 0] = [-0.2314607, 19.894435, 0.5903395, -0.1967329982, np.inf] r[0, 0, 1] = [-0.2452886, 11.427896, 0.8105823, -0.34057446, 0.25847383] r[1, 0, 1] = [-0.2452886, 11.427896, 0.40529115, -np.inf, 0.1865829074] r[2, 0, 1] = [-0.2452886, 11.427896, 0.5947089, -0.268683541, np.inf] # confidence interval not available for equal_var=True r[0, 1, 0] = [-0.2345625322555006, 22, 0.8167175905643815, np.nan, np.nan] r[1, 1, 0] = [-0.2345625322555006, 22, 0.4083587952821908, np.nan, np.nan] r[2, 1, 0] = [-0.2345625322555006, 22, 0.5916412047178092, np.nan, np.nan] r[0, 1, 1] = 
[-0.2505369406507428, 14, 0.8058115135702835, np.nan, np.nan] r[1, 1, 1] = [-0.2505369406507428, 14, 0.4029057567851417, np.nan, np.nan] r[2, 1, 1] = [-0.2505369406507428, 14, 0.5970942432148583, np.nan, np.nan] @pytest.mark.parametrize('alternative', ['two-sided', 'less', 'greater']) @pytest.mark.parametrize('equal_var', [False, True]) @pytest.mark.parametrize('trim', [0, 0.2]) @skip_xp_backends('jax.numpy', reason='Generic stdtrit mutates array.') def test_confidence_interval(self, alternative, equal_var, trim, xp): if equal_var and trim: pytest.xfail('Discrepancy in `main`; needs further investigation.') if trim and not is_numpy(xp): pytest.skip('`trim` is only compatible with NumPy input') rng = np.random.default_rng(3810954496107292580) x = xp.asarray(rng.random(11)) y = xp.asarray(rng.random(13)) res = stats.ttest_ind(x, y, alternative=alternative, equal_var=equal_var, trim=trim) alternatives = {'two-sided': 0, 'less': 1, 'greater': 2} ref = self.r[alternatives[alternative], int(equal_var), int(np.ceil(trim))] statistic, df, pvalue, low, high = ref rtol = 1e-7 # only 7 digits in reference xp_assert_close(res.statistic, xp.asarray(statistic), rtol=rtol) xp_assert_close(res.df, xp.asarray(df), rtol=rtol) xp_assert_close(res.pvalue, xp.asarray(pvalue), rtol=rtol) if not equal_var: # CI not available when `equal_var is True` ci = res.confidence_interval(0.9) xp_assert_close(ci.low, xp.asarray(low), rtol=rtol) xp_assert_close(ci.high, xp.asarray(high), rtol=rtol) def test__broadcast_concatenate(): # test that _broadcast_concatenate properly broadcasts arrays along all # axes except `axis`, then concatenates along axis rng = np.random.default_rng(7544340069) a = rng.random((5, 4, 4, 3, 1, 6)) b = rng.random((4, 1, 8, 2, 6)) c = _broadcast_concatenate((a, b), axis=-3) # broadcast manually as an independent check a = np.tile(a, (1, 1, 1, 1, 2, 1)) b = np.tile(b[None, ...], (5, 1, 4, 1, 1, 1)) for index in product(*(range(i) for i in c.shape)): i, j, k, l, m, n = 
index if l < a.shape[-3]: assert a[i, j, k, l, m, n] == c[i, j, k, l, m, n] else: assert b[i, j, k, l - a.shape[-3], m, n] == c[i, j, k, l, m, n] @make_xp_test_case(stats.ttest_ind)
Test_ttest_CI
python
openai__openai-python
src/openai/types/responses/response_input_item_param.py
{ "start": 14129, "end": 15060 }
class ____(TypedDict, total=False): id: Required[str] """The ID of the item to reference.""" type: Optional[Literal["item_reference"]] """The type of item to reference. Always `item_reference`.""" ResponseInputItemParam: TypeAlias = Union[ EasyInputMessageParam, Message, ResponseOutputMessageParam, ResponseFileSearchToolCallParam, ResponseComputerToolCallParam, ComputerCallOutput, ResponseFunctionWebSearchParam, ResponseFunctionToolCallParam, FunctionCallOutput, ResponseReasoningItemParam, ImageGenerationCall, ResponseCodeInterpreterToolCallParam, LocalShellCall, LocalShellCallOutput, ShellCall, ShellCallOutput, ApplyPatchCall, ApplyPatchCallOutput, McpListTools, McpApprovalRequest, McpApprovalResponse, McpCall, ResponseCustomToolCallOutputParam, ResponseCustomToolCallParam, ItemReference, ]
ItemReference
python
huggingface__transformers
src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py
{ "start": 101101, "end": 109586 }
class ____(BigBirdPegasusPreTrainedModel, GenerationMixin): base_model_prefix = "model" _tied_weights_keys = { "lm_head.weight": "model.shared.weight", } _keys_to_ignore_on_load_missing = ["final_logits_bias"] # Copied from transformers.models.bart.modeling_bart.BartForConditionalGeneration.__init__ with Bart->BigBirdPegasus, BART->BIGBIRD_PEGASUS def __init__(self, config: BigBirdPegasusConfig): super().__init__(config) self.model = BigBirdPegasusModel(config) self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.bart.modeling_bart.BartForConditionalGeneration.resize_token_embeddings with Bart->BigBirdPegasus, BART->BIGBIRD_PEGASUS def resize_token_embeddings( self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None, mean_resizing: bool = True ) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing) self._resize_final_logits_bias(new_embeddings.weight.shape[0]) return new_embeddings # Copied from transformers.models.bart.modeling_bart.BartForConditionalGeneration._resize_final_logits_bias with Bart->BigBirdPegasus, BART->BIGBIRD_PEGASUS def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer("final_logits_bias", new_bias) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: 
Optional[torch.LongTensor] = None, encoder_outputs: Optional[list[torch.FloatTensor]] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple, Seq2SeqLMOutput]: r""" decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Provide for translation and summarization training. By default, the model will create this tensor by shifting the `input_ids` to the right, following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_bigbird_pegasus._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
Example summarization: ```python >>> from transformers import AutoTokenizer, BigBirdPegasusForConditionalGeneration >>> model = BigBirdPegasusForConditionalGeneration.from_pretrained("google/bigbird-pegasus-large-arxiv") >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv") >>> ARTICLE_TO_SUMMARIZE = ( ... "The dominant sequence transduction models are based on complex recurrent or convolutional neural " ... "networks in an encoder-decoder configuration. The best performing models also connect the encoder " ... "and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, " ... "based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. " ... "Experiments on two machine translation tasks show these models to be superior in quality " ... "while being more parallelizable and requiring significantly less time to train." ... ) >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=4096, return_tensors="pt", truncation=True) >>> # Generate Summary >>> summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=15) >>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] 'dominant sequence models are based on recurrent or convolutional neural networks .' 
``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) lm_logits = self.lm_head(outputs[0]) lm_logits = lm_logits + self.final_logits_bias.to(lm_logits.device) masked_lm_loss = None if labels is not None: labels = labels.to(lm_logits.device) loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) # Copied from transformers.models.bart.modeling_bart.BartForConditionalGeneration.prepare_decoder_input_ids_from_labels with Bart->BigBirdPegasus, BART->BIGBIRD_PEGASUS def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, 
self.config.pad_token_id, self.config.decoder_start_token_id) @auto_docstring( custom_intro=""" BigBirdPegasus model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """ )
BigBirdPegasusForConditionalGeneration
python
ansible__ansible
test/lib/ansible_test/_internal/commands/integration/coverage.py
{ "start": 9371, "end": 12681 }
class ____(CoverageHandler[WindowsConfig]): """Configure integration test code coverage for Windows hosts.""" def __init__(self, args: IntegrationConfig, host_state: HostState, inventory_path: str) -> None: super().__init__(args, host_state, inventory_path) # Common temporary directory used on all Windows hosts that will be created writable by everyone. self.remote_temp_path = f'C:\\ansible_test_coverage_{generate_name()}' @property def is_active(self) -> bool: """True if the handler should be used, otherwise False.""" return bool(self.profiles) and not self.args.coverage_check def setup(self) -> None: """Perform setup for code coverage.""" self.run_playbook('windows_coverage_setup.yml', self.get_playbook_variables()) def teardown(self) -> None: """Perform teardown for code coverage.""" with tempfile.TemporaryDirectory() as local_temp_path: variables = self.get_playbook_variables() variables.update( local_temp_path=local_temp_path, ) self.run_playbook('windows_coverage_teardown.yml', variables) for filename in os.listdir(local_temp_path): if all(isinstance(profile.config, WindowsRemoteConfig) for profile in self.profiles): prefix = 'remote' elif all(isinstance(profile.config, WindowsInventoryConfig) for profile in self.profiles): prefix = 'inventory' else: raise NotImplementedError() platform = f'{prefix}-{sanitize_host_name(os.path.splitext(filename)[0])}' with zipfile.ZipFile(os.path.join(local_temp_path, filename)) as coverage_zip: for item in coverage_zip.infolist(): if item.is_dir(): raise Exception(f'Unexpected directory in zip file: {item.filename}') item.filename = update_coverage_filename(item.filename, platform) coverage_zip.extract(item, ResultType.COVERAGE.path) def get_environment(self, target_name: str, aliases: tuple[str, ...]) -> dict[str, str]: """Return a dictionary of environment variables for running tests with code coverage.""" # Include the command, target and platform marker so the remote host can create a filename with that info. 
# The remote is responsible for adding '={language-version}=coverage.{hostname}.{pid}.{id}' coverage_name = '='.join((self.args.command, target_name, 'platform')) variables = dict( _ANSIBLE_COVERAGE_REMOTE_OUTPUT=os.path.join(self.remote_temp_path, coverage_name), _ANSIBLE_COVERAGE_REMOTE_PATH_FILTER=os.path.join(data_context().content.root, '*'), ) return variables def create_inventory(self) -> None: """Create inventory.""" create_windows_inventory(self.args, self.inventory_path, self.host_state.target_profiles) def get_playbook_variables(self) -> dict[str, str]: """Return a dictionary of variables for setup and teardown of Windows coverage.""" return dict( remote_temp_path=self.remote_temp_path, )
WindowsCoverageHandler
python
scikit-learn__scikit-learn
sklearn/linear_model/_passive_aggressive.py
{ "start": 11969, "end": 21162 }
class ____(BaseSGDRegressor): """Passive Aggressive Regressor. .. deprecated:: 1.8 The whole class `PassiveAggressiveRegressor` was deprecated in version 1.8 and will be removed in 1.10. Instead use: .. code-block:: python reg = SGDRegressor( loss="epsilon_insensitive", penalty=None, learning_rate="pa1", # or "pa2" eta0=1.0, # for parameter C ) Read more in the :ref:`User Guide <passive_aggressive>`. Parameters ---------- C : float, default=1.0 Aggressiveness parameter for the passive-agressive algorithm, see [1]. For PA-I it is the maximum step size. For PA-II it regularizes the step size (the smaller `C` the more it regularizes). As a general rule-of-thumb, `C` should be small when the data is noisy. fit_intercept : bool, default=True Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. Defaults to True. max_iter : int, default=1000 The maximum number of passes over the training data (aka epochs). It only impacts the behavior in the ``fit`` method, and not the :meth:`~sklearn.linear_model.PassiveAggressiveRegressor.partial_fit` method. .. versionadded:: 0.19 tol : float or None, default=1e-3 The stopping criterion. If it is not None, the iterations will stop when (loss > previous_loss - tol). .. versionadded:: 0.19 early_stopping : bool, default=False Whether to use early stopping to terminate training when validation. score is not improving. If set to True, it will automatically set aside a fraction of training data as validation and terminate training when validation score is not improving by at least tol for n_iter_no_change consecutive epochs. .. versionadded:: 0.20 validation_fraction : float, default=0.1 The proportion of training data to set aside as validation set for early stopping. Must be between 0 and 1. Only used if early_stopping is True. .. versionadded:: 0.20 n_iter_no_change : int, default=5 Number of iterations with no improvement to wait before early stopping. .. 
versionadded:: 0.20 shuffle : bool, default=True Whether or not the training data should be shuffled after each epoch. verbose : int, default=0 The verbosity level. loss : str, default="epsilon_insensitive" The loss function to be used: epsilon_insensitive: equivalent to PA-I in the reference paper. squared_epsilon_insensitive: equivalent to PA-II in the reference paper. epsilon : float, default=0.1 If the difference between the current prediction and the correct label is below this threshold, the model is not updated. random_state : int, RandomState instance, default=None Used to shuffle the training data, when ``shuffle`` is set to ``True``. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. warm_start : bool, default=False When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. See :term:`the Glossary <warm_start>`. Repeatedly calling fit or partial_fit when warm_start is True can result in a different solution than when calling fit a single time because of the way the data is shuffled. average : bool or int, default=False When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute. If set to an int greater than 1, averaging will begin once the total number of samples seen reaches average. So average=10 will begin averaging after seeing 10 samples. .. versionadded:: 0.19 parameter *average* to use weights averaging in SGD. Attributes ---------- coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\ n_features] Weights assigned to the features. intercept_ : array, shape = [1] if n_classes == 2 else [n_classes] Constants in decision function. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. 
Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 n_iter_ : int The actual number of iterations to reach the stopping criterion. t_ : int Number of weight updates performed during training. Same as ``(n_iter_ * n_samples + 1)``. See Also -------- SGDRegressor : Linear model fitted by minimizing a regularized empirical loss with SGD. References ---------- Online Passive-Aggressive Algorithms <http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf> K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006). Examples -------- >>> from sklearn.linear_model import PassiveAggressiveRegressor >>> from sklearn.datasets import make_regression >>> X, y = make_regression(n_features=4, random_state=0) >>> regr = PassiveAggressiveRegressor(max_iter=100, random_state=0, ... tol=1e-3) >>> regr.fit(X, y) PassiveAggressiveRegressor(max_iter=100, random_state=0) >>> print(regr.coef_) [20.48736655 34.18818427 67.59122734 87.94731329] >>> print(regr.intercept_) [-0.02306214] >>> print(regr.predict([[0, 0, 0, 0]])) [-0.02306214] """ _parameter_constraints: dict = { **BaseSGDRegressor._parameter_constraints, "loss": [StrOptions({"epsilon_insensitive", "squared_epsilon_insensitive"})], "C": [Interval(Real, 0, None, closed="right")], "epsilon": [Interval(Real, 0, None, closed="left")], } _parameter_constraints.pop("eta0") def __init__( self, *, C=1.0, fit_intercept=True, max_iter=1000, tol=1e-3, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, shuffle=True, verbose=0, loss="epsilon_insensitive", epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False, average=False, ): super().__init__( loss=loss, penalty=None, l1_ratio=0, epsilon=epsilon, eta0=C, fit_intercept=fit_intercept, max_iter=max_iter, tol=tol, early_stopping=early_stopping, validation_fraction=validation_fraction, n_iter_no_change=n_iter_no_change, shuffle=shuffle, verbose=verbose, random_state=random_state, warm_start=warm_start, 
average=average, ) self.C = C @_fit_context(prefer_skip_nested_validation=True) def partial_fit(self, X, y): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Subset of training data. y : numpy array of shape [n_samples] Subset of target values. Returns ------- self : object Fitted estimator. """ if not hasattr(self, "coef_"): self._more_validate_params(for_partial_fit=True) lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2" return self._partial_fit( X, y, alpha=1.0, loss="epsilon_insensitive", learning_rate=lr, max_iter=1, sample_weight=None, coef_init=None, intercept_init=None, ) @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y, coef_init=None, intercept_init=None): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : numpy array of shape [n_samples] Target values. coef_init : array, shape = [n_features] The initial coefficients to warm-start the optimization. intercept_init : array, shape = [1] The initial intercept to warm-start the optimization. Returns ------- self : object Fitted estimator. """ self._more_validate_params() lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2" return self._fit( X, y, alpha=1.0, loss="epsilon_insensitive", learning_rate=lr, coef_init=coef_init, intercept_init=intercept_init, )
PassiveAggressiveRegressor
python
tensorflow__tensorflow
tensorflow/python/distribute/combinations.py
{ "start": 16212, "end": 24737 }
class ____(object): """Holds the test environment information. Tests should modify the attributes of the instance returned by `env()` in the main process if needed, and it will be passed to the worker processes each time a test case is run. """ def __init__(self): self.tf_data_service_dispatcher = None # Note that this includes GPUs that may not be visible to the current # worker. self.total_phsyical_gpus = None def __setattr__(self, name, value): if not in_main_process(): raise ValueError( "combinations.env() should only be modified in the main process. " "Condition your code on combinations.in_main_process().") super().__setattr__(name, value) _env = TestEnvironment() @tf_export("__internal__.distribute.combinations.env", v1=[]) def env(): """Returns the object holds the test environment information. Tests should modify this in the main process if needed, and it will be passed to the worker processes each time a test case is run. Returns: a TestEnvironment object. """ return _env def _set_total_phsyical_gpus(): if in_main_process(): env().total_phsyical_gpus = len( context.context().list_physical_devices("GPU")) # This is needed in case CUDA is lazily loaded. app.call_after_init(_set_total_phsyical_gpus) _TestResult = collections.namedtuple("_TestResult", ["status", "message"]) def _test_runner(test_id, test_env): """Executes the test with the given test_id. This is a simple wrapper around TestRunner to be used with multi_process_runner. Similar to test.main(), but it executes only one test specified by test_id and returns whether the test succeeds. If the test fails, the function prints failures and errors to stdout. Args: test_id: TestCase.id() test_env: a TestEnvironment object. Returns: A boolean indicates whether the test succeeds. """ global _running_in_worker, _env # No need to restore the value of _running_in_worker since it should always be # True in worker processes. 
_running_in_worker = True _env = test_env test = unittest.defaultTestLoader.loadTestsFromName(test_id) runner = unittest.TextTestRunner() result = runner.run(test) # Treat expected failures as failures, so that the main process can get # them and fail as expected. Also treat errors as failures to simplify the # handling. failures = result.failures + result.expectedFailures + result.errors if failures: ret = _TestResult(status="failure", message=failures[0][1]) elif result.skipped: ret = _TestResult(status="skipped", message=result.skipped[0][1]) else: # Treat unexpectedSuccesses as OK so that the test case in the main process # succeed as well. ret = _TestResult(status="ok", message=None) # Print tracebacks to stdout and multi_process_runner will collect # them and stream back to the main process. if ret.message: print(ret.message) return ret def _multi_worker_test(test_method): """Decorate test_method so that it runs in each worker. We use `multi_process_runner` to simulate multiple workers. Since we run the this function in the main process and all worker processes, this decoration behaves differently in the main process and worker procssses. In the main process, it spawns subprocesses and runs the test on each of them; in a worker process, it executes test in the same way as a normal test, e.g. setUp()/tearDown() are called before/after the test. Args: test_method: a function which must be a test method. Returns: Decorated `test_method`. Note that the decorated function has additional arguments. """ def decorator(self, has_chief, num_workers, num_ps, share_gpu, runner, **kwargs): if _num_total_workers(has_chief, num_workers) == 1 or _running_in_worker or ( # Use in-process cluster for PS combinations # when XLA is enabled. test_util.is_xla_enabled() and num_ps > 0): # We're in worker process or the test is for single worker. Either case we # execute the test method directly instead of spawning subprocesses. 
# For MultiWorkerMirroredStrategy(CollectiveAllReduceStrategy), install a # session that connects to the local server. This is necessary for multi # worker graph mode tests to work. Those tests cannot use their graphs or # sessions, including the one returned by self.cached_session(). Since # existing tests may already be doing so, we only install the session for # multi worker tests. with _multi_worker_session(kwargs): test_method(self, **kwargs) return # We're in the main process. We spawn subprocesses and run the *test* on # each of them. Note that we're not directly executing test_method passed to # _multi_worker_test, because we need setUp()/tearDown() to be called and # all the decorations on the test method. The conceptual call stack is: # [main process]test.main() # [main process]test_runner.run(test) # [main process]wrapper by combinations.generate() # [main process]_multi_worker_test.decorator() # # A sub process goes through the same code path as the main # # process. # [sub process]_test_runner() # [sub process]test_runner.run(test) # [sub process]wrapper by combinations.generate() # [sub process]_multi_worker_test.decorator() # # _running_in_worker is True # [sub process]test_method() test_id = self.id() if runner: results = runner.run(_test_runner, args=(test_id, _env)) else: cluster_spec = multi_worker_test_base.create_cluster_spec( has_chief=has_chief, num_workers=num_workers, num_ps=num_ps, has_eval=False) ephemeral_runner = multi_process_runner.MultiProcessRunner( _test_runner, cluster_spec, share_gpu=share_gpu, args=(test_id, _env), dependence_on_chief=has_chief) ephemeral_runner.start() results = ephemeral_runner.join().return_value skip_reason = None for result in results: if result.status == "failure": # We can't tell which worker the return value come from, so we fail on # the first error. 
self.fail(result.message) break elif result.status == "skipped": # Record the skip reason, but do not actually skip the test in case some # processes fail instead. skip_reason = result.message if skip_reason is not None: self.skipTest(skip_reason) argspec = tf_inspect.getfullargspec(test_method) decorator_args = (argspec.args or []) + [ "has_chief", "num_workers", "num_ps", "share_gpu", "runner" ] decorator_argspec = argspec._replace(args=decorator_args) return tf_decorator.make_decorator( test_method, decorator, decorator_argspec=decorator_argspec) def _num_total_workers(has_chief, num_workers): """Returns the number of workers including the chief.""" if has_chief: return num_workers + 1 return num_workers def _multi_worker_session(kwargs): """Returns a context manager that enters a session that is configured for the MultiWorkerMirroredStrategy. Args: kwargs: a dict. Keyword arguments passed to the test. Returns: A context manager. If MultiWorkerMirroredStrategy is the one and only one strategy in kwargs and it's in graph mode, it's the session that is configured for that strategy. Otherwise, it's a no-op context manager. """ strategy = None for _, v in kwargs.items(): if isinstance(v, distribute_lib.StrategyBase): if strategy is not None: logging.warning( "The test uses multiple strategies. Skipping " "entering a session that is configured for the strategy.") return ops.NullContextmanager() strategy = v if context.executing_eagerly() or not isinstance( strategy, collective_all_reduce_strategy.CollectiveAllReduceStrategy): return ops.NullContextmanager() sess_config = copy.deepcopy(context.context().config) sess_config = strategy.update_config_proto(sess_config) target = strategy.cluster_resolver.master() return session.Session(config=sess_config, target=target).as_default()
TestEnvironment
python
apache__airflow
providers/standard/tests/unit/standard/operators/test_python.py
{ "start": 15951, "end": 24744 }
class ____(BasePythonTest): opcls = BranchPythonOperator @pytest.fixture(autouse=True) def setup_tests(self): self.branch_1 = EmptyOperator(task_id="branch_1") self.branch_2 = EmptyOperator(task_id="branch_2") def test_with_dag_run(self): clear_db_runs() with self.dag_maker(self.dag_id, template_searchpath=TEMPLATE_SEARCHPATH, serialized=True): def f(): return "branch_1" branch_op = self.opcls(task_id=self.task_id, python_callable=f, **self.default_kwargs()) branch_op >> [self.branch_1, self.branch_2] dr = self.dag_maker.create_dagrun() if AIRFLOW_V_3_0_1: with pytest.raises(DownstreamTasksSkipped) as dts: self.dag_maker.run_ti(self.task_id, dr) assert dts.value.tasks == [("branch_2", -1)] else: self.dag_maker.run_ti(self.task_id, dr) self.assert_expected_task_states( dr, {self.task_id: State.SUCCESS, "branch_1": State.NONE, "branch_2": State.SKIPPED} ) def test_with_skip_in_branch_downstream_dependencies(self): clear_db_runs() with self.dag_maker(self.dag_id, template_searchpath=TEMPLATE_SEARCHPATH, serialized=True): def f(): return "branch_1" branch_op = self.opcls(task_id=self.task_id, python_callable=f, **self.default_kwargs()) branch_op >> self.branch_1 >> self.branch_2 branch_op >> self.branch_2 dr = self.dag_maker.create_dagrun() self.dag_maker.run_ti(self.task_id, dr) self.assert_expected_task_states( dr, {self.task_id: State.SUCCESS, "branch_1": State.NONE, "branch_2": State.NONE} ) def test_with_skip_in_branch_downstream_dependencies2(self): clear_db_runs() with self.dag_maker(self.dag_id, template_searchpath=TEMPLATE_SEARCHPATH, serialized=True): def f(): return "branch_2" branch_op = self.opcls(task_id=self.task_id, python_callable=f, **self.default_kwargs()) branch_op >> self.branch_1 >> self.branch_2 branch_op >> self.branch_2 dr = self.dag_maker.create_dagrun() if AIRFLOW_V_3_0_1: with pytest.raises(DownstreamTasksSkipped) as dts: branch_op.run(start_date=self.default_date, end_date=self.default_date) assert dts.value.tasks == [("branch_1", -1)] 
else: self.dag_maker.run_ti(branch_op.task_id, dr) self.assert_expected_task_states( dr, {self.task_id: State.SUCCESS, "branch_1": State.SKIPPED, "branch_2": State.NONE} ) def test_clear_skipped_downstream_task(self, dag_maker): """ After a downstream task is skipped by BranchPythonOperator, clearing the skipped task should not cause it to be executed. """ with dag_maker(serialized=False): def f(): return "branch_1" branch_op = self.opcls(task_id=self.task_id, python_callable=f, **self.default_kwargs()) branches = [self.branch_1, self.branch_2] branch_op >> branches dr = dag_maker.create_dagrun() if AIRFLOW_V_3_0_1: from airflow.exceptions import DownstreamTasksSkipped with create_session() as session: branch_ti = dr.get_task_instance(task_id=self.task_id, session=session) with pytest.raises(DownstreamTasksSkipped) as exc_info: branch_ti.run() assert exc_info.value.tasks == [("branch_2", -1)] branch_ti.set_state(TaskInstanceState.SUCCESS, session=session) dr.task_instance_scheduling_decisions(session=session) branch_2_ti = dr.get_task_instance(task_id="branch_2", session=session) branch_2_ti.task = self.branch_2 assert branch_2_ti.state == TaskInstanceState.SKIPPED branch_2_ti.set_state(None) branch_2_ti.run() assert branch_2_ti.state == TaskInstanceState.SKIPPED else: dag_maker.run_ti(branch_op.task_id, dr) for task in branches: dag_maker.run_ti(task.task_id, dr) expected_states = { self.task_id: State.SUCCESS, "branch_1": State.SUCCESS, "branch_2": State.SKIPPED, } self.assert_expected_task_states(dr, expected_states) # Clear the children tasks. tis = dr.get_task_instances() children_tis = [ti for ti in tis if ti.task_id in branch_op.get_direct_relative_ids()] with create_session() as session: if AIRFLOW_V_3_0_PLUS: clear_task_instances(children_tis, session=session) else: clear_task_instances(children_tis, session=session, dag=branch_op.dag) # Run the cleared tasks again. 
for task in branches: dag_maker.run_ti(task.task_id, dr) # Check if the states are correct after children tasks are cleared. self.assert_expected_task_states(dr, expected_states) def test_raise_exception_on_no_accepted_type_return(self): def f(): return 5 ti = self.create_ti(f) with pytest.raises( AirflowException, match=r"'branch_task_ids'.*task.*", ): ti.run() def test_raise_exception_on_invalid_task_id(self): def f(): return "some_task_id" ti = self.create_ti(f) with pytest.raises( AirflowException, match=r"Invalid tasks found: {\(False, 'bool'\)}.|'branch_task_ids'.*task.*" ): ti.run() def test_none_return_value_should_skip_all_downstream(self): """Test that returning None from callable should skip all downstream tasks.""" clear_db_runs() with self.dag_maker(self.dag_id, serialized=True): def return_none(): return None branch_op = self.opcls(task_id=self.task_id, python_callable=return_none, **self.default_kwargs()) branch_op >> [self.branch_1, self.branch_2] dr = self.dag_maker.create_dagrun() if AIRFLOW_V_3_0_1: from airflow.exceptions import DownstreamTasksSkipped with pytest.raises(DownstreamTasksSkipped) as dts: self.dag_maker.run_ti(self.task_id, dr) # When None is returned, all downstream tasks should be skipped expected_skipped = {("branch_1", -1), ("branch_2", -1)} assert set(dts.value.tasks) == expected_skipped else: self.dag_maker.run_ti(self.task_id, dr) self.assert_expected_task_states( dr, {self.task_id: State.SUCCESS, "branch_1": State.SKIPPED, "branch_2": State.SKIPPED} ) @pytest.mark.parametrize( ("choice", "expected_states"), [ ("task1", [State.SUCCESS, State.SUCCESS, State.SUCCESS]), ("join", [State.SUCCESS, State.SKIPPED, State.SUCCESS]), ], ) def test_empty_branch(self, choice, expected_states, session): """ Tests that BranchPythonOperator handles empty branches properly. 
""" with self.dag_maker(self.dag_id, template_searchpath=TEMPLATE_SEARCHPATH, serialized=True): def f(): return choice branch = self.opcls(task_id=self.task_id, python_callable=f, **self.default_kwargs()) task1 = EmptyOperator(task_id="task1") join = EmptyOperator(task_id="join", trigger_rule="none_failed_min_one_success") branch >> [task1, join] task1 >> join dr = self.dag_maker.create_dagrun() task_ids = [self.task_id, "task1", "join"] tis = {ti.task_id: ti for ti in dr.task_instances} for task_id in task_ids: # Mimic the specific order the scheduling would run the tests. task_instance = tis[task_id] task_instance.refresh_from_task(self.dag_maker.dag.get_task(task_id)) if AIRFLOW_V_3_0_1: from airflow.exceptions import DownstreamTasksSkipped try: task_instance.run() except DownstreamTasksSkipped: task_instance.set_state(State.SUCCESS) else: task_instance.run() def get_state(ti): ti.refresh_from_db() return ti.state assert [get_state(tis[task_id]) for task_id in task_ids] == expected_states
TestBranchOperator
python
huggingface__transformers
src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py
{ "start": 163852, "end": 166519 }
class ____(Qwen2_5OmniPreTrainedModel): config: Qwen2_5OmniToken2WavConfig base_model_prefix = "model" input_modalities = "audio" _no_split_modules = ["Qwen2_5OmniToken2WavDiTModel", "Qwen2_5OmniToken2WavBigVGANModel"] def __init__(self, config: Qwen2_5OmniToken2WavConfig): super().__init__(config) attn_impl = config._attn_implementation if config._attn_implementation == "flash_attention_2": logger.warning_once( "Qwen2_5OmniToken2WavModel must inference with fp32, but flash_attention_2 only supports fp16 and bf16, " "attention implementation of Qwen2_5OmniToken2WavModel will fallback to sdpa." ) attn_impl = "sdpa" elif config._attn_implementation == "eager": logger.warning_once( "Qwen2_5OmniToken2WavModel does not support eager attention implementation, fall back to sdpa" ) attn_impl = "sdpa" self.code2wav_dit_model = Qwen2_5OmniToken2WavDiTModel._from_config( config.dit_config, attn_implementation=attn_impl ) self.code2wav_bigvgan_model = Qwen2_5OmniToken2WavBigVGANModel._from_config( config.bigvgan_config, attn_implementation=attn_impl ) def forward( self, code, conditioning, reference_mel, num_steps=10, guidance_scale=0.5, sway_coefficient=-1.0, **kwargs, ): """Generates a waveform from input code and conditioning parameters.""" mel_spectrogram = self.code2wav_dit_model.sample( conditioning, reference_mel, code, num_steps=num_steps, guidance_scale=guidance_scale, sway_coefficient=sway_coefficient, ) waveform = self.code2wav_bigvgan_model(mel_spectrogram) return waveform ############################ # Start Qwen2.5Omni # ############################ @auto_docstring( custom_intro=""" The full Qwen2.5Omni model, a multimodal model composed of 3 sub-models: - [`Qwen2_5OmniThinkerForConditionalGeneration`]: a causal auto-regressive transformer takes text, audio, image, video as input and predict text tokens. 
- [`Qwen2_5OmniTalkerForConditionalGeneration`]: a causal auto-regressive transformer takes thinker hidden states and response as input and predict speech tokens. - [`Qwen2_5OmniToken2WavModel`]: a DiT model take speech tokens as input and predict mel spectrogram and a BigVGAN vocoder take mel spectrogram as input and predict waveform. """ )
Qwen2_5OmniToken2WavModel
python
dagster-io__dagster
examples/docs_snippets/docs_snippets/guides/dagster/dagster_pipes/dagster_pipes_details_and_customization/custom_message_writer.py
{ "start": 615, "end": 1055 }
class ____(PipesBlobStoreMessageWriterChannel): def __init__(self, key_prefix: str): super().__init__() self.key_prefix = key_prefix # This will be called periodically to upload any buffered messages def upload_messages_chunk(self, payload: IO, index: int) -> None: key = f"{self.key_prefix}/{index}.json" cloud_service.write(key, json.dumps(payload.read()))
MyCustomCloudServiceMessageWriterChannel
python
mlflow__mlflow
mlflow/store/model_registry/dbmodels/models.py
{ "start": 4811, "end": 5738 }
class ____(Base): __tablename__ = "model_version_tags" name = Column(String(256)) version = Column(Integer) key = Column(String(250), nullable=False) value = Column(Text, nullable=True) # linked entities model_version = relationship( "SqlModelVersion", foreign_keys=[name, version], backref=backref("model_version_tags", cascade="all"), ) __table_args__ = ( PrimaryKeyConstraint("key", "name", "version", name="model_version_tag_pk"), ForeignKeyConstraint( ("name", "version"), ("model_versions.name", "model_versions.version"), onupdate="cascade", ), ) def __repr__(self): return f"<SqlModelVersionTag ({self.name}, {self.version}, {self.key}, {self.value})>" # entity mappers def to_mlflow_entity(self): return ModelVersionTag(self.key, self.value)
SqlModelVersionTag
python
weaviate__weaviate-python-client
weaviate/cluster/models.py
{ "start": 130, "end": 241 }
class ____(str, Enum): """Enum for replication types.""" COPY = "COPY" MOVE = "MOVE"
ReplicationType
python
joke2k__faker
faker/providers/automotive/bn_BD/__init__.py
{ "start": 118, "end": 4701 }
class ____(AutomotiveProvider): """Implement automotive provider for ``bn_BD`` locale. Sources: - https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_Bangladesh """ # noinspection DuplicatedCode cities = ( "বরগুনা", "বরিশাল", "বরিশাল মেট্রো", "ভোলা", "বান্দরবান", "ব্রাহ্মণবাড়িয়া", "বাগেরহাট", "বগুড়া", "চাঁদপুর", "চট্টগ্রাম", "চট্ট মেট্রো", "কুমিল্লা", "কক্সবাজার", "চুয়াডাঙ্গা", "ঢাকা", "ঢাকা মেট্রো", "দিনাজপুর", "ফরিদপুর", "ফেনী", "গাজীপুর", "গোপালগঞ্জ", "গাইবান্ধা", "হবিগঞ্জ", "ঝালকাঠি", "যশোর", "ঝিনাইদহ", "জামালপুর", "জয়পুরহাট", "খাগড়াছড়ি", "কিশোরগঞ্জ", "খুলনা", "খুলনা মেট্রো", "কুষ্টিয়া", "কুড়িগ্রাম", "লক্ষ্মীপুর", "লালমনিরহাট", "মাদারীপুর", "মানিকগঞ্জ", "মুন্সীগঞ্জ", "মাগুরা", "মেহেরপুর", "ময়মনসিংহ", "মৌলভীবাজার", "নোয়াখালী", "নারায়ণগঞ্জ", "নরসিংদী", "নড়াইল", "নেত্রকোণা", "নওগাঁ", "নাটোর", "চাঁপাইনবাবগঞ্জ", "নীলফামারী", "পটুয়াখালী", "পিরোজপুর", "পাবনা", "পঞ্চগড়", "রাঙ্গামাটি", "রাজবাড়ী", "রাজশাহী", "রাজ মেট্রো", "রংপুর", "শরীয়তপুর", "সাতক্ষীরা", "শেরপুর", "সিরাজগঞ্জ", "সুনামগঞ্জ", "সিলেট", "সিলেট মেট্রো", "টাঙ্গাইল", "ঠাকুরগাঁও", ) vehicle_category_letters = ( "অ", "ই", "উ", "এ", "ক", "খ", "গ", "ঘ", "ঙ", "চ", "ছ", "জ", "ঝ", "ত", "থ", "ঢ", "ড", "ট", "ঠ", "দ", "ধ", "ন", "প", "ফ", "ব", "ভ", "ম", "য", "র", "ল", "শ", "স", "হ", ) vehicle_category_numbers = ( "১১", "১২", "১৩", "১৪", "১৫", "১৬", "১৭", "১৮", "১৯", "২০", "২১", "২২", "২৩", "২৪", "২৫", "২৬", "২৭", "২৮", "২৯", "৩০", "৩১", "৩২", "৩৩", "৩৪", "৩৫", "৩৬", "৩৭", "৩৮", "৩৯", "৪০", "৪১", "৪২", "৪৩", "৪৪", "৪৫", "৪৬", "৪৭", "৪৮", "৪৯", "৫০", "৫১", "৫২", "৫৩", "৫৪", "৫৫", "৫৬", "৫৭", "৫৮", "৫৯", "৬০", "৬১", "৬২", "৬৩", "৬৪", "৬৫", "৬৬", "৬৭", "৬৮", "৬৯", "৭০", "৭১", "৭২", "৭৩", "৭৪", "৭৫", "৭৬", "৭৭", "৭৮", "৭৯", "৮০", "৮১", "৮২", "৮৩", "৮৪", "৮৫", "৮৬", "৮৭", "৮৮", "৮৯", "৯০", "৯১", "৯২", "৯৩", "৯৪", "৯৫", "৯৬", "৯৭", "৯৮", "৯৯", ) vehicle_serial_number_formats = ("%###",) license_plate_formats = ( "{{city_name}}-{{vehicle_category_letter}} 
{{vehicle_category_number}}-{{vehicle_serial_number}}", ) def city_name(self) -> str: """ :example: 'ঢাকা মেট্রো' """ return self.random_element(self.cities) def vehicle_category_letter(self) -> str: """ :example: 'ব' """ return self.random_element(self.vehicle_category_letters) def vehicle_category_number(self) -> str: """ :example: '১১' """ return self.random_element(self.vehicle_category_numbers) def vehicle_serial_number(self) -> str: """ Generate a 4 digits vehicle serial number. :example: '৫৪৩২' """ return translate_to_bengali_digits(self.numerify(self.random_element(self.vehicle_serial_number_formats))) def license_plate(self) -> str: """ Generate a license plate. :example: 'বরিশাল-ভ ৬৭-৪৫৯৩' """ pattern: str = self.random_element(self.license_plate_formats) return self.generator.parse(pattern)
Provider
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/pool/base.py
{ "start": 4230, "end": 4311 }
class ____(Protocol): def __call__(self) -> DBAPIConnection: ...
_CreatorFnType
python
Farama-Foundation__Gymnasium
gymnasium/wrappers/vector/jax_to_torch.py
{ "start": 332, "end": 1338 }
class ____(ArrayConversion): """Wraps a Jax-based vector environment so that it can be interacted with through PyTorch Tensors. Actions must be provided as PyTorch Tensors and observations, rewards, terminations and truncations will be returned as PyTorch Tensors. Example: >>> import gymnasium as gym # doctest: +SKIP >>> envs = gym.make_vec("JaxEnv-vx", 3) # doctest: +SKIP >>> envs = JaxToTorch(envs) # doctest: +SKIP """ def __init__(self, env: VectorEnv, device: Device | None = None): """Vector wrapper to change inputs and outputs to PyTorch tensors. Args: env: The Jax-based vector environment to wrap device: The device the torch Tensors should be moved to """ super().__init__(env, env_xp=jnp, target_xp=torch, target_device=device) self.device: Device | None = device
JaxToTorch
python
PrefectHQ__prefect
src/prefect/settings/models/cli.py
{ "start": 208, "end": 1045 }
class ____(PrefectBaseSettings): """ Settings for controlling CLI behavior """ model_config: ClassVar[SettingsConfigDict] = build_settings_config(("cli",)) colors: bool = Field( default=True, description="If True, use colors in CLI output. If `False`, output will not include colors codes.", ) prompt: Optional[bool] = Field( default=None, description="If `True`, use interactive prompts in CLI commands. If `False`, no interactive prompts will be used. If `None`, the value will be dynamically determined based on the presence of an interactive-enabled terminal.", ) wrap_lines: bool = Field( default=True, description="If `True`, wrap text by inserting new lines in long lines in CLI output. If `False`, output will not be wrapped.", )
CLISettings
python
scipy__scipy
scipy/cluster/tests/test_hierarchy.py
{ "start": 33963, "end": 35532 }
class ____: def test_maxinconsts_empty_linkage(self, xp): # Tests maxinconsts(Z, R) on empty linkage. Expecting exception. Z = xp.zeros((0, 4), dtype=xp.float64) R = xp.zeros((0, 4), dtype=xp.float64) assert_raises(ValueError, maxinconsts, Z, R) def test_maxinconsts_difrow_linkage(self, xp): # Tests maxinconsts(Z, R) on linkage and inconsistency matrices with # different numbers of clusters. Expecting exception. Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) R = np.random.rand(2, 4) R = xp.asarray(R) assert_raises(ValueError, maxinconsts, Z, R) def test_maxinconsts_one_cluster_linkage(self, xp): # Tests maxinconsts(Z, R) on linkage with one cluster. Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64) MD = maxinconsts(Z, R) expectedMD = calculate_maximum_inconsistencies(Z, R, xp=xp) xp_assert_close(MD, expectedMD, atol=1e-15) @pytest.mark.parametrize( "method", ['single', 'complete', 'ward', 'centroid', 'median']) def test_maxinconsts_Q_linkage(self, method, xp): # Tests maxinconsts(Z, R) on the Q data set X = hierarchy_test_data.Q_X Z = linkage(X, method) R = xp.asarray(inconsistent(Z)) Z = xp.asarray(Z) MD = maxinconsts(Z, R) expectedMD = calculate_maximum_inconsistencies(Z, R, xp=xp) xp_assert_close(MD, expectedMD, atol=1e-15) @make_xp_test_case(maxRstat)
TestMaxInconsts
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_axis49.py
{ "start": 315, "end": 1387 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_axis49.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "column"}) chart.axis_ids = [47713664, 48125056] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) chart.add_series({"values": "=Sheet1!$A$1:$A$5"}) chart.add_series({"values": "=Sheet1!$B$1:$B$5"}) chart.add_series({"values": "=Sheet1!$C$1:$C$5"}) chart.set_x_axis({"visible": 0}) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
pandas-dev__pandas
pandas/core/dtypes/dtypes.py
{ "start": 49122, "end": 51665 }
class ____(ExtensionDtype): """ A Pandas ExtensionDtype for NumPy dtypes. This is mostly for internal compatibility, and is not especially useful on its own. Parameters ---------- dtype : object Object to be converted to a NumPy data type object. See Also -------- numpy.dtype """ _metadata = ("_dtype",) _supports_2d = False _can_fast_transpose = False def __init__(self, dtype: npt.DTypeLike | NumpyEADtype | None) -> None: if isinstance(dtype, NumpyEADtype): # make constructor idempotent dtype = dtype.numpy_dtype self._dtype = np.dtype(dtype) def __repr__(self) -> str: return f"NumpyEADtype({self.name!r})" @property def numpy_dtype(self) -> np.dtype: """ The NumPy dtype this NumpyEADtype wraps. """ return self._dtype @property def name(self) -> str: """ A bit-width name for this data-type. """ return self._dtype.name @property def type(self) -> type[np.generic]: """ The type object used to instantiate a scalar of this NumPy data-type. """ return self._dtype.type @property def _is_numeric(self) -> bool: # exclude object, str, unicode, void. return self.kind in set("biufc") @property def _is_boolean(self) -> bool: return self.kind == "b" @classmethod def construct_from_string(cls, string: str) -> NumpyEADtype: try: dtype = np.dtype(string) except TypeError as err: if not isinstance(string, str): msg = f"'construct_from_string' expects a string, got {type(string)}" else: msg = f"Cannot construct a 'NumpyEADtype' from '{string}'" raise TypeError(msg) from err return cls(dtype) def construct_array_type(self) -> type_t[NumpyExtensionArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays import NumpyExtensionArray return NumpyExtensionArray @property def kind(self) -> str: """ A character code (one of 'biufcmMOSUV') identifying the general kind of data. """ return self._dtype.kind @property def itemsize(self) -> int: """ The element size of this data-type object. """ return self._dtype.itemsize
NumpyEADtype
python
astropy__astropy
astropy/tests/runner.py
{ "start": 1399, "end": 11850 }
class ____: """ The base class for the TestRunner. A test runner can be constructed by creating a subclass of this class and defining 'keyword' methods. These are methods that have the ``astropy.tests.runner.keyword`` decorator, these methods are used to construct allowed keyword arguments to the `~astropy.tests.runner.TestRunnerBase.run_tests` method as a way to allow customization of individual keyword arguments (and associated logic) without having to re-implement the whole `~astropy.tests.runner.TestRunnerBase.run_tests` method. Examples -------- A simple keyword method:: class MyRunner(TestRunnerBase): @keyword('default_value'): def spam(self, spam, kwargs): \"\"\" spam : `str` The parameter description for the run_tests docstring. \"\"\" # Return value must be a list with a CLI parameter for pytest. return ['--spam={}'.format(spam)] """ def __init__(self, base_path): self.base_path = os.path.abspath(base_path) def __new__(cls, *args, **kwargs): # Before constructing the class parse all the methods that have been # decorated with ``keyword``. # The objective of this method is to construct a default set of keyword # arguments to the ``run_tests`` method. It does this by inspecting the # methods of the class for functions with the name ``keyword`` which is # the name of the decorator wrapping function. Once it has created this # dictionary, it also formats the docstring of ``run_tests`` to be # comprised of the docstrings for the ``keyword`` methods. # To add a keyword argument to the ``run_tests`` method, define a new # method decorated with ``@keyword`` and with the ``self, name, kwargs`` # signature. # Get all 'function' members as the wrapped methods are functions functions = inspect.getmembers(cls, predicate=inspect.isfunction) # Filter out anything that's not got the name 'keyword' keywords = filter(lambda func: func[1].__name__ == "keyword", functions) # Sort all keywords based on the priority flag. 
sorted_keywords = sorted(keywords, key=lambda x: x[1]._priority, reverse=True) cls.keywords = {} doc_keywords = "" for name, func in sorted_keywords: # Here we test if the function has been overloaded to return # NotImplemented which is the way to disable arguments on # subclasses. If it has been disabled we need to remove it from the # default keywords dict. We do it in the try except block because # we do not have access to an instance of the class, so this is # going to error unless the method is just doing `return # NotImplemented`. try: # Second argument is False, as it is normally a bool. # The other two are placeholders for objects. if func(None, False, None) is NotImplemented: continue except Exception: pass # Construct the default kwargs dict and docstring cls.keywords[name] = func._default_value if func.__doc__: doc_keywords += " " * 8 doc_keywords += func.__doc__.strip() doc_keywords += "\n\n" cls.run_tests.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords) return super().__new__(cls) def _generate_args(self, **kwargs): # Update default values with passed kwargs # but don't modify the defaults keywords = copy.deepcopy(self.keywords) keywords.update(kwargs) # Iterate through the keywords (in order of priority) args = [] for keyword in keywords.keys(): func = getattr(self, keyword) result = func(keywords[keyword], keywords) # Allow disabling of options in a subclass if result is NotImplemented: raise TypeError( f"run_tests() got an unexpected keyword argument {keyword}" ) # keyword methods must return a list if not isinstance(result, list): raise TypeError(f"{keyword} keyword method must return a list") args += result return args RUN_TESTS_DOCSTRING = """ Run the tests for the package. This method builds arguments for and then calls ``pytest.main``. .. deprecated:: 8.0 Use pytest instead. 
Parameters ---------- {keywords} """ _required_dependencies = [ "pytest", "pytest_remotedata", "pytest_doctestplus", "pytest_astropy_header", ] _missing_dependancy_error = ( "Test dependencies are missing: {}. You should install the " "'pytest-astropy' package (you may need to update the package if you " "have a previous version installed, e.g., " "'pip install pytest-astropy --upgrade' or the equivalent with conda)." ) @classmethod def _has_test_dependencies(cls): # pragma: no cover # Using the test runner will not work without these dependencies. for module in cls._required_dependencies: spec = find_spec(module) # Checking loader accounts for packages that were uninstalled. # pytest plugins are special, it's enough if they are picked up the # pytest independently of how they are installed. if spec is None or spec.loader is None: # Don't import pytest until it's actually needed import pytest pluginmanager = pytest.PytestPluginManager() try: pluginmanager.import_plugin(module) except ImportError: raise RuntimeError(cls._missing_dependancy_error.format(module)) def run_tests(self, **kwargs): # This method is weirdly hooked into various things with docstring # overrides, so we keep it simple and not use @deprecated here. warnings.warn( "The test runner is deprecated in v8.0 and may be removed in a future version.\n Use pytest instead.", AstropyDeprecationWarning, ) # The following option will include eggs inside a .eggs folder in # sys.path when running the tests. This is possible so that when # running pytest, test dependencies installed via e.g. # tests_requires are available here. This is not an advertised option # since it is only for internal use if kwargs.pop("add_local_eggs_to_path", False): # Add each egg to sys.path individually for egg in glob.glob(os.path.join(".eggs", "*.egg")): sys.path.insert(0, egg) self._has_test_dependencies() # pragma: no cover # The docstring for this method is defined as a class variable. 
# This allows it to be built for each subclass in __new__. # Don't import pytest until it's actually needed to run the tests import pytest # Raise error for undefined kwargs allowed_kwargs = set(self.keywords.keys()) passed_kwargs = set(kwargs.keys()) if not passed_kwargs.issubset(allowed_kwargs): wrong_kwargs = list(passed_kwargs.difference(allowed_kwargs)) raise TypeError( f"run_tests() got an unexpected keyword argument {wrong_kwargs[0]}" ) args = self._generate_args(**kwargs) if kwargs.get("plugins") is not None: plugins = kwargs.pop("plugins") elif self.keywords.get("plugins", None) is not None: plugins = self.keywords["plugins"] else: plugins = [] # Avoid the existing config. Note that we need to do this here in # addition to in conftest.py - for users running tests interactively # in e.g. IPython, conftest.py would get read in too late, so we need # to do it here - but at the same time the code here doesn't work when # running tests in parallel mode because this uses subprocesses which # don't know about the temporary config/cache. # Note, this is superfluous if the config_dir option to pytest is in use, # but it's also harmless orig_xdg_config = os.environ.get("XDG_CONFIG_HOME") with tempfile.TemporaryDirectory("astropy_config") as astropy_config: Path(astropy_config, "astropy").mkdir() os.environ["XDG_CONFIG_HOME"] = astropy_config try: return pytest.main(args=args, plugins=plugins) finally: if orig_xdg_config is None: os.environ.pop("XDG_CONFIG_HOME", None) else: os.environ["XDG_CONFIG_HOME"] = orig_xdg_config @classmethod def make_test_runner_in(cls, path): """ Constructs a `TestRunner` to run in the given path, and returns a ``test()`` function which takes the same arguments as `~astropy.tests.runner.TestRunner.run_tests`. The returned ``test()`` function will be defined in the module this was called from. This is used to implement the ``astropy.test()`` function (or the equivalent for affiliated packages). 
""" runner = cls(path) @wraps(runner.run_tests, ("__doc__",)) def test(**kwargs): return runner.run_tests(**kwargs) module = find_current_module(2) if module is not None: test.__module__ = module.__name__ # A somewhat unusual hack, but delete the attached __wrapped__ # attribute--although this is normally used to tell if the function # was wrapped with wraps, on some version of Python this is also # used to determine the signature to display in help() which is # not useful in this case. We don't really care in this case if the # function was wrapped either if hasattr(test, "__wrapped__"): del test.__wrapped__ test.__test__ = False return test @deprecated("8.0", alternative="pytest")
TestRunnerBase
python
django__django
django/contrib/postgres/fields/citext.py
{ "start": 533, "end": 948 }
class ____(EmailField): system_check_removed_details = { "msg": ( "django.contrib.postgres.fields.CIEmailField is removed except for support " "in historical migrations." ), "hint": ( 'Use EmailField(db_collation="…") with a case-insensitive ' "non-deterministic collation instead." ), "id": "fields.E906", }
CIEmailField
python
great-expectations__great_expectations
tests/datasource/fluent/test_invalid_datasource.py
{ "start": 3020, "end": 5546 }
class ____(Protocol): """ Accept a datasource config and return an InvalidDatasource instance. Raises an error if the config was valid. """ def __call__( self, config: dict[Literal["name", "type", "assets"] | Any, Any] ) -> InvalidDatasource: ... @pytest.fixture def invalid_datasource_factory() -> InvalidDSFactory: def _invalid_ds_fct(config: dict) -> InvalidDatasource: try: ds_type: type[Datasource] = DataSourceManager.type_lookup[config["type"]] ds_type(**config) except (pydantic.ValidationError, LookupError) as config_error: return InvalidDatasource(**config, config_error=config_error) raise ValueError("The Datasource was valid") return _invalid_ds_fct @pytest.mark.parametrize( "invalid_ds_cfg", [ pytest.param( { "name": "my pg", "type": "postgres", "connection_string": "postmalone+psycopg2://postgres:@localhost/test_database", }, id="invalid conn str", ), pytest.param( { "name": "my pg + asset", "type": "postgres", "connection_string": "postgresql+psycopg2://postgres:@localhost/test_database", "assets": [ { "name": "my_bad_asset", "type": "table", "query": "table assets don't have a query", } ], }, id="invalid asset", ), pytest.param( { "name": "my snowflake", "type": "snowflake", "connection_string": "${MY_CONN_STR}", "user": "invalid_extra_field", "assets": [{"name": "my_asset", "type": "table", "table_name": "foobar"}], }, id="extra field", ), pytest.param( { "name": "my pandas", "type": "pandas_filesystem", "assets": [ {"name": "my_asset", "type": "csv"}, {"name": "invalid_asset_type", "type": "whoops"}, ], }, id="pandas asset lookup error", ), pytest.param( { "name": "who knows", "type": "whoops", }, id="datasource type lookup error", ), ], )
InvalidDSFactory
python
jupyterlab__jupyterlab
jupyterlab/handlers/plugin_manager_handler.py
{ "start": 316, "end": 1915 }
class ____(APIHandler): def initialize(self, manager: PluginManager): super().initialize() self.manager = manager @web.authenticated async def get(self): """GET query returns info on plugins locks""" # note: this is informative only - validation is server-side locks = await self.manager.plugin_locks() self.set_status(200) self.finish(json.dumps(locks)) @web.authenticated async def post(self): """POST query performs an action on a specific plugin Body arguments: { "cmd": Action to perform - ["enable", "disable"] "plugin_name": Plugin name } """ data = self.get_json_body() cmd = data["cmd"] name = data["plugin_name"] if cmd not in ("enable", "disable") or not name: raise web.HTTPError( 422, f"Could not process instruction {cmd!r} with plugin name {name!r}", ) ret_value = None try: if cmd == "enable": ret_value = await self.manager.enable(name) elif cmd == "disable": ret_value = await self.manager.disable(name) except Exception as e: raise web.HTTPError(500, str(e)) from e if ret_value.status == "error": self.set_status(500) else: self.set_status(201) self.finish(json.dumps(dataclasses.asdict(ret_value))) # The path for lab plugins handler. plugins_handler_path = r"/lab/api/plugins"
PluginHandler
python
django__django
tests/fixtures_regress/models.py
{ "start": 1039, "end": 1147 }
class ____(Parent): data = models.CharField(max_length=10) # Models to regression test #7572, #20820
Child
python
neetcode-gh__leetcode
python/1985-find-the-kth-largest-integer-in-the-array.py
{ "start": 0, "end": 256 }
class ____: def kthLargestNumber(self, nums: List[str], k: int) -> str: maxHeap = [-int(n) for n in nums] heapq.heapify(maxHeap) while k>1: heapq.heappop(maxHeap) k-=1 return str(-maxHeap[0])
Solution
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1026930, "end": 1027407 }
class ____(sgqlc.types.Type): """Autogenerated return type of UpdateEnterpriseProfile""" __schema__ = github_schema __field_names__ = ("client_mutation_id", "enterprise") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation.""" enterprise = sgqlc.types.Field("Enterprise", graphql_name="enterprise") """The updated enterprise."""
UpdateEnterpriseProfilePayload
python
kubernetes-client__python
kubernetes/client/models/v1_pod_security_context.py
{ "start": 383, "end": 23939 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'app_armor_profile': 'V1AppArmorProfile', 'fs_group': 'int', 'fs_group_change_policy': 'str', 'run_as_group': 'int', 'run_as_non_root': 'bool', 'run_as_user': 'int', 'se_linux_change_policy': 'str', 'se_linux_options': 'V1SELinuxOptions', 'seccomp_profile': 'V1SeccompProfile', 'supplemental_groups': 'list[int]', 'supplemental_groups_policy': 'str', 'sysctls': 'list[V1Sysctl]', 'windows_options': 'V1WindowsSecurityContextOptions' } attribute_map = { 'app_armor_profile': 'appArmorProfile', 'fs_group': 'fsGroup', 'fs_group_change_policy': 'fsGroupChangePolicy', 'run_as_group': 'runAsGroup', 'run_as_non_root': 'runAsNonRoot', 'run_as_user': 'runAsUser', 'se_linux_change_policy': 'seLinuxChangePolicy', 'se_linux_options': 'seLinuxOptions', 'seccomp_profile': 'seccompProfile', 'supplemental_groups': 'supplementalGroups', 'supplemental_groups_policy': 'supplementalGroupsPolicy', 'sysctls': 'sysctls', 'windows_options': 'windowsOptions' } def __init__(self, app_armor_profile=None, fs_group=None, fs_group_change_policy=None, run_as_group=None, run_as_non_root=None, run_as_user=None, se_linux_change_policy=None, se_linux_options=None, seccomp_profile=None, supplemental_groups=None, supplemental_groups_policy=None, sysctls=None, windows_options=None, local_vars_configuration=None): # noqa: E501 """V1PodSecurityContext - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._app_armor_profile = None self._fs_group = None self._fs_group_change_policy = None self._run_as_group = None 
self._run_as_non_root = None self._run_as_user = None self._se_linux_change_policy = None self._se_linux_options = None self._seccomp_profile = None self._supplemental_groups = None self._supplemental_groups_policy = None self._sysctls = None self._windows_options = None self.discriminator = None if app_armor_profile is not None: self.app_armor_profile = app_armor_profile if fs_group is not None: self.fs_group = fs_group if fs_group_change_policy is not None: self.fs_group_change_policy = fs_group_change_policy if run_as_group is not None: self.run_as_group = run_as_group if run_as_non_root is not None: self.run_as_non_root = run_as_non_root if run_as_user is not None: self.run_as_user = run_as_user if se_linux_change_policy is not None: self.se_linux_change_policy = se_linux_change_policy if se_linux_options is not None: self.se_linux_options = se_linux_options if seccomp_profile is not None: self.seccomp_profile = seccomp_profile if supplemental_groups is not None: self.supplemental_groups = supplemental_groups if supplemental_groups_policy is not None: self.supplemental_groups_policy = supplemental_groups_policy if sysctls is not None: self.sysctls = sysctls if windows_options is not None: self.windows_options = windows_options @property def app_armor_profile(self): """Gets the app_armor_profile of this V1PodSecurityContext. # noqa: E501 :return: The app_armor_profile of this V1PodSecurityContext. # noqa: E501 :rtype: V1AppArmorProfile """ return self._app_armor_profile @app_armor_profile.setter def app_armor_profile(self, app_armor_profile): """Sets the app_armor_profile of this V1PodSecurityContext. :param app_armor_profile: The app_armor_profile of this V1PodSecurityContext. # noqa: E501 :type: V1AppArmorProfile """ self._app_armor_profile = app_armor_profile @property def fs_group(self): """Gets the fs_group of this V1PodSecurityContext. # noqa: E501 A special supplemental group that applies to all containers in a pod. 
Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :return: The fs_group of this V1PodSecurityContext. # noqa: E501 :rtype: int """ return self._fs_group @fs_group.setter def fs_group(self, fs_group): """Sets the fs_group of this V1PodSecurityContext. A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :param fs_group: The fs_group of this V1PodSecurityContext. # noqa: E501 :type: int """ self._fs_group = fs_group @property def fs_group_change_policy(self): """Gets the fs_group_change_policy of this V1PodSecurityContext. # noqa: E501 fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :return: The fs_group_change_policy of this V1PodSecurityContext. 
# noqa: E501 :rtype: str """ return self._fs_group_change_policy @fs_group_change_policy.setter def fs_group_change_policy(self, fs_group_change_policy): """Sets the fs_group_change_policy of this V1PodSecurityContext. fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :param fs_group_change_policy: The fs_group_change_policy of this V1PodSecurityContext. # noqa: E501 :type: str """ self._fs_group_change_policy = fs_group_change_policy @property def run_as_group(self): """Gets the run_as_group of this V1PodSecurityContext. # noqa: E501 The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :return: The run_as_group of this V1PodSecurityContext. # noqa: E501 :rtype: int """ return self._run_as_group @run_as_group.setter def run_as_group(self, run_as_group): """Sets the run_as_group of this V1PodSecurityContext. The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :param run_as_group: The run_as_group of this V1PodSecurityContext. 
# noqa: E501 :type: int """ self._run_as_group = run_as_group @property def run_as_non_root(self): """Gets the run_as_non_root of this V1PodSecurityContext. # noqa: E501 Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. # noqa: E501 :return: The run_as_non_root of this V1PodSecurityContext. # noqa: E501 :rtype: bool """ return self._run_as_non_root @run_as_non_root.setter def run_as_non_root(self, run_as_non_root): """Sets the run_as_non_root of this V1PodSecurityContext. Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. # noqa: E501 :param run_as_non_root: The run_as_non_root of this V1PodSecurityContext. # noqa: E501 :type: bool """ self._run_as_non_root = run_as_non_root @property def run_as_user(self): """Gets the run_as_user of this V1PodSecurityContext. # noqa: E501 The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :return: The run_as_user of this V1PodSecurityContext. 
# noqa: E501 :rtype: int """ return self._run_as_user @run_as_user.setter def run_as_user(self, run_as_user): """Sets the run_as_user of this V1PodSecurityContext. The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :param run_as_user: The run_as_user of this V1PodSecurityContext. # noqa: E501 :type: int """ self._run_as_user = run_as_user @property def se_linux_change_policy(self): """Gets the se_linux_change_policy of this V1PodSecurityContext. # noqa: E501 seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. Valid values are \"MountOption\" and \"Recursive\". \"Recursive\" means relabeling of all files on all Pod volumes by the container runtime. This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. \"MountOption\" mounts all eligible Pod volumes with `-o context` mount option. This requires all Pods that share the same volume to use the same SELinux label. It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their CSIDriver instance. Other volumes are always re-labelled recursively. \"MountOption\" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, \"MountOption\" is used. 
If not specified and SELinuxMount feature gate is disabled, \"MountOption\" is used for ReadWriteOncePod volumes and \"Recursive\" for all other volumes. This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :return: The se_linux_change_policy of this V1PodSecurityContext. # noqa: E501 :rtype: str """ return self._se_linux_change_policy @se_linux_change_policy.setter def se_linux_change_policy(self, se_linux_change_policy): """Sets the se_linux_change_policy of this V1PodSecurityContext. seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. Valid values are \"MountOption\" and \"Recursive\". \"Recursive\" means relabeling of all files on all Pod volumes by the container runtime. This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. \"MountOption\" mounts all eligible Pod volumes with `-o context` mount option. This requires all Pods that share the same volume to use the same SELinux label. It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their CSIDriver instance. Other volumes are always re-labelled recursively. \"MountOption\" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, \"MountOption\" is used. 
If not specified and SELinuxMount feature gate is disabled, \"MountOption\" is used for ReadWriteOncePod volumes and \"Recursive\" for all other volumes. This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :param se_linux_change_policy: The se_linux_change_policy of this V1PodSecurityContext. # noqa: E501 :type: str """ self._se_linux_change_policy = se_linux_change_policy @property def se_linux_options(self): """Gets the se_linux_options of this V1PodSecurityContext. # noqa: E501 :return: The se_linux_options of this V1PodSecurityContext. # noqa: E501 :rtype: V1SELinuxOptions """ return self._se_linux_options @se_linux_options.setter def se_linux_options(self, se_linux_options): """Sets the se_linux_options of this V1PodSecurityContext. :param se_linux_options: The se_linux_options of this V1PodSecurityContext. # noqa: E501 :type: V1SELinuxOptions """ self._se_linux_options = se_linux_options @property def seccomp_profile(self): """Gets the seccomp_profile of this V1PodSecurityContext. # noqa: E501 :return: The seccomp_profile of this V1PodSecurityContext. # noqa: E501 :rtype: V1SeccompProfile """ return self._seccomp_profile @seccomp_profile.setter def seccomp_profile(self, seccomp_profile): """Sets the seccomp_profile of this V1PodSecurityContext. :param seccomp_profile: The seccomp_profile of this V1PodSecurityContext. # noqa: E501 :type: V1SeccompProfile """ self._seccomp_profile = seccomp_profile @property def supplemental_groups(self): """Gets the supplemental_groups of this V1PodSecurityContext. # noqa: E501 A list of groups applied to the first process run in each container, in addition to the container's primary GID and fsGroup (if specified). 
If the SupplementalGroupsPolicy feature is enabled, the supplementalGroupsPolicy field determines whether these are in addition to or instead of any group memberships defined in the container image. If unspecified, no additional groups are added, though group memberships defined in the container image may still be used, depending on the supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :return: The supplemental_groups of this V1PodSecurityContext. # noqa: E501 :rtype: list[int] """ return self._supplemental_groups @supplemental_groups.setter def supplemental_groups(self, supplemental_groups): """Sets the supplemental_groups of this V1PodSecurityContext. A list of groups applied to the first process run in each container, in addition to the container's primary GID and fsGroup (if specified). If the SupplementalGroupsPolicy feature is enabled, the supplementalGroupsPolicy field determines whether these are in addition to or instead of any group memberships defined in the container image. If unspecified, no additional groups are added, though group memberships defined in the container image may still be used, depending on the supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :param supplemental_groups: The supplemental_groups of this V1PodSecurityContext. # noqa: E501 :type: list[int] """ self._supplemental_groups = supplemental_groups @property def supplemental_groups_policy(self): """Gets the supplemental_groups_policy of this V1PodSecurityContext. # noqa: E501 Defines how supplemental groups of the first container processes are calculated. Valid values are \"Merge\" and \"Strict\". If not specified, \"Merge\" is used. (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled and the container runtime must implement support for this feature. Note that this field cannot be set when spec.os.name is windows. 
# noqa: E501 :return: The supplemental_groups_policy of this V1PodSecurityContext. # noqa: E501 :rtype: str """ return self._supplemental_groups_policy @supplemental_groups_policy.setter def supplemental_groups_policy(self, supplemental_groups_policy): """Sets the supplemental_groups_policy of this V1PodSecurityContext. Defines how supplemental groups of the first container processes are calculated. Valid values are \"Merge\" and \"Strict\". If not specified, \"Merge\" is used. (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled and the container runtime must implement support for this feature. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :param supplemental_groups_policy: The supplemental_groups_policy of this V1PodSecurityContext. # noqa: E501 :type: str """ self._supplemental_groups_policy = supplemental_groups_policy @property def sysctls(self): """Gets the sysctls of this V1PodSecurityContext. # noqa: E501 Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :return: The sysctls of this V1PodSecurityContext. # noqa: E501 :rtype: list[V1Sysctl] """ return self._sysctls @sysctls.setter def sysctls(self, sysctls): """Sets the sysctls of this V1PodSecurityContext. Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows. # noqa: E501 :param sysctls: The sysctls of this V1PodSecurityContext. # noqa: E501 :type: list[V1Sysctl] """ self._sysctls = sysctls @property def windows_options(self): """Gets the windows_options of this V1PodSecurityContext. # noqa: E501 :return: The windows_options of this V1PodSecurityContext. 
# noqa: E501 :rtype: V1WindowsSecurityContextOptions """ return self._windows_options @windows_options.setter def windows_options(self, windows_options): """Sets the windows_options of this V1PodSecurityContext. :param windows_options: The windows_options of this V1PodSecurityContext. # noqa: E501 :type: V1WindowsSecurityContextOptions """ self._windows_options = windows_options def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1PodSecurityContext): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1PodSecurityContext): return True return self.to_dict() != other.to_dict()
V1PodSecurityContext
python
cherrypy__cherrypy
cherrypy/test/test_states.py
{ "start": 1724, "end": 9358 }
class ____(helper.CPWebCase): setup_server = staticmethod(setup_server) def setUp(self): cherrypy.server.socket_timeout = 0.1 self.do_gc_test = False def test_0_NormalStateFlow(self): engine.stop() # Our db_connection should not be running self.assertEqual(db_connection.running, False) self.assertEqual(db_connection.startcount, 1) self.assertEqual(len(db_connection.threads), 0) # Test server start engine.start() self.assertEqual(engine.state, engine.states.STARTED) host = cherrypy.server.socket_host port = cherrypy.server.socket_port portend.occupied(host, port, timeout=0.1) # The db_connection should be running now self.assertEqual(db_connection.running, True) self.assertEqual(db_connection.startcount, 2) self.assertEqual(len(db_connection.threads), 0) self.getPage('/') self.assertBody('Hello World') self.assertEqual(len(db_connection.threads), 1) # Test engine stop. This will also stop the HTTP server. engine.stop() self.assertEqual(engine.state, engine.states.STOPPED) # Verify that our custom stop function was called self.assertEqual(db_connection.running, False) self.assertEqual(len(db_connection.threads), 0) # Block the main thread now and verify that exit() works. 
def exittest(): self.getPage('/') self.assertBody('Hello World') engine.exit() cherrypy.server.start() engine.start_with_callback(exittest) engine.block() self.assertEqual(engine.state, engine.states.EXITING) def test_1_Restart(self): cherrypy.server.start() engine.start() # The db_connection should be running now self.assertEqual(db_connection.running, True) grace = db_connection.gracecount self.getPage('/') self.assertBody('Hello World') self.assertEqual(len(db_connection.threads), 1) # Test server restart from this thread engine.graceful() self.assertEqual(engine.state, engine.states.STARTED) self.getPage('/') self.assertBody('Hello World') self.assertEqual(db_connection.running, True) self.assertEqual(db_connection.gracecount, grace + 1) self.assertEqual(len(db_connection.threads), 1) # Test server restart from inside a page handler self.getPage('/graceful') self.assertEqual(engine.state, engine.states.STARTED) self.assertBody('app was (gracefully) restarted succesfully') self.assertEqual(db_connection.running, True) self.assertEqual(db_connection.gracecount, grace + 2) # Since we are requesting synchronously, is only one thread used? # Note that the "/graceful" request has been flushed. self.assertEqual(len(db_connection.threads), 0) engine.stop() self.assertEqual(engine.state, engine.states.STOPPED) self.assertEqual(db_connection.running, False) self.assertEqual(len(db_connection.threads), 0) def test_2_KeyboardInterrupt(self): # Raise a keyboard interrupt in the HTTP server's main thread. # We must start the server in this, the main thread engine.start() cherrypy.server.start() self.persistent = True try: # Make the first request and assert there's no "Connection: close". 
self.getPage('/') self.assertStatus('200 OK') self.assertBody('Hello World') self.assertNoHeader('Connection') cherrypy.server.httpserver.interrupt = KeyboardInterrupt engine.block() self.assertEqual(db_connection.running, False) self.assertEqual(len(db_connection.threads), 0) self.assertEqual(engine.state, engine.states.EXITING) finally: self.persistent = False # Raise a keyboard interrupt in a page handler; on multithreaded # servers, this should occur in one of the worker threads. # This should raise a BadStatusLine error, since the worker # thread will just die without writing a response. engine.start() cherrypy.server.start() # From python3.5 a new exception is retuned when the connection # ends abruptly: # http.client.RemoteDisconnected # RemoteDisconnected is a subclass of: # (ConnectionResetError, http.client.BadStatusLine) # and ConnectionResetError is an indirect subclass of: # OSError # From python 3.3 an up socket.error is an alias to OSError # following PEP-3151, therefore http.client.RemoteDisconnected # is considered a socket.error. # # raise_subcls specifies the classes that are not going # to be considered as a socket.error for the retries. # Given that RemoteDisconnected is part BadStatusLine # we can use the same call for all py3 versions without # sideffects. python < 3.5 will raise directly BadStatusLine # which is not a subclass for socket.error/OSError. try: self.getPage('/ctrlc', raise_subcls=BadStatusLine) except BadStatusLine: pass else: print(self.body) self.fail('AssertionError: BadStatusLine not raised') engine.block() self.assertEqual(db_connection.running, False) self.assertEqual(len(db_connection.threads), 0) def test_4_Autoreload(self): # If test_3 has not been executed, the server won't be stopped, # so we'll have to do it. 
if engine.state != engine.states.EXITING: engine.exit() # Start the demo script in a new process p = helper.CPProcess(ssl=(self.scheme.lower() == 'https')) p.write_conf(extra='test_case_name: "test_4_Autoreload"') p.start(imports='cherrypy.test._test_states_demo') try: self.getPage('/start') start = float(self.body) # Give the autoreloader time to cache the file time. time.sleep(2) # Touch the file os.utime(os.path.join(thisdir, '_test_states_demo.py'), None) # Give the autoreloader time to re-exec the process time.sleep(2) host = cherrypy.server.socket_host port = cherrypy.server.socket_port portend.occupied(host, port, timeout=5) self.getPage('/start') if not (float(self.body) > start): raise AssertionError( 'start time %s not greater than %s' % (float(self.body), start), ) finally: # Shut down the spawned process self.getPage('/exit') p.join() def test_5_Start_Error(self): # If test_3 has not been executed, the server won't be stopped, # so we'll have to do it. if engine.state != engine.states.EXITING: engine.exit() # If a process errors during start, it should stop the engine # and exit with a non-zero exit code. p = helper.CPProcess(ssl=(self.scheme.lower() == 'https'), wait=True) p.write_conf( extra="""starterror: True test_case_name: "test_5_Start_Error" """, ) p.start(imports='cherrypy.test._test_states_demo') if p.exit_code == 0: self.fail('Process failed to return nonzero exit code.')
ServerStateTests
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/math_ops/reduction_ops_test.py
{ "start": 1822, "end": 3209 }
class ____(test.TestCase): def _check(self, shape, axes, result): output = math_ops.reduced_shape(shape, axes=axes) self.assertAllEqual(output, result) @test_util.run_deprecated_v1 def testSimple(self): with self.cached_session(): self._check([3], [], [3]) self._check([3], [0], [1]) self._check([5, 3], [], [5, 3]) self._check([5, 3], [0], [1, 3]) self._check([5, 3], [1], [5, 1]) self._check([5, 3], [0, 1], [1, 1]) @test_util.run_deprecated_v1 def testZeros(self): """Check that reduced_shape does the right thing with zero dimensions.""" with self.cached_session(): self._check([0], [], [0]) self._check([0], [0], [1]) self._check([0, 3], [], [0, 3]) self._check([0, 3], [0], [1, 3]) self._check([0, 3], [1], [0, 1]) self._check([0, 3], [0, 1], [1, 1]) self._check([3, 0], [], [3, 0]) self._check([3, 0], [0], [1, 0]) self._check([3, 0], [1], [3, 1]) self._check([3, 0], [0, 1], [1, 1]) @test_util.run_deprecated_v1 def testNegAxes(self): with self.cached_session(): self._check([10, 10, 10], [-1], [10, 10, 1]) self._check([10, 10, 10], [-1, 2], [10, 10, 1]) self._check([10, 10, 10], [-1, -1], [10, 10, 1]) self._check([10, 10, 10], [-1, 0], [1, 10, 1]) self._check([10, 10, 10], [-3], [1, 10, 10])
ReducedShapeTest
python
PyCQA__pylint
tests/functional/a/access/access_to_protected_members_typing.py
{ "start": 134, "end": 912 }
class ____: """Class with protected members.""" class _Inner_Class: """Inner class with protected members.""" def __init__(self) -> None: self.data = 1 def return_data(self) -> int: """Return data""" return self.data def return_private_class(self) -> MyClass._Inner_Class: """Doing nothing.""" return self._Inner_Class() def access_protected_class(data: MyClass._Inner_Class) -> int: """Function that always receives a protected class.""" return data.return_data() + 1 def pass_protected_class() -> None: """Function that passes a protected class to another function.""" data_value = access_protected_class(MyClass().return_private_class()) print(data_value)
MyClass
python
catalyst-team__catalyst
examples/detection/custom_runner.py
{ "start": 74, "end": 666 }
class ____(ConfigRunner): """Runner for SSD models.""" def handle_batch(self, batch): """Do a forward pass and compute loss. Args: batch (Dict[str, Any]): batch of data. """ locs, confs = self.model(batch["image"]) regression_loss, classification_loss = self.criterion( locs, batch["bboxes"], confs, batch["labels"].long() ) self.batch["predicted_bboxes"] = locs self.batch["predicted_scores"] = confs self.batch_metrics["loss"] = regression_loss + classification_loss
SSDDetectionRunner
python
sanic-org__sanic
sanic/request/parameters.py
{ "start": 71, "end": 1130 }
class ____(dict): """Hosts a dict with lists as values where get returns the first value of the list and getlist returns the whole shebang""" # noqa: E501 def get(self, name: str, default: Optional[Any] = None) -> Optional[Any]: """Return the first value, either the default or actual Args: name (str): The name of the parameter default (Optional[Any], optional): The default value. Defaults to None. Returns: Optional[Any]: The first value of the list """ # noqa: E501 return super().get(name, [default])[0] def getlist( self, name: str, default: Optional[list[Any]] = None ) -> list[Any]: """Return the entire list Args: name (str): The name of the parameter default (Optional[List[Any]], optional): The default value. Defaults to None. Returns: list[Any]: The entire list of values or [] if not found """ # noqa: E501 return super().get(name, default) or []
RequestParameters
python
apache__airflow
providers/postgres/tests/unit/postgres/hooks/test_postgres.py
{ "start": 19836, "end": 21916 }
class ____: """PostgresHookConn tests that are specific to psycopg2.""" def setup_method(self): self.connection = Connection(login="login", password="password", host="host", schema="database") class UnitTestPostgresHook(PostgresHook): conn_name_attr = "test_conn_id" self.db_hook = UnitTestPostgresHook() self.db_hook.get_connection = mock.Mock() self.db_hook.get_connection.return_value = self.connection def test_sqlalchemy_url(self): conn = Connection(login="login-conn", password="password-conn", host="host", schema="database") hook = PostgresHook(connection=conn) expected = "postgresql://login-conn:password-conn@host/database" if SQLALCHEMY_V_1_4: assert str(hook.sqlalchemy_url) == expected else: assert hook.sqlalchemy_url.render_as_string(hide_password=False) == expected def test_sqlalchemy_url_with_sqlalchemy_query(self): conn = Connection( login="login-conn", password="password-conn", host="host", schema="database", extra=dict(sqlalchemy_query={"gssencmode": "disable"}), ) hook = PostgresHook(connection=conn) expected = "postgresql://login-conn:password-conn@host/database?gssencmode=disable" if SQLALCHEMY_V_1_4: assert str(hook.sqlalchemy_url) == expected else: assert hook.sqlalchemy_url.render_as_string(hide_password=False) == expected def test_get_conn_cursor(self, mock_connect): self.connection.extra = '{"cursor": "dictcursor", "sqlalchemy_query": {"gssencmode": "disable"}}' self.db_hook.get_conn() mock_connect.assert_called_once_with( cursor_factory=psycopg2.extras.DictCursor, user="login", password="password", host="host", dbname="database", port=None, ) @pytest.mark.skipif(not USE_PSYCOPG3, reason="psycopg v3 or sqlalchemy v2 not available")
TestPostgresHookConnPPG2
python
mkdocs__mkdocs
mkdocs/contrib/search/__init__.py
{ "start": 2209, "end": 5230 }
class ____(BasePlugin[_PluginConfig]): """Add a search feature to MkDocs.""" def on_config(self, config: MkDocsConfig, **kwargs) -> MkDocsConfig: """Add plugin templates and scripts to config.""" if config.theme.get('include_search_page'): config.theme.static_templates.add('search.html') if not config.theme.get('search_index_only'): path = os.path.join(base_path, 'templates') config.theme.dirs.append(path) if 'search/main.js' not in config.extra_javascript: config.extra_javascript.append('search/main.js') # type: ignore if self.config.lang is None: # lang setting undefined. Set default based on theme locale validate = _PluginConfig.lang.run_validation self.config.lang = validate(config.theme.locale.language) # The `python` method of `prebuild_index` is pending deprecation as of version 1.2. # TODO: Raise a deprecation warning in a future release (1.3?). if self.config.prebuild_index == 'python': log.info( "The 'python' method of the search plugin's 'prebuild_index' config option " "is pending deprecation and will not be supported in a future release." ) return config def on_pre_build(self, config: MkDocsConfig, **kwargs) -> None: """Create search index instance for later use.""" self.search_index = SearchIndex(**self.config) def on_page_context(self, context: TemplateContext, page: Page, **kwargs) -> None: """Add page to search index.""" self.search_index.add_entry_from_context(page) def on_post_build(self, config: MkDocsConfig, **kwargs) -> None: """Build search index.""" output_base_path = os.path.join(config.site_dir, 'search') search_index = self.search_index.generate_search_index() json_output_path = os.path.join(output_base_path, 'search_index.json') utils.write_file(search_index.encode('utf-8'), json_output_path) assert self.config.lang is not None if not config.theme.get('search_index_only'): # Include language support files in output. Copy them directly # so that only the needed files are included. 
files = [] if len(self.config.lang) > 1 or 'en' not in self.config.lang: files.append('lunr.stemmer.support.js') if len(self.config.lang) > 1: files.append('lunr.multi.js') if 'ja' in self.config.lang or 'jp' in self.config.lang: files.append('tinyseg.js') for lang in self.config.lang: if lang != 'en': files.append(f'lunr.{lang}.js') for filename in files: from_path = os.path.join(base_path, 'lunr-language', filename) to_path = os.path.join(output_base_path, filename) utils.copy_file(from_path, to_path)
SearchPlugin
python
Textualize__rich
rich/json.py
{ "start": 189, "end": 5019 }
class ____: """A renderable which pretty prints JSON. Args: json (str): JSON encoded data. indent (Union[None, int, str], optional): Number of characters to indent by. Defaults to 2. highlight (bool, optional): Enable highlighting. Defaults to True. skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False. ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False. check_circular (bool, optional): Check for circular references. Defaults to True. allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True. default (Callable, optional): A callable that converts values that can not be encoded in to something that can be JSON encoded. Defaults to None. sort_keys (bool, optional): Sort dictionary keys. Defaults to False. """ def __init__( self, json: str, indent: Union[None, int, str] = 2, highlight: bool = True, skip_keys: bool = False, ensure_ascii: bool = False, check_circular: bool = True, allow_nan: bool = True, default: Optional[Callable[[Any], Any]] = None, sort_keys: bool = False, ) -> None: data = loads(json) json = dumps( data, indent=indent, skipkeys=skip_keys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, default=default, sort_keys=sort_keys, ) highlighter = JSONHighlighter() if highlight else NullHighlighter() self.text = highlighter(json) self.text.no_wrap = True self.text.overflow = None @classmethod def from_data( cls, data: Any, indent: Union[None, int, str] = 2, highlight: bool = True, skip_keys: bool = False, ensure_ascii: bool = False, check_circular: bool = True, allow_nan: bool = True, default: Optional[Callable[[Any], Any]] = None, sort_keys: bool = False, ) -> "JSON": """Encodes a JSON object from arbitrary data. Args: data (Any): An object that may be encoded in to JSON indent (Union[None, int, str], optional): Number of characters to indent by. Defaults to 2. highlight (bool, optional): Enable highlighting. Defaults to True. 
default (Callable, optional): Optional callable which will be called for objects that cannot be serialized. Defaults to None. skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False. ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False. check_circular (bool, optional): Check for circular references. Defaults to True. allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True. default (Callable, optional): A callable that converts values that can not be encoded in to something that can be JSON encoded. Defaults to None. sort_keys (bool, optional): Sort dictionary keys. Defaults to False. Returns: JSON: New JSON object from the given data. """ json_instance: "JSON" = cls.__new__(cls) json = dumps( data, indent=indent, skipkeys=skip_keys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, default=default, sort_keys=sort_keys, ) highlighter = JSONHighlighter() if highlight else NullHighlighter() json_instance.text = highlighter(json) json_instance.text.no_wrap = True json_instance.text.overflow = None return json_instance def __rich__(self) -> Text: return self.text if __name__ == "__main__": import argparse import sys parser = argparse.ArgumentParser(description="Pretty print json") parser.add_argument( "path", metavar="PATH", help="path to file, or - for stdin", ) parser.add_argument( "-i", "--indent", metavar="SPACES", type=int, help="Number of spaces in an indent", default=2, ) args = parser.parse_args() from rich.console import Console console = Console() error_console = Console(stderr=True) try: if args.path == "-": json_data = sys.stdin.read() else: json_data = Path(args.path).read_text() except Exception as error: error_console.print(f"Unable to read {args.path!r}; {error}") sys.exit(-1) console.print(JSON(json_data, indent=args.indent), soft_wrap=True)
JSON
python
getsentry__sentry
src/sentry/api/serializers/models/team.py
{ "start": 12063, "end": 12197 }
class ____(TypedDict): schemas: list[str] id: str displayName: str meta: SCIMMeta
OrganizationTeamSCIMSerializerRequired
python
dask__dask
dask/dataframe/dask_expr/_rolling.py
{ "start": 5701, "end": 5755 }
class ____(RollingReduction): how = "cov"
RollingCov
python
huggingface__transformers
src/transformers/models/canine/modeling_canine.py
{ "start": 31117, "end": 43556 }
class ____(CaninePreTrainedModel): def __init__(self, config, add_pooling_layer=True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config shallow_config = copy.deepcopy(config) shallow_config.num_hidden_layers = 1 self.char_embeddings = CanineEmbeddings(config) # shallow/low-dim transformer encoder to get a initial character encoding self.initial_char_encoder = CanineEncoder( shallow_config, local=True, always_attend_to_first_position=False, first_position_attends_to_all=False, attend_from_chunk_width=config.local_transformer_stride, attend_from_chunk_stride=config.local_transformer_stride, attend_to_chunk_width=config.local_transformer_stride, attend_to_chunk_stride=config.local_transformer_stride, ) self.chars_to_molecules = CharactersToMolecules(config) # deep transformer encoder self.encoder = CanineEncoder(config) self.projection = ConvProjection(config) # shallow/low-dim transformer encoder to get a final character encoding self.final_char_encoder = CanineEncoder(shallow_config) self.pooler = CaninePooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def _create_3d_attention_mask_from_input_mask(self, from_tensor, to_mask): """ Create 3D attention mask from a 2D tensor mask. Args: from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...]. to_mask: int32 Tensor of shape [batch_size, to_seq_length]. Returns: float Tensor of shape [batch_size, from_seq_length, to_seq_length]. """ batch_size, from_seq_length = from_tensor.shape[0], from_tensor.shape[1] to_seq_length = to_mask.shape[1] to_mask = torch.reshape(to_mask, (batch_size, 1, to_seq_length)).float() # We don't assume that `from_tensor` is a mask (although it could be). We # don't actually care if we attend *from* padding tokens (only *to* padding) # tokens so we create a tensor of all ones. 
broadcast_ones = torch.ones(size=(batch_size, from_seq_length, 1), dtype=torch.float32, device=to_mask.device) # Here we broadcast along two dimensions to create the mask. mask = broadcast_ones * to_mask return mask def _downsample_attention_mask(self, char_attention_mask: torch.Tensor, downsampling_rate: int): """Downsample 2D character attention mask to 2D molecule attention mask using MaxPool1d layer.""" # first, make char_attention_mask 3D by adding a channel dim batch_size, char_seq_len = char_attention_mask.shape poolable_char_mask = torch.reshape(char_attention_mask, (batch_size, 1, char_seq_len)) # next, apply MaxPool1d to get pooled_molecule_mask of shape (batch_size, 1, mol_seq_len) pooled_molecule_mask = torch.nn.MaxPool1d(kernel_size=downsampling_rate, stride=downsampling_rate)( poolable_char_mask.float() ) # finally, squeeze to get tensor of shape (batch_size, mol_seq_len) molecule_attention_mask = torch.squeeze(pooled_molecule_mask, dim=-1) return molecule_attention_mask def _repeat_molecules(self, molecules: torch.Tensor, char_seq_length: int) -> torch.Tensor: """Repeats molecules to make them the same length as the char sequence.""" rate = self.config.downsampling_rate molecules_without_extra_cls = molecules[:, 1:, :] # `repeated`: [batch_size, almost_char_seq_len, molecule_hidden_size] repeated = torch.repeat_interleave(molecules_without_extra_cls, repeats=rate, dim=-2) # So far, we've repeated the elements sufficient for any `char_seq_length` # that's a multiple of `downsampling_rate`. Now we account for the last # n elements (n < `downsampling_rate`), i.e. the remainder of floor # division. We do this by repeating the last molecule a few extra times. last_molecule = molecules[:, -1:, :] remainder_length = char_seq_length % rate remainder_repeated = torch.repeat_interleave( last_molecule, # +1 molecule to compensate for truncation. 
repeats=remainder_length + rate, dim=-2, ) # `repeated`: [batch_size, char_seq_len, molecule_hidden_size] return torch.cat([repeated, remainder_repeated], dim=-2) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, CanineModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) molecule_attention_mask = self._downsample_attention_mask( attention_mask, downsampling_rate=self.config.downsampling_rate ) extended_molecule_attention_mask: torch.Tensor = self.get_extended_attention_mask( molecule_attention_mask, (batch_size, molecule_attention_mask.shape[-1]) ) # `input_char_embeddings`: shape (batch_size, char_seq, char_dim) input_char_embeddings = self.char_embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) # Contextualize character embeddings using shallow Transformer. # We use a 3D attention mask for the local attention. # `input_char_encoding`: shape (batch_size, char_seq_len, char_dim) char_attention_mask = self._create_3d_attention_mask_from_input_mask( input_ids if input_ids is not None else inputs_embeds, attention_mask ) init_chars_encoder_outputs = self.initial_char_encoder( input_char_embeddings, attention_mask=char_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) input_char_encoding = init_chars_encoder_outputs.last_hidden_state # Downsample chars to molecules. # The following lines have dimensions: [batch, molecule_seq, molecule_dim]. # In this transformation, we change the dimensionality from `char_dim` to # `molecule_dim`, but do *NOT* add a resnet connection. Instead, we rely on # the resnet connections (a) from the final char transformer stack back into # the original char transformer stack and (b) the resnet connections from # the final char transformer stack back into the deep BERT stack of # molecules. # # Empirically, it is critical to use a powerful enough transformation here: # mean pooling causes training to diverge with huge gradient norms in this # region of the model; using a convolution here resolves this issue. 
From # this, it seems that molecules and characters require a very different # feature space; intuitively, this makes sense. init_molecule_encoding = self.chars_to_molecules(input_char_encoding) # Deep BERT encoder # `molecule_sequence_output`: shape (batch_size, mol_seq_len, mol_dim) encoder_outputs = self.encoder( init_molecule_encoding, attention_mask=extended_molecule_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) molecule_sequence_output = encoder_outputs[0] pooled_output = self.pooler(molecule_sequence_output) if self.pooler is not None else None # Upsample molecules back to characters. # `repeated_molecules`: shape (batch_size, char_seq_len, mol_hidden_size) repeated_molecules = self._repeat_molecules(molecule_sequence_output, char_seq_length=input_shape[-1]) # Concatenate representations (contextualized char embeddings and repeated molecules): # `concat`: shape [batch_size, char_seq_len, molecule_hidden_size+char_hidden_final] concat = torch.cat([input_char_encoding, repeated_molecules], dim=-1) # Project representation dimension back to hidden_size # `sequence_output`: shape (batch_size, char_seq_len, hidden_size]) sequence_output = self.projection(concat) # Apply final shallow Transformer # `sequence_output`: shape (batch_size, char_seq_len, hidden_size]) final_chars_encoder_outputs = self.final_char_encoder( sequence_output, attention_mask=extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) sequence_output = final_chars_encoder_outputs.last_hidden_state if output_hidden_states: deep_encoder_hidden_states = encoder_outputs.hidden_states if return_dict else encoder_outputs[1] all_hidden_states = ( all_hidden_states + init_chars_encoder_outputs.hidden_states + deep_encoder_hidden_states + final_chars_encoder_outputs.hidden_states ) if output_attentions: deep_encoder_self_attentions = encoder_outputs.attentions if return_dict 
else encoder_outputs[-1] all_self_attentions = ( all_self_attentions + init_chars_encoder_outputs.attentions + deep_encoder_self_attentions + final_chars_encoder_outputs.attentions ) if not return_dict: output = (sequence_output, pooled_output) output += tuple(v for v in [all_hidden_states, all_self_attentions] if v is not None) return output return CanineModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @auto_docstring( custom_intro=""" CANINE Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """ )
CanineModel
python
sqlalchemy__sqlalchemy
test/dialect/sqlite/test_dialect.py
{ "start": 22508, "end": 26439 }
class ____(fixtures.TestBase, AssertsCompiledSQL): __only_on__ = "sqlite" __skip_if__ = (full_text_search_missing,) __backend__ = True @classmethod def setup_test_class(cls): global metadata, cattable, matchtable metadata = MetaData() exec_sql( testing.db, """ CREATE VIRTUAL TABLE cattable using FTS3 ( id INTEGER NOT NULL, description VARCHAR(50), PRIMARY KEY (id) ) """, ) cattable = Table("cattable", metadata, autoload_with=testing.db) exec_sql( testing.db, """ CREATE VIRTUAL TABLE matchtable using FTS3 ( id INTEGER NOT NULL, title VARCHAR(200), category_id INTEGER NOT NULL, PRIMARY KEY (id) ) """, ) matchtable = Table("matchtable", metadata, autoload_with=testing.db) with testing.db.begin() as conn: metadata.create_all(conn) conn.execute( cattable.insert(), [ {"id": 1, "description": "Python"}, {"id": 2, "description": "Ruby"}, ], ) conn.execute( matchtable.insert(), [ { "id": 1, "title": "Agile Web Development with Rails", "category_id": 2, }, {"id": 2, "title": "Dive Into Python", "category_id": 1}, { "id": 3, "title": "Programming Matz's Ruby", "category_id": 2, }, { "id": 4, "title": "The Definitive Guide to Django", "category_id": 1, }, { "id": 5, "title": "Python in a Nutshell", "category_id": 1, }, ], ) @classmethod def teardown_test_class(cls): metadata.drop_all(testing.db) def test_expression(self): self.assert_compile( matchtable.c.title.match("somstr"), "matchtable.title MATCH ?", dialect=sqlite.dialect(), ) def test_simple_match(self, connection): results = connection.execute( matchtable.select() .where(matchtable.c.title.match("python")) .order_by(matchtable.c.id) ).fetchall() eq_([2, 5], [r.id for r in results]) def test_simple_prefix_match(self, connection): results = connection.execute( matchtable.select().where(matchtable.c.title.match("nut*")) ).fetchall() eq_([5], [r.id for r in results]) def test_or_match(self, connection): results2 = connection.execute( matchtable.select() .where(matchtable.c.title.match("nutshell OR ruby")) 
.order_by(matchtable.c.id) ).fetchall() eq_([3, 5], [r.id for r in results2]) def test_and_match(self, connection): results2 = connection.execute( matchtable.select().where( matchtable.c.title.match("python nutshell") ) ).fetchall() eq_([5], [r.id for r in results2]) def test_match_across_joins(self, connection): results = connection.execute( matchtable.select() .where( and_( cattable.c.id == matchtable.c.category_id, cattable.c.description.match("Ruby"), ) ) .order_by(matchtable.c.id) ).fetchall() eq_([1, 3], [r.id for r in results])
MatchTest
python
scrapy__scrapy
scrapy/pipelines/files.py
{ "start": 12131, "end": 14283 }
class ____: FTP_USERNAME: str | None = None FTP_PASSWORD: str | None = None USE_ACTIVE_MODE: bool | None = None def __init__(self, uri: str): if not uri.startswith("ftp://"): raise ValueError(f"Incorrect URI scheme in {uri}, expected 'ftp'") u = urlparse(uri) assert u.port assert u.hostname self.port: int = u.port self.host: str = u.hostname self.port = int(u.port or 21) assert self.FTP_USERNAME assert self.FTP_PASSWORD self.username: str = u.username or self.FTP_USERNAME self.password: str = u.password or self.FTP_PASSWORD self.basedir: str = u.path.rstrip("/") def persist_file( self, path: str, buf: BytesIO, info: MediaPipeline.SpiderInfo, meta: dict[str, Any] | None = None, headers: dict[str, str] | None = None, ) -> Deferred[Any]: path = f"{self.basedir}/{path}" return deferToThread( ftp_store_file, path=path, file=buf, host=self.host, port=self.port, username=self.username, password=self.password, use_active_mode=self.USE_ACTIVE_MODE, ) def stat_file( self, path: str, info: MediaPipeline.SpiderInfo ) -> Deferred[StatInfo]: def _stat_file(path: str) -> StatInfo: try: ftp = FTP() ftp.connect(self.host, self.port) ftp.login(self.username, self.password) if self.USE_ACTIVE_MODE: ftp.set_pasv(False) file_path = f"{self.basedir}/{path}" last_modified = float(ftp.voidcmd(f"MDTM {file_path}")[4:].strip()) m = hashlib.md5() # noqa: S324 ftp.retrbinary(f"RETR {file_path}", m.update) return {"last_modified": last_modified, "checksum": m.hexdigest()} # The file doesn't exist except Exception: return {} return cast("Deferred[StatInfo]", deferToThread(_stat_file, path))
FTPFilesStore
python
google__python-fire
fire/test_components.py
{ "start": 1975, "end": 2490 }
class ____: """Class with functions that have default arguments.""" def double(self, count=0): """Returns the input multiplied by 2. Args: count: Input number that you want to double. Returns: A number that is the double of count. """ return 2 * count def triple(self, count=0): return 3 * count def text( self, string=('0001020304050607080910111213141516171819' '2021222324252627282930313233343536373839') ): return string
WithDefaults
python
langchain-ai__langchain
libs/partners/groq/tests/unit_tests/fake/callbacks.py
{ "start": 6590, "end": 9227 }
class ____(AsyncCallbackHandler, BaseFakeCallbackHandlerMixin): """Fake async callback handler for testing.""" @property def ignore_llm(self) -> bool: """Whether to ignore LLM callbacks.""" return self.ignore_llm_ @property def ignore_chain(self) -> bool: """Whether to ignore chain callbacks.""" return self.ignore_chain_ @property def ignore_agent(self) -> bool: """Whether to ignore agent callbacks.""" return self.ignore_agent_ async def on_retry( self, *args: Any, **kwargs: Any, ) -> Any: self.on_retry_common() async def on_llm_start( self, *args: Any, **kwargs: Any, ) -> None: self.on_llm_start_common() async def on_llm_new_token( self, *args: Any, **kwargs: Any, ) -> None: self.on_llm_new_token_common() async def on_llm_end( self, *args: Any, **kwargs: Any, ) -> None: self.on_llm_end_common() async def on_llm_error( self, *args: Any, **kwargs: Any, ) -> None: self.on_llm_error_common(*args, **kwargs) async def on_chain_start( self, *args: Any, **kwargs: Any, ) -> None: self.on_chain_start_common() async def on_chain_end( self, *args: Any, **kwargs: Any, ) -> None: self.on_chain_end_common() async def on_chain_error( self, *args: Any, **kwargs: Any, ) -> None: self.on_chain_error_common() async def on_tool_start( self, *args: Any, **kwargs: Any, ) -> None: self.on_tool_start_common() async def on_tool_end( self, *args: Any, **kwargs: Any, ) -> None: self.on_tool_end_common() async def on_tool_error( self, *args: Any, **kwargs: Any, ) -> None: self.on_tool_error_common() async def on_agent_action( self, *args: Any, **kwargs: Any, ) -> None: self.on_agent_action_common() async def on_agent_finish( self, *args: Any, **kwargs: Any, ) -> None: self.on_agent_finish_common() async def on_text( self, *args: Any, **kwargs: Any, ) -> None: self.on_text_common() # Overriding since BaseModel has __deepcopy__ method as well def __deepcopy__(self, memo: dict) -> FakeAsyncCallbackHandler: # type: ignore[override] return self
FakeAsyncCallbackHandler