language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django__django | tests/admin_inlines/admin.py | {
"start": 2512,
"end": 2588
} | class ____(PhotoInlineMixin, admin.TabularInline):
pass
| PhotoTabularInline |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/datacatalog.py | {
"start": 69635,
"end": 74051
} | class ____(GoogleCloudBaseOperator):
"""
Renames a field in a tag template.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogRenameTagTemplateFieldOperator`
:param location: Required. The location of the tag template field to rename.
:param tag_template: The tag template ID for field that is renamed.
:param field: Required. The old ID of this tag template field. For example,
``my_old_field``.
:param new_tag_template_field_id: Required. The new ID of this tag template field. For example,
``my_new_field``.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"tag_template",
"field",
"new_tag_template_field_id",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogTagTemplateLink(),)
def __init__(
self,
*,
location: str,
tag_template: str,
field: str,
new_tag_template_field_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.tag_template = tag_template
self.field = field
self.new_tag_template_field_id = new_tag_template_field_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
hook.rename_tag_template_field(
location=self.location,
tag_template=self.tag_template,
field=self.field,
new_tag_template_field_id=self.new_tag_template_field_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataCatalogTagTemplateLink.persist(
context=context,
tag_template_id=self.tag_template,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
@deprecated(
planned_removal_date="January 30, 2026",
use_instead="airflow.providers.google.cloud.operators.dataplex.DataplexCatalogSearchEntriesOperator",
reason="The Data Catalog will be discontinued on January 30, 2026 "
"in favor of Dataplex Universal Catalog.",
category=AirflowProviderDeprecationWarning,
)
| CloudDataCatalogRenameTagTemplateFieldOperator |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/context/invocation.py | {
"start": 34978,
"end": 45559
} | class ____(AssetExecutionContext, BaseDirectExecutionContext):
"""The ``context`` object available as the first argument to an asset's compute function when
being invoked directly. Can also be used as a context manager.
"""
def __init__(self, op_execution_context: DirectOpExecutionContext):
self._op_execution_context = op_execution_context
def __enter__(self):
self.op_execution_context._cm_scope_entered = True # noqa: SLF001
return self
def __exit__(self, *exc):
self.op_execution_context._exit_stack.close() # noqa: SLF001
def __del__(self):
self.op_execution_context._exit_stack.close() # noqa: SLF001
def _check_bound_to_invocation(self, fn_name: str, fn_type: str):
if not self._op_execution_context._per_invocation_properties: # noqa: SLF001
raise DagsterInvalidPropertyError(_property_msg(fn_name, fn_type))
def bind( # pyright: ignore[reportIncompatibleMethodOverride]
self,
op_def: OpDefinition,
pending_invocation: Optional[PendingNodeInvocation[OpDefinition]],
assets_def: Optional[AssetsDefinition],
config_from_args: Optional[Mapping[str, Any]],
resources_from_args: Optional[Mapping[str, Any]],
) -> "DirectAssetExecutionContext":
if assets_def is None:
raise DagsterInvariantViolationError(
"DirectAssetExecutionContext can only being used to invoke an asset."
)
if self._op_execution_context._per_invocation_properties is not None: # noqa: SLF001
raise DagsterInvalidInvocationError(
f"This context is currently being used to execute {self.op_execution_context.alias}."
" The context cannot be used to execute another asset until"
f" {self.op_execution_context.alias} has finished executing."
)
self._op_execution_context = self._op_execution_context.bind(
op_def=op_def,
pending_invocation=pending_invocation,
assets_def=assets_def,
config_from_args=config_from_args,
resources_from_args=resources_from_args,
)
return self
def unbind(self):
self._op_execution_context.unbind()
@property
def per_invocation_properties(self) -> PerInvocationProperties:
return self.op_execution_context.per_invocation_properties
@property
def is_bound(self) -> bool:
return self.op_execution_context.is_bound
@property
def execution_properties(self) -> DirectExecutionProperties:
return self.op_execution_context.execution_properties
@property
def op_execution_context(self) -> DirectOpExecutionContext:
return self._op_execution_context
def for_type(self, dagster_type: DagsterType) -> TypeCheckContext:
return self.op_execution_context.for_type(dagster_type)
def observe_output(self, output_name: str, mapping_key: Optional[str] = None) -> None:
self.op_execution_context.observe_output(output_name=output_name, mapping_key=mapping_key)
def _validate_resource_requirements(
resource_defs: Mapping[str, ResourceDefinition], op_def: OpDefinition
) -> None:
"""Validate correctness of resources against required resource keys."""
if cast("DecoratedOpFunction", op_def.compute_fn).has_context_arg():
for requirement in op_def.get_resource_requirements(
asset_layer=None,
handle=None,
):
if not requirement.is_io_manager_requirement:
ensure_requirements_satisfied(resource_defs, [requirement])
@public
def build_op_context(
resources: Optional[Mapping[str, Any]] = None,
op_config: Any = None,
resources_config: Optional[Mapping[str, Any]] = None,
instance: Optional[DagsterInstance] = None,
config: Any = None,
partition_key: Optional[str] = None,
partition_key_range: Optional[PartitionKeyRange] = None,
mapping_key: Optional[str] = None,
run_tags: Optional[Mapping[str, str]] = None,
event_loop: Optional[AbstractEventLoop] = None,
) -> DirectOpExecutionContext:
"""Builds op execution context from provided parameters.
``build_op_context`` can be used as either a function or context manager. If there is a
provided resource that is a context manager, then ``build_op_context`` must be used as a
context manager. This function can be used to provide the context argument when directly
invoking a op.
Args:
resources (Optional[Dict[str, Any]]): The resources to provide to the context. These can be
either values or resource definitions.
op_config (Optional[Mapping[str, Any]]): The config to provide to the op.
resources_config (Optional[Mapping[str, Any]]): The config to provide to the resources.
instance (Optional[DagsterInstance]): The dagster instance configured for the context.
Defaults to DagsterInstance.ephemeral().
mapping_key (Optional[str]): A key representing the mapping key from an upstream dynamic
output. Can be accessed using ``context.get_mapping_key()``.
partition_key (Optional[str]): String value representing partition key to execute with.
partition_key_range (Optional[PartitionKeyRange]): Partition key range to execute with.
run_tags: Optional[Mapping[str, str]]: The tags for the executing run.
event_loop: Optional[AbstractEventLoop]: An event loop for handling resources
with async context managers.
Examples:
.. code-block:: python
context = build_op_context()
op_to_invoke(context)
with build_op_context(resources={"foo": context_manager_resource}) as context:
op_to_invoke(context)
"""
if op_config and config:
raise DagsterInvalidInvocationError(
"Attempted to invoke ``build_op_context`` with both ``op_config``, and its "
"legacy version, ``config``. Please provide one or the other."
)
op_config = op_config if op_config else config
return DirectOpExecutionContext(
resources_dict=check.opt_mapping_param(resources, "resources", key_type=str),
resources_config=check.opt_mapping_param(
resources_config, "resources_config", key_type=str
),
op_config=op_config,
instance=check.opt_inst_param(instance, "instance", DagsterInstance),
partition_key=check.opt_str_param(partition_key, "partition_key"),
partition_key_range=check.opt_inst_param(
partition_key_range, "partition_key_range", PartitionKeyRange
),
mapping_key=check.opt_str_param(mapping_key, "mapping_key"),
run_tags=check.opt_mapping_param(run_tags, "run_tags", key_type=str),
event_loop=event_loop,
)
@public
def build_asset_check_context(
resources: Optional[Mapping[str, Any]] = None,
resources_config: Optional[Mapping[str, Any]] = None,
asset_config: Optional[Mapping[str, Any]] = None,
instance: Optional[DagsterInstance] = None,
) -> DirectAssetCheckExecutionContext:
"""Builds an asset check execution context from provided parameters.
Args:
resources (Optional[Dict[str, Any]]): The resources to provide to the context. These can be
either values or resource definitions.
resources_config (Optional[Mapping[str, Any]]): The config to provide to the resources.
asset_config (Optional[Mapping[str, Any]]): The config to provide to the asset.
instance (Optional[DagsterInstance]): The dagster instance configured for the context.
Defaults to DagsterInstance.ephemeral().
Examples:
.. code-block:: python
context = build_asset_check_context()
asset_check_to_invoke(context)
"""
op_context = build_op_context(
op_config=asset_config,
resources=resources,
resources_config=resources_config,
instance=instance,
)
return DirectAssetCheckExecutionContext(op_execution_context=op_context)
@public
def build_asset_context(
resources: Optional[Mapping[str, Any]] = None,
resources_config: Optional[Mapping[str, Any]] = None,
asset_config: Optional[Mapping[str, Any]] = None,
instance: Optional[DagsterInstance] = None,
partition_key: Optional[str] = None,
partition_key_range: Optional[PartitionKeyRange] = None,
run_tags: Optional[Mapping[str, str]] = None,
event_loop: Optional[AbstractEventLoop] = None,
) -> DirectAssetExecutionContext:
"""Builds asset execution context from provided parameters.
``build_asset_context`` can be used as either a function or context manager. If there is a
provided resource that is a context manager, then ``build_asset_context`` must be used as a
context manager. This function can be used to provide the context argument when directly
invoking an asset.
Args:
resources (Optional[Dict[str, Any]]): The resources to provide to the context. These can be
either values or resource definitions.
resources_config (Optional[Mapping[str, Any]]): The config to provide to the resources.
asset_config (Optional[Mapping[str, Any]]): The config to provide to the asset.
instance (Optional[DagsterInstance]): The dagster instance configured for the context.
Defaults to DagsterInstance.ephemeral().
partition_key (Optional[str]): String value representing partition key to execute with.
partition_key_range (Optional[PartitionKeyRange]): Partition key range to execute with.
run_tags: Optional[Mapping[str, str]]: The tags for the executing run.
event_loop: Optional[AbstractEventLoop]: An event loop for handling resources
with async context managers.
Examples:
.. code-block:: python
context = build_asset_context()
asset_to_invoke(context)
with build_asset_context(resources={"foo": context_manager_resource}) as context:
asset_to_invoke(context)
"""
op_context = build_op_context(
op_config=asset_config,
resources=resources,
resources_config=resources_config,
partition_key=partition_key,
partition_key_range=partition_key_range,
instance=instance,
run_tags=run_tags,
event_loop=event_loop,
)
return DirectAssetExecutionContext(op_execution_context=op_context)
| DirectAssetExecutionContext |
python | django__django | tests/db_functions/tests.py | {
"start": 303,
"end": 2552
} | class ____(TestCase):
def test_nested_function_ordering(self):
Author.objects.create(name="John Smith")
Author.objects.create(name="Rhonda Simpson", alias="ronny")
authors = Author.objects.order_by(Length(Coalesce("alias", "name")))
self.assertQuerySetEqual(
authors,
[
"Rhonda Simpson",
"John Smith",
],
lambda a: a.name,
)
authors = Author.objects.order_by(Length(Coalesce("alias", "name")).desc())
self.assertQuerySetEqual(
authors,
[
"John Smith",
"Rhonda Simpson",
],
lambda a: a.name,
)
def test_func_transform_bilateral(self):
with register_lookup(CharField, UpperBilateral):
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.filter(name__upper__exact="john smith")
self.assertQuerySetEqual(
authors.order_by("name"),
[
"John Smith",
],
lambda a: a.name,
)
def test_func_transform_bilateral_multivalue(self):
with register_lookup(CharField, UpperBilateral):
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.filter(name__upper__in=["john smith", "rhonda"])
self.assertQuerySetEqual(
authors.order_by("name"),
[
"John Smith",
"Rhonda",
],
lambda a: a.name,
)
def test_function_as_filter(self):
Author.objects.create(name="John Smith", alias="SMITHJ")
Author.objects.create(name="Rhonda")
self.assertQuerySetEqual(
Author.objects.filter(alias=Upper(V("smithj"))),
["John Smith"],
lambda x: x.name,
)
self.assertQuerySetEqual(
Author.objects.exclude(alias=Upper(V("smithj"))),
["Rhonda"],
lambda x: x.name,
)
| FunctionTests |
python | ray-project__ray | ci/ray_ci/bisect/generic_validator.py | {
"start": 705,
"end": 2333
} | class ____(Validator):
def _get_buildkite(self) -> Buildkite:
buildkite = Buildkite()
buildkite.set_access_token(
get_secret_token(get_global_config()["ci_pipeline_buildkite_secret"]),
)
return buildkite
def _get_rayci_select(self, test: Test) -> str:
return test.get_test_results(limit=1)[0].rayci_step_id
def run(self, test: Test, revision: str) -> bool:
buildkite = self._get_buildkite()
build = buildkite.builds().create_build(
BUILDKITE_ORGANIZATION,
BUILDKITE_POSTMERGE_PIPELINE,
revision,
"master",
message=f"[bisection] running single test: {test.get_name()}",
env={
"RAYCI_SELECT": self._get_rayci_select(test),
"RAYCI_BISECT_TEST_TARGET": test.get_target(),
},
)
total_wait = 0
while True:
logger.info(f"... waiting for test result ...({total_wait} seconds)")
time.sleep(WAIT)
build = buildkite.builds().get_build_by_number(
BUILDKITE_ORGANIZATION,
BUILDKITE_POSTMERGE_PIPELINE,
build["number"],
)
# return build status
if build["state"] in BUILDKITE_BUILD_PASSING_STATE:
return True
if build["state"] in BUILDKITE_BUILD_FAILING_STATE:
return False
# continue waiting
total_wait += WAIT
if total_wait > TIMEOUT:
logger.error("Timeout")
return False
| GenericValidator |
python | mlflow__mlflow | mlflow/utils/search_utils.py | {
"start": 47357,
"end": 47882
} | class ____:
def __init__(self, obj):
self.obj = obj
# Only need < and == are needed for use as a key parameter in the sorted function
def __eq__(self, other):
return other.obj == self.obj
def __lt__(self, other):
if self.obj is None:
return False
if other.obj is None:
return True
return other.obj < self.obj
def _apply_reversor(model, key, ascending):
attr = getattr(model, key)
return attr if ascending else _Reversor(attr)
| _Reversor |
python | tornadoweb__tornado | tornado/netutil.py | {
"start": 12349,
"end": 15432
} | class ____(Configurable):
"""Configurable asynchronous DNS resolver interface.
By default, a blocking implementation is used (which simply calls
`socket.getaddrinfo`). An alternative implementation can be
chosen with the `Resolver.configure <.Configurable.configure>`
class method::
Resolver.configure('tornado.netutil.ThreadedResolver')
The implementations of this interface included with Tornado are
* `tornado.netutil.DefaultLoopResolver`
* `tornado.netutil.DefaultExecutorResolver` (deprecated)
* `tornado.netutil.BlockingResolver` (deprecated)
* `tornado.netutil.ThreadedResolver` (deprecated)
* `tornado.netutil.OverrideResolver`
* `tornado.platform.caresresolver.CaresResolver` (deprecated)
.. versionchanged:: 5.0
The default implementation has changed from `BlockingResolver` to
`DefaultExecutorResolver`.
.. versionchanged:: 6.2
The default implementation has changed from `DefaultExecutorResolver` to
`DefaultLoopResolver`.
"""
@classmethod
def configurable_base(cls) -> Type["Resolver"]:
return Resolver
@classmethod
def configurable_default(cls) -> Type["Resolver"]:
return DefaultLoopResolver
def resolve(
self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
) -> Awaitable[List[Tuple[int, Any]]]:
"""Resolves an address.
The ``host`` argument is a string which may be a hostname or a
literal IP address.
Returns a `.Future` whose result is a list of (family,
address) pairs, where address is a tuple suitable to pass to
`socket.connect <socket.socket.connect>` (i.e. a ``(host,
port)`` pair for IPv4; additional fields may be present for
IPv6). If a ``callback`` is passed, it will be run with the
result as an argument when it is complete.
:raises IOError: if the address cannot be resolved.
.. versionchanged:: 4.4
Standardized all implementations to raise `IOError`.
.. versionchanged:: 6.0 The ``callback`` argument was removed.
Use the returned awaitable object instead.
"""
raise NotImplementedError()
def close(self) -> None:
"""Closes the `Resolver`, freeing any resources used.
.. versionadded:: 3.1
"""
pass
def _resolve_addr(
host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
) -> List[Tuple[int, Any]]:
# On Solaris, getaddrinfo fails if the given port is not found
# in /etc/services and no socket type is given, so we must pass
# one here. The socket type used here doesn't seem to actually
# matter (we discard the one we get back in the results),
# so the addresses we return should still be usable with SOCK_DGRAM.
addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)
results = []
for fam, socktype, proto, canonname, address in addrinfo:
results.append((fam, address))
return results # type: ignore
| Resolver |
python | kubernetes-client__python | kubernetes/client/models/v1_pod_dns_config.py | {
"start": 383,
"end": 6059
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'nameservers': 'list[str]',
'options': 'list[V1PodDNSConfigOption]',
'searches': 'list[str]'
}
attribute_map = {
'nameservers': 'nameservers',
'options': 'options',
'searches': 'searches'
}
def __init__(self, nameservers=None, options=None, searches=None, local_vars_configuration=None): # noqa: E501
"""V1PodDNSConfig - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._nameservers = None
self._options = None
self._searches = None
self.discriminator = None
if nameservers is not None:
self.nameservers = nameservers
if options is not None:
self.options = options
if searches is not None:
self.searches = searches
@property
def nameservers(self):
"""Gets the nameservers of this V1PodDNSConfig. # noqa: E501
A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed. # noqa: E501
:return: The nameservers of this V1PodDNSConfig. # noqa: E501
:rtype: list[str]
"""
return self._nameservers
@nameservers.setter
def nameservers(self, nameservers):
"""Sets the nameservers of this V1PodDNSConfig.
A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed. # noqa: E501
:param nameservers: The nameservers of this V1PodDNSConfig. # noqa: E501
:type: list[str]
"""
self._nameservers = nameservers
@property
def options(self):
"""Gets the options of this V1PodDNSConfig. # noqa: E501
A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy. # noqa: E501
:return: The options of this V1PodDNSConfig. # noqa: E501
:rtype: list[V1PodDNSConfigOption]
"""
return self._options
@options.setter
def options(self, options):
"""Sets the options of this V1PodDNSConfig.
A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy. # noqa: E501
:param options: The options of this V1PodDNSConfig. # noqa: E501
:type: list[V1PodDNSConfigOption]
"""
self._options = options
@property
def searches(self):
"""Gets the searches of this V1PodDNSConfig. # noqa: E501
A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed. # noqa: E501
:return: The searches of this V1PodDNSConfig. # noqa: E501
:rtype: list[str]
"""
return self._searches
@searches.setter
def searches(self, searches):
"""Sets the searches of this V1PodDNSConfig.
A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed. # noqa: E501
:param searches: The searches of this V1PodDNSConfig. # noqa: E501
:type: list[str]
"""
self._searches = searches
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PodDNSConfig):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PodDNSConfig):
return True
return self.to_dict() != other.to_dict()
| V1PodDNSConfig |
python | mlflow__mlflow | mlflow/system_metrics/system_metrics_monitor.py | {
"start": 756,
"end": 8738
} | class ____:
"""Class for monitoring system stats.
This class is used for pulling system metrics and logging them to MLflow. Calling `start()` will
spawn a thread that logs system metrics periodically. Calling `finish()` will stop the thread.
Logging is done on a different frequency from pulling metrics, so that the metrics are
aggregated over the period. Users can change the logging frequency by setting
`MLFLOW_SYSTEM_METRICS_SAMPLING_INTERVAL` and `MLFLOW_SYSTEM_METRICS_SAMPLES_BEFORE_LOGGING`
environment variables, e.g., run `export MLFLOW_SYSTEM_METRICS_SAMPLING_INTERVAL=10` in terminal
will set the sampling interval to 10 seconds.
System metrics are logged with a prefix "system/", e.g., "system/cpu_utilization_percentage".
Args:
run_id: string, the MLflow run ID.
sampling_interval: float, default to 10. The interval (in seconds) at which to pull system
metrics. Will be overridden by `MLFLOW_SYSTEM_METRICS_SAMPLING_INTERVAL` environment
variable.
samples_before_logging: int, default to 1. The number of samples to aggregate before
logging. Will be overridden by `MLFLOW_SYSTEM_METRICS_SAMPLES_BEFORE_LOGGING`
evnironment variable.
resume_logging: bool, default to False. If True, we will resume the system metrics logging
from the `run_id`, and the first step to log will be the last step of `run_id` + 1, if
False, system metrics logging will start from step 0.
node_id: string, default to None. The node ID of the machine where the metrics are
collected. Will be overridden by `MLFLOW_SYSTEM_METRICS_NODE_ID`
evnironment variable. This is useful in multi-node training to distinguish the metrics
from different nodes. For example, if you set node_id to "node_0", the system metrics
getting logged will be of format "system/node_0/cpu_utilization_percentage".
tracking_uri: string, default to None. The tracking URI of the MLflow server, or `None` to
use whatever is set via `mlflow.set_tracking_uri()`.
"""
def __init__(
self,
run_id,
sampling_interval=10,
samples_before_logging=1,
resume_logging=False,
node_id=None,
tracking_uri=None,
):
from mlflow.tracking import get_tracking_uri
from mlflow.utils.autologging_utils import BatchMetricsLogger
# Instantiate default monitors.
self.monitors = [CPUMonitor(), DiskMonitor(), NetworkMonitor()]
if gpu_monitor := self._initialize_gpu_monitor():
self.monitors.append(gpu_monitor)
self.sampling_interval = MLFLOW_SYSTEM_METRICS_SAMPLING_INTERVAL.get() or sampling_interval
self.samples_before_logging = (
MLFLOW_SYSTEM_METRICS_SAMPLES_BEFORE_LOGGING.get() or samples_before_logging
)
self._run_id = run_id
self._shutdown_event = threading.Event()
self._process = None
self._metrics_prefix = "system/"
self.node_id = MLFLOW_SYSTEM_METRICS_NODE_ID.get() or node_id
self._tracking_uri = tracking_uri or get_tracking_uri()
self._logging_step = self._get_next_logging_step(run_id) if resume_logging else 0
self.mlflow_logger = BatchMetricsLogger(self._run_id, tracking_uri=self._tracking_uri)
def _get_next_logging_step(self, run_id):
from mlflow.tracking.client import MlflowClient
client = MlflowClient(self._tracking_uri)
try:
run = client.get_run(run_id)
except MlflowException:
return 0
system_metric_name = None
for metric_name in run.data.metrics.keys():
if metric_name.startswith(self._metrics_prefix):
system_metric_name = metric_name
break
if system_metric_name is None:
return 0
metric_history = client.get_metric_history(run_id, system_metric_name)
return metric_history[-1].step + 1
def start(self):
"""Start monitoring system metrics."""
try:
self._process = threading.Thread(
target=self.monitor,
daemon=True,
name="SystemMetricsMonitor",
)
self._process.start()
_logger.info("Started monitoring system metrics.")
except Exception as e:
_logger.warning(f"Failed to start monitoring system metrics: {e}")
self._process = None
def monitor(self):
"""Main monitoring loop, which consistently collect and log system metrics."""
from mlflow.tracking.client import MlflowClient
while not self._shutdown_event.is_set():
for _ in range(self.samples_before_logging):
self.collect_metrics()
self._shutdown_event.wait(self.sampling_interval)
try:
# Get the MLflow run to check if the run is not RUNNING.
run = MlflowClient(self._tracking_uri).get_run(self._run_id)
except Exception as e:
_logger.warning(f"Failed to get mlflow run: {e}.")
return
if run.info.status != "RUNNING" or self._shutdown_event.is_set():
# If the mlflow run is terminated or receives the shutdown signal, stop
# monitoring.
return
metrics = self.aggregate_metrics()
try:
self.publish_metrics(metrics)
except Exception as e:
_logger.warning(
f"Failed to log system metrics: {e}, this is expected if the experiment/run is "
"already terminated."
)
return
def collect_metrics(self):
"""Collect system metrics."""
metrics = {}
for monitor in self.monitors:
monitor.collect_metrics()
metrics.update(monitor._metrics)
return metrics
def aggregate_metrics(self):
"""Aggregate collected metrics."""
metrics = {}
for monitor in self.monitors:
metrics.update(monitor.aggregate_metrics())
return metrics
def publish_metrics(self, metrics):
"""Log collected metrics to MLflow."""
# Add prefix "system/" to the metrics name for grouping. If `self.node_id` is not None, also
# add it to the metrics name.
prefix = self._metrics_prefix + (self.node_id + "/" if self.node_id else "")
metrics = {prefix + k: v for k, v in metrics.items()}
self.mlflow_logger.record_metrics(metrics, self._logging_step)
self._logging_step += 1
for monitor in self.monitors:
monitor.clear_metrics()
def finish(self):
"""Stop monitoring system metrics."""
if self._process is None:
return
_logger.info("Stopping system metrics monitoring...")
self._shutdown_event.set()
try:
self._process.join()
self.mlflow_logger.flush()
_logger.info("Successfully terminated system metrics monitoring!")
except Exception as e:
_logger.error(f"Error terminating system metrics monitoring process: {e}.")
self._process = None
def _initialize_gpu_monitor(self) -> BaseMetricsMonitor | None:
# NVIDIA GPU
try:
return GPUMonitor()
except Exception:
_logger.debug("Failed to initialize GPU monitor for NVIDIA GPU.", exc_info=True)
# Falling back to pyrocml (AMD/HIP GPU)
try:
return ROCMMonitor()
except Exception:
_logger.debug("Failed to initialize GPU monitor for AMD/HIP GPU.", exc_info=True)
_logger.info("Skip logging GPU metrics. Set logger level to DEBUG for more details.")
return None
| SystemMetricsMonitor |
python | Textualize__textual | src/textual/driver.py | {
"start": 373,
"end": 10329
class ____(ABC):
    """A base class for drivers."""

    def __init__(
        self,
        app: App[Any],
        *,
        debug: bool = False,
        mouse: bool = True,
        size: tuple[int, int] | None = None,
    ) -> None:
        """Initialize a driver.

        Args:
            app: The App instance.
            debug: Enable debug mode.
            mouse: Enable mouse support,
            size: Initial size of the terminal or `None` to detect.
        """
        self._app = app
        self._debug = debug
        self._mouse = mouse
        self._size = size
        # Captured here so worker threads can schedule coroutines back on to
        # the app's event loop (see send_message).
        self._loop = asyncio.get_running_loop()
        # Mouse buttons currently pressed; used to synthesize MouseUp events
        # when a release happens outside the terminal (see process_message).
        self._down_buttons: list[int] = []
        self._last_move_event: events.MouseMove | None = None
        self._auto_restart = True
        """Should the application auto-restart (where appropriate)?"""
        # Top-left origin of the app region; used to translate mouse
        # coordinates from terminal space to app space.
        self.cursor_origin: tuple[int, int] | None = None

    @property
    def is_headless(self) -> bool:
        """Is the driver 'headless' (no output)?"""
        return False

    @property
    def is_inline(self) -> bool:
        """Is the driver 'inline' (not full-screen)?"""
        return False

    @property
    def is_web(self) -> bool:
        """Is the driver 'web' (running via a browser)?"""
        return False

    @property
    def can_suspend(self) -> bool:
        """Can this driver be suspended?"""
        return False

    def send_message(self, message: messages.Message) -> None:
        """Send a message to the target app.

        Thread-safe: may be called from the input thread.

        Args:
            message: A message.
        """
        asyncio.run_coroutine_threadsafe(
            self._app._post_message(message), loop=self._loop
        )

    def process_message(self, message: messages.Message) -> None:
        """Perform additional processing on a message, prior to sending.

        Args:
            event: A message to process.
        """
        # NOTE: This runs in a thread.
        # Avoid calling methods on the app.
        message.set_sender(self._app)
        if self.cursor_origin is None:
            offset_x = 0
            offset_y = 0
        else:
            offset_x, offset_y = self.cursor_origin
        if isinstance(message, events.MouseEvent):
            # Translate terminal coordinates into app-region coordinates.
            message._x -= offset_x
            message._y -= offset_y
            message._screen_x -= offset_x
            message._screen_y -= offset_y

        if isinstance(message, events.MouseDown):
            if message.button:
                self._down_buttons.append(message.button)
        elif isinstance(message, events.MouseUp):
            if message.button and message.button in self._down_buttons:
                self._down_buttons.remove(message.button)
        elif isinstance(message, events.MouseMove):
            if (
                self._down_buttons
                and not message.button
                and self._last_move_event is not None
            ):
                # A buttonless move while buttons are recorded as held means
                # the release happened where we couldn't see it: synthesize
                # MouseUp events at the last known pointer position.
                # Deduplicate self._down_buttons while preserving order.
                buttons = list(dict.fromkeys(self._down_buttons).keys())
                self._down_buttons.clear()
                move_event = self._last_move_event
                for button in buttons:
                    self.send_message(
                        MouseUp(
                            message.widget,
                            x=move_event.x,
                            y=move_event.y,
                            delta_x=0,
                            delta_y=0,
                            button=button,
                            shift=message.shift,
                            meta=message.meta,
                            ctrl=message.ctrl,
                            screen_x=move_event.screen_x,
                            screen_y=move_event.screen_y,
                            style=message.style,
                        )
                    )
            self._last_move_event = message

        self.send_message(message)

    @abstractmethod
    def write(self, data: str) -> None:
        """Write data to the output device.

        Args:
            data: Raw data.
        """

    def flush(self) -> None:
        """Flush any buffered data."""

    @abstractmethod
    def start_application_mode(self) -> None:
        """Start application mode."""

    @abstractmethod
    def disable_input(self) -> None:
        """Disable further input."""

    @abstractmethod
    def stop_application_mode(self) -> None:
        """Stop application mode, restore state."""

    def suspend_application_mode(self) -> None:
        """Suspend application mode.

        Used to suspend application mode and allow uninhibited access to the
        terminal.
        """
        self.stop_application_mode()
        self.close()

    def resume_application_mode(self) -> None:
        """Resume application mode.

        Used to resume application mode after it has been previously
        suspended.
        """
        self.start_application_mode()

    class SignalResume(events.Event):
        """Event sent to the app when a resume signal should be published."""

    @contextmanager
    def no_automatic_restart(self) -> Iterator[None]:
        """A context manager used to tell the driver to not auto-restart.

        For drivers that support the application being suspended by the
        operating system, this context manager is used to mark a body of
        code as one that will manage its own stop and start.
        """
        auto_restart = self._auto_restart
        self._auto_restart = False
        try:
            yield
        finally:
            # Restore the previous setting even if the body raised.
            self._auto_restart = auto_restart

    def close(self) -> None:
        """Perform any final cleanup."""

    def open_url(self, url: str, new_tab: bool = True) -> None:
        """Open a URL in the default web browser.

        Args:
            url: The URL to open.
            new_tab: Whether to open the URL in a new tab.
                This is only relevant when running via the WebDriver,
                and is ignored when called while running through the terminal.
        """
        import webbrowser

        webbrowser.open(url)

    def deliver_binary(
        self,
        binary: BinaryIO | TextIO,
        *,
        delivery_key: str,
        save_path: Path,
        open_method: Literal["browser", "download"] = "download",
        encoding: str | None = None,
        mime_type: str | None = None,
        name: str | None = None,
    ) -> None:
        """Save the file `path_or_file` to `save_path`.

        If running via web through Textual Web or Textual Serve,
        this will initiate a download in the web browser.

        Args:
            binary: The binary file to save.
            delivery_key: The unique key that was used to deliver the file.
            save_path: The location to save the file to.
            open_method: *web only* Whether to open the file in the browser or
                to prompt the user to download it. When running via a standard
                (non-web) terminal, this is ignored.
            encoding: *web only* The text encoding to use when saving the file.
                This will be passed to Python's `open()` built-in function.
                When running via web, this will be used to set the charset
                in the `Content-Type` header.
            mime_type: *web only* The MIME type of the file. This will be used to
                set the `Content-Type` header in the HTTP response.
            name: A user-defined name which will be returned in [`DeliveryComplete`][textual.events.DeliveryComplete]
                and [`DeliveryComplete`][textual.events.DeliveryComplete].
        """

        def save_file_thread(binary: BinaryIO | TextIO, mode: str) -> None:
            """Copy `binary` to `save_path` in chunks; report completion/failure."""
            try:
                # `open()` raises ValueError if `encoding` is given together
                # with a binary mode, so only pass it for text mode.
                with open(
                    save_path,
                    mode,
                    encoding=None if "b" in mode else (encoding or "utf-8"),
                ) as destination_file:
                    read = binary.read
                    write = destination_file.write
                    chunk_size = 1024 * 64
                    while True:
                        data = read(chunk_size)
                        if not data:
                            # No data left to read - delivery is complete.
                            self._delivery_complete(
                                delivery_key, save_path=save_path, name=name
                            )
                            break
                        write(data)
            except Exception as error:
                # If any exception occurs during the delivery, pass
                # it on to the app via a DeliveryFailed event.
                log.error(f"Failed to deliver file: {error}")
                import traceback

                log.error(str(traceback.format_exc()))
                self._delivery_failed(delivery_key, exception=error, name=name)
            finally:
                if not binary.closed:
                    binary.close()

        # Decide text vs. binary by probing the stream: a zero-length read
        # returns `bytes` from binary streams and `str` from text streams.
        # `isinstance(binary, BinaryIO)` cannot be used here because
        # `typing.BinaryIO` is not a runtime base class of real file objects
        # (e.g. `io.BufferedReader`, `io.BytesIO`), so that check always
        # picked text mode and bytes writes then failed.
        try:
            mode = "wb" if isinstance(binary.read(0), bytes) else "w"
        except Exception:
            # Unreadable/exotic stream: fall back to text mode.
            mode = "w"
        thread = threading.Thread(target=save_file_thread, args=(binary, mode))
        thread.start()

    def _delivery_complete(
        self, delivery_key: str, save_path: Path | None, name: str | None
    ) -> None:
        """Called when a file has been delivered successfully.

        Delivers a DeliveryComplete event to the app.
        """
        self._app.call_from_thread(
            self._app.post_message,
            events.DeliveryComplete(key=delivery_key, path=save_path, name=name),
        )

    def _delivery_failed(
        self, delivery_key: str, exception: BaseException, name: str | None
    ) -> None:
        """Called when a file delivery fails.

        Delivers a DeliveryFailed event to the app.
        """
        self._app.call_from_thread(
            self._app.post_message,
            events.DeliveryFailed(key=delivery_key, exception=exception, name=name),
        )
| Driver |
python | google__jax | tests/lax_numpy_indexing_test.py | {
"start": 18044,
"end": 52563
} | class ____(jtu.JaxTestCase):
"""Tests for Numpy indexing translation rules."""
@jtu.sample_product(
[dict(name=name, shape=shape, indexer=indexer)
for name, index_specs in STATIC_INDEXING_TESTS
for shape, indexer, _ in index_specs],
dtype=all_dtypes
)
def testStaticIndexing(self, name, shape, dtype, indexer):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
np_fun = lambda x: np.asarray(x)[indexer]
jnp_fun = lambda x: jnp.asarray(x)[indexer]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
# Tests x.at[...].get(...) as well.
jnp_fun = lambda x: jnp.asarray(x).at[indexer].get()
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
def testStaticIndexingWithJaxArray(self):
shape = (10,)
indexer = slice(jnp.array(2, dtype=np.int32),
np.array(11, dtype=np.int32),
jnp.array(1, dtype=np.int32))
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, np.int32)]
np_fun = lambda x: np.asarray(x)[indexer]
jnp_fun = lambda x: jnp.asarray(x)[indexer]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
# Tests x.at[...].get(...) as well.
jnp_fun = lambda x: jnp.asarray(x).at[indexer].get()
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@jtu.sample_product(
funcname=["negative", "sin", "cos", "square", "sqrt", "log", "exp"],
)
def testIndexApply(self, funcname, size=10, dtype='float32'):
rng = jtu.rand_default(self.rng())
idx_rng = jtu.rand_int(self.rng(), -size, size)
np_func = getattr(np, funcname)
jnp_func = getattr(jnp, funcname)
@jtu.ignore_warning(category=RuntimeWarning)
def np_op(x, idx):
y = x.copy()
np_func.at(y, idx)
return y
def jnp_op(x, idx):
return jnp.asarray(x).at[idx].apply(jnp_func)
# Test with traced integer index
args_maker = lambda: [rng(size, dtype), idx_rng(size, int)]
tol = (
5e-5
if jtu.test_device_matches(["tpu"]) and funcname in ("log", "exp")
else None
)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker, atol=tol)
self._CompileAndCheck(jnp_op, args_maker)
# Test with slice index
idx = slice(1, 5)
np_op_idx = partial(np_op, idx=idx)
jnp_op_idx = partial(jnp_op, idx=idx)
args_maker = lambda: [rng(size, dtype)]
self._CheckAgainstNumpy(np_op_idx, jnp_op_idx, args_maker, atol=tol,
rtol=tol)
self._CompileAndCheck(jnp_op_idx, args_maker)
def testIndexApplyBatchingBug(self):
# https://github.com/jax-ml/jax/issues/16655
arr = jnp.array([[1, 2, 3, 4, 5, 6]])
ind = jnp.array([3])
func = lambda a, i: a.at[i].apply(lambda x: x - 1)
expected = jnp.array(list(map(func, arr, ind)))
out = jax.vmap(func)(arr, ind)
self.assertArraysEqual(out, expected)
def testIndexUpdateScalarBug(self):
# https://github.com/jax-ml/jax/issues/14923
a = jnp.arange(10.)
out = a.at[0].apply(jnp.cos)
self.assertArraysEqual(out, a.at[0].set(1))
@jtu.sample_product(
[dict(name=name, shape=shape, indexer=indexer, mode=mode)
for mode in MODES
for name, index_specs in (
STATIC_INDEXING_TESTS if mode == "promise_in_bounds" else
STATIC_INDEXING_TESTS + STATIC_INDEXING_OUT_OF_BOUNDS_TESTS)
for shape, indexer, _ in index_specs
],
dtype=float_dtypes,
)
def testStaticIndexingGrads(self, name, shape, dtype, indexer, mode):
rng = jtu.rand_default(self.rng())
tol = 1e-2 if jnp.finfo(dtype).bits == 32 else None
arg = rng(shape, dtype)
# Use an arbitrary finite fill_value, since NaNs won't work in a numerical
# gradient test.
fun = lambda x: jnp.asarray(x).at[indexer].get(mode=mode, fill_value=7)**2
check_grads(fun, (arg,), 2, tol, tol, tol)
def _ReplaceSlicesWithTuples(self, idx):
"""Helper method to replace slices with tuples for dynamic indexing args."""
if isinstance(idx, slice):
triple = idx.start, idx.stop, idx.step
isnone = [i for i, elt in enumerate(triple) if elt is None]
zeros = itertools.repeat(0)
nones = itertools.repeat(None)
out = util.subvals(triple, zip(isnone, zeros))
return out, lambda out: slice(*util.subvals(out, zip(isnone, nones)))
elif isinstance(idx, (tuple, list)) and idx:
t = type(idx)
elts, packs = zip(*map(self._ReplaceSlicesWithTuples, idx))
return elts, lambda elts: t((pack(i) for pack, i in zip(packs, elts)))
else:
return idx, lambda x: x
@jtu.sample_product(
[dict(name=name, shape=shape, indexer=indexer)
for name, index_specs in [
("OneSliceIndex",
[IndexSpec(shape=(5,), indexer=slice(1, 3)),
IndexSpec(shape=(5, 4), indexer=slice(1, 3))]),
("TwoSliceIndices",
[IndexSpec(shape=(5, 4), indexer=(slice(1, 3), slice(0, 2))),
IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, 2)))]),
("NonUnitStrides", [
IndexSpec(shape=(3,), indexer=slice(None, None, -1)),
IndexSpec(shape=(3, 3), indexer=slice(0, 3, -2)),
IndexSpec(shape=(3, 4, 5), indexer=slice(0, 4, 2))
]),
("OnlyStartOrStopDynamic", [
IndexSpec(shape=(5, 4), indexer=(slice(None, 3), slice(0, 2))),
IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, None)))
]),
]
for shape, indexer, _ in index_specs
],
dtype=all_dtypes,
)
def testDynamicIndexingWithSlicesErrors(self, name, shape, dtype, indexer):
rng = jtu.rand_default(self.rng())
unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)
@jax.jit
def fun(x, unpacked_indexer):
indexer = pack_indexer(unpacked_indexer)
return x[indexer]
args_maker = lambda: [rng(shape, dtype), unpacked_indexer]
self.assertRaises(IndexError, lambda: fun(*args_maker()))
@jtu.sample_product(
[dict(name=name, shape=shape, indexer=indexer)
for name, index_specs in [
("OneIntIndex",
[IndexSpec(shape=(3,), indexer=1),
IndexSpec(shape=(3, 3), indexer=0),
IndexSpec(shape=(3, 4, 5), indexer=2),
IndexSpec(shape=(3,), indexer=-1),
IndexSpec(shape=(3,), indexer=-2)]),
("TwoIntIndices",
[IndexSpec(shape=(3, 3), indexer=(2, 1)),
IndexSpec(shape=(3, 4, 5), indexer=(1, 2)),
IndexSpec(shape=(3, 4, 5), indexer=(-1, 2))]),
("ThreeIntIndices",
[IndexSpec((3, 4, 5), indexer=(1, 2, 3))]),
]
for shape, indexer, _ in index_specs
],
dtype=all_dtypes,
)
def testDynamicIndexingWithIntegers(self, name, shape, dtype, indexer):
rng = jtu.rand_default(self.rng())
unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)
def np_fun(x, unpacked_indexer):
indexer = pack_indexer(unpacked_indexer)
return np.asarray(x)[indexer]
def jnp_fun(x, unpacked_indexer):
indexer = pack_indexer(unpacked_indexer)
return jnp.array(x)[indexer]
args_maker = lambda: [rng(shape, dtype), unpacked_indexer]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@jtu.sample_product(
[dict(name=name, shape=shape, indexer=indexer)
for name, index_specs in [
("OneIntIndex",
[IndexSpec(shape=(3,), indexer=1),
IndexSpec(shape=(3, 3), indexer=0),
IndexSpec(shape=(3, 4, 5), indexer=2),
IndexSpec(shape=(3,), indexer=-1),
IndexSpec(shape=(3,), indexer=-2),
]),
("TwoIntIndices",
[IndexSpec(shape=(3, 3), indexer=(2, 1)),
IndexSpec(shape=(3, 4, 5), indexer=(1, 2)),
IndexSpec(shape=(3, 4, 5), indexer=(-1, 2)),
]),
("ThreeIntIndices",
[IndexSpec((3, 4, 5), indexer=(1, 2, 3))]),
]
for shape, indexer, _ in index_specs
],
dtype=float_dtypes,
)
def testDynamicIndexingWithIntegersGrads(self, name, shape, dtype, indexer):
rng = jtu.rand_default(self.rng())
tol = 1e-2 if jnp.finfo(dtype).bits == 32 else None
unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)
@jax.jit
def fun(unpacked_indexer, x):
indexer = pack_indexer(unpacked_indexer)
return x[indexer]
arr = rng(shape, dtype)
check_grads(partial(fun, unpacked_indexer), (arr,), 2, tol, tol, tol)
@jtu.sample_product(
[dict(name=name, shape=shape, indexer=indexer)
for name, index_specs in ADVANCED_INDEXING_TESTS
for shape, indexer, _ in index_specs
],
dtype=all_dtypes,
)
def testAdvancedIntegerIndexing(self, name, shape, dtype, indexer):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype), indexer]
np_fun = lambda x, idx: np.asarray(x)[idx]
jnp_fun = lambda x, idx: jnp.asarray(x)[idx]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@jtu.sample_product(dtype=jtu.dtypes.unsigned + jtu.dtypes.integer)
def testIndicesNormalizationByType(self, dtype):
x = jnp.arange(10)
jaxpr = jax.make_jaxpr(x.__getitem__)(jnp.arange(3, dtype=dtype))
primitives = [eqn.primitive for eqn in jaxpr.eqns]
if np.issubdtype(dtype, np.unsignedinteger):
# Unsigned integers should not require lt, add, and select.
self.assertEqual(primitives, [lax.convert_element_type_p, lax.broadcast_in_dim_p, lax.gather_p])
else:
# May or may not contain convert_element_type.
self.assertIn(len(primitives), [5, 6])
self.assertEqual(primitives[:3], [lax.lt_p, lax.add_p, lax.select_n_p])
self.assertEqual(primitives[-2:], [lax.broadcast_in_dim_p, lax.gather_p])
@jtu.sample_product(
[dict(name=name, shape=shape, indexer=indexer)
for name, index_specs in [
("One1DIntArrayIndex",
[IndexSpec(shape=(3,), indexer=np.array([0, 1])),
IndexSpec(shape=(3, 3), indexer=np.array([1, 2, 1])),
IndexSpec(shape=(3, 4, 5), indexer=np.array([0, 2, 0, 1])),
IndexSpec(shape=(3,), indexer=np.array([-1, 1])),
IndexSpec(shape=(3,), indexer=np.array([-2, -1])),
]),
("One2DIntArrayIndex",
[IndexSpec(shape=(3,), indexer=np.array([[0, 0]])),
IndexSpec(shape=(3, 3), indexer=np.array([[1, 2, 1],
[0, 1, -1]])),
IndexSpec(shape=(3, 4, 5), indexer=np.array([[0, 2, 0, 1],
[-1, -2, 1, 0]])),
]),
("Two1DIntArrayIndicesNoBroadcasting",
[IndexSpec(shape=(3, 3), indexer=(np.array([0, 1]),
np.array([1, 2]))),
IndexSpec(shape=(3, 4, 5), indexer=(np.array([0, 2, 0, 1]),
np.array([-1, 0, -1, 2]))),
]),
("Two1DIntArrayIndicesWithBroadcasting",
[IndexSpec(shape=(3, 3), indexer=(np.array([[0, 1]]),
np.array([1, 2]))),
IndexSpec(shape=(3, 4, 5), indexer=(np.array([[0, 2, 0, 1]]),
np.array([-1, 0, -1, 2]))),
]),
("TupleOfPythonIntsAndIntArrays",
[IndexSpec(shape=(3, 4, 5), indexer=(0, np.array([0, 1]))),
IndexSpec(shape=(3, 4, 5), indexer=(0, 1,
np.array([[2, 3, 0, 3]]))),
]),
("TupleOfListsOfPythonIntsAndIntArrays",
[IndexSpec(shape=(3, 4, 5), indexer=([0, 1], np.array([0]))),
IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]],
np.array([[2, 3, 0, 3]]))),
]),
]
for shape, indexer, _ in index_specs
],
dtype=float_dtypes,
)
def testAdvancedIntegerIndexingGrads(self, name, shape, dtype, indexer):
rng = jtu.rand_default(self.rng())
tol = 1e-2 if jnp.finfo(dtype).bits == 32 else None
arg = rng(shape, dtype)
fun = lambda x: jnp.asarray(x)[indexer]
check_grads(fun, (arg,), 2, tol, tol, eps=1.)
@jtu.sample_product(
[dict(name=name, shape=shape, indexer=indexer)
for name, index_specs in MIXED_ADVANCED_INDEXING_TESTS
for shape, indexer, _ in index_specs
],
dtype=all_dtypes,
)
def testMixedAdvancedIntegerIndexing(self, name, shape, dtype, indexer):
rng = jtu.rand_default(self.rng())
indexer_with_dummies = [e if isinstance(e, np.ndarray) else ()
for e in indexer]
substitutes = [(i, e) for i, e in enumerate(indexer)
if not isinstance(e, np.ndarray)]
args_maker = lambda: [rng(shape, dtype), indexer_with_dummies]
def jnp_fun(x, indexer_with_dummies):
idx = type(indexer)(util.subvals(indexer_with_dummies, substitutes))
return jnp.asarray(x)[idx]
def np_fun(x, indexer_with_dummies):
idx = type(indexer)(util.subvals(indexer_with_dummies, substitutes))
return np.asarray(x)[idx]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
def testAdvancedIndexingManually(self):
x = self.rng().randn(3, 4, 5)
index_array = np.array([0, 2, -1, 0])
op = lambda x, index_array: x[..., index_array, :]
cop = jax.jit(op)
a1 = op(x, index_array)
a2 = cop(x, index_array)
self.assertAllClose(a1, a2)
op = lambda x, index_array: x[..., index_array, :, index_array, None]
cop = jax.jit(op)
a1 = op(x, index_array)
a2 = cop(x, index_array)
self.assertAllClose(a1, a2)
op = lambda x, index_array: x[index_array, ..., index_array[:, None], None]
cop = jax.jit(op)
a1 = op(x, index_array)
a2 = cop(x, index_array)
self.assertAllClose(a1, a2)
def testUnpacking(self):
def foo(x):
a, b, c = x
return a + b + c
cfoo = jax.jit(foo)
a1 = foo(np.arange(3))
a2 = cfoo(np.arange(3))
self.assertAllClose(a1, a2)
def testBooleanIndexingArray1D(self):
idx = np.array([True, True, False])
x = jax.device_put(np.arange(3))
ans = x[idx]
expected = np.arange(3)[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingList1D(self):
idx = [True, True, False]
x = jax.device_put(np.arange(3))
with self.assertRaisesRegex(TypeError, ARRAY_MSG):
x[idx]
def testBooleanIndexingArray2DBroadcast(self):
idx = np.array([True, True, False, True])
x = np.arange(8).reshape(4, 2)
ans = jax.device_put(x)[idx]
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingList2DBroadcast(self):
idx = [True, True, False, True]
x = np.arange(8).reshape(4, 2)
with self.assertRaisesRegex(TypeError, ARRAY_MSG):
jax.device_put(x)[idx]
def testBooleanIndexingArray2D(self):
idx = np.array([[True, False],
[False, True],
[False, False],
[True, True]])
x = np.arange(8).reshape(4, 2)
ans = jax.device_put(x)[idx]
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBoolean1DIndexingWithEllipsis(self):
# Regression test for https://github.com/jax-ml/jax/issues/8412
x = np.arange(24).reshape(4, 3, 2)
idx = (..., np.array([True, False]))
ans = jnp.array(x)[idx]
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBoolean1DIndexingWithEllipsis2(self):
# Regression test for https://github.com/jax-ml/jax/issues/9050
x = np.arange(3)
idx = (..., np.array([True, False, True]))
ans = jnp.array(x)[idx]
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBoolean1DIndexingWithEllipsis3(self):
x = np.arange(6).reshape(2, 3)
idx = (0, ..., np.array([True, False, True]))
ans = jnp.array(x)[idx]
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBoolean2DIndexingWithEllipsis(self):
x = np.arange(24).reshape(4, 3, 2)
idx = (..., np.array([[True, False], [True, False], [False, False]]))
ans = jnp.array(x)[idx]
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBoolean1DIndexingWithTrailingEllipsis(self):
x = np.arange(24).reshape(4, 3, 2)
idx = (np.array([True, False, True, False]), ...)
ans = jnp.array(x)[idx]
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingDynamicShapeError(self):
x = np.zeros(3)
i = np.array([True, True, False])
self.assertRaises(IndexError, lambda: jax.jit(lambda x, i: x[i])(x, i))
def testIssue187(self):
x = jnp.ones((5, 5))
x[[0, 2, 4], [0, 2, 4]] # doesn't crash
x = np.arange(25).reshape((5, 5))
ans = jax.jit(lambda x: x[[0, 2, 4], [0, 2, 4]])(x)
expected = x[[0, 2, 4], [0, 2, 4]]
self.assertAllClose(ans, expected, check_dtypes=False)
def testJVPOfGradOfIndexing(self):
# Should return a value, even though we didn't pass a symbolic zero as the
# index tangent.
x = jnp.ones((3, 4), jnp.float32)
i = jnp.ones((3,), jnp.int32)
f = lambda x, i: jnp.sum(x[i])
primals, tangents = jax.jvp(jax.grad(f), (x, i),
(x, np.zeros(i.shape, dtypes.float0)))
expected = np.broadcast_to(
np.array([0, 3, 0], dtype=np.float32)[:, None], (3, 4))
self.assertAllClose(expected, primals)
self.assertAllClose(np.zeros_like(x), tangents)
def testSimpleIndexingUsesSlice(self):
jaxpr = jax.make_jaxpr(lambda x: x[:2, :2])(jnp.ones((3, 4)))
self.assertEqual(len(jaxpr.jaxpr.eqns), 1)
self.assertEqual(jaxpr.jaxpr.eqns[-1].primitive, lax.slice_p)
jaxpr = jax.make_jaxpr(lambda x: x[0, :2, 1])(jnp.ones((3, 4, 5)))
self.assertEqual(len(jaxpr.jaxpr.eqns), 2)
self.assertEqual(jaxpr.jaxpr.eqns[-2].primitive, lax.slice_p)
self.assertEqual(jaxpr.jaxpr.eqns[-1].primitive, lax.squeeze_p)
jaxpr = jax.make_jaxpr(lambda x: x[0, 0])(jnp.ones((3, 4, 5)))
self.assertEqual(len(jaxpr.jaxpr.eqns), 2)
self.assertEqual(jaxpr.jaxpr.eqns[-2].primitive, lax.slice_p)
self.assertEqual(jaxpr.jaxpr.eqns[-1].primitive, lax.squeeze_p)
jaxpr = jax.make_jaxpr(lambda x: x[:, 1])(jnp.ones((3, 4, 5)))
self.assertEqual(len(jaxpr.jaxpr.eqns), 2)
self.assertEqual(jaxpr.jaxpr.eqns[-2].primitive, lax.slice_p)
self.assertEqual(jaxpr.jaxpr.eqns[-1].primitive, lax.squeeze_p)
# Indexing with `Ellipsis` is not lowered to `gather` ...
jaxpr = jax.make_jaxpr(lambda x: x[..., 0])(jnp.ones((3, 4, 5)))
self.assertLen((jaxpr.jaxpr.eqns), 2)
self.assertEqual(jaxpr.jaxpr.eqns[-2].primitive, lax.slice_p)
self.assertEqual(jaxpr.jaxpr.eqns[-1].primitive, lax.squeeze_p)
# ... even when the ellipsis expands to no dimensions.
jaxpr = jax.make_jaxpr(lambda x: x[..., 0:1])(jnp.ones((3,)))
self.assertLen((jaxpr.jaxpr.eqns), 1)
self.assertEqual(jaxpr.jaxpr.eqns[-1].primitive, lax.slice_p)
jaxpr = jax.make_jaxpr(lambda x: x[0:1, ...])(jnp.ones((3,)))
self.assertLen((jaxpr.jaxpr.eqns), 1)
self.assertEqual(jaxpr.jaxpr.eqns[-1].primitive, lax.slice_p)
# Simple reverses lower to lax.rev_p
jaxpr = jax.make_jaxpr(lambda x: x[:, ::-1])(jnp.ones((3, 4)))
self.assertEqual(len(jaxpr.jaxpr.eqns), 1)
self.assertEqual(jaxpr.jaxpr.eqns[0].primitive, lax.rev_p)
# Non-static indices produce a dynamic slice
jaxpr = jax.make_jaxpr(lambda x, i: x[i])(jnp.ones((4,)), 2)
self.assertEqual(len(jaxpr.jaxpr.eqns), 6)
self.assertEqual(jaxpr.jaxpr.eqns[-2].primitive, lax.dynamic_slice_p)
self.assertEqual(jaxpr.jaxpr.eqns[-1].primitive, lax.squeeze_p)
def testTrivialGatherIsntGenerated(self):
# https://github.com/jax-ml/jax/issues/1621
jaxpr = jax.make_jaxpr(lambda x: x[:, None])(np.arange(4))
self.assertEqual(len(jaxpr.jaxpr.eqns), 1)
self.assertNotIn('gather', str(jaxpr))
jaxpr = jax.make_jaxpr(lambda x: x[0:6:1])(np.arange(4))
self.assertEqual(len(jaxpr.jaxpr.eqns), 0)
jaxpr = jax.make_jaxpr(lambda x: x[:4])(np.arange(4))
self.assertEqual(len(jaxpr.jaxpr.eqns), 0)
jaxpr = jax.make_jaxpr(lambda x: x[::-1])(np.arange(4))
self.assertEqual(len(jaxpr.jaxpr.eqns), 1)
self.assertEqual(jaxpr.jaxpr.eqns[0].primitive, lax.rev_p)
def testOOBEmptySlice(self):
x = jnp.arange(4, dtype='float32')
self.assertArraysEqual(x[1:0], jnp.empty(0, dtype='float32'))
self.assertArraysEqual(x[-2:-10], jnp.empty(0, dtype='float32'))
self.assertArraysEqual(x[5:10], jnp.empty(0, dtype='float32'))
x = jnp.arange(6, dtype='float32').reshape(2, 3)
self.assertArraysEqual(x[-1:-4], jnp.empty((0, 3), dtype='float32'))
self.assertArraysEqual(x[:, 3:2], jnp.empty((2, 0), dtype='float32'))
def testIndexingEmptyDimension(self):
# Issue 2671: XLA error when indexing into dimension of size 0
x = jnp.ones((2, 0))
# The following work, even on axis 1 of size 0
with jax.numpy_rank_promotion('allow'):
_ = x[0, :] + x[0, None] + x[0, 1:] + x[0, 1:3:2]
with self.assertRaisesRegex(IndexError,
"index .* is out of bounds for axis .* with size 0"):
_ = np.ones((2, 0))[0, 0] # The numpy error
with self.assertRaisesRegex(IndexError,
"index is out of bounds for axis .* with size 0"):
_ = x[0, 0] # JAX indexing
with self.assertRaisesRegex(IndexError,
"index is out of bounds for axis .* with size 0"):
jax.jit(lambda i: x[0, i])(0) # JAX indexing under jit
def testBooleanIndexingWithEmptyResult(self):
# based on a TensorFlow Probability test that started failing after #1622
x = jnp.array([-1])
mask = jnp.array([False])
ans = x[mask] # doesn't crash
expected = np.array([-1])[np.array([False])]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingShapeMismatch(self):
# Regression test for https://github.com/jax-ml/jax/issues/7329
x = jnp.arange(4)
idx = jnp.array([True, False])
with self.assertRaisesRegex(IndexError, "boolean index did not match shape.*"):
x[idx]
def testBooleanIndexingWithNone(self):
# Regression test for https://github.com/jax-ml/jax/issues/18542
x = jnp.arange(6).reshape(2, 3)
idx = (None, jnp.array([True, False]))
ans = x[idx]
expected = jnp.arange(3).reshape(1, 1, 3)
self.assertAllClose(ans, expected)
def testBooleanIndexingWithNoneAndEllipsis(self):
# Regression test for https://github.com/jax-ml/jax/issues/18542
x = jnp.arange(6).reshape(2, 3)
mask = jnp.array([True, False, False])
ans = x[None, ..., mask]
expected = jnp.array([0, 3]).reshape(1, 2, 1)
self.assertAllClose(ans, expected)
def testBooleanIndexingWithEllipsisAndNone(self):
# Regression test for https://github.com/jax-ml/jax/issues/18542
x = jnp.arange(6).reshape(2, 3)
mask = jnp.array([True, False, False])
ans = x[..., None, mask]
expected = jnp.array([0, 3]).reshape(2, 1, 1)
self.assertAllClose(ans, expected)
def testNontrivialBooleanIndexing(self):
# Test nontrivial corner case in boolean indexing shape validation
rng = jtu.rand_default(self.rng())
index = (rng((2, 3), np.bool_), rng((6,), np.bool_))
args_maker = lambda: [rng((2, 3, 6), np.int32)]
np_fun = lambda x: np.asarray(x)[index]
jnp_fun = lambda x: jnp.asarray(x)[index]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.parameters(
[(3,), (0,)],
[(3, 4), (0,)],
[(3, 4), (0, 4)],
[(3, 4), (3, 0)],
[(3, 4, 5), (3, 0)],
)
def testEmptyBooleanIndexing(self, x_shape, m_shape):
# Regression test for https://github.com/jax-ml/jax/issues/22886
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(x_shape, np.int32), np.empty(m_shape, dtype=bool)]
np_fun = lambda x, m: np.asarray(x)[np.asarray(m)]
jnp_fun = lambda x, m: jnp.asarray(x)[jnp.asarray(m)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@jtu.sample_product(
shape=[(2, 3, 4, 5)],
idx=[
np.index_exp[True],
np.index_exp[False],
np.index_exp[..., True],
np.index_exp[..., False],
np.index_exp[0, :2, True],
np.index_exp[0, :2, False],
np.index_exp[:2, 0, True],
np.index_exp[:2, 0, False],
np.index_exp[:2, np.array([0, 2]), True],
np.index_exp[np.array([1, 0]), :, True],
np.index_exp[True, :, True, :, np.array(True)],
]
)
def testScalarBooleanIndexing(self, shape, idx):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, np.int32)]
np_fun = lambda x: np.asarray(x)[idx]
jnp_fun = lambda x: jnp.asarray(x)[idx]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@jtu.sample_product(
shape=[(2, 3, 4, 5)],
update_ndim=[0, 1, 2],
idx=[
np.index_exp[True],
np.index_exp[False],
np.index_exp[..., True],
np.index_exp[..., False],
np.index_exp[0, :2, True],
np.index_exp[0, :2, False],
np.index_exp[:2, 0, True],
np.index_exp[:2, 0, False],
np.index_exp[:2, np.array([0, 2]), True],
np.index_exp[np.array([1, 0]), :, True],
np.index_exp[True, :, True, :, np.array(True)],
]
)
def testScalarBoolUpdate(self, shape, idx, update_ndim):
update_shape = np.zeros(shape)[idx].shape[-update_ndim:]
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, np.int32), rng(update_shape, np.int32)]
def np_fun(x, update):
x = np.array(x, copy=True)
x[idx] = update
return x
jnp_fun = lambda x, update: jnp.asarray(x).at[idx].set(update)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
def testFloatIndexingError(self):
BAD_INDEX_TYPE_ERROR = "Indexer must have integer or boolean type, got indexer with type"
with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):
jnp.zeros(2)[0.]
with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):
jnp.zeros((2, 2))[(0, 0.)]
with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):
jnp.zeros((2, 2))[(0, 0.)]
with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):
jax.jit(lambda idx: jnp.zeros((2, 2))[idx])((0, 0.))
with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):
jnp.zeros(2).at[0.].add(1.)
with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):
jnp.zeros(2).at[0.].set(1.)
with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):
jnp.zeros((2, 2))[jnp.arange(2), 1.0]
with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):
jnp.zeros((2, 2))[jnp.arange(2), 1 + 1j]
def testStrIndexingError(self):
msg = "JAX does not support string indexing"
with self.assertRaisesRegex(TypeError, msg):
jnp.zeros(2)['abc']
with self.assertRaisesRegex(TypeError, msg):
jnp.zeros((2, 3))[:, 'abc']
@jtu.sample_product(
mode=["promise_in_bounds", "fill", "clip", "drop"],
wrap_negative_indices=[True, False],
shape=[(5,), (10,)],
idx_shape=[(5,)],
)
def testWrapNegativeIndices1D(self, mode, wrap_negative_indices, shape, idx_shape):
"""Test the behavior of the wrap_negative_indices parameter in array.at[...].get()"""
fill_value = 99
data_rng = jtu.rand_default(self.rng())
idx_rng = jtu.rand_uniform(self.rng(), low=-12, high=12)
args_maker = lambda: [data_rng(shape, 'float32'), idx_rng(idx_shape, 'int32')]
def jnp_fun(data, idx):
return jnp.array(data).at[idx].get(
mode=mode,
fill_value=fill_value,
wrap_negative_indices=wrap_negative_indices)
def np_fun(data, idx):
if wrap_negative_indices:
idx = np.where(idx < 0, idx + len(data), idx)
out_of_bound = (idx < 0) | (idx >= len(data))
safe_idx = np.where(out_of_bound, 0, idx)
result = data[safe_idx]
if mode in ["fill", "drop"]:
result = np.where(out_of_bound, fill_value, result)
elif mode in ["promise_in_bounds", "clip"]:
result = np.where(idx < 0, data[0],
np.where(idx >= len(data), data[-1],
result))
else:
raise ValueError(f"Unrecognized mode {mode!r}")
return result
tol = 1E-4 if jtu.test_device_matches(["tpu"]) else None
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, tol=tol)
def testIndexOutOfBounds(self): # https://github.com/jax-ml/jax/issues/2245
x = jnp.arange(5, dtype=jnp.int32) + 1
self.assertAllClose(x, x[:10])
idx = jnp.array([-10, -6, -5, -4, 0, 3, 4, 5, 6, 100])
self.assertArraysEqual(
x.at[idx].get(mode="clip"),
jnp.array([1, 1, 1, 2, 1, 4, 5, 5, 5, 5], jnp.int32))
nan = np.nan
self.assertArraysEqual(
x.astype(jnp.float32).at[idx].get(mode="fill"),
jnp.array([nan, nan, 1, 2, 1, 4, 5, nan, nan, nan], jnp.float32))
imin = np.iinfo(np.int32).min
self.assertArraysEqual(
x.at[idx].get(mode="fill"),
jnp.array([imin, imin, 1, 2, 1, 4, 5, imin, imin, imin], jnp.int32))
umax = np.iinfo(np.uint32).max
self.assertArraysEqual(
x.astype(np.uint32).at[idx].get(mode="fill"),
jnp.array([umax, umax, 1, 2, 1, 4, 5, umax, umax, umax], jnp.uint32))
self.assertArraysEqual(
x.at[idx].get(mode="fill", fill_value=7),
jnp.array([7, 7, 1, 2, 1, 4, 5, 7, 7, 7], jnp.int32))
def testIndexingWeakTypes(self):
x = lax_internal._convert_element_type(jnp.arange(5), dtypes.dtype(float),
weak_type=True)
a = x.at[0].set(1.0)
self.assertEqual(a.dtype, x.dtype)
self.assertTrue(dtypes.is_weakly_typed(a))
b = x.at[0].add(1.0)
self.assertEqual(b.dtype, x.dtype)
self.assertTrue(dtypes.is_weakly_typed(b))
c = x.at[0].mul(1.0)
self.assertEqual(c.dtype, x.dtype)
self.assertTrue(dtypes.is_weakly_typed(c))
def testIndexingTypePromotion(self):
def _check(x_type, y_type):
x = jnp.arange(5, dtype=x_type)
y = y_type(0)
out = x.at[0].set(y)
self.assertEqual(x.dtype, out.dtype)
@jtu.ignore_warning(category=np.exceptions.ComplexWarning,
message="Casting complex values to real")
def _check_warns(x_type, y_type, msg):
with self.assertWarnsRegex(FutureWarning, msg):
_check(x_type, y_type)
def _check_raises(x_type, y_type, msg):
with self.assertRaisesRegex(ValueError, msg):
_check(x_type, y_type)
# Matching dtypes are always OK
_check(jnp.int32, jnp.int32)
_check(jnp.float32, jnp.float32)
_check(jnp.complex64, jnp.complex64)
# Weakly-typed y values promote.
_check(jnp.int32, int)
_check(jnp.float32, int)
_check(jnp.float32, float)
_check(jnp.complex64, int)
_check(jnp.complex64, float)
_check(jnp.complex64, complex)
# in standard promotion mode, strong types can promote.
msg = "scatter inputs have incompatible types"
with jax.numpy_dtype_promotion('standard'):
_check(jnp.int32, jnp.int16)
_check(jnp.float32, jnp.float16)
_check(jnp.float32, jnp.int32)
_check(jnp.complex64, jnp.int32)
_check(jnp.complex64, jnp.float32)
# TODO(jakevdp): make these _check_raises
_check_warns(jnp.int16, jnp.int32, msg)
_check_warns(jnp.int32, jnp.float32, msg)
_check_warns(jnp.int32, jnp.complex64, msg)
_check_warns(jnp.float16, jnp.float32, msg)
_check_warns(jnp.float32, jnp.complex64, msg)
# in strict promotion mode, strong types do not promote.
msg = "Input dtypes .* have no available implicit dtype promotion path"
with jax.numpy_dtype_promotion('strict'):
_check_raises(jnp.int32, jnp.int16, msg)
_check_raises(jnp.float32, jnp.float16, msg)
_check_raises(jnp.float32, jnp.int32, msg)
_check_raises(jnp.complex64, jnp.int32, msg)
_check_raises(jnp.complex64, jnp.float32, msg)
_check_raises(jnp.int16, jnp.int32, msg)
_check_raises(jnp.int32, jnp.float32, msg)
_check_raises(jnp.int32, jnp.complex64, msg)
_check_raises(jnp.float16, jnp.float32, msg)
_check_raises(jnp.float32, jnp.complex64, msg)
def testWrongNumberOfIndices(self):
with self.assertRaisesRegex(
IndexError,
"Too many indices: 0-dimensional array indexed with 1 regular index."):
jnp.array(1)[0]
with self.assertRaisesRegex(
IndexError,
"Too many indices: 1-dimensional array indexed with 2 regular indices."):
jnp.zeros(3)[:, 5]
@jtu.sample_product(shape=[(), (1,)])
def testIndexDtypePromotion(self, shape):
# Regression test for https://github.com/jax-ml/jax/issues/31396
numbers = jnp.arange(1000)[:, None]
idx = jnp.int8(0).reshape(shape)
expected = np.array(999).reshape(shape)
self.assertArraysEqual(numbers[999, idx], expected)
def _broadcastable_shapes(shape):
"""Returns all shapes that broadcast to `shape`."""
def f(rshape):
yield []
if rshape:
for s in f(rshape[1:]):
yield rshape[0:1] + s
if rshape[0] != 1:
for s in f(rshape[1:]):
yield [1] + s
for x in f(list(reversed(shape))):
yield list(reversed(x))
# TODO(jakevdp): move this implementation to jax.dtypes & use in scatter?
def _can_cast(from_, to):
with jax.numpy_dtype_promotion('standard'):
return lax.dtype(to) == dtypes.result_type(from_, to)
def _compatible_dtypes(op, dtype, inexact=False):
if op == UpdateOps.ADD or op == UpdateOps.SUB:
return [dtype]
elif inexact:
return [dt for dt in float_dtypes if _can_cast(dt, dtype)]
else:
return [dt for dt in all_dtypes if _can_cast(dt, dtype)]
| IndexingTest |
python | readthedocs__readthedocs.org | readthedocs/api/v3/proxied_views.py | {
"start": 171,
"end": 240
} | class ____(ProxiedAPIMixin, EmbedAPIBase):
pass
| ProxiedEmbedAPIBase |
python | streamlit__streamlit | lib/streamlit/components/v2/types.py | {
"start": 1720,
"end": 12505
} | class ____(Protocol):
'''Signature of the mounting command returned by ``st.components.v2.component``.
This callable mounts a bidirectional component in a Streamlit app and
returns a ``BidiComponentResult`` object that exposes the component's
state and trigger values.
For published components, this callable is often wrapped in a user-friendly
command with typed parameters and declared defaults.
Parameters
----------
key : str or None
An optional string to use as the unique key for the
component instance. If this is omitted, an internal key is generated
for the component instance based on its mounting parameters. No two
Streamlit elements may have the same key.
When a key is defined, the component's state is available in Session
State via the key.
.. note::
If you want to access this key in your component's frontend, you
must pass it explicitly within the ``data`` parameter. The ``key``
parameter in ``BidiComponentCallable`` is not the same as the
``key`` property in ``ComponentArgs`` in the component's frontend
code.
The frontend key is automatically generated to be unique among all
instances of all components and to avoid collisions with classes
and IDs in the app's DOM.
data : Any or None
Data to pass to the component. This can be one of the following:
- A JSON-serializable object, like ``Dict[str, str | int]`` or
``List[str]``.
- An Arrow-serializable object, like ``pandas.DataFrame``.
- Raw bytes.
- A dictionary of JSON-serializable and Arrow-serializable objects.
The dictionary's keys must be Python primitives.
Because this data is sent to the frontend, it must be serializable by
one of the supported serialization methods (JSON, Arrow, or raw bytes).
You can't pass arbitrary Python objects. Arrow-serialization is only
supported at the top level of the ``data`` parameter or one level deep
in a dictionary. Raw bytes are only supported at the top level.
default : dict[str, Any] or None
Default state values for the component. Each key in the dictionary must
correspond to a valid state attribute with an ``on_<key>_change``
callback. This callback can be empty, but must be included as a
parameter when the component is mounted.
Trigger values do not support manual defaults. All trigger and state
values defined by an associated callback are initialized to ``None`` by
default.
width : "stretch", "content", or int
Width of the component. This can be one of the following:
- ``"stretch"`` (default): The component is wrapped in a ``<div>`` with
CSS style ``width: 100%;``.
- ``"content"``: The component is wrapped in a ``<div>`` with CSS
style ``width: fit-content;``.
- An integer specifying the width in pixels: The component is wrapped
in a ``<div>`` with the specified pixel width.
You are responsible for ensuring the component's inner HTML content is
responsive to the ``<div>`` wrapper.
height : "content", "stretch", or int
Height of the component. This can be one of the following:
- ``"content"`` (default): The component is wrapped in a ``<div>`` with
CSS style ``height: auto;``.
- ``"stretch"``: The component is wrapped in a ``<div>`` with CSS
style ``height: 100%;``.
- An integer specifying the height in pixels: The component is wrapped
in a ``<div>`` with the specified pixel height. If the component
content is larger than the specified height, scrolling is enabled.
.. note::
Use scrolling containers sparingly. If you use scrolling
containers, avoid heights that exceed 500 pixels. Otherwise,
the scroll surface of the container might cover the majority of
the screen on mobile devices, which makes it hard to scroll the
rest of the app.
If you want to disable scrolling for a fixed-height component,
include an inner ``<div>`` wrapper in your component's HTML to
control the overflow behavior.
You are responsible for ensuring the component's inner HTML content is
responsive to the ``<div>`` wrapper.
isolate_styles : bool
Whether to sandbox the component styles in a shadow root. If this is
``True`` (default), the component's HTML is mounted inside a shadow DOM
and, in your component's JavaScript, ``parentElement`` returns a
``ShadowRoot``. If this is ``False``, the component's HTML is mounted
directly into the app's DOM tree, and ``parentElement`` returns a
regular ``HTMLElement``.
**callbacks : Callable or None
Callbacks with the naming pattern ``on_<key>_change`` for each state and
trigger key. For example, if your component has a state key of
``"value"`` and a trigger key of ``"click"``, its callbacks can include
``on_value_change`` and ``on_click_change``.
Only names that follow this pattern are recognized. Custom components
don't currently support callbacks with arguments.
Callbacks are required for any state values defined in the ``default``
parameter. Otherwise, a callback is optional. To ensure your
component's result always returns the expected attributes, you can pass
empty callbacks like ``lambda: None``.
Returns
-------
BidiComponentResult
Component state object that exposes state and trigger values.
Examples
--------
**Example 1: Create a bidirectional text input component**
If you assign a key to a mounted instance of a component, you can feed its
state back into the component through the ``data`` parameter. This allows
you to both read and write state values from Session State. The following
example has a user-friendly wrapper around the mounting command to provide
typed parameters and a clean end-user API. A couple buttons demonstrate
programmatic updates to the component's state.
.. code-block:: python
import streamlit as st
HTML = """
<label style='padding-right: 1em;' for='txt'>Enter text</label>
<input id='txt' type='text' />
"""
JS = """
export default function(component) {
const { setStateValue, parentElement, data } = component;
const label = parentElement.querySelector('label');
label.innerText = data.label;
const input = parentElement.querySelector('input');
if (input.value !== data.value) {
input.value = data.value ?? '';
};
input.onkeydown = (e) => {
if (e.key === 'Enter') {
setStateValue('value', e.target.value);
}
};
input.onblur = (e) => {
setStateValue('value', e.target.value);
};
}
"""
my_component = st.components.v2.component(
"my_text_input",
html=HTML,
js=JS,
)
def my_component_wrapper(
label, *, default="", key=None, on_change=lambda: None
):
component_state = st.session_state.get(key, {})
value = component_state.get("value", default)
data = {"label": label, "value": value}
result = my_component(
data=data,
default={"value": value},
key=key,
on_value_change=on_change,
)
return result
st.title("My custom component")
if st.button("Hello World"):
st.session_state["my_text_input_instance"]["value"] = "Hello World"
if st.button("Clear text"):
st.session_state["my_text_input_instance"]["value"] = ""
result = my_component_wrapper(
"Enter something",
default="I love Streamlit!",
key="my_text_input_instance",
)
st.write("Result:", result)
st.write("Session state:", st.session_state)
.. output ::
https://doc-components-text-input.streamlit.app/
height: 600px
**Example 2: Add Tailwind CSS to a component**
You can use the ``isolate_styles`` parameter to disable shadow DOM
isolation and apply global styles like Tailwind CSS to your component. The
following example creates a simple button styled with Tailwind CSS. This
example also demonstrates using different keys to mount multiple instances
of the same component in one app.
.. code-block:: python
import streamlit as st
with open("tailwind.js", "r") as f:
TAILWIND_SCRIPT = f.read()
HTML = """
<button class="bg-blue-500 hover:bg-blue-700 text-white py-1 px-3 rounded">
Click me!
</button>
"""
JS = (
TAILWIND_SCRIPT
+ """
export default function(component) {
const { setTriggerValue, parentElement } = component;
const button = parentElement.querySelector('button');
button.onclick = () => {
setTriggerValue('clicked', true);
};
}
"""
)
my_component = st.components.v2.component(
"my_tailwind_button",
html=HTML,
js=JS,
)
result_1 = my_component(
isolate_styles=False, on_clicked_change=lambda: None, key="one"
)
result_1
result_2 = my_component(
isolate_styles=False, on_clicked_change=lambda: None, key="two"
)
result_2
.. output ::
https://doc-components-tailwind-button.streamlit.app/
height: 350px
'''
def __call__(
self,
*,
key: BidiComponentKey = None,
data: BidiComponentData = None,
default: BidiComponentDefaults = None,
width: Width = "stretch",
height: Height = "content",
isolate_styles: ComponentIsolateStyles = True,
**on_callbacks: WidgetCallback | None,
) -> BidiComponentResult: ...
__all__ = [
"BidiComponentCallable",
"BidiComponentData",
"BidiComponentDefaults",
"BidiComponentKey",
"ComponentIsolateStyles",
]
| BidiComponentCallable |
python | facebook__pyre-check | client/language_server/tests/protocol_test.py | {
"start": 1231,
"end": 1600
} | class ____(AsyncBytesWriter):
"""
An AsyncBytesWriter that always raises a given except when write is invoked.
"""
def __init__(self, exception: Exception) -> None:
self.exception = exception
async def write(self, data: bytes) -> None:
raise self.exception
async def close(self) -> None:
pass
| ExceptionRaisingBytesWriter |
python | walkccc__LeetCode | solutions/2248. Intersection of Multiple Arrays/2248.py | {
"start": 0,
"end": 240
} | class ____:
def intersection(self, nums: list[list[int]]) -> list[int]:
count = [0] * 1001
for row in nums:
for a in row:
count[a] += 1
return [i for i, c in enumerate(count)
if c == len(nums)]
| Solution |
python | py-pdf__pypdf | pypdf/generic/_data_structures.py | {
"start": 39134,
"end": 39186
} | class ____(StreamObject):
pass
| DecodedStreamObject |
python | dask__distributed | distributed/comm/inproc.py | {
"start": 3715,
"end": 6934
} | class ____(Comm):
"""
An established communication based on a pair of in-process queues.
Reminder: a Comm must always be used from a single thread.
Its peer Comm can be running in any thread.
"""
_initialized = False
def __init__( # type: ignore[no-untyped-def]
self,
local_addr: str,
peer_addr: str,
read_q,
write_q,
write_loop,
deserialize: bool = True,
):
super().__init__(deserialize=deserialize)
self._local_addr = local_addr
self._peer_addr = peer_addr
self._read_q = read_q
self._write_q = write_q
self._write_loop = write_loop
self._closed = False
self._finalizer = weakref.finalize(self, self._get_finalizer())
self._finalizer.atexit = False
self._initialized = True
def _get_finalizer(self):
r = repr(self)
def finalize(
read_q=self._read_q,
write_q=self._write_q,
write_loop=self._write_loop,
is_finalizing=sys.is_finalizing,
r=r,
):
if read_q.peek(None) is _EOF or is_finalizing():
return
logger.warning(f"Closing dangling queue in {r}")
write_loop.add_callback(write_q.put_nowait, _EOF)
return finalize
@property
def local_address(self) -> str:
return self._local_addr
@property
def peer_address(self) -> str:
return self._peer_addr
@property
def same_host(self) -> bool:
return True
async def read(self, deserializers="ignored"):
if self._closed:
raise CommClosedError()
msg = await self._read_q.get()
if msg is _EOF:
self._closed = True
self._finalizer.detach()
raise CommClosedError()
msg = _nested_deserialize(msg, self.deserialize)
return msg
async def write(self, msg, serializers=None, on_error=None):
if self.closed():
raise CommClosedError()
# Ensure we feed the queue in the same thread it is read from.
self._write_loop.add_callback(self._write_q.put_nowait, msg)
return 1
async def close(self):
self.abort()
def abort(self):
if not self.closed():
# Putting EOF is cheap enough that we do it on abort() too
self._write_loop.add_callback(self._write_q.put_nowait, _EOF)
self._read_q.put_nowait(_EOF)
self._write_q = self._read_q = None
self._closed = True
self._finalizer.detach()
def closed(self):
"""
Whether this comm is closed. An InProc comm is closed if:
1) close() or abort() was called on this comm
2) close() or abort() was called on the other end and the
read queue is empty
"""
if self._closed:
return True
# NOTE: repr() is called by finalize() during __init__()...
if self._initialized and self._read_q.peek(None) is _EOF:
self._closed = True
self._finalizer.detach()
return True
else:
return False
| InProc |
python | doocs__leetcode | solution/3100-3199/3165.Maximum Sum of Subsequence With Non-adjacent Elements/Solution.py | {
"start": 1721,
"end": 2126
} | class ____:
def maximumSumSubsequence(self, nums: List[int], queries: List[List[int]]) -> int:
n = len(nums)
tree = SegmentTree(n)
for i, x in enumerate(nums, 1):
tree.modify(1, i, x)
ans = 0
mod = 10**9 + 7
for i, x in queries:
tree.modify(1, i + 1, x)
ans = (ans + tree.query(1, 1, n)) % mod
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/sparse_ops/sparse_ops_test.py | {
"start": 46658,
"end": 49950
} | class ____(test_util.TensorFlowTestCase):
def _assertSparseTensorValueEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
self.assertAllEqual(a.values, b.values)
self.assertAllEqual(a.dense_shape, b.dense_shape)
def testBasic(self):
with test_util.force_cpu():
# 1-D, values at index 0.
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[0]], [1], [7])
max_tf = sparse_ops.sparse_maximum(sp_zero, sp_one)
min_tf = sparse_ops.sparse_minimum(sp_zero, sp_one)
self._assertSparseTensorValueEqual(sp_one, max_tf)
self._assertSparseTensorValueEqual(sp_zero, min_tf)
# Values at different indices.
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_zero_2 = sparse_tensor.SparseTensor([[1]], [0], [7])
expected = sparse_tensor.SparseTensor([[0], [1]], [0, 0], [7])
max_tf = sparse_ops.sparse_maximum(sp_zero, sp_zero_2)
min_tf = sparse_ops.sparse_minimum(sp_zero, sp_zero_2)
self._assertSparseTensorValueEqual(expected, max_tf)
self._assertSparseTensorValueEqual(expected, min_tf)
def testInvalidSparseInputs(self):
with test_util.force_cpu():
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
".*Index rank .* and shape rank .* do not match.*",
):
self.evaluate(
gen_sparse_ops.sparse_sparse_maximum(
[[1]], [0], [2], [[]], [1], [2]
)
)
@test_util.run_deprecated_v1
def testRandom(self):
np.random.seed(1618)
shapes = [(13,), (6, 8), (1, 7, 1)]
for shape in shapes:
for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]:
a_np = np.random.randn(*shape).astype(dtype)
b_np = np.random.randn(*shape).astype(dtype)
sp_a, unused_a_nnz = _sparsify(a_np, thresh=-.5)
sp_b, unused_b_nnz = _sparsify(b_np, thresh=-.5)
with self.cached_session(use_gpu=False):
maximum_tf = sparse_ops.sparse_maximum(sp_a, sp_b)
maximum_tf_densified = sparse_ops.sparse_tensor_to_dense(
maximum_tf).eval()
minimum_tf = sparse_ops.sparse_minimum(sp_a, sp_b)
minimum_tf_densified = sparse_ops.sparse_tensor_to_dense(
minimum_tf).eval()
a_densified = sparse_ops.sparse_tensor_to_dense(sp_a).eval()
b_densified = sparse_ops.sparse_tensor_to_dense(sp_b).eval()
self.assertAllEqual(
np.maximum(a_densified, b_densified), maximum_tf_densified)
self.assertAllEqual(
np.minimum(a_densified, b_densified), minimum_tf_densified)
def testMismatchedShapes(self):
with test_util.force_cpu():
sp_zero = sparse_tensor.SparseTensor([[0, 0]], [0], [1, 1])
sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
with self.assertRaisesOpError("Operands do not have the same ranks"):
self.evaluate(sparse_ops.sparse_maximum(sp_zero, sp_one))
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [1])
sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
with self.assertRaisesOpError("Operands' shapes do not match"):
self.evaluate(sparse_ops.sparse_maximum(sp_zero, sp_one))
| SparseMinimumMaximumTest |
python | tensorflow__tensorflow | tensorflow/python/debug/wrappers/framework_test.py | {
"start": 15112,
"end": 16637
} | class ____(test_util.TensorFlowTestCase):
def testWrapperHasAllPublicMethodsOfSession(self):
session_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(session.Session, predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
wrapper_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(
framework.BaseDebugWrapperSession, predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
missing_public_methods = [
method for method in session_public_methods
if method not in wrapper_public_methods]
self.assertFalse(missing_public_methods)
def testWrapperHasAllPublicMethodsOfMonitoredSession(self):
session_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(monitored_session.MonitoredSession,
predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
wrapper_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(
framework.BaseDebugWrapperSession, predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
missing_public_methods = [
method for method in session_public_methods
if method not in wrapper_public_methods]
self.assertFalse(missing_public_methods)
if __name__ == "__main__":
googletest.main()
| SessionWrapperPublicMethodParityTest |
python | ray-project__ray | python/ray/serve/_private/benchmarks/serialization/common.py | {
"start": 385,
"end": 605
} | class ____(BaseModel):
text: Optional[str] = None
floats: Optional[List[float]] = None
ints: Optional[List[int]] = None
ts: Optional[float] = None
reason: Optional[str] = None
@dataclass
| PayloadPydantic |
python | jazzband__django-waffle | test_app/models.py | {
"start": 446,
"end": 631
} | class ____(AbstractBaseUser):
company = models.ForeignKey(
Company,
on_delete=CASCADE
)
username = models.CharField(
max_length=100,
)
| CompanyUser |
python | chroma-core__chroma | chromadb/api/types.py | {
"start": 21422,
"end": 21954
} | class ____(TypedDict):
dimensionality: int
# The current number of elements in the index (total = additions - deletes)
curr_elements: int
# The auto-incrementing ID of the last inserted element, never decreases so
# can be used as a count of total historical size. Should increase by 1 every add.
# Assume cannot overflow
total_elements_added: int
time_created: float
Space = Literal["cosine", "l2", "ip"]
# TODO: make warnings prettier and add link to migration docs
@runtime_checkable
| IndexMetadata |
python | mozilla__bleach | bleach/_vendor/html5lib/_inputstream.py | {
"start": 13677,
"end": 21145
} | class ____(HTMLUnicodeInputStream):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
def __init__(self, source, override_encoding=None, transport_encoding=None,
same_origin_parent_encoding=None, likely_encoding=None,
default_encoding="windows-1252", useChardet=True):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
# Raw Stream - for unicode objects this will encode to utf-8 and set
# self.charEncoding as appropriate
self.rawStream = self.openStream(source)
HTMLUnicodeInputStream.__init__(self, self.rawStream)
# Encoding Information
# Number of bytes to use when looking for a meta element with
# encoding information
self.numBytesMeta = 1024
# Number of bytes to use when using detecting encoding using chardet
self.numBytesChardet = 100
# Things from args
self.override_encoding = override_encoding
self.transport_encoding = transport_encoding
self.same_origin_parent_encoding = same_origin_parent_encoding
self.likely_encoding = likely_encoding
self.default_encoding = default_encoding
# Determine encoding
self.charEncoding = self.determineEncoding(useChardet)
assert self.charEncoding[0] is not None
# Call superclass
self.reset()
def reset(self):
self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace')
HTMLUnicodeInputStream.reset(self)
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = BytesIO(source)
try:
stream.seek(stream.tell())
except Exception:
stream = BufferedStream(stream)
return stream
def determineEncoding(self, chardet=True):
# BOMs take precedence over everything
# This will also read past the BOM if present
charEncoding = self.detectBOM(), "certain"
if charEncoding[0] is not None:
return charEncoding
# If we've been overridden, we've been overridden
charEncoding = lookupEncoding(self.override_encoding), "certain"
if charEncoding[0] is not None:
return charEncoding
# Now check the transport layer
charEncoding = lookupEncoding(self.transport_encoding), "certain"
if charEncoding[0] is not None:
return charEncoding
# Look for meta elements with encoding information
charEncoding = self.detectEncodingMeta(), "tentative"
if charEncoding[0] is not None:
return charEncoding
# Parent document encoding
charEncoding = lookupEncoding(self.same_origin_parent_encoding), "tentative"
if charEncoding[0] is not None and not charEncoding[0].name.startswith("utf-16"):
return charEncoding
# "likely" encoding
charEncoding = lookupEncoding(self.likely_encoding), "tentative"
if charEncoding[0] is not None:
return charEncoding
# Guess with chardet, if available
if chardet:
try:
from chardet.universaldetector import UniversalDetector
except ImportError:
pass
else:
buffers = []
detector = UniversalDetector()
while not detector.done:
buffer = self.rawStream.read(self.numBytesChardet)
assert isinstance(buffer, bytes)
if not buffer:
break
buffers.append(buffer)
detector.feed(buffer)
detector.close()
encoding = lookupEncoding(detector.result['encoding'])
self.rawStream.seek(0)
if encoding is not None:
return encoding, "tentative"
# Try the default encoding
charEncoding = lookupEncoding(self.default_encoding), "tentative"
if charEncoding[0] is not None:
return charEncoding
# Fallback to html5lib's default if even that hasn't worked
return lookupEncoding("windows-1252"), "tentative"
def changeEncoding(self, newEncoding):
assert self.charEncoding[1] != "certain"
newEncoding = lookupEncoding(newEncoding)
if newEncoding is None:
return
if newEncoding.name in ("utf-16be", "utf-16le"):
newEncoding = lookupEncoding("utf-8")
assert newEncoding is not None
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], "certain")
else:
self.rawStream.seek(0)
self.charEncoding = (newEncoding, "certain")
self.reset()
raise _ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
def detectBOM(self):
"""Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None"""
bomDict = {
codecs.BOM_UTF8: 'utf-8',
codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be',
codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
assert isinstance(string, bytes)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
if encoding:
self.rawStream.seek(seek)
return lookupEncoding(encoding)
else:
self.rawStream.seek(0)
return None
def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
assert isinstance(buffer, bytes)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding is not None and encoding.name in ("utf-16be", "utf-16le"):
encoding = lookupEncoding("utf-8")
return encoding
| HTMLBinaryInputStream |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType28.py | {
"start": 3081,
"end": 3154
} | class ____(Variadic[T]): ...
# This should generate an error.
| VariadicChild |
python | FactoryBoy__factory_boy | tests/alchemyapp/models.py | {
"start": 1098,
"end": 1254
} | class ____(Base):
__tablename__ = 'SpecialFieldModelTable'
id = Column(Integer(), primary_key=True)
session = Column(Unicode(20))
| SpecialFieldModel |
python | google__jax | tests/lax_autodiff_test.py | {
"start": 9308,
"end": 49064
} | class ____(jtu.JaxTestCase):
@parameterized.parameters(itertools.chain.from_iterable(
jtu.sample_product_testcases(
[dict(op=rec.op, rng_factory=rec.rng_factory, order=rec.order, tol=rec.tol)],
shapes=[
shapes for shape_group in compatible_shapes
for shapes in itertools.combinations_with_replacement(shape_group, rec.nargs)
],
dtype=rec.dtypes,
)
for rec in LAX_GRAD_OPS
))
def testOpGrad(self, op, rng_factory, shapes, dtype, order, tol):
rng = rng_factory(self.rng())
if jtu.test_device_matches(["cpu", "tpu"]):
if op is lax.cosh and dtype == np.complex64:
tol = 3e-1 # 2nd-order gradients are noisy on CPU and TPU
if jtu.test_device_matches(["tpu"]):
if op is lax.pow:
raise SkipTest("pow grad imprecise on tpu")
if op is lax.cos:
order = 1 # 2nd-order gradient is imprecise on TPU.
if op is lax.sin:
order = 1 # 2nd-order gradient is imprecise on TPUv5p.
if op is lax.log:
order = 1 # 2nd-order gradient is imprecise on TPU.
tol = jtu.join_tolerance(1.5e-1, tol) if jtu.num_float_bits(dtype) == 32 else tol
args = tuple(rng(shape, dtype) for shape in shapes)
check_grads(op, args, order, ["fwd", "rev"], tol, tol)
@parameterized.parameters(itertools.chain.from_iterable(
jtu.sample_product_testcases(
[dict(op=rec.op, tol=rec.tol)],
special_value=rec.values,
)
for rec in LAX_GRAD_SPECIAL_VALUE_TESTS
))
def testOpGradSpecialValue(self, op, special_value, tol):
if op in (lax.sinh, lax.cosh) and jtu.test_device_matches(["tpu"]):
tol = {np.float32: 1e-2}
check_grads(op, (special_value,), 2, ["fwd", "rev"], rtol=tol, atol=tol)
@jtu.sample_product(
from_dtype=inexact_dtypes,
to_dtype=inexact_dtypes,
)
def testConvertElementTypeGrad(self, from_dtype, to_dtype):
rng = jtu.rand_default(self.rng())
tol = max(jtu.tolerance(to_dtype, jtu.default_gradient_tolerance),
jtu.tolerance(from_dtype, jtu.default_gradient_tolerance))
args = (rng((2, 3), from_dtype),)
convert_element_type = lambda x: lax.convert_element_type(x, to_dtype)
convert_element_type = jtu.ignore_warning(category=np.exceptions.ComplexWarning)(
convert_element_type)
check_grads(convert_element_type, args, 2, ["fwd", "rev"], tol, tol, eps=1.)
@jtu.sample_product(
shape=[(), (2, 3)],
dtype=grad_float_dtypes,
)
def testClampGrad(self, shape, dtype):
rng = jtu.rand_default(self.rng())
operand = rng(shape, dtype)
low = operand - dtype(10)
high = operand + dtype(10)
# Avoids points near the boundary where the gradient may be inaccurate.
check_grads(lax.clamp, (operand, low, high), 2, ["fwd", "rev"], eps=1e-2)
check_grads(lax.clamp, (low, operand, high), 2, ["fwd", "rev"], eps=1e-2)
check_grads(lax.clamp, (low, high, operand), 2, ["fwd", "rev"], eps=1e-2)
@jtu.sample_product(
[dict(base_shape=base_shape, dim=dim)
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for dim in range(len(base_shape))
],
num_arrs=[3],
dtype=float_dtypes,
)
def testConcatenateGrad(self, dim, base_shape, dtype, num_arrs):
rng = jtu.rand_default(self.rng())
shapes = [base_shape[:dim] + (size,) + base_shape[dim+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), range(num_arrs))]
operands = tuple(rng(shape, dtype) for shape in shapes)
concatenate = lambda *args: lax.concatenate(args, dim)
check_grads(concatenate, operands, 2, ["fwd", "rev"], eps=1.)
  @jtu.sample_product(
    [dict(base_shape=base_shape, axis=axis)
     for base_shape in [(4,), (3, 4), (2, 3, 4)]
     for axis in range(len(base_shape))
    ],
    num_pieces=range(3),
    dtype=float_dtypes,
  )
  def testSplitGrad(self, axis, base_shape, dtype, num_pieces):
    """Checks fwd/rev grads of lax.split with random piece sizes along each axis."""
    # Random sizes for each piece; the split axis is resized to their sum.
    sizes = jtu.rand_int(self.rng(), 5)((num_pieces + 1,), np.int64)
    shape = list(base_shape)
    shape[axis] = np.sum(sizes)
    rng = jtu.rand_default(self.rng())
    operands = (rng(shape, dtype),)
    split = lambda x: lax.split(x, sizes, axis)
    check_grads(split, operands, 2, ["fwd", "rev"], eps=1.)
  @jtu.sample_product(
    [dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape, strides=strides)
     for lhs_shape, rhs_shape, all_strides in itertools.chain(
         [((b, i, 3, 4), (j, i, 1, 2), [(1, 1), (1, 2), (2, 1)])
          for b, i, j in itertools.product([2, 3], repeat=3)],
         [((4, 2, 1), (3, 2, 1), [(1,)])])
     for strides in all_strides
    ],
    dtype=float_dtypes,
    padding=["VALID", "SAME"],
  )
  def testConvGrad(self, lhs_shape, rhs_shape, dtype, strides, padding):
    """Checks bilinear grads of lax.conv over strides/padding for 1D and 2D cases."""
    rng = jtu.rand_small(self.rng())
    lhs = rng(lhs_shape, dtype)
    rhs = rng(rhs_shape, dtype)
    # HIGHEST precision keeps the numerical gradient comparison meaningful.
    conv = partial(lax.conv, window_strides=strides, padding=padding,
                   precision=lax.Precision.HIGHEST)
    check_grads_bilinear(conv, (lhs, rhs), order=2, modes=["fwd", "rev"],
                         atol=1e-2, rtol=1e-2)
  @jtu.sample_product(
    [dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape, strides=strides,
          padding=padding, lhs_dil=lhs_dil, rhs_dil=rhs_dil)
     for lhs_shape, rhs_shape, all_strides, all_pads, lhs_dils, rhs_dils in
     itertools.chain(
         [((b, i, 3, 4), (j, i, 1, 2), [(1, 1), (1, 2), (2, 1)],
           [((0, 0), (0, 0)), ((-1, 0), (0, -1)), ((1, 0), (0, 1))],
           [(1, 1), (2, 1)], [(1, 1)])
          for b, i, j in itertools.product([2, 3], repeat=3)],
         [((4, 2, 1), (3, 2, 1), [(1,)], [((1, 1),), ((0, 0),)],
           [(1,), (2,)], [(1,), (2,)])])
     for strides in all_strides
     for rhs_dil in rhs_dils
     for lhs_dil in lhs_dils
     for padding in all_pads
    ],
    dtype=float_dtypes,
  )
  def testConvWithGeneralPaddingGrad(self, lhs_shape, rhs_shape, dtype, strides,
                                     padding, lhs_dil, rhs_dil):
    """Checks bilinear grads of conv_with_general_padding, incl. negative pads and dilations."""
    rng = jtu.rand_small(self.rng())
    lhs = rng(lhs_shape, dtype)
    rhs = rng(rhs_shape, dtype)
    conv = partial(lax.conv_with_general_padding, window_strides=strides,
                   padding=padding, lhs_dilation=lhs_dil, rhs_dilation=rhs_dil,
                   precision=lax.Precision.HIGHEST)
    check_grads_bilinear(conv, (lhs, rhs), order=2, modes=["fwd", "rev"],
                         atol=1e-2, rtol=1e-2)
  @jtu.sample_product(
    [dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape, strides=strides,
          padding=padding, lhs_dil=lhs_dil, rhs_dil=rhs_dil,
          feature_group_count=feature_group_count,
          batch_group_count=batch_group_count)
      for batch_group_count, feature_group_count in ([(1, 1), (2, 1), (1, 2)])
      for lhs_shapes, rhs_shape, all_strides, lhs_dils, rhs_dils in [
          ([(b * batch_group_count, i * feature_group_count, 6, 7),
            (b * batch_group_count, i * feature_group_count, 0, 4)],  # lhs_shape
           (j * batch_group_count * feature_group_count, i, 1, 2),  # rhs_shape
           [(1, 1), (1, 2), (2, 1)],  # strides
           [(1, 1), (2, 1)],  # lhs_dils
           [(1, 1), (2, 2)])  # rhs_dils
          for b, i, j in itertools.product([1, 2], repeat=3)]
      for lhs_shape in lhs_shapes
      for strides in all_strides
      for rhs_dil in rhs_dils
      for lhs_dil in lhs_dils
      for padding in ([((0, 0), (0, 0)), ((1, 0), (0, 1))] +
                      ([((0, -1), (0, 0))] if lhs_shape[2] != 0 else []))
    ],
    [dict(dimension_numbers=dim_nums, perms=perms)
     for dim_nums, perms in [
       (("NCHW", "OIHW", "NCHW"), ([0, 1, 2, 3], [0, 1, 2, 3])),
       (("NHWC", "HWIO", "NHWC"), ([0, 2, 3, 1], [2, 3, 1, 0])),
       (("NHWC", "OIHW", "NCHW"), ([0, 2, 3, 1], [0, 1, 2, 3]))]
    ],
    dtype=grad_inexact_dtypes,
  )
  def testConvGeneralDilatedGrad(self, lhs_shape, rhs_shape, dtype, strides,
                                 padding, lhs_dil, rhs_dil, dimension_numbers,
                                 perms, feature_group_count, batch_group_count):
    """Checks bilinear grads of conv_general_dilated across layouts, groups, and dilations."""
    if dtype == np.float16:
      raise SkipTest("float16 numerical issues")  # TODO(mattjj): resolve
    rng = jtu.rand_default(self.rng())
    tol = {dtypes.bfloat16: 1e-0, np.float16: 5e-1, np.float32: 1e-3}

    # permute shapes to match dim_spec, scale by feature_group_count
    lhs_perm, rhs_perm = perms
    lhs_shape = list(np.take(lhs_shape, lhs_perm))
    rhs_shape = list(np.take(rhs_shape, rhs_perm))

    lhs = rng(lhs_shape, dtype)
    rhs = rng(rhs_shape, dtype)
    conv = partial(lax.conv_general_dilated, window_strides=strides,
                   padding=padding, lhs_dilation=lhs_dil, rhs_dilation=rhs_dil,
                   dimension_numbers=dimension_numbers,
                   feature_group_count=feature_group_count,
                   batch_group_count=batch_group_count,
                   precision=lax.Precision.HIGHEST)
    check_grads_bilinear(conv, (lhs, rhs), order=2, modes=["fwd", "rev"],
                         atol=tol, rtol=tol)
  @jtu.sample_product(
    lhs_shape=[(2,), (3, 2)],
    rhs_shape=[(2,), (2, 4)],
    dtype=float_dtypes,
  )
  def testDotGrad(self, lhs_shape, rhs_shape, dtype):
    """Checks bilinear grads of lax.dot and that precision survives transposition."""
    rng = jtu.rand_default(self.rng())
    tol = {np.float16: 1e-1, np.float32: 1e-4}
    lhs = rng(lhs_shape, dtype)
    rhs = rng(rhs_shape, dtype)
    dot = partial(lax.dot, precision=lax.Precision.HIGHEST)
    check_grads_bilinear(dot, (lhs, rhs), order=2, modes=["fwd", "rev"],
                         atol=tol, rtol=tol)
    # check that precision config is preserved
    result, pullback = jax.vjp(dot, lhs, rhs)
    s = str(jax.make_jaxpr(pullback)(result))
    assert "Precision.HIGHEST" in s
  @jtu.sample_product(
    [dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape,
          dimension_numbers=dimension_numbers)
     for lhs_shape, rhs_shape, dimension_numbers in [
         ((3, 2), (2, 4), (([1], [0]), ([], []))),
         ((3, 5), (2, 5), (([1], [1]), ([], []))),
         ((5, 3), (5, 2), (([0], [0]), ([], []))),
         ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))),
         ((3, 5, 2), (2, 4, 5), (([2], [0]), ([1], [2]))),
         ((7, 3, 5, 2), (2, 2, 4, 5), (([3], [0]), ([2], [3]))),
     ]
    ],
    dtype=float_dtypes,
  )
  def testDotGeneralContractAndBatchGrads(self, lhs_shape, rhs_shape, dtype,
                                          dimension_numbers):
    """Checks grads of dot_general for mixed contracting/batch dims; precision preserved."""
    rng = jtu.rand_small(self.rng())
    lhs = rng(lhs_shape, dtype)
    rhs = rng(rhs_shape, dtype)
    dot_general = partial(lax.dot_general, dimension_numbers=dimension_numbers,
                          precision=lax.Precision.HIGHEST)
    atol = {np.float16: 5E-2} if jtu.test_device_matches(['tpu']) else None
    check_grads_bilinear(dot_general, (lhs, rhs), order=2,
                         modes=["fwd", "rev"], atol=atol)
    # check that precision config is preserved
    result, pullback = jax.vjp(dot_general, lhs, rhs)
    s = str(jax.make_jaxpr(pullback)(result))
    assert "Precision.HIGHEST" in s
def testDotPreferredElementType(self):
# https://github.com/jax-ml/jax/issues/10818
x = jax.numpy.ones((), jax.numpy.float16)
def f(x):
return jax.lax.dot_general(x, x, (((), ()), ((), ())),
preferred_element_type=jax.numpy.float32)
jax.jacrev(f)(x) # don't crash!
  @jtu.sample_product(
    shape=[(), (2, 3)],
    dtype=float_dtypes,
    broadcast_sizes=[(), (2,), (1, 2)],
  )
  def testBroadcastGrad(self, shape, dtype, broadcast_sizes):
    """Checks fwd/rev grads of lax.broadcast for scalar and 2D operands."""
    rng = jtu.rand_default(self.rng())
    args = (rng(shape, dtype),)
    broadcast = lambda x: lax.broadcast(x, broadcast_sizes)
    check_grads(broadcast, args, 2, ["fwd", "rev"], eps=1.)
  @jtu.sample_product(
    [dict(inshape=inshape, outshape=outshape, dimensions=broadcast_dimensions)
     for inshape, outshape, broadcast_dimensions in [
          ([2], [2, 2], [0]),
          ([2], [2, 2], [1]),
          ([2], [2, 3], [0]),
          ([], [2, 3], []),
     ]
    ],
    dtype=float_dtypes,
  )
  def testBroadcastInDimGrad(self, inshape, dtype, outshape, dimensions):
    """Checks fwd/rev grads of broadcast_in_dim over varied dimension mappings."""
    rng = jtu.rand_default(self.rng())
    operand = rng(inshape, dtype)
    broadcast_in_dim = lambda x: lax.broadcast_in_dim(x, outshape, dimensions)
    check_grads(broadcast_in_dim, (operand,), 2, ["fwd", "rev"], eps=1.)
  @jtu.sample_product(
    [dict(arg_shape=arg_shape, out_shape=out_shape, permutation=permutation)
     for arg_shape, out_shape, permutation in [
          [(3, 4), (12,), None],
          [(2, 1, 4), (8,), None],
          [(2, 2, 4), (2, 8), None],
          [(3, 4), (12,), (0, 1)],
          [(3, 4), (12,), (1, 0)],
          [(2, 1, 4), (8,), (0, 2, 1)],
          [(2, 1, 4), (8,), (2, 0, 1)],
          [(2, 2, 4), (2, 8), (0, 2, 1)],
          [(2, 2, 4), (2, 8), (2, 0, 1)],
     ]
    ],
    dtype=float_dtypes,
  )
  def testReshapeGrad(self, arg_shape, out_shape, permutation, dtype):
    """Checks fwd/rev grads of lax.reshape with and without a dimension permutation."""
    rng = jtu.rand_default(self.rng())
    operand = rng(arg_shape, dtype)
    reshape = lambda x: lax.reshape(x, out_shape, permutation)
    check_grads(reshape, (operand,), 2, ["fwd", "rev"], eps=1.)
  @jtu.sample_product(
    [dict(shape=shape, pads=pads)
     for shape, paddings in [
        [(), [()]],
        ((2, 3), [[(1, 2, 1), (0, 1, 0)], [(-1, 0, 0), (-1, 0, 2)]]),
     ]
     for pads in paddings
    ],
    dtype=float_dtypes,
  )
  def testPadGrad(self, shape, dtype, pads):
    """Checks grads of lax.pad wrt the operand alone and wrt operand + pad value."""
    rng = jtu.rand_small(self.rng())
    operand = rng(shape, dtype)
    # First: differentiate only through the padded operand.
    pad = lambda operand: lax.pad(operand, np.array(0, dtype), pads)
    check_grads(pad, (operand,), 2, ["fwd", "rev"], eps=1.)

    # Second: also differentiate through the padding value.
    operand = rng(shape, dtype)
    padding_value = np.array(0., dtype)
    pad = lambda operand, padding_value: lax.pad(operand, padding_value, pads)
    check_grads(pad, (operand, padding_value), 2, ["fwd", "rev"], eps=1.)
def testReverseGrad(self):
rev = lambda operand: lax.rev(operand, dimensions)
dimensions = [0]
check_grads(rev, (np.array([3., 2., 1.]),), 2)
dimensions = [0, 1]
check_grads(rev, (np.array([[6., 5., 4.], [3., 2., 1.]]),), 2,
rtol={np.float32: 3e-3})
  def testPowSecondDerivative(self):
    """Checks the Hessian of lax.pow at (x, 0) in both fwd and rev mode."""
    # https://github.com/jax-ml/jax/issues/12033
    x, y = 4.0, 0.0
    # Analytic Hessian of x**y at y=0: d2/dxdy = 1/x, d2/dy2 = log(x)**2.
    expected = ((0.0, 1/x), (1/x, np.log(x) ** 2))

    with self.subTest("jacfwd"):
      result_fwd = jax.jacfwd(jax.jacfwd(lax.pow, (0, 1)), (0, 1))(x, y)
      self.assertAllClose(result_fwd, expected)

    with self.subTest("jacrev"):
      result_rev = jax.jacrev(jax.jacrev(lax.pow, (0, 1)), (0, 1))(x, y)
      self.assertAllClose(result_rev, expected)

    with self.subTest("zero to the zero"):
      result = jax.grad(lax.pow)(0.0, 0.0)
      # TODO(jakevdp) special-case zero in a way that doesn't break other cases
      # See https://github.com/jax-ml/jax/pull/12041#issuecomment-1222766191
      # self.assertEqual(result, 0.0)
      self.assertAllClose(result, np.nan)
def testPowIntPowerAtZero(self):
# https://github.com/jax-ml/jax/issues/14397
ans = jax.grad(jax.jit(lambda x, n: x ** n))(0., 0)
self.assertAllClose(ans, 0., check_dtypes=False)
  @jax.numpy_dtype_promotion('standard')  # This test explicitly exercises mixed type promotion
  def testPowIntPowerAtZero2(self):
    """Checks grads of z**0 agree across int/float exponents and explicit forms."""
    # https://github.com/jax-ml/jax/issues/17995
    a = lambda z: jax.numpy.sum(z**jax.numpy.arange(0, 2, dtype=int))
    b = lambda z: jax.numpy.sum(z**jax.numpy.arange(0, 2, dtype=float))
    c = lambda z: 1 + z
    d = lambda z: z ** 0 + z
    e = lambda z: z ** 0. + z
    # All five formulations are mathematically 1 + z, so d/dz == 1.
    self.assertAllClose(jax.grad(a)(3.14), 1., check_dtypes=False)
    self.assertAllClose(jax.grad(b)(3.14), 1., check_dtypes=False)
    self.assertAllClose(jax.grad(c)(3.14), 1., check_dtypes=False)
    self.assertAllClose(jax.grad(d)(3.14), 1., check_dtypes=False)
    self.assertAllClose(jax.grad(e)(3.14), 1., check_dtypes=False)
  @jtu.sample_product(
    [dict(arg_shape=arg_shape, pred_shape=pred_shape)
     for arg_shape in [(), (3,), (2, 3)]
     for pred_shape in ([(), arg_shape] if arg_shape else [()])
    ],
    dtype=float_dtypes,
  )
  def testSelectGrad(self, pred_shape, arg_shape, dtype):
    """Checks grads of lax.select wrt both branches, with scalar or full predicates."""
    rng = jtu.rand_default(self.rng())
    pred = rng(pred_shape, np.bool_)
    on_true = rng(arg_shape, dtype)
    on_false = rng(arg_shape, dtype)
    select = lambda on_true, on_false: lax.select(pred, on_true, on_false)
    check_grads(select, (on_true, on_false), 2, ["fwd", "rev"], eps=1.)
  @jtu.sample_product(
    [dict(shape=shape, starts=start_indices, limits=limit_indices,
          strides=strides)
      for shape, start_indices, limit_indices, strides in [
        [(3,), (1,), (2,), None],
        [(7,), (4,), (7,), None],
        [(5,), (1,), (5,), (2,)],
        [(8,), (1,), (6,), (2,)],
        [(5, 3), (1, 1), (3, 2), None],
        [(5, 3), (1, 1), (3, 1), None],
        [(7, 5, 3), (4, 0, 1), (7, 1, 3), None],
        [(5, 3), (1, 1), (2, 1), (1, 1)],
        [(5, 3), (1, 1), (5, 3), (2, 1)],
        [(3, 3, 5), (0, 2, 0), (3, 2, 5), (1, 2, 1)]
      ]
    ],
    dtype=float_dtypes,
  )
  def testSliceGrad(self, shape, dtype, starts, limits, strides):
    """Checks fwd/rev grads of lax.slice, with and without strides."""
    rng = jtu.rand_default(self.rng())
    operand = rng(shape, dtype)
    slice = lambda x: lax.slice(x, starts, limits, strides)
    check_grads(slice, (operand,), 2, ["fwd", "rev"], eps=1.)
  @jtu.sample_product(
    [dict(shape=shape, start_indices=start_indices, size_indices=size_indices)
      for shape, start_indices, size_indices in [
        [(3,), (1,), (1,)],
        [(5, 3), (1, 1), (3, 1)],
        [(7, 5, 3), (4, 1, 0), (2, 0, 1)],
      ]
    ],
    dtype=float_dtypes,
  )
  def testDynamicSliceGrad(self, shape, dtype, start_indices, size_indices):
    """Checks fwd/rev grads of lax.dynamic_slice wrt the operand."""
    rng = jtu.rand_default(self.rng())
    operand = rng(shape, dtype)
    dynamic_slice = lambda x: lax.dynamic_slice(x, start_indices, size_indices)
    check_grads(dynamic_slice, (operand,), 2, ["fwd", "rev"], eps=1.)
  @jtu.sample_product(
    [dict(shape=shape, start_indices=start_indices, update_shape=update_shape)
      for shape, start_indices, update_shape in [
        [(3,), (1,), (1,)],
        [(5, 3), (1, 1), (3, 1)],
        [(7, 5, 3), (4, 1, 0), (2, 0, 1)],
      ]
    ],
    dtype=float_dtypes,
  )
  def testDynamicUpdateSliceGrad(self, shape, dtype, start_indices,
                                 update_shape):
    """Checks grads of dynamic_update_slice wrt both args together and individually."""
    rng = jtu.rand_default(self.rng())
    operand = rng(shape, dtype)
    update = rng(update_shape, dtype)
    start_indices = np.array(start_indices)

    dus = lambda x, y: lax.dynamic_update_slice(x, y, start_indices)
    check_grads(dus, (operand, update), 2, ["fwd", "rev"], eps=1.)

    dus = lambda x: lax.dynamic_update_slice(x, update, start_indices)
    check_grads(dus, (operand,), 2, ["fwd", "rev"], eps=1.)

    dus = lambda y: lax.dynamic_update_slice(operand, y, start_indices)
    check_grads(dus, (update,), 2, ["fwd", "rev"], eps=1.)
  def testDynamicSliceValueAndGrad(self):
    """value_and_grad must match plain evaluation for out-of-range negative indices."""
    # Regression test for https://github.com/jax-ml/jax/issues/10984
    # Issue arose due to an out-of-range negative index.
    rng = jtu.rand_default(self.rng())
    shape = (5, 5)
    axis = 0
    # Deliberately more negative than the axis length allows.
    index = -(shape[axis] + 3)
    def f(x):
      return lax.dynamic_index_in_dim(x, index, axis).sum()

    x = rng(shape, np.float32)
    result1 = f(x)
    result2, _ = jax.value_and_grad(f, 0)(x)
    self.assertAllClose(result1, result2)
  def testDynamicUpdateSliceValueAndGrad(self):
    """value_and_grad of dynamic_update_index_in_dim with an out-of-range negative index."""
    # Regression test for https://github.com/jax-ml/jax/issues/10984
    # Issue arose due to an out-of-range negative index.
    rng = jtu.rand_default(self.rng())
    shape = (5, 5)
    axis = 0
    # Deliberately more negative than the axis length allows.
    index = -(shape[axis] + 3)
    def f(x, y):
      return lax.dynamic_update_index_in_dim(x, y, index, axis).sum()

    x = rng(shape, np.float32)
    y = rng([1 for s in shape], np.float32)
    result1 = f(x, y)
    result2, _ = jax.value_and_grad(f, 0)(x, y)
    self.assertAllClose(result1, result2)
  @jtu.sample_product(
    [dict(shape=shape, perm=perm)
      for shape, perm in [
        [(3, 4), (1, 0)],
        [(3, 4), (0, 1)],
        [(3, 4, 5), (2, 1, 0)],
        [(3, 4, 5), (1, 0, 2)],
      ]
    ],
    dtype=float_dtypes,
  )
  def testTransposeGrad(self, shape, dtype, perm):
    """Checks fwd/rev grads of lax.transpose for several permutations."""
    rng = jtu.rand_default(self.rng())
    operand = rng(shape, dtype)
    transpose = lambda x: lax.transpose(x, perm)
    check_grads(transpose, (operand,), 2, ["fwd", "rev"], eps=1.)
  @jtu.sample_product(
    [dict(init_val=init_val, op=op, dtype=dtype, rng_factory=rng_factory)
      for init_val, op, dtypes, rng_factory in [
          (0, lax.add, float_dtypes + jtu.dtypes.complex, jtu.rand_default),
          (-np.inf, lax.max, grad_inexact_dtypes, jtu.rand_unique_int),
          (np.inf, lax.min, grad_inexact_dtypes, jtu.rand_unique_int),
          (1, lax.mul, grad_float_dtypes, partial(jtu.rand_default, scale=1)),
      ]
      for dtype in dtypes
    ],
    [dict(shape=shape, dims=dims)
      for shape, dims in [
          [(), ()],
          [(3, 4, 5), ()],
          [(3, 4, 5), (0,)],
          [(3, 4, 5), (1, 2)],
          [(3, 4, 5), (0, 2)],
          [(3, 4, 5), (0, 1, 2)],
          [(3, 1), (1,)],
          [(3, 0, 5), (1,)],
      ]
    ],
  )
  def testReduceGrad(self, op, init_val, shape, dtype, dims, rng_factory):
    """Checks grads of lax.reduce for add/max/min/mul over varied axes and shapes."""
    rng = rng_factory(self.rng())
    if jtu.test_device_matches(["tpu"]) and op is lax.mul:
      raise SkipTest("unimplemented case")
    tol = {dtypes.bfloat16: 2e-1, np.float16: 1e-1, np.float32: 1e-1,
           np.float64: 1e-3, np.complex64: 1e-1}
    operand = rng(shape, dtype)
    init_val = np.asarray(init_val, dtype=dtype)
    reduce = lambda operand: lax.reduce(operand, init_val, op, dims)
    # Step size scales with dtype precision; None lets check_grads choose.
    eps = (1.0 if dtypes.finfo(dtype).bits == 16 and op is lax.add else
           1e-1 if dtype == dtypes.bfloat16 else
           1e-2 if dtypes.finfo(dtype).bits == 32 else None)
    # max/min grads are ill-defined on empty axes, so skip those for them.
    if op not in (lax.max, lax.min) or all(d > 0 for d in shape):
      check_grads(reduce, (operand,), 2, ["fwd", "rev"], tol, tol, eps)
  @jtu.sample_product(
    [dict(shape=shape, dims=dims)
      for shape, dims in [
          [(3, 4, 5), ()],
          [(3, 4, 5), (0,)],
          [(3, 4, 5), (1, 2)],
          [(3, 4, 5), (0, 2)],
          [(3, 4, 5), (0, 1, 2)],
          [(3, 1), (1,)],
          [(3, 0, 5), (1,)],
      ]
    ],
    dtype=grad_float_dtypes,
  )
  def testReducePairGrad(self, shape, dtype, dims):
    """Checks grads of a variadic lax.reduce over an (add, mul) pair of operands."""
    rng = jtu.rand_default(self.rng(), scale=1)
    tol = {np.float32: 1e-2, np.float64: 1e-4}
    operands = (rng(shape, dtype), rng(shape, dtype))
    # Identities for the paired monoid: 0 for add, 1 for mul.
    init_vals = (np.array(0, dtype), np.array(1, dtype))
    def op(xs, ys):
      return (xs[0] + ys[0], xs[1] * ys[1])
    reduce = lambda xs, ys: lax.reduce((xs, ys), init_vals, op, dims)
    check_grads(reduce, operands, 2, ["fwd", "rev"], tol, tol)
  @jtu.sample_product(
    [dict(init_val=init_val, op=op, dtype=dtype, rng_factory=rng_factory,
          shape=shape, dims=dims, strides=strides, padding=padding,
          base_dilation=base_dilation, window_dilation=window_dilation)
      for init_val, op, dtypes, rng_factory in [
          (0, lax.add, grad_float_dtypes, jtu.rand_small),
          (-np.inf, lax.max, grad_float_dtypes, jtu.rand_unique_int),
          (np.inf, lax.min, grad_float_dtypes, jtu.rand_unique_int),
      ]
      for dtype in dtypes
      for shape, dims, strides, padding, base_dilation, window_dilation in (
        itertools.chain(
          itertools.product(
            [(4, 6)],
            [(2, 1), (1, 2)],
            [(1, 1), (2, 1), (1, 2)],
            ["VALID", "SAME", [(0, 3), (1, 2)]],
            [(1, 1)] + ([(2, 3)]),
            [(1, 1)] + ([(1, 2)] if op is lax.add else [])),
          itertools.product(
            [(3, 2, 4, 6)],
            [(1, 1, 2, 1), (2, 1, 2, 1)],
            [(1, 2, 2, 1), (1, 1, 1, 1)],
            ["VALID", "SAME", [(0, 1), (1, 0), (2, 3), (0, 2)]],
            [(1, 1, 1, 1)] + ([(2, 1, 3, 2)]),
            [(1, 1, 1, 1)] + ([(1, 2, 2, 1)] if op is lax.add else []))))
    ],
  )
  @jtu.ignore_warning(category=UserWarning,
                      message="Using reduced precision for gradient.*")
  def testReduceWindowGrad(
      self, op, init_val, dtype, shape, dims, strides,
      padding, base_dilation, window_dilation, rng_factory):
    """Checks up-to-3rd-order grads of reduce_window for add/max/min windows."""
    rng = rng_factory(self.rng())
    init_val = np.asarray(init_val, dtype=dtype)

    gradient_order = 3
    # We need this conditional and the corresponding loop logic to be in the
    # test method, rather than at the parameterized test level, because it
    # depends on FLAGS for the device under test.
    # TODO(b/31565929): enable when fixed.
    if jtu.test_device_matches(["tpu"]) and op is not lax.add:
      if (len(shape) != 4 or dims != (1, 1, 2, 1)
          or not isinstance(padding, str)):
        raise SkipTest("Only R4 SelectAndScatter implemented on TPU")

    def fun(operand):
      return lax.reduce_window(operand, init_val, op, dims, strides, padding,
                               base_dilation, window_dilation)

    operand = rng(shape, dtype)
    if op is lax.add:
      eps = 1.
      tol = None
    else:
      # this test can fail if there are duplicates in operand
      self.assertEqual(np.unique(operand).size, operand.size,
                       msg="test requires operand elements to be unique.")
      eps = 1e-2
      tol = {np.float16: 1e-1, np.float32: 6e-2, np.float64: 6e-2}
    check_grads(fun, (operand,), gradient_order, ["fwd", "rev"], tol, tol,
                eps)
  @jtu.sample_product(
    [dict(op=op, dtype=dtype)
      for op, types in [
          (lax.cumsum, [np.float32, np.float64]),
          (lax.cumprod, [np.float32, np.float64]),
      ]
      for dtype in types
    ],
    [dict(shape=shape, axis=axis)
      for shape in [[10], [3, 4, 5]]
      for axis in range(len(shape))
    ],
    reverse=[False, True],
  )
  def testCumulativeReduceGrad(self, op, shape, dtype, axis, reverse):
    """Checks 2nd-order grads of cumsum/cumprod along each axis, both directions."""
    rng_factory = (jtu.rand_default if dtypes.issubdtype(dtype, np.integer)
                   else jtu.rand_small)
    rng = rng_factory(self.rng())
    check_grads(partial(op, axis=axis, reverse=reverse), (rng(shape, dtype),),
                order=2)
  # TODO(b/205052657): enable more tests when supported
  @jtu.sample_product(
    [dict(shape=shape, axis=axis)
      for shape in [(0,), (5,), (5, 7), (4, 9, 3)]
      for axis in [len(shape) - 1]
    ],
    dtype=[np.float32],
    is_stable=[False, True],
  )
  def testSortGrad(self, shape, dtype, axis, is_stable):
    """Checks fwd/rev grads of lax.sort along the last axis."""
    # Unique values avoid tie-induced gradient ambiguity in the sort.
    rng = jtu.rand_unique_int(self.rng())
    operand = rng(shape, dtype)
    sort = lambda x: lax.sort(x, dimension=axis, is_stable=is_stable)
    check_grads(sort, (operand,), 2, ["fwd", "rev"], eps=1e-2)
  # TODO(b/205052657): enable more tests when supported
  @jtu.sample_product(
    [dict(shape=shape, axis=axis)
      for shape in [(3,), (5, 3), (4, 9, 3)]
      for axis in [len(shape) - 1]
    ],
    key_dtype=[np.float32],
    val_dtype=[np.float32],
    is_stable=[False, True],
  )
  def testSortKeyValGrad(self, shape, key_dtype, val_dtype, axis, is_stable):
    """Checks fwd/rev grads of lax.sort_key_val with globally unique keys."""
    rng = jtu.rand_default(self.rng())
    # This test relies on the property that wherever keys are tied, values are
    # too, since we don't guarantee the same ordering of values with equal keys.
    # To avoid that case, we generate unique keys (globally in the key array).
    def args_maker():
      flat_keys = np.arange(math.prod(shape), dtype=key_dtype)
      keys = self.rng().permutation(flat_keys).reshape(shape)
      values = rng(shape, val_dtype)
      return keys, values
    keys, values = args_maker()

    fun = lambda keys, values: lax.sort_key_val(keys, values, axis, is_stable)
    check_grads(fun, (keys, values), 2, ["fwd", "rev"], 1e-2, 1e-2, 1e-2)
  @jtu.sample_product(
    dtype=[np.float32,],
    shape=[(4,), (5, 5), (3, 1, 4)],
    k=[1, 3],
    axis=[0, -1]
  )
  def testTopKGrad(self, shape, dtype, k, axis):
    """Checks fwd/rev grads of the values output of lax.top_k."""
    # Unique values keep the top-k selection (and hence the gradient) stable.
    flat_values = np.arange(math.prod(shape), dtype=dtype)
    values = self.rng().permutation(flat_values).reshape(shape)
    fun = lambda vs: lax.top_k(vs, k=k, axis=axis)[0]
    check_grads(fun, (values,), 2, ["fwd", "rev"], eps=1e-2)
  @jtu.sample_product(
    [dict(shape=shape, idxs=idxs, axes=axes)
      for shape, idxs, axes in [
          [(3, 4, 5), (np.array([0, 2, 1]),), (0,)],
          [(3, 4, 5), (np.array([-1, -2]),), (0,)],
          [(3, 4, 5), (np.array([0, 2]), np.array([1, 3])), (0, 1)],
          [(3, 4, 5), (np.array([0, 2]), np.array([1, 3])), (0, 2)],
      ]
    ],
    dtype=float_dtypes,
  )
  @jax.numpy_rank_promotion('allow')  # Test explicitly exercises implicit rank promotion.
  def testIndexTakeGrad(self, shape, dtype, idxs, axes):
    """Checks fwd/rev grads of lax.index_take, incl. negative indices."""
    rng = jtu.rand_default(self.rng())
    src = rng(shape, dtype)
    index_take = lambda src: lax.index_take(src, idxs, axes)
    check_grads(index_take, (src,), 2, ["fwd", "rev"], eps=1.)
  @jtu.sample_product(
    [dict(shape=shape, idxs_shape=idxs.shape, idxs_dtype=idxs.dtype,
          dnums=dnums, slice_sizes=slice_sizes, max_idx=max_idx)
      for shape, idxs, dnums, slice_sizes, max_idx in [
          ((5,), np.array([[0], [2]]), lax.GatherDimensionNumbers(
            offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
            (1,), 5),
          ((10,), np.array([[0], [0], [0]]), lax.GatherDimensionNumbers(
            offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
            (2,), 9),
          ((10, 5,), np.array([[0], [2], [1]]), lax.GatherDimensionNumbers(
            offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
            (1, 3), 3),
      ]
    ],
    dtype=grad_float_dtypes,
    mode=["clip", "fill", "promise_in_bounds"],
    iteration=range(5),
  )
  def testGatherGrad(self, shape, dtype, idxs_shape, idxs_dtype, dnums,
                     slice_sizes, mode, max_idx, iteration):
    """Checks fwd/rev grads of lax.gather for each out-of-bounds mode."""
    rng = jtu.rand_default(self.rng())
    if mode == "promise_in_bounds":
      rng_idx = jtu.rand_int(self.rng(), high=max_idx)
    else:
      # Only test out-of-bounds indices if using a mode that guarantees correct
      # gradients for out-of-bounds indices.
      rng_idx = jtu.rand_int(self.rng(), low=-max_idx, high=2 * max_idx)
    idxs = rng_idx(idxs_shape, idxs_dtype)
    # Use an arbitrary finite fill_value, since NaNs won't work in a numerical
    # gradient test.
    gather = lambda x: lax.gather(x, idxs, dimension_numbers=dnums,
                                  slice_sizes=slice_sizes, mode=mode,
                                  fill_value=-1)
    x = rng(shape, dtype)
    check_grads(gather, (x,), 2, ["fwd", "rev"], 1e-2, 1e-2, 1.)
  @jtu.sample_product(
    [dict(arg_shape=arg_shape, idxs_shape=idxs.shape, idxs_dtype=idxs.dtype,
          dnums=dnums, update_shape=update_shape, max_idx=max_idx)
      for arg_shape, idxs, update_shape, dnums, max_idx in [
          ((5,), np.array([[0], [2]]), (2,),
            lax.ScatterDimensionNumbers(update_window_dims=(),
                                        inserted_window_dims=(0,),
                                        scatter_dims_to_operand_dims=(0,)), 4),
          ((10,), np.array([[0], [0], [0]]), (3, 2),
            lax.ScatterDimensionNumbers(update_window_dims=(1,),
                                        inserted_window_dims=(),
                                        scatter_dims_to_operand_dims=(0,)), 9),
          ((10, 5,), np.array([[0], [2], [1]]), (3, 3),
            lax.ScatterDimensionNumbers(update_window_dims=(1,),
                                        inserted_window_dims=(0,),
                                        scatter_dims_to_operand_dims=(0,)), 3),
      ]
    ],
    dtype=grad_float_dtypes,
    mode=["clip", "fill", "promise_in_bounds"],
    iteration=range(5),
  )
  def testScatterAddGrad(self, arg_shape, dtype, idxs_shape, idxs_dtype,
                         update_shape, dnums, max_idx, mode, iteration):
    """Checks grads of lax.scatter_add wrt operand and updates for each OOB mode."""
    rng = jtu.rand_default(self.rng())
    if mode == "promise_in_bounds":
      rng_idx = jtu.rand_int(self.rng(), high=max_idx)
    else:
      # Only test out-of-bounds indices if using a mode that guarantees correct
      # gradients for out-of-bounds indices.
      rng_idx = jtu.rand_int(self.rng(), low=-max_idx, high=2 * max_idx)
    idxs = rng_idx(idxs_shape, idxs_dtype)
    scatter_add = lambda x, y: lax.scatter_add(
      x, idxs, y, dimension_numbers=dnums, mode=mode)
    x = rng(arg_shape, dtype)
    y = rng(update_shape, dtype)
    check_grads(scatter_add, (x, y), 2, ["fwd", "rev"], 1e-2, 1e-2, 1.)
  @jtu.sample_product(
    [dict(arg_shape=arg_shape, idxs=idxs, dnums=dnums,
          update_shape=update_shape, max_idx=max_idx, multiplier=multiplier)
     for arg_shape, idxs, update_shape, dnums, max_idx, multiplier in [
        ((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
          update_window_dims=(), inserted_window_dims=(0,),
          scatter_dims_to_operand_dims=(0,)), 4, 1),
        ((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
          update_window_dims=(1,), inserted_window_dims=(),
          scatter_dims_to_operand_dims=(0,)), 4, 2),
        ((10, 5,), np.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
          update_window_dims=(1,), inserted_window_dims=(0,),
          scatter_dims_to_operand_dims=(0,)), 9, 1),
     ]
    ],
    dtype=grad_float_dtypes,
  )
  def testScatterGrad(self, arg_shape, dtype, idxs, update_shape, dnums,
                      max_idx, multiplier):
    """Checks grads of lax.scatter with non-colliding, non-overlapping indices."""
    # Scatters with conflicting indices are not deterministic on GPU, so we
    # use indices that do not collide.
    rng_idx = jtu.rand_unique_int(self.rng(), high=max_idx)
    rng = jtu.rand_default(self.rng())
    # The multiplier ensures we don't pick overlapping windows if the update
    # window is not of size 1.
    idxs = rng_idx(idxs.shape, idxs.dtype) * multiplier
    scatter = lambda x, y: lax.scatter(x, idxs, y, dimension_numbers=dnums)
    x = rng(arg_shape, dtype)
    y = rng(update_shape, dtype)
    check_grads(scatter, (x, y), 2, ["fwd", "rev"], 1e-2, 1e-2, 1.)
  def testScatterGradSymbolicZeroUpdate(self):
    """Checks grads when the scatter update is a non-differentiated constant."""
    # https://github.com/jax-ml/jax/issues/1901
    def f(x):
      n = x.shape[0]
      # y is a constant wrt x, so its tangent is a symbolic zero.
      y = np.arange(n, dtype=x.dtype)
      return jax.device_put(x).at[np.diag_indices(n)].set(y)
    rng = jtu.rand_default(self.rng())
    check_grads(f, (rng((5, 5), np.float32),), 2, ["fwd", "rev"], 1e-2, 1e-2,
                1.)
  @jtu.sample_product(
    [dict(arg_shape=arg_shape, idxs=idxs, dnums=dnums,
          update_shape=update_shape)
     for arg_shape, idxs, update_shape, dnums in [
          ((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
            update_window_dims=(), inserted_window_dims=(0,),
            scatter_dims_to_operand_dims=(0,))),
          ((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
            update_window_dims=(1,), inserted_window_dims=(),
            scatter_dims_to_operand_dims=(0,))),
          ((10, 5,), np.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
            update_window_dims=(1,), inserted_window_dims=(0,),
            scatter_dims_to_operand_dims=(0,))),
     ]
    ],
    dtype=grad_float_dtypes,
  )
  def testScatterMax(self, arg_shape, dtype, idxs, update_shape, dnums):
    """Checks fwd/rev grads of lax.scatter_max wrt operand and updates."""
    rng = jtu.rand_default(self.rng())
    rng_idx = jtu.rand_int(self.rng(), high=max(arg_shape))
    idxs = rng_idx(idxs.shape, idxs.dtype)
    scatter_max = lambda x, y: lax.scatter_max(x, idxs, y, dnums)
    x = rng(arg_shape, dtype)
    y = rng(update_shape, dtype)
    check_grads(scatter_max, (x, y), 2, ["fwd", "rev"], 1e-2, 1e-2)
  @jtu.sample_product(
    [dict(arg_shape=arg_shape, idxs=idxs, dnums=dnums,
          update_shape=update_shape)
     for arg_shape, idxs, update_shape, dnums in [
          ((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
            update_window_dims=(), inserted_window_dims=(0,),
            scatter_dims_to_operand_dims=(0,))),
          ((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
            update_window_dims=(1,), inserted_window_dims=(),
            scatter_dims_to_operand_dims=(0,))),
          ((10, 5,), np.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
            update_window_dims=(1,), inserted_window_dims=(0,),
            scatter_dims_to_operand_dims=(0,))),
     ]
    ],
    dtype=grad_float_dtypes,
  )
  def testScatterMin(self, arg_shape, dtype, idxs, update_shape, dnums):
    """Checks fwd/rev grads of lax.scatter_min wrt operand and updates."""
    rng = jtu.rand_default(self.rng())
    rng_idx = jtu.rand_int(self.rng(), high=max(arg_shape))
    idxs = rng_idx(idxs.shape, idxs.dtype)
    scatter_min = lambda x, y: lax.scatter_min(x, idxs, y, dnums)
    x = rng(arg_shape, dtype)
    y = rng(update_shape, dtype)
    check_grads(scatter_min, (x, y), 2, ["fwd", "rev"], 1e-2, 1e-2)
  def testStopGradient(self):
    """stop_gradient(x) must differentiate like an independent constant copy of x."""
    def f(x):
      return lax.sin(x) * lax.cos(lax.stop_gradient(x))

    # f2 treats the second occurrence of x as a separate variable, which is
    # exactly what stop_gradient should make f behave like wrt x.
    def f2(x, y):
      return lax.sin(x) * lax.cos(y)

    x = 3.14
    ans = jax.grad(f)(x)
    expected = jax.grad(f2)(x, x)
    self.assertAllClose(ans, expected)

    ans = jax.grad(jax.grad(f))(x)
    expected = jax.grad(jax.grad(f2))(x, x)
    self.assertAllClose(ans, expected)

    # stop_gradient works through pytrees; the grad is then zero.
    ans = jax.grad(lambda x: lax.stop_gradient({'foo':x})['foo'])(3.)
    expected = np.array(0.0)
    self.assertAllClose(ans, expected, check_dtypes=False)

    # stop_gradient on a non-pytree (a function) is a TypeError.
    with jax.enable_checks(False):
      with self.assertRaises(TypeError):
        lax.stop_gradient(lambda x: x)
# TODO(mattjj): make this a more systematic test
def testRemainder(self):
def gen_x(rng, size):
return rng.uniform(-9, 9, size=size)
def gen_y(rng, size):
# avoid values near zero because gradients diverge
return rng.uniform(0.1, 5, size=size) * rng.choice([-1, 1], size=size)
rng = self.rng()
x = gen_x(rng, (5, 8))
y = gen_y(rng, (1, 8))
assert not set(np.unique(x)) & set(np.unique(y))
check_grads(lax.rem, (x, y), 2, ["fwd", "rev"])
rng = self.rng()
x = gen_x(rng, (1, 8))
y = gen_y(rng, (5, 8))
assert not set(np.unique(x)) & set(np.unique(y))
check_grads(lax.rem, (x, y), 2, ["fwd", "rev"])
def testHigherOrderGradientOfReciprocal(self):
# Regression test for https://github.com/jax-ml/jax/issues/3136
def inv(x):
# N.B.: intentionally written as 1/x, not x ** -1 or reciprocal(x)
return 1 / x
grad_fn = jax.grad(jax.grad(jax.grad(jax.grad(jax.grad(jax.grad(inv))))))
self.assertAllClose(np.float32(0.0439453125), grad_fn(np.float32(4.)))
def test_linear_transpose_real(self):
f = lambda x: x.real
transpose = jax.linear_transpose(f, 1.j)
actual, = transpose(1.)
expected = 1.
self.assertEqual(actual, expected)
def test_linear_transpose_imag(self):
f = lambda x: x.imag
transpose = jax.linear_transpose(f, 1.j)
actual, = transpose(1.)
expected = -1.j
self.assertEqual(actual, expected)
def test_scatter_apply_jvp(self):
def f(x):
return x.at[1].apply(jax.numpy.sin)
x = jax.numpy.array([1.0, 2.0])
with self.assertRaises(NotImplementedError):
jax.jacfwd(f)(x)
def test_scatter_apply_vjp(self):
def f(x):
return x.at[1].apply(jax.numpy.sin)
x = jax.numpy.array([1.0, 2.0])
with self.assertRaises(NotImplementedError):
jax.jacrev(f)(x)
def testPowShapeMismatch(self):
# Regression test for https://github.com/jax-ml/jax/issues/17294
x = lax.iota('float32', 4)
y = 2
actual = jax.jacrev(jax.jit(jax.lax.pow))(x, y) # no error
expected = jax.numpy.diag(y * x ** (y - 1))
self.assertArraysEqual(actual, expected)
if __name__ == '__main__':
  # Run under JAX's test loader so jtu's sharding/config options are honored.
  absltest.main(testLoader=jtu.JaxTestLoader())
| LaxAutodiffTest |
python | getsentry__sentry | src/sentry/tasks/check_am2_compatibility.py | {
"start": 9140,
"end": 9214
} | class ____(Enum):
ERROR = 0
IN_PROGRESS = 1
DONE = 2
| CheckStatus |
python | google__pytype | pytype/annotation_utils.py | {
"start": 518,
"end": 588
} | class ____:
typ: Any
value: Any
final: bool = False
| AnnotatedValue |
python | pydantic__pydantic | tests/mypy/modules/plugin_fail.py | {
"start": 2320,
"end": 2498
} | class ____(BaseModel, Generic[T]):
data: T
error: Optional[str]
response = Response[Model](data=model, error=None)
response = Response[Model](data=1, error=None)
| Response |
python | python__mypy | mypy/test/testexportjson.py | {
"start": 486,
"end": 2574
} | class ____(DataSuite):
required_out_section = True
files = ["exportjson.test"]
def run_case(self, testcase: DataDrivenTestCase) -> None:
error = False
src = "\n".join(testcase.input)
try:
options = Options()
options.use_builtins_fixtures = True
options.show_traceback = True
options.allow_empty_bodies = True
options.fixed_format_cache = True
fnam = os.path.join(self.base_path, "main.py")
with open(fnam, "w") as f:
f.write(src)
result = build.build(
sources=[BuildSource(fnam, "main")], options=options, alt_lib_path=test_temp_dir
)
a = result.errors
error = bool(a)
major, minor = sys.version_info[:2]
cache_dir = os.path.join(".mypy_cache", f"{major}.{minor}")
for module in result.files:
if module in (
"builtins",
"typing",
"_typeshed",
"__future__",
"typing_extensions",
"sys",
):
continue
fnam = os.path.join(cache_dir, f"{module}.data.ff")
with open(fnam, "rb") as f:
json_data = convert_binary_cache_to_json(f.read(), implicit_names=False)
for line in json.dumps(json_data, indent=4).splitlines():
if '"path": ' in line:
# We source file path is unpredictable, so filter it out
line = re.sub(r'"[^"]+\.pyi?"', "...", line)
assert "ERROR" not in line, line
a.append(line)
except CompileError as e:
a = e.messages
error = True
if error or "\n".join(testcase.output).strip() != "<not checked>":
assert_string_arrays_equal(
testcase.output, a, f"Invalid output ({testcase.file}, line {testcase.line})"
)
| TypeExportSuite |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 93998,
"end": 94430
} | class ____(
BaseStreamingRequestFlowControlTest, WebTestCase
):
def get_handlers(self):
class DecoratedFlowControlHandler(BaseFlowControlHandler):
@gen.coroutine
def data_received(self, data):
with self.in_method("data_received"):
yield gen.moment
return [("/", DecoratedFlowControlHandler, dict(test=self))]
| DecoratedStreamingRequestFlowControlTest |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/utils/sagemaker.py | {
"start": 846,
"end": 1040
} | class ____(Enum):
"""Approval statuses for a Sagemaker Model Package."""
APPROVED = "Approved"
REJECTED = "Rejected"
PENDING_MANUAL_APPROVAL = "PendingManualApproval"
| ApprovalStatus |
python | scrapy__scrapy | tests/test_command_crawl.py | {
"start": 2721,
"end": 3178
} | class ____(scrapy.Spider):
name = 'myspider'
async def start(self):
return
yield
"""
args = ["-o", "example1.json", "-O", "example2.json"]
log = self.get_log(spider_code, proj_path, args=args)
assert (
"error: Please use only one of -o/--output and -O/--overwrite-output" in log
)
def test_default_reactor(self, proj_path: Path) -> None:
spider_code = """
import scrapy
| MySpider |
python | django__django | tests/forms_tests/tests/test_forms.py | {
"start": 232650,
"end": 232853
} | class ____(BoundField):
def css_classes(self, extra_classes=None):
parent_classes = super().css_classes(extra_classes)
return f"field-class {parent_classes}"
| BoundFieldWithWrappingClass |
python | huggingface__transformers | src/transformers/models/qwen2_vl/video_processing_qwen2_vl.py | {
"start": 2898,
"end": 13751
} | class ____(BaseVideoProcessor):
resample = PILImageResampling.BICUBIC
size = {"shortest_edge": 128 * 28 * 28, "longest_edge": 28 * 28 * 768}
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
min_pixels = 128 * 28 * 28
max_pixels = 28 * 28 * 768
patch_size = 14
temporal_patch_size = 2
merge_size = 2
min_frames = 4
max_frames = 768
do_sample_frames = False # Set to False for BC, recommended to set `True` in new models
valid_kwargs = Qwen2VLVideoProcessorInitKwargs
model_input_names = ["pixel_values_videos", "video_grid_thw"]
def __init__(self, **kwargs: Unpack[Qwen2VLVideoProcessorInitKwargs]):
size = kwargs.pop("size", None)
min_pixels = kwargs.pop("min_pixels", None)
max_pixels = kwargs.pop("max_pixels", None)
# backward compatibility: override size with min_pixels and max_pixels if they are provided
size = self.size if size is None else size
if min_pixels is not None:
size["shortest_edge"] = min_pixels
size.pop("min_pixels", None)
if max_pixels is not None:
size["longest_edge"] = max_pixels
size.pop("max_pixels", None)
if "shortest_edge" not in size or "longest_edge" not in size:
raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
super().__init__(size=size, min_pixels=min_pixels, max_pixels=max_pixels, **kwargs)
def sample_frames(
self,
metadata: VideoMetadata,
temporal_patch_size: Optional[int] = None,
min_frames: Optional[int] = None,
max_frames: Optional[int] = None,
num_frames: Optional[int] = None,
fps: Optional[Union[int, float]] = None,
**kwargs,
):
"""
Default sampling function which uniformly samples the desired number of frames between 0 and total number of frames.
If `fps` is passed along with metadata, `fps` frames per second are sampled uniformty. Arguments `num_frames`
and `fps` are mutually exclusive.
Args:
metadata (`VideoMetadata`):
Metadata of the video containing information about total duration, fps and total number of frames.
temporal_patch_size (`int`, *optional*):
The temporal patch size of the vision encoder. Number of sampled frames will be rounded to be divisible by frame factor.
min_frames (`int`, *optional*):
The minimum number of frames that can be sampled.
max_frames (`int`, *optional*):
The maximum number of frames that can be sampled.
num_frames (`int`, *optional*):
Maximum number of frames to sample. Defaults to `self.num_frames`.
fps (`int` or `float`, *optional*):
Target frames to sample per second. Defaults to `self.fps`.
Returns:
np.ndarray:
Indices to sample video frames.
"""
if fps is not None and num_frames is not None:
raise ValueError("`num_frames` and `fps` are mutually exclusive arguments, please use only one!")
num_frames = num_frames if num_frames is not None else self.num_frames
fps = fps if fps is not None else self.fps
temporal_patch_size = temporal_patch_size if temporal_patch_size is not None else self.temporal_patch_size
min_frames = min_frames if min_frames is not None else self.min_frames
max_frames = max_frames if max_frames is not None else self.max_frames
total_num_frames = metadata.total_num_frames
# If num_frames is not given but fps is, calculate num_frames from fps
if num_frames is not None:
num_frames = round(num_frames / temporal_patch_size) * temporal_patch_size
elif fps is not None:
if metadata is None or metadata.fps is None:
raise ValueError(
"Asked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. "
"Please pass in `VideoMetadata` object or use a fixed `num_frames` per input video"
)
max_frames = math.floor(min(max_frames, total_num_frames) / temporal_patch_size) * temporal_patch_size
num_frames = total_num_frames / metadata.fps * fps
num_frames = min(max(num_frames, min_frames), max_frames, total_num_frames)
num_frames = math.floor(num_frames / temporal_patch_size) * temporal_patch_size
if num_frames > total_num_frames:
raise ValueError(
f"Video can't be sampled. The inferred `num_frames={num_frames}` exceeds `total_num_frames={total_num_frames}`. "
"Decrease `num_frames` or `fps` for sampling."
)
if num_frames is not None:
indices = torch.arange(0, total_num_frames, total_num_frames / num_frames).int()
else:
indices = torch.arange(0, total_num_frames).int()
return indices
def _preprocess(
self,
videos: list["torch.Tensor"],
do_resize: bool,
size: SizeDict,
interpolation: Optional["F.InterpolationMode"],
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
patch_size: Optional[int] = None,
temporal_patch_size: Optional[int] = None,
merge_size: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
):
# Group videos by size for batched resizing
grouped_videos, grouped_videos_index = group_videos_by_shape(videos)
resized_videos_grouped = {}
for shape, stacked_videos in grouped_videos.items():
height, width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST)
resized_height, resized_width = height, width
if do_resize:
resized_height, resized_width = smart_resize(
height,
width,
factor=patch_size * merge_size,
min_pixels=size["shortest_edge"],
max_pixels=size["longest_edge"],
)
stacked_videos = self.resize(
image=stacked_videos,
size=SizeDict(height=resized_height, width=resized_width),
interpolation=interpolation,
)
resized_videos_grouped[shape] = stacked_videos
resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)
# Group videos by size for further processing
# Needed in case do_resize is False, or resize returns videos with different sizes
grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
processed_videos_grouped = {}
processed_grids = {}
for shape, stacked_videos in grouped_videos.items():
resized_height, resized_width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST)
# Fused rescale and normalize
stacked_videos = self.rescale_and_normalize(
stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
patches = stacked_videos
# Check that videos have `num_frames` divisible by `temporal_patch_size`
T = patches.shape[1]
if pad := -T % temporal_patch_size:
repeats = patches[:, -1:].expand(-1, pad, -1, -1, -1)
patches = torch.cat((patches, repeats), dim=1)
batch_size, grid_t, channel = patches.shape[:3]
grid_t = grid_t // temporal_patch_size
grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
patches = patches.view(
batch_size,
grid_t,
temporal_patch_size,
channel,
grid_h // merge_size,
merge_size,
patch_size,
grid_w // merge_size,
merge_size,
patch_size,
)
patches = patches.permute(0, 1, 4, 7, 5, 8, 3, 2, 6, 9)
flatten_patches = patches.reshape(
batch_size,
grid_t * grid_h * grid_w,
channel * temporal_patch_size * patch_size * patch_size,
)
processed_videos_grouped[shape] = flatten_patches
processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size
processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)
processed_grids = reorder_videos(processed_grids, grouped_videos_index)
pixel_values_videos = torch.cat(processed_videos, dim=0)
video_grid_thw = torch.tensor(processed_grids)
return BatchFeature(
data={"pixel_values_videos": pixel_values_videos, "video_grid_thw": video_grid_thw},
tensor_type=return_tensors,
)
def get_num_of_video_patches(self, num_frames: int, height: int, width: int, videos_kwargs=None):
"""
A utility that returns number of video patches a given video size.
Args:
num_frames (`int`):
Number of frames in the input video.
height (`int`):
Height of the input video.
width (`int`):
Width of the input video.
videos_kwargs (`dict`, *optional*)
Any kwargs to override defaults of the video processor.
Returns:
`Tuple(int, int)`: Number of placeholder tokens required and number of patches per image.
"""
min_pixels = videos_kwargs.get("min_pixels", None) or self.size["shortest_edge"]
max_pixels = videos_kwargs.get("max_pixels", None) or self.size["longest_edge"]
patch_size = videos_kwargs.get("patch_size", None) or self.patch_size
merge_size = videos_kwargs.get("merge_size", None) or self.merge_size
temporal_patch_size = videos_kwargs.get("temporal_patch_size", None) or self.temporal_patch_size
factor = patch_size * merge_size
resized_height, resized_width = smart_resize(
height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels
)
grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
grid_t = num_frames // temporal_patch_size
return grid_t * grid_h * grid_w
__all__ = ["Qwen2VLVideoProcessor"]
| Qwen2VLVideoProcessor |
python | automl__auto-sklearn | test/test_pipeline/components/feature_preprocessing/test_feature_agglomeration.py | {
"start": 321,
"end": 1853
} | class ____(PreprocessingTestCase):
def test_default_configuration(self):
transformation, original = _test_preprocessing(FeatureAgglomeration)
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertFalse((transformation == 0).all())
def test_default_configuration_classify(self):
for i in range(3):
X_train, Y_train, X_test, Y_test = get_dataset(
dataset="digits", make_sparse=False
)
configuration_space = FeatureAgglomeration.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = FeatureAgglomeration(
random_state=1, **{hp_name: default[hp_name] for hp_name in default}
)
preprocessor.fit(X_train, Y_train)
X_train_trans = preprocessor.transform(X_train)
X_test_trans = preprocessor.transform(X_test)
# fit a classifier on top
classifier = RandomForestClassifier(random_state=1)
predictor = classifier.fit(X_train_trans, Y_train)
predictions = predictor.predict(X_test_trans)
accuracy = sklearn.metrics.accuracy_score(predictions, Y_test)
self.assertAlmostEqual(accuracy, 0.8761384335154827)
def test_preprocessing_dtype(self):
super(FeatureAgglomerationComponentTest, self)._test_preprocessing_dtype(
FeatureAgglomeration, test_sparse=False
)
| FeatureAgglomerationComponentTest |
python | patrick-kidger__equinox | equinox/nn/_dropout.py | {
"start": 197,
"end": 3182
} | class ____(Module):
"""Applies dropout.
Note that this layer behaves differently during training and inference. During
training then dropout is randomly applied; during inference this layer does nothing.
Whether the model is in training or inference mode should be toggled using
[`equinox.nn.inference_mode`][].
"""
# Not static fields as it makes sense to want to modify them via equinox.tree_at.
p: float
inference: bool
def __init__(
self,
p: float = 0.5,
inference: bool = False,
*,
deterministic: bool | None = None,
):
"""**Arguments:**
- `p`: The fraction of entries to set to zero. (On average.)
- `inference`: Whether to actually apply dropout at all. If `True` then dropout
is *not* applied. If `False` then dropout is applied. This may be toggled
with [`equinox.nn.inference_mode`][] or overridden during
[`equinox.nn.Dropout.__call__`][].
- `deterministic`: Deprecated alternative to `inference`.
"""
if deterministic is not None:
inference = deterministic
warnings.warn(
"Dropout(deterministic=...) is deprecated "
"in favour of Dropout(inference=...)"
)
self.p = p
self.inference = inference
# Backward compatibility
@property
def deterministic(self):
return self.inference
@named_scope("eqx.nn.Dropout")
def __call__(
self,
x: Array,
*,
key: PRNGKeyArray | None = None,
inference: bool | None = None,
deterministic: bool | None = None,
) -> Array:
"""**Arguments:**
- `x`: An any-dimensional JAX array to dropout.
- `key`: A `jax.random.PRNGKey` used to provide randomness for calculating
which elements to dropout. (Keyword only argument.)
- `inference`: As per [`equinox.nn.Dropout.__init__`][]. If `True` or
`False` then it will take priority over `self.inference`. If `None`
then the value from `self.inference` will be used.
- `deterministic`: Deprecated alternative to `inference`.
"""
if deterministic is not None:
inference = deterministic
warnings.warn(
"Dropout()(deterministic=...) is deprecated "
"in favour of Dropout()(inference=...)"
)
if inference is None:
inference = self.inference
if isinstance(self.p, (int, float)) and self.p == 0:
inference = True
if inference:
return x
elif key is None:
raise RuntimeError(
"Dropout requires a key when running in non-deterministic mode."
)
else:
q = 1 - lax.stop_gradient(self.p)
mask = jrandom.bernoulli(key, q, x.shape)
return jnp.where(mask, x / q, 0)
| Dropout |
python | fluentpython__example-code-2e | 21-async/mojifinder/bottle.py | {
"start": 90107,
"end": 90572
} | class ____(object):
def __init__(self, fp, buffer_size=1024*64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
| WSGIFileWrapper |
python | jackfrued__Python-100-Days | Day31-35/code/example18.py | {
"start": 94,
"end": 535
} | class ____(type):
"""自定义元类"""
def __init__(cls, *args, **kwargs):
cls.__instance = None
cls.lock = threading.Lock()
super().__init__(*args, **kwargs)
def __call__(cls, *args, **kwargs):
if cls.__instance is None:
with cls.lock:
if cls.__instance is None:
cls.__instance = super().__call__(*args, **kwargs)
return cls.__instance
| SingletonMeta |
python | apache__airflow | dev/breeze/tests/test_docs_version_validation.py | {
"start": 976,
"end": 2290
} | class ____:
def setup_method(self):
os.environ["AIRFLOW_SITE_DIRECTORY"] = "/path/to/docs-archive"
error_versions.clear()
@patch("os.listdir")
@patch("os.path.join")
def test_validate_docs_version_with_invalid_versions(self, mock_path_join, mock_listdir):
mock_listdir.side_effect = [
["apache-airflow", "apache-airflow-providers-google"],
["1.10.0", "stable", "invalid_version"],
["2.0.0", "stable", "stable.txt"],
]
mock_path_join.side_effect = lambda *args: "/".join(args)
with pytest.raises(SystemExit):
validate_docs_version()
assert "Invalid version: 'invalid_version' found under doc folder apache-airflow" in error_versions
@patch("os.listdir")
@patch("os.path.join")
def test_validate_docs_version_with_valid_versions(self, mock_path_join, mock_listdir):
mock_listdir.side_effect = [
["apache-airflow", "apache-airflow-providers-standard"],
["1.10.0", "stable"],
["2.0.0", "stable", "stable.txt"],
]
mock_path_join.side_effect = lambda *args: "/".join(args)
validate_docs_version()
assert not error_versions, f"No errors should be found for valid versions, {error_versions}"
| TestValidateDocsVersion |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_N.py | {
"start": 1597,
"end": 2771
} | class ____(Benchmark):
r"""
NewFunction01 objective function.
This class defines the NewFunction01 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{NewFunction01}}(x) = \left | {\cos\left(\sqrt{\left|{x_{1}^{2}
+ x_{2}}\right|}\right)} \right |^{0.5} + (x_{1} + x_{2})/100
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -0.18459899925` for
:math:`x = [-8.46669057, -9.99982177]`
.. [1] Mishra, S. Global Optimization by Differential Evolution and
Particle Swarm Methods: Evaluation on Some Benchmark Functions.
Munich Personal RePEc Archive, 2006, 1005
TODO line 355
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[-8.46668984648, -9.99980944557]]
self.fglob = -0.184648852475
def fun(self, x, *args):
self.nfev += 1
return ((abs(cos(sqrt(abs(x[0] ** 2 + x[1]))))) ** 0.5
+ 0.01 * (x[0] + x[1]))
| NewFunction01 |
python | ipython__ipython | tests/test_debugger.py | {
"start": 1178,
"end": 25315
} | class ____(object):
"""Context manager that makes testing Pdb in doctests easier."""
def __init__(self, input):
self.input = input
def __enter__(self):
self.real_stdin = sys.stdin
sys.stdin = _FakeInput(self.input)
def __exit__(self, *exc):
sys.stdin = self.real_stdin
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_ipdb_magics():
'''Test calling some IPython magics from ipdb.
First, set up some test functions and classes which we can inspect.
In [1]: class ExampleClass(object):
...: """Docstring for ExampleClass."""
...: def __init__(self):
...: """Docstring for ExampleClass.__init__"""
...: pass
...: def __str__(self):
...: return "ExampleClass()"
In [2]: def example_function(x, y, z="hello"):
...: """Docstring for example_function."""
...: pass
In [3]: old_trace = sys.gettrace()
Create a function which triggers ipdb.
In [4]: def trigger_ipdb():
...: a = ExampleClass()
...: debugger.Pdb().set_trace()
Run ipdb with faked input & check output. Because of a difference between
Python 3.13 & older versions, the first bit of the output is inconsistent.
We need to use ... to accommodate that, so the examples have to use IPython
prompts so that ... is distinct from the Python PS2 prompt.
In [5]: with PdbTestInput([
...: 'pdef example_function',
...: 'pdoc ExampleClass',
...: 'up',
...: 'down',
...: 'list',
...: 'pinfo a',
...: 'll',
...: 'continue',
...: ]):
...: trigger_ipdb()
...> <doctest ...>(3)trigger_ipdb()
1 def trigger_ipdb():
2 a = ExampleClass()
----> 3 debugger.Pdb().set_trace()
<BLANKLINE>
ipdb> pdef example_function
example_function(x, y, z='hello')
ipdb> pdoc ExampleClass
Class docstring:
Docstring for ExampleClass.
Init docstring:
Docstring for ExampleClass.__init__
ipdb> up
> <doctest ...>(11)<module>()
7 'pinfo a',
8 'll',
9 'continue',
10 ]):
---> 11 trigger_ipdb()
<BLANKLINE>
ipdb> down...
> <doctest ...>(3)trigger_ipdb()
1 def trigger_ipdb():
2 a = ExampleClass()
----> 3 debugger.Pdb().set_trace()
<BLANKLINE>
ipdb> list
1 def trigger_ipdb():
2 a = ExampleClass()
----> 3 debugger.Pdb().set_trace()
<BLANKLINE>
ipdb> pinfo a
Type: ExampleClass
String form: ExampleClass()
Namespace: Local...
Docstring: Docstring for ExampleClass.
Init docstring: Docstring for ExampleClass.__init__
ipdb> ll
1 def trigger_ipdb():
2 a = ExampleClass()
----> 3 debugger.Pdb().set_trace()
<BLANKLINE>
ipdb> continue
Restore previous trace function, e.g. for coverage.py
In [6]: sys.settrace(old_trace)
'''
def test_ipdb_closure():
"""Test evaluation of expressions which depend on closure.
In [1]: old_trace = sys.gettrace()
Create a function which triggers ipdb.
In [2]: def trigger_ipdb():
...: debugger.Pdb().set_trace()
In [3]: with PdbTestInput([
...: 'x = 1; sum(x * i for i in range(5))',
...: 'continue',
...: ]):
...: trigger_ipdb()
...> <doctest ...>(2)trigger_ipdb()
1 def trigger_ipdb():
----> 2 debugger.Pdb().set_trace()
<BLANKLINE>
ipdb> x = 1; sum(x * i for i in range(5))
ipdb> continue
Restore previous trace function, e.g. for coverage.py
In [4]: sys.settrace(old_trace)
"""
def test_ipdb_magics2():
"""Test ipdb with a very short function.
>>> old_trace = sys.gettrace()
>>> def bar():
... pass
Run ipdb.
>>> with PdbTestInput([
... 'continue',
... ]):
... debugger.Pdb().runcall(bar)
> <doctest ...>(2)bar()
1 def bar():
----> 2 pass
<BLANKLINE>
ipdb> continue
Restore previous trace function, e.g. for coverage.py
>>> sys.settrace(old_trace)
"""
def can_quit():
"""Test that quit work in ipydb
>>> old_trace = sys.gettrace()
>>> def bar():
... pass
>>> with PdbTestInput([
... 'quit',
... ]):
... debugger.Pdb().runcall(bar)
> <doctest ...>(2)bar()
1 def bar():
----> 2 pass
<BLANKLINE>
ipdb> quit
Restore previous trace function, e.g. for coverage.py
>>> sys.settrace(old_trace)
"""
def can_exit():
"""Test that quit work in ipydb
>>> old_trace = sys.gettrace()
>>> def bar():
... pass
>>> with PdbTestInput([
... 'exit',
... ]):
... debugger.Pdb().runcall(bar)
> <doctest ...>(2)bar()
1 def bar():
----> 2 pass
<BLANKLINE>
ipdb> exit
Restore previous trace function, e.g. for coverage.py
>>> sys.settrace(old_trace)
"""
def test_interruptible_core_debugger():
"""The debugger can be interrupted.
The presumption is there is some mechanism that causes a KeyboardInterrupt
(this is implemented in ipykernel). We want to ensure the
KeyboardInterrupt cause debugging to cease.
"""
def raising_input(msg="", called=[0]):
called[0] += 1
assert called[0] == 1, "input() should only be called once!"
raise KeyboardInterrupt()
tracer_orig = sys.gettrace()
try:
with patch.object(builtins, "input", raising_input):
debugger.InterruptiblePdb().set_trace()
# The way this test will fail is by set_trace() never exiting,
# resulting in a timeout by the test runner. The alternative
# implementation would involve a subprocess, but that adds issues
# with interrupting subprocesses that are rather complex, so it's
# simpler just to do it this way.
finally:
# restore the original trace function
sys.settrace(tracer_orig)
@skip_win32
def test_xmode_skip():
"""that xmode skip frames
Not as a doctest as pytest does not run doctests.
"""
import pexpect
env = os.environ.copy()
env["IPY_TEST_SIMPLE_PROMPT"] = "1"
child = pexpect.spawn(
sys.executable, ["-m", "IPython", "--colors=nocolor"], env=env
)
child.timeout = 15 * IPYTHON_TESTING_TIMEOUT_SCALE
child.expect("IPython")
child.expect("\n")
child.expect_exact("In [1]")
block = dedent(
"""
def f():
__tracebackhide__ = True
g()
def g():
raise ValueError
f()
"""
)
for line in block.splitlines():
child.sendline(line)
child.expect_exact(line)
child.expect_exact("skipping")
block = dedent(
"""
def f():
__tracebackhide__ = True
g()
def g():
from IPython.core.debugger import set_trace
set_trace()
f()
"""
)
for line in block.splitlines():
child.sendline(line)
child.expect_exact(line)
child.expect("ipdb>")
child.sendline("w")
child.expect("hidden")
child.expect("ipdb>")
child.sendline("skip_hidden false")
child.sendline("w")
child.expect("__traceba")
child.expect("ipdb>")
child.close()
skip_decorators_blocks = (
"""
def helpers_helper():
pass # should not stop here except breakpoint
""",
"""
def helper_1():
helpers_helper() # should not stop here
""",
"""
def helper_2():
pass # should not stop here
""",
"""
def pdb_skipped_decorator2(function):
def wrapped_fn(*args, **kwargs):
__debuggerskip__ = True
helper_2()
__debuggerskip__ = False
result = function(*args, **kwargs)
__debuggerskip__ = True
helper_2()
return result
return wrapped_fn
""",
"""
def pdb_skipped_decorator(function):
def wrapped_fn(*args, **kwargs):
__debuggerskip__ = True
helper_1()
__debuggerskip__ = False
result = function(*args, **kwargs)
__debuggerskip__ = True
helper_2()
return result
return wrapped_fn
""",
"""
@pdb_skipped_decorator
@pdb_skipped_decorator2
def bar(x, y):
return x * y
""",
"""import IPython.terminal.debugger as ipdb""",
"""
def f():
ipdb.set_trace()
bar(3, 4)
""",
"""
f()
""",
)
def _decorator_skip_setup():
import pexpect
env = os.environ.copy()
env["IPY_TEST_SIMPLE_PROMPT"] = "1"
env["PROMPT_TOOLKIT_NO_CPR"] = "1"
child = pexpect.spawn(
sys.executable, ["-m", "IPython", "--colors=nocolor"], env=env
)
child.timeout = 15 * IPYTHON_TESTING_TIMEOUT_SCALE
child.expect("IPython")
child.expect("\n")
child.timeout = 5 * IPYTHON_TESTING_TIMEOUT_SCALE
child.str_last_chars = 500
dedented_blocks = [dedent(b).strip() for b in skip_decorators_blocks]
in_prompt_number = 1
for cblock in dedented_blocks:
child.expect_exact(f"In [{in_prompt_number}]:")
in_prompt_number += 1
for line in cblock.splitlines():
child.sendline(line)
child.expect_exact(line)
child.sendline("")
return child
@pytest.mark.skip(reason="recently fail for unknown reason on CI")
@skip_win32
def test_decorator_skip():
"""test that decorator frames can be skipped."""
child = _decorator_skip_setup()
child.expect_exact("ipython-input-8")
child.expect_exact("3 bar(3, 4)")
child.expect("ipdb>")
child.expect("ipdb>")
child.sendline("step")
child.expect_exact("step")
child.expect_exact("--Call--")
child.expect_exact("ipython-input-6")
child.expect_exact("1 @pdb_skipped_decorator")
child.sendline("s")
child.expect_exact("return x * y")
child.close()
@pytest.mark.skip(reason="recently fail for unknown reason on CI")
@pytest.mark.skipif(platform.python_implementation() == "PyPy", reason="issues on PyPy")
@skip_win32
def test_decorator_skip_disabled():
"""test that decorator frame skipping can be disabled"""
child = _decorator_skip_setup()
child.expect_exact("3 bar(3, 4)")
for input_, expected in [
("skip_predicates debuggerskip False", ""),
("skip_predicates", "debuggerskip : False"),
("step", "---> 2 def wrapped_fn"),
("step", "----> 3 __debuggerskip__"),
("step", "----> 4 helper_1()"),
("step", "---> 1 def helper_1():"),
("next", "----> 2 helpers_helper()"),
("next", "--Return--"),
("next", "----> 5 __debuggerskip__ = False"),
]:
child.expect("ipdb>")
child.sendline(input_)
child.expect_exact(input_)
child.expect_exact(expected)
child.close()
@pytest.mark.skip(reason="recently fail for unknown reason on CI")
@pytest.mark.skipif(platform.python_implementation() == "PyPy", reason="issues on PyPy")
@skip_win32
def test_decorator_skip_with_breakpoint():
"""test that decorator frame skipping can be disabled"""
import pexpect
env = os.environ.copy()
env["IPY_TEST_SIMPLE_PROMPT"] = "1"
env["PROMPT_TOOLKIT_NO_CPR"] = "1"
child = pexpect.spawn(
sys.executable, ["-m", "IPython", "--colors=nocolor"], env=env
)
child.timeout = 15 * IPYTHON_TESTING_TIMEOUT_SCALE
child.str_last_chars = 500
child.expect("IPython")
child.expect("\n")
child.timeout = 5 * IPYTHON_TESTING_TIMEOUT_SCALE
### we need a filename, so we need to exec the full block with a filename
with NamedTemporaryFile(suffix=".py", dir=".", delete=True) as tf:
name = tf.name[:-3].split("/")[-1]
tf.write("\n".join([dedent(x) for x in skip_decorators_blocks[:-1]]).encode())
tf.flush()
codeblock = f"from {name} import f"
dedented_blocks = [
codeblock,
"f()",
]
in_prompt_number = 1
for cblock in dedented_blocks:
child.expect_exact(f"In [{in_prompt_number}]:")
in_prompt_number += 1
for line in cblock.splitlines():
child.sendline(line)
child.expect_exact(line)
child.sendline("")
# From 3.13, set_trace()/breakpoint() stop on the line where they're
# called, instead of the next line.
if sys.version_info >= (3, 14):
child.expect_exact(" 46 ipdb.set_trace()")
extra_step = [("step", "--> 47 bar(3, 4)")]
elif sys.version_info >= (3, 13):
child.expect_exact("--> 46 ipdb.set_trace()")
extra_step = [("step", "--> 47 bar(3, 4)")]
else:
child.expect_exact("--> 47 bar(3, 4)")
extra_step = []
for input_, expected in (
[
(f"b {name}.py:3", ""),
]
+ extra_step
+ [
("step", "1---> 3 pass # should not stop here except"),
("step", "---> 38 @pdb_skipped_decorator"),
("continue", ""),
]
):
child.expect("ipdb>")
child.sendline(input_)
child.expect_exact(input_)
child.expect_exact(expected)
child.close()
@skip_win32
def test_where_erase_value():
"""Test that `where` does not access f_locals and erase values."""
import pexpect
env = os.environ.copy()
env["IPY_TEST_SIMPLE_PROMPT"] = "1"
child = pexpect.spawn(
sys.executable, ["-m", "IPython", "--colors=nocolor"], env=env
)
child.timeout = 15 * IPYTHON_TESTING_TIMEOUT_SCALE
child.expect("IPython")
child.expect("\n")
child.expect_exact("In [1]")
block = dedent(
"""
def simple_f():
myvar = 1
print(myvar)
1/0
print(myvar)
simple_f() """
)
for line in block.splitlines():
child.sendline(line)
child.expect_exact(line)
child.expect_exact("ZeroDivisionError")
child.expect_exact("In [2]:")
child.sendline("%debug")
##
child.expect("ipdb>")
child.sendline("myvar")
child.expect("1")
##
child.expect("ipdb>")
child.sendline("myvar = 2")
##
child.expect_exact("ipdb>")
child.sendline("myvar")
child.expect_exact("2")
##
child.expect("ipdb>")
child.sendline("where")
##
child.expect("ipdb>")
child.sendline("myvar")
child.expect_exact("2")
child.expect("ipdb>")
child.close()
@skip_win32
def test_ignore_module_basic_functionality():
"""Test basic ignore/unignore functionality and error handling."""
import pexpect
env = os.environ.copy()
env["IPY_TEST_SIMPLE_PROMPT"] = "1"
with TemporaryDirectory() as temp_dir:
main_path = create_test_modules(temp_dir)
child = pexpect.spawn(sys.executable, [main_path], env=env, cwd=temp_dir)
child.timeout = 15 * IPYTHON_TESTING_TIMEOUT_SCALE
child.expect("ipdb>")
# Test listing modules when none are ignored
child.sendline("ignore_module")
child.expect_exact("No modules are currently ignored.")
child.expect("ipdb>")
# Test ignoring a module
child.sendline("ignore_module level2_module")
child.expect("ipdb>")
# Test listing ignored modules
child.sendline("ignore_module")
child.expect_exact("Currently ignored modules: ['level2_module']")
child.expect("ipdb>")
# Test wildcard pattern
child.sendline("ignore_module testpkg.*")
child.expect("ipdb>")
child.sendline("ignore_module")
child.expect_exact("Currently ignored modules: ['level2_module', 'testpkg.*']")
child.expect("ipdb>")
# Test error handling - removing non-existent module
child.sendline("unignore_module nonexistent")
child.expect_exact("Module nonexistent is not currently ignored")
child.expect("ipdb>")
# Test successful removal
child.sendline("unignore_module level2_module")
child.expect("ipdb>")
child.sendline("ignore_module")
child.expect_exact("Currently ignored modules: ['testpkg.*']")
child.expect("ipdb>")
# Test removing already removed module
child.sendline("unignore_module level2_module")
child.expect_exact("Module level2_module is not currently ignored")
child.expect("ipdb>")
# Remove wildcard pattern
child.sendline("unignore_module testpkg.*")
child.expect("ipdb>")
child.sendline("ignore_module")
child.expect_exact("No modules are currently ignored.")
child.expect("ipdb>")
child.sendline("continue")
child.close()
# Helper function for creating temporary modules
def create_test_modules(temp_dir):
"""Create a comprehensive module hierarchy for testing all debugger commands."""
temp_path = Path(temp_dir)
# Create package structure for wildcard testing
package_dir = temp_path / "testpkg"
package_dir.mkdir()
# Package __init__.py
(package_dir / "__init__.py").write_text("# Test package")
# testpkg/submod1.py
(package_dir / "submod1.py").write_text(
dedent(
"""
def submod1_func():
x = 1
y = 2
return x + y
"""
)
)
# testpkg/submod2.py
(package_dir / "submod2.py").write_text(
dedent(
"""
def submod2_func():
z = 10
return z * 2
"""
)
)
# Level 1 (top level module)
(temp_path / "level1_module.py").write_text(
dedent(
"""
from level2_module import level2_func
def level1_func():
return level2_func()
"""
)
)
# Level 2 (middle level module)
(temp_path / "level2_module.py").write_text(
dedent(
"""
from level3_module import level3_func
from testpkg.submod1 import submod1_func
from testpkg.submod2 import submod2_func
def level2_func():
# Call package functions for step/next testing
result1 = submod1_func()
result2 = submod2_func()
return level3_func() + result1 + result2
"""
)
)
# Level 3 (bottom level with debugger)
(temp_path / "level3_module.py").write_text(
dedent(
"""
from level4_module import level4_func
from IPython.core.debugger import set_trace
def level3_func():
set_trace()
pass
result = level4_func()
return result
"""
)
)
# Level 4 (bottom level with debugger)
(temp_path / "level4_module.py").write_text(
dedent(
"""
def level4_func():
a = 70
b = 30
return a + b
"""
)
)
# Main runner
main_path = temp_path / "main_runner.py"
main_path.write_text(
dedent(
"""
import sys
sys.path.insert(0, '.')
from level1_module import level1_func
if __name__ == "__main__":
result = level1_func()
print(f"Final result: {result}")
"""
)
)
return str(main_path)
@skip_win32
def test_ignore_module_all_commands():
"""Comprehensive test for all debugger commands (up/down/step/next) with ignore functionality."""
import pexpect
env = os.environ.copy()
env["IPY_TEST_SIMPLE_PROMPT"] = "1"
with TemporaryDirectory() as temp_dir:
main_path = create_test_modules(temp_dir)
# Test UP and DOWN commands
child = pexpect.spawn(sys.executable, [main_path], env=env, cwd=temp_dir)
child.timeout = 15 * IPYTHON_TESTING_TIMEOUT_SCALE
child.expect("ipdb>")
# Test up without ignores (baseline)
child.sendline("up")
child.expect("ipdb>")
child.sendline("__name__")
child.expect_exact("level2_module")
child.expect("ipdb>")
# Reset position
child.sendline("down")
child.expect("ipdb>")
# Test up with single module ignore
child.sendline("ignore_module level2_module")
child.expect("ipdb>")
child.sendline("up")
child.expect_exact(
"[... skipped 1 frame(s): 0 hidden frames + 1 ignored modules]"
)
child.expect("ipdb>")
child.sendline("__name__")
child.expect_exact("level1_module")
child.expect("ipdb>")
# Test up with wildcard ignore
child.sendline("down")
child.expect_exact(
"[... skipped 1 frame(s): 0 hidden frames + 1 ignored modules]"
)
child.expect("ipdb>")
child.sendline("unignore_module level2_module")
child.expect("ipdb>")
child.sendline("ignore_module level*")
child.expect("ipdb>")
child.sendline("up")
child.expect_exact(
"[... skipped 2 frame(s): 0 hidden frames + 2 ignored modules]"
)
child.expect("ipdb>")
child.sendline("__name__")
child.expect_exact("__main__")
child.expect("ipdb>")
child.sendline("continue")
child.close()
# Test STEP command
child = pexpect.spawn(sys.executable, [main_path], env=env, cwd=temp_dir)
child.timeout = 15 * IPYTHON_TESTING_TIMEOUT_SCALE
child.expect("ipdb>")
# Test step without ignores (should step into module)
child.sendline("until 9")
child.expect("ipdb>")
child.sendline("step")
child.expect("ipdb>")
child.sendline("__name__")
child.expect_exact("level4_module")
child.expect("ipdb>")
child.sendline("continue")
child.close()
# Test step with single module ignore
child = pexpect.spawn(sys.executable, [main_path], env=env, cwd=temp_dir)
child.timeout = 15 * IPYTHON_TESTING_TIMEOUT_SCALE
child.expect("ipdb>")
child.sendline("ignore_module level4_module")
child.expect("ipdb>")
child.sendline("until 9")
child.expect("ipdb>")
child.sendline("step")
child.expect_exact("[... skipped 1 ignored module(s)]")
child.expect("ipdb>")
child.sendline("__name__")
child.expect_exact("level3_module")
child.expect("ipdb>")
child.sendline("continue")
child.close()
# Test NEXT command
child = pexpect.spawn(sys.executable, [main_path], env=env, cwd=temp_dir)
child.timeout = 15 * IPYTHON_TESTING_TIMEOUT_SCALE
child.expect("ipdb>")
# Test next without ignores
child.sendline("until 9")
child.expect("ipdb>")
child.sendline("next")
child.expect("ipdb>")
child.sendline("__name__")
child.expect_exact("level3_module")
child.expect("ipdb>")
child.sendline("continue")
child.close()
# Test next with module ignore
child = pexpect.spawn(sys.executable, [main_path], env=env, cwd=temp_dir)
child.timeout = 15 * IPYTHON_TESTING_TIMEOUT_SCALE
child.expect("ipdb>")
child.sendline("ignore_module level2_module")
child.expect("ipdb>")
child.sendline("return")
child.expect("ipdb>")
child.sendline("next")
child.expect_exact("[... skipped 1 ignored module(s)]")
child.expect("ipdb>")
child.sendline("continue")
child.close()
| PdbTestInput |
python | facebook__pyre-check | client/commands/pyre_language_server.py | {
"start": 4864,
"end": 17962
} | class ____(PyreLanguageServerApi):
# Channel to send responses to the editor
output_channel: connections.AsyncTextWriter
# NOTE: The fields inside `server_state` are mutable and can be changed by the background
# task.
server_state: state.ServerState
querier: daemon_querier.AbstractDaemonQuerier
client_type_error_handler: type_error_handler.ClientTypeErrorHandler
async def write_telemetry(
self,
parameters: Dict[str, object],
activity_key: Optional[Dict[str, object]],
) -> None:
should_write_telemetry = self.server_state.server_options.language_server_features.telemetry.is_enabled()
if should_write_telemetry:
parameters = dict(parameters)
parameters["project_identifier"] = (
self.server_state.server_options.project_identifier
)
await lsp.write_json_rpc_ignore_connection_error(
self.output_channel,
json_rpc.Request(
activity_key=activity_key,
method="telemetry/event",
parameters=json_rpc.ByNameParameters(parameters),
),
)
def get_language_server_features(self) -> features.LanguageServerFeatures:
return self.server_state.server_options.language_server_features
async def update_overlay_if_needed(self, document_path: Path) -> float:
"""
Send an overlay update to the daemon if three conditions are met:
- unsaved changes support is enabled
- a document is listed in `server_state.opened_documents`
- the OpenedDocumentState says the overlay overlay may be stale
Returns the time taken to run the update.
"""
update_timer = timer.Timer()
if (
self.get_language_server_features().unsaved_changes.is_enabled()
and document_path in self.server_state.opened_documents
):
opened_document_state = self.server_state.opened_documents[document_path]
code_changes = opened_document_state.code
current_is_dirty_state = opened_document_state.is_dirty
if not opened_document_state.pyre_code_updated:
result = await self.querier.update_overlay(
path=document_path, code=code_changes
)
if isinstance(result, daemon_connection.DaemonConnectionFailure):
LOG.info(
daemon_failure_string(
"didChange", str(type(result)), result.error_message
)
)
LOG.info(result.error_message)
else:
self.server_state.opened_documents[document_path] = (
OpenedDocumentState(
code=code_changes,
is_dirty=current_is_dirty_state,
pyre_code_updated=True,
)
)
return update_timer.stop_in_millisecond()
async def process_open_request(
self,
parameters: lsp.DidOpenTextDocumentParameters,
activity_key: Optional[Dict[str, object]] = None,
) -> None:
document_path = parameters.text_document.document_uri().to_file_path()
if document_path is None:
raise json_rpc.InvalidRequestError(
f"Document URI is not a file: {parameters.text_document.uri}"
)
document_path = document_path.resolve()
self.server_state.opened_documents[document_path] = OpenedDocumentState(
code=parameters.text_document.text,
is_dirty=False,
pyre_code_updated=True,
)
LOG.info(f"File opened: {document_path}")
await self.querier.handle_file_opened(
document_path, parameters.text_document.text
)
async def process_close_request(
self, parameters: lsp.DidCloseTextDocumentParameters
) -> None:
document_path = parameters.text_document.document_uri().to_file_path()
if document_path is None:
raise json_rpc.InvalidRequestError(
f"Document URI is not a file: {parameters.text_document.uri}"
)
document_path = document_path.resolve()
try:
del self.server_state.opened_documents[document_path]
LOG.info(f"File closed: {document_path}")
await self.querier.handle_file_closed(document_path)
except KeyError:
LOG.warning(f"Trying to close an un-opened file: {document_path}")
async def _publish_type_errors_for_files(
self,
type_errors: Mapping[Path, Collection[error.Error]],
type_checked_files: Set[Path],
set_unused_as_warning: bool = False,
) -> None:
for file in type_checked_files:
document_errors = type_errors.get(file, set())
await self.client_type_error_handler.show_overlay_type_errors(
path=file,
type_errors=list(document_errors),
set_unused_as_warning=set_unused_as_warning,
)
async def _query_pyre_daemon_type_errors(
self, document_path: Path, type_checkable_files: Set[Path]
) -> Tuple[Dict[Path, List[error.Error]], Optional[str]]:
if len(type_checkable_files) == 0:
LOG.debug("No daemon type checkable files found")
return {}, None
await self.update_overlay_if_needed(document_path)
result = await self.querier.get_type_errors(
type_checkable_files,
)
type_errors: Dict[Path, List[error.Error]] = {}
if isinstance(result, DaemonQueryFailure):
return type_errors, result.error_message
else:
type_errors = result
await self._publish_type_errors_for_files(type_errors, type_checkable_files)
return type_errors, None
async def handle_overlay_type_errors(
self,
document_path: Path,
new_file_loaded: bool,
activity_key: Optional[Dict[str, object]] = None,
) -> None:
client_register_event = self.server_state.client_register_event
if client_register_event is not None and not client_register_event.is_set():
return
daemon_status_before = self.server_state.status_tracker.get_status()
type_errors_timer = timer.Timer()
open_documents = set(self.server_state.opened_documents.keys())
type_errors, error_message = await self._query_pyre_daemon_type_errors(
document_path, open_documents
)
json_type_errors = {
str(path): [type_error.to_json() for type_error in errors]
for path, errors in type_errors.items()
}
await self.write_telemetry(
{
"type": "LSP",
"operation": "typeErrors",
"filePath": str(document_path),
"server_state_open_documents_count": len(
self.server_state.opened_documents
),
"duration_ms": type_errors_timer.stop_in_millisecond(),
"error_message": error_message,
"type_errors": json_type_errors,
**daemon_status_before.as_telemetry_dict(),
},
activity_key,
)
async def process_did_change_request(
self,
parameters: lsp.DidChangeTextDocumentParameters,
activity_key: Optional[Dict[str, object]] = None,
) -> None:
document_path = parameters.text_document.document_uri().to_file_path()
if document_path is None:
raise json_rpc.InvalidRequestError(
f"Document URI is not a file: {parameters.text_document.uri}"
)
document_path = document_path.resolve()
if document_path not in self.server_state.opened_documents:
return
daemon_status_before = self.server_state.status_tracker.get_status()
did_change_timer = timer.Timer()
process_unsaved_changes = self.server_state.server_options.language_server_features.unsaved_changes.is_enabled()
error_message = None
code_changes = str(
"".join(
[content_change.text for content_change in parameters.content_changes]
)
)
self.server_state.opened_documents[document_path] = OpenedDocumentState(
code=code_changes,
is_dirty=True,
pyre_code_updated=False,
)
await self.write_telemetry(
{
"type": "LSP",
"operation": "didChange",
"filePath": str(document_path),
"server_state_open_documents_count": len(
self.server_state.opened_documents
),
"duration_ms": did_change_timer.stop_in_millisecond(),
"error_message": error_message,
"overlays_enabled": process_unsaved_changes,
**daemon_status_before.as_telemetry_dict(),
},
activity_key,
)
if (
process_unsaved_changes
and self.get_language_server_features().type_errors.is_enabled()
):
await self.handle_overlay_type_errors(
document_path=document_path,
new_file_loaded=False,
activity_key=activity_key,
)
async def process_did_save_request(
self,
parameters: lsp.DidSaveTextDocumentParameters,
activity_key: Optional[Dict[str, object]] = None,
) -> None:
document_path = parameters.text_document.document_uri().to_file_path()
if document_path is None:
raise json_rpc.InvalidRequestError(
f"Document URI is not a file: {parameters.text_document.uri}"
)
document_path = document_path.resolve()
if document_path not in self.server_state.opened_documents:
return
daemon_status_before = self.server_state.status_tracker.get_status()
code_changes = self.server_state.opened_documents[document_path].code
self.server_state.opened_documents[document_path] = OpenedDocumentState(
code=code_changes,
is_dirty=False,
# False here because even though a didSave event means the base environment
# will be up-to-date (after an incremental push), it is not necessarily
# the case that the overlay environment is up to date.
pyre_code_updated=False,
)
await self.write_telemetry(
{
"type": "LSP",
"operation": "didSave",
"filePath": str(document_path),
"server_state_open_documents_count": len(
self.server_state.opened_documents
),
# We don't do any blocking work on didSave, but analytics are easier if
# we avoid needlessly introducing NULL values.
"duration_ms": 0,
**daemon_status_before.as_telemetry_dict(),
},
activity_key,
)
async def process_type_coverage_request(
self,
parameters: lsp.TypeCoverageParameters,
request_id: Union[int, str, None],
activity_key: Optional[Dict[str, object]] = None,
) -> None:
document_path = parameters.text_document.document_uri().to_file_path()
if document_path is None:
raise json_rpc.InvalidRequestError(
f"Document URI is not a file: {parameters.text_document.uri}"
)
document_path = document_path.resolve()
daemon_status_before = self.server_state.status_tracker.get_status()
type_coverage_timer = timer.Timer()
response = await self.querier.get_type_coverage(path=document_path)
if response is not None:
await lsp.write_json_rpc(
self.output_channel,
json_rpc.SuccessResponse(
id=request_id,
activity_key=activity_key,
result=response.to_dict(),
),
)
await self.write_telemetry(
{
"type": "LSP",
"operation": "typeCoverage",
"filePath": str(document_path),
"duration_ms": type_coverage_timer.stop_in_millisecond(),
"coverage_type": self.get_language_server_features().type_coverage.value,
**daemon_status_before.as_telemetry_dict(),
},
activity_key,
)
async def process_shutdown_request(self, request_id: Union[int, str, None]) -> None:
await lsp.write_json_rpc_ignore_connection_error(
self.output_channel,
json_rpc.SuccessResponse(id=request_id, activity_key=None, result=None),
)
| PyreLanguageServer |
python | huggingface__transformers | src/transformers/models/ernie/modular_ernie.py | {
"start": 12016,
"end": 12086
} | class ____(BertForPreTrainingOutput):
pass
| ErnieForPreTrainingOutput |
python | pytorch__pytorch | torch/testing/_internal/autograd_function_db.py | {
"start": 11451,
"end": 12123
} | class ____(torch.autograd.Function):
@staticmethod
def forward(x, idx):
return x[idx]
@staticmethod
def setup_context(ctx, inputs, output):
x, idx = inputs
ctx.x_shape = x.shape
ctx.idx = idx
@staticmethod
def backward(ctx, grad_output):
result = grad_output.new_zeros(ctx.x_shape)
result[ctx.idx] = grad_output
return result, None
@staticmethod
def vmap(info, in_dims, x, idx):
x_bdim, _ = in_dims
x = x.movedim(x_bdim, 1)
return Select.apply(x, idx), 0
@staticmethod
def jvp(ctx, x_tangent, _):
return Select.apply(x_tangent, ctx.idx)
| Select |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/pool/base.py | {
"start": 4422,
"end": 16077
} | class ____(log.Identified, event.EventTarget):
"""Abstract base class for connection pools."""
dispatch: dispatcher[Pool]
echo: log._EchoFlagType
_orig_logging_name: Optional[str]
_dialect: Union[_ConnDialect, Dialect] = _ConnDialect()
_creator_arg: Union[_CreatorFnType, _CreatorWRecFnType]
_invoke_creator: _CreatorWRecFnType
_invalidate_time: float
def __init__(
self,
creator: Union[_CreatorFnType, _CreatorWRecFnType],
recycle: int = -1,
echo: log._EchoFlagType = None,
logging_name: Optional[str] = None,
reset_on_return: _ResetStyleArgType = True,
events: Optional[List[Tuple[_ListenerFnType, str]]] = None,
dialect: Optional[Union[_ConnDialect, Dialect]] = None,
pre_ping: bool = False,
_dispatch: Optional[_DispatchCommon[Pool]] = None,
):
"""
Construct a Pool.
:param creator: a callable function that returns a DB-API
connection object. The function will be called with
parameters.
:param recycle: If set to a value other than -1, number of
seconds between connection recycling, which means upon
checkout, if this timeout is surpassed the connection will be
closed and replaced with a newly opened connection. Defaults to -1.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param echo: if True, the connection pool will log
informational output such as when connections are invalidated
as well as when connections are recycled to the default log handler,
which defaults to ``sys.stdout`` for output.. If set to the string
``"debug"``, the logging will include pool checkouts and checkins.
The :paramref:`_pool.Pool.echo` parameter can also be set from the
:func:`_sa.create_engine` call by using the
:paramref:`_sa.create_engine.echo_pool` parameter.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param reset_on_return: Determine steps to take on
connections as they are returned to the pool, which were
not otherwise handled by a :class:`_engine.Connection`.
Available from :func:`_sa.create_engine` via the
:paramref:`_sa.create_engine.pool_reset_on_return` parameter.
:paramref:`_pool.Pool.reset_on_return` can have any of these values:
* ``"rollback"`` - call rollback() on the connection,
to release locks and transaction resources.
This is the default value. The vast majority
of use cases should leave this value set.
* ``"commit"`` - call commit() on the connection,
to release locks and transaction resources.
A commit here may be desirable for databases that
cache query plans if a commit is emitted,
such as Microsoft SQL Server. However, this
value is more dangerous than 'rollback' because
any data changes present on the transaction
are committed unconditionally.
* ``None`` - don't do anything on the connection.
This setting may be appropriate if the database / DBAPI
works in pure "autocommit" mode at all times, or if
a custom reset handler is established using the
:meth:`.PoolEvents.reset` event handler.
* ``True`` - same as 'rollback', this is here for
backwards compatibility.
* ``False`` - same as None, this is here for
backwards compatibility.
For further customization of reset on return, the
:meth:`.PoolEvents.reset` event hook may be used which can perform
any connection activity desired on reset.
.. seealso::
:ref:`pool_reset_on_return`
:meth:`.PoolEvents.reset`
:param events: a list of 2-tuples, each of the form
``(callable, target)`` which will be passed to :func:`.event.listen`
upon construction. Provided here so that event listeners
can be assigned via :func:`_sa.create_engine` before dialect-level
listeners are applied.
:param dialect: a :class:`.Dialect` that will handle the job
of calling rollback(), close(), or commit() on DBAPI connections.
If omitted, a built-in "stub" dialect is used. Applications that
make use of :func:`_sa.create_engine` should not use this parameter
as it is handled by the engine creation strategy.
:param pre_ping: if True, the pool will emit a "ping" (typically
"SELECT 1", but is dialect-specific) on the connection
upon checkout, to test if the connection is alive or not. If not,
the connection is transparently re-connected and upon success, all
other pooled connections established prior to that timestamp are
invalidated. Requires that a dialect is passed as well to
interpret the disconnection error.
"""
if logging_name:
self.logging_name = self._orig_logging_name = logging_name
else:
self._orig_logging_name = None
log.instance_logger(self, echoflag=echo)
self._creator = creator
self._recycle = recycle
self._invalidate_time = 0
self._pre_ping = pre_ping
self._reset_on_return = util.parse_user_argument_for_enum(
reset_on_return,
{
ResetStyle.reset_rollback: ["rollback", True],
ResetStyle.reset_none: ["none", None, False],
ResetStyle.reset_commit: ["commit"],
},
"reset_on_return",
)
self.echo = echo
if _dispatch:
self.dispatch._update(_dispatch, only_propagate=False)
if dialect:
self._dialect = dialect
if events:
for fn, target in events:
event.listen(self, target, fn)
@util.hybridproperty
def _is_asyncio(self) -> bool:
return self._dialect.is_async
@property
def _creator(self) -> Union[_CreatorFnType, _CreatorWRecFnType]:
return self._creator_arg
@_creator.setter
def _creator(
self, creator: Union[_CreatorFnType, _CreatorWRecFnType]
) -> None:
self._creator_arg = creator
# mypy seems to get super confused assigning functions to
# attributes
self._invoke_creator = self._should_wrap_creator(creator)
@_creator.deleter
def _creator(self) -> None:
# needed for mock testing
del self._creator_arg
del self._invoke_creator
def _should_wrap_creator(
self, creator: Union[_CreatorFnType, _CreatorWRecFnType]
) -> _CreatorWRecFnType:
"""Detect if creator accepts a single argument, or is sent
as a legacy style no-arg function.
"""
try:
argspec = util.get_callable_argspec(self._creator, no_self=True)
except TypeError:
creator_fn = cast(_CreatorFnType, creator)
return lambda rec: creator_fn()
if argspec.defaults is not None:
defaulted = len(argspec.defaults)
else:
defaulted = 0
positionals = len(argspec[0]) - defaulted
# look for the exact arg signature that DefaultStrategy
# sends us
if (argspec[0], argspec[3]) == (["connection_record"], (None,)):
return cast(_CreatorWRecFnType, creator)
# or just a single positional
elif positionals == 1:
return cast(_CreatorWRecFnType, creator)
# all other cases, just wrap and assume legacy "creator" callable
# thing
else:
creator_fn = cast(_CreatorFnType, creator)
return lambda rec: creator_fn()
def _close_connection(
self, connection: DBAPIConnection, *, terminate: bool = False
) -> None:
self.logger.debug(
"%s connection %r",
"Hard-closing" if terminate else "Closing",
connection,
)
try:
if terminate:
self._dialect.do_terminate(connection)
else:
self._dialect.do_close(connection)
except BaseException as e:
self.logger.error(
f"Exception {'terminating' if terminate else 'closing'} "
f"connection %r",
connection,
exc_info=True,
)
if not isinstance(e, Exception):
raise
def _create_connection(self) -> ConnectionPoolEntry:
"""Called by subclasses to create a new ConnectionRecord."""
return _ConnectionRecord(self)
def _invalidate(
self,
connection: PoolProxiedConnection,
exception: Optional[BaseException] = None,
_checkin: bool = True,
) -> None:
"""Mark all connections established within the generation
of the given connection as invalidated.
If this pool's last invalidate time is before when the given
connection was created, update the timestamp til now. Otherwise,
no action is performed.
Connections with a start time prior to this pool's invalidation
time will be recycled upon next checkout.
"""
rec = getattr(connection, "_connection_record", None)
if not rec or self._invalidate_time < rec.starttime:
self._invalidate_time = time.time()
if _checkin and getattr(connection, "is_valid", False):
connection.invalidate(exception)
def recreate(self) -> Pool:
"""Return a new :class:`_pool.Pool`, of the same class as this one
and configured with identical creation arguments.
This method is used in conjunction with :meth:`dispose`
to close out an entire :class:`_pool.Pool` and create a new one in
its place.
"""
raise NotImplementedError()
def dispose(self) -> None:
"""Dispose of this pool.
This method leaves the possibility of checked-out connections
remaining open, as it only affects connections that are
idle in the pool.
.. seealso::
:meth:`Pool.recreate`
"""
raise NotImplementedError()
def connect(self) -> PoolProxiedConnection:
"""Return a DBAPI connection from the pool.
The connection is instrumented such that when its
``close()`` method is called, the connection will be returned to
the pool.
"""
return _ConnectionFairy._checkout(self)
def _return_conn(self, record: ConnectionPoolEntry) -> None:
"""Given a _ConnectionRecord, return it to the :class:`_pool.Pool`.
This method is called when an instrumented DBAPI connection
has its ``close()`` method called.
"""
self._do_return_conn(record)
def _do_get(self) -> ConnectionPoolEntry:
"""Implementation for :meth:`get`, supplied by subclasses."""
raise NotImplementedError()
def _do_return_conn(self, record: ConnectionPoolEntry) -> None:
"""Implementation for :meth:`return_conn`, supplied by subclasses."""
raise NotImplementedError()
def status(self) -> str:
"""Returns a brief description of the state of this pool."""
raise NotImplementedError()
| Pool |
python | doocs__leetcode | lcci/16.07.Maximum/Solution.py | {
"start": 0,
"end": 157
} | class ____:
def maximum(self, a: int, b: int) -> int:
k = (int(((a - b) & 0xFFFFFFFFFFFFFFFF) >> 63)) & 1
return a * (k ^ 1) + b * k
| Solution |
python | matplotlib__matplotlib | lib/matplotlib/colors.py | {
"start": 108376,
"end": 110086
} | class ____(Normalize):
"""
Arbitrary normalization using functions for the forward and inverse.
Parameters
----------
functions : (callable, callable)
two-tuple of the forward and inverse functions for the normalization.
The forward function must be monotonic.
Both functions must have the signature ::
def forward(values: array-like) -> array-like
vmin, vmax : float or None
If *vmin* and/or *vmax* is not given, they are initialized from the
minimum and maximum value, respectively, of the first input
processed; i.e., ``__call__(A)`` calls ``autoscale_None(A)``.
clip : bool, default: False
Determines the behavior for mapping values outside the range
``[vmin, vmax]``.
If clipping is off, values outside the range ``[vmin, vmax]`` are also
transformed by the function, resulting in values outside ``[0, 1]``.
This behavior is usually desirable, as colormaps can mark these *under*
and *over* values with specific colors.
If clipping is on, values below *vmin* are mapped to 0 and values above
*vmax* are mapped to 1. Such values become indistinguishable from
regular boundary values, which may cause misinterpretation of the data.
"""
LogNorm = make_norm_from_scale(
functools.partial(scale.LogScale, nonpositive="mask"))(Normalize)
LogNorm.__name__ = LogNorm.__qualname__ = "LogNorm"
LogNorm.__doc__ = "Normalize a given value to the 0-1 range on a log scale."
@make_norm_from_scale(
scale.SymmetricalLogScale,
init=lambda linthresh, linscale=1., vmin=None, vmax=None, clip=False, *,
base=10: None)
| FuncNorm |
python | getsentry__sentry | src/sentry/dynamic_sampling/tasks/boost_low_volume_transactions.py | {
"start": 8134,
"end": 11970
} | class ____:
"""
Fetches the total number of transactions and the number of distinct transaction types for each
project in the given organizations
"""
def __init__(self, orgs: Sequence[int]):
transaction_string_id = indexer.resolve_shared_org("transaction")
self.transaction_tag = f"tags_raw[{transaction_string_id}]"
self.metric_id = indexer.resolve_shared_org(
str(TransactionMRI.COUNT_PER_ROOT_PROJECT.value)
)
self.org_ids = list(orgs)
self.offset = 0
self.has_more_results = True
self.cache: list[dict[str, int | float]] = []
self.last_org_id: int | None = None
def __iter__(self) -> FetchProjectTransactionTotals:
return self
def __next__(self) -> ProjectTransactionsTotals:
if not self._cache_empty():
return self._get_from_cache()
granularity = Granularity(60)
if self.has_more_results:
query = (
Query(
match=Entity(EntityKey.GenericOrgMetricsCounters.value),
select=[
Function("sum", [Column("value")], "num_transactions"),
Function("uniq", [Column(self.transaction_tag)], "num_classes"),
Column("org_id"),
Column("project_id"),
],
groupby=[
Column("org_id"),
Column("project_id"),
],
where=[
Condition(
Column("timestamp"),
Op.GTE,
datetime.utcnow() - BOOST_LOW_VOLUME_TRANSACTIONS_QUERY_INTERVAL,
),
Condition(Column("timestamp"), Op.LT, datetime.utcnow()),
Condition(Column("metric_id"), Op.EQ, self.metric_id),
Condition(Column("org_id"), Op.IN, self.org_ids),
],
granularity=granularity,
orderby=[
OrderBy(Column("org_id"), Direction.ASC),
OrderBy(Column("project_id"), Direction.ASC),
],
)
.set_limit(CHUNK_SIZE + 1)
.set_offset(self.offset)
)
request = Request(
dataset=Dataset.PerformanceMetrics.value,
app_id="dynamic_sampling",
query=query,
tenant_ids={"use_case_id": UseCaseID.TRANSACTIONS.value, "cross_org_query": 1},
)
data = raw_snql_query(
request,
referrer=Referrer.DYNAMIC_SAMPLING_COUNTERS_FETCH_PROJECTS_WITH_TRANSACTION_TOTALS.value,
)["data"]
count = len(data)
self.has_more_results = count > CHUNK_SIZE
self.offset += CHUNK_SIZE
if self.has_more_results:
data = data[:-1]
self.cache.extend(data)
return self._get_from_cache()
def _get_from_cache(self) -> ProjectTransactionsTotals:
if self._cache_empty():
raise StopIteration()
row = self.cache.pop(0)
proj_id = int(row["project_id"])
org_id = int(row["org_id"])
num_transactions = row["num_transactions"]
num_classes = int(row["num_classes"])
if self.last_org_id != org_id:
self.last_org_id = org_id
return {
"project_id": proj_id,
"org_id": org_id,
"total_num_transactions": num_transactions,
"total_num_classes": num_classes,
}
def _cache_empty(self) -> bool:
return not self.cache
| FetchProjectTransactionTotals |
python | huggingface__transformers | src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py | {
"start": 8302,
"end": 8378
} | class ____(Dinov2PatchEmbeddings):
pass
| Dinov2WithRegistersPatchEmbeddings |
python | tornadoweb__tornado | tornado/testing.py | {
"start": 13493,
"end": 18102
} | class ____(AsyncTestCase):
"""A test case that starts up an HTTP server.
Subclasses must override `get_app()`, which returns the
`tornado.web.Application` (or other `.HTTPServer` callback) to be tested.
Tests will typically use the provided ``self.http_client`` to fetch
URLs from this server.
Example, assuming the "Hello, world" example from the user guide is in
``hello.py``::
import hello
class TestHelloApp(AsyncHTTPTestCase):
def get_app(self):
return hello.make_app()
def test_homepage(self):
response = self.fetch('/')
self.assertEqual(response.code, 200)
self.assertEqual(response.body, 'Hello, world')
That call to ``self.fetch()`` is equivalent to ::
self.http_client.fetch(self.get_url('/'), self.stop)
response = self.wait()
which illustrates how AsyncTestCase can turn an asynchronous operation,
like ``http_client.fetch()``, into a synchronous operation. If you need
to do other asynchronous operations in tests, you'll probably need to use
``stop()`` and ``wait()`` yourself.
"""
def setUp(self) -> None:
super().setUp()
sock, port = bind_unused_port()
self.__port = port
self.http_client = self.get_http_client()
self._app = self.get_app()
self.http_server = self.get_http_server()
self.http_server.add_sockets([sock])
def get_http_client(self) -> AsyncHTTPClient:
return AsyncHTTPClient()
def get_http_server(self) -> HTTPServer:
return HTTPServer(self._app, **self.get_httpserver_options())
def get_app(self) -> Application:
"""Should be overridden by subclasses to return a
`tornado.web.Application` or other `.HTTPServer` callback.
"""
raise NotImplementedError()
def fetch(
self, path: str, raise_error: bool = False, **kwargs: Any
) -> HTTPResponse:
"""Convenience method to synchronously fetch a URL.
The given path will be appended to the local server's host and
port. Any additional keyword arguments will be passed directly to
`.AsyncHTTPClient.fetch` (and so could be used to pass
``method="POST"``, ``body="..."``, etc).
If the path begins with http:// or https://, it will be treated as a
full URL and will be fetched as-is.
If ``raise_error`` is ``True``, a `tornado.httpclient.HTTPError` will
be raised if the response code is not 200. This is the same behavior
as the ``raise_error`` argument to `.AsyncHTTPClient.fetch`, but
the default is ``False`` here (it's ``True`` in `.AsyncHTTPClient`)
because tests often need to deal with non-200 response codes.
.. versionchanged:: 5.0
Added support for absolute URLs.
.. versionchanged:: 5.1
Added the ``raise_error`` argument.
.. deprecated:: 5.1
This method currently turns any exception into an
`.HTTPResponse` with status code 599. In Tornado 6.0,
errors other than `tornado.httpclient.HTTPError` will be
passed through, and ``raise_error=False`` will only
suppress errors that would be raised due to non-200
response codes.
"""
if path.lower().startswith(("http://", "https://")):
url = path
else:
url = self.get_url(path)
return self.io_loop.run_sync(
lambda: self.http_client.fetch(url, raise_error=raise_error, **kwargs),
timeout=get_async_test_timeout(),
)
def get_httpserver_options(self) -> Dict[str, Any]:
"""May be overridden by subclasses to return additional
keyword arguments for the server.
"""
return {}
def get_http_port(self) -> int:
"""Returns the port used by the server.
A new port is chosen for each test.
"""
return self.__port
def get_protocol(self) -> str:
return "http"
def get_url(self, path: str) -> str:
"""Returns an absolute url for the given path on the test server."""
return f"{self.get_protocol()}://127.0.0.1:{self.get_http_port()}{path}"
def tearDown(self) -> None:
self.http_server.stop()
self.io_loop.run_sync(
self.http_server.close_all_connections, timeout=get_async_test_timeout()
)
self.http_client.close()
del self.http_server
del self._app
super().tearDown()
| AsyncHTTPTestCase |
python | RaRe-Technologies__gensim | gensim/models/fasttext.py | {
"start": 38583,
"end": 55108
} | class ____(KeyedVectors):
def __init__(self, vector_size, min_n, max_n, bucket, count=0, dtype=REAL):
"""Vectors and vocab for :class:`~gensim.models.fasttext.FastText`.
Implements significant parts of the FastText algorithm. For example,
the :func:`word_vec` calculates vectors for out-of-vocabulary (OOV)
entities. FastText achieves this by keeping vectors for ngrams:
adding the vectors for the ngrams of an entity yields the vector for the
entity.
Similar to a hashmap, this class keeps a fixed number of buckets, and
maps all ngrams to buckets using a hash function.
Parameters
----------
vector_size : int
The dimensionality of all vectors.
min_n : int
The minimum number of characters in an ngram
max_n : int
The maximum number of characters in an ngram
bucket : int
The number of buckets.
count : int, optional
If provided, vectors will be pre-allocated for at least this many vectors. (Otherwise
they can be added later.)
dtype : type, optional
Vector dimensions will default to `np.float32` (AKA `REAL` in some Gensim code) unless
another type is provided here.
Attributes
----------
vectors_vocab : np.array
Each row corresponds to a vector for an entity in the vocabulary.
Columns correspond to vector dimensions. When embedded in a full
FastText model, these are the full-word-token vectors updated
by training, whereas the inherited vectors are the actual per-word
vectors synthesized from the full-word-token and all subword (ngram)
vectors.
vectors_ngrams : np.array
A vector for each ngram across all entities in the vocabulary.
Each row is a vector that corresponds to a bucket.
Columns correspond to vector dimensions.
buckets_word : list of np.array
For each key (by its index), report bucket slots their subwords map to.
"""
super(FastTextKeyedVectors, self).__init__(vector_size=vector_size, count=count, dtype=dtype)
self.min_n = min_n
self.max_n = max_n
self.bucket = bucket # count of buckets, fka num_ngram_vectors
self.buckets_word = None # precalculated cache of buckets for each word's ngrams
self.vectors_vocab = np.zeros((count, vector_size), dtype=dtype) # fka (formerly known as) syn0_vocab
self.vectors_ngrams = None # must be initialized later
self.compatible_hash = True
@classmethod
def load(cls, fname_or_handle, **kwargs):
"""Load a previously saved `FastTextKeyedVectors` model.
Parameters
----------
fname : str
Path to the saved file.
Returns
-------
:class:`~gensim.models.fasttext.FastTextKeyedVectors`
Loaded model.
See Also
--------
:meth:`~gensim.models.fasttext.FastTextKeyedVectors.save`
Save :class:`~gensim.models.fasttext.FastTextKeyedVectors` model.
"""
return super(FastTextKeyedVectors, cls).load(fname_or_handle, **kwargs)
def _load_specials(self, *args, **kwargs):
"""Handle special requirements of `.load()` protocol, usually up-converting older versions."""
super(FastTextKeyedVectors, self)._load_specials(*args, **kwargs)
if not isinstance(self, FastTextKeyedVectors):
raise TypeError("Loaded object of type %s, not expected FastTextKeyedVectors" % type(self))
if not hasattr(self, 'compatible_hash') or self.compatible_hash is False:
raise TypeError(
"Pre-gensim-3.8.x fastText models with nonstandard hashing are no longer compatible. "
"Loading your old model into gensim-3.8.3 & re-saving may create a model compatible with gensim 4.x."
)
if not hasattr(self, 'vectors_vocab_lockf') and hasattr(self, 'vectors_vocab'):
self.vectors_vocab_lockf = ones(1, dtype=REAL)
if not hasattr(self, 'vectors_ngrams_lockf') and hasattr(self, 'vectors_ngrams'):
self.vectors_ngrams_lockf = ones(1, dtype=REAL)
# fixup mistakenly overdimensioned gensim-3.x lockf arrays
if len(self.vectors_vocab_lockf.shape) > 1:
self.vectors_vocab_lockf = ones(1, dtype=REAL)
if len(self.vectors_ngrams_lockf.shape) > 1:
self.vectors_ngrams_lockf = ones(1, dtype=REAL)
if not hasattr(self, 'buckets_word') or not self.buckets_word:
self.recalc_char_ngram_buckets()
if not hasattr(self, 'vectors') or self.vectors is None:
self.adjust_vectors() # recompose full-word vectors
def __contains__(self, word):
"""Check if `word` or any character ngrams in `word` are present in the vocabulary.
A vector for the word is guaranteed to exist if current method returns True.
Parameters
----------
word : str
Input word.
Returns
-------
bool
True if `word` or any character ngrams in `word` are present in the vocabulary, False otherwise.
Note
----
This method **always** returns True with char ngrams, because of the way FastText works.
If you want to check if a word is an in-vocabulary term, use this instead:
.. pycon:
>>> from gensim.test.utils import datapath
>>> from gensim.models import FastText
>>> cap_path = datapath("crime-and-punishment.bin")
>>> model = FastText.load_fasttext_format(cap_path, full_model=False)
>>> 'steamtrain' in model.wv.key_to_index # If False, is an OOV term
False
"""
if self.bucket == 0: # check for the case when char ngrams not used
return word in self.key_to_index
else:
return True
def save(self, *args, **kwargs):
"""Save object.
Parameters
----------
fname : str
Path to the output file.
See Also
--------
:meth:`~gensim.models.fasttext.FastTextKeyedVectors.load`
Load object.
"""
super(FastTextKeyedVectors, self).save(*args, **kwargs)
def _save_specials(self, fname, separately, sep_limit, ignore, pickle_protocol, compress, subname):
"""Arrange any special handling for the gensim.utils.SaveLoad protocol"""
# don't save properties that are merely calculated from others
ignore = set(ignore).union(['buckets_word', 'vectors', ])
return super(FastTextKeyedVectors, self)._save_specials(
fname, separately, sep_limit, ignore, pickle_protocol, compress, subname)
def get_vector(self, word, norm=False):
"""Get `word` representations in vector space, as a 1D numpy array.
Parameters
----------
word : str
Input word.
norm : bool, optional
If True, resulting vector will be L2-normalized (unit Euclidean length).
Returns
-------
numpy.ndarray
Vector representation of `word`.
Raises
------
KeyError
If word and all its ngrams not in vocabulary.
"""
if word in self.key_to_index:
return super(FastTextKeyedVectors, self).get_vector(word, norm=norm)
elif self.bucket == 0:
raise KeyError('cannot calculate vector for OOV word without ngrams')
else:
word_vec = np.zeros(self.vectors_ngrams.shape[1], dtype=np.float32)
ngram_weights = self.vectors_ngrams
ngram_hashes = ft_ngram_hashes(word, self.min_n, self.max_n, self.bucket)
if len(ngram_hashes) == 0:
#
# If it is impossible to extract _any_ ngrams from the input
# word, then the best we can do is return a vector that points
# to the origin. The reference FB implementation does this,
# too.
#
# https://github.com/RaRe-Technologies/gensim/issues/2402
#
logger.warning('could not extract any ngrams from %r, returning origin vector', word)
return word_vec
for nh in ngram_hashes:
word_vec += ngram_weights[nh]
if norm:
return word_vec / np.linalg.norm(word_vec)
else:
return word_vec / len(ngram_hashes)
def get_sentence_vector(self, sentence):
"""Get a single 1-D vector representation for a given `sentence`.
This function is workalike of the official fasttext's get_sentence_vector().
Parameters
----------
sentence : list of (str or int)
list of words specified by string or int ids.
Returns
-------
numpy.ndarray
1-D numpy array representation of the `sentence`.
"""
return super(FastTextKeyedVectors, self).get_mean_vector(sentence)
def resize_vectors(self, seed=0):
"""Make underlying vectors match 'index_to_key' size; random-initialize any new rows."""
vocab_shape = (len(self.index_to_key), self.vector_size)
# Unlike in superclass, 'vectors_vocab' array is primary with 'vectors' derived from it & ngrams
self.vectors_vocab = prep_vectors(vocab_shape, prior_vectors=self.vectors_vocab, seed=seed)
ngrams_shape = (self.bucket, self.vector_size)
self.vectors_ngrams = prep_vectors(ngrams_shape, prior_vectors=self.vectors_ngrams, seed=seed + 1)
self.allocate_vecattrs()
self.norms = None
self.recalc_char_ngram_buckets() # ensure new words have precalc buckets
self.adjust_vectors() # ensure `vectors` filled as well (though may be nonsense pre-training)
def init_post_load(self, fb_vectors):
"""Perform initialization after loading a native Facebook model.
Expects that the vocabulary (self.key_to_index) has already been initialized.
Parameters
----------
fb_vectors : np.array
A matrix containing vectors for all the entities, including words
and ngrams. This comes directly from the binary model.
The order of the vectors must correspond to the indices in
the vocabulary.
"""
vocab_words = len(self)
assert fb_vectors.shape[0] == vocab_words + self.bucket, 'unexpected number of vectors'
assert fb_vectors.shape[1] == self.vector_size, 'unexpected vector dimensionality'
#
# The incoming vectors contain vectors for both words AND
# ngrams. We split them into two separate matrices, because our
# implementation treats them differently.
#
self.vectors_vocab = np.array(fb_vectors[:vocab_words, :])
self.vectors_ngrams = np.array(fb_vectors[vocab_words:, :])
self.recalc_char_ngram_buckets()
self.adjust_vectors() # calculate composite full-word vectors
def adjust_vectors(self):
"""Adjust the vectors for words in the vocabulary.
The adjustment composes the trained full-word-token vectors with
the vectors of the subword ngrams, matching the Facebook reference
implementation behavior.
"""
if self.bucket == 0:
self.vectors = self.vectors_vocab # no ngrams influence
return
self.vectors = self.vectors_vocab[:].copy()
for i, _ in enumerate(self.index_to_key):
ngram_buckets = self.buckets_word[i]
for nh in ngram_buckets:
self.vectors[i] += self.vectors_ngrams[nh]
self.vectors[i] /= len(ngram_buckets) + 1
def recalc_char_ngram_buckets(self):
"""
Scan the vocabulary, calculate ngrams and their hashes, and cache the list of ngrams for each known word.
"""
# TODO: evaluate if precaching even necessary, compared to recalculating as needed.
if self.bucket == 0:
self.buckets_word = [np.array([], dtype=np.uint32)] * len(self.index_to_key)
return
self.buckets_word = [None] * len(self.index_to_key)
for i, word in enumerate(self.index_to_key):
self.buckets_word[i] = np.array(
ft_ngram_hashes(word, self.min_n, self.max_n, self.bucket),
dtype=np.uint32,
)
def _pad_random(m, new_rows, rand):
"""Pad a matrix with additional rows filled with random values."""
_, columns = m.shape
low, high = -1.0 / columns, 1.0 / columns
suffix = rand.uniform(low, high, (new_rows, columns)).astype(REAL)
return vstack([m, suffix])
def _unpack(m, num_rows, hash2index, seed=1, fill=None):
"""Restore the array to its natural shape, undoing the optimization.
A packed matrix contains contiguous vectors for ngrams, as well as a hashmap.
The hash map maps the ngram hash to its index in the packed matrix.
To unpack the matrix, we need to do several things:
1. Restore the matrix to its "natural" shape, where the number of rows
equals the number of buckets.
2. Rearrange the existing rows such that the hashmap becomes the identity
function and is thus redundant.
3. Fill the new rows with random values.
Parameters
----------
m : np.ndarray
The matrix to restore.
num_rows : int
The number of rows that this array should have.
hash2index : dict
the product of the optimization we are undoing.
seed : float, optional
The seed for the PRNG. Will be used to initialize new rows.
fill : float or array or None, optional
Value for new rows. If None (the default), randomly initialize.
Returns
-------
np.array
The unpacked matrix.
Notes
-----
The unpacked matrix will reference some rows in the input matrix to save memory.
Throw away the old matrix after calling this function, or use np.copy.
"""
orig_rows, *more_dims = m.shape
if orig_rows == num_rows:
#
# Nothing to do.
#
return m
assert num_rows > orig_rows
if fill is None:
rand_obj = np.random
rand_obj.seed(seed)
#
# Rows at the top of the matrix (the first orig_rows) will contain "packed" learned vectors.
# Rows at the bottom of the matrix will be "free": initialized to random values.
#
m = _pad_random(m, num_rows - orig_rows, rand_obj)
else:
m = np.concatenate([m, [fill] * (num_rows - orig_rows)])
#
# Swap rows to transform hash2index into the identify function.
# There are two kinds of swaps.
# First, rearrange the rows that belong entirely within the original matrix dimensions.
# Second, swap out rows from the original matrix dimensions, replacing them with
# randomly initialized values.
#
# N.B. We only do the swap in one direction, because doing it in both directions
# nullifies the effect.
#
swap = {h: i for (h, i) in hash2index.items() if h < i < orig_rows}
swap.update({h: i for (h, i) in hash2index.items() if h >= orig_rows})
for h, i in swap.items():
assert h != i
m[[h, i]] = m[[i, h]] # swap rows i and h
return m
#
# UTF-8 bytes that begin with 10 are subsequent bytes of a multi-byte sequence,
# as opposed to a new character.
#
_MB_MASK = 0xC0
_MB_START = 0x80
def _is_utf8_continue(b):
return b & _MB_MASK == _MB_START
def ft_ngram_hashes(word, minn, maxn, num_buckets):
"""Calculate the ngrams of the word and hash them.
Parameters
----------
word : str
The word to calculate ngram hashes for.
minn : int
Minimum ngram length
maxn : int
Maximum ngram length
num_buckets : int
The number of buckets
Returns
-------
A list of hashes (integers), one per each detected ngram.
"""
encoded_ngrams = compute_ngrams_bytes(word, minn, maxn)
hashes = [ft_hash_bytes(n) % num_buckets for n in encoded_ngrams]
return hashes
# BACKWARD COMPATIBILITY FOR OLDER PICKLES
from gensim.models import keyedvectors # noqa: E402
keyedvectors.FastTextKeyedVectors = FastTextKeyedVectors
| FastTextKeyedVectors |
python | openai__gym | gym/vector/sync_vector_env.py | {
"start": 357,
"end": 8760
} | class ____(VectorEnv):
"""Vectorized environment that serially runs multiple environments.
Example::
>>> import gym
>>> env = gym.vector.SyncVectorEnv([
... lambda: gym.make("Pendulum-v0", g=9.81),
... lambda: gym.make("Pendulum-v0", g=1.62)
... ])
>>> env.reset()
array([[-0.8286432 , 0.5597771 , 0.90249056],
[-0.85009176, 0.5266346 , 0.60007906]], dtype=float32)
"""
def __init__(
self,
env_fns: Iterator[Callable[[], Env]],
observation_space: Space = None,
action_space: Space = None,
copy: bool = True,
):
"""Vectorized environment that serially runs multiple environments.
Args:
env_fns: iterable of callable functions that create the environments.
observation_space: Observation space of a single environment. If ``None``,
then the observation space of the first environment is taken.
action_space: Action space of a single environment. If ``None``,
then the action space of the first environment is taken.
copy: If ``True``, then the :meth:`reset` and :meth:`step` methods return a copy of the observations.
Raises:
RuntimeError: If the observation space of some sub-environment does not match observation_space
(or, by default, the observation space of the first sub-environment).
"""
self.env_fns = env_fns
self.envs = [env_fn() for env_fn in env_fns]
self.copy = copy
self.metadata = self.envs[0].metadata
if (observation_space is None) or (action_space is None):
observation_space = observation_space or self.envs[0].observation_space
action_space = action_space or self.envs[0].action_space
super().__init__(
num_envs=len(self.envs),
observation_space=observation_space,
action_space=action_space,
)
self._check_spaces()
self.observations = create_empty_array(
self.single_observation_space, n=self.num_envs, fn=np.zeros
)
self._rewards = np.zeros((self.num_envs,), dtype=np.float64)
self._terminateds = np.zeros((self.num_envs,), dtype=np.bool_)
self._truncateds = np.zeros((self.num_envs,), dtype=np.bool_)
self._actions = None
def seed(self, seed: Optional[Union[int, Sequence[int]]] = None):
"""Sets the seed in all sub-environments.
Args:
seed: The seed
"""
super().seed(seed=seed)
if seed is None:
seed = [None for _ in range(self.num_envs)]
if isinstance(seed, int):
seed = [seed + i for i in range(self.num_envs)]
assert len(seed) == self.num_envs
for env, single_seed in zip(self.envs, seed):
env.seed(single_seed)
def reset_wait(
self,
seed: Optional[Union[int, List[int]]] = None,
options: Optional[dict] = None,
):
"""Waits for the calls triggered by :meth:`reset_async` to finish and returns the results.
Args:
seed: The reset environment seed
options: Option information for the environment reset
Returns:
The reset observation of the environment and reset information
"""
if seed is None:
seed = [None for _ in range(self.num_envs)]
if isinstance(seed, int):
seed = [seed + i for i in range(self.num_envs)]
assert len(seed) == self.num_envs
self._terminateds[:] = False
self._truncateds[:] = False
observations = []
infos = {}
for i, (env, single_seed) in enumerate(zip(self.envs, seed)):
kwargs = {}
if single_seed is not None:
kwargs["seed"] = single_seed
if options is not None:
kwargs["options"] = options
observation, info = env.reset(**kwargs)
observations.append(observation)
infos = self._add_info(infos, info, i)
self.observations = concatenate(
self.single_observation_space, observations, self.observations
)
return (deepcopy(self.observations) if self.copy else self.observations), infos
def step_async(self, actions):
"""Sets :attr:`_actions` for use by the :meth:`step_wait` by converting the ``actions`` to an iterable version."""
self._actions = iterate(self.action_space, actions)
def step_wait(self):
"""Steps through each of the environments returning the batched results.
Returns:
The batched environment step results
"""
observations, infos = [], {}
for i, (env, action) in enumerate(zip(self.envs, self._actions)):
(
observation,
self._rewards[i],
self._terminateds[i],
self._truncateds[i],
info,
) = env.step(action)
if self._terminateds[i] or self._truncateds[i]:
old_observation, old_info = observation, info
observation, info = env.reset()
info["final_observation"] = old_observation
info["final_info"] = old_info
observations.append(observation)
infos = self._add_info(infos, info, i)
self.observations = concatenate(
self.single_observation_space, observations, self.observations
)
return (
deepcopy(self.observations) if self.copy else self.observations,
np.copy(self._rewards),
np.copy(self._terminateds),
np.copy(self._truncateds),
infos,
)
def call(self, name, *args, **kwargs) -> tuple:
"""Calls the method with name and applies args and kwargs.
Args:
name: The method name
*args: The method args
**kwargs: The method kwargs
Returns:
Tuple of results
"""
results = []
for env in self.envs:
function = getattr(env, name)
if callable(function):
results.append(function(*args, **kwargs))
else:
results.append(function)
return tuple(results)
def set_attr(self, name: str, values: Union[list, tuple, Any]):
"""Sets an attribute of the sub-environments.
Args:
name: The property name to change
values: Values of the property to be set to. If ``values`` is a list or
tuple, then it corresponds to the values for each individual
environment, otherwise, a single value is set for all environments.
Raises:
ValueError: Values must be a list or tuple with length equal to the number of environments.
"""
if not isinstance(values, (list, tuple)):
values = [values for _ in range(self.num_envs)]
if len(values) != self.num_envs:
raise ValueError(
"Values must be a list or tuple with length equal to the "
f"number of environments. Got `{len(values)}` values for "
f"{self.num_envs} environments."
)
for env, value in zip(self.envs, values):
setattr(env, name, value)
def close_extras(self, **kwargs):
"""Close the environments."""
[env.close() for env in self.envs]
def _check_spaces(self) -> bool:
for env in self.envs:
if not (env.observation_space == self.single_observation_space):
raise RuntimeError(
"Some environments have an observation space different from "
f"`{self.single_observation_space}`. In order to batch observations, "
"the observation spaces from all environments must be equal."
)
if not (env.action_space == self.single_action_space):
raise RuntimeError(
"Some environments have an action space different from "
f"`{self.single_action_space}`. In order to batch actions, the "
"action spaces from all environments must be equal."
)
return True
| SyncVectorEnv |
python | crytic__slither | slither/solc_parsing/yul/parse_yul.py | {
"start": 6529,
"end": 7204
} | class ____: # pylint: disable=too-few-public-methods
__slots__ = ["_variable", "_root"]
def __init__(self, var: LocalVariable, root: YulScope, ast: Dict) -> None:
assert ast["nodeType"] == "YulTypedName"
self._variable = var
self._root = root
# start initializing the underlying variable
var.set_function(root.function)
var.set_offset(ast["src"], root.compilation_unit)
var.name = _name_to_yul_name(ast["name"], root.id)
var.set_type(ElementaryType("uint256"))
var.set_location("memory")
@property
def underlying(self) -> LocalVariable:
return self._variable
| YulLocalVariable |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_selection.py | {
"start": 48046,
"end": 48789
} | class ____(AssetSelection):
selected_key_substring: str
include_sources: bool
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetKey]:
base_set = (
asset_graph.get_all_asset_keys()
if self.include_sources
else asset_graph.materializable_asset_keys
)
return {key for key in base_set if self.selected_key_substring in key.to_user_string()}
def to_serializable_asset_selection(self, asset_graph: BaseAssetGraph) -> "AssetSelection":
return self
def to_selection_str(self) -> str:
return f'key_substring:"{self.selected_key_substring}"'
@whitelist_for_serdes
@record
| KeySubstringAssetSelection |
python | Pylons__pyramid | src/pyramid/path.py | {
"start": 3851,
"end": 7917
} | class ____(Resolver):
"""A class used to resolve an :term:`asset specification` to an
:term:`asset descriptor`.
.. versionadded:: 1.3
The constructor accepts a single argument named ``package`` which may be
any of:
- A fully qualified (not relative) dotted name to a module or package
- a Python module or package object
- The value ``None``
- The constant value :attr:`pyramid.path.CALLER_PACKAGE`.
The default value is :attr:`pyramid.path.CALLER_PACKAGE`.
The ``package`` is used when a relative asset specification is supplied
to the :meth:`~pyramid.path.AssetResolver.resolve` method. An asset
specification without a colon in it is treated as relative.
If ``package`` is ``None``, the resolver will
only be able to resolve fully qualified (not relative) asset
specifications. Any attempt to resolve a relative asset specification
will result in an :exc:`ValueError` exception.
If ``package`` is :attr:`pyramid.path.CALLER_PACKAGE`,
the resolver will treat relative asset specifications as
relative to the caller of the :meth:`~pyramid.path.AssetResolver.resolve`
method.
If ``package`` is a *module* or *module name* (as opposed to a package or
package name), its containing package is computed and this
package is used to derive the package name (all names are resolved relative
to packages, never to modules). For example, if the ``package`` argument
to this type was passed the string ``xml.dom.expatbuilder``, and
``template.pt`` is supplied to the
:meth:`~pyramid.path.AssetResolver.resolve` method, the resulting absolute
asset spec would be ``xml.minidom:template.pt``, because
``xml.dom.expatbuilder`` is a module object, not a package object.
If ``package`` is a *package* or *package name* (as opposed to a module or
module name), this package will be used to compute relative
asset specifications. For example, if the ``package`` argument to this
type was passed the string ``xml.dom``, and ``template.pt`` is supplied
to the :meth:`~pyramid.path.AssetResolver.resolve` method, the resulting
absolute asset spec would be ``xml.minidom:template.pt``.
"""
def resolve(self, spec):
"""
Resolve the asset spec named as ``spec`` to an object that has the
attributes and methods described in
:class:`pyramid.interfaces.IAssetDescriptor`.
If ``spec`` is an absolute filename
(e.g. ``/path/to/myproject/templates/foo.pt``) or an absolute asset
spec (e.g. ``myproject:templates.foo.pt``), an asset descriptor is
returned without taking into account the ``package`` passed to this
class' constructor.
If ``spec`` is a *relative* asset specification (an asset
specification without a ``:`` in it, e.g. ``templates/foo.pt``), the
``package`` argument of the constructor is used as the package
portion of the asset spec. For example:
.. code-block:: python
a = AssetResolver('myproject')
resolver = a.resolve('templates/foo.pt')
print(resolver.abspath())
# -> /path/to/myproject/templates/foo.pt
If the AssetResolver is constructed without a ``package`` argument of
``None``, and a relative asset specification is passed to
``resolve``, an :exc:`ValueError` exception is raised.
"""
if os.path.isabs(spec):
return FSAssetDescriptor(spec)
path = spec
if ':' in path:
package_name, path = spec.split(':', 1)
else:
if self.package is CALLER_PACKAGE:
package_name = caller_package().__name__
else:
package_name = getattr(self.package, '__name__', None)
if package_name is None:
raise ValueError(
f'relative spec {spec!r} irresolveable without package'
)
return PkgResourcesAssetDescriptor(package_name, path)
| AssetResolver |
python | PrefectHQ__prefect | tests/utilities/test_callables.py | {
"start": 25276,
"end": 49831
} | class ____:
def test_function_not_found(self, tmp_path: Path):
source_code = dedent(
"""
def f():
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
with pytest.raises(ValueError):
callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:g")
def test_simple_function_with_no_arguments(self, tmp_path: Path):
source_code = dedent(
"""
def f():
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"properties": {},
"title": "Parameters",
"type": "object",
"definitions": {},
}
def test_function_with_pydantic_base_model_collisions(self, tmp_path: Path):
source_code = dedent(
"""
def f(
json,
copy,
parse_obj,
parse_raw,
parse_file,
from_orm,
schema,
schema_json,
construct,
validate,
foo,
):
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"foo": {"title": "foo", "position": 10},
"json": {"title": "json", "position": 0},
"copy": {"title": "copy", "position": 1},
"parse_obj": {"title": "parse_obj", "position": 2},
"parse_raw": {"title": "parse_raw", "position": 3},
"parse_file": {"title": "parse_file", "position": 4},
"from_orm": {"title": "from_orm", "position": 5},
"schema": {"title": "schema", "position": 6},
"schema_json": {"title": "schema_json", "position": 7},
"construct": {"title": "construct", "position": 8},
"validate": {"title": "validate", "position": 9},
},
"required": [
"json",
"copy",
"parse_obj",
"parse_raw",
"parse_file",
"from_orm",
"schema",
"schema_json",
"construct",
"validate",
"foo",
],
"definitions": {},
}
def test_function_with_one_required_argument(self, tmp_path: Path):
source_code = dedent(
"""
def f(x):
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {"x": {"title": "x", "position": 0}},
"required": ["x"],
"definitions": {},
}
def test_function_with_one_optional_argument(self, tmp_path: Path):
source_code = dedent(
"""
def f(x=42):
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {"x": {"title": "x", "default": 42, "position": 0}},
"definitions": {},
}
def test_function_with_one_optional_annotated_argument(self, tmp_path: Path):
source_code = dedent(
"""
def f(x: int = 42):
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"x": {"title": "x", "default": 42, "type": "integer", "position": 0}
},
"definitions": {},
}
def test_function_with_two_arguments(self, tmp_path: Path):
source_code = dedent(
"""
def f(x: int, y: float = 5.0):
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"x": {"title": "x", "type": "integer", "position": 0},
"y": {"title": "y", "default": 5.0, "type": "number", "position": 1},
},
"required": ["x"],
"definitions": {},
}
def test_function_with_datetime_arguments(self, tmp_path: Path):
source_code = dedent(
"""
import datetime
from prefect.types import DateTime
def f(
x: datetime.datetime,
y: DateTime = DateTime(2025, 1, 1, tzinfo=datetime.timezone.utc),
z: datetime.timedelta = datetime.timedelta(seconds=5),
):
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
expected_schema = {
"title": "Parameters",
"type": "object",
"properties": {
"x": {
"format": "date-time",
"position": 0,
"title": "x",
"type": "string",
},
"y": {
"default": "2025-01-01T00:00:00Z",
"format": "date-time",
"position": 1,
"title": "y",
"type": "string",
},
"z": {
"default": "PT5S",
"format": "duration",
"position": 2,
"title": "z",
"type": "string",
},
},
"required": ["x"],
"definitions": {},
}
assert schema.model_dump_for_openapi() == expected_schema
def test_function_with_enum_argument(self, tmp_path: Path):
class Color(Enum):
RED = "RED"
GREEN = "GREEN"
BLUE = "BLUE"
source_code = dedent(
"""
from enum import Enum
class Color(Enum):
RED = "RED"
GREEN = "GREEN"
BLUE = "BLUE"
def f(x: Color = Color.RED):
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
expected_schema = {
"title": "Parameters",
"type": "object",
"properties": {
"x": {
"$ref": "#/definitions/Color",
"default": "RED",
"position": 0,
"title": "x",
}
},
"definitions": {
"Color": {
"enum": ["RED", "GREEN", "BLUE"],
"title": "Color",
"type": "string",
}
},
}
assert schema.model_dump_for_openapi() == expected_schema
def test_function_with_generic_arguments(self, tmp_path: Path):
source_code = dedent(
"""
from typing import List, Dict, Any, Tuple, Union
def f(
a: List[str],
b: Dict[str, Any],
c: Any,
d: Tuple[int, float],
e: Union[str, bytes, int],
):
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
expected_schema = {
"title": "Parameters",
"type": "object",
"properties": {
"a": {
"items": {"type": "string"},
"position": 0,
"title": "a",
"type": "array",
},
"b": {
"additionalProperties": True,
"position": 1,
"title": "b",
"type": "object",
},
"c": {"position": 2, "title": "c"},
"d": {
"maxItems": 2,
"minItems": 2,
"position": 3,
"prefixItems": [{"type": "integer"}, {"type": "number"}],
"title": "d",
"type": "array",
},
"e": {
"anyOf": [
{"type": "string"},
{"format": "binary", "type": "string"},
{"type": "integer"},
],
"position": 4,
"title": "e",
},
},
"required": ["a", "b", "c", "d", "e"],
"definitions": {},
}
assert schema.model_dump_for_openapi() == expected_schema
def test_function_with_user_defined_type(self, tmp_path: Path):
source_code = dedent(
"""
class Foo:
y: int
def f(x: Foo):
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {"x": {"title": "x", "position": 0}},
"required": ["x"],
"definitions": {},
}
def test_function_with_user_defined_pydantic_model(self, tmp_path: Path):
source_code = dedent(
"""
from pydantic import BaseModel
class Foo(BaseModel):
y: int
z: str
def f(x: Foo):
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"definitions": {
"Foo": {
"properties": {
"y": {"title": "Y", "type": "integer"},
"z": {"title": "Z", "type": "string"},
},
"required": ["y", "z"],
"title": "Foo",
"type": "object",
}
},
"properties": {
"x": {
"$ref": "#/definitions/Foo",
"title": "x",
"position": 0,
}
},
"required": ["x"],
"title": "Parameters",
"type": "object",
}
def test_function_with_pydantic_model_default_across_v1_and_v2(
self, tmp_path: Path
):
source_code = dedent(
"""
from pydantic import BaseModel
class Foo(BaseModel):
bar: str
def f(foo: Foo = Foo(bar="baz")):
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"foo": {
"$ref": "#/definitions/Foo",
"default": {"bar": "baz"},
"position": 0,
"title": "foo",
}
},
"definitions": {
"Foo": {
"properties": {"bar": {"title": "Bar", "type": "string"}},
"required": ["bar"],
"title": "Foo",
"type": "object",
}
},
}
def test_function_with_complex_args_across_v1_and_v2(self, tmp_path: Path):
source_code = dedent(
"""
from datetime import timezone
from pydantic import BaseModel
from enum import Enum
from prefect.types._datetime import DateTime, Date, Duration
class Foo(BaseModel):
bar: str
class Color(Enum):
RED = "RED"
GREEN = "GREEN"
BLUE = "BLUE"
def f(
a: int,
s: list[None],
m: Foo,
i: int = 0,
x: float = 1.0,
model: Foo = Foo(bar="bar"),
pdt: DateTime = DateTime(2025, 1, 1, tzinfo=timezone.utc),
pdate: Date = Date(2025, 1, 1),
pduration: Duration = Duration(seconds=5),
c: Color = Color.BLUE,
):
pass
"""
)
datetime_schema = {
"title": "pdt",
"default": "2025-01-01T00:00:00+00:00",
"position": 6,
"type": "string",
"format": "date-time",
}
duration_schema = {
"title": "pduration",
"default": 5.0,
"position": 8,
"type": "number",
"format": "time-delta",
}
enum_schema = {
"enum": ["RED", "GREEN", "BLUE"],
"title": "Color",
"type": "string",
"description": "An enumeration.",
}
# these overrides represent changes in how pydantic generates schemas in v2
datetime_schema["default"] = "2025-01-01T00:00:00Z"
duration_schema["default"] = "PT5S"
duration_schema["type"] = "string"
duration_schema["format"] = "duration"
enum_schema.pop("description")
schema = tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"a": {"position": 0, "title": "a", "type": "integer"},
"s": {
"items": {"type": "null"},
"position": 1,
"title": "s",
"type": "array",
},
"m": {
"$ref": "#/definitions/Foo",
"position": 2,
"title": "m",
},
"i": {"default": 0, "position": 3, "title": "i", "type": "integer"},
"x": {"default": 1.0, "position": 4, "title": "x", "type": "number"},
"model": {
"$ref": "#/definitions/Foo",
"default": {"bar": "bar"},
"position": 5,
"title": "model",
},
"pdt": datetime_schema,
"pdate": {
"title": "pdate",
"default": "2025-01-01",
"position": 7,
"type": "string",
"format": "date",
},
"pduration": duration_schema,
"c": {
"title": "c",
"default": "BLUE",
"position": 9,
"$ref": "#/definitions/Color",
},
},
"required": ["a", "s", "m"],
"definitions": {
"Foo": {
"properties": {"bar": {"title": "Bar", "type": "string"}},
"required": ["bar"],
"title": "Foo",
"type": "object",
},
"Color": enum_schema,
},
}
def test_function_with_secretstr(self, tmp_path: Path):
source_code = dedent(
"""
from pydantic import SecretStr
def f(x: SecretStr):
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"x": {
"title": "x",
"position": 0,
"format": "password",
"type": "string",
"writeOnly": True,
},
},
"required": ["x"],
"definitions": {},
}
@pytest.mark.skipif(
sys.version_info >= (3, 14),
reason="pydantic v1 is not supported in Python 3.14+",
)
def test_function_with_v1_secretstr_from_compat_module(self, tmp_path: Path):
source_code = dedent(
"""
import pydantic.v1 as pydantic
def f(x: pydantic.SecretStr):
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"x": {
"title": "x",
"position": 0,
"type": "string",
"writeOnly": True,
"format": "password",
},
},
"required": ["x"],
"definitions": {},
}
def test_flow_with_args_docstring(self, tmp_path: Path):
source_code = dedent(
'''
def f(x):
"""Function f.
Args:
x: required argument x
"""
'''
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"x": {"title": "x", "description": "required argument x", "position": 0}
},
"required": ["x"],
"definitions": {},
}
def test_flow_without_args_docstring(self, tmp_path: Path):
source_code = dedent(
'''
def f(x):
"""Function f."""
'''
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {"x": {"title": "x", "position": 0}},
"required": ["x"],
"definitions": {},
}
def test_flow_with_complex_args_docstring(self, tmp_path: Path):
source_code = dedent(
'''
def f(x, y):
"""Function f.
Second line of docstring.
Args:
x: required argument x
y (str): required typed argument y
with second line
Returns:
None: nothing
"""
'''
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"x": {
"title": "x",
"description": "required argument x",
"position": 0,
},
"y": {
"title": "y",
"description": "required typed argument y\nwith second line",
"position": 1,
},
},
"required": ["x", "y"],
"definitions": {},
}
def test_does_not_raise_when_missing_dependencies(self, tmp_path: Path):
source_code = dedent(
"""
import bipitty_boopity
def f(x):
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {"x": {"title": "x", "position": 0}},
"required": ["x"],
"definitions": {},
}
def test_handles_dynamically_created_models(self, tmp_path: Path):
source_code = dedent(
"""
from pydantic import BaseModel, create_model, Field
def get_model() -> BaseModel:
return create_model(
"MyModel",
param=(
int,
Field(
title="param",
default=1,
),
),
)
MyModel = get_model()
def f(
param: MyModel,
) -> None:
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"param": {
"$ref": "#/definitions/MyModel",
"position": 0,
"title": "param",
}
},
"required": ["param"],
"definitions": {
"MyModel": {
"properties": {
"param": {
"default": 1,
"title": "param",
"type": "integer",
}
},
"title": "MyModel",
"type": "object",
}
},
}
def test_function_with_kwargs_only(self, tmp_path: Path):
source_code = dedent(
"""
def f(
*,
x: int = 42,
):
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"properties": {
"x": {"title": "x", "position": 0, "type": "integer", "default": 42}
},
"title": "Parameters",
"type": "object",
"definitions": {},
}
def test_function_with_positional_only_args(self, tmp_path: Path):
source_code = dedent(
"""
def f(x=1, /, y=2, z=3):
pass
"""
)
tmp_path.joinpath("test.py").write_text(source_code)
schema = callables.parameter_schema_from_entrypoint(f"{tmp_path}/test.py:f")
assert schema.model_dump_for_openapi() == {
"properties": {
"x": {"title": "x", "position": 0, "default": 1},
"y": {"title": "y", "position": 1, "default": 2},
"z": {"title": "z", "position": 2, "default": 3},
},
"title": "Parameters",
"type": "object",
"definitions": {},
}
| TestEntrypointToSchema |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 67185,
"end": 67639
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
self.conv2 = torch.nn.Conv2d(5, 5, 1, bias=False).to(dtype=torch.float)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
def get_example_inputs(self) -> tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
| TwoLayerConvModel |
python | apache__airflow | providers/google/tests/unit/google/cloud/transfers/test_gcs_to_bigquery.py | {
"start": 3253,
"end": 69534
} | class ____:
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_max_value_external_table_should_execute_successfully(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
hook.return_value.get_job.return_value.result.return_value = ("1",)
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
write_disposition=WRITE_DISPOSITION,
schema_fields=SCHEMA_FIELDS,
max_id_key=MAX_ID_KEY,
external_table=True,
project_id=JOB_PROJECT_ID,
)
result = operator.execute(context=MagicMock())
assert result == "1"
hook.return_value.create_table.assert_called_once_with(
exists_ok=True,
location=None,
project_id=JOB_PROJECT_ID,
dataset_id=DATASET,
table_id=TABLE,
table_resource={
"tableReference": {"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
"labels": {},
"externalDataConfiguration": {
"autodetect": True,
"sourceFormat": "CSV",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"compression": "NONE",
"ignoreUnknownValues": False,
"csvOptions": {
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"allowJaggedRows": False,
"encoding": "UTF-8",
},
"schema": {"fields": SCHEMA_FIELDS},
},
},
)
hook.return_value.insert_job.assert_called_once_with(
configuration={
"query": {
"query": f"SELECT MAX({MAX_ID_KEY}) AS max_value FROM {TEST_EXPLICIT_DEST}",
"useLegacySql": False,
"schemaUpdateOptions": [],
}
},
project_id=JOB_PROJECT_ID,
)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_external_table_explicitly_passes_dataset_and_table_ids(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
def _validate_create_table(**kwargs):
assert kwargs["dataset_id"] == DATASET
assert kwargs["table_id"] == TABLE
hook.return_value.create_table.side_effect = _validate_create_table
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
schema_fields=SCHEMA_FIELDS,
external_table=True,
project_id=JOB_PROJECT_ID,
)
operator.execute(context=MagicMock())
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_max_value_without_external_table_should_execute_successfully(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
hook.return_value.get_job.return_value.result.return_value = ("1",)
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
schema_fields=SCHEMA_FIELDS,
max_id_key=MAX_ID_KEY,
write_disposition=WRITE_DISPOSITION,
external_table=False,
project_id=JOB_PROJECT_ID,
)
result = operator.execute(context=MagicMock())
assert result == "1"
calls = [
call(
configuration={
"load": {
"autodetect": True,
"createDisposition": "CREATE_IF_NEEDED",
"destinationTable": {"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
"sourceFormat": "CSV",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"writeDisposition": WRITE_DISPOSITION,
"ignoreUnknownValues": False,
"schema": {"fields": SCHEMA_FIELDS},
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
}
},
job_id=REAL_JOB_ID,
location=None,
nowait=True,
project_id=JOB_PROJECT_ID,
retry=DEFAULT_RETRY,
timeout=None,
),
call(
configuration={
"query": {
"query": f"SELECT MAX({MAX_ID_KEY}) AS max_value FROM {TEST_EXPLICIT_DEST}",
"useLegacySql": False,
"schemaUpdateOptions": [],
}
},
project_id=JOB_PROJECT_ID,
),
]
hook.return_value.insert_job.assert_has_calls(calls)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_two_partitionings_should_fail(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
with pytest.raises(
ValueError, match=r"Only one of time_partitioning or range_partitioning can be set."
):
GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
schema_fields=SCHEMA_FIELDS,
max_id_key=MAX_ID_KEY,
write_disposition=WRITE_DISPOSITION,
external_table=False,
project_id=JOB_PROJECT_ID,
time_partitioning={"field": "created", "type": "DAY"},
range_partitioning={"field": "grade", "range": {"start": 0, "end": 100, "interval": 20}},
)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_max_value_should_throw_ex_when_query_returns_no_rows(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
schema_fields=SCHEMA_FIELDS,
max_id_key=MAX_ID_KEY,
write_disposition=WRITE_DISPOSITION,
external_table=False,
project_id=JOB_PROJECT_ID,
)
with pytest.raises(RuntimeError, match=r"returned no rows!"):
operator.execute(context=MagicMock())
calls = [
call(
configuration={
"load": {
"autodetect": True,
"createDisposition": "CREATE_IF_NEEDED",
"destinationTable": {"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
"sourceFormat": "CSV",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"writeDisposition": "WRITE_TRUNCATE",
"ignoreUnknownValues": False,
"schema": {"fields": SCHEMA_FIELDS},
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
}
},
job_id=REAL_JOB_ID,
location=None,
nowait=True,
project_id=JOB_PROJECT_ID,
retry=DEFAULT_RETRY,
timeout=None,
),
call(
configuration={
"query": {
"query": f"SELECT MAX({MAX_ID_KEY}) AS max_value FROM {TEST_EXPLICIT_DEST}",
"useLegacySql": False,
"schemaUpdateOptions": [],
}
},
project_id=JOB_PROJECT_ID,
),
]
hook.return_value.insert_job.assert_has_calls(calls)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_labels_external_table_should_execute_successfully(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
schema_fields=SCHEMA_FIELDS,
write_disposition=WRITE_DISPOSITION,
external_table=True,
labels=LABELS,
project_id=JOB_PROJECT_ID,
)
operator.execute(context=MagicMock())
hook.return_value.create_table.assert_called_once_with(
exists_ok=True,
location=None,
project_id=JOB_PROJECT_ID,
dataset_id=DATASET,
table_id=TABLE,
table_resource={
"tableReference": {"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
"labels": LABELS,
"externalDataConfiguration": {
"autodetect": True,
"sourceFormat": "CSV",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"compression": "NONE",
"ignoreUnknownValues": False,
"csvOptions": {
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"allowJaggedRows": False,
"encoding": "UTF-8",
},
"schema": {"fields": SCHEMA_FIELDS},
},
},
)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_labels_without_external_table_should_execute_successfully(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
write_disposition=WRITE_DISPOSITION,
schema_fields=SCHEMA_FIELDS,
external_table=False,
labels=LABELS,
project_id=JOB_PROJECT_ID,
)
operator.execute(context=MagicMock())
calls = [
call(
configuration={
"load": {
"autodetect": True,
"createDisposition": "CREATE_IF_NEEDED",
"destinationTable": {"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
"sourceFormat": "CSV",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"writeDisposition": "WRITE_TRUNCATE",
"ignoreUnknownValues": False,
"schema": {"fields": SCHEMA_FIELDS},
"destinationTableProperties": {"labels": LABELS},
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
}
},
job_id=REAL_JOB_ID,
location=None,
nowait=True,
project_id=JOB_PROJECT_ID,
retry=DEFAULT_RETRY,
timeout=None,
)
]
hook.return_value.insert_job.assert_has_calls(calls)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_description_external_table_should_execute_successfully(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
write_disposition=WRITE_DISPOSITION,
schema_fields=SCHEMA_FIELDS,
description=DESCRIPTION,
external_table=True,
project_id=JOB_PROJECT_ID,
)
operator.execute(context=MagicMock())
hook.return_value.create_table.assert_called_once_with(
exists_ok=True,
location=None,
project_id=JOB_PROJECT_ID,
dataset_id=DATASET,
table_id=TABLE,
table_resource={
"tableReference": {"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
"labels": {},
"externalDataConfiguration": {
"autodetect": True,
"sourceFormat": "CSV",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"compression": "NONE",
"ignoreUnknownValues": False,
"csvOptions": {
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"allowJaggedRows": False,
"encoding": "UTF-8",
},
"schema": {"fields": SCHEMA_FIELDS},
},
"description": DESCRIPTION,
},
)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_description_without_external_table_should_execute_successfully(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
schema_fields=SCHEMA_FIELDS,
write_disposition=WRITE_DISPOSITION,
external_table=False,
description=DESCRIPTION,
project_id=JOB_PROJECT_ID,
)
operator.execute(context=MagicMock())
calls = [
call(
configuration={
"load": dict(
autodetect=True,
createDisposition="CREATE_IF_NEEDED",
destinationTable={"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
destinationTableProperties={
"description": DESCRIPTION,
},
sourceFormat="CSV",
skipLeadingRows=None,
sourceUris=[f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
writeDisposition=WRITE_DISPOSITION,
ignoreUnknownValues=False,
allowQuotedNewlines=False,
encoding="UTF-8",
schema={"fields": SCHEMA_FIELDS},
quote=None,
fieldDelimiter=",",
),
},
project_id=JOB_PROJECT_ID,
location=None,
job_id=REAL_JOB_ID,
timeout=None,
retry=DEFAULT_RETRY,
nowait=True,
),
]
hook.return_value.insert_job.assert_has_calls(calls)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_source_objs_as_list_external_table_should_execute_successfully(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
schema_fields=SCHEMA_FIELDS,
write_disposition=WRITE_DISPOSITION,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
external_table=True,
project_id=JOB_PROJECT_ID,
)
operator.execute(context=MagicMock())
hook.return_value.create_table.assert_called_once_with(
exists_ok=True,
location=None,
project_id=JOB_PROJECT_ID,
dataset_id=DATASET,
table_id=TABLE,
table_resource={
"tableReference": {"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
"labels": {},
"externalDataConfiguration": {
"autodetect": True,
"sourceFormat": "CSV",
"sourceUris": [
f"gs://{TEST_BUCKET}/{source_object}" for source_object in TEST_SOURCE_OBJECTS_LIST
],
"compression": "NONE",
"ignoreUnknownValues": False,
"csvOptions": {
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"allowJaggedRows": False,
"encoding": "UTF-8",
},
"schema": {"fields": SCHEMA_FIELDS},
},
},
)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_source_objs_as_list_without_external_table_should_execute_successfully(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
schema_fields=SCHEMA_FIELDS,
write_disposition=WRITE_DISPOSITION,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
external_table=False,
project_id=JOB_PROJECT_ID,
)
operator.execute(context=MagicMock())
calls = [
call(
configuration={
"load": {
"autodetect": True,
"createDisposition": "CREATE_IF_NEEDED",
"destinationTable": {
"projectId": "test-project",
"datasetId": "dataset",
"tableId": "table",
},
"sourceFormat": "CSV",
"sourceUris": ["gs://test-bucket/test/objects/test.csv"],
"writeDisposition": "WRITE_TRUNCATE",
"ignoreUnknownValues": False,
"schema": {"fields": SCHEMA_FIELDS},
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
}
},
job_id=REAL_JOB_ID,
location=None,
nowait=True,
project_id=JOB_PROJECT_ID,
retry=DEFAULT_RETRY,
timeout=None,
)
]
hook.return_value.insert_job.assert_has_calls(calls)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_source_objs_as_string_external_table_should_execute_successfully(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
schema_fields=SCHEMA_FIELDS,
write_disposition=WRITE_DISPOSITION,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
external_table=True,
project_id=JOB_PROJECT_ID,
)
operator.execute(context=MagicMock())
hook.return_value.create_table.assert_called_once_with(
exists_ok=True,
location=None,
project_id=JOB_PROJECT_ID,
dataset_id=DATASET,
table_id=TABLE,
table_resource={
"tableReference": {"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
"labels": {},
"externalDataConfiguration": {
"autodetect": True,
"sourceFormat": "CSV",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"compression": "NONE",
"ignoreUnknownValues": False,
"csvOptions": {
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"allowJaggedRows": False,
"encoding": "UTF-8",
},
"schema": {"fields": SCHEMA_FIELDS},
},
},
)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_source_objs_as_string_without_external_table_should_execute_successfully(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
schema_fields=SCHEMA_FIELDS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
write_disposition=WRITE_DISPOSITION,
external_table=False,
project_id=JOB_PROJECT_ID,
)
operator.execute(context=MagicMock())
calls = [
call(
configuration={
"load": {
"autodetect": True,
"createDisposition": "CREATE_IF_NEEDED",
"destinationTable": {
"projectId": PROJECT_ID,
"datasetId": DATASET,
"tableId": "table",
},
"sourceFormat": "CSV",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"writeDisposition": "WRITE_TRUNCATE",
"ignoreUnknownValues": False,
"schema": {"fields": SCHEMA_FIELDS},
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
}
},
job_id=REAL_JOB_ID,
location=None,
nowait=True,
project_id=JOB_PROJECT_ID,
retry=DEFAULT_RETRY,
timeout=None,
)
]
hook.return_value.insert_job.assert_has_calls(calls)
@mock.patch(GCS_TO_BQ_PATH.format("GCSHook"))
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_schema_obj_external_table_should_execute_successfully(self, bq_hook, gcs_hook):
bq_hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
bq_hook.return_value.generate_job_id.return_value = REAL_JOB_ID
bq_hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
gcs_hook.return_value.download.return_value = bytes(json.dumps(SCHEMA_FIELDS), "utf-8")
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
schema_object_bucket=SCHEMA_BUCKET,
schema_object=SCHEMA_OBJECT,
write_disposition=WRITE_DISPOSITION,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
external_table=True,
project_id=JOB_PROJECT_ID,
)
operator.execute(context=MagicMock())
bq_hook.return_value.create_table.assert_called_once_with(
exists_ok=True,
location=None,
project_id=JOB_PROJECT_ID,
dataset_id=DATASET,
table_id=TABLE,
table_resource={
"tableReference": {"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
"labels": {},
"externalDataConfiguration": {
"autodetect": True,
"sourceFormat": "CSV",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"compression": "NONE",
"ignoreUnknownValues": False,
"csvOptions": {
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"allowJaggedRows": False,
"encoding": "UTF-8",
},
"schema": {"fields": SCHEMA_FIELDS},
},
},
)
gcs_hook.return_value.download.assert_called_once_with(SCHEMA_BUCKET, SCHEMA_OBJECT)
@mock.patch(GCS_TO_BQ_PATH.format("GCSHook"))
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_schema_obj_without_external_table_should_execute_successfully(self, bq_hook, gcs_hook):
bq_hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
bq_hook.return_value.generate_job_id.return_value = REAL_JOB_ID
bq_hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
gcs_hook.return_value.download.return_value = bytes(json.dumps(SCHEMA_FIELDS), "utf-8")
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
schema_object_bucket=SCHEMA_BUCKET,
schema_object=SCHEMA_OBJECT,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
write_disposition=WRITE_DISPOSITION,
external_table=False,
project_id=JOB_PROJECT_ID,
)
operator.execute(context=MagicMock())
calls = [
call(
configuration={
"load": {
"autodetect": True,
"createDisposition": "CREATE_IF_NEEDED",
"destinationTable": {"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
"sourceFormat": "CSV",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"writeDisposition": "WRITE_TRUNCATE",
"ignoreUnknownValues": False,
"schema": {"fields": SCHEMA_FIELDS},
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
}
},
project_id=JOB_PROJECT_ID,
location=None,
job_id=REAL_JOB_ID,
timeout=None,
retry=DEFAULT_RETRY,
nowait=True,
)
]
bq_hook.return_value.insert_job.assert_has_calls(calls)
gcs_hook.return_value.download.assert_called_once_with(SCHEMA_BUCKET, SCHEMA_OBJECT)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_autodetect_none_external_table_should_execute_successfully(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
write_disposition=WRITE_DISPOSITION,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
external_table=True,
autodetect=None,
project_id=JOB_PROJECT_ID,
)
operator.execute(context=MagicMock())
hook.return_value.create_table.assert_called_once_with(
exists_ok=True,
location=None,
project_id=JOB_PROJECT_ID,
dataset_id=DATASET,
table_id=TABLE,
table_resource={
"tableReference": {"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
"labels": {},
"externalDataConfiguration": {
"autodetect": None,
"sourceFormat": "CSV",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"compression": "NONE",
"ignoreUnknownValues": False,
"csvOptions": {
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"allowJaggedRows": False,
"encoding": "UTF-8",
},
},
},
)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_autodetect_none_without_external_table_should_execute_successfully(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
write_disposition=WRITE_DISPOSITION,
autodetect=None,
external_table=False,
project_id=JOB_PROJECT_ID,
)
operator.execute(context=MagicMock())
calls = [
call(
configuration={
"load": {
"autodetect": None,
"createDisposition": "CREATE_IF_NEEDED",
"destinationTable": {"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
"sourceFormat": "CSV",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"writeDisposition": "WRITE_TRUNCATE",
"ignoreUnknownValues": False,
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
}
},
project_id=JOB_PROJECT_ID,
location=None,
job_id=REAL_JOB_ID,
timeout=None,
retry=DEFAULT_RETRY,
nowait=True,
)
]
hook.return_value.insert_job.assert_has_calls(calls)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_execute_should_throw_ex_when_no_bucket_specified(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
with pytest.raises((TypeError, AirflowException), match=r"missing keyword argument 'bucket'"):
GCSToBigQueryOperator(
task_id=TASK_ID,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
schema_fields=SCHEMA_FIELDS,
max_id_key=MAX_ID_KEY,
write_disposition=WRITE_DISPOSITION,
external_table=False,
project_id=JOB_PROJECT_ID,
)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_execute_should_throw_ex_when_no_source_objects_specified(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
with pytest.raises((TypeError, AirflowException), match=r"missing keyword argument 'source_objects'"):
GCSToBigQueryOperator(
task_id=TASK_ID,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
schema_fields=SCHEMA_FIELDS,
bucket=TEST_BUCKET,
max_id_key=MAX_ID_KEY,
write_disposition=WRITE_DISPOSITION,
external_table=False,
project_id=JOB_PROJECT_ID,
)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_execute_should_throw_ex_when_no_destination_project_dataset_table_specified(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
with pytest.raises(
(TypeError, AirflowException),
match=r"missing keyword argument 'destination_project_dataset_table'",
):
GCSToBigQueryOperator(
task_id=TASK_ID,
schema_fields=SCHEMA_FIELDS,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
max_id_key=MAX_ID_KEY,
write_disposition=WRITE_DISPOSITION,
external_table=False,
project_id=JOB_PROJECT_ID,
)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_source_format_check_should_throw_ex_when_incorrect_source_type(
self,
hook,
):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
hook.return_value.get_job.return_value.result.return_value = ("1",)
with pytest.raises(
ValueError,
match=r"is not a valid source format.",
):
GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
max_id_key=MAX_ID_KEY,
write_disposition=WRITE_DISPOSITION,
external_table=False,
autodetect=False,
source_format="incorrect",
project_id=JOB_PROJECT_ID,
)
@mock.patch(GCS_TO_BQ_PATH.format("GCSHook"))
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_schema_fields_integer_scanner_external_table_should_execute_successfully(
self, bq_hook, gcs_hook
):
"""
Check detection of schema fields if schema_fields parameter is not
specified and fields are read from source objects correctly by BigQuery if at least
one field includes non-string value.
"""
bq_hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
bq_hook.return_value.generate_job_id.return_value = REAL_JOB_ID
bq_hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
bq_hook.return_value.get_job.return_value.result.return_value = ("1",)
gcs_hook.return_value.download.return_value = b"id,name\r\n1,Anna"
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
write_disposition=WRITE_DISPOSITION,
max_id_key=MAX_ID_KEY,
external_table=True,
autodetect=True,
project_id=JOB_PROJECT_ID,
)
result = operator.execute(context=MagicMock())
assert result == "1"
bq_hook.return_value.create_table.assert_called_once_with(
exists_ok=True,
location=None,
project_id=JOB_PROJECT_ID,
dataset_id=DATASET,
table_id=TABLE,
table_resource={
"tableReference": {"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
"labels": {},
"externalDataConfiguration": {
"autodetect": True,
"sourceFormat": "CSV",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"compression": "NONE",
"ignoreUnknownValues": False,
"csvOptions": {
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"allowJaggedRows": False,
"encoding": "UTF-8",
},
},
},
)
bq_hook.return_value.insert_job.assert_called_once_with(
configuration={
"query": {
"query": f"SELECT MAX({MAX_ID_KEY}) AS max_value FROM {TEST_EXPLICIT_DEST}",
"useLegacySql": False,
"schemaUpdateOptions": [],
}
},
project_id=JOB_PROJECT_ID,
)
@mock.patch(GCS_TO_BQ_PATH.format("GCSHook"))
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_schema_fields_integer_scanner_without_external_table_should_execute_successfully(
self, bq_hook, gcs_hook
):
"""
Check detection of schema fields if schema_fields parameter is not
specified and fields are read from source objects correctly by BigQuery if at least
one field includes non-string value.
"""
bq_hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
bq_hook.return_value.generate_job_id.return_value = REAL_JOB_ID
bq_hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
bq_hook.return_value.get_job.return_value.result.return_value = ("1",)
gcs_hook.return_value.download.return_value = b"id,name\r\n1,Anna"
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
write_disposition=WRITE_DISPOSITION,
max_id_key=MAX_ID_KEY,
external_table=False,
autodetect=True,
project_id=JOB_PROJECT_ID,
)
result = operator.execute(context=MagicMock())
assert result == "1"
calls = [
call(
configuration={
"load": {
"autodetect": True,
"createDisposition": "CREATE_IF_NEEDED",
"destinationTable": {"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
"sourceFormat": "CSV",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"writeDisposition": WRITE_DISPOSITION,
"ignoreUnknownValues": False,
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
}
},
job_id=REAL_JOB_ID,
location=None,
nowait=True,
project_id=JOB_PROJECT_ID,
retry=DEFAULT_RETRY,
timeout=None,
),
call(
configuration={
"query": {
"query": f"SELECT MAX({MAX_ID_KEY}) AS max_value FROM {TEST_EXPLICIT_DEST}",
"useLegacySql": False,
"schemaUpdateOptions": [],
}
},
project_id=JOB_PROJECT_ID,
),
]
bq_hook.return_value.insert_job.assert_has_calls(calls)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_schema_fields_without_external_table_should_execute_successfully(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
hook.return_value.get_job.return_value.result.return_value = ("1",)
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
write_disposition=WRITE_DISPOSITION,
schema_fields=SCHEMA_FIELDS_INT,
external_table=False,
autodetect=True,
project_id=JOB_PROJECT_ID,
)
operator.execute(context=MagicMock())
calls = [
call(
configuration={
"load": {
"autodetect": True,
"createDisposition": "CREATE_IF_NEEDED",
"destinationTable": {"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
"sourceFormat": "CSV",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"writeDisposition": WRITE_DISPOSITION,
"ignoreUnknownValues": False,
"schema": {"fields": SCHEMA_FIELDS_INT},
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
}
},
job_id=REAL_JOB_ID,
location=None,
nowait=True,
project_id=JOB_PROJECT_ID,
retry=DEFAULT_RETRY,
timeout=None,
)
]
hook.return_value.insert_job.assert_has_calls(calls)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_schema_fields_external_table_should_execute_successfully(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
hook.return_value.get_job.return_value.result.return_value = ("1",)
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
write_disposition=WRITE_DISPOSITION,
schema_fields=SCHEMA_FIELDS_INT,
external_table=True,
autodetect=True,
project_id=JOB_PROJECT_ID,
)
operator.execute(context=MagicMock())
hook.return_value.create_table.assert_called_once_with(
exists_ok=True,
location=None,
project_id=JOB_PROJECT_ID,
dataset_id=DATASET,
table_id=TABLE,
table_resource={
"tableReference": {"projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE},
"labels": {},
"externalDataConfiguration": {
"autodetect": True,
"sourceFormat": "CSV",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"compression": "NONE",
"ignoreUnknownValues": False,
"csvOptions": {
"skipLeadingRows": None,
"fieldDelimiter": ",",
"quote": None,
"allowQuotedNewlines": False,
"allowJaggedRows": False,
"encoding": "UTF-8",
},
"schema": {"fields": SCHEMA_FIELDS_INT},
},
},
)
@pytest.mark.parametrize(
("source_object", "expected_dataset_name"),
(
(f"{TEST_FOLDER}/{TEST_OBJECT_NO_WILDCARD}", f"{TEST_FOLDER}/{TEST_OBJECT_NO_WILDCARD}"),
(TEST_OBJECT_NO_WILDCARD, TEST_OBJECT_NO_WILDCARD),
(f"{TEST_FOLDER}/{TEST_OBJECT_WILDCARD}", TEST_FOLDER),
(f"{TEST_OBJECT_WILDCARD}", "/"),
(f"{TEST_FOLDER}/*", TEST_FOLDER),
),
)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_get_openlineage_facets_on_complete_gcs_dataset_name(
self, hook, source_object, expected_dataset_name
):
hook.return_value.insert_job.return_value = MagicMock(job_id=REAL_JOB_ID, error_result=False)
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
operator = GCSToBigQueryOperator(
project_id=JOB_PROJECT_ID,
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=[source_object],
destination_project_dataset_table=TEST_EXPLICIT_DEST,
)
expected_symlink = SymlinksDatasetFacet(
identifiers=[
Identifier(
namespace=f"gs://{TEST_BUCKET}",
name=source_object,
type="file",
)
]
)
operator.execute(context=mock.MagicMock())
lineage = operator.get_openlineage_facets_on_complete(None)
assert len(lineage.inputs) == 1
assert lineage.inputs[0].name == expected_dataset_name
if "*" in source_object:
assert lineage.inputs[0].facets.get("symlink")
assert lineage.inputs[0].facets.get("symlink") == expected_symlink
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_get_openlineage_facets_on_complete_gcs_multiple_uris(self, hook):
hook.return_value.insert_job.return_value = MagicMock(job_id=REAL_JOB_ID, error_result=False)
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
operator = GCSToBigQueryOperator(
project_id=JOB_PROJECT_ID,
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=[
TEST_OBJECT_NO_WILDCARD,
TEST_OBJECT_WILDCARD,
f"{TEST_FOLDER}1/{TEST_OBJECT_NO_WILDCARD}",
f"{TEST_FOLDER}2/{TEST_OBJECT_WILDCARD}",
],
destination_project_dataset_table=TEST_EXPLICIT_DEST,
)
operator.execute(context=mock.MagicMock())
lineage = operator.get_openlineage_facets_on_complete(None)
assert len(lineage.inputs) == 4
assert lineage.inputs[0].name == TEST_OBJECT_NO_WILDCARD
assert lineage.inputs[1].name == "/"
assert lineage.inputs[1].facets.get("symlink") == SymlinksDatasetFacet(
identifiers=[
Identifier(
namespace=f"gs://{TEST_BUCKET}",
name=TEST_OBJECT_WILDCARD,
type="file",
)
]
)
assert lineage.inputs[2].name == f"{TEST_FOLDER}1/{TEST_OBJECT_NO_WILDCARD}"
assert lineage.inputs[3].name == f"{TEST_FOLDER}2"
assert lineage.inputs[3].facets.get("symlink") == SymlinksDatasetFacet(
identifiers=[
Identifier(
namespace=f"gs://{TEST_BUCKET}",
name=f"{TEST_FOLDER}2/{TEST_OBJECT_WILDCARD}",
type="file",
)
]
)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_get_openlineage_facets_on_complete_bq_dataset(self, hook):
hook.return_value.insert_job.return_value = MagicMock(job_id=REAL_JOB_ID, error_result=False)
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
hook.return_value.get_client.return_value.get_table.return_value = TEST_TABLE
expected_output_dataset_facets = {
"schema": SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(name="field1", type="STRING", description="field1 description"),
SchemaDatasetFacetFields(name="field2", type="INTEGER"),
]
),
"documentation": DocumentationDatasetFacet(description="Test Description"),
"columnLineage": ColumnLineageDatasetFacet(
fields={
"field1": Fields(
inputFields=[
InputField(
namespace=f"gs://{TEST_BUCKET}", name=TEST_OBJECT_NO_WILDCARD, field="field1"
)
],
transformationType="IDENTITY",
transformationDescription="identical",
),
"field2": Fields(
inputFields=[
InputField(
namespace=f"gs://{TEST_BUCKET}", name=TEST_OBJECT_NO_WILDCARD, field="field2"
)
],
transformationType="IDENTITY",
transformationDescription="identical",
),
}
),
}
operator = GCSToBigQueryOperator(
project_id=JOB_PROJECT_ID,
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=[TEST_OBJECT_NO_WILDCARD],
destination_project_dataset_table=TEST_EXPLICIT_DEST,
)
operator.execute(context=mock.MagicMock())
lineage = operator.get_openlineage_facets_on_complete(None)
assert len(lineage.outputs) == 1
assert lineage.outputs[0] == Dataset(
namespace="bigquery",
name=TEST_EXPLICIT_DEST,
facets=expected_output_dataset_facets,
)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_get_openlineage_facets_on_complete_bq_dataset_multiple_gcs_uris(self, hook):
hook.return_value.insert_job.return_value = MagicMock(job_id=REAL_JOB_ID, error_result=False)
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
hook.return_value.get_client.return_value.get_table.return_value = TEST_TABLE
expected_output_dataset_facets = {
"schema": SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(name="field1", type="STRING", description="field1 description"),
SchemaDatasetFacetFields(name="field2", type="INTEGER"),
]
),
"documentation": DocumentationDatasetFacet(description="Test Description"),
"columnLineage": ColumnLineageDatasetFacet(
fields={
"field1": Fields(
inputFields=[
InputField(
namespace=f"gs://{TEST_BUCKET}", name=TEST_OBJECT_NO_WILDCARD, field="field1"
),
InputField(namespace=f"gs://{TEST_BUCKET}", name="/", field="field1"),
],
transformationType="IDENTITY",
transformationDescription="identical",
),
"field2": Fields(
inputFields=[
InputField(
namespace=f"gs://{TEST_BUCKET}", name=TEST_OBJECT_NO_WILDCARD, field="field2"
),
InputField(namespace=f"gs://{TEST_BUCKET}", name="/", field="field2"),
],
transformationType="IDENTITY",
transformationDescription="identical",
),
}
),
}
operator = GCSToBigQueryOperator(
project_id=JOB_PROJECT_ID,
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=[TEST_OBJECT_NO_WILDCARD, TEST_OBJECT_WILDCARD],
destination_project_dataset_table=TEST_EXPLICIT_DEST,
)
operator.execute(context=mock.MagicMock())
lineage = operator.get_openlineage_facets_on_complete(None)
assert len(lineage.outputs) == 1
assert lineage.outputs[0] == Dataset(
namespace="bigquery",
name=TEST_EXPLICIT_DEST,
facets=expected_output_dataset_facets,
)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_get_openlineage_facets_on_complete_empty_table(self, hook):
hook.return_value.insert_job.return_value = MagicMock(job_id=REAL_JOB_ID, error_result=False)
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
hook.return_value.get_client.return_value.get_table.return_value = TEST_EMPTY_TABLE
operator = GCSToBigQueryOperator(
project_id=JOB_PROJECT_ID,
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=[TEST_OBJECT_NO_WILDCARD, TEST_OBJECT_WILDCARD],
destination_project_dataset_table=TEST_EXPLICIT_DEST,
)
operator.execute(context=mock.MagicMock())
lineage = operator.get_openlineage_facets_on_complete(None)
assert len(lineage.inputs) == 2
assert len(lineage.outputs) == 1
assert lineage.outputs[0] == Dataset(
namespace="bigquery",
name=TEST_EXPLICIT_DEST,
facets={},
)
assert lineage.inputs[0] == Dataset(
namespace=f"gs://{TEST_BUCKET}",
name=TEST_OBJECT_NO_WILDCARD,
facets={},
)
assert lineage.inputs[1] == Dataset(
namespace=f"gs://{TEST_BUCKET}",
name="/",
facets={
"symlink": SymlinksDatasetFacet(
identifiers=[
Identifier(
namespace=f"gs://{TEST_BUCKET}",
name=TEST_OBJECT_WILDCARD,
type="file",
)
]
),
},
)
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_get_openlineage_facets_on_complete_full_table_multiple_gcs_uris(self, hook):
hook.return_value.insert_job.return_value = MagicMock(job_id=REAL_JOB_ID, error_result=False)
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
hook.return_value.get_client.return_value.get_table.return_value = TEST_TABLE
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
schema_facet = SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(name="field1", type="STRING", description="field1 description"),
SchemaDatasetFacetFields(name="field2", type="INTEGER"),
]
)
expected_input_wildcard_dataset_facets = {
"schema": schema_facet,
"symlink": SymlinksDatasetFacet(
identifiers=[
Identifier(
namespace=f"gs://{TEST_BUCKET}",
name=TEST_OBJECT_WILDCARD,
type="file",
)
]
),
}
expected_input_no_wildcard_dataset_facets = {"schema": schema_facet}
expected_output_dataset_facets = {
"schema": schema_facet,
"documentation": DocumentationDatasetFacet(description="Test Description"),
"columnLineage": ColumnLineageDatasetFacet(
fields={
"field1": Fields(
inputFields=[
InputField(
namespace=f"gs://{TEST_BUCKET}", name=TEST_OBJECT_NO_WILDCARD, field="field1"
),
InputField(namespace=f"gs://{TEST_BUCKET}", name="/", field="field1"),
],
transformationType="IDENTITY",
transformationDescription="identical",
),
"field2": Fields(
inputFields=[
InputField(
namespace=f"gs://{TEST_BUCKET}", name=TEST_OBJECT_NO_WILDCARD, field="field2"
),
InputField(namespace=f"gs://{TEST_BUCKET}", name="/", field="field2"),
],
transformationType="IDENTITY",
transformationDescription="identical",
),
}
),
}
operator = GCSToBigQueryOperator(
project_id=JOB_PROJECT_ID,
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=[TEST_OBJECT_NO_WILDCARD, TEST_OBJECT_WILDCARD],
destination_project_dataset_table=TEST_EXPLICIT_DEST,
)
operator.execute(context=mock.MagicMock())
lineage = operator.get_openlineage_facets_on_complete(None)
assert len(lineage.inputs) == 2
assert len(lineage.outputs) == 1
assert lineage.outputs[0] == Dataset(
namespace="bigquery", name=TEST_EXPLICIT_DEST, facets=expected_output_dataset_facets
)
assert lineage.inputs[0] == Dataset(
namespace=f"gs://{TEST_BUCKET}",
name=TEST_OBJECT_NO_WILDCARD,
facets=expected_input_no_wildcard_dataset_facets,
)
assert lineage.inputs[1] == Dataset(
namespace=f"gs://{TEST_BUCKET}", name="/", facets=expected_input_wildcard_dataset_facets
)
assert lineage.run_facets == {
"externalQuery": ExternalQueryRunFacet(externalQueryId=REAL_JOB_ID, source="bigquery")
}
assert lineage.job_facets == {}
@mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook"))
def test_external_table_should_accept_orc_source_format(self, hook):
hook.return_value.insert_job.side_effect = [
MagicMock(job_id=REAL_JOB_ID, error_result=False),
REAL_JOB_ID,
]
hook.return_value.generate_job_id.return_value = REAL_JOB_ID
hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE)
operator = GCSToBigQueryOperator(
task_id=TASK_ID,
bucket=TEST_BUCKET,
source_objects=TEST_SOURCE_OBJECTS,
destination_project_dataset_table=TEST_EXPLICIT_DEST,
schema_fields=SCHEMA_FIELDS,
write_disposition=WRITE_DISPOSITION,
external_table=True,
project_id=JOB_PROJECT_ID,
source_format="ORC",
)
operator.execute(context=MagicMock())
hook.return_value.create_table.assert_called_once_with(
exists_ok=True,
location=None,
project_id=JOB_PROJECT_ID,
dataset_id=DATASET,
table_id=TABLE,
table_resource={
"tableReference": {
"projectId": PROJECT_ID,
"datasetId": DATASET,
"tableId": TABLE,
},
"externalDataConfiguration": {
"autodetect": True,
"sourceFormat": "ORC",
"sourceUris": [f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"],
"compression": "NONE",
"ignoreUnknownValues": False,
"schema": {"fields": SCHEMA_FIELDS},
},
},
)
@pytest.fixture
def create_task_instance(create_task_instance_of_operator, session):
return functools.partial(
create_task_instance_of_operator,
session=session,
operator_class=GCSToBigQueryOperator,
dag_id="adhoc_airflow",
)
| TestGCSToBigQueryOperator |
python | pytest-dev__pytest-mock | tests/test_pytest_mock.py | {
"start": 1264,
"end": 1541
} | class ____:
"""
Wrapper to os functions to simulate a Unix file system, used for testing
the mock fixture.
"""
@classmethod
def rm(cls, filename):
os.remove(filename)
@classmethod
def ls(cls, path):
return os.listdir(path)
| UnixFS |
python | sympy__sympy | sympy/stats/matrix_distributions.py | {
"start": 2665,
"end": 4196
} | class ____:
"""Returns the sample from scipy of the given distribution"""
def __new__(cls, dist, size, seed=None):
return cls._sample_scipy(dist, size, seed)
@classmethod
def _sample_scipy(cls, dist, size, seed):
"""Sample from SciPy."""
from scipy import stats as scipy_stats
import numpy
scipy_rv_map = {
'WishartDistribution': lambda dist, size, rand_state: scipy_stats.wishart.rvs(
df=int(dist.n), scale=matrix2numpy(dist.scale_matrix, float), size=size),
'MatrixNormalDistribution': lambda dist, size, rand_state: scipy_stats.matrix_normal.rvs(
mean=matrix2numpy(dist.location_matrix, float),
rowcov=matrix2numpy(dist.scale_matrix_1, float),
colcov=matrix2numpy(dist.scale_matrix_2, float), size=size, random_state=rand_state)
}
sample_shape = {
'WishartDistribution': lambda dist: dist.scale_matrix.shape,
'MatrixNormalDistribution' : lambda dist: dist.location_matrix.shape
}
dist_list = scipy_rv_map.keys()
if dist.__class__.__name__ not in dist_list:
return None
if seed is None or isinstance(seed, int):
rand_state = numpy.random.default_rng(seed=seed)
else:
rand_state = seed
samp = scipy_rv_map[dist.__class__.__name__](dist, prod(size), rand_state)
return samp.reshape(size + sample_shape[dist.__class__.__name__](dist))
| SampleMatrixScipy |
python | pydantic__pydantic | pydantic-core/tests/validators/test_model_fields.py | {
"start": 52122,
"end": 68742
} | class ____:
def test_on_error_bad_default(self):
with pytest.raises(SchemaError, match="'on_error = default' requires a `default` or `default_factory`"):
SchemaValidator(
schema=core_schema.model_fields_schema(
fields={
'x': core_schema.model_field(
schema=core_schema.with_default_schema(schema=core_schema.str_schema(), on_error='default')
)
}
)
)
def test_on_error_raise_by_default(self, py_and_json: PyAndJson):
v = py_and_json({'type': 'model-fields', 'fields': {'x': {'type': 'model-field', 'schema': {'type': 'str'}}}})
assert v.validate_test({'x': 'foo'}) == ({'x': 'foo'}, None, {'x'})
with pytest.raises(ValidationError) as exc_info:
v.validate_test({'x': ['foo']})
assert exc_info.value.errors(include_url=False) == [
{'input': ['foo'], 'type': 'string_type', 'loc': ('x',), 'msg': 'Input should be a valid string'}
]
def test_on_error_raise_explicit(self, py_and_json: PyAndJson):
v = py_and_json(
{
'type': 'model-fields',
'fields': {
'x': {
'type': 'model-field',
'schema': {'type': 'default', 'schema': {'type': 'str'}, 'on_error': 'raise'},
}
},
}
)
assert v.validate_test({'x': 'foo'}) == ({'x': 'foo'}, None, {'x'})
with pytest.raises(ValidationError) as exc_info:
v.validate_test({'x': ['foo']})
assert exc_info.value.errors(include_url=False) == [
{'input': ['foo'], 'type': 'string_type', 'loc': ('x',), 'msg': 'Input should be a valid string'}
]
def test_on_error_default(self, py_and_json: PyAndJson):
v = py_and_json(
{
'type': 'model-fields',
'fields': {
'x': {
'type': 'model-field',
'schema': {
'type': 'default',
'schema': {'type': 'str'},
'on_error': 'default',
'default': 'pika',
},
}
},
}
)
assert v.validate_test({'x': 'foo'}) == ({'x': 'foo'}, None, {'x'})
assert v.validate_test({'x': ['foo']}) == ({'x': 'pika'}, None, {'x'})
def test_on_error_default_factory(self, py_and_json: PyAndJson):
v = py_and_json(
{
'type': 'model-fields',
'fields': {
'x': {
'type': 'model-field',
'schema': {
'type': 'default',
'schema': {'type': 'str'},
'on_error': 'default',
'default_factory': lambda: 'pika',
},
}
},
}
)
assert v.validate_test({'x': 'foo'}) == ({'x': 'foo'}, None, {'x'})
assert v.validate_test({'x': ['foo']}) == ({'x': 'pika'}, None, {'x'})
def test_wrap_on_error(self, py_and_json: PyAndJson):
def wrap_function(input_value, validator, info):
try:
return validator(input_value)
except ValidationError:
if isinstance(input_value, list):
return str(len(input_value))
else:
return repr(input_value)
v = py_and_json(
{
'type': 'model-fields',
'fields': {
'x': {
'type': 'model-field',
'schema': {
'type': 'default',
'on_error': 'raise',
'schema': {
'type': 'function-wrap',
'function': {'type': 'with-info', 'function': wrap_function},
'schema': {'type': 'str'},
},
},
}
},
}
)
assert v.validate_test({'x': 'foo'}) == ({'x': 'foo'}, None, {'x'})
assert v.validate_test({'x': ['foo']}) == ({'x': '1'}, None, {'x'})
assert v.validate_test({'x': ['foo', 'bar']}) == ({'x': '2'}, None, {'x'})
assert v.validate_test({'x': {'a': 'b'}}) == ({'x': "{'a': 'b'}"}, None, {'x'})
def test_frozen_field():
v = SchemaValidator(
core_schema.model_fields_schema(
fields={
'name': core_schema.model_field(schema=core_schema.str_schema()),
'age': core_schema.model_field(schema=core_schema.int_schema()),
'is_developer': core_schema.model_field(
schema=core_schema.with_default_schema(schema=core_schema.bool_schema(), default=True), frozen=True
),
}
)
)
r1, model_extra, fields_set = v.validate_python({'name': 'Samuel', 'age': '36'})
assert r1 == {'name': 'Samuel', 'age': 36, 'is_developer': True}
assert model_extra is None
assert fields_set == {'name', 'age'}
v.validate_assignment(r1, 'age', '35')
assert r1 == {'name': 'Samuel', 'age': 35, 'is_developer': True}
with pytest.raises(ValidationError) as exc_info:
v.validate_assignment(r1, 'is_developer', False)
assert exc_info.value.errors(include_url=False) == [
{'type': 'frozen_field', 'loc': ('is_developer',), 'msg': 'Field is frozen', 'input': False}
]
@pytest.mark.parametrize(
'config,schema_extra_behavior_kw',
[
(core_schema.CoreConfig(extra_fields_behavior='allow'), {}),
(core_schema.CoreConfig(extra_fields_behavior='allow'), {'extra_behavior': None}),
(core_schema.CoreConfig(), {'extra_behavior': 'allow'}),
(None, {'extra_behavior': 'allow'}),
(core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': 'allow'}),
],
)
@pytest.mark.parametrize(
'extras_schema_kw, expected_extra_value',
[({}, '123'), ({'extras_schema': None}, '123'), ({'extras_schema': core_schema.int_schema()}, 123)],
ids=['extras_schema=unset', 'extras_schema=None', 'extras_schema=int'],
)
def test_extra_behavior_allow(
config: Union[core_schema.CoreConfig, None],
schema_extra_behavior_kw: dict[str, Any],
extras_schema_kw: dict[str, Any],
expected_extra_value: Any,
):
v = SchemaValidator(
core_schema.model_fields_schema(
{'f': core_schema.model_field(core_schema.str_schema())}, **schema_extra_behavior_kw, **extras_schema_kw
),
config=config,
)
m, model_extra, fields_set = v.validate_python({'f': 'x', 'extra_field': '123'})
assert m == {'f': 'x'}
assert model_extra == {'extra_field': expected_extra_value}
assert fields_set == {'f', 'extra_field'}
v.validate_assignment(m, 'f', 'y')
assert m == {'f': 'y'}
new_m, new_model_extra, new_fields_set = v.validate_assignment({**m, **model_extra}, 'not_f', '123')
assert new_m == {'f': 'y'}
assert new_model_extra == {'extra_field': expected_extra_value, 'not_f': expected_extra_value}
assert new_fields_set == {'not_f'}
# We can't test the extra parameter of the validate_* functions above, since the
# extras_schema parameter isn't valid unless the models are configured with extra='allow'.
# Test the validate_* extra parameter separately instead:
@pytest.mark.parametrize(
'config,schema_extra_behavior_kw',
[
(core_schema.CoreConfig(extra_fields_behavior='forbid'), {}),
(core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': None}),
(core_schema.CoreConfig(), {'extra_behavior': 'forbid'}),
(None, {'extra_behavior': 'forbid'}),
(core_schema.CoreConfig(extra_fields_behavior='ignore'), {'extra_behavior': 'forbid'}),
(core_schema.CoreConfig(), {}),
(core_schema.CoreConfig(), {'extra_behavior': None}),
(None, {'extra_behavior': None}),
],
)
def test_extra_behavior_allow_with_validate_fn_override(
config: Union[core_schema.CoreConfig, None],
schema_extra_behavior_kw: dict[str, Any],
):
v = SchemaValidator(
core_schema.model_fields_schema(
{'f': core_schema.model_field(core_schema.str_schema())}, **schema_extra_behavior_kw
),
config=config,
)
m, model_extra, fields_set = v.validate_python({'f': 'x', 'extra_field': '123'}, extra='allow')
assert m == {'f': 'x'}
assert model_extra == {'extra_field': '123'}
assert fields_set == {'f', 'extra_field'}
v.validate_assignment(m, 'f', 'y', extra='allow')
assert m == {'f': 'y'}
new_m, new_model_extra, new_fields_set = v.validate_assignment({**m, **model_extra}, 'not_f', '123', extra='allow')
assert new_m == {'f': 'y'}
assert new_model_extra == {'extra_field': '123', 'not_f': '123'}
assert new_fields_set == {'not_f'}
@pytest.mark.parametrize(
'config,schema_extra_behavior_kw,validate_fn_extra_kw',
[
(core_schema.CoreConfig(extra_fields_behavior='forbid'), {}, None),
(core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': None}, None),
(core_schema.CoreConfig(), {'extra_behavior': 'forbid'}, None),
(None, {'extra_behavior': 'forbid'}, None),
(core_schema.CoreConfig(extra_fields_behavior='allow'), {'extra_behavior': 'forbid'}, None),
(core_schema.CoreConfig(extra_fields_behavior='ignore'), {}, 'forbid'),
(core_schema.CoreConfig(extra_fields_behavior='ignore'), {'extra_behavior': None}, 'forbid'),
(core_schema.CoreConfig(), {'extra_behavior': 'ignore'}, 'forbid'),
(None, {'extra_behavior': 'ignore'}, 'forbid'),
(core_schema.CoreConfig(extra_fields_behavior='allow'), {'extra_behavior': 'ignore'}, 'forbid'),
(core_schema.CoreConfig(), {}, 'forbid'),
(core_schema.CoreConfig(), {'extra_behavior': None}, 'forbid'),
(None, {'extra_behavior': None}, 'forbid'),
],
)
def test_extra_behavior_forbid(
config: Union[core_schema.CoreConfig, None],
schema_extra_behavior_kw: dict[str, Any],
validate_fn_extra_kw: Union[ExtraBehavior, None],
):
v = SchemaValidator(
core_schema.model_fields_schema(
{'f': core_schema.model_field(core_schema.str_schema())}, **schema_extra_behavior_kw
),
config=config,
)
m, model_extra, fields_set = v.validate_python({'f': 'x'}, extra=validate_fn_extra_kw)
assert m == {'f': 'x'}
assert fields_set == {'f'}
with pytest.raises(ValidationError) as exc_info:
v.validate_python({'f': 'x', 'extra_field': 123}, extra=validate_fn_extra_kw)
assert exc_info.value.errors(include_url=False) == [
{'type': 'extra_forbidden', 'loc': ('extra_field',), 'msg': 'Extra inputs are not permitted', 'input': 123}
]
v.validate_assignment(m, 'f', 'y', extra=validate_fn_extra_kw)
assert m['f'] == 'y'
with pytest.raises(ValidationError) as exc_info:
v.validate_assignment(m, 'not_f', 'xyz', extra=validate_fn_extra_kw)
assert exc_info.value.errors(include_url=False) == [
{
'type': 'no_such_attribute',
'loc': ('not_f',),
'msg': "Object has no attribute 'not_f'",
'input': 'xyz',
'ctx': {'attribute': 'not_f'},
}
]
assert 'not_f' not in m
@pytest.mark.parametrize(
'config,schema_extra_behavior_kw,validate_fn_extra_kw',
[
(core_schema.CoreConfig(extra_fields_behavior='ignore'), {}, None),
(core_schema.CoreConfig(), {'extra_behavior': 'ignore'}, None),
(None, {'extra_behavior': 'ignore'}, None),
(core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': 'ignore'}, None),
(core_schema.CoreConfig(), {}, None),
(core_schema.CoreConfig(), {'extra_behavior': None}, None),
(None, {'extra_behavior': None}, None),
(core_schema.CoreConfig(extra_fields_behavior='allow'), {}, 'ignore'),
(core_schema.CoreConfig(), {'extra_behavior': 'allow'}, 'ignore'),
(None, {'extra_behavior': 'allow'}, 'ignore'),
(core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': 'allow'}, 'ignore'),
],
)
def test_extra_behavior_ignore(
config: Union[core_schema.CoreConfig, None],
schema_extra_behavior_kw: dict[str, Any],
validate_fn_extra_kw: Union[ExtraBehavior, None],
):
v = SchemaValidator(
core_schema.model_fields_schema(
{'f': core_schema.model_field(core_schema.str_schema())}, **schema_extra_behavior_kw
),
config=config,
)
m, model_extra, fields_set = v.validate_python({'f': 'x', 'extra_field': 123}, extra=validate_fn_extra_kw)
assert m == {'f': 'x'}
assert model_extra is None
assert fields_set == {'f'}
v.validate_assignment(m, 'f', 'y', extra=validate_fn_extra_kw)
assert m['f'] == 'y'
# even if we ignore extra attributes during initialization / validation
# we never ignore them during assignment
# instead if extra='ignore' was set (or nothing was set since that's the default)
# we treat it as if it were extra='forbid'
with pytest.raises(ValidationError) as exc_info:
v.validate_assignment(m, 'not_f', 'xyz', extra=validate_fn_extra_kw)
assert exc_info.value.errors(include_url=False) == [
{
'type': 'no_such_attribute',
'loc': ('not_f',),
'msg': "Object has no attribute 'not_f'",
'input': 'xyz',
'ctx': {'attribute': 'not_f'},
}
]
assert 'not_f' not in m
def test_extra_behavior_allow_keys_validation() -> None:
v = SchemaValidator(
core_schema.model_fields_schema(
{}, extra_behavior='allow', extras_keys_schema=core_schema.str_schema(max_length=3)
)
)
m, model_extra, fields_set = v.validate_python({'ext': 123})
assert m == {}
assert model_extra == {'ext': 123}
assert fields_set == {'ext'}
with pytest.raises(ValidationError) as exc_info:
v.validate_python({'extra_too_long': 123})
assert exc_info.value.errors()[0]['type'] == 'string_too_long'
@pytest.mark.parametrize('config_by_alias', [None, True, False])
@pytest.mark.parametrize('config_by_name', [None, True, False])
@pytest.mark.parametrize('runtime_by_alias', [None, True, False])
@pytest.mark.parametrize('runtime_by_name', [None, True, False])
def test_by_alias_and_name_config_interaction(
config_by_alias: Union[bool, None],
config_by_name: Union[bool, None],
runtime_by_alias: Union[bool, None],
runtime_by_name: Union[bool, None],
) -> None:
"""This test reflects the priority that applies for config vs runtime validation alias configuration.
Runtime values take precedence over config values, when set.
By default, by_alias is True and by_name is False.
"""
if config_by_alias is False and config_by_name is False and runtime_by_alias is False and runtime_by_name is False:
pytest.skip("Can't have both by_alias and by_name as effectively False")
class Model:
def __init__(self, my_field: int) -> None:
self.my_field = my_field
core_config = {
**({'validate_by_alias': config_by_alias} if config_by_alias is not None else {}),
**({'validate_by_name': config_by_name} if config_by_name is not None else {}),
}
schema = core_schema.model_schema(
Model,
core_schema.model_fields_schema(
{
'my_field': core_schema.model_field(core_schema.int_schema(), validation_alias='my_alias'),
}
),
config=core_schema.CoreConfig(**core_config),
)
s = SchemaValidator(schema)
alias_allowed = next(x for x in (runtime_by_alias, config_by_alias, True) if x is not None)
name_allowed = next(x for x in (runtime_by_name, config_by_name, False) if x is not None)
if alias_allowed:
assert s.validate_python({'my_alias': 1}, by_alias=runtime_by_alias, by_name=runtime_by_name).my_field == 1
if name_allowed:
assert s.validate_python({'my_field': 1}, by_alias=runtime_by_alias, by_name=runtime_by_name).my_field == 1
| TestOnError |
python | mlflow__mlflow | mlflow/entities/span_status.py | {
"start": 324,
"end": 1882
} | class ____(str, Enum):
"""Enum for status code of a span"""
# Uses the same set of status codes as OpenTelemetry
UNSET = "UNSET"
OK = "OK"
ERROR = "ERROR"
def to_otel_proto_status_code_name(self) -> str:
"""
Convert the SpanStatusCode to the corresponding OpenTelemetry protobuf enum name.
"""
proto_code = OtelStatus.StatusCode
mapping = {
SpanStatusCode.UNSET: proto_code.Name(proto_code.STATUS_CODE_UNSET),
SpanStatusCode.OK: proto_code.Name(proto_code.STATUS_CODE_OK),
SpanStatusCode.ERROR: proto_code.Name(proto_code.STATUS_CODE_ERROR),
}
return mapping[self]
@staticmethod
def from_otel_proto_status_code_name(status_code_name: str) -> SpanStatusCode:
"""
Convert an OpenTelemetry protobuf enum name to the corresponding SpanStatusCode enum value.
"""
proto_code = OtelStatus.StatusCode
mapping = {
proto_code.Name(proto_code.STATUS_CODE_UNSET): SpanStatusCode.UNSET,
proto_code.Name(proto_code.STATUS_CODE_OK): SpanStatusCode.OK,
proto_code.Name(proto_code.STATUS_CODE_ERROR): SpanStatusCode.ERROR,
}
try:
return mapping[status_code_name]
except KeyError:
raise MlflowException(
f"Invalid status code name: {status_code_name}. "
f"Valid values are: {', '.join(mapping.keys())}",
error_code=INVALID_PARAMETER_VALUE,
)
@dataclass
| SpanStatusCode |
python | gevent__gevent | src/gevent/threadpool.py | {
"start": 9189,
"end": 22453
} | class ____(GroupMappingMixin):
"""
A pool of native worker threads.
This can be useful for CPU intensive functions, or those that
otherwise will not cooperate with gevent. The best functions to execute
in a thread pool are small functions with a single purpose; ideally they release
the CPython GIL. Such functions are extension functions implemented in C.
It implements the same operations as a :class:`gevent.pool.Pool`,
but using threads instead of greenlets.
.. note:: The method :meth:`apply_async` will always return a new
greenlet, bypassing the threadpool entirely.
Most users will not need to create instances of this class. Instead,
use the threadpool already associated with gevent's hub::
pool = gevent.get_hub().threadpool
result = pool.spawn(lambda: "Some func").get()
.. important:: It is only possible to use instances of this class from
the thread running their hub. Typically that means from the thread that
created them. Using the pattern shown above takes care of this.
There is no gevent-provided way to have a single process-wide limit on the
number of threads in various pools when doing that, however. The suggested
way to use gevent and threadpools is to have a single gevent hub
and its one threadpool (which is the default without doing any extra work).
Only dispatch minimal blocking functions to the threadpool, functions that
do not use the gevent hub.
The `len` of instances of this class is the number of enqueued
(unfinished) tasks.
Just before a task starts running in a worker thread,
the values of :func:`threading.setprofile` and :func:`threading.settrace`
are consulted. Any values there are installed in that thread for the duration
of the task (using :func:`sys.setprofile` and :func:`sys.settrace`, respectively).
(Because worker threads are long-lived and outlast any given task, this arrangement
lets the hook functions change between tasks, but does not let them see the
bookkeeping done by the worker thread itself.)
.. caution:: Instances of this class are only true if they have
unfinished tasks.
.. versionchanged:: 1.5a3
The undocumented ``apply_e`` function, deprecated since 1.1,
was removed.
.. versionchanged:: 20.12.0
Install the profile and trace functions in the worker thread while
the worker thread is running the supplied task.
.. versionchanged:: 22.08.0
Add the option to let idle threads expire and be removed
from the pool after *idle_task_timeout* seconds (-1 for no
timeout)
"""
__slots__ = (
'hub',
'_maxsize',
# A Greenlet that runs to adjust the number of worker
# threads.
'manager',
# The PID of the process we were created in.
# Used to help detect a fork and then re-create
# internal state.
'pid',
'fork_watcher',
# A semaphore initialized with ``maxsize`` counting the
# number of available worker threads we have. As a
# gevent.lock.Semaphore, this is only safe to use from a single
# native thread.
'_available_worker_threads_greenlet_sem',
# A set of running or pending _WorkerGreenlet objects;
# we rely on the GIL for thread safety.
'_worker_greenlets',
# The task queue is itself safe to use from multiple
# native threads.
'task_queue',
'_idle_task_timeout',
)
_WorkerGreenlet = _WorkerGreenlet
def __init__(self, maxsize, hub=None, idle_task_timeout=-1):
if hub is None:
hub = get_hub()
self.hub = hub
self.pid = os.getpid()
self.manager = None
self.task_queue = Queue()
self.fork_watcher = None
self._idle_task_timeout = idle_task_timeout
self._worker_greenlets = set()
self._maxsize = 0
# Note that by starting with 1, we actually allow
# maxsize + 1 tasks in the queue.
self._available_worker_threads_greenlet_sem = Semaphore(1, hub)
self._set_maxsize(maxsize)
self.fork_watcher = hub.loop.fork(ref=False)
def _register_worker(self, worker):
self._worker_greenlets.add(worker)
def _unregister_worker(self, worker):
self._worker_greenlets.discard(worker)
def _set_maxsize(self, maxsize):
if not isinstance(maxsize, integer_types):
raise TypeError('maxsize must be integer: %r' % (maxsize, ))
if maxsize < 0:
raise ValueError('maxsize must not be negative: %r' % (maxsize, ))
difference = maxsize - self._maxsize
self._available_worker_threads_greenlet_sem.counter += difference
self._maxsize = maxsize
self.adjust()
# make sure all currently blocking spawn() start unlocking if maxsize increased
self._available_worker_threads_greenlet_sem._start_notify()
def _get_maxsize(self):
return self._maxsize
maxsize = property(_get_maxsize, _set_maxsize, doc="""\
The maximum allowed number of worker threads.
This is also (approximately) a limit on the number of tasks that
can be queued without blocking the waiting greenlet. If this many
tasks are already running, then the next greenlet that submits a task
will block waiting for a task to finish.
""")
def __repr__(self, _format_hub=_format_hub):
return '<%s at 0x%x tasks=%s size=%s maxsize=%s hub=%s>' % (
self.__class__.__name__,
id(self),
len(self), self.size, self.maxsize,
_format_hub(self.hub),
)
def __len__(self):
# XXX just do unfinished_tasks property
# Note that this becomes the boolean value of this class,
# that's probably not what we want!
return self.task_queue.unfinished_tasks
def _get_size(self):
return len(self._worker_greenlets)
def _set_size(self, size):
if size < 0:
raise ValueError('Size of the pool cannot be negative: %r' % (size, ))
if size > self._maxsize:
raise ValueError('Size of the pool cannot be bigger than maxsize: %r > %r' % (size, self._maxsize))
if self.manager:
self.manager.kill()
while len(self._worker_greenlets) < size:
self._add_thread()
delay = self.hub.loop.approx_timer_resolution
while len(self._worker_greenlets) > size:
while len(self._worker_greenlets) - size > self.task_queue.unfinished_tasks:
self.task_queue.put(None)
if getcurrent() is self.hub:
break
sleep(delay)
delay = min(delay * 2, .05)
if self._worker_greenlets:
self.fork_watcher.start(self._on_fork)
else:
self.fork_watcher.stop()
size = property(_get_size, _set_size, doc="""\
The number of running pooled worker threads.
Setting this attribute will add or remove running
worker threads, up to `maxsize`.
Initially there are no pooled running worker threads, and
threads are created on demand to satisfy concurrent
requests up to `maxsize` threads.
""")
def _on_fork(self):
# fork() only leaves one thread; also screws up locks;
# let's re-create locks and threads, and do our best to
# clean up any worker threads left behind.
# NOTE: See comment in gevent.hub.reinit.
pid = os.getpid()
if pid != self.pid:
# The OS threads have been destroyed, but the Python
# objects may live on, creating refcount "leaks". Python 2
# leaves dead frames (those that are for dead OS threads)
# around; Python 3.8 does not.
thread_ident_to_frame = dict(sys._current_frames())
for worker in list(self._worker_greenlets):
frame = thread_ident_to_frame.get(worker._thread_ident)
clear_stack_frames(frame)
worker.cleanup(worker._hub_of_worker)
# We can't throw anything to the greenlet, nor can we
# switch to it or set a parent. Those would all be cross-thread
# operations, which aren't allowed.
worker.__dict__.clear()
# We've cleared f_locals and on Python 3.4, possibly the actual
# array locals of the stack frame, but the task queue may still be
# referenced if we didn't actually get all the locals. Shut it down
# and clear it before we throw away our reference.
self.task_queue.kill()
self.__init__(self._maxsize)
def join(self):
"""Waits until all outstanding tasks have been completed."""
delay = max(0.0005, self.hub.loop.approx_timer_resolution)
while self.task_queue.unfinished_tasks > 0:
sleep(delay)
delay = min(delay * 2, .05)
def kill(self):
self.size = 0
self.fork_watcher.close()
def _adjust_step(self):
# if there is a possibility & necessity for adding a thread, do it
while (len(self._worker_greenlets) < self._maxsize
and self.task_queue.unfinished_tasks > len(self._worker_greenlets)):
self._add_thread()
# while the number of threads is more than maxsize, kill one
# we do not check what's already in task_queue - it could be all Nones
while len(self._worker_greenlets) - self._maxsize > self.task_queue.unfinished_tasks:
self.task_queue.put(None)
if self._worker_greenlets:
self.fork_watcher.start(self._on_fork)
elif self.fork_watcher is not None:
self.fork_watcher.stop()
def _adjust_wait(self):
delay = self.hub.loop.approx_timer_resolution
while True:
self._adjust_step()
if len(self._worker_greenlets) <= self._maxsize:
return
sleep(delay)
delay = min(delay * 2, .05)
def adjust(self):
self._adjust_step()
if not self.manager and len(self._worker_greenlets) > self._maxsize:
# might need to feed more Nones into the pool to shutdown
# threads.
self.manager = Greenlet.spawn(self._adjust_wait)
def _add_thread(self):
self._WorkerGreenlet(self)
def spawn(self, func, *args, **kwargs):
"""
Add a new task to the threadpool that will run ``func(*args,
**kwargs)``.
Waits until a slot is available. Creates a new native thread
if necessary.
This must only be called from the native thread that owns this
object's hub. This is because creating the necessary data
structures to communicate back to this thread isn't thread
safe, so the hub must not be running something else. Also,
ensuring the pool size stays correct only works within a
single thread.
:return: A :class:`gevent.event.AsyncResult`.
:raises InvalidThreadUseError: If called from a different thread.
.. versionchanged:: 1.5
Document the thread-safety requirements.
"""
if self.hub != get_hub():
raise InvalidThreadUseError
while 1:
semaphore = self._available_worker_threads_greenlet_sem
semaphore.acquire()
if semaphore is self._available_worker_threads_greenlet_sem:
# If we were asked to change size or re-init we could have changed
# semaphore objects.
break
# Returned; lets a greenlet in this thread wait
# for the pool thread. Signaled when the async watcher
# is fired from the pool thread back into this thread.
result = AsyncResult()
task_queue = self.task_queue
# Encapsulates the async watcher the worker thread uses to
# call back into this thread. Immediately allocates and starts the
# async watcher in this thread, because it uses this hub/loop,
# which is not thread safe.
thread_result = None
try:
thread_result = ThreadResult(result, self.hub, semaphore.release)
task_queue.put((func, args, kwargs, thread_result))
self.adjust()
except:
if thread_result is not None:
thread_result.destroy_in_main_thread()
semaphore.release()
raise
return result
def _apply_immediately(self):
# If we're being called from a different thread than the one that
# created us, e.g., because a worker task is trying to use apply()
# recursively, we have no choice but to run the task immediately;
# if we try to AsyncResult.get() in the worker thread, it's likely to have
# nothing to switch to and lead to a LoopExit.
return get_hub() is not self.hub
def _apply_async_cb_spawn(self, callback, result):
callback(result)
def _apply_async_use_greenlet(self):
# Always go to Greenlet because our self.spawn uses threads
return True
| ThreadPool |
python | explosion__spaCy | spacy/lang/eu/__init__.py | {
"start": 161,
"end": 294
} | class ____(BaseDefaults):
suffixes = TOKENIZER_SUFFIXES
stop_words = STOP_WORDS
lex_attr_getters = LEX_ATTRS
| BasqueDefaults |
python | Farama-Foundation__Gymnasium | gymnasium/envs/functional_jax_env.py | {
"start": 510,
"end": 3294
} | class ____(gym.Env, Generic[StateType]):
"""A conversion layer for jax-based environments."""
state: StateType
rng: PRNGKeyType
def __init__(
self,
func_env: FuncEnv,
metadata: dict[str, Any] | None = None,
render_mode: str | None = None,
spec: EnvSpec | None = None,
):
"""Initialize the environment from a FuncEnv."""
if metadata is None:
# metadata.get("jax", False) can be used downstream to know that the environment returns jax arrays
metadata = {"render_mode": [], "jax": True}
self.func_env = func_env
self.observation_space = func_env.observation_space
self.action_space = func_env.action_space
self.metadata = metadata
self.render_mode = render_mode
self.spec = spec
if self.render_mode == "rgb_array":
self.render_state = self.func_env.render_init()
else:
self.render_state = None
np_random, _ = seeding.np_random()
seed = np_random.integers(0, 2**32 - 1, dtype="uint32")
self.rng = jrng.PRNGKey(seed)
def reset(self, *, seed: int | None = None, options: dict | None = None):
"""Resets the environment using the seed."""
super().reset(seed=seed)
if seed is not None:
self.rng = jrng.PRNGKey(seed)
rng, self.rng = jrng.split(self.rng)
self.state = self.func_env.initial(rng=rng)
obs = self.func_env.observation(self.state, rng)
info = self.func_env.state_info(self.state)
return obs, info
def step(self, action: ActType):
"""Steps through the environment using the action."""
rng, self.rng = jrng.split(self.rng)
next_state = self.func_env.transition(self.state, action, rng)
observation = self.func_env.observation(next_state, rng)
reward = self.func_env.reward(self.state, action, next_state, rng)
terminated = self.func_env.terminal(next_state, rng)
info = self.func_env.transition_info(self.state, action, next_state)
self.state = next_state
return observation, float(reward), bool(terminated), False, info
def render(self):
"""Returns the render state if `render_mode` is "rgb_array"."""
if self.render_mode == "rgb_array":
self.render_state, image = self.func_env.render_image(
self.state, self.render_state
)
return image
else:
raise NotImplementedError
def close(self):
"""Closes the environments and render state if set."""
if self.render_state is not None:
self.func_env.render_close(self.render_state)
self.render_state = None
| FunctionalJaxEnv |
python | TheAlgorithms__Python | graphs/depth_first_search_2.py | {
"start": 48,
"end": 3319
} | class ____:
def __init__(self):
self.vertex = {}
# for printing the Graph vertices
def print_graph(self) -> None:
"""
Print the graph vertices.
Example:
>>> g = Graph()
>>> g.add_edge(0, 1)
>>> g.add_edge(0, 2)
>>> g.add_edge(1, 2)
>>> g.add_edge(2, 0)
>>> g.add_edge(2, 3)
>>> g.add_edge(3, 3)
>>> g.print_graph()
{0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}
0 -> 1 -> 2
1 -> 2
2 -> 0 -> 3
3 -> 3
"""
print(self.vertex)
for i in self.vertex:
print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))
# for adding the edge between two vertices
def add_edge(self, from_vertex: int, to_vertex: int) -> None:
"""
Add an edge between two vertices.
:param from_vertex: The source vertex.
:param to_vertex: The destination vertex.
Example:
>>> g = Graph()
>>> g.add_edge(0, 1)
>>> g.add_edge(0, 2)
>>> g.print_graph()
{0: [1, 2]}
0 -> 1 -> 2
"""
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(to_vertex)
else:
# else make a new vertex
self.vertex[from_vertex] = [to_vertex]
def dfs(self) -> None:
"""
Perform depth-first search (DFS) traversal on the graph
and print the visited vertices.
Example:
>>> g = Graph()
>>> g.add_edge(0, 1)
>>> g.add_edge(0, 2)
>>> g.add_edge(1, 2)
>>> g.add_edge(2, 0)
>>> g.add_edge(2, 3)
>>> g.add_edge(3, 3)
>>> g.dfs()
0 1 2 3
"""
# visited array for storing already visited nodes
visited = [False] * len(self.vertex)
# call the recursive helper function
for i in range(len(self.vertex)):
if not visited[i]:
self.dfs_recursive(i, visited)
def dfs_recursive(self, start_vertex: int, visited: list) -> None:
"""
Perform a recursive depth-first search (DFS) traversal on the graph.
:param start_vertex: The starting vertex for the traversal.
:param visited: A list to track visited vertices.
Example:
>>> g = Graph()
>>> g.add_edge(0, 1)
>>> g.add_edge(0, 2)
>>> g.add_edge(1, 2)
>>> g.add_edge(2, 0)
>>> g.add_edge(2, 3)
>>> g.add_edge(3, 3)
>>> visited = [False] * len(g.vertex)
>>> g.dfs_recursive(0, visited)
0 1 2 3
"""
# mark start vertex as visited
visited[start_vertex] = True
print(start_vertex, end="")
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
print(" ", end="")
self.dfs_recursive(i, visited)
if __name__ == "__main__":
import doctest
doctest.testmod()
g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
| Graph |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-gridly/source_gridly/source.py | {
"start": 2712,
"end": 4288
} | class ____(AbstractSource):
def check_connection(self, logger, config) -> Tuple[bool, any]:
api_key = config.get("api_key")
grid_id = config.get("grid_id")
auth = TokenAuthenticator(auth_method="ApiKey", token=api_key)
logger.info(f"Checking connection on grid {grid_id}")
Helpers.get_grid(auth=auth, grid_id=grid_id)
return True, None
def discover(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteCatalog:
api_key = config.get("api_key")
grid_id = config.get("grid_id")
auth = TokenAuthenticator(auth_method="ApiKey", token=api_key)
logger.info(f"Running discovery on grid {grid_id}")
views = Helpers.get_views(auth=auth, grid_id=grid_id)
streams = []
for view in views:
stream = Helpers.get_airbyte_stream(view)
streams.append(stream)
return AirbyteCatalog(streams=streams)
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
api_key = config.get("api_key")
grid_id = config.get("grid_id")
auth = TokenAuthenticator(auth_method="ApiKey", token=api_key)
views = Helpers.get_views(auth=auth, grid_id=grid_id)
streams = []
for view in views:
view_id = view.get("id")
view_name = view.get("name")
schema = Helpers.get_json_schema(view)
stream = GridlyStream(view_id=view_id, view_name=view_name, schema=schema, authenticator=auth)
streams.append(stream)
return streams
| SourceGridly |
python | ApeWorX__ape | src/ape_pm/project.py | {
"start": 571,
"end": 4224
} | class ____(ProjectAPI):
"""
Allows traditional Brownie projects to work with Ape.
This class implements the necessary methods in order
to detect config settings in a Brownie project and
treat it like an Ape project.
"""
@property
def brownie_config_file(self) -> Path:
return self.path / "brownie-config.yaml"
@property
def is_valid(self) -> bool:
return self.brownie_config_file.is_file()
def extract_config(self, **overrides) -> ApeConfig:
migrated_config_data: dict[str, Any] = {}
text = self.brownie_config_file.read_text()
text = expand_environment_variables(text)
try:
brownie_config_data = safe_load(text) or {}
except Exception:
brownie_config_data = {}
contracts_folder = brownie_config_data.get("contracts_folder", "contracts")
migrated_config_data["contracts_folder"] = contracts_folder
# Migrate dependencies
dependencies = []
for dependency in brownie_config_data.get("dependencies", []):
dependency_dict = {}
dep_parts = dependency.split("/")
gh_name = dep_parts[0]
dep_name = gh_name.lower()
if len(dep_parts) > 1:
dependency_dict["name"] = dep_name
if "@" in dep_parts[1]:
suffix_parts = dep_parts[1].split("@")
dependency_dict["github"] = f"{gh_name}/{suffix_parts[0]}"
dependency_dict["version"] = suffix_parts[1]
else:
dependency_dict["github"] = dep_parts[1]
if dependency_dict:
dependencies.append(dependency_dict)
if dependencies:
migrated_config_data["dependencies"] = dependencies
# Migrate solidity remapping
import_remapping = []
solidity_version = None
if "compiler" in brownie_config_data:
compiler_config = brownie_config_data["compiler"]
if "solc" in compiler_config:
solidity_config = compiler_config["solc"]
solidity_version = solidity_config.get("version")
available_dependencies = [d["name"] for d in dependencies]
brownie_import_remapping = solidity_config.get("remappings", [])
for remapping in brownie_import_remapping:
parts = remapping.split("=")
map_key = parts[0]
real_path = parts[1]
real_path_parts = real_path.split("/")
dependency_name = real_path_parts[0].lower()
if dependency_name in available_dependencies:
suffix = real_path_parts[1]
if "@" in suffix:
version_id = suffix.split("@")[1]
entry = f"{dependency_name}/{version_id}"
import_remapping.append(f"{map_key}={entry}")
else:
import_remapping.append(f"{map_key}={dependency_name}")
if import_remapping or solidity_version:
migrated_solidity_config: dict[str, Any] = {}
if import_remapping:
migrated_solidity_config["import_remapping"] = import_remapping
if solidity_version:
migrated_solidity_config["version"] = solidity_version
migrated_config_data["solidity"] = migrated_solidity_config
model = {**migrated_config_data, **overrides}
return ApeConfig.model_validate(model)
| BrownieProject |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/rpc_test.py | {
"start": 16299,
"end": 17464
} | class ____:
def __init__(self, trainers):
self.lock = Lock()
self.trainers = trainers
self.iteration = 0
self.updates = 0
self.futures = []
self.total = None
self.gradient = None
@staticmethod
def get_gradient(rref):
return rref.local_value().gradient
@staticmethod
@rpc.functions.async_execution
def average(rref, riteration, tensor):
self = rref.local_value()
fut = torch.futures.Future()
with self.lock:
if riteration > self.iteration:
self.iteration = riteration
self.updates = 0
self.futures.clear()
self.futures.append(fut)
if self.total is None:
self.total = tensor
else:
self.total += tensor
self.updates += 1
if self.trainers == self.updates:
self.gradient = self.total / float(self.trainers)
for fut in self.futures:
result = self.total / float(self.trainers)
fut.set_result(result)
return fut
| MyParameterServer |
python | gevent__gevent | src/gevent/monkey/_errors.py | {
"start": 145,
"end": 425
} | class ____(AttributeError):
"""
Raised when ``__implements__`` is incorrect.
"""
def __init__(self, module):
AttributeError.__init__(
self,
"Module %r has a bad or missing value for __implements__" % (module,)
)
| _BadImplements |
python | ansible__ansible | lib/ansible/module_utils/facts/hardware/base.py | {
"start": 2003,
"end": 2724
} | class ____(BaseFactCollector):
name = 'hardware'
_fact_ids = set(['processor',
'processor_cores',
'processor_count',
# TODO: mounts isn't exactly hardware
'mounts',
'devices']) # type: t.Set[str]
_fact_class = Hardware
def collect(self, module=None, collected_facts=None):
collected_facts = collected_facts or {}
if not module:
return {}
# Network munges cached_facts by side effect, so give it a copy
facts_obj = self._fact_class(module)
facts_dict = facts_obj.populate(collected_facts=collected_facts)
return facts_dict
| HardwareCollector |
python | plotly__plotly.py | plotly/graph_objs/carpet/aaxis/_tickfont.py | {
"start": 233,
"end": 9881
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "carpet.aaxis"
_path_str = "carpet.aaxis.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the tick font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.carpet.aaxis.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.carpet.aaxis.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.carpet.aaxis.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | langchain-ai__langchain | libs/langchain/langchain_classic/agents/agent.py | {
"start": 20143,
"end": 22697
} | class ____(BaseSingleActionAgent):
"""Base class for single action agents."""
llm_chain: LLMChain
"""LLMChain to use for agent."""
output_parser: AgentOutputParser
"""Output parser to use for agent."""
stop: list[str]
"""List of strings to stop on."""
@property
def input_keys(self) -> list[str]:
"""Return the input keys.
Returns:
List of input keys.
"""
return list(set(self.llm_chain.input_keys) - {"intermediate_steps"})
@override
def dict(self, **kwargs: Any) -> builtins.dict:
"""Return dictionary representation of agent."""
_dict = super().dict()
del _dict["output_parser"]
return _dict
def plan(
self,
intermediate_steps: list[tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> AgentAction | AgentFinish:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with the observations.
callbacks: Callbacks to run.
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
output = self.llm_chain.run(
intermediate_steps=intermediate_steps,
stop=self.stop,
callbacks=callbacks,
**kwargs,
)
return self.output_parser.parse(output)
async def aplan(
self,
intermediate_steps: list[tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> AgentAction | AgentFinish:
"""Async given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations.
callbacks: Callbacks to run.
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
output = await self.llm_chain.arun(
intermediate_steps=intermediate_steps,
stop=self.stop,
callbacks=callbacks,
**kwargs,
)
return self.output_parser.parse(output)
def tool_run_logging_kwargs(self) -> builtins.dict:
"""Return logging kwargs for tool run."""
return {
"llm_prefix": "",
"observation_prefix": "" if len(self.stop) == 0 else self.stop[0],
}
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
)
| LLMSingleActionAgent |
python | geekcomputers__Python | nitkarshchourasia/to_sort/determine_sign.py | {
"start": 436,
"end": 1827
} | class ____:
def __init__(self, num=None):
if num is None:
self.get_number()
else:
self.num = round(self.convert_to_float(num), 1)
# TODO: Word2number
# Need to further understand this.
# ? NEED TO UNDERSTAND THIS. FOR SURETY.
def convert_to_float(self, input_value):
try:
return float(input_value)
except ValueError:
try:
return w2n.word_to_num(input_value)
except ValueError:
raise ValueError(
"Invalid input. Please enter a number or a word representing a number."
)
# Now use this in other methods.
def get_number(self):
self.input_value = format(float(input("Enter a number: ")), ".1f")
self.num = round(self.convert_to_float(self.input_value), 1)
return self.num
# Do I want to return the self.num?
# I think I have to just store it as it is.
def determine_sign(self):
if self.num > 0:
return "Positive number"
elif self.num < 0:
return "Negative number"
else:
return "Zero"
def __repr__(self):
return f"Number: {self.num}, Sign: {self.determine_sign()}"
if __name__ == "__main__":
number1 = DetermineSign()
print(number1.determine_sign())
# !Incomplete.
| DetermineSign |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 357853,
"end": 358597
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of UpdateProjectCard"""
__schema__ = github_schema
__field_names__ = ("project_card_id", "is_archived", "note", "client_mutation_id")
project_card_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectCardId")
"""The ProjectCard ID to update."""
is_archived = sgqlc.types.Field(Boolean, graphql_name="isArchived")
"""Whether or not the ProjectCard should be archived"""
note = sgqlc.types.Field(String, graphql_name="note")
"""The note of ProjectCard."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateProjectCardInput |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1209713,
"end": 1210127
} | class ____(sgqlc.types.Type, Node):
"""Represents a given language found in repositories."""
__schema__ = github_schema
__field_names__ = ("color", "name")
color = sgqlc.types.Field(String, graphql_name="color")
"""The color defined for the current language."""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The name of the current language."""
| Language |
python | jina-ai__jina | jina/proto/docarray_v1/pb/jina_pb2_grpc.py | {
"start": 24547,
"end": 25082
} | class ____(object):
"""*
jina gRPC service to trigger a snapshot at the Executor Runtime.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.restore_status = channel.unary_unary(
'/jina.JinaExecutorRestoreProgress/restore_status',
request_serializer=jina__pb2.RestoreId.SerializeToString,
response_deserializer=jina__pb2.RestoreSnapshotStatusProto.FromString,
)
| JinaExecutorRestoreProgressStub |
python | gevent__gevent | src/gevent/tests/test__compat.py | {
"start": 1295,
"end": 1439
} | class ____(TestFSPath):
def _callFUT(self, arg):
return os.fspath(arg)
if __name__ == '__main__':
unittest.main()
| TestNativeFSPath |
python | wandb__wandb | wandb/errors/term.py | {
"start": 1759,
"end": 10429
} | class ____(Protocol):
"""Portion of the standard logging.Logger used in this module."""
def info(self, msg: str) -> None: ...
def warning(self, msg: str) -> None: ...
def error(self, msg: str) -> None: ...
def termsetup(
settings: wandb.Settings,
logger: SupportsLeveledLogging | None,
) -> None:
"""Configure the global logging functions.
Args:
settings: The settings object passed to wandb.setup() or wandb.init().
logger: A fallback logger to use for "silent" mode. In this mode,
the logger is used instead of printing to stderr.
"""
global _silent, _show_info, _show_warnings, _show_errors, _logger
_silent = settings.silent
_show_info = settings.show_info
_show_warnings = settings.show_warnings
_show_errors = settings.show_errors
_logger = logger
@contextlib.contextmanager
def dynamic_text() -> Iterator[DynamicBlock | None]:
"""A context manager that provides a handle to a new dynamic text area.
The text goes to stderr. Returns None if dynamic text is not supported.
Dynamic text must only be used while `wandb` has control of the terminal,
or else text written by other programs will be overwritten. It's
appropriate to use during a blocking operation.
```
with term.dynamic_text() as text_area:
if text_area:
text_area.set_text("Writing to a terminal.")
for i in range(2000):
text_area.set_text(f"Still going... ({i}/2000)")
time.sleep(0.001)
else:
wandb.termlog("Writing to a file or dumb terminal.")
time.sleep(1)
wandb.termlog("Finished 1000/2000 tasks, still working...")
time.sleep(1)
wandb.termlog("Done!", err=True)
```
"""
# For now, dynamic text always corresponds to the "INFO" level.
if _silent or not _show_info:
yield None
return
# NOTE: In Jupyter notebooks, this will return False. Notebooks
# support ANSI color sequences and the '\r' character, but not
# cursor motions or line clear commands.
if not _sys_stderr_isatty() or _is_term_dumb():
yield None
return
# NOTE: On Windows < 10, ANSI escape sequences such as \x1b[Am and \x1b[2K,
# used to move the cursor and clear text, aren't supported by the built-in
# console. However, we rely on the click library's use of colorama which
# emulates support for such sequences.
#
# For this reason, we don't have special checks for Windows.
block = DynamicBlock()
with _dynamic_text_lock:
_dynamic_blocks.append(block)
try:
yield block
finally:
with _dynamic_text_lock:
block._lines_to_print = []
_l_rerender_dynamic_blocks()
_dynamic_blocks.remove(block)
def _sys_stderr_isatty() -> bool:
"""Returns sys.stderr.isatty().
Defined here for patching in tests.
"""
return _isatty(sys.stderr)
def _sys_stdin_isatty() -> bool:
"""Returns sys.stdin.isatty().
Defined here for patching in tests.
"""
return _isatty(sys.stdin)
def _isatty(stream: object) -> bool:
"""Returns true if the stream defines isatty and returns true for it.
This is needed because some people patch `sys.stderr` / `sys.stdin`
with incompatible objects, e.g. a Logger.
Args:
stream: An IO object like stdin or stderr.
"""
isatty = getattr(stream, "isatty", None)
if not isatty or not callable(isatty):
return False
try:
return bool(isatty())
except TypeError: # if isatty has required arguments
return False
def _is_term_dumb() -> bool:
"""Returns whether the TERM environment variable is set to 'dumb'.
This is a convention to indicate that the terminal doesn't support
ANSI sequences like colors, clearing the screen and positioning the cursor.
"""
return os.getenv("TERM") == "dumb"
def termlog(
string: str = "",
newline: bool = True,
repeat: bool = True,
prefix: bool = True,
) -> None:
r"""Log an informational message to stderr.
The message may contain ANSI color sequences and the \n character.
Colors are stripped if stderr is not a TTY.
Args:
string: The message to display.
newline: Whether to add a newline to the end of the string.
repeat: If false, then the string is not printed if an exact match has
already been printed through any of the other logging functions
in this file.
prefix: Whether to include the 'wandb:' prefix.
"""
_log(
string,
newline=newline,
repeat=repeat,
prefix=prefix,
silent=not _show_info,
)
def termwarn(
string: str,
newline: bool = True,
repeat: bool = True,
prefix: bool = True,
) -> None:
"""Log a warning to stderr.
The arguments are the same as for `termlog()`.
"""
string = "\n".join([f"{WARN_STRING} {s}" for s in string.split("\n")])
_log(
string,
newline=newline,
repeat=repeat,
prefix=prefix,
silent=not _show_warnings,
level=logging.WARNING,
)
def termerror(
string: str,
newline: bool = True,
repeat: bool = True,
prefix: bool = True,
) -> None:
"""Log an error to stderr.
The arguments are the same as for `termlog()`.
"""
string = "\n".join([f"{ERROR_STRING} {s}" for s in string.split("\n")])
_log(
string,
newline=newline,
repeat=repeat,
prefix=prefix,
silent=not _show_errors,
level=logging.ERROR,
)
def _in_jupyter() -> bool:
"""Returns True if we're in a Jupyter notebook."""
# Lazy import to avoid circular imports.
from wandb.sdk.lib import ipython
return ipython.in_jupyter()
def can_use_terminput() -> bool:
"""Returns True if terminput won't raise a NotATerminalError."""
if _silent or not _show_info or _is_term_dumb():
return False
from wandb import util
# TODO: Verify the databricks check is still necessary.
# Originally added to fix WB-5264.
if util._is_databricks():
return False
# isatty() returns false in Jupyter, but it's OK to output ANSI color
# sequences and to read from stdin.
return _in_jupyter() or (_sys_stderr_isatty() and _sys_stdin_isatty())
def terminput(
prompt: str,
*,
timeout: float | None = None,
hide: bool = False,
) -> str:
"""Prompt the user for input.
Args:
prompt: The prompt to display. The prompt is printed without a newline
and the cursor is positioned after the prompt's last character.
The prompt should end with whitespace.
timeout: A timeout after which to raise a TimeoutError.
Cannot be set if hide is True.
hide: If true, does not echo the characters typed by the user.
This is useful for passwords.
Returns:
The text typed by the user before pressing the 'return' key.
Raises:
TimeoutError: If a timeout was specified and expired.
NotATerminalError: If the output device is not capable, like if stderr
is redirected to a file, stdin is a pipe or closed, TERM=dumb is
set, or wandb is configured in 'silent' mode.
KeyboardInterrupt: If the user pressed Ctrl+C during the prompt.
"""
prefixed_prompt = f"{LOG_STRING}: {prompt}"
return _terminput(prefixed_prompt, timeout=timeout, hide=hide)
def _terminput(
prefixed_prompt: str,
*,
timeout: float | None = None,
hide: bool = False,
) -> str:
"""Implements terminput() and can be patched by tests."""
if not can_use_terminput():
raise NotATerminalError
if hide and timeout is not None:
# Only click.prompt() can hide, and only timed_input can time out.
raise NotImplementedError
if timeout is not None:
# Lazy import to avoid circular imports.
from wandb.sdk.lib.timed_input import timed_input
try:
return timed_input(
prefixed_prompt,
timeout=timeout,
err=True,
jupyter=_in_jupyter(),
)
except KeyboardInterrupt:
sys.stderr.write("\n")
raise
try:
return click.prompt(
prefixed_prompt,
prompt_suffix="",
hide_input=hide,
err=True,
)
except click.Abort:
sys.stderr.write("\n")
raise KeyboardInterrupt from None
| SupportsLeveledLogging |
python | pytorch__pytorch | torch/ao/nn/quantized/modules/batchnorm.py | {
"start": 3156,
"end": 4519
} | class ____(_BatchNorm):
r"""This is the quantized version of :class:`~torch.nn.BatchNorm3d`."""
_NNI_BN_RELU_MODULE = nni.BNReLU3d
def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None):
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(num_features, eps, momentum, **factory_kwargs)
def _get_name(self):
return "QuantizedBatchNorm3d"
def _check_input_dim(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 5:
raise ValueError("Input shape must be `(N, C, H, W)`!")
def forward(self, input: torch.Tensor) -> torch.Tensor:
# disabling this since this is not symbolically traceable
# self._check_input_dim(input)
return torch.ops.quantized.batch_norm3d(
input,
self.weight,
self.bias,
self.running_mean,
self.running_var,
self.eps,
self.scale,
self.zero_point,
)
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False): # type: ignore[override]
return _BatchNorm.from_float(
cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant
)
| BatchNorm3d |
python | run-llama__llama_index | llama-index-core/llama_index/core/voice_agents/base.py | {
"start": 312,
"end": 5197
} | class ____(ABC):
"""
Abstract class that serves as base for any Voice Agent.
Attributes:
ws (BaseVoiceAgentWebSocket): The websocket underlying the agent and providing the voice service.
interface (BaseVoiceAgentInterface): The audio input/output interface.
api_key (Optional[str]): API key (if needed). Defaults to None.
tools (Optional[List[BaseTool]]): List of tools for the agent to use (tool use should be adapted to the specific integration). Defaults to None.
_messages (List[ChatMessage]): Private attribute initialized as an empty list of ChatMessage, it should be populated with chat messages as the conversation goes on.
_events (List[BaseVoiceAgentEvent]): Private attribute initialized as an empty list of BaseVoiceAgentEvent, it should be populated with events as the conversation goes on.
"""
def __init__(
self,
ws: Optional[BaseVoiceAgentWebsocket] = None,
interface: Optional[BaseVoiceAgentInterface] = None,
ws_url: Optional[str] = None,
api_key: Optional[str] = None,
tools: Optional[List[BaseTool]] = None,
):
self.ws = ws
self.ws_url = ws_url
self.interface = interface
self.api_key = api_key
self.tools = tools
self._messages: List[ChatMessage] = []
self._events: List[BaseVoiceAgentEvent] = []
@abstractmethod
async def start(self, *args: Any, **kwargs: Any) -> None:
"""
Start the voice agent.
Args:
*args: Can take any positional argument.
**kwargs: Can take any keyword argument.
Returns:
out (None): This function does not return anything.
"""
...
@abstractmethod
async def send(self, audio: Any, *args: Any, **kwargs: Any) -> None:
"""
Send audio to the websocket underlying the voice agent.
Args:
audio (Any): audio data to send (generally as bytes or str, but it is kept open also to other types).
*args: Can take any positional argument.
**kwargs: Can take any keyword argument.
Returns:
out (None): This function does not return anything.
"""
...
@abstractmethod
async def handle_message(self, message: Any, *args: Any, **kwargs: Any) -> Any:
"""
Handle incoming message.
Args:
message (Any): incoming message (should be dict, but it is kept open also for other types).
*args: Can take any positional argument.
**kwargs: Can take any keyword argument.
Returns:
out (Any): This function can return any output.
"""
...
@abstractmethod
async def interrupt(self) -> None:
"""
Interrupt the input/output audio flow.
Args:
None
Returns:
out (None): This function does not return anything.
"""
...
@abstractmethod
async def stop(self) -> None:
"""
Stop the conversation with the voice agent.
Args:
None
Returns:
out (None): This function does not return anything.
"""
...
def export_messages(
self,
limit: Optional[int] = None,
filter: Optional[Callable[[List[ChatMessage]], List[ChatMessage]]] = None,
) -> List[ChatMessage]:
"""
Export all recorded messages during a conversation.
Args:
limit (Optional[int]): Maximum number of messages to return. Defaults to None.
filter (Optional[Callable[[List[ChatMessage]], List[ChatMessage]]]): Filter function. Defaults to None.
Returns:
out (List[ChatMessage]): exported messages.
"""
messages = self._messages
if limit:
if limit <= len(messages):
messages = messages[:limit]
if filter:
messages = filter(messages)
return messages
def export_events(
self,
limit: Optional[int] = None,
filter: Optional[
Callable[[List[BaseVoiceAgentEvent]], List[BaseVoiceAgentEvent]]
] = None,
) -> List[BaseVoiceAgentEvent]:
"""
Export all recorded events during a conversation.
Args:
limit (Optional[int]): Maximum number of events to return. Defaults to None.
filter (Optional[Callable[[List[BaseVoiceAgentEvent]], List[BaseVoiceAgentEvent]]]): Filter function. Defaults to None.
Returns:
out (List[BaseVoiceAgentEvent]): exported events.
"""
events = self._events
if limit:
if limit <= len(events):
events = events[:limit]
if filter:
events = filter(events)
return events
| BaseVoiceAgent |
python | Netflix__metaflow | metaflow/parameters.py | {
"start": 8843,
"end": 9993
} | class ____(object):
"""
This is a very simple wrapper to allow parameter "conversion" to be delayed until
the `_set_constants` function in FlowSpec. Typically, parameters are converted
by click when the command line option is processed. For some parameters, like
IncludeFile, this is too early as it would mean we would trigger the upload
of the file too early. If a parameter converts to a DelayedEvaluationParameter
object through the usual click mechanisms, `_set_constants` knows to invoke the
__call__ method on that DelayedEvaluationParameter; in that case, the __call__
method is invoked without any parameter. The return_str parameter will be used
by schedulers when they need to convert DelayedEvaluationParameters to a
string to store them
"""
def __init__(self, name, field, fun):
self._name = name
self._field = field
self._fun = fun
def __call__(self, return_str=False):
try:
return self._fun(return_str=return_str)
except Exception as e:
raise ParameterFieldFailed(self._name, self._field)
| DelayedEvaluationParameter |
python | keras-team__keras | keras/src/ops/einops.py | {
"start": 2587,
"end": 6268
} | class ____(Operation):
def call(self, tensor, pattern, **axes_lengths):
return rearrange(tensor, pattern, **axes_lengths)
def compute_output_spec(self, tensor, pattern, **axes_lengths):
input_pattern, output_pattern = re.split(r"\s*->\s*", pattern)
input_axes = re.findall(r"\w+|\(.*?\)", input_pattern)
output_axes = re.findall(r"\w+|\(.*?\)", output_pattern)
input_shape = shape(tensor)
axes_map = _create_axes_map(input_axes, input_shape, axes_lengths)
grouped_output_axes = _create_grouped_axes(output_axes)
output_shape = _compute_output_shape(axes_map, grouped_output_axes)
return KerasTensor(shape=output_shape, dtype=tensor.dtype)
@keras_export("keras.ops.rearrange")
def rearrange(tensor, pattern, **axes_lengths):
"""Rearranges the axes of a Keras tensor according to a specified pattern,
einops-style.
Args:
tensor: Input Keras tensor.
pattern: String describing the rearrangement in einops notation.
**axes_lengths: Keyword arguments specifying lengths of axes
when axes decomposition is used.
Returns:
Tensor: A Keras tensor with rearranged axes.
Follows the logic of:
1. If decomposition is needed, reshape to match decomposed dimensions.
2. Permute known and inferred axes to match the form of the output.
3. Reshape to match the desired output shape.
Example Usage:
```
>>> import numpy as np
>>> from keras.ops import rearrange
>>> images = np.random.rand(32, 30, 40, 3) # BHWC format
# Reordering to BCHW
>>> rearrange(images, 'b h w c -> b c h w').shape
TensorShape([32, 3, 30, 40])
# "Merge" along first axis - concat images from a batch
>>> rearrange(images, 'b h w c -> (b h) w c').shape
TensorShape([960, 40, 3])
# "Merge" along second axis - concat images horizontally
>>> rearrange(images, 'b h w c -> h (b w) c').shape
TensorShape([30, 1280, 3])
# Flatten images into a CHW vector
>>> rearrange(images, 'b h w c -> b (c h w)').shape
TensorShape([32, 3600])
# Decompose H and W axes into 4 smaller patches
>>> rearrange(images, 'b (h1 h) (w1 w) c -> (b h1 w1) h w c', h1=2, w1=2).shape
TensorShape([128, 15, 20, 3])
# Space-to-depth decomposition of input axes
>>> rearrange(images, 'b (h h1) (w w1) c -> b h w (c h1 w1)', h1=2, w1=2).shape
TensorShape([32, 15, 20, 12])
```
""" # noqa: E501
if any_symbolic_tensors((tensor,)):
return Rearrange().symbolic_call(tensor, pattern, **axes_lengths)
# Split the input and output patterns
input_pattern, output_pattern = re.split(r"\s*->\s*", pattern)
input_axes = re.findall(r"\w+|\(.*?\)", input_pattern)
output_axes = re.findall(r"\w+|\(.*?\)", output_pattern)
input_shape = shape(tensor)
# Create axes map, and flattened output group
axes_map = _create_axes_map(input_axes, input_shape, axes_lengths)
grouped_output_axes = _create_grouped_axes(output_axes)
flattened_output_axes = _flatten_group(grouped_output_axes)
# 1. Axes decomposition
decomposed_shapes = _compute_decomposed_shape(
input_axes, axes_lengths, axes_map
)
if decomposed_shapes != tensor.shape:
tensor = reshape(tensor, decomposed_shapes)
# 2. Transpose to match target shape
permute_order = _get_transpose_order(input_axes, flattened_output_axes)
tensor = transpose(tensor, permute_order)
# 3. Reshape to final target shape
output_shape = _compute_output_shape(axes_map, grouped_output_axes)
tensor = reshape(tensor, output_shape)
return tensor
| Rearrange |
python | tensorflow__tensorflow | tensorflow/python/tools/api/generator2/generator/generator_test.py | {
"start": 1872,
"end": 14870
} | class ____(parameterized.TestCase):
def test_get_public_api(self):
tmp_dir = self.create_tempdir()
write_test_data(tmp_dir.full_path)
expected_tensor_top_level = generator._Entrypoint(
module='tf',
name='Tensor',
exported_symbol=tensor_es,
)
expected = generator.PublicAPI(
v1_entrypoints_by_module=collections.defaultdict(set),
v2_entrypoints_by_module=collections.defaultdict(set),
v1_generated_imports_by_module=collections.defaultdict(set),
v2_generated_imports_by_module=collections.defaultdict(set),
docs_by_module={},
)
expected.v1_entrypoints_by_module['tf'].add(expected_tensor_top_level)
expected.v2_entrypoints_by_module['tf'].add(expected_tensor_top_level)
expected.v2_entrypoints_by_module['tf.experimental.numpy'].add(
generator._Entrypoint(
module='tf.experimental.numpy',
name='ndarray',
exported_symbol=tensor_es,
)
)
expected.v2_generated_imports_by_module['tf'].add('tf.experimental')
expected.v2_generated_imports_by_module['tf.experimental'].add(
'tf.experimental.numpy'
)
got = generator.get_public_api(
[os.path.join(tmp_dir, f) for f in test_data],
file_prefixes_to_strip=[tmp_dir.full_path],
packages_to_ignore=['tf.python.framework.test_ops'],
output_package='tf',
module_prefix=''
)
self.assertEqual(
expected,
got,
)
@parameterized.named_parameters(
dict(
testcase_name='normal_file',
entrypoint=generator._Entrypoint(
module='tf.io',
name='decode_csv',
exported_symbol=exported_api.ExportedSymbol(
file_name='tf/python/ops/parsing_ops.py',
line_no=10,
symbol_name='decode_csv_v2',
v1_apis=[],
v2_apis=['tf.io.decode_csv'],
),
),
prefixes_to_strip=[],
expected='tf.python.ops.parsing_ops',
),
dict(
testcase_name='genfile',
entrypoint=generator._Entrypoint(
module='tf.io',
name='decode_proto_v2',
exported_symbol=exported_api.ExportedSymbol(
file_name=(
'bazel-out/genfiles/tf/python/ops/gen_decode_proto_ops.py'
),
line_no=20,
symbol_name='decode_proto_v2',
v1_apis=[],
v2_apis=['tf.io.decode_proto_v2'],
),
),
prefixes_to_strip=['bazel-out/genfiles'],
expected='tf.python.ops.gen_decode_proto_ops',
),
)
def test_get_import_path(self, entrypoint, prefixes_to_strip, expected):
self.assertEqual(
expected,
generator._get_import_path(
entrypoint.exported_symbol.file_name, prefixes_to_strip, ''
),
)
@parameterized.named_parameters(
dict(
testcase_name='direct',
entrypoint=generator._Entrypoint(
module='tf',
name='Tensor',
exported_symbol=tensor_es,
),
use_lazy_loading=False,
expected='from tf.python.framework.tensor import Tensor # line: 1',
),
dict(
testcase_name='alias',
entrypoint=generator._Entrypoint(
module='tf.io',
name='decode_csv',
exported_symbol=exported_api.ExportedSymbol(
file_name='tf/python/ops/parsing_ops.py',
line_no=10,
symbol_name='decode_csv_v2',
v1_apis=[],
v2_apis=['tf.io.decode_csv'],
),
),
use_lazy_loading=False,
expected=(
'from tf.python.ops.parsing_ops import decode_csv_v2 as'
' decode_csv # line: 10'
),
),
dict(
testcase_name='direct_lazy',
entrypoint=generator._Entrypoint(
module='tf',
name='Tensor',
exported_symbol=tensor_es,
),
use_lazy_loading=True,
expected=(
" 'Tensor': ('tf.python.framework.tensor', 'Tensor'), # line: 1"
),
),
dict(
testcase_name='alias_lazy',
entrypoint=generator._Entrypoint(
module='tf.io',
name='decode_csv',
exported_symbol=exported_api.ExportedSymbol(
file_name='tf/python/ops/parsing_ops.py',
line_no=10,
symbol_name='decode_csv_v2',
v1_apis=[],
v2_apis=['tf.io.decode_csv'],
),
),
use_lazy_loading=True,
expected=(
" 'decode_csv': ('tf.python.ops.parsing_ops',"
" 'decode_csv_v2'), # line: 10"
),
),
)
def test_entrypoint_get_import(self, entrypoint, use_lazy_loading, expected):
self.assertEqual(expected, entrypoint.get_import([], '', use_lazy_loading))
def test_get_module(self):
self.assertEqual(
'keras.losses',
generator.get_module(
'bazel/tensorflow/keras/losses/', 'bazel/tensorflow'
),
)
def test_generate_proxy_api_files(self):
tmp_dir = self.create_tempdir()
proxy_file = os.path.join(tmp_dir, 'tensorflow/keras/losses/__init__.py')
generator.generate_proxy_api_files(
[proxy_file], 'keras', os.path.join(tmp_dir, 'tensorflow/keras')
)
self.assertTrue(os.path.isfile(proxy_file))
with open(proxy_file, 'r') as f:
self.assertEqual('from keras.losses import *', f.read())
def test_get_module_docstring(self):
docs_by_module = {
'io': 'io docs',
}
self.assertEqual(
'io docs', generator._get_module_docstring(docs_by_module, 'io')
)
self.assertEqual(
'Public API for math namespace',
generator._get_module_docstring(docs_by_module, 'math'),
)
@parameterized.named_parameters(
dict(
testcase_name='static_imports',
use_lazy_loading=False,
subpackage_rewrite=None,
expected="""from tf import io
from tf.python.framework.tensor import Tensor # line: 1
""",
),
dict(
testcase_name='lazy_imports',
use_lazy_loading=True,
subpackage_rewrite=None,
expected=""" 'io': ('', 'tf.io'),
'Tensor': ('tf.python.framework.tensor', 'Tensor'), # line: 1
""",
),
dict(
testcase_name='subpackage_rewrite',
use_lazy_loading=False,
subpackage_rewrite='tf.compat.v1',
expected="""from tf.compat.v1 import io
from tf.python.framework.tensor import Tensor # line: 1
""",
),
)
def test_get_imports_for_module(
self, use_lazy_loading, subpackage_rewrite, expected
):
symbols_by_module = {
'tf': {
generator._Entrypoint(
module='tf', name='Tensor', exported_symbol=tensor_es
)
}
}
generated_imports_by_module = {'tf': {'tf.io'}}
self.assertEqual(
expected,
generator._get_imports_for_module(
'tf',
'tf',
symbols_by_module,
generated_imports_by_module,
[],
'',
use_lazy_loading,
subpackage_rewrite,
),
)
@parameterized.named_parameters(
dict(
testcase_name='empty_prefixes_and_packages',
file='tf/python/framework/test_ops.py',
file_prefixes_to_strip=[],
packages_to_ignore=[],
should_skip=False,
),
dict(
testcase_name='empty_prefix_nonempty_package',
file='tf/python/framework/test_ops.py',
file_prefixes_to_strip=[],
packages_to_ignore=['tf.python.framework.test_ops'],
should_skip=True,
),
dict(
testcase_name='nonempty_prefix_empty_package',
file='gen/tf/python/framework/test_ops.py',
file_prefixes_to_strip=['gen/'],
packages_to_ignore=[],
should_skip=False,
),
dict(
testcase_name='nonempty_prefix_nonempty_package',
file='gen/tf/python/framework/test_ops.py',
file_prefixes_to_strip=['gen/'],
packages_to_ignore=['tf.python.framework.test_ops'],
should_skip=True,
),
dict(
testcase_name='non_matching_prefix_and_package',
file='tf/python/ops/test_ops.py',
file_prefixes_to_strip=['gen/'],
packages_to_ignore=['tf.python.framework.test_ops'],
should_skip=False,
),
)
def test_should_skip_file(
self, file, file_prefixes_to_strip, packages_to_ignore, should_skip
):
self.assertEqual(
should_skip,
generator._should_skip_file(
file, file_prefixes_to_strip, packages_to_ignore, '',
),
)
@parameterized.named_parameters(
dict(
testcase_name='default',
root_file_name=None,
),
dict(
testcase_name='renamed_root',
root_file_name='v2.py',
),
)
def test_gen_init_files(self, root_file_name):
output_dir = self.create_tempdir()
mapping_dir = self.create_tempdir()
write_test_data(mapping_dir.full_path)
file_prefixes_to_strip = [mapping_dir.full_path]
public_api = generator.get_public_api(
[os.path.join(mapping_dir, f) for f in test_data],
file_prefixes_to_strip=file_prefixes_to_strip,
packages_to_ignore=['tf.python.framework.test_ops'],
output_package='tf',
module_prefix='',
)
paths_expected = [
root_file_name if root_file_name else '__init__.py',
'experimental/__init__.py',
'experimental/numpy/__init__.py',
]
paths_expected = set(
[
os.path.normpath(os.path.join(output_dir, path))
for path in paths_expected
]
)
if root_file_name is None:
generator._gen_init_files(
output_dir,
'tf',
2,
public_api.v2_entrypoints_by_module,
public_api.v2_generated_imports_by_module,
public_api.docs_by_module,
'',
file_prefixes_to_strip,
False,
'',
paths_expected,
)
else:
generator._gen_init_files(
output_dir,
'tf',
2,
public_api.v2_entrypoints_by_module,
public_api.v2_generated_imports_by_module,
public_api.docs_by_module,
'',
file_prefixes_to_strip,
False,
'',
paths_expected,
root_file_name=root_file_name,
)
expected_init_path = os.path.join(
output_dir.full_path,
root_file_name if root_file_name else '__init__.py',
)
self.assertTrue(os.path.exists(expected_init_path))
with open(expected_init_path, 'r') as f:
self.assertEqual(
f.read(),
"""# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator2/generator/generator.py script.
\"""Public API for tf namespace
\"""
import sys as _sys
from tf import experimental
from tf.python.framework.tensor import Tensor # line: 1
""",
)
expected_numpy_path = os.path.join(
output_dir.full_path, 'experimental/numpy/__init__.py'
)
self.assertTrue(os.path.exists(expected_numpy_path))
with open(expected_numpy_path, 'r') as f:
self.assertEqual(
f.read(),
"""# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator2/generator/generator.py script.
\"""Public API for tf.experimental.numpy namespace
\"""
import sys as _sys
from tf.python.framework.tensor import Tensor as ndarray # line: 1
""",
)
def testRaisesOnNotExpectedFile(self):
output_dir = self.create_tempdir()
mapping_dir = self.create_tempdir()
write_test_data(mapping_dir.full_path)
file_prefixes_to_strip = [mapping_dir.full_path]
public_api = generator.get_public_api(
[os.path.normpath(os.path.join(mapping_dir, f)) for f in test_data],
file_prefixes_to_strip=file_prefixes_to_strip,
packages_to_ignore=['tf.python.framework.test_ops'],
output_package='tf',
module_prefix='',
)
with self.assertRaisesRegex(
AssertionError, 'Exported api attempted to write to'
):
generator._gen_init_files(
output_dir,
'tf',
2,
public_api.v2_entrypoints_by_module,
public_api.v2_generated_imports_by_module,
public_api.docs_by_module,
'',
file_prefixes_to_strip,
False,
'',
[],
)
if __name__ == '__main__':
absltest.main()
| GeneratorTest |
python | getsentry__sentry-python | sentry_sdk/worker.py | {
"start": 360,
"end": 4464
} | class ____:
def __init__(self, queue_size=DEFAULT_QUEUE_SIZE):
# type: (int) -> None
self._queue = Queue(queue_size) # type: Queue
self._lock = threading.Lock()
self._thread = None # type: Optional[threading.Thread]
self._thread_for_pid = None # type: Optional[int]
@property
def is_alive(self):
# type: () -> bool
if self._thread_for_pid != os.getpid():
return False
if not self._thread:
return False
return self._thread.is_alive()
def _ensure_thread(self):
# type: () -> None
if not self.is_alive:
self.start()
def _timed_queue_join(self, timeout):
# type: (float) -> bool
deadline = time() + timeout
queue = self._queue
queue.all_tasks_done.acquire()
try:
while queue.unfinished_tasks:
delay = deadline - time()
if delay <= 0:
return False
queue.all_tasks_done.wait(timeout=delay)
return True
finally:
queue.all_tasks_done.release()
def start(self):
# type: () -> None
with self._lock:
if not self.is_alive:
self._thread = threading.Thread(
target=self._target, name="sentry-sdk.BackgroundWorker"
)
self._thread.daemon = True
try:
self._thread.start()
self._thread_for_pid = os.getpid()
except RuntimeError:
# At this point we can no longer start because the interpreter
# is already shutting down. Sadly at this point we can no longer
# send out events.
self._thread = None
def kill(self):
# type: () -> None
"""
Kill worker thread. Returns immediately. Not useful for
waiting on shutdown for events, use `flush` for that.
"""
logger.debug("background worker got kill request")
with self._lock:
if self._thread:
try:
self._queue.put_nowait(_TERMINATOR)
except FullError:
logger.debug("background worker queue full, kill failed")
self._thread = None
self._thread_for_pid = None
def flush(self, timeout, callback=None):
# type: (float, Optional[Any]) -> None
logger.debug("background worker got flush request")
with self._lock:
if self.is_alive and timeout > 0.0:
self._wait_flush(timeout, callback)
logger.debug("background worker flushed")
def full(self):
# type: () -> bool
return self._queue.full()
def _wait_flush(self, timeout, callback):
# type: (float, Optional[Any]) -> None
initial_timeout = min(0.1, timeout)
if not self._timed_queue_join(initial_timeout):
pending = self._queue.qsize() + 1
logger.debug("%d event(s) pending on flush", pending)
if callback is not None:
callback(pending, timeout)
if not self._timed_queue_join(timeout - initial_timeout):
pending = self._queue.qsize() + 1
logger.error("flush timed out, dropped %s events", pending)
def submit(self, callback):
# type: (Callable[[], None]) -> bool
self._ensure_thread()
try:
self._queue.put_nowait(callback)
return True
except FullError:
return False
def _target(self):
# type: () -> None
while True:
callback = self._queue.get()
try:
if callback is _TERMINATOR:
break
try:
callback()
except Exception:
logger.error("Failed processing job", exc_info=True)
finally:
self._queue.task_done()
sleep(0)
| BackgroundWorker |
python | spack__spack | lib/spack/spack/error.py | {
"start": 6004,
"end": 6400
} | class ____(SpackError):
"""
Raised if file fails checksum verification.
"""
def __init__(self, path, size, contents, algorithm, expected, computed):
super().__init__(
f"{algorithm} checksum failed for {path}",
f"Expected {expected} but got {computed}. "
f"File size = {size} bytes. Contents = {contents!r}",
)
| NoChecksumException |
python | mlflow__mlflow | mlflow/genai/judges/tools/search_trace_regex.py | {
"start": 618,
"end": 806
} | class ____:
"""Represents a single regex match found in a trace."""
span_id: str
matched_text: str
surrounding_text: str
@experimental(version="3.4.0")
@dataclass
| RegexMatch |
python | PrefectHQ__prefect | tests/test_task_engine.py | {
"start": 27730,
"end": 28429
} | class ____:
async def test_return_state(self, prefect_client):
@task
async def foo():
return 42
state = await run_task_async(foo, return_type="state")
assert isinstance(state, State)
assert state.is_completed()
assert await state.result() == 42
async def test_return_state_even_on_failure(self, prefect_client):
@task
async def foo():
raise ValueError("xyz")
state = await run_task_async(foo, return_type="state")
assert isinstance(state, State)
assert state.is_failed()
with pytest.raises(ValueError, match="xyz"):
await state.result()
| TestReturnState |
python | tiangolo__fastapi | tests/test_jsonable_encoder.py | {
"start": 712,
"end": 826
} | class ____(Person):
def __iter__(self):
return ((k, v) for k, v in self.__dict__.items())
| DictablePerson |
python | pyca__cryptography | src/cryptography/hazmat/primitives/serialization/ssh.py | {
"start": 18985,
"end": 27447
} | class ____:
"""
The format of a sk-ecdsa-sha2-nistp256@openssh.com public key is:
string "sk-ecdsa-sha2-nistp256@openssh.com"
string curve name
ec_point Q
string application (user-specified, but typically "ssh:")
"""
def load_public(
self, data: memoryview
) -> tuple[ec.EllipticCurvePublicKey, memoryview]:
"""Make ECDSA public key from data."""
public_key, data = _lookup_kformat(_ECDSA_NISTP256).load_public(data)
_, data = load_application(data)
return public_key, data
def get_public(self, data: memoryview) -> typing.NoReturn:
# Confusingly `get_public` is an entry point used by private key
# loading.
raise UnsupportedAlgorithm(
"sk-ecdsa-sha2-nistp256 private keys cannot be loaded"
)
_KEY_FORMATS = {
_SSH_RSA: _SSHFormatRSA(),
_SSH_DSA: _SSHFormatDSA(),
_SSH_ED25519: _SSHFormatEd25519(),
_ECDSA_NISTP256: _SSHFormatECDSA(b"nistp256", ec.SECP256R1()),
_ECDSA_NISTP384: _SSHFormatECDSA(b"nistp384", ec.SECP384R1()),
_ECDSA_NISTP521: _SSHFormatECDSA(b"nistp521", ec.SECP521R1()),
_SK_SSH_ED25519: _SSHFormatSKEd25519(),
_SK_SSH_ECDSA_NISTP256: _SSHFormatSKECDSA(),
}
def _lookup_kformat(key_type: utils.Buffer):
"""Return valid format or throw error"""
if not isinstance(key_type, bytes):
key_type = memoryview(key_type).tobytes()
if key_type in _KEY_FORMATS:
return _KEY_FORMATS[key_type]
raise UnsupportedAlgorithm(f"Unsupported key type: {key_type!r}")
SSHPrivateKeyTypes = typing.Union[
ec.EllipticCurvePrivateKey,
rsa.RSAPrivateKey,
dsa.DSAPrivateKey,
ed25519.Ed25519PrivateKey,
]
def load_ssh_private_key(
data: utils.Buffer,
password: bytes | None,
backend: typing.Any = None,
*,
unsafe_skip_rsa_key_validation: bool = False,
) -> SSHPrivateKeyTypes:
"""Load private key from OpenSSH custom encoding."""
utils._check_byteslike("data", data)
if password is not None:
utils._check_bytes("password", password)
m = _PEM_RC.search(data)
if not m:
raise ValueError("Not OpenSSH private key format")
p1 = m.start(1)
p2 = m.end(1)
data = binascii.a2b_base64(memoryview(data)[p1:p2])
if not data.startswith(_SK_MAGIC):
raise ValueError("Not OpenSSH private key format")
data = memoryview(data)[len(_SK_MAGIC) :]
# parse header
ciphername, data = _get_sshstr(data)
kdfname, data = _get_sshstr(data)
kdfoptions, data = _get_sshstr(data)
nkeys, data = _get_u32(data)
if nkeys != 1:
raise ValueError("Only one key supported")
# load public key data
pubdata, data = _get_sshstr(data)
pub_key_type, pubdata = _get_sshstr(pubdata)
kformat = _lookup_kformat(pub_key_type)
pubfields, pubdata = kformat.get_public(pubdata)
_check_empty(pubdata)
if ciphername != _NONE or kdfname != _NONE:
ciphername_bytes = ciphername.tobytes()
if ciphername_bytes not in _SSH_CIPHERS:
raise UnsupportedAlgorithm(
f"Unsupported cipher: {ciphername_bytes!r}"
)
if kdfname != _BCRYPT:
raise UnsupportedAlgorithm(f"Unsupported KDF: {kdfname!r}")
blklen = _SSH_CIPHERS[ciphername_bytes].block_len
tag_len = _SSH_CIPHERS[ciphername_bytes].tag_len
# load secret data
edata, data = _get_sshstr(data)
# see https://bugzilla.mindrot.org/show_bug.cgi?id=3553 for
# information about how OpenSSH handles AEAD tags
if _SSH_CIPHERS[ciphername_bytes].is_aead:
tag = bytes(data)
if len(tag) != tag_len:
raise ValueError("Corrupt data: invalid tag length for cipher")
else:
_check_empty(data)
_check_block_size(edata, blklen)
salt, kbuf = _get_sshstr(kdfoptions)
rounds, kbuf = _get_u32(kbuf)
_check_empty(kbuf)
ciph = _init_cipher(ciphername_bytes, password, salt.tobytes(), rounds)
dec = ciph.decryptor()
edata = memoryview(dec.update(edata))
if _SSH_CIPHERS[ciphername_bytes].is_aead:
assert isinstance(dec, AEADDecryptionContext)
_check_empty(dec.finalize_with_tag(tag))
else:
# _check_block_size requires data to be a full block so there
# should be no output from finalize
_check_empty(dec.finalize())
else:
if password:
raise TypeError(
"Password was given but private key is not encrypted."
)
# load secret data
edata, data = _get_sshstr(data)
_check_empty(data)
blklen = 8
_check_block_size(edata, blklen)
ck1, edata = _get_u32(edata)
ck2, edata = _get_u32(edata)
if ck1 != ck2:
raise ValueError("Corrupt data: broken checksum")
# load per-key struct
key_type, edata = _get_sshstr(edata)
if key_type != pub_key_type:
raise ValueError("Corrupt data: key type mismatch")
private_key, edata = kformat.load_private(
edata,
pubfields,
unsafe_skip_rsa_key_validation=unsafe_skip_rsa_key_validation,
)
# We don't use the comment
_, edata = _get_sshstr(edata)
# yes, SSH does padding check *after* all other parsing is done.
# need to follow as it writes zero-byte padding too.
if edata != _PADDING[: len(edata)]:
raise ValueError("Corrupt data: invalid padding")
if isinstance(private_key, dsa.DSAPrivateKey):
warnings.warn(
"SSH DSA keys are deprecated and will be removed in a future "
"release.",
utils.DeprecatedIn40,
stacklevel=2,
)
return private_key
def _serialize_ssh_private_key(
private_key: SSHPrivateKeyTypes,
password: bytes,
encryption_algorithm: KeySerializationEncryption,
) -> bytes:
"""Serialize private key with OpenSSH custom encoding."""
utils._check_bytes("password", password)
if isinstance(private_key, dsa.DSAPrivateKey):
warnings.warn(
"SSH DSA key support is deprecated and will be "
"removed in a future release",
utils.DeprecatedIn40,
stacklevel=4,
)
key_type = _get_ssh_key_type(private_key)
kformat = _lookup_kformat(key_type)
# setup parameters
f_kdfoptions = _FragList()
if password:
ciphername = _DEFAULT_CIPHER
blklen = _SSH_CIPHERS[ciphername].block_len
kdfname = _BCRYPT
rounds = _DEFAULT_ROUNDS
if (
isinstance(encryption_algorithm, _KeySerializationEncryption)
and encryption_algorithm._kdf_rounds is not None
):
rounds = encryption_algorithm._kdf_rounds
salt = os.urandom(16)
f_kdfoptions.put_sshstr(salt)
f_kdfoptions.put_u32(rounds)
ciph = _init_cipher(ciphername, password, salt, rounds)
else:
ciphername = kdfname = _NONE
blklen = 8
ciph = None
nkeys = 1
checkval = os.urandom(4)
comment = b""
# encode public and private parts together
f_public_key = _FragList()
f_public_key.put_sshstr(key_type)
kformat.encode_public(private_key.public_key(), f_public_key)
f_secrets = _FragList([checkval, checkval])
f_secrets.put_sshstr(key_type)
kformat.encode_private(private_key, f_secrets)
f_secrets.put_sshstr(comment)
f_secrets.put_raw(_PADDING[: blklen - (f_secrets.size() % blklen)])
# top-level structure
f_main = _FragList()
f_main.put_raw(_SK_MAGIC)
f_main.put_sshstr(ciphername)
f_main.put_sshstr(kdfname)
f_main.put_sshstr(f_kdfoptions)
f_main.put_u32(nkeys)
f_main.put_sshstr(f_public_key)
f_main.put_sshstr(f_secrets)
# copy result info bytearray
slen = f_secrets.size()
mlen = f_main.size()
buf = memoryview(bytearray(mlen + blklen))
f_main.render(buf)
ofs = mlen - slen
# encrypt in-place
if ciph is not None:
ciph.encryptor().update_into(buf[ofs:mlen], buf[ofs:])
return _ssh_pem_encode(buf[:mlen])
SSHPublicKeyTypes = typing.Union[
ec.EllipticCurvePublicKey,
rsa.RSAPublicKey,
dsa.DSAPublicKey,
ed25519.Ed25519PublicKey,
]
SSHCertPublicKeyTypes = typing.Union[
ec.EllipticCurvePublicKey,
rsa.RSAPublicKey,
ed25519.Ed25519PublicKey,
]
| _SSHFormatSKECDSA |
python | realpython__materials | python-yaml/models.py | {
"start": 129,
"end": 316
} | class ____:
__slots__ = ["name"]
def __init__(self, name):
self.name = name
def __setstate__(self, state):
self.name = codecs.decode(state["name"], "rot13")
| User |
python | django__django | tests/admin_views/test_autocomplete_view.py | {
"start": 999,
"end": 1106
} | class ____(admin.TabularInline):
model = Authorship
autocomplete_fields = ["author"]
| AuthorshipInline |
python | facebookresearch__faiss | tests/test_meta_index.py | {
"start": 1793,
"end": 5872
} | class ____(unittest.TestCase):
@unittest.skipIf(os.name == "posix" and os.uname().sysname == "Darwin",
"There is a bug in the OpenMP implementation on OSX.")
def test_shards(self):
k = 32
ref_index = faiss.IndexFlatL2(d)
ref_index.add(xb)
_Dref, Iref = ref_index.search(xq, k)
# Create both threaded and non-threaded shard indexes
shard_index_nonthreaded = faiss.IndexShards(
d, False) # explicitly non-threaded
shard_index_threaded = faiss.IndexShards(
d, True) # explicitly threaded
shard_index_2 = faiss.IndexShards(d, True, False)
ni = 3
# Populate both indexes with the same data
for i in range(ni):
i0 = int(i * nb / ni)
i1 = int((i + 1) * nb / ni)
# Add to non-threaded index
index_nt = faiss.IndexFlatL2(d)
index_nt.add(xb[i0:i1])
shard_index_nonthreaded.add_shard(index_nt)
# Add to threaded index
index_t = faiss.IndexFlatL2(d)
index_t.add(xb[i0:i1])
shard_index_threaded.add_shard(index_t)
# Add to shard_index_2 for the original test logic
index_2 = faiss.IndexFlatL2(d)
irm = faiss.IndexIDMap(index_2)
shard_index_2.add_shard(irm)
# test parallel add
shard_index_2.verbose = True
shard_index_2.add(xb)
for test_no in range(3):
with_threads = test_no == 1
if with_threads:
remember_nt = faiss.omp_get_max_threads()
faiss.omp_set_num_threads(1)
# Use the threaded index
test_index = shard_index_threaded
else:
# Use the non-threaded index
test_index = shard_index_nonthreaded
if test_no != 2:
_D, I = test_index.search(xq, k)
else:
_D, I = shard_index_2.search(xq, k)
if with_threads:
faiss.omp_set_num_threads(remember_nt)
ndiff = (I != Iref).sum()
assert (ndiff < nq * k / 1000.)
def test_shards_ivf(self):
ds = SyntheticDataset(32, 1000, 100, 20)
ref_index = faiss.index_factory(ds.d, "IVF32,SQ8")
ref_index.train(ds.get_train())
xb = ds.get_database()
ref_index.add(ds.get_database())
Dref, Iref = ref_index.search(ds.get_database(), 10)
ref_index.reset()
sharded_index = faiss.IndexShardsIVF(
ref_index.quantizer, ref_index.nlist, False, True)
for shard in range(3):
index_i = faiss.clone_index(ref_index)
index_i.add(xb[shard * nb // 3: (shard + 1)* nb // 3])
sharded_index.add_shard(index_i)
Dnew, Inew = sharded_index.search(ds.get_database(), 10)
np.testing.assert_equal(Inew, Iref)
np.testing.assert_allclose(Dnew, Dref)
def test_shards_ivf_train_add(self):
ds = SyntheticDataset(32, 1000, 600, 20)
quantizer = faiss.IndexFlatL2(ds.d)
sharded_index = faiss.IndexShardsIVF(quantizer, 40, False, False)
for _ in range(3):
sharded_index.add_shard(faiss.index_factory(ds.d, "IVF40,Flat"))
sharded_index.train(ds.get_train())
sharded_index.add(ds.get_database())
Dnew, Inew = sharded_index.search(ds.get_queries(), 10)
index_ref = faiss.IndexIVFFlat(quantizer, ds.d, sharded_index.nlist)
index_ref.train(ds.get_train())
index_ref.add(ds.get_database())
Dref, Iref = index_ref.search(ds.get_queries(), 10)
np.testing.assert_equal(Inew, Iref)
np.testing.assert_allclose(Dnew, Dref)
# mess around with the quantizer's centroids
centroids = quantizer.reconstruct_n()
centroids = centroids[::-1].copy()
quantizer.reset()
quantizer.add(centroids)
D2, I2 = sharded_index.search(ds.get_queries(), 10)
self.assertFalse(np.all(I2 == Inew))
| Shards |
python | numpy__numpy | numpy/f2py/tests/test_regression.py | {
"start": 5762,
"end": 6182
} | class ____(util.F2PyTest):
# Ensure that variables are exposed without functions or subroutines in a module
sources = [util.getpath("tests", "src", "regression", "assignOnlyModule.f90")]
@pytest.mark.slow
def test_gh27167(self):
assert (self.module.f_globals.n_max == 16)
assert (self.module.f_globals.i_max == 18)
assert (self.module.f_globals.j_max == 72)
| TestAssignmentOnlyModules |
python | ansible__ansible | lib/ansible/_internal/_errors/_handler.py | {
"start": 1689,
"end": 3455
} | class ____:
"""
Provides a configurable error handler context manager for a specific list of exception types.
Unhandled errors leaving the context manager can be ignored, treated as warnings, or allowed to raise by setting `ErrorAction`.
"""
def __init__(self, action: ErrorAction) -> None:
self.action = action
@contextlib.contextmanager
def handle(self, *args: type[BaseException], skip_on_ignore: bool = False) -> t.Iterator[None]:
"""
Handle the specified exception(s) using the defined error action.
If `skip_on_ignore` is `True`, the body of the context manager will be skipped for `ErrorAction.IGNORE`.
Use of `skip_on_ignore` requires enclosure within the `Skippable` context manager.
"""
if not args:
raise ValueError('At least one exception type is required.')
if skip_on_ignore and self.action == ErrorAction.IGNORE:
raise _SkipException() # skipping ignored action
try:
yield
except args as ex:
match self.action:
case ErrorAction.WARNING:
display.error_as_warning(msg=None, exception=ex)
case ErrorAction.ERROR:
raise
case _: # ErrorAction.IGNORE
pass
if skip_on_ignore:
raise _SkipException() # completed skippable action, ensures the `Skippable` context was used
@classmethod
def from_config(cls, setting: str, variables: dict[str, t.Any] | None = None) -> t.Self:
"""Return an `ErrorHandler` instance configured using the specified Ansible config setting."""
return cls(ErrorAction.from_config(setting, variables=variables))
| ErrorHandler |
python | walkccc__LeetCode | solutions/3485. Longest Common Prefix of K Strings After Removal/3485.py | {
"start": 1051,
"end": 1354
} | class ____:
def longestCommonPrefix(self, words: list[str], k: int) -> list[int]:
ans = []
trie = Trie(k)
for word in words:
trie.insert(word)
for word in words:
trie.erase(word)
ans.append(trie.getLongestCommonPrefix())
trie.insert(word)
return ans
| Solution |
python | conda__conda | conda/models/match_spec.py | {
"start": 3461,
"end": 33116
} | class ____(metaclass=MatchSpecType):
"""The query language for conda packages.
Any of the fields that comprise a :class:`PackageRecord` can be used to compose a
:class:`MatchSpec`.
:class:`MatchSpec` can be composed with keyword arguments, where keys are any of the
attributes of :class:`PackageRecord`. Values for keyword arguments are the exact values the
attribute should match against. Many fields can also be matched against non-exact values--by
including wildcard `*` and `>`/`<` ranges--where supported. Any non-specified field is
the equivalent of a full wildcard match.
:class:`MatchSpec` can also be composed using a single positional argument, with optional
keyword arguments. Keyword arguments also override any conflicting information provided in
the positional argument. The positional argument can be either an existing :class:`MatchSpec`
instance or a string. Conda has historically supported more than one string representation
for equivalent :class:`MatchSpec` queries. This :class:`MatchSpec` should accept any existing
valid spec string, and correctly compose a :class:`MatchSpec` instance.
A series of rules are now followed for creating the canonical string representation of a
:class:`MatchSpec` instance. The canonical string representation can generically be
represented by
(channel(/subdir):(namespace):)name(version(build))[key1=value1,key2=value2]
where `()` indicate optional fields. The rules for constructing a canonical string
representation are:
1. `name` (i.e. "package name") is required, but its value can be '*'. Its position is always
outside the key-value brackets.
2. If `version` is an exact version, it goes outside the key-value brackets and is prepended
by `==`. If `version` is a "fuzzy" value (e.g. `1.11.*`), it goes outside the key-value
brackets with the `.*` left off and is prepended by `=`. Otherwise `version` is included
inside key-value brackets.
3. If `version` is an exact version, and `build` is an exact value, `build` goes outside
key-value brackets prepended by a `=`. Otherwise, `build` goes inside key-value brackets.
`build_string` is an alias for `build`.
4. The `namespace` position is being held for a future conda feature.
5. If `channel` is included and is an exact value, a `::` separator is ued between `channel`
and `name`. `channel` can either be a canonical channel name or a channel url. In the
canonical string representation, the canonical channel name will always be used.
6. If `channel` is an exact value and `subdir` is an exact value, `subdir` is appended to
`channel` with a `/` separator. Otherwise, `subdir` is included in the key-value brackets.
7. Key-value brackets can be delimited by comma, space, or comma+space. Value can optionally
be wrapped in single or double quotes, but must be wrapped if `value` contains a comma,
space, or equal sign. The canonical format uses comma delimiters and single quotes.
8. When constructing a :class:`MatchSpec` instance from a string, any key-value pair given
inside the key-value brackets overrides any matching parameter given outside the brackets.
When :class:`MatchSpec` attribute values are simple strings, the are interpreted using the
following conventions:
- If the string begins with `^` and ends with `$`, it is converted to a regex.
- If the string contains an asterisk (`*`), it is transformed from a glob to a regex.
- Otherwise, an exact match to the string is sought.
Examples:
>>> str(MatchSpec(name='foo', build='py2*', channel='conda-forge'))
'conda-forge::foo[build=py2*]'
>>> str(MatchSpec('foo 1.0 py27_0'))
'foo==1.0=py27_0'
>>> str(MatchSpec('foo=1.0=py27_0'))
'foo==1.0=py27_0'
>>> str(MatchSpec('conda-forge::foo[version=1.0.*]'))
'conda-forge::foo=1.0'
>>> str(MatchSpec('conda-forge/linux-64::foo>=1.0'))
"conda-forge/linux-64::foo[version='>=1.0']"
>>> str(MatchSpec('*/linux-64::foo>=1.0'))
"foo[subdir=linux-64,version='>=1.0']"
To fully-specify a package with a full, exact spec, the fields
- channel
- subdir
- name
- version
- build
must be given as exact values. In the future, the namespace field will be added to this list.
Alternatively, an exact spec is given by '*[md5=12345678901234567890123456789012]'
or '*[sha256=f453db4ffe2271ec492a2913af4e61d4a6c118201f07de757df0eff769b65d2e]'.
"""
FIELD_NAMES = (
"channel",
"subdir",
"name",
"version",
"build",
"build_number",
"track_features",
"features",
"url",
"md5",
"sha256",
"license",
"license_family",
"fn",
)
FIELD_NAMES_SET = frozenset(FIELD_NAMES)
_MATCHER_CACHE = {}
def __init__(self, optional=False, target=None, **kwargs):
self._optional = optional
self._target = target
self._original_spec_str = kwargs.pop("_original_spec_str", None)
self._match_components = self._build_components(**kwargs)
@classmethod
def from_dist_str(cls, dist_str):
parts = {}
if dist_str[-len(CONDA_PACKAGE_EXTENSION_V2) :] == CONDA_PACKAGE_EXTENSION_V2:
dist_str = dist_str[: -len(CONDA_PACKAGE_EXTENSION_V2)]
elif dist_str[-len(CONDA_PACKAGE_EXTENSION_V1) :] == CONDA_PACKAGE_EXTENSION_V1:
dist_str = dist_str[: -len(CONDA_PACKAGE_EXTENSION_V1)]
if "::" in dist_str:
channel_subdir_str, dist_str = dist_str.split("::", 1)
if "/" in channel_subdir_str:
channel_str, subdir = channel_subdir_str.rsplit("/", 1)
if subdir not in context.known_subdirs:
channel_str = channel_subdir_str
subdir = None
parts["channel"] = channel_str
if subdir:
parts["subdir"] = subdir
else:
parts["channel"] = channel_subdir_str
name, version, build = dist_str.rsplit("-", 2)
parts.update(
{
"name": name,
"version": version,
"build": build,
}
)
return cls(**parts)
def get_exact_value(self, field_name):
    """Return the configured matcher's exact value for *field_name*.

    Falls through to the (falsy) matcher itself when the field is absent
    or the matcher is falsy, mirroring ``v and v.exact_value``.
    """
    component = self._match_components.get(field_name)
    if not component:
        return component
    return component.exact_value

def get_raw_value(self, field_name):
    """Return the configured matcher's raw value for *field_name*, if any."""
    component = self._match_components.get(field_name)
    if not component:
        return component
    return component.raw_value

def get(self, field_name, default=None):
    """Dict-style accessor: raw value for *field_name*, or *default*."""
    raw = self.get_raw_value(field_name)
    return raw if raw is not None else default
@property
def is_name_only_spec(self):
    # True when the only constraint is a concrete (non-wildcard) name.
    return (
        len(self._match_components) == 1
        and "name" in self._match_components
        and self.name != "*"
    )

def dist_str(self):
    # Kept for API symmetry with record-like objects; same as str(self).
    return self.__str__()

@property
def optional(self):
    # Whether this spec may be left unsatisfied.
    return self._optional

@property
def target(self):
    return self._target

@property
def original_spec_str(self):
    # Raw string this spec was parsed from, or None when built from kwargs.
    return self._original_spec_str

def match(self, rec):
    """
    Accepts a `PackageRecord` or a dict, and matches can pull from any field
    in that record. Returns True for a match, and False for no match.
    """
    if isinstance(rec, dict):
        # TODO: consider AttrDict instead of PackageRecord
        from .records import PackageRecord

        rec = PackageRecord.from_objects(rec)
    # Every configured component must match; an empty spec matches anything.
    for field_name, v in self._match_components.items():
        if not self._match_individual(rec, field_name, v):
            return False
    return True

def _match_individual(self, record, field_name, match_component):
    val = getattr(record, field_name)
    try:
        return match_component.match(val)
    except AttributeError:
        # Component has no .match(); fall back to direct equality.
        return match_component == val

def _is_simple(self):
    # Exactly one component, and it is an exact name.
    return (
        len(self._match_components) == 1
        and self.get_exact_value("name") is not None
    )

def _is_single(self):
    return len(self._match_components) == 1

def _to_filename_do_not_use(self):
    # WARNING: this is potentially unreliable and use should probably be limited
    # returns None if a filename can't be constructed
    fn_field = self.get_exact_value("fn")
    if fn_field:
        return fn_field
    vals = tuple(self.get_exact_value(x) for x in ("name", "version", "build"))
    if not any(x is None for x in vals):
        return ("{}-{}-{}".format(*vals)) + CONDA_PACKAGE_EXTENSION_V1
    else:
        return None

def __repr__(self):
    # e.g. MatchSpec("numpy>=1.0", target="...", optional=True)
    builder = [f'{self.__class__.__name__}("{self}"']
    if self.target:
        builder.append(f', target="{self.target}"')
    if self.optional:
        builder.append(", optional=True")
    builder.append(")")
    return "".join(builder)
def __str__(self):
    """Render the canonical spec string, e.g. "conda-forge::foo==1.0=py27_0".

    Fields that fit the compact "channel/subdir::name=version=build" grammar
    are accumulated in ``builder``; everything else becomes bracketed
    key=value pairs collected in ``brackets`` and appended at the end.
    """
    builder = []
    brackets = []
    channel_matcher = self._match_components.get("channel")
    if channel_matcher and channel_matcher.exact_value:
        builder.append(str(channel_matcher))
    elif channel_matcher and not channel_matcher.matches_all:
        brackets.append(f"channel={str(channel_matcher)}")
    subdir_matcher = self._match_components.get("subdir")
    if subdir_matcher:
        if channel_matcher and channel_matcher.exact_value:
            # Exact channel present: subdir rides along as "channel/subdir".
            builder.append(f"/{subdir_matcher}")
        else:
            brackets.append(f"subdir={subdir_matcher}")
    name_matcher = self._match_components.get("name", "*")
    # "::" separates the channel/subdir prefix from the name, when present.
    builder.append(("::%s" if builder else "%s") % name_matcher)
    version = self._match_components.get("version")
    build = self._match_components.get("build")
    version_exact = False
    if version:
        version = str(version)
        if any(s in version for s in "><$^|,"):
            # Complex constraint: must be quoted inside brackets.
            brackets.append(f"version='{version}'")
        elif version[:2] in ("!=", "~="):
            if build:
                brackets.append(f"version='{version}'")
            else:
                builder.append(version)
        elif version[-2:] == ".*":
            builder.append("=" + version[:-2])
        # Skip wildcard-only versions, to avoid an empty "=" in the output.
        # See https://github.com/conda/conda/issues/14357 for more info.
        elif version == "*":
            pass
        elif version[-1] == "*":
            builder.append("=" + version[:-1])
        elif version.startswith("=="):
            builder.append(version)
            version_exact = True
        else:
            builder.append("==" + version)
            version_exact = True
    if build:
        build = str(build)
        if any(s in build for s in "><$^|,"):
            brackets.append(f"build='{build}'")
        elif "*" in build:
            brackets.append(f"build={build}")
        elif version_exact:
            # Exact version + concrete build: compact "==version=build" form.
            builder.append("=" + build)
        else:
            brackets.append(f"build={build}")
    # Remaining fields always go in brackets, in FIELD_NAMES order.
    _skip = {"channel", "subdir", "name", "version", "build"}
    if "url" in self._match_components and "fn" in self._match_components:
        _skip.add("fn")
    for key in self.FIELD_NAMES:
        if key not in _skip and key in self._match_components:
            if key == "url" and channel_matcher:
                # skip url in canonical str if channel already included
                continue
            value = str(self._match_components[key])
            if any(s in value for s in ", ="):
                brackets.append(f"{key}='{value}'")
            else:
                brackets.append(f"{key}={value}")
    if brackets:
        builder.append("[{}]".format(",".join(brackets)))
    return "".join(builder)

def __json__(self):
    # JSON serialization uses the canonical string form.
    return self.__str__()
def conda_build_form(self):
    """Render this spec in conda-build's space-separated "name version build"
    form.

    Raises ValueError when no exact name is configured.  When a build is
    present without a version, the version column falls back to "*".
    """
    name = self.get_exact_value("name")
    if not name:
        raise ValueError(".conda_build_form() requires a non-empty spec name.")
    columns = [name]
    build = self.get_raw_value("build")
    version = self.get_raw_value("version")
    if build:
        # A build string always needs a version column in front of it.
        columns.extend([version or "*", build])
    elif version:
        columns.append(version)
    return " ".join(columns)
def conda_env_form(self):
    """
    Return the package specification in conda environment export format.

    This is the format used by `conda env export`: name=version=build
    (single equals), without channel prefixes and without .* patterns.

    Examples:
        >>> MatchSpec("numpy==1.21.0=py39h1234567_0").conda_env_form()
        'numpy=1.21.0=py39h1234567_0'
        >>> MatchSpec("numpy=1.21.0").conda_env_form()  # no-builds case
        'numpy=1.21.0'

    Returns:
        str: Package specification in conda env export format
    """
    # Start from the canonical string form (avoids .* patterns).
    rendered = str(self)
    # Drop any "channel::" prefix; conda env export omits channels.
    _, separator, remainder = rendered.partition("::")
    if separator:
        rendered = remainder
    # Collapse the first "==" (MatchSpec exact form) to a single "=".
    return rendered.replace("==", "=", 1)
def __eq__(self, other):
    # Equality is defined over components plus optional/target; the raw
    # original spec string is deliberately excluded.
    if isinstance(other, MatchSpec):
        return self._hash_key == other._hash_key
    else:
        return False

def __hash__(self):
    return hash(self._hash_key)

@memoizedproperty
def _hash_key(self):
    # Computed once per instance (memoized); components are a frozendict,
    # so the tuple is hashable.
    return self._match_components, self.optional, self.target

def __contains__(self, field):
    # "version" in spec  ->  is a version constraint configured?
    return field in self._match_components

def _build_components(self, **kwargs):
    """Compile raw field values into matcher objects, rejecting unknown fields."""
    not_fields = set(kwargs) - MatchSpec.FIELD_NAMES_SET
    if not_fields:
        raise InvalidMatchSpec(
            self._original_spec_str, f"Cannot match on field(s): {not_fields}"
        )
    _make_component = MatchSpec._make_component
    return frozendict(_make_component(key, value) for key, value in kwargs.items())

@staticmethod
def _make_component(field_name, value):
    """Return a (field_name, matcher) pair, consulting the class-level cache."""
    if hasattr(value, "match"):
        # Already a matcher-like object; use it as-is (never cached).
        matcher = value
        return field_name, matcher

    _MATCHER_CACHE = MatchSpec._MATCHER_CACHE
    cache_key = (field_name, value)
    cached_matcher = _MATCHER_CACHE.get(cache_key)
    if cached_matcher:
        return field_name, cached_matcher
    if field_name in _implementors:
        # Field has a dedicated matcher class registered.
        matcher = _implementors[field_name](value)
    else:
        matcher = ExactStrMatch(str(value))
    # NOTE(review): rebuilds the (field_name, value) tuple instead of reusing
    # cache_key above; behavior is identical, just redundant.
    _MATCHER_CACHE[(field_name, value)] = matcher
    return field_name, matcher
@property
def name(self):
    # Exact name when constrained, else the "*" wildcard.
    return self.get_exact_value("name") or "*"

#
# Remaining methods are for back compatibility with conda-build. Do not remove
# without coordination with the conda-build team.
#

@property
def strictness(self):
    # With the old MatchSpec, strictness==3 if name, version, and
    # build were all specified.
    s = sum(f in self._match_components for f in ("name", "version", "build"))
    if s < len(self._match_components):
        # Components beyond name/version/build also force max strictness.
        return 3
    elif not self.get_exact_value("name") or "build" in self._match_components:
        return 3
    elif "version" in self._match_components:
        return 2
    else:
        return 1

@property
def spec(self):
    return self.conda_build_form()

@property
def version(self):
    # in the old MatchSpec object, version was a VersionSpec, not a str
    # so we'll keep that API here
    return self._match_components.get("version")

@property
def fn(self):
    # Derive a filename from the fn field, falling back to the url's basename.
    val = self.get_raw_value("fn") or self.get_raw_value("url")
    if val:
        val = basename(val)
    if not val:
        raise ValueError(".fn cannot be empty.")
    return val
@classmethod
def merge(cls, match_specs, union=False):
    """Combine specs that share a package name into single merged specs.

    Wildcard/unnamed specs pass through unmerged.  Named specs are grouped
    by name and by their ``optional`` flag, then reduced pairwise via
    _merge; with ``union=True`` components are unioned rather than
    intersected.  Raises ValueError when same-name specs carry more than
    one distinct non-None target.
    """
    # NOTE(review): groupby here takes (key_fn, iterable) and returns a dict
    # — presumably the project's toolz-style helper, not itertools.groupby.
    match_specs = sorted(tuple(cls(s) for s in match_specs if s), key=str)
    name_groups = groupby(attrgetter("name"), match_specs)
    unmergeable = name_groups.pop("*", []) + name_groups.pop(None, [])

    merged_specs = []
    # Split each name group further by the optional flag before merging.
    mergeable_groups = tuple(
        chain.from_iterable(
            groupby(lambda s: s.optional, group).values()
            for group in name_groups.values()
        )
    )
    for group in mergeable_groups:
        target_groups = groupby(attrgetter("target"), group)
        target_groups.pop(None, None)
        if len(target_groups) > 1:
            raise ValueError(f"Incompatible MatchSpec merge:{dashlist(group)}")
        merged_specs.append(
            reduce(lambda x, y: x._merge(y, union), group)
            if len(group) > 1
            else group[0]
        )
    return (*merged_specs, *unmergeable)

@classmethod
def union(cls, match_specs):
    """Merge specs, combining per-field constraints with OR semantics."""
    return cls.merge(match_specs, union=True)

def _merge(self, other, union=False):
    # Specs must agree on optional/target before components can be combined.
    if self.optional != other.optional or self.target != other.target:
        raise ValueError(f"Incompatible MatchSpec merge: - {self}\n - {other}")
    final_components = {}
    component_names = set(self._match_components) | set(other._match_components)
    for component_name in component_names:
        this_component = self._match_components.get(component_name)
        that_component = other._match_components.get(component_name)
        if this_component is None and that_component is None:
            continue
        elif this_component is None:
            final_components[component_name] = that_component
        elif that_component is None:
            final_components[component_name] = this_component
        else:
            if union:
                try:
                    final = this_component.union(that_component)
                except (AttributeError, ValueError, TypeError):
                    # Matcher cannot union; fall back to a textual OR.
                    final = f"{this_component}|{that_component}"
            else:
                final = this_component.merge(that_component)
            final_components[component_name] = final
    return self.__class__(
        optional=self.optional, target=self.target, **final_components
    )
def _parse_version_plus_build(v_plus_b):
    """Split a combined "version build" string into (version, build).

    The build is separated from the version by a space or "=" that is not
    part of a comparison operator; embedded spaces in the version are
    removed.  Build is None when absent.

    Examples:
        >>> _parse_version_plus_build("=1.2.3 0")
        ('=1.2.3', '0')
        >>> _parse_version_plus_build("1.2.3=0")
        ('1.2.3', '0')
        >>> _parse_version_plus_build(">=1.0 , < 2.0 py34_0")
        ('>=1.0,<2.0', 'py34_0')
        >>> _parse_version_plus_build("=1.2.3 ")
        ('=1.2.3', None)
        >>> _parse_version_plus_build(">1.8,<2|==1.7")
        ('>1.8,<2|==1.7', None)
        >>> _parse_version_plus_build("* openblas_0")
        ('*', 'openblas_0')
    """
    found = re.search(
        r"((?:.+?)[^><!,|]?)(?:(?<![=!|,<>~])(?:[ =])([^-=,|<>~]+?))?$", v_plus_b
    )
    if found is None:
        # Pattern did not apply; treat the whole string as the version.
        version, build = v_plus_b, None
    else:
        version, build = found.groups()
        if build is not None:
            build = build.strip()
    if version:
        version = version.replace(" ", "")
    return version, build
def _parse_legacy_dist(dist_str):
    """Split a legacy dist string into (name, version, build).

    Examples:
        >>> _parse_legacy_dist("_license-1.1-py27_1.tar.bz2")
        ('_license', '1.1', 'py27_1')
        >>> _parse_legacy_dist("_license-1.1-py27_1")
        ('_license', '1.1', 'py27_1')
    """
    dist_str, _ = strip_pkg_extension(dist_str)
    # Name may contain dashes, so split version/build from the right.
    name, version, build = dist_str.rsplit("-", 2)
    return name, version, build

def _parse_channel(channel_val):
    # Return (channel_name, subdir) for a channel string, or (None, None)
    # when the value is empty/None.
    if not channel_val:
        return None, None
    chn = Channel(channel_val)
    # Fall back to the base URL when the channel has no short name.
    channel_name = chn.name or chn.base_url
    return channel_name, chn.subdir
# Memoizes _parse_spec_str results keyed by the raw spec string.
_PARSE_CACHE = {}


def _sanitize_version_str(version: str, build: str | None) -> str:
    """Normalize an "="-prefixed version string for MatchSpec parsing.

    Bare operators ("=", "==") are passed through untouched so downstream
    error handling can reject them like other incomplete operators ("<=",
    ">="); translating them here would mangle "==" into an empty version
    field and break logic that expects missing versions to still look like
    operators.  These arise from specs such as "numpy==" or "numpy>= ".

    For simple versions, "=1.2.3" becomes "1.2.3*" and "==1.2.3" becomes
    "1.2.3" when no build is given; anything not starting with "=" or
    containing "=", ",", or "|" after the prefix is left alone.

    Args:
        version: The version string to sanitize.
        build: Optional build string; its presence suppresses some
            translations.

    Returns:
        The sanitized version string.
    """
    # Bare operators: pass through for downstream error handling.
    if version in ("==", "="):
        return version
    # Only "="-prefixed versions need translation.
    if not version.startswith("="):
        return version
    stripped = version.lstrip("=")
    # "==1.2.3" with no build collapses to the plain exact version.
    if build is None and version.startswith("=="):
        return stripped
    # Complex remainders (more "=", alternatives, lists) are left untouched.
    if any(special in stripped for special in "=,|"):
        return version
    # "=1.2.3" means a prefix match: append the wildcard when appropriate.
    if build is None and not stripped.endswith("*"):
        return stripped + "*"
    return stripped
def _parse_spec_str(spec_str):
    """Parse a MatchSpec string into a dict of field components.

    Handles feature specs ("feat@"), '#' comments, ' if ' conditionals,
    package-file URLs/paths, bracketed key=value sections, parenthesized
    sections, "channel[:namespace]::" prefixes, and version+build suffixes.
    Results are memoized in _PARSE_CACHE keyed by the original string.
    """
    cached_result = _PARSE_CACHE.get(spec_str)
    if cached_result:
        return cached_result

    original_spec_str = spec_str

    # pre-step for ugly backward compat
    if spec_str.endswith("@"):
        feature_name = spec_str[:-1]
        return {
            "name": "*",
            "track_features": (feature_name,),
        }

    # Step 1. strip '#' comment
    if "#" in spec_str:
        ndx = spec_str.index("#")
        spec_str, _ = spec_str[:ndx], spec_str[ndx:]
        # NOTE(review): the result of strip() is discarded here; presumably
        # "spec_str = spec_str.strip()" was intended — confirm before
        # changing, since Step 7 strips the version/build remainder anyway.
        spec_str.strip()

    # Step 1.b strip ' if ' anticipating future compatibility issues
    spec_split = spec_str.split(" if ", 1)
    if len(spec_split) > 1:
        log.debug("Ignoring conditional in spec %s", spec_str)
        spec_str = spec_split[0]

    # Step 2. done if spec_str is a tarball
    if is_package_file(spec_str):
        # treat as a normal url
        if not is_url(spec_str):
            spec_str = unquote(path_to_url(expand(spec_str)))

        channel = Channel(spec_str)
        if channel.subdir:
            name, version, build = _parse_legacy_dist(channel.package_filename)
            result = {
                "channel": channel.canonical_name,
                "subdir": channel.subdir,
                "name": name,
                "version": version,
                "build": build,
                "fn": channel.package_filename,
                "url": spec_str,
            }
        else:
            # url is not a channel
            if spec_str.startswith("file://"):
                # We must undo percent-encoding when generating fn.
                path_or_url = url_to_path(spec_str)
            else:
                path_or_url = spec_str
            return {
                "name": "*",
                "fn": basename(path_or_url),
                "url": spec_str,
            }
        return result

    # Step 3. strip off brackets portion
    brackets = {}
    m3 = re.match(r".*(?:(\[.*\]))", spec_str)
    if m3:
        brackets_str = m3.groups()[0]
        spec_str = spec_str.replace(brackets_str, "")
        brackets_str = brackets_str[1:-1]
        m3b = re.finditer(
            r'([a-zA-Z0-9_-]+?)=(["\']?)([^\'"]*?)(\2)(?:[, ]|$)', brackets_str
        )
        for match in m3b:
            key, _, value, _ = match.groups()
            if not key or not value:
                raise InvalidMatchSpec(
                    original_spec_str, "key-value mismatch in brackets"
                )
            if key == "version" and value:
                # NOTE(review): the pattern above has no named groups, so
                # groupdict().get("build") is always None here — bracketed
                # versions are sanitized as if no build was given.  Confirm
                # whether that is intended.
                value = _sanitize_version_str(value, match.groupdict().get("build"))
            brackets[key] = value

    # Step 4. strip off parens portion
    m4 = re.match(r".*(?:(\(.*\)))", spec_str)
    parens = {}
    if m4:
        parens_str = m4.groups()[0]
        spec_str = spec_str.replace(parens_str, "")
        parens_str = parens_str[1:-1]
        m4b = re.finditer(
            r'([a-zA-Z0-9_-]+?)=(["\']?)([^\'"]*?)(\2)(?:[, ]|$)', parens_str
        )
        for match in m4b:
            key, _, value, _ = match.groups()
            parens[key] = value
        if "optional" in parens_str:
            parens["optional"] = True

    # Step 5. strip off '::' channel and namespace
    m5 = spec_str.rsplit(":", 2)
    m5_len = len(m5)
    if m5_len == 3:
        channel_str, namespace, spec_str = m5
    elif m5_len == 2:
        namespace, spec_str = m5
        channel_str = None
    elif m5_len:
        spec_str = m5[0]
        channel_str, namespace = None, None
    else:
        raise NotImplementedError()
    channel, subdir = _parse_channel(channel_str)
    # Bracketed channel/subdir values override any parsed from the prefix.
    if "channel" in brackets:
        b_channel, b_subdir = _parse_channel(brackets.pop("channel"))
        if b_channel:
            channel = b_channel
        if b_subdir:
            subdir = b_subdir
    if "subdir" in brackets:
        subdir = brackets.pop("subdir")

    # Step 6. strip off package name from remaining version + build
    m3 = re.match(r"([^ =<>!~]+)?([><!=~ ].+)?", spec_str)
    if m3:
        name, spec_str = m3.groups()
        if name is None:
            raise InvalidMatchSpec(
                original_spec_str, f"no package name found in '{spec_str}'"
            )
    else:
        raise InvalidMatchSpec(original_spec_str, "no package name found")

    # Step 7. otherwise sort out version + build
    spec_str = spec_str and spec_str.strip()
    # This was an attempt to make MatchSpec('numpy-1.11.0-py27_0') work like we'd want. It's
    # not possible though because plenty of packages have names with more than one '-'.
    # if spec_str is None and name.count('-') >= 2:
    # name, version, build = _parse_legacy_dist(name)
    if spec_str:
        if "[" in spec_str:
            raise InvalidMatchSpec(
                original_spec_str, "multiple brackets sections not allowed"
            )
        version, build = _parse_version_plus_build(spec_str)
        version = _sanitize_version_str(version, build)
    else:
        version, build = None, None

    # Step 8. now compile components together
    components = {}
    components["name"] = name or "*"
    if channel is not None:
        components["channel"] = channel
    if subdir is not None:
        components["subdir"] = subdir
    if namespace is not None:
        # components['namespace'] = namespace
        pass
    if version is not None:
        components["version"] = version
    if build is not None:
        components["build"] = build
    # anything in brackets will now strictly override key as set in other area of spec str
    # EXCEPT FOR: name
    # If we let name in brackets override a name outside of brackets it is possible to write
    # MatchSpecs that appear to install one package but actually install a completely different one
    # e.g. tensorflow[name=* version=* md5=<hash of pytorch package> ] will APPEAR to install
    # tensorflow but actually install pytorch.
    if "name" in components and "name" in brackets:
        msg = (
            f"'name' specified both inside ({brackets['name']}) and outside "
            f"({components['name']}) of brackets. The value outside of brackets "
            f"({components['name']}) will be used."
        )
        warnings.warn(msg, UserWarning)
        del brackets["name"]
    components.update(brackets)
    components["_original_spec_str"] = original_spec_str
    _PARSE_CACHE[original_spec_str] = components
    return components
| MatchSpec |
python | pytorch__pytorch | torch/_higher_order_ops/aoti_call_delegate.py | {
"start": 643,
"end": 6126
class ____(HigherOrderOperator):
    """aoti_call_delegate is a HOP for calling AOTInductor lowered submodule in ExportedProgram.

    It has the following signature:
    aoti_call_delegate(
        lowered_module: Union[AOTInductorEPModule, AOTInductorRunnerWrapper]
        original_gm:fx.GraphModule,
        weight_args: List[Tensor],
        input_args: List[Tensor],
    ) -> outputs: List[Tensor]

    where,
    - lowered_module is the AOTInductor lowered submodule, backed by compiled .so file, supporting real tensor inputs
    - original_gm is the stateless version of the original GraphModule before lowering, allowing FakeTensor propagation
    - weight_args is the list of weights in original GraphModule, including parameters and buffers
    - input_args is the list of flatten inputs
    """

    def __init__(self) -> None:
        # Register this HOP under the fixed name used by the dispatcher.
        super().__init__("aoti_call_delegate")

    def __call__(
        self,
        lowered_module: AOTI_LOWERED_MODULE,  # type: ignore[valid-type]
        original_gm: torch.fx.GraphModule,
        weight_args: list[torch.Tensor],
        input_args: list[torch.Tensor],
    ) -> list[torch.Tensor]:
        # Routes through HigherOrderOperator.__call__, which dispatches to
        # the per-key implementations registered on the singleton below.
        return super().__call__(lowered_module, original_gm, weight_args, input_args)
# Singleton HOP instance; the py_impl registrations below attach the
# per-dispatch-key behaviors to it.
aoti_call_delegate = AOTICallDelegate()
# Fall through these keys so dispatch continues to keys with real
# implementations registered.
aoti_call_delegate.fallthrough(torch._C.DispatchKey.PythonDispatcher)
aoti_call_delegate.fallthrough(torch._C.DispatchKey.PythonTLSSnapshot)
aoti_call_delegate.fallthrough(torch._C.DispatchKey.ADInplaceOrView)
aoti_call_delegate.fallthrough(torch._C.DispatchKey.AutocastCPU)


@aoti_call_delegate.py_impl(torch._C.DispatchKey.CompositeExplicitAutograd)
def call_delegate_cpu(
    lowered_module: AOTI_LOWERED_MODULE,  # type: ignore[valid-type]
    original_gm: torch.fx.GraphModule,
    weight_args: list[torch.Tensor],
    input_args: list[torch.Tensor],
) -> list[torch.Tensor]:
    """Eager implementation: run original_gm for fake args, or the compiled
    AOTI runner for real tensors."""
    # FX creates this immutable_dict/list concept. Get rid of this.
    map_types: dict[type, type] = {
        torch.fx.immutable_collections.immutable_dict: dict,
        torch.fx.immutable_collections.immutable_list: list,
    }
    new_args = pytree.tree_map_only(
        tuple(map_types.keys()),
        lambda a: map_types[type(a)](a),
        weight_args + input_args,
        lambda a: isinstance(a, tuple(map_types.keys())),
    )
    has_fake_args = any(isinstance(arg, FakeTensor) for arg in new_args)
    if has_fake_args:
        # use stateless original_gm for tracing with fake tensors
        fake_out = original_gm(*new_args)
        return fake_out
    else:
        # use AOTI Runner for real tensors
        new_input_args = new_args[len(weight_args) :]
        # The two wrapper types have different calling conventions:
        # RunnerWrapper takes *args, EPModule takes a single list.
        if type(lowered_module).__name__ == "AOTInductorRunnerWrapper":
            return lowered_module(*new_input_args)  # type: ignore[misc]
        elif type(lowered_module).__name__ == "AOTInductorEPModule":
            return lowered_module(new_input_args)  # type: ignore[misc]
        else:
            raise RuntimeError(
                f"Unexpected lowered_module type: {type(lowered_module)}."
            )
def trace_aoti_call_delegate(
    proxy_mode, func_overload, lowered_module, original_gm, weight_args, input_args
):
    """Record an aoti_call_delegate node into the proxy tracer's graph and
    return tracked outputs computed by the eager implementation."""
    # Register submodules on the tracer root so the serialized graph can
    # reference them by qualified name.
    proxy_mode.tracer.root.register_module("lowered_module", lowered_module)
    proxy_mode.tracer.root.register_module("original_gm", original_gm)

    node_args = (lowered_module, original_gm, weight_args, input_args)
    proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args)
    out_proxy = proxy_mode.tracer.create_proxy(
        "call_function", func_overload, proxy_args, {}, name="aoti_call_delegate"
    )
    # Compute concrete outputs without re-entering proxy tracing.
    with disable_proxy_modes_tracing():
        out = call_delegate_cpu(lowered_module, original_gm, weight_args, input_args)

    return track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer)


@aoti_call_delegate.py_impl(ProxyTorchDispatchMode)
def call_delegate_proxy_torch_dispatch_mode(
    mode: ProxyTorchDispatchMode,
    lowered_module: AOTI_LOWERED_MODULE,  # type: ignore[valid-type]
    original_gm: torch.fx.GraphModule,
    weight_args: list[torch.Tensor],
    input_args: list[torch.Tensor],
):
    # Proxy-mode implementation: trace the call into the active FX graph.
    res = trace_aoti_call_delegate(
        mode, aoti_call_delegate, lowered_module, original_gm, weight_args, input_args
    )
    return res


@aoti_call_delegate.py_impl(FakeTensorMode)
def call_delegate_fake_tensor_mode(
    mode: FakeTensorMode,
    lowered_module: AOTI_LOWERED_MODULE,  # type: ignore[valid-type]
    original_gm: torch.fx.GraphModule,
    weight_args: list[torch.Tensor],
    input_args: list[torch.Tensor],
) -> list[torch.Tensor]:
    # Fake-tensor implementation: propagate through the stateless
    # original_gm under the fake mode.
    with mode:
        return call_delegate_cpu(lowered_module, original_gm, weight_args, input_args)
@aoti_call_delegate.py_functionalize_impl
def call_delegate_functionalize(
    ctx,
    lowered_module: AOTI_LOWERED_MODULE,  # type: ignore[valid-type]
    original_gm: torch.fx.GraphModule,
    weight_args: list[torch.Tensor],
    input_args: list[torch.Tensor],
):
    """Functionalization implementation: unwrap functional tensors,
    redispatch to the next key, and re-wrap the results."""
    unwrapped_weight_args = tuple(
        ctx.unwrap_tensors(weight_arg) for weight_arg in weight_args
    )
    unwrapped_input_args = tuple(
        ctx.unwrap_tensors(input_arg) for input_arg in input_args
    )
    with ctx.redispatch_to_next():
        res = aoti_call_delegate(
            lowered_module,
            original_gm,
            unwrapped_weight_args,  # type: ignore[arg-type]
            unwrapped_input_args,  # type: ignore[arg-type]
        )
        return ctx.wrap_tensors(res)
| AOTICallDelegate |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.