language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
apache__airflow
|
providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/wasb.py
|
{
"start": 2531,
"end": 25448
}
|
class ____(BaseHook):
"""
Interact with Azure Blob Storage through the ``wasb://`` protocol.
These parameters have to be passed in Airflow Data Base: account_name and account_key.
Additional options passed in the 'extra' field of the connection will be
passed to the `BlobServiceClient()` constructor. For example, authenticate
using a SAS token by adding {"sas_token": "YOUR_TOKEN"} or using an account key
by adding {"account_key": "YOUR_ACCOUNT_KEY"}.
If no authentication configuration is provided, DefaultAzureCredential will be used (applicable
when using Azure compute infrastructure).
:param wasb_conn_id: Reference to the :ref:`wasb connection <howto/connection:wasb>`.
:param public_read: Whether an anonymous public read access should be used. default is False
"""
conn_name_attr = "wasb_conn_id"
default_conn_name = "wasb_default"
conn_type = "wasb"
hook_name = "Azure Blob Storage"
@classmethod
@add_managed_identity_connection_widgets
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3PasswordFieldWidget, BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import PasswordField, StringField
return {
"connection_string": PasswordField(
lazy_gettext("Blob Storage Connection String (optional)"), widget=BS3PasswordFieldWidget()
),
"shared_access_key": PasswordField(
lazy_gettext("Blob Storage Shared Access Key (optional)"), widget=BS3PasswordFieldWidget()
),
"tenant_id": StringField(
lazy_gettext("Tenant Id (Active Directory Auth)"), widget=BS3TextFieldWidget()
),
"sas_token": PasswordField(lazy_gettext("SAS Token (optional)"), widget=BS3PasswordFieldWidget()),
}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom field behaviour."""
return {
"hidden_fields": ["schema", "port"],
"relabeling": {
"login": "Blob Storage Login (optional)",
"password": "Blob Storage Key (optional)",
"host": "Account URL (Active Directory Auth)",
},
"placeholders": {
"login": "account name",
"password": "secret",
"host": "account url",
"connection_string": "connection string auth",
"tenant_id": "tenant",
"shared_access_key": "shared access key",
"sas_token": "account url or token",
"extra": "additional options for use with ClientSecretCredential, DefaultAzureCredential, or account_key authentication",
},
}
def __init__(
self,
wasb_conn_id: str = default_conn_name,
public_read: bool = False,
) -> None:
super().__init__()
self.conn_id = wasb_conn_id
self.public_read = public_read
self._blob_service_client: AsyncBlobServiceClient | BlobServiceClient | None = None
logger = logging.getLogger("azure.core.pipeline.policies.http_logging_policy")
try:
logger.setLevel(os.environ.get("AZURE_HTTP_LOGGING_LEVEL", logging.WARNING))
except ValueError:
logger.setLevel(logging.WARNING)
def _get_field(self, extra_dict, field_name):
prefix = "extra__wasb__"
if field_name.startswith("extra__"):
raise ValueError(
f"Got prefixed name {field_name}; please remove the '{prefix}' prefix when using this method."
)
if field_name in extra_dict:
return extra_dict[field_name] or None
return extra_dict.get(f"{prefix}{field_name}") or None
@property
def blob_service_client(self) -> AsyncBlobServiceClient | BlobServiceClient:
"""Return the BlobServiceClient object (cached)."""
if self._blob_service_client is None:
self._blob_service_client = self.get_conn()
return self._blob_service_client
@blob_service_client.setter
def blob_service_client(self, client: AsyncBlobServiceClient) -> None:
"""Set the cached BlobServiceClient object."""
self._blob_service_client = client
def get_conn(self) -> BlobServiceClient:
"""Return the BlobServiceClient object."""
conn = self.get_connection(self.conn_id)
extra = conn.extra_dejson or {}
client_secret_auth_config = extra.pop("client_secret_auth_config", {})
connection_string = self._get_field(extra, "connection_string")
if connection_string:
# connection_string auth takes priority
return BlobServiceClient.from_connection_string(connection_string, **extra)
account_url = parse_blob_account_url(conn.host, conn.login)
tenant = self._get_field(extra, "tenant_id")
if tenant:
# use Active Directory auth
app_id = cast("str", conn.login)
app_secret = cast("str", conn.password)
token_credential = ClientSecretCredential(
tenant_id=tenant, client_id=app_id, client_secret=app_secret, **client_secret_auth_config
)
return BlobServiceClient(account_url=account_url, credential=token_credential, **extra)
if self.public_read:
# Here we use anonymous public read
# more info
# https://docs.microsoft.com/en-us/azure/storage/blobs/storage-manage-access-to-resources
return BlobServiceClient(account_url=account_url, **extra)
shared_access_key = self._get_field(extra, "shared_access_key")
if shared_access_key:
# using shared access key
return BlobServiceClient(account_url=account_url, credential=shared_access_key, **extra)
sas_token = self._get_field(extra, "sas_token")
if sas_token:
if sas_token.startswith("https"):
return BlobServiceClient(account_url=sas_token, **extra)
return BlobServiceClient(account_url=f"{account_url.rstrip('/')}/{sas_token}", **extra)
# Fall back to old auth (password) or use managed identity if not provided.
credential: str | TokenCredential | None = conn.password
if not credential:
# Check for account_key in extra fields before falling back to DefaultAzureCredential
account_key = self._get_field(extra, "account_key")
if account_key:
credential = account_key
else:
managed_identity_client_id = self._get_field(extra, "managed_identity_client_id")
workload_identity_tenant_id = self._get_field(extra, "workload_identity_tenant_id")
credential = get_sync_default_azure_credential(
managed_identity_client_id=managed_identity_client_id,
workload_identity_tenant_id=workload_identity_tenant_id,
)
self.log.info("Using DefaultAzureCredential as credential")
return BlobServiceClient(
account_url=account_url,
credential=credential,
**extra,
)
def _get_container_client(self, container_name: str) -> AsyncContainerClient | ContainerClient:
"""
Instantiate a container client.
:param container_name: The name of the container
:return: AsyncContainerClient | ContainerClient
"""
return self.blob_service_client.get_container_client(container_name)
def _get_blob_client(self, container_name: str, blob_name: str) -> BlobClient | AsyncBlobClient:
"""
Instantiate a blob client.
:param container_name: The name of the blob container
:param blob_name: The name of the blob. This needs not be existing
"""
return self.blob_service_client.get_blob_client(container=container_name, blob=blob_name)
def check_for_blob(self, container_name: str, blob_name: str, **kwargs) -> bool:
"""
Check if a blob exists on Azure Blob Storage.
:param container_name: Name of the container.
:param blob_name: Name of the blob.
:param kwargs: Optional keyword arguments for ``BlobClient.get_blob_properties`` takes.
:return: True if the blob exists, False otherwise.
"""
try:
self._get_blob_client(container_name, blob_name).get_blob_properties(**kwargs)
except ResourceNotFoundError:
return False
return True
def check_for_prefix(self, container_name: str, prefix: str, **kwargs) -> bool:
"""
Check if a prefix exists on Azure Blob storage.
:param container_name: Name of the container.
:param prefix: Prefix of the blob.
:param kwargs: Optional keyword arguments that ``ContainerClient.walk_blobs`` takes
:return: True if blobs matching the prefix exist, False otherwise.
"""
blobs = self.get_blobs_list(container_name=container_name, prefix=prefix, **kwargs)
return bool(blobs)
def check_for_variable_type(self, variable_name: str, container: Any, expected_type: type[Any]) -> None:
if not isinstance(container, expected_type):
raise TypeError(
f"{variable_name} for {self.__class__.__name__} must be {expected_type.__name__}, got {type(container).__name__}"
)
def get_blobs_list(
self,
container_name: str,
prefix: str | None = None,
include: list[str] | None = None,
delimiter: str = "/",
**kwargs,
) -> list:
"""
List blobs in a given container.
:param container_name: The name of the container
:param prefix: Filters the results to return only blobs whose names
begin with the specified prefix.
:param include: Specifies one or more additional datasets to include in the
response. Options include: ``snapshots``, ``metadata``, ``uncommittedblobs``,
``copy`, ``deleted``.
:param delimiter: filters objects based on the delimiter (for e.g '.csv')
"""
container = self._get_container_client(container_name)
self.check_for_variable_type("container", container, ContainerClient)
container = cast("ContainerClient", container)
blob_list = []
blobs = container.walk_blobs(name_starts_with=prefix, include=include, delimiter=delimiter, **kwargs)
for blob in blobs:
blob_list.append(blob.name)
return blob_list
def get_blobs_list_recursive(
self,
container_name: str,
prefix: str | None = None,
include: list[str] | None = None,
endswith: str = "",
**kwargs,
) -> list:
"""
List blobs in a given container.
:param container_name: The name of the container
:param prefix: Filters the results to return only blobs whose names
begin with the specified prefix.
:param include: Specifies one or more additional datasets to include in the
response. Options include: ``snapshots``, ``metadata``, ``uncommittedblobs``,
``copy`, ``deleted``.
:param delimiter: filters objects based on the delimiter (for e.g '.csv')
"""
container = self._get_container_client(container_name)
self.check_for_variable_type("container", container, ContainerClient)
container = cast("ContainerClient", container)
blob_list = []
blobs = container.list_blobs(name_starts_with=prefix, include=include, **kwargs)
for blob in blobs:
if blob.name.endswith(endswith):
blob_list.append(blob.name)
return blob_list
def load_file(
self,
file_path: str,
container_name: str,
blob_name: str,
create_container: bool = False,
**kwargs,
) -> None:
"""
Upload a file to Azure Blob Storage.
:param file_path: Path to the file to load.
:param container_name: Name of the container.
:param blob_name: Name of the blob.
:param create_container: Attempt to create the target container prior to uploading the blob. This is
useful if the target container may not exist yet. Defaults to False.
:param kwargs: Optional keyword arguments that ``BlobClient.upload_blob()`` takes.
"""
with open(file_path, "rb") as data:
self.upload(
container_name=container_name,
blob_name=blob_name,
data=data,
create_container=create_container,
**kwargs,
)
def load_string(
self,
string_data: str,
container_name: str,
blob_name: str,
create_container: bool = False,
**kwargs,
) -> None:
"""
Upload a string to Azure Blob Storage.
:param string_data: String to load.
:param container_name: Name of the container.
:param blob_name: Name of the blob.
:param create_container: Attempt to create the target container prior to uploading the blob. This is
useful if the target container may not exist yet. Defaults to False.
:param kwargs: Optional keyword arguments that ``BlobClient.upload()`` takes.
"""
# Reorder the argument order from airflow.providers.amazon.aws.hooks.s3.load_string.
self.upload(
container_name=container_name,
blob_name=blob_name,
data=string_data,
create_container=create_container,
**kwargs,
)
def get_file(self, file_path: str, container_name: str, blob_name: str, **kwargs):
"""
Download a file from Azure Blob Storage.
:param file_path: Path to the file to download.
:param container_name: Name of the container.
:param blob_name: Name of the blob.
:param kwargs: Optional keyword arguments that `BlobClient.download_blob()` takes.
"""
with open(file_path, "wb") as fileblob:
stream = self.download(container_name=container_name, blob_name=blob_name, **kwargs)
fileblob.write(stream.readall())
def read_file(self, container_name: str, blob_name: str, **kwargs):
"""
Read a file from Azure Blob Storage and return as a string.
:param container_name: Name of the container.
:param blob_name: Name of the blob.
:param kwargs: Optional keyword arguments that `BlobClient.download_blob` takes.
"""
return self.download(container_name, blob_name, **kwargs).content_as_text()
def upload(
self,
container_name: str,
blob_name: str,
data: Any,
blob_type: str = "BlockBlob",
length: int | None = None,
create_container: bool = False,
**kwargs,
) -> dict[str, Any]:
"""
Create a new blob from a data source with automatic chunking.
:param container_name: The name of the container to upload data
:param blob_name: The name of the blob to upload. This need not exist in the container
:param data: The blob data to upload
:param blob_type: The type of the blob. This can be either ``BlockBlob``,
``PageBlob`` or ``AppendBlob``. The default value is ``BlockBlob``.
:param length: Number of bytes to read from the stream. This is optional,
but should be supplied for optimal performance.
:param create_container: Attempt to create the target container prior to uploading the blob. This is
useful if the target container may not exist yet. Defaults to False.
"""
if create_container:
self.create_container(container_name)
blob_client = self._get_blob_client(container_name, blob_name)
# TODO: rework the interface as it might also return Awaitable
return blob_client.upload_blob(data, blob_type, length=length, **kwargs) # type: ignore[return-value]
def download(
self, container_name, blob_name, offset: int | None = None, length: int | None = None, **kwargs
) -> StorageStreamDownloader:
"""
Download a blob to the StorageStreamDownloader.
:param container_name: The name of the container containing the blob
:param blob_name: The name of the blob to download
:param offset: Start of byte range to use for downloading a section of the blob.
Must be set if length is provided.
:param length: Number of bytes to read from the stream.
"""
blob_client = self._get_blob_client(container_name, blob_name)
# TODO: rework the interface as it might also return Awaitable
return blob_client.download_blob(offset=offset, length=length, **kwargs) # type: ignore[return-value]
def create_container(self, container_name: str) -> None:
"""
Create container object if not already existing.
:param container_name: The name of the container to create
"""
container_client = self._get_container_client(container_name)
try:
self.log.debug("Attempting to create container: %s", container_name)
container_client.create_container()
self.log.info("Created container: %s", container_name)
except ResourceExistsError:
self.log.info(
"Attempted to create container %r but it already exists. If it is expected that this "
"container will always exist, consider setting create_container to False.",
container_name,
)
except HttpResponseError as e:
self.log.info(
"Received an HTTP response error while attempting to creating container %r: %s"
"\nIf the error is related to missing permissions to create containers, please consider "
"setting create_container to False or supplying connection credentials with the "
"appropriate permission for connection ID %r.",
container_name,
e.response,
self.conn_id,
)
except Exception as e:
self.log.info("Error while attempting to create container %r: %s", container_name, e)
raise
def delete_container(self, container_name: str) -> None:
"""
Delete a container object.
:param container_name: The name of the container
"""
try:
self.log.debug("Attempting to delete container: %s", container_name)
self._get_container_client(container_name).delete_container()
self.log.info("Deleted container: %s", container_name)
except ResourceNotFoundError:
self.log.warning("Unable to delete container %s (not found)", container_name)
except Exception:
self.log.error("Error deleting container: %s", container_name)
raise
def delete_blobs(self, container_name: str, *blobs, **kwargs) -> None:
"""
Mark the specified blobs or snapshots for deletion.
:param container_name: The name of the container containing the blobs
:param blobs: The blobs to delete. This can be a single blob, or multiple values
can be supplied, where each value is either the name of the blob (str) or BlobProperties.
"""
self._get_container_client(container_name).delete_blobs(*blobs, **kwargs)
self.log.info("Deleted blobs: %s", blobs)
def copy_blobs(
self,
source_container_name: str,
source_blob_name: str,
destination_container_name: str,
destination_blob_name: str,
) -> None:
"""
Copy the specified blobs from one blob prefix to another.
:param source_container_name: The name of the source container containing the blobs.
:param source_blob_name: The full source blob path without the container name.
:param destination_container_name: The name of the destination container where the blobs
will be copied to.
:param destination_blob_name: The full destination blob path without the container name.
"""
source_blob_client = self._get_blob_client(
container_name=source_container_name, blob_name=source_blob_name
)
source_blob_url = source_blob_client.url
destination_blob_client = self._get_blob_client(
container_name=destination_container_name, blob_name=destination_blob_name
)
destination_blob_client.start_copy_from_url(source_blob_url)
def delete_file(
self,
container_name: str,
blob_name: str,
is_prefix: bool = False,
ignore_if_missing: bool = False,
delimiter: str = "",
**kwargs,
) -> None:
"""
Delete a file, or all blobs matching a prefix, from Azure Blob Storage.
:param container_name: Name of the container.
:param blob_name: Name of the blob.
:param is_prefix: If blob_name is a prefix, delete all matching files
:param ignore_if_missing: if True, then return success even if the
blob does not exist.
:param kwargs: Optional keyword arguments that ``ContainerClient.delete_blobs()`` takes.
"""
if is_prefix:
blobs_to_delete = self.get_blobs_list(
container_name, prefix=blob_name, delimiter=delimiter, **kwargs
)
elif self.check_for_blob(container_name, blob_name):
blobs_to_delete = [blob_name]
else:
blobs_to_delete = []
if not ignore_if_missing and not blobs_to_delete:
raise AirflowException(f"Blob(s) not found: {blob_name}")
# The maximum number of blobs that can be deleted in a single request is 256 using the underlying
# `ContainerClient.delete_blobs()` method. Therefore the deletes need to be in batches of <= 256.
num_blobs_to_delete = len(blobs_to_delete)
for i in range(0, num_blobs_to_delete, 256):
self.delete_blobs(container_name, *blobs_to_delete[i : i + 256], **kwargs)
def test_connection(self):
"""Test Azure Blob Storage connection."""
success = (True, "Successfully connected to Azure Blob Storage.")
try:
# Attempt to retrieve storage account information
self.get_conn().get_account_information()
return success
except Exception as e:
return False, str(e)
|
WasbHook
|
python
|
django__django
|
django/contrib/admindocs/apps.py
|
{
"start": 91,
"end": 216
}
|
class ____(AppConfig):
name = "django.contrib.admindocs"
verbose_name = _("Administrative Documentation")
|
AdminDocsConfig
|
python
|
ZoranPandovski__al-go-rithms
|
hashes/Python/sha1.py
|
{
"start": 1557,
"end": 4911
}
|
class ____:
"""
Class to contain the entire pipeline for SHA1 Hashing Algorithm
"""
def __init__(self, data):
"""
Inititates the variables data and h. h is a list of 5 8-digit Hexadecimal
numbers corresponding to (1732584193, 4023233417, 2562383102, 271733878, 3285377520)
respectively. We will start with this as a message digest. 0x is how you write
Hexadecimal numbers in Python
"""
self.data = data
self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
@staticmethod
def rotate(n, b):
"""
Static method to be used inside other methods. Left rotates n by b.
"""
return ((n << b) | (n >> (32 - b))) & 0xffffffff
def padding(self):
"""
Pads the input message with zeros so that padded_data has 64 bytes or 512 bits
"""
padding = b'\x80' + b'\x00'*(63 - (len(self.data) + 8) % 64)
padded_data = self.data + padding + struct.pack('>Q', 8 * len(self.data))
return padded_data
def split_blocks(self):
"""
Returns a list of bytestrings each of length 64
"""
return [self.padded_data[i:i+64] for i in range(0, len(self.padded_data), 64)]
# @staticmethod
def expand_block(self, block):
"""
Takes a bytestring-block of length 64, unpacks it to a list of integers and returns a
list of 80 integers pafter some bit operations
"""
w = list(struct.unpack('>16L', block)) + [0] * 64
for i in range(16, 80):
w[i] = self.rotate((w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]), 1)
return w
def final_hash(self):
"""
Calls all the other methods to process the input. Pads the data, then splits into
blocks and then does a series of operations for each block (including expansion).
For each block, the variable h that was initialized is copied to a,b,c,d,e
and these 5 variables a,b,c,d,e undergo several changes. After all the blocks are
processed, these 5 variables are pairwise added to h ie a to h[0], b to h[1] and so on.
This h becomes our final hash which is returned.
"""
self.padded_data = self.padding()
self.blocks = self.split_blocks()
for block in self.blocks:
expanded_block = self.expand_block(block)
a, b, c, d, e = self.h
for i in range(0, 80):
if 0 <= i < 20:
f = (b & c) | ((~b) & d)
k = 0x5A827999
elif 20 <= i < 40:
f = b ^ c ^ d
k = 0x6ED9EBA1
elif 40 <= i < 60:
f = (b & c) | (b & d) | (c & d)
k = 0x8F1BBCDC
elif 60 <= i < 80:
f = b ^ c ^ d
k = 0xCA62C1D6
a, b, c, d, e = self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xffffffff,\
a, self.rotate(b, 30), c, d
self.h = self.h[0] + a & 0xffffffff,\
self.h[1] + b & 0xffffffff,\
self.h[2] + c & 0xffffffff,\
self.h[3] + d & 0xffffffff,\
self.h[4] + e & 0xffffffff
return '%08x%08x%08x%08x%08x' %tuple(self.h)
|
SHA1Hash
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/components/resolved/base.py
|
{
"start": 8986,
"end": 19880
}
|
class ____:
type: Any
default: Any
has_default: bool
field_info: Optional[FieldInfo]
def _get_annotations(
resolved_type: type[Resolvable],
) -> dict[str, AnnotationInfo]:
annotations: dict[str, AnnotationInfo] = {}
init_kwargs = _get_init_kwargs(resolved_type)
if is_dataclass(resolved_type):
for f in fields(resolved_type):
has_default = f.default is not MISSING or f.default_factory is not MISSING
annotations[f.name] = AnnotationInfo(
type=f.type,
default=f.default,
has_default=has_default,
field_info=None,
)
return annotations
elif safe_is_subclass(resolved_type, BaseModel):
for name, field_info in resolved_type.model_fields.items():
has_default = not field_info.is_required()
annotations[name] = AnnotationInfo(
type=field_info.rebuild_annotation(),
default=field_info.default,
has_default=has_default,
field_info=field_info,
)
return annotations
elif is_record(resolved_type):
defaults = get_record_defaults(resolved_type)
for name, ttype in get_record_annotations(resolved_type).items():
annotations[name] = AnnotationInfo(
type=ttype,
default=defaults[name] if name in defaults else None,
has_default=name in defaults,
field_info=None,
)
return annotations
elif init_kwargs is not None:
return init_kwargs
else:
raise ResolutionException(
f"Invalid Resolvable type {resolved_type}, could not determine fields. Resolved subclasses must be one of the following:\n"
"* class with __init__\n"
"* @dataclass\n"
"* pydantic Model\n"
"* @dagster_shared.record.record\n"
)
def _get_init_kwargs(
target_type: type[Resolvable],
) -> Optional[dict[str, AnnotationInfo]]:
if target_type.__init__ is object.__init__:
return None
sig = inspect.signature(target_type.__init__)
fields: dict[str, AnnotationInfo] = {}
skipped_self = False
for name, param in sig.parameters.items():
if not skipped_self:
skipped_self = True
continue
if param.kind == param.POSITIONAL_ONLY:
raise ResolutionException(
f"Invalid Resolvable type {target_type}: __init__ contains positional only parameter."
)
if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
continue
if param.annotation == param.empty:
raise ResolutionException(
f"Invalid Resolvable type {target_type}: __init__ parameter {name} has no type hint."
)
fields[name] = AnnotationInfo(
type=param.annotation,
default=param.default,
has_default=param.default is not param.empty,
field_info=None,
)
return fields
def resolve_fields(
model: BaseModel,
resolved_cls: type,
context: "ResolutionContext",
) -> Mapping[str, Any]:
"""Returns a mapping of field names to resolved values for those fields."""
alias_name_by_field_name = {
field_name: (
annotation_info.field_info.alias
if annotation_info.field_info and annotation_info.field_info.alias
else field_name
)
for field_name, annotation_info in _get_annotations(resolved_cls).items()
}
field_resolvers = {
(field_name): _get_resolver(annotation_info.type, field_name)
for field_name, annotation_info in _get_annotations(resolved_cls).items()
}
out = {
field_name: resolver.execute(context=context, model=model, field_name=field_name)
for field_name, resolver in field_resolvers.items()
# filter out unset fields to trigger defaults
if (resolver.model_field_name or field_name) in model.model_dump(exclude_unset=True)
and getattr(model, resolver.model_field_name or field_name) != _Unset
}
return {alias_name_by_field_name[k]: v for k, v in out.items()}
T = TypeVar("T")
def _get_resolver(annotation: Any, field_name: str) -> "Resolver":
origin = get_origin(annotation)
args = get_args(annotation)
# explicit field level Resolver
if origin is Annotated:
resolver = next((arg for arg in args if isinstance(arg, Resolver)), None)
if resolver:
# if the outer resolver is default, see if there is a nested one
if resolver.is_default:
nested = _dig_for_resolver(args[0], [])
if nested:
return nested.with_outer_resolver(resolver)
check.invariant(
_is_resolvable_type(args[0]) or resolver.model_field_type,
f"Resolver for {field_name} must define model_field_type, {args[0]} is not model compliant.",
)
return resolver
# nested or implicit
res = _dig_for_resolver(annotation, [])
if res:
return res
from dagster.components.resolved.core_models import CORE_MODEL_SUGGESTIONS
core_model_suggestion = ""
if annotation in CORE_MODEL_SUGGESTIONS:
core_model_suggestion = f"\n\nAn annotated resolver for {annotation.__name__} is available, you may wish to use it instead: {CORE_MODEL_SUGGESTIONS[annotation]}"
raise ResolutionException(
"Could not derive resolver for annotation\n"
f" {field_name}: {annotation}\n"
"Field types are expected to be:\n"
"* serializable types such as str, float, int, bool, list, Enum, etc\n"
"* Resolvable subclasses\n"
"* pydantic Models\n"
"* Annotated with an appropriate dagster.components.Resolver\n"
f" e.g. Annotated[{annotation.__name__}, Resolver(fn=..., model_field_type=...)]"
f"{core_model_suggestion}"
)
def _dig_for_resolver(annotation, path: Sequence[_TypeContainer]) -> Optional[Resolver]:
if _is_implicitly_resolved_type(annotation):
return Resolver.default()
origin = get_origin(annotation)
args = get_args(annotation)
if safe_is_subclass(annotation, Resolvable):
return Resolver(
partial(
_resolve_at_path,
container_path=path,
resolver=annotation.resolve_from_model,
),
model_field_type=_wrap(annotation.model(), path),
)
if origin is Annotated:
resolver = next((arg for arg in args if isinstance(arg, Resolver)), None)
if resolver:
check.invariant(
_is_resolvable_type(args[0]) or resolver.model_field_type,
f"Nested resolver must define model_field_type {args[0]} is not model compliant.",
)
# need to ensure nested resolvers set their model type
if resolver.resolves_from_parent_object and path:
raise ResolutionException(
f"Resolver.from_model found nested within {list(p.name for p in path)}. "
"Resolver.from_model can only be used on the outer most Annotated wrapper."
)
return Resolver(
resolver.fn.__class__(
partial(
_resolve_at_path,
container_path=path,
resolver=resolver.fn.callable,
)
),
model_field_type=_wrap(resolver.model_field_type or args[0], path),
inject_before_resolve=resolver.inject_before_resolve,
)
annotated_type = args[0]
if _is_implicitly_resolved_type(annotated_type):
return Resolver.default()
return _dig_for_resolver(annotated_type, path)
if origin in (Union, UnionType):
if len(args) == 2 and args[1] is type(None):
res = _dig_for_resolver(args[0], [*path, _TypeContainer.OPTIONAL])
if res:
return res
else:
arg_resolver_pairs = [(arg, _dig_for_resolver(arg, path)) for arg in args]
if all(r is not None for _, r in arg_resolver_pairs):
return Resolver.union(
arg_resolver_pairs, # type: ignore # doesn't understand all check
)
elif origin in (
Sequence,
tuple,
list,
): # should look for tuple[T, ...] specifically
res = _dig_for_resolver(args[0], [*path, _TypeContainer.SEQUENCE])
if res:
return res
elif origin is dict:
key_type, value_type = args
if key_type != str:
raise ResolutionException(f"dict key type must be str, got {key_type}")
value_res = _dig_for_resolver(value_type, [*path, _TypeContainer.DICT])
if value_res:
return value_res
def _wrap(ttype, path: Sequence[_TypeContainer]):
result_type = ttype
for container in reversed(path):
if container is _TypeContainer.OPTIONAL:
result_type = Optional[result_type]
elif container is _TypeContainer.SEQUENCE:
# use tuple instead of Sequence for perf
result_type = tuple[result_type, ...]
elif container is _TypeContainer.DICT:
result_type = dict[str, result_type]
else:
check.assert_never(container)
return result_type
def _resolve_at_path(
context: "ResolutionContext",
value: Any,
container_path: Sequence[_TypeContainer],
resolver,
):
if not container_path:
return resolver(context, value)
container = container_path[0]
inner_path = container_path[1:]
if container is _TypeContainer.OPTIONAL:
return _resolve_at_path(context, value, inner_path, resolver) if value is not None else None
elif container is _TypeContainer.SEQUENCE:
return [
_resolve_at_path(context.at_path(idx), i, inner_path, resolver)
for idx, i in enumerate(value)
]
elif container is _TypeContainer.DICT:
return {
k: _resolve_at_path(context.at_path(k), v, inner_path, resolver)
for k, v in value.items()
}
check.assert_never(container)
def _ensure_non_resolvable_model_compliance(mtype: type[BaseModel]):
for name, field_info in mtype.model_fields.items():
field_type = field_info.rebuild_annotation()
if not _is_implicitly_resolved_type(field_type):
raise ResolutionException(
f"pydantic model class {mtype.__name__} includes incompatible field\n"
f" {name}: {field_type}\n"
"Subclass Resolvable to support Resolvers on fields."
)
|
AnnotationInfo
|
python
|
getsentry__sentry
|
src/sentry/users/api/bases/user.py
|
{
"start": 4889,
"end": 5847
}
|
class ____(Endpoint):
"""
The base endpoint for APIs that deal with Users but live in the region silo.
Inherit from this class to get permission checks and to automatically
convert user ID "me" to the currently logged in user's ID.
"""
permission_classes = (UserPermission,)
def convert_args(
self, request: Request, user_id: int | str | None = None, *args: Any, **kwargs: Any
) -> Any:
user: RpcUser | User | None = None
if user_id == "me":
if isinstance(request.user, AnonymousUser) or not request.user.is_authenticated:
raise ResourceDoesNotExist
user = request.user
elif user_id is not None:
user = user_service.get_user(user_id=int(user_id))
if not user:
raise ResourceDoesNotExist
self.check_object_permissions(request, user)
kwargs["user"] = user
return args, kwargs
|
RegionSiloUserEndpoint
|
python
|
huggingface__transformers
|
src/transformers/models/dpt/modeling_dpt.py
|
{
"start": 42566,
"end": 43383
}
|
class ____(nn.Module):
def __init__(self, config: DPTConfig):
super().__init__()
self.config = config
features = config.fusion_hidden_size
self.head = nn.Sequential(
nn.Conv2d(features, features, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(features),
nn.ReLU(),
nn.Dropout(config.semantic_classifier_dropout),
nn.Conv2d(features, config.num_labels, kernel_size=1),
nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True),
)
def forward(self, hidden_states: list[torch.Tensor]) -> torch.Tensor:
# use last features
hidden_states = hidden_states[self.config.head_in_index]
logits = self.head(hidden_states)
return logits
|
DPTSemanticSegmentationHead
|
python
|
openai__gym
|
gym/core.py
|
{
"start": 10073,
"end": 14519
}
|
class ____(Env[ObsType, ActType]):
"""Wraps an environment to allow a modular transformation of the :meth:`step` and :meth:`reset` methods.
This class is the base class for all wrappers. The subclass could override
some methods to change the behavior of the original environment without touching the
original code.
Note:
Don't forget to call ``super().__init__(env)`` if the subclass overrides :meth:`__init__`.
"""
def __init__(self, env: Env):
"""Wraps an environment to allow a modular transformation of the :meth:`step` and :meth:`reset` methods.
Args:
env: The environment to wrap
"""
self.env = env
self._action_space: Optional[spaces.Space] = None
self._observation_space: Optional[spaces.Space] = None
self._reward_range: Optional[Tuple[SupportsFloat, SupportsFloat]] = None
self._metadata: Optional[dict] = None
def __getattr__(self, name):
"""Returns an attribute with ``name``, unless ``name`` starts with an underscore."""
if name.startswith("_"):
raise AttributeError(f"accessing private attribute '{name}' is prohibited")
return getattr(self.env, name)
@property
def spec(self):
"""Returns the environment specification."""
return self.env.spec
@classmethod
def class_name(cls):
"""Returns the class name of the wrapper."""
return cls.__name__
@property
def action_space(self) -> spaces.Space[ActType]:
"""Returns the action space of the environment."""
if self._action_space is None:
return self.env.action_space
return self._action_space
@action_space.setter
def action_space(self, space: spaces.Space):
self._action_space = space
@property
def observation_space(self) -> spaces.Space:
"""Returns the observation space of the environment."""
if self._observation_space is None:
return self.env.observation_space
return self._observation_space
@observation_space.setter
def observation_space(self, space: spaces.Space):
self._observation_space = space
@property
def reward_range(self) -> Tuple[SupportsFloat, SupportsFloat]:
"""Return the reward range of the environment."""
if self._reward_range is None:
return self.env.reward_range
return self._reward_range
@reward_range.setter
def reward_range(self, value: Tuple[SupportsFloat, SupportsFloat]):
self._reward_range = value
@property
def metadata(self) -> dict:
"""Returns the environment metadata."""
if self._metadata is None:
return self.env.metadata
return self._metadata
@metadata.setter
def metadata(self, value):
self._metadata = value
@property
def render_mode(self) -> Optional[str]:
"""Returns the environment render_mode."""
return self.env.render_mode
@property
def np_random(self) -> np.random.Generator:
"""Returns the environment np_random."""
return self.env.np_random
@np_random.setter
def np_random(self, value):
self.env.np_random = value
@property
def _np_random(self):
raise AttributeError(
"Can't access `_np_random` of a wrapper, use `.unwrapped._np_random` or `.np_random`."
)
def step(self, action: ActType) -> Tuple[ObsType, float, bool, bool, dict]:
"""Steps through the environment with action."""
return self.env.step(action)
def reset(self, **kwargs) -> Tuple[ObsType, dict]:
"""Resets the environment with kwargs."""
return self.env.reset(**kwargs)
def render(
self, *args, **kwargs
) -> Optional[Union[RenderFrame, List[RenderFrame]]]:
"""Renders the environment."""
return self.env.render(*args, **kwargs)
def close(self):
"""Closes the environment."""
return self.env.close()
def __str__(self):
"""Returns the wrapper name and the unwrapped environment string."""
return f"<{type(self).__name__}{self.env}>"
def __repr__(self):
"""Returns the string representation of the wrapper."""
return str(self)
@property
def unwrapped(self) -> Env:
"""Returns the base environment of the wrapper."""
return self.env.unwrapped
|
Wrapper
|
python
|
keras-team__keras
|
keras/src/layers/reshaping/repeat_vector.py
|
{
"start": 208,
"end": 1335
}
|
class ____(Layer):
"""Repeats the input n times.
Example:
>>> x = keras.Input(shape=(32,))
>>> y = keras.layers.RepeatVector(3)(x)
>>> y.shape
(None, 3, 32)
Args:
n: Integer, repetition factor.
Input shape:
2D tensor with shape `(batch_size, features)`.
Output shape:
3D tensor with shape `(batch_size, n, features)`.
"""
def __init__(self, n, **kwargs):
super().__init__(**kwargs)
self.n = n
if not isinstance(n, int):
raise TypeError(
f"Expected an integer value for `n`, got {type(n)}."
)
self.input_spec = InputSpec(ndim=2)
def compute_output_shape(self, input_shape):
return (input_shape[0], self.n, input_shape[1])
def call(self, inputs):
input_shape = ops.shape(inputs)
reshaped = ops.reshape(inputs, (input_shape[0], 1, input_shape[1]))
return ops.repeat(reshaped, self.n, axis=1)
def get_config(self):
config = {"n": self.n}
base_config = super().get_config()
return {**base_config, **config}
|
RepeatVector
|
python
|
ray-project__ray
|
rllib/env/remote_base_env.py
|
{
"start": 512,
"end": 16999
}
|
class ____(BaseEnv):
"""BaseEnv that executes its sub environments as @ray.remote actors.
This provides dynamic batching of inference as observations are returned
from the remote simulator actors. Both single and multi-agent child envs
are supported, and envs can be stepped synchronously or asynchronously.
NOTE: This class implicitly assumes that the remote envs are gym.Env's
You shouldn't need to instantiate this class directly. It's automatically
inserted when you use the `remote_worker_envs=True` option in your
Algorithm's config.
"""
def __init__(
self,
make_env: Callable[[int], EnvType],
num_envs: int,
multiagent: bool,
remote_env_batch_wait_ms: int,
existing_envs: Optional[List[ray.actor.ActorHandle]] = None,
worker: Optional["RolloutWorker"] = None,
restart_failed_sub_environments: bool = False,
):
"""Initializes a RemoteVectorEnv instance.
Args:
make_env: Callable that produces a single (non-vectorized) env,
given the vector env index as only arg.
num_envs: The number of sub-environments to create for the
vectorization.
multiagent: Whether this is a multiagent env or not.
remote_env_batch_wait_ms: Time to wait for (ray.remote)
sub-environments to have new observations available when
polled. Only when none of the sub-environments is ready,
repeat the `ray.wait()` call until at least one sub-env
is ready. Then return only the observations of the ready
sub-environment(s).
existing_envs: Optional list of already created sub-environments.
These will be used as-is and only as many new sub-envs as
necessary (`num_envs - len(existing_envs)`) will be created.
worker: An optional RolloutWorker that owns the env. This is only
used if `remote_worker_envs` is True in your config and the
`on_sub_environment_created` custom callback needs to be
called on each created actor.
restart_failed_sub_environments: If True and any sub-environment (within
a vectorized env) throws any error during env stepping, the
Sampler will try to restart the faulty sub-environment. This is done
without disturbing the other (still intact) sub-environment and without
the RolloutWorker crashing.
"""
# Could be creating local or remote envs.
self.make_env = make_env
self.num_envs = num_envs
self.multiagent = multiagent
self.poll_timeout = remote_env_batch_wait_ms / 1000
self.worker = worker
self.restart_failed_sub_environments = restart_failed_sub_environments
# Already existing env objects (generated by the RolloutWorker).
existing_envs = existing_envs or []
# Whether the given `make_env` callable already returns ActorHandles
# (@ray.remote class instances) or not.
self.make_env_creates_actors = False
self._observation_space = None
self._action_space = None
# List of ray actor handles (each handle points to one @ray.remote
# sub-environment).
self.actors: Optional[List[ray.actor.ActorHandle]] = None
# `self.make_env` already produces Actors: Use it directly.
if len(existing_envs) > 0 and isinstance(
existing_envs[0], ray.actor.ActorHandle
):
self.make_env_creates_actors = True
self.actors = existing_envs
while len(self.actors) < self.num_envs:
self.actors.append(self._make_sub_env(len(self.actors)))
# `self.make_env` produces gym.Envs (or children thereof, such
# as MultiAgentEnv): Need to auto-wrap it here. The problem with
# this is that custom methods wil get lost. If you would like to
# keep your custom methods in your envs, you should provide the
# env class directly in your config (w/o tune.register_env()),
# such that your class can directly be made a @ray.remote
# (w/o the wrapping via `_Remote[Multi|Single]AgentEnv`).
# Also, if `len(existing_envs) > 0`, we have to throw those away
# as we need to create ray actors here.
else:
self.actors = [self._make_sub_env(i) for i in range(self.num_envs)]
# Utilize existing envs for inferring observation/action spaces.
if len(existing_envs) > 0:
self._observation_space = existing_envs[0].observation_space
self._action_space = existing_envs[0].action_space
# Have to call actors' remote methods to get observation/action spaces.
else:
self._observation_space, self._action_space = ray.get(
[
self.actors[0].observation_space.remote(),
self.actors[0].action_space.remote(),
]
)
# Dict mapping object refs (return values of @ray.remote calls),
# whose actual values we are waiting for (via ray.wait in
# `self.poll()`) to their corresponding actor handles (the actors
# that created these return values).
# Call `reset()` on all @ray.remote sub-environment actors.
self.pending: Dict[ray.actor.ActorHandle] = {
a.reset.remote(): a for a in self.actors
}
@override(BaseEnv)
def poll(
self,
) -> Tuple[
MultiEnvDict,
MultiEnvDict,
MultiEnvDict,
MultiEnvDict,
MultiEnvDict,
MultiEnvDict,
]:
# each keyed by env_id in [0, num_remote_envs)
obs, rewards, terminateds, truncateds, infos = {}, {}, {}, {}, {}
ready = []
# Wait for at least 1 env to be ready here.
while not ready:
ready, _ = ray.wait(
list(self.pending),
num_returns=len(self.pending),
timeout=self.poll_timeout,
)
# Get and return observations for each of the ready envs
env_ids = set()
for obj_ref in ready:
# Get the corresponding actor handle from our dict and remove the
# object ref (we will call `ray.get()` on it and it will no longer
# be "pending").
actor = self.pending.pop(obj_ref)
env_id = self.actors.index(actor)
env_ids.add(env_id)
# Get the ready object ref (this may be return value(s) of
# `reset()` or `step()`).
try:
ret = ray.get(obj_ref)
except Exception as e:
# Something happened on the actor during stepping/resetting.
# Restart sub-environment (create new actor; close old one).
if self.restart_failed_sub_environments:
logger.exception(e.args[0])
self.try_restart(env_id)
# Always return multi-agent data.
# Set the observation to the exception, no rewards,
# terminated[__all__]=True (episode will be discarded anyways),
# no infos.
ret = (
e,
{},
{"__all__": True},
{"__all__": False},
{},
)
# Do not try to restart. Just raise the error.
else:
raise e
# Our sub-envs are simple Actor-turned gym.Envs or MultiAgentEnvs.
if self.make_env_creates_actors:
rew, terminated, truncated, info = None, None, None, None
if self.multiagent:
if isinstance(ret, tuple):
# Gym >= 0.26: `step()` result: Obs, reward, terminated,
# truncated, info.
if len(ret) == 5:
ob, rew, terminated, truncated, info = ret
# Gym >= 0.26: `reset()` result: Obs and infos.
elif len(ret) == 2:
ob = ret[0]
info = ret[1]
# Gym < 0.26? Something went wrong.
else:
raise AssertionError(
"Your gymnasium.Env seems to NOT return the correct "
"number of return values for `step()` (needs to return"
" 5 values: obs, reward, terminated, truncated and "
"info) or `reset()` (needs to return 2 values: obs and "
"info)!"
)
# Gym < 0.26: `reset()` result: Only obs.
else:
raise AssertionError(
"Your gymnasium.Env seems to only return a single value "
"upon `reset()`! Must return 2 (obs AND infos)."
)
else:
if isinstance(ret, tuple):
# `step()` result: Obs, reward, terminated, truncated, info.
if len(ret) == 5:
ob = {_DUMMY_AGENT_ID: ret[0]}
rew = {_DUMMY_AGENT_ID: ret[1]}
terminated = {_DUMMY_AGENT_ID: ret[2], "__all__": ret[2]}
truncated = {_DUMMY_AGENT_ID: ret[3], "__all__": ret[3]}
info = {_DUMMY_AGENT_ID: ret[4]}
# `reset()` result: Obs and infos.
elif len(ret) == 2:
ob = {_DUMMY_AGENT_ID: ret[0]}
info = {_DUMMY_AGENT_ID: ret[1]}
# Gym < 0.26? Something went wrong.
else:
raise AssertionError(
"Your gymnasium.Env seems to NOT return the correct "
"number of return values for `step()` (needs to return"
" 5 values: obs, reward, terminated, truncated and "
"info) or `reset()` (needs to return 2 values: obs and "
"info)!"
)
# Gym < 0.26?
else:
raise AssertionError(
"Your gymnasium.Env seems to only return a single value "
"upon `reset()`! Must return 2 (obs and infos)."
)
# If this is a `reset()` return value, we only have the initial
# observations and infos: Set rewards, terminateds, and truncateds to
# dummy values.
if rew is None:
rew = {agent_id: 0 for agent_id in ob.keys()}
terminated = {"__all__": False}
truncated = {"__all__": False}
# Our sub-envs are auto-wrapped (by `_RemoteSingleAgentEnv` or
# `_RemoteMultiAgentEnv`) and already behave like multi-agent
# envs.
else:
ob, rew, terminated, truncated, info = ret
obs[env_id] = ob
rewards[env_id] = rew
terminateds[env_id] = terminated
truncateds[env_id] = truncated
infos[env_id] = info
logger.debug(f"Got obs batch for actors {env_ids}")
return obs, rewards, terminateds, truncateds, infos, {}
@override(BaseEnv)
def send_actions(self, action_dict: MultiEnvDict) -> None:
for env_id, actions in action_dict.items():
actor = self.actors[env_id]
# `actor` is a simple single-agent (remote) env, e.g. a gym.Env
# that was made a @ray.remote.
if not self.multiagent and self.make_env_creates_actors:
obj_ref = actor.step.remote(actions[_DUMMY_AGENT_ID])
# `actor` is already a _RemoteSingleAgentEnv or
# _RemoteMultiAgentEnv wrapper
# (handles the multi-agent action_dict automatically).
else:
obj_ref = actor.step.remote(actions)
self.pending[obj_ref] = actor
@override(BaseEnv)
def try_reset(
self,
env_id: Optional[EnvID] = None,
*,
seed: Optional[int] = None,
options: Optional[dict] = None,
) -> Tuple[MultiEnvDict, MultiEnvDict]:
actor = self.actors[env_id]
obj_ref = actor.reset.remote(seed=seed, options=options)
self.pending[obj_ref] = actor
# Because this env type does not support synchronous reset requests (with
# immediate return value), we return ASYNC_RESET_RETURN here to indicate
# that the reset results will be available via the next `poll()` call.
return ASYNC_RESET_RETURN, ASYNC_RESET_RETURN
@override(BaseEnv)
def try_restart(self, env_id: Optional[EnvID] = None) -> None:
# Try closing down the old (possibly faulty) sub-env, but ignore errors.
try:
# Close the env on the remote side.
self.actors[env_id].close.remote()
except Exception as e:
if log_once("close_sub_env"):
logger.warning(
"Trying to close old and replaced sub-environment (at vector "
f"index={env_id}), but closing resulted in error:\n{e}"
)
# Terminate the actor itself to free up its resources.
self.actors[env_id].__ray_terminate__.remote()
# Re-create a new sub-environment.
self.actors[env_id] = self._make_sub_env(env_id)
@override(BaseEnv)
def stop(self) -> None:
if self.actors is not None:
for actor in self.actors:
actor.__ray_terminate__.remote()
@override(BaseEnv)
def get_sub_environments(self, as_dict: bool = False) -> List[EnvType]:
if as_dict:
return dict(enumerate(self.actors))
return self.actors
@property
@override(BaseEnv)
def observation_space(self) -> gym.spaces.Dict:
return self._observation_space
@property
@override(BaseEnv)
def action_space(self) -> gym.Space:
return self._action_space
def _make_sub_env(self, idx: Optional[int] = None):
"""Re-creates a sub-environment at the new index."""
# Our `make_env` creates ray actors directly.
if self.make_env_creates_actors:
sub_env = self.make_env(idx)
if self.worker is not None:
self.worker.callbacks.on_sub_environment_created(
worker=self.worker,
sub_environment=self.actors[idx],
env_context=self.worker.env_context.copy_with_overrides(
vector_index=idx
),
)
# Our `make_env` returns actual envs -> Have to convert them into actors
# using our utility wrapper classes.
else:
def make_remote_env(i):
logger.info("Launching env {} in remote actor".format(i))
if self.multiagent:
sub_env = _RemoteMultiAgentEnv.remote(self.make_env, i)
else:
sub_env = _RemoteSingleAgentEnv.remote(self.make_env, i)
if self.worker is not None:
self.worker.callbacks.on_sub_environment_created(
worker=self.worker,
sub_environment=sub_env,
env_context=self.worker.env_context.copy_with_overrides(
vector_index=i
),
)
return sub_env
sub_env = make_remote_env(idx)
return sub_env
@override(BaseEnv)
def get_agent_ids(self) -> Set[AgentID]:
if self.multiagent:
return ray.get(self.actors[0].get_agent_ids.remote())
else:
return {_DUMMY_AGENT_ID}
@ray.remote(num_cpus=0)
|
RemoteBaseEnv
|
python
|
django__django
|
tests/contenttypes_tests/models.py
|
{
"start": 720,
"end": 860
}
|
class ____(models.Model):
url = models.URLField(max_length=100)
def get_absolute_url(self):
return self.url
|
SchemeIncludedURL
|
python
|
gevent__gevent
|
src/gevent/_ffi/watcher.py
|
{
"start": 17949,
"end": 18009
}
|
class ____(object):
_watcher_type = 'prepare'
|
PrepareMixin
|
python
|
viewflow__viewflow
|
viewflow/workflow/flow/mixins.py
|
{
"start": 1428,
"end": 2042
}
|
class ____(metaclass=ViewsetMeta):
"""Re-execute a gate manually."""
execute_view_class: Optional[Type[View]] = None
@viewprop
def execute_view(self):
"""View for the admin to re-execute a gate."""
if self.execute_view_class:
return self.execute_view_class.as_view()
@property
def execute_path(self):
if self.execute_view:
return path(
"<int:process_pk>/{}/<int:task_pk>/execute/".format(self.name),
utils.wrap_task_view(self, self.execute_view),
name="execute",
)
|
NodeExecuteMixin
|
python
|
tensorflow__tensorflow
|
tensorflow/python/tpu/tpu_embedding_v3.py
|
{
"start": 7342,
"end": 8243
}
|
class ____:
"""Information about how we stack tables."""
# Indexed by stacked table name:
stacked_table_to_tables: Dict[str, TableConfig] = _fielddict()
quantization_configs: Dict[str, QuantizationConfig] = _fielddict()
# Indexed by table name:
table_name_to_table: Dict[str, TableConfig] = _fielddict()
table_to_padding_rows: Dict[str, int] = _fielddict()
table_to_padding_columns: Dict[str, int] = _fielddict()
table_to_sample_count: Dict[str, int] = _fielddict()
table_to_layout: Dict[str, sparse_core_layout_pb2.SparseCoreTableLayout] = (
_fielddict()
)
# Maps table name to (stacked table, row offset, shard rotation)
table_to_stacked_table_offset: Dict[str, Tuple[str, int, int]] = _fielddict()
# Indexed by feature_path the key of flat_features:
feature_to_sample_offset: Dict[str, int] = _fielddict()
@saveable_compat.legacy_saveable_name("")
|
TableStacking
|
python
|
huggingface__transformers
|
tests/models/hunyuan_v1_dense/test_modeling_hunyuan_v1_dense.py
|
{
"start": 1043,
"end": 1190
}
|
class ____(CausalLMModelTester):
if is_torch_available():
base_model_class = HunYuanDenseV1Model
@require_torch
|
HunYuanDenseV1ModelTester
|
python
|
ipython__ipython
|
tests/test_interactiveshell.py
|
{
"start": 29951,
"end": 30347
}
|
class ____(unittest.TestCase):
def test_unregistering(self):
err_transformer = ErrorTransformer()
ip.ast_transformers.append(err_transformer)
with self.assertWarnsRegex(UserWarning, "It will be unregistered"):
ip.run_cell("1 + 2")
# This should have been removed.
self.assertNotIn(err_transformer, ip.ast_transformers)
|
TestAstTransformError
|
python
|
apache__airflow
|
providers/standard/tests/unit/standard/operators/test_python.py
|
{
"start": 24744,
"end": 39972
}
|
class ____(BasePythonTest):
opcls = ShortCircuitOperator
@pytest.fixture(autouse=True)
def setup_tests(self):
self.task_id = "short_circuit"
self.op1 = EmptyOperator(task_id="op1")
self.op2 = EmptyOperator(task_id="op2")
all_downstream_skipped_states = {
"short_circuit": State.SUCCESS,
"op1": State.SKIPPED,
"op2": State.SKIPPED,
}
all_downstream_skipped_tasks = {"op1", "op2"}
all_success_states = {"short_circuit": State.SUCCESS, "op1": State.SUCCESS, "op2": State.SUCCESS}
all_success_skipped_tasks: set[str] = set()
@pytest.mark.parametrize(
(
"callable_return",
"test_ignore_downstream_trigger_rules",
"test_trigger_rule",
"expected_skipped_tasks",
"expected_task_states",
),
[
# Skip downstream tasks, do not respect trigger rules, default trigger rule on all downstream
# tasks
(
False,
True,
TriggerRule.ALL_SUCCESS,
all_downstream_skipped_tasks,
all_downstream_skipped_states,
),
# Skip downstream tasks via a falsy value, do not respect trigger rules, default trigger rule on
# all downstream tasks
([], True, TriggerRule.ALL_SUCCESS, all_downstream_skipped_tasks, all_downstream_skipped_states),
# Skip downstream tasks, do not respect trigger rules, non-default trigger rule on a downstream
# task
(False, True, TriggerRule.ALL_DONE, all_downstream_skipped_tasks, all_downstream_skipped_states),
# Skip downstream tasks via a falsy value, do not respect trigger rules, non-default trigger rule
# on a downstream task
([], True, TriggerRule.ALL_DONE, all_downstream_skipped_tasks, all_downstream_skipped_states),
# Skip downstream tasks, respect trigger rules, default trigger rule on all downstream tasks
(
False,
False,
TriggerRule.ALL_SUCCESS,
{"op1"},
{"short_circuit": State.SUCCESS, "op1": State.SKIPPED, "op2": State.NONE},
),
# Skip downstream tasks via a falsy value, respect trigger rules, default trigger rule on all
# downstream tasks
(
[],
False,
TriggerRule.ALL_SUCCESS,
{"op1"},
{"short_circuit": State.SUCCESS, "op1": State.SKIPPED, "op2": State.NONE},
),
# Skip downstream tasks, respect trigger rules, non-default trigger rule on a downstream task
(
False,
False,
TriggerRule.ALL_DONE,
{"op1"},
{"short_circuit": State.SUCCESS, "op1": State.SKIPPED, "op2": State.SUCCESS},
),
# Skip downstream tasks via a falsy value, respect trigger rules, non-default trigger rule on a
# downstream task
(
[],
False,
TriggerRule.ALL_DONE,
{"op1"},
{"short_circuit": State.SUCCESS, "op1": State.SKIPPED, "op2": State.SUCCESS},
),
# Do not skip downstream tasks, do not respect trigger rules, default trigger rule on all
# downstream tasks
(True, True, TriggerRule.ALL_SUCCESS, all_success_skipped_tasks, all_success_states),
# Do not skip downstream tasks via a truthy value, do not respect trigger rules, default trigger
# rule on all downstream tasks
(["a", "b", "c"], True, TriggerRule.ALL_SUCCESS, all_success_skipped_tasks, all_success_states),
# Do not skip downstream tasks, do not respect trigger rules, non-default trigger rule on a
# downstream task
(True, True, TriggerRule.ALL_DONE, all_success_skipped_tasks, all_success_states),
# Do not skip downstream tasks via a truthy value, do not respect trigger rules, non-default
# trigger rule on a downstream task
(["a", "b", "c"], True, TriggerRule.ALL_DONE, all_success_skipped_tasks, all_success_states),
# Do not skip downstream tasks, respect trigger rules, default trigger rule on all downstream
# tasks
(True, False, TriggerRule.ALL_SUCCESS, all_success_skipped_tasks, all_success_states),
# Do not skip downstream tasks via a truthy value, respect trigger rules, default trigger rule on
# all downstream tasks
(["a", "b", "c"], False, TriggerRule.ALL_SUCCESS, all_success_skipped_tasks, all_success_states),
# Do not skip downstream tasks, respect trigger rules, non-default trigger rule on a downstream
# task
(True, False, TriggerRule.ALL_DONE, all_success_skipped_tasks, all_success_states),
# Do not skip downstream tasks via a truthy value, respect trigger rules, non-default trigger rule
# on a downstream task
(["a", "b", "c"], False, TriggerRule.ALL_DONE, all_success_skipped_tasks, all_success_states),
],
ids=[
"skip_ignore_with_default_trigger_rule_on_all_tasks",
"skip_falsy_result_ignore_with_default_trigger_rule_on_all_tasks",
"skip_ignore_respect_with_non-default_trigger_rule_on_single_task",
"skip_falsy_result_ignore_respect_with_non-default_trigger_rule_on_single_task",
"skip_respect_with_default_trigger_rule_all_tasks",
"skip_falsy_result_respect_with_default_trigger_rule_all_tasks",
"skip_respect_with_non-default_trigger_rule_on_single_task",
"skip_falsy_result_respect_respect_with_non-default_trigger_rule_on_single_task",
"no_skip_ignore_with_default_trigger_rule_on_all_tasks",
"no_skip_truthy_result_ignore_with_default_trigger_rule_all_tasks",
"no_skip_no_respect_with_non-default_trigger_rule_on_single_task",
"no_skip_truthy_result_ignore_with_non-default_trigger_rule_on_single_task",
"no_skip_respect_with_default_trigger_rule_all_tasks",
"no_skip_truthy_result_respect_with_default_trigger_rule_all_tasks",
"no_skip_respect_with_non-default_trigger_rule_on_single_task",
"no_skip_truthy_result_respect_with_non-default_trigger_rule_on_single_task",
],
)
def test_short_circuiting(
self,
callable_return,
test_ignore_downstream_trigger_rules,
test_trigger_rule,
expected_skipped_tasks,
expected_task_states,
):
"""
Checking the behavior of the ShortCircuitOperator in several scenarios enabling/disabling the skipping
of downstream tasks, both short-circuiting modes, and various trigger rules of downstream tasks.
"""
with self.dag_maker(self.dag_id, template_searchpath=TEMPLATE_SEARCHPATH, serialized=True):
short_circuit = ShortCircuitOperator(
task_id="short_circuit",
python_callable=lambda: callable_return,
ignore_downstream_trigger_rules=test_ignore_downstream_trigger_rules,
)
short_circuit >> self.op1 >> self.op2
self.op2.trigger_rule = test_trigger_rule
dr = self.dag_maker.create_dagrun()
if AIRFLOW_V_3_0_1:
from airflow.exceptions import DownstreamTasksSkipped
if expected_skipped_tasks:
with pytest.raises(DownstreamTasksSkipped) as exc_info:
self.dag_maker.run_ti("short_circuit", dr)
assert set(exc_info.value.tasks) == set(expected_skipped_tasks)
else:
assert self.dag_maker.run_ti("short_circuit", dr) is None
else:
self.dag_maker.run_ti("short_circuit", dr)
self.dag_maker.run_ti("op1", dr)
self.dag_maker.run_ti("op2", dr)
assert short_circuit.ignore_downstream_trigger_rules == test_ignore_downstream_trigger_rules
assert short_circuit.trigger_rule == TriggerRule.ALL_SUCCESS
assert self.op1.trigger_rule == TriggerRule.ALL_SUCCESS
assert self.op2.trigger_rule == test_trigger_rule
self.assert_expected_task_states(dr, expected_task_states)
def test_clear_skipped_downstream_task(self):
"""
After a downstream task is skipped by ShortCircuitOperator, clearing the skipped task
should not cause it to be executed.
"""
with self.dag_maker(self.dag_id, template_searchpath=TEMPLATE_SEARCHPATH, serialized=True):
short_circuit = ShortCircuitOperator(task_id=self.task_id, python_callable=lambda: False)
short_circuit >> self.op1 >> self.op2
dr = self.dag_maker.create_dagrun()
if AIRFLOW_V_3_0_1:
from airflow.exceptions import DownstreamTasksSkipped
with create_session() as session:
sc_ti = dr.get_task_instance(task_id=self.task_id, session=session)
with pytest.raises(DownstreamTasksSkipped) as exc_info:
sc_ti.run()
assert set(exc_info.value.tasks) == {"op1", "op2"}
sc_ti.set_state(TaskInstanceState.SUCCESS, session=session)
dr.task_instance_scheduling_decisions(session=session)
op1_ti = dr.get_task_instance(task_id="op1", session=session)
op1_ti.task = self.op1
assert op1_ti.state == TaskInstanceState.SKIPPED
op1_ti.set_state(None)
op1_ti.run()
assert op1_ti.state == TaskInstanceState.SKIPPED
else:
self.dag_maker.run_ti(self.task_id, dr)
self.dag_maker.run_ti(self.op1.task_id, dr)
self.dag_maker.run_ti(self.op2.task_id, dr)
assert short_circuit.ignore_downstream_trigger_rules
assert short_circuit.trigger_rule == TriggerRule.ALL_SUCCESS
assert self.op1.trigger_rule == TriggerRule.ALL_SUCCESS
assert self.op2.trigger_rule == TriggerRule.ALL_SUCCESS
expected_states = {
"short_circuit": State.SUCCESS,
"op1": State.SKIPPED,
"op2": State.SKIPPED,
}
self.assert_expected_task_states(dr, expected_states)
# Clear downstream task "op1" that was previously executed.
tis = dr.get_task_instances()
with create_session() as session:
if AIRFLOW_V_3_0_PLUS:
clear_task_instances([ti for ti in tis if ti.task_id == "op1"], session=session)
else:
clear_task_instances(
[ti for ti in tis if ti.task_id == "op1"], session=session, dag=short_circuit.dag
)
self.dag_maker.run_ti("op1", dr)
self.assert_expected_task_states(dr, expected_states)
def test_xcom_push(self):
clear_db_runs()
with self.dag_maker(self.dag_id, template_searchpath=TEMPLATE_SEARCHPATH, serialized=True):
short_op_push_xcom = ShortCircuitOperator(
task_id="push_xcom_from_shortcircuit", python_callable=lambda: "signature"
)
short_op_no_push_xcom = ShortCircuitOperator(
task_id="do_not_push_xcom_from_shortcircuit", python_callable=lambda: False
)
dr = self.dag_maker.create_dagrun()
self.dag_maker.run_ti("push_xcom_from_shortcircuit", dr)
self.dag_maker.run_ti("do_not_push_xcom_from_shortcircuit", dr)
tis = dr.get_task_instances()
assert tis[0].xcom_pull(task_ids=short_op_push_xcom.task_id, key="return_value") == "signature"
assert tis[0].xcom_pull(task_ids=short_op_no_push_xcom.task_id, key="return_value") is False
def test_xcom_push_skipped_tasks(self):
with self.dag_maker(self.dag_id, template_searchpath=TEMPLATE_SEARCHPATH, serialized=True):
short_op_push_xcom = ShortCircuitOperator(
task_id="push_xcom_from_shortcircuit", python_callable=lambda: False
)
empty_task = EmptyOperator(task_id="empty_task")
short_op_push_xcom >> empty_task
dr = self.dag_maker.create_dagrun()
if AIRFLOW_V_3_0_1:
from airflow.exceptions import DownstreamTasksSkipped
with pytest.raises(DownstreamTasksSkipped):
short_op_push_xcom.run(start_date=self.default_date, end_date=self.default_date)
else:
self.dag_maker.run_ti("push_xcom_from_shortcircuit", dr)
tis = dr.get_task_instances()
assert tis[0].xcom_pull(task_ids=short_op_push_xcom.task_id, key="skipmixin_key") == {
"skipped": ["empty_task"]
}
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Airflow 2 implementation is different")
def test_short_circuit_operator_skips_sensors(self):
"""Test that ShortCircuitOperator properly skips sensors in Airflow 3.x."""
from airflow.sdk.bases.sensor import BaseSensorOperator
# Create a sensor similar to S3FileSensor to reproduce the issue
class CustomS3Sensor(BaseSensorOperator):
def __init__(self, bucket_name: str, object_key: str, **kwargs):
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.object_key = object_key
self.timeout = 0
self.poke_interval = 0
def poke(self, context):
# Simulate sensor logic
return True
with self.dag_maker(self.dag_id):
# ShortCircuit that evaluates to False (should skip all downstream)
short_circuit = ShortCircuitOperator(
task_id="check_dis_is_mon_to_fri_not_holiday",
python_callable=lambda: False, # This causes skipping
)
sensor_task = CustomS3Sensor(
task_id="wait_for_ticker_to_secid_lookup_s3_file",
bucket_name="test-bucket",
object_key="ticker_to_secid_lookup.csv",
)
short_circuit >> sensor_task
dr = self.dag_maker.create_dagrun()
self.dag_maker.run_ti("check_dis_is_mon_to_fri_not_holiday", dr)
# Verify the sensor is included in the skip list by checking XCom
# (this was the bug - sensors were not being included in skip list)
tis = dr.get_task_instances()
xcom_data = tis[0].xcom_pull(task_ids="check_dis_is_mon_to_fri_not_holiday", key="skipmixin_key")
assert xcom_data is not None, "XCom data should exist"
skipped_task_ids = set(xcom_data.get("skipped", []))
assert "wait_for_ticker_to_secid_lookup_s3_file" in skipped_task_ids, (
"Sensor should be skipped by ShortCircuitOperator"
)
virtualenv_string_args: list[str] = []
@pytest.mark.execution_timeout(120)
|
TestShortCircuitOperator
|
python
|
django__django
|
django/db/models/constants.py
|
{
"start": 142,
"end": 210
}
|
class ____(Enum):
IGNORE = "ignore"
UPDATE = "update"
|
OnConflict
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typedDictClosed4.py
|
{
"start": 196,
"end": 261
}
|
class ____(TypedDict, extra_items=int | None):
name: str
|
Movie1
|
python
|
django__django
|
tests/xor_lookups/models.py
|
{
"start": 31,
"end": 144
}
|
class ____(models.Model):
num = models.IntegerField()
def __str__(self):
return str(self.num)
|
Number
|
python
|
pallets__jinja
|
src/jinja2/nodes.py
|
{
"start": 12829,
"end": 13013
}
|
class ____(Stmt):
"""A node that represents the import tag."""
fields = ("template", "target", "with_context")
template: "Expr"
target: str
with_context: bool
|
Import
|
python
|
PyCQA__pylint
|
tests/functional/m/missing/missing_self_argument.py
|
{
"start": 69,
"end": 502
}
|
class ____:
"""A class with some methods missing self args."""
def __init__(self):
self.var = "var"
def method(): # [no-method-argument]
"""A method without a self argument."""
def setup(): # [no-method-argument]
"""A method without a self argument, but usage."""
self.var = 1 # [undefined-variable]
def correct(self):
"""Correct."""
self.var = "correct"
|
MyClass
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/cli/parsers/base_argument_parsers.py
|
{
"start": 773,
"end": 1457
}
|
class ____(NamespaceParser, metaclass=abc.ABCMeta):
"""Base class for target namespace parsers involving a single target."""
@property
def option_name(self) -> str:
"""The option name used for this parser."""
return '--target'
@property
def dest(self) -> str:
"""The name of the attribute where the value should be stored."""
return 'targets'
@property
def use_list(self) -> bool:
"""True if the destination is a list, otherwise False."""
return True
@property
def limit_one(self) -> bool:
"""True if only one target is allowed, otherwise False."""
return True
|
TargetNamespaceParser
|
python
|
pandas-dev__pandas
|
pandas/core/computation/engines.py
|
{
"start": 2708,
"end": 3100
}
|
class ____(AbstractEngine):
"""NumExpr engine class"""
has_neg_frac = True
def _evaluate(self):
import numexpr as ne
# convert the expression to a valid numexpr expression
s = self.convert()
env = self.expr.env
scope = env.full_scope
_check_ne_builtin_clash(self.expr)
return ne.evaluate(s, local_dict=scope)
|
NumExprEngine
|
python
|
spyder-ide__spyder
|
spyder/plugins/ipythonconsole/widgets/client.py
|
{
"start": 2813,
"end": 38578
}
|
class ____(QWidget, SaveHistoryMixin, SpyderWidgetMixin): # noqa: PLR0904
"""
Client widget for the IPython Console
This widget is necessary to handle the interaction between the
plugin and each shell widget.
"""
sig_append_to_history_requested = Signal(str, str)
sig_execution_state_changed = Signal()
sig_time_label = Signal(str)
# Signals for remote kernels
sig_restart_kernel_requested = Signal()
sig_kernel_died = Signal()
CONF_SECTION = 'ipython_console'
SEPARATOR = '{0}## ---({1})---'.format(os.linesep*2, time.ctime())
INITHISTORY = ['# -*- coding: utf-8 -*-',
'# *** Spyder Python Console History Log ***', ]
def __init__(
self,
parent,
id_,
config_options,
additional_options,
menu_actions=None,
given_name=None,
give_focus=True,
options_button=None,
handlers=None,
initial_cwd=None,
forcing_custom_interpreter=False,
special_kernel=None,
jupyter_api=None,
files_api=None,
can_close=True,
):
super().__init__(parent)
SaveHistoryMixin.__init__(self, get_conf_path('history.py'))
# --- Init attrs
self.container = parent
self.id_ = id_
self.menu_actions = menu_actions
self.given_name = given_name
self.initial_cwd = initial_cwd
self.forcing_custom_interpreter = forcing_custom_interpreter
self._jupyter_api: typing.Optional[JupyterAPI] = jupyter_api
self._files_api: typing.Optional[
SpyderRemoteFileServicesAPI
] = files_api
self.can_close = can_close
# --- Other attrs
self.kernel_handler: KernelHandler = None
self.hostname = None
self.show_elapsed_time = self.get_conf('show_elapsed_time')
self.reset_warning = self.get_conf('show_reset_namespace_warning')
self.options_button = options_button
self.history = []
self.allow_rename = True
self.error_text = None
self.give_focus = give_focus
self.kernel_id = None
self.__on_close = lambda: None
css_path = self.get_conf('css_path', section='appearance')
if css_path is None:
self.css_path = CSS_PATH
else:
self.css_path = css_path
# --- Widgets
self.shellwidget = ShellWidget(
config=config_options,
ipyclient=self,
additional_options=additional_options,
handlers=handlers,
local_kernel=True,
special_kernel=special_kernel,
)
self.infowidget = self.container.infowidget
self.blank_page = self._create_blank_page()
self.kernel_loading_page = self._create_loading_page()
self.env_loading_page = self._create_loading_page(env=True)
if self.is_remote():
# Keep a reference
self.info_page = None
else:
# Initially show environment loading page
self.info_page = self.env_loading_page
# Elapsed time
self.t0 = time.monotonic()
self.timer = QTimer(self)
# --- Layout
self.layout = QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.addWidget(self.shellwidget)
if self.infowidget is not None:
self.layout.addWidget(self.infowidget)
self.setLayout(self.layout)
# --- Exit function
self.exit_callback = lambda: self.container.close_client(client=self)
# --- Dialog manager
self.dialog_manager = DialogManager()
#--- Remote kernels states
self.__remote_restart_requested = False
self.__remote_reconnect_requested = False
# ---- Private methods
# -------------------------------------------------------------------------
def _when_kernel_is_ready(self):
"""
Configuration after the prompt is shown.
Notes
-----
This is not called on restart. For kernel setup you need to use
ShellWidget.handle_kernel_is_ready.
"""
if self.kernel_handler.connection_state not in [
KernelConnectionState.SpyderKernelReady,
KernelConnectionState.IpykernelReady]:
# The kernel is not ready
return
self.kernel_handler.sig_kernel_is_ready.disconnect(
self._when_kernel_is_ready)
# To hide the loading page
self._hide_loading_page()
# Set the initial current working directory in the kernel
self._set_initial_cwd_in_kernel()
# Notes:
# 1. It's necessary to do this at this point to avoid giving focus to
# _control at startup.
# 2. The try except is needed to avoid some errors in our tests.
try:
self._connect_control_signals()
except RuntimeError:
pass
if self.give_focus:
self.shellwidget._control.setFocus()
def _create_loading_page(self, env=False):
"""Create html page to show while the kernel is starting"""
loading_template = Template(LOADING)
loading_img = get_image_path('loading_sprites')
if os.name == 'nt':
loading_img = loading_img.replace('\\', '/')
message = _("Connecting to kernel...")
if env:
message = _("Retrieving environment variables...")
page = loading_template.substitute(
css_path=self.css_path,
loading_img=loading_img,
message=message
)
return page
def _create_blank_page(self):
"""Create html page to show while the kernel is starting"""
loading_template = Template(BLANK)
page = loading_template.substitute(css_path=self.css_path)
return page
def _show_loading_page(self, page=None):
"""Show animation while loading."""
if self.infowidget is not None:
self.shellwidget.hide()
self.infowidget.show()
self.info_page = page if page else self.kernel_loading_page
self.set_info_page()
def _hide_loading_page(self):
"""Hide animation shown while loading."""
if self.infowidget is not None:
self.infowidget.hide()
self.info_page = self.blank_page
self.set_info_page()
self.shellwidget.show()
def _show_special_console_error(self, missing_dependency):
if missing_dependency is not None:
error_message = _(
"Your Python environment or installation doesn't have the "
"<tt>{missing_dependency}</tt> module installed or it "
"occurred a problem importing it. Due to that, it is not "
"possible for Spyder to create this special console for "
"you."
).format(missing_dependency=missing_dependency)
self.show_kernel_error(error_message)
def _connect_control_signals(self):
"""Connect signals of control widgets."""
control = self.shellwidget._control
page_control = self.shellwidget._page_control
control.sig_focus_changed.connect(
self.container.sig_focus_changed)
page_control.sig_focus_changed.connect(
self.container.sig_focus_changed)
control.sig_visibility_changed.connect(
self.container.refresh_container)
page_control.sig_visibility_changed.connect(
self.container.refresh_container)
page_control.sig_show_find_widget_requested.connect(
self.container.find_widget.show)
def _set_initial_cwd_in_kernel(self):
"""Set the initial cwd in the kernel."""
logger.debug("Setting initial working directory in the kernel")
cwd_path = get_home_dir()
project_path = self.container.get_active_project_path()
emit_cwd_change = True
# This is for the first client
if self.id_['int_id'] == '1':
if self.get_conf(
'startup/use_project_or_home_directory',
section='workingdir'
):
cwd_path = get_home_dir()
if project_path is not None:
cwd_path = project_path
elif self.get_conf(
'startup/use_fixed_directory',
section='workingdir'
):
cwd_path = self.get_conf(
'startup/fixed_directory',
default=get_home_dir(),
section='workingdir'
)
else:
# For new clients
if self.initial_cwd is not None:
cwd_path = self.initial_cwd
elif self.get_conf(
'console/use_project_or_home_directory',
section='workingdir'
):
cwd_path = get_home_dir()
if project_path is not None:
cwd_path = project_path
elif self.get_conf('console/use_cwd', section='workingdir'):
cwd_path = self.container.get_working_directory()
emit_cwd_change = False
elif self.get_conf(
'console/use_fixed_directory',
section='workingdir'
):
cwd_path = self.get_conf(
'console/fixed_directory',
default=get_home_dir(),
section='workingdir'
)
if self.is_remote():
# Use the remote machine files API to get the home directory (`~`)
# absolute path.
self._get_remote_home_directory().connect(
self._on_remote_home_directory
)
else:
# We can't set the cwd when connecting to remote kernels directly.
if not (
self.kernel_handler.password or self.kernel_handler.sshkey
):
# Check if cwd exists, else use home dir.
# Fixes spyder-ide/spyder#25120.
if not osp.isdir(cwd_path):
cwd_path = get_home_dir()
emit_cwd_change = True
self.shellwidget.set_cwd(
cwd_path, emit_cwd_change=emit_cwd_change
)
# ---- Public API
# -------------------------------------------------------------------------
@property
def connection_file(self):
if self.kernel_handler is None:
return None
return self.kernel_handler.connection_file
def connect_kernel(self, kernel_handler, first_connect=True):
"""Connect kernel to client using our handler."""
self._hide_loading_page()
if not self.is_remote():
self._show_loading_page(self.kernel_loading_page)
self.kernel_handler = kernel_handler
# Connect standard streams.
kernel_handler.sig_stderr.connect(self.print_stderr)
kernel_handler.sig_stdout.connect(self.print_stdout)
kernel_handler.sig_fault.connect(self.print_fault)
# This needs to be done only once (when the console is created) and not
# on every kernel restart. That's why we connect directly to
# kernel_handler.sig_kernel_is_ready.
# See spyder-ide/spyder#24577
kernel_handler.sig_kernel_is_ready.connect(
self._when_kernel_is_ready)
# Actually do the connection
self.shellwidget.connect_kernel(kernel_handler, first_connect)
def disconnect_kernel(self, shutdown_kernel):
"""Disconnect from current kernel."""
kernel_handler = getattr(self, "kernel_handler", None)
if not kernel_handler:
return
kernel_handler.sig_stderr.disconnect(self.print_stderr)
kernel_handler.sig_stdout.disconnect(self.print_stdout)
kernel_handler.sig_fault.disconnect(self.print_fault)
self.shellwidget.disconnect_kernel(shutdown_kernel)
self.kernel_handler = None
@Slot(str)
def print_stderr(self, stderr):
"""Print stderr written in PIPE."""
if not stderr:
return
if self.is_benign_error(stderr):
return
if self.shellwidget.isHidden():
error_text = '<tt>%s</tt>' % stderr
# Avoid printing the same thing again
if self.error_text != error_text:
if self.error_text:
# Append to error text
error_text = self.error_text + error_text
self.show_kernel_error(error_text)
if self.shellwidget._starting:
self.shellwidget.banner = (
stderr + '\n' + self.shellwidget.banner)
else:
self.shellwidget._append_plain_text(
stderr, before_prompt=True)
@Slot(str)
def print_stdout(self, stdout):
"""Print stdout written in PIPE."""
if not stdout:
return
if self.shellwidget._starting:
self.shellwidget.banner = (
stdout + '\n' + self.shellwidget.banner)
else:
self.shellwidget._append_plain_text(
stdout, before_prompt=True)
def connect_shellwidget_signals(self):
"""Configure shellwidget after kernel is connected."""
# Set exit callback
self.shellwidget.exit_requested.connect(self.exit_callback)
# To save history
self.shellwidget.executing.connect(self.add_to_history)
# For Mayavi to run correctly
self.shellwidget.executing.connect(
self.shellwidget.set_backend_for_mayavi)
# To update history after execution
self.shellwidget.executed.connect(self.update_history)
# To enable the stop button when executing a process
self.shellwidget.executing.connect(
self.sig_execution_state_changed)
# To disable the stop button after execution stopped
self.shellwidget.executed.connect(
self.sig_execution_state_changed)
# To correctly change Matplotlib backend interactively
self.shellwidget.executing.connect(
self.shellwidget.change_mpl_backend)
# To show env and sys.path contents
self.shellwidget.sig_show_syspath.connect(self.show_syspath)
self.shellwidget.sig_show_env.connect(self.show_env)
def add_to_history(self, command):
"""Add command to history"""
if self.shellwidget.is_debugging():
return
return super().add_to_history(command)
def is_client_executing(self):
return (self.shellwidget._executing or
self.shellwidget.is_waiting_pdb_input())
@Slot()
def stop_button_click_handler(self):
"""Method to handle what to do when the stop button is pressed"""
# Interrupt computations or stop debugging
if not self.shellwidget.is_waiting_pdb_input():
self.interrupt_kernel()
else:
self.shellwidget.pdb_execute_command('exit')
def show_kernel_error(self, error):
"""Show kernel initialization errors in infowidget."""
if isinstance(error, Exception):
if isinstance(error, SpyderKernelError):
error = error.args[0]
else:
error = _("The error is:<br><br>"
"<tt>{}</tt>").format(traceback.format_exc())
self.error_text = error
if self.is_benign_error(error):
return
if self.is_warning_message(error):
return
InstallerIPythonKernelError(error)
# Replace end of line chars with <br>
eol = sourcecode.get_eol_chars(error)
if eol:
error = error.replace(eol, '<br>')
# Don't break lines in hyphens
# From https://stackoverflow.com/q/7691569/438386
error = error.replace('-', '‑')
# Create error page
message = _("An error occurred while starting the kernel")
kernel_error_template = Template(KERNEL_ERROR)
self.info_page = kernel_error_template.substitute(
css_path=self.css_path,
message=message,
error=error)
# Show error
if self.infowidget is not None:
self.set_info_page()
self.shellwidget.hide()
self.infowidget.show()
# Inform other plugins that the shell failed to start
self.shellwidget.sig_shellwidget_errored.emit(self.shellwidget)
# Stop shellwidget
self.shellwidget.shutdown()
def show_kernel_connection_error(self):
self.show_kernel_error(
_(
"It was not possible to connect to the kernel associated to "
"this console. If you are trying to connect to an existing "
"kernel, check that the connection file you selected actually "
"corresponds to the kernel you want to connect to."
)
)
def is_benign_error(self, error):
"""Decide if an error is benign in order to filter it."""
benign_errors = [
# Error when switching from the Qt5 backend to the Tk one.
# See spyder-ide/spyder#17488
"KeyboardInterrupt caught in kernel",
"QSocketNotifier: Multiple socket notifiers for same socket",
# Error when switching from the Tk backend to the Qt5 one.
# See spyder-ide/spyder#17488
"Tcl_AsyncDelete async handler deleted by the wrong thread",
"error in background error handler:",
" while executing",
'"::tcl::Bgerror',
# Avoid showing this warning because it was up to the user to
# disable secure writes.
"WARNING: Insecure writes have been enabled via environment",
# Old error
"No such comm",
# PYDEVD debug warning message. See spyder-ide/spyder#18908
"Note: Debugging will proceed. "
"Set PYDEVD_DISABLE_FILE_VALIDATION=1 to disable this validation.",
# Argument not expected error. See spyder-ide/spyder#19298
"The following argument was not expected",
# Avoid showing error for kernel restarts after kernel dies when
# using an external interpreter
"conda.cli.main_run",
# Warning when debugpy is not available because it's an optional
# dependency of IPykernel.
# See spyder-ide/spyder#21900
"debugpy_stream undefined, debugging will not be enabled",
# Harmless warning from OpenCL on Windows.
# See spyder-ide/spyder#22551
"The system cannot find the path specified",
# UNC paths, see spyder-ide/spyder#23726
" UNC ",
# Matplotlib spurious message, see spyder-ide/spyder#24153
"Matplotlib is building the font cache",
]
return any([err in error for err in benign_errors])
def is_warning_message(self, error):
"""Decide if a message contains a warning in order to filter it."""
warning_pattern = re.compile(
r"(?:^|\s)(?:[A-Za-z]*Warning:|"
r"(?<=\s)WARNING(?=\s))(?:\:)?(?=\s|$)"
)
return warning_pattern.search(error)
def get_name(self):
"""Return client name"""
if self.given_name is None:
# Name according to host
if self.hostname is None:
name = _("Console")
else:
name = self.hostname
# Adding id to name
client_id = self.id_['int_id'] + u'/' + self.id_['str_id']
name = name + u' ' + client_id
elif (self.given_name in ["Pylab", "SymPy", "Cython"] or
self.forcing_custom_interpreter):
client_id = self.id_['int_id'] + u'/' + self.id_['str_id']
name = self.given_name + u' ' + client_id
else:
name = self.given_name + u'/' + self.id_['str_id']
return name
def get_control(self, pager=True):
"""Return the text widget (or similar) to give focus to"""
# page_control is the widget used for paging
page_control = self.shellwidget._page_control
if pager and page_control and page_control.isVisible():
return page_control
else:
return self.shellwidget._control
def set_font(self, font):
"""Set IPython widget's font"""
self.shellwidget._control.setFont(font)
self.shellwidget.font = font
def set_color_scheme(self, color_scheme, reset=True):
"""Set IPython color scheme."""
# Needed to handle not initialized kernel_client
# See spyder-ide/spyder#6996.
try:
self.shellwidget.set_color_scheme(color_scheme, reset)
except AttributeError:
pass
def close_client(self, is_last_client, close_console=False):
"""Close the client."""
self.__on_close = lambda: None
debugging = False
# Needed to handle a RuntimeError. See spyder-ide/spyder#5568.
try:
# This is required after spyder-ide/spyder#21788 to prevent freezes
# when closing Spyder. That happens not only when a console is in
# debugging mode before closing, but also when a kernel restart is
# requested while debugging.
if self.shellwidget.is_debugging():
debugging = True
self.__on_close = functools.partial(
self.finish_close,
is_last_client,
close_console,
debugging
)
self.shellwidget.sig_prompt_ready.connect(self.__on_close)
self.shellwidget.stop_debugging()
else:
self.interrupt_kernel()
except RuntimeError:
pass
if not debugging:
self.finish_close(is_last_client, close_console, debugging)
def finish_close(self, is_last_client, close_console, debugging):
"""Actions to take to finish closing the client."""
# Disconnect timer needed to update elapsed time and this slot in case
# it was connected.
try:
self.shellwidget.sig_prompt_ready.disconnect(self.__on_close)
self.timer.timeout.disconnect(self.show_time)
except (RuntimeError, TypeError):
pass
# This is a hack to prevent segfaults when closing Spyder and the
# client was debugging before doing it.
# It's a side effect of spyder-ide/spyder#21788
if debugging and close_console:
for __ in range(3):
time.sleep(0.08)
QApplication.processEvents()
self.shutdown(is_last_client, close_console=close_console)
# Close jupyter and files apis regardless of the kernel state
if self.is_remote():
if not self._jupyter_api.closed:
AsyncDispatcher(
loop=self._jupyter_api.session._loop, early_return=False
)(self._jupyter_api.close)()
if not self._files_api.closed:
AsyncDispatcher(
loop=self._files_api.session._loop, early_return=False
)(self._files_api.close)()
# Prevent errors in our tests
try:
self.close()
self.setParent(None)
except RuntimeError:
pass
def shutdown(self, is_last_client, close_console=False):
"""Shutdown connection and kernel if needed."""
self.dialog_manager.close_all()
shutdown_kernel = (
is_last_client
and (not self.shellwidget.is_external_kernel or self.is_remote())
and not self.error_text
)
self.shellwidget.shutdown(shutdown_kernel)
if self.is_remote() and shutdown_kernel and not close_console:
self.shutdown_remote_kernel()
def interrupt_kernel(self):
"""Interrupt the associanted Spyder kernel if it's running"""
# Needed to prevent a crash when a kernel is not running.
# See spyder-ide/spyder#6299.
try:
self.shellwidget.request_interrupt_kernel()
except RuntimeError:
pass
def replace_kernel(self, kernel_handler, shutdown_kernel, clear=True):
"""
Replace kernel by disconnecting from the current one and connecting to
another kernel, which is equivalent to a restart.
"""
# Reset elapsed time
self.t0 = time.monotonic()
# Connect kernel to client
self.disconnect_kernel(shutdown_kernel)
self.connect_kernel(kernel_handler, first_connect=False)
# Reset shellwidget and print restart message
self.shellwidget.reset(clear=clear)
self.shellwidget._kernel_restarted_message(died=False)
def is_kernel_active(self):
"""Check if the kernel is active."""
return (
self.kernel_handler is not None
and self.kernel_handler.connection_state
in [
KernelConnectionState.SpyderKernelReady,
KernelConnectionState.IpykernelReady,
]
)
def print_fault(self, fault):
"""Print fault text."""
self.shellwidget._append_plain_text('\n' + fault, before_prompt=True)
@Slot()
def enter_array_inline(self):
"""Enter and show the array builder on inline mode."""
self.shellwidget._control.enter_array_inline()
@Slot()
def enter_array_table(self):
"""Enter and show the array builder on table."""
self.shellwidget._control.enter_array_table()
@Slot()
def inspect_object(self):
"""Show how to inspect an object with our Help plugin"""
self.shellwidget._control.inspect_current_object()
@Slot()
def clear_line(self):
"""Clear a console line"""
self.shellwidget._keyboard_quit()
@Slot()
def clear_console(self):
"""Clear the whole console"""
self.shellwidget.clear_console()
@Slot()
def reset_namespace(self):
"""Resets the namespace by removing all names defined by the user"""
self.shellwidget.reset_namespace(warning=self.reset_warning,
message=True)
def update_history(self):
self.history = self.shellwidget._history
@Slot(object)
def show_syspath(self, syspath):
"""Show sys.path contents."""
if syspath is not None:
editor = CollectionsEditor(self)
editor.setup(syspath, title="sys.path contents", readonly=True,
icon=ima.icon('syspath'))
self.dialog_manager.show(editor)
else:
return
@Slot(object)
def show_env(self, env):
"""Show environment variables."""
env = dict(sorted(env.items()))
self.dialog_manager.show(RemoteEnvDialog(env, parent=self))
def show_time(self, end=False):
"""Text to show in time_label."""
elapsed_time = time.monotonic() - self.t0
# System time changed to past date, so reset start.
if elapsed_time < 0:
self.t0 = time.monotonic()
elapsed_time = 0
if elapsed_time > 24 * 3600: # More than a day...!
fmt = "%d %H:%M:%S"
else:
fmt = "%H:%M:%S"
if end:
color = SpyderPalette.COLOR_TEXT_3
else:
color = SpyderPalette.COLOR_ACCENT_4
text = "<span style=\'color: %s\'><b>%s" \
"</b></span>" % (color,
time.strftime(fmt, time.gmtime(elapsed_time)))
if self.show_elapsed_time:
self.sig_time_label.emit(text)
else:
self.sig_time_label.emit("")
@Slot(bool)
def set_show_elapsed_time(self, state):
"""Slot to show/hide elapsed time label."""
self.show_elapsed_time = state
def set_info_page(self):
"""Set current info_page."""
if self.infowidget is not None and self.info_page is not None:
self.infowidget.setHtml(
self.info_page,
QUrl.fromLocalFile(self.css_path)
)
self.sig_execution_state_changed.emit()
# ---- For remote clients
# -------------------------------------------------------------------------
def is_remote(self):
"""Check if this client is connected to a remote server."""
return self._jupyter_api is not None
@property
def jupyter_api(self):
return self._jupyter_api
def remote_kernel_restarted_failure_message(
self, error=None, shutdown=False
):
"""Show message when the kernel failed to be restarted."""
msg = _("It was not possible to restart the kernel")
if error is None:
error_html = f"<br>{msg}<br>"
else:
if isinstance(error, SpyderKernelError):
error = error.args[0]
elif isinstance(error, Exception):
error = _("The error is:<br><br>" "<tt>{}</tt>").format(
traceback.format_exc()
)
# Replace end of line chars with <br>
eol = sourcecode.get_eol_chars(error)
if eol:
error = error.replace(eol, '<br>')
# Don't break lines in hyphens
# From https://stackoverflow.com/q/7691569/438386
error = error.replace('-', '‑')
# Create error page
kernel_error_template = Template(KERNEL_ERROR)
error_html = kernel_error_template.substitute(
css_path=self.css_path,
message=msg,
error=error)
self.shellwidget._append_html(error_html, before_prompt=False)
self.shellwidget.insert_horizontal_ruler()
if shutdown:
self.shutdown(is_last_client=False, close_console=False)
@AsyncDispatcher.QtSlot
def _on_remote_kernel_restarted(self, future):
"""Handle restarts for remote kernels."""
if future.result():
# Reset shellwidget and print restart message
self.kernel_handler.reconnect_kernel()
self.shellwidget.reset(clear=True)
else:
self.remote_kernel_restarted_failure_message(shutdown=True)
# This will show an error message in the plugins connected to the
# IPython console and disable kernel related actions in its Options
# menu.
sw = self.shellwidget
sw.sig_shellwidget_errored.emit(sw)
self.__remote_restart_requested = False
@AsyncDispatcher.QtSlot
def _reconnect_on_kernel_info(self, future):
if (kernel_info := future.result()):
try:
kernel_handler = KernelHandler.from_websocket(
(
self.jupyter_api.api_url
/ "kernels"
/ self.kernel_id
/ "channels"
).with_scheme("ws"),
aiohttp_session=self._jupyter_api.session,
)
except Exception as err:
self.remote_kernel_restarted_failure_message(
err, shutdown=True
)
else:
self.replace_kernel(
kernel_handler, shutdown_kernel=False, clear=False
)
else:
self.remote_kernel_restarted_failure_message(shutdown=True)
# This will show an error message in the plugins connected to the
# IPython console and disable kernel related actions in its Options
# menu.
sw = self.shellwidget
sw.sig_shellwidget_errored.emit(sw)
self.__remote_reconnect_requested = False
@AsyncDispatcher.QtSlot
def _on_remote_kernel_started(self, future):
"""
Actions to take when a remote kernel was started for this IPython console
client.
"""
# It's only at this point that we can allow users to close the client.
self.can_close = True
# Handle failures to launch a kernel
try:
kernel_info = future.result()
except Exception as err:
self.show_kernel_error(err)
return
if not kernel_info:
self.show_kernel_error(
_(
"There was an error connecting to the server <b>{}</b>. "
"Please check your connection is working."
).format(self._jupyter_api.server_name)
)
return
# Connect client's signals
self.kernel_id = kernel_info["id"]
try:
kernel_handler = KernelHandler.from_websocket(
(
self.jupyter_api.api_url
/ "kernels"
/ self.kernel_id
/ "channels"
).with_scheme("ws"),
aiohttp_session=self._jupyter_api.session,
)
except Exception as err:
self.show_kernel_error(err)
else:
# Connect client to the kernel
self.connect_kernel(kernel_handler)
@AsyncDispatcher(loop="ipythonconsole", early_return=False)
async def shutdown_remote_kernel(self):
return await self._jupyter_api.terminate_kernel(self.kernel_id)
@AsyncDispatcher(loop="ipythonconsole", early_return=False)
async def interrupt_remote_kernel(self):
return await self._jupyter_api.interrupt_kernel(self.kernel_id)
@AsyncDispatcher(loop="ipythonconsole")
async def _restart_remote_kernel(self):
return await self._jupyter_api.restart_kernel(self.kernel_id)
@AsyncDispatcher(loop="ipythonconsole")
async def _get_remote_kernel_info(self):
return await self._jupyter_api.get_kernel(self.kernel_id)
@AsyncDispatcher(loop="ipythonconsole")
async def _new_remote_kernel(self, kernel_spec=None):
logger.debug("Creating new remote kernel for %s", self.get_name())
await self.jupyter_api.connect()
return await self._jupyter_api.create_kernel(kernel_spec)
@AsyncDispatcher.QtSlot
def _on_remote_home_directory(self, future):
result = future.result()
home_directory = result.get("name", "/")
logger.debug(f"Retrieved remote home directory: {home_directory}")
self.shellwidget.set_cwd(home_directory, emit_cwd_change=True)
@AsyncDispatcher(loop="ipythonconsole")
async def _get_remote_home_directory(self):
await self._files_api.connect()
return await self._files_api.info("~")
def restart_remote_kernel(self):
# Reset elapsed time
self.t0 = time.monotonic()
if self.__remote_restart_requested:
return
self._restart_remote_kernel().connect(
self._on_remote_kernel_restarted
)
self.__remote_restart_requested = True
def reconnect_remote_kernel(self):
if self.__remote_reconnect_requested:
return
self._get_remote_kernel_info().connect(self._reconnect_on_kernel_info)
self.__remote_reconnect_requested = True
def start_remote_kernel(self, kernel_spec=None):
self._new_remote_kernel(kernel_spec).connect(
self._on_remote_kernel_started
)
|
ClientWidget
|
python
|
Netflix__metaflow
|
test/test_config/mutable_flow.py
|
{
"start": 3061,
"end": 4730
}
|
class ____(FlowMutator):
def mutate(self, mutable_flow):
steps = ["start", "end"]
count = 0
for name, s in mutable_flow.steps:
assert name in steps, "Unexpected step name"
steps.remove(name)
count += 1
assert count == 2, "Unexpected number of steps"
count = 0
parameters = []
for name, c in mutable_flow.configs:
assert name == "config", "Unexpected config name"
parameters = c["parameters"]
count += 1
assert count == 1, "Unexpected number of configs"
count = 0
for name, p in mutable_flow.parameters:
if name == "trigger_param":
continue
assert name == parameters[count]["name"], "Unexpected parameter name"
count += 1
to_add = mutable_flow.config["flow_add_environment"]["vars"]
for name, s in mutable_flow.steps:
if name == "start":
decos = [deco for deco in s.decorator_specs]
assert len(decos) == 3, "Unexpected number of decorators"
assert decos[0].startswith("environment:"), "Unexpected decorator"
env_deco, _ = extract_step_decorator_from_decospec(decos[0], {})
attrs = env_deco.attributes
for k, v in to_add.items():
attrs["vars"][k] = v
s.remove_decorator(decos[0])
s.add_decorator(environment, **attrs)
else:
s.add_decorator(
environment, **mutable_flow.config["flow_add_environment"].to_dict()
)
|
ModifyFlow
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-shared/dagster_shared/cli/__init__.py
|
{
"start": 7129,
"end": 8781
}
|
class ____:
python_file: Optional[str] = None
module_name: Optional[str] = None
package_name: Optional[str] = None
working_directory: Optional[str] = None
attribute: Optional[str] = None
autoload_defs_module_name: Optional[str] = None
@classmethod
def extract_from_cli_options(cls, cli_options: dict[str, Any]) -> Self:
# This is expected to always be called from a click entry point, so all options should be
# present in the dictionary. We rely on `@record` for type-checking.
return cls(
python_file=cli_options.pop("python_file", None),
module_name=cli_options.pop("module_name", None),
package_name=cli_options.pop("package_name", None),
working_directory=cli_options.pop("working_directory", None),
attribute=cli_options.pop("attribute", None),
autoload_defs_module_name=cli_options.pop("autoload_defs_module_name", None),
)
def to_workspace_opts(self) -> "WorkspaceOpts":
return WorkspaceOpts(
python_file=(self.python_file,) if self.python_file else None,
module_name=(self.module_name,) if self.module_name else None,
package_name=(self.package_name,) if self.package_name else None,
autoload_defs_module_name=self.autoload_defs_module_name
if self.autoload_defs_module_name
else None,
working_directory=self.working_directory,
attribute=self.attribute,
)
def specifies_target(self) -> bool:
return bool(self.python_file or self.module_name or self.package_name)
|
PythonPointerOpts
|
python
|
huggingface__transformers
|
src/transformers/models/led/modeling_led.py
|
{
"start": 33317,
"end": 39739
}
|
class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: Optional[float] = 0.0,
is_decoder: Optional[bool] = False,
bias: Optional[bool] = True,
layer_idx: Optional[bool] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.layer_idx = layer_idx
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = query_states.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
query_states = query_states.reshape(*proj_shape)
key_states = key_states.reshape(*proj_shape)
value_states = value_states.reshape(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = (
attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
.transpose(1, 2)
.reshape(bsz, tgt_len, embed_dim)
)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_values
|
LEDDecoderAttention
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/functions.py
|
{
"start": 2653,
"end": 9700
}
|
class ____(TypedDict):
hues: int
values: int
maxValue: int
minValue: int
maxHue: int
minHue: int
sat: int
alpha: int
color_like: TypeAlias = (
QtGui.QColor
| str
| float
| int
| tuple[int, int, int]
| tuple[int, int, int, int]
| tuple[float, float, float]
| tuple[float, float, float, float]
| tuple[int, HueKeywordArgs]
)
def siScale(x, minVal=1e-25, allowUnicode=True, power:int|float=1):
"""
Return the recommended scale factor and SI prefix string for x.
Parameters
----------
x : float
The value to be scaled.
minVal : float, optional
The minimum value considered for scaling. Default is 1e-25.
allowUnicode : bool, optional
Whether to allow Unicode SI prefixes. Default is True.
power : int or float, optional
The power to which the units are raised. For example, if units='m²', the
power should be 2. This ensures correct scaling of the prefix in
nonlinear units. Supports positive, negative and non-integral powers.
Returns
-------
scale : float
The scale factor to apply to x.
prefix : str
The SI prefix string.
Examples
--------
>>> siScale(0.0001)
(1000000.0, 'μ')
# This indicates that the number 0.0001 is best represented as 0.0001 * 1e6 = 100 μUnits
"""
if isinstance(x, decimal.Decimal):
x = float(x)
try:
if not math.isfinite(x):
return(1, '')
except:
raise
if abs(x) < minVal:
m = 0
else:
# log of x with base 1000^power
log1000x = math.log(abs(x))/(math.log(1000)*power)
if power > 0:
log1000x = math.floor(log1000x)
else:
log1000x = math.ceil(log1000x)
m = int(clip_scalar(log1000x, -9.0, 9.0))
if m == 0:
pref = ''
elif m < -8 or m > 8:
pref = 'e%d' % (m*3)
else:
if allowUnicode:
pref = SI_PREFIXES[m+8]
else:
pref = SI_PREFIXES_ASCII[m+8]
m1 = -3*m*power
p = 10.**m1
return (p, pref)
def siFormat(x, precision=3, suffix='', space=True, error=None, minVal=1e-25, allowUnicode=True, power = 1):
"""
Format a number in engineering notation with SI prefix.
Parameters
----------
x : float
The value to be formatted.
precision : int, optional
Number of decimal places to include in the formatted output. Default is 3.
suffix : str, optional
Suffix to append to the formatted output.
space : bool, optional
Whether to include a space between the SI prefix and the value. Default is True.
error : float, optional
Error value to include in the formatted output.
minVal : float, optional
Minimum value considered for scaling. Default is 1e-25.
allowUnicode : bool, optional
Whether to allow Unicode SI prefixes. Default is True.
power : int or float, optional
Power to which the units are raised. For example, if suffix='m²', the power should be 2.
This ensures correct scaling when the units are nonlinear. Supports positive, negative,
and non-integral powers. Note: The power only affects the scaling, not the suffix.
Returns
-------
str
The formatted string in engineering notation with SI prefix.
Examples
--------
>>> siFormat(0.0001, suffix='V')
'100 μV'
"""
if space is True:
space = ' '
if space is False:
space = ''
(p, pref) = siScale(x, minVal, allowUnicode, power)
if not (len(pref) > 0 and pref[0] == 'e'):
pref = space + pref
if error is None:
fmt = "%." + str(precision) + "g%s%s"
return fmt % (x*p, pref, suffix)
else:
if allowUnicode:
plusminus = space + "±" + space
else:
plusminus = " +/- "
fmt = "%." + str(precision) + "g%s%s%s%s"
return fmt % (x*p, pref, suffix, plusminus, siFormat(error, precision=precision, suffix=suffix, space=space, minVal=minVal, power=power))
def siParse(s, regex=FLOAT_REGEX_PERIOD, suffix=None):
"""Convert a value written in SI notation to a tuple (number, si_prefix, suffix).
Example:
siParse('100 µV') # returns ('100', 'µ', 'V')
Note that in the above example, the µ symbol is the "micro sign" (UTF-8
0xC2B5), as opposed to the Greek letter mu (UTF-8 0xCEBC).
Parameters
----------
s : str
The string to parse.
regex : re.Pattern, optional
Compiled regular expression object for parsing. The default is a
general-purpose regex for parsing floating point expressions,
potentially containing an SI prefix and a suffix.
suffix : str, optional
Suffix to check for in ``s``. The default (None) indicates there may or
may not be a suffix contained in the string and it is returned if
found. An empty string ``""`` is handled differently: if the string
contains a suffix, it is discarded. This enables interpreting
characters following the numerical value as an SI prefix.
"""
s = s.strip()
if suffix is not None and len(suffix) > 0:
if s[-len(suffix):] != suffix:
raise ValueError("String '%s' does not have the expected suffix '%s'" % (s, suffix))
s = s[:-len(suffix)] + 'X' # add a fake suffix so the regex still picks up the si prefix
# special case: discard any extra characters if suffix is explicitly empty
if suffix == "":
s += 'X'
m = regex.match(s)
if m is None:
raise ValueError('Cannot parse number "%s"' % s)
try:
sip = m.group('siPrefix')
except IndexError:
sip = ''
if suffix is None:
try:
suf = m.group('suffix')
except IndexError:
suf = ''
else:
suf = suffix
return m.group('number'), '' if sip is None else sip, '' if suf is None else suf
def siEval(s, typ=float, regex=FLOAT_REGEX_PERIOD, suffix=None, unitPower=1):
"""
Convert a value written in SI notation to its equivalent prefixless value.
Example::
siEval("100 μV") # returns 0.0001
"""
val, siprefix, suffix = siParse(s, regex, suffix=suffix)
v = typ(val)
return siApply(v, siprefix, unitPower=unitPower)
def siApply(val, siprefix, unitPower=1):
"""
"""
n = SI_PREFIX_EXPONENTS[siprefix] if siprefix != '' else 0
n = n * unitPower
if n > 0:
return val * 10**n
elif n < 0:
# this case makes it possible to use Decimal objects here
return val / 10**-n
else:
return val
def float_regex_for_locale(locale = QtCore.QLocale()) -> re.Pattern:
"""Return a FLOAT_REGEX pattern appropriate for the given locale."""
decimal_point = locale.decimalPoint()
if decimal_point == ',':
return FLOAT_REGEX_COMMA
else:
return FLOAT_REGEX_PERIOD
|
HueKeywordArgs
|
python
|
facelessuser__pymdown-extensions
|
tests/test_extensions/test_legacy_slugs.py
|
{
"start": 1838,
"end": 2513
}
|
class ____(util.MdCase):
"""Test Unicode cased, encoded slugs."""
extension = ['markdown.extensions.toc']
extension_configs = {
'markdown.extensions.toc': {
"slugify": slugs.uslugify_cased_encoded
}
}
def test_slug(self):
"""Test the slug output."""
with pytest.warns(DeprecationWarning):
self.check_markdown(
r'# Testing cased unicode-slugs_headers ±♠Ωℑ with encoding',
r'<h1 id="Testing-cased-unicode-slugs_headers-%CE%A9%E2%84%91-with-encoding">'
'Testing cased unicode-slugs_headers ±♠Ωℑ with encoding</h1>'
)
|
TestUslugifyCasedEncoded
|
python
|
numpy__numpy
|
doc/preprocess.py
|
{
"start": 692,
"end": 1565
}
|
class ____(Template):
delimiter = '@'
def doxy_config(root_path):
"""
Fetch all Doxygen sub-config files and gather it with the main config file.
"""
confs = []
dsrc_path = os.path.join(root_path, "doc", "source")
sub = {'ROOT_DIR': root_path}
with open(os.path.join(dsrc_path, "doxyfile")) as fd:
conf = DoxyTpl(fd.read())
confs.append(conf.substitute(CUR_DIR=dsrc_path, **sub))
for subdir in ["doc", "numpy"]:
for dpath, _, files in os.walk(os.path.join(root_path, subdir)):
if ".doxyfile" not in files:
continue
conf_path = os.path.join(dpath, ".doxyfile")
with open(conf_path) as fd:
conf = DoxyTpl(fd.read())
confs.append(conf.substitute(CUR_DIR=dpath, **sub))
return confs
if __name__ == "__main__":
main()
|
DoxyTpl
|
python
|
yaml__pyyaml
|
lib/yaml/emitter.py
|
{
"start": 385,
"end": 426
}
|
class ____(YAMLError):
pass
|
EmitterError
|
python
|
hyperopt__hyperopt
|
hyperopt/pyll_utils.py
|
{
"start": 3535,
"end": 7407
}
|
class ____:
def __init__(self, name, val, op):
self.op = op
self.name = name
self.val = val
def __str__(self):
return f"Cond{{{self.name} {self.op} {self.val}}}"
def __eq__(self, other):
return self.op == other.op and self.name == other.name and self.val == other.val
def __hash__(self):
return hash((self.op, self.name, self.val))
def __repr__(self):
return str(self)
EQ = partial(Cond, op="=")
def _expr_to_config(expr, conditions, hps):
if expr.name == "switch":
idx = expr.inputs()[0]
options = expr.inputs()[1:]
assert idx.name == "hyperopt_param"
assert idx.arg["obj"].name in (
"randint", # -- in case of hp.choice
"categorical", # -- in case of hp.pchoice
)
_expr_to_config(idx, conditions, hps)
for ii, opt in enumerate(options):
_expr_to_config(opt, conditions + (EQ(idx.arg["label"].obj, ii),), hps)
elif expr.name == "hyperopt_param":
label = expr.arg["label"].obj
if label in hps:
if hps[label]["node"] != expr.arg["obj"]:
raise DuplicateLabel(label)
hps[label]["conditions"].add(conditions)
else:
hps[label] = {
"node": expr.arg["obj"],
"conditions": {conditions},
"label": label,
}
else:
for ii in expr.inputs():
_expr_to_config(ii, conditions, hps)
def expr_to_config(expr, conditions, hps):
"""
Populate dictionary `hps` with the hyperparameters in pyll graph `expr`
and conditions for participation in the evaluation of `expr`.
Arguments:
expr - a pyll expression root.
conditions - a tuple of conditions (`Cond`) that must be True for
`expr` to be evaluated.
hps - dictionary to populate
Creates `hps` dictionary:
label -> { 'node': apply node of hyperparameter distribution,
'conditions': `conditions` + tuple,
'label': label
}
"""
expr = as_apply(expr)
if conditions is None:
conditions = ()
assert isinstance(expr, Apply)
_expr_to_config(expr, conditions, hps)
_remove_allpaths(hps, conditions)
def _remove_allpaths(hps, conditions):
"""Hacky way to recognize some kinds of false dependencies
Better would be logic programming.
"""
potential_conds = {}
for k, v in list(hps.items()):
if v["node"].name == "randint":
low = v["node"].arg["low"].obj
# if high is None, the domain is [0, low), else it is [low, high)
domain_size = (
v["node"].arg["high"].obj - low
if v["node"].arg["high"] != MissingArgument
else low
)
potential_conds[k] = frozenset([EQ(k, ii) for ii in range(domain_size)])
elif v["node"].name == "categorical":
p = v["node"].arg["p"].obj
potential_conds[k] = frozenset([EQ(k, ii) for ii in range(p.size)])
for k, v in list(hps.items()):
if len(v["conditions"]) > 1:
all_conds = [[c for c in cond if c is not True] for cond in v["conditions"]]
all_conds = [cond for cond in all_conds if len(cond) >= 1]
if len(all_conds) == 0:
v["conditions"] = {conditions}
continue
depvar = all_conds[0][0].name
all_one_var = all(
len(cond) == 1 and cond[0].name == depvar for cond in all_conds
)
if all_one_var:
conds = [cond[0] for cond in all_conds]
if frozenset(conds) == potential_conds[depvar]:
v["conditions"] = {conditions}
continue
# -- eof
|
Cond
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 1054419,
"end": 1054579
}
|
class ____(sgqlc.types.Union):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__types__ = (Team, User)
|
DeploymentReviewer
|
python
|
kamyu104__LeetCode-Solutions
|
Python/di-string-match.py
|
{
"start": 29,
"end": 447
}
|
class ____(object):
def diStringMatch(self, S):
"""
:type S: str
:rtype: List[int]
"""
result = []
left, right = 0, len(S)
for c in S:
if c == 'I':
result.append(left)
left += 1
else:
result.append(right)
right -= 1
result.append(left)
return result
|
Solution
|
python
|
django__django
|
tests/proxy_models/models.py
|
{
"start": 665,
"end": 869
}
|
class ____(models.Model):
"""
A simple concrete base class.
"""
name = models.CharField(max_length=50)
objects = PersonManager()
def __str__(self):
return self.name
|
Person
|
python
|
pydata__xarray
|
xarray/structure/alignment.py
|
{
"start": 4610,
"end": 45268
}
|
class ____(Generic[T_Alignable]):
"""Implements all the complex logic for the re-indexing and alignment of Xarray
objects.
For internal use only, not public API.
Usage:
aligner = Aligner(*objects, **kwargs)
aligner.align()
aligned_objects = aligner.results
"""
objects: tuple[T_Alignable, ...]
results: tuple[T_Alignable, ...]
objects_matching_index_vars: tuple[
dict[MatchingIndexKey, dict[Hashable, Variable]], ...
]
join: JoinOptions | CombineKwargDefault
exclude_dims: frozenset[Hashable]
exclude_vars: frozenset[Hashable]
copy: bool
fill_value: Any
sparse: bool
indexes: dict[MatchingIndexKey, Index]
index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]]
all_indexes: dict[MatchingIndexKey, list[Index]]
all_index_vars: dict[MatchingIndexKey, list[dict[Hashable, Variable]]]
aligned_indexes: dict[MatchingIndexKey, Index]
aligned_index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]]
reindex: dict[MatchingIndexKey, bool]
keep_original_indexes: set[MatchingIndexKey]
reindex_kwargs: dict[str, Any]
unindexed_dim_sizes: dict[Hashable, set]
new_indexes: Indexes[Index]
def __init__(
self,
objects: Iterable[T_Alignable],
join: JoinOptions | CombineKwargDefault = "inner",
indexes: Mapping[Any, Any] | None = None,
exclude_dims: str | Iterable[Hashable] = frozenset(),
exclude_vars: Iterable[Hashable] = frozenset(),
method: str | None = None,
tolerance: float | Iterable[float] | str | None = None,
copy: bool = True,
fill_value: Any = dtypes.NA,
sparse: bool = False,
):
self.objects = tuple(objects)
self.objects_matching_indexes: tuple[Any, ...] = ()
self.objects_matching_index_vars = ()
if not isinstance(join, CombineKwargDefault) and join not in get_args(
JoinOptions
):
raise ValueError(f"invalid value for join: {join}")
self.join = join
self.copy = copy
self.fill_value = fill_value
self.sparse = sparse
if method is None and tolerance is None:
self.reindex_kwargs = {}
else:
self.reindex_kwargs = {"method": method, "tolerance": tolerance}
if isinstance(exclude_dims, str):
exclude_dims = [exclude_dims]
self.exclude_dims = frozenset(exclude_dims)
self.exclude_vars = frozenset(exclude_vars)
if indexes is None:
indexes = {}
self.indexes, self.index_vars = self._collect_indexes(
_normalize_indexes(indexes)
)
self.all_indexes = {}
self.all_index_vars = {}
self.unindexed_dim_sizes = {}
self.aligned_indexes = {}
self.aligned_index_vars = {}
self.reindex = {}
self.keep_original_indexes = set()
self.results = tuple()
def _collect_indexes(
self, indexes: Indexes
) -> tuple[IndexesToAlign, IndexVarsToAlign]:
"""Collect input and/or object indexes for alignment.
Return new dictionaries of xarray Index objects and coordinate
variables, whose keys are used to later retrieve all the indexes to
compare with each other (based on the name and dimensions of their
associated coordinate variables as well as the Index type).
"""
collected_indexes = {}
collected_index_vars = {}
for idx, idx_vars in indexes.group_by_index():
idx_coord_names_and_dims = []
idx_all_dims: set[Hashable] = set()
for name, var in idx_vars.items():
dims = var.dims
idx_coord_names_and_dims.append((name, dims))
idx_all_dims.update(dims)
key: MatchingIndexKey = (tuple(idx_coord_names_and_dims), type(idx))
if idx_all_dims:
exclude_dims = idx_all_dims & self.exclude_dims
if exclude_dims == idx_all_dims:
# Do not collect an index if all the dimensions it uses are
# also excluded from the alignment
continue
elif exclude_dims:
# If the dimensions used by index partially overlap with the dimensions
# excluded from alignment, it is possible to check index equality along
# non-excluded dimensions only. However, in this case each of the aligned
# objects must retain (a copy of) their original index. Re-indexing and
# overriding the index are not supported.
if self.join == "override":
excl_dims_str = ", ".join(str(d) for d in exclude_dims)
incl_dims_str = ", ".join(
str(d) for d in idx_all_dims - exclude_dims
)
raise AlignmentError(
f"cannot exclude dimension(s) {excl_dims_str} from alignment "
"with `join='override` because these are used by an index "
f"together with non-excluded dimensions {incl_dims_str}"
"(cannot safely override the index)."
)
else:
self.keep_original_indexes.add(key)
collected_indexes[key] = idx
collected_index_vars[key] = idx_vars
return collected_indexes, collected_index_vars
def find_matching_indexes(self) -> None:
all_indexes: dict[MatchingIndexKey, list[Index]]
all_index_vars: dict[MatchingIndexKey, list[dict[Hashable, Variable]]]
all_indexes_dim_sizes: dict[MatchingIndexKey, dict[Hashable, set]]
objects_matching_indexes: list[dict[MatchingIndexKey, Index]]
objects_matching_index_vars: list[
dict[MatchingIndexKey, dict[Hashable, Variable]]
]
all_indexes = defaultdict(list)
all_index_vars = defaultdict(list)
all_indexes_dim_sizes = defaultdict(lambda: defaultdict(set))
objects_matching_indexes = []
objects_matching_index_vars = []
for obj in self.objects:
obj_indexes, obj_index_vars = self._collect_indexes(obj.xindexes)
objects_matching_indexes.append(obj_indexes)
objects_matching_index_vars.append(obj_index_vars)
for key, idx in obj_indexes.items():
all_indexes[key].append(idx)
for key, index_vars in obj_index_vars.items():
all_index_vars[key].append(index_vars)
for dim, size in calculate_dimensions(index_vars).items():
all_indexes_dim_sizes[key][dim].add(size)
self.objects_matching_indexes = tuple(objects_matching_indexes)
self.objects_matching_index_vars = tuple(objects_matching_index_vars)
self.all_indexes = all_indexes
self.all_index_vars = all_index_vars
if self.join == "override":
for dim_sizes in all_indexes_dim_sizes.values():
for dim, sizes in dim_sizes.items():
if len(sizes) > 1:
raise AlignmentError(
"cannot align objects with join='override' with matching indexes "
f"along dimension {dim!r} that don't have the same size"
)
def find_matching_unindexed_dims(self) -> None:
unindexed_dim_sizes = defaultdict(set)
for obj in self.objects:
for dim in obj.dims:
if dim not in self.exclude_dims and dim not in obj.xindexes.dims:
unindexed_dim_sizes[dim].add(obj.sizes[dim])
self.unindexed_dim_sizes = unindexed_dim_sizes
def _need_reindex(self, dim, cmp_indexes) -> bool:
"""Whether or not we need to reindex variables for a set of
matching indexes.
We don't reindex when all matching indexes are equal for two reasons:
- It's faster for the usual case (already aligned objects).
- It ensures it's possible to do operations that don't require alignment
on indexes with duplicate values (which cannot be reindexed with
pandas). This is useful, e.g., for overwriting such duplicate indexes.
"""
if not indexes_all_equal(cmp_indexes, self.exclude_dims):
# always reindex when matching indexes are not equal
return True
unindexed_dims_sizes = {}
for d in dim:
if d in self.unindexed_dim_sizes:
sizes = self.unindexed_dim_sizes[d]
if len(sizes) > 1:
# reindex if different sizes are found for unindexed dims
return True
else:
unindexed_dims_sizes[d] = next(iter(sizes))
if unindexed_dims_sizes:
indexed_dims_sizes = {}
for cmp in cmp_indexes:
index_vars = cmp[1]
for var in index_vars.values():
indexed_dims_sizes.update(var.sizes)
for d, size in unindexed_dims_sizes.items():
if indexed_dims_sizes.get(d, -1) != size:
# reindex if unindexed dimension size doesn't match
return True
return False
def _get_index_joiner(self, index_cls) -> Callable:
if self.join in ["outer", "inner"]:
return functools.partial(
functools.reduce,
functools.partial(index_cls.join, how=self.join),
)
elif self.join == "left":
return operator.itemgetter(0)
elif self.join == "right":
return operator.itemgetter(-1)
elif self.join == "override":
# We rewrite all indexes and then use join='left'
return operator.itemgetter(0)
else:
# join='exact' return dummy lambda (error is raised)
return lambda _: None
def align_indexes(self) -> None:
"""Compute all aligned indexes and their corresponding coordinate variables."""
aligned_indexes: dict[MatchingIndexKey, Index] = {}
aligned_index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]] = {}
reindex: dict[MatchingIndexKey, bool] = {}
new_indexes: dict[Hashable, Index] = {}
new_index_vars: dict[Hashable, Variable] = {}
def update_dicts(
key: MatchingIndexKey,
idx: Index,
idx_vars: dict[Hashable, Variable],
need_reindex: bool,
):
reindex[key] = need_reindex
aligned_indexes[key] = idx
aligned_index_vars[key] = idx_vars
for name, var in idx_vars.items():
if name in new_indexes:
other_idx = new_indexes[name]
other_var = new_index_vars[name]
raise AlignmentError(
f"cannot align objects on coordinate {name!r} because of conflicting indexes\n"
f"first index: {idx!r}\nsecond index: {other_idx!r}\n"
f"first variable: {var!r}\nsecond variable: {other_var!r}\n"
)
new_indexes[name] = idx
new_index_vars[name] = var
for key, matching_indexes in self.all_indexes.items():
matching_index_vars = self.all_index_vars[key]
dims = {d for coord in matching_index_vars[0].values() for d in coord.dims}
index_cls = key[1]
if self.join == "override":
joined_index = matching_indexes[0]
joined_index_vars = matching_index_vars[0]
need_reindex = False
elif key in self.indexes:
joined_index = self.indexes[key]
joined_index_vars = self.index_vars[key]
cmp_indexes = list(
zip(
[joined_index] + matching_indexes,
[joined_index_vars] + matching_index_vars,
strict=True,
)
)
need_reindex = self._need_reindex(dims, cmp_indexes)
else:
if len(matching_indexes) > 1:
need_reindex = self._need_reindex(
dims,
list(zip(matching_indexes, matching_index_vars, strict=True)),
)
else:
need_reindex = False
if need_reindex:
if (
isinstance(self.join, CombineKwargDefault)
and self.join != "exact"
):
emit_user_level_warning(
self.join.warning_message(
"This change will result in the following ValueError: "
"cannot be aligned with join='exact' because "
"index/labels/sizes are not equal along "
"these coordinates (dimensions): "
+ ", ".join(
f"{name!r} {dims!r}" for name, dims in key[0]
),
recommend_set_options=False,
),
FutureWarning,
)
if self.join == "exact":
raise AlignmentError(
"cannot align objects with join='exact' where "
"index/labels/sizes are not equal along "
"these coordinates (dimensions): "
+ ", ".join(f"{name!r} {dims!r}" for name, dims in key[0])
+ (
self.join.error_message()
if isinstance(self.join, CombineKwargDefault)
else ""
)
)
joiner = self._get_index_joiner(index_cls)
joined_index = joiner(matching_indexes)
if self.join == "left":
joined_index_vars = matching_index_vars[0]
elif self.join == "right":
joined_index_vars = matching_index_vars[-1]
else:
joined_index_vars = joined_index.create_variables()
else:
joined_index = matching_indexes[0]
joined_index_vars = matching_index_vars[0]
update_dicts(key, joined_index, joined_index_vars, need_reindex)
# Explicitly provided indexes that are not found in objects to align
# may relate to unindexed dimensions so we add them too
for key, idx in self.indexes.items():
if key not in aligned_indexes:
index_vars = self.index_vars[key]
update_dicts(key, idx, index_vars, False)
self.aligned_indexes = aligned_indexes
self.aligned_index_vars = aligned_index_vars
self.reindex = reindex
self.new_indexes = Indexes(new_indexes, new_index_vars)
def assert_unindexed_dim_sizes_equal(self) -> None:
for dim, sizes in self.unindexed_dim_sizes.items():
index_size = self.new_indexes.dims.get(dim)
if index_size is not None:
sizes.add(index_size)
add_err_msg = (
f" (note: an index is found along that dimension "
f"with size={index_size!r})"
)
else:
add_err_msg = ""
if len(sizes) > 1:
raise AlignmentError(
f"cannot reindex or align along dimension {dim!r} "
f"because of conflicting dimension sizes: {sizes!r}" + add_err_msg
)
def override_indexes(self) -> None:
objects = list(self.objects)
for i, obj in enumerate(objects[1:]):
new_indexes = {}
new_variables = {}
matching_indexes = self.objects_matching_indexes[i + 1]
for key, aligned_idx in self.aligned_indexes.items():
obj_idx = matching_indexes.get(key)
if obj_idx is not None:
for name, var in self.aligned_index_vars[key].items():
new_indexes[name] = aligned_idx
new_variables[name] = var.copy(deep=self.copy)
objects[i + 1] = obj._overwrite_indexes(new_indexes, new_variables)
self.results = tuple(objects)
def _get_dim_pos_indexers(
self,
matching_indexes: dict[MatchingIndexKey, Index],
) -> dict[Hashable, Any]:
dim_pos_indexers: dict[Hashable, Any] = {}
dim_index: dict[Hashable, Index] = {}
for key, aligned_idx in self.aligned_indexes.items():
obj_idx = matching_indexes.get(key)
if obj_idx is not None and self.reindex[key]:
indexers = obj_idx.reindex_like(aligned_idx, **self.reindex_kwargs)
for dim, idxer in indexers.items():
if dim in self.exclude_dims:
raise AlignmentError(
f"cannot reindex or align along dimension {dim!r} because "
"it is explicitly excluded from alignment. This is likely caused by "
"wrong results returned by the `reindex_like` method of this index:\n"
f"{obj_idx!r}"
)
if dim in dim_pos_indexers and not np.array_equal(
idxer, dim_pos_indexers[dim]
):
raise AlignmentError(
f"cannot reindex or align along dimension {dim!r} because "
"of conflicting re-indexers returned by multiple indexes\n"
f"first index: {obj_idx!r}\nsecond index: {dim_index[dim]!r}\n"
)
dim_pos_indexers[dim] = idxer
dim_index[dim] = obj_idx
return dim_pos_indexers
def _get_indexes_and_vars(
self,
obj: T_Alignable,
matching_indexes: dict[MatchingIndexKey, Index],
matching_index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]],
) -> tuple[dict[Hashable, Index], dict[Hashable, Variable]]:
new_indexes = {}
new_variables = {}
for key, aligned_idx in self.aligned_indexes.items():
aligned_idx_vars = self.aligned_index_vars[key]
obj_idx = matching_indexes.get(key)
obj_idx_vars = matching_index_vars.get(key)
if obj_idx is None:
# add the aligned index if it relates to unindexed dimensions in obj
dims = {d for var in aligned_idx_vars.values() for d in var.dims}
if dims <= set(obj.dims):
obj_idx = aligned_idx
if obj_idx is not None:
# TODO: always copy object's index when no re-indexing is required?
# (instead of assigning the aligned index)
# (need performance assessment)
if key in self.keep_original_indexes:
assert self.reindex[key] is False
new_idx = obj_idx.copy(deep=self.copy)
new_idx_vars = new_idx.create_variables(obj_idx_vars)
else:
new_idx = aligned_idx
new_idx_vars = {
k: v.copy(deep=self.copy) for k, v in aligned_idx_vars.items()
}
new_indexes.update(dict.fromkeys(new_idx_vars, new_idx))
new_variables.update(new_idx_vars)
return new_indexes, new_variables
def _reindex_one(
self,
obj: T_Alignable,
matching_indexes: dict[MatchingIndexKey, Index],
matching_index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]],
) -> T_Alignable:
new_indexes, new_variables = self._get_indexes_and_vars(
obj, matching_indexes, matching_index_vars
)
dim_pos_indexers = self._get_dim_pos_indexers(matching_indexes)
return obj._reindex_callback(
self,
dim_pos_indexers,
new_variables,
new_indexes,
self.fill_value,
self.exclude_dims,
self.exclude_vars,
)
def reindex_all(self) -> None:
self.results = tuple(
starmap(
self._reindex_one,
zip(
self.objects,
self.objects_matching_indexes,
self.objects_matching_index_vars,
strict=True,
),
)
)
    def align(self) -> None:
        """Run the full alignment pipeline and populate ``self.results``."""
        if not self.indexes and len(self.objects) == 1:
            # fast path for the trivial case
            (obj,) = self.objects
            self.results = (obj.copy(deep=self.copy),)
            return
        # Pipeline: the call order below matters — matching indexes must be
        # found before alignment, and dim-size checks run on the result.
        self.find_matching_indexes()
        self.find_matching_unindexed_dims()
        self.align_indexes()
        self.assert_unindexed_dim_sizes_equal()
        if self.join == "override":
            self.override_indexes()
        elif self.join == "exact" and not self.copy:
            # join='exact' with copy=False: return the inputs unchanged
            self.results = self.objects
        else:
            self.reindex_all()
# Per-arity type variables used by the positional overloads of ``align`` and
# ``broadcast`` below, so that the concrete type of each input object is
# preserved in the returned tuple.
T_Obj1 = TypeVar("T_Obj1", bound="Alignable")
T_Obj2 = TypeVar("T_Obj2", bound="Alignable")
T_Obj3 = TypeVar("T_Obj3", bound="Alignable")
T_Obj4 = TypeVar("T_Obj4", bound="Alignable")
T_Obj5 = TypeVar("T_Obj5", bound="Alignable")
# Typed overloads of ``align``: one per arity up to five positional objects so
# that each input's concrete type is preserved in the output tuple; the final
# variadic overload covers the general case.
@overload
def align(
    obj1: T_Obj1,
    /,
    *,
    join: JoinOptions | CombineKwargDefault = "inner",
    copy: bool = True,
    indexes=None,
    exclude: str | Iterable[Hashable] = frozenset(),
    fill_value=dtypes.NA,
) -> tuple[T_Obj1]: ...
@overload
def align(
    obj1: T_Obj1,
    obj2: T_Obj2,
    /,
    *,
    join: JoinOptions | CombineKwargDefault = "inner",
    copy: bool = True,
    indexes=None,
    exclude: str | Iterable[Hashable] = frozenset(),
    fill_value=dtypes.NA,
) -> tuple[T_Obj1, T_Obj2]: ...
@overload
def align(
    obj1: T_Obj1,
    obj2: T_Obj2,
    obj3: T_Obj3,
    /,
    *,
    join: JoinOptions | CombineKwargDefault = "inner",
    copy: bool = True,
    indexes=None,
    exclude: str | Iterable[Hashable] = frozenset(),
    fill_value=dtypes.NA,
) -> tuple[T_Obj1, T_Obj2, T_Obj3]: ...
@overload
def align(
    obj1: T_Obj1,
    obj2: T_Obj2,
    obj3: T_Obj3,
    obj4: T_Obj4,
    /,
    *,
    join: JoinOptions | CombineKwargDefault = "inner",
    copy: bool = True,
    indexes=None,
    exclude: str | Iterable[Hashable] = frozenset(),
    fill_value=dtypes.NA,
) -> tuple[T_Obj1, T_Obj2, T_Obj3, T_Obj4]: ...
@overload
def align(
    obj1: T_Obj1,
    obj2: T_Obj2,
    obj3: T_Obj3,
    obj4: T_Obj4,
    obj5: T_Obj5,
    /,
    *,
    join: JoinOptions | CombineKwargDefault = "inner",
    copy: bool = True,
    indexes=None,
    exclude: str | Iterable[Hashable] = frozenset(),
    fill_value=dtypes.NA,
) -> tuple[T_Obj1, T_Obj2, T_Obj3, T_Obj4, T_Obj5]: ...
@overload
def align(
    *objects: T_Alignable,
    join: JoinOptions | CombineKwargDefault = "inner",
    copy: bool = True,
    indexes=None,
    exclude: str | Iterable[Hashable] = frozenset(),
    fill_value=dtypes.NA,
) -> tuple[T_Alignable, ...]: ...
def align(
    *objects: T_Alignable,
    join: JoinOptions | CombineKwargDefault = "inner",
    copy: bool = True,
    indexes=None,
    exclude: str | Iterable[Hashable] = frozenset(),
    fill_value=dtypes.NA,
) -> tuple[T_Alignable, ...]:
    """
    Given any number of Dataset and/or DataArray objects, returns new
    objects with aligned indexes and dimension sizes.
    Array from the aligned objects are suitable as input to mathematical
    operators, because along each dimension they have the same index and size.
    Missing values (if ``join != 'inner'``) are filled with ``fill_value``.
    The default fill value is NaN.
    Parameters
    ----------
    *objects : Dataset or DataArray
        Objects to align.
    join : {"outer", "inner", "left", "right", "exact", "override"}, optional
        Method for joining the indexes of the passed objects along each
        dimension:
        - "outer": use the union of object indexes
        - "inner": use the intersection of object indexes
        - "left": use indexes from the first object with each dimension
        - "right": use indexes from the last object with each dimension
        - "exact": instead of aligning, raise `ValueError` when indexes to be
          aligned are not equal
        - "override": if indexes are of same size, rewrite indexes to be
          those of the first object with that dimension. Indexes for the same
          dimension must have the same size in all objects.
    copy : bool, default: True
        If ``copy=True``, data in the return values is always copied. If
        ``copy=False`` and reindexing is unnecessary, or can be performed with
        only slice operations, then the output may share memory with the input.
        In either case, new xarray objects are always returned.
    indexes : dict-like, optional
        Any indexes explicitly provided with the `indexes` argument should be
        used in preference to the aligned indexes.
    exclude : str, iterable of hashable or None, optional
        Dimensions that must be excluded from alignment
    fill_value : scalar or dict-like, optional
        Value to use for newly missing values. If a dict-like, maps
        variable names to fill values. Use a data array's name to
        refer to its values.
    Returns
    -------
    aligned : tuple of DataArray or Dataset
        Tuple of objects with the same type as `*objects` with aligned
        coordinates.
    Raises
    ------
    AlignmentError
        If any dimensions without labels on the arguments have different sizes,
        or a different size than the size of the aligned dimension labels.
    Examples
    --------
    >>> x = xr.DataArray(
    ...     [[25, 35], [10, 24]],
    ...     dims=("lat", "lon"),
    ...     coords={"lat": [35.0, 40.0], "lon": [100.0, 120.0]},
    ... )
    >>> y = xr.DataArray(
    ...     [[20, 5], [7, 13]],
    ...     dims=("lat", "lon"),
    ...     coords={"lat": [35.0, 42.0], "lon": [100.0, 120.0]},
    ... )
    >>> x
    <xarray.DataArray (lat: 2, lon: 2)> Size: 32B
    array([[25, 35],
           [10, 24]])
    Coordinates:
      * lat      (lat) float64 16B 35.0 40.0
      * lon      (lon) float64 16B 100.0 120.0
    >>> y
    <xarray.DataArray (lat: 2, lon: 2)> Size: 32B
    array([[20,  5],
           [ 7, 13]])
    Coordinates:
      * lat      (lat) float64 16B 35.0 42.0
      * lon      (lon) float64 16B 100.0 120.0
    >>> a, b = xr.align(x, y)
    >>> a
    <xarray.DataArray (lat: 1, lon: 2)> Size: 16B
    array([[25, 35]])
    Coordinates:
      * lat      (lat) float64 8B 35.0
      * lon      (lon) float64 16B 100.0 120.0
    >>> b
    <xarray.DataArray (lat: 1, lon: 2)> Size: 16B
    array([[20,  5]])
    Coordinates:
      * lat      (lat) float64 8B 35.0
      * lon      (lon) float64 16B 100.0 120.0
    >>> a, b = xr.align(x, y, join="outer")
    >>> a
    <xarray.DataArray (lat: 3, lon: 2)> Size: 48B
    array([[25., 35.],
           [10., 24.],
           [nan, nan]])
    Coordinates:
      * lat      (lat) float64 24B 35.0 40.0 42.0
      * lon      (lon) float64 16B 100.0 120.0
    >>> b
    <xarray.DataArray (lat: 3, lon: 2)> Size: 48B
    array([[20.,  5.],
           [nan, nan],
           [ 7., 13.]])
    Coordinates:
      * lat      (lat) float64 24B 35.0 40.0 42.0
      * lon      (lon) float64 16B 100.0 120.0
    >>> a, b = xr.align(x, y, join="outer", fill_value=-999)
    >>> a
    <xarray.DataArray (lat: 3, lon: 2)> Size: 48B
    array([[  25,   35],
           [  10,   24],
           [-999, -999]])
    Coordinates:
      * lat      (lat) float64 24B 35.0 40.0 42.0
      * lon      (lon) float64 16B 100.0 120.0
    >>> b
    <xarray.DataArray (lat: 3, lon: 2)> Size: 48B
    array([[  20,    5],
           [-999, -999],
           [   7,   13]])
    Coordinates:
      * lat      (lat) float64 24B 35.0 40.0 42.0
      * lon      (lon) float64 16B 100.0 120.0
    >>> a, b = xr.align(x, y, join="left")
    >>> a
    <xarray.DataArray (lat: 2, lon: 2)> Size: 32B
    array([[25, 35],
           [10, 24]])
    Coordinates:
      * lat      (lat) float64 16B 35.0 40.0
      * lon      (lon) float64 16B 100.0 120.0
    >>> b
    <xarray.DataArray (lat: 2, lon: 2)> Size: 32B
    array([[20.,  5.],
           [nan, nan]])
    Coordinates:
      * lat      (lat) float64 16B 35.0 40.0
      * lon      (lon) float64 16B 100.0 120.0
    >>> a, b = xr.align(x, y, join="right")
    >>> a
    <xarray.DataArray (lat: 2, lon: 2)> Size: 32B
    array([[25., 35.],
           [nan, nan]])
    Coordinates:
      * lat      (lat) float64 16B 35.0 42.0
      * lon      (lon) float64 16B 100.0 120.0
    >>> b
    <xarray.DataArray (lat: 2, lon: 2)> Size: 32B
    array([[20,  5],
           [ 7, 13]])
    Coordinates:
      * lat      (lat) float64 16B 35.0 42.0
      * lon      (lon) float64 16B 100.0 120.0
    >>> a, b = xr.align(x, y, join="exact")
    Traceback (most recent call last):
    ...
    xarray.structure.alignment.AlignmentError: cannot align objects with join='exact' ...
    >>> a, b = xr.align(x, y, join="override")
    >>> a
    <xarray.DataArray (lat: 2, lon: 2)> Size: 32B
    array([[25, 35],
           [10, 24]])
    Coordinates:
      * lat      (lat) float64 16B 35.0 40.0
      * lon      (lon) float64 16B 100.0 120.0
    >>> b
    <xarray.DataArray (lat: 2, lon: 2)> Size: 32B
    array([[20,  5],
           [ 7, 13]])
    Coordinates:
      * lat      (lat) float64 16B 35.0 40.0
      * lon      (lon) float64 16B 100.0 120.0
    """
    # All the heavy lifting is delegated to the Aligner helper class.
    aligner = Aligner(
        objects,
        join=join,
        copy=copy,
        indexes=indexes,
        exclude_dims=exclude,
        fill_value=fill_value,
    )
    aligner.align()
    return aligner.results
def deep_align(
    objects: Iterable[Any],
    join: JoinOptions | CombineKwargDefault = "inner",
    copy: bool = True,
    indexes=None,
    exclude: str | Iterable[Hashable] = frozenset(),
    raise_on_invalid: bool = True,
    fill_value=dtypes.NA,
) -> list[Any]:
    """Align objects for merging, recursing into dictionary values.

    This function is not public API.
    """
    from xarray.core.coordinates import Coordinates
    from xarray.core.dataarray import DataArray
    from xarray.core.dataset import Dataset

    if indexes is None:
        indexes = {}

    def is_alignable(obj):
        return isinstance(obj, Coordinates | DataArray | Dataset)

    # Sentinels: _DIRECT marks a top-level alignable (no dict key);
    # _PENDING marks an output slot to be filled with its aligned counterpart.
    _DIRECT: Final = object()
    _PENDING: Final = object()
    positions: list[int] = []
    keys: list[type[object] | Hashable] = []
    targets: list[Alignable] = []
    out: list[Any] = []
    for position, item in enumerate(objects):
        if is_alignable(item):
            positions.append(position)
            keys.append(_DIRECT)
            targets.append(item)
            out.append(_PENDING)
        elif is_dict_like(item):
            placeholder: dict = {}
            for k, v in item.items():
                if is_alignable(v) and k not in indexes:
                    # Skip variables in indexes for alignment, because these
                    # should to be overwritten instead:
                    # https://github.com/pydata/xarray/issues/725
                    # https://github.com/pydata/xarray/issues/3377
                    # TODO(shoyer): doing this here feels super-hacky -- can we
                    # move it explicitly into merge instead?
                    positions.append(position)
                    keys.append(k)
                    targets.append(v)
                    placeholder[k] = _PENDING
                else:
                    placeholder[k] = v
            out.append(placeholder)
        elif raise_on_invalid:
            raise ValueError(
                "object to align is neither an xarray.Dataset, "
                f"an xarray.DataArray nor a dictionary: {item!r}"
            )
        else:
            out.append(item)

    aligned = align(
        *targets,
        join=join,
        copy=copy,
        indexes=indexes,
        exclude=exclude,
        fill_value=fill_value,
    )
    # Write the aligned objects back into their original slots.
    for position, key, aligned_obj in zip(positions, keys, aligned, strict=True):
        if key is _DIRECT:
            out[position] = aligned_obj
        else:
            out[position][key] = aligned_obj
    return out
def reindex(
    obj: T_Alignable,
    indexers: Mapping[Any, Any],
    method: str | None = None,
    tolerance: float | Iterable[float] | str | None = None,
    copy: bool = True,
    fill_value: Any = dtypes.NA,
    sparse: bool = False,
    exclude_vars: Iterable[Hashable] = frozenset(),
) -> T_Alignable:
    """Re-index either a Dataset or a DataArray.

    Not public API.
    """
    # TODO: (benbovy - explicit indexes): uncomment?
    # --> from reindex docstrings: "any mismatched dimension is simply ignored"
    # bad_keys = [k for k in indexers if k not in obj._indexes and k not in obj.dims]
    # if bad_keys:
    #     raise ValueError(
    #         f"indexer keys {bad_keys} do not correspond to any indexed coordinate "
    #         "or unindexed dimension in the object to reindex"
    #     )
    options = dict(
        indexes=indexers,
        method=method,
        tolerance=tolerance,
        copy=copy,
        fill_value=fill_value,
        sparse=sparse,
        exclude_vars=exclude_vars,
    )
    aligner = Aligner((obj,), **options)
    aligner.align()
    (result,) = aligner.results
    return result
def reindex_like(
    obj: T_Alignable,
    other: Dataset | DataArray,
    method: str | None = None,
    tolerance: float | Iterable[float] | str | None = None,
    copy: bool = True,
    fill_value: Any = dtypes.NA,
) -> T_Alignable:
    """Re-index either a Dataset or a DataArray like another Dataset/DataArray.

    Not public API.
    """
    if not other._indexes:
        # This check is not performed in Aligner.
        common_dims = (d for d in other.dims if d in obj.dims)
        for dim in common_dims:
            other_size = other.sizes[dim]
            obj_size = obj.sizes[dim]
            if other_size != obj_size:
                raise ValueError(
                    "different size for unlabeled "
                    f"dimension on argument {dim!r}: {other_size!r} vs {obj_size!r}"
                )
    return reindex(
        obj,
        indexers=other.xindexes,
        method=method,
        tolerance=tolerance,
        copy=copy,
        fill_value=fill_value,
    )
def _get_broadcast_dims_map_common_coords(args, exclude):
    """Collect the broadcast dimension sizes and shared indexed coordinates.

    Returns ``(dims_map, common_coords)`` where ``dims_map`` maps each
    non-excluded dimension to its size and ``common_coords`` holds the
    coordinate variables of every index found along those dimensions.
    """
    dims_map: dict = {}
    common_coords: dict = {}
    for obj in args:
        for dim in obj.dims:
            # skip excluded dims and dims whose index coordinates were
            # already collected from an earlier object
            if dim in exclude or dim in common_coords:
                continue
            dims_map[dim] = obj.sizes[dim]
            if dim in obj._indexes:
                common_coords.update(obj.xindexes.get_all_coords(dim))
    return dims_map, common_coords
def _broadcast_helper(
    arg: T_Alignable, exclude, dims_map, common_coords
) -> T_Alignable:
    """Broadcast a single (already aligned) object to the common dimensions."""
    from xarray.core.dataarray import DataArray
    from xarray.core.dataset import Dataset

    def _expand(var):
        # Extend a copy of dims_map with the sizes of any excluded dims
        # that are present on this variable.
        target_dims = dict(dims_map)
        for dim in exclude:
            with suppress(ValueError):
                # ignore dim not in var.dims
                target_dims[dim] = var.shape[var.dims.index(dim)]
        return var.set_dims(target_dims)

    # remove casts once https://github.com/python/mypy/issues/12800 is resolved
    if isinstance(arg, DataArray):
        data = _expand(arg.variable)
        coords = {**dict(arg.coords), **common_coords}
        return arg.__class__(  # type: ignore[return-value,unused-ignore]
            data, coords, data.dims, name=arg.name, attrs=arg.attrs
        )
    elif isinstance(arg, Dataset):
        data_vars = {name: _expand(arg.variables[name]) for name in arg.data_vars}
        coords = {**dict(arg.coords), **common_coords}
        return arg.__class__(  # type: ignore[return-value,unused-ignore]
            data_vars, coords, arg.attrs
        )
    else:
        raise ValueError("all input must be Dataset or DataArray objects")
# Typed overloads of ``broadcast``: one per arity up to five positional
# objects so that each input's concrete type is preserved in the output
# tuple; the final variadic overload covers the general case.
@overload
def broadcast(
    obj1: T_Obj1, /, *, exclude: str | Iterable[Hashable] | None = None
) -> tuple[T_Obj1]: ...
@overload
def broadcast(
    obj1: T_Obj1, obj2: T_Obj2, /, *, exclude: str | Iterable[Hashable] | None = None
) -> tuple[T_Obj1, T_Obj2]: ...
@overload
def broadcast(
    obj1: T_Obj1,
    obj2: T_Obj2,
    obj3: T_Obj3,
    /,
    *,
    exclude: str | Iterable[Hashable] | None = None,
) -> tuple[T_Obj1, T_Obj2, T_Obj3]: ...
@overload
def broadcast(
    obj1: T_Obj1,
    obj2: T_Obj2,
    obj3: T_Obj3,
    obj4: T_Obj4,
    /,
    *,
    exclude: str | Iterable[Hashable] | None = None,
) -> tuple[T_Obj1, T_Obj2, T_Obj3, T_Obj4]: ...
@overload
def broadcast(
    obj1: T_Obj1,
    obj2: T_Obj2,
    obj3: T_Obj3,
    obj4: T_Obj4,
    obj5: T_Obj5,
    /,
    *,
    exclude: str | Iterable[Hashable] | None = None,
) -> tuple[T_Obj1, T_Obj2, T_Obj3, T_Obj4, T_Obj5]: ...
@overload
def broadcast(
    *args: T_Alignable, exclude: str | Iterable[Hashable] | None = None
) -> tuple[T_Alignable, ...]: ...
def broadcast(
    *args: T_Alignable, exclude: str | Iterable[Hashable] | None = None
) -> tuple[T_Alignable, ...]:
    """Explicitly broadcast any number of DataArray or Dataset objects against
    one another.
    xarray objects automatically broadcast against each other in arithmetic
    operations, so this function should not be necessary for normal use.
    If no change is needed, the input data is returned to the output without
    being copied.
    Parameters
    ----------
    *args : DataArray or Dataset
        Arrays to broadcast against each other.
    exclude : str, iterable of hashable or None, optional
        Dimensions that must not be broadcasted
    Returns
    -------
    broadcast : tuple of DataArray or tuple of Dataset
        The same data as the input arrays, but with additional dimensions
        inserted so that all data arrays have the same dimensions and shape.
    Examples
    --------
    Broadcast two data arrays against one another to fill out their dimensions:
    >>> a = xr.DataArray([1, 2, 3], dims="x")
    >>> b = xr.DataArray([5, 6], dims="y")
    >>> a
    <xarray.DataArray (x: 3)> Size: 24B
    array([1, 2, 3])
    Dimensions without coordinates: x
    >>> b
    <xarray.DataArray (y: 2)> Size: 16B
    array([5, 6])
    Dimensions without coordinates: y
    >>> a2, b2 = xr.broadcast(a, b)
    >>> a2
    <xarray.DataArray (x: 3, y: 2)> Size: 48B
    array([[1, 1],
           [2, 2],
           [3, 3]])
    Dimensions without coordinates: x, y
    >>> b2
    <xarray.DataArray (x: 3, y: 2)> Size: 48B
    array([[5, 6],
           [5, 6],
           [5, 6]])
    Dimensions without coordinates: x, y
    Fill out the dimensions of all data variables in a dataset:
    >>> ds = xr.Dataset({"a": a, "b": b})
    >>> (ds2,) = xr.broadcast(ds)  # use tuple unpacking to extract one dataset
    >>> ds2
    <xarray.Dataset> Size: 96B
    Dimensions:  (x: 3, y: 2)
    Dimensions without coordinates: x, y
    Data variables:
        a        (x, y) int64 48B 1 1 2 2 3 3
        b        (x, y) int64 48B 5 6 5 6 5 6
    """
    if exclude is None:
        exclude = set()
    # Align first (outer join, no copy) so all inputs share index labels,
    # then expand each object to the union of the non-excluded dimensions.
    args = align(*args, join="outer", copy=False, exclude=exclude)
    dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)
    result = [_broadcast_helper(arg, exclude, dims_map, common_coords) for arg in args]
    return tuple(result)
|
Aligner
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ActorDefinitionResourceRequirements.py
|
{
"start": 865,
"end": 1022
}
|
class ____(BaseModel):
class Config:
extra = Extra.forbid
jobType: JobType
resourceRequirements: ResourceRequirements
|
JobTypeResourceLimit
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/migrations/0100_project_readthedocs_yaml_path.py
|
{
"start": 189,
"end": 1771
}
|
class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0099_alter_domain_https"),
]
operations = [
migrations.AddField(
model_name="historicalproject",
name="readthedocs_yaml_path",
field=models.CharField(
blank=True,
default=None,
help_text="<strong>Warning: experimental feature</strong>. Custom path from repository top-level to your <code>.readthedocs.yaml</code>, ex. <code>subpath/docs/.readthedocs.yaml</code>. Leave blank for default value: <code>.readthedocs.yaml</code>.",
max_length=1024,
null=True,
validators=[readthedocs.projects.validators.validate_build_config_file],
verbose_name="Path for .readthedocs.yaml",
),
),
migrations.AddField(
model_name="project",
name="readthedocs_yaml_path",
field=models.CharField(
blank=True,
default=None,
help_text="<strong>Warning: experimental feature</strong>. Custom path from repository top-level to your <code>.readthedocs.yaml</code>, ex. <code>subpath/docs/.readthedocs.yaml</code>. Leave blank for default value: <code>.readthedocs.yaml</code>.",
max_length=1024,
null=True,
validators=[readthedocs.projects.validators.validate_build_config_file],
verbose_name="Path for .readthedocs.yaml",
),
),
]
|
Migration
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-tableau/dagster_tableau/resources.py
|
{
"start": 19329,
"end": 20186
}
|
class ____(BaseTableauClient):
"""Represents a client for Tableau Server and provides utilities
to interact with Tableau APIs.
"""
def __init__(
self,
connected_app_client_id: str,
connected_app_secret_id: str,
connected_app_secret_value: str,
username: str,
site_name: str,
server_name: str,
):
self.server_name = server_name
super().__init__(
connected_app_client_id=connected_app_client_id,
connected_app_secret_id=connected_app_secret_id,
connected_app_secret_value=connected_app_secret_value,
username=username,
site_name=site_name,
)
@property
def base_url(self) -> str:
"""Base URL for Tableau Cloud."""
return f"https://{self.server_name}"
@beta
|
TableauServerClient
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-zoho-crm/source_zoho_crm/types.py
|
{
"start": 1174,
"end": 1873
}
|
class ____(ZohoBaseType):
textarea = "textarea"
event_reminder = "event_reminder"
phone = "phone"
text = "text"
profileimage = "profileimage"
picklist = "picklist"
bigint = "bigint"
website = "website"
email = "email"
date = "date"
datetime = "datetime"
integer = "integer"
currency = "currency"
double = "double"
boolean = "boolean"
lookup = "lookup"
ownerlookup = "ownerlookup"
autonumber = "autonumber"
multiselectpicklist = "multiselectpicklist"
RRULE = "RRULE"
ALARM = "ALARM"
@classmethod
def numeric_string_types(cls) -> Iterable["ZohoDataType"]:
return cls.autonumber, cls.bigint
|
ZohoDataType
|
python
|
weaviate__weaviate-python-client
|
weaviate/gql/filter.py
|
{
"start": 14124,
"end": 14700
}
|
class ____(NearMedia):
"""NearImage class used to filter weaviate objects."""
def __init__(
self,
content: dict,
):
"""Initialize a NearImage class instance.
Args:
content: The content of the `nearImage` clause.
Raises:
TypeError: If 'content' is not of type dict.
TypeError: If 'content["image"]' is not of type str.
ValueError: If 'content' has key "certainty"/"distance" but the value is not float.
"""
super().__init__(content, MediaType.IMAGE)
|
NearImage
|
python
|
facebook__pyre-check
|
api/query.py
|
{
"start": 823,
"end": 1098
}
|
class ____(NamedTuple):
name: str
parameters: List[DefineParameter]
return_annotation: str
def get_class_name(self) -> str:
return ".".join(self.name.split(".")[:-1])
def get_method_name(self) -> str:
return self.name.split(".")[-1]
|
Define
|
python
|
getsentry__sentry
|
src/sentry/integrations/slack/requests/base.py
|
{
"start": 1501,
"end": 1700
}
|
class ____(Exception):
"""
Something was invalid about the request from Slack.
Includes the status the endpoint should return, based on the error.
"""
status: int
|
SlackRequestError
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-azure/dagster_azure/blob/fake_blob_client.py
|
{
"start": 1236,
"end": 1281
}
|
class ____:
name: str
url: str
|
FakeBlob
|
python
|
django__django
|
tests/backends/models.py
|
{
"start": 2489,
"end": 2866
}
|
class ____(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateField()
reporter = models.ForeignKey(Reporter, models.CASCADE)
reporter_proxy = models.ForeignKey(
ReporterProxy,
models.SET_NULL,
null=True,
related_name="reporter_proxy",
)
def __str__(self):
return self.headline
|
Article
|
python
|
walkccc__LeetCode
|
solutions/2932. Maximum Strong Pair XOR I/2932.py
|
{
"start": 0,
"end": 141
}
|
class ____:
def __init__(self):
self.children: list[TrieNode | None] = [None] * 2
self.mn = math.inf
self.mx = -math.inf
|
TrieNode
|
python
|
mlflow__mlflow
|
mlflow/telemetry/events.py
|
{
"start": 2077,
"end": 3092
}
|
class ____(Event):
name: str = "genai_evaluate"
@classmethod
def parse(cls, arguments: dict[str, Any]) -> dict[str, Any] | None:
from mlflow.genai.scorers.base import Scorer
from mlflow.genai.scorers.builtin_scorers import BuiltInScorer
record_params = {}
# Track if predict_fn is provided
record_params["predict_fn_provided"] = arguments.get("predict_fn") is not None
# Track builtin scorers
scorers = arguments.get("scorers") or []
builtin_scorers = {
type(scorer).__name__ for scorer in scorers if isinstance(scorer, BuiltInScorer)
}
record_params["builtin_scorers"] = sorted(builtin_scorers)
# Track scorer kind counts
scorer_kind_count = Counter[str](
scorer.kind.value for scorer in scorers if isinstance(scorer, Scorer)
)
record_params["scorer_kind_count"] = dict[str, int](sorted(scorer_kind_count.items()))
return record_params
|
GenAIEvaluateEvent
|
python
|
pallets__jinja
|
src/jinja2/runtime.py
|
{
"start": 32443,
"end": 33247
}
|
class ____(Undefined):
"""An undefined that returns the debug info when printed.
>>> foo = DebugUndefined(name='foo')
>>> str(foo)
'{{ foo }}'
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
__slots__ = ()
def __str__(self) -> str:
if self._undefined_hint:
message = f"undefined value printed: {self._undefined_hint}"
elif self._undefined_obj is missing:
message = self._undefined_name # type: ignore
else:
message = (
f"no such element: {object_type_repr(self._undefined_obj)}"
f"[{self._undefined_name!r}]"
)
return f"{{{{ {message} }}}}"
|
DebugUndefined
|
python
|
scipy__scipy
|
scipy/special/tests/test_spherical_bessel.py
|
{
"start": 9303,
"end": 9801
}
|
class ____:
def fundamental_theorem(self, n, a, b):
integral, tolerance = quad(lambda z: self.df(n, z), a, b)
assert_allclose(integral,
self.f(n, b) - self.f(n, a),
atol=tolerance)
@pytest.mark.slow
def test_fundamental_theorem_0(self):
self.fundamental_theorem(0, 3.0, 15.0)
@pytest.mark.slow
def test_fundamental_theorem_7(self):
self.fundamental_theorem(7, 0.5, 1.2)
|
SphericalDerivativesTestCase
|
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/xaxis/_rangebreak.py
|
{
"start": 235,
"end": 11958
}
|
class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.xaxis"
_path_str = "layout.xaxis.rangebreak"
_valid_props = {
"bounds",
"dvalue",
"enabled",
"name",
"pattern",
"templateitemname",
"values",
}
@property
def bounds(self):
"""
Sets the lower and upper bounds of this axis rangebreak. Can be
used with `pattern`.
The 'bounds' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'bounds[0]' property accepts values of any type
(1) The 'bounds[1]' property accepts values of any type
Returns
-------
list
"""
return self["bounds"]
@bounds.setter
def bounds(self, val):
self["bounds"] = val
@property
def dvalue(self):
"""
Sets the size of each `values` item. The default is one day in
milliseconds.
The 'dvalue' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["dvalue"]
@dvalue.setter
def dvalue(self, val):
self["dvalue"] = val
@property
def enabled(self):
"""
Determines whether this axis rangebreak is enabled or disabled.
Please note that `rangebreaks` only work for "date" axis type.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def pattern(self):
"""
Determines a pattern on the time line that generates breaks. If
*day of week* - days of the week in English e.g. 'Sunday' or
`sun` (matching is case-insensitive and considers only the
first three characters), as well as Sunday-based integers
between 0 and 6. If "hour" - hour (24-hour clock) as decimal
numbers between 0 and 24. for more info. Examples: - { pattern:
'day of week', bounds: [6, 1] } or simply { bounds: ['sat',
'mon'] } breaks from Saturday to Monday (i.e. skips the
weekends). - { pattern: 'hour', bounds: [17, 8] } breaks from
5pm to 8am (i.e. skips non-work hours).
The 'pattern' property is an enumeration that may be specified as:
- One of the following enumeration values:
['day of week', 'hour', '']
Returns
-------
Any
"""
return self["pattern"]
@pattern.setter
def pattern(self, val):
self["pattern"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def values(self):
"""
Sets the coordinate values corresponding to the rangebreaks. An
alternative to `bounds`. Use `dvalue` to set the size of the
values along the axis.
The 'values' property is an info array that may be specified as:
* a list of elements where:
The 'values[i]' property accepts values of any type
Returns
-------
list
"""
return self["values"]
@values.setter
def values(self, val):
self["values"] = val
@property
def _prop_descriptions(self):
return """\
bounds
Sets the lower and upper bounds of this axis
rangebreak. Can be used with `pattern`.
dvalue
Sets the size of each `values` item. The default is one
day in milliseconds.
enabled
Determines whether this axis rangebreak is enabled or
disabled. Please note that `rangebreaks` only work for
"date" axis type.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
pattern
Determines a pattern on the time line that generates
breaks. If *day of week* - days of the week in English
e.g. 'Sunday' or `sun` (matching is case-insensitive
and considers only the first three characters), as well
as Sunday-based integers between 0 and 6. If "hour" -
hour (24-hour clock) as decimal numbers between 0 and
24. for more info. Examples: - { pattern: 'day of
week', bounds: [6, 1] } or simply { bounds: ['sat',
'mon'] } breaks from Saturday to Monday (i.e. skips
the weekends). - { pattern: 'hour', bounds: [17, 8] }
breaks from 5pm to 8am (i.e. skips non-work hours).
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
values
Sets the coordinate values corresponding to the
rangebreaks. An alternative to `bounds`. Use `dvalue`
to set the size of the values along the axis.
"""
    def __init__(
        self,
        arg=None,
        bounds=None,
        dvalue=None,
        enabled=None,
        name=None,
        pattern=None,
        templateitemname=None,
        values=None,
        **kwargs,
    ):
        """
        Construct a new Rangebreak object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.xaxis.Rangebreak`
        bounds
            Sets the lower and upper bounds of this axis
            rangebreak. Can be used with `pattern`.
        dvalue
            Sets the size of each `values` item. The default is one
            day in milliseconds.
        enabled
            Determines whether this axis rangebreak is enabled or
            disabled. Please note that `rangebreaks` only work for
            "date" axis type.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        pattern
            Determines a pattern on the time line that generates
            breaks. If *day of week* - days of the week in English
            e.g. 'Sunday' or `sun` (matching is case-insensitive
            and considers only the first three characters), as well
            as Sunday-based integers between 0 and 6. If "hour" -
            hour (24-hour clock) as decimal numbers between 0 and
            24. for more info. Examples: - { pattern: 'day of
            week', bounds: [6, 1] } or simply { bounds: ['sat',
            'mon'] } breaks from Saturday to Monday (i.e. skips
            the weekends). - { pattern: 'hour', bounds: [17, 8] }
            breaks from 5pm to 8am (i.e. skips non-work hours).
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        values
            Sets the coordinate values corresponding to the
            rangebreaks. An alternative to `bounds`. Use `dvalue`
            to set the size of the values along the axis.

        Returns
        -------
        Rangebreak
        """
        # Register this object under its plot-schema name.
        super().__init__("rangebreaks")
        # Internal fast path: when constructed by a parent object, adopt the
        # parent reference and skip all argument processing below.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize `arg` into a plain dict (shallow copy, so the caller's
        # dict is never mutated by property assignment).
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.layout.xaxis.Rangebreak
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.xaxis.Rangebreak`""")

        # Validation flags consumed from kwargs before the remaining kwargs
        # are treated as properties.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Assign each known property; explicit keyword arguments are passed
        # alongside the corresponding `arg` entry to _set_property.
        self._set_property("bounds", arg, bounds)
        self._set_property("dvalue", arg, dvalue)
        self._set_property("enabled", arg, enabled)
        self._set_property("name", arg, name)
        self._set_property("pattern", arg, pattern)
        self._set_property("templateitemname", arg, templateitemname)
        self._set_property("values", arg, values)

        # Any leftover entries (unknown properties) are processed last.
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
|
Rangebreak
|
python
|
wandb__wandb
|
wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/inotify_c.py
|
{
"start": 5542,
"end": 15420
}
|
class ____(object):
    """
    Linux inotify(7) API wrapper class.

    :param path:
        The directory path for which we want an inotify object.
    :type path:
        :class:`bytes`
    :param recursive:
        ``True`` if subdirectories should be monitored; ``False`` otherwise.
    """

    def __init__(self, path, recursive=False, event_mask=WATCHDOG_ALL_EVENTS):
        # The file descriptor associated with the inotify instance.
        inotify_fd = inotify_init()
        if inotify_fd == -1:
            Inotify._raise_error()
        self._inotify_fd = inotify_fd
        self._lock = threading.Lock()

        # Stores the watch descriptor for a given path.
        self._wd_for_path = dict()
        # Reverse mapping: watch descriptor -> path (kept in sync with the above).
        self._path_for_wd = dict()

        self._path = path
        self._event_mask = event_mask
        self._is_recursive = recursive
        # Registers watches for `path` (and its subtree when recursive).
        self._add_dir_watch(path, recursive, event_mask)
        # cookie -> MOVED_FROM event, used to pair up rename events.
        self._moved_from_events = dict()

    @property
    def event_mask(self):
        """The event mask for this inotify instance."""
        return self._event_mask

    @property
    def path(self):
        """The path associated with the inotify instance."""
        return self._path

    @property
    def is_recursive(self):
        """Whether we are watching directories recursively."""
        return self._is_recursive

    @property
    def fd(self):
        """The file descriptor associated with the inotify instance."""
        return self._inotify_fd

    def clear_move_records(self):
        """Clear cached records of MOVED_FROM events"""
        self._moved_from_events = dict()

    def source_for_move(self, destination_event):
        """
        The source path corresponding to the given MOVED_TO event.

        If the source path is outside the monitored directories, None
        is returned instead.
        """
        # Pairing is done via the kernel-assigned `cookie` shared by the
        # MOVED_FROM / MOVED_TO pair.
        if destination_event.cookie in self._moved_from_events:
            return self._moved_from_events[destination_event.cookie].src_path
        else:
            return None

    def remember_move_from_event(self, event):
        """
        Save this event as the source event for future MOVED_TO events to
        reference.
        """
        self._moved_from_events[event.cookie] = event

    def add_watch(self, path):
        """
        Adds a watch for the given path.

        :param path:
            Path to begin monitoring.
        """
        with self._lock:
            self._add_watch(path, self._event_mask)

    def remove_watch(self, path):
        """
        Removes a watch for the given path.

        :param path:
            Path string for which the watch will be removed.
        """
        with self._lock:
            # KeyError propagates if `path` was never watched.
            wd = self._wd_for_path.pop(path)
            del self._path_for_wd[wd]
            if inotify_rm_watch(self._inotify_fd, wd) == -1:
                Inotify._raise_error()

    def close(self):
        """
        Closes the inotify instance and removes all associated watches.
        """
        with self._lock:
            # Only the root watch is removed explicitly; closing the fd
            # releases all remaining watches kernel-side.
            if self._path in self._wd_for_path:
                wd = self._wd_for_path[self._path]
                inotify_rm_watch(self._inotify_fd, wd)
            os.close(self._inotify_fd)

    def read_events(self, event_buffer_size=DEFAULT_EVENT_BUFFER_SIZE):
        """
        Reads events from inotify and yields them.
        """
        # HACK: We need to traverse the directory path
        # recursively and simulate events for newly
        # created subdirectories/files. This will handle
        # mkdir -p foobar/blah/bar; touch foobar/afile

        def _recursive_simulate(src_path):
            # Walk the new subtree, add watches for each directory, and
            # synthesize IN_CREATE events for everything found.
            events = []
            for root, dirnames, filenames in os.walk(src_path):
                for dirname in dirnames:
                    try:
                        full_path = os.path.join(root, dirname)
                        wd_dir = self._add_watch(full_path, self._event_mask)
                        e = InotifyEvent(
                            wd_dir, InotifyConstants.IN_CREATE | InotifyConstants.IN_ISDIR, 0, dirname, full_path)
                        events.append(e)
                    except OSError:
                        pass
                for filename in filenames:
                    full_path = os.path.join(root, filename)
                    wd_parent_dir = self._wd_for_path[os.path.dirname(full_path)]
                    e = InotifyEvent(
                        wd_parent_dir, InotifyConstants.IN_CREATE, 0, filename, full_path)
                    events.append(e)
            return events

        event_buffer = None
        while True:
            try:
                event_buffer = os.read(self._inotify_fd, event_buffer_size)
            except OSError as e:
                # Retry reads interrupted by a signal.
                if e.errno == errno.EINTR:
                    continue
            # NOTE(review): on a non-EINTR OSError, `event_buffer` stays None
            # and is passed to _parse_event_buffer below, which would raise
            # TypeError on len(None) — confirm intended failure mode.
            break
        with self._lock:
            event_list = []
            for wd, mask, cookie, name in Inotify._parse_event_buffer(event_buffer):
                # wd == -1 signals an overflow/queue event with no watch.
                if wd == -1:
                    continue
                wd_path = self._path_for_wd[wd]
                src_path = os.path.join(wd_path, name) if name else wd_path #avoid trailing slash
                inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)

                if inotify_event.is_moved_from:
                    self.remember_move_from_event(inotify_event)
                elif inotify_event.is_moved_to:
                    move_src_path = self.source_for_move(inotify_event)
                    # If the moved directory itself was watched, re-point its
                    # watch bookkeeping to the new location.
                    if move_src_path in self._wd_for_path:
                        moved_wd = self._wd_for_path[move_src_path]
                        del self._wd_for_path[move_src_path]
                        self._wd_for_path[inotify_event.src_path] = moved_wd
                        self._path_for_wd[moved_wd] = inotify_event.src_path
                    # Rebuild the event after updating the move bookkeeping.
                    src_path = os.path.join(wd_path, name)
                    inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)

                if inotify_event.is_ignored:
                    # Clean up book-keeping for deleted watches.
                    path = self._path_for_wd.pop(wd)
                    if self._wd_for_path[path] == wd:
                        del self._wd_for_path[path]
                    continue

                event_list.append(inotify_event)

                if (self.is_recursive and inotify_event.is_directory and
                        inotify_event.is_create):

                    # TODO: When a directory from another part of the
                    # filesystem is moved into a watched directory, this
                    # will not generate events for the directory tree.
                    # We need to coalesce IN_MOVED_TO events and those
                    # IN_MOVED_TO events which don't pair up with
                    # IN_MOVED_FROM events should be marked IN_CREATE
                    # instead relative to this directory.
                    try:
                        self._add_watch(src_path, self._event_mask)
                    except OSError:
                        continue

                    event_list.extend(_recursive_simulate(src_path))

        return event_list

    # Non-synchronized methods.
    def _add_dir_watch(self, path, recursive, mask):
        """
        Adds a watch (optionally recursively) for the given directory path
        to monitor events specified by the mask.

        :param path:
            Path to monitor
        :param recursive:
            ``True`` to monitor recursively.
        :param mask:
            Event bit mask.
        """
        if not os.path.isdir(path):
            raise OSError('Path is not a directory')
        self._add_watch(path, mask)
        if recursive:
            for root, dirnames, _ in os.walk(path):
                for dirname in dirnames:
                    full_path = os.path.join(root, dirname)
                    # Symlinks are skipped to avoid escaping the watched tree.
                    if os.path.islink(full_path):
                        continue
                    self._add_watch(full_path, mask)

    def _add_watch(self, path, mask):
        """
        Adds a watch for the given path to monitor events specified by the
        mask.

        :param path:
            Path to monitor
        :param mask:
            Event bit mask.
        """
        wd = inotify_add_watch(self._inotify_fd, path, mask)
        if wd == -1:
            Inotify._raise_error()
        self._wd_for_path[path] = wd
        self._path_for_wd[wd] = path
        return wd

    @staticmethod
    def _raise_error():
        """
        Raises errors for inotify failures.
        """
        err = ctypes.get_errno()
        if err == errno.ENOSPC:
            raise OSError("inotify watch limit reached")
        elif err == errno.EMFILE:
            raise OSError("inotify instance limit reached")
        else:
            raise OSError(os.strerror(err))

    @staticmethod
    def _parse_event_buffer(event_buffer):
        """
        Parses an event buffer of ``inotify_event`` structs returned by
        inotify::

            struct inotify_event {
                __s32 wd;            /* watch descriptor */
                __u32 mask;          /* watch mask */
                __u32 cookie;        /* cookie to synchronize two events */
                __u32 len;           /* length (including nulls) of name */
                char  name[0];       /* stub for possible name */
            };

        The ``cookie`` member of this struct is used to pair two related
        events, for example, it pairs an IN_MOVED_FROM event with an
        IN_MOVED_TO event.
        """
        i = 0
        # Each record is a 16-byte fixed header ('iIII') followed by a
        # `length`-byte, NUL-padded name.
        while i + 16 <= len(event_buffer):
            wd, mask, cookie, length = struct.unpack_from('iIII', event_buffer, i)
            name = event_buffer[i + 16:i + 16 + length].rstrip(b'\0')
            i += 16 + length
            yield wd, mask, cookie, name
|
Inotify
|
python
|
realpython__materials
|
python-range-membership-test/range_tools.py
|
{
"start": 66,
"end": 1717
}
|
class ____:
    """An integer range with membership, length, and indexing support.

    Mirrors the semantics of the built-in ``range`` type for positive
    steps.  Field assignment and ``__post_init__`` invocation are
    expected to come from a ``@dataclass`` decorator applied at the
    definition site.
    """

    start: int
    stop: int
    step: int = 1

    def __post_init__(self):
        """Validate parameters.

        Raises ValueError if start/stop are not ints or step is not a
        positive int.
        """
        if not isinstance(self.start, int):
            raise ValueError("'start' must be an integer")
        if not isinstance(self.stop, int):
            raise ValueError("'stop' must be an integer")
        if not isinstance(self.step, int) or self.step <= 0:
            raise ValueError("'step' must be a positive integer")

    def __iter__(self):
        """Create an iterator based on the range."""
        return _RangeIterator(self.start, self.stop, self.step)

    def __contains__(self, element):
        """Check if element is a member of the range."""
        return (
            self.start <= element < self.stop
            and (element - self.start) % self.step == 0
        )

    def __len__(self):
        """Calculate the number of elements in the range."""
        if self.stop <= self.start:
            return 0
        # Integer ceiling division: exact for arbitrarily large ints,
        # unlike math.ceil() on a float quotient, which loses precision
        # beyond 2**53.
        return (self.stop - self.start + self.step - 1) // self.step

    def __getitem__(self, index):
        """Get an element in the range based on its index.

        Negative indices count from the end, matching the built-in
        ``range`` type.  Raises IndexError when out of bounds.
        """
        size = len(self)
        # Normalize negative indices; report the caller's original index
        # in the error message for easier debugging.
        effective = index + size if index < 0 else index
        if not 0 <= effective < size:
            raise IndexError(f"range index out of range: {index}")
        return self.start + effective * self.step

    def count(self, element):
        """Count number of occurences of element in range."""
        # A range holds distinct values, so the count is 0 or 1.
        return 1 if element in self else 0

    def index(self, element):
        """Calculate index of element in range.

        Raises ValueError if the element is not a member.
        """
        if element not in self:
            raise ValueError(f"{element} not in range")
        return (element - self.start) // self.step
@dataclass
|
Range
|
python
|
django-compressor__django-compressor
|
compressor/exceptions.py
|
{
"start": 100,
"end": 232
}
|
class ____(Exception):
    """Raised when a file cannot be compressed."""
|
UncompressableFileError
|
python
|
apache__airflow
|
task-sdk/src/airflow/sdk/execution_time/comms.py
|
{
"start": 20545,
"end": 21157
}
|
class ____(TaskBreadcrumbsResponse):
    type: Literal["TaskBreadcrumbsResult"] = "TaskBreadcrumbsResult"

    @classmethod
    def from_api_response(cls, response: TaskBreadcrumbsResponse) -> TaskBreadcrumbsResult:
        """Build a result object from an auto-generated API response.

        The response model generated from the API schema has no
        discriminator field, which the Supervisor <-> task-process
        communication requires, so its fields are copied into this
        subclass with the ``type`` discriminator attached.
        """
        payload = response.model_dump(exclude_defaults=True)
        return cls(**payload, type="TaskBreadcrumbsResult")
|
TaskBreadcrumbsResult
|
python
|
rq__rq
|
rq/local.py
|
{
"start": 2259,
"end": 4738
}
|
class ____:
    """This class works similar to a :class:`Local` but keeps a stack
    of objects instead. This is best explained with an example::

        >>> ls = LocalStack()
        >>> ls.push(42)
        >>> ls.top
        42
        >>> ls.push(23)
        >>> ls.top
        23
        >>> ls.pop()
        23
        >>> ls.top
        42

    They can be force released by using a :class:`LocalManager` or with
    the :func:`release_local` function but the correct way is to pop the
    item from the stack after using. When the stack is empty it will
    no longer be bound to the current context (and as such released).

    By calling the stack without arguments it returns a proxy that resolves to
    the topmost item on the stack.

    .. versionadded:: 0.6.1
    """

    def __init__(self):
        # All state lives in a context-local Local, so each execution
        # context sees its own independent stack.
        self._local = Local()

    def __release_local__(self):
        # Lets release_local() clear this stack the same way as a plain Local.
        self._local.__release_local__()

    def _get__ident_func__(self):
        return self._local.__ident_func__

    def _set__ident_func__(self, value):
        # object.__setattr__ bypasses Local's per-context attribute storage
        # so the identity function is set on the Local object itself.
        object.__setattr__(self._local, '__ident_func__', value)
    __ident_func__ = property(_get__ident_func__, _set__ident_func__)
    # Remove the helpers so only the property remains on the class.
    del _get__ident_func__, _set__ident_func__

    def __call__(self):
        def _lookup():
            rv = self.top
            if rv is None:
                raise RuntimeError('object unbound')
            return rv
        # The proxy re-resolves the top of the stack on every access.
        return LocalProxy(_lookup)

    def push(self, obj):
        """Pushes a new item to the stack"""
        rv = getattr(self._local, 'stack', None)
        if rv is None:
            self._local.stack = rv = []
        rv.append(obj)
        return rv

    def pop(self):
        """Removes the topmost item from the stack, will return the
        old value or `None` if the stack was already empty.
        """
        stack = getattr(self._local, 'stack', None)
        if stack is None:
            return None
        elif len(stack) == 1:
            # Last item: release the whole context binding so the stack is
            # unbound, then return the item (the list object is still
            # reachable through the local variable `stack`).
            release_local(self._local)
            return stack[-1]
        else:
            return stack.pop()

    @property
    def top(self):
        """The topmost item on the stack. If the stack is empty,
        `None` is returned.
        """
        # AttributeError: no stack bound in this context;
        # IndexError: stack list exists but is empty.
        try:
            return self._local.stack[-1]
        except (AttributeError, IndexError):
            return None

    def __len__(self):
        stack = getattr(self._local, 'stack', None)
        if stack is None:
            return 0
        return len(stack)
|
LocalStack
|
python
|
pallets__werkzeug
|
src/werkzeug/routing/rules.py
|
{
"start": 585,
"end": 3326
}
|
class ____:
    """A part of a rule.

    Rules can be represented by parts as delimited by `/` with
    instances of this class representing those parts. The *content* is
    either the raw content if *static* or a regex string to match
    against. The *weight* can be used to order parts when matching.
    """

    # Literal text when `static`, otherwise a regex source string.
    content: str
    # NOTE(review): presumably marks the final part of a rule — confirm
    # against the rule compiler.
    final: bool
    # True when `content` is literal text rather than a regex.
    static: bool
    # NOTE(review): presumably indicates a strict-slashes suffix variant —
    # confirm against the rule compiler.
    suffixed: bool
    # Sort key used to order parts during matching.
    weight: Weighting
_part_re = re.compile(
r"""
(?:
(?P<slash>/) # a slash
|
(?P<static>[^</]+) # static rule data
|
(?:
<
(?:
(?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
(?:\((?P<arguments>.*?)\))? # converter arguments
: # variable delimiter
)?
(?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name
>
)
)
""",
re.VERBOSE,
)
_simple_rule_re = re.compile(r"<([^>]+)>")
_converter_args_re = re.compile(
r"""
\s*
((?P<name>\w+)\s*=\s*)?
(?P<value>
True|False|
\d+.\d+|
\d+.|
\d+|
[\w\d_.]+|
[urUR]?(?P<stringval>"[^"]*?"|'[^']*')
)\s*,
""",
re.VERBOSE,
)
_PYTHON_CONSTANTS = {"None": None, "True": True, "False": False}
def _find(value: str, target: str, pos: int) -> int:
    """Find the *target* in *value* after *pos*.

    Returns the *value* length if *target* isn't found.
    """
    location = value.find(target, pos)
    return len(value) if location == -1 else location
def _pythonize(value: str) -> None | bool | int | float | str:
    """Convert a converter-argument token into a Python value.

    Recognizes ``None``/``True``/``False``, then ints, then floats;
    quoted strings are unquoted; anything else is returned as a string.
    """
    if value in _PYTHON_CONSTANTS:
        return _PYTHON_CONSTANTS[value]

    for convert in (int, float):
        try:
            return convert(value)
        except ValueError:
            continue

    # Strip one matching pair of surrounding quotes, if present.
    if value[:1] == value[-1:] and value[0] in "\"'":
        value = value[1:-1]

    return str(value)
def parse_converter_args(argstr: str) -> tuple[tuple[t.Any, ...], dict[str, t.Any]]:
    """Parse a converter argument string into (args, kwargs).

    Raises ValueError when a segment of *argstr* cannot be parsed.
    """
    # The regex consumes a trailing comma per token, so append one.
    argstr += ","
    args = []
    kwargs = {}
    position = 0

    for item in _converter_args_re.finditer(argstr):
        # Every character must be consumed by consecutive matches.
        if item.start() != position:
            raise ValueError(
                f"Cannot parse converter argument '{argstr[position : item.start()]}'"
            )

        value = item.group("stringval")
        if value is None:
            value = item.group("value")
        value = _pythonize(value)

        name = item.group("name")
        if name:
            kwargs[name] = value
        else:
            args.append(value)

        position = item.end()

    return tuple(args), kwargs
|
RulePart
|
python
|
pypa__pipenv
|
pipenv/vendor/tomlkit/items.py
|
{
"start": 8211,
"end": 8629
}
|
class ____:
    """
    Trivia information (aka metadata).
    """

    # Whitespace before a value.
    indent: str = ""
    # Whitespace after a value, but before a comment.
    comment_ws: str = ""
    # Comment, starting with # character, or empty string if no comment.
    comment: str = ""
    # Trailing newline.
    trail: str = "\n"

    def copy(self) -> Trivia:
        # dataclasses.replace builds a new instance with identical field
        # values; it requires that @dataclass is applied at the definition
        # site of this class.
        return dataclasses.replace(self)
|
Trivia
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 1549769,
"end": 1556004
}
|
class ____(VegaLiteSchema):
    """
    UnitSpecWithFrame schema wrapper.

    Parameters
    ----------
    mark : dict, :class:`Mark`, :class:`AnyMark`, :class:`BoxPlot`, :class:`MarkDef`, :class:`ErrorBar`, :class:`ErrorBand`, :class:`BoxPlotDef`, :class:`ErrorBarDef`, :class:`ErrorBandDef`, :class:`CompositeMark`, :class:`CompositeMarkDef`, Literal['arc', 'area', 'bar', 'image', 'line', 'point', 'rect', 'rule', 'text', 'tick', 'trail', 'circle', 'square', 'geoshape', 'boxplot', 'errorband', 'errorbar']
        A string describing the mark type (one of ``"bar"``, ``"circle"``, ``"square"``,
        ``"tick"``, ``"line"``, ``"area"``, ``"point"``, ``"rule"``, ``"geoshape"``, and
        ``"text"``) or a `mark definition object
        <https://vega.github.io/vega-lite/docs/mark.html#mark-def>`__.
    data : dict, :class:`Data`, :class:`UrlData`, :class:`Generator`, :class:`NamedData`, :class:`DataSource`, :class:`InlineData`, :class:`SphereGenerator`, :class:`SequenceGenerator`, :class:`GraticuleGenerator`, None
        An object describing the data source. Set to ``null`` to ignore the parent's data
        source. If no data is set, it is derived from the parent.
    description : str
        Description of this mark for commenting purpose.
    encoding : dict, :class:`Encoding`
        A key-value mapping between encoding channels and definition of fields.
    height : dict, float, :class:`Step`, Literal['container']
        The height of a visualization.

        * For a plot with a continuous y-field, height should be a number.
        * For a plot with either a discrete y-field or no y-field, height can be either a
          number indicating a fixed height or an object in the form of ``{step: number}``
          defining the height per discrete step. (No y-field is equivalent to having one
          discrete step.)
        * To enable responsive sizing on height, it should be set to ``"container"``.

        **Default value:** Based on ``config.view.continuousHeight`` for a plot with a
        continuous y-field and ``config.view.discreteHeight`` otherwise.

        **Note:** For plots with `row and column channels
        <https://vega.github.io/vega-lite/docs/encoding.html#facet>`__, this represents the
        height of a single view and the ``"container"`` option cannot be used.

        **See also:** `height <https://vega.github.io/vega-lite/docs/size.html>`__
        documentation.
    name : str
        Name of the visualization for later reference.
    params : Sequence[dict, :class:`SelectionParameter`]
        An array of parameters that may either be simple variables, or more complex
        selections that map user input to data queries.
    projection : dict, :class:`Projection`
        An object defining properties of geographic projection, which will be applied to
        ``shape`` path for ``"geoshape"`` marks and to ``latitude`` and ``"longitude"``
        channels for other marks.
    title : str, dict, :class:`Text`, Sequence[str], :class:`TitleParams`
        Title for the plot.
    transform : Sequence[dict, :class:`Transform`, :class:`BinTransform`, :class:`FoldTransform`, :class:`LoessTransform`, :class:`PivotTransform`, :class:`StackTransform`, :class:`ExtentTransform`, :class:`FilterTransform`, :class:`ImputeTransform`, :class:`LookupTransform`, :class:`SampleTransform`, :class:`WindowTransform`, :class:`DensityTransform`, :class:`FlattenTransform`, :class:`QuantileTransform`, :class:`TimeUnitTransform`, :class:`AggregateTransform`, :class:`CalculateTransform`, :class:`RegressionTransform`, :class:`JoinAggregateTransform`]
        An array of data transformations such as filter and new field calculation.
    view : dict, :class:`ViewBackground`
        An object defining the view background's fill and stroke.

        **Default value:** none (transparent)
    width : dict, float, :class:`Step`, Literal['container']
        The width of a visualization.

        * For a plot with a continuous x-field, width should be a number.
        * For a plot with either a discrete x-field or no x-field, width can be either a
          number indicating a fixed width or an object in the form of ``{step: number}``
          defining the width per discrete step. (No x-field is equivalent to having one
          discrete step.)
        * To enable responsive sizing on width, it should be set to ``"container"``.

        **Default value:** Based on ``config.view.continuousWidth`` for a plot with a
        continuous x-field and ``config.view.discreteWidth`` otherwise.

        **Note:** For plots with `row and column channels
        <https://vega.github.io/vega-lite/docs/encoding.html#facet>`__, this represents the
        width of a single view and the ``"container"`` option cannot be used.

        **See also:** `width <https://vega.github.io/vega-lite/docs/size.html>`__
        documentation.
    """

    # JSON-schema reference validated against by the VegaLiteSchema machinery.
    _schema = {"$ref": "#/definitions/UnitSpecWithFrame"}

    def __init__(
        self,
        mark: Optional[SchemaBase | Map | Mark_T | CompositeMark_T] = Undefined,
        data: Optional[SchemaBase | ChartDataType | Map | None] = Undefined,
        description: Optional[str] = Undefined,
        encoding: Optional[SchemaBase | Map] = Undefined,
        height: Optional[float | SchemaBase | Literal["container"] | Map] = Undefined,
        name: Optional[str] = Undefined,
        params: Optional[Sequence[SchemaBase | Map]] = Undefined,
        projection: Optional[SchemaBase | Map] = Undefined,
        title: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
        transform: Optional[Sequence[SchemaBase | Map]] = Undefined,
        view: Optional[SchemaBase | Map] = Undefined,
        width: Optional[float | SchemaBase | Literal["container"] | Map] = Undefined,
        **kwds,
    ):
        # All arguments are forwarded unchanged to the base schema wrapper;
        # `Undefined` entries are omitted from the serialized spec.
        super().__init__(
            mark=mark,
            data=data,
            description=description,
            encoding=encoding,
            height=height,
            name=name,
            params=params,
            projection=projection,
            title=title,
            transform=transform,
            view=view,
            width=width,
            **kwds,
        )
|
UnitSpecWithFrame
|
python
|
modin-project__modin
|
modin/tests/pandas/test_io.py
|
{
"start": 111630,
"end": 112357
}
|
class ____:
    # There is no dedicated test infrastructure for the `orc` format.
    # Since the operation defaults to pandas, it is enough to verify that
    # every argument is forwarded to the pandas implementation unchanged.
    def test_read_orc(self):
        call_args = ("fake_path",)
        call_kwargs = {
            "columns": ["A"],
            "dtype_backend": lib.no_default,
            "filesystem": None,
            "fake_kwarg": "some_pyarrow_parameter",
        }
        with mock.patch(
            "pandas.read_orc", return_value=pandas.DataFrame([])
        ) as read_orc:
            pd.read_orc(*call_args, **call_kwargs)
        read_orc.assert_called_once_with(*call_args, **call_kwargs)
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
|
TestOrc
|
python
|
joke2k__faker
|
faker/providers/ssn/fr_CH/__init__.py
|
{
"start": 66,
"end": 1495
}
|
class ____(SsnProvider):
    ssn_formats = ("###.####.####.##",)

    def ssn(self) -> str:
        """
        Returns a 13 digits Swiss SSN named AHV (German) or
        AVS (French and Italian)

        See: http://www.bsv.admin.ch/themen/ahv/00011/02185/
        """

        def _checksum(digits):
            # EAN-13 style check digit: even positions weighted 1x,
            # odd positions weighted 3x.
            evensum = sum(digits[:-1:2])
            oddsum = sum(digits[1::2])
            return (10 - ((evensum + oddsum * 3) % 10)) % 10

        # Swiss AHV numbers always start with country code 756, followed by
        # nine random digits (drawn without replacement) and a check digit.
        ahv_digits: List[int] = [7, 5, 6]
        ahv_digits += self.generator.random.sample(range(10), 9)
        ahv_digits.append(_checksum(ahv_digits))

        joined = "".join(str(d) for d in ahv_digits)
        return f"{joined[:3]}.{joined[3:7]}.{joined[7:11]}.{joined[11:]}"

    def vat_id(self) -> str:
        """
        :return: Swiss UID number
        """

        def _checksum(digits):
            # Modulo-11 check digit with fixed weights.
            code = ["8", "6", "4", "2", "3", "5", "9", "7"]
            remainder = 11 - (sum(int(c) * int(d) for c, d in zip(code, digits)) % 11)
            if remainder == 10:
                return 0
            if remainder == 11:
                return 5
            return remainder

        vat_id: str = self.numerify("########")
        return "CHE" + vat_id + str(_checksum(vat_id))
|
Provider
|
python
|
optuna__optuna
|
optuna/storages/_rdb/models.py
|
{
"start": 15674,
"end": 18325
}
|
class ____(BaseModel):
    # Tag describing how a float is stored: non-finite values cannot live in
    # the FLOAT column, so they are stored as NULL plus one of these tags.
    class TrialIntermediateValueType(enum.Enum):
        FINITE = 1
        INF_POS = 2
        INF_NEG = 3
        NAN = 4

    __tablename__ = "trial_intermediate_values"
    # At most one intermediate value per (trial, step) pair.
    __table_args__: Any = (UniqueConstraint("trial_id", "step"),)
    trial_intermediate_value_id = _Column(Integer, primary_key=True)
    trial_id = _Column(Integer, ForeignKey("trials.trial_id"), nullable=False)
    step = _Column(Integer, nullable=False)
    # NULL whenever intermediate_value_type is not FINITE (see the codecs below).
    intermediate_value = _Column(Float(precision=FLOAT_PRECISION), nullable=True)
    intermediate_value_type = _Column(Enum(TrialIntermediateValueType), nullable=False)
    trial = orm.relationship(
        TrialModel, backref=orm.backref("intermediate_values", cascade="all, delete-orphan")
    )

    @classmethod
    def intermediate_value_to_stored_repr(
        cls, value: float
    ) -> tuple[float | None, TrialIntermediateValueType]:
        """Encode a float into its (column value, type tag) stored form."""
        if math.isnan(value):
            return None, cls.TrialIntermediateValueType.NAN
        elif value == float("inf"):
            return None, cls.TrialIntermediateValueType.INF_POS
        elif value == float("-inf"):
            return None, cls.TrialIntermediateValueType.INF_NEG
        else:
            return value, cls.TrialIntermediateValueType.FINITE

    @classmethod
    def stored_repr_to_intermediate_value(
        cls, value: float | None, float_type: TrialIntermediateValueType
    ) -> float:
        """Decode the (column value, type tag) stored form back into a float."""
        if float_type == cls.TrialIntermediateValueType.NAN:
            assert value is None
            return float("nan")
        elif float_type == cls.TrialIntermediateValueType.INF_POS:
            assert value is None
            return float("inf")
        elif float_type == cls.TrialIntermediateValueType.INF_NEG:
            assert value is None
            return float("-inf")
        else:
            assert float_type == cls.TrialIntermediateValueType.FINITE
            assert value is not None
            return value

    @classmethod
    def find_by_trial_and_step(
        cls, trial: TrialModel, step: int, session: orm.Session
    ) -> "TrialIntermediateValueModel" | None:
        """Return the row for (trial, step), or None when absent."""
        trial_intermediate_value = (
            session.query(cls)
            .filter(cls.trial_id == trial.trial_id)
            .filter(cls.step == step)
            .one_or_none()
        )
        return trial_intermediate_value

    @classmethod
    def where_trial_id(
        cls, trial_id: int, session: orm.Session
    ) -> list["TrialIntermediateValueModel"]:
        """Return all intermediate-value rows belonging to *trial_id*."""
        trial_intermediate_values = session.query(cls).filter(cls.trial_id == trial_id).all()
        return trial_intermediate_values
|
TrialIntermediateValueModel
|
python
|
altair-viz__altair
|
altair/datasets/_reader.py
|
{
"start": 3433,
"end": 12629
}
|
class ____(Generic[IntoDataFrameT, IntoFrameT]):
    """
    Modular file reader, targeting remote & local tabular resources.

    .. warning::
        Use ``reader(...)`` instead of instantiating ``Reader`` directly.
    """

    _read: Sequence[Read[IntoDataFrameT]]
    """Eager file read functions."""

    _scan: Sequence[Scan[IntoFrameT]]
    """Lazy file read functions."""

    _name: str
    """
    Used in error messages, repr and matching ``@overload``(s).

    Otherwise, has no concrete meaning.
    """

    _implementation: nw.Implementation
    """
    Corresponding `narwhals implementation`_.

    .. _narwhals implementation:
        https://github.com/narwhals-dev/narwhals/blob/9b6a355530ea46c590d5a6d1d0567be59c0b5742/narwhals/utils.py#L61-L290
    """

    _opener: ClassVar[OpenerDirector] = _build_opener()
    # Bundled parquet file describing every dataset (name, url, suffix, ...).
    _metadata_path: ClassVar[Path] = (
        Path(__file__).parent / "_metadata" / "metadata.parquet"
    )

    def __init__(
        self,
        read: Sequence[Read[IntoDataFrameT]],
        scan: Sequence[Scan[IntoFrameT]],
        name: str,
        implementation: nw.Implementation,
    ) -> None:
        self._read = read
        self._scan = scan
        self._name = name
        self._implementation = implementation
        self._schema_cache = SchemaCache(implementation=implementation)

    def __repr__(self) -> str:
        from textwrap import indent

        PREFIX = " " * 4
        NL = "\n"
        body = f"read\n{indent(NL.join(str(el) for el in self._read), PREFIX)}"
        if self._scan:
            body += f"\nscan\n{indent(NL.join(str(el) for el in self._scan), PREFIX)}"
        return f"Reader[{self._name}] {self._implementation!r}\n{body}"

    def read_fn(self, meta: Metadata, /) -> Callable[..., IntoDataFrameT]:
        """Pick the first eager read function satisfying *meta*'s constraints."""
        return self._solve(meta, self._read)

    def scan_fn(self, meta: Metadata | Path | str, /) -> Callable[..., IntoFrameT]:
        """Pick the first lazy scan function; a bare path is matched by suffix."""
        meta = meta if isinstance(meta, Mapping) else {"suffix": _into_suffix(meta)}
        return self._solve(meta, self._scan)

    @property
    def cache(self) -> DatasetCache:
        return DatasetCache(self)

    def _handle_pyarrow_date_error(self, e: Exception, name: str) -> None:
        """Handle PyArrow date parsing errors with informative error messages, see https://github.com/apache/arrow/issues/41488."""
        # Detect the pyarrow-specific failure by message text and module name;
        # anything else is re-raised unchanged.
        if "CSV conversion error to date" in str(e) and "pyarrow" in str(
            type(e).__module__
        ):
            message = (
                f"PyArrow cannot parse date format in dataset '{name}'. "
                f"This is a known limitation of PyArrow's CSV reader for non-ISO date formats.\n\n"
                f"Alternatives:\n"
                f"1. Use a different backend: data.{name}(engine='pandas') or data.{name}(engine='polars')\n"
                f"2. Convert dates manually after loading as strings\n\n"
                f"Original error: {e}"
            )
            raise AltairDatasetsError(message) from e
        raise e

    def dataset(
        self,
        name: Dataset | LiteralString,
        suffix: Extension | None = None,
        /,
        **kwds: Any,
    ) -> IntoDataFrameT:
        """Load dataset *name*, from the local cache when active, else via URL."""
        frame = self._query(name, suffix)
        meta = next(_iter_metadata(frame))
        fn = self.read_fn(meta)
        fn_kwds = self._merge_kwds(meta, kwds)
        if self.cache.is_active():
            fp = self.cache._maybe_download(meta)
            try:
                return fn(fp, **fn_kwds)
            except Exception as e:
                self._handle_pyarrow_date_error(e, name)
                raise
        else:
            with self._opener.open(meta["url"]) as f:
                try:
                    return fn(f, **fn_kwds)
                except Exception as e:
                    self._handle_pyarrow_date_error(e, name)
                    raise

    def url(
        self, name: Dataset | LiteralString, suffix: Extension | None = None, /
    ) -> str:
        """Return the remote URL for dataset *name* without downloading it."""
        frame = self._query(name, suffix)
        meta = next(_iter_metadata(frame))
        # Parquet URLs are only usable when vegafusion is installed.
        if is_parquet(meta.items()) and not is_available("vegafusion"):
            raise AltairDatasetsError.from_url(meta)
        url = meta["url"]
        if isinstance(url, str):
            return url
        else:
            msg = f"Expected 'str' but got {type(url).__name__!r}\nfrom {url!r}."
            raise TypeError(msg)

    # TODO: (Multiple)
    # - Settle on a better name
    # - Add method to `Loader`
    # - Move docs to `Loader.{new name}`
    def open_markdown(self, name: Dataset, /) -> None:
        """
        Learn more about a dataset, opening `vega-datasets/datapackage.md`_ with the default browser.

        Additional info *may* include: `description`_, `schema`_, `sources`_, `licenses`_.

        .. _vega-datasets/datapackage.md:
            https://github.com/vega/vega-datasets/blob/main/datapackage.md
        .. _description:
            https://datapackage.org/standard/data-resource/#description
        .. _schema:
            https://datapackage.org/standard/table-schema/#schema
        .. _sources:
            https://datapackage.org/standard/data-package/#sources
        .. _licenses:
            https://datapackage.org/standard/data-package/#licenses
        """
        import webbrowser

        from altair.utils import VERSIONS

        # Markdown anchors are the file name with the dot removed.
        ref = self._query(name).get_column("file_name").item(0).replace(".", "")
        tag = VERSIONS["vega-datasets"]
        url = f"https://github.com/vega/vega-datasets/blob/v{tag}/datapackage.md#{ref}"
        webbrowser.open(url)

    @overload
    def profile(self, *, show: Literal[False] = ...) -> _SupportProfile: ...

    @overload
    def profile(self, *, show: Literal[True]) -> None: ...

    def profile(self, *, show: bool = False) -> _SupportProfile | None:
        """
        Describe which datasets can be loaded as tabular data.

        Parameters
        ----------
        show
            Print a densely formatted repr *instead of* returning a mapping.
        """
        relevant_columns = set(
            chain.from_iterable(impl._relevant_columns for impl in self._read)
        )
        frame = self._scan_metadata().select("dataset_name", *relevant_columns)
        # A dataset is supported if ANY eager read impl can include it.
        inc_expr = nw.any_horizontal(impl._include_expr for impl in self._read)
        result: _SupportProfile = {
            "unsupported": _dataset_names(frame, ~inc_expr),
            "supported": _dataset_names(frame, inc_expr),
        }
        if show:
            import pprint

            pprint.pprint(result, compact=True, sort_dicts=False)
            return None
        return result

    def _query(
        self, name: Dataset | LiteralString, suffix: Extension | None = None, /
    ) -> nw.DataFrame[IntoDataFrameT]:
        """
        Query a tabular version of `vega-datasets/datapackage.json`_.

        Applies a filter, erroring out when no results would be returned.

        .. _vega-datasets/datapackage.json:
            https://github.com/vega/vega-datasets/blob/main/datapackage.json
        """
        constraints = _into_constraints(name, suffix)
        frame = self._scan_metadata(**constraints).collect()
        if not frame.is_empty():
            return frame
        else:
            msg = f"Found no results for:\n    {constraints!r}"
            raise ValueError(msg)

    def _merge_kwds(self, meta: Metadata, kwds: dict[str, Any], /) -> Mapping[str, Any]:
        """
        Extend user-provided arguments with dataset & library-specfic defaults.

        .. important:: User-provided arguments have a higher precedence.
        """
        if self._schema_cache.is_active() and (
            schema := self._schema_cache.schema_kwds(meta)
        ):
            # dict-union: later (user) keys win over schema defaults.
            kwds = schema | kwds if kwds else schema
        return kwds

    @property
    def _metadata_frame(self) -> nw.LazyFrame[IntoFrameT]:
        fp = self._metadata_path
        return nw.from_native(self.scan_fn(fp)(fp)).lazy()

    def _scan_metadata(
        self, *predicates: OneOrSeq[IntoExpr], **constraints: Unpack[Metadata]
    ) -> nw.LazyFrame[IntoFrameT]:
        if predicates or constraints:
            return self._metadata_frame.filter(*predicates, **constraints)
        return self._metadata_frame

    def _solve(
        self, meta: Metadata, impls: Sequence[BaseImpl[R]], /
    ) -> Callable[..., R]:
        """
        Return the first function that satisfies dataset constraints.

        See Also
        --------
        ``altair.datasets._readimpl.BaseImpl.unwrap_or_skip``
        """
        items = meta.items()
        it = (some for impl in impls if (some := impl.unwrap_or_skip(items)))
        if fn_or_err := next(it, None):
            # An impl may match but still report an error value rather than
            # a callable; surface it as the dataset-specific exception.
            if _is_err(fn_or_err):
                raise fn_or_err.from_tabular(meta, self._name)
            return fn_or_err
        raise AltairDatasetsError.from_tabular(meta, self._name)
def _dataset_names(
frame: nw.LazyFrame, *predicates: OneOrSeq[IntoExpr]
) -> Sequence[Dataset]:
# NOTE: helper function for `Reader.profile`
return (
frame.filter(*predicates)
.select("dataset_name")
.collect()
.get_column("dataset_name")
.to_list()
)
|
Reader
|
python
|
django__django
|
tests/migrations/test_migrations_run_before/0003_third.py
|
{
"start": 43,
"end": 656
}
|
class ____(migrations.Migration):
"""
This is a wee bit crazy, but it's just to show that run_before works.
"""
dependencies = [
("migrations", "0001_initial"),
]
run_before = [
("migrations", "0002_second"),
]
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
],
)
]
|
Migration
|
python
|
mlflow__mlflow
|
mlflow/dspy/callback.py
|
{
"start": 1522,
"end": 17571
}
|
class ____(BaseCallback):
"""Callback for generating MLflow traces for DSPy components"""
def __init__(self, dependencies_schema: dict[str, Any] | None = None):
self._dependencies_schema = dependencies_schema
# call_id: (LiveSpan, OTel token)
self._call_id_to_span: dict[str, SpanWithToken] = {}
self._call_id_to_module: dict[str, Any] = {}
###### state management for optimization process ######
# The current callback logic assumes there is no optimization running in parallel.
# The state management may not work when multiple optimizations are running in parallel.
# optimizer_stack_level is used to determine if the callback is called within compile
# we cannot use boolean flag because the callback can be nested
self.optimizer_stack_level = 0
# call_id: (key, step)
self._call_id_to_metric_key: dict[str, tuple[str, int]] = {}
self._evaluation_counter = defaultdict(int)
self._disabled_eval_call_ids = set()
self._eval_runs_started: set[str] = set()
def set_dependencies_schema(self, dependencies_schema: dict[str, Any]):
if self._dependencies_schema:
raise MlflowException(
"Dependencies schema should be set only once to the callback.",
error_code=MlflowException.INVALID_PARAMETER_VALUE,
)
self._dependencies_schema = dependencies_schema
@skip_if_trace_disabled
def on_module_start(self, call_id: str, instance: Any, inputs: dict[str, Any]):
span_type = self._get_span_type_for_module(instance)
attributes = self._get_span_attribute_for_module(instance)
# The __call__ method of dspy.Module has a signature of (self, *args, **kwargs),
# while all built-in modules only accepts keyword arguments. To avoid recording
# empty "args" key in the inputs, we remove it if it's empty.
if "args" in inputs and not inputs["args"]:
inputs.pop("args")
self._start_span(
call_id,
name=f"{instance.__class__.__name__}.forward",
span_type=span_type,
inputs=self._unpack_kwargs(inputs),
attributes=attributes,
)
self._call_id_to_module[call_id] = instance
@skip_if_trace_disabled
def on_module_end(self, call_id: str, outputs: Any | None, exception: Exception | None = None):
instance = self._call_id_to_module.pop(call_id)
attributes = {}
if _get_fully_qualified_class_name(instance) == "dspy.retrieve.databricks_rm.DatabricksRM":
from mlflow.entities.document import Document
if isinstance(outputs, dspy.Prediction):
# Convert outputs to MLflow document format to make it compatible with
# agent evaluation.
num_docs = len(outputs.doc_ids)
doc_uris = outputs.doc_uris if outputs.doc_uris is not None else [None] * num_docs
outputs = [
Document(
page_content=doc_content,
metadata={
"doc_id": doc_id,
"doc_uri": doc_uri,
}
| extra_column_dict,
id=doc_id,
).to_dict()
for doc_content, doc_id, doc_uri, extra_column_dict in zip(
outputs.docs,
outputs.doc_ids,
doc_uris,
outputs.extra_columns,
)
]
else:
# NB: DSPy's Prediction object is a customized dictionary-like object, but its repr
# is not easy to read on UI. Therefore, we unpack it to a dictionary.
# https://github.com/stanfordnlp/dspy/blob/6fe693528323c9c10c82d90cb26711a985e18b29/dspy/primitives/prediction.py#L21-L28
if isinstance(outputs, dspy.Prediction):
usage_by_model = (
outputs.get_lm_usage() if hasattr(outputs, "get_lm_usage") else None
)
outputs = outputs.toDict()
if usage_by_model:
usage_data = {
TokenUsageKey.INPUT_TOKENS: 0,
TokenUsageKey.OUTPUT_TOKENS: 0,
TokenUsageKey.TOTAL_TOKENS: 0,
}
for usage in usage_by_model.values():
usage_data[TokenUsageKey.INPUT_TOKENS] += usage.get("prompt_tokens", 0)
usage_data[TokenUsageKey.OUTPUT_TOKENS] += usage.get("completion_tokens", 0)
usage_data[TokenUsageKey.TOTAL_TOKENS] += usage.get("total_tokens", 0)
attributes[SpanAttributeKey.CHAT_USAGE] = usage_data
self._end_span(call_id, outputs, exception, attributes)
@skip_if_trace_disabled
def on_lm_start(self, call_id: str, instance: Any, inputs: dict[str, Any]):
span_type = (
SpanType.CHAT_MODEL if getattr(instance, "model_type", None) == "chat" else SpanType.LLM
)
filtered_kwargs = {
key: value
for key, value in instance.kwargs.items()
if key not in {"api_key", "api_base"}
}
attributes = {
**filtered_kwargs,
"model": instance.model,
"model_type": instance.model_type,
"cache": instance.cache,
SpanAttributeKey.MESSAGE_FORMAT: "dspy",
}
inputs = self._unpack_kwargs(inputs)
self._start_span(
call_id,
name=f"{instance.__class__.__name__}.__call__",
span_type=span_type,
inputs=inputs,
attributes=attributes,
)
@skip_if_trace_disabled
def on_lm_end(self, call_id: str, outputs: Any | None, exception: Exception | None = None):
self._end_span(call_id, outputs, exception)
@skip_if_trace_disabled
def on_adapter_format_start(self, call_id: str, instance: Any, inputs: dict[str, Any]):
self._start_span(
call_id,
name=f"{instance.__class__.__name__}.format",
span_type=SpanType.PARSER,
inputs=self._unpack_kwargs(inputs),
attributes={},
)
@skip_if_trace_disabled
def on_adapter_format_end(
self, call_id: str, outputs: Any | None, exception: Exception | None = None
):
self._end_span(call_id, outputs, exception)
@skip_if_trace_disabled
def on_adapter_parse_start(self, call_id: str, instance: Any, inputs: dict[str, Any]):
self._start_span(
call_id,
name=f"{instance.__class__.__name__}.parse",
span_type=SpanType.PARSER,
inputs=self._unpack_kwargs(inputs),
attributes={},
)
@skip_if_trace_disabled
def on_adapter_parse_end(
self, call_id: str, outputs: Any | None, exception: Exception | None = None
):
self._end_span(call_id, outputs, exception)
@skip_if_trace_disabled
def on_tool_start(self, call_id: str, instance: Any, inputs: dict[str, Any]):
# DSPy uses the special "finish" tool to signal the end of the agent.
if instance.name == "finish":
return
inputs = self._unpack_kwargs(inputs)
# Tools are always called with keyword arguments only.
inputs.pop("args", None)
self._start_span(
call_id,
name=f"Tool.{instance.name}",
span_type=SpanType.TOOL,
inputs=inputs,
attributes={
"name": instance.name,
"description": instance.desc,
"args": instance.args,
},
)
@skip_if_trace_disabled
def on_tool_end(self, call_id: str, outputs: Any | None, exception: Exception | None = None):
if call_id in self._call_id_to_span:
self._end_span(call_id, outputs, exception)
def on_evaluate_start(self, call_id: str, instance: Any, inputs: dict[str, Any]):
"""
Callback handler at the beginning of evaluation call. Available with DSPy>=2.6.9.
This callback starts a nested run for each evaluation call inside optimization.
If called outside optimization and no active run exists, it creates a new run.
"""
if not get_autologging_config(FLAVOR_NAME, "log_evals"):
return
key = "eval"
if callback_metadata := inputs.get("callback_metadata"):
if "metric_key" in callback_metadata:
key = callback_metadata["metric_key"]
if callback_metadata.get("disable_logging"):
self._disabled_eval_call_ids.add(call_id)
return
started_run = False
if self.optimizer_stack_level > 0:
with _lock:
# we may want to include optimizer_stack_level in the key
# to handle nested optimization
step = self._evaluation_counter[key]
self._evaluation_counter[key] += 1
self._call_id_to_metric_key[call_id] = (key, step)
mlflow.start_run(run_name=f"{key}_{step}", nested=True)
started_run = True
elif mlflow.active_run() is None:
mlflow.start_run(run_name=key, nested=True)
started_run = True
if started_run:
self._eval_runs_started.add(call_id)
if program := inputs.get("program"):
save_dspy_module_state(program, "model.json")
log_dspy_module_params(program)
# Log the current DSPy LM state
log_dspy_lm_state()
def on_evaluate_end(
self,
call_id: str,
outputs: Any,
exception: Exception | None = None,
):
"""
Callback handler at the end of evaluation call. Available with DSPy>=2.6.9.
This callback logs the evaluation score to the individual run
and add eval metric to the parent run if called inside optimization.
"""
if not get_autologging_config(FLAVOR_NAME, "log_evals"):
return
if call_id in self._disabled_eval_call_ids:
self._disabled_eval_call_ids.discard(call_id)
return
run_started = call_id in self._eval_runs_started
if exception:
if run_started:
mlflow.end_run(status=RunStatus.to_string(RunStatus.FAILED))
self._eval_runs_started.discard(call_id)
return
score = None
if isinstance(outputs, float):
score = outputs
elif isinstance(outputs, tuple):
score = outputs[0]
elif isinstance(outputs, dspy.Prediction):
score = float(outputs)
try:
mlflow.log_table(self._generate_result_table(outputs.results), "result_table.json")
except Exception:
_logger.debug("Failed to log result table.", exc_info=True)
if score is not None:
mlflow.log_metric("eval", score)
if run_started:
mlflow.end_run()
self._eval_runs_started.discard(call_id)
# Log the evaluation score to the parent run if called inside optimization
if self.optimizer_stack_level > 0 and mlflow.active_run() is not None:
if call_id not in self._call_id_to_metric_key:
return
key, step = self._call_id_to_metric_key.pop(call_id)
if score is not None:
mlflow.log_metric(
key,
score,
step=step,
)
def reset(self):
self._call_id_to_metric_key: dict[str, tuple[str, int]] = {}
self._evaluation_counter = defaultdict(int)
self._eval_runs_started = set()
def _start_span(
self,
call_id: str,
name: str,
span_type: SpanType,
inputs: dict[str, Any],
attributes: dict[str, Any],
):
if not IS_TRACING_SDK_ONLY:
from mlflow.pyfunc.context import get_prediction_context
prediction_context = get_prediction_context()
if prediction_context and self._dependencies_schema:
prediction_context.update(**self._dependencies_schema)
else:
prediction_context = None
with maybe_set_prediction_context(prediction_context):
span = start_span_no_context(
name=name,
span_type=span_type,
parent_span=mlflow.get_current_active_span(),
inputs=inputs,
attributes=attributes,
)
token = set_span_in_context(span)
self._call_id_to_span[call_id] = SpanWithToken(span, token)
return span
def _end_span(
self,
call_id: str,
outputs: Any | None,
exception: Exception | None = None,
attributes: dict[str, Any] | None = None,
):
st = self._call_id_to_span.pop(call_id, None)
if not st.span:
_logger.warning(f"Failed to end a span. Span not found for call_id: {call_id}")
return
status = SpanStatusCode.OK if exception is None else SpanStatusCode.ERROR
if exception:
st.span.add_event(SpanEvent.from_exception(exception))
if attributes:
st.span.set_attributes(attributes)
try:
st.span.end(outputs=outputs, status=status)
finally:
detach_span_from_context(st.token)
def _get_span_type_for_module(self, instance):
if isinstance(instance, dspy.Retrieve):
return SpanType.RETRIEVER
elif isinstance(instance, dspy.ReAct):
return SpanType.AGENT
elif isinstance(instance, dspy.Predict):
return SpanType.LLM
elif isinstance(instance, dspy.Adapter):
return SpanType.PARSER
else:
return SpanType.CHAIN
def _get_span_attribute_for_module(self, instance):
if isinstance(instance, dspy.Predict):
return {"signature": instance.signature.signature}
elif isinstance(instance, dspy.ChainOfThought):
if hasattr(instance, "signature"):
signature = instance.signature.signature
else:
signature = instance.predict.signature.signature
attributes = {"signature": signature}
if hasattr(instance, "extended_signature"):
attributes["extended_signature"] = instance.extended_signature.signature
return attributes
return {}
def _unpack_kwargs(self, inputs: dict[str, Any]) -> dict[str, Any]:
"""Unpacks the kwargs from the inputs dictionary"""
# NB: Not using pop() to avoid modifying the original inputs dictionary
kwargs = inputs.get("kwargs", {})
inputs_wo_kwargs = {k: v for k, v in inputs.items() if k != "kwargs"}
merged = {**inputs_wo_kwargs, **kwargs}
return {k: _convert_signature(v) for k, v in merged.items()}
def _generate_result_table(
self, outputs: list[tuple[dspy.Example, dspy.Prediction, Any]]
) -> dict[str, list[Any]]:
result = {"score": []}
for i, (example, prediction, score) in enumerate(outputs):
for k, v in example.items():
if f"example_{k}" not in result:
result[f"example_{k}"] = [None] * i
result[f"example_{k}"].append(v)
for k, v in prediction.items():
if f"pred_{k}" not in result:
result[f"pred_{k}"] = [None] * i
result[f"pred_{k}"].append(v)
result["score"].append(score)
for k, v in result.items():
if len(v) != i + 1:
result[k].append(None)
return result
|
MlflowCallback
|
python
|
pandas-dev__pandas
|
pandas/tests/io/parser/conftest.py
|
{
"start": 2279,
"end": 2372
}
|
class ____(BaseParser):
engine = "python"
float_precision_choices = [None]
|
PythonParser
|
python
|
pyca__cryptography
|
tests/conftest.py
|
{
"start": 1663,
"end": 1816
}
|
class ____:
@contextlib.contextmanager
def test(self):
try:
yield
except pytest.skip.Exception:
pass
|
SubTests
|
python
|
numba__numba
|
numba/cuda/tests/nocuda/test_dummyarray.py
|
{
"start": 10761,
"end": 11868
}
|
class ____(unittest.TestCase):
def test_squeeze(self):
nparr = np.empty((1, 2, 1, 4, 1, 3))
arr = Array.from_desc(
0, nparr.shape, nparr.strides, nparr.dtype.itemsize
)
def _assert_equal_shape_strides(arr1, arr2):
self.assertEqual(arr1.shape, arr2.shape)
self.assertEqual(arr1.strides, arr2.strides)
_assert_equal_shape_strides(arr, nparr)
_assert_equal_shape_strides(arr.squeeze()[0], nparr.squeeze())
for axis in (0, 2, 4, (0, 2), (0, 4), (2, 4), (0, 2, 4)):
_assert_equal_shape_strides(
arr.squeeze(axis=axis)[0], nparr.squeeze(axis=axis)
)
def test_squeeze_invalid_axis(self):
nparr = np.empty((1, 2, 1, 4, 1, 3))
arr = Array.from_desc(
0, nparr.shape, nparr.strides, nparr.dtype.itemsize
)
with self.assertRaises(ValueError):
arr.squeeze(axis=1)
with self.assertRaises(ValueError):
arr.squeeze(axis=(2, 3))
@skip_on_cudasim("Tests internals of the CUDA driver device array")
|
TestSqueeze
|
python
|
ray-project__ray
|
python/ray/serve/_private/test_utils.py
|
{
"start": 19248,
"end": 21023
}
|
class ____:
def __init__(self, name: str = None, tag_keys: Tuple[str] = None):
self.name = name
self.counts = dict()
self.tags = tag_keys or ()
self.default_tags = dict()
def set_default_tags(self, tags: Dict[str, str]):
for key, tag in tags.items():
assert key in self.tags
self.default_tags[key] = tag
def inc(self, value: Union[int, float] = 1.0, tags: Dict[str, str] = None):
merged_tags = self.default_tags.copy()
merged_tags.update(tags or {})
assert set(merged_tags.keys()) == set(self.tags)
d = self.counts
for tag in self.tags[:-1]:
tag_value = merged_tags[tag]
if tag_value not in d:
d[tag_value] = dict()
d = d[tag_value]
key = merged_tags[self.tags[-1]]
d[key] = d.get(key, 0) + value
def get_count(self, tags: Dict[str, str]) -> int:
value = self.counts
for tag in self.tags:
tag_value = tags[tag]
value = value.get(tag_value)
if value is None:
return
return value
def get_tags(self):
return self.tags
@ray.remote
def get_node_id():
return ray.get_runtime_context().get_node_id()
def check_num_alive_nodes(target: int):
alive_nodes = [node for node in ray.nodes() if node["Alive"]]
assert len(alive_nodes) == target
return True
def get_deployment_details(
deployment_name: str,
app_name: str = SERVE_DEFAULT_APP_NAME,
_client: ServeControllerClient = None,
):
client = _client or _get_global_client()
details = client.get_serve_details()
return details["applications"][app_name]["deployments"][deployment_name]
@ray.remote
|
FakeCounter
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/lib/streaming/_messages.py
|
{
"start": 8829,
"end": 16958
}
|
class ____:
"""Wrapper over AsyncMessageStream that is returned by `.stream()`
so that an async context manager can be used without `await`ing the
original client call.
```py
async with client.messages.stream(...) as stream:
async for chunk in stream:
...
```
"""
def __init__(
self,
api_request: Awaitable[AsyncStream[RawMessageStreamEvent]],
) -> None:
self.__stream: AsyncMessageStream | None = None
self.__api_request = api_request
async def __aenter__(self) -> AsyncMessageStream:
raw_stream = await self.__api_request
self.__stream = AsyncMessageStream(raw_stream)
return self.__stream
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
if self.__stream is not None:
await self.__stream.close()
def build_events(
*,
event: RawMessageStreamEvent,
message_snapshot: Message,
) -> list[MessageStreamEvent]:
events_to_fire: list[MessageStreamEvent] = []
if event.type == "message_start":
events_to_fire.append(event)
elif event.type == "message_delta":
events_to_fire.append(event)
elif event.type == "message_stop":
events_to_fire.append(build(MessageStopEvent, type="message_stop", message=message_snapshot))
elif event.type == "content_block_start":
events_to_fire.append(event)
elif event.type == "content_block_delta":
events_to_fire.append(event)
content_block = message_snapshot.content[event.index]
if event.delta.type == "text_delta":
if content_block.type == "text":
events_to_fire.append(
build(
TextEvent,
type="text",
text=event.delta.text,
snapshot=content_block.text,
)
)
elif event.delta.type == "input_json_delta":
if content_block.type == "tool_use":
events_to_fire.append(
build(
InputJsonEvent,
type="input_json",
partial_json=event.delta.partial_json,
snapshot=content_block.input,
)
)
elif event.delta.type == "citations_delta":
if content_block.type == "text":
events_to_fire.append(
build(
CitationEvent,
type="citation",
citation=event.delta.citation,
snapshot=content_block.citations or [],
)
)
elif event.delta.type == "thinking_delta":
if content_block.type == "thinking":
events_to_fire.append(
build(
ThinkingEvent,
type="thinking",
thinking=event.delta.thinking,
snapshot=content_block.thinking,
)
)
elif event.delta.type == "signature_delta":
if content_block.type == "thinking":
events_to_fire.append(
build(
SignatureEvent,
type="signature",
signature=content_block.signature,
)
)
pass
else:
# we only want exhaustive checking for linters, not at runtime
if TYPE_CHECKING: # type: ignore[unreachable]
assert_never(event.delta)
elif event.type == "content_block_stop":
content_block = message_snapshot.content[event.index]
events_to_fire.append(
build(ContentBlockStopEvent, type="content_block_stop", index=event.index, content_block=content_block),
)
else:
# we only want exhaustive checking for linters, not at runtime
if TYPE_CHECKING: # type: ignore[unreachable]
assert_never(event)
return events_to_fire
JSON_BUF_PROPERTY = "__json_buf"
TRACKS_TOOL_INPUT = (
ToolUseBlock,
ServerToolUseBlock,
)
def accumulate_event(
*,
event: RawMessageStreamEvent,
current_snapshot: Message | None,
) -> Message:
if not isinstance(cast(Any, event), BaseModel):
event = cast( # pyright: ignore[reportUnnecessaryCast]
RawMessageStreamEvent,
construct_type_unchecked(
type_=cast(Type[RawMessageStreamEvent], RawMessageStreamEvent),
value=event,
),
)
if not isinstance(cast(Any, event), BaseModel):
raise TypeError(f"Unexpected event runtime type, after deserialising twice - {event} - {type(event)}")
if current_snapshot is None:
if event.type == "message_start":
return Message.construct(**cast(Any, event.message.to_dict()))
raise RuntimeError(f'Unexpected event order, got {event.type} before "message_start"')
if event.type == "content_block_start":
# TODO: check index
current_snapshot.content.append(
cast(
ContentBlock,
construct_type(type_=ContentBlock, value=event.content_block.model_dump()),
),
)
elif event.type == "content_block_delta":
content = current_snapshot.content[event.index]
if event.delta.type == "text_delta":
if content.type == "text":
content.text += event.delta.text
elif event.delta.type == "input_json_delta":
if isinstance(content, TRACKS_TOOL_INPUT):
from jiter import from_json
# we need to keep track of the raw JSON string as well so that we can
# re-parse it for each delta, for now we just store it as an untyped
# property on the snapshot
json_buf = cast(bytes, getattr(content, JSON_BUF_PROPERTY, b""))
json_buf += bytes(event.delta.partial_json, "utf-8")
if json_buf:
content.input = from_json(json_buf, partial_mode=True)
setattr(content, JSON_BUF_PROPERTY, json_buf)
elif event.delta.type == "citations_delta":
if content.type == "text":
if not content.citations:
content.citations = [event.delta.citation]
else:
content.citations.append(event.delta.citation)
elif event.delta.type == "thinking_delta":
if content.type == "thinking":
content.thinking += event.delta.thinking
elif event.delta.type == "signature_delta":
if content.type == "thinking":
content.signature = event.delta.signature
else:
# we only want exhaustive checking for linters, not at runtime
if TYPE_CHECKING: # type: ignore[unreachable]
assert_never(event.delta)
elif event.type == "message_delta":
current_snapshot.stop_reason = event.delta.stop_reason
current_snapshot.stop_sequence = event.delta.stop_sequence
current_snapshot.usage.output_tokens = event.usage.output_tokens
# Update other usage fields if they exist in the event
if event.usage.input_tokens is not None:
current_snapshot.usage.input_tokens = event.usage.input_tokens
if event.usage.cache_creation_input_tokens is not None:
current_snapshot.usage.cache_creation_input_tokens = event.usage.cache_creation_input_tokens
if event.usage.cache_read_input_tokens is not None:
current_snapshot.usage.cache_read_input_tokens = event.usage.cache_read_input_tokens
if event.usage.server_tool_use is not None:
current_snapshot.usage.server_tool_use = event.usage.server_tool_use
return current_snapshot
|
AsyncMessageStreamManager
|
python
|
facebook__pyre-check
|
tools/upgrade/commands/tests/fixme_single_test.py
|
{
"start": 486,
"end": 3930
}
|
class ____(unittest.TestCase):
@patch("subprocess.run")
@patch.object(Configuration, "find_project_configuration", return_value=Path("/"))
@patch.object(Configuration, "write")
@patch.object(Configuration, "remove_version")
@patch.object(Configuration, "get_errors")
@patch.object(ErrorSuppressingCommand, "_get_and_suppress_errors")
@patch.object(Repository, "commit_changes")
def test_run_fixme_single(
self,
commit_changes: MagicMock,
get_and_suppress_errors: MagicMock,
get_errors: MagicMock,
remove_version: MagicMock,
configuration_write: MagicMock,
find_configuration: MagicMock,
subprocess: MagicMock,
) -> None:
arguments = MagicMock()
arguments.path = Path("/local")
arguments.error_source = "generate"
arguments.lint = False
arguments.no_commit = False
arguments.fixme_threshold = None
arguments.upgrade_version = False
get_errors.return_value = Errors([])
configuration_contents = '{"targets":[]}'
with patch("builtins.open", mock_open(read_data=configuration_contents)):
FixmeSingle.from_arguments(arguments, repository).run()
get_and_suppress_errors.assert_called_once()
commit_changes.assert_called_once_with(
commit=True, title="Suppress pyre errors for local"
)
get_and_suppress_errors.reset_mock()
commit_changes.reset_mock()
pyre_errors = [
{
"line": 2,
"column": 4,
"path": "local.py",
"code": 7,
"name": "Kind",
"concise_description": "Error",
"ignore_error": False,
"external_to_global_root": False,
}
]
get_errors.return_value = Errors(pyre_errors)
with patch("builtins.open", mock_open(read_data=configuration_contents)):
FixmeSingle.from_arguments(arguments, repository).run()
get_and_suppress_errors.assert_called_once()
commit_changes.assert_called_once_with(
commit=True, title="Suppress pyre errors for local"
)
get_and_suppress_errors.reset_mock()
commit_changes.reset_mock()
arguments.fixme_threshold = 1
pyre_errors = [
{
"line": 2,
"column": 4,
"path": "local.py",
"code": 7,
"name": "Kind",
"concise_description": "Error",
"ignore_error": False,
"external_to_global_root": False,
},
{
"line": 3,
"column": 4,
"path": "local.py",
"code": 7,
"name": "Kind",
"concise_description": "Error",
"ignore_error": False,
"external_to_global_root": False,
},
]
get_errors.return_value = Errors(pyre_errors)
with patch("builtins.open", mock_open(read_data=configuration_contents)):
FixmeSingle.from_arguments(arguments, repository).run()
get_and_suppress_errors.assert_called_once()
commit_changes.assert_called_once_with(
commit=True, title="Suppress pyre errors for local"
)
|
FixmeSingleTest
|
python
|
Netflix__metaflow
|
metaflow/plugins/env_escape/configurations/test_lib_impl/test_lib.py
|
{
"start": 706,
"end": 1029
}
|
class ____(HTMLParser):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._output = []
def handle_starttag(self, tag, attrs):
self._output.append(tag)
return super().handle_starttag(tag, attrs)
def get_output(self):
return self._output
|
BaseClass
|
python
|
tensorflow__tensorflow
|
tensorflow/tools/common/public_api.py
|
{
"start": 820,
"end": 5407
}
|
class ____:
"""Visitor to use with `traverse` to visit exactly the public TF API."""
def __init__(self, visitor):
"""Constructor.
`visitor` should be a callable suitable as a visitor for `traverse`. It will
be called only for members of the public TensorFlow API.
Args:
visitor: A visitor to call for the public API.
"""
self._visitor = visitor
self._root_name = 'tf'
# Modules/classes we want to suppress entirely.
self._private_map = {
'tf': [
'compiler',
'core',
# TODO(scottzhu): See b/227410870 for more details. Currently
# dtensor API is exposed under tf.experimental.dtensor, but in the
# meantime, we have tensorflow/dtensor directory which will be treat
# as a python package. We want to avoid step into the
# tensorflow/dtensor directory when visit the API.
# When the tf.dtensor becomes the public API, it will actually pick
# up from tf.compat.v2.dtensor as priority and hide the
# tensorflow/dtensor package.
'security',
'dtensor',
'python',
'tsl', # TODO(tlongeri): Remove after TSL is moved out of TF.
],
# Some implementations have this internal module that we shouldn't
# expose.
'tf.flags': ['cpp_flags'],
}
# Modules/classes we do not want to descend into if we hit them. Usually,
# system modules exposed through platforms for compatibility reasons.
# Each entry maps a module path to a name to ignore in traversal.
self._do_not_descend_map = {
'tf': [
'examples',
'flags', # Don't add flags
# TODO(drpng): This can be removed once sealed off.
'platform',
# TODO(drpng): This can be removed once sealed.
'pywrap_tensorflow',
# TODO(drpng): This can be removed once sealed.
'user_ops',
'tools',
'tensorboard',
],
## Everything below here is legitimate.
# It'll stay, but it's not officially part of the API.
'tf.app': ['flags'],
# Imported for compatibility between py2/3.
'tf.test': ['mock'],
}
@property
def private_map(self):
"""A map from parents to symbols that should not be included at all.
This map can be edited, but it should not be edited once traversal has
begun.
Returns:
The map marking symbols to not include.
"""
return self._private_map
@property
def do_not_descend_map(self):
"""A map from parents to symbols that should not be descended into.
This map can be edited, but it should not be edited once traversal has
begun.
Returns:
The map marking symbols to not explore.
"""
return self._do_not_descend_map
def set_root_name(self, root_name):
"""Override the default root name of 'tf'."""
self._root_name = root_name
def _is_private(self, path, name, obj=None):
"""Return whether a name is private."""
# TODO(wicke): Find out what names to exclude.
del obj # Unused.
return ((path in self._private_map and name in self._private_map[path]) or
(name.startswith('_') and not re.match('__.*__$', name) or
name in ['__base__', '__class__', '__next_in_mro__']))
def _do_not_descend(self, path, name):
"""Safely queries if a specific fully qualified name should be excluded."""
return (path in self._do_not_descend_map and
name in self._do_not_descend_map[path])
def __call__(self, path, parent, children):
"""Visitor interface, see `traverse` for details."""
# Avoid long waits in cases of pretty unambiguous failure.
if tf_inspect.ismodule(parent) and len(path.split('.')) > 10:
raise RuntimeError('Modules nested too deep:\n%s.%s\n\nThis is likely a '
'problem with an accidental public import.' %
(self._root_name, path))
# Includes self._root_name
full_path = '.'.join([self._root_name, path]) if path else self._root_name
# Remove things that are not visible.
for name, child in list(children):
if self._is_private(full_path, name, child):
children.remove((name, child))
self._visitor(path, parent, children)
# Remove things that are visible, but which should not be descended into.
for name, child in list(children):
if self._do_not_descend(full_path, name):
children.remove((name, child))
|
PublicAPIVisitor
|
python
|
cython__cython
|
Cython/Debugger/libcython.py
|
{
"start": 37477,
"end": 38018
}
|
class ____(CythonCommand):
"""
List Cython source code. To disable to customize colouring see the cy_*
parameters.
"""
name = 'cy list'
command_class = gdb.COMMAND_FILES
completer_class = gdb.COMPLETE_NONE
@libpython.dont_suppress_errors
# @dispatch_on_frame(c_command='list')
def invoke(self, _, from_tty):
sd, lineno = self.get_source_desc()
source = sd.get_source(lineno - 5, lineno + 5, mark_line=lineno,
lex_entire=True)
print(source)
|
CyList
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-and-minimum-sums-of-at-most-size-k-subsequences.py
|
{
"start": 977,
"end": 1426
}
|
class ____(object):
def minMaxSums(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
MOD = 10**9+7
nums.sort()
result = 0
cnt = 1
for i in xrange(len(nums)):
cnt = reduce(lambda accu, x: (accu+x)%MOD, (nCr(i, j) for j in xrange(min(i, k-1)+1)), 0)
result = (result+(nums[i]+nums[~i])*cnt)%MOD
return result
|
Solution2
|
python
|
pytorch__pytorch
|
test/distributed/_composable/fsdp/test_fully_shard_state_dict.py
|
{
"start": 911,
"end": 15426
}
|
class ____(FSDPTest):
@property
def world_size(self) -> int:
return min(8, torch.get_device_module(device_type).device_count())
@skip_if_lt_x_gpu(2)
def test_dp_state_dict_save_load(self):
fsdp_mesh = init_device_mesh(device_type.type, (self.world_size,))
self.run_subtests(
{"mlp_dim": [2, 3, 4, 5], "mesh": [fsdp_mesh]},
self._test_dp_state_dict_save_load,
)
if 16 % self.world_size == 0:
# TODO: remove this evenness check when FSDP2 supports uneven sharding
# see: https://github.com/pytorch/pytorch/blob/cbb03e69717943ddf912f9a68b3a6f935bbf21f5/torch/distributed/fsdp/_fully_shard/_fsdp_param.py#L353-L361 # noqa: B950
self.run_subtests(
{
"mlp_dim": [16],
"mesh": [fsdp_mesh],
"use_shard_placement_fn": [True],
},
self._test_dp_state_dict_save_load,
)
if self.world_size % 2 != 0:
return
hsdp_mesh = init_device_mesh(
device_type.type,
(self.world_size // 2, 2),
mesh_dim_names=("dp_replicate", "dp_shard"),
)
self.run_subtests(
{"mlp_dim": [2, 3, 4, 5], "mesh": [hsdp_mesh]},
self._test_dp_state_dict_save_load,
)
self.run_subtests(
{"mlp_dim": [16], "mesh": [hsdp_mesh], "use_shard_placement_fn": [True]},
self._test_dp_state_dict_save_load,
)
def _test_dp_state_dict_save_load(
self, mlp_dim: int, mesh: DeviceMesh, use_shard_placement_fn: bool = False
):
torch.manual_seed(42)
base_model = nn.Sequential(
MLP(mlp_dim),
nn.Sequential(MLP(mlp_dim), nn.Linear(mlp_dim, mlp_dim)),
MLP(mlp_dim),
)
def _shard_placement_fn(param: nn.Parameter) -> Optional[Shard]:
largest_dim = largest_dim_size = -1
for dim, dim_size in enumerate(param.shape):
if dim_size > largest_dim_size:
largest_dim = dim
largest_dim_size = dim_size
return Shard(largest_dim)
shard_placement_fn = _shard_placement_fn if use_shard_placement_fn else None
fully_shard_fn = functools.partial(
fully_shard, mesh=mesh, shard_placement_fn=shard_placement_fn
)
# Check basic `reshard_after_forward=True`
model1 = copy.deepcopy(base_model)
for module in model1:
fully_shard_fn(module)
fully_shard_fn(model1)
self._test_state_dict_save_load(model1)
# Check `reshard_after_forward=False` before and after a forward
model2 = copy.deepcopy(base_model)
for module in model2:
fully_shard_fn(module, reshard_after_forward=False)
fully_shard_fn(model2, reshard_after_forward=False)
self._test_state_dict_save_load(model2)
ref_sharded_sd = model2.state_dict()
inp = torch.randn((2, mlp_dim), device=device_type.type)
model2(inp) # parameters are not resharded after this forward
# Check that state dict hooks reshard
sharded_sd = model2.state_dict()
self.assertEqual(set(ref_sharded_sd.keys()), set(sharded_sd.keys()))
for key, value in ref_sharded_sd.items():
self.assertEqual(value, sharded_sd[key])
@skip_if_lt_x_gpu(2)
def test_cached_state_dict(self):
self.run_subtests(
{"mlp_dim": [2, 3, 4, 5], "mutate_after_state_dict": [True, False]},
self._test_cached_state_dict,
)
def _test_cached_state_dict(self, mlp_dim: int, mutate_after_state_dict: bool):
torch.manual_seed(42)
model = nn.Linear(mlp_dim, mlp_dim, bias=False)
fully_shard(model, reshard_after_forward=True)
optim = torch.optim.AdamW(model.parameters(), lr=1e-2)
# call .state_dict() once and use `sd` directly to reduce cpu overhead
sd = model.state_dict()
assert isinstance(model.weight, DTensor)
if not mutate_after_state_dict:
self.assertTrue(
sd["weight"]._local_tensor.untyped_storage().data_ptr()
== model.weight._local_tensor.untyped_storage().data_ptr()
)
else:
model = model.cpu()
model = model.cuda()
self.assertTrue(
sd["weight"]._local_tensor.untyped_storage().data_ptr()
!= model.weight._local_tensor.untyped_storage().data_ptr()
)
torch.manual_seed(42 + self.rank)
inp = torch.rand(mlp_dim, mlp_dim, device="cuda")
for _ in range(5):
optim.zero_grad()
loss = model(inp).sum()
loss.backward()
optim.step()
if not mutate_after_state_dict:
self.assertTrue(
sd["weight"]._local_tensor.untyped_storage().data_ptr()
== model.weight._local_tensor.untyped_storage().data_ptr()
)
@skip_if_lt_x_gpu(2)
def test_dp_state_dict_cpu_offload(self):
self.run_subtests(
{
"offload_policy": [
CPUOffloadPolicy(pin_memory=True),
CPUOffloadPolicy(pin_memory=False),
],
"cpu_state_dict": [True, False],
},
self._test_dp_state_dict_cpu_offload,
)
def _test_dp_state_dict_cpu_offload(
self, offload_policy: CPUOffloadPolicy, cpu_state_dict: bool
):
mlp_dim = 4
torch.manual_seed(42)
with torch.device("meta"):
model = nn.Sequential(
nn.Linear(mlp_dim, mlp_dim, bias=False),
nn.Linear(mlp_dim, mlp_dim, bias=False),
)
for module in model:
fully_shard(module, offload_policy=offload_policy)
fully_shard(model, offload_policy=offload_policy)
# split full sd into multiple pieces
# to test loading with `strict=False`
state_dicts = []
for name, dtensor in model.named_parameters():
full_tensor = torch.randn(dtensor.size())
sharded_tensor = distribute_tensor(
full_tensor, dtensor.device_mesh, dtensor.placements
)
if cpu_state_dict:
sharded_tensor = sharded_tensor.cpu()
state_dicts.append({name: sharded_tensor})
# check that we can load with some parameters still on meta device
for sd in state_dicts:
model.load_state_dict(sd, assign=True, strict=False)
# lazy init without error
inp = torch.rand((mlp_dim, mlp_dim), device=device_type.type)
context = (
self.assertRaisesRegex(
RuntimeError,
rf"Found following parameters on non-CPU device: \[\('0.weight', device\(type='{device_type.type}'",
)
if not cpu_state_dict
else nullcontext()
)
with context:
model(inp).sum()
state_dict = model.state_dict()
for name, dtensor in state_dict.items():
self.assertEqual(dtensor.device.type, "cpu")
@skip_if_lt_x_gpu(2)
def test_2d_state_dict_correctness(self):
dp_size = 2
global_mesh = init_device_mesh(
device_type.type,
(dp_size, self.world_size // dp_size),
mesh_dim_names=("dp", "tp"),
)
dp_mesh, tp_mesh = global_mesh["dp"], global_mesh["tp"]
torch.manual_seed(42)
mlp_dim = 4
# model init
model = nn.Sequential(*[MLP(mlp_dim) for _ in range(3)])
model_2d = copy.deepcopy(model)
# FSDP + TP
model_2d = parallelize_module(
model_2d,
device_mesh=tp_mesh,
parallelize_plan={
"0.in_proj": ColwiseParallel(),
"0.out_proj": RowwiseParallel(),
"1.in_proj": ColwiseParallel(),
"1.out_proj": RowwiseParallel(),
"2.in_proj": ColwiseParallel(),
"2.out_proj": RowwiseParallel(),
},
)
for mlp in model_2d:
fully_shard(mlp, mesh=dp_mesh)
fully_shard(model_2d, mesh=dp_mesh)
# state_dict parity check
model_state_dict = model.state_dict()
model_2d_state_dict = model_2d.state_dict()
for tensor, dtensor in zip(
model_state_dict.values(), model_2d_state_dict.values()
):
self.assertTrue(isinstance(dtensor, DTensor))
self.assertEqual(tensor, dtensor.full_tensor())
@skip_if_lt_x_gpu(2)
def test_dp_tp_state_dict_save_load(self):
dp_size = 2
global_mesh = init_device_mesh(
device_type.type,
(dp_size, self.world_size // dp_size),
mesh_dim_names=("dp", "tp"),
)
self.run_subtests(
{"mlp_dim": [4, 6, 8, 10]},
functools.partial(self._test_dp_tp_state_dict_save_load, global_mesh),
)
def _test_dp_tp_state_dict_save_load(self, global_mesh: DeviceMesh, mlp_dim: int):
dp_mesh, tp_mesh = global_mesh["dp"], global_mesh["tp"]
torch.manual_seed(42)
model = nn.Sequential(*[MLP(mlp_dim) for _ in range(3)])
model = parallelize_module(
model,
device_mesh=tp_mesh,
parallelize_plan={
"0.in_proj": ColwiseParallel(),
"0.out_proj": RowwiseParallel(),
"1.in_proj": ColwiseParallel(),
"1.out_proj": RowwiseParallel(),
"2.in_proj": ColwiseParallel(),
"2.out_proj": RowwiseParallel(),
},
)
for mlp in model:
fully_shard(mlp, mesh=dp_mesh)
fully_shard(model, mesh=dp_mesh)
self._test_state_dict_save_load(model)
@skip_if_lt_x_gpu(4)
def test_hsdp_tp_state_dict_save_load(self):
global_mesh = init_device_mesh(
device_type.type,
(2, 2, self.world_size // 4),
mesh_dim_names=("dp_replicate", "dp_shard", "tp"),
)
self.run_subtests(
{"mlp_dim": [4, 6, 8, 10]},
functools.partial(self._test_hsdp_tp_state_dict_save_load, global_mesh),
)
def _test_hsdp_tp_state_dict_save_load(self, global_mesh: DeviceMesh, mlp_dim: int):
dp_mesh, tp_mesh = global_mesh["dp_replicate", "dp_shard"], global_mesh["tp"]
torch.manual_seed(42)
model = nn.Sequential(*[MLP(mlp_dim) for _ in range(3)])
model = parallelize_module(
model,
device_mesh=tp_mesh,
parallelize_plan={
"0.in_proj": ColwiseParallel(),
"0.out_proj": RowwiseParallel(),
"1.in_proj": ColwiseParallel(),
"1.out_proj": RowwiseParallel(),
"2.in_proj": ColwiseParallel(),
"2.out_proj": RowwiseParallel(),
},
)
for mlp in model:
fully_shard(mlp, mesh=dp_mesh)
fully_shard(model, mesh=dp_mesh)
self._test_state_dict_save_load(model)
def _test_state_dict_save_load(self, model: nn.Module):
for param_name, param in model.named_parameters():
self.assertIsInstance(
param,
DTensor,
f"Expects parameters to be sharded as DTensors but got {param_name} "
f"as {type(param)}: {param}",
)
old_fill_value = 1
new_fill_value = 42 + self.rank
with torch.no_grad():
for param in model.parameters():
param.fill_(old_fill_value)
# Use that the parameters are currently sharded, meaning that their
# data pointers correspond to the sharded parameter data
param_name_to_data_ptr = {
n: p.to_local().data_ptr() for n, p in model.named_parameters()
}
ref_sharded_sizes = [p.size() for p in model.parameters()]
state_dict = model.state_dict()
for param, ref_sharded_size in zip(model.parameters(), ref_sharded_sizes):
self.assertEqual(param.size(), ref_sharded_size)
self.assertTrue(isinstance(param, nn.Parameter))
# Verify that keys match, values are DTensors, and values share the
# same storage as the existing sharded parameter data
self.assertEqual(set(state_dict.keys()), set(param_name_to_data_ptr.keys()))
for param_name, tensor in state_dict.items():
self.assertTrue(isinstance(tensor, DTensor))
if param_name_to_data_ptr[param_name] == 0:
# Check that this is padding (added by DTensor)
self.assertGreater(self.rank, 0)
self.assertEqual(torch.count_nonzero(tensor.to_local()).item(), 0)
else:
self.assertEqual(
tensor.to_local().data_ptr(), param_name_to_data_ptr[param_name]
)
# Verify that we can load a new state dict that contains DTensors with
# storages different from the current model parameters
new_state_dict: dict[str, DTensor] = {}
for param_name, dtensor in state_dict.items():
# Construct new DTensors to exercise load state dict writeback
new_state_dict[param_name] = dtensor.detach().clone().fill_(new_fill_value)
for param in model.parameters():
self.assertEqual(
param.to_local(),
torch.ones_like(param.to_local()) * old_fill_value,
)
model.load_state_dict(new_state_dict)
for param_name, param in model.named_parameters():
self.assertEqual(
param.to_local(),
torch.ones_like(param.to_local()) * new_fill_value,
)
local_param = param.to_local()
# Only guarantee that the local tensor's data pointer does not
# change if the sharding was even (i.e. no padding); otherwise,
# FSDP may re-pad the local tensor, changing its data pointer
if local_param.size(0) * param.device_mesh.size() == param.size(0):
self.assertEqual(
local_param.data_ptr(), param_name_to_data_ptr[param_name]
)
|
TestFullyShardStateDictMultiProcess
|
python
|
pytorch__pytorch
|
torch/_inductor/template_heuristics/triton.py
|
{
"start": 4910,
"end": 29118
}
|
class ____(metaclass=BaseHeuristicSingleton):
"""
Base class for mm_configs, device specific triton kernels config inherit from here
"""
def __init__(self) -> None:
# Whether the heuristic is used for int8. Use this when the heuristic is int8 exclusive
# but prefer the preprocess_mm_configs argument when it's used for both
self.has_int8_tensor: bool = False
# Whether to scale configs at all
# TODO(coconutruben): remove this once mm_plus_mm and tests support scaling
self.should_scale_configs: bool = True
# List of dictionaries to store the kernel configs. Configs that evaluate to true
# will be utilised on the target platform. The configs are as follows:
# (BLOCK_M, BLOCK_N, BLOCK_K, num_stages, num_warps)
self.mm_configs: list[BaseConfig] = [
GemmConfig(32, 32, 16, 1, 2),
GemmConfig(32, 32, 128, 2, 4),
GemmConfig(32, 64, 32, 5, 8),
GemmConfig(64, 32, 32, 5, 8),
GemmConfig(64, 32, 128, 5, 4),
GemmConfig(64, 64, 16, 2, 4),
GemmConfig(64, 64, 32, 2, 4),
GemmConfig(64, 64, 64, 3, 8),
GemmConfig(64, 64, 128, 5, 4),
GemmConfig(64, 128, 32, 3, 4),
GemmConfig(64, 128, 32, 4, 8),
GemmConfig(64, 128, 64, 3, 4),
GemmConfig(64, 128, 128, 4, 4),
GemmConfig(128, 64, 32, 3, 4),
GemmConfig(128, 64, 32, 4, 8),
GemmConfig(128, 128, 32, 2, 8),
GemmConfig(128, 128, 32, 3, 4),
GemmConfig(128, 128, 64, 3, 4),
GemmConfig(128, 128, 64, 5, 8),
GemmConfig(128, 128, 128, 4, 8),
]
# Exhaustive search for mm configs
self.exhaustive_configs: list[BaseConfig] = [
GemmConfig(BLOCK_M, BLOCK_N, BLOCK_K, num_stages, num_warps, group_m)
for BLOCK_M, BLOCK_N, BLOCK_K in itertools.product(
[16, 32, 64, 128, 256], repeat=3
)
for num_stages in [1, 2, 3, 4, 5]
for num_warps in [2, 4, 8]
for group_m in [8]
]
# these are only used in tuned_mm when AutoHeuristic is enabled
# the idea is that when AutoHeuristic collects data to learn a heuristic, more configs are autotuned
# when the learned heuristic is used, the learned heuristic reduces the number of configs down to 10
# which saves compilation time (since less configs are autotuned) and potentially increase performance
# because the learned heuristic might predict a config that is not part mm_configs
self.extra_mm_configs: list[BaseConfig] = [
GemmConfig(16, 32, 16, 3, 2),
GemmConfig(16, 32, 32, 4, 2),
GemmConfig(16, 32, 32, 5, 2),
GemmConfig(64, 64, 128, 3, 4),
GemmConfig(128, 64, 32, 2, 2),
GemmConfig(128, 64, 64, 3, 8),
GemmConfig(128, 64, 128, 4, 8),
GemmConfig(128, 128, 32, 4, 4),
GemmConfig(128, 128, 64, 3, 8),
GemmConfig(128, 128, 64, 5, 4),
]
self.int8_mm_configs: list[BaseConfig] = [
GemmConfig(64, 64, 32, 2, 4),
GemmConfig(64, 128, 32, 3, 4),
GemmConfig(128, 64, 32, 3, 4),
GemmConfig(64, 128, 32, 4, 8),
GemmConfig(128, 64, 32, 4, 8),
GemmConfig(64, 32, 32, 5, 8),
GemmConfig(32, 64, 32, 5, 8),
GemmConfig(128, 128, 32, 2, 8),
GemmConfig(64, 64, 64, 3, 8),
GemmConfig(128, 256, 128, 3, 8),
GemmConfig(256, 128, 128, 3, 8),
]
self.mixed_mm_configs: list[BaseConfig] = [
GemmConfig(16, 128, 256, 3, 4),
GemmConfig(16, 128, 256, 5, 8),
]
self.persistent_mm_configs: list[BaseConfig] = [
GemmConfig(128, 256, 64, 3, 8),
GemmConfig(128, 128, 64, 3, 8),
GemmConfig(128, 128, 128, 3, 8),
GemmConfig(128, 128, 128, 3, 4),
GemmConfig(128, 128, 64, 4, 8),
GemmConfig(128, 128, 64, 5, 8),
GemmConfig(256, 128, 64, 4, 8),
GemmConfig(128, 128, 64, 5, 4),
]
self.blackwell_persistent_mm_configs: list[BaseConfig] = [
GemmConfig(128, 256, 64, 4, 8),
GemmConfig(256, 128, 64, 3, 8),
GemmConfig(128, 256, 128, 2, 8),
GemmConfig(128, 256, 64, 3, 8),
GemmConfig(128, 128, 128, 3, 4),
GemmConfig(256, 128, 64, 3, 8),
GemmConfig(128, 128, 128, 3, 8),
]
self.blackwell_persistent_addmm_configs: list[BaseConfig] = [
GemmConfig(256, 128, 64, 2, 4),
]
self.scaled_mm_configs: list[BaseConfig] = [
GemmConfig(128, 256, 32, 3, 8),
GemmConfig(256, 128, 32, 3, 8),
GemmConfig(256, 64, 32, 4, 4),
GemmConfig(64, 256, 32, 4, 4),
GemmConfig(128, 128, 32, 4, 4),
GemmConfig(128, 64, 32, 4, 4),
GemmConfig(64, 128, 32, 4, 4),
GemmConfig(128, 32, 32, 4, 4),
GemmConfig(64, 32, 32, 5, 2),
GemmConfig(256, 128, 128, 3, 8),
GemmConfig(256, 64, 128, 4, 4),
GemmConfig(64, 256, 128, 4, 4),
GemmConfig(128, 128, 128, 4, 4),
GemmConfig(128, 64, 64, 4, 4),
GemmConfig(64, 128, 64, 4, 4),
GemmConfig(128, 32, 64, 4, 4),
GemmConfig(64, 32, 64, 5, 2),
GemmConfig(16, 32, 32, 2, 2),
GemmConfig(16, 64, 32, 2, 2),
GemmConfig(16, 128, 32, 2, 4),
GemmConfig(16, 256, 32, 2, 4),
GemmConfig(16, 32, 64, 2, 2),
GemmConfig(16, 64, 64, 2, 2),
GemmConfig(16, 128, 64, 2, 4),
GemmConfig(16, 256, 64, 2, 4),
GemmConfig(32, 32, 32, 2, 2),
GemmConfig(32, 64, 32, 2, 2),
GemmConfig(32, 128, 32, 2, 4),
GemmConfig(32, 256, 32, 2, 4),
GemmConfig(32, 32, 64, 2, 2),
GemmConfig(32, 64, 64, 2, 2),
GemmConfig(32, 128, 64, 2, 4),
GemmConfig(32, 256, 64, 2, 4),
GemmConfig(16, 32, 32, 3, 2),
GemmConfig(16, 64, 32, 3, 2),
GemmConfig(16, 128, 32, 3, 4),
GemmConfig(16, 256, 32, 3, 4),
GemmConfig(16, 32, 64, 3, 2),
GemmConfig(16, 64, 64, 3, 2),
GemmConfig(16, 128, 64, 3, 4),
GemmConfig(16, 256, 64, 3, 4),
GemmConfig(32, 32, 32, 3, 2),
GemmConfig(32, 64, 32, 3, 2),
GemmConfig(32, 128, 32, 3, 4),
GemmConfig(32, 256, 32, 3, 4),
GemmConfig(32, 32, 64, 3, 2),
GemmConfig(32, 64, 64, 3, 2),
GemmConfig(32, 128, 64, 3, 4),
GemmConfig(32, 256, 64, 3, 4),
GemmConfig(16, 32, 32, 4, 2),
GemmConfig(16, 64, 32, 4, 2),
GemmConfig(16, 128, 32, 4, 4),
GemmConfig(16, 256, 32, 4, 4),
GemmConfig(16, 32, 64, 4, 2),
GemmConfig(16, 64, 64, 4, 2),
GemmConfig(16, 128, 64, 4, 4),
GemmConfig(16, 256, 64, 4, 4),
GemmConfig(32, 32, 32, 4, 2),
GemmConfig(32, 64, 32, 4, 2),
GemmConfig(32, 128, 32, 4, 4),
GemmConfig(32, 256, 32, 4, 4),
GemmConfig(32, 32, 64, 4, 2),
GemmConfig(32, 64, 64, 4, 2),
GemmConfig(32, 128, 64, 4, 4),
GemmConfig(32, 256, 64, 4, 4),
GemmConfig(16, 32, 32, 5, 2),
GemmConfig(16, 64, 32, 5, 2),
GemmConfig(16, 128, 32, 5, 4),
GemmConfig(16, 256, 32, 5, 4),
GemmConfig(16, 32, 64, 5, 2),
GemmConfig(16, 64, 64, 5, 2),
GemmConfig(16, 128, 64, 5, 4),
GemmConfig(16, 256, 64, 5, 4),
GemmConfig(32, 32, 32, 5, 2),
GemmConfig(32, 64, 32, 5, 2),
GemmConfig(32, 128, 32, 5, 4),
GemmConfig(32, 256, 32, 5, 4),
GemmConfig(32, 32, 64, 5, 2),
GemmConfig(32, 64, 64, 5, 2),
GemmConfig(32, 128, 64, 5, 4),
GemmConfig(32, 256, 64, 5, 4),
GemmConfig(16, 32, 32, 6, 2),
GemmConfig(16, 64, 32, 6, 2),
GemmConfig(16, 128, 32, 6, 4),
GemmConfig(16, 256, 32, 6, 4),
GemmConfig(16, 32, 64, 6, 2),
GemmConfig(16, 64, 64, 6, 2),
GemmConfig(16, 128, 64, 6, 4),
GemmConfig(16, 256, 64, 6, 4),
GemmConfig(32, 32, 32, 6, 2),
GemmConfig(32, 64, 32, 6, 2),
GemmConfig(32, 128, 32, 6, 4),
GemmConfig(32, 256, 32, 6, 4),
GemmConfig(32, 32, 64, 6, 2),
GemmConfig(32, 64, 64, 6, 2),
GemmConfig(32, 128, 64, 6, 4),
GemmConfig(32, 256, 64, 6, 4),
GemmConfig(64, 16, 256, 5, 4),
GemmConfig(64, 32, 256, 5, 4),
GemmConfig(64, 128, 128, 2, 4),
GemmConfig(64, 128, 128, 3, 4),
GemmConfig(128, 128, 128, 2, 4),
GemmConfig(128, 256, 128, 4, 8),
GemmConfig(256, 128, 128, 2, 4),
GemmConfig(256, 128, 128, 2, 8),
]
self.scaled_persistent_mm_configs: list[BaseConfig] = [
GemmConfig(128, 128, 64, 3, 8),
GemmConfig(128, 128, 128, 3, 8),
GemmConfig(128, 128, 128, 4, 8),
GemmConfig(128, 128, 128, 4, 4),
GemmConfig(128, 128, 128, 3, 4),
GemmConfig(128, 128, 128, 5, 4),
GemmConfig(128, 128, 128, 5, 8),
GemmConfig(128, 128, 128, 6, 8),
GemmConfig(128, 128, 64, 4, 8),
GemmConfig(64, 32, 256, 5, 4),
GemmConfig(128, 256, 128, 3, 8),
GemmConfig(64, 128, 256, 4, 4),
GemmConfig(64, 256, 128, 4, 4),
]
# TODO: Unify with other gemm patterns, mm_plus_mm currently follows
# slightly different pattern than rest
self.mm_plus_mm_configs: list[BaseConfig] = [
GemmConfig(64, 64, 32, 2, 4),
GemmConfig(64, 64, 32, 3, 8),
GemmConfig(64, 64, 32, 4, 16),
GemmConfig(64, 32, 32, 4, 8),
GemmConfig(32, 64, 32, 4, 8),
GemmConfig(128, 128, 32, 1, 8),
GemmConfig(64, 64, 64, 1, 8),
GemmConfig(32, 32, 128, 1, 8),
GemmConfig(64, 64, 16, 2, 4),
GemmConfig(32, 32, 16, 1, 2),
]
self.conv_configs: list[BaseConfig] = [
ConvConfig(64, 256, 16, 2, 4),
ConvConfig(256, 64, 16, 2, 4),
ConvConfig(1024, 16, 16, 1, 8),
ConvConfig(128, 128, 32, 2, 8),
ConvConfig(64, 64, 32, 2, 4),
ConvConfig(64, 256, 32, 2, 8),
ConvConfig(256, 64, 32, 2, 8),
]
self.flex_attn_fwd_autotune_configs: list[FlexConfig] = [
FlexConfig(128, 64, 3, 4),
FlexConfig(128, 128, 3, 4),
FlexConfig(128, 128, 2, 8),
FlexConfig(128, 128, 1, 8),
FlexConfig(64, 128, 3, 4),
FlexConfig(64, 64, 3, 4),
]
self.flex_attn_bwd_autotune_configs: list[FlexBwDConfig] = [
# See Note: flex bwd configs
FlexBwDConfig(BLOCK_M, BLOCK_N, BLOCK_N, BLOCK_M, s, w)
for BLOCK_M in [32, 64]
for BLOCK_N in [32, 64, 128]
for s in [1, 3, 4, 5] # num_stages
for w in ([4, 8] if BLOCK_M >= 128 or BLOCK_N >= 128 else [4])
if BLOCK_N % BLOCK_M == 0
]
self.flex_decode_autotune_configs: list[FlexDecodeConfig] = [
FlexDecodeConfig(64, 3, 2),
FlexDecodeConfig(32, 3, 2),
FlexDecodeConfig(128, 3, 2),
]
self.exhaustive_flex_attn_fwd_configs: list[FlexConfig] = [
FlexConfig(BLOCK_M, BLOCK_N, num_stages, num_warps)
for BLOCK_M in [16, 32, 64, 128]
for BLOCK_N in [32, 64, 128]
for num_stages in [1, 3, 4, 5]
for num_warps in [2, 4, 8]
]
self.exhaustive_flex_attn_bwd_configs: list[FlexBwDConfig] = [
# See Note: flex bwd configs
FlexBwDConfig(BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2, num_stages, num_warps)
for BLOCK_M1 in [16, 32, 64, 128]
for BLOCK_N1 in [16, 32, 64, 128]
for BLOCK_M2 in [16, 32, 64, 128]
for BLOCK_N2 in [16, 32, 64, 128]
for num_stages in [1, 3, 4]
for num_warps in [2, 4, 8]
if BLOCK_N1 % BLOCK_M1 == 0
and BLOCK_M2 % BLOCK_N2 == 0 # kernel static assertions
]
self.exhaustive_flex_decode_configs: list[FlexDecodeConfig] = [
FlexDecodeConfig(block_n, num_stages, num_warps)
for block_n in [16, 32, 64, 128]
for num_stages in [1, 3, 4, 5]
for num_warps in [2, 4, 8]
]
def _finalize_mm_configs(
self,
configs: list[BaseConfig],
) -> Generator[TritonConfig, None, None]:
"""
Finalizes configs after scaling, applying additional constraints.
"""
used: OrderedSet[tuple[Optional[int], ...]] = OrderedSet()
max_mm_configs = config.test_configs.max_mm_configs
for conf in configs:
# Each warp computes a 16x16 tile = 256 elements
num_warps = min(conf.num_warps, conf.block_m * conf.block_n // 256)
# Construct key for finding duplicate configs
key: tuple[Optional[int], ...] = (
conf.block_m,
conf.block_n,
conf.block_k,
conf.num_stages,
conf.hint_override,
num_warps,
)
# Check if gemm specific arg exists - add to key if does
group_m = getattr(conf, "group_m", None)
if group_m is not None:
key += (group_m,)
if key not in used and (
max_mm_configs is None or len(used) < max_mm_configs
):
used.add(key)
kwargs = {
"BLOCK_M": conf.block_m,
"BLOCK_N": conf.block_n,
"BLOCK_K": conf.block_k,
"hint_override": conf.hint_override,
}
if group_m is not None:
kwargs["GROUP_M"] = group_m
yield self.triton_config(conf.num_stages, num_warps, **kwargs)
def _scale_mm_configs(
self,
m: int,
n: int,
k: int,
configs: list[BaseConfig],
scale: float,
has_int8_tensor: bool,
exclude: Callable[[sympy.Integer, sympy.Integer, sympy.Integer], bool],
hint_override: Optional[int] = None,
) -> list[BaseConfig]:
"""
Scales and filters matrix multiplication configs based on input size.
"""
if not self.should_scale_configs:
return configs
from ..runtime.runtime_utils import next_power_of_2
min_block_size = 16
min_block_size_k = 32 if (has_int8_tensor or self.has_int8_tensor) else 16
scaled_configs = []
for hint_override in [None] + config.multi_kernel_hints:
m_hint = max(
next_power_of_2(
V.graph.sizevars.size_hint(
m,
fallback=config.unbacked_symint_fallback, # type: ignore[arg-type]
hint_override=hint_override,
)
),
min_block_size,
)
n_hint = max(
next_power_of_2(
V.graph.sizevars.size_hint(
n,
fallback=config.unbacked_symint_fallback, # type: ignore[arg-type]
hint_override=hint_override,
)
),
min_block_size,
)
k_hint = max(
next_power_of_2(
V.graph.sizevars.size_hint(
k,
fallback=config.unbacked_symint_fallback, # type: ignore[arg-type]
hint_override=hint_override,
)
),
min_block_size_k,
)
for c in configs:
scaled_config = dataclasses.replace(
c,
block_m=max(min(int(c.block_m * scale), m_hint), min_block_size),
block_n=max(min(int(c.block_n * scale), n_hint), min_block_size),
block_k=max(min(int(c.block_k * scale), k_hint), min_block_size_k),
hint_override=hint_override,
)
if not exclude(
scaled_config.block_m, scaled_config.block_n, scaled_config.block_k
):
scaled_configs.append(scaled_config)
return scaled_configs
def _get_exceeding_shared_memory_checker(
self,
) -> Optional[Callable[[BaseConfig, int], bool]]:
"""
Returns a function that checks whether a given configuration exceeds the available shared memory for the device.
If the device does not report available shared memory, returns None.
"""
try:
device = torch.cuda.current_device()
props = torch.cuda.get_device_properties(device)
if not hasattr(props, "shared_memory_per_block_optin"): # for NVidia GPUs
return None
sm_available = int(props.shared_memory_per_block_optin)
except Exception:
# If CUDA is not available or properties cannot be queried, return None
return None
# TODO make a BaseDeviceConfigHeuristics to handle different device configuration in its own implementation.
def exceeds(gemm_config: BaseConfig, dtype_size: int) -> bool:
shared_mem_accum = dtype_size * (
gemm_config.block_m * gemm_config.block_k
+ gemm_config.block_n * gemm_config.block_k
)
return shared_mem_accum * gemm_config.num_stages > sm_available
return exceeds
def _prune_exceeding_max_shared_mem_configs(
self,
configs: list[BaseConfig],
dtype_size: int,
) -> list[BaseConfig]:
if dtype_size <= 0:
return configs
is_exceeding_shared_memory = self._get_exceeding_shared_memory_checker()
if is_exceeding_shared_memory is None:
return configs
return [c for c in configs if not is_exceeding_shared_memory(c, dtype_size)]
def _prune_exhaustive_configs(
self,
configs: list[BaseConfig],
dtype_size: int,
) -> list[BaseConfig]:
is_exceeding_shared_memory = self._get_exceeding_shared_memory_checker()
pruned_configs = []
for gemm_config in configs:
# Will use more shared memory than available
if is_exceeding_shared_memory and is_exceeding_shared_memory(
gemm_config, dtype_size
):
continue
NUM_REG = 255
acc_regs = math.ceil(
gemm_config.block_m * gemm_config.block_n / (gemm_config.num_warps * 32)
)
# Lower bound for register spillage, if exceeds the kernel will certainly spill
if acc_regs > NUM_REG:
continue
pruned_configs.append(gemm_config)
return pruned_configs
def _filter_configs(self, configs: list[BaseConfig]) -> list[BaseConfig]:
"""
Filter configs based on specific requirements.
Subclasses can override this to implement custom filtering logic.
"""
return configs
def preprocess_mm_configs(
self,
m: int,
n: int,
k: int,
configs: list[BaseConfig],
has_int8_tensor: bool = False,
scale: float = 1.0,
exclude: Callable[
[sympy.Integer, sympy.Integer, sympy.Integer], bool
] = lambda m, n, k: False,
dtype_size: int = 0,
op_name: str = "mm", # For preprocessing overrides e.g. on CPU
) -> Generator[TritonConfig, None, None]:
configs = self._filter_configs(configs)
scaled_configs = self._scale_mm_configs(
m, n, k, configs, scale, has_int8_tensor, exclude
)
# Filter out configs that require more shared memory than is available.
if config.max_autotune_prune_choices_based_on_shared_mem:
scaled_configs = self._prune_exceeding_max_shared_mem_configs(
scaled_configs, dtype_size
)
if config.max_autotune_gemm_search_space == "EXHAUSTIVE":
assert dtype_size > 0, "dtype_size must be provided for exhaustive search"
scaled_configs = self._prune_exhaustive_configs(scaled_configs, dtype_size)
return self._finalize_mm_configs(scaled_configs)
def triton_config(
self, num_stages: int, num_warps: int, **kwargs: Any
) -> TritonConfig:
from triton import Config as TritonConfig # type: ignore[attr-defined]
return TritonConfig(kwargs, num_stages=num_stages, num_warps=num_warps)
def get_mm_configs(self) -> partial[Generator[TritonConfig, None, None]]:
return partial(self.preprocess_mm_configs, configs=self.mm_configs)
def get_exhaustive_mm_configs(self) -> partial[Generator[TritonConfig, None, None]]:
return partial(self.preprocess_mm_configs, configs=self.exhaustive_configs)
def get_conv_configs(self) -> partial[Generator[TritonConfig, None, None]]:
return partial(
self.preprocess_mm_configs, configs=self.conv_configs, op_name="conv"
)
# Flex attn helpers
def get_flex_attn_fwd_configs(self, head_dim: int, dtype: Any) -> list[FlexConfig]:
flex_attn_fwd_configs: list[FlexConfig] = []
if config.max_autotune:
if config.max_autotune_flex_search_space == "EXHAUSTIVE":
return self.exhaustive_flex_attn_fwd_configs
flex_attn_fwd_configs += self.flex_attn_fwd_autotune_configs
if head_dim <= 256:
if dtype == torch.float32:
default_config = FlexConfig(64, 64, 3, 4)
else:
default_config = FlexConfig(128, 64, 3, 4)
else:
if dtype == torch.float32:
default_config = FlexConfig(32, 16, 3, 4)
else:
default_config = FlexConfig(64, 32, 3, 4)
if default_config not in flex_attn_fwd_configs:
flex_attn_fwd_configs.append(default_config)
return flex_attn_fwd_configs
def get_flex_attn_bwd_configs(
self, head_dim: int, dtype: Any
) -> list[FlexBwDConfig]:
flex_attn_bwd_configs: list[FlexBwDConfig] = []
if config.max_autotune:
if config.max_autotune_flex_search_space == "EXHAUSTIVE":
return self.exhaustive_flex_attn_bwd_configs
flex_attn_bwd_configs += self.flex_attn_bwd_autotune_configs
default_config = FlexBwDConfig(16, 16, 16, 16, 1, 4)
if default_config not in flex_attn_bwd_configs:
flex_attn_bwd_configs.append(default_config)
return flex_attn_bwd_configs
def get_flex_decode_configs(
self, head_dim: int, dtype: Any
) -> list[FlexDecodeConfig]:
flex_decode_configs: list[FlexDecodeConfig] = []
if config.max_autotune:
if config.max_autotune_flex_search_space == "EXHAUSTIVE":
return self.exhaustive_flex_decode_configs
flex_decode_configs += self.flex_decode_autotune_configs
default_config = FlexDecodeConfig(block_n=64, num_stages=1, num_warps=2)
if default_config not in flex_decode_configs:
flex_decode_configs.append(default_config)
return flex_decode_configs
|
BaseConfigHeuristic
|
python
|
spyder-ide__spyder
|
spyder/plugins/explorer/widgets/explorer.py
|
{
"start": 3292,
"end": 3393
}
|
class ____:
General = 'general_section'
Language = 'language_section'
|
DirViewNewSubMenuSections
|
python
|
realpython__materials
|
tic-tac-toe-ai-python/source_code_bonus/tic-tac-toe/library/src/tic_tac_toe/logic/models.py
|
{
"start": 617,
"end": 1017
}
|
class ____:
cells: str = " " * 9
def __post_init__(self) -> None:
validate_grid(self)
@cached_property
def x_count(self) -> int:
return self.cells.count("X")
@cached_property
def o_count(self) -> int:
return self.cells.count("O")
@cached_property
def empty_count(self) -> int:
return self.cells.count(" ")
@dataclass(frozen=True)
|
Grid
|
python
|
astropy__astropy
|
astropy/modeling/projections.py
|
{
"start": 23948,
"end": 24208
}
|
class ____(Sky2PixProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - sky to pixel.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
x &= \phi \cos \theta \\
y &= \theta
"""
|
Sky2Pix_SansonFlamsteed
|
python
|
keras-team__keras
|
keras/src/layers/preprocessing/image_preprocessing/random_invert_test.py
|
{
"start": 164,
"end": 2229
}
|
class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.RandomInvert,
init_kwargs={
"factor": 0.75,
"value_range": (20, 200),
"seed": 1,
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
)
def test_random_invert_inference(self):
seed = 3481
layer = layers.RandomInvert()
np.random.seed(seed)
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs, training=False)
self.assertAllClose(inputs, output)
def test_random_invert_no_op(self):
seed = 3481
layer = layers.RandomInvert(factor=0)
np.random.seed(seed)
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs)
self.assertAllClose(inputs, output)
def test_random_invert_basic(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
input_data = np.random.random((1, 8, 8, 3))
else:
input_data = np.random.random((1, 3, 8, 8))
layer = layers.RandomInvert(
factor=(1, 1),
value_range=[0, 1],
data_format=data_format,
seed=1337,
)
output = layer(input_data)
self.assertAllClose(1 - input_data, output)
def test_tf_data_compatibility(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
input_data = np.random.random((2, 8, 8, 3))
else:
input_data = np.random.random((2, 3, 8, 8))
layer = layers.RandomInvert(
factor=0.5, value_range=[0, 1], data_format=data_format, seed=1337
)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output.numpy()
|
RandomInvertTest
|
python
|
coleifer__peewee
|
playhouse/mysql_ext.py
|
{
"start": 2791,
"end": 3874
}
|
class ____(TextField):
field_type = 'JSON'
def __init__(self, json_dumps=None, json_loads=None, **kwargs):
self._json_dumps = json_dumps or json.dumps
self._json_loads = json_loads or json.loads
super(JSONField, self).__init__(**kwargs)
def python_value(self, value):
if value is not None:
try:
return self._json_loads(value)
except (TypeError, ValueError):
return value
def db_value(self, value):
if value is not None:
if not isinstance(value, Node):
value = self._json_dumps(value)
return value
def extract(self, path):
return fn.json_extract(self, path)
def Match(columns, expr, modifier=None):
if isinstance(columns, (list, tuple)):
match = fn.MATCH(*columns) # Tuple of one or more columns / fields.
else:
match = fn.MATCH(columns) # Single column / field.
args = expr if modifier is None else NodeList((expr, SQL(modifier)))
return NodeList((match, fn.AGAINST(args)))
|
JSONField
|
python
|
getsentry__sentry
|
src/sentry/integrations/vsts/actions/create_ticket.py
|
{
"start": 218,
"end": 853
}
|
class ____(TicketEventAction):
id = "sentry.integrations.vsts.notify_action.AzureDevopsCreateTicketAction"
label = "Create an Azure DevOps work item in {integration} with these "
ticket_type = "an Azure DevOps work item"
link = "https://docs.sentry.io/product/integrations/source-code-mgmt/azure-devops/#issue-sync"
provider = IntegrationProviderSlug.AZURE_DEVOPS.value
def generate_footer(self, rule_url: str) -> str:
return "\nThis work item was automatically created by Sentry via [{}]({})".format(
self.rule.label,
absolute_uri(rule_url),
)
|
AzureDevopsCreateTicketAction
|
python
|
automl__auto-sklearn
|
autosklearn/metalearning/metafeatures/metafeatures.py
|
{
"start": 4210,
"end": 4411
}
|
class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
return np.log(metafeatures.get_value("NumberOfInstances"))
@metafeatures.define("NumberOfClasses")
|
LogNumberOfInstances
|
python
|
euske__pdfminer
|
pdfminer/pdftypes.py
|
{
"start": 949,
"end": 992
}
|
class ____(PSException):
pass
|
PDFException
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 6974,
"end": 7026
}
|
class ____(str, Enum):
BOOL = "bool"
|
BoolIndexType
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-deltalake/dagster_deltalake/io_manager.py
|
{
"start": 1766,
"end": 5681
}
|
class ____(ConfigurableIOManagerFactory):
"""Base class for an IO manager definition that reads inputs from and writes outputs to Delta Lake.
Examples:
.. code-block:: python
from dagster_deltalake import DeltaLakeIOManager
from dagster_deltalake_pandas import DeltaLakePandasTypeHandler
class MyDeltaLakeIOManager(DeltaLakeIOManager):
@staticmethod
def type_handlers() -> Sequence[DbTypeHandler]:
return [DeltaLakePandasTypeHandler()]
@asset(
key_prefix=["my_schema"] # will be used as the schema (parent folder) in Delta Lake
)
def my_table() -> pd.DataFrame: # the name of the asset will be the table name
...
Definitions(
assets=[my_table],
resources={"io_manager": MyDeltaLakeIOManager()}
)
If you do not provide a schema, Dagster will determine a schema based on the assets and ops using
the I/O Manager. For assets, the schema will be determined from the asset key, as in the above example.
For ops, the schema can be specified by including a "schema" entry in output metadata. If none
of these is provided, the schema will default to "public".
.. code-block:: python
@op(
out={"my_table": Out(metadata={"schema": "my_schema"})}
)
def make_my_table() -> pd.DataFrame:
...
To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the
In or AssetIn.
.. code-block:: python
@asset(
ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}
)
def my_table_a(my_table: pd.DataFrame):
# my_table will just contain the data from column "a"
...
"""
root_uri: str = Field(description="Storage location where Delta tables are stored.")
mode: WriteMode = Field(
default=WriteMode.overwrite.value, # type: ignore
description="The write mode passed to save the output.",
)
overwrite_schema: bool = Field(default=False)
writer_engine: WriterEngine = Field(
default=WriterEngine.pyarrow.value, # type: ignore
description="Engine passed to write_deltalake.",
)
storage_options: Union[AzureConfig, S3Config, LocalConfig, GcsConfig] = Field(
discriminator="provider",
description="Configuration for accessing storage location.",
)
client_options: Optional[ClientConfig] = Field(
default=None, description="Additional configuration passed to http client."
)
table_config: Optional[dict[str, str]] = Field(
default=None,
description="Additional config and metadata added to table on creation.",
)
schema_: Optional[str] = Field(
default=None, alias="schema", description="Name of the schema to use."
) # schema is a reserved word for pydantic
custom_metadata: Optional[dict[str, str]] = Field(
default=None, description="Custom metadata that is added to transaction commit."
)
writer_properties: Optional[dict[str, str]] = Field(
default=None, description="Writer properties passed to the rust engine writer."
)
@staticmethod
@abstractmethod
def type_handlers() -> Sequence[DbTypeHandler]: ...
@staticmethod
def default_load_type() -> Optional[type]:
return None
def create_io_manager(self, context) -> DbIOManager:
self.storage_options.dict()
return DbIOManager(
db_client=DeltaLakeDbClient(),
database="deltalake",
schema=self.schema_,
type_handlers=self.type_handlers(),
default_load_type=self.default_load_type(),
io_manager_name="DeltaLakeIOManager",
)
|
DeltaLakeIOManager
|
python
|
apache__airflow
|
airflow-core/tests/unit/utils/test_json.py
|
{
"start": 1517,
"end": 3554
}
|
class ____:
def test_encode_raises(self):
with pytest.raises(TypeError, match="^.*is not JSON serializable$"):
json.dumps(
Exception,
cls=utils_json.XComEncoder,
)
def test_encode_xcom_asset(self):
asset = Asset(uri="mytest://asset", name="mytest")
s = json.dumps(asset, cls=utils_json.XComEncoder)
obj = json.loads(s, cls=utils_json.XComDecoder)
assert asset.uri == obj.uri
@pytest.mark.parametrize(
"data",
[
({"foo": 1, "bar": 2},),
({"foo": 1, "bar": 2, "baz": Z(1)},),
(
{"foo": 1, "bar": 2},
{"foo": 1, "bar": 2, "baz": Z(1)},
),
({"d1": {"d2": 3}},),
({"d1": {"d2": Z(1)}},),
({"d1": {"d2": {"d3": 4}}},),
({"d1": {"d2": {"d3": Z(1)}}},),
],
)
def test_encode_xcom_with_nested_dict(self, data):
i = json.dumps(data, cls=utils_json.XComEncoder)
e = json.loads(i, cls=utils_json.XComDecoder)
assert data == e
def test_orm_deserialize(self):
x = 14
u = U(x=x)
s = json.dumps(u, cls=utils_json.XComEncoder)
o = json.loads(s, cls=utils_json.XComDecoder, object_hook=utils_json.XComDecoder.orm_object_hook)
assert o == f"{U.__module__}.{U.__qualname__}@version={U.__version__}(x={x})"
def test_collections(self):
i = [1, 2]
e = json.loads(json.dumps(i, cls=utils_json.XComEncoder), cls=utils_json.XComDecoder)
assert i == e
i = ("a", "b", "a", "c")
e = json.loads(json.dumps(i, cls=utils_json.XComEncoder), cls=utils_json.XComDecoder)
assert i == e
i = {2, 3}
e = json.loads(json.dumps(i, cls=utils_json.XComEncoder), cls=utils_json.XComDecoder)
assert i == e
i = frozenset({6, 7})
e = json.loads(json.dumps(i, cls=utils_json.XComEncoder), cls=utils_json.XComDecoder)
assert i == e
|
TestXComEncoder
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-number-of-groups-with-increasing-length.py
|
{
"start": 1797,
"end": 2595
}
|
class ____(object):
def maxIncreasingGroups(self, usageLimits):
"""
:type usageLimits: List[int]
:rtype: int
"""
def check(l):
curr = 0
for i in xrange(l):
curr += usageLimits[~i]-(l-i)
curr = min(curr, 0)
for i in xrange(len(usageLimits)-l):
curr += usageLimits[i]
return curr >= 0
usageLimits.sort()
left, right = 1, len(usageLimits)
while left <= right:
mid = left + (right-left)//2
if not check(mid):
right = mid-1
else:
left = mid+1
return right
# Time: O(nlogn)
# Space: O(n)
# constructive algorithms, sort, binary search, greedy, prefix sum
|
Solution3
|
python
|
pennersr__django-allauth
|
allauth/mfa/webauthn/views.py
|
{
"start": 6415,
"end": 7364
}
|
class ____(FormView):
form_class = SignupWebAuthnForm
template_name = "mfa/webauthn/signup_form." + account_settings.TEMPLATE_EXTENSION
def get_context_data(self, **kwargs):
ret = super().get_context_data()
creation_options = auth.begin_registration(
self.request._login_stage.login.user, True
)
ret["js_data"] = {"creation_options": creation_options}
return ret
def get_form_kwargs(self):
ret = super().get_form_kwargs()
ret["user"] = self.request._login_stage.login.user
return ret
def form_valid(self, form):
flows.signup_authenticator(
self.request,
user=self.request._login_stage.login.user,
name=form.cleaned_data["name"],
credential=form.cleaned_data["credential"],
)
return self.request._login_stage.exit()
signup_webauthn = SignupWebAuthnView.as_view()
|
SignupWebAuthnView
|
python
|
jazzband__django-oauth-toolkit
|
oauth2_provider/views/oidc.py
|
{
"start": 5930,
"end": 8814
}
|
class ____(OIDCOnlyMixin, OAuthLibMixin, View):
"""
View used to show Claims about the authenticated End-User
"""
def get(self, request, *args, **kwargs):
return self._create_userinfo_response(request)
def post(self, request, *args, **kwargs):
return self._create_userinfo_response(request)
def _create_userinfo_response(self, request):
url, headers, body, status = self.create_userinfo_response(request)
response = HttpResponse(content=body or "", status=status)
for k, v in headers.items():
response[k] = v
return response
def _load_id_token(token):
"""
Loads an IDToken given its string representation for use with RP-Initiated Logout.
A tuple (IDToken, claims) is returned. Depending on the configuration expired tokens may be loaded.
If loading failed (None, None) is returned.
"""
IDToken = get_id_token_model()
validator = oauth2_settings.OAUTH2_VALIDATOR_CLASS()
try:
key = validator._get_key_for_token(token)
except InvalidJWSObject:
# Failed to deserialize the key.
return None, None
# Could not identify key from the ID Token.
if not key:
return None, None
try:
if oauth2_settings.OIDC_RP_INITIATED_LOGOUT_ACCEPT_EXPIRED_TOKENS:
# Only check the following while loading the JWT
# - claims are dict
# - the Claims defined in RFC7519 if present have the correct type (string, integer, etc.)
# The claim contents are not validated. `exp` and `nbf` in particular are not validated.
check_claims = {}
else:
# Also validate the `exp` (expiration time) and `nbf` (not before) claims.
check_claims = None
jwt_token = jwt.JWT(key=key, jwt=token, check_claims=check_claims)
claims = json.loads(jwt_token.claims)
# Assumption: the `sub` claim and `user` property of the corresponding IDToken Object point to the
# same user.
# To verify that the IDToken was intended for the user it is therefore sufficient to check the `user`
# attribute on the IDToken Object later on.
return IDToken.objects.get(jti=claims["jti"]), claims
except (JWException, JWTExpired, IDToken.DoesNotExist):
return None, None
def _validate_claims(request, claims):
"""
Validates the claims of an IDToken for use with OIDC RP-Initiated Logout.
"""
validator = oauth2_settings.OAUTH2_VALIDATOR_CLASS()
# Verification of `iss` claim is mandated by OIDC RP-Initiated Logout specs.
if "iss" not in claims or claims["iss"] != validator.get_oidc_issuer_endpoint(request):
# IDToken was not issued by this OP, or it can not be verified.
return False
return True
@method_decorator(login_not_required, name="dispatch")
|
UserInfoView
|
python
|
google__jax
|
tests/multiprocess/array_test.py
|
{
"start": 35541,
"end": 41609
}
|
class ____(jt_multiprocess.MultiProcessTest):
def test_cross_host_transfer_single_device_sharding(self):
x = np.arange(64).reshape(8, 8)
src_pid = 0
dst_pid = 1
src_sharding = jax.sharding.SingleDeviceSharding(
jax.local_devices(process_index=src_pid)[0])
dst_sharding = jax.sharding.SingleDeviceSharding(
jax.local_devices(process_index=dst_pid)[0])
y = jax.device_put(x, src_sharding)
z = jax.device_put(y, dst_sharding)
if jax.process_index() == dst_pid:
self.assertLen(z.addressable_shards, 1)
np.testing.assert_array_equal(z.addressable_shards[0].data, x)
else:
self.assertEmpty(z.addressable_shards)
def test_cross_host_transfer_named_sharding(self):
x = np.arange(64).reshape(8, 8)
n_local = jax.local_device_count()
src_pid = 0
dst_pid = 1
src_sharding = jax.sharding.NamedSharding(
jax.make_mesh((n_local,), ("x",),
devices=jax.local_devices(process_index=src_pid),
axis_types=(jax.sharding.AxisType.Explicit,)),
P("x"))
dst_sharding = jax.sharding.NamedSharding(
jax.make_mesh((n_local,), ("x",),
devices=jax.local_devices(process_index=dst_pid),
axis_types=(jax.sharding.AxisType.Explicit,)),
P("x"))
y = jax.device_put(x, src_sharding)
z = jax.device_put(y, dst_sharding)
if jax.process_index() == dst_pid:
self.assertLen(z.addressable_shards, n_local)
for shard in z.addressable_shards:
np.testing.assert_array_equal(shard.data, x[shard.index])
else:
self.assertEmpty(z.addressable_shards)
def test_cross_host_transfer_named_sharding_replicated(self):
x = np.arange(64).reshape(8, 8)
n_dev = jax.device_count() // 2
src_sharding = jax.sharding.NamedSharding(
jax.make_mesh((n_dev,), ("x",), devices=jax.devices()[:n_dev],
axis_types=(jax.sharding.AxisType.Explicit,)),
P()
)
dst_sharding = jax.sharding.NamedSharding(
jax.make_mesh((n_dev,), ("x",), devices=jax.devices()[n_dev:],
axis_types=(jax.sharding.AxisType.Explicit,)),
P()
)
y = jax.device_put(x, src_sharding)
z = jax.device_put(y, dst_sharding)
for shard in z.addressable_shards:
np.testing.assert_array_equal(shard.data, x[shard.index])
def test_cross_host_transfer_batched(self):
backend = xb.get_backend()
if "cuda" in backend.platform_version:
self.skipTest(
"The CUDA plugin does not support batched cross-host transfers."
)
num_arrays = 3
xs = []
for i in range(1, num_arrays + 1):
xs.append(jnp.arange(64 * i).reshape(8, 8 * i))
# TODO(emilyaf): Smaller sizes fail on TPU because the dst buffer size
# returned by TransferSizeUtil::ShapeSizeCompact is larger than the src
# buffer size. Investigate this further.
# xs.append(jnp.arange(16 * i).reshape(8, 2 * i))
xs[0] = xs[0].astype(jnp.float32)
n_local = jax.local_device_count()
src_pid = 0
dst_pid = 1
src_sharding = jax.sharding.NamedSharding(
jax.make_mesh((n_local,), ("x",),
devices=jax.local_devices(process_index=src_pid),
axis_types=(jax.sharding.AxisType.Explicit,)),
P("x"))
dst_sharding = jax.sharding.NamedSharding(
jax.make_mesh((n_local,), ("x",),
devices=jax.local_devices(process_index=dst_pid),
axis_types=(jax.sharding.AxisType.Explicit,)),
P("x"))
ys = jax.device_put(xs, src_sharding)
copy_semantics = xc.ArrayCopySemantics.ALWAYS_COPY
zs = xc.batched_copy_array_to_devices_with_sharding(
ys, [dst_sharding._internal_device_list] * num_arrays,
[dst_sharding] * num_arrays, [copy_semantics] * num_arrays)
for (x, z) in zip(xs, zs):
if jax.process_index() == dst_pid:
self.assertLen(z.addressable_shards, n_local)
for shard in z.addressable_shards:
np.testing.assert_array_equal(shard.data, x[shard.index])
else:
self.assertEmpty(z.addressable_shards)
@jtu.skip_on_devices("cpu")
def test_device_to_cpu_transfer_jit(self):
x = jnp.arange(64).reshape(8, 8)
with self.assertWarnsRegex(
DeprecationWarning,
r"backend and device argument on jit is deprecated",
):
cpu_transfer_f = jax.jit(lambda x: x + 1, backend="cpu")
cpu_transfer_f(x) # Should not raise a cross-host transfer error.
@jtu.skip_on_devices("cpu")
def test_device_put_to_cpu(self):
x = jnp.arange(64).reshape(8, 8)
devices = jax.devices()
cpu_devices = jax.devices(backend="cpu")
num_devices = min(len(devices), len(cpu_devices))
# Create CPU and GPU/TPU shardings that are not fully addressable.
cpu_sharding = jax.sharding.NamedSharding(
jax.make_mesh(
(num_devices,), ("x",), devices=cpu_devices[:num_devices],
axis_types=(jax.sharding.AxisType.Explicit,)),
P("x"))
sharding = jax.sharding.NamedSharding(
jax.make_mesh(
(num_devices,), ("x",), devices=devices[:num_devices],
axis_types=(jax.sharding.AxisType.Explicit,)),
P("x"))
y = jax.device_put(x, sharding)
# device_put of a GPU/TPU array to the CPU sharding should raise a helpful
# error.
with self.assertRaisesRegex(
ValueError, ("For a cross-host reshard in multi-controller JAX|"
"device_put's second argument must be a Device")):
jax.device_put(y, cpu_sharding)
if __name__ == "__main__":
if portpicker is None:
socket_port = 12345
else:
socket_port = portpicker.pick_unused_port()
jax.config.update(
"jax_cross_host_transfer_socket_address", f"127.0.0.1:{socket_port}")
# Too small for good performance, but set to avoid oom in msan tests.
jax.config.update(
"jax_cross_host_transfer_transfer_size",
64 * 1024,
)
jt_multiprocess.main()
|
CrossHostTransferTest
|
python
|
PyCQA__pylint
|
tests/testutils/_primer/test_primer.py
|
{
"start": 1668,
"end": 4000
}
|
class ____:
@pytest.mark.parametrize(
"directory",
[
pytest.param(p, id=str(p.relative_to(FIXTURES_PATH)))
for p in FIXTURES_PATH.iterdir()
if p.is_dir() and p.name != "batched" # tested separately
],
)
def test_compare(self, directory: Path) -> None:
"""Test for the standard case.
Directory in 'fixtures/' with 'main.json', 'pr.json' and 'expected.txt'.
"""
self.__assert_expected(directory)
def test_compare_batched(self) -> None:
fixture = FIXTURES_PATH / "batched"
self.__assert_expected(
fixture,
fixture / "main_BATCHIDX.json",
fixture / "pr_BATCHIDX.json",
batches=2,
)
def test_truncated_compare(self) -> None:
"""Test for the truncation of comments that are too long."""
max_comment_length = 525
directory = FIXTURES_PATH / "message_changed"
with patch(
"pylint.testutils._primer.primer_compare_command.MAX_GITHUB_COMMENT_LENGTH",
max_comment_length,
):
content = self.__assert_expected(
directory, expected_file=directory / "expected_truncated.txt"
)
assert len(content) < max_comment_length
@staticmethod
def __assert_expected(
directory: Path,
main: Path | None = None,
pr: Path | None = None,
expected_file: Path | None = None,
batches: int = 0,
) -> str:
if main is None:
main = directory / "main.json"
if pr is None:
pr = directory / "pr.json"
if expected_file is None:
expected_file = directory / "expected.txt"
new_argv = [*DEFAULT_ARGS, f"--base-file={main}", f"--new-file={pr}"]
if batches:
new_argv.append(f"--batches={batches}")
with patch("sys.argv", new_argv):
Primer(PRIMER_DIRECTORY, PACKAGES_TO_PRIME_PATH).run()
with open(PRIMER_DIRECTORY / "comment.txt", encoding="utf8") as f:
content = f.read()
with open(expected_file, encoding="utf8") as f:
expected = f.read()
# rstrip so the expected.txt can end with a newline
assert content == expected.rstrip("\n")
return content
|
TestPrimer
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/migrations/0011_delete-url.py
|
{
"start": 71,
"end": 344
}
|
class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0010_migrate_domain_data"),
]
operations = [
migrations.RemoveField(
model_name="domain",
name="url",
),
]
|
Migration
|
python
|
walkccc__LeetCode
|
solutions/687. Longest Univalue Path/687.py
|
{
"start": 0,
"end": 604
}
|
class ____:
def longestUnivaluePath(self, root: TreeNode | None) -> int:
ans = 0
def longestUnivaluePathDownFrom(root: TreeNode | None) -> int:
nonlocal ans
if not root:
return 0
l = longestUnivaluePathDownFrom(root.left)
r = longestUnivaluePathDownFrom(root.right)
arrowLeft = l + 1 if root.left and root.left.val == root.val else 0
arrowRight = r + 1 if root.right and root.right.val == root.val else 0
ans = max(ans, arrowLeft + arrowRight)
return max(arrowLeft, arrowRight)
longestUnivaluePathDownFrom(root)
return ans
|
Solution
|
python
|
tornadoweb__tornado
|
tornado/test/websocket_test.py
|
{
"start": 6380,
"end": 6879
}
|
class ____(AsyncHTTPTestCase):
def setUp(self):
super().setUp()
self.conns_to_close = []
def tearDown(self):
for conn in self.conns_to_close:
conn.close()
super().tearDown()
@gen.coroutine
def ws_connect(self, path, **kwargs):
ws = yield websocket_connect(
"ws://127.0.0.1:%d%s" % (self.get_http_port(), path), **kwargs
)
self.conns_to_close.append(ws)
raise gen.Return(ws)
|
WebSocketBaseTestCase
|
python
|
google__jax
|
tests/pallas/tpu_pallas_state_test.py
|
{
"start": 6934,
"end": 11120
}
|
class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if not jtu.is_device_tpu_at_least(4):
self.skipTest("Only supported on TPU v4+")
def test_can_create_tensorcore_mesh(self):
_ = pltpu.create_tensorcore_mesh("x")
def test_kernel_helper_basic(self):
mesh = pltpu.create_tensorcore_mesh("x")
def body(x_ref, o_ref):
pltpu.sync_copy(x_ref, o_ref)
x = jnp.arange(8 * 128, dtype=jnp.int32).reshape((8, 128))
with self.subTest("decorator"):
result = pl.kernel(body, out_shape=x, mesh=mesh)(x)
np.testing.assert_array_equal(result, x)
with self.subTest("decorator_factory"):
result = pl.kernel(out_shape=x, mesh=mesh)(body)(x)
np.testing.assert_array_equal(result, x)
def test_empty_core_map_raises_error(self):
@jax.jit
def f(x):
y = jnp.zeros_like(x)
def inner(refs):
del refs # Unused.
@pl.core_map(pltpu.create_tensorcore_mesh("x"))
def _():
pass
_, y = pl.run_state(inner)((x, y))
return y
x = jnp.arange(8 * 128, dtype=jnp.int32).reshape((8, 128))
with self.assertRaisesRegex(Exception,
"Attempted to lower core_map without discharging."):
f(x)
def test_can_query_core_index(self):
mesh = pltpu.create_tensorcore_mesh("x")
slc_size = 16 // mesh.shape["x"]
@jax.jit
def f(x):
@pl.kernel(
out_shape=x,
mesh=mesh,
scratch_shapes=[
pltpu.VMEM((slc_size, 128), x.dtype),
pltpu.VMEM((slc_size, 128), x.dtype),
pltpu.SemaphoreType.DMA,
],
)
def kernel(x_ref, y_ref, x_vmem_ref, y_vmem_ref, sem):
num_cores = jax.lax.axis_size("x")
slc_size = 16 // num_cores
core_index = jax.lax.axis_index("x")
slc = pl.ds(core_index * slc_size, slc_size)
pltpu.async_copy(
x_ref.at[slc],
x_vmem_ref,
sem,
).wait()
y = x_vmem_ref[...] + jax.lax.axis_index("x")
y_vmem_ref[...] = y
pltpu.async_copy(y_vmem_ref, y_ref.at[slc], sem).wait()
return kernel(x)
num_cores = jax.devices()[0].num_cores
x = jnp.arange(16 * 128, dtype=jnp.int32).reshape((16, 128))
expected_out = (
x.reshape((num_cores, -1, 128)) + jnp.arange(num_cores)[..., None, None]
).reshape(x.shape)
y = f(x)
np.testing.assert_array_equal(y, expected_out)
def test_raises_on_captured_arrays(self):
@jax.jit
def f(x):
y = jnp.zeros_like(x)
@pl.kernel(out_shape=x,
mesh=pltpu.create_tensorcore_mesh("x"),
scratch_shapes=dict(tmp_ref=pltpu.VMEM(x.shape, x.dtype)))
def kernel(x_ref, out_ref, tmp_ref):
pltpu.sync_copy(x_ref, tmp_ref)
tmp_ref[...] += y
out_ref[...] = tmp_ref[...]
return kernel(x)
x = jnp.arange(8 * 128, dtype=jnp.int32).reshape((8, 128))
with self.assertRaisesRegex(Exception, "core_map .* captures constants"):
f(x)
def test_kernel_helper_with_scratch(self):
mesh = pltpu.create_tensorcore_mesh("x")
def body(x_ref, o_ref, scratch_ref):
pltpu.sync_copy(x_ref, scratch_ref)
scratch_ref[...] += 1
pltpu.sync_copy(scratch_ref, o_ref)
x = jnp.arange(8 * 128, dtype=jnp.int32).reshape((8, 128))
result = pl.kernel(
body, out_shape=x, mesh=mesh,
scratch_shapes=dict(scratch_ref=pltpu.VMEM(x.shape, x.dtype)))(x)
np.testing.assert_array_equal(result, x + 1)
def test_kernel_helper_with_out_tree(self):
mesh = pltpu.create_tensorcore_mesh("x")
def body(x_ref, o1_ref, o2_ref, scratch_ref):
pltpu.sync_copy(x_ref, o1_ref)
pltpu.sync_copy(x_ref, scratch_ref)
scratch_ref[...] += 1
pltpu.sync_copy(scratch_ref, o2_ref)
x = jnp.arange(8 * 128, dtype=jnp.int32).reshape((8, 128))
result1, result2 = pl.kernel(
body, out_shape=[x, x], mesh=mesh,
scratch_shapes=[pltpu.VMEM(x.shape, x.dtype)])(x)
np.testing.assert_array_equal(result1, x)
np.testing.assert_array_equal(result2, x + 1)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
CoreMapTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/server_lib_same_variables_clear_test.py
|
{
"start": 1051,
"end": 2634
}
|
class ____(test.TestCase):
# Verifies behavior of tf.Session.reset().
# TODO(b/34465411): Starting multiple servers with different configurations
# in the same test is flaky. Move this test case back into
# "server_lib_test.py" when this is no longer the case.
@test_util.run_deprecated_v1
def testSameVariablesClear(self):
server = server_lib.Server.create_local_server()
# Creates a graph with 2 variables.
v0 = variables.Variable([[2, 1]], name="v0")
v1 = variables.Variable([[1], [2]], name="v1")
v2 = math_ops.matmul(v0, v1)
# Verifies that both sessions connecting to the same target return
# the same results.
sess_1 = session.Session(server.target)
sess_2 = session.Session(server.target)
sess_1.run(variables.global_variables_initializer())
self.assertAllEqual([[4]], sess_1.run(v2))
self.assertAllEqual([[4]], sess_2.run(v2))
# Resets target. sessions abort. Use sess_2 to verify.
session.Session.reset(server.target)
with self.assertRaises(errors_impl.AbortedError):
self.assertAllEqual([[4]], sess_2.run(v2))
# Connects to the same target. Device memory for the variables would have
# been released, so they will be uninitialized.
sess_2 = session.Session(server.target)
with self.assertRaises(errors_impl.FailedPreconditionError):
sess_2.run(v2)
# Reinitializes the variables.
sess_2.run(variables.global_variables_initializer())
self.assertAllEqual([[4]], sess_2.run(v2))
sess_2.close()
if __name__ == "__main__":
test.main()
|
SameVariablesClearTest
|
python
|
networkx__networkx
|
benchmarks/benchmarks/benchmark_many_components.py
|
{
"start": 24,
"end": 915
}
|
class ____:
"""Use atlas6() as a benchmarking case to probe for performance on graphs
with many connected components.
``atlas6()`` is all of the graphs with at most 6 nodes and at least one edge
that are connected and not isomorphic to one another (142 components in total).
See the atlas6 gallery example for more info.
"""
def setup(self):
atlas = nx.graph_atlas_g()[
3:209
] # 0, 1, 2 => no edges. 208 is last 6 node graph
U = nx.Graph()
for G in atlas:
if (nx.number_connected_components(G) == 1) and (
not nx.isomorphism.GraphMatcher(U, G).subgraph_is_isomorphic()
):
U = nx.disjoint_union(U, G)
self.G = U
def time_single_source_all_shortest_paths(self):
_ = dict(nx.single_source_all_shortest_paths(self.G, 500))
|
ManyComponentsBenchmark
|
python
|
jina-ai__jina
|
tests/unit/serve/executors/test_executor.py
|
{
"start": 872,
"end": 1006
}
|
class ____(Executor):
@requests
def foo(self, docs, **kwargs):
docs.texts = [self.workspace for _ in docs]
|
WorkspaceExec
|
python
|
pytorch__pytorch
|
benchmarks/tensorexpr/broadcast.py
|
{
"start": 4570,
"end": 9601
}
|
class ____(benchmark.Benchmark):
# List of customization class variables.
op_str = None
binary_op_pt_func = None
binary_op_np_func = None
unary_op_pt_func = None
unary_op_np_func = None
split_input = True
def __init__(self, mode, device, dtype, M, N, K):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.K = K
self.d1 = self.rand(
[M, N], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.d2 = self.rand(
[K, 1, N], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.d3 = self.rand(
[M, N], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.d4 = self.rand(
[K, M, 1], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.inputs = [self.d1, self.d2, self.d3, self.d4]
def _eval(self, d1, d2, d3, d4, binary_op, unary_op):
if not binary_op:
def binary_op(x, y):
return x + y
if not unary_op:
def unary_op(x):
return x
if self.split_input:
d1 = unary_op(d1)
d2 = unary_op(d2)
d3 = unary_op(d3)
d4 = unary_op(d4)
else:
d1, d2, d3, d4 = (
unary_op(d1),
unary_op(d2),
unary_op(d1 + 0.001),
unary_op(d4),
)
a = binary_op(d1, d2)
b = binary_op(d3, d4)
c = a + b
return c
def forward(self, d1, d2, d3, d4):
binary_op = self.__class__.binary_op_pt_func
unary_op = self.__class__.unary_op_pt_func
return self._eval(d1, d2, d3, d4, binary_op, unary_op)
def reference(self):
binary_op = self.__class__.binary_op_np_func
unary_op = self.__class__.unary_op_np_func
[d1, d2, d3, d4] = [self.numpy(d) for d in [self.d1, self.d2, self.d3, self.d4]]
return self._eval(d1, d2, d3, d4, binary_op, unary_op)
def config(self):
return [self.M, self.N, self.K]
@classmethod
def module(cls):
return "broadcast_" + cls.op_str
def memory_workload(self):
input_count = len(self.inputs)
if self.mode == "fwd":
if self.split_input:
sol_count = 1
algorithmic_count = 1
else:
sol_count = 1
algorithmic_count = 1
else:
if self.split_input:
sol_count = 1
algorithmic_count = input_count
else:
sol_count = 1
algorithmic_count = input_count
buffer_size = self.M * self.N * self.K * 4
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
@staticmethod
def default_configs():
return [[1 << 8, 1 << 7, 1 << 9]]
def register_broadcast_ops():
binary_op_list = [
["mul", operator.mul],
["add", operator.add],
["sub", operator.sub],
["div", lambda a, b: a / (b + 1e-4)],
[
"pow",
torch.pow,
np.power,
], # no fuson triggered
["max", torch.max, np.maximum],
["min", torch.min, np.minimum],
]
unary_op_list = [
["erf", torch.erf, np.erf],
["exp", torch.exp, np.exp],
["sin", torch.sin, np.sin],
["cos", torch.cos, np.cos],
]
for split_input, binary_op in itertools.product([True, False], binary_op_list):
# Make a copy of BroadcastBench
if len(binary_op) == 2:
[op_str, op_pt_func] = binary_op
op_np_func = op_pt_func
elif len(binary_op) == 3:
[op_str, op_pt_func, op_np_func] = binary_op
split_str = "split" if split_input else "shared"
op_str = split_str + "_" + op_str
bm_cls = type("BroadcastBench_" + op_str, (BroadcastBench,), {})
bm_cls.op_str = op_str
bm_cls.binary_op_pt_func = op_pt_func
bm_cls.binary_op_np_func = op_np_func
bm_cls.split_input = split_input
benchmark.register_benchmark_class(bm_cls)
for split_input, unary_op in itertools.product([True, False], unary_op_list):
# Make a copy of BroadcastBench
if len(unary_op) == 2:
[op_str, op_pt_func] = unary_op
op_np_func = op_pt_func
elif len(unary_op) == 3:
[op_str, op_pt_func, op_np_func] = unary_op
split_str = "split" if split_input else "shared"
op_str = split_str + "_" + op_str
bm_cls = type("BroadcastBench_" + op_str, (BroadcastBench,), {})
bm_cls.op_str = op_str
bm_cls.unary_op_pt_func = op_pt_func
bm_cls.unary_op_np_func = op_np_func
bm_cls.split_input = split_input
benchmark.register_benchmark_class(bm_cls)
register_broadcast_ops()
|
BroadcastBench
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.