language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/triggers/cloud_batch.py | {
"start": 1145,
"end": 6749
} | class ____(BaseTrigger):
"""
Cloud Batch trigger to check if templated job has been finished.
:param job_name: Required. Name of the job.
:param project_id: Required. the Google Cloud project ID in which the job was started.
:param location: Optional. the location where job is executed. If set to None then
the value of DEFAULT_BATCH_LOCATION will be used
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param poll_sleep: Polling period in seconds to check for the status
"""
def __init__(
self,
job_name: str,
project_id: str | None,
location: str = DEFAULT_BATCH_LOCATION,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
polling_period_seconds: float = 10,
timeout: float | None = None,
):
super().__init__()
self.project_id = project_id
self.job_name = job_name
self.location = location
self.gcp_conn_id = gcp_conn_id
self.polling_period_seconds = polling_period_seconds
self.timeout = timeout
self.impersonation_chain = impersonation_chain
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize class arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.cloud_batch.CloudBatchJobFinishedTrigger",
{
"project_id": self.project_id,
"job_name": self.job_name,
"location": self.location,
"gcp_conn_id": self.gcp_conn_id,
"polling_period_seconds": self.polling_period_seconds,
"timeout": self.timeout,
"impersonation_chain": self.impersonation_chain,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""
Fetch job status or yield certain Events.
Main loop of the class in where it is fetching the job status and yields certain Event.
If the job has status success then it yields TriggerEvent with success status, if job has
status failed - with error status and if the job is being deleted - with deleted status.
In any other case Trigger will wait for specified amount of time
stored in self.polling_period_seconds variable.
"""
timeout = self.timeout
hook = self._get_async_hook()
try:
while timeout is None or timeout > 0:
job: Job = await hook.get_batch_job(job_name=self.job_name)
status: JobStatus.State = job.status.state
if status == JobStatus.State.SUCCEEDED:
yield TriggerEvent(
{
"job_name": self.job_name,
"status": "success",
"message": "Job completed",
}
)
return
elif status == JobStatus.State.FAILED:
yield TriggerEvent(
{
"job_name": self.job_name,
"status": "error",
"message": f"Batch job with name {self.job_name} has failed its execution",
}
)
return
elif status == JobStatus.State.DELETION_IN_PROGRESS:
yield TriggerEvent(
{
"job_name": self.job_name,
"status": "deleted",
"message": f"Batch job with name {self.job_name} is being deleted",
}
)
return
else:
self.log.info("Current job status is: %s", status)
self.log.info("Sleeping for %s seconds.", self.polling_period_seconds)
if timeout is not None:
timeout -= self.polling_period_seconds
if timeout is None or timeout > 0:
await asyncio.sleep(self.polling_period_seconds)
except Exception as e:
self.log.exception("Exception occurred while checking for job completion.")
yield TriggerEvent({"status": "error", "message": str(e)})
return
self.log.exception("Job with name [%s] timed out", self.job_name)
yield TriggerEvent(
{
"job_name": self.job_name,
"status": "timed out",
"message": f"Batch job with name {self.job_name} timed out",
}
)
def _get_async_hook(self) -> CloudBatchAsyncHook:
return CloudBatchAsyncHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
| CloudBatchJobFinishedTrigger |
python | doocs__leetcode | lcp/LCP 67. 装饰树/Solution.py | {
"start": 192,
"end": 600
} | class ____:
def expandBinaryTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:
def dfs(root):
if root is None:
return None
l, r = dfs(root.left), dfs(root.right)
if l:
root.left = TreeNode(-1, l)
if r:
root.right = TreeNode(-1, None, r)
return root
return dfs(root)
| Solution |
python | coleifer__peewee | tests/sqlite_changelog.py | {
"start": 423,
"end": 495
} | class ____(TestModel):
name = TextField()
dob = DateField()
| Person |
python | apache__airflow | providers/opensearch/tests/unit/opensearch/conftest.py | {
"start": 1762,
"end": 4331
} | class ____:
def count(self, index: Any = None, body: Any = None):
return {"count": 1, "_shards": {"total": 1, "successful": 1, "skipped": 0, "failed": 0}}
def search(self, index=None, body=None, sort=None, size=None, from_=None):
return self.sample_log_response()
def sample_log_response(self):
return {
"_shards": {"failed": 0, "skipped": 0, "successful": 7, "total": 7},
"hits": {
"hits": [
{
"_id": "jdeZT4kBjAZqZnexVUxk",
"_source": {
"dag_id": "example_bash_operator",
"execution_date": "2023_07_09T07_47_32_000000",
"levelname": "INFO",
"message": "Some Message 1",
"event": "Some Message 1",
"task_id": "run_after_loop",
"try_number": "1",
"offset": 0,
},
"_type": "_doc",
},
{
"_id": "qteZT4kBjAZqZnexVUxl",
"_source": {
"dag_id": "example_bash_operator",
"execution_date": "2023_07_09T07_47_32_000000",
"levelname": "INFO",
"message": "Another Some Message 2",
"event": "Another Some Message 2",
"task_id": "run_after_loop",
"try_number": "1",
"offset": 1,
},
"_type": "_doc",
},
],
"max_score": 2.482621,
"total": {"relation": "eq", "value": 36},
},
"timed_out": False,
"took": 7,
}
@pytest.fixture
def mock_hook(monkeypatch):
monkeypatch.setattr(OpenSearchHook, "search", MockSearch.search)
monkeypatch.setattr(OpenSearchHook, "client", MockSearch.client)
monkeypatch.setattr(OpenSearchHook, "index", MockSearch.index)
@pytest.fixture(autouse=True)
def setup_connection(create_connection_without_db):
create_connection_without_db(
Connection(
conn_id="opensearch_default",
conn_type="opensearch",
host="myopensearch.com",
login="test_user",
password="test",
)
)
| MockClient |
python | langchain-ai__langchain | libs/langchain/langchain_classic/evaluation/parsing/json_schema.py | {
"start": 188,
"end": 3261
} | class ____(StringEvaluator):
"""An evaluator that validates a JSON prediction against a JSON schema reference.
This evaluator checks if a given JSON prediction conforms to the provided JSON schema.
If the prediction is valid, the score is True (no errors). Otherwise, the score is False (error occurred).
Attributes:
requires_input: Whether the evaluator requires input.
requires_reference: Whether the evaluator requires reference.
evaluation_name: The name of the evaluation.
Examples:
evaluator = JsonSchemaEvaluator()
result = evaluator.evaluate_strings(
prediction='{"name": "John", "age": 30}',
reference={
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer"}
}
}
)
assert result["score"] is not None
""" # noqa: E501
def __init__(self, **_: Any) -> None:
"""Initializes the JsonSchemaEvaluator.
Raises:
ImportError: If the jsonschema package is not installed.
"""
super().__init__()
try:
import jsonschema # noqa: F401
except ImportError as e:
msg = (
"The JsonSchemaEvaluator requires the jsonschema package."
" Please install it with `pip install jsonschema`."
)
raise ImportError(msg) from e
@property
def requires_input(self) -> bool:
"""Returns whether the evaluator requires input."""
return False
@property
def requires_reference(self) -> bool:
"""Returns whether the evaluator requires reference."""
return True
@property
def evaluation_name(self) -> str:
"""Returns the name of the evaluation."""
return "json_schema_validation"
def _parse_json(self, node: Any) -> dict | list | None | float | bool | int | str:
if isinstance(node, str):
return parse_json_markdown(node)
if hasattr(node, "model_json_schema") and callable(node.model_json_schema):
# Pydantic v2 model
return node.model_json_schema()
if hasattr(node, "schema") and callable(node.schema):
# Pydantic v1 model
return node.schema()
return node
def _validate(self, prediction: Any, schema: Any) -> dict:
from jsonschema import ValidationError, validate
try:
validate(instance=prediction, schema=schema)
except ValidationError as e:
return {"score": False, "reasoning": repr(e)}
return {"score": True}
@override
def _evaluate_strings(
self,
prediction: str | Any,
input: str | Any = None,
reference: str | Any = None,
**kwargs: Any,
) -> dict:
parsed_prediction = self._parse_json(prediction)
schema = self._parse_json(reference)
return self._validate(parsed_prediction, schema)
| JsonSchemaEvaluator |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF009_attrs_auto_attribs.py | {
"start": 847,
"end": 965
} | class ____:
a: str = 0
b = field()
c: int = foo()
d = list()
@mutable() # auto_attribs = None => True
| C |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_organization_integration_migrate_opsgenie.py | {
"start": 93,
"end": 1790
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
self.organization = self.create_organization(owner=self.user)
def get_path(self, integration_id) -> str:
return f"/api/0/organizations/{self.organization.slug}/integrations/{integration_id}/migrate-opsgenie/"
def test_no_integration(self) -> None:
path = self.get_path(integration_id=-1)
response = self.client.put(path, format="json")
assert response.status_code == 404
def test_not_opsgenie_integration(self) -> None:
integration = self.create_integration(
organization=self.organization, provider="jira", external_id="jira:1"
)
path = self.get_path(integration_id=integration.id)
response = self.client.put(path, format="json")
assert response.status_code == 400
@patch("sentry.integrations.api.endpoints.organization_integration_migrate_opsgenie.metrics")
@patch(
"sentry.integrations.opsgenie.integration.OpsgenieIntegration.schedule_migrate_opsgenie_plugin"
)
def test_simple(self, mock_migrate_opsgenie_plugin: MagicMock, mock_metrics: MagicMock) -> None:
integration = self.create_integration(
organization=self.organization, provider="opsgenie", external_id="cool_opsgenie"
)
path = self.get_path(integration_id=integration.id)
response = self.client.put(path, format="json")
assert response.status_code == 202
assert mock_migrate_opsgenie_plugin.called
mock_metrics.incr.assert_any_call("opsgenie.migration_attempt", skip_internal=False)
| OrganizationIntegrationMigrateOpsgenieTest |
python | django__django | tests/custom_pk/models.py | {
"start": 817,
"end": 901
} | class ____(models.Model):
id = MyWrapperField(primary_key=True, db_index=True)
| Bar |
python | milvus-io__pymilvus | tests/test_bulk_writer_validators.py | {
"start": 8087,
"end": 9975
} | class ____:
def test_valid_list(self):
"""Test valid list of integers"""
result = int8_vector_validator([1, 2, 3], 3)
assert result == [1, 2, 3]
def test_invalid_list_length(self):
"""Test list with wrong dimension"""
with pytest.raises(MilvusException, match="array's length must be equal to vector dimension"):
int8_vector_validator([1, 2], 3)
def test_invalid_list_type(self):
"""Test list with non-int elements"""
with pytest.raises(MilvusException, match="array's element must be int value"):
int8_vector_validator([1, 2.0, 3], 3)
def test_valid_numpy_int8(self):
"""Test valid numpy array with int8"""
arr = np.array([1, 2, 3], dtype=np.int8)
result = int8_vector_validator(arr, 3)
assert result == [1, 2, 3]
def test_invalid_numpy_dtype(self):
"""Test numpy array with invalid dtype"""
arr = np.array([1, 2, 3], dtype=np.int32)
with pytest.raises(MilvusException, match='dtype must be "int8"'):
int8_vector_validator(arr, 3)
def test_invalid_numpy_shape(self):
"""Test numpy array with wrong shape"""
arr = np.array([[1, 2], [3, 4]], dtype=np.int8)
with pytest.raises(MilvusException, match="shape must not be one dimension"):
int8_vector_validator(arr, 4)
def test_invalid_numpy_length(self):
"""Test numpy array with wrong dimension"""
arr = np.array([1, 2], dtype=np.int8)
with pytest.raises(MilvusException, match="length must be equal to vector dimension"):
int8_vector_validator(arr, 3)
def test_invalid_type(self):
"""Test with invalid input type"""
with pytest.raises(MilvusException, match="only accept numpy.ndarray or list"):
int8_vector_validator("invalid", 3)
| TestInt8VectorValidator |
python | mlflow__mlflow | mlflow/metrics/genai/prompts/v1.py | {
"start": 19150,
"end": 22821
} | class ____:
definition = (
"Relevance encompasses the appropriateness, significance, and applicability of the output "
"with respect to both the input and context. Scores should reflect the extent to which the "
"output directly addresses the question provided in the input, given the provided context."
)
grading_prompt = (
"Relevance: Below are the details for different scores:"
"- Score 1: The output doesn't mention anything about the question or is completely "
"irrelevant to the provided context.\n"
"- Score 2: The output provides some relevance to the question and is somehow related "
"to the provided context.\n"
"- Score 3: The output mostly answers the question and is largely consistent with the "
"provided context.\n"
"- Score 4: The output answers the question and is consistent with the provided context.\n"
"- Score 5: The output answers the question comprehensively using the provided context."
)
grading_context_columns = ["context"]
parameters = default_parameters
default_model = default_model
example_score_2 = EvaluationExample(
input="How is MLflow related to Databricks?",
output="Databricks is a data engineering and analytics platform designed to help "
"organizations process and analyze large amounts of data. Databricks is a company "
"specializing in big data and machine learning solutions.",
score=2,
justification="The output provides relevant information about Databricks, mentioning it "
"as a company specializing in big data and machine learning solutions. However, it doesn't "
"directly address how MLflow is related to Databricks, which is the specific question "
"asked in the input. Therefore, the output is only somewhat related to the provided "
"context.",
grading_context={
"context": "MLflow is an open-source platform for managing the end-to-end machine "
"learning (ML) lifecycle. It was developed by Databricks, a company that specializes "
"in big data and machine learning solutions. MLflow is designed to address the "
"challenges that data scientists and machine learning engineers face when developing, "
"training, and deploying machine learning models."
},
)
example_score_4 = EvaluationExample(
input="How is MLflow related to Databricks?",
output="MLflow is a product created by Databricks to enhance the efficiency of machine "
"learning processes.",
score=4,
justification="The output provides a relevant and accurate statement about the "
"relationship between MLflow and Databricks. While it doesn't provide extensive detail, "
"it still offers a substantial and meaningful response. To achieve a score of 5, the "
"response could be further improved by providing additional context or details about "
"how MLflow specifically functions within the Databricks ecosystem.",
grading_context={
"context": "MLflow is an open-source platform for managing the end-to-end machine "
"learning (ML) lifecycle. It was developed by Databricks, a company that specializes "
"in big data and machine learning solutions. MLflow is designed to address the "
"challenges that data scientists and machine learning engineers face when developing, "
"training, and deploying machine learning models."
},
)
default_examples = [example_score_2, example_score_4]
| RelevanceMetric |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/dt_diamond/package.py | {
"start": 217,
"end": 621
} | class ____(Package):
"""This package has an indirect diamond dependency on dt-diamond-bottom"""
homepage = "http://www.example.com"
url = "http://www.example.com/dt-diamond-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
depends_on("dt-diamond-left")
depends_on("dt-diamond-right")
depends_on("c", type="build")
depends_on("cxx", type="build")
| DtDiamond |
python | imageio__imageio | imageio/plugins/spe.py | {
"start": 5359,
"end": 13810
} | class ____:
"""Extract metadata written by the SDT-control software
Some of it is encoded in the comment strings
(see :py:meth:`parse_comments`). Also, date and time are encoded in a
peculiar way (see :py:meth:`get_datetime`). Use :py:meth:`extract_metadata`
to update the metadata dict.
"""
months = {
# Convert SDT-control month strings to month numbers
"Jän": 1,
"Jan": 1,
"Feb": 2,
"Mär": 3,
"Mar": 3,
"Apr": 4,
"Mai": 5,
"May": 5,
"Jun": 6,
"Jul": 7,
"Aug": 8,
"Sep": 9,
"Okt": 10,
"Oct": 10,
"Nov": 11,
"Dez": 12,
"Dec": 12,
}
sequence_types = {
# TODO: complete
"SEQU": "standard",
"SETO": "TOCCSL",
"KINE": "kinetics",
"SEAR": "arbitrary",
}
class CommentDesc:
"""Describe how to extract a metadata entry from a comment string"""
n: int
"""Which of the 5 SPE comment fields to use."""
slice: slice
"""Which characters from the `n`-th comment to use."""
cvt: Callable[[str], Any]
"""How to convert characters to something useful."""
scale: Union[None, float]
"""Optional scaling factor for numbers"""
def __init__(
self,
n: int,
slice: slice,
cvt: Callable[[str], Any] = str,
scale: Optional[float] = None,
):
self.n = n
self.slice = slice
self.cvt = cvt
self.scale = scale
comment_fields = {
(5, 0): {
"sdt_major_version": CommentDesc(4, slice(66, 68), int),
"sdt_minor_version": CommentDesc(4, slice(68, 70), int),
"sdt_controller_name": CommentDesc(4, slice(0, 6), str),
"exposure_time": CommentDesc(1, slice(64, 73), float, 10**-6),
"color_code": CommentDesc(4, slice(10, 14), str),
"detection_channels": CommentDesc(4, slice(15, 16), int),
"background_subtraction": CommentDesc(4, 14, lambda x: x == "B"),
"em_active": CommentDesc(4, 32, lambda x: x == "E"),
"em_gain": CommentDesc(4, slice(28, 32), int),
"modulation_active": CommentDesc(4, 33, lambda x: x == "A"),
"pixel_size": CommentDesc(4, slice(25, 28), float, 0.1),
"sequence_type": CommentDesc(
4, slice(6, 10), lambda x: __class__.sequence_types[x]
),
"grid": CommentDesc(4, slice(16, 25), float, 10**-6),
"n_macro": CommentDesc(1, slice(0, 4), int),
"delay_macro": CommentDesc(1, slice(10, 19), float, 10**-3),
"n_mini": CommentDesc(1, slice(4, 7), int),
"delay_mini": CommentDesc(1, slice(19, 28), float, 10**-6),
"n_micro": CommentDesc(1, slice(7, 10), int),
"delay_micro": CommentDesc(1, slice(28, 37), float, 10**-6),
"n_subpics": CommentDesc(1, slice(7, 10), int),
"delay_shutter": CommentDesc(1, slice(73, 79), float, 10**-6),
"delay_prebleach": CommentDesc(1, slice(37, 46), float, 10**-6),
"bleach_time": CommentDesc(1, slice(46, 55), float, 10**-6),
"recovery_time": CommentDesc(1, slice(55, 64), float, 10**-6),
},
(5, 1): {
"bleach_piezo_active": CommentDesc(4, slice(34, 35), lambda x: x == "z")
},
}
@staticmethod
def get_comment_version(comments: Sequence[str]) -> Tuple[int, int]:
"""Get the version of SDT-control metadata encoded in the comments
Parameters
----------
comments
List of SPE file comments, typically ``metadata["comments"]``.
Returns
-------
Major and minor version. ``-1, -1`` if detection failed.
"""
if comments[4][70:76] != "COMVER":
return -1, -1
try:
return int(comments[4][76:78]), int(comments[4][78:80])
except ValueError:
return -1, -1
@staticmethod
def parse_comments(
comments: Sequence[str], version: Tuple[int, int]
) -> Dict[str, Any]:
"""Extract SDT-control metadata from comments
Parameters
----------
comments
List of SPE file comments, typically ``metadata["comments"]``.
version
Major and minor version of SDT-control metadata format
Returns
-------
Dict of metadata
"""
sdt_md = {}
for minor in range(version[1] + 1):
# Metadata with same major version is backwards compatible.
# Fields are specified incrementally in `comment_fields`.
# E.g. if the file has version 5.01, `comment_fields[5, 0]` and
# `comment_fields[5, 1]` need to be decoded.
try:
cmt = __class__.comment_fields[version[0], minor]
except KeyError:
continue
for name, spec in cmt.items():
try:
v = spec.cvt(comments[spec.n][spec.slice])
if spec.scale is not None:
v *= spec.scale
sdt_md[name] = v
except Exception as e:
warnings.warn(
f"Failed to decode SDT-control metadata field `{name}`: {e}"
)
sdt_md[name] = None
if version not in __class__.comment_fields:
supported_ver = ", ".join(
map(lambda x: f"{x[0]}.{x[1]:02}", __class__.comment_fields)
)
warnings.warn(
f"Unsupported SDT-control metadata version {version[0]}.{version[1]:02}. "
f"Only versions {supported_ver} are supported. "
"Some or all SDT-control metadata may be missing."
)
comment = comments[0] + comments[2]
sdt_md["comment"] = comment.strip()
return sdt_md
@staticmethod
def get_datetime(date: str, time: str) -> Union[datetime, None]:
"""Turn date and time saved by SDT-control into proper datetime object
Parameters
----------
date
SPE file date, typically ``metadata["date"]``.
time
SPE file date, typically ``metadata["time_local"]``.
Returns
-------
File's datetime if parsing was succsessful, else None.
"""
try:
month = __class__.months[date[2:5]]
return datetime(
int(date[5:9]),
month,
int(date[0:2]),
int(time[0:2]),
int(time[2:4]),
int(time[4:6]),
)
except Exception as e:
logger.info(f"Failed to decode date from SDT-control metadata: {e}.")
@staticmethod
def extract_metadata(meta: Mapping, char_encoding: str = "latin1"):
"""Extract SDT-control metadata from SPE metadata
SDT-control stores some metadata in comments and other fields.
Extract them and remove unused entries.
Parameters
----------
meta
SPE file metadata. Modified in place.
char_encoding
Character encoding used to decode strings in the metadata.
"""
comver = __class__.get_comment_version(meta["comments"])
if any(c < 0 for c in comver):
# This file most likely was not created by SDT-control
logger.debug("SDT-control comments not found.")
return
sdt_meta = __class__.parse_comments(meta["comments"], comver)
meta.pop("comments")
meta.update(sdt_meta)
# Get date and time in a usable format
dt = __class__.get_datetime(meta["date"], meta["time_local"])
if dt:
meta["datetime"] = dt
meta.pop("date")
meta.pop("time_local")
sp4 = meta["spare_4"]
try:
meta["modulation_script"] = sp4.decode(char_encoding)
meta.pop("spare_4")
except UnicodeDecodeError:
warnings.warn(
"Failed to decode SDT-control laser "
"modulation script. Bad char_encoding?"
)
# Get rid of unused data
meta.pop("time_utc")
meta.pop("exposure_sec")
| SDTControlSpec |
python | apache__airflow | providers/opsgenie/src/airflow/providers/opsgenie/hooks/opsgenie.py | {
"start": 1080,
"end": 6038
} | class ____(BaseHook):
"""
This hook allows you to post alerts to Opsgenie.
Accepts a connection that has an Opsgenie API key as the connection's password.
This hook sets the domain to conn_id.host, and if not set will default
to ``https://api.opsgenie.com``.
Each Opsgenie API key can be pre-configured to a team integration.
You can override these defaults in this hook.
:param opsgenie_conn_id: The name of the Opsgenie connection to use
"""
conn_name_attr = "opsgenie_conn_id"
default_conn_name = "opsgenie_default"
conn_type = "opsgenie"
hook_name = "Opsgenie"
def __init__(self, opsgenie_conn_id: str = "opsgenie_default") -> None:
super().__init__()
self.conn_id = opsgenie_conn_id
configuration = Configuration()
conn = self.get_connection(self.conn_id)
configuration.api_key["Authorization"] = conn.password
configuration.host = conn.host or "https://api.opsgenie.com"
self.alert_api_instance = AlertApi(ApiClient(configuration))
def _get_api_key(self) -> str:
"""
Get the API key from the connection.
:return: API key
"""
conn = self.get_connection(self.conn_id)
return cast("str", conn.password)
def get_conn(self) -> AlertApi:
"""
Get the underlying AlertApi client.
:return: AlertApi client
"""
return self.alert_api_instance
def create_alert(self, payload: dict | None = None) -> SuccessResponse:
"""
Create an alert on Opsgenie.
:param payload: Opsgenie API Create Alert payload values
See https://docs.opsgenie.com/docs/alert-api#section-create-alert
:return: api response
"""
payload = payload or {}
try:
create_alert_payload = CreateAlertPayload(**payload)
api_response = self.alert_api_instance.create_alert(create_alert_payload)
return api_response
except OpenApiException as e:
self.log.exception("Exception when sending alert to opsgenie with payload: %s", payload)
raise e
def close_alert(
self,
identifier: str,
identifier_type: str | None = "id",
payload: dict | None = None,
**kwargs: dict | None,
) -> SuccessResponse:
"""
Close an alert in Opsgenie.
:param identifier: Identifier of alert which could be alert id, tiny id or alert alias
:param identifier_type: Type of the identifier that is provided as an in-line parameter.
Possible values are 'id', 'alias' or 'tiny'
:param payload: Request payload of closing alert action.
see https://github.com/opsgenie/opsgenie-python-sdk/blob/master/docs/AlertApi.md#close_alert
:param kwargs: params to pass to the function
:return: SuccessResponse
If the method is called asynchronously,
returns the request thread.
"""
payload = payload or {}
try:
close_alert_payload = CloseAlertPayload(**payload)
api_response = self.alert_api_instance.close_alert(
identifier=identifier,
identifier_type=identifier_type,
close_alert_payload=close_alert_payload,
**kwargs,
)
return api_response
except OpenApiException as e:
self.log.exception("Exception when closing alert in opsgenie with payload: %s", payload)
raise e
def delete_alert(
self,
identifier: str,
identifier_type: str | None = None,
user: str | None = None,
source: str | None = None,
) -> SuccessResponse:
"""
Delete an alert in Opsgenie.
:param identifier: Identifier of alert which could be alert id, tiny id or alert alias.
:param identifier_type: Type of the identifier that is provided as an in-line parameter.
Possible values are 'id', 'alias' or 'tiny'
:param user: Display name of the request owner.
:param source: Display name of the request source
:return: SuccessResponse
"""
try:
api_response = self.alert_api_instance.delete_alert(
identifier=identifier,
identifier_type=identifier_type,
user=user,
source=source,
)
return api_response
except OpenApiException as e:
self.log.exception("Exception when calling AlertApi->delete_alert: %s\n", e)
raise e
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom UI field behaviour for Opsgenie connection."""
return {
"hidden_fields": ["port", "schema", "login", "extra"],
"relabeling": {"password": "Opsgenie API Key"},
}
| OpsgenieAlertHook |
python | huggingface__transformers | tests/models/dinov2_with_registers/test_modeling_dinov2_with_registers.py | {
"start": 7741,
"end": 11301
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as Dinov2WithRegisters does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (
(
Dinov2WithRegistersModel,
Dinov2WithRegistersForImageClassification,
Dinov2WithRegistersBackbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"image-feature-extraction": Dinov2WithRegistersModel,
"image-classification": Dinov2WithRegistersForImageClassification,
}
if is_torch_available()
else {}
)
test_resize_embeddings = False
test_torch_exportable = True
def setUp(self):
self.model_tester = Dinov2WithRegistersModelTester(self)
self.config_tester = ConfigTester(
self, config_class=Dinov2WithRegistersConfig, has_text_modality=False, hidden_size=37
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="Dinov2WithRegisters does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_backbone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason="Dinov2WithRegisters does not support feedforward chunking yet")
def test_feed_forward_chunking(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "facebook/dinov2-with-registers-base"
model = Dinov2WithRegistersModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
| Dinov2WithRegistersModelTest |
python | kamyu104__LeetCode-Solutions | Python/number-of-boomerangs.py | {
"start": 52,
"end": 1153
} | class ____(object):
def numberOfBoomerangs(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
result = 0
for i in xrange(len(points)):
group = collections.defaultdict(int)
for j in xrange(len(points)):
if j == i:
continue
dx, dy = points[i][0] - points[j][0], points[i][1] - points[j][1]
group[dx**2 + dy**2] += 1
for _, v in group.iteritems():
if v > 1:
result += v * (v-1)
return result
def numberOfBoomerangs2(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
cnt = 0
for a, i in enumerate(points):
dis_list = []
for b, k in enumerate(points[:a] + points[a + 1:]):
dis_list.append((k[0] - i[0]) ** 2 + (k[1] - i[1]) ** 2)
for z in collections.Counter(dis_list).values():
if z > 1:
cnt += z * (z - 1)
return cnt
| Solution |
python | conda__conda | conda/exceptions.py | {
"start": 15440,
"end": 15585
} | class ____(CondaError, IOError):
def __init__(self, message: str, *args):
msg = f"{message}"
super().__init__(msg)
| CondaIOError |
python | ray-project__ray | python/ray/autoscaler/batching_node_provider.py | {
"start": 1078,
"end": 2169
} | class ____:
"""Stores all data about a Ray node needed by the autoscaler.
Attributes:
kind: Whether the node is the head or a worker.
type: The user-defined type of the node.
replica_index: An identifier for nodes in a replica of a TPU worker group.
This value is set as a Pod label by a GKE webhook when TPUs are requested
ip: Cluster-internal ip of the node. ip can be None if the ip
has not yet been assigned.
status: The status of the node. You must adhere to the following semantics
for status:
* The status must be "up-to-date" if and only if the node is running.
* The status must be "update-failed" if and only if the node is in an
unknown or failed state.
* If the node is in a pending (starting-up) state, the status should be
a brief user-facing description of why the node is pending.
"""
kind: NodeKind
type: NodeType
ip: Optional[NodeIP]
status: NodeStatus
replica_index: Optional[str] = None
| NodeData |
python | huggingface__transformers | src/transformers/models/bart/configuration_bart.py | {
"start": 805,
"end": 7485
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`BartModel`]. It is used to instantiate a BART
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the BART
[facebook/bart-large](https://huggingface.co/facebook/bart-large) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the BART model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`BartModel`].
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for classifier.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
scale_embedding (`bool`, *optional*, defaults to `False`):
Scale embeddings by diving by sqrt(d_model).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
num_labels (`int`, *optional*, defaults to 3):
The number of labels to use in [`BartForSequenceClassification`].
Example:
```python
>>> from transformers import BartConfig, BartModel
>>> # Initializing a BART facebook/bart-large style configuration
>>> configuration = BartConfig()
>>> # Initializing a model (with random weights) from the facebook/bart-large style configuration
>>> model = BartModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "bart"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=50265,
max_position_embeddings=1024,
encoder_layers=12,
encoder_ffn_dim=4096,
encoder_attention_heads=16,
decoder_layers=12,
decoder_ffn_dim=4096,
decoder_attention_heads=16,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
activation_function="gelu",
d_model=1024,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
use_cache=True,
num_labels=3,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
is_encoder_decoder=True,
decoder_start_token_id=2,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=num_labels,
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
**kwargs,
)
self.tie_encoder_decoder = True
__all__ = ["BartConfig"]
| BartConfig |
python | allegroai__clearml | clearml/backend_api/services/v2_20/models.py | {
"start": 92229,
"end": 93097
} | class ____(Request):
"""
Gets model information
:param model: Model id
:type model: str
"""
_service = "models"
_action = "get_by_id"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {"model": {"description": "Model id", "type": "string"}},
"required": ["model"],
"type": "object",
}
def __init__(self, model: str, **kwargs: Any) -> None:
super(GetByIdRequest, self).__init__(**kwargs)
self.model = model
@schema_property("model")
def model(self) -> str:
return self._property_model
@model.setter
def model(self, value: str) -> None:
if value is None:
self._property_model = None
return
self.assert_isinstance(value, "model", six.string_types)
self._property_model = value
| GetByIdRequest |
python | tensorflow__tensorflow | tensorflow/python/framework/type_spec_test.py | {
"start": 6917,
"end": 33267
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters(
("FullySpecified",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool)),
("UnknownDim",
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool)),
("UnknownRank",
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool),
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool)),
("Metadata",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "blue"),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "blue")),
("NumpyMetadata",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
(np.int32(1), np.float32(1.),
np.array([[1, 2], [3, 4]]))),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
(np.int32(1), np.float32(1.),
np.array([[1, 2], [3, 4]])))),
)
def testEquality(self, v1, v2):
# pylint: disable=g-generic-assert
self.assertEqual(v1, v2)
self.assertEqual(v2, v1)
self.assertFalse(v1 != v2)
self.assertFalse(v2 != v1)
self.assertEqual(hash(v1), hash(v2))
@parameterized.named_parameters(
("UnknownDim",
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),
("UnknownRank",
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),
("IncompatibleDtype",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.float32)),
("IncompatibleRank",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None, None], dtypes.bool)),
("IncompatibleDimSize",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 8], dtypes.int32, [None], dtypes.bool)),
("IncompatibleMetadata",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "red"),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "blue")),
("SwappedValues",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([None], dtypes.bool, [5, 3], dtypes.int32)),
("DiffMetadataNumpy",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
np.array([[1, 2], [3, 4]])),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
np.array([[1, 2], [3, 8]]))),
("DiffMetadataTensorSpecName",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
tensor_spec.TensorSpec([4], name="a")),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
tensor_spec.TensorSpec([4], name="b"))),
("Non-TypeSpec",
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool), 5),
)
def testInequality(self, v1, v2):
# pylint: disable=g-generic-assert
self.assertNotEqual(v1, v2)
self.assertNotEqual(v2, v1)
self.assertFalse(v1 == v2)
self.assertFalse(v2 == v1)
@parameterized.named_parameters(
("SameValue", TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool)),
("UnknownDim", TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool),
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool)),
("UnknownRank", TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool),
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool)),
("NamedTuple",
NestOfTensorsSpec(
_TestNamedTuple(
a=tensor_spec.TensorSpec([8, 5], dtypes.int32),
b=tensor_spec.TensorSpec([8, 12], dtypes.int32))),
NestOfTensorsSpec(
_TestNamedTuple(
a=tensor_spec.TensorSpec([None, 5], dtypes.int32),
b=tensor_spec.TensorSpec([None, None], dtypes.int32)))),
(
"NamedTupleRedefined",
NestOfTensorsSpec(
_TestNamedTuple2( # Separate but equivalent type.
a=tensor_spec.TensorSpec([8, 5], dtypes.int32),
b=tensor_spec.TensorSpec([8, 12], dtypes.int32))),
NestOfTensorsSpec(
_TestNamedTuple(
a=tensor_spec.TensorSpec([None, 5], dtypes.int32),
b=tensor_spec.TensorSpec([None, None], dtypes.int32)))),
)
def testIsSubtypeOf(self, v1, v2):
self.assertTrue(v1.is_subtype_of(v2))
@parameterized.named_parameters(
("DifferentType",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpecTwin([5, 3], dtypes.int32, [None], dtypes.bool),
),
("DifferentDtype",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.float32)),
("DifferentRank", TwoTensorsSpec([5, 3], dtypes.int32, [None],
dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None, None], dtypes.bool)),
("DifferentDimSize",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 8], dtypes.int32, [None], dtypes.bool)),
("DifferentMetadata",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "red"),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "blue")),
("SwappedValues", TwoTensorsSpec([5, 3], dtypes.int32, [None],
dtypes.bool),
TwoTensorsSpec([None], dtypes.bool, [5, 3], dtypes.int32)),
("SwappedDimensions",
TwoTensorsSpec([3, 5], dtypes.int32, [None], dtypes.int32),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.int32)),
("Supertype", TwoTensorsSpec([5, None], dtypes.int32, [None],
dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),
("SerializeDifferentStructure",
TwoTensorsSpecVariableSerialize([5, None], dtypes.int32, [None],
dtypes.bool),
TwoTensorsSpecVariableSerialize([5, None], dtypes.int32, [None],
dtypes.bool, "smaller_tuple")),
("SerializeDifferentOrder",
TwoTensorsSpecVariableSerialize([5, None], dtypes.int32, [None],
dtypes.bool),
TwoTensorsSpecVariableSerialize([5, None], dtypes.int32, [None],
dtypes.bool, "different_order")),
)
def testIsNotSubtypeOf(self, v1, v2):
self.assertFalse(v1.is_subtype_of(v2))
@parameterized.named_parameters(
("SameValue", TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool)),
("DifferentValue",
TwoTensorsSpec([2, 1], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([None, None], dtypes.int32, [None], dtypes.bool)),
("DifferentRank",
TwoTensorsSpec([3, 2, 1], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec(None, dtypes.int32, [None], dtypes.bool)),
(
"NamedTupleRedefined",
NestOfTensorsSpec(
_TestNamedTuple2( # Separate but equivalent type.
a=tensor_spec.TensorSpec([8, 3], dtypes.int32),
b=tensor_spec.TensorSpec([8, 12], dtypes.int32))),
NestOfTensorsSpec(
_TestNamedTuple(
a=tensor_spec.TensorSpec([None, 5], dtypes.int32),
b=tensor_spec.TensorSpec([7, None], dtypes.int32))),
NestOfTensorsSpec(
_TestNamedTuple(
a=tensor_spec.TensorSpec([None, None], dtypes.int32),
b=tensor_spec.TensorSpec([None, None], dtypes.int32)))),
)
def testMostSpecificCommonSupertype(self, v1, v2, result):
self.assertEqual(v1.most_specific_common_supertype([v2]), result)
self.assertEqual(v2.most_specific_common_supertype([v1]), result)
@parameterized.named_parameters(
("DifferentType",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpecTwin([5, 3], dtypes.int32, [None], dtypes.bool),
),
("DifferentDtype",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.float32)),
("DifferentMetadata",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "red"),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "blue")),
("SerializeDifferentStructure",
TwoTensorsSpecVariableSerialize([5, None], dtypes.int32, [None],
dtypes.bool),
TwoTensorsSpecVariableSerialize([5, None], dtypes.int32, [None],
dtypes.bool, "smaller_tuple")),
("SerializeDifferentOrder",
TwoTensorsSpecVariableSerialize([5, None], dtypes.int32, [None],
dtypes.bool),
TwoTensorsSpecVariableSerialize([5, None], dtypes.int32, [None],
dtypes.bool, "different_order")),
)
def testNoCommonSupertype(self, v1, v2):
self.assertIsNone(v1.most_specific_common_supertype([v2]))
self.assertIsNone(v2.most_specific_common_supertype([v1]))
def testTensorDecomposition(self):
value = TwoComposites(
ragged_factory_ops.constant([[1, 2], [3]], dtypes.int32),
ragged_factory_ops.constant([[5], [6, 7, 8]], dtypes.float32),
)
spec = type_spec.type_spec_from_value(value)
self.assertEqual(
spec.flatten(),
[
tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.int64),
tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.int64),
],
)
self.assertEqual(
[trace_type.from_value(t) for t in spec.to_tensors(value)],
[
tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.int64),
tensor_spec.TensorSpec(shape=(4,), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.int64),
],
)
flat_original = spec.to_tensors(value)
reconstructed = spec.from_tensors(iter(flat_original))
flat_reconstructed = spec.to_tensors(reconstructed)
for original, reconstructed in zip(flat_original, flat_reconstructed):
self.assertIs(original, reconstructed)
def testCastDoesntRecreateCompositeTensor(self):
value = TwoComposites(
ragged_factory_ops.constant([[1, 2], [3]], dtypes.int32),
ragged_factory_ops.constant([[5], [6, 7, 8]], dtypes.float32),
)
spec = type_spec.type_spec_from_value(value)
casted_value = spec.cast(value, trace_type.InternalCastContext())
self.assertIs(value, casted_value)
@parameterized.named_parameters(
("SameValue",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool)),
("UnknownDim",
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),
("UnknownRank",
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),
("NamedTuple",
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec([None, 5], dtypes.int32),
b=tensor_spec.TensorSpec([None, None], dtypes.int32))),
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec([8, 5], dtypes.int32),
b=tensor_spec.TensorSpec([8, 12], dtypes.int32)))),
("NamedTupleRedefined",
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec([None, 5], dtypes.int32),
b=tensor_spec.TensorSpec([None, None], dtypes.int32))),
NestOfTensorsSpec(_TestNamedTuple2( # Separate but equivalent type.
a=tensor_spec.TensorSpec([8, 5], dtypes.int32),
b=tensor_spec.TensorSpec([8, 12], dtypes.int32)))),
)
def testIsCompatibleWith(self, v1, v2):
self.assertTrue(v1.is_compatible_with(v2))
self.assertTrue(v2.is_compatible_with(v1))
@parameterized.named_parameters(
("IncompatibleDtype",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.float32)),
("IncompatibleRank",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None, None], dtypes.bool)),
("IncompatibleDimSize",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 8], dtypes.int32, [None], dtypes.bool)),
("IncompatibleMetadata",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "red"),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "blue")),
("SwappedValues",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([None], dtypes.bool, [5, 3], dtypes.int32)),
)
def testIsNotCompatibleWith(self, v1, v2):
self.assertFalse(v1.is_compatible_with(v2))
self.assertFalse(v2.is_compatible_with(v1))
@parameterized.named_parameters(
("EqualTypes",
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool)),
("UnknownDim",
TwoTensorsSpec([5, None], dtypes.int32, [8], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool)),
("UnknownRank",
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool),
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool)),
("DiffRank",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None, None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool)),
("DiffDimSize",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 8], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool)),
("NamedTuple",
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32))),
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32))),
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32)))),
("NamedTupleRedefined",
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32))),
NestOfTensorsSpec(_TestNamedTuple2( # Separate but equivalent type.
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32))),
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32)))),
)
def testMostSpecificCompatibleType(self, v1, v2, expected):
self.assertEqual(v1.most_specific_compatible_type(v2), expected)
self.assertEqual(v2.most_specific_compatible_type(v1), expected)
@parameterized.named_parameters(
("IncompatibleDtype",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.float32)),
("IncompatibleMetadata",
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool, "red"),
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool, "blue")),
("IncompatibleTensorSpecName",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
tensor_spec.TensorSpec([4], name="a")),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
tensor_spec.TensorSpec([4], name="b"))),
("IncompatibleNestType",
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32))),
NestOfTensorsSpec(dict(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32)))),
)
def testMostSpecificCompatibleTypeException(self, v1, v2):
with self.assertRaises(ValueError):
v1.most_specific_compatible_type(v2)
with self.assertRaises(ValueError):
v2.most_specific_compatible_type(v1)
def testMostSpecificCompatibleTypeNamedTupleIsNotTuple(self):
named_tuple_spec_a = NestOfTensorsSpec.from_value(NestOfTensors(
_TestNamedTuple(a=1, b="aaa")))
named_tuple_spec_b = NestOfTensorsSpec.from_value(NestOfTensors(
_TestNamedTuple(a=2, b="bbb")))
named_tuple_spec_c = NestOfTensorsSpec.from_value(NestOfTensors(
_TestNamedTuple(a=3, b="ccc")))
normal_tuple_spec = NestOfTensorsSpec.from_value(NestOfTensors((2, "bbb")))
result_a_b = named_tuple_spec_a.most_specific_compatible_type(
named_tuple_spec_b)
result_b_a = named_tuple_spec_b.most_specific_compatible_type(
named_tuple_spec_a)
self.assertEqual(repr(result_a_b), repr(named_tuple_spec_c))
self.assertEqual(repr(result_b_a), repr(named_tuple_spec_c))
# Test that spec of named tuple is not equal to spec of normal tuple.
self.assertNotEqual(repr(result_a_b), repr(normal_tuple_spec))
@parameterized.named_parameters(
("IncompatibleDtype",
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.bool))),
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32)))),
("DifferentTupleSize",
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.bool))),
NestOfTensorsSpec(_TestNamedTupleSingleField(
a=tensor_spec.TensorSpec((), dtypes.int32)))),
("DifferentFieldName",
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32))),
NestOfTensorsSpec(_TestNamedTupleDifferentField(
a=tensor_spec.TensorSpec((), dtypes.int32),
c=tensor_spec.TensorSpec((), dtypes.int32)))),
("NamedTupleAndTuple",
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32))),
NestOfTensorsSpec((
tensor_spec.TensorSpec((), dtypes.int32),
tensor_spec.TensorSpec((), dtypes.int32)))),
)
def testMostSpecificCompatibleTypeForNamedTuplesException(self, v1, v2):
with self.assertRaises(ValueError):
v1.most_specific_compatible_type(v2)
with self.assertRaises(ValueError):
v2.most_specific_compatible_type(v1)
def toTensorList(self):
value = TwoTensors([1, 2, 3], [1.0, 2.0], "red")
spec = TwoTensorsSpec.from_value(value)
tensor_list = spec._to_tensor_list(value)
self.assertLen(tensor_list, 2)
self.assertIs(tensor_list[0], value.x)
self.assertIs(tensor_list[1], value.y)
def fromTensorList(self):
x = ops.convert_to_tensor([1, 2, 3])
y = ops.convert_to_tensor([1.0, 2.0])
color = "green"
spec = TwoTensorsSpec(x.shape, x.dtype, y.shape, y.dtype, color)
value = spec._from_tensor_list([x, y])
self.assertIs(value.x, x)
self.assertIs(value.y, y)
self.assertEqual(value.color, color)
def fromIncompatibleTensorList(self):
x = ops.convert_to_tensor([1, 2, 3])
y = ops.convert_to_tensor([1.0, 2.0])
spec1 = TwoTensorsSpec([100], x.dtype, y.shape, y.dtype, "green")
spec2 = TwoTensorsSpec(x.shape, x.dtype, y.shape, dtypes.bool, "green")
with self.assertRaises(ValueError):
spec1._from_tensor_list([x, y]) # shape mismatch
with self.assertRaises(ValueError):
spec2._from_tensor_list([x, y]) # dtype mismatch
def testFlatTensorSpecs(self):
spec = TwoTensorsSpec([5], dtypes.int32, [5, 8], dtypes.float32, "red")
self.assertEqual(spec._flat_tensor_specs,
[tensor_spec.TensorSpec([5], dtypes.int32),
tensor_spec.TensorSpec([5, 8], dtypes.float32)])
def testFullTypesForFlatTensors(self):
spec = TwoTensorsSpec([5], dtypes.int32, [5, 8], dtypes.float32, "red")
full_type_list = fulltypes_for_flat_tensors(spec)
expect = [
full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_UNSET),
full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_UNSET)
]
self.assertEqual(len(spec._flat_tensor_specs), len(full_type_list))
self.assertEqual(expect, full_type_list)
def testRepr(self):
spec = TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool)
self.assertEqual(
repr(spec),
"TwoTensorsSpec(%r, %r, %r, %r, %r)" %
(tensor_shape.TensorShape([5, 3]), dtypes.int32,
tensor_shape.TensorShape(None), dtypes.bool, "red"))
def testFromValue(self):
value = TwoTensors([1, 2, 3], [1.0, 2.0], "red")
spec = type_spec.type_spec_from_value(value)
self.assertEqual(spec, TwoTensorsSpec.from_value(value))
def testCast(self):
spec = TwoTensorsSpec([], dtypes.int32, [], dtypes.float32)
foo = spec._from_components([1, 2.3])
ctx = trace_type.InternalCastContext()
value = spec.cast(foo, ctx)
tensor_type = type(ops.convert_to_tensor([1, 2, 3]))
self.assertIsInstance(value.x, tensor_type)
self.assertIsInstance(value.y, tensor_type)
self.assertEqual(value.x.dtype, dtypes.int32)
self.assertEqual(value.y.dtype, dtypes.float32)
bar = TwoComposites(
ragged_factory_ops.constant([[1, 2], [3]], dtypes.int32),
ragged_factory_ops.constant([[5], [6, 7, 8]], dtypes.float32))
bar_spec = type_spec.type_spec_from_value(bar)
value = bar_spec.cast(bar, ctx)
self.assertEqual(value.x.dtype, dtypes.int32)
self.assertEqual(value.y.dtype, dtypes.float32)
def testNestedRagged(self):
# Check that TwoCompositeSpecs are compatible if one has a nested
# RaggedTensorSpec w/ ragged_rank=0 and the other has a corresponding
# nested TensorSpec.
spec1 = TwoCompositesSpec(
ragged_tensor.RaggedTensorSpec([10], dtypes.int32, ragged_rank=0),
tensor_spec.TensorSpec(None, dtypes.int32))
spec2 = TwoCompositesSpec(
tensor_spec.TensorSpec([10], dtypes.int32),
tensor_spec.TensorSpec(None, dtypes.int32))
spec3 = TwoCompositesSpec(
tensor_spec.TensorSpec([12], dtypes.int32),
tensor_spec.TensorSpec(None, dtypes.int32))
self.assertTrue(spec1.is_compatible_with(spec2))
self.assertFalse(spec1.is_compatible_with(spec3))
def testRegistry(self):
self.assertEqual("tf.TwoCompositesSpec",
type_spec_registry.get_name(TwoCompositesSpec))
self.assertEqual("tf.TwoTensorsSpec",
type_spec_registry.get_name(TwoTensorsSpec))
self.assertEqual(TwoCompositesSpec,
type_spec_registry.lookup("tf.TwoCompositesSpec"))
self.assertEqual(TwoTensorsSpec,
type_spec_registry.lookup("tf.TwoTensorsSpec"))
def testRegistryTypeErrors(self):
with self.assertRaisesRegex(TypeError, "Expected `name` to be a string"):
type_spec_registry.register(None)
with self.assertRaisesRegex(TypeError, "Expected `name` to be a string"):
type_spec_registry.register(TwoTensorsSpec)
with self.assertRaisesRegex(TypeError, "Expected `cls` to be a TypeSpec"):
type_spec_registry.register("tf.foo")(None)
with self.assertRaisesRegex(TypeError, "Expected `cls` to be a TypeSpec"):
type_spec_registry.register("tf.foo")(ragged_tensor.RaggedTensor)
def testRegistryDuplicateErrors(self):
with self.assertRaisesRegex(
ValueError, "Name tf.TwoCompositesSpec has already been registered "
"for class __main__.TwoCompositesSpec."):
@type_spec_registry.register("tf.TwoCompositesSpec") # pylint: disable=unused-variable
class NewTypeSpec(TwoCompositesSpec):
pass
with self.assertRaisesRegex(
ValueError, "Class __main__.TwoCompositesSpec has already been "
"registered with name tf.TwoCompositesSpec"):
type_spec_registry.register("tf.NewName")(TwoCompositesSpec)
def testRegistryNameErrors(self):
for bad_name in ["foo", "", "hello world"]:
with self.assertRaises(ValueError):
type_spec_registry.register(bad_name)
def testRegistryLookupErrors(self):
with self.assertRaises(TypeError):
type_spec_registry.lookup(None)
with self.assertRaisesRegex(
ValueError, "No TypeSpec has been registered with name 'foo.bar'"):
type_spec_registry.lookup("foo.bar")
def testRegistryGetNameErrors(self):
with self.assertRaises(TypeError):
type_spec_registry.get_name(None)
class Foo(TwoCompositesSpec):
pass
with self.assertRaisesRegex(
ValueError, "TypeSpec __main__.Foo has not been registered."):
type_spec_registry.get_name(Foo)
def testSerialization(self):
spec = TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool)
self.assertEqual(spec, trace_type.deserialize(trace_type.serialize(spec)))
| TypeSpecTest |
python | pypa__pip | src/pip/_vendor/urllib3/packages/six.py | {
"start": 3598,
"end": 3976
} | class ____(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
| _LazyModule |
python | davidhalter__parso | parso/python/tree.py | {
"start": 5269,
"end": 5337
} | class ____(PythonMixin, ErrorNode):
__slots__ = ()
| PythonErrorNode |
python | walkccc__LeetCode | solutions/753. Cracking the Safe/753.py | {
"start": 0,
"end": 489
} | class ____:
def crackSafe(self, n: int, k: int) -> str:
passwordSize = k**n
path = '0' * n
seen = set()
seen.add(path)
def dfs(path: str) -> str:
if len(seen) == passwordSize:
return path
for c in map(str, range(k)):
node = path[-n + 1:] + c if n > 1 else c
if node not in seen:
seen.add(node)
res = dfs(path + c)
if res:
return res
seen.remove(node)
return dfs(path)
| Solution |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 153615,
"end": 153693
} | class ____(Buffer):
def num_reads(self) -> int:
return 1
| InputBuffer |
python | optuna__optuna | optuna/terminator/improvement/emmr.py | {
"start": 1263,
"end": 10204
} | class ____(BaseImprovementEvaluator):
"""Evaluates a kind of regrets, called the Expected Minimum Model Regret(EMMR).
EMMR is an upper bound of "expected minimum simple regret" in the optimization process.
Expected minimum simple regret is a quantity that converges to zero only if the
optimization process has found the global optima.
For further information about expected minimum simple regret and the algorithm,
please refer to the following paper:
- `A stopping criterion for Bayesian optimization by the gap of expected minimum simple
regrets <https://proceedings.mlr.press/v206/ishibashi23a.html>`__
Also, there is our blog post explaining this evaluator:
- `Introducing A New Terminator: Early Termination of Black-box Optimization Based on
Expected Minimum Model Regret
<https://medium.com/optuna/introducing-a-new-terminator-early-termination-of-black-box-optimization-based-on-expected-9a660774fcdb>`__
Args:
deterministic_objective:
A boolean value which indicates whether the objective function is deterministic.
Default is :obj:`False`.
delta:
A float number related to the criterion for termination. Default to 0.1.
For further information about this parameter, please see the aforementioned paper.
min_n_trials:
A minimum number of complete trials to compute the criterion. Default to 2.
seed:
A random seed for EMMREvaluator.
Example:
.. testcode::
import optuna
from optuna.terminator import EMMREvaluator
from optuna.terminator import MedianErrorEvaluator
from optuna.terminator import Terminator
sampler = optuna.samplers.TPESampler(seed=0)
study = optuna.create_study(sampler=sampler, direction="minimize")
emmr_improvement_evaluator = EMMREvaluator()
median_error_evaluator = MedianErrorEvaluator(emmr_improvement_evaluator)
terminator = Terminator(
improvement_evaluator=emmr_improvement_evaluator,
error_evaluator=median_error_evaluator,
)
for i in range(1000):
trial = study.ask()
ys = [trial.suggest_float(f"x{i}", -10.0, 10.0) for i in range(5)]
value = sum(ys[i] ** 2 for i in range(5))
study.tell(trial, value)
if terminator.should_terminate(study):
# Terminated by Optuna Terminator!
break
"""
def __init__(
self,
deterministic_objective: bool = False,
delta: float = 0.1,
min_n_trials: int = 2,
seed: int | None = None,
) -> None:
if min_n_trials <= 1 or not np.isfinite(min_n_trials):
raise ValueError("`min_n_trials` is expected to be a finite integer more than one.")
self._deterministic = deterministic_objective
self._delta = delta
self.min_n_trials = min_n_trials
self._rng = LazyRandomState(seed)
def evaluate(self, trials: list[FrozenTrial], study_direction: StudyDirection) -> float:
optuna_search_space = intersection_search_space(trials)
complete_trials = [t for t in trials if t.state == TrialState.COMPLETE]
if len(complete_trials) < self.min_n_trials:
return sys.float_info.max * MARGIN_FOR_NUMARICAL_STABILITY # Do not terminate.
search_space = gp_search_space.SearchSpace(optuna_search_space)
normalized_params = search_space.get_normalized_params(complete_trials)
if not search_space.dim:
optuna_warn(
f"{self.__class__.__name__} cannot consider any search space."
"Termination will never occur in this study."
)
return sys.float_info.max * MARGIN_FOR_NUMARICAL_STABILITY # Do not terminate.
len_trials = len(complete_trials)
assert normalized_params.shape == (len_trials, search_space.dim)
# _gp module assumes that optimization direction is maximization
sign = -1 if study_direction == StudyDirection.MINIMIZE else 1
score_vals = np.array([cast(float, t.value) for t in complete_trials]) * sign
score_vals = gp.warn_and_convert_inf(score_vals)
standarized_score_vals = (score_vals - score_vals.mean()) / max(
sys.float_info.min, score_vals.std()
)
assert len(standarized_score_vals) == len(normalized_params)
gpr_t1 = gp.fit_kernel_params( # Fit kernel with up to (t-1)-th observation
X=normalized_params[..., :-1, :],
Y=standarized_score_vals[:-1],
is_categorical=search_space.is_categorical,
log_prior=prior.default_log_prior,
minimum_noise=prior.DEFAULT_MINIMUM_NOISE_VAR,
gpr_cache=None,
deterministic_objective=self._deterministic,
)
gpr_t = gp.fit_kernel_params( # Fit kernel with up to t-th observation
X=normalized_params,
Y=standarized_score_vals,
is_categorical=search_space.is_categorical,
log_prior=prior.default_log_prior,
minimum_noise=prior.DEFAULT_MINIMUM_NOISE_VAR,
gpr_cache=gpr_t1,
deterministic_objective=self._deterministic,
)
theta_t_star_index = int(np.argmax(standarized_score_vals))
theta_t1_star_index = int(np.argmax(standarized_score_vals[:-1]))
theta_t_star = normalized_params[theta_t_star_index, :]
theta_t1_star = normalized_params[theta_t1_star_index, :]
cov_t_between_theta_t_star_and_theta_t1_star = _compute_gp_posterior_cov_two_thetas(
normalized_params, gpr_t, theta_t_star_index, theta_t1_star_index
)
# Use gpr_t instead of gpr_t1 because KL Div. requires the same prior for both posterior.
# cf. Sec. 4.4 of https://proceedings.mlr.press/v206/ishibashi23a/ishibashi23a.pdf
mu_t1_theta_t_with_nu_t, variance_t1_theta_t_with_nu_t = _compute_gp_posterior(
normalized_params[-1, :], gpr_t
)
_, variance_t_theta_t1_star = _compute_gp_posterior(theta_t1_star, gpr_t)
mu_t_theta_t_star, variance_t_theta_t_star = _compute_gp_posterior(theta_t_star, gpr_t)
mu_t1_theta_t1_star, _ = _compute_gp_posterior(theta_t1_star, gpr_t1)
y_t = standarized_score_vals[-1]
kappa_t1 = _compute_standardized_regret_bound(
gpr_t1,
search_space,
normalized_params[:-1, :],
standarized_score_vals[:-1],
self._delta,
rng=self._rng.rng,
)
theorem1_delta_mu_t_star = mu_t1_theta_t1_star - mu_t_theta_t_star
alg1_delta_r_tilde_t_term1 = theorem1_delta_mu_t_star
theorem1_v = math.sqrt(
max(
1e-10,
variance_t_theta_t_star
- 2.0 * cov_t_between_theta_t_star_and_theta_t1_star
+ variance_t_theta_t1_star,
)
)
theorem1_g = (mu_t_theta_t_star - mu_t1_theta_t1_star) / theorem1_v
alg1_delta_r_tilde_t_term2 = theorem1_v * scipy_stats.norm.pdf(theorem1_g)
alg1_delta_r_tilde_t_term3 = theorem1_v * theorem1_g * scipy_stats.norm.cdf(theorem1_g)
_lambda = prior.DEFAULT_MINIMUM_NOISE_VAR**-1
eq4_rhs_term1 = 0.5 * math.log(1.0 + _lambda * variance_t1_theta_t_with_nu_t)
eq4_rhs_term2 = (
-0.5 * variance_t1_theta_t_with_nu_t / (variance_t1_theta_t_with_nu_t + _lambda**-1)
)
eq4_rhs_term3 = (
0.5
* variance_t1_theta_t_with_nu_t
* (y_t - mu_t1_theta_t_with_nu_t) ** 2
/ (variance_t1_theta_t_with_nu_t + _lambda**-1) ** 2
)
alg1_delta_r_tilde_t_term4 = kappa_t1 * math.sqrt(
0.5 * (eq4_rhs_term1 + eq4_rhs_term2 + eq4_rhs_term3)
)
return min(
sys.float_info.max * 0.5,
alg1_delta_r_tilde_t_term1
+ alg1_delta_r_tilde_t_term2
+ alg1_delta_r_tilde_t_term3
+ alg1_delta_r_tilde_t_term4,
)
def _compute_gp_posterior(x_params: np.ndarray, gpr: gp.GPRegressor) -> tuple[float, float]:
# best_params or normalized_params[..., -1, :]
mean, var = gpr.posterior(torch.from_numpy(x_params))
return mean.item(), var.item()
def _compute_gp_posterior_cov_two_thetas(
normalized_params: np.ndarray, gpr: gp.GPRegressor, theta1_index: int, theta2_index: int
) -> float: # cov
if theta1_index == theta2_index:
return _compute_gp_posterior(normalized_params[theta1_index], gpr)[1]
_, covar = gpr.posterior(
torch.from_numpy(normalized_params[[theta1_index, theta2_index]]), joint=True
)
assert covar.shape == (2, 2)
return covar[0, 1].item()
| EMMREvaluator |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/array_ops_test.py | {
"start": 61680,
"end": 63639
} | class ____(test_util.TensorFlowTestCase):
def testEager(self):
with context.eager_mode():
t = constant_op.constant([[1, 2, 3], [4, 5, 6]])
paddings = constant_op.constant([[
1,
1,
], [2, 2]])
padded = array_ops.pad(t, paddings, "CONSTANT")
self.assertAllEqual(padded.numpy(),
[[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 2, 3, 0, 0],
[0, 0, 4, 5, 6, 0, 0], [0, 0, 0, 0, 0, 0, 0]])
# b/246325518: Bad shape size. Explicitly testing different execution paths.
def testInvalidMirrorPadGradEagerMode(self):
with context.eager_mode():
with self.assertRaises(Exception):
gen_array_ops.MirrorPadGrad(
input=[1], paddings=[[0x77f00000, 0xa000000]], mode="REFLECT")
# b/246325518: Bad shape size. Explicitly testing different execution paths.
def testInvalidMirrorPadGradGraphMode(self):
with context.graph_mode():
with self.assertRaises(Exception):
result = gen_array_ops.MirrorPadGrad(
input=[1], paddings=[[0x77f00000, 0xa000000]], mode="REFLECT")
self.evaluate(result)
def testSymmetricMirrorPadGrad(self):
t = np.broadcast_to(np.arange(0, 7), (3, 2, 1, 7))
paddings = constant_op.constant([
[1, 1],
[0, 0],
[0, 0],
[2, 2],
])
expected = np.broadcast_to(np.array([9, 27, 27]), (1, 2, 1, 3))
result = gen_array_ops.mirror_pad_grad(t, paddings, "SYMMETRIC")
self.assertAllEqual(result, expected)
def testReflectMirrorPadGrad(self):
t = np.broadcast_to(np.reshape(np.arange(0, 7), (7, 1)), (1, 4, 7, 1))
paddings = constant_op.constant([
[0, 0],
[1, 1],
[2, 2],
[0, 0],
])
expected = np.broadcast_to(
np.reshape(np.array([16, 18, 8]), (3, 1)), (1, 2, 3, 1))
result = gen_array_ops.mirror_pad_grad(t, paddings, "REFLECT")
self.assertAllEqual(result, expected)
| PadTest |
python | facebookresearch__faiss | tests/test_factory.py | {
"start": 11002,
"end": 11704
} | class ____(unittest.TestCase):
def test_rcq(self):
index = faiss.index_factory(12, "IVF256(RCQ2x4),RQ3x4")
self.assertEqual(
faiss.downcast_index(index.quantizer).__class__,
faiss.ResidualCoarseQuantizer
)
def test_rq3(self):
index = faiss.index_factory(5, "RQ2x16_3x8_6x4")
np.testing.assert_array_equal(
faiss.vector_to_array(index.rq.nbits),
np.array([16, 16, 8, 8, 8, 4, 4, 4, 4, 4, 4])
)
def test_norm(self):
index = faiss.index_factory(5, "RQ8x8_Nqint8")
self.assertEqual(
index.rq.search_type,
faiss.AdditiveQuantizer.ST_norm_qint8)
| TestAdditive |
python | jina-ai__jina | jina/clients/mixin.py | {
"start": 7605,
"end": 16947
} | class ____:
"""The Post Mixin class for Client and Flow"""
def _with_retry(
self,
func,
inputs,
on_done,
on_error,
on_always,
exec_endpoint,
target_executor,
parameters,
request_size,
max_attempts,
initial_backoff,
max_backoff,
backoff_multiplier,
results_in_order,
stream,
prefetch,
**kwargs,
):
is_document_or_documentarray = isinstance(inputs, Document) or isinstance(
inputs, DocumentArray
)
if (
is_document_or_documentarray
and isinstance(self.client, GRPCBaseClient)
and max_attempts > 1
and stream
):
for attempt in range(1, max_attempts + 1):
try:
return PostMixin._run_async(
backoff_multiplier,
exec_endpoint,
func,
initial_backoff,
inputs,
kwargs,
max_attempts,
max_backoff,
on_always,
on_done,
on_error,
parameters,
prefetch,
request_size,
results_in_order,
stream,
target_executor,
)
except (
grpc.aio.AioRpcError,
InternalNetworkError,
ConnectionError,
) as err:
run_async(
wait_or_raise_err,
attempt=attempt,
err=err,
max_attempts=max_attempts,
backoff_multiplier=backoff_multiplier,
initial_backoff=initial_backoff,
max_backoff=max_backoff,
)
else:
return PostMixin._run_async(
backoff_multiplier,
exec_endpoint,
func,
initial_backoff,
inputs,
kwargs,
max_attempts,
max_backoff,
on_always,
on_done,
on_error,
parameters,
prefetch,
request_size,
results_in_order,
stream,
target_executor,
)
@staticmethod
def _run_async(
backoff_multiplier,
exec_endpoint,
func,
initial_backoff,
inputs,
kwargs,
max_attempts,
max_backoff,
on_always,
on_done,
on_error,
parameters,
prefetch,
request_size,
results_in_order,
stream,
target_executor,
):
return run_async(
func,
inputs=inputs,
on_done=on_done,
on_error=on_error,
on_always=on_always,
exec_endpoint=exec_endpoint,
target_executor=target_executor,
parameters=parameters,
request_size=request_size,
max_attempts=max_attempts,
initial_backoff=initial_backoff,
max_backoff=max_backoff,
backoff_multiplier=backoff_multiplier,
results_in_order=results_in_order,
stream=stream,
prefetch=prefetch,
**kwargs,
)
def post(
self,
on: str,
inputs: Optional['InputType'] = None,
on_done: Optional['CallbackFnType'] = None,
on_error: Optional['CallbackFnType'] = None,
on_always: Optional['CallbackFnType'] = None,
parameters: Union[Dict, 'BaseModel', None] = None,
target_executor: Optional[str] = None,
request_size: int = 100,
show_progress: bool = False,
continue_on_error: bool = False,
return_responses: bool = False,
max_attempts: int = 1,
initial_backoff: float = 0.5,
max_backoff: float = 2,
backoff_multiplier: float = 1.5,
results_in_order: bool = False,
stream: bool = True,
prefetch: Optional[int] = None,
return_type: Type[DocumentArray] = DocumentArray,
**kwargs,
) -> Optional[Union['DocumentArray', List['Response']]]:
"""Post a general data request to the Flow.
:param inputs: input data which can be a DocList, a BaseDoc, an Iterable, a function which returns an Iterable.
:param on: the endpoint which is invoked. All the functions in the executors decorated by `@requests(on=...)` with the same endpoint are invoked.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param parameters: the parameters that will be sent to the executor, this can be a Dict or a Pydantic model
:param target_executor: a regex string. Only matching Executors will process the request.
:param request_size: the number of Documents per request. <=0 means all inputs in one request.
:param show_progress: if set, client will show a progress bar on receiving every request.
:param continue_on_error: if set, a Request that causes an error will be logged only without blocking the further requests.
:param return_responses: if set to True, the result will come as Response and not as a `DocumentArray`
:param max_attempts: Number of sending attempts, including the original request.
:param initial_backoff: The first retry will happen with a delay of random(0, initial_backoff)
:param max_backoff: The maximum accepted backoff after the exponential incremental delay
:param backoff_multiplier: The n-th attempt will occur at random(0, min(initialBackoff*backoffMultiplier**(n-1), maxBackoff))
:param results_in_order: return the results in the same order as the inputs
:param stream: Applicable only to grpc client. If True, the requests are sent to the target using the gRPC streaming interface otherwise the gRPC unary interface will be used. The value is True by default.
:param prefetch: How many Requests are processed from the Client at the same time. If not provided then Gateway prefetch value will be used.
:param return_type: the DocList or BaseDoc type to be returned. By default, it is `DocumentArray`.
:param kwargs: additional parameters
:return: None or DocumentArray containing all response Documents
.. warning::
``target_executor`` uses ``re.match`` for checking if the pattern is matched. ``target_executor=='foo'`` will match both deployments with the name ``foo`` and ``foo_what_ever_suffix``.
"""
c = self.client
c.show_progress = show_progress
c.continue_on_error = continue_on_error
parameters = _include_results_field_in_param(parameters)
return_results = (on_always is None) and (on_done is None)
async def _get_results(*args, **kwargs):
is_singleton = False
inferred_return_type = return_type
if docarray_v2:
from docarray import DocList
if not safe_issubclass(return_type, DocList):
is_singleton = True
inferred_return_type = DocList[return_type]
result = [] if return_responses else inferred_return_type([])
async for resp in c._get_results(*args, **kwargs):
if return_results:
resp.document_array_cls = inferred_return_type
if return_responses:
result.append(resp)
else:
result.extend(resp.docs)
if return_results:
if not return_responses and is_singleton and len(result) == 1:
return result[0]
else:
return result
return self._with_retry(
func=_get_results,
inputs=inputs,
on_done=on_done,
on_error=on_error,
on_always=on_always,
exec_endpoint=on,
target_executor=target_executor,
parameters=parameters,
request_size=request_size,
max_attempts=max(max_attempts, 1),
initial_backoff=initial_backoff,
max_backoff=max_backoff,
backoff_multiplier=backoff_multiplier,
results_in_order=results_in_order,
stream=stream,
prefetch=prefetch,
return_type=return_type,
on=on,
**kwargs,
)
# ONLY CRUD, for other request please use `.post`
index = partialmethod(post, '/index')
search = partialmethod(post, '/search')
update = partialmethod(post, '/update')
delete = partialmethod(post, '/delete')
| PostMixin |
python | gevent__gevent | src/greentest/3.14/test_urllib.py | {
"start": 71931,
"end": 72219
} | class ____(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_thishost(self):
"""Test the urllib.request.thishost utility function returns a tuple"""
self.assertIsInstance(urllib.request.thishost(), tuple)
| Utility_Tests |
python | bottlepy__bottle | test/test_multipart.py | {
"start": 1645,
"end": 2410
} | class ____(BaseMultipartTest):
def test_options_parser(self):
parse = bottle._parse_http_header
self.assertEqual(
parse('form-data; name="Test"; filename="Test.txt"'),
[('form-data', {"name": "Test", "filename": "Test.txt"})])
self.assertEqual(parse('form-data; name="Test"; FileName="Te\\"st.txt"'),
[('form-data', {"name": "Test", "filename": "Te\"st.txt"})])
self.assertEqual(parse('form-data; name="Test"; filename="C:\\test\\bla.txt"'),
[('form-data', {"name": "Test", "filename": "C:\\test\\bla.txt"})])
self.assertEqual(parse('form-data; name="Test"; filename="\\\\test\\bla.txt"'),
[('form-data', {"name": "Test", "filename": "\\\\test\\bla.txt"})])
| TestHeaderParser |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_infra_overrides.py | {
"start": 16250,
"end": 26450
} | class ____:
async def test_updating_flow_run_with_valid_updates(
self,
session,
client,
):
# create a pool with a pool schema that has a default value
*_, deployment = await create_objects_for_pool(
session,
pool_job_config={
"job_configuration": {
"thing_one": "{{ expected_variable_1 }}",
},
"variables": {
"properties": {
"expected_variable_1": {
"title": "expected_variable_1",
"default": 0,
"type": "integer",
},
},
"required": ["expected_variable_1"],
},
},
)
# create a flow run with no overrides
response = await client.post(
f"/deployments/{deployment.id}/create_flow_run", json={}
)
assert response.status_code == 201
flow_run_id = response.json()["id"]
# update the flow run with new job vars
job_variables = {"expected_variable_1": 100}
response = await client.patch(
f"/flow_runs/{flow_run_id}", json={"job_variables": job_variables}
)
assert response.status_code == 204
# verify that updates were applied
flow_run = await models.flow_runs.read_flow_run(
session=session, flow_run_id=flow_run_id
)
assert flow_run.job_variables == job_variables
async def test_updating_flow_run_with_invalid_update_type(
self,
session,
client,
):
# create a pool with a pool schema that has a default value
*_, deployment = await create_objects_for_pool(
session,
pool_job_config={
"job_configuration": {
"thing_one": "{{ expected_variable_1 }}",
},
"variables": {
"properties": {
"expected_variable_1": {
"title": "expected_variable_1",
"default": 0,
"type": "integer",
},
},
"required": ["expected_variable_1"],
},
},
)
# create a flow run with no overrides
response = await client.post(
f"/deployments/{deployment.id}/create_flow_run", json={}
)
assert response.status_code == 201
flow_run_id = response.json()["id"]
# update the flow run with a job var that doesn't conform to the schema
job_variables = {"expected_variable_1": "this_should_be_an_int"}
response = await client.patch(
f"/flow_runs/{flow_run_id}", json={"job_variables": job_variables}
)
# verify that the update failed
assert response.status_code == 422
assert (
response.json()["detail"]
== "Error updating flow run: Validation failed for field 'expected_variable_1'. Failure reason: 'this_should_be_an_int' is not of type 'integer'"
)
flow_run = await models.flow_runs.read_flow_run(
session=session, flow_run_id=flow_run_id
)
assert flow_run.job_variables == {}
async def test_updating_flow_run_in_non_scheduled_state(
self,
session,
client,
db,
):
# create a pool with a pool schema that has a default value
*_, deployment = await create_objects_for_pool(
session,
pool_job_config={
"job_configuration": {
"thing_one": "{{ expected_variable_1 }}",
},
"variables": {
"properties": {
"expected_variable_1": {
"title": "expected_variable_1",
"default": 0,
"type": "integer",
},
},
"required": ["expected_variable_1"],
},
},
)
# create a flow run
original_job_variables = {"expected_variable_1": 1}
response = await client.post(
f"/deployments/{deployment.id}/create_flow_run",
json={"job_variables": original_job_variables},
)
assert response.status_code == 201
flow_run_id = response.json()["id"]
# set the flow run state to be non-scheduled
flow_run = await models.flow_runs.read_flow_run(
session=session,
flow_run_id=flow_run_id,
)
flow_run.set_state(db.FlowRunState(**schemas.states.Pending().orm_dict()))
await session.commit()
# attempt to update the flow run
job_variables = {"expected_variable_1": 100}
response = await client.patch(
f"/flow_runs/{flow_run_id}", json={"job_variables": job_variables}
)
# verify that the update failed
assert response.status_code == 400
assert (
response.json()["detail"]
== "Job variables for a flow run in state PENDING cannot be updated"
)
flow_run = await models.flow_runs.read_flow_run(
session=session, flow_run_id=flow_run_id
)
assert flow_run.job_variables == original_job_variables
async def test_updating_non_existant_flow_run(
self,
client,
):
# update a non-existent flow run
response = await client.patch(
f"/flow_runs/{uuid.uuid4()}", json={"job_variables": {"foo": "bar"}}
)
# verify the update failed
assert response.status_code == 404
assert response.json()["detail"] == "Flow run not found"
@pytest.mark.parametrize("job_variables", [{}, None, {"x": "y"}])
async def test_updating_flow_run_with_missing_work_queue(
self,
session,
client,
job_variables,
):
# create a pool with a pool schema that has a default value
_, pool, deployment = await create_objects_for_pool(session)
# create a flow run
response = await client.post(
f"/deployments/{deployment.id}/create_flow_run", json={}
)
# delete the deployment's work queue + work pool
deleted = await models.workers.delete_work_pool(session, pool.id)
assert deleted
await session.commit()
# attempt to update the flow run
flow_run_id = response.json()["id"]
response = await client.patch(
f"/flow_runs/{flow_run_id}", json={"job_variables": job_variables}
)
# verify we were able to successfully create the run
assert response.status_code == 204
async def test_base_job_template_default_references_to_blocks(
self,
session,
hosted_api_client,
k8s_credentials,
):
# create a pool with a pool schema that has a default value referencing a block
*_, deployment = await create_objects_for_pool(
session,
pool_job_config={
"variables": {
"type": "object",
"required": ["k8s_credentials"],
"properties": {
"k8s_credentials": {
"allOf": [{"$ref": "#/definitions/k8s_credentials"}],
"title": "K8s Credentials",
"default": {
"$ref": {
"block_document_id": f"{k8s_credentials._block_document_id}"
}
},
"description": "The credentials to use to authenticate with K8s.",
},
},
"definitions": {
"k8s_credentials": {
"type": "object",
"title": "k8s_credentials",
"required": ["context_name", "config"],
"properties": {
"context_name": {
"type": "string",
"title": "Context name",
},
"config": {
"type": "object",
"title": "Config",
},
},
"description": "Block used to manage K8s Credentials.",
"block_type_slug": "k8s-credentials",
"block_schema_references": {},
}
},
"description": "Variables for a Modal flow run.",
},
"job_configuration": {
"k8s_credentials": "{{ k8s_credentials }}",
},
},
)
# create a flow run with custom overrides
updates = {"k8s_credentials": {"context_name": "foo", "config": {}}}
response = await hosted_api_client.post(
f"/deployments/{deployment.id}/create_flow_run",
json={"job_variables": updates},
)
assert response.status_code == 201, response.text
assert response.json()["job_variables"] == updates
# update the flow run to force it to refer to the default block's value
flow_run_id = response.json()["id"]
response = await hosted_api_client.patch(
f"/flow_runs/{flow_run_id}", json={"job_variables": {}}
)
assert response.status_code == 204, response.text
# verify that the flow run's job variables are removed
flow_run = await models.flow_runs.read_flow_run(
session=session, flow_run_id=flow_run_id
)
assert flow_run.job_variables == {}
| TestInfraOverridesUpdates |
python | ray-project__ray | doc/source/ray-core/examples/rdt/grpo_contextual_bandits.py | {
"start": 4012,
"end": 5860
} | class ____:
"""Storage for scored trajectory slices.
This class stores the past experiences (AKA trajectories, or slices) of the model.
This allows the learner to sample and learn from the same experiences multiple times
by comparing the latest model with previous models.
The sampler weights the trajectories by the policy version, such that trajectories produced
by more recent versions of the model are more likely to be sampled.
"""
def __init__(self) -> None:
# Each entry stores a TrajectorySlice with CPU tensors.
self.storage: list[TrajectorySlice] = []
def put(self, slice: TrajectorySlice) -> None:
"""Add a new slice to the buffer.
The buffer discards the oldest slices if the buffer gets too large to prevent memory leaks,
and so that the latest model can gradually explore further from the initial random policy.
"""
self.storage.append(slice)
if len(self.storage) > MAX_BUFFER_SIZE:
self.storage = self.storage[-MAX_BUFFER_SIZE:]
def sample_from(self, n: int) -> list[TrajectorySlice]:
"""Sample n scored trajectory slices.
Each slice is a 'group' of actions sampled from the same state.
"""
if self.size() < n:
return []
# The probability of sampling a slice is proportional to its policy version.
total = sum(slice["policy_version"] for slice in self.storage)
probs = [slice["policy_version"] / total for slice in self.storage]
# Sample with replacement without exceeding the buffer's size.
n = min(n, self.size())
chosen = np.random.choice(self.size(), size=n, p=probs, replace=True)
return [self.storage[i] for i in chosen]
def size(self) -> int:
return len(self.storage)
@ray.remote
| ReplayBuffer |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 2327,
"end": 2487
} | class ____(ShowFieldTypeAndContent, PolymorphicModel):
base_id = models.AutoField(primary_key=True)
field_b = models.CharField(max_length=30)
| Enhance_Base |
python | charliermarsh__ruff | crates/ruff_python_ast/generate.py | {
"start": 2219,
"end": 2932
} | class ____:
"""
The parsed representation of the `ast.toml` file. Defines all of the Python
AST syntax nodes, and which groups (`Stmt`, `Expr`, etc.) they belong to.
"""
groups: list[Group]
ungrouped_nodes: list[Node]
all_nodes: list[Node]
def __init__(self, ast: dict[str, Any]) -> None:
self.groups = []
self.ungrouped_nodes = []
self.all_nodes = []
for group_name, group in ast.items():
group = Group(group_name, group)
self.all_nodes.extend(group.nodes)
if group_name == "ungrouped":
self.ungrouped_nodes = group.nodes
else:
self.groups.append(group)
@dataclass
| Ast |
python | falconry__falcon | examples/things_advanced_asgi.py | {
"start": 560,
"end": 1068
} | class ____:
engines = {
'ddg': 'https://duckduckgo.com',
'y': 'https://search.yahoo.com/search',
}
async def __call__(self, req, resp, engine):
url = self.engines[engine]
params = {'q': req.get_param('q', True)}
async with httpx.AsyncClient() as client:
result = await client.get(url, params=params)
resp.status = result.status_code
resp.content_type = result.headers['content-type']
resp.text = result.text
| SinkAdapter |
python | pytest-dev__pytest | src/_pytest/logging.py | {
"start": 23308,
"end": 32516
} | class ____:
"""Attaches to the logging module and captures log messages for each test."""
def __init__(self, config: Config) -> None:
"""Create a new plugin to capture log messages.
The formatter can be safely shared across all handlers so
create a single one for the entire test session here.
"""
self._config = config
# Report logging.
self.formatter = self._create_formatter(
get_option_ini(config, "log_format"),
get_option_ini(config, "log_date_format"),
get_option_ini(config, "log_auto_indent"),
)
self.log_level = get_log_level_for_setting(config, "log_level")
self.caplog_handler = LogCaptureHandler()
self.caplog_handler.setFormatter(self.formatter)
self.report_handler = LogCaptureHandler()
self.report_handler.setFormatter(self.formatter)
# File logging.
self.log_file_level = get_log_level_for_setting(
config, "log_file_level", "log_level"
)
log_file = get_option_ini(config, "log_file") or os.devnull
if log_file != os.devnull:
directory = os.path.dirname(os.path.abspath(log_file))
if not os.path.isdir(directory):
os.makedirs(directory)
self.log_file_mode = get_option_ini(config, "log_file_mode") or "w"
self.log_file_handler = _FileHandler(
log_file, mode=self.log_file_mode, encoding="UTF-8"
)
log_file_format = get_option_ini(config, "log_file_format", "log_format")
log_file_date_format = get_option_ini(
config, "log_file_date_format", "log_date_format"
)
log_file_formatter = DatetimeFormatter(
log_file_format, datefmt=log_file_date_format
)
self.log_file_handler.setFormatter(log_file_formatter)
# CLI/live logging.
self.log_cli_level = get_log_level_for_setting(
config, "log_cli_level", "log_level"
)
if self._log_cli_enabled():
terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
# Guaranteed by `_log_cli_enabled()`.
assert terminal_reporter is not None
capture_manager = config.pluginmanager.get_plugin("capturemanager")
# if capturemanager plugin is disabled, live logging still works.
self.log_cli_handler: (
_LiveLoggingStreamHandler | _LiveLoggingNullHandler
) = _LiveLoggingStreamHandler(terminal_reporter, capture_manager)
else:
self.log_cli_handler = _LiveLoggingNullHandler()
log_cli_formatter = self._create_formatter(
get_option_ini(config, "log_cli_format", "log_format"),
get_option_ini(config, "log_cli_date_format", "log_date_format"),
get_option_ini(config, "log_auto_indent"),
)
self.log_cli_handler.setFormatter(log_cli_formatter)
self._disable_loggers(loggers_to_disable=config.option.logger_disable)
def _disable_loggers(self, loggers_to_disable: list[str]) -> None:
if not loggers_to_disable:
return
for name in loggers_to_disable:
logger = logging.getLogger(name)
logger.disabled = True
def _create_formatter(self, log_format, log_date_format, auto_indent):
# Color option doesn't exist if terminal plugin is disabled.
color = getattr(self._config.option, "color", "no")
if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(
log_format
):
formatter: logging.Formatter = ColoredLevelFormatter(
create_terminal_writer(self._config), log_format, log_date_format
)
else:
formatter = DatetimeFormatter(log_format, log_date_format)
formatter._style = PercentStyleMultiline(
formatter._style._fmt, auto_indent=auto_indent
)
return formatter
def set_log_path(self, fname: str) -> None:
"""Set the filename parameter for Logging.FileHandler().
Creates parent directory if it does not exist.
.. warning::
This is an experimental API.
"""
fpath = Path(fname)
if not fpath.is_absolute():
fpath = self._config.rootpath / fpath
if not fpath.parent.exists():
fpath.parent.mkdir(exist_ok=True, parents=True)
# https://github.com/python/mypy/issues/11193
stream: io.TextIOWrapper = fpath.open(mode=self.log_file_mode, encoding="UTF-8") # type: ignore[assignment]
old_stream = self.log_file_handler.setStream(stream)
if old_stream:
old_stream.close()
def _log_cli_enabled(self) -> bool:
"""Return whether live logging is enabled."""
enabled = self._config.getoption(
"--log-cli-level"
) is not None or self._config.getini("log_cli")
if not enabled:
return False
terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter")
if terminal_reporter is None:
# terminal reporter is disabled e.g. by pytest-xdist.
return False
return True
@hookimpl(wrapper=True, tryfirst=True)
def pytest_sessionstart(self) -> Generator[None]:
self.log_cli_handler.set_when("sessionstart")
with catching_logs(self.log_cli_handler, level=self.log_cli_level):
with catching_logs(self.log_file_handler, level=self.log_file_level):
return (yield)
@hookimpl(wrapper=True, tryfirst=True)
def pytest_collection(self) -> Generator[None]:
self.log_cli_handler.set_when("collection")
with catching_logs(self.log_cli_handler, level=self.log_cli_level):
with catching_logs(self.log_file_handler, level=self.log_file_level):
return (yield)
@hookimpl(wrapper=True)
def pytest_runtestloop(self, session: Session) -> Generator[None, object, object]:
if session.config.option.collectonly:
return (yield)
if self._log_cli_enabled() and self._config.get_verbosity() < 1:
# The verbose flag is needed to avoid messy test progress output.
self._config.option.verbose = 1
with catching_logs(self.log_cli_handler, level=self.log_cli_level):
with catching_logs(self.log_file_handler, level=self.log_file_level):
return (yield) # Run all the tests.
@hookimpl
def pytest_runtest_logstart(self) -> None:
self.log_cli_handler.reset()
self.log_cli_handler.set_when("start")
@hookimpl
def pytest_runtest_logreport(self) -> None:
self.log_cli_handler.set_when("logreport")
@contextmanager
def _runtest_for(self, item: nodes.Item, when: str) -> Generator[None]:
"""Implement the internals of the pytest_runtest_xxx() hooks."""
with (
catching_logs(
self.caplog_handler,
level=self.log_level,
) as caplog_handler,
catching_logs(
self.report_handler,
level=self.log_level,
) as report_handler,
):
caplog_handler.reset()
report_handler.reset()
item.stash[caplog_records_key][when] = caplog_handler.records
item.stash[caplog_handler_key] = caplog_handler
try:
yield
finally:
log = report_handler.stream.getvalue().strip()
item.add_report_section(when, "log", log)
@hookimpl(wrapper=True)
def pytest_runtest_setup(self, item: nodes.Item) -> Generator[None]:
self.log_cli_handler.set_when("setup")
empty: dict[str, list[logging.LogRecord]] = {}
item.stash[caplog_records_key] = empty
with self._runtest_for(item, "setup"):
yield
@hookimpl(wrapper=True)
def pytest_runtest_call(self, item: nodes.Item) -> Generator[None]:
self.log_cli_handler.set_when("call")
with self._runtest_for(item, "call"):
yield
@hookimpl(wrapper=True)
def pytest_runtest_teardown(self, item: nodes.Item) -> Generator[None]:
self.log_cli_handler.set_when("teardown")
try:
with self._runtest_for(item, "teardown"):
yield
finally:
del item.stash[caplog_records_key]
del item.stash[caplog_handler_key]
@hookimpl
def pytest_runtest_logfinish(self) -> None:
self.log_cli_handler.set_when("finish")
@hookimpl(wrapper=True, tryfirst=True)
def pytest_sessionfinish(self) -> Generator[None]:
self.log_cli_handler.set_when("sessionfinish")
with catching_logs(self.log_cli_handler, level=self.log_cli_level):
with catching_logs(self.log_file_handler, level=self.log_file_level):
return (yield)
@hookimpl
def pytest_unconfigure(self) -> None:
# Close the FileHandler explicitly.
# (logging.shutdown might have lost the weakref?!)
self.log_file_handler.close()
| LoggingPlugin |
python | coleifer__peewee | playhouse/hybrid.py | {
"start": 613,
"end": 1528
} | class ____(ModelDescriptor):
def __init__(self, fget, fset=None, fdel=None, expr=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
self.expr = expr or fget
def __get__(self, instance, instance_type):
if instance is None:
return self.expr(instance_type)
return self.fget(instance)
def __set__(self, instance, value):
if self.fset is None:
raise AttributeError('Cannot set attribute.')
self.fset(instance, value)
def __delete__(self, instance):
if self.fdel is None:
raise AttributeError('Cannot delete attribute.')
self.fdel(instance)
def setter(self, fset):
self.fset = fset
return self
def deleter(self, fdel):
self.fdel = fdel
return self
def expression(self, expr):
self.expr = expr
return self
| hybrid_property |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 43830,
"end": 44011
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("CREATED_AT", "DOMAIN")
| VerifiableDomainOrderField |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_test.py | {
"start": 32025,
"end": 33497
} | class ____(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in range(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_area(img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_area_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
| ResizeAreaBenchmark |
python | patrick-kidger__equinox | equinox/nn/_conv.py | {
"start": 24511,
"end": 25571
} | class ____(ConvTranspose):
"""As [`equinox.nn.ConvTranspose`][] with `num_spatial_dims=1`."""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int | Sequence[int],
stride: int | Sequence[int] = 1,
output_padding: int | Sequence[int] = 0,
padding: str | int | Sequence[int] | Sequence[tuple[int, int]] = 0,
dilation: int | Sequence[int] = 1,
groups: int = 1,
use_bias: bool = True,
padding_mode: str = "ZEROS",
dtype=None,
*,
key: PRNGKeyArray,
):
super().__init__(
num_spatial_dims=1,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
output_padding=output_padding,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
padding_mode=padding_mode,
dtype=dtype,
key=key,
)
| ConvTranspose1d |
python | google__pytype | pytype/pytd/optimize.py | {
"start": 11462,
"end": 12408
} | class ____(visitors.Visitor):
"""Simplify Unions with superclasses.
E.g., this changes
Union[int, bool]
to
int
since bool is a subclass of int.
(Interpreting types as "sets of values", this simplification is sound since
A union B = A, if B is a subset of A.)
"""
def __init__(self, hierarchy):
super().__init__()
self.hierarchy = hierarchy
def VisitUnionType(self, union):
c = collections.Counter()
for t in set(union.type_list):
if isinstance(t, pytd.GENERIC_BASE_TYPE):
c += collections.Counter(self.hierarchy.ExpandSubClasses(str(t)))
# Below, c[str[t]] can be zero - that's the default for non-existent items
# in collections.Counter. It'll happen for types that are not
# instances of GENERIC_BASE_TYPE, like container types.
new_type_list = [t for t in union.type_list if c[str(t)] <= 1]
return pytd_utils.JoinTypes(new_type_list)
| SimplifyUnionsWithSuperclasses |
python | streamlit__streamlit | lib/streamlit/elements/form.py | {
"start": 2392,
"end": 18733
} | class ____:
@gather_metrics("form")
def form(
self,
key: str,
clear_on_submit: bool = False,
*,
enter_to_submit: bool = True,
border: bool = True,
width: Width = "stretch",
height: Height = "content",
) -> DeltaGenerator:
"""Create a form that batches elements together with a "Submit" button.
A form is a container that visually groups other elements and
widgets together, and contains a Submit button. When the form's
Submit button is pressed, all widget values inside the form will be
sent to Streamlit in a batch.
To add elements to a form object, you can use ``with`` notation
(preferred) or just call methods directly on the form. See
examples below.
Forms have a few constraints:
- Every form must contain a ``st.form_submit_button``.
- ``st.button`` and ``st.download_button`` cannot be added to a form.
- Forms can appear anywhere in your app (sidebar, columns, etc),
but they cannot be embedded inside other forms.
- Within a form, the only widget that can have a callback function is
``st.form_submit_button``.
Parameters
----------
key : str
A string that identifies the form. Each form must have its own
key. (This key is not displayed to the user in the interface.)
clear_on_submit : bool
If True, all widgets inside the form will be reset to their default
values after the user presses the Submit button. Defaults to False.
(Note that Custom Components are unaffected by this flag, and
will not be reset to their defaults on form submission.)
enter_to_submit : bool
Whether to submit the form when a user presses Enter while
interacting with a widget inside the form.
If this is ``True`` (default), pressing Enter while interacting
with a form widget is equivalent to clicking the first
``st.form_submit_button`` in the form.
If this is ``False``, the user must click an
``st.form_submit_button`` to submit the form.
If the first ``st.form_submit_button`` in the form is disabled,
the form will override submission behavior with
``enter_to_submit=False``.
border : bool
Whether to show a border around the form. Defaults to True.
.. note::
Not showing a border can be confusing to viewers since interacting with a
widget in the form will do nothing. You should only remove the border if
there's another border (e.g. because of an expander) or the form is small
(e.g. just a text input and a submit button).
width : "stretch", "content", or int
The width of the form container. This can be one of the following:
- ``"stretch"`` (default): The width of the container matches the
width of the parent container.
- ``"content"``: The width of the container matches the width of its
content, but doesn't exceed the width of the parent container.
- An integer specifying the width in pixels: The container has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the container matches the width
of the parent container.
height : "content", "stretch", or int
The height of the form container. This can be one of the following:
- ``"content"`` (default): The height of the container matches the
height of its content.
- ``"stretch"``: The height of the container matches the height of
its content or the height of the parent container, whichever is
larger. If the container is not in a parent container, the height
of the container matches the height of its content.
- An integer specifying the height in pixels: The container has a
fixed height. If the content is larger than the specified
height, scrolling is enabled.
.. note::
Use scrolling containers sparingly. If you use scrolling
containers, avoid heights that exceed 500 pixels. Otherwise,
the scroll surface of the container might cover the majority of
the screen on mobile devices, which makes it hard to scroll the
rest of the app.
Examples
--------
Inserting elements using ``with`` notation:
>>> import streamlit as st
>>>
>>> with st.form("my_form"):
... st.write("Inside the form")
... slider_val = st.slider("Form slider")
... checkbox_val = st.checkbox("Form checkbox")
...
... # Every form must have a submit button.
... submitted = st.form_submit_button("Submit")
... if submitted:
... st.write("slider", slider_val, "checkbox", checkbox_val)
>>> st.write("Outside the form")
.. output::
https://doc-form1.streamlit.app/
height: 425px
Inserting elements out of order:
>>> import streamlit as st
>>>
>>> form = st.form("my_form")
>>> form.slider("Inside the form")
>>> st.slider("Outside the form")
>>>
>>> # Now add a submit button to the form:
>>> form.form_submit_button("Submit")
.. output::
https://doc-form2.streamlit.app/
height: 375px
"""
if is_in_form(self.dg):
raise StreamlitAPIException("Forms cannot be nested in other forms.")
check_cache_replay_rules()
check_session_state_rules(default_value=None, key=key, writes_allowed=False)
# A form is uniquely identified by its key.
form_id = key
ctx = get_script_run_ctx()
if ctx is not None:
new_form_id = form_id not in ctx.form_ids_this_run
if new_form_id:
ctx.form_ids_this_run.add(form_id)
else:
raise StreamlitAPIException(_build_duplicate_form_message(key))
block_proto = Block_pb2.Block()
block_proto.form.form_id = form_id
block_proto.form.clear_on_submit = clear_on_submit
block_proto.form.enter_to_submit = enter_to_submit
block_proto.form.border = border
validate_width(width, allow_content=True)
block_proto.width_config.CopyFrom(get_width_config(width))
validate_height(height, allow_content=True)
block_proto.height_config.CopyFrom(get_height_config(height))
block_dg = self.dg._block(block_proto)
# Attach the form's button info to the newly-created block's
# DeltaGenerator.
block_dg._form_data = FormData(form_id)
return block_dg
@gather_metrics("form_submit_button")
def form_submit_button(
self,
label: str = "Submit",
help: str | None = None,
on_click: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
key: Key | None = None,
type: Literal["primary", "secondary", "tertiary"] = "secondary",
icon: str | None = None,
disabled: bool = False,
use_container_width: bool | None = None,
width: Width = "content",
shortcut: str | None = None,
) -> bool:
r"""Display a form submit button.
When this button is clicked, all widget values inside the form will be
sent from the user's browser to your Streamlit server in a batch.
Every form must have at least one ``st.form_submit_button``. An
``st.form_submit_button`` cannot exist outside of a form.
For more information about forms, check out our `docs
<https://docs.streamlit.io/develop/concepts/architecture/forms>`_.
Parameters
----------
label : str
A short label explaining to the user what this button is for. This
defaults to ``"Submit"``. The label can optionally contain
GitHub-flavored Markdown of the following types: Bold, Italics,
Strikethroughs, Inline Code, Links, and Images. Images display like
icons, with a max height equal to the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
help : str or None
A tooltip that gets displayed when the button is hovered over. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
on_click : callable
An optional callback invoked when this button is clicked.
args : list or tuple
An optional list or tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. No two widgets may have the same key.
type : "primary", "secondary", or "tertiary"
An optional string that specifies the button type. This can be one
of the following:
- ``"primary"``: The button's background is the app's primary color
for additional emphasis.
- ``"secondary"`` (default): The button's background coordinates
with the app's background color for normal emphasis.
- ``"tertiary"``: The button is plain text without a border or
background for subtlety.
icon : str or None
An optional emoji or icon to display next to the button label. If ``icon``
is ``None`` (default), no icon is displayed. If ``icon`` is a
string, the following options are valid:
- A single-character emoji. For example, you can set ``icon="🚨"``
or ``icon="🔥"``. Emoji short codes are not supported.
- An icon from the Material Symbols library (rounded style) in the
format ``":material/icon_name:"`` where "icon_name" is the name
of the icon in snake case.
For example, ``icon=":material/thumb_up:"`` will display the
Thumb Up icon. Find additional icons in the `Material Symbols
<https://fonts.google.com/icons?icon.set=Material+Symbols&icon.style=Rounded>`_
font library.
- ``"spinner"``: Displays a spinner as an icon.
disabled : bool
Whether to disable the button. If this is ``False`` (default), the
user can interact with the button. If this is ``True``, the button
is grayed-out and can't be clicked.
If the first ``st.form_submit_button`` in the form is disabled,
the form will override submission behavior with
``enter_to_submit=False``.
use_container_width : bool
Whether to expand the button's width to fill its parent container.
If ``use_container_width`` is ``False`` (default), Streamlit sizes
the button to fit its contents. If ``use_container_width`` is
``True``, the width of the button matches its parent container.
In both cases, if the contents of the button are wider than the
parent container, the contents will line wrap.
.. deprecated::
``use_container_width`` is deprecated and will be removed in a
future release. For ``use_container_width=True``, use
``width="stretch"``. For ``use_container_width=False``, use
``width="content"``.
width : "content", "stretch", or int
The width of the button. This can be one of the following:
- ``"content"`` (default): The width of the button matches the
width of its content, but doesn't exceed the width of the parent
container.
- ``"stretch"``: The width of the button matches the width of the
parent container.
- An integer specifying the width in pixels: The button has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the button matches the width
of the parent container.
shortcut : str or None
An optional keyboard shortcut that triggers the submit button.
Provide a single alphanumeric key (e.g. ``"K"``, ``"4"``), a
function key (e.g. ``"F11"``), or a supported special key (e.g.
``"Enter"``, ``"Esc"``), optionally combined with modifiers.
Examples: ``"Ctrl+K"``, ``"Cmd+Shift+O"``, ``"Mod+Enter"``.
.. note::
The keys ``"C"`` and ``"R"`` are reserved and cannot be used,
even with modifiers. ``"Ctrl"``, ``"Cmd"``, and ``"Mod"`` are
platform-dependent: they map to ``"Command"`` (⌘) on macOS and
``"Control"`` on Windows/Linux. Punctuation keys (e.g. ``"."``,
``","``) are not currently supported.
Returns
-------
bool
True if the button was clicked.
"""
ctx = get_script_run_ctx()
if use_container_width is not None:
width = "stretch" if use_container_width else "content"
# Checks whether the entered button type is one of the allowed options
if type not in ["primary", "secondary", "tertiary"]:
raise StreamlitAPIException(
'The type argument to st.form_submit_button must be "primary", "secondary", or "tertiary". \n'
f'The argument passed was "{type}".'
)
return self._form_submit_button(
label=label,
help=help,
on_click=on_click,
args=args,
kwargs=kwargs,
type=type,
icon=icon,
disabled=disabled,
ctx=ctx,
width=width,
key=key,
shortcut=shortcut,
)
def _form_submit_button(
self,
label: str = "Submit",
help: str | None = None,
on_click: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
key: Key | None = None,
type: Literal["primary", "secondary", "tertiary"] = "secondary",
icon: str | None = None,
disabled: bool = False,
ctx: ScriptRunContext | None = None,
width: Width = "content",
shortcut: str | None = None,
) -> bool:
form_id = current_form_id(self.dg)
submit_button_key = to_key(key) or f"FormSubmitter:{form_id}-{label}"
return self.dg._button(
label=label,
key=submit_button_key,
help=help,
is_form_submitter=True,
on_click=on_click,
args=args,
kwargs=kwargs,
type=type,
icon=icon,
disabled=disabled,
ctx=ctx,
width=width,
shortcut=shortcut,
)
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
| FormMixin |
python | huggingface__transformers | tests/models/glm46v/test_processor_glm46v.py | {
"start": 1039,
"end": 10835
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = Glm46VProcessor
model_id = "THUDM/GLM-4.1V-9B-Thinking"
@classmethod
def _setup_test_attributes(cls, processor):
cls.image_token = processor.image_token
@classmethod
def _setup_from_pretrained(cls, model_id, **kwargs):
return super()._setup_from_pretrained(
model_id,
do_sample_frames=False,
patch_size=4,
size={"shortest_edge": 12 * 12, "longest_edge": 18 * 18},
**kwargs,
)
@require_torch
@require_av
def _test_apply_chat_template(
self,
modality: str,
batch_size: int,
return_tensors: str,
input_name: str,
processor_name: str,
input_data: list[str],
):
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
if processor_name not in self.processor_class.get_attributes():
self.skipTest(f"{processor_name} attribute not present in {self.processor_class}")
batch_messages = [
[
{
"role": "user",
"content": [{"type": "text", "text": "Describe this."}],
},
]
] * batch_size
# Test that jinja can be applied
formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False)
self.assertEqual(len(formatted_prompt), batch_size)
# Test that tokenizing with template and directly with `self.tokenizer` gives same output
formatted_prompt_tokenized = processor.apply_chat_template(
batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors
)
add_special_tokens = True
if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token):
add_special_tokens = False
tok_output = processor.tokenizer(
formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens
)
expected_output = tok_output.input_ids
self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist())
# Test that kwargs passed to processor's `__call__` are actually used
tokenized_prompt_100 = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
padding="max_length",
truncation=True,
return_tensors=return_tensors,
max_length=100,
)
self.assertEqual(len(tokenized_prompt_100[0]), 100)
# Test that `return_dict=True` returns text related inputs in the dict
out_dict_text = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors=return_tensors,
)
self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask"]))
self.assertEqual(len(out_dict_text["input_ids"]), batch_size)
self.assertEqual(len(out_dict_text["attention_mask"]), batch_size)
# Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict
for idx, url in enumerate(input_data[:batch_size]):
batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}]
out_dict = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors=return_tensors,
fps=2
if isinstance(input_data[0], str)
else None, # by default no more than 2 frames per second, otherwise too slow
do_sample_frames=bool(isinstance(input_data[0], str)), # don't sample frames if decoded video is used
)
input_name = getattr(self, input_name)
self.assertTrue(input_name in out_dict)
self.assertEqual(len(out_dict["input_ids"]), batch_size)
self.assertEqual(len(out_dict["attention_mask"]), batch_size)
if modality == "video":
# qwen pixels don't scale with bs same way as other models, calculate expected video token count based on video_grid_thw
expected_video_token_count = 0
for thw in out_dict["video_grid_thw"]:
expected_video_token_count += thw[0] * thw[1] * thw[2]
mm_len = expected_video_token_count
else:
mm_len = batch_size * 4
self.assertEqual(len(out_dict[input_name]), mm_len)
return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list}
for k in out_dict:
self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors])
@require_av
def test_apply_chat_template_video_frame_sampling(self):
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
signature = inspect.signature(processor.__call__)
if "videos" not in {*signature.parameters.keys()} or (
signature.parameters.get("videos") is not None
and signature.parameters["videos"].annotation == inspect._empty
):
self.skipTest("Processor doesn't accept videos at input")
messages = [
[
{
"role": "user",
"content": [
{"type": "video"},
{"type": "text", "text": "What is shown in this video?"},
],
},
]
]
formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
self.assertEqual(len(formatted_prompt), 1)
formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
expected_output = processor.tokenizer(formatted_prompt, return_tensors=None).input_ids
self.assertListEqual(expected_output, formatted_prompt_tokenized)
out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True)
self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask"])
# Add video URL for return dict and load with `num_frames` arg
messages[0][0]["content"][0] = {
"type": "video",
"url": url_to_local_path(
"https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/tiny_video.mp4"
),
}
# Load with `video_fps` arg
video_fps = 10
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
fps=video_fps,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 8)
# Load the whole video
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
do_sample_frames=False,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 24)
# Load video as a list of frames (i.e. images). NOTE: each frame should have same size
# because we assume they come from one video
messages[0][0]["content"][0] = {
"type": "video",
"url": [
url_to_local_path(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
),
url_to_local_path(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
),
],
}
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
do_sample_frames=False,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 4)
# When the inputs are frame URLs/paths we expect that those are already
# sampled and will raise an error is asked to sample again.
with self.assertRaisesRegex(
ValueError, "Sampling frames from a list of images is not supported! Set `do_sample_frames=False`"
):
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
do_sample_frames=True,
)
def test_model_input_names(self):
processor = self.get_processor()
text = self.prepare_text_inputs(modalities=["image", "video"])
image_input = self.prepare_image_inputs()
video_inputs = self.prepare_video_inputs()
inputs_dict = {"text": text, "images": image_input, "videos": video_inputs}
inputs = processor(**inputs_dict, return_tensors="pt", do_sample_frames=False)
self.assertSetEqual(set(inputs.keys()), set(processor.model_input_names))
| Glm46VProcessorTest |
python | huggingface__transformers | src/transformers/models/minimax/modeling_minimax.py | {
"start": 40233,
"end": 40342
} | class ____(GenericForSequenceClassification, MiniMaxPreTrainedModel):
pass
| MiniMaxForSequenceClassification |
python | google__jax | jax/_src/debugger/colab_debugger.py | {
"start": 1158,
"end": 3620
} | class ____(colab_lib.DynamicDOMElement):
"""A mutable DOM element that displays code as HTML."""
def __init__(self, code_: str, highlights: list[int], linenostart: int = 1):
self._code = code_
self._highlights = highlights
self._view = colab_lib.dynamic(colab_lib.div())
self._linenostart = linenostart
def render(self):
self.update_code(
self._code, self._highlights, linenostart=self._linenostart)
def clear(self):
self._view.clear()
def append(self, child):
raise NotImplementedError
def update(self, elem):
self._view.update(elem)
def _highlight_code(self, code: str, highlights, linenostart: int):
is_dark_mode = output.eval_js(
'document.documentElement.matches("[theme=dark]");')
code_style = "monokai" if is_dark_mode else "default"
hl_color = "#4e56b7" if is_dark_mode else "#fff7c1"
if IS_PYGMENTS_ENABLED:
lexer = pygments.lexers.get_lexer_by_name("python")
formatter = pygments.formatters.HtmlFormatter( # pytype: disable=module-attr
full=False,
hl_lines=highlights,
linenos=True,
linenostart=linenostart,
style=code_style)
if hl_color:
formatter.style.highlight_color = hl_color
css_ = formatter.get_style_defs()
code = pygments.highlight(code, lexer, formatter)
else:
return "";
return code, css_
def update_code(self, code_, highlights, *, linenostart: int = 1):
"""Updates the code viewer to use new code."""
self._code = code_
self._view.clear()
code_, css_ = self._highlight_code(self._code, highlights, linenostart)
uuid_ = uuid.uuid4()
code_div = colab_lib.div(
colab_lib.css(css_),
code_,
id=f"code-{uuid_}",
style=colab_lib.style({
"max-height": "500px",
"overflow-y": "scroll",
"background-color": "var(--colab-border-color)",
"padding": "5px 5px 5px 5px",
}))
if highlights:
percent_scroll = highlights[0] / len(self._code.split("\n"))
else:
percent_scroll = 0.
self.update(code_div)
# Scroll to where the line is
output.eval_js("""
console.log("{id}")
var elem = document.getElementById("{id}")
var maxScrollPosition = elem.scrollHeight - elem.clientHeight;
elem.scrollTop = maxScrollPosition * {percent_scroll}
""".format(id=f"code-{uuid_}", percent_scroll=percent_scroll))
| CodeViewer |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 704954,
"end": 705355
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("actor", "branch_protection_rule")
actor = sgqlc.types.Field("BranchActorAllowanceActor", graphql_name="actor")
branch_protection_rule = sgqlc.types.Field(
BranchProtectionRule, graphql_name="branchProtectionRule"
)
| BypassForcePushAllowance |
python | django__django | tests/fixtures/models.py | {
"start": 775,
"end": 1035
} | class ____(models.Model):
headline = models.CharField(max_length=100, default="Default headline")
pub_date = models.DateTimeField()
class Meta:
ordering = ("-pub_date", "headline")
def __str__(self):
return self.headline
| Article |
python | sqlalchemy__sqlalchemy | test/sql/test_operators.py | {
"start": 51897,
"end": 58807
} | class ____(fixtures.TestBase, testing.AssertsCompiledSQL):
"""test interaction of and_()/or_() with boolean , null constants"""
__dialect__ = default.DefaultDialect(supports_native_boolean=True)
def test_single_bool_one(self):
self.assert_compile(~and_(true()), "false")
def test_single_bool_two(self):
self.assert_compile(~and_(True), "false")
def test_single_bool_three(self):
self.assert_compile(or_(~and_(true())), "false")
def test_single_bool_four(self):
self.assert_compile(~or_(false()), "true")
def test_single_bool_five(self):
self.assert_compile(~or_(False), "true")
def test_single_bool_six(self):
self.assert_compile(and_(~or_(false())), "true")
def test_single_bool_seven(self):
self.assert_compile(and_(True), "true")
def test_single_bool_eight(self):
self.assert_compile(or_(False), "false")
def test_single_bool_nine(self):
self.assert_compile(
and_(True),
"1 = 1",
dialect=default.DefaultDialect(supports_native_boolean=False),
)
def test_single_bool_ten(self):
self.assert_compile(
or_(False),
"0 = 1",
dialect=default.DefaultDialect(supports_native_boolean=False),
)
@combinations(
(and_, "and_", r"true", "True"),
(or_, "or_", r"false", "False"),
)
def test_empty_clauses(self, op, str_op, str_continue, str_continue_2):
# these warning classes will change to ArgumentError when the
# deprecated behavior is disabled
with expect_deprecated(
re.escape(
f"Invoking {str_op}() without arguments is deprecated, and "
"will be disallowed in a future release. For an empty "
f"{str_op}() construct, use "
f"'{str_op}({str_continue}(), *args)' or "
f"'{str_op}({str_continue_2}, *args)'."
)
):
op()
def test_empty_construct_for_whereclause(self):
eq_(BooleanClauseList._construct_for_whereclause(()), None)
def test_non_empty_construct_for_whereclause(self):
self.assert_compile(
BooleanClauseList._construct_for_whereclause([column("q") == 5]),
"q = :q_1",
)
def test_empty_and_raw(self):
self.assert_compile(
BooleanClauseList._construct_raw(operators.and_), ""
)
def test_empty_or_raw(self):
self.assert_compile(
BooleanClauseList._construct_raw(operators.and_), ""
)
def test_four(self):
x = column("x")
self.assert_compile(
and_(or_(x == 5), or_(x == 7)), "x = :x_1 AND x = :x_2"
)
def test_five(self):
x = column("x")
self.assert_compile(and_(true()._ifnone(None), x == 7), "x = :x_1")
def test_six(self):
x = column("x")
self.assert_compile(or_(true(), x == 7), "true")
self.assert_compile(or_(x == 7, true()), "true")
self.assert_compile(~or_(x == 7, true()), "false")
def test_six_pt_five(self):
x = column("x")
self.assert_compile(
select(x).where(or_(x == 7, true())), "SELECT x WHERE true"
)
self.assert_compile(
select(x).where(or_(x == 7, true())),
"SELECT x WHERE 1 = 1",
dialect=default.DefaultDialect(supports_native_boolean=False),
)
def test_seven(self):
x = column("x")
self.assert_compile(
and_(true(), x == 7, true(), x == 9), "x = :x_1 AND x = :x_2"
)
def test_eight(self):
x = column("x")
self.assert_compile(
or_(false(), x == 7, false(), x == 9), "x = :x_1 OR x = :x_2"
)
def test_nine(self):
x = column("x")
self.assert_compile(and_(x == 7, x == 9, false(), x == 5), "false")
self.assert_compile(~and_(x == 7, x == 9, false(), x == 5), "true")
def test_ten(self):
self.assert_compile(and_(None, None), "NULL AND NULL")
def test_eleven(self):
x = column("x")
self.assert_compile(
select(x).where(None).where(None), "SELECT x WHERE NULL AND NULL"
)
def test_twelve(self):
x = column("x")
self.assert_compile(
select(x).where(and_(None, None)), "SELECT x WHERE NULL AND NULL"
)
def test_thirteen(self):
x = column("x")
self.assert_compile(
select(x).where(~and_(None, None)),
"SELECT x WHERE NOT (NULL AND NULL)",
)
def test_fourteen(self):
x = column("x")
self.assert_compile(
select(x).where(~null()), "SELECT x WHERE NOT NULL"
)
def test_constants_are_singleton(self):
is_(null(), null())
is_(false(), false())
is_(true(), true())
def test_constant_render_distinct(self):
self.assert_compile(
select(null(), null()), "SELECT NULL AS anon_1, NULL AS anon__1"
)
self.assert_compile(
select(true(), true()), "SELECT true AS anon_1, true AS anon__1"
)
self.assert_compile(
select(false(), false()),
"SELECT false AS anon_1, false AS anon__1",
)
def test_constant_render_distinct_use_labels(self):
self.assert_compile(
select(null(), null()).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
),
"SELECT NULL AS anon_1, NULL AS anon__1",
)
self.assert_compile(
select(true(), true()).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
),
"SELECT true AS anon_1, true AS anon__1",
)
self.assert_compile(
select(false(), false()).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
),
"SELECT false AS anon_1, false AS anon__1",
)
def test_is_true_literal(self):
c = column("x", Boolean)
self.assert_compile(c.is_(True), "x IS true")
def test_is_false_literal(self):
c = column("x", Boolean)
self.assert_compile(c.is_(False), "x IS false")
def test_and_false_literal_leading(self):
self.assert_compile(and_(False, True), "false")
self.assert_compile(and_(False, False), "false")
def test_and_true_literal_leading(self):
self.assert_compile(and_(True, True), "true")
self.assert_compile(and_(True, False), "false")
def test_or_false_literal_leading(self):
self.assert_compile(or_(False, True), "true")
self.assert_compile(or_(False, False), "false")
def test_or_true_literal_leading(self):
self.assert_compile(or_(True, True), "true")
self.assert_compile(or_(True, False), "true")
| ConjunctionTest |
python | keras-team__keras | keras/src/saving/file_editor.py | {
"start": 814,
"end": 28970
} | class ____:
"""Utility to inspect, edit, and resave Keras weights files.
You will find this class useful when adapting
an old saved weights file after having made
architecture changes to a model.
Args:
filepath: The path to a local file to inspect and edit.
Examples:
```python
editor = KerasFileEditor("my_model.weights.h5")
# Displays current contents
editor.summary()
# Remove the weights of an existing layer
editor.delete_object("layers/dense_2")
# Add the weights of a new layer
editor.add_object("layers/einsum_dense", weights={"0": ..., "1": ...})
# Save the weights of the edited model
editor.resave_weights("edited_model.weights.h5")
```
"""
def __init__(
self,
filepath,
):
self.filepath = filepath
self.metadata = None
self.config = None
self.model = None
self.console = rich.console.Console(highlight=False)
if filepath.endswith(".keras"):
zf = zipfile.ZipFile(filepath, "r")
weights_store = H5IOStore(
f"{saving_lib._VARS_FNAME}.h5",
archive=zf,
mode="r",
)
with zf.open(saving_lib._CONFIG_FILENAME, "r") as f:
config_json = f.read()
with zf.open(saving_lib._METADATA_FILENAME, "r") as f:
metadata_json = f.read()
self.config = json.loads(config_json)
self.metadata = json.loads(metadata_json)
elif filepath.endswith(".weights.h5"):
weights_store = H5IOStore(filepath, mode="r")
else:
raise ValueError(
"Invalid filename: "
"expected a `.keras` `.weights.h5` extension. "
f"Received: filepath={filepath}"
)
weights_dict, object_metadata = self._extract_weights_from_store(
weights_store.h5_file
)
weights_store.close()
self.weights_dict = weights_dict
self.object_metadata = object_metadata # {path: object_name}
self.console.print(self._generate_filepath_info(rich_style=True))
if self.metadata is not None:
self.console.print(self._generate_metadata_info(rich_style=True))
def summary(self):
"""Prints the weight structure of the opened file."""
self._weights_summary_cli()
def compare(self, reference_model):
"""Compares the opened file to a reference model.
This method will list all mismatches between the
currently opened file and the provided reference model.
Args:
reference_model: Model instance to compare to.
Returns:
Dict with the following keys:
`'status'`, `'error_count'`, `'match_count'`.
Status can be `'success'` or `'error'`.
`'error_count'` is the number of mismatches found.
`'match_count'` is the number of matching weights found.
"""
self.console.print("Running comparison")
ref_spec = {}
get_weight_spec_of_saveable(reference_model, ref_spec)
def _compare(
target,
ref_spec,
inner_path,
target_name,
ref_name,
error_count,
match_count,
checked_paths,
):
base_inner_path = inner_path
for ref_key, ref_val in ref_spec.items():
inner_path = f"{base_inner_path}/{ref_key}"
if inner_path in checked_paths:
continue
if ref_key not in target:
error_count += 1
checked_paths.add(inner_path)
if isinstance(ref_val, dict):
self.console.print(
f"[color(160)]...Object [bold]{inner_path}[/] "
f"present in {ref_name}, "
f"missing from {target_name}[/]"
)
self.console.print(
f" In {ref_name}, {inner_path} contains "
f"the following keys: {list(ref_val.keys())}"
)
else:
self.console.print(
f"[color(160)]...Weight [bold]{inner_path}[/] "
f"present in {ref_name}, "
f"missing from {target_name}[/]"
)
elif isinstance(ref_val, dict):
_error_count, _match_count = _compare(
target[ref_key],
ref_spec[ref_key],
inner_path,
target_name,
ref_name,
error_count=error_count,
match_count=match_count,
checked_paths=checked_paths,
)
error_count += _error_count
match_count += _match_count
else:
if target[ref_key].shape != ref_val.shape:
error_count += 1
checked_paths.add(inner_path)
self.console.print(
f"[color(160)]...Weight shape mismatch "
f"for [bold]{inner_path}[/][/]\n"
f" In {ref_name}: "
f"shape={ref_val.shape}\n"
f" In {target_name}: "
f"shape={target[ref_key].shape}"
)
else:
match_count += 1
return error_count, match_count
checked_paths = set()
error_count, match_count = _compare(
self.weights_dict,
ref_spec,
inner_path="",
target_name="saved file",
ref_name="reference model",
error_count=0,
match_count=0,
checked_paths=checked_paths,
)
_error_count, _ = _compare(
ref_spec,
self.weights_dict,
inner_path="",
target_name="reference model",
ref_name="saved file",
error_count=0,
match_count=0,
checked_paths=checked_paths,
)
error_count += _error_count
self.console.print("─────────────────────")
if error_count == 0:
status = "success"
self.console.print(
"[color(28)][bold]Comparison successful:[/] "
"saved file is compatible with the reference model[/]"
)
if match_count == 1:
plural = ""
else:
plural = "s"
self.console.print(
f" Found {match_count} matching weight{plural}"
)
else:
status = "error"
if error_count == 1:
plural = ""
else:
plural = "s"
self.console.print(
f"[color(160)][bold]Found {error_count} error{plural}:[/] "
"saved file is not compatible with the reference model[/]"
)
return {
"status": status,
"error_count": error_count,
"match_count": match_count,
}
def _edit_object(self, edit_fn, source_name, target_name=None):
if target_name is not None and "/" in target_name:
raise ValueError(
"Argument `target_name` should be a leaf name, "
"not a full path name. "
f"Received: target_name='{target_name}'"
)
if "/" in source_name:
# It's a path
elements = source_name.split("/")
weights_dict = self.weights_dict
for e in elements[:-1]:
if e not in weights_dict:
raise ValueError(
f"Path '{source_name}' not found in model."
)
weights_dict = weights_dict[e]
if elements[-1] not in weights_dict:
raise ValueError(f"Path '{source_name}' not found in model.")
edit_fn(
weights_dict, source_name=elements[-1], target_name=target_name
)
else:
# Ensure unicity
def count_occurences(d, name, count=0):
for k in d:
if isinstance(d[k], dict):
count += count_occurences(d[k], name, count)
if name in d:
count += 1
return count
occurrences = count_occurences(self.weights_dict, source_name)
if occurrences > 1:
raise ValueError(
f"Name '{source_name}' occurs more than once in the model; "
"try passing a complete path"
)
if occurrences == 0:
raise ValueError(
f"Source name '{source_name}' does not appear in the "
"model. Use `editor.weights_summary()` "
"to list all objects."
)
def _edit(d):
for k in d:
if isinstance(d[k], dict):
_edit(d[k])
if source_name in d:
edit_fn(d, source_name=source_name, target_name=target_name)
_edit(self.weights_dict)
def rename_object(self, object_name, new_name):
"""Rename an object in the file (e.g. a layer).
Args:
object_name: String, name or path of the
object to rename (e.g. `"dense_2"` or
`"layers/dense_2"`).
new_name: String, new name of the object.
"""
def rename_fn(weights_dict, source_name, target_name):
weights_dict[target_name] = weights_dict[source_name]
weights_dict.pop(source_name)
self._edit_object(rename_fn, object_name, new_name)
def delete_object(self, object_name):
"""Removes an object from the file (e.g. a layer).
Args:
object_name: String, name or path of the
object to delete (e.g. `"dense_2"` or
`"layers/dense_2"`).
"""
def delete_fn(weights_dict, source_name, target_name=None):
weights_dict.pop(source_name)
self._edit_object(delete_fn, object_name)
def add_object(self, object_path, weights):
"""Add a new object to the file (e.g. a layer).
Args:
object_path: String, full path of the
object to add (e.g. `"layers/dense_2"`).
weights: Dict mapping weight names to weight
values (arrays),
e.g. `{"0": kernel_value, "1": bias_value}`.
"""
if not isinstance(weights, dict):
raise ValueError(
"Argument `weights` should be a dict "
"where keys are weight names (usually '0', '1', etc.) "
"and values are NumPy arrays. "
f"Received: type(weights)={type(weights)}"
)
if "/" in object_path:
# It's a path
elements = object_path.split("/")
partial_path = "/".join(elements[:-1])
weights_dict = self.weights_dict
for e in elements[:-1]:
if e not in weights_dict:
raise ValueError(
f"Path '{partial_path}' not found in model."
)
weights_dict = weights_dict[e]
weights_dict[elements[-1]] = weights
else:
self.weights_dict[object_path] = weights
def delete_weight(self, object_name, weight_name):
"""Removes a weight from an existing object.
Args:
object_name: String, name or path of the
object from which to remove the weight
(e.g. `"dense_2"` or `"layers/dense_2"`).
weight_name: String, name of the weight to
delete (e.g. `"0"`).
"""
def delete_weight_fn(weights_dict, source_name, target_name=None):
if weight_name not in weights_dict[source_name]:
raise ValueError(
f"Weight {weight_name} not found "
f"in object {object_name}. "
"Weights found: "
f"{list(weights_dict[source_name].keys())}"
)
weights_dict[source_name].pop(weight_name)
self._edit_object(delete_weight_fn, object_name)
def add_weights(self, object_name, weights):
"""Add one or more new weights to an existing object.
Args:
object_name: String, name or path of the
object to add the weights to
(e.g. `"dense_2"` or `"layers/dense_2"`).
weights: Dict mapping weight names to weight
values (arrays),
e.g. `{"0": kernel_value, "1": bias_value}`.
"""
if not isinstance(weights, dict):
raise ValueError(
"Argument `weights` should be a dict "
"where keys are weight names (usually '0', '1', etc.) "
"and values are NumPy arrays. "
f"Received: type(weights)={type(weights)}"
)
def add_weight_fn(weights_dict, source_name, target_name=None):
weights_dict[source_name].update(weights)
self._edit_object(add_weight_fn, object_name)
def save(self, filepath):
"""Save the edited weights file.
Args:
filepath: Path to save the file to.
Must be a `.weights.h5` file.
"""
filepath = str(filepath)
if not filepath.endswith(".weights.h5"):
raise ValueError(
"Invalid `filepath` argument: "
"expected a `.weights.h5` extension. "
f"Received: filepath={filepath}"
)
weights_store = H5IOStore(filepath, mode="w")
def _save(weights_dict, weights_store, inner_path):
vars_to_create = {}
for name, value in weights_dict.items():
if isinstance(value, dict):
if value:
_save(
weights_dict[name],
weights_store,
inner_path=os.path.join(inner_path, name),
)
else:
# e.g. name="0", value=HDF5Dataset
vars_to_create[name] = value
if vars_to_create:
var_store = weights_store.make(inner_path)
for name, value in vars_to_create.items():
var_store[name] = value
_save(self.weights_dict, weights_store, inner_path="")
weights_store.close()
def resave_weights(self, filepath):
self.save(filepath)
def _extract_weights_from_store(self, data, metadata=None, inner_path=""):
metadata = metadata or {}
object_metadata = {}
for k, v in data.attrs.items():
object_metadata[k] = v
if object_metadata:
metadata[inner_path] = object_metadata
result = collections.OrderedDict()
for key in data.keys():
inner_path = f"{inner_path}/{key}"
value = data[key]
if isinstance(value, h5py.Group):
if len(value) == 0:
continue
if "vars" in value.keys() and len(value["vars"]) == 0:
continue
if hasattr(value, "keys"):
if "vars" in value.keys():
result[key], metadata = self._extract_weights_from_store(
value["vars"], metadata=metadata, inner_path=inner_path
)
else:
result[key], metadata = self._extract_weights_from_store(
value, metadata=metadata, inner_path=inner_path
)
else:
result[key] = value[()]
return result, metadata
def _generate_filepath_info(self, rich_style=False):
if rich_style:
filepath = f"'{self.filepath}'"
filepath = f"{summary_utils.highlight_symbol(filepath)}"
else:
filepath = f"'{self.filepath}'"
return f"Keras model file {filepath}"
def _generate_config_info(self, rich_style=False):
return pprint.pformat(self.config)
def _generate_metadata_info(self, rich_style=False):
version = self.metadata["keras_version"]
date = self.metadata["date_saved"]
if rich_style:
version = f"{summary_utils.highlight_symbol(version)}"
date = f"{summary_utils.highlight_symbol(date)}"
return f"Saved with Keras {version} - date: {date}"
def _print_weights_structure(
self, weights_dict, indent=0, is_first=True, prefix="", inner_path=""
):
for idx, (key, value) in enumerate(weights_dict.items()):
inner_path = os.path.join(inner_path, key)
is_last = idx == len(weights_dict) - 1
if is_first:
is_first = False
connector = "> "
elif is_last:
connector = "└─ "
else:
connector = "├─ "
if isinstance(value, dict):
bold_key = summary_utils.bold_text(key)
object_label = f"{prefix}{connector}{bold_key}"
if inner_path in self.object_metadata:
metadata = self.object_metadata[inner_path]
if "name" in metadata:
name = metadata["name"]
object_label += f" ('{name}')"
self.console.print(object_label)
if is_last:
appended = " "
else:
appended = "│ "
new_prefix = prefix + appended
self._print_weights_structure(
value,
indent + 1,
is_first=is_first,
prefix=new_prefix,
inner_path=inner_path,
)
else:
if hasattr(value, "shape"):
bold_key = summary_utils.bold_text(key)
self.console.print(
f"{prefix}{connector}{bold_key}:"
+ f" shape={value.shape}, dtype={value.dtype}"
)
else:
self.console.print(f"{prefix}{connector}{key}: {value}")
def _weights_summary_cli(self):
self.console.print("Weights structure")
self._print_weights_structure(self.weights_dict, prefix=" " * 2)
def _weights_summary_interactive(self):
def _generate_html_weights(dictionary, margin_left=0, font_size=1):
html = ""
for key, value in dictionary.items():
if isinstance(value, dict) and value:
weights_html = _generate_html_weights(
value, margin_left + 20, font_size - 1
)
html += (
f'<details style="margin-left: {margin_left}px;">'
'<summary style="'
f"font-size: {font_size}em; "
"font-weight: bold;"
f'">{key}</summary>'
f"{weights_html}"
"</details>"
)
else:
html += (
f'<details style="margin-left: {margin_left}px;">'
f'<summary style="font-size: {font_size}em;">'
f"{key} : shape={value.shape}"
f", dtype={value.dtype}</summary>"
f"<div style="
f'"margin-left: {margin_left}px;'
f'"margin-top: {margin_left}px;">'
f"{display_weight(value)}"
"</div>"
"</details>"
)
return html
output = "Weights structure"
initialize_id_counter()
output += _generate_html_weights(self.weights_dict)
ipython.display.display(ipython.display.HTML(output))
def get_weight_spec_of_saveable(saveable, spec, visited_saveables=None):
from keras.src.saving.keras_saveable import KerasSaveable
visited_saveables = visited_saveables or set()
# If the saveable has already been saved, skip it.
if id(saveable) in visited_saveables:
return
if hasattr(saveable, "save_own_variables"):
store = {}
saveable.save_own_variables(store)
if store:
keys = sorted(store.keys())
for k in keys:
val = store[k]
spec[k] = backend.KerasTensor(shape=val.shape, dtype=val.dtype)
visited_saveables.add(id(saveable))
for child_attr, child_obj in saving_lib._walk_saveable(saveable):
if isinstance(child_obj, KerasSaveable):
sub_spec = {}
get_weight_spec_of_saveable(
child_obj,
sub_spec,
visited_saveables=visited_saveables,
)
if sub_spec:
spec[child_attr] = sub_spec
elif isinstance(child_obj, (list, dict, tuple, set)):
sub_spec = {}
get_weight_spec_of_container(
child_obj,
sub_spec,
visited_saveables=visited_saveables,
)
if sub_spec:
spec[child_attr] = sub_spec
def get_weight_spec_of_container(container, spec, visited_saveables):
from keras.src.saving.keras_saveable import KerasSaveable
used_names = {}
if isinstance(container, dict):
container = list(container.values())
for saveable in container:
if isinstance(saveable, KerasSaveable):
name = naming.to_snake_case(saveable.__class__.__name__)
if name in used_names:
used_names[name] += 1
name = f"{name}_{used_names[name]}"
else:
used_names[name] = 0
sub_spec = {}
get_weight_spec_of_saveable(
saveable,
sub_spec,
visited_saveables=visited_saveables,
)
if sub_spec:
spec[name] = sub_spec
def initialize_id_counter():
global div_id_counter
div_id_counter = 0
def increment_id_counter():
global div_id_counter
div_id_counter += 1
def get_id_counter():
return div_id_counter
def display_weight(weight, axis=-1, threshold=16):
def _find_factors_closest_to_sqrt(num):
sqrt_num = int(np.sqrt(num))
for i in range(sqrt_num, 0, -1):
if num % i == 0:
M = i
N = num // i
if M > N:
return N, M
return M, N
def _color_from_rbg(value):
return f"rgba({value[0]}, {value[1]}, {value[2]}, 1)"
def _reduce_3d_array_by_mean(arr, n, axis):
if axis == 2:
trimmed_arr = arr[:, :, : arr.shape[2] - (arr.shape[2] % n)]
reshaped = np.reshape(
trimmed_arr, (arr.shape[0], arr.shape[1], -1, n)
)
mean_values = np.mean(reshaped, axis=3)
elif axis == 1:
trimmed_arr = arr[:, : arr.shape[1] - (arr.shape[1] % n), :]
reshaped = np.reshape(
trimmed_arr, (arr.shape[0], -1, n, arr.shape[2])
)
mean_values = np.mean(reshaped, axis=2)
elif axis == 0:
trimmed_arr = arr[: arr.shape[0] - (arr.shape[0] % n), :, :]
reshaped = np.reshape(
trimmed_arr, (-1, n, arr.shape[1], arr.shape[2])
)
mean_values = np.mean(reshaped, axis=1)
else:
raise ValueError("Axis must be 0, 1, or 2.")
return mean_values
def _create_matrix_html(matrix, subplot_size=840):
rows, cols, num_slices = matrix.shape
M, N = _find_factors_closest_to_sqrt(num_slices)
try:
from matplotlib import cm
except ImportError:
cm = None
if cm:
rgb_matrix = cm.jet(matrix)
else:
rgb_matrix = (matrix - np.min(matrix)) / (
np.max(matrix) - np.min(matrix)
)
rgb_matrix = np.stack([rgb_matrix, rgb_matrix, rgb_matrix], axis=-1)
rgb_matrix = (rgb_matrix[..., :3] * 255).astype("uint8")
subplot_html = ""
for i in range(num_slices):
cell_html = ""
for row in rgb_matrix[..., i, :]:
for rgb in row:
color = _color_from_rbg(rgb)
cell_html += (
f'<div class="cell" '
f'style="background-color: {color};">'
f"</div>"
)
subplot_html += f"""
<div class="matrix">
{cell_html}
</div>
"""
cell_size = subplot_size // (N * cols)
increment_id_counter()
div_id = get_id_counter()
html_code = f"""
<div class="unique-container_{div_id}">
<style>
.unique-container_{div_id} .subplots {{
display: inline-grid;
grid-template-columns: repeat({N}, 1fr);
column-gap: 5px; /* Minimal horizontal gap */
row-gap: 5px; /* Small vertical gap */
margin: 0;
padding: 0;
}}
.unique-container_{div_id} .matrix {{
display: inline-grid;
grid-template-columns: repeat({cols}, {cell_size}px);
grid-template-rows: repeat({rows}, {cell_size}px);
gap: 1px;
margin: 0;
padding: 0;
}}
.unique-container_{div_id} .cell {{
width: {cell_size}px;
height: {cell_size}px;
display: flex;
justify-content: center;
align-items: center;
font-size: 5px;
font-weight: bold;
color: white;
}}
.unique-container_{div_id} {{
margin-top: 20px;
margin-bottom: 20px;
}}
</style>
<div class="subplots">
{subplot_html}
</div>
</div>
"""
return html_code
if weight.ndim == 1:
weight = weight[..., np.newaxis]
weight = np.swapaxes(weight, axis, -1)
weight = weight.reshape(-1, weight.shape[-1])
M, N = _find_factors_closest_to_sqrt(weight.shape[0])
weight = weight.reshape(M, N, weight.shape[-1])
for reduce_axis in [0, 1, 2]:
if weight.shape[reduce_axis] > threshold:
weight = _reduce_3d_array_by_mean(
weight,
weight.shape[reduce_axis] // threshold,
axis=reduce_axis,
)
weight = (weight - weight.min()) / (weight.max() - weight.min() + 1e-5)
html_code = _create_matrix_html(weight)
return html_code
| KerasFileEditor |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py | {
"start": 55680,
"end": 55883
} | class ____(Qwen2_5OmniAudioAttention):
def __init__(self, config):
super().__init__(config)
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
| Qwen3OmniMoeAudioAttention |
python | huggingface__transformers | tests/models/instructblip/test_modeling_instructblip.py | {
"start": 4800,
"end": 7900
} | class ____(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as InstructBLIP's vision encoder does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (InstructBlipVisionModel,) if is_torch_available() else ()
test_resize_embeddings = False
def setUp(self):
self.model_tester = InstructBlipVisionModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=InstructBlipConfig,
has_text_modality=False,
common_properties=["num_query_tokens", "image_token_index"],
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="InstructBLIP's vision encoder does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="InstructBlipVisionModel is an internal building block, doesn't support standalone training")
def test_training(self):
pass
@unittest.skip(reason="InstructBlipVisionModel is an internal building block, doesn't support standalone training")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "Salesforce/instructblip-flan-t5-xl"
model = InstructBlipVisionModel.from_pretrained(model_name)
self.assertIsNotNone(model)
| InstructBlipVisionModelTest |
python | pypa__warehouse | warehouse/tasks.py | {
"start": 1117,
"end": 11815
} | class ____(celery.Task):
"""
A custom Celery Task that integrates with Pyramid's transaction manager and
metrics service.
"""
__header__: typing.Callable
_wh_original_run: typing.Callable
def __new__(cls, *args, **kwargs) -> WarehouseTask:
"""
Override to wrap the `run` method of the task with a new method that
will handle exceptions from the task and retry them if they're retryable.
"""
obj = super().__new__(cls, *args, **kwargs)
if getattr(obj, "__header__", None) is not None:
obj.__header__ = functools.partial(obj.__header__, object())
# We do this here instead of inside of __call__ so that exceptions
# coming from the transaction manager get caught by the autoretry
# mechanism.
@functools.wraps(obj.run)
def run(*args, **kwargs):
original_run = obj._wh_original_run
request = obj.get_request()
metrics = request.find_service(IMetricsService, context=None)
metric_tags = [f"task:{obj.name}"]
with request.tm, metrics.timed("warehouse.task.run", tags=metric_tags):
metrics.increment("warehouse.task.start", tags=metric_tags)
try:
result = original_run(*args, **kwargs)
metrics.increment("warehouse.task.complete", tags=metric_tags)
return result
except BaseException as exc:
if isinstance(
exc, pyramid_retry.RetryableException
) or pyramid_retry.IRetryableError.providedBy(exc):
metrics.increment("warehouse.task.retried", tags=metric_tags)
raise obj.retry(exc=exc)
metrics.increment("warehouse.task.failed", tags=metric_tags)
raise
# Reassign the `run` method to the new one we've created.
obj._wh_original_run, obj.run = obj.run, run # type: ignore[method-assign]
return obj
def __call__(self, *args, **kwargs):
"""
Override to inject a faux request object into the task when it's called.
There's no WSGI request object available when a task is called, so we
create a fake one here. This is necessary as a lot of our code assumes
that there's a Pyramid request object available.
"""
return super().__call__(*(self.get_request(),) + args, **kwargs)
def get_request(self) -> Request:
"""
Get a request object to use for this task.
This will either return the request object that was injected into the
task when it was called, or it will create a new request object to use
for the task.
Note: The `type: ignore` comments are necessary because the `pyramid_env`
attribute is not defined on the request object, but we're adding it
dynamically.
"""
if not hasattr(self.request, "pyramid_env"):
registry = self.app.pyramid_config.registry # type: ignore[attr-defined]
env = pyramid.scripting.prepare(registry=registry)
env["request"].tm = transaction.TransactionManager(explicit=True)
env["request"].timings = {"new_request_start": time.time() * 1000}
env["request"].remote_addr = "127.0.0.1"
env["request"].remote_addr_hashed = hashlib.sha256(
("127.0.0.1" + registry.settings["warehouse.ip_salt"]).encode("utf8")
).hexdigest()
self.request.update(pyramid_env=env)
return self.request.pyramid_env["request"] # type: ignore[attr-defined]
def after_return(self, status, retval, task_id, args, kwargs, einfo):
"""
Called after the task has returned. This is where we'll clean up the
request object that we injected into the task.
"""
if hasattr(self.request, "pyramid_env"):
pyramid_env = self.request.pyramid_env
pyramid_env["request"]._process_finished_callbacks()
pyramid_env["closer"]()
def apply_async(self, *args, **kwargs):
"""
Override the apply_async method to add an after commit hook to the
transaction manager to send the task after the transaction has been
committed.
This is necessary because we want to ensure that the task is only sent
after the transaction has been committed. This is important because we
want to ensure that the task is only sent if the transaction was
successful.
"""
# The API design of Celery makes this threadlocal pretty impossible to
# avoid :(
request = get_current_request()
# If for whatever reason we were unable to get a request we'll just
# skip this and call the original method to send this immediately.
if request is None or not hasattr(request, "tm"):
return super().apply_async(*args, **kwargs)
# This will break things that expect to get an AsyncResult because
# we're no longer going to be returning an async result from this when
# called from within a request, response cycle. Ideally we shouldn't be
# waiting for responses in a request/response cycle anyways though.
request.tm.get().addAfterCommitHook(
self._after_commit_hook, args=args, kws=kwargs
)
def retry(self, *args, **kwargs):
"""
Override the retry method to increment a metric when a task is retried.
This is necessary because the `retry` method is called when a task is
retried, and we want to track how many times a task has been retried.
"""
request = get_current_request()
metrics = request.find_service(IMetricsService, context=None)
metrics.increment("warehouse.task.retried", tags=[f"task:{self.name}"])
return super().retry(*args, **kwargs)
def _after_commit_hook(self, success, *args, **kwargs):
"""
This is the hook that gets called after the transaction has been
committed. We'll only send the task if the transaction was successful.
"""
if success:
super().apply_async(*args, **kwargs)
def task(**kwargs):
"""
A decorator that can be used to define a Celery task.
A thin wrapper around Celery's `task` decorator that allows us to attach
the task to the Celery app when the configuration is scanned during the
application startup.
This decorator also sets the `shared` option to `False` by default. This
means that the task will be created anew for each worker process that is
started. This is important because the `WarehouseTask` class that we use
for our tasks is not thread-safe, so we need to ensure that each worker
process has its own instance of the task.
This decorator also adds the task to the `warehouse` category in the
configuration scanner. This is important because we use this category to
find all the tasks that have been defined in the configuration.
Example usage:
```
@tasks.task(...)
def my_task(self, *args, **kwargs):
pass
```
"""
kwargs.setdefault("shared", False)
def deco(wrapped):
def callback(scanner, name, wrapped):
celery_app = scanner.config.registry["celery.app"]
celery_app.task(**kwargs)(wrapped)
venusian.attach(wrapped, callback, category="warehouse")
return wrapped
return deco
def _get_task(celery_app, task_func):
task_name = celery_app.gen_task_name(task_func.__name__, task_func.__module__)
return celery_app.tasks[task_name]
def _get_task_from_request(request):
celery_app = request.registry["celery.app"]
return functools.partial(_get_task, celery_app)
def _get_task_from_config(config, task):
celery_app = config.registry["celery.app"]
return _get_task(celery_app, task)
def _get_celery_app(config):
return config.registry["celery.app"]
def _add_periodic_task(config, schedule, func, args=(), kwargs=(), name=None, **opts):
def add_task():
config.registry["celery.app"].add_periodic_task(
schedule, config.task(func).s(), args=args, kwargs=kwargs, name=name, **opts
)
config.action(None, add_task, order=100)
def includeme(config: Configurator) -> None:
s = config.registry.settings
broker_transport_options: dict[str, str | int | dict] = {}
broker_url = s["celery.broker_redis_url"]
# Only redis is supported as a broker
assert broker_url.startswith("redis")
parsed_url = urllib.parse.urlparse( # noqa: WH001, going to urlunparse this
broker_url
)
parsed_query = urllib.parse.parse_qs(parsed_url.query)
celery_transport_options = {
"socket_timeout": int,
}
for key, value in parsed_query.copy().items():
if key.startswith("ssl_"):
continue
else:
if key in celery_transport_options:
broker_transport_options[key] = celery_transport_options[key](value[0])
del parsed_query[key]
parsed_url = parsed_url._replace(
query=urllib.parse.urlencode(parsed_query, doseq=True, safe="/")
)
broker_url = urllib.parse.urlunparse(parsed_url)
config.registry["celery.app"] = celery.Celery(
"warehouse", autofinalize=False, set_as_current=False
)
config.registry["celery.app"].conf.update(
accept_content=["json", "msgpack"],
broker_url=broker_url,
broker_use_ssl=s["warehouse.env"] == Environment.production,
broker_transport_options=broker_transport_options,
task_default_queue="default",
task_default_routing_key="task.default",
task_queue_ha_policy="all",
task_queues=(Queue("default", routing_key="task.#"),),
task_routes={},
task_serializer="json",
worker_disable_rate_limits=True,
REDBEAT_REDIS_URL=s["celery.scheduler_url"],
# Silence deprecation warning on startup
broker_connection_retry_on_startup=False,
)
config.registry["celery.app"].Task = WarehouseTask
config.registry["celery.app"].pyramid_config = config
config.action(("celery", "finalize"), config.registry["celery.app"].finalize)
config.add_directive("add_periodic_task", _add_periodic_task, action_wrap=False)
config.add_directive("make_celery_app", _get_celery_app, action_wrap=False)
config.add_directive("task", _get_task_from_config, action_wrap=False)
config.add_request_method(_get_task_from_request, name="task", reify=True)
| WarehouseTask |
python | ray-project__ray | rllib/offline/json_writer.py | {
"start": 913,
"end": 4962
} | class ____(OutputWriter):
"""Writer object that saves experiences in JSON file chunks."""
@PublicAPI
def __init__(
self,
path: str,
ioctx: IOContext = None,
max_file_size: int = 64 * 1024 * 1024,
compress_columns: List[str] = frozenset(["obs", "new_obs"]),
):
"""Initializes a JsonWriter instance.
Args:
path: a path/URI of the output directory to save files in.
ioctx: current IO context object.
max_file_size: max size of single files before rolling over.
compress_columns: list of sample batch columns to compress.
"""
logger.info(
"You are using JSONWriter. It is recommended to use "
+ "DatasetWriter instead."
)
self.ioctx = ioctx or IOContext()
self.max_file_size = max_file_size
self.compress_columns = compress_columns
if urlparse(path).scheme not in [""] + WINDOWS_DRIVES:
self.path_is_uri = True
else:
path = os.path.abspath(os.path.expanduser(path))
# Try to create local dirs if they don't exist
os.makedirs(path, exist_ok=True)
assert os.path.exists(path), "Failed to create {}".format(path)
self.path_is_uri = False
self.path = path
self.file_index = 0
self.bytes_written = 0
self.cur_file = None
@override(OutputWriter)
def write(self, sample_batch: SampleBatchType):
start = time.time()
data = _to_json(sample_batch, self.compress_columns)
f = self._get_file()
f.write(data)
f.write("\n")
if hasattr(f, "flush"): # legacy smart_open impls
f.flush()
self.bytes_written += len(data)
logger.debug(
"Wrote {} bytes to {} in {}s".format(len(data), f, time.time() - start)
)
def _get_file(self) -> FileType:
if not self.cur_file or self.bytes_written >= self.max_file_size:
if self.cur_file:
self.cur_file.close()
timestr = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
path = os.path.join(
self.path,
"output-{}_worker-{}_{}.json".format(
timestr, self.ioctx.worker_index, self.file_index
),
)
if self.path_is_uri:
if smart_open is None:
raise ValueError(
"You must install the `smart_open` module to write "
"to URIs like {}".format(path)
)
self.cur_file = smart_open(path, "w")
else:
self.cur_file = open(path, "w")
self.file_index += 1
self.bytes_written = 0
logger.info("Writing to new output file {}".format(self.cur_file))
return self.cur_file
def _to_jsonable(v, compress: bool) -> Any:
if compress and compression_supported():
return str(pack(v))
elif isinstance(v, np.ndarray):
return v.tolist()
return v
def _to_json_dict(batch: SampleBatchType, compress_columns: List[str]) -> Dict:
out = {}
if isinstance(batch, MultiAgentBatch):
out["type"] = "MultiAgentBatch"
out["count"] = batch.count
policy_batches = {}
for policy_id, sub_batch in batch.policy_batches.items():
policy_batches[policy_id] = {}
for k, v in sub_batch.items():
policy_batches[policy_id][k] = _to_jsonable(
v, compress=k in compress_columns
)
out["policy_batches"] = policy_batches
else:
out["type"] = "SampleBatch"
for k, v in batch.items():
out[k] = _to_jsonable(v, compress=k in compress_columns)
return out
def _to_json(batch: SampleBatchType, compress_columns: List[str]) -> str:
out = _to_json_dict(batch, compress_columns)
return json.dumps(out, cls=SafeFallbackEncoder)
| JsonWriter |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/s3/s3_fake_resource.py | {
"start": 225,
"end": 3412
} | class ____:
"""Stateful mock of a boto3 s3 session for test.
Wraps a ``mock.MagicMock``. Buckets are implemented using an in-memory dict.
"""
def __init__(self, buckets=None):
from unittest import mock
self.buckets = defaultdict(dict, buckets) if buckets else defaultdict(dict)
self.mock_extras = mock.MagicMock()
def head_bucket(self, Bucket, *args, **kwargs):
self.mock_extras.head_bucket(*args, **kwargs)
def head_object(self, Bucket, Key, *args, **kwargs):
self.mock_extras.head_object(*args, **kwargs)
return {"ContentLength": len(self.buckets.get(Bucket, {}).get(Key, b""))}
def _list_objects(self, Bucket, Prefix):
bucket = self.buckets.get(Bucket, {})
contents = []
for key in sorted(bucket.keys()):
if key.startswith(Prefix):
contents.append({"Key": key})
return {"Contents": contents, "IsTruncated": False}
def list_objects_v2(self, Bucket, Prefix, *args, **kwargs):
self.mock_extras.list_objects_v2(*args, **kwargs)
response = self._list_objects(Bucket, Prefix)
response["KeyCount"] = len(response["Contents"])
return response
def list_objects(self, Bucket, Prefix, *args, **kwargs):
self.mock_extras.list_objects(*args, **kwargs)
return self._list_objects(Bucket, Prefix)
def put_object(self, Bucket, Key, Body, *args, **kwargs):
self.mock_extras.put_object(*args, **kwargs)
if isinstance(Body, bytes):
self.buckets[Bucket][Key] = Body
else:
self.buckets[Bucket][Key] = Body.read()
def get_object(self, Bucket, Key, *args, **kwargs):
if not self.has_object(Bucket, Key):
raise ClientError({}, None) # pyright: ignore[reportArgumentType]
self.mock_extras.get_object(*args, **kwargs)
return {"Body": self._get_byte_stream(Bucket, Key)}
def delete_object(self, Bucket, Key, *args, **kwargs):
self.mock_extras.delete_object(*args, **kwargs)
if Bucket in self.buckets:
self.buckets[Bucket].pop(Key, None)
def upload_file(self, Filename, Bucket, Key, *args, **kwargs):
self.mock_extras.upload_file(*args, **kwargs)
with open(Filename, "rb") as fileobj:
self.buckets[Bucket][Key] = fileobj.read()
def upload_fileobj(self, Fileobj, Bucket, Key, *args, **kwargs):
self.mock_extras.upload_fileobj(*args, **kwargs)
self.buckets[Bucket][Key] = Fileobj.read()
def has_object(self, bucket, key):
return bucket in self.buckets and key in self.buckets[bucket]
def _get_byte_stream(self, bucket, key):
return io.BytesIO(self.buckets[bucket][key])
def download_file(self, Bucket, Key, Filename, *args, **kwargs):
self.mock_extras.download_file(*args, **kwargs)
with open(Filename, "wb") as ff:
ff.write(self._get_byte_stream(Bucket, Key).read())
def download_fileobj(self, Bucket, Key, Fileobj, *args, **kwargs):
self.mock_extras.download_fileobj(*args, **kwargs)
Fileobj.write(self._get_byte_stream(Bucket, Key).read())
| S3FakeSession |
python | pytransitions__transitions | tests/test_mermaid.py | {
"start": 6311,
"end": 10015
} | class ____(TestDiagramsNested, TestMermaidDiagrams):
machine_cls = HierarchicalGraphMachine \
# type: Type[Union[HierarchicalGraphMachine, LockedHierarchicalGraphMachine]]
def test_diagram(self):
m = self.machine_cls(states=self.states, transitions=self.transitions, initial='A', auto_transitions=False,
title='A test', show_conditions=True, graph_engine=self.graph_engine)
graph = m.get_graph()
self.assertIsNotNone(graph)
_, nodes, edges = self.parse_dot(graph)
self.assertEqual(len(edges), 8)
# Test that graph properties match the Machine
self.assertEqual(set(m.get_nested_state_names()), nodes)
m.walk()
m.run()
# write diagram to temp file
target = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
m.get_graph().draw(target.name, prog='dot')
self.assertTrue(os.path.getsize(target.name) > 0)
# backwards compatibility check
m.get_graph().draw(target.name, prog='dot')
self.assertTrue(os.path.getsize(target.name) > 0)
# cleanup temp file
target.close()
os.unlink(target.name)
def test_roi(self):
class Model:
def is_fast(self, *args, **kwargs):
return True
model = Model()
m = self.machine_cls(model, states=self.states, transitions=self.transitions, initial='A', title='A test',
graph_engine=self.graph_engine, show_conditions=True)
model.walk()
model.run()
g1 = model.get_graph(show_roi=True)
_, nodes, edges = self.parse_dot(g1)
self.assertEqual(len(edges), 4)
self.assertEqual(len(nodes), 4)
model.sprint()
g2 = model.get_graph(show_roi=True)
dot, nodes, edges = self.parse_dot(g2)
self.assertEqual(len(edges), 2)
self.assertEqual(len(nodes), 3)
def test_roi_parallel(self):
class Model:
@staticmethod
def is_fast(*args, **kwargs):
return True
self.states[0] = {"name": "A", "parallel": ["1", "2"]}
model = Model()
m = self.machine_cls(model, states=self.states, transitions=self.transitions, initial='A', title='A test',
graph_engine=self.graph_engine, show_conditions=True)
g1 = model.get_graph(show_roi=True)
_, nodes, edges = self.parse_dot(g1)
self.assertEqual(2, len(edges)) # reset and walk
self.assertEqual(4, len(nodes))
model.walk()
model.run()
model.sprint()
g2 = model.get_graph(show_roi=True)
_, nodes, edges = self.parse_dot(g2)
self.assertEqual(len(edges), 2)
self.assertEqual(len(nodes), 3)
def test_roi_parallel_deeper(self):
states = ['A', 'B', 'C', 'D',
{'name': 'P',
'parallel': [
'1',
{'name': '2', 'parallel': [
{'name': 'a'},
{'name': 'b', 'parallel': [
{'name': 'x', 'parallel': ['1', '2']}, 'y'
]}
]},
]}]
transitions = [["go", "A", "P"], ["reset", "*", "A"]]
m = self.machine_cls(states=states, transitions=transitions, initial='A', title='A test',
graph_engine=self.graph_engine, show_conditions=True)
m.go()
_, nodes, edges = self.parse_dot(m.get_graph(show_roi=True))
self.assertEqual(len(edges), 2)
self.assertEqual(len(nodes), 10)
| TestMermaidDiagramsNested |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_subset.py | {
"start": 1517,
"end": 2927
} | class ____(NonLaunchableGraphQLContextTestMatrix):
def test_csv_hello_world_pipeline_or_error_subset_wrong_solid_name(
self, graphql_context: WorkspaceRequestContext
):
selector = infer_job_selector(graphql_context, "csv_hello_world", ["nope"])
result = execute_dagster_graphql(
graphql_context, SCHEMA_OR_ERROR_SUBSET_QUERY, {"selector": selector}
)
assert not result.errors
assert result.data
assert result.data["runConfigSchemaOrError"]["__typename"] == "InvalidSubsetError"
assert "No qualified ops to execute" in result.data["runConfigSchemaOrError"]["message"]
def test_pipeline_with_invalid_definition_error(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(
graphql_context, "job_with_invalid_definition_error", ["fail_subset"]
)
result = execute_dagster_graphql(
graphql_context, SCHEMA_OR_ERROR_SUBSET_QUERY, {"selector": selector}
)
assert not result.errors
assert result.data
assert result.data["runConfigSchemaOrError"]["__typename"] == "InvalidSubsetError"
error_msg = result.data["runConfigSchemaOrError"]["message"]
assert "DagsterInvalidSubsetError" in error_msg
assert "Input 'some_input' of op 'fail_subset' has no way of being resolved" in error_msg
| TestSolidSelections |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/trt_convert.py | {
"start": 4977,
"end": 17081
} | class ____(
collections.namedtuple("TrtConversionParams", [
"max_workspace_size_bytes", "precision_mode", "minimum_segment_size",
"maximum_cached_engines", "use_calibration", "allow_build_at_runtime"
])):
"""Parameters that are used for TF-TRT conversion.
Fields:
max_workspace_size_bytes: the maximum GPU temporary memory that the TRT
engine can use at execution time. This corresponds to the
'workspaceSize' parameter of nvinfer1::IBuilder::setMaxWorkspaceSize().
precision_mode: one of the strings in
TrtPrecisionMode.supported_precision_modes().
minimum_segment_size: the minimum number of nodes required for a subgraph
to be replaced by TRTEngineOp.
maximum_cached_engines: max number of cached TRT engines for dynamic TRT
ops. Created TRT engines for a dynamic dimension are cached. If the
number of cached engines is already at max but none of them supports the
input shapes, the TRTEngineOp will fall back to run the original TF
subgraph that corresponds to the TRTEngineOp.
use_calibration: this argument is ignored if precision_mode is not INT8.
If set to True, a calibration graph will be created to calibrate the
missing ranges. The calibration graph must be converted to an inference
graph by running calibration with calibrate(). If set to False,
quantization nodes will be expected for every tensor in the graph
(excluding those which will be fused). If a range is missing, an error
will occur. Please note that accuracy may be negatively affected if
there is a mismatch between which tensors TRT quantizes and which
tensors were trained with fake quantization.
allow_build_at_runtime: whether to allow building TensorRT engines during
runtime if no prebuilt TensorRT engine can be found that can handle the
given inputs during runtime, then a new TensorRT engine is built at
runtime if allow_build_at_runtime=True, and otherwise native TF is used.
"""
def __new__(cls,
max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
precision_mode=TrtPrecisionMode.FP32,
minimum_segment_size=3,
maximum_cached_engines=1,
use_calibration=True,
allow_build_at_runtime=True):
return super(TrtConversionParams,
cls).__new__(cls, max_workspace_size_bytes, precision_mode,
minimum_segment_size, maximum_cached_engines,
use_calibration, allow_build_at_runtime)
DEFAULT_TRT_CONVERSION_PARAMS = TrtConversionParams()
_TRT_ENGINE_OP_NAME = "TRTEngineOp"
def _check_conversion_params(conversion_params, is_v2=False):
"""Validate the provided TrtConversionParams.
Args:
conversion_params: a TrtConversionParams instance.
is_v2: whether we're getting a RewriterConfig for TF 2.0.
Raises:
TypeError: if any of the parameters are of unexpected type.
ValueError: if any of the parameters are of unexpected value.
"""
supported_precision_modes = TrtPrecisionMode.supported_precision_modes()
if conversion_params.precision_mode not in supported_precision_modes:
raise ValueError(
("precision mode '{}' is not supported."
"It should be one of {}").format(conversion_params.precision_mode,
supported_precision_modes))
if (conversion_params.minimum_segment_size <= 0 and
conversion_params.minimum_segment_size != -1):
raise ValueError("minimum segment size should be positive or -1 "
"(to disable main graph conversion).")
def _check_trt_version_compatibility():
"""Check compatibility of TensorRT version.
Raises:
RuntimeError: if the TensorRT library version is incompatible.
"""
if not _pywrap_py_utils.is_tensorrt_enabled():
logging.error(
"Tensorflow needs to be built with TensorRT support enabled to allow "
"TF-TRT to operate.")
raise RuntimeError("Tensorflow has not been built with TensorRT support.")
if platform.system() == "Windows":
logging.warn(
"Windows support is provided experimentally. No guarantee is made "
"regarding functionality or engineering support. Use at your own risk.")
linked_version = _pywrap_py_utils.get_linked_tensorrt_version()
loaded_version = _pywrap_py_utils.get_loaded_tensorrt_version()
logging.info("Linked TensorRT version: %s", str(linked_version))
logging.info("Loaded TensorRT version: %s", str(loaded_version))
def raise_trt_version_deprecated(version_type, trt_version):
assert version_type in [
"linked", "loaded"
], ("Incorrect value received for version_type: %s. Accepted: ['linked', "
"'loaded']") % version_type
logging.error(
"The {version_type} version of TensorRT: `{trt_version}` has now "
"been removed. Please upgrade to TensorRT 7 or more recent.".format(
version_type=version_type,
trt_version=trt_utils.version_tuple_to_string(trt_version)))
raise RuntimeError("Incompatible %s TensorRT versions" % version_type)
if not trt_utils.is_linked_tensorrt_version_greater_equal(7, 0, 0):
raise_trt_version_deprecated("linked", linked_version)
if not trt_utils.is_loaded_tensorrt_version_greater_equal(7, 0, 0):
raise_trt_version_deprecated("loaded", loaded_version)
if (loaded_version[0] != linked_version[0] or
not trt_utils.is_loaded_tensorrt_version_greater_equal(*linked_version)):
logging.error(
"Loaded TensorRT %s but linked TensorFlow against TensorRT %s. A few "
"requirements must be met:\n"
"\t-It is required to use the same major version of TensorRT during "
"compilation and runtime.\n"
"\t-TensorRT does not support forward compatibility. The loaded "
"version has to be equal or more recent than the linked version.",
trt_utils.version_tuple_to_string(loaded_version),
trt_utils.version_tuple_to_string(linked_version))
raise RuntimeError("Incompatible TensorRT major version")
elif loaded_version != linked_version:
logging.info(
"Loaded TensorRT %s and linked TensorFlow against TensorRT %s. This is "
"supported because TensorRT minor/patch upgrades are backward "
"compatible.", trt_utils.version_tuple_to_string(loaded_version),
trt_utils.version_tuple_to_string(linked_version))
def _get_tensorrt_rewriter_config(conversion_params,
is_dynamic_op=None,
max_batch_size=None,
is_v2=False,
disable_non_trt_optimizers=False,
use_implicit_batch=True,
profile_strategy=PROFILE_STRATEGY_RANGE):
"""Returns a RewriterConfig proto for TRT transformation.
Args:
conversion_params: a TrtConversionParams instance.
is_dynamic_op: whether to use dynamic engines.
max_batch_size: maximum batch size for static engines.
is_v2: whether we're getting a RewriterConfig for TF 2.0.
disable_non_trt_optimizers: Turn off all default Grappler optimizers.
use_implicit_batch: Whether to use implicit batch or explicit batch.
profile_strategy: dynamic shape optimization profile strategy.
Returns:
A RewriterConfig proto which sets a TensorRTOptimizer to run Grappler.
Raises:
TypeError: if any of the parameters are of unexpected type.
ValueError: if any of the parameters are of unexpected value.
"""
_check_conversion_params(conversion_params, is_v2=is_v2)
if is_v2 and is_dynamic_op is not None and not is_dynamic_op:
raise ValueError("is_dynamic_op is either None or True for TF2")
if not is_v2 and is_dynamic_op is None:
raise ValueError("is_dynamic_op can't be None for TF1")
if (is_dynamic_op is None or is_dynamic_op) and max_batch_size is not None:
raise ValueError("max_batch_size has to be None for TF2"
" or when is_dynamic_op == True in TF1")
if is_dynamic_op is not None and not is_dynamic_op and not isinstance(
max_batch_size, int):
raise ValueError(
"max_batch_size has to be an integer for is_dynamic_op==False in TF1")
rewriter_config_with_trt = rewriter_config_pb2.RewriterConfig()
# Disable Grappler Remapper to avoid that fused OPs that may not be
# beneficial to TF-TRT and are not supported by TF-TRT.
rewriter_config_with_trt.remapping = False
# Prevent folding of Const->QDQ chains.
rewriter_config_with_trt. \
experimental_disable_folding_quantization_emulation = (
trt_utils.is_linked_tensorrt_version_greater_equal(8, 0, 0) or
trt_utils.is_loaded_tensorrt_version_greater_equal(8, 0, 0))
if not disable_non_trt_optimizers:
rewriter_config_with_trt.optimizers.extend([
"pruning", "debug_stripper", "layout", "dependency", "constfold",
"common_subgraph_elimination"
])
rewriter_config_with_trt.meta_optimizer_iterations = (
rewriter_config_pb2.RewriterConfig.ONE)
optimizer = rewriter_config_with_trt.custom_optimizers.add()
if not disable_non_trt_optimizers:
# Add a constfold optimizer to cleanup the unused Const nodes.
rewriter_config_with_trt.custom_optimizers.add().name = "constfold"
optimizer.name = "TensorRTOptimizer"
optimizer.parameter_map[
"minimum_segment_size"].i = conversion_params.minimum_segment_size
optimizer.parameter_map["max_workspace_size_bytes"].i = (
conversion_params.max_workspace_size_bytes)
optimizer.parameter_map["precision_mode"].s = _to_bytes(
conversion_params.precision_mode)
optimizer.parameter_map[
"maximum_cached_engines"].i = conversion_params.maximum_cached_engines
optimizer.parameter_map[
"use_calibration"].b = conversion_params.use_calibration
optimizer.parameter_map["is_dynamic_op"].b = is_dynamic_op
optimizer.parameter_map[
"allow_build_at_runtime"].b = conversion_params.allow_build_at_runtime
if max_batch_size is not None:
optimizer.parameter_map["max_batch_size"].i = max_batch_size
optimizer.parameter_map["use_implicit_batch"].b = use_implicit_batch
# While we accept case insensitive strings from the users, we only pass the
# strings in lower cases to TF-TRT converter.
if not use_implicit_batch:
optimizer.parameter_map["profile_strategy"].s = _to_bytes(
profile_strategy.lower())
# Disabling optimizers should happen after defining the TF-TRT grappler pass
# otherwise the template can overwrite the disablement.
if disable_non_trt_optimizers:
trt_utils.disable_non_trt_optimizers_in_rewriter_config(
rewriter_config_with_trt)
return rewriter_config_with_trt
@deprecation.deprecated(
None, "You shouldn't need a rewriter_config with the current TF-TRT APIs.")
def get_tensorrt_rewriter_config(conversion_params,
is_dynamic_op=None,
max_batch_size=None,
is_v2=False,
disable_non_trt_optimizers=False):
return _get_tensorrt_rewriter_config(conversion_params, is_dynamic_op,
max_batch_size, is_v2,
disable_non_trt_optimizers)
# Remove all scope prefixes in the node name. In TF 2.0, the same concrete
# function can be initialized multiple times with different prefixes, and
# this will result in the same TRTEngineOp being initialized multiple times
# with different cache and duplicate TRT engines.
# TODO(laigd): this may be caused by the fact that TRTEngineOp is not
# stateful, need to investigate.
# TODO(laigd): we rely on the fact that all functions are fully inlined
# before TF-TRT optimizer is called, as otherwise it may generate the same
# name when optimizing a different function graph. Fix this.
def _get_canonical_engine_name(name):
return name.split("/")[-1]
| TrtConversionParams |
python | walkccc__LeetCode | solutions/3438. Find Valid Pair of Adjacent Digits in String/3438.py | {
"start": 0,
"end": 244
} | class ____:
def findValidPair(self, s: str) -> str:
count = collections.Counter(s)
return next((a + b
for a, b in itertools.pairwise(s)
if a != b and count[a] == int(a) and count[b] == int(b)), '')
| Solution |
python | pandas-dev__pandas | pandas/tests/reshape/test_pivot.py | {
"start": 88181,
"end": 100397
} | class ____:
def test_pivot(self):
data = {
"index": ["A", "B", "C", "C", "B", "A"],
"columns": ["One", "One", "One", "Two", "Two", "Two"],
"values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0],
}
frame = DataFrame(data)
pivoted = frame.pivot(index="index", columns="columns", values="values")
expected = DataFrame(
{
"One": {"A": 1.0, "B": 2.0, "C": 3.0},
"Two": {"A": 1.0, "B": 2.0, "C": 3.0},
}
)
expected.index.name, expected.columns.name = "index", "columns"
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == "index"
assert pivoted.columns.name == "columns"
# don't specify values
pivoted = frame.pivot(index="index", columns="columns")
assert pivoted.index.name == "index"
assert pivoted.columns.names == (None, "columns")
def test_pivot_duplicates(self):
data = DataFrame(
{
"a": ["bar", "bar", "foo", "foo", "foo"],
"b": ["one", "two", "one", "one", "two"],
"c": [1.0, 2.0, 3.0, 3.0, 4.0],
}
)
with pytest.raises(ValueError, match="duplicate entries"):
data.pivot(index="a", columns="b", values="c")
def test_pivot_empty(self):
df = DataFrame(columns=["a", "b", "c"])
result = df.pivot(index="a", columns="b", values="c")
expected = DataFrame(index=[], columns=[])
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self, any_string_dtype):
df = DataFrame(
data=[("A", "1", "A1"), ("B", "2", "B2")], dtype=any_string_dtype
)
result = df.pivot(index=1, columns=0, values=2)
expected_columns = Index(["A", "B"], name=0, dtype=any_string_dtype)
tm.assert_index_equal(result.columns, expected_columns)
def test_pivot_index_none(self):
# GH#3962
data = {
"index": ["A", "B", "C", "C", "B", "A"],
"columns": ["One", "One", "One", "Two", "Two", "Two"],
"values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0],
}
frame = DataFrame(data).set_index("index")
result = frame.pivot(columns="columns", values="values")
expected = DataFrame(
{
"One": {"A": 1.0, "B": 2.0, "C": 3.0},
"Two": {"A": 1.0, "B": 2.0, "C": 3.0},
}
)
expected.index.name, expected.columns.name = "index", "columns"
tm.assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns="columns")
expected.columns = MultiIndex.from_tuples(
[("values", "One"), ("values", "Two")], names=[None, "columns"]
)
expected.index.name = "index"
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == "index"
assert result.columns.names == (None, "columns")
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns="columns", values="values")
expected.columns.name = "columns"
tm.assert_frame_equal(result, expected)
def test_pivot_index_list_values_none_immutable_args(self):
# GH37635
df = DataFrame(
{
"lev1": [1, 1, 1, 2, 2, 2],
"lev2": [1, 1, 2, 1, 1, 2],
"lev3": [1, 2, 1, 2, 1, 2],
"lev4": [1, 2, 3, 4, 5, 6],
"values": [0, 1, 2, 3, 4, 5],
}
)
index = ["lev1", "lev2"]
columns = ["lev3"]
result = df.pivot(index=index, columns=columns)
expected = DataFrame(
np.array(
[
[1.0, 2.0, 0.0, 1.0],
[3.0, np.nan, 2.0, np.nan],
[5.0, 4.0, 4.0, 3.0],
[np.nan, 6.0, np.nan, 5.0],
]
),
index=MultiIndex.from_arrays(
[(1, 1, 2, 2), (1, 2, 1, 2)], names=["lev1", "lev2"]
),
columns=MultiIndex.from_arrays(
[("lev4", "lev4", "values", "values"), (1, 2, 1, 2)],
names=[None, "lev3"],
),
)
tm.assert_frame_equal(result, expected)
assert index == ["lev1", "lev2"]
assert columns == ["lev3"]
def test_pivot_columns_not_given(self):
# GH#48293
df = DataFrame({"a": [1], "b": 1})
with pytest.raises(TypeError, match="missing 1 required keyword-only argument"):
df.pivot()
# this still fails because columns=None gets passed down to unstack as level=None
# while at that point None was converted to NaN
@pytest.mark.xfail(
using_string_dtype(), reason="TODO(infer_string) None is cast to NaN"
)
def test_pivot_columns_is_none(self):
# GH#48293
df = DataFrame({None: [1], "b": 2, "c": 3})
result = df.pivot(columns=None)
expected = DataFrame({("b", 1): [2], ("c", 1): 3})
tm.assert_frame_equal(result, expected)
result = df.pivot(columns=None, index="b")
expected = DataFrame({("c", 1): 3}, index=Index([2], name="b"))
tm.assert_frame_equal(result, expected)
result = df.pivot(columns=None, index="b", values="c")
expected = DataFrame({1: 3}, index=Index([2], name="b"))
tm.assert_frame_equal(result, expected)
def test_pivot_index_is_none(self, using_infer_string):
# GH#48293
df = DataFrame({None: [1], "b": 2, "c": 3})
result = df.pivot(columns="b", index=None)
expected = DataFrame({("c", 2): 3}, index=[1])
expected.columns.names = [None, "b"]
tm.assert_frame_equal(result, expected)
result = df.pivot(columns="b", index=None, values="c")
expected = DataFrame(3, index=[1], columns=Index([2], name="b"))
if using_infer_string:
expected.index.name = np.nan
tm.assert_frame_equal(result, expected)
def test_pivot_values_is_none(self):
# GH#48293
df = DataFrame({None: [1], "b": 2, "c": 3})
result = df.pivot(columns="b", index="c", values=None)
expected = DataFrame(
1, index=Index([3], name="c"), columns=Index([2], name="b")
)
tm.assert_frame_equal(result, expected)
result = df.pivot(columns="b", values=None)
expected = DataFrame(1, index=[0], columns=Index([2], name="b"))
tm.assert_frame_equal(result, expected)
def test_pivot_not_changing_index_name(self):
# GH#52692
df = DataFrame({"one": ["a"], "two": 0, "three": 1})
expected = df.copy(deep=True)
df.pivot(index="one", columns="two", values="three")
tm.assert_frame_equal(df, expected)
def test_pivot_table_empty_dataframe_correct_index(self):
# GH 21932
df = DataFrame([], columns=["a", "b", "value"])
pivot = df.pivot_table(index="a", columns="b", values="value", aggfunc="count")
expected = Index([], dtype="object", name="b")
tm.assert_index_equal(pivot.columns, expected)
def test_pivot_table_handles_explicit_datetime_types(self):
# GH#43574
df = DataFrame(
[
{"a": "x", "date_str": "2023-01-01", "amount": 1},
{"a": "y", "date_str": "2023-01-02", "amount": 2},
{"a": "z", "date_str": "2023-01-03", "amount": 3},
]
)
df["date"] = pd.to_datetime(df["date_str"])
with tm.assert_produces_warning(False):
pivot = df.pivot_table(
index=["a", "date"], values=["amount"], aggfunc="sum", margins=True
)
expected = MultiIndex.from_tuples(
[
("x", datetime.strptime("2023-01-01 00:00:00", "%Y-%m-%d %H:%M:%S")),
("y", datetime.strptime("2023-01-02 00:00:00", "%Y-%m-%d %H:%M:%S")),
("z", datetime.strptime("2023-01-03 00:00:00", "%Y-%m-%d %H:%M:%S")),
("All", ""),
],
names=["a", "date"],
)
tm.assert_index_equal(pivot.index, expected)
def test_pivot_table_with_margins_and_numeric_column_names(self):
# GH#26568
df = DataFrame([["a", "x", 1], ["a", "y", 2], ["b", "y", 3], ["b", "z", 4]])
result = df.pivot_table(
index=0, columns=1, values=2, aggfunc="sum", fill_value=0, margins=True
)
expected = DataFrame(
[[1, 2, 0, 3], [0, 3, 4, 7], [1, 5, 4, 10]],
columns=Index(["x", "y", "z", "All"], name=1),
index=Index(["a", "b", "All"], name=0),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("m", [1, 10])
def test_unstack_copy(self, m):
# GH#56633
levels = np.arange(m)
index = MultiIndex.from_product([levels] * 2)
values = np.arange(m * m * 100).reshape(m * m, 100)
df = DataFrame(values, index, np.arange(100))
df_orig = df.copy()
result = df.unstack(sort=False)
result.iloc[0, 0] = -1
tm.assert_frame_equal(df, df_orig)
def test_pivot_empty_with_datetime(self):
# GH#59126
df = DataFrame(
{
"timestamp": Series([], dtype=pd.DatetimeTZDtype(tz="UTC")),
"category": Series([], dtype=str),
"value": Series([], dtype=str),
}
)
df_pivoted = df.pivot_table(
index="category", columns="value", values="timestamp"
)
assert df_pivoted.empty
def test_pivot_margins_with_none_index(self):
# GH#58722
df = DataFrame(
{
"x": [1, 1, 2],
"y": [3, 3, 4],
"z": [5, 5, 6],
"w": [7, 8, 9],
}
)
result = df.pivot_table(
index=None,
columns=["y", "z"],
values="w",
margins=True,
aggfunc="count",
)
expected = DataFrame(
[[2, 2, 1, 1]],
index=["w"],
columns=MultiIndex(
levels=[[3, 4], [5, 6, "All"]],
codes=[[0, 0, 1, 1], [0, 2, 1, 2]],
names=["y", "z"],
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning")
def test_pivot_with_pyarrow_categorical(self):
# GH#53051
pa = pytest.importorskip("pyarrow")
df = DataFrame(
{"string_column": ["A", "B", "C"], "number_column": [1, 2, 3]}
).astype(
{
"string_column": ArrowDtype(pa.dictionary(pa.int32(), pa.string())),
"number_column": "float[pyarrow]",
}
)
df = df.pivot(columns=["string_column"], values=["number_column"])
multi_index = MultiIndex.from_arrays(
[["number_column", "number_column", "number_column"], ["A", "B", "C"]],
names=(None, "string_column"),
)
df_expected = DataFrame(
[[1.0, np.nan, np.nan], [np.nan, 2.0, np.nan], [np.nan, np.nan, 3.0]],
columns=multi_index,
)
tm.assert_frame_equal(
df, df_expected, check_dtype=False, check_column_type=False
)
@pytest.mark.parametrize("freq", ["D", "M", "Q", "Y"])
def test_pivot_empty_dataframe_period_dtype(self, freq):
# GH#62705
dtype = pd.PeriodDtype(freq=freq)
df = DataFrame({"index": [], "columns": [], "values": []})
df = df.astype({"values": dtype})
result = df.pivot(index="index", columns="columns", values="values")
expected_index = Index([], name="index", dtype="float64")
expected_columns = Index([], name="columns", dtype="float64")
expected = DataFrame(
index=expected_index, columns=expected_columns, dtype=dtype
)
tm.assert_frame_equal(result, expected)
| TestPivot |
python | ansible__ansible | test/units/module_utils/facts/virtual/test_sunos.py | {
"start": 286,
"end": 4976
} | class ____(SunOSVirtual):
def __init__(self, module):
self.module = module
def mock_get_bin_path(filename):
cmd_bins = {
"zonename": "/usr/bin/zonename",
"virtinfo": "/usr/sbin/virtinfo",
}
return cmd_bins.get(filename, None)
def test_get_virtual_facts_global(mocker):
module = mocker.Mock()
module.get_bin_path.side_effect = mock_get_bin_path
module.run_command.return_value = (0, "global", "")
mixin = MockVirtualSysctl(module=module)
guest_facts = mixin.get_virtual_facts()
expected = {
"virtualization_tech_guest": set(),
"virtualization_tech_host": set(["zone"]),
}
assert guest_facts == expected
@pytest.mark.parametrize(
("guest_tech", "expected_guest"),
[
pytest.param(
"VMware",
"vmware",
id="VMware",
),
pytest.param(
"VirtualBox",
"virtualbox",
id="VirtualBox",
),
],
)
def test_get_virtual_facts_guest(mocker, guest_tech, expected_guest):
module = mocker.Mock()
module.get_bin_path.side_effect = [
"/usr/bin/zonename",
"/usr/sbin/modinfo",
"/usr/sbin/virtinfo",
]
module.run_command.side_effect = [
(0, "local", ""),
(0, guest_tech, ""),
(0, "", ""),
]
mixin = MockVirtualSysctl(module=module)
guest_facts = mixin.get_virtual_facts()
expected = {
"virtualization_tech_guest": set([expected_guest, "zone"]),
"virtualization_tech_host": set(),
"virtualization_type": expected_guest,
"virtualization_role": "guest",
"container": "zone",
}
assert guest_facts == expected
@pytest.mark.parametrize(
("guest_tech", "expected_guest"),
[
pytest.param(
"VMware",
"vmware",
id="VMware",
),
pytest.param(
"VirtualBox",
"virtualbox",
id="VirtualBox",
),
],
)
def test_get_virtual_facts_ldoms(mocker, guest_tech, expected_guest):
module = mocker.Mock()
module.get_bin_path.side_effect = [
"/usr/bin/zonename",
"/usr/sbin/modinfo",
"/usr/sbin/virtinfo",
]
module.run_command.side_effect = [
(0, "local", ""),
(0, guest_tech, ""),
(0, "DOMAINROLE|impl=LDoms", ""),
]
mixin = MockVirtualSysctl(module=module)
guest_facts = mixin.get_virtual_facts()
expected = {
"virtualization_tech_guest": set(["ldom", expected_guest, "zone"]),
"virtualization_tech_host": set(),
"virtualization_type": "ldom",
"virtualization_role": "guest",
"container": "zone",
}
assert guest_facts == expected
@pytest.mark.parametrize(
("guest_tech", "expected_guest"),
[
pytest.param(
"VMware",
"vmware",
id="VMware",
),
pytest.param(
"VirtualBox",
"virtualbox",
id="VirtualBox",
),
pytest.param(
"Parallels",
"parallels",
id="Parallels",
),
pytest.param(
"HVM domU",
"xen",
id="Xen",
),
pytest.param(
"KVM",
"kvm",
id="KVM",
),
],
)
def test_get_virtual_facts_smbios(mocker, guest_tech, expected_guest):
module = mocker.Mock()
module.get_bin_path.side_effect = [
"/usr/bin/zonename",
None,
None,
"/usr/sbin/smbios",
]
module.run_command.side_effect = [
(0, "local", ""),
(0, guest_tech, ""),
]
mixin = MockVirtualSysctl(module=module)
guest_facts = mixin.get_virtual_facts()
expected = {
"virtualization_tech_guest": set([expected_guest, "zone"]),
"virtualization_tech_host": set(),
"virtualization_type": expected_guest,
"virtualization_role": "guest",
"container": "zone",
}
assert guest_facts == expected
def test_get_virtual_facts_openvz(mocker):
mocker.patch("os.path.exists", return_value=True)
module = mocker.Mock()
module.get_bin_path.side_effect = [
None, # zonename
"/usr/sbin/virtinfo",
]
module.run_command.return_value = (0, "", "")
mixin = MockVirtualSysctl(module=module)
guest_facts = mixin.get_virtual_facts()
expected = {
"virtualization_role": "guest",
"virtualization_tech_guest": set(["virtuozzo"]),
"virtualization_tech_host": set(),
"virtualization_type": "virtuozzo",
}
assert guest_facts == expected
| MockVirtualSysctl |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_event_details.py | {
"start": 11518,
"end": 16134
} | class ____(MetricsEnhancedPerformanceTestCase):
endpoint = "sentry-api-0-organization-event-details"
def setUp(self) -> None:
self.init_snuba()
self.ten_mins_ago = before_now(minutes=10)
self.transaction_data = load_data("transaction", timestamp=self.ten_mins_ago)
self.RESULT_COLUMN = "span.averageResults"
event = self.store_event(self.transaction_data, self.project)
self.url = reverse(
self.endpoint,
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": event.event_id,
},
)
self.login_as(user=self.user)
self.store_span_metric(
1,
internal_metric=constants.SELF_TIME_LIGHT,
timestamp=self.ten_mins_ago,
tags={"span.group": "26b881987e4bad99"},
)
def test_get_without_feature(self) -> None:
with self.feature({"organizations:insight-modules": False}):
response = self.client.get(self.url, {"averageColumn": "span.self_time"})
assert response.status_code == 200, response.content
entries = response.data["entries"] # type: ignore[attr-defined]
for entry in entries:
if entry["type"] == "spans":
for span in entry["data"]:
assert span.get(self.RESULT_COLUMN) is None
def test_get(self) -> None:
with self.feature("organizations:insight-modules"):
response = self.client.get(self.url, {"averageColumn": "span.self_time"})
assert response.status_code == 200, response.content
entries = response.data["entries"] # type: ignore[attr-defined]
for entry in entries:
if entry["type"] == "spans":
for span in entry["data"]:
if span["op"] == "db":
assert span[self.RESULT_COLUMN] == {"avg(span.self_time)": 1.0}
if span["op"] == "django.middleware":
assert self.RESULT_COLUMN not in span
def test_get_multiple_columns(self) -> None:
self.store_span_metric(
2,
internal_metric=constants.SPAN_METRICS_MAP["span.duration"],
timestamp=self.ten_mins_ago,
tags={"span.group": "26b881987e4bad99"},
)
with self.feature("organizations:insight-modules"):
response = self.client.get(
self.url, {"averageColumn": ["span.self_time", "span.duration"]}
)
assert response.status_code == 200, response.content
entries = response.data["entries"] # type: ignore[attr-defined]
for entry in entries:
if entry["type"] == "spans":
for span in entry["data"]:
if span["op"] == "db":
assert span[self.RESULT_COLUMN] == {
"avg(span.self_time)": 1.0,
"avg(span.duration)": 2.0,
}
if span["op"] == "django.middleware":
assert self.RESULT_COLUMN not in span
def test_nan_column(self) -> None:
# If there's nothing stored for a metric, span.duration in this case the query returns nan
with self.feature("organizations:insight-modules"):
response = self.client.get(
self.url, {"averageColumn": ["span.self_time", "span.duration"]}
)
assert response.status_code == 200, response.content
entries = response.data["entries"] # type: ignore[attr-defined]
for entry in entries:
if entry["type"] == "spans":
for span in entry["data"]:
if span["op"] == "db":
assert span[self.RESULT_COLUMN] == {"avg(span.self_time)": 1.0}
if span["op"] == "django.middleware":
assert self.RESULT_COLUMN not in span
def test_invalid_column(self) -> None:
# If any columns are invalid, ignore average field in results completely
response = self.client.get(
self.url, {"averageColumn": ["span.self_time", "span.everything"]}
)
assert response.status_code == 200, response.content
entries = response.data["entries"] # type: ignore[attr-defined]
for entry in entries:
if entry["type"] == "spans":
for span in entry["data"]:
assert self.RESULT_COLUMN not in span
| EventComparisonTest |
python | haoel__leetcode | algorithms/python/jumpGame/jumpGame.py | {
"start": 520,
"end": 811
} | class ____:
def canJump(self, nums: List[int]) -> bool:
n = len(nums)
# max index where we can go
farest = 0;
for i in range(n):
if i > farest:
return False
farest = max(farest, i + nums[i])
return True
| Solution |
python | pydata__xarray | xarray/coding/variables.py | {
"start": 8417,
"end": 17907
} | class ____(VariableCoder):
"""Mask or unmask fill values according to CF conventions."""
def __init__(
self,
decode_times: bool | CFDatetimeCoder = False,
decode_timedelta: bool | CFTimedeltaCoder = False,
) -> None:
self.decode_times = decode_times
self.decode_timedelta = decode_timedelta
def encode(self, variable: Variable, name: T_Name = None):
dims, data, attrs, encoding = unpack_for_encoding(variable)
dtype = np.dtype(encoding.get("dtype", data.dtype))
# from netCDF best practices
# https://docs.unidata.ucar.edu/nug/current/best_practices.html#bp_Unsigned-Data
# "_Unsigned = "true" to indicate that
# integer data should be treated as unsigned"
has_unsigned = encoding.get("_Unsigned") is not None
fv = encoding.get("_FillValue")
mv = encoding.get("missing_value")
fill_value = None
fv_exists = fv is not None
mv_exists = mv is not None
if not fv_exists and not mv_exists:
return variable
if fv_exists and mv_exists and not duck_array_ops.allclose_or_equiv(fv, mv):
raise ValueError(
f"Variable {name!r} has conflicting _FillValue ({fv}) and missing_value ({mv}). Cannot encode data."
)
if fv_exists:
# Ensure _FillValue is cast to same dtype as data's
# but not for packed data
if has_unsigned:
encoding["_FillValue"] = _encode_unsigned_fill_value(name, fv, dtype)
elif "add_offset" not in encoding and "scale_factor" not in encoding:
encoding["_FillValue"] = dtype.type(fv)
else:
encoding["_FillValue"] = fv
fill_value = pop_to(encoding, attrs, "_FillValue", name=name)
if mv_exists:
# try to use _FillValue, if it exists to align both values
# or use missing_value and ensure it's cast to same dtype as data's
# but not for packed data
encoding["missing_value"] = attrs.get(
"_FillValue",
(
_encode_unsigned_fill_value(name, mv, dtype)
if has_unsigned
else (
dtype.type(mv)
if "add_offset" not in encoding
and "scale_factor" not in encoding
else mv
)
),
)
fill_value = pop_to(encoding, attrs, "missing_value", name=name)
# apply fillna
if fill_value is not None and not pd.isnull(fill_value):
# special case DateTime to properly handle NaT
if _is_time_like(attrs.get("units")):
if data.dtype.kind in "iu":
data = duck_array_ops.where(
data != np.iinfo(np.int64).min, data, fill_value
)
else:
# if we have float data (data was packed prior masking)
# we just fillna
data = duck_array_ops.fillna(data, fill_value)
# but if the fill_value is of integer type
# we need to round and cast
if np.array(fill_value).dtype.kind in "iu":
data = duck_array_ops.astype(
duck_array_ops.around(data), type(fill_value)
)
else:
data = duck_array_ops.fillna(data, fill_value)
if fill_value is not None and has_unsigned:
pop_to(encoding, attrs, "_Unsigned")
# XXX: Is this actually needed? Doesn't the backend handle this?
# two-stage casting to prevent undefined cast from float to unsigned int
# first float -> int with corresponding itemsize
# second int -> int/uint to final itemsize
signed_dtype = np.dtype(f"i{data.itemsize}")
data = duck_array_ops.astype(
duck_array_ops.astype(
duck_array_ops.around(data), signed_dtype, copy=False
),
dtype,
copy=False,
)
attrs["_FillValue"] = fill_value
return Variable(dims, data, attrs, encoding, fastpath=True)
def decode(self, variable: Variable, name: T_Name = None):
raw_fill_dict, encoded_fill_values = _check_fill_values(
variable.attrs, name, variable.dtype
)
if "_Unsigned" not in variable.attrs and not raw_fill_dict:
return variable
dims, data, attrs, encoding = unpack_for_decoding(variable)
# Even if _Unsigned is used, retain on-disk _FillValue
for attr, value in raw_fill_dict.items():
safe_setitem(encoding, attr, value, name=name)
if "_Unsigned" in attrs:
unsigned = pop_to(attrs, encoding, "_Unsigned")
data = _convert_unsigned_fill_value(
name,
data,
unsigned,
raw_fill_dict.get("_FillValue"),
encoded_fill_values,
)
if encoded_fill_values:
dtype: np.typing.DTypeLike
decoded_fill_value: Any
# in case of packed data we have to decode into float
# in any case
if "scale_factor" in attrs or "add_offset" in attrs:
dtype, decoded_fill_value = (
_choose_float_dtype(data.dtype, attrs),
np.nan,
)
else:
# in case of no-packing special case DateTime/Timedelta to properly
# handle NaT, we need to check if time-like will be decoded
# or not in further processing
is_time_like = _is_time_like(attrs.get("units"))
if (
(is_time_like == "datetime" and self.decode_times)
or (is_time_like == "timedelta" and self.decode_timedelta)
) and data.dtype.kind in "iu":
dtype = np.int64
decoded_fill_value = np.iinfo(np.int64).min
else:
dtype, decoded_fill_value = dtypes.maybe_promote(data.dtype)
transform = partial(
_apply_mask,
encoded_fill_values=encoded_fill_values,
decoded_fill_value=decoded_fill_value,
dtype=dtype,
)
data = lazy_elemwise_func(data, transform, dtype)
return Variable(dims, data, attrs, encoding, fastpath=True)
def _scale_offset_decoding(
data, scale_factor, add_offset, dtype: np.typing.DTypeLike | None
):
data = data.astype(dtype=dtype, copy=True)
if scale_factor is not None:
data *= scale_factor
if add_offset is not None:
data += add_offset
return data
def _choose_float_dtype(
dtype: np.dtype, mapping: MutableMapping
) -> type[np.floating[Any]]:
"""Return a float dtype that can losslessly represent `dtype` values."""
# check scale/offset first to derive wanted float dtype
# see https://github.com/pydata/xarray/issues/5597#issuecomment-879561954
scale_factor = mapping.get("scale_factor")
add_offset = mapping.get("add_offset")
if scale_factor is not None or add_offset is not None:
# get the type from scale_factor/add_offset to determine
# the needed floating point type
if scale_factor is not None:
scale_type = np.dtype(type(scale_factor))
if add_offset is not None:
offset_type = np.dtype(type(add_offset))
# CF conforming, both scale_factor and add-offset are given and
# of same floating point type (float32/64)
if (
add_offset is not None
and scale_factor is not None
and offset_type == scale_type
and scale_type in [np.float32, np.float64]
):
# in case of int32 -> we need upcast to float64
# due to precision issues
if dtype.itemsize == 4 and np.issubdtype(dtype, np.integer):
return np.float64
return scale_type.type
# Not CF conforming and add_offset given:
# A scale factor is entirely safe (vanishing into the mantissa),
# but a large integer offset could lead to loss of precision.
# Sensitivity analysis can be tricky, so we just use a float64
# if there's any offset at all - better unoptimised than wrong!
if add_offset is not None:
return np.float64
# return dtype depending on given scale_factor
return scale_type.type
# If no scale_factor or add_offset is given, use some general rules.
# Keep float32 as-is. Upcast half-precision to single-precision,
# because float16 is "intended for storage but not computation"
if dtype.itemsize <= 4 and np.issubdtype(dtype, np.floating):
return np.float32
# float32 can exactly represent all integers up to 24 bits
if dtype.itemsize <= 2 and np.issubdtype(dtype, np.integer):
return np.float32
# For all other types and circumstances, we just use float64.
# Todo: with nc-complex from netcdf4-python >= 1.7.0 this is available
# (safe because eg. complex numbers are not supported in NetCDF)
return np.float64
| CFMaskCoder |
python | pytest-dev__pytest-xdist | testing/acceptance_test.py | {
"start": 28860,
"end": 38050
} | class ____:
def test_load_single(self, pytester: pytest.Pytester) -> None:
f = pytester.makepyfile(
"""
import os
def test_a(): os._exit(1)
def test_b(): pass
"""
)
res = pytester.runpytest(f, "-n1")
res.stdout.fnmatch_lines(
[
"replacing crashed worker gw*",
"worker*crashed while running*",
"*1 failed*1 passed*",
]
)
def test_load_multiple(self, pytester: pytest.Pytester) -> None:
f = pytester.makepyfile(
"""
import os
def test_a(): pass
def test_b(): os._exit(1)
def test_c(): pass
def test_d(): pass
"""
)
res = pytester.runpytest(f, "-n2")
res.stdout.fnmatch_lines(
[
"replacing crashed worker gw*",
"worker*crashed while running*",
"*1 failed*3 passed*",
]
)
def test_each_single(self, pytester: pytest.Pytester) -> None:
f = pytester.makepyfile(
"""
import os
def test_a(): os._exit(1)
def test_b(): pass
"""
)
res = pytester.runpytest(f, "--dist=each", "--tx=popen")
res.stdout.fnmatch_lines(
[
"replacing crashed worker gw*",
"worker*crashed while running*",
"*1 failed*1 passed*",
]
)
@pytest.mark.xfail(reason="#20: xdist race condition on node restart")
def test_each_multiple(self, pytester: pytest.Pytester) -> None:
f = pytester.makepyfile(
"""
import os
def test_a(): os._exit(1)
def test_b(): pass
"""
)
res = pytester.runpytest(f, "--dist=each", "--tx=2*popen")
res.stdout.fnmatch_lines(
[
"*Replacing crashed worker*",
"*Worker*crashed while running*",
"*2 failed*2 passed*",
]
)
def test_max_worker_restart(self, pytester: pytest.Pytester) -> None:
f = pytester.makepyfile(
"""
import os
def test_a(): pass
def test_b(): os._exit(1)
def test_c(): os._exit(1)
def test_d(): pass
"""
)
res = pytester.runpytest(f, "-n4", "--max-worker-restart=1")
res.stdout.fnmatch_lines(
[
"replacing crashed worker*",
"maximum crashed workers reached: 1*",
"worker*crashed while running*",
"worker*crashed while running*",
"*2 failed*2 passed*",
]
)
def test_max_worker_restart_tests_queued(self, pytester: pytest.Pytester) -> None:
f = pytester.makepyfile(
"""
import os, pytest
@pytest.mark.parametrize('i', range(10))
def test(i): os._exit(1)
"""
)
res = pytester.runpytest(f, "-n2", "--max-worker-restart=3")
res.stdout.fnmatch_lines(
[
"replacing crashed worker*",
"maximum crashed workers reached: 3*",
"worker*crashed while running*",
"worker*crashed while running*",
"* xdist: maximum crashed workers reached: 3 *",
"* 4 failed in *",
]
)
assert "INTERNALERROR" not in res.stdout.str()
def test_max_worker_restart_die(self, pytester: pytest.Pytester) -> None:
f = pytester.makepyfile(
"""
import os
os._exit(1)
"""
)
res = pytester.runpytest(f, "-n4", "--max-worker-restart=0")
res.stdout.fnmatch_lines(
[
"* xdist: worker gw* crashed and worker restarting disabled *",
"* no tests ran in *",
]
)
def test_disable_restart(self, pytester: pytest.Pytester) -> None:
f = pytester.makepyfile(
"""
import os
def test_a(): pass
def test_b(): os._exit(1)
def test_c(): pass
"""
)
res = pytester.runpytest(f, "-n4", "--max-worker-restart=0")
res.stdout.fnmatch_lines(
[
"worker gw* crashed and worker restarting disabled",
"*worker*crashed while running*",
"* xdist: worker gw* crashed and worker restarting disabled *",
"* 1 failed, 2 passed in *",
]
)
@pytest.mark.parametrize("n", [0, 2])
def test_worker_id_fixture(pytester: pytest.Pytester, n: int) -> None:
import glob
f = pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize("run_num", range(2))
def test_worker_id1(worker_id, run_num):
with open("worker_id%s.txt" % run_num, "w") as f:
f.write(worker_id)
"""
)
result = pytester.runpytest(f, "-n%d" % n)
result.stdout.fnmatch_lines("* 2 passed in *")
worker_ids = set()
for fname in glob.glob(str(pytester.path / "*.txt")):
with open(fname) as fp:
worker_ids.add(fp.read().strip())
if n == 0:
assert worker_ids == {"master"}
else:
assert worker_ids == {"gw0", "gw1"}
@pytest.mark.parametrize("n", [0, 2])
def test_testrun_uid_fixture(pytester: pytest.Pytester, n: int) -> None:
import glob
f = pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize("run_num", range(2))
def test_testrun_uid1(testrun_uid, run_num):
with open("testrun_uid%s.txt" % run_num, "w") as f:
f.write(testrun_uid)
"""
)
result = pytester.runpytest(f, "-n%d" % n)
result.stdout.fnmatch_lines("* 2 passed in *")
testrun_uids = set()
for fname in glob.glob(str(pytester.path / "*.txt")):
with open(fname) as fp:
testrun_uids.add(fp.read().strip())
assert len(testrun_uids) == 1
assert len(testrun_uids.pop()) == 32
@pytest.mark.parametrize("tb", ["auto", "long", "short", "no", "line", "native"])
def test_error_report_styles(pytester: pytest.Pytester, tb: str) -> None:
pytester.makepyfile(
"""
import pytest
def test_error_report_styles():
raise RuntimeError('some failure happened')
"""
)
result = pytester.runpytest("-n1", "--tb=%s" % tb)
if tb != "no":
result.stdout.fnmatch_lines("*some failure happened*")
result.assert_outcomes(failed=1)
def test_color_yes_collection_on_non_atty(pytester: pytest.Pytester) -> None:
"""Skip collect progress report when working on non-terminals.
Similar to pytest-dev/pytest#1397
"""
pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize('i', range(10))
def test_this(i):
assert 1
"""
)
args = ["--color=yes", "-n2"]
result = pytester.runpytest(*args)
assert "test session starts" in result.stdout.str()
assert "\x1b[1m" in result.stdout.str()
assert "created: 2/2 workers" in result.stdout.str()
assert "2 workers [10 items]" in result.stdout.str()
assert "collecting:" not in result.stdout.str()
def test_without_terminal_plugin(pytester: pytest.Pytester) -> None:
"""No output when terminal plugin is disabled."""
pytester.makepyfile(
"""
def test_1():
pass
"""
)
result = pytester.runpytest("-p", "no:terminal", "-n2")
assert result.stdout.str() == ""
assert result.stderr.str() == ""
assert result.ret == 0
def test_internal_error_with_maxfail(pytester: pytest.Pytester) -> None:
"""Internal error when using --maxfail option (#62, #65)."""
pytester.makepyfile(
"""
import pytest
@pytest.fixture(params=['1', '2'])
def crasher():
raise RuntimeError
def test_aaa0(crasher):
pass
def test_aaa1(crasher):
pass
"""
)
result = pytester.runpytest_subprocess("--maxfail=1", "-n1")
result.stdout.re_match_lines([".* [12] errors? in .*"])
assert "INTERNALERROR" not in result.stderr.str()
def test_maxfail_causes_early_termination(pytester: pytest.Pytester) -> None:
"""Ensure subsequent tests on a worker aren't run when using --maxfail (#1024)."""
pytester.makepyfile(
"""
def test1():
assert False
def test2():
pass
"""
)
result = pytester.runpytest_subprocess("--maxfail=1", "-n 1")
result.assert_outcomes(failed=1)
def test_internal_errors_propagate_to_controller(pytester: pytest.Pytester) -> None:
pytester.makeconftest(
"""
def pytest_collection_modifyitems():
raise RuntimeError("Some runtime error")
"""
)
pytester.makepyfile("def test(): pass")
result = pytester.runpytest("-n1")
result.stdout.fnmatch_lines(["*RuntimeError: Some runtime error*"])
| TestNodeFailure |
python | doocs__leetcode | solution/3400-3499/3433.Count Mentions Per User/Solution.py | {
"start": 0,
"end": 796
} | class ____:
def countMentions(self, numberOfUsers: int, events: List[List[str]]) -> List[int]:
events.sort(key=lambda e: (int(e[1]), e[0][2]))
ans = [0] * numberOfUsers
online_t = [0] * numberOfUsers
lazy = 0
for etype, ts, s in events:
cur = int(ts)
if etype[0] == "O":
online_t[int(s)] = cur + 60
elif s[0] == "A":
lazy += 1
elif s[0] == "H":
for i, t in enumerate(online_t):
if t <= cur:
ans[i] += 1
else:
for a in s.split():
ans[int(a[2:])] += 1
if lazy:
for i in range(numberOfUsers):
ans[i] += lazy
return ans
| Solution |
python | pallets__werkzeug | src/werkzeug/local.py | {
"start": 850,
"end": 3320
} | class ____:
"""Create a namespace of context-local data. This wraps a
:class:`ContextVar` containing a :class:`dict` value.
This may incur a performance penalty compared to using individual
context vars, as it has to copy data to avoid mutating the dict
between nested contexts.
:param context_var: The :class:`~contextvars.ContextVar` to use as
storage for this local. If not given, one will be created.
Context vars not created at the global scope may interfere with
garbage collection.
.. versionchanged:: 2.0
Uses ``ContextVar`` instead of a custom storage implementation.
"""
__slots__ = ("__storage",)
def __init__(self, context_var: ContextVar[dict[str, t.Any]] | None = None) -> None:
if context_var is None:
# A ContextVar not created at global scope interferes with
# Python's garbage collection. However, a local only makes
# sense defined at the global scope as well, in which case
# the GC issue doesn't seem relevant.
context_var = ContextVar(f"werkzeug.Local<{id(self)}>.storage")
object.__setattr__(self, "_Local__storage", context_var)
def __iter__(self) -> t.Iterator[tuple[str, t.Any]]:
return iter(self.__storage.get({}).items())
def __call__(
self, name: str, *, unbound_message: str | None = None
) -> LocalProxy[t.Any]:
"""Create a :class:`LocalProxy` that access an attribute on this
local namespace.
:param name: Proxy this attribute.
:param unbound_message: The error message that the proxy will
show if the attribute isn't set.
"""
return LocalProxy(self, name, unbound_message=unbound_message)
def __release_local__(self) -> None:
self.__storage.set({})
def __getattr__(self, name: str) -> t.Any:
values = self.__storage.get({})
if name in values:
return values[name]
raise AttributeError(name)
def __setattr__(self, name: str, value: t.Any) -> None:
values = self.__storage.get({}).copy()
values[name] = value
self.__storage.set(values)
def __delattr__(self, name: str) -> None:
values = self.__storage.get({})
if name in values:
values = values.copy()
del values[name]
self.__storage.set(values)
else:
raise AttributeError(name)
| Local |
python | mlflow__mlflow | mlflow/server/graphql/autogenerated_graphql_schema.py | {
"start": 7483,
"end": 7692
} | class ____(graphene.InputObjectType):
filter = graphene.String()
max_results = LongString()
order_by = graphene.List(graphene.String)
page_token = graphene.String()
| MlflowSearchModelVersionsInput |
python | altair-viz__altair | tests/utils/test_core.py | {
"start": 1389,
"end": 1502
} | class ____(ValueChannel, schemapi.SchemaBase):
_schema = {json_schema_dict_str}
_encoding_name = "y"
| YValue |
python | astropy__astropy | astropy/time/tests/test_basic.py | {
"start": 1449,
"end": 29111
} | class ____:
"""Basic tests stemming from initial example and API reference"""
def test_simple(self):
times = ["1999-01-01 00:00:00.123456789", "2010-01-01 00:00:00"]
t = Time(times, format="iso", scale="utc")
assert (
repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>"
)
assert allclose_jd(t.jd1, np.array([2451180.0, 2455198.0]))
assert allclose_jd2(
t.jd2, np.array([-0.5 + 1.4288980208333335e-06, -0.50000000e00])
)
# Set scale to TAI
t = t.tai
assert (
repr(t) == "<Time object: scale='tai' format='iso' "
"value=['1999-01-01 00:00:32.123' '2010-01-01 00:00:34.000']>"
)
assert allclose_jd(t.jd1, np.array([2451180.0, 2455198.0]))
assert allclose_jd2(
t.jd2,
np.array([-0.5 + 0.00037179926839122024, -0.5 + 0.00039351851851851852]),
)
# Get a new ``Time`` object which is referenced to the TT scale
# (internal JD1 and JD1 are now with respect to TT scale)"""
assert (
repr(t.tt) == "<Time object: scale='tt' format='iso' "
"value=['1999-01-01 00:01:04.307' '2010-01-01 00:01:06.184']>"
)
# Get the representation of the ``Time`` object in a particular format
# (in this case seconds since 1998.0). This returns either a scalar or
# array, depending on whether the input was a scalar or array"""
assert allclose_sec(
t.cxcsec, np.array([31536064.307456788, 378691266.18400002])
)
def test_different_dimensions(self):
"""Test scalars, vector, and higher-dimensions"""
# scalar
val, val1 = 2450000.0, 0.125
t1 = Time(val, val1, format="jd")
assert t1.isscalar is True and t1.shape == ()
# vector
val = np.arange(2450000.0, 2450010.0)
t2 = Time(val, format="jd")
assert t2.isscalar is False and t2.shape == val.shape
# explicitly check broadcasting for mixed vector, scalar.
val2 = 0.0
t3 = Time(val, val2, format="jd")
assert t3.isscalar is False and t3.shape == val.shape
val2 = (np.arange(5.0) / 10.0).reshape(5, 1)
# now see if broadcasting to two-dimensional works
t4 = Time(val, val2, format="jd")
assert t4.isscalar is False
assert t4.shape == np.broadcast(val, val2).shape
@pytest.mark.parametrize("format_", Time.FORMATS)
def test_empty_value(self, format_):
t = Time([], format=format_)
assert t.size == 0
assert t.shape == (0,)
assert t.format == format_
t_value = t.value
assert t_value.size == 0
assert t_value.shape == (0,)
t2 = Time(t_value, format=format_)
assert t2.size == 0
assert t2.shape == (0,)
assert t2.format == format_
t3 = t2.tai
assert t3.size == 0
assert t3.shape == (0,)
assert t3.format == format_
assert t3.scale == "tai"
@pytest.mark.parametrize("value", [2455197.5, [2455197.5]])
def test_copy_time(self, value):
"""Test copying the values of a Time object by passing it into the
Time initializer.
"""
t = Time(value, format="jd", scale="utc")
t2 = Time(t, copy=False)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is t2._time.jd1
assert t._time.jd2 is t2._time.jd2
t2 = Time(t, copy=True)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is not t2._time.jd1
assert t._time.jd2 is not t2._time.jd2
# Include initializers
t2 = Time(t, format="iso", scale="tai", precision=1)
assert t2.value == "2010-01-01 00:00:34.0"
t2 = Time(t, format="iso", scale="tai", out_subfmt="date")
assert t2.value == "2010-01-01"
def test_getitem(self):
"""Test that Time objects holding arrays are properly subscriptable,
set isscalar as appropriate, and also subscript delta_ut1_utc, etc."""
mjd = np.arange(50000, 50010)
t = Time(mjd, format="mjd", scale="utc", location=("45d", "50d"))
t1 = t[3]
assert t1.isscalar is True
assert t1._time.jd1 == t._time.jd1[3]
assert t1.location is t.location
t1a = Time(mjd[3], format="mjd", scale="utc")
assert t1a.isscalar is True
assert np.all(t1._time.jd1 == t1a._time.jd1)
t1b = Time(t[3])
assert t1b.isscalar is True
assert np.all(t1._time.jd1 == t1b._time.jd1)
t2 = t[4:6]
assert t2.isscalar is False
assert np.all(t2._time.jd1 == t._time.jd1[4:6])
assert t2.location is t.location
t2a = Time(t[4:6])
assert t2a.isscalar is False
assert np.all(t2a._time.jd1 == t._time.jd1[4:6])
t2b = Time([t[4], t[5]])
assert t2b.isscalar is False
assert np.all(t2b._time.jd1 == t._time.jd1[4:6])
t2c = Time((t[4], t[5]))
assert t2c.isscalar is False
assert np.all(t2c._time.jd1 == t._time.jd1[4:6])
t.delta_tdb_tt = np.arange(len(t)) # Explicitly set (not testing .tdb)
t3 = t[4:6]
assert np.all(t3._delta_tdb_tt == t._delta_tdb_tt[4:6])
t4 = Time(
mjd,
format="mjd",
scale="utc",
location=(np.arange(len(mjd)), np.arange(len(mjd))),
)
t5a = t4[3]
assert t5a.location == t4.location[3]
assert t5a.location.shape == ()
t5b = t4[3:4]
assert t5b.location.shape == (1,)
# Check that indexing a size-1 array returns a scalar location as well;
# see gh-10113.
t5c = t5b[0]
assert t5c.location.shape == ()
t6 = t4[4:6]
assert np.all(t6.location == t4.location[4:6])
# check it is a view
# (via ndarray, since quantity setter problematic for structured array)
allzeros = np.array((0.0, 0.0, 0.0), dtype=t4.location.dtype)
assert t6.location.view(np.ndarray)[-1] != allzeros
assert t4.location.view(np.ndarray)[5] != allzeros
t6.location.view(np.ndarray)[-1] = allzeros
assert t4.location.view(np.ndarray)[5] == allzeros
# Test subscription also works for two-dimensional arrays.
frac = np.arange(0.0, 0.999, 0.2)
t7 = Time(
mjd[:, np.newaxis] + frac,
format="mjd",
scale="utc",
location=("45d", "50d"),
)
assert t7[0, 0]._time.jd1 == t7._time.jd1[0, 0]
assert t7[0, 0].isscalar is True
assert np.all(t7[5]._time.jd1 == t7._time.jd1[5])
assert np.all(t7[5]._time.jd2 == t7._time.jd2[5])
assert np.all(t7[:, 2]._time.jd1 == t7._time.jd1[:, 2])
assert np.all(t7[:, 2]._time.jd2 == t7._time.jd2[:, 2])
assert np.all(t7[:, 0]._time.jd1 == t._time.jd1)
assert np.all(t7[:, 0]._time.jd2 == t._time.jd2)
# Get tdb to check that delta_tdb_tt attribute is sliced properly.
t7_tdb = t7.tdb
assert t7_tdb[0, 0].delta_tdb_tt == t7_tdb.delta_tdb_tt[0, 0]
assert np.all(t7_tdb[5].delta_tdb_tt == t7_tdb.delta_tdb_tt[5])
assert np.all(t7_tdb[:, 2].delta_tdb_tt == t7_tdb.delta_tdb_tt[:, 2])
# Explicitly set delta_tdb_tt attribute. Now it should not be sliced.
t7.delta_tdb_tt = 0.1
t7_tdb2 = t7.tdb
assert t7_tdb2[0, 0].delta_tdb_tt == 0.1
assert t7_tdb2[5].delta_tdb_tt == 0.1
assert t7_tdb2[:, 2].delta_tdb_tt == 0.1
# Check broadcasting of location.
t8 = Time(
mjd[:, np.newaxis] + frac,
format="mjd",
scale="utc",
location=(np.arange(len(frac)), np.arange(len(frac))),
)
assert t8[0, 0].location == t8.location[0, 0]
assert np.all(t8[5].location == t8.location[5])
assert np.all(t8[:, 2].location == t8.location[:, 2])
# Finally check empty array.
t9 = t[:0]
assert t9.isscalar is False
assert t9.shape == (0,)
assert t9.size == 0
def test_properties(self):
"""Use properties to convert scales and formats. Note that the UT1 to
UTC transformation requires a supplementary value (``delta_ut1_utc``)
that can be obtained by interpolating from a table supplied by IERS.
This is tested separately."""
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert allclose_jd(t.jd, 2455197.5)
assert t.iso == "2010-01-01 00:00:00.000"
assert t.tt.iso == "2010-01-01 00:01:06.184"
assert t.tai.fits == "2010-01-01T00:00:34.000"
assert allclose_jd(t.utc.jd, 2455197.5)
assert allclose_jd(t.ut1.jd, 2455197.500003867)
assert t.tcg.isot == "2010-01-01T00:01:06.910"
assert allclose_sec(t.unix, 1262304000.0)
assert allclose_sec(t.cxcsec, 378691266.184)
assert allclose_sec(t.gps, 946339215.0)
assert t.datetime == datetime.datetime(2010, 1, 1)
def test_precision(self):
"""Set the output precision which is used for some formats. This is
also a test of the code that provides a dict for global and instance
options."""
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
# Uses initial class-defined precision=3
assert t.iso == "2010-01-01 00:00:00.000"
# Set instance precision to 9
t.precision = 9
assert t.iso == "2010-01-01 00:00:00.000000000"
assert t.tai.utc.iso == "2010-01-01 00:00:00.000000000"
def test_precision_input(self):
"""Verifies that precision can only be 0-9 (inclusive). Any other
value should raise a ValueError exception."""
err_message = "precision attribute must be an int"
with pytest.raises(ValueError, match=err_message):
t = Time("2010-01-01 00:00:00", format="iso", scale="utc", precision=10)
with pytest.raises(ValueError, match=err_message):
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
t.precision = -1
def test_transforms(self):
"""Transform from UTC to all supported time scales (TAI, TCB, TCG,
TDB, TT, UT1, UTC). This requires auxiliary information (latitude and
longitude)."""
lat = 19.48125
lon = -155.933222
t = Time(
"2006-01-15 21:24:37.5",
format="iso",
scale="utc",
precision=7,
location=(lon, lat),
)
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == "2006-01-15 21:24:37.5000000"
assert t.ut1.iso == "2006-01-15 21:24:37.8341000"
assert t.tai.iso == "2006-01-15 21:25:10.5000000"
assert t.tt.iso == "2006-01-15 21:25:42.6840000"
assert t.tcg.iso == "2006-01-15 21:25:43.3226905"
assert t.tdb.iso == "2006-01-15 21:25:42.6843728"
assert t.tcb.iso == "2006-01-15 21:25:56.8939523"
def test_transforms_no_location(self):
"""Location should default to geocenter (relevant for TDB, TCB)."""
t = Time("2006-01-15 21:24:37.5", format="iso", scale="utc", precision=7)
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == "2006-01-15 21:24:37.5000000"
assert t.ut1.iso == "2006-01-15 21:24:37.8341000"
assert t.tai.iso == "2006-01-15 21:25:10.5000000"
assert t.tt.iso == "2006-01-15 21:25:42.6840000"
assert t.tcg.iso == "2006-01-15 21:25:43.3226905"
assert t.tdb.iso == "2006-01-15 21:25:42.6843725"
assert t.tcb.iso == "2006-01-15 21:25:56.8939519"
# Check we get the same result
t2 = Time(
"2006-01-15 21:24:37.5",
format="iso",
scale="utc",
location=(0 * u.m, 0 * u.m, 0 * u.m),
)
assert t == t2
assert t.tdb == t2.tdb
def test_location(self):
"""Check that location creates an EarthLocation object, and that
such objects can be used as arguments.
"""
lat = 19.48125
lon = -155.933222
t = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=(lon, lat),
)
assert isinstance(t.location, EarthLocation)
location = EarthLocation(lon, lat)
t2 = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=location,
)
assert isinstance(t2.location, EarthLocation)
assert t2.location == t.location
t3 = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=(location.x, location.y, location.z),
)
assert isinstance(t3.location, EarthLocation)
assert t3.location == t.location
def test_location_array(self):
"""Check that location arrays are checked for size and used
for the corresponding times. Also checks that erfa
can handle array-valued locations, and can broadcast these if needed.
"""
lat = 19.48125
lon = -155.933222
t = Time(
["2006-01-15 21:24:37.5"] * 2,
format="iso",
scale="utc",
precision=6,
location=(lon, lat),
)
assert np.all(t.utc.iso == "2006-01-15 21:24:37.500000")
assert np.all(t.tdb.iso[0] == "2006-01-15 21:25:42.684373")
t2 = Time(
["2006-01-15 21:24:37.5"] * 2,
format="iso",
scale="utc",
precision=6,
location=(np.array([lon, 0]), np.array([lat, 0])),
)
assert np.all(t2.utc.iso == "2006-01-15 21:24:37.500000")
assert t2.tdb.iso[0] == "2006-01-15 21:25:42.684373"
assert t2.tdb.iso[1] != "2006-01-15 21:25:42.684373"
with pytest.raises(ValueError): # 1 time, but two locations
Time(
"2006-01-15 21:24:37.5",
format="iso",
scale="utc",
precision=6,
location=(np.array([lon, 0]), np.array([lat, 0])),
)
with pytest.raises(ValueError): # 3 times, but two locations
Time(
["2006-01-15 21:24:37.5"] * 3,
format="iso",
scale="utc",
precision=6,
location=(np.array([lon, 0]), np.array([lat, 0])),
)
# multidimensional
mjd = np.arange(50000.0, 50008.0).reshape(4, 2)
t3 = Time(mjd, format="mjd", scale="utc", location=(lon, lat))
assert t3.shape == (4, 2)
assert t3.location.shape == ()
assert t3.tdb.shape == t3.shape
t4 = Time(
mjd,
format="mjd",
scale="utc",
location=(np.array([lon, 0]), np.array([lat, 0])),
)
assert t4.shape == (4, 2)
assert t4.location.shape == t4.shape
assert t4.tdb.shape == t4.shape
t5 = Time(
mjd,
format="mjd",
scale="utc",
location=(
np.array([[lon], [0], [0], [0]]),
np.array([[lat], [0], [0], [0]]),
),
)
assert t5.shape == (4, 2)
assert t5.location.shape == t5.shape
assert t5.tdb.shape == t5.shape
def test_all_scale_transforms(self):
"""Test that standard scale transforms work. Does not test correctness,
except reversibility [#2074]. Also tests that standard scales can't be
converted to local scales"""
lat = 19.48125
lon = -155.933222
with iers.conf.set_temp("auto_download", False):
for scale1 in STANDARD_TIME_SCALES:
t1 = Time(
"2006-01-15 21:24:37.5",
format="iso",
scale=scale1,
location=(lon, lat),
)
for scale2 in STANDARD_TIME_SCALES:
t2 = getattr(t1, scale2)
t21 = getattr(t2, scale1)
assert allclose_jd(t21.jd, t1.jd)
# test for conversion to local scale
scale3 = "local"
with pytest.raises(ScaleValueError):
t2 = getattr(t1, scale3)
def test_creating_all_formats(self):
"""Create a time object using each defined format"""
Time(2000.5, format="decimalyear")
Time(100.0, format="cxcsec")
Time(100.0, format="galexsec")
Time(100.0, format="unix")
Time(100.0, format="gps")
Time(1950.0, format="byear", scale="tai")
Time(2000.0, format="jyear", scale="tai")
Time("B1950.0", format="byear_str", scale="tai")
Time("J2000.0", format="jyear_str", scale="tai")
Time("2000-01-01 12:23:34.0", format="iso", scale="tai")
Time("2000-01-01 12:23:34.0Z", format="iso", scale="utc")
Time("2000-01-01T12:23:34.0", format="isot", scale="tai")
Time("2000-01-01T12:23:34.0Z", format="isot", scale="utc")
Time("2000-01-01T12:23:34.0", format="fits")
Time("2000-01-01T12:23:34.0", format="fits", scale="tdb")
Time(2400000.5, 51544.0333981, format="jd", scale="tai")
Time(0.0, 51544.0333981, format="mjd", scale="tai")
Time("2000:001:12:23:34.0", format="yday", scale="tai")
Time("2000:001:12:23:34.0Z", format="yday", scale="utc")
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
Time(dt, format="datetime", scale="tai")
Time([dt, dt], format="datetime", scale="tai")
dt64 = np.datetime64("2012-06-18T02:00:05.453000000")
Time(dt64, format="datetime64", scale="tai")
Time([dt64, dt64], format="datetime64", scale="tai")
def test_local_format_transforms(self):
"""
Test transformation of local time to different formats
Transformation to formats with reference time should give
ScalevalueError
"""
t = Time("2006-01-15 21:24:37.5", scale="local")
assert_allclose(t.jd, 2453751.3921006946, atol=0.001 / 3600.0 / 24.0, rtol=0.0)
assert_allclose(t.mjd, 53750.892100694444, atol=0.001 / 3600.0 / 24.0, rtol=0.0)
assert_allclose(
t.decimalyear,
2006.0408002758752,
atol=0.001 / 3600.0 / 24.0 / 365.0,
rtol=0.0,
)
assert t.datetime == datetime.datetime(2006, 1, 15, 21, 24, 37, 500000)
assert t.isot == "2006-01-15T21:24:37.500"
assert t.yday == "2006:015:21:24:37.500"
assert t.fits == "2006-01-15T21:24:37.500"
assert_allclose(
t.byear, 2006.04217888831, atol=0.001 / 3600.0 / 24.0 / 365.0, rtol=0.0
)
assert_allclose(
t.jyear, 2006.0407723496082, atol=0.001 / 3600.0 / 24.0 / 365.0, rtol=0.0
)
assert t.byear_str == "B2006.042"
assert t.jyear_str == "J2006.041"
# epochTimeFormats
with pytest.raises(ScaleValueError):
t.gps
with pytest.raises(ScaleValueError):
t.unix
with pytest.raises(ScaleValueError):
t.cxcsec
with pytest.raises(ScaleValueError):
t.galexsec
with pytest.raises(ScaleValueError):
t.plot_date
def test_datetime(self):
"""
Test datetime format, including guessing the format from the input type
by not providing the format keyword to Time.
"""
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
dt2 = datetime.datetime(2001, 1, 1)
t = Time(dt, scale="utc", precision=9)
assert t.iso == "2000-01-02 03:04:05.123456000"
assert t.datetime == dt
assert t.value == dt
t2 = Time(t.iso, scale="utc")
assert t2.datetime == dt
t = Time([dt, dt2], scale="utc")
assert np.all(t.value == [dt, dt2])
t = Time("2000-01-01 01:01:01.123456789", scale="tai")
assert t.datetime == datetime.datetime(2000, 1, 1, 1, 1, 1, 123457)
# broadcasting
dt3 = (dt + (dt2 - dt) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale="utc")
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1])
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2]))
assert Time(t3[2, 0]) == t3[2, 0]
def test_datetime64(self):
dt64 = np.datetime64("2000-01-02T03:04:05.123456789")
dt64_2 = np.datetime64("2000-01-02")
t = Time(dt64, scale="utc", precision=9, format="datetime64")
assert t.iso == "2000-01-02 03:04:05.123456789"
assert t.datetime64 == dt64
assert t.value == dt64
t2 = Time(t.iso, scale="utc")
assert t2.datetime64 == dt64
t = Time(dt64_2, scale="utc", precision=3, format="datetime64")
assert t.iso == "2000-01-02 00:00:00.000"
assert t.datetime64 == dt64_2
assert t.value == dt64_2
t2 = Time(t.iso, scale="utc")
assert t2.datetime64 == dt64_2
t = Time([dt64, dt64_2], scale="utc", format="datetime64")
assert np.all(t.value == [dt64, dt64_2])
t = Time("2000-01-01 01:01:01.123456789", scale="tai")
assert t.datetime64 == np.datetime64("2000-01-01T01:01:01.123456789")
# broadcasting
dt3 = (dt64 + (dt64_2 - dt64) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale="utc", format="datetime64")
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1], format="datetime64")
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2], format="datetime64"))
assert Time(t3[2, 0], format="datetime64") == t3[2, 0]
def test_epoch_transform(self):
"""Besselian and julian epoch transforms"""
jd = 2457073.05631
t = Time(jd, format="jd", scale="tai", precision=6)
assert allclose_year(t.byear, 2015.1365941020817)
assert allclose_year(t.jyear, 2015.1349933196439)
assert t.byear_str == "B2015.136594"
assert t.jyear_str == "J2015.134993"
t2 = Time(t.byear, format="byear", scale="tai")
assert allclose_jd(t2.jd, jd)
t2 = Time(t.jyear, format="jyear", scale="tai")
assert allclose_jd(t2.jd, jd)
t = Time("J2015.134993", scale="tai", precision=6)
assert np.allclose(
t.jd, jd, rtol=1e-10, atol=0
) # J2015.134993 has 10 digit precision
assert t.byear_str == "B2015.136594"
def test_input_validation(self):
"""Wrong input type raises error"""
times = [10, 20]
with pytest.raises(ValueError):
Time(times, format="iso", scale="utc")
with pytest.raises(ValueError):
Time("2000:001", format="jd", scale="utc")
with pytest.raises(ValueError): # unguessable
Time([])
with pytest.raises(ValueError):
Time([50000.0], ["bad"], format="mjd", scale="tai")
with pytest.raises(ValueError):
Time(50000.0, "bad", format="mjd", scale="tai")
with pytest.raises(ValueError):
Time("2005-08-04T00:01:02.000Z", scale="tai")
# regression test against #3396
with pytest.raises(ValueError):
Time(np.nan, format="jd", scale="utc")
with pytest.raises(ValueError):
with pytest.warns(AstropyDeprecationWarning):
Time("2000-01-02T03:04:05(TAI)", scale="utc")
with pytest.raises(ValueError):
Time("2000-01-02T03:04:05(TAI")
with pytest.raises(ValueError):
Time("2000-01-02T03:04:05(UT(NIST)")
def test_utc_leap_sec(self):
"""Time behaves properly near or in UTC leap second. This
uses the 2012-06-30 leap second for testing."""
for year, month, day in ((2012, 6, 30), (2016, 12, 31)):
# Start with a day without a leap second and note rollover
yyyy_mm = f"{year:04d}-{month:02d}"
yyyy_mm_dd = f"{year:04d}-{month:02d}-{day:02d}"
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm + "-01 23:59:60.0", scale="utc")
assert t1.iso == yyyy_mm + "-02 00:00:00.000"
# Leap second is different
t1 = Time(yyyy_mm_dd + " 23:59:59.900", scale="utc")
assert t1.iso == yyyy_mm_dd + " 23:59:59.900"
t1 = Time(yyyy_mm_dd + " 23:59:60.000", scale="utc")
assert t1.iso == yyyy_mm_dd + " 23:59:60.000"
t1 = Time(yyyy_mm_dd + " 23:59:60.999", scale="utc")
assert t1.iso == yyyy_mm_dd + " 23:59:60.999"
if month == 6:
yyyy_mm_dd_plus1 = f"{year:04d}-07-01"
else:
yyyy_mm_dd_plus1 = f"{year + 1:04d}-01-01"
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm_dd + " 23:59:61.0", scale="utc")
assert t1.iso == yyyy_mm_dd_plus1 + " 00:00:00.000"
# Delta time gives 2 seconds here as expected
t0 = Time(yyyy_mm_dd + " 23:59:59", scale="utc")
t1 = Time(yyyy_mm_dd_plus1 + " 00:00:00", scale="utc")
assert allclose_sec((t1 - t0).sec, 2.0)
def test_init_from_time_objects(self):
"""Initialize from one or more Time objects"""
t1 = Time("2007:001", scale="tai")
t2 = Time(["2007-01-02", "2007-01-03"], scale="utc")
# Init from a list of Time objects without an explicit scale
t3 = Time([t1, t2])
# Test that init appropriately combines a scalar (t1) and list (t2)
# and that scale and format are same as first element.
assert len(t3) == 3
assert t3.scale == t1.scale
assert t3.format == t1.format # t1 format is yday
assert np.all(t3.value == np.concatenate([[t1.yday], t2.tai.yday]))
# Init from a single Time object without a scale
t3 = Time(t1)
assert t3.isscalar
assert t3.scale == t1.scale
assert t3.format == t1.format
assert np.all(t3.value == t1.value)
# Init from a single Time object with scale specified
t3 = Time(t1, scale="utc")
assert t3.scale == "utc"
assert np.all(t3.value == t1.utc.value)
# Init from a list of Time object with scale specified
t3 = Time([t1, t2], scale="tt")
assert t3.scale == "tt"
assert t3.format == t1.format # yday
assert np.all(t3.value == np.concatenate([[t1.tt.yday], t2.tt.yday]))
# OK, how likely is this... but might as well test.
mjd = np.arange(50000.0, 50006.0)
frac = np.arange(0.0, 0.999, 0.2)
t4 = Time(mjd[:, np.newaxis] + frac, format="mjd", scale="utc")
t5 = Time([t4[:2], t4[4:5]])
assert t5.shape == (3, 5)
# throw error when deriving local scale time
# from non local time scale
with pytest.raises(ValueError):
Time(t1, scale="local")
| TestBasic |
python | langchain-ai__langchain | libs/langchain/langchain_classic/smith/evaluation/progress.py | {
"start": 338,
"end": 3658
} | class ____(base_callbacks.BaseCallbackHandler):
"""A simple progress bar for the console."""
def __init__(
self,
total: int,
ncols: int = 50,
end_with: str = "\n",
):
"""Initialize the progress bar.
Args:
total: The total number of items to be processed.
ncols: The character width of the progress bar.
end_with: Last string to print after progress bar reaches end.
"""
self.total = total
self.ncols = ncols
self.end_with = end_with
self.counter = 0
self.lock = threading.Lock()
self._print_bar()
def increment(self) -> None:
"""Increment the counter and update the progress bar."""
with self.lock:
self.counter += 1
self._print_bar()
def _print_bar(self) -> None:
"""Print the progress bar to the console."""
progress = self.counter / self.total
arrow = "-" * int(round(progress * self.ncols) - 1) + ">"
spaces = " " * (self.ncols - len(arrow))
end = "" if self.counter < self.total else self.end_with
print(f"\r[{arrow + spaces}] {self.counter}/{self.total}", end=end) # noqa: T201
@override
def on_chain_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
@override
def on_chain_end(
self,
outputs: dict[str, Any],
*,
run_id: UUID,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
@override
def on_retriever_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
@override
def on_retriever_end(
self,
documents: Sequence[Document],
*,
run_id: UUID,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
@override
def on_llm_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
@override
def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
@override
def on_tool_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
@override
def on_tool_end(
self,
output: str,
*,
run_id: UUID,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
| ProgressBarCallback |
python | getsentry__sentry | src/sentry/sentry_apps/api/bases/sentryapps.py | {
"start": 11538,
"end": 12702
} | class ____(SentryPermission):
scope_map = {
"GET": ("org:read", "org:integrations", "org:write", "org:admin"),
"POST": ("org:integrations", "org:write", "org:admin"),
}
def has_object_permission(self, request: Request, view, organization):
if not hasattr(request, "user") or not request.user:
return False
self.determine_access(request, organization)
if superuser_has_permission(request):
return True
organizations = (
user_service.get_organizations(user_id=request.user.id)
if request.user.id is not None
else ()
)
if not any(organization.id == org.id for org in organizations):
raise SentryAppError(
message="User must belong to the given organization",
status_code=403,
public_context={"user_organizations": [org.slug for org in organizations]},
)
assert request.method, "method must be present in request to get permissions"
return ensure_scoped_permission(request, self.scope_map.get(request.method))
| SentryAppInstallationsPermission |
python | ray-project__ray | python/ray/data/_internal/execution/backpressure_policy/downstream_capacity_backpressure_policy.py | {
"start": 571,
"end": 3620
} | class ____(BackpressurePolicy):
"""Backpressure policy based on downstream processing capacity.
This policy triggers backpressure when the output bundles size exceeds both:
1. A ratio threshold multiplied by the number of running tasks in downstream operators
2. An absolute threshold for the output bundles size
The policy monitors actual downstream processing capacity by tracking the number
of currently running tasks rather than configured parallelism. This approach
ensures effective backpressure even when cluster resources are insufficient or
scaling is slow, preventing memory pressure and maintaining pipeline stability.
Key benefits:
- Prevents memory bloat from unprocessed output objects
- Adapts to actual cluster conditions and resource availability
- Maintains balanced throughput across pipeline operators
- Reduces object spilling and unnecessary rebuilds
"""
def __init__(
self,
data_context: DataContext,
topology: "Topology",
resource_manager: "ResourceManager",
):
super().__init__(data_context, topology, resource_manager)
self._backpressure_concurrency_ratio = (
self._data_context.downstream_capacity_backpressure_ratio
)
self._backpressure_max_queued_blocks = (
self._data_context.downstream_capacity_backpressure_max_queued_bundles
)
self._backpressure_disabled = (
self._backpressure_concurrency_ratio is None
or self._backpressure_max_queued_blocks is None
)
def _max_concurrent_tasks(self, op: "PhysicalOperator") -> int:
if isinstance(op, ActorPoolMapOperator):
return sum(
[
actor_pool.max_concurrent_tasks()
for actor_pool in op.get_autoscaling_actor_pools()
]
)
return op.num_active_tasks()
def can_add_input(self, op: "PhysicalOperator") -> bool:
"""Determine if we can add input to the operator based on downstream capacity."""
if self._backpressure_disabled:
return True
for output_dependency in op.output_dependencies:
total_enqueued_blocks = self._topology[
output_dependency
].total_enqueued_input_blocks()
avg_inputs_per_task = (
output_dependency.metrics.num_task_inputs_processed
/ max(output_dependency.metrics.num_tasks_finished, 1)
)
outstanding_tasks = total_enqueued_blocks / max(avg_inputs_per_task, 1)
max_allowed_outstanding = (
self._max_concurrent_tasks(output_dependency)
* self._backpressure_concurrency_ratio
)
if (
total_enqueued_blocks > self._backpressure_max_queued_blocks
and outstanding_tasks > max_allowed_outstanding
):
return False
return True
| DownstreamCapacityBackpressurePolicy |
python | ray-project__ray | rllib/env/wrappers/pettingzoo_env.py | {
"start": 173,
"end": 5232
} | class ____(MultiAgentEnv):
"""An interface to the PettingZoo MARL environment library.
See: https://github.com/Farama-Foundation/PettingZoo
Inherits from MultiAgentEnv and exposes a given AEC
(actor-environment-cycle) game from the PettingZoo project via the
MultiAgentEnv public API.
Note that the wrapper has the following important limitation:
Environments are positive sum games (-> Agents are expected to cooperate
to maximize reward). This isn't a hard restriction, it just that
standard algorithms aren't expected to work well in highly competitive
games.
Also note that the earlier existing restriction of all agents having the same
observation- and action spaces has been lifted. Different agents can now have
different spaces and the entire environment's e.g. `self.action_space` is a Dict
mapping agent IDs to individual agents' spaces. Same for `self.observation_space`.
.. testcode::
:skipif: True
from pettingzoo.butterfly import prison_v3
from ray.rllib.env.wrappers.pettingzoo_env import PettingZooEnv
env = PettingZooEnv(prison_v3.env())
obs, infos = env.reset()
# only returns the observation for the agent which should be stepping
print(obs)
.. testoutput::
{
'prisoner_0': array([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
...,
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], dtype=uint8)
}
.. testcode::
:skipif: True
obs, rewards, terminateds, truncateds, infos = env.step({
"prisoner_0": 1
})
# only returns the observation, reward, info, etc, for
# the agent who's turn is next.
print(obs)
.. testoutput::
{
'prisoner_1': array([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
...,
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], dtype=uint8)
}
.. testcode::
:skipif: True
print(rewards)
.. testoutput::
{
'prisoner_1': 0
}
.. testcode::
:skipif: True
print(terminateds)
.. testoutput::
{
'prisoner_1': False, '__all__': False
}
.. testcode::
:skipif: True
print(truncateds)
.. testoutput::
{
'prisoner_1': False, '__all__': False
}
.. testcode::
:skipif: True
print(infos)
.. testoutput::
{
'prisoner_1': {'map_tuple': (1, 0)}
}
"""
def __init__(self, env):
super().__init__()
self.env = env
env.reset()
self._agent_ids = set(self.env.agents)
# If these important attributes are not set, try to infer them.
if not self.agents:
self.agents = list(self._agent_ids)
if not self.possible_agents:
self.possible_agents = self.agents.copy()
# Set these attributes for sampling in `VectorMultiAgentEnv`s.
self.observation_spaces = {
aid: self.env.observation_space(aid) for aid in self._agent_ids
}
self.action_spaces = {
aid: self.env.action_space(aid) for aid in self._agent_ids
}
self.observation_space = gym.spaces.Dict(self.observation_spaces)
self.action_space = gym.spaces.Dict(self.action_spaces)
def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None):
info = self.env.reset(seed=seed, options=options)
return (
{self.env.agent_selection: self.env.observe(self.env.agent_selection)},
info or {},
)
def step(self, action):
self.env.step(action[self.env.agent_selection])
obs_d = {}
rew_d = {}
terminated_d = {}
truncated_d = {}
info_d = {}
while self.env.agents:
obs, rew, terminated, truncated, info = self.env.last()
agent_id = self.env.agent_selection
obs_d[agent_id] = obs
rew_d[agent_id] = rew
terminated_d[agent_id] = terminated
truncated_d[agent_id] = truncated
info_d[agent_id] = info
if (
self.env.terminations[self.env.agent_selection]
or self.env.truncations[self.env.agent_selection]
):
self.env.step(None)
else:
break
all_gone = not self.env.agents
terminated_d["__all__"] = all_gone and all(terminated_d.values())
truncated_d["__all__"] = all_gone and all(truncated_d.values())
return obs_d, rew_d, terminated_d, truncated_d, info_d
def close(self):
self.env.close()
def render(self):
return self.env.render(self.render_mode)
@property
def get_sub_environments(self):
return self.env.unwrapped
@PublicAPI
| PettingZooEnv |
python | simonw__datasette | datasette/utils/asgi.py | {
"start": 10833,
"end": 13765
} | class ____:
def __init__(self, body=None, status=200, headers=None, content_type="text/plain"):
self.body = body
self.status = status
self.headers = headers or {}
self._set_cookie_headers = []
self.content_type = content_type
async def asgi_send(self, send):
headers = {}
headers.update(self.headers)
headers["content-type"] = self.content_type
raw_headers = [
[key.encode("utf-8"), value.encode("utf-8")]
for key, value in headers.items()
]
for set_cookie in self._set_cookie_headers:
raw_headers.append([b"set-cookie", set_cookie.encode("utf-8")])
await send(
{
"type": "http.response.start",
"status": self.status,
"headers": raw_headers,
}
)
body = self.body
if not isinstance(body, bytes):
body = body.encode("utf-8")
await send({"type": "http.response.body", "body": body})
def set_cookie(
self,
key,
value="",
max_age=None,
expires=None,
path="/",
domain=None,
secure=False,
httponly=False,
samesite="lax",
):
assert samesite in SAMESITE_VALUES, "samesite should be one of {}".format(
SAMESITE_VALUES
)
cookie = SimpleCookie()
cookie[key] = value
for prop_name, prop_value in (
("max_age", max_age),
("expires", expires),
("path", path),
("domain", domain),
("samesite", samesite),
):
if prop_value is not None:
cookie[key][prop_name.replace("_", "-")] = prop_value
for prop_name, prop_value in (("secure", secure), ("httponly", httponly)):
if prop_value:
cookie[key][prop_name] = True
self._set_cookie_headers.append(cookie.output(header="").strip())
@classmethod
def html(cls, body, status=200, headers=None):
return cls(
body,
status=status,
headers=headers,
content_type="text/html; charset=utf-8",
)
@classmethod
def text(cls, body, status=200, headers=None):
return cls(
str(body),
status=status,
headers=headers,
content_type="text/plain; charset=utf-8",
)
@classmethod
def json(cls, body, status=200, headers=None, default=None):
return cls(
json.dumps(body, default=default),
status=status,
headers=headers,
content_type="application/json; charset=utf-8",
)
@classmethod
def redirect(cls, path, status=302, headers=None):
headers = headers or {}
headers["Location"] = path
return cls("", status=status, headers=headers)
| Response |
python | cython__cython | tests/run/posonly.py | {
"start": 18330,
"end": 18709
} | class ____:
"""
>>> t = TestExtensionClass()
>>> t.f(1,2)
(1, 2, 3)
>>> t.f(1,2,4)
(1, 2, 4)
>>> t.f(1, 2, c=4)
(1, 2, 4)
>>> t.f(1, 2, 5, c=6) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: ...f() got multiple values for ...argument 'c'
"""
def f(self, a, b, /, c=3):
return (a,b,c)
| TestExtensionClass |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 84095,
"end": 86762
} | class ____(Response):
"""
Response of events.get_task_log endpoint.
:param events: Log items list
:type events: Sequence[dict]
:param returned: Number of log events returned
:type returned: int
:param total: Total number of log events available for this query
:type total: float
"""
_service = "events"
_action = "get_task_log"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"events": {
"description": "Log items list",
"items": {"type": "object"},
"type": ["array", "null"],
},
"returned": {
"description": "Number of log events returned",
"type": ["integer", "null"],
},
"total": {
"description": "Total number of log events available for this query",
"type": ["number", "null"],
},
},
"type": "object",
}
def __init__(
self,
events: Optional[List[dict]] = None,
returned: Optional[int] = None,
total: Optional[float] = None,
**kwargs: Any
) -> None:
super(GetTaskLogResponse, self).__init__(**kwargs)
self.events = events
self.returned = returned
self.total = total
@schema_property("events")
def events(self) -> Optional[List[dict]]:
return self._property_events
@events.setter
def events(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_events = None
return
self.assert_isinstance(value, "events", (list, tuple))
self.assert_isinstance(value, "events", (dict,), is_array=True)
self._property_events = value
@schema_property("returned")
def returned(self) -> Optional[int]:
return self._property_returned
@returned.setter
def returned(self, value: Optional[int]) -> None:
if value is None:
self._property_returned = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "returned", six.integer_types)
self._property_returned = value
@schema_property("total")
def total(self) -> Optional[float]:
return self._property_total
@total.setter
def total(self, value: Optional[float]) -> None:
if value is None:
self._property_total = None
return
self.assert_isinstance(value, "total", six.integer_types + (float,))
self._property_total = value
| GetTaskLogResponse |
python | jazzband__django-oauth-toolkit | oauth2_provider/views/application.py | {
"start": 1491,
"end": 1748
} | class ____(ApplicationOwnerIsUserMixin, DetailView):
"""
Detail view for an application instance owned by the request.user
"""
context_object_name = "application"
template_name = "oauth2_provider/application_detail.html"
| ApplicationDetail |
python | kamyu104__LeetCode-Solutions | Python/egg-drop-with-2-eggs-and-n-floors.py | {
"start": 127,
"end": 336
} | class ____(object):
def twoEggDrop(self, n):
"""
:type n: int
:rtype: int
"""
return int(math.ceil((-1+(1+8*n)**0.5)/2))
# Time: O(k * n^2)
# Space: O(n)
| Solution |
python | automl__auto-sklearn | test/mocks/logging.py | {
"start": 180,
"end": 1406
} | class ____(PicklableClientLogger):
"""Should not be used for testing the actual loggers functionality
Overwrites all methods with mock objects that can be queries
* All logging methods do nothing
* isEnabledFor returns True for everything as it's part of the logging config we
don't have access to
* __setstate__ and __getstate__ remain the same and are not mocked
"""
def __init__(
self,
name: Optional[str] = None,
host: Optional[str] = None,
port: Optional[int] = None,
):
self.name = name or MOCKNAME
self.host = host or MOCKHOST
self.port = port or MOCKPORT
# Overwrite the logging implementations with mocks
self.debug = Mock(return_value=None) # type: ignore
self.info = Mock(return_value=None) # type: ignore
self.warning = Mock(return_value=None) # type: ignore
self.error = Mock(return_value=None) # type: ignore
self.exception = Mock(return_value=None) # type: ignore
self.critical = Mock(return_value=None) # type: ignore
self.log = Mock(return_value=None) # type: ignore
self.isEnabledFor = Mock(return_value=True) # type: ignore
| MockLogger |
python | scikit-learn__scikit-learn | sklearn/externals/array_api_compat/common/_typing.py | {
"start": 3029,
"end": 3188
} | class ____(TypedDict):
uint8: DType
uint16: DType
uint32: DType
uint64: DType
# `__array_namespace_info__.dtypes(kind="integral")`
| DTypesUnsigned |
python | django__django | django/middleware/csp.py | {
"start": 220,
"end": 1279
} | class ____(MiddlewareMixin):
def process_request(self, request):
request._csp_nonce = LazyNonce()
def process_response(self, request, response):
nonce = get_nonce(request)
sentinel = object()
if (csp_config := getattr(response, "_csp_config", sentinel)) is sentinel:
csp_config = settings.SECURE_CSP
if (csp_ro_config := getattr(response, "_csp_ro_config", sentinel)) is sentinel:
csp_ro_config = settings.SECURE_CSP_REPORT_ONLY
for header, config in [
(CSP.HEADER_ENFORCE, csp_config),
(CSP.HEADER_REPORT_ONLY, csp_ro_config),
]:
# If headers are already set on the response, don't overwrite them.
# This allows for views to set their own CSP headers as needed.
# An empty config means CSP headers are not added to the response.
if config and header not in response:
response.headers[str(header)] = build_policy(config, nonce)
return response
| ContentSecurityPolicyMiddleware |
python | apache__airflow | devel-common/src/tests_common/test_utils/mock_operators.py | {
"start": 1722,
"end": 1925
} | class ____(BaseOperatorLink):
"""Operator Link for Apache Airflow Website."""
name = "airflow"
def get_link(self, operator, *, ti_key):
return "https://airflow.apache.org"
| AirflowLink |
python | optuna__optuna | optuna/distributions.py | {
"start": 3217,
"end": 6998
} | class ____(BaseDistribution):
"""A distribution on floats.
This object is instantiated by :func:`~optuna.trial.Trial.suggest_float`, and passed to
:mod:`~optuna.samplers` in general.
.. note::
When ``step`` is not :obj:`None`, if the range :math:`[\\mathsf{low}, \\mathsf{high}]`
is not divisible by :math:`\\mathsf{step}`, :math:`\\mathsf{high}` will be replaced
with the maximum of :math:`k \\times \\mathsf{step} + \\mathsf{low} < \\mathsf{high}`,
where :math:`k` is an integer.
Attributes:
low:
Lower endpoint of the range of the distribution. ``low`` is included in the range.
``low`` must be less than or equal to ``high``. If ``log`` is :obj:`True`,
``low`` must be larger than 0.
high:
Upper endpoint of the range of the distribution. ``high`` is included in the range.
``high`` must be greater than or equal to ``low``.
log:
If ``log`` is :obj:`True`, this distribution is in log-scaled domain.
In this case, all parameters enqueued to the distribution must be positive values.
This parameter must be :obj:`False` when the parameter ``step`` is not :obj:`None`.
step:
A discretization step. ``step`` must be larger than 0.
This parameter must be :obj:`None` when the parameter ``log`` is :obj:`True`.
"""
def __init__(
self, low: float, high: float, log: bool = False, step: None | float = None
) -> None:
if log and step is not None:
raise ValueError("The parameter `step` is not supported when `log` is true.")
if low > high:
raise ValueError(f"`low <= high` must hold, but got ({low=}, {high=}).")
if log and low <= 0.0:
raise ValueError(f"`low > 0` must hold for `log=True`, but got ({low=}, {high=}).")
if step is not None and step <= 0:
raise ValueError(f"`step > 0` must hold, but got {step=}.")
self.step = None
if step is not None:
high = _adjust_discrete_uniform_high(low, high, step)
self.step = float(step)
self.low = float(low)
self.high = float(high)
self.log = log
def single(self) -> bool:
if self.step is None:
return self.low == self.high
else:
if self.low == self.high:
return True
high = decimal.Decimal(str(self.high))
low = decimal.Decimal(str(self.low))
step = decimal.Decimal(str(self.step))
return (high - low) < step
def _contains(self, param_value_in_internal_repr: float) -> bool:
value = param_value_in_internal_repr
if self.step is None:
return self.low <= value <= self.high
else:
k = (value - self.low) / self.step
return self.low <= value <= self.high and abs(k - round(k)) < 1.0e-8
def to_internal_repr(self, param_value_in_external_repr: float) -> float:
try:
internal_repr = float(param_value_in_external_repr)
except (ValueError, TypeError) as e:
raise ValueError(
f"'{param_value_in_external_repr}' is not a valid type. "
"float-castable value is expected."
) from e
if math.isnan(internal_repr):
raise ValueError(f"`{param_value_in_external_repr}` is invalid value.")
if self.log and internal_repr <= 0.0:
raise ValueError(
f"`{param_value_in_external_repr}` is invalid value for the case log=True."
)
return internal_repr
@deprecated_class("3.0.0", "6.0.0", text=_float_distribution_deprecated_msg)
| FloatDistribution |
python | pytorch__pytorch | torch/distributed/fsdp/sharded_grad_scaler.py | {
"start": 1313,
"end": 17104
} | class ____(GradScaler):
"""
ShardedGradScaler helps perform gradient scaling in a shard aware manner. It extends
functionality from GradScaler:
* Supports Pytorch DDP and FSDP implementations
* Support CPU offloaded tensors (as used in fully sharded data parallel[FSDP])
* Supports the custom Mixed Precision loss dtype (fp16, bf16) that FSDP returns
* Sync inf/nan for scaled gradient tensors on any torch.device (where tensors are placed) across
nodes
Example::
# Creates a ShardedGradScaler once at the beginning of training.
scaler = ShardedGradScaler()
for epoch in epochs:
for input, target in data:
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
# Scales loss. Calls backward() on scaled loss to create scaled gradients.
scaler.scale(loss).backward()
# scaler.step() first unscales gradients of the optimizer's params.
# If gradients don't contain infs/NaNs, optimizer.step() is then called,
# otherwise, optimizer.step() is skipped.
scaler.step(optimizer)
# Updates the scale for next iteration.
scaler.update()
See :class:`GradScaler` for explanation of scaling/unscaling and more use cases.
Args:
init_scale (float, optional, default=2.**16): Initial scale factor.
growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during
:meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations.
backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during
:meth:`update` if inf/NaN gradients occur in an iteration.
growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients
that must occur for the scale to be multiplied by ``growth_factor``.
enabled (bool, optional): If ``False``, disables gradient scaling. :meth:`step` simply
invokes the underlying ``optimizer.step()``, and other methods become no-ops.
Default: ``True``
process_group (ProcessGroup, optional, default=torch.distributed.group.WORLD):
process group for sharding
"""
def __init__(
self,
device: str = "cuda",
init_scale: float = 2.0**16,
backoff_factor: float = 0.5,
growth_factor: float = 2.0,
growth_interval: int = 2000,
enabled: bool = True,
process_group: Optional[ProcessGroup] = dist.group.WORLD,
) -> None:
super().__init__(
device,
init_scale=init_scale,
backoff_factor=backoff_factor,
growth_factor=growth_factor,
growth_interval=growth_interval,
enabled=enabled,
)
if self._enabled:
self.process_group = process_group
self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
@overload
def scale(self, outputs: torch.Tensor) -> torch.Tensor: ...
@overload
def scale(self, outputs: list[torch.Tensor]) -> list[torch.Tensor]: ...
@overload
def scale(self, outputs: tuple[torch.Tensor, ...]) -> tuple[torch.Tensor, ...]: ...
@overload
def scale(self, outputs: Iterable[torch.Tensor]) -> Iterable[torch.Tensor]: ...
def scale(
self, outputs: Union[torch.Tensor, Iterable[torch.Tensor]]
) -> Union[torch.Tensor, Iterable[torch.Tensor]]:
if not self._enabled:
return outputs
if isinstance(outputs, torch.Tensor):
if not _is_supported_device(outputs):
raise AssertionError(f"Expected supported device, got {outputs.device}")
if self._scale is None:
self._lazy_init_scale_growth_tracker(outputs.device)
if self._scale is None:
raise AssertionError("Expected _scale to be initialized, got None")
scaled_output = outputs * self._scale.to(
device=outputs.device, non_blocking=True
)
# Here we ensure the return dtype is the same as the outputs dtype.
# For the FSDP + Mixed Precision use case, the loss output is in the Mixed Precision
# format (fp16, bf16) and so the scaled loss should be of the same dtype.
return scaled_output.type(outputs.dtype)
stash: list[_GeneralMultiDeviceReplicator] = []
def apply_scale(val: Union[torch.Tensor, Iterable[torch.Tensor]]):
if isinstance(val, torch.Tensor):
if not _is_supported_device(val):
raise AssertionError(f"Expected supported device, got {val.device}")
if len(stash) == 0:
if self._scale is None:
self._lazy_init_scale_growth_tracker(val.device)
if self._scale is None:
raise AssertionError(
"Expected _scale to be initialized, got None"
)
stash.append(_GeneralMultiDeviceReplicator(self._scale))
scaled_val = val * stash[0].get(val.device)
# Here we ensure the return dtype is the same as the outputs dtype.
# For the FSDP + Mixed Precision use case, the loss output is in the Mixed Precision
# format (fp16, bf16) and so the scaled loss should be of the same dtype.
return scaled_val.type(val.dtype)
if isinstance(val, abc.Iterable):
iterator = map(apply_scale, val)
if isinstance(val, (list, tuple)):
return type(val)(iterator)
return iterator
raise ValueError("outputs must be a Tensor or an iterable of Tensors")
return apply_scale(outputs)
def _unscale_grads_(
self,
optimizer: torch.optim.Optimizer,
inv_scale: torch.Tensor,
found_inf: torch.Tensor,
allow_fp16: bool = True,
) -> dict[torch.device, torch.Tensor]:
per_device_inv_scale = _GeneralMultiDeviceReplicator(inv_scale)
per_device_found_inf = _GeneralMultiDeviceReplicator(found_inf)
# To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype.
# There could be thousands of grads, so we'd like to iterate through them just once.
# However, we don't know their devices or dtypes in advance.
# https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict
# Google says mypy struggles with defaultdicts type annotations.
per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) # type: ignore[var-annotated]
with torch.no_grad():
for group in optimizer.param_groups:
for param in group["params"]:
if param.grad is None:
continue
if (not allow_fp16) and param.grad.dtype == torch.float16:
raise ValueError("Attempting to unscale FP16 gradients.")
if param.grad.is_sparse:
# is_coalesced() == False means the sparse grad has values with duplicate indices.
# coalesce() deduplicates indices and adds all values that have the same index.
# For scaled fp16 values, there's a good chance coalescing will cause overflow,
# so we should check the coalesced _values().
if param.grad.dtype is torch.float16:
# coalesce is not supported in torch.float16
param_grad_fp32 = param.grad.type(torch.float32).coalesce()
param.grad = param_grad_fp32.type(torch.float16)
to_unscale = param.grad._values()
else:
to_unscale = param.grad
per_device_and_dtype_grads[to_unscale.device][
to_unscale.dtype
].append(to_unscale)
for device, per_dtype_grads in per_device_and_dtype_grads.items():
for grads in per_dtype_grads.values():
torch._amp_foreach_non_finite_check_and_unscale_(
grads,
per_device_found_inf.get(device),
per_device_inv_scale.get(device),
)
# There exist contexts (e.g. w/ `use_orig_params=True`) wherein some
# ranks may have no (non-zero sized) parameter shards, necessitating the
# initialization of `per_device_found_inf._per_device_tensors` here
if not per_device_found_inf._per_device_tensors:
if self._scale is None:
raise AssertionError("Expected _scale to be initialized, got None")
per_device_found_inf.get(self._scale.device)
return per_device_found_inf._per_device_tensors
def unscale_(self, optimizer: torch.optim.Optimizer) -> None:
if not self._enabled:
return
self._check_scale_growth_tracker("unscale_")
optimizer_state = self._per_optimizer_states[id(optimizer)]
if optimizer_state["stage"] is OptState.UNSCALED:
raise RuntimeError(
"unscale_() has already been called on this optimizer since the last update()."
)
elif optimizer_state["stage"] is OptState.STEPPED:
raise RuntimeError("unscale_() is being called after step().")
# FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
if self._scale is None:
raise AssertionError("Expected _scale to be initialized, got None")
inv_scale = self._scale.double().reciprocal().float()
found_inf = torch.full(
(1,), 0.0, dtype=torch.float32, device=self._scale.device
)
optimizer_state["found_inf_per_device"] = self._unscale_grads_(
optimizer, inv_scale, found_inf, True
)
optimizer_state["stage"] = OptState.UNSCALED
# Synchronize the detected inf across the ranks
optimizer_state = self._per_optimizer_states[id(optimizer)]
works = []
found_inf_on_cpus = []
found_inf_on_devices = []
for found_inf in optimizer_state["found_inf_per_device"].values():
if self._device != "cpu" and found_inf.device.type == "cpu":
found_inf_on_cpus.append(found_inf)
found_inf_on_device = found_inf.to(self._device)
found_inf_on_devices.append(found_inf_on_device)
works.append(
dist.all_reduce(
found_inf_on_device, async_op=True, group=self.process_group
)
)
else:
works.append(
dist.all_reduce(found_inf, async_op=True, group=self.process_group)
)
for work in works:
work.wait()
if found_inf_on_cpus:
torch._foreach_copy_(found_inf_on_cpus, found_inf_on_devices)
def _amp_update_scale_cpu_(self, found_inf: torch.Tensor) -> None:
"""
If found_inf is 1.0 (True), then scale is multiplied by backoff_factor and growth_tracker is set to zero.
Otherwise, scale is multiplied by the growth factor when the growth interval is reached.
"""
if self._scale is None or self._growth_tracker is None:
raise AssertionError(
"Expected _scale and _growth_tracker to be initialized, got None"
)
if found_inf.item() >= 1.0:
self._scale *= self._backoff_factor
self._growth_tracker.fill_(0)
else:
successful = self._growth_tracker + 1
if successful == self._growth_interval:
self._scale *= self._growth_factor
self._growth_tracker.fill_(0)
else:
self._growth_tracker = successful
def update(self, new_scale: Optional[Union[float, torch.Tensor]] = None) -> None:
"""
Updates the scale factor.
If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
the scale is multiplied by ``growth_factor`` to increase it.
Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
used directly, it's used to fill GradScaler's internal scale tensor. So if
``new_scale`` was a tensor, later in-place changes to that tensor will not further
affect the scale GradScaler uses internally.)
Args:
new_scale (float or :class:`torch.Tensor`, optional, default=None): New scale factor.
.. warning::
:meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
been invoked for all optimizers used this iteration.
"""
if not self._enabled:
return
_scale, _growth_tracker = self._check_scale_growth_tracker("update") # type: ignore[var-annotated]
if new_scale is not None:
# Accept a new user-defined scale.
if isinstance(new_scale, float):
self._scale.fill_(new_scale) # type: ignore[union-attr]
else:
reason = (
"new_scale should be a float or a 1-element torch.cuda.FloatTensor or "
"torch.FloatTensor with requires_grad=False."
)
if new_scale.device.type != self._device:
raise AssertionError(reason)
if new_scale.numel() != 1:
raise AssertionError(reason)
if new_scale.requires_grad is not False:
raise AssertionError(reason)
self._scale.copy_(new_scale) # type: ignore[union-attr]
else:
# Consume shared inf/nan data collected from optimizers to update the scale.
# If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
found_infs = [
found_inf.to(device=_scale.device, non_blocking=True)
for state in self._per_optimizer_states.values()
for found_inf in state["found_inf_per_device"].values()
]
if len(found_infs) == 0:
raise AssertionError("No inf checks were recorded prior to update.")
found_inf_combined = found_infs[0]
if len(found_infs) > 1:
for i in range(1, len(found_infs)):
found_inf_combined += found_infs[i]
if _scale.device.type == "cpu":
self._amp_update_scale_cpu_(found_inf_combined)
else:
torch._amp_update_scale_(
self._scale, # type: ignore[arg-type]
self._growth_tracker, # type: ignore[arg-type]
found_inf_combined,
self._growth_factor, # type: ignore[arg-type]
self._backoff_factor, # type: ignore[arg-type]
self._growth_interval, # type: ignore[arg-type]
)
# To prepare for next iteration, clear the data collected from optimizers this iteration.
self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
| ShardedGradScaler |
python | ray-project__ray | python/ray/serve/_private/proxy_response_generator.py | {
"start": 476,
"end": 2637
} | class ____(ABC):
def __init__(
self,
*,
timeout_s: Optional[float] = None,
disconnected_task: Optional[asyncio.Task] = None,
result_callback: Optional[Callable[[Any], Any]] = None,
):
"""Implements a generator wrapping a deployment response.
Args:
- timeout_s: an end-to-end timeout for the request. If this expires and the
response is not completed, the request will be cancelled. If `None`,
there's no timeout.
- disconnected_task: a task whose completion signals that the client has
disconnected. When this happens, the request will be cancelled. If `None`,
disconnects will not be detected.
- result_callback: will be called on each result before it's returned. If
`None`, the unmodified result is returned.
"""
self._timeout_s = timeout_s
self._start_time_s = time.time()
self._disconnected_task = disconnected_task
self._result_callback = result_callback
def __aiter__(self):
return self
@abstractmethod
async def __anext__(self):
"""Return the next message in the stream.
Raises:
TimeoutError: On timeout.
asyncio.CancelledError: On disconnect.
StopAsyncIteration: When the stream is completed.
"""
pass
def stop_checking_for_disconnect(self):
"""Once this is called, the disconnected_task will be ignored."""
self._disconnected_task = None
def swallow_cancelled(task: asyncio.Task):
try:
task.result()
except (RequestCancelledError, asyncio.CancelledError):
# We expect RequestCancelledError to be raised because for disconnect or
# timeouts, we explicitly call resp.cancel(). To avoid "Task exception
# was never retrieved" errors from spamming the proxy logs, swallow
# them here.
pass
except Exception:
# For all other exceptions, do not catch and instead re-raise here so that
# they will be logged properly.
raise
| _ProxyResponseGeneratorBase |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/unreachable1.py | {
"start": 229,
"end": 2494
} | class ____:
b: bool
@staticmethod
def method1():
"""
Docstring
"""
raise NotImplementedError("Not Implemented")
def method2(self, a: int):
"""
Docstring
"""
if a < 10 or self.b:
raise NotImplementedError()
@abstractmethod
def method3(self):
print(self.b)
raise RuntimeError()
def method4(self) -> None:
print(self.b)
raise RuntimeError()
def method5(self) -> NoReturn:
print(self.b)
raise RuntimeError()
def func2():
func1()
# This should not be marked unreachable because NotImplementedError
# is special-cased.
return 3
def func3(foo: Foo):
foo.method1()
return 3
def func4(foo: Foo):
foo.method2(2)
return 3
def func5(foo: Foo):
foo.method3()
return 3
def func6(foo: Foo):
foo.method4()
return 3
def func7(foo: Foo):
foo.method5()
# This should be marked as unreachable.
# If reportUnreachable is enabled, it should generate a diagnostic.
return 3
def func8() -> NoReturn:
raise NameError()
def func9():
func8()
# This should be marked unreachable.
# If reportUnreachable is enabled, it should generate a diagnostic.
return 3
def func10():
e = OSError()
a1 = os.name == "nt" and None == e.errno
reveal_type(a1, expected_text="bool")
a2 = True and os.name == "nt"
reveal_type(a2, expected_text="bool")
if os.name == "nt":
# This should be marked unreachable.
b = e.errno
if sys.version_info >= (4, 0):
# This should be marked unreachable.
b = e.errno
return
# This should be marked unreachable.
# If reportUnreachable is enabled, it should generate a diagnostic.
b = e.errno
def func11(obj: str) -> list:
if isinstance(obj, str):
return []
else:
# This should be marked as unreachable.
# If reportUnreachable is enabled, it should generate a diagnostic.
return obj
def func12(obj: str) -> list:
if isinstance(obj, str):
return []
# This should be marked as unreachable.
# If reportUnreachable is enabled, it should generate a diagnostic.
return obj
| Foo |
python | realpython__materials | python-protocol/adder_v6.py | {
"start": 198,
"end": 287
} | class ____:
def add(self, x: float, y: float) -> float:
return x + y
| FloatAdder |
python | Lightning-AI__lightning | tests/tests_fabric/utilities/test_data.py | {
"start": 3542,
"end": 3773
} | class ____(DataLoader):
def __init__(self, options, *args, **kwargs):
super().__init__(*args, **kwargs)
self._options = options
@property
def options(self):
return self._options
| PoptorchDataLoader |
python | walkccc__LeetCode | solutions/1913. Maximum Product Difference Between Two Pairs/1913.py | {
"start": 0,
"end": 420
} | class ____:
def maxProductDifference(self, nums: list[int]) -> int:
max1 = -math.inf
max2 = -math.inf
min1 = math.inf
min2 = math.inf
for num in nums:
if num > max1:
max2 = max1
max1 = num
elif num > max2:
max2 = num
if num < min1:
min2 = min1
min1 = num
elif num < min2:
min2 = num
return max1 * max2 - min1 * min2
| Solution |
python | nmslib__hnswlib | setup.py | {
"start": 1804,
"end": 4588
} | class ____(build_ext):
"""A custom build extension for adding compiler-specific options."""
compiler_flag_native = '-march=native'
c_opts = {
'msvc': ['/EHsc', '/openmp', '/O2'],
'unix': ['-O3', compiler_flag_native], # , '-w'
}
link_opts = {
'unix': [],
'msvc': [],
}
if os.environ.get("HNSWLIB_NO_NATIVE"):
c_opts['unix'].remove(compiler_flag_native)
if sys.platform == 'darwin':
c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
link_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
else:
c_opts['unix'].append("-fopenmp")
link_opts['unix'].extend(['-fopenmp', '-pthread'])
def build_extensions(self):
ct = self.compiler.compiler_type
opts = BuildExt.c_opts.get(ct, [])
if ct == 'unix':
opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
opts.append(cpp_flag(self.compiler))
if has_flag(self.compiler, '-fvisibility=hidden'):
opts.append('-fvisibility=hidden')
if not os.environ.get("HNSWLIB_NO_NATIVE"):
# check that native flag is available
print('checking avalability of flag:', BuildExt.compiler_flag_native)
if not has_flag(self.compiler, BuildExt.compiler_flag_native):
print('removing unsupported compiler flag:', BuildExt.compiler_flag_native)
opts.remove(BuildExt.compiler_flag_native)
# for macos add apple-m1 flag if it's available
if sys.platform == 'darwin':
m1_flag = '-mcpu=apple-m1'
print('checking avalability of flag:', m1_flag)
if has_flag(self.compiler, m1_flag):
print('adding flag:', m1_flag)
opts.append(m1_flag)
else:
print(f'flag: {m1_flag} is not available')
else:
print(f'flag: {BuildExt.compiler_flag_native} is available')
elif ct == 'msvc':
opts.append('/DVERSION_INFO=\\"%s\\"' % self.distribution.get_version())
for ext in self.extensions:
ext.extra_compile_args.extend(opts)
ext.extra_link_args.extend(BuildExt.link_opts.get(ct, []))
build_ext.build_extensions(self)
setup(
name='hnswlib',
version=__version__,
description='hnswlib',
author='Yury Malkov and others',
url='https://github.com/yurymalkov/hnsw',
long_description="""hnsw""",
ext_modules=ext_modules,
install_requires=['numpy'],
cmdclass={'build_ext': BuildExt},
zip_safe=False,
)
| BuildExt |
python | ray-project__ray | rllib/models/torch/torch_action_dist.py | {
"start": 2060,
"end": 3413
} | class ____(TorchDistributionWrapper):
"""Wrapper class for PyTorch Categorical distribution."""
@override(ActionDistribution)
def __init__(
self,
inputs: List[TensorType],
model: TorchModelV2 = None,
temperature: float = 1.0,
):
if temperature != 1.0:
assert temperature > 0.0, "Categorical `temperature` must be > 0.0!"
inputs /= temperature
super().__init__(inputs, model)
self.dist = torch.distributions.categorical.Categorical(logits=self.inputs)
@override(ActionDistribution)
def deterministic_sample(self) -> TensorType:
self.last_sample = self.dist.probs.argmax(dim=1)
return self.last_sample
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(
action_space: gym.Space, model_config: ModelConfigDict
) -> Union[int, np.ndarray]:
return action_space.n
@OldAPIStack
def get_torch_categorical_class_with_temperature(t: float):
"""TorchCategorical distribution class that has customized default temperature."""
class TorchCategoricalWithTemperature(TorchCategorical):
def __init__(self, inputs, model=None, temperature=t):
super().__init__(inputs, model, temperature)
return TorchCategoricalWithTemperature
@OldAPIStack
| TorchCategorical |
python | cython__cython | Demos/benchmarks/bm_comprehensions.py | {
"start": 392,
"end": 2844
} | class ____:
def __init__(self, owner_id: int, widgets: list[Widget]) -> None:
self.owner_id = owner_id
self.sorted_widgets: list[Widget] = []
self._add_widgets(widgets)
def _any_knobby(self, widgets: Iterable[Optional[Widget]]) -> bool:
return any(w.has_knob for w in widgets if w)
def _is_big_spinny(self, widget: Widget) -> bool:
return widget.kind == WidgetKind.BIG and widget.has_spinner
def _add_widgets(self, widgets: list[Widget]) -> None:
# sort order: mine first, then any widgets with derived knobby widgets in order of
# number derived, then other widgets in order of number derived, and we exclude
# big spinny widgets entirely
widgets = [w for w in widgets if not self._is_big_spinny(w)]
id_to_widget = {w.widget_id: w for w in widgets}
id_to_derived = {
w.widget_id: [id_to_widget.get(dwid) for dwid in w.derived_widget_ids]
for w in widgets
}
sortable_widgets = [
(
w.creator_id == self.owner_id,
self._any_knobby(id_to_derived[w.widget_id]),
len(id_to_derived[w.widget_id]),
w.widget_id,
)
for w in widgets
]
sortable_widgets.sort()
self.sorted_widgets = [id_to_widget[sw[-1]] for sw in sortable_widgets]
def make_some_widgets() -> list[Widget]:
widget_id = 0
widgets = []
for creator_id in range(3):
for kind in WidgetKind:
for has_knob in [True, False]:
for has_spinner in [True, False]:
derived = [w.widget_id for w in widgets[::creator_id + 1]]
widgets.append(
Widget(
widget_id, creator_id, derived, kind, has_knob, has_spinner
)
)
widget_id += 1
assert len(widgets) == 24
return widgets
def run_benchmark(repeat=True, scale: cython.long = 1):
from util import repeat_to_accuracy
widgets = make_some_widgets()
def single_run(scale, timer):
s: cython.long
t0 = timer()
for s in range(scale):
tray = WidgetTray(1, widgets)
assert len(tray.sorted_widgets) == 18
t1 = timer() - t0
return t1
return repeat_to_accuracy(single_run, scale=scale, repeat=repeat)[0]
| WidgetTray |
python | explosion__spaCy | spacy/displacy/render.py | {
"start": 1042,
"end": 11492
class ____:
    """Render Spans as SVGs."""

    style = "span"

    def __init__(self, options: Dict[str, Any] = {}) -> None:
        """Initialise span renderer

        options (dict): Visualiser-specific options (colors, spans)
        """
        # NOTE(review): the mutable default is safe here because `options`
        # is only read (via .get), never mutated.
        # Set up the colors and overall look
        colors = dict(DEFAULT_LABEL_COLORS)
        user_colors = registry.displacy_colors.get_all()
        for user_color in user_colors.values():
            if callable(user_color):
                # Since this comes from the function registry, we want to make
                # sure we support functions that *return* a dict of colors
                user_color = user_color()
            if not isinstance(user_color, dict):
                raise ValueError(Errors.E925.format(obj=type(user_color)))
            colors.update(user_color)
        colors.update(options.get("colors", {}))
        self.default_color = DEFAULT_ENTITY_COLOR
        self.colors = {label.upper(): color for label, color in colors.items()}
        # Set up how the text and labels will be rendered
        self.direction = DEFAULT_DIR
        self.lang = DEFAULT_LANG
        # These values are in px
        self.top_offset = options.get("top_offset", 40)
        # This is how far under the top offset the span labels appear
        self.span_label_offset = options.get("span_label_offset", 20)
        self.offset_step = options.get("top_offset_step", 17)
        # Set up which templates will be used
        template = options.get("template")
        if template:
            self.span_template = template["span"]
            self.span_slice_template = template["slice"]
            self.span_start_template = template["start"]
        else:
            if self.direction == "rtl":
                self.span_template = TPL_SPAN_RTL
                self.span_slice_template = TPL_SPAN_SLICE_RTL
                self.span_start_template = TPL_SPAN_START_RTL
            else:
                self.span_template = TPL_SPAN
                self.span_slice_template = TPL_SPAN_SLICE
                self.span_start_template = TPL_SPAN_START

    def render(
        self, parsed: List[Dict[str, Any]], page: bool = False, minify: bool = False
    ) -> str:
        """Render complete markup.

        parsed (list): Dependency parses to render.
        page (bool): Render parses wrapped as full HTML page.
        minify (bool): Minify HTML markup.
        RETURNS (str): Rendered SVG or HTML markup.
        """
        rendered = []
        for i, p in enumerate(parsed):
            if i == 0:
                # Direction/language come from the first parse's settings only.
                settings = p.get("settings", {})
                self.direction = settings.get("direction", DEFAULT_DIR)
                self.lang = settings.get("lang", DEFAULT_LANG)
            rendered.append(self.render_spans(p["tokens"], p["spans"], p.get("title")))
        if page:
            docs = "".join([TPL_FIGURE.format(content=doc) for doc in rendered])
            markup = TPL_PAGE.format(content=docs, lang=self.lang, dir=self.direction)
        else:
            markup = "".join(rendered)
        if minify:
            return minify_html(markup)
        return markup

    def render_spans(
        self,
        tokens: List[str],
        spans: List[Dict[str, Any]],
        title: Optional[str],
    ) -> str:
        """Render span types in text.

        Spans are rendered per-token, this means that for each token, we check if it's part
        of a span slice (a member of a span type) or a span start (the starting token of a
        given span type).

        tokens (list): Individual tokens in the text
        spans (list): Individual entity spans and their start, end, label, kb_id and kb_url.
        title (str / None): Document title set in Doc.user_data['title'].
        """
        per_token_info = self._assemble_per_token_info(tokens, spans)
        markup = self._render_markup(per_token_info)
        markup = TPL_SPANS.format(content=markup, dir=self.direction)
        if title:
            markup = TPL_TITLE.format(title=title) + markup
        return markup

    @staticmethod
    def _assemble_per_token_info(
        tokens: List[str], spans: List[Dict[str, Any]]
    ) -> List[Dict[str, List[Dict[str, Any]]]]:
        """Assembles token info used to generate markup in render_spans().

        tokens (List[str]): Tokens in text.
        spans (List[Dict[str, Any]]): Spans in text.
        RETURNS (List[Dict[str, List[Dict, str, Any]]]): Per token info needed to render HTML markup for given tokens
            and spans.
        """
        per_token_info: List[Dict[str, List[Dict[str, Any]]]] = []
        # we must sort so that we can correctly describe when spans need to "stack"
        # which is determined by their start token, then span length (longer spans on top),
        # then break any remaining ties with the span label
        spans = sorted(
            spans,
            key=lambda s: (
                s["start_token"],
                -(s["end_token"] - s["start_token"]),
                s["label"],
            ),
        )
        for s in spans:
            # this is the vertical 'slot' that the span will be rendered in
            # vertical_position = span_label_offset + (offset_step * (slot - 1))
            s["render_slot"] = 0
        for idx, token in enumerate(tokens):
            # Identify if a token belongs to a Span (and which) and if it's a
            # start token of said Span. We'll use this for the final HTML render
            token_markup: Dict[str, Any] = {}
            token_markup["text"] = token
            intersecting_spans: List[Dict[str, Any]] = []
            entities = []
            for span in spans:
                ent = {}
                if span["start_token"] <= idx < span["end_token"]:
                    span_start = idx == span["start_token"]
                    ent["label"] = span["label"]
                    ent["is_start"] = span_start
                    if span_start:
                        # When the span starts, we need to know how many other
                        # spans are on the 'span stack' and will be rendered.
                        # This value becomes the vertical render slot for this entire span
                        span["render_slot"] = (
                            intersecting_spans[-1]["render_slot"]
                            if len(intersecting_spans)
                            else 0
                        ) + 1
                    intersecting_spans.append(span)
                    ent["render_slot"] = span["render_slot"]
                    kb_id = span.get("kb_id", "")
                    kb_url = span.get("kb_url", "#")
                    ent["kb_link"] = (
                        TPL_KB_LINK.format(kb_id=kb_id, kb_url=kb_url) if kb_id else ""
                    )
                    entities.append(ent)
                else:
                    # We don't specifically need to do this since we loop
                    # over tokens and spans sorted by their start_token,
                    # so we'll never use a span again after the last token it appears in,
                    # but if we were to use these spans again we'd want to make sure
                    # this value was reset correctly.
                    span["render_slot"] = 0
            token_markup["entities"] = entities
            per_token_info.append(token_markup)
        return per_token_info

    def _render_markup(self, per_token_info: List[Dict[str, Any]]) -> str:
        """Render the markup from per-token information"""
        markup = ""
        for token in per_token_info:
            entities = sorted(token["entities"], key=lambda d: d["render_slot"])
            # Whitespace tokens disrupt the vertical space (no line height) so that the
            # span indicators get misaligned. We don't render them as individual
            # tokens anyway, so we'll just not display a span indicator either.
            is_whitespace = token["text"].strip() == ""
            if entities and not is_whitespace:
                slices = self._get_span_slices(token["entities"])
                starts = self._get_span_starts(token["entities"])
                # Height grows with the number of stacked spans on this token.
                total_height = (
                    self.top_offset
                    + self.span_label_offset
                    + (self.offset_step * (len(entities) - 1))
                )
                markup += self.span_template.format(
                    text=escape_html(token["text"]),
                    span_slices=slices,
                    span_starts=starts,
                    total_height=total_height,
                )
            else:
                markup += escape_html(token["text"] + " ")
        return markup

    def _get_span_slices(self, entities: List[Dict]) -> str:
        """Get the rendered markup of all Span slices"""
        span_slices = []
        for entity in entities:
            # rather than iterate over multiples of offset_step, we use entity['render_slot']
            # to determine the vertical position, since that tells where
            # the span starts vertically so we can extend it horizontally,
            # past other spans that might have already ended
            color = self.colors.get(entity["label"].upper(), self.default_color)
            top_offset = self.top_offset + (
                self.offset_step * (entity["render_slot"] - 1)
            )
            span_slice = self.span_slice_template.format(
                bg=color,
                top_offset=top_offset,
            )
            span_slices.append(span_slice)
        return "".join(span_slices)

    def _get_span_starts(self, entities: List[Dict]) -> str:
        """Get the rendered markup of all Span start tokens"""
        span_starts = []
        for entity in entities:
            color = self.colors.get(entity["label"].upper(), self.default_color)
            top_offset = self.top_offset + (
                self.offset_step * (entity["render_slot"] - 1)
            )
            # Only the starting token of a span carries its label marker.
            span_start = (
                self.span_start_template.format(
                    bg=color,
                    top_offset=top_offset,
                    label=entity["label"],
                    kb_link=entity["kb_link"],
                )
                if entity["is_start"]
                else ""
            )
            span_starts.append(span_start)
        return "".join(span_starts)
| SpanRenderer |
python | ethereum__web3.py | web3/_utils/abi.py | {
"start": 18625,
"end": 27507
} | class ____(namedtuple("ABITypedData", "abi_type, data")):
"""
Marks data as having a certain ABI-type.
>>> a1 = ABITypedData(['address', addr1])
>>> a2 = ABITypedData(['address', addr2])
>>> addrs = ABITypedData(['address[]', [a1, a2]])
You can access the fields using tuple() interface, or with
attributes:
>>> assert a1.abi_type == a1[0]
>>> assert a1.data == a1[1]
Unlike a typical `namedtuple`, you initialize with a single
positional argument that is iterable, to match the init
interface of all other relevant collections.
"""
def __new__(cls, iterable: Iterable[Any]) -> "ABITypedData":
return super().__new__(cls, *iterable)
def abi_sub_tree(
    type_str_or_abi_type: TypeStr | ABIType | None, data_value: Any
) -> ABITypedData:
    """Recursively wrap ``data_value`` in ABITypedData annotations.

    Arrays and tuples are rebuilt element-by-element so every nested value
    carries its own annotation; scalars are wrapped directly. A ``None``
    type yields an untyped wrapper.
    """
    if type_str_or_abi_type is None:
        return ABITypedData([None, data_value])
    if isinstance(type_str_or_abi_type, TypeStr):
        abi_type = parse(type_str_or_abi_type)
    else:
        abi_type = type_str_or_abi_type
    # In the two special cases below, we rebuild the given data structures with
    # annotated items
    if abi_type.is_array:
        # If type is array, determine item type and annotate all
        # items in iterable with that type
        item_type_str = abi_type.item_type.to_type_str()
        value_to_annotate = [
            abi_sub_tree(item_type_str, item_value) for item_value in data_value
        ]
    elif isinstance(abi_type, TupleType):
        # Otherwise, if type is tuple, determine component types and annotate
        # tuple components in iterable respectively with those types
        value_to_annotate = type(data_value)(
            abi_sub_tree(comp_type.to_type_str(), comp_value)
            for comp_type, comp_value in zip(abi_type.components, data_value)
        )
    else:
        value_to_annotate = data_value
    return ABITypedData(
        [
            abi_type.to_type_str(),
            value_to_annotate,
        ]
    )
def strip_abi_type(elements: Any) -> Any:
    """Unwrap a single ABITypedData node to its raw value; pass others through."""
    return elements.data if isinstance(elements, ABITypedData) else elements
def strip_abi_types(elements: Any) -> Any:
    # Walk the whole annotated tree, unwrapping every ABITypedData node.
    return recursive_map(strip_abi_type, elements)
def build_non_strict_registry() -> ABIRegistry:
    """Return a copy of eth-abi's default registry with the non-strict
    (tolerant) encoder variants registered for address/bytes/string."""
    # We make a copy here just to make sure that eth-abi's default registry is not
    # affected by our custom encoder subclasses
    registry = default_registry.copy()  # type: ignore[no-untyped-call]
    # Replace the default codecs for the four affected type labels.
    registry.unregister("address")
    registry.unregister("bytes<M>")
    registry.unregister("bytes")
    registry.unregister("string")
    registry.register(
        BaseEquals("address"),  # type: ignore[no-untyped-call]
        AddressEncoder,
        decoding.AddressDecoder,
        label="address",
    )
    registry.register(
        BaseEquals("bytes", with_sub=True),  # type: ignore[no-untyped-call]
        BytesEncoder,
        decoding.BytesDecoder,
        label="bytes<M>",
    )
    registry.register(
        BaseEquals("bytes", with_sub=False),  # type: ignore[no-untyped-call]
        ByteStringEncoder,
        decoding.ByteStringDecoder,
        label="bytes",
    )
    registry.register(
        BaseEquals("string"),  # type: ignore[no-untyped-call]
        TextStringEncoder,
        decoding.StringDecoder,
        label="string",
    )
    return registry
def build_strict_registry() -> ABIRegistry:
    """Return a copy of eth-abi's default registry with strict encoders
    (exact-length bytes, strict byte strings) registered.

    Mirrors build_non_strict_registry, differing only in the encoder classes.
    """
    registry = default_registry.copy()  # type: ignore[no-untyped-call]
    registry.unregister("address")
    registry.unregister("bytes<M>")
    registry.unregister("bytes")
    registry.unregister("string")
    registry.register(
        BaseEquals("address"),  # type: ignore[no-untyped-call]
        AddressEncoder,
        decoding.AddressDecoder,
        label="address",
    )
    registry.register(
        BaseEquals("bytes", with_sub=True),  # type: ignore[no-untyped-call]
        ExactLengthBytesEncoder,
        decoding.BytesDecoder,
        label="bytes<M>",
    )
    registry.register(
        BaseEquals("bytes", with_sub=False),  # type: ignore[no-untyped-call]
        StrictByteStringEncoder,
        decoding.ByteStringDecoder,
        label="bytes",
    )
    registry.register(
        BaseEquals("string"),  # type: ignore[no-untyped-call]
        encoding.TextStringEncoder,
        decoding.StringDecoder,
        label="string",
    )
    return registry
def named_tree(
    abi: Iterable[
        (
            ABIComponent
            | ABIComponentIndexed
            | ABIFunction
            | ABIEvent
            | dict[TypeStr, Any]
        )
    ],
    data: Iterable[tuple[Any, ...]],
) -> dict[str, Any]:
    """
    Convert function inputs/outputs or event data tuple to dict with names from ABI.
    """
    # Pair each ABI entry with its decoded value; nested arrays/tuples are
    # handled recursively by _named_subtree.
    names = [item["name"] for item in abi]
    items = [_named_subtree(*item) for item in zip(abi, data)]
    return dict(zip(names, items))
def _named_subtree(
    abi: (
        ABIComponent | ABIComponentIndexed | ABIFunction | ABIEvent | dict[TypeStr, Any]
    ),
    data: tuple[Any, ...],
) -> dict[str, Any] | tuple[Any, ...] | list[Any]:
    """Recursively attach ABI names to one decoded value.

    Arrays become lists, tuple types become dicts keyed by component name,
    and scalar leaves are returned unchanged. Raises MismatchedABI when a
    tuple type's component count disagrees with the data length.
    """
    abi_type = parse(collapse_if_tuple(cast(dict[str, Any], abi)))
    if abi_type.is_array:
        # Recurse into each element with the element type; array positions
        # carry no names, so the per-item name is blanked.
        item_type = abi_type.item_type.to_type_str()
        item_abi = {**abi, "type": item_type, "name": ""}
        items = [_named_subtree(item_abi, item) for item in data]
        return items
    elif isinstance(abi_type, TupleType):
        if abi.get("indexed"):
            abi = cast(ABIComponentIndexed, abi)
        else:
            abi = cast(ABIComponent, abi)
        names = [item["name"] for item in abi["components"]]
        items = [_named_subtree(*item) for item in zip(abi["components"], data)]
        if len(names) == len(data):
            return dict(zip(names, items))
        else:
            raise MismatchedABI(
                f"ABI fields {names} has length {len(names)} but received "
                f"data {data} with length {len(data)}"
            )
    # Scalar leaf: nothing to name.
    return data
def recursive_dict_to_namedtuple(data: dict[str, Any]) -> tuple[Any, ...]:
    """Convert a (possibly nested) dict into namedtuple instances, bottom-up."""

    def _dict_to_namedtuple(
        value: dict[str, Any] | list[Any],
    ) -> tuple[Any, ...] | list[Any]:
        # Non-dicts (lists, scalars) pass through; recursive_map handles
        # descending into them.
        if not isinstance(value, dict):
            return value
        keys, values = zip(*value.items()) if value else ((), ())
        return abi_decoded_namedtuple_factory(keys)(values)

    return recursive_map(_dict_to_namedtuple, data)
def abi_decoded_namedtuple_factory(
    fields: tuple[Any, ...],
) -> Callable[..., tuple[Any, ...]]:
    """Build a namedtuple class whose constructor takes one iterable of values.

    ``rename=True`` silently substitutes positional ``_N`` names for invalid
    or duplicate field names instead of raising.
    """
    base = namedtuple("ABIDecodedNamedTuple", fields, rename=True)  # type: ignore

    class ABIDecodedNamedTuple(base):  # type: ignore
        def __new__(self, args: Any) -> "ABIDecodedNamedTuple":
            # Accept a single iterable, unlike a plain namedtuple.
            return super().__new__(self, *args)

    return ABIDecodedNamedTuple
# -- async -- #
async def async_data_tree_map(
    async_w3: "AsyncWeb3[Any]",
    func: Callable[
        ["AsyncWeb3[Any]", TypeStr, Any], Coroutine[Any, Any, tuple[TypeStr, Any]]
    ],
    data_tree: Any,
) -> "ABITypedData":
    """
    Map an awaitable method to every ABITypedData element in the tree.

    The awaitable method should receive three positional args:
        async_w3, abi_type, and data
    """

    async def async_map_to_typed_data(elements: Any) -> "ABITypedData":
        # Only typed nodes are transformed; untyped nodes and plain values
        # pass through unchanged.
        if isinstance(elements, ABITypedData) and elements.abi_type is not None:
            formatted = await func(async_w3, *elements)
            return ABITypedData(formatted)
        else:
            return elements

    return await async_recursive_map(async_w3, async_map_to_typed_data, data_tree)
@reject_recursive_repeats
async def async_recursive_map(
    async_w3: "AsyncWeb3[Any]",
    func: Callable[[Any], Coroutine[Any, Any, TReturn]],
    data: Any,
) -> TReturn:
    """
    Apply an awaitable method to data and any collection items inside data
    (using async_map_if_collection).

    Define the awaitable method so that it only applies to the type of value that you
    want it to apply to.
    """

    async def async_recurse(item: Any) -> TReturn:
        # Depth-first: children are mapped before func sees the parent.
        return await async_recursive_map(async_w3, func, item)

    items_mapped = await async_map_if_collection(async_recurse, data)
    return await func(items_mapped)
async def async_map_if_collection(
    func: Callable[[Any], Coroutine[Any, Any, Any]], value: Any
) -> Any:
    """
    Apply an awaitable method to each element of a collection or value of a
    dictionary.

    If the value is not a collection, return it unmodified. Strings are
    treated as atoms, not character collections. The original container
    type is preserved by rebuilding with ``type(value)``.
    """
    datatype = type(value)
    if isinstance(value, Mapping):
        # Fix: iterate key/value pairs with ``items()``.  The previous code
        # iterated ``value.values()`` while unpacking ``key, val``, which
        # raised for any real mapping.
        return datatype({key: await func(val) for key, val in value.items()})
    if is_string(value):
        return value
    elif isinstance(value, Iterable):
        return datatype([await func(item) for item in value])
    else:
        return value
| ABITypedData |
python | numba__numba | numba/np/npyimpl.py | {
"start": 5900,
"end": 27199
class ____(namedtuple('_ArrayHelper', ('context', 'builder',
                                       'shape', 'strides', 'data',
                                       'layout', 'base_type', 'ndim',
                                       'inner_arr_ty', 'is_input_arg'))):
    """Helper class to handle array arguments/result.
    It provides methods to generate code loading/storing specific
    items as well as support code for handling indices.
    Contrary to _ArrayHelper, this class can create a view to a subarray
    """

    def create_iter_indices(self):
        # One zero-initialised index slot per *outer* loop dimension, i.e.
        # the dims not consumed by the gufunc core signature.
        intpty = self.context.get_value_type(types.intp)
        ZERO = ir.Constant(ir.IntType(intpty.width), 0)
        indices = []
        for i in range(self.ndim - self.inner_arr_ty.ndim):
            x = cgutils.alloca_once(self.builder, ir.IntType(intpty.width))
            self.builder.store(ZERO, x)
            indices.append(x)
        return _ArrayIndexingHelper(self, indices)

    def _load_effective_address(self, indices):
        # Compute the pointer to the element/subarray selected by `indices`.
        context = self.context
        builder = self.builder
        arr_ty = types.Array(self.base_type, self.ndim, self.layout)
        arr = context.make_array(arr_ty)(context, builder, self.data)
        return cgutils.get_item_pointer2(context,
                                         builder,
                                         data=arr.data,
                                         shape=self.shape,
                                         strides=self.strides,
                                         layout=self.layout,
                                         inds=indices)

    def load_data(self, indices):
        context, builder = self.context, self.builder
        if self.inner_arr_ty.ndim == 0 and self.is_input_arg:
            # scalar case for input arguments
            model = context.data_model_manager[self.base_type]
            ptr = self._load_effective_address(indices)
            return model.load_from_data_pointer(builder, ptr)
        elif self.inner_arr_ty.ndim == 0 and not self.is_input_arg:
            # Output arrays are handled as 1d with shape=(1,) when its
            # signature represents a scalar. For instance: "(n),(m) -> ()"
            intpty = context.get_value_type(types.intp)
            one = intpty(1)
            fromty = types.Array(self.base_type, self.ndim, self.layout)
            toty = types.Array(self.base_type, 1, self.layout)
            itemsize = intpty(arrayobj.get_itemsize(context, fromty))
            # create a view from the original ndarray to a 1d array
            arr_from = self.context.make_array(fromty)(context,
                                                       builder,
                                                       self.data)
            arr_to = self.context.make_array(toty)(context, builder)
            arrayobj.populate_array(
                arr_to,
                data=self._load_effective_address(indices),
                shape=cgutils.pack_array(builder, [one]),
                strides=cgutils.pack_array(builder, [itemsize]),
                itemsize=arr_from.itemsize,
                meminfo=arr_from.meminfo,
                parent=arr_from.parent)
            return arr_to._getvalue()
        else:
            # generic case
            # getitem n-dim array -> m-dim array, where N > M
            index_types = (types.int64,) * (self.ndim - self.inner_arr_ty.ndim)
            arrty = types.Array(self.base_type, self.ndim, self.layout)
            arr = self.context.make_array(arrty)(context, builder, self.data)
            res = _getitem_array_generic(context, builder,
                                         self.inner_arr_ty, arrty, arr,
                                         index_types, indices)
            # NOTE: don't call impl_ret_borrowed since the caller doesn't handle
            # references; but this is a borrow.
            return res

    def guard_shape(self, loopshape):
        # Emit a runtime check that this argument's outer dims match the
        # loop shape (broadcasting is not supported for gufunc args here).
        inner_ndim = self.inner_arr_ty.ndim

        def raise_impl(loop_shape, array_shape):
            # This would in fact be a test for broadcasting.
            # Broadcast would fail if, ignoring the core dimensions, the
            # remaining ones are different than indices given by loop shape.
            remaining = len(array_shape) - inner_ndim
            _raise = (remaining > len(loop_shape))
            if not _raise:
                for i in range(remaining):
                    _raise |= (array_shape[i] != loop_shape[i])
            if _raise:
                # Ideally we should call `np.broadcast_shapes` with loop and
                # array shapes. But since broadcasting is not supported here,
                # we just raise an error
                # TODO: check why raising a dynamic exception here fails
                raise ValueError('Loop and array shapes are incompatible')

        context, builder = self.context, self.builder
        sig = types.none(
            types.UniTuple(types.intp, len(loopshape)),
            types.UniTuple(types.intp, len(self.shape)),
        )
        tup = (context.make_tuple(builder, sig.args[0], loopshape),
               context.make_tuple(builder, sig.args[1], self.shape))
        context.compile_internal(builder, raise_impl, sig, tup)

    def guard_match_core_dims(self, other: '_ArrayGUHelper', ndims: int):
        # arguments with the same signature should match their core dimensions
        #
        # @guvectorize('(n,m), (n,m) -> (n)')
        # def foo(x, y, res):
        #     ...
        #
        # x and y should have the same core (2D) dimensions
        def raise_impl(self_shape, other_shape):
            same = True
            a, b = len(self_shape) - ndims, len(other_shape) - ndims
            for i in range(ndims):
                same &= self_shape[a + i] == other_shape[b + i]
            if not same:
                # NumPy raises the following:
                # ValueError: gufunc: Input operand 1 has a mismatch in its
                #   core dimension 0, with gufunc signature (n),(n) -> ()
                #   (size 3 is different from 2)
                # But since we cannot raise a dynamic exception here, we just
                # (try) something meaninful
                msg = ('Operand has a mismatch in one of its core dimensions. '
                       'Please, check if all arguments to a @guvectorize '
                       'function have the same core dimensions.')
                raise ValueError(msg)

        context, builder = self.context, self.builder
        sig = types.none(
            types.UniTuple(types.intp, len(self.shape)),
            types.UniTuple(types.intp, len(other.shape)),
        )
        tup = (context.make_tuple(builder, sig.args[0], self.shape),
               context.make_tuple(builder, sig.args[1], other.shape),)
        context.compile_internal(builder, raise_impl, sig, tup)
def _prepare_argument(ctxt, bld, inp, tyinp, where='input operand'):
    """returns an instance of the appropriate Helper (either
    _ScalarHelper or _ArrayHelper) class to handle the argument.
    using the polymorphic interface of the Helper classes, scalar
    and array cases can be handled with the same code"""
    # first un-Optional Optionals
    if isinstance(tyinp, types.Optional):
        oty = tyinp
        tyinp = tyinp.type
        inp = ctxt.cast(bld, inp, oty, tyinp)
    # then prepare the arg for a concrete instance
    if isinstance(tyinp, types.ArrayCompatible):
        ary = ctxt.make_array(tyinp)(ctxt, bld, inp)
        shape = cgutils.unpack_tuple(bld, ary.shape, tyinp.ndim)
        strides = cgutils.unpack_tuple(bld, ary.strides, tyinp.ndim)
        return _ArrayHelper(ctxt, bld, shape, strides, ary.data,
                            tyinp.layout, tyinp.dtype, tyinp.ndim, inp)
    elif (types.unliteral(tyinp) in types.number_domain | {types.boolean}
          or isinstance(tyinp, types.scalars._NPDatetimeBase)):
        # numbers, booleans and NP datetime-likes are handled as scalars
        return _ScalarHelper(ctxt, bld, inp, tyinp)
    else:
        raise NotImplementedError('unsupported type for {0}: {1}'.format(where,
                                                                         str(tyinp)))
# Compiled signature of _broadcast_onto: (intp, intp*, intp, intp*) -> intp.
# The concrete integer type depends on which numba type system is active.
if config.USE_LEGACY_TYPE_SYSTEM:
    _broadcast_onto_sig = types.intp(types.intp, types.CPointer(types.intp),
                                     types.intp, types.CPointer(types.intp))
else:
    _broadcast_onto_sig = types.np_intp(types.np_intp, types.CPointer(types.np_intp),
                                        types.np_intp, types.CPointer(types.np_intp))
def _broadcast_onto(src_ndim, src_shape, dest_ndim, dest_shape):
'''Low-level utility function used in calculating a shape for
an implicit output array. This function assumes that the
destination shape is an LLVM pointer to a C-style array that was
already initialized to a size of one along all axes.
Returns an integer value:
>= 1 : Succeeded. Return value should equal the number of dimensions in
the destination shape.
0 : Failed to broadcast because source shape is larger than the
destination shape (this case should be weeded out at type
checking).
< 0 : Failed to broadcast onto destination axis, at axis number ==
-(return_value + 1).
'''
if src_ndim > dest_ndim:
# This check should have been done during type checking, but
# let's be defensive anyway...
return 0
else:
src_index = 0
dest_index = dest_ndim - src_ndim
while src_index < src_ndim:
src_dim_size = src_shape[src_index]
dest_dim_size = dest_shape[dest_index]
# Check to see if we've already mutated the destination
# shape along this axis.
if dest_dim_size != 1:
# If we have mutated the destination shape already,
# then the source axis size must either be one,
# or the destination axis size.
if src_dim_size != dest_dim_size and src_dim_size != 1:
return -(dest_index + 1)
elif src_dim_size != 1:
# If the destination size is still its initial
dest_shape[dest_index] = src_dim_size
src_index += 1
dest_index += 1
return dest_index
def _build_array(context, builder, array_ty, input_types, inputs):
    """Utility function to handle allocation of an implicit output array
    given the target context, builder, output array type, and a list of
    _ArrayHelper instances.
    """
    # First, strip optional types, ufunc loops are typed on concrete types
    input_types = [x.type if isinstance(x, types.Optional) else x
                   for x in input_types]
    intp_ty = context.get_value_type(types.intp)

    def make_intp_const(val):
        return context.get_constant(types.intp, val)

    ZERO = make_intp_const(0)
    ONE = make_intp_const(1)
    src_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim,
                                    "src_shape")
    dest_ndim = make_intp_const(array_ty.ndim)
    dest_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim,
                                     "dest_shape")
    dest_shape_addrs = tuple(cgutils.gep_inbounds(builder, dest_shape, index)
                             for index in range(array_ty.ndim))
    # Initialize the destination shape with all ones.
    for dest_shape_addr in dest_shape_addrs:
        builder.store(ONE, dest_shape_addr)
    # For each argument, try to broadcast onto the destination shape,
    # mutating along any axis where the argument shape is not one and
    # the destination shape is one.
    for arg_number, arg in enumerate(inputs):
        if not hasattr(arg, "ndim"):  # Skip scalar arguments
            continue
        arg_ndim = make_intp_const(arg.ndim)
        for index in range(arg.ndim):
            builder.store(arg.shape[index],
                          cgutils.gep_inbounds(builder, src_shape, index))
        arg_result = context.compile_internal(
            builder, _broadcast_onto, _broadcast_onto_sig,
            [arg_ndim, src_shape, dest_ndim, dest_shape])
        with cgutils.if_unlikely(builder,
                                 builder.icmp_signed('<', arg_result, ONE)):
            # _broadcast_onto returned <= 0: raise at runtime.
            msg = "unable to broadcast argument %d to output array" % (
                arg_number,)
            loc = errors.loc_info.get('loc', None)
            if loc is not None:
                msg += '\nFile "%s", line %d, ' % (loc.filename, loc.line)
            context.call_conv.return_user_exc(builder, ValueError, (msg,))
    real_array_ty = array_ty.as_array
    dest_shape_tup = tuple(builder.load(dest_shape_addr)
                           for dest_shape_addr in dest_shape_addrs)
    array_val = arrayobj._empty_nd_impl(context, builder, real_array_ty,
                                        dest_shape_tup)
    # Get the best argument to call __array_wrap__ on
    array_wrapper_index = select_array_wrapper(input_types)
    array_wrapper_ty = input_types[array_wrapper_index]
    try:
        # __array_wrap__(source wrapped array, out array) -> out wrapped array
        array_wrap = context.get_function('__array_wrap__',
                                          array_ty(array_wrapper_ty, real_array_ty))
    except NotImplementedError:
        # If it's the same priority as a regular array, assume we
        # should use the allocated array unchanged.
        if array_wrapper_ty.array_priority != types.Array.array_priority:
            raise
        out_val = array_val._getvalue()
    else:
        wrap_args = (inputs[array_wrapper_index].return_val, array_val._getvalue())
        out_val = array_wrap(builder, wrap_args)
    ndim = array_ty.ndim
    shape = cgutils.unpack_tuple(builder, array_val.shape, ndim)
    strides = cgutils.unpack_tuple(builder, array_val.strides, ndim)
    return _ArrayHelper(context, builder, shape, strides, array_val.data,
                        array_ty.layout, array_ty.dtype, ndim,
                        out_val)
# ufuncs either return a single result when nout == 1, else a tuple of results
def _unpack_output_types(ufunc, sig):
if ufunc.nout == 1:
return [sig.return_type]
else:
return list(sig.return_type)
def _unpack_output_values(ufunc, builder, values):
if ufunc.nout == 1:
return [values]
else:
return cgutils.unpack_tuple(builder, values)
def _pack_output_values(ufunc, context, builder, typ, values):
if ufunc.nout == 1:
return values[0]
else:
return context.make_tuple(builder, typ, values)
def numpy_ufunc_kernel(context, builder, sig, args, ufunc, kernel_class):
    # This is the code generator that builds all the looping needed
    # to execute a numpy functions over several dimensions (including
    # scalar cases).
    #
    # context - the code generation context
    # builder - the code emitter
    # sig - signature of the ufunc
    # args - the args to the ufunc
    # ufunc - the ufunc itself
    # kernel_class - a code generating subclass of _Kernel that provides
    #     the per-element operation (its generate() is called in the loop
    #     body below)
    arguments = [_prepare_argument(context, builder, arg, tyarg)
                 for arg, tyarg in zip(args, sig.args)]
    if len(arguments) < ufunc.nin:
        raise RuntimeError(
            "Not enough inputs to {}, expected {} got {}"
            .format(ufunc.__name__, ufunc.nin, len(arguments)))
    for out_i, ret_ty in enumerate(_unpack_output_types(ufunc, sig)):
        if ufunc.nin + out_i >= len(arguments):
            # this out argument is not provided
            if isinstance(ret_ty, types.ArrayCompatible):
                output = _build_array(context, builder, ret_ty, sig.args, arguments)
            else:
                output = _prepare_argument(
                    context, builder,
                    ir.Constant(context.get_value_type(ret_ty), None), ret_ty)
            arguments.append(output)
        elif context.enable_nrt:
            # Incref the output
            context.nrt.incref(builder, ret_ty, args[ufunc.nin + out_i])
    inputs = arguments[:ufunc.nin]
    outputs = arguments[ufunc.nin:]
    assert len(outputs) == ufunc.nout
    outer_sig = _ufunc_loop_sig(
        [a.base_type for a in outputs],
        [a.base_type for a in inputs]
    )
    kernel = kernel_class(context, builder, outer_sig)
    intpty = context.get_value_type(types.intp)
    indices = [inp.create_iter_indices() for inp in inputs]
    # assume outputs are all the same size, which numpy requires
    loopshape = outputs[0].shape
    # count the number of C and F layout arrays, respectively
    input_layouts = [inp.layout for inp in inputs
                     if isinstance(inp, _ArrayHelper)]
    num_c_layout = len([x for x in input_layouts if x == 'C'])
    num_f_layout = len([x for x in input_layouts if x == 'F'])
    # Only choose F iteration order if more arrays are in F layout.
    # Default to C order otherwise.
    # This is a best effort for performance. NumPy has more fancy logic that
    # uses array iterators in non-trivial cases.
    if num_f_layout > num_c_layout:
        order = 'F'
    else:
        order = 'C'
    with cgutils.loop_nest(builder, loopshape, intp=intpty, order=order) as loop_indices:
        vals_in = []
        for i, (index, arg) in enumerate(zip(indices, inputs)):
            index.update_indices(loop_indices, i)
            vals_in.append(arg.load_data(index.as_values()))
        vals_out = _unpack_output_values(ufunc, builder, kernel.generate(*vals_in))
        for val_out, output in zip(vals_out, outputs):
            output.store_data(loop_indices, val_out)
    out = _pack_output_values(ufunc, context, builder, sig.return_type, [o.return_val for o in outputs])
    return impl_ret_new_ref(context, builder, sig.return_type, out)
def numpy_gufunc_kernel(context, builder, sig, args, ufunc, kernel_class):
    # Generalized-ufunc variant of numpy_ufunc_kernel: arguments may carry
    # core dimensions described by the gufunc signature, so array arguments
    # are wrapped in helpers that yield *subarrays* per outer-loop iteration.
    arguments = []
    expected_ndims = kernel_class.dufunc.expected_ndims()
    expected_ndims = expected_ndims[0] + expected_ndims[1]
    is_input = [True] * ufunc.nin + [False] * ufunc.nout
    for arg, ty, exp_ndim, is_inp in zip(args, sig.args, expected_ndims, is_input):  # noqa: E501
        if isinstance(ty, types.ArrayCompatible):
            # Create an array helper that iteration returns a subarray
            # with ndim specified by "exp_ndim"
            arr = context.make_array(ty)(context, builder, arg)
            shape = cgutils.unpack_tuple(builder, arr.shape, ty.ndim)
            strides = cgutils.unpack_tuple(builder, arr.strides, ty.ndim)
            inner_arr_ty = ty.copy(ndim=exp_ndim)
            ndim = ty.ndim
            layout = ty.layout
            base_type = ty.dtype
            array_helper = _ArrayGUHelper(context, builder,
                                          shape, strides, arg,
                                          layout, base_type, ndim,
                                          inner_arr_ty, is_inp)
            arguments.append(array_helper)
        else:
            scalar_helper = _ScalarHelper(context, builder, arg, ty)
            arguments.append(scalar_helper)
    kernel = kernel_class(context, builder, sig)
    layouts = [arg.layout for arg in arguments
               if isinstance(arg, _ArrayGUHelper)]
    num_c_layout = len([x for x in layouts if x == 'C'])
    num_f_layout = len([x for x in layouts if x == 'F'])
    # Only choose F iteration order if more arrays are in F layout.
    # Default to C order otherwise.
    # This is a best effort for performance. NumPy has more fancy logic that
    # uses array iterators in non-trivial cases.
    if num_f_layout > num_c_layout:
        order = 'F'
    else:
        order = 'C'
    outputs = arguments[ufunc.nin:]
    intpty = context.get_value_type(types.intp)
    indices = [inp.create_iter_indices() for inp in arguments]
    # The outer loop iterates over the output's non-core dimensions.
    loopshape_ndim = outputs[0].ndim - outputs[0].inner_arr_ty.ndim
    loopshape = outputs[0].shape[:loopshape_ndim]
    _sig = parse_signature(ufunc.gufunc_builder.signature)
    for (idx_a, sig_a), (idx_b, sig_b) in itertools.combinations(
        zip(range(len(arguments)),
            _sig[0] + _sig[1]),
        r=2
    ):
        # For each pair of arguments, both inputs and outputs, must match their
        # inner dimensions if their signatures are the same.
        arg_a, arg_b = arguments[idx_a], arguments[idx_b]
        if sig_a == sig_b and \
                all(isinstance(x, _ArrayGUHelper) for x in (arg_a, arg_b)):
            arg_a, arg_b = arguments[idx_a], arguments[idx_b]
            arg_a.guard_match_core_dims(arg_b, len(sig_a))
    for arg in arguments[:ufunc.nin]:
        if isinstance(arg, _ArrayGUHelper):
            arg.guard_shape(loopshape)
    with cgutils.loop_nest(builder,
                           loopshape,
                           intp=intpty,
                           order=order) as loop_indices:
        vals_in = []
        for i, (index, arg) in enumerate(zip(indices, arguments)):
            index.update_indices(loop_indices, i)
            vals_in.append(arg.load_data(index.as_values()))
        # gufunc kernels write into their output subarrays; nothing returned.
        kernel.generate(*vals_in)
# Kernels are the code to be executed inside the multidimensional loop.
| _ArrayGUHelper |
python | walkccc__LeetCode | solutions/3305. Count of Substrings Containing Every Vowel and K Consonants I/3305.py | {
"start": 0,
"end": 1149
} | class ____:
def countOfSubstrings(self, word: str, k: int) -> int:
VOWELS = 'aeiou'
def substringsWithAtMost(k: int) -> int:
"""
Return the number of substrings containing every vowel with at most k
consonants.
"""
if k == -1:
return 0
res = 0
vowels = 0
uniqueVowels = 0
vowelLastSeen = {}
l = 0
for r, c in enumerate(word):
if c in VOWELS:
vowels += 1
if c not in vowelLastSeen or vowelLastSeen[c] < l:
uniqueVowels += 1
vowelLastSeen[c] = r
while r - l + 1 - vowels > k:
if word[l] in VOWELS:
vowels -= 1
if vowelLastSeen[word[l]] == l:
uniqueVowels -= 1
l += 1
if uniqueVowels == 5:
# Add substrings containing every vowel with at most k consonants to
# the answer. They are
# word[l..r], word[l + 1..r], ..., word[min(vowelLastSeen[vowel])..r]
res += min(vowelLastSeen[vowel] for vowel in VOWELS) - l + 1
return res
return substringsWithAtMost(k) - substringsWithAtMost(k - 1)
| Solution |
python | doocs__leetcode | solution/1100-1199/1102.Path With Maximum Minimum Value/Solution2.py | {
"start": 624,
"end": 1246
class ____:
    def maximumMinimumPath(self, grid: List[List[int]]) -> int:
        """Maximize the minimum cell value along a path from (0,0) to (m-1,n-1).

        Greedy union-find: admit cells from highest value downward until the
        two corners become connected; the last admitted value is the answer.
        """
        rows, cols = len(grid), len(grid[0])
        uf = UnionFind(rows * cols)
        # Ascending by value; we consume from the tail, i.e. highest first.
        cells = sorted(
            (val, r, c)
            for r, row in enumerate(grid)
            for c, val in enumerate(row)
        )
        seen = set()
        dirs = (-1, 0, 1, 0, -1)
        best = 0
        while uf.find(0) != uf.find(rows * cols - 1):
            val, r, c = cells.pop()
            best = val
            seen.add((r, c))
            # Union with already-admitted 4-neighbors.
            for dr, dc in pairwise(dirs):
                nr, nc = r + dr, c + dc
                if (nr, nc) in seen:
                    uf.union(nr * cols + nc, r * cols + c)
        return best
| Solution |
python | PyCQA__pylint | tests/functional/s/string/string_formatting.py | {
"start": 449,
"end": 568
} | class ____:
""" test custom getitem for lookup access """
def __getitem__(self, index):
return 42
| Getitem |
python | doocs__leetcode | solution/2000-2099/2094.Finding 3-Digit Even Numbers/Solution.py | {
"start": 0,
"end": 405
} | class ____:
def findEvenNumbers(self, digits: List[int]) -> List[int]:
cnt = Counter(digits)
ans = []
for x in range(100, 1000, 2):
cnt1 = Counter()
y = x
while y:
y, v = divmod(y, 10)
cnt1[v] += 1
if all(cnt[i] >= cnt1[i] for i in range(10)):
ans.append(x)
return ans
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/type_api.py | {
"start": 2819,
"end": 2925
class ____(Protocol[_T_co]):
    """Callable protocol: convert one raw value to ``_T_co`` or ``None``.

    Per the name, presumably applied to values coming back in result rows —
    confirm against the callers of this protocol.
    """

    def __call__(self, value: Any) -> Optional[_T_co]: ...
| _ResultProcessorType |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.