language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | tests/sentry/services/eventstore/test_base.py | {
"start": 334,
"end": 1520
} | class ____(TestCase):
def setUp(self) -> None:
self.eventstorage = EventStorage()
def test_minimal_columns(self) -> None:
assert len(self.eventstorage.minimal_columns[Dataset.Events]) == 5
assert len(self.eventstorage.minimal_columns[Dataset.Transactions]) == 4
def test_bind_nodes(self) -> None:
"""
Test that bind_nodes populates _node_data
"""
min_ago = before_now(minutes=1).isoformat()
self.store_event(
data={"event_id": "a" * 32, "timestamp": min_ago, "user": {"id": "user1"}},
project_id=self.project.id,
)
self.store_event(
data={"event_id": "b" * 32, "timestamp": min_ago, "user": {"id": "user2"}},
project_id=self.project.id,
)
event = Event(project_id=self.project.id, event_id="a" * 32)
event2 = Event(project_id=self.project.id, event_id="b" * 32)
before = event.data._node_data
self.eventstorage.bind_nodes([event, event2])
after = event.data._node_data
assert before is None
assert after is not None
assert event.data["user"]["id"] == "user1"
| EventStorageTest |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/sqs.py | {
"start": 1731,
"end": 10672
} | class ____(AwsBaseSensor[SqsHook]):
"""
Get messages from an Amazon SQS queue and then delete the messages from the queue.
If deletion of messages fails, an AirflowException is thrown. Otherwise, the messages
are pushed through XCom with the key ``messages``.
By default,the sensor performs one and only one SQS call per poke, which limits the result to
a maximum of 10 messages. However, the total number of SQS API calls per poke can be controlled
by num_batches param.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:SqsSensor`
:param sqs_queue: The SQS queue url (templated)
:param max_messages: The maximum number of messages to retrieve for each poke (templated)
:param num_batches: The number of times the sensor will call the SQS API to receive messages (default: 1)
:param wait_time_seconds: The time in seconds to wait for receiving messages (default: 1 second)
:param visibility_timeout: Visibility timeout, a period of time during which
Amazon SQS prevents other consumers from receiving and processing the message.
:param message_filtering: Specified how received messages should be filtered. Supported options are:
`None` (no filtering, default), `'literal'` (message Body literal match), `'jsonpath'`
(message Body filtered using a JSONPath expression), or `'jsonpath-ext'` (like `'jsonpath'`, but with
an expanded query grammar). You may add further methods by overriding the relevant class methods.
:param message_filtering_match_values: Optional value/s for the message filter to match on.
For example, with literal matching, if a message body matches any of the specified values
then it is included. For JSONPath matching, the result of the JSONPath expression is used
and may match any of the specified values.
:param message_filtering_config: Additional configuration to pass to the message filter.
For example with JSONPath filtering you can pass a JSONPath expression string here,
such as `'foo[*].baz'`. Messages with a Body which does not match are ignored.
:param delete_message_on_reception: Default to `True`, the messages are deleted from the queue
as soon as being consumed. Otherwise, the messages remain in the queue after consumption and
should be deleted manually.
:param deferrable: If True, the sensor will operate in deferrable mode. This mode requires aiobotocore
module to be installed.
(default: False, but can be overridden in config file by setting default_deferrable to True)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = SqsHook
template_fields: Sequence[str] = aws_template_fields(
"sqs_queue", "max_messages", "message_filtering_config"
)
def __init__(
self,
*,
sqs_queue,
max_messages: int = 5,
num_batches: int = 1,
wait_time_seconds: int = 1,
visibility_timeout: int | None = None,
message_filtering: MessageFilteringType | None = None,
message_filtering_match_values: Any = None,
message_filtering_config: Any = None,
delete_message_on_reception: bool = True,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.sqs_queue = sqs_queue
self.max_messages = max_messages
self.num_batches = num_batches
self.wait_time_seconds = wait_time_seconds
self.visibility_timeout = visibility_timeout
self.message_filtering = message_filtering
self.delete_message_on_reception = delete_message_on_reception
if message_filtering_match_values is not None:
if not isinstance(message_filtering_match_values, set):
message_filtering_match_values = set(message_filtering_match_values)
self.message_filtering_match_values = message_filtering_match_values
if self.message_filtering == "literal":
if self.message_filtering_match_values is None:
raise TypeError("message_filtering_match_values must be specified for literal matching")
self.message_filtering_config = message_filtering_config
self.deferrable = deferrable
def execute(self, context: Context) -> Any:
if self.deferrable:
self.defer(
trigger=SqsSensorTrigger(
sqs_queue=self.sqs_queue,
aws_conn_id=self.aws_conn_id,
max_messages=self.max_messages,
num_batches=self.num_batches,
wait_time_seconds=self.wait_time_seconds,
visibility_timeout=self.visibility_timeout,
message_filtering=self.message_filtering,
message_filtering_match_values=self.message_filtering_match_values,
message_filtering_config=self.message_filtering_config,
delete_message_on_reception=self.delete_message_on_reception,
waiter_delay=int(self.poke_interval),
region_name=self.region_name,
verify=self.verify,
botocore_config=self.botocore_config,
),
method_name="execute_complete",
timeout=timedelta(seconds=self.timeout),
)
else:
super().execute(context=context)
def execute_complete(self, context: Context, event: dict | None = None) -> None:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(f"Trigger error: event is {validated_event}")
context["ti"].xcom_push(key="messages", value=validated_event["message_batch"])
def poll_sqs(self, sqs_conn: BaseAwsConnection) -> Collection:
"""
Poll SQS queue to retrieve messages.
:param sqs_conn: SQS connection
:return: A list of messages retrieved from SQS
"""
self.log.info("SqsSensor checking for message on queue: %s", self.sqs_queue)
receive_message_kwargs = {
"QueueUrl": self.sqs_queue,
"MaxNumberOfMessages": self.max_messages,
"WaitTimeSeconds": self.wait_time_seconds,
}
if self.visibility_timeout is not None:
receive_message_kwargs["VisibilityTimeout"] = self.visibility_timeout
response = sqs_conn.receive_message(**receive_message_kwargs)
return response
def poke(self, context: Context):
"""
Check subscribed queue for messages and write them to xcom with the ``messages`` key.
:param context: the context object
:return: ``True`` if message is available or ``False``
"""
message_batch: list[Any] = []
# perform multiple SQS call to retrieve messages in series
for _ in range(self.num_batches):
response = self.poll_sqs(sqs_conn=self.hook.conn)
messages = process_response(
response,
self.message_filtering,
self.message_filtering_match_values,
self.message_filtering_config,
)
if not messages:
continue
message_batch.extend(messages)
if self.delete_message_on_reception:
self.log.info("Deleting %d messages", len(messages))
entries = [
{"Id": message["MessageId"], "ReceiptHandle": message["ReceiptHandle"]}
for message in messages
]
response = self.hook.conn.delete_message_batch(QueueUrl=self.sqs_queue, Entries=entries)
if "Successful" not in response:
raise AirflowException(f"Delete SQS Messages failed {response} for messages {messages}")
if message_batch:
context["ti"].xcom_push(key="messages", value=message_batch)
return True
return False
| SqsSensor |
python | getsentry__sentry | tests/sentry/release_health/test_tasks.py | {
"start": 991,
"end": 22447
} | class ____(TestCase, BaseMetricsTestCase):
__test__ = Abstract(__module__, __qualname__)
backend_class: type[BaseReleaseMonitorBackend]
def setUp(self) -> None:
super().setUp()
backend = self.backend_class()
self.backend = mock.patch("sentry.release_health.tasks.release_monitor", backend)
self.backend.__enter__()
# no global option mocking needed
self.project = self.create_project()
self.project1 = self.create_project()
self.project2 = self.create_project()
self.project1.update(flags=F("flags").bitor(Project.flags.has_releases))
self.project2.update(flags=F("flags").bitor(Project.flags.has_releases))
self.repo = Repository.objects.create(
organization_id=self.organization.id, name=self.organization.id
)
self.release = self.create_release(project=self.project, version="foo@1.0.0")
self.release2 = self.create_release(project=self.project, version="foo@2.0.0")
self.release3 = self.create_release(project=self.project2, version="bar@1.0.0")
self.environment = self.create_environment(name="prod", project=self.project1)
self.environment2 = self.create_environment(name="canary", project=self.project2)
self.group = self.create_group(
project=self.project, message="Kaboom!", first_release=self.release
)
self.rpe = ReleaseProjectEnvironment.objects.create(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
)
self.rpe1 = ReleaseProjectEnvironment.objects.create(
project_id=self.project1.id,
release_id=self.release2.id,
environment_id=self.environment.id,
)
self.rpe2 = ReleaseProjectEnvironment.objects.create(
project_id=self.project1.id,
release_id=self.release3.id,
environment_id=self.environment.id,
)
self.rpe3 = ReleaseProjectEnvironment.objects.create(
project_id=self.project2.id,
release_id=self.release.id,
environment_id=self.environment2.id,
)
GroupRelease.objects.create(
group_id=self.group.id, release_id=self.release.id, project_id=self.project.id
)
self.event = self.store_event(
data={
"message": "Kaboom!",
"platform": "python",
"timestamp": before_now(seconds=10).isoformat(),
"stacktrace": {
"frames": [
{
"function": "handle_set_commits",
"abs_path": "/usr/src/sentry/src/sentry/tasks.py",
"module": "sentry.tasks",
"in_app": True,
"lineno": 30,
"filename": "sentry/tasks.py",
},
{
"function": "set_commits",
"abs_path": "/usr/src/sentry/src/sentry/models/release.py",
"module": "sentry.models.release",
"in_app": True,
"lineno": 39,
"filename": "sentry/models/release.py",
},
]
},
"tags": {"sentry:release": self.release.version},
"fingerprint": ["finterpring"],
},
project_id=self.project.id,
)
GroupRelease.objects.create(
group_id=self.event.group.id, project_id=self.project.id, release_id=self.release.id
)
def tearDown(self) -> None:
self.backend.__exit__(None, None, None)
def test_simple(self) -> None:
self.bulk_store_sessions([self.build_session(project_id=self.project1) for _ in range(11)])
self.bulk_store_sessions(
[
self.build_session(project_id=self.project2, environment=self.environment2.name)
for _ in range(1)
]
)
assert not self.project1.flags.has_sessions
now = timezone.now()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted=None,
).exists()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project2.id,
release_id=self.release.id,
environment_id=self.environment2.id,
adopted=None,
).exists()
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted__gte=now,
).exists()
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project2.id,
release_id=self.release.id,
environment_id=self.environment2.id,
adopted__gte=now,
).exists()
test_data = [
{
"org_id": [self.organization.id],
"project_id": [self.project2.id, self.project1.id],
},
]
process_projects_with_sessions(test_data[0]["org_id"][0], test_data[0]["project_id"])
project1 = Project.objects.get(id=self.project1.id)
assert project1.flags.has_sessions
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted=None,
).exists()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted__gte=now,
).exists()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project2.id,
release_id=self.release.id,
environment_id=self.environment2.id,
adopted__gte=now,
).exists()
def test_simple_no_sessions(self) -> None:
now = timezone.now()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted=None,
).exists()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project2.id,
release_id=self.release.id,
environment_id=self.environment2.id,
adopted=None,
).exists()
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted__gte=now,
).exists()
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project2.id,
release_id=self.release.id,
environment_id=self.environment2.id,
adopted__gte=now,
).exists()
test_data = [
{
"org_id": [self.organization.id],
"project_id": [self.project2.id, self.project1.id],
},
]
process_projects_with_sessions(test_data[0]["org_id"][0], test_data[0]["project_id"])
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted=None,
).exists()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project2.id,
release_id=self.release.id,
environment_id=self.environment2.id,
adopted=None,
).exists()
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted__gte=now,
).exists()
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project2.id,
release_id=self.release.id,
environment_id=self.environment2.id,
adopted__gte=now,
).exists()
def test_release_is_unadopted_with_sessions(self) -> None:
# Releases that are returned with sessions but no longer meet the threshold get unadopted
self.bulk_store_sessions([self.build_session(project_id=self.project1) for _ in range(1)])
self.bulk_store_sessions(
[
self.build_session(project_id=self.project2, environment=self.environment2)
for _ in range(11)
]
)
self.bulk_store_sessions(
[self.build_session(project_id=self.project1, release=self.release2) for _ in range(20)]
)
now = timezone.now()
self.rpe.update(adopted=now)
self.rpe1.update(adopted=now)
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted=now,
unadopted=None,
).exists()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release2.id,
environment_id=self.environment.id,
adopted=now,
unadopted=None,
).exists()
test_data = [
{
"org_id": [self.organization.id],
"project_id": [self.project2.id, self.project1.id],
},
]
process_projects_with_sessions(test_data[0]["org_id"][0], test_data[0]["project_id"])
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted=now,
unadopted__gte=now,
).exists()
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release2.id,
environment_id=self.environment.id,
adopted=now,
unadopted__gte=now,
).exists()
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project2.id,
release_id=self.release.id,
environment_id=self.environment2.id,
adopted=now,
unadopted__gte=now,
).exists()
# Make sure re-adopting works
self.bulk_store_sessions([self.build_session(project_id=self.project1) for _ in range(50)])
time.sleep(1)
process_projects_with_sessions(test_data[0]["org_id"][0], test_data[0]["project_id"])
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted=now, # doesn't get updated, unadopted just gets set to null
unadopted=None,
).exists()
def test_release_is_unadopted_without_sessions(self) -> None:
# This test should verify that releases that have no sessions (i.e. no result from snuba)
# get marked as unadopted
now = timezone.now()
self.rpe.update(adopted=now)
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted=now,
unadopted=None,
).exists()
test_data = [
{
"org_id": [self.organization.id],
"project_id": [self.project2.id, self.project1.id],
},
]
process_projects_with_sessions(test_data[0]["org_id"][0], test_data[0]["project_id"])
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted=now,
unadopted__gte=now,
).exists()
def test_multi_proj_env_release_counter(self) -> None:
self.bulk_store_sessions(
[
self.build_session(
project_id=self.project1,
)
for _ in range(11)
]
)
self.bulk_store_sessions(
[
self.build_session(project_id=self.project2, environment=self.environment2)
for _ in range(1)
]
)
self.bulk_store_sessions(
[self.build_session(project_id=self.project1, release=self.release2) for _ in range(1)]
)
self.bulk_store_sessions(
[self.build_session(project_id=self.project1, release=self.release3) for _ in range(1)]
)
now = timezone.now()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted=None,
).exists()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project2.id,
release_id=self.release.id,
environment_id=self.environment2.id,
adopted=None,
).exists()
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted__gte=now,
).exists()
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project2.id,
release_id=self.release.id,
environment_id=self.environment2.id,
adopted__gte=now,
).exists()
test_data = [
{
"org_id": [self.organization.id],
"project_id": [self.project2.id, self.project1.id],
},
]
process_projects_with_sessions(test_data[0]["org_id"][0], test_data[0]["project_id"])
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted=None,
).exists()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted__gte=now,
).exists()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project2.id,
release_id=self.release.id,
environment_id=self.environment2.id,
adopted__gte=now,
).exists()
def test_monitor_release_adoption(self) -> None:
now = timezone.now()
self.org2 = self.create_organization(
name="Yet Another Test Org",
owner=self.user,
)
self.org2_project = self.create_project(organization=self.org2)
self.org2_project.update(flags=F("flags").bitor(Project.flags.has_releases))
self.org2_release = self.create_release(project=self.org2_project, version="org@2.0.0")
self.org2_environment = self.create_environment(name="yae", project=self.org2_project)
self.org2_rpe = ReleaseProjectEnvironment.objects.create(
project_id=self.org2_project.id,
release_id=self.org2_release.id,
environment_id=self.org2_environment.id,
)
self.bulk_store_sessions(
[
self.build_session(
org_id=self.org2,
project_id=self.org2_project,
release=self.org2_release,
environment=self.org2_environment,
)
for _ in range(20)
]
)
# Tests the scheduled task to ensure it properly processes each org
self.bulk_store_sessions(
[
self.build_session(
project_id=self.project1,
)
for _ in range(11)
]
)
self.bulk_store_sessions(
[
self.build_session(project_id=self.project2, environment=self.environment2)
for _ in range(1)
]
)
with self.tasks():
monitor_release_adoption()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment_id=self.environment.id,
adopted__gte=now,
unadopted=None,
).exists()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.org2_project.id,
release_id=self.org2_release.id,
environment_id=self.org2_environment.id,
adopted__gte=now,
unadopted=None,
).exists()
def test_missing_rpe_is_created(self) -> None:
self.bulk_store_sessions(
[
self.build_session(
project_id=self.project1, release=self.release2, environment="somenvname"
)
for _ in range(20)
]
)
self.bulk_store_sessions(
[
self.build_session(project_id=self.project1, release=self.release2, environment="")
for _ in range(20)
]
)
now = timezone.now()
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release.id,
environment__name="somenvname",
).exists()
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release2.id,
environment__name="",
).exists()
test_data = [
{
"org_id": [self.organization.id],
"project_id": [self.project2.id, self.project1.id],
},
]
# This will make the appropriate models (Environment, ReleaseProject, ReleaseEnvironment and ReleaseProjectEnvironment)
process_projects_with_sessions(test_data[0]["org_id"][0], test_data[0]["project_id"])
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release2.id,
environment__name="somenvname",
adopted__gte=now,
).exists()
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release_id=self.release2.id,
environment__name="",
).exists()
def test_has_releases_is_set(self) -> None:
no_release_project = self.create_project()
assert not no_release_project.flags.has_releases
self.bulk_store_sessions(
[
self.build_session(
project_id=no_release_project, release=self.release2, environment="somenvname"
)
]
)
process_projects_with_sessions(no_release_project.organization_id, [no_release_project.id])
no_release_project.refresh_from_db()
assert no_release_project.flags.has_releases
def test_no_env(self) -> None:
no_env_project = self.create_project()
assert not no_env_project.flags.has_releases
# If environment is None, we shouldn't make any changes
self.bulk_store_sessions(
[self.build_session(project_id=no_env_project, release=self.release2, environment=None)]
)
process_projects_with_sessions(no_env_project.organization_id, [no_env_project.id])
no_env_project.refresh_from_db()
assert not no_env_project.flags.has_releases
def test_updates_last_seen_on_health_data(self) -> None:
# Set last_seen sufficiently in the past so it qualifies for an update (>60s old)
past = timezone.now() - timedelta(minutes=5)
self.rpe.update(last_seen=past)
# Ingest a session for the same project/release/environment
self.bulk_store_sessions(
[
self.build_session(
project_id=self.project1,
release=self.release,
environment=self.environment,
)
]
)
before_call = timezone.now()
# Disable flag must be False to allow updates in this test
# Patch sampling to ensure deterministic update during test
with (
mock.patch("sentry.release_health.tasks.LAST_SEEN_UPDATE_SAMPLE_RATE", 1.0),
self.options({"release-health.disable-release-last-seen-update": False}),
):
process_projects_with_sessions(self.organization.id, [self.project1.id])
updated = ReleaseProjectEnvironment.objects.get(id=self.rpe.id)
assert updated.last_seen >= before_call
| BaseTestReleaseMonitor |
python | getsentry__sentry | src/sentry/api/endpoints/organization_search_details.py | {
"start": 1501,
"end": 4224
} | class ____(OrganizationEndpoint):
owner = ApiOwner.UNOWNED
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
"PUT": ApiPublishStatus.PRIVATE,
}
permission_classes = (OrganizationSearchEditPermission,)
def convert_args(self, request: Request, organization_id_or_slug, search_id, *args, **kwargs):
(args, kwargs) = super().convert_args(request, organization_id_or_slug, *args, **kwargs)
# Only allow users to delete their own personal searches OR
# organization level searches
org_search = Q(visibility=Visibility.ORGANIZATION)
personal_search = Q(owner_id=request.user.id, visibility=Visibility.OWNER)
try:
search = SavedSearch.objects.get(
org_search | personal_search,
organization=kwargs["organization"],
id=search_id,
)
except SavedSearch.DoesNotExist:
raise ResourceDoesNotExist
self.check_object_permissions(request, search)
kwargs["search"] = search
return (args, kwargs)
def put(self, request: Request, organization: Organization, search: SavedSearch) -> Response:
"""
Updates a saved search
"""
if request.access.has_scope("org:write"):
serializer: BaseOrganizationSearchSerializer = OrganizationSearchAdminSerializer(
data=request.data
)
else:
serializer = OrganizationSearchMemberSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = serializer.validated_data
if (
SavedSearch.objects
# Query duplication for pinned searches is fine, exlcuded these
.exclude(visibility=Visibility.OWNER_PINNED)
.exclude(id=search.id)
.filter(Q(is_global=True) | Q(organization=organization), query=result["query"])
.exists()
):
return Response(
{"detail": "Query {} already exists".format(result["query"])}, status=400
)
search.update(**result)
return Response(serialize(search, request.user))
def delete(self, request: Request, organization: Organization, search: SavedSearch) -> Response:
"""
Permanently remove a saved search.
"""
search.delete()
analytics.record(
OrganizationSavedSearchDeletedEvent(
search_type=SearchType(search.type).name,
org_id=organization.id,
query=search.query,
)
)
return Response(status=204)
| OrganizationSearchDetailsEndpoint |
python | huggingface__transformers | src/transformers/models/detr/image_processing_detr.py | {
"start": 27942,
"end": 76914
} | class ____(BaseImageProcessor):
r"""
Constructs a Detr image processor.
Args:
format (`str`, *optional*, defaults to `"coco_detection"`):
Data format of the annotations. One of "coco_detection" or "coco_panoptic".
do_resize (`bool`, *optional*, defaults to `True`):
Controls whether to resize the image's `(height, width)` dimensions to the specified `size`. Can be
overridden by the `do_resize` parameter in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`):
Size of the image's `(height, width)` dimensions after resizing. Can be overridden by the `size` parameter
in the `preprocess` method. Available options are:
- `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
Do NOT keep the aspect ratio.
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
less or equal to `longest_edge`.
- `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
`max_width`.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize (`bool`, *optional*, defaults to True):
Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
`preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one
for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_annotations (`bool`, *optional*, defaults to `True`):
Controls whether to convert the annotations to the format expected by the DETR model. Converts the
bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`.
Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `True`):
Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess`
method. If `True`, padding will be applied to the bottom and right of the image with zeros.
If `pad_size` is provided, the image will be padded to the specified dimensions.
Otherwise, the image will be padded to the maximum height and width of the batch.
pad_size (`dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
"""
model_input_names = ["pixel_values", "pixel_mask"]
valid_kwargs = DetrImageProcessorKwargs
def __init__(
self,
format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_annotations: Optional[bool] = None,
do_pad: bool = True,
pad_size: Optional[dict[str, int]] = None,
**kwargs,
) -> None:
max_size = None if size is None else kwargs.pop("max_size", 1333)
size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
size = get_size_dict(size, max_size=max_size, default_to_square=False)
# Backwards compatibility
if do_convert_annotations is None:
do_convert_annotations = do_normalize
super().__init__(**kwargs)
self.format = format
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.do_convert_annotations = do_convert_annotations
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
self.do_pad = kwargs.pop("pad_and_return_pixel_mask", do_pad)
self.pad_size = pad_size
self._valid_processor_keys = [
"images",
"annotations",
"return_segmentation_masks",
"masks_path",
"do_resize",
"size",
"resample",
"do_rescale",
"rescale_factor",
"do_normalize",
"do_convert_annotations",
"image_mean",
"image_std",
"do_pad",
"pad_size",
"format",
"return_tensors",
"data_format",
"input_data_format",
]
def prepare_annotation(
self,
image: np.ndarray,
target: dict,
format: Optional[AnnotationFormat] = None,
return_segmentation_masks: Optional[bool] = None,
masks_path: Optional[Union[str, pathlib.Path]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> dict:
"""
Prepare an annotation for feeding into DETR model.
"""
format = format if format is not None else self.format
if format == AnnotationFormat.COCO_DETECTION:
return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
target = prepare_coco_detection_annotation(
image, target, return_segmentation_masks, input_data_format=input_data_format
)
elif format == AnnotationFormat.COCO_PANOPTIC:
return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
target = prepare_coco_panoptic_annotation(
image,
target,
masks_path=masks_path,
return_masks=return_segmentation_masks,
input_data_format=input_data_format,
)
else:
raise ValueError(f"Format {format} is not supported.")
return target
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BILINEAR,
data_format: Optional[ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
int, smaller edge of the image will be matched to this number.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the image's `(height, width)` dimensions after resizing. Available options are:
- `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
Do NOT keep the aspect ratio.
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
less or equal to `longest_edge`.
- `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
`max_width`.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = get_size_dict(size, max_size=None, default_to_square=False)
if "shortest_edge" in size and "longest_edge" in size:
new_size = get_resize_output_image_size(
image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format
)
elif "max_height" in size and "max_width" in size:
new_size = get_image_size_for_max_height_width(
image, size["max_height"], size["max_width"], input_data_format=input_data_format
)
elif "height" in size and "width" in size:
new_size = (size["height"], size["width"])
else:
raise ValueError(
"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
f" {size.keys()}."
)
image = resize(
image,
size=new_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
return image
    def resize_annotation(
        self,
        annotation,
        orig_size,
        size,
        resample: PILImageResampling = PILImageResampling.NEAREST,
    ) -> dict:
        """
        Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched
        to this number.

        Args:
            annotation: Annotation dict associated with the image (e.g. boxes, masks, size).
            orig_size: Size of the image before resizing.
            size: Size of the image after resizing.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.NEAREST`):
                Resampling filter used when resizing masks; NEAREST avoids interpolating label values.
        """
        # Thin wrapper around the module-level `resize_annotation` helper.
        return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)
# TODO (Amy) - update to use `rescale_factor` instead of `scale`
    def rescale(
        self,
        image: np.ndarray,
        rescale_factor: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """
        Rescale the image by the given factor. image = image * rescale_factor.

        Args:
            image (`np.ndarray`):
                Image to rescale.
            rescale_factor (`float`):
                The value to use for rescaling.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                    - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the input image. If unset, is inferred from the input image. Can be
                one of:
                    - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
        """
        # Thin wrapper around the module-level `rescale` helper.
        return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
    def normalize_annotation(self, annotation: dict, image_size: tuple[int, int]) -> dict:
        """
        Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
        `[center_x, center_y, width, height]` format and from absolute to relative pixel values.

        Args:
            annotation (`dict`): Annotation dict containing the boxes to convert.
            image_size (`tuple[int, int]`): The (height, width) used to compute relative coordinates.
        """
        # Thin wrapper around the module-level `normalize_annotation` helper.
        return normalize_annotation(annotation, image_size=image_size)
def _update_annotation_for_padded_image(
self,
annotation: dict,
input_image_size: tuple[int, int],
output_image_size: tuple[int, int],
padding,
update_bboxes,
) -> dict:
"""
Update the annotation for a padded image.
"""
new_annotation = {}
new_annotation["size"] = output_image_size
for key, value in annotation.items():
if key == "masks":
masks = value
masks = pad(
masks,
padding,
mode=PaddingMode.CONSTANT,
constant_values=0,
input_data_format=ChannelDimension.FIRST,
)
masks = safe_squeeze(masks, 1)
new_annotation["masks"] = masks
elif key == "boxes" and update_bboxes:
boxes = value
boxes *= np.asarray(
[
input_image_size[1] / output_image_size[1],
input_image_size[0] / output_image_size[0],
input_image_size[1] / output_image_size[1],
input_image_size[0] / output_image_size[0],
]
)
new_annotation["boxes"] = boxes
elif key == "size":
new_annotation["size"] = output_image_size
else:
new_annotation[key] = value
return new_annotation
    def _pad_image(
        self,
        image: np.ndarray,
        output_size: tuple[int, int],
        annotation: Optional[dict[str, Any]] = None,
        constant_values: Union[float, Iterable[float]] = 0,
        data_format: Optional[ChannelDimension] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        update_bboxes: bool = True,
    ) -> tuple[np.ndarray, Optional[dict[str, Any]]]:
        """
        Pad an image with zeros to the given size.

        The image is padded on the bottom and right only, so existing pixel coordinates
        stay valid. Returns the padded image and the (possibly updated) annotation.
        NOTE: the previous return annotation (`np.ndarray`) was wrong — this returns a tuple.
        """
        input_height, input_width = get_image_size(image, channel_dim=input_data_format)
        output_height, output_width = output_size
        # Pad bottom/right only: ((top, bottom), (left, right)).
        pad_bottom = output_height - input_height
        pad_right = output_width - input_width
        padding = ((0, pad_bottom), (0, pad_right))
        padded_image = pad(
            image,
            padding,
            mode=PaddingMode.CONSTANT,
            constant_values=constant_values,
            data_format=data_format,
            input_data_format=input_data_format,
        )
        if annotation is not None:
            annotation = self._update_annotation_for_padded_image(
                annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes
            )
        return padded_image, annotation
    def pad(
        self,
        images: list[np.ndarray],
        annotations: Optional[Union[AnnotationType, list[AnnotationType]]] = None,
        constant_values: Union[float, Iterable[float]] = 0,
        return_pixel_mask: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        update_bboxes: bool = True,
        pad_size: Optional[dict[str, int]] = None,
    ) -> BatchFeature:
        """
        Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
        in the batch and optionally returns their corresponding pixel mask.

        Args:
            images (list[`np.ndarray`]):
                Images to pad.
            annotations (`AnnotationType` or `list[AnnotationType]`, *optional*):
                Annotations to transform according to the padding that is applied to the images.
            constant_values (`float` or `Iterable[float]`, *optional*):
                The value to use for the padding if `mode` is `"constant"`.
            return_pixel_mask (`bool`, *optional*, defaults to `True`):
                Whether to return a pixel mask.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
            update_bboxes (`bool`, *optional*, defaults to `True`):
                Whether to update the bounding boxes in the annotations to match the padded images. If the
                bounding boxes have not been converted to relative coordinates and `(centre_x, centre_y, width, height)`
                format, the bounding boxes will not be updated.
            pad_size (`dict[str, int]`, *optional*):
                The size `{"height": int, "width": int}` to pad the images to. Must be larger than any image size
                provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
                height and width in the batch.
        """
        pad_size = pad_size if pad_size is not None else self.pad_size
        # Every image in the batch is padded to the same (height, width) so they can be stacked.
        if pad_size is not None:
            padded_size = (pad_size["height"], pad_size["width"])
        else:
            padded_size = get_max_height_width(images, input_data_format=input_data_format)
        annotation_list = annotations if annotations is not None else [None] * len(images)
        padded_images = []
        padded_annotations = []
        for image, annotation in zip(images, annotation_list):
            padded_image, padded_annotation = self._pad_image(
                image,
                padded_size,
                annotation,
                constant_values=constant_values,
                data_format=data_format,
                input_data_format=input_data_format,
                update_bboxes=update_bboxes,
            )
            padded_images.append(padded_image)
            padded_annotations.append(padded_annotation)
        data = {"pixel_values": padded_images}
        if return_pixel_mask:
            # Mask is 1 over the original image area, 0 over the padded region.
            masks = [
                make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format)
                for image in images
            ]
            data["pixel_mask"] = masks
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        if annotations is not None:
            encoded_inputs["labels"] = [
                BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations
            ]
        return encoded_inputs
    def preprocess(
        self,
        images: ImageInput,
        annotations: Optional[Union[AnnotationType, list[AnnotationType]]] = None,
        return_segmentation_masks: Optional[bool] = None,
        masks_path: Optional[Union[str, pathlib.Path]] = None,
        do_resize: Optional[bool] = None,
        size: Optional[dict[str, int]] = None,
        resample=None,  # PILImageResampling
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[Union[int, float]] = None,
        do_normalize: Optional[bool] = None,
        do_convert_annotations: Optional[bool] = None,
        image_mean: Optional[Union[float, list[float]]] = None,
        image_std: Optional[Union[float, list[float]]] = None,
        do_pad: Optional[bool] = None,
        format: Optional[Union[str, AnnotationFormat]] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        pad_size: Optional[dict[str, int]] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess an image or a batch of images so that it can be used by the model.

        Args:
            images (`ImageInput`):
                Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
                from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            annotations (`AnnotationType` or `list[AnnotationType]`, *optional*):
                List of annotations associated with the image or batch of images. If annotation is for object
                detection, the annotations should be a dictionary with the following keys:
                - "image_id" (`int`): The image id.
                - "annotations" (`list[Dict]`): List of annotations for an image. Each annotation should be a
                  dictionary. An image can have no annotations, in which case the list should be empty.
                If annotation is for segmentation, the annotations should be a dictionary with the following keys:
                - "image_id" (`int`): The image id.
                - "segments_info" (`list[Dict]`): List of segments for an image. Each segment should be a dictionary.
                  An image can have no segments, in which case the list should be empty.
                - "file_name" (`str`): The file name of the image.
            return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
                Whether to return segmentation masks.
            masks_path (`str` or `pathlib.Path`, *optional*):
                Path to the directory containing the segmentation masks.
            do_resize (`bool`, *optional*, defaults to self.do_resize):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to self.size):
                Size of the image's `(height, width)` dimensions after resizing. Available options are:
                    - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
                        Do NOT keep the aspect ratio.
                    - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
                        the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
                        less or equal to `longest_edge`.
                    - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
                        aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
                        `max_width`.
            resample (`PILImageResampling`, *optional*, defaults to self.resample):
                Resampling filter to use when resizing the image.
            do_rescale (`bool`, *optional*, defaults to self.do_rescale):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
                Rescale factor to use when rescaling the image.
            do_normalize (`bool`, *optional*, defaults to self.do_normalize):
                Whether to normalize the image.
            do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations):
                Whether to convert the annotations to the format expected by the model. Converts the bounding
                boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)`
                and in relative coordinates.
            image_mean (`float` or `list[float]`, *optional*, defaults to self.image_mean):
                Mean to use when normalizing the image.
            image_std (`float` or `list[float]`, *optional*, defaults to self.image_std):
                Standard deviation to use when normalizing the image.
            do_pad (`bool`, *optional*, defaults to self.do_pad):
                Whether to pad the image. If `True`, padding will be applied to the bottom and right of
                the image with zeros. If `pad_size` is provided, the image will be padded to the specified
                dimensions. Otherwise, the image will be padded to the maximum height and width of the batch.
            format (`str` or `AnnotationFormat`, *optional*, defaults to self.format):
                Format of the annotations.
            return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
                Type of tensors to return. If `None`, will return the list of images.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            pad_size (`dict[str, int]`, *optional*):
                The size `{"height": int, "width": int}` to pad the images to. Must be larger than any image size
                provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
                height and width in the batch.
        """
        # Resolve every option against the instance defaults set in __init__.
        do_resize = self.do_resize if do_resize is None else do_resize
        size = self.size if size is None else size
        size = get_size_dict(size=size, default_to_square=False)
        resample = self.resample if resample is None else resample
        do_rescale = self.do_rescale if do_rescale is None else do_rescale
        rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
        do_normalize = self.do_normalize if do_normalize is None else do_normalize
        image_mean = self.image_mean if image_mean is None else image_mean
        image_std = self.image_std if image_std is None else image_std
        do_convert_annotations = (
            self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
        )
        do_pad = self.do_pad if do_pad is None else do_pad
        pad_size = self.pad_size if pad_size is None else pad_size
        format = self.format if format is None else format
        images = make_flat_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor.")
        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
        # Here, the pad() method pads to the maximum of (width, height). It does not need to be validated.
        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )
        # Accept a single annotation dict for a single image; normalize to a list.
        if annotations is not None and isinstance(annotations, dict):
            annotations = [annotations]
        if annotations is not None and len(images) != len(annotations):
            raise ValueError(
                f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
            )
        format = AnnotationFormat(format)
        if annotations is not None:
            validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
        if (
            masks_path is not None
            and format == AnnotationFormat.COCO_PANOPTIC
            and not isinstance(masks_path, (pathlib.Path, str))
        ):
            raise ValueError(
                "The path to the directory containing the mask PNG files should be provided as a"
                f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
            )
        # All transformations expect numpy arrays
        images = [to_numpy_array(image) for image in images]
        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )
        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])
        # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
        if annotations is not None:
            prepared_images = []
            prepared_annotations = []
            for image, target in zip(images, annotations):
                target = self.prepare_annotation(
                    image,
                    target,
                    format,
                    return_segmentation_masks=return_segmentation_masks,
                    masks_path=masks_path,
                    input_data_format=input_data_format,
                )
                prepared_images.append(image)
                prepared_annotations.append(target)
            images = prepared_images
            annotations = prepared_annotations
            del prepared_images, prepared_annotations
        # transformations
        if do_resize:
            if annotations is not None:
                # Resize annotations in lockstep with their images so boxes/masks stay aligned.
                resized_images, resized_annotations = [], []
                for image, target in zip(images, annotations):
                    orig_size = get_image_size(image, input_data_format)
                    resized_image = self.resize(
                        image, size=size, resample=resample, input_data_format=input_data_format
                    )
                    resized_annotation = self.resize_annotation(
                        target, orig_size, get_image_size(resized_image, input_data_format)
                    )
                    resized_images.append(resized_image)
                    resized_annotations.append(resized_annotation)
                images = resized_images
                annotations = resized_annotations
                del resized_images, resized_annotations
            else:
                images = [
                    self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
                    for image in images
                ]
        if do_rescale:
            images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
        if do_normalize:
            images = [
                self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
            ]
        if do_convert_annotations and annotations is not None:
            annotations = [
                self.normalize_annotation(annotation, get_image_size(image, input_data_format))
                for annotation, image in zip(annotations, images)
            ]
        if do_pad:
            # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
            encoded_inputs = self.pad(
                images,
                annotations=annotations,
                return_pixel_mask=True,
                data_format=data_format,
                input_data_format=input_data_format,
                update_bboxes=do_convert_annotations,
                return_tensors=return_tensors,
                pad_size=pad_size,
            )
        else:
            images = [
                to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
                for image in images
            ]
            encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
            if annotations is not None:
                encoded_inputs["labels"] = [
                    BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
                ]
        return encoded_inputs
# inspired by https://github.com/facebookresearch/detr/blob/master/models/detr.py#L258
def post_process_object_detection(
self, outputs, threshold: float = 0.5, target_sizes: Optional[Union[TensorType, list[tuple]]] = None
):
"""
Converts the raw output of [`DetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format. Only supports PyTorch.
Args:
outputs ([`DetrObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, predictions will not be resized.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model.
"""
out_logits, out_bbox = outputs.logits, outputs.pred_boxes
if target_sizes is not None:
if len(out_logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
prob = nn.functional.softmax(out_logits, -1)
scores, labels = prob[..., :-1].max(-1)
# Convert to [x0, y0, x1, y1] format
boxes = center_to_corners_format(out_bbox)
# Convert from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
if isinstance(target_sizes, list):
img_h = torch.Tensor([i[0] for i in target_sizes])
img_w = torch.Tensor([i[1] for i in target_sizes])
else:
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
boxes = boxes * scale_fct[:, None, :]
results = []
for s, l, b in zip(scores, labels, boxes):
score = s[s > threshold]
label = l[s > threshold]
box = b[s > threshold]
results.append({"scores": score, "labels": label, "boxes": box})
return results
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple[int, int]]] = None):
"""
Converts the output of [`DetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch.
Args:
outputs ([`DetrForSegmentation`]):
Raw outputs of the model.
target_sizes (`list[tuple[int, int]]`, *optional*):
A list of tuples (`tuple[int, int]`) containing the target size (height, width) of each image in the
batch. If unset, predictions will not be resized.
Returns:
`list[torch.Tensor]`:
A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
`torch.Tensor` correspond to a semantic class id.
"""
class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width]
# Remove the null class `[..., :-1]`
masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
# Semantic segmentation logits of shape (batch_size, num_classes, height, width)
segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
batch_size = class_queries_logits.shape[0]
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if batch_size != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
semantic_segmentation = []
for idx in range(batch_size):
resized_logits = nn.functional.interpolate(
segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = segmentation.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
# inspired by https://github.com/facebookresearch/detr/blob/master/models/segmentation.py#L218
    def post_process_instance_segmentation(
        self,
        outputs,
        threshold: float = 0.5,
        mask_threshold: float = 0.5,
        overlap_mask_area_threshold: float = 0.8,
        target_sizes: Optional[list[tuple[int, int]]] = None,
        return_coco_annotation: Optional[bool] = False,
    ) -> list[dict]:
        """
        Converts the output of [`DetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch.

        Args:
            outputs ([`DetrForSegmentation`]):
                Raw outputs of the model.
            threshold (`float`, *optional*, defaults to 0.5):
                The probability score threshold to keep predicted instance masks.
            mask_threshold (`float`, *optional*, defaults to 0.5):
                Threshold to use when turning the predicted masks into binary values.
            overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
                The overlap mask area threshold to merge or discard small disconnected parts within each binary
                instance mask.
            target_sizes (`list[Tuple]`, *optional*):
                List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
                final size (height, width) of each prediction. If unset, predictions will not be resized.
            return_coco_annotation (`bool`, *optional*):
                Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE)
                format.

        Returns:
            `list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
            - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or
              `list[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to
              `True`. Set to `None` if no mask if found above `threshold`.
            - **segments_info** -- A dictionary that contains additional information on each segment.
                - **id** -- An integer representing the `segment_id`.
                - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
                - **score** -- Prediction score of segment with `segment_id`.
        """
        class_queries_logits = outputs.logits  # [batch_size, num_queries, num_classes+1]
        masks_queries_logits = outputs.pred_masks  # [batch_size, num_queries, height, width]
        batch_size = class_queries_logits.shape[0]
        # The last logit is the "no object" class, hence num_labels excludes it.
        num_labels = class_queries_logits.shape[-1] - 1
        mask_probs = masks_queries_logits.sigmoid()  # [batch_size, num_queries, height, width]
        # Predicted label and score of each query (batch_size, num_queries)
        pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
        # Loop over items in batch size
        results: list[dict[str, TensorType]] = []
        for i in range(batch_size):
            # Filter out low-scoring queries and queries predicting the "no object" class.
            mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
                mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
            )
            # No mask found
            if mask_probs_item.shape[0] <= 0:
                height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
                # All pixels set to -1 to signal "no segment".
                segmentation = torch.zeros((height, width)) - 1
                results.append({"segmentation": segmentation, "segments_info": []})
                continue
            # Get segmentation map and segment information of batch item
            target_size = target_sizes[i] if target_sizes is not None else None
            segmentation, segments = compute_segments(
                mask_probs=mask_probs_item,
                pred_scores=pred_scores_item,
                pred_labels=pred_labels_item,
                mask_threshold=mask_threshold,
                overlap_mask_area_threshold=overlap_mask_area_threshold,
                label_ids_to_fuse=[],
                target_size=target_size,
            )
            # Return segmentation map in run-length encoding (RLE) format
            if return_coco_annotation:
                segmentation = convert_segmentation_to_rle(segmentation)
            results.append({"segmentation": segmentation, "segments_info": segments})
        return results
# inspired by https://github.com/facebookresearch/detr/blob/master/models/segmentation.py#L241
def post_process_panoptic_segmentation(
self,
outputs,
threshold: float = 0.5,
mask_threshold: float = 0.5,
overlap_mask_area_threshold: float = 0.8,
label_ids_to_fuse: Optional[set[int]] = None,
target_sizes: Optional[list[tuple[int, int]]] = None,
) -> list[dict]:
"""
Converts the output of [`DetrForSegmentation`] into image panoptic segmentation predictions. Only supports
PyTorch.
Args:
outputs ([`DetrForSegmentation`]):
The outputs from [`DetrForSegmentation`].
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
label_ids_to_fuse (`Set[int]`, *optional*):
The labels in this state will have all their instances be fused together. For instance we could say
there can only be one sky in an image, but several persons, so the label ID for sky would be in that
set, but not the one for person.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction in batch. If unset, predictions will not be resized.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id` or
`None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to
the corresponding `target_sizes` entry.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- an integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
Multiple instances of the same class / label were fused and assigned a single `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
if label_ids_to_fuse is None:
logger.warning_once("`label_ids_to_fuse` unset. No instance will be fused.")
label_ids_to_fuse = set()
class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width]
batch_size = class_queries_logits.shape[0]
num_labels = class_queries_logits.shape[-1] - 1
mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
# Predicted label and score of each query (batch_size, num_queries)
pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
# Loop over items in batch size
results: list[dict[str, TensorType]] = []
for i in range(batch_size):
mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
)
# No mask found
if mask_probs_item.shape[0] <= 0:
height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
segmentation = torch.zeros((height, width)) - 1
results.append({"segmentation": segmentation, "segments_info": []})
continue
# Get segmentation map and segment information of batch item
target_size = target_sizes[i] if target_sizes is not None else None
segmentation, segments = compute_segments(
mask_probs=mask_probs_item,
pred_scores=pred_scores_item,
pred_labels=pred_labels_item,
mask_threshold=mask_threshold,
overlap_mask_area_threshold=overlap_mask_area_threshold,
label_ids_to_fuse=label_ids_to_fuse,
target_size=target_size,
)
results.append({"segmentation": segmentation, "segments_info": segments})
return results
__all__ = ["DetrImageProcessor"]
| DetrImageProcessor |
python | joke2k__faker | faker/providers/color/es/__init__.py | {
"start": 98,
"end": 6166
} | class ____(ColorProvider):
"""Implement color provider for ``es`` locale."""
all_colors = OrderedDict(
(
("Agua marina medio", "#66CDAA"),
("Agua-marina", "#7FFFD4"),
("Almendra blanqueado", "#FFEBCD"),
("Amarillo", "#FFFF00"),
("Amarillo claro", "#FFFFE0"),
("Amarillo dorado", "#DAA520"),
("Amarillo dorado claro", "#FAFAD2"),
("Amarillo dorado oscuro", "#B8860B"),
("Amarillo dorado pálido", "#EEE8AA"),
("Amarillo trigo", "#F5DEB3"),
("Amarillo verde", "#9ACD32"),
("Azul", "#0000FF"),
("Azul Alicia", "#F0F8FF"),
("Azul acero", "#4682B4"),
("Azul acero claro", "#B0C4DE"),
("Azul anciano", "#6495ED"),
("Azul azur", "#F0FFFF"),
("Azul cadete", "#5F9EA0"),
("Azul cielo", "#87CEEB"),
("Azul cielo claro", "#87CEFA"),
("Azul cielo profundo", "#00BFFF"),
("Azul claro", "#ADD8E6"),
("Azul lona", "#1E90FF"),
("Azul marino", "#000080"),
("Azul medianoche", "#191970"),
("Azul medio", "#0000CD"),
("Azul oscuro", "#00008B"),
("Azul pizarra", "#6A5ACD"),
("Azul pizarra medio", "#7B68EE"),
("Azul pizarra oscuro", "#483D8B"),
("Azul polvo", "#B0E0E6"),
("Azul real", "#4169E1"),
("Azul violeta", "#8A2BE2"),
("Beige", "#F5F5DC"),
("Beige antiguo", "#FAEBD7"),
("Beige limón", "#FFFACD"),
("Beige melocotón", "#FFDAB9"),
("Beige mocasín", "#FFE4B5"),
("Beige papaya", "#FFEFD5"),
("Bisque", "#FFE4C4"),
("Blanco", "#FFFFFF"),
("Blanco concha", "#FFF5EE"),
("Blanco encaje", "#FDF5E6"),
("Blanco fantasma", "#F8F8FF"),
("Blanco floral", "#FFFAF0"),
("Blanco humo", "#F5F5F5"),
("Blanco lavanda", "#FFF0F5"),
("Blanco lino", "#FAF0E6"),
("Blanco menta", "#F5FFFA"),
("Blanco navajo", "#FFDEAD"),
("Blanco nieve", "#FFFAFA"),
("Caqui", "#6B8E23"),
("Caqui oscuro", "#BDB76B"),
("Chartreuse", "#7FFF00"),
("Chocolate", "#D2691E"),
("Cian", "#00FFFF"),
("Cian clarto", "#E0FFFF"),
("Ciruela", "#DDA0DD"),
("Coral", "#FF7F50"),
("Coral claro", "#F08080"),
("Amarillo maíz dulce", "#FFF8DC"),
("Cyan oscuro", "#008B8B"),
("Fucsia", "#FF00FF"),
("Granate", "#800000"),
("Gris", "#808080"),
("Gris claro", "#D3D3D3"),
("Gris gainsboro (Estaño)", "#DCDCDC"),
("Gris mate", "#696969"),
("Gris oscuro", "#A9A9A9"),
("Gris pizarra", "#708090"),
("Gris pizarra claro", "#778899"),
("Gris pizarra oscuro", "#2F4F4F"),
("Lavanda", "#E6E6FA"),
("Lima", "#00FF00"),
("Magenta", "#FF00FF"),
("Magenta oscuro", "#8B008B"),
("Marfil", "#FFFFF0"),
("Marrón", "#A52A2A"),
("Marrón arena", "#F4A460"),
("Marrón caqui", "#F0E68C"),
("Marrón cuero", "#8B4513"),
("Marrón madera rústica", "#DEB887"),
("Marrón perú", "#CD853F"),
("Marrón rojizo", "#D2B48C"),
("Marrón rosado", "#BC8F8F"),
("Marrón siena", "#A0522D"),
("Melón dulce", "#F0FFF0"),
("Naranja", "#FFA500"),
("Naranja oscuro", "#FF8C00"),
("Negro", "#000000"),
("Oliva", "#808000"),
("Oro", "#FFD700"),
("Orquídea", "#DA70D6"),
("Orquídea medio", "#BA55D3"),
("Orquídea púrpura oscuro", "#9932CC"),
("Plata", "#C0C0C0"),
("Púrpura", "#800080"),
("Púrpura medio", "#9370DB"),
("Rojo", "#FF0000"),
("Rojo anaranjado", "#FF4500"),
("Rojo carmesí", "#DC143C"),
("Rojo indio", "#CD5C5C"),
("Rojo ladrillo", "#B22222"),
("Rojo oscuro", "#8B0000"),
("Rojo tomate", "#FF6347"),
("Rojo violeta medio", "#C71585"),
("Rosa", "#FFC0CB"),
("Rosa brumoso", "#FFE4E1"),
("Rosa caliente", "#FF69B4"),
("Rosa claro", "#FFB6C1"),
("Rosa profundo", "#FF1493"),
("Salmón", "#FA8072"),
("Salmón claro", "#FFA07A"),
("Salmón oscuro", "#E9967A"),
("Turquesa", "#40E0D0"),
("Turquesa medio", "#48D1CC"),
("Turquesa oscuro", "#00CED1"),
("Turquesa pálido", "#AFEEEE"),
("Verde", "#008000"),
("Verde azulado", "#008080"),
("Verde bosque", "#228B22"),
("Verde claro", "#90EE90"),
("Verde lima", "#32CD32"),
("Verde limón", "#ADFF2F"),
("Verde mar", "#2E8B57"),
("Verde mar claro", "#20B2AA"),
("Verde mar medio", "#3CB371"),
("Verde mar oscuro", "#8FBC8F"),
("Verde oliva oscuro", "#556B2F"),
("Verde oscuro", "#006400"),
("Verde prado", "#7CFC00"),
("Verde primavera", "#00FF7F"),
("Verde primavera medio", "#00FA9A"),
("Verde pálido", "#98FB98"),
("Violeta", "#EE82EE"),
("Violeta cardo", "#D8BFD8"),
("Violeta oscuro", "#9400D3"),
("Violeta sonrojado pálido", "#DB7093"),
("Índigo", "#4B0082"),
)
)
safe_colors = (
"negro",
"budeos",
"verde",
"rojo",
"violeta",
"verde azulado",
"azul",
"plata",
"gris",
"amarilo",
"fucsia",
"cian",
"blanco",
)
| Provider |
python | huggingface__transformers | tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py | {
"start": 20190,
"end": 20366
} | class ____(GPTBigCodeModelTest):
# `parameterized_class` breaks with mixins, so we use inheritance instead
multi_query = False
@slow
@require_torch
| GPTBigCodeMHAModelTest |
python | huggingface__transformers | tests/models/mllama/test_modeling_mllama.py | {
"start": 4492,
"end": 9591
} | class ____:
def __init__(
self,
parent,
ignore_index=-100,
image_token_index=4,
seq_length=7,
is_training=True,
text_config={
"model_type": "mllama",
"vocab_size": 99,
"hidden_size": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"num_key_value_heads": 4,
"intermediate_size": 37,
"hidden_act": "gelu",
"max_position_embeddings": 512,
"initializer_range": 0.02,
"rope_parameters": {"rope_type": "default"},
"pad_token_id": 0,
"bos_token_id": 1,
"eos_token_id": 2,
"cross_attention_layers": [1],
},
vision_config={
"image_size": 30,
"patch_size": 2,
"num_channels": 3,
"hidden_size": 16,
"intermediate_layers_indices": [0],
"vision_output_dim": 32,
"projection_dim": 32,
"num_hidden_layers": 2,
"num_global_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 37,
"dropout": 0.1,
"initializer_range": 0.02,
"supported_aspect_ratios": [[1, 1], [1, 2], [1, 3], [1, 4], [2, 1], [2, 2], [3, 1], [4, 1]],
},
):
self.parent = parent
self.is_training = is_training
self.ignore_index = ignore_index
self.image_token_index = image_token_index
self.text_config = text_config
self.vision_config = vision_config
self.seq_length = seq_length
self.num_hidden_layers = text_config["num_hidden_layers"]
self.vocab_size = text_config["vocab_size"]
self.hidden_size = text_config["hidden_size"]
self.num_attention_heads = text_config["num_attention_heads"]
self.pad_token_id = self.text_config["pad_token_id"]
self.batch_size = 3
self.num_channels = 3
self.image_size = 224
self.max_num_images = 1
self.max_image_tiles = 4
self.image_length = 904
def get_config(self):
return MllamaConfig(
text_config=self.text_config,
vision_config=self.vision_config,
image_token_index=self.image_token_index,
)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[
self.batch_size,
self.max_num_images,
self.max_image_tiles,
self.vision_config["num_channels"],
self.vision_config["image_size"],
self.vision_config["image_size"],
]
)
aspect_ratio_ids = torch.tensor([[6] * self.batch_size], device=torch_device).transpose(0, 1)
aspect_ratio_mask = torch.ones(self.batch_size, self.max_num_images, self.max_image_tiles)
config = self.get_config()
return config, pixel_values, aspect_ratio_ids, aspect_ratio_mask
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, aspect_ratio_ids, aspect_ratio_mask = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1
attention_mask = input_ids.ne(1).to(torch_device)
aspect_ratio_mask = aspect_ratio_mask.to(torch_device)
cross_attention_mask = torch.ones(
(self.batch_size, self.seq_length, self.max_num_images, self.max_image_tiles), device=torch_device
)
input_ids[input_ids == config.image_token_index] = self.pad_token_id
input_ids[:, 1] = config.image_token_index
inputs_dict = {
"pixel_values": pixel_values,
"aspect_ratio_ids": aspect_ratio_ids,
"input_ids": input_ids,
"attention_mask": attention_mask,
"aspect_ratio_mask": aspect_ratio_mask,
"cross_attention_mask": cross_attention_mask,
"use_cache": True,
}
return config, inputs_dict
def create_and_check_mllama_model_fp16_forward(self, config, input_ids, pixel_values, attention_mask):
model = MllamaForConditionalGeneration(config=config)
model.to(torch_device)
model.eval()
with torch.autocast(device_type="cuda", dtype=torch.float16):
logits = model(
input_ids=input_ids,
attention_mask=attention_mask,
pixel_values=pixel_values.to(torch.bfloat16),
return_dict=True,
)["logits"]
self.parent.assertFalse(torch.isnan(logits).any().item())
@unittest.skip("Mllama applies key/query norm which doesn't work with packing")
def test_eager_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip("Mllama applies key/query norm which doesn't work with packing")
def test_sdpa_padding_matches_padding_free_with_position_ids(self):
pass
@require_torch
| MllamaVisionText2TextModelTester |
python | zarr-developers__zarr-python | src/zarr/codecs/transpose.py | {
"start": 922,
"end": 4079
} | class ____(ArrayArrayCodec):
"""Transpose codec"""
is_fixed_size = True
order: tuple[int, ...]
def __init__(self, *, order: Iterable[int]) -> None:
order_parsed = parse_transpose_order(order)
object.__setattr__(self, "order", order_parsed)
@classmethod
def from_dict(cls, data: dict[str, JSON]) -> Self:
_, configuration_parsed = parse_named_configuration(data, "transpose")
return cls(**configuration_parsed) # type: ignore[arg-type]
def to_dict(self) -> dict[str, JSON]:
return {"name": "transpose", "configuration": {"order": tuple(self.order)}}
def validate(
self,
shape: tuple[int, ...],
dtype: ZDType[TBaseDType, TBaseScalar],
chunk_grid: ChunkGrid,
) -> None:
if len(self.order) != len(shape):
raise ValueError(
f"The `order` tuple must have as many entries as there are dimensions in the array. Got {self.order}."
)
if len(self.order) != len(set(self.order)):
raise ValueError(
f"There must not be duplicates in the `order` tuple. Got {self.order}."
)
if not all(0 <= x < len(shape) for x in self.order):
raise ValueError(
f"All entries in the `order` tuple must be between 0 and the number of dimensions in the array. Got {self.order}."
)
def evolve_from_array_spec(self, array_spec: ArraySpec) -> Self:
ndim = array_spec.ndim
if len(self.order) != ndim:
raise ValueError(
f"The `order` tuple must have as many entries as there are dimensions in the array. Got {self.order}."
)
if len(self.order) != len(set(self.order)):
raise ValueError(
f"There must not be duplicates in the `order` tuple. Got {self.order}."
)
if not all(0 <= x < ndim for x in self.order):
raise ValueError(
f"All entries in the `order` tuple must be between 0 and the number of dimensions in the array. Got {self.order}."
)
order = tuple(self.order)
if order != self.order:
return replace(self, order=order)
return self
def resolve_metadata(self, chunk_spec: ArraySpec) -> ArraySpec:
return ArraySpec(
shape=tuple(chunk_spec.shape[self.order[i]] for i in range(chunk_spec.ndim)),
dtype=chunk_spec.dtype,
fill_value=chunk_spec.fill_value,
config=chunk_spec.config,
prototype=chunk_spec.prototype,
)
async def _decode_single(
self,
chunk_array: NDBuffer,
chunk_spec: ArraySpec,
) -> NDBuffer:
inverse_order = np.argsort(self.order)
return chunk_array.transpose(inverse_order)
async def _encode_single(
self,
chunk_array: NDBuffer,
_chunk_spec: ArraySpec,
) -> NDBuffer | None:
return chunk_array.transpose(self.order)
def compute_encoded_size(self, input_byte_length: int, _chunk_spec: ArraySpec) -> int:
return input_byte_length
| TransposeCodec |
python | sympy__sympy | sympy/assumptions/predicates/ntheory.py | {
"start": 88,
"end": 931
} | class ____(Predicate):
"""
Prime number predicate.
Explanation
===========
``ask(Q.prime(x))`` is true iff ``x`` is a natural number greater
than 1 that has no positive divisors other than ``1`` and the
number itself.
Examples
========
>>> from sympy import Q, ask
>>> ask(Q.prime(0))
False
>>> ask(Q.prime(1))
False
>>> ask(Q.prime(2))
True
>>> ask(Q.prime(20))
False
>>> ask(Q.prime(-3))
False
"""
name = 'prime'
handler = Dispatcher(
"PrimeHandler",
doc=("Handler for key 'prime'. Test that an expression represents a prime"
" number. When the expression is an exact number, the result (when True)"
" is subject to the limitations of isprime() which is used to return the "
"result.")
)
| PrimePredicate |
python | mahmoud__glom | glom/core.py | {
"start": 64821,
"end": 65059
} | class ____(_AbstractIterableBase):
__metaclass__ = ABCMeta
@classmethod
def __subclasshook__(cls, C):
if C in (str, bytes):
return False
return callable(getattr(C, "__iter__", None))
| _AbstractIterable |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 8522,
"end": 8780
} | class ____(GestureTool):
''' A base class for tools that respond to drag events.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
@abstract
| Drag |
python | jazzband__django-oauth-toolkit | oauth2_provider/views/mixins.py | {
"start": 7860,
"end": 8561
} | class ____(OAuthLibMixin):
"""
Helper mixin that implements OAuth2 protection on request dispatch,
specially useful for Django Generic Views
"""
def dispatch(self, request, *args, **kwargs):
# let preflight OPTIONS requests pass
if request.method.upper() == "OPTIONS":
return super().dispatch(request, *args, **kwargs)
# check if the request is valid and the protected resource may be accessed
valid, r = self.verify_request(request)
if valid:
request.resource_owner = r.user
return super().dispatch(request, *args, **kwargs)
else:
return HttpResponseForbidden()
| ProtectedResourceMixin |
python | eventlet__eventlet | tests/websocket_new_test.py | {
"start": 8268,
"end": 22838
} | class ____(tests.wsgi_test._TestBase):
TEST_TIMEOUT = 5
def set_site(self):
self.site = wsapp
def setUp(self):
super().setUp()
self.connect = '\r\n'.join([
"GET /echo HTTP/1.1",
"Upgrade: websocket",
"Connection: upgrade",
"Host: %s:%s" % self.server_addr,
"Origin: http://%s:%s" % self.server_addr,
"Sec-WebSocket-Version: 13",
"Sec-WebSocket-Key: d9MXuOzlVQ0h+qRllvSCIg==",
"Sec-WebSocket-Extensions: %s",
'\r\n'
])
self.handshake_re = re.compile('\r\n'.join([
'HTTP/1.1 101 Switching Protocols',
'Upgrade: websocket',
'Connection: Upgrade',
'Sec-WebSocket-Accept: ywSyWXCPNsDxLrQdQrn5RFNRfBU=',
'Sec-WebSocket-Extensions: (.+)'
'\r\n',
]).encode())
@staticmethod
def get_deflated_reply(ws):
msg = ws._recv_frame(None)
msg.decompressor = None
return msg.getvalue()
def test_accept_basic_deflate_ext_13(self):
for extension in [
'permessage-deflate',
'PeRMessAGe-dEFlaTe',
]:
sock = eventlet.connect(self.server_addr)
sock.sendall((self.connect % extension).encode())
result = sock.recv(1024)
# The server responds the correct Websocket handshake
# print('Extension offer: %r' % extension)
match = re.match(self.handshake_re, result)
assert match is not None
assert len(match.groups()) == 1
def test_accept_deflate_ext_context_takeover_13(self):
for extension in [
'permessage-deflate;CLient_No_conteXT_TAkeOver',
'permessage-deflate; SerVER_No_conteXT_TAkeOver',
'permessage-deflate; server_no_context_takeover; client_no_context_takeover',
]:
sock = eventlet.connect(self.server_addr)
sock.sendall((self.connect % extension).encode())
result = sock.recv(1024)
# The server responds the correct Websocket handshake
# print('Extension offer: %r' % extension)
match = re.match(self.handshake_re, result)
assert match is not None
assert len(match.groups()) == 1
offered_ext_parts = (ex.strip().lower() for ex in extension.split(';'))
accepted_ext_parts = match.groups()[0].decode().split('; ')
assert all(oep in accepted_ext_parts for oep in offered_ext_parts)
def test_accept_deflate_ext_window_max_bits_13(self):
for extension_string, vals in [
('permessage-deflate; client_max_window_bits', [15]),
('permessage-deflate; Server_Max_Window_Bits = 11', [11]),
('permessage-deflate; server_max_window_bits; '
'client_max_window_bits=9', [15, 9])
]:
sock = eventlet.connect(self.server_addr)
sock.sendall((self.connect % extension_string).encode())
result = sock.recv(1024)
# The server responds the correct Websocket handshake
# print('Extension offer: %r' % extension_string)
match = re.match(self.handshake_re, result)
assert match is not None
assert len(match.groups()) == 1
offered_parts = [part.strip().lower() for part in extension_string.split(';')]
offered_parts_names = [part.split('=')[0].strip() for part in offered_parts]
offered_parts_dict = dict(zip(offered_parts_names[1:], vals))
accepted_ext_parts = match.groups()[0].decode().split('; ')
assert accepted_ext_parts[0] == 'permessage-deflate'
for param, val in (part.split('=') for part in accepted_ext_parts[1:]):
assert int(val) == offered_parts_dict[param]
def test_reject_max_window_bits_out_of_range_13(self):
extension_string = ('permessage-deflate; client_max_window_bits=7,'
'permessage-deflate; server_max_window_bits=16, '
'permessage-deflate; client_max_window_bits=16; '
'server_max_window_bits=7, '
'permessage-deflate')
sock = eventlet.connect(self.server_addr)
sock.sendall((self.connect % extension_string).encode())
result = sock.recv(1024)
# The server responds the correct Websocket handshake
# print('Extension offer: %r' % extension_string)
match = re.match(self.handshake_re, result)
assert match.groups()[0] == b'permessage-deflate'
def test_server_compress_with_context_takeover_13(self):
extensions_string = 'permessage-deflate; client_no_context_takeover;'
extensions = {'permessage-deflate': {
'client_no_context_takeover': True,
'server_no_context_takeover': False}}
sock = eventlet.connect(self.server_addr)
sock.sendall((self.connect % extensions_string).encode())
sock.recv(1024)
ws = websocket.RFC6455WebSocket(sock, {}, client=True,
extensions=extensions)
# Deflated values taken from Section 7.2.3 of RFC 7692
# https://tools.ietf.org/html/rfc7692#section-7.2.3
ws.send(b'Hello')
msg1 = self.get_deflated_reply(ws)
assert msg1 == b'\xf2\x48\xcd\xc9\xc9\x07\x00'
ws.send(b'Hello')
msg2 = self.get_deflated_reply(ws)
assert msg2 == b'\xf2\x00\x11\x00\x00'
ws.close()
eventlet.sleep(0.01)
def test_server_compress_no_context_takeover_13(self):
extensions_string = 'permessage-deflate; server_no_context_takeover;'
extensions = {'permessage-deflate': {
'client_no_context_takeover': False,
'server_no_context_takeover': True}}
sock = eventlet.connect(self.server_addr)
sock.sendall((self.connect % extensions_string).encode())
sock.recv(1024)
ws = websocket.RFC6455WebSocket(sock, {}, client=True,
extensions=extensions)
masked_msg1 = ws._pack_message(b'Hello', masked=True)
ws._send(masked_msg1)
masked_msg2 = ws._pack_message(b'Hello', masked=True)
ws._send(masked_msg2)
# Verify that client uses context takeover by checking
# that the second message
assert len(masked_msg2) < len(masked_msg1)
# Verify that server drops context between messages
# Deflated values taken from Section 7.2.3 of RFC 7692
# https://tools.ietf.org/html/rfc7692#section-7.2.3
reply_msg1 = self.get_deflated_reply(ws)
assert reply_msg1 == b'\xf2\x48\xcd\xc9\xc9\x07\x00'
reply_msg2 = self.get_deflated_reply(ws)
assert reply_msg2 == b'\xf2\x48\xcd\xc9\xc9\x07\x00'
def test_client_compress_with_context_takeover_13(self):
extensions = {'permessage-deflate': {
'client_no_context_takeover': False,
'server_no_context_takeover': True}}
ws = websocket.RFC6455WebSocket(None, {}, client=True,
extensions=extensions)
# Deflated values taken from Section 7.2.3 of RFC 7692
# modified opcode to Binary instead of Text
# https://tools.ietf.org/html/rfc7692#section-7.2.3
packed_msg_1 = ws._pack_message(b'Hello', masked=False)
assert packed_msg_1 == b'\xc2\x07\xf2\x48\xcd\xc9\xc9\x07\x00'
packed_msg_2 = ws._pack_message(b'Hello', masked=False)
assert packed_msg_2 == b'\xc2\x05\xf2\x00\x11\x00\x00'
eventlet.sleep(0.01)
def test_client_compress_no_context_takeover_13(self):
extensions = {'permessage-deflate': {
'client_no_context_takeover': True,
'server_no_context_takeover': False}}
ws = websocket.RFC6455WebSocket(None, {}, client=True,
extensions=extensions)
# Deflated values taken from Section 7.2.3 of RFC 7692
# modified opcode to Binary instead of Text
# https://tools.ietf.org/html/rfc7692#section-7.2.3
packed_msg_1 = ws._pack_message(b'Hello', masked=False)
assert packed_msg_1 == b'\xc2\x07\xf2\x48\xcd\xc9\xc9\x07\x00'
packed_msg_2 = ws._pack_message(b'Hello', masked=False)
assert packed_msg_2 == b'\xc2\x07\xf2\x48\xcd\xc9\xc9\x07\x00'
def test_compressed_send_recv_13(self):
extensions_string = 'permessage-deflate'
extensions = {'permessage-deflate': {
'client_no_context_takeover': False,
'server_no_context_takeover': False}}
sock = eventlet.connect(self.server_addr)
sock.sendall((self.connect % extensions_string).encode())
sock.recv(1024)
ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions)
ws.send(b'hello')
assert ws.wait() == b'hello'
ws.send(b'hello world!')
ws.send('hello world again!')
assert ws.wait() == b'hello world!'
assert ws.wait() == 'hello world again!'
ws.close()
eventlet.sleep(0.01)
def test_send_uncompressed_msg_13(self):
extensions_string = 'permessage-deflate'
extensions = {'permessage-deflate': {
'client_no_context_takeover': False,
'server_no_context_takeover': False}}
sock = eventlet.connect(self.server_addr)
sock.sendall((self.connect % extensions_string).encode())
sock.recv(1024)
# Send without using deflate, having rsv1 unset
ws = websocket.RFC6455WebSocket(sock, {}, client=True)
ws.send(b'Hello')
# Adding extensions to recognise deflated response
ws.extensions = extensions
assert ws.wait() == b'Hello'
ws.close()
eventlet.sleep(0.01)
def test_compressed_send_recv_client_no_context_13(self):
extensions_string = 'permessage-deflate; client_no_context_takeover'
extensions = {'permessage-deflate': {
'client_no_context_takeover': True,
'server_no_context_takeover': False}}
sock = eventlet.connect(self.server_addr)
sock.sendall((self.connect % extensions_string).encode())
sock.recv(1024)
ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions)
ws.send(b'hello')
assert ws.wait() == b'hello'
ws.send(b'hello world!')
ws.send('hello world again!')
assert ws.wait() == b'hello world!'
assert ws.wait() == 'hello world again!'
ws.close()
eventlet.sleep(0.01)
def test_compressed_send_recv_server_no_context_13(self):
extensions_string = 'permessage-deflate; server_no_context_takeover'
extensions = {'permessage-deflate': {
'client_no_context_takeover': False,
'server_no_context_takeover': False}}
sock = eventlet.connect(self.server_addr)
sock.sendall((self.connect % extensions_string).encode())
sock.recv(1024)
ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions)
ws.send(b'hello')
assert ws.wait() == b'hello'
ws.send(b'hello world!')
ws.send('hello world again!')
assert ws.wait() == b'hello world!'
assert ws.wait() == 'hello world again!'
ws.close()
eventlet.sleep(0.01)
def test_compressed_send_recv_both_no_context_13(self):
extensions_string = ('permessage-deflate;'
' server_no_context_takeover; client_no_context_takeover')
extensions = {'permessage-deflate': {
'client_no_context_takeover': True,
'server_no_context_takeover': True}}
sock = eventlet.connect(self.server_addr)
sock.sendall((self.connect % extensions_string).encode())
sock.recv(1024)
ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions)
ws.send(b'hello')
assert ws.wait() == b'hello'
ws.send(b'hello world!')
ws.send('hello world again!')
assert ws.wait() == b'hello world!'
assert ws.wait() == 'hello world again!'
ws.close()
eventlet.sleep(0.01)
def test_large_frame_size_compressed_13(self):
# Test fix for GHSA-9p9m-jm8w-94p2
extensions_string = 'permessage-deflate'
extensions = {'permessage-deflate': {
'client_no_context_takeover': False,
'server_no_context_takeover': False}}
sock = eventlet.connect(self.server_addr)
sock.sendall((self.connect % extensions_string).encode())
sock.recv(1024)
ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions)
should_still_fit = b"x" * TEST_MAX_FRAME_LENGTH
one_too_much = should_still_fit + b"x"
# send just fitting frame twice to make sure they are fine independently
ws.send(should_still_fit)
assert ws.wait() == should_still_fit
ws.send(should_still_fit)
assert ws.wait() == should_still_fit
ws.send(one_too_much)
res = ws.wait()
assert res is None # socket closed
# TODO: The websocket currently sents compressed control frames, which contradicts RFC7692.
# Renable the following assert after that has been fixed.
# assert ws._remote_close_data == b"\x03\xf1Incoming compressed frame is above length limit."
eventlet.sleep(0.01)
def test_large_frame_size_uncompressed_13(self):
# Test fix for GHSA-9p9m-jm8w-94p2
sock = eventlet.connect(self.server_addr)
sock.sendall(self.connect.encode())
sock.recv(1024)
ws = websocket.RFC6455WebSocket(sock, {}, client=True)
should_still_fit = b"x" * TEST_MAX_FRAME_LENGTH
one_too_much = should_still_fit + b"x"
# send just fitting frame twice to make sure they are fine independently
ws.send(should_still_fit)
assert ws.wait() == should_still_fit
ws.send(should_still_fit)
assert ws.wait() == should_still_fit
ws.send(one_too_much)
res = ws.wait()
assert res is None # socket closed
# close code should be available now
assert ws._remote_close_data == b"\x03\xf1Incoming frame of 50001 bytes is above length limit of 50000 bytes."
eventlet.sleep(0.01)
| TestWebSocketWithCompression |
python | huggingface__transformers | tests/models/autoformer/test_modeling_autoformer.py | {
"start": 1404,
"end": 8081
} | class ____:
def __init__(
self,
parent,
d_model=16,
batch_size=13,
prediction_length=7,
context_length=14,
label_length=10,
cardinality=19,
embedding_dimension=5,
num_time_features=4,
is_training=True,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
lags_sequence=[1, 2, 3, 4, 5],
moving_average=25,
autocorrelation_factor=5,
):
self.d_model = d_model
self.parent = parent
self.batch_size = batch_size
self.prediction_length = prediction_length
self.context_length = context_length
self.cardinality = cardinality
self.num_time_features = num_time_features
self.lags_sequence = lags_sequence
self.embedding_dimension = embedding_dimension
self.is_training = is_training
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.encoder_seq_length = context_length
self.decoder_seq_length = prediction_length + label_length
self.label_length = label_length
self.moving_average = moving_average
self.autocorrelation_factor = autocorrelation_factor
def get_config(self):
return AutoformerConfig(
d_model=self.d_model,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
prediction_length=self.prediction_length,
context_length=self.context_length,
label_length=self.label_length,
lags_sequence=self.lags_sequence,
num_time_features=self.num_time_features,
num_static_categorical_features=1,
cardinality=[self.cardinality],
embedding_dimension=[self.embedding_dimension],
moving_average=self.moving_average,
scaling="std", # we need std to get non-zero `loc`
)
def prepare_autoformer_inputs_dict(self, config):
_past_length = config.context_length + max(config.lags_sequence)
static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
past_values = floats_tensor([self.batch_size, _past_length])
past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
# decoder inputs
future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
future_values = floats_tensor([self.batch_size, config.prediction_length])
inputs_dict = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def prepare_config_and_inputs(self):
config = self.get_config()
inputs_dict = self.prepare_autoformer_inputs_dict(config)
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = AutoformerModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)
transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
enc_input = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
dim=-1,
)
encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
mean = (
torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
.unsqueeze(1)
.repeat(1, config.prediction_length, 1)
)
zeros = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
device=enc_input.device,
)
dec_input = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
feature[:, config.context_length - config.label_length :, ...],
),
dim=-1,
)
trend_init = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
feature[:, config.context_length - config.label_length :, ...],
),
dim=-1,
)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
trend=trend_init,
inputs_embeds=dec_input,
encoder_hidden_states=encoder_last_hidden_state,
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
| AutoformerModelTester |
python | google__pytype | pytype/tests/test_base_test.py | {
"start": 154,
"end": 5490
} | class ____(test_base.BaseTest):
def test_error_comments(self):
err = self.CheckWithErrors("""
a = 10 # a random comment
b = "hello" + 3 # unsupported-operands[.mark]
c = (10).foo # attribute-error
d = int(int) # wrong-arg-types[.another_mark]
""")
self.assertEqual(
{mark: (e.line, e.name) for mark, e in err.marks.items()},
{
".mark": (2, "unsupported-operands"),
".another_mark": (4, "wrong-arg-types"),
},
)
self.assertEqual(
err.expected,
{
2: [("unsupported-operands", ".mark")],
3: [("attribute-error", None)],
4: [("wrong-arg-types", ".another_mark")],
},
)
def test_multiple_errors_one_line(self):
err = self.CheckWithErrors("""
x = (10).foo, "hello".foo # attribute-error[e1] # attribute-error[e2]
""")
line = 1
self.assertEqual(
err.expected,
{line: [("attribute-error", "e1"), ("attribute-error", "e2")]},
)
self.assertCountEqual(err.marks, ["e1", "e2"])
self.assertIn("on int", err.marks["e1"].message)
self.assertIn("on str", err.marks["e2"].message)
def test_different_order_of_errors_one_line(self):
self.CheckWithErrors("""
x = a.foo, "hello".foo # name-error[e1] # attribute-error[e2]
""")
self.CheckWithErrors("""
x = a.foo, "hello".foo # attribute-error[e2] # name-error[e1]
""")
def test_populate_marks(self):
# Test that assert_error_regexes populates self.marks if not already done.
src = "x = 0"
matcher = test_utils.ErrorMatcher(src)
self.assertIsNone(matcher.marks)
errorlog = errors.VmErrorLog(test_utils.FakePrettyPrinter(), src)
matcher.assert_errors_match_expected(errorlog)
self.assertErrorRegexes(matcher, {})
self.assertIsNotNone(matcher.marks)
def test_duplicate_mark(self):
with self.assertRaises(AssertionError) as ctx:
self.CheckWithErrors("x = 0 # attribute-error[e] # attribute-error[e]")
self.assertEqual(str(ctx.exception), "Mark e already used")
def test_error_regex_matching(self):
err = self.CheckWithErrors("""
a = 10
b = "hello"
c = a + b # unsupported-operands
d = a.foo() # attribute-error[.mark]
""")
self.assertErrorRegexes(err, {".mark": ".*foo.*"})
def test_error_sequence_matching(self):
err = self.CheckWithErrors("""
a = 10
b = a < "hello" # unsupported-operands[.mark]
c = a.foo() # attribute-error
""")
self.assertErrorSequences(err, {".mark": ["<", "a: int", "'hello': str"]})
def test_mismatched_error(self):
with self.assertRaises(AssertionError) as ctx:
self.CheckWithErrors("(10).foo # wrong-arg-types")
self.assertIn("Error does not match", str(ctx.exception))
def test_unexpected_error(self):
with self.assertRaises(AssertionError) as ctx:
self.CheckWithErrors("""
(10).foo # attribute-error
"hello".foo
""")
self.assertIn("Unexpected error", str(ctx.exception))
def test_leftover_error(self):
with self.assertRaises(AssertionError) as ctx:
self.CheckWithErrors("x = 0 # attribute-error")
self.assertIn("Errors not found", str(ctx.exception))
def test_misspelled_leftover_error(self):
with self.assertRaises(AssertionError) as ctx:
self.CheckWithErrors("x = 0 # misspelled-error")
self.assertIn("Errors not found", str(ctx.exception))
def test_mismatched_regex(self):
err = self.CheckWithErrors("(10).foo # attribute-error[e]")
with self.assertRaises(AssertionError) as ctx:
self.assertErrorRegexes(err, {"e": r"does not match error message"})
self.assertIn("Bad error message", str(ctx.exception))
def test_missing_regex(self):
err = self.CheckWithErrors("(10).foo # attribute-error[e]")
with self.assertRaises(AssertionError) as ctx:
self.assertErrorRegexes(err, {})
self.assertEqual(str(ctx.exception), "No matcher for mark e")
def test_leftover_regex(self):
err = self.CheckWithErrors("x = 0")
with self.assertRaises(AssertionError) as ctx:
self.assertErrorRegexes(err, {"e": ""})
self.assertEqual(str(ctx.exception), "Marks not found in code: e")
def test_mismatched_sequence(self):
# err = "No attribute 'foo' on int", check order of substrings is enforced.
err = self.CheckWithErrors("(10).foo # attribute-error[e]")
with self.assertRaises(AssertionError) as ctx:
self.assertErrorSequences(err, {"e": ["int", "foo", "attribute"]})
self.assertIn("Bad error message", str(ctx.exception))
def test_bad_check(self):
with self.assertRaises(AssertionError) as ctx:
self.Check("name_error # name-error")
self.assertIn("Cannot assert errors", str(ctx.exception))
def test_bad_infer(self):
with self.assertRaises(AssertionError) as ctx:
self.Infer("name_error # name-error")
self.assertIn("Cannot assert errors", str(ctx.exception))
def test_bad_infer_from_file(self):
with test_utils.Tempdir() as d:
d.create_file("some_file.py", "name_error # name-error")
with self.assertRaises(AssertionError) as ctx:
self.InferFromFile(filename=d["some_file.py"], pythonpath=[])
self.assertIn("Cannot assert errors", str(ctx.exception))
| ErrorLogTest |
python | protocolbuffers__protobuf | python/google/protobuf/internal/descriptor_pool_test.py | {
"start": 40330,
"end": 41210
} | class ____(object):
def __init__(self, name, package, messages, dependencies=None,
public_dependencies=None):
self.name = name
self.package = package
self.messages = messages
self.dependencies = dependencies or []
self.public_dependencies = public_dependencies or []
def CheckFile(self, test, pool):
file_desc = pool.FindFileByName(self.name)
test.assertEqual(self.name, file_desc.name)
test.assertEqual(self.package, file_desc.package)
dependencies_names = [f.name for f in file_desc.dependencies]
test.assertEqual(self.dependencies, dependencies_names)
public_dependencies_names = [f.name for f in file_desc.public_dependencies]
test.assertEqual(self.public_dependencies, public_dependencies_names)
for name, msg_type in self.messages.items():
msg_type.CheckType(test, None, name, file_desc)
| ProtoFile |
python | django__django | django/views/generic/dates.py | {
"start": 15013,
"end": 15193
} | class ____(MultipleObjectTemplateResponseMixin, BaseYearArchiveView):
"""List of objects published in a given year."""
template_name_suffix = "_archive_year"
| YearArchiveView |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/tridiagonal_matmul_op_test.py | {
"start": 1561,
"end": 11554
} | class ____(test.TestCase):
def _testAllFormats(self,
superdiag,
maindiag,
subdiag,
rhs,
expected,
dtype=dtypes.float64):
superdiag_extended = np.pad(superdiag, [0, 1], 'constant')
subdiag_extended = np.pad(subdiag, [1, 0], 'constant')
diags_compact = np.stack([superdiag_extended, maindiag, subdiag_extended])
diags_matrix = np.diag(superdiag, 1) + np.diag(maindiag, 0) + np.diag(
subdiag, -1)
diags_sequence = (constant_op.constant(superdiag_extended, dtype),
constant_op.constant(maindiag, dtype),
constant_op.constant(subdiag_extended, dtype))
diags_compact = constant_op.constant(diags_compact, dtype)
diags_matrix = constant_op.constant(diags_matrix, dtype)
rhs = constant_op.constant(rhs, dtype)
rhs_batch = array_ops_stack.stack(
[rhs, 2 * rhs])
diags_compact_batch = array_ops_stack.stack(
[diags_compact, 2 * diags_compact])
diags_matrix_batch = array_ops_stack.stack(
[diags_matrix, 2 * diags_matrix])
diags_sequence_batch = [array_ops_stack.stack(
[x, 2 * x]) for x in diags_sequence]
results = [
linalg_impl.tridiagonal_matmul(
diags_sequence, rhs, diagonals_format='sequence'),
linalg_impl.tridiagonal_matmul(
diags_compact, rhs, diagonals_format='compact'),
linalg_impl.tridiagonal_matmul(
diags_matrix, rhs, diagonals_format='matrix')
]
results_batch = [
linalg_impl.tridiagonal_matmul(
diags_sequence_batch, rhs_batch, diagonals_format='sequence'),
linalg_impl.tridiagonal_matmul(
diags_compact_batch, rhs_batch, diagonals_format='compact'),
linalg_impl.tridiagonal_matmul(
diags_matrix_batch, rhs_batch, diagonals_format='matrix')
]
with self.cached_session():
results = self.evaluate(results)
results_batch = self.evaluate(results_batch)
expected = np.array(expected)
expected_batch = np.stack([expected, 4 * expected])
for result in results:
self.assertAllClose(result, expected)
for result in results_batch:
self.assertAllClose(result, expected_batch)
def _makeTridiagonalMatrix(self, superdiag, maindiag, subdiag):
super_pad = [[0, 0], [0, 1], [1, 0]]
sub_pad = [[0, 0], [1, 0], [0, 1]]
super_part = array_ops.pad(array_ops.matrix_diag(superdiag), super_pad)
main_part = array_ops.matrix_diag(maindiag)
sub_part = array_ops.pad(array_ops.matrix_diag(subdiag), sub_pad)
return super_part + main_part + sub_part
def _randomComplexArray(self, shape):
np.random.seed(43)
return (np.random.uniform(-10, 10, shape) +
np.random.uniform(-10, 10, shape) * 1j)
def _gradientTest(self, diags, rhs, dtype=dtypes.float64):
def reference_matmul(diags, rhs):
matrix = self._makeTridiagonalMatrix(diags[..., 0, :-1], diags[..., 1, :],
diags[..., 2, 1:])
return math_ops.matmul(matrix, rhs)
diags = constant_op.constant(diags, dtype=dtype)
rhs = constant_op.constant(rhs, dtype=dtype)
with self.cached_session():
grad_reference, _ = gradient_checker_v2.compute_gradient(
reference_matmul, [diags, rhs])
grad_theoretical, grad_numerical = gradient_checker_v2.compute_gradient(
linalg_impl.tridiagonal_matmul, [diags, rhs])
self.assertAllClose(grad_theoretical, grad_numerical)
self.assertAllClose(grad_theoretical, grad_reference)
def test2x2(self):
self._testAllFormats([1], [2, 3], [4], [[2, 1], [4, 3]], [[8, 5], [20, 13]])
def test3x3(self):
for dtype in [dtypes.float32, dtypes.float64]:
self._testAllFormats([1, 2], [1, 2, 1], [2, 1], [[1, 1], [2, 2], [3, 3]],
[[3, 3], [12, 12], [5, 5]],
dtype=dtype)
def testComplex(self):
for dtype in [dtypes.complex64, dtypes.complex128]:
self._testAllFormats([1j, 1j], [1, -1, 0], [1j, 1j],
np.array([[1, 1j], [1, 1j], [1, 1j]]),
[[1 + 1j, -1 + 1j], [-1 + 2j, -2 - 1j], [1j, -1]],
dtype=dtype)
def testBatch(self):
b = 20
m = 10
n = 15
superdiag = self._randomComplexArray((b, m - 1))
maindiag = self._randomComplexArray((b, m))
subdiag = self._randomComplexArray((b, m - 1))
rhs = self._randomComplexArray((b, m, n))
matrix = np.stack([np.diag(superdiag[i], 1) + \
np.diag(maindiag[i], 0) + \
np.diag(subdiag[i], -1) for i in range(b)])
expected_result = np.matmul(matrix, rhs)
result = linalg_impl.tridiagonal_matmul(
constant_op.constant(matrix, dtype=dtypes.complex128),
constant_op.constant(rhs, dtype=dtypes.complex128),
diagonals_format='matrix')
with self.cached_session():
result = self.evaluate(result)
self.assertAllClose(result, expected_result)
def testGradientSmall(self):
self._gradientTest([[[1, 2, 0], [1, 2, 3], [0, 1, 2]]],
[[[1, 2], [3, 4], [5, 6]]],
dtype=dtypes.float64)
def testGradientComplexSmall(self):
self._gradientTest(
np.array([[[1 + 1j, 2j, 0], [1 + 2j, 2j, 3 + 0j], [0, 1j, 2 + 0j]]]),
np.array([[[1j, 2 + 0j], [3 + 1j, 4j], [5j, 6 + 3j]]]),
dtype=dtypes.complex128)
def testGradientComplexWithBatches(self):
b = 5
m = 10
n = 15
diags = self._randomComplexArray((b, 3, m))
rhs = self._randomComplexArray((b, m, n))
self._gradientTest(diags, rhs, dtype=dtypes.complex128)
def _testErrorWithShapesEager(self, exception_regex, superdiag_shape,
maindiag_shape, subdiag_shape, rhs_shape):
with context.eager_mode():
superdiag = array_ops.ones(superdiag_shape)
maindiag = array_ops.ones(maindiag_shape)
subdiag = array_ops.ones(subdiag_shape)
rhs = array_ops.ones(rhs_shape)
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
exception_regex):
linalg_ops.tridiagonal_mat_mul(superdiag, maindiag, subdiag, rhs)
def testInvalidShapesEagerGpu(self):
if test.is_built_with_rocm():
self.skipTest('Incorrect Regex on rocm')
if not test.is_gpu_available():
self.skipTest('Test requires GPU')
self._testErrorWithShapesEager('Input must have rank >= 2, but got ',
[2], [2], [2], [2])
self._testErrorWithShapesEager(
'superdiag must have same rank as rhs, but got 3 and 2',
[2, 1, 2], [2, 1], [2, 1], [2, 2])
self._testErrorWithShapesEager(
'maindiag must have same outer dimensions as rhs, but for index 0, got '
'3 and 2',
[2, 1, 2], [3, 1, 2], [2, 1, 2], [2, 2, 2])
self._testErrorWithShapesEager(
"subdiag's second-to-last dimension must be 1, but got 3",
[2, 1, 2], [2, 1, 2], [2, 3, 2], [2, 2, 2])
self._testErrorWithShapesEager(
"subdiag's last dimension size must be rhs's second-to-last dimension "
"size, but got 3 and 2",
[2, 1, 2], [2, 1, 2], [2, 1, 3], [2, 2, 2])
# Benchmark
class TridiagonalMatMulBenchmark(test.Benchmark):
sizes = [(100000, 1, 1), (1000000, 1, 1), (10000000, 1, 1), (100000, 10, 1),
(100000, 100, 1), (10000, 1, 100), (10000, 1, 1000),
(10000, 1, 10000)]
def baseline(self, upper, diag, lower, vec):
diag_part = array_ops.expand_dims(diag, -1) * vec
lower_part = array_ops.pad(
array_ops.expand_dims(lower[:, 1:], -1) * vec[:, :-1, :],
[[0, 0], [1, 0], [0, 0]])
upper_part = array_ops.pad(
array_ops.expand_dims(upper[:, :-1], -1) * vec[:, 1:, :],
[[0, 0], [0, 1], [0, 0]])
return lower_part + diag_part + upper_part
def _generateData(self, batch_size, m, n, seed=42):
np.random.seed(seed)
data = np.random.normal(size=(batch_size, m, 3 + n))
return (variables.Variable(data[:, :, 0], dtype=dtypes.float64),
variables.Variable(data[:, :, 1], dtype=dtypes.float64),
variables.Variable(data[:, :, 2], dtype=dtypes.float64),
variables.Variable(data[:, :, 3:], dtype=dtypes.float64))
def benchmarkTridiagonalMulOp(self):
devices = [('/cpu:0', 'cpu')]
if test.is_gpu_available(cuda_only=True):
devices += [('/gpu:0', 'gpu')]
for device_option, size_option in itertools.product(devices, self.sizes):
device_id, device_name = device_option
m, batch_size, n = size_option
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device(device_id):
upper, diag, lower, vec = self._generateData(batch_size, m, n)
x1 = self.baseline(upper, diag, lower, vec)
x2 = linalg_impl.tridiagonal_matmul((upper, diag, lower),
vec,
diagonals_format='sequence')
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(x1),
min_iters=10,
store_memory_usage=False,
name=('tridiagonal_matmul_baseline_%s'
'_batch_size_%d_m_%d_n_%d' %
(device_name, batch_size, m, n)))
self.run_op_benchmark(
sess,
control_flow_ops.group(x2),
min_iters=10,
store_memory_usage=False,
name=('tridiagonal_matmul_%s_batch_size_%d_m_%d_n_%d' %
(device_name, batch_size, m, n)))
if __name__ == '__main__':
test.main()
| TridiagonalMulOpTest |
python | walkccc__LeetCode | solutions/3452. Sum of Good Numbers/3452.py | {
"start": 0,
"end": 240
} | class ____:
def sumOfGoodNumbers(self, nums: list[int], k: int) -> int:
return sum(num for i, num in enumerate(nums)
if (i - k < 0 or num > nums[i - k])
and (i + k >= len(nums) or num > nums[i + k]))
| Solution |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_v2.py | {
"start": 123160,
"end": 127179
} | class ____(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('HashedCategoricalColumn',
('key', 'hash_bucket_size', 'dtype'))):
"""see `categorical_column_with_hash_bucket`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
"""Hashes the values in the feature_column."""
if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError('SparseColumn input must be a SparseTensor.')
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
if self.dtype == dtypes.string:
sparse_values = input_tensor.values
else:
sparse_values = string_ops.as_string(input_tensor.values)
sparse_id_values = string_ops.string_to_hash_bucket_fast(
sparse_values, self.hash_bucket_size, name='lookup')
return sparse_tensor_lib.SparseTensor(input_tensor.indices,
sparse_id_values,
input_tensor.dense_shape)
def transform_feature(self, transformation_cache, state_manager):
"""Hashes the values in the feature_column."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
@serialization.register_feature_column
| HashedCategoricalColumn |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 1973,
"end": 2108
} | class ____(ShowFieldType, PolymorphicModel):
polymorphic_showfield_deferred = True
field_b = models.CharField(max_length=30)
| Base |
python | django__django | django/contrib/gis/db/models/lookups.py | {
"start": 4612,
"end": 4825
} | class ____(GISLookup):
"""
The 'left' operator returns true if A's bounding box is strictly to the
left of B's bounding box.
"""
lookup_name = "left"
@BaseSpatialField.register_lookup
| LeftLookup |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/self1.py | {
"start": 2547,
"end": 2571
} | class ____(D[Self]): ...
| E |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 52315,
"end": 53567
} | class ____:
@pytest.mark.parametrize('dt', ['f', 'd', 'g'])
def test_log2_values(self, dt):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.log2(xf), yf)
@pytest.mark.parametrize("i", range(1, 65))
def test_log2_ints(self, i):
# a good log2 implementation should provide this,
# might fail on OS with bad libm
v = np.log2(2.**i)
assert_equal(v, float(i), err_msg='at exponent %d' % i)
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
def test_log2_special(self):
assert_equal(np.log2(1.), 0.)
assert_equal(np.log2(np.inf), np.inf)
assert_(np.isnan(np.log2(np.nan)))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.log2(-1.)))
assert_(np.isnan(np.log2(-np.inf)))
assert_equal(np.log2(0.), -np.inf)
assert_(w[0].category is RuntimeWarning)
assert_(w[1].category is RuntimeWarning)
assert_(w[2].category is RuntimeWarning)
| TestLog2 |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 25405,
"end": 26983
} | class ____(RegexLexer):
"""
Generic `cheetah templates`_ lexer. Code that isn't Cheetah
markup is yielded as `Token.Other`. This also works for
`spitfire templates`_ which use the same syntax.
.. _cheetah templates: http://www.cheetahtemplate.org/
.. _spitfire templates: http://code.google.com/p/spitfire/
"""
name = 'Cheetah'
aliases = ['cheetah', 'spitfire']
filenames = ['*.tmpl', '*.spt']
mimetypes = ['application/x-cheetah', 'application/x-spitfire']
tokens = {
'root': [
(r'(##[^\n]*)$',
(bygroups(Comment))),
(r'#[*](.|\n)*?[*]#', Comment),
(r'#end[^#\n]*(?:#|$)', Comment.Preproc),
(r'#slurp$', Comment.Preproc),
(r'(#[a-zA-Z]+)([^#\n]*)(#|$)',
(bygroups(Comment.Preproc, using(CheetahPythonLexer),
Comment.Preproc))),
# TODO support other Python syntax like $foo['bar']
(r'(\$)([a-zA-Z_][\w.]*\w)',
bygroups(Comment.Preproc, using(CheetahPythonLexer))),
(r'(\$\{!?)(.*?)(\})(?s)',
bygroups(Comment.Preproc, using(CheetahPythonLexer),
Comment.Preproc)),
(r'''(?sx)
(.+?) # anything, followed by:
(?:
(?=\#[#a-zA-Z]*) | # an eval comment
(?=\$[a-zA-Z_{]) | # a substitution
\Z # end of string
)
''', Other),
(r'\s+', Text),
],
}
| CheetahLexer |
python | networkx__networkx | networkx/algorithms/tree/tests/test_coding.py | {
"start": 196,
"end": 2458
} | class ____:
"""Unit tests for the Prüfer sequence encoding and decoding
functions.
"""
def test_nontree(self):
with pytest.raises(nx.NotATree):
G = nx.cycle_graph(3)
nx.to_prufer_sequence(G)
def test_null_graph(self):
with pytest.raises(nx.NetworkXPointlessConcept):
nx.to_prufer_sequence(nx.null_graph())
def test_trivial_graph(self):
with pytest.raises(nx.NetworkXPointlessConcept):
nx.to_prufer_sequence(nx.trivial_graph())
def test_bad_integer_labels(self):
with pytest.raises(KeyError):
T = nx.Graph(nx.utils.pairwise("abc"))
nx.to_prufer_sequence(T)
def test_encoding(self):
"""Tests for encoding a tree as a Prüfer sequence using the
iterative strategy.
"""
# Example from Wikipedia.
tree = nx.Graph([(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)])
sequence = nx.to_prufer_sequence(tree)
assert sequence == [3, 3, 3, 4]
def test_decoding(self):
"""Tests for decoding a tree from a Prüfer sequence."""
# Example from Wikipedia.
sequence = [3, 3, 3, 4]
tree = nx.from_prufer_sequence(sequence)
assert nodes_equal(list(tree), list(range(6)))
edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)]
assert edges_equal(list(tree.edges()), edges)
def test_decoding2(self):
# Example from "An Optimal Algorithm for Prufer Codes".
sequence = [2, 4, 0, 1, 3, 3]
tree = nx.from_prufer_sequence(sequence)
assert nodes_equal(list(tree), list(range(8)))
edges = [(0, 1), (0, 4), (1, 3), (2, 4), (2, 5), (3, 6), (3, 7)]
assert edges_equal(list(tree.edges()), edges)
def test_inverse(self):
"""Tests that the encoding and decoding functions are inverses."""
for T in nx.nonisomorphic_trees(4):
T2 = nx.from_prufer_sequence(nx.to_prufer_sequence(T))
assert nodes_equal(list(T), list(T2))
assert edges_equal(list(T.edges()), list(T2.edges()))
for seq in product(range(4), repeat=2):
seq2 = nx.to_prufer_sequence(nx.from_prufer_sequence(seq))
assert list(seq) == seq2
| TestPruferSequence |
python | apache__thrift | test/py/SerializationTest.py | {
"start": 13816,
"end": 15239
} | class ____(unittest.TestCase):
def testSplit(self):
"""Test FramedTransport and BinaryProtocolAccelerated
Tests that TBinaryProtocolAccelerated and TFramedTransport
play nicely together when a read spans a frame"""
protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
bigstring = "".join(chr(byte) for byte in range(ord("a"), ord("z") + 1))
databuf = TTransport.TMemoryBuffer()
prot = protocol_factory.getProtocol(databuf)
prot.writeI32(42)
prot.writeString(bigstring)
prot.writeI16(24)
data = databuf.getvalue()
cutpoint = len(data) // 2
parts = [data[:cutpoint], data[cutpoint:]]
framed_buffer = TTransport.TMemoryBuffer()
framed_writer = TTransport.TFramedTransport(framed_buffer)
for part in parts:
framed_writer.write(part)
framed_writer.flush()
self.assertEqual(len(framed_buffer.getvalue()), len(data) + 8)
# Recreate framed_buffer so we can read from it.
framed_buffer = TTransport.TMemoryBuffer(framed_buffer.getvalue())
framed_reader = TTransport.TFramedTransport(framed_buffer)
prot = protocol_factory.getProtocol(framed_reader)
self.assertEqual(prot.readI32(), 42)
self.assertEqual(prot.readString(), bigstring)
self.assertEqual(prot.readI16(), 24)
| AcceleratedFramedTest |
python | Lightning-AI__lightning | src/lightning/pytorch/trainer/connectors/accelerator_connector.py | {
"start": 2435,
"end": 28769
} | class ____:
def __init__(
self,
devices: Union[list[int], str, int] = "auto",
num_nodes: int = 1,
accelerator: Union[str, Accelerator] = "auto",
strategy: Union[str, Strategy] = "auto",
plugins: Optional[Union[_PLUGIN_INPUT, Iterable[_PLUGIN_INPUT]]] = None,
precision: Optional[_PRECISION_INPUT] = None,
sync_batchnorm: bool = False,
benchmark: Optional[bool] = None,
use_distributed_sampler: bool = True,
deterministic: Optional[Union[bool, _LITERAL_WARN]] = None,
) -> None:
"""The AcceleratorConnector parses several Trainer arguments and instantiates the Strategy including other
components such as the Accelerator and Precision plugins.
A. accelerator flag could be:
1. accelerator class
2. accelerator str
3. accelerator auto
B. strategy flag could be:
1. strategy class
2. strategy str registered with StrategyRegistry
C. plugins flag could be:
1. precision class (should be removed, and precision flag should allow user pass classes)
2. checkpoint_io class
3. cluster_environment class
priorities which to take when:
A. Class > str
B. Strategy > Accelerator/precision/plugins
"""
self.use_distributed_sampler = use_distributed_sampler
_set_torch_flags(deterministic=deterministic, benchmark=benchmark)
# 1. Parsing flags
# Get registered strategies, built-in accelerators and precision plugins
self._registered_strategies = StrategyRegistry.available_strategies()
self._accelerator_types = AcceleratorRegistry.available_accelerators()
# Raise an exception if there are conflicts between flags
# Set each valid flag to `self._x_flag` after validation
self._strategy_flag: Union[Strategy, str] = "auto"
self._accelerator_flag: Union[Accelerator, str] = "auto"
self._precision_flag: _PRECISION_INPUT_STR = "32-true"
self._precision_plugin_flag: Optional[Precision] = None
self._cluster_environment_flag: Optional[Union[ClusterEnvironment, str]] = None
self._parallel_devices: list[Union[int, torch.device, str]] = []
self._layer_sync: Optional[LayerSync] = TorchSyncBatchNorm() if sync_batchnorm else None
self.checkpoint_io: Optional[CheckpointIO] = None
self._check_config_and_set_final_flags(
strategy=strategy,
accelerator=accelerator,
precision=precision,
plugins=plugins,
sync_batchnorm=sync_batchnorm,
)
# 2. Instantiate Accelerator
# handle `auto`, `None` and `gpu`
if self._accelerator_flag == "auto":
self._accelerator_flag = self._choose_auto_accelerator()
elif self._accelerator_flag == "gpu":
self._accelerator_flag = self._choose_gpu_accelerator_backend()
self._check_device_config_and_set_final_flags(devices=devices, num_nodes=num_nodes)
self._set_parallel_devices_and_init_accelerator()
# 3. Instantiate ClusterEnvironment
self.cluster_environment: ClusterEnvironment = self._choose_and_init_cluster_environment()
# 4. Instantiate Strategy - Part 1
if self._strategy_flag == "auto":
self._strategy_flag = self._choose_strategy()
# In specific cases, ignore user selection and fall back to a different strategy
self._check_strategy_and_fallback()
self._init_strategy()
# 5. Instantiate Precision Plugin
self.precision_plugin = self._check_and_init_precision()
# 6. Instantiate Strategy - Part 2
self._lazy_init_strategy()
def _check_config_and_set_final_flags(
self,
strategy: Union[str, Strategy],
accelerator: Union[str, Accelerator],
precision: Optional[_PRECISION_INPUT],
plugins: Optional[Union[_PLUGIN_INPUT, Iterable[_PLUGIN_INPUT]]],
sync_batchnorm: bool,
) -> None:
"""This method checks:
1. strategy: whether the strategy name is valid, and sets the internal flags if it is.
2. accelerator: if the value of the accelerator argument is a type of accelerator (instance or string),
set self._accelerator_flag accordingly.
3. precision: The final value of the precision flag may be determined either by the precision argument or
by a plugin instance.
4. plugins: The list of plugins may contain a Precision plugin, CheckpointIO, ClusterEnvironment and others.
Additionally, other flags such as `precision` or `sync_batchnorm` can populate the list with the
corresponding plugin instances.
"""
if plugins is not None:
plugins = [plugins] if not isinstance(plugins, Iterable) else plugins
if isinstance(strategy, str):
strategy = strategy.lower()
self._strategy_flag = strategy
if strategy != "auto" and strategy not in self._registered_strategies and not isinstance(strategy, Strategy):
raise ValueError(
f"You selected an invalid strategy name: `strategy={strategy!r}`."
" It must be either a string or an instance of `lightning.pytorch.strategies.Strategy`."
" Example choices: auto, ddp, ddp_spawn, deepspeed, ..."
" Find a complete list of options in our documentation at https://lightning.ai"
)
if (
accelerator not in self._accelerator_types
and accelerator not in ("auto", "gpu")
and not isinstance(accelerator, Accelerator)
):
raise ValueError(
f"You selected an invalid accelerator name: `accelerator={accelerator!r}`."
f" Available names are: auto, {', '.join(self._accelerator_types)}."
)
# MPS accelerator is incompatible with DDP family of strategies. It supports single-device operation only.
is_ddp_str = isinstance(strategy, str) and "ddp" in strategy
is_deepspeed_str = isinstance(strategy, str) and "deepspeed" in strategy
is_parallel_strategy = isinstance(strategy, ParallelStrategy) or is_ddp_str or is_deepspeed_str
is_mps_accelerator = MPSAccelerator.is_available() and (
accelerator in ("mps", "auto", "gpu", None) or isinstance(accelerator, MPSAccelerator)
)
if is_mps_accelerator and is_parallel_strategy:
raise ValueError(
f"You set `strategy={strategy}` but strategies from the DDP family are not supported on the"
f" MPS accelerator. Either explicitly set `accelerator='cpu'` or change the strategy."
)
self._accelerator_flag = accelerator
precision_flag = _convert_precision_to_unified_args(precision)
if plugins:
plugins_flags_types: dict[str, int] = Counter()
for plugin in plugins:
if isinstance(plugin, Precision):
self._precision_plugin_flag = plugin
plugins_flags_types[Precision.__name__] += 1
elif isinstance(plugin, CheckpointIO):
self.checkpoint_io = plugin
plugins_flags_types[CheckpointIO.__name__] += 1
elif isinstance(plugin, ClusterEnvironment):
self._cluster_environment_flag = plugin
plugins_flags_types[ClusterEnvironment.__name__] += 1
elif isinstance(plugin, LayerSync):
if sync_batchnorm and not isinstance(plugin, TorchSyncBatchNorm):
raise MisconfigurationException(
f"You set `Trainer(sync_batchnorm=True)` and provided a `{plugin.__class__.__name__}`"
" plugin, but this is not allowed. Choose one or the other."
)
self._layer_sync = plugin
plugins_flags_types[TorchSyncBatchNorm.__name__] += 1
else:
raise MisconfigurationException(
f"Found invalid type for plugin {plugin}. Expected one of: Precision, "
"CheckpointIO, ClusterEnvironment, or LayerSync."
)
duplicated_plugin_key = [k for k, v in plugins_flags_types.items() if v > 1]
if duplicated_plugin_key:
raise MisconfigurationException(
f"Received multiple values for {', '.join(duplicated_plugin_key)} flags in `plugins`."
" Expected one value for each type at most."
)
if plugins_flags_types.get(Precision.__name__) and precision_flag is not None:
raise ValueError(
f"Received both `precision={precision_flag}` and `plugins={self._precision_plugin_flag}`."
f" Choose one."
)
self._precision_flag = "32-true" if precision_flag is None else precision_flag
# handle the case when the user passes in a strategy instance which has an accelerator, precision,
# checkpoint io or cluster env set up
# TODO: improve the error messages below
if self._strategy_flag and isinstance(self._strategy_flag, Strategy):
if self._strategy_flag._accelerator:
if self._accelerator_flag != "auto":
raise MisconfigurationException(
"accelerator set through both strategy class and accelerator flag, choose one"
)
self._accelerator_flag = self._strategy_flag._accelerator
if self._strategy_flag._precision_plugin:
# [RFC] handle precision plugin set up conflict?
if self._precision_plugin_flag:
raise MisconfigurationException("precision set through both strategy class and plugins, choose one")
self._precision_plugin_flag = self._strategy_flag._precision_plugin
if self._strategy_flag._checkpoint_io:
if self.checkpoint_io:
raise MisconfigurationException(
"checkpoint_io set through both strategy class and plugins, choose one"
)
self.checkpoint_io = self._strategy_flag._checkpoint_io
if getattr(self._strategy_flag, "cluster_environment", None):
if self._cluster_environment_flag:
raise MisconfigurationException(
"cluster_environment set through both strategy class and plugins, choose one"
)
self._cluster_environment_flag = getattr(self._strategy_flag, "cluster_environment")
if hasattr(self._strategy_flag, "parallel_devices") and self._strategy_flag.parallel_devices:
if self._strategy_flag.parallel_devices[0].type == "cpu":
if self._accelerator_flag and self._accelerator_flag not in ("auto", "cpu"):
raise MisconfigurationException(
f"CPU parallel_devices set through {self._strategy_flag.__class__.__name__} class,"
f" but accelerator set to {self._accelerator_flag}, please choose one device type"
)
self._accelerator_flag = "cpu"
if self._strategy_flag.parallel_devices[0].type == "cuda":
if self._accelerator_flag and self._accelerator_flag not in ("auto", "cuda", "gpu"):
raise MisconfigurationException(
f"GPU parallel_devices set through {self._strategy_flag.__class__.__name__} class,"
f" but accelerator set to {self._accelerator_flag}, please choose one device type"
)
self._accelerator_flag = "cuda"
self._parallel_devices = self._strategy_flag.parallel_devices
def _check_device_config_and_set_final_flags(self, devices: Union[list[int], str, int], num_nodes: int) -> None:
if not isinstance(num_nodes, int) or num_nodes < 1:
raise ValueError(f"`num_nodes` must be a positive integer, but got {num_nodes}.")
self._num_nodes_flag = num_nodes
self._devices_flag = devices
if self._devices_flag in ([], 0, "0"):
accelerator_name = (
self._accelerator_flag.__class__.__qualname__
if isinstance(self._accelerator_flag, Accelerator)
else self._accelerator_flag
)
raise MisconfigurationException(
f"`Trainer(devices={self._devices_flag!r})` value is not a valid input"
f" using {accelerator_name} accelerator."
)
@staticmethod
def _choose_auto_accelerator() -> str:
"""Choose the accelerator type (str) based on availability."""
return _select_auto_accelerator()
@staticmethod
def _choose_gpu_accelerator_backend() -> str:
if MPSAccelerator.is_available():
return "mps"
if CUDAAccelerator.is_available():
return "cuda"
raise MisconfigurationException("No supported gpu backend found!")
def _set_parallel_devices_and_init_accelerator(self) -> None:
if isinstance(self._accelerator_flag, Accelerator):
self.accelerator: Accelerator = self._accelerator_flag
else:
self.accelerator = AcceleratorRegistry.get(self._accelerator_flag)
accelerator_cls = self.accelerator.__class__
if not accelerator_cls.is_available():
available_accelerator = [
acc_str
for acc_str in self._accelerator_types
if AcceleratorRegistry[acc_str]["accelerator"].is_available()
]
raise MisconfigurationException(
f"`{accelerator_cls.__qualname__}` can not run on your system"
" since the accelerator is not available. The following accelerator(s)"
" is available and can be passed into `accelerator` argument of"
f" `Trainer`: {available_accelerator}."
)
self._set_devices_flag_if_auto_passed()
self._devices_flag = accelerator_cls.parse_devices(self._devices_flag)
if not self._parallel_devices:
self._parallel_devices = accelerator_cls.get_parallel_devices(self._devices_flag)
def _set_devices_flag_if_auto_passed(self) -> None:
if self._devices_flag != "auto":
return
if (
_IS_INTERACTIVE
and isinstance(self.accelerator, CUDAAccelerator)
and self.accelerator.auto_device_count() > 1
):
self._devices_flag = 1
rank_zero_info(
f"Trainer will use only 1 of {self.accelerator.auto_device_count()} GPUs because it is running inside"
" an interactive / notebook environment. You may try to set `Trainer(devices="
f"{self.accelerator.auto_device_count()})` but please note that multi-GPU inside interactive /"
" notebook environments is considered experimental and unstable. Your mileage may vary."
)
else:
self._devices_flag = self.accelerator.auto_device_count()
def _choose_and_init_cluster_environment(self) -> ClusterEnvironment:
if isinstance(self._cluster_environment_flag, ClusterEnvironment):
return self._cluster_environment_flag
for env_type in (
# TorchElastic has the highest priority since it can also be used inside SLURM
TorchElasticEnvironment,
SLURMEnvironment,
LSFEnvironment,
MPIEnvironment,
):
if env_type.detect():
return env_type()
return LightningEnvironment()
def _choose_strategy(self) -> Union[Strategy, str]:
if self._accelerator_flag == "hpu":
raise MisconfigurationException("HPU is currently not supported. Please contact developer@lightning.ai")
if self._accelerator_flag == "tpu" or isinstance(self._accelerator_flag, XLAAccelerator):
if self._parallel_devices and len(self._parallel_devices) > 1:
return XLAStrategy.strategy_name
# TODO: lazy initialized device, then here could be self._strategy_flag = "single_xla"
return SingleDeviceXLAStrategy(device=self._parallel_devices[0])
if self._num_nodes_flag > 1:
return "ddp"
if len(self._parallel_devices) <= 1:
if isinstance(self._accelerator_flag, (CUDAAccelerator, MPSAccelerator)) or (
isinstance(self._accelerator_flag, str) and self._accelerator_flag in ("cuda", "gpu", "mps")
):
device = _determine_root_gpu_device(self._parallel_devices)
else:
device = "cpu"
# TODO: lazy initialized device, then here could be self._strategy_flag = "single_device"
return SingleDeviceStrategy(device=device) # type: ignore
if len(self._parallel_devices) > 1 and _IS_INTERACTIVE:
return "ddp_fork"
return "ddp"
def _check_strategy_and_fallback(self) -> None:
"""Checks edge cases when the strategy selection was a string input, and we need to fall back to a different
choice depending on other parameters or the environment."""
# current fallback and check logic only apply to user pass in str config and object config
# TODO this logic should apply to both str and object config
strategy_flag = "" if isinstance(self._strategy_flag, Strategy) else self._strategy_flag
if (
strategy_flag in FSDPStrategy.get_registered_strategies() or type(self._strategy_flag) is FSDPStrategy
) and not (self._accelerator_flag in ("cuda", "gpu") or isinstance(self._accelerator_flag, CUDAAccelerator)):
raise ValueError(
f"The strategy `{FSDPStrategy.strategy_name}` requires a GPU accelerator, but received "
f"`accelerator={self._accelerator_flag!r}`. Please set `accelerator='cuda'`, `accelerator='gpu'`,"
" or pass a `CUDAAccelerator()` instance to use FSDP."
)
if strategy_flag in _DDP_FORK_ALIASES and "fork" not in torch.multiprocessing.get_all_start_methods():
raise ValueError(
f"You selected `Trainer(strategy='{strategy_flag}')` but process forking is not supported on this"
f" platform. We recommend `Trainer(strategy='ddp_spawn')` instead."
)
if strategy_flag:
self._strategy_flag = strategy_flag
def _init_strategy(self) -> None:
"""Instantiate the Strategy given depending on the setting of ``_strategy_flag``."""
# The validation of `_strategy_flag` already happened earlier on in the connector
assert isinstance(self._strategy_flag, (str, Strategy))
if isinstance(self._strategy_flag, str):
self.strategy = StrategyRegistry.get(self._strategy_flag)
else:
self.strategy = self._strategy_flag
def _check_and_init_precision(self) -> Precision:
self._validate_precision_choice()
if isinstance(self._precision_plugin_flag, Precision):
return self._precision_plugin_flag
if isinstance(self.strategy, (SingleDeviceXLAStrategy, XLAStrategy)):
return XLAPrecision(self._precision_flag) # type: ignore
if isinstance(self.strategy, DeepSpeedStrategy):
return DeepSpeedPrecision(self._precision_flag) # type: ignore[arg-type]
if isinstance(self.strategy, FSDPStrategy):
return FSDPPrecision(self._precision_flag) # type: ignore[arg-type]
if self._precision_flag in ("16-true", "bf16-true"):
return HalfPrecision(self._precision_flag) # type: ignore
if self._precision_flag == "32-true":
return Precision()
if self._precision_flag == "64-true":
return DoublePrecision()
if self._precision_flag == "transformer-engine":
return TransformerEnginePrecision(weights_dtype=torch.bfloat16)
if self._precision_flag == "transformer-engine-float16":
return TransformerEnginePrecision(weights_dtype=torch.float16)
if self._precision_flag == "16-mixed" and self._accelerator_flag == "cpu":
rank_zero_warn(
"You passed `Trainer(accelerator='cpu', precision='16-mixed')` but AMP with fp16 is not supported on "
"CPU. Using `precision='bf16-mixed'` instead."
)
self._precision_flag = "bf16-mixed"
if self._precision_flag in ("16-mixed", "bf16-mixed"):
rank_zero_info(
f"Using {'16bit' if self._precision_flag == '16-mixed' else 'bfloat16'} Automatic Mixed Precision (AMP)"
)
device = self._accelerator_flag if self._accelerator_flag in ("cpu", "mps") else "cuda"
return MixedPrecision(self._precision_flag, device) # type: ignore[arg-type]
raise RuntimeError("No precision set")
def _validate_precision_choice(self) -> None:
"""Validate the combination of choices for precision, AMP type, and accelerator."""
if isinstance(self._precision_plugin_flag, BitsandbytesPrecision) and not isinstance(
self.accelerator, CUDAAccelerator
):
raise RuntimeError("Bitsandbytes is only supported on CUDA GPUs.")
mp_precision_supported = ("32-true", "bf16-mixed", "bf16-true", "16-true")
if (
isinstance(self._strategy_flag, ModelParallelStrategy)
and self._precision_flag not in mp_precision_supported
):
raise ValueError(
f"The `ModelParallelStrategy` does not support `Fabric(..., precision={self._precision_flag!r})`."
f" Choose a different precision among: {', '.join(mp_precision_supported)}."
)
def _lazy_init_strategy(self) -> None:
"""Lazily set missing attributes on the previously instantiated strategy."""
self.strategy.accelerator = self.accelerator
if self.precision_plugin:
self.strategy.precision_plugin = self.precision_plugin
if self.checkpoint_io:
self.strategy.checkpoint_io = self.checkpoint_io
if hasattr(self.strategy, "cluster_environment"):
if self.strategy.cluster_environment is None:
self.strategy.cluster_environment = self.cluster_environment
self.cluster_environment = self.strategy.cluster_environment
if hasattr(self.strategy, "parallel_devices"):
if self.strategy.parallel_devices:
self._parallel_devices = self.strategy.parallel_devices
else:
self.strategy.parallel_devices = self._parallel_devices
if hasattr(self.strategy, "num_nodes"):
self.strategy.num_nodes = self._num_nodes_flag
if hasattr(self.strategy, "_layer_sync"):
self.strategy._layer_sync = self._layer_sync
if hasattr(self.strategy, "set_world_ranks"):
self.strategy.set_world_ranks()
self.strategy._configure_launcher()
if _IS_INTERACTIVE and self.strategy.launcher and not self.strategy.launcher.is_interactive_compatible:
raise MisconfigurationException(
f"`Trainer(strategy={self._strategy_flag!r})` is not compatible with an interactive"
" environment. Run your code as a script, or choose a notebook-compatible strategy:"
f" `Trainer(strategy='ddp_notebook')`."
" In case you are spawning processes yourself, make sure to include the Trainer"
" creation inside the worker function."
)
# TODO: should be moved to _check_strategy_and_fallback().
# Current test check precision first, so keep this check here to meet error order
if isinstance(self.accelerator, XLAAccelerator) and not isinstance(
self.strategy, (SingleDeviceXLAStrategy, XLAStrategy)
):
raise ValueError(
"The `XLAAccelerator` can only be used with a `SingleDeviceXLAStrategy` or `XLAStrategy`,"
f" found {self.strategy.__class__.__name__}."
)
@property
def is_distributed(self) -> bool:
distributed_strategies = [
DDPStrategy,
FSDPStrategy,
DeepSpeedStrategy,
ModelParallelStrategy,
XLAStrategy,
]
if isinstance(self.strategy, tuple(distributed_strategies)):
return True
if hasattr(self.strategy, "is_distributed"):
# Used for custom plugins. They should implement this property
return self.strategy.is_distributed
return False
def _set_torch_flags(
*, deterministic: Optional[Union[bool, _LITERAL_WARN]] = None, benchmark: Optional[bool] = None
) -> None:
if deterministic:
if benchmark is None:
# Set benchmark to False to ensure determinism
benchmark = False
elif benchmark:
rank_zero_warn(
"You passed `deterministic=True` and `benchmark=True`. Note that PyTorch ignores"
" torch.backends.cudnn.deterministic=True when torch.backends.cudnn.benchmark=True.",
)
if benchmark is not None:
torch.backends.cudnn.benchmark = benchmark
if deterministic == "warn":
torch.use_deterministic_algorithms(True, warn_only=True)
elif isinstance(deterministic, bool):
# do not call this if deterministic wasn't passed
torch.use_deterministic_algorithms(deterministic)
if deterministic:
# https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
| _AcceleratorConnector |
python | django__django | tests/auth_tests/test_remote_user.py | {
"start": 17501,
"end": 19286
} | class ____(RemoteUserTest):
"""
Tests a custom RemoteUserBackend subclass that overrides the clean_username
and configure_user methods.
"""
backend = "auth_tests.test_remote_user.CustomRemoteUserBackend"
# REMOTE_USER strings with email addresses for the custom backend to
# clean.
known_user = "knownuser@example.com"
known_user2 = "knownuser2@example.com"
def test_known_user(self):
"""
The strings passed in REMOTE_USER should be cleaned and the known users
should not have been configured with an email address.
"""
super().test_known_user()
knownuser = User.objects.get(username="knownuser")
knownuser2 = User.objects.get(username="knownuser2")
self.assertEqual(knownuser.email, "")
self.assertEqual(knownuser2.email, "")
self.assertEqual(knownuser.last_name, "knownuser")
self.assertEqual(knownuser2.last_name, "knownuser2")
def test_unknown_user(self):
"""
The unknown user created should be configured with an email address
provided in the request header.
"""
num_users = User.objects.count()
response = self.client.get(
"/remote_user/",
**{
self.header: "newuser",
self.email_header: "user@example.com",
},
)
self.assertEqual(response.context["user"].username, "newuser")
self.assertEqual(response.context["user"].email, "user@example.com")
self.assertEqual(response.context["user"].last_name, "")
self.assertEqual(User.objects.count(), num_users + 1)
newuser = User.objects.get(username="newuser")
self.assertEqual(newuser.email, "user@example.com")
| RemoteUserCustomTest |
python | apache__airflow | providers/apache/kafka/tests/unit/apache/kafka/queues/test_kafka.py | {
"start": 1080,
"end": 4949
} | class ____:
"""Tests for KafkaMessageQueueProvider."""
def setup_method(self):
"""Set up the test environment."""
from airflow.providers.apache.kafka.queues.kafka import KafkaMessageQueueProvider
self.provider = KafkaMessageQueueProvider()
def test_queue_create(self):
"""Test the creation of the KafkaMessageQueueProvider."""
from airflow.providers.common.messaging.providers.base_provider import BaseMessageQueueProvider
assert isinstance(self.provider, BaseMessageQueueProvider)
@pytest.mark.parametrize(
("queue_uri", "expected_result"),
[
pytest.param("kafka://localhost:9092/topic1", True, id="single_broker_single_topic"),
pytest.param(
"kafka://broker1:9092,broker2:9092/topic1,topic2", True, id="multiple_brokers_multiple_topics"
),
pytest.param("http://example.com", False, id="http_url"),
pytest.param("not-a-url", False, id="invalid_url"),
],
)
def test_queue_matches(self, queue_uri, expected_result):
"""Test the queue_matches method with various URLs."""
assert self.provider.queue_matches(queue_uri) == expected_result
@pytest.mark.parametrize(
("scheme", "expected_result"),
[
pytest.param("kafka", True, id="kafka_scheme"),
pytest.param("redis+pubsub", False, id="redis_scheme"),
pytest.param("sqs", False, id="sqs_scheme"),
pytest.param("unknown", False, id="unknown_scheme"),
],
)
def test_scheme_matches(self, scheme, expected_result):
"""Test the scheme_matches method with various schemes."""
assert self.provider.scheme_matches(scheme) == expected_result
def test_trigger_class(self):
"""Test the trigger_class method."""
assert self.provider.trigger_class() == AwaitMessageTrigger
@pytest.mark.parametrize(
("queue_uri", "extra_kwargs", "expected_result"),
[
pytest.param(
"kafka://broker:9092/topic1,topic2",
{"apply_function": MOCK_KAFKA_TRIGGER_APPLY_FUNCTION},
{"topics": ["topic1", "topic2"]},
id="topics_from_uri",
),
pytest.param(
"kafka://broker:9092/",
{"topics": ["topic1", "topic2"], "apply_function": MOCK_KAFKA_TRIGGER_APPLY_FUNCTION},
{},
id="topics_from_kwargs",
),
],
)
def test_trigger_kwargs_valid_cases(self, queue_uri, extra_kwargs, expected_result):
"""Test the trigger_kwargs method with valid parameters."""
kwargs = self.provider.trigger_kwargs(queue_uri, **extra_kwargs)
assert kwargs == expected_result
@pytest.mark.parametrize(
("queue_uri", "extra_kwargs", "expected_error", "error_match"),
[
pytest.param(
"kafka://broker:9092/topic1",
{},
ValueError,
"apply_function is required in KafkaMessageQueueProvider kwargs",
id="missing_apply_function",
),
pytest.param(
"kafka://broker:9092/",
{"apply_function": MOCK_KAFKA_TRIGGER_APPLY_FUNCTION},
ValueError,
"topics is required in KafkaMessageQueueProvider kwargs or provide them in the queue URI",
id="missing_topics",
),
],
)
def test_trigger_kwargs_error_cases(self, queue_uri, extra_kwargs, expected_error, error_match):
"""Test that trigger_kwargs raises appropriate errors with invalid parameters."""
with pytest.raises(expected_error, match=error_match):
self.provider.trigger_kwargs(queue_uri, **extra_kwargs)
| TestKafkaMessageQueueProvider |
python | getsentry__sentry | src/sentry/issues/endpoints/organization_release_previous_commits.py | {
"start": 807,
"end": 3855
} | class ____(OrganizationReleasesBaseEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.ISSUES
rate_limits = RateLimitConfig(group="CLI")
def get(self, request: Request, organization: Organization, version: str) -> Response:
"""
Retrieve an Organization's Most Recent Release with Commits
````````````````````````````````````````````````````````````
:pparam string organization_id_or_slug: the id or slug of the organization the
release belongs to.
:pparam string version: the version identifier of the release.
:auth: required
"""
try:
release = Release.objects.get(organization_id=organization.id, version=version)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
raise ResourceDoesNotExist
start_date = release.date_released or release.date_added
try:
filter_params = self.get_filter_params(request, organization)
except NoProjects:
# Returning 200 to conform to existing behavior. It would be much better to 404
# here and handle it in the client.
return Response({}, status=200)
# Release-related project-ids are pre-fetched and narrowed by the request parameters. If
# you want the previous release but you specify a list of project-ids you likely don't
# want a release from another project entirely!
project_ids = list(
ReleaseProject.objects.filter(
release_id=release.id,
project_id__in=filter_params["project_id"],
).values_list("project_id", flat=True)
)
prev_release_with_commits = (
Release.objects.filter(
organization_id=organization.id,
projects__id__in=project_ids,
last_commit_id__isnull=False,
)
.extra(
select={"date": "COALESCE(date_released, date_added)"},
where=["COALESCE(date_released, date_added) <= %s"],
params=[start_date],
)
.extra(order_by=["-date"])[:1]
)
try:
analytics.record(
ReleaseGetPreviousCommitsEvent(
user_id=request.user.id if request.user and request.user.id else None,
organization_id=organization.id,
project_ids=project_ids,
user_agent=request.META.get("HTTP_USER_AGENT", ""),
)
)
except Exception as e:
sentry_sdk.capture_exception(e)
if not prev_release_with_commits:
return Response({})
return Response(
serialize(
prev_release_with_commits[0],
request.user,
)
)
| OrganizationReleasePreviousCommitsEndpoint |
python | huggingface__transformers | tests/models/whisper/test_modeling_whisper.py | {
"start": 235057,
"end": 238989
} | class ____:
def __init__(
self,
parent,
batch_size=3, # need batch_size != num_hidden layers
seq_length=60,
is_training=True,
use_labels=True,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
input_channels=1,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
max_source_positions=30,
num_mel_bins=80,
num_conv_layers=1,
suppress_tokens=None,
classifier_proj_size=4,
num_labels=2,
is_encoder_decoder=False,
is_decoder=False,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.input_channels = input_channels
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.num_mel_bins = num_mel_bins
self.max_position_embeddings = max_position_embeddings
self.max_source_positions = max_source_positions
self.num_conv_layers = num_conv_layers
self.suppress_tokens = suppress_tokens
self.classifier_proj_size = classifier_proj_size
self.num_labels = num_labels
self.is_encoder_decoder = is_encoder_decoder
self.is_decoder = is_decoder
def get_config(self):
return WhisperConfig(
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
input_channels=self.input_channels,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
max_source_positions=self.max_source_positions,
decoder_ffn_dim=self.hidden_size,
encoder_ffn_dim=self.hidden_size,
suppress_tokens=self.suppress_tokens,
classifier_proj_size=self.classifier_proj_size,
num_labels=self.num_labels,
is_encoder_decoder=self.is_encoder_decoder,
is_decoder=self.is_decoder,
)
def prepare_config_and_inputs(self):
input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length])
config = self.get_config()
inputs_dict = {"input_features": input_features}
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def get_subsampled_output_lengths(self, input_lengths):
"""
Computes the output length of the convolutional layers
"""
for i in range(self.num_conv_layers):
input_lengths = (input_lengths - 1) // 2 + 1
return input_lengths
@property
def encoder_seq_length(self):
return self.get_subsampled_output_lengths(self.seq_length)
def create_and_check_model_forward(self, config, inputs_dict, use_weighted_layer_sum=False):
config.use_weighted_layer_sum = use_weighted_layer_sum
model = WhisperForAudioClassification(config=config)
model.to(torch_device).eval()
input_features = inputs_dict["input_features"]
with torch.no_grad():
last_hidden_state = model(input_features).logits
self.parent.assertTrue(last_hidden_state.shape, (13, 2))
@require_torch
| WhisperEncoderModelTester |
python | numba__numba | numba/tests/test_datamodel.py | {
"start": 5260,
"end": 5667
} | class ____(unittest.TestCase):
def test_issue2921(self):
import numpy as np
from numba import njit
@njit
def copy(a, b):
for i in range(a.shape[0]):
a[i] = b[i]
b = np.arange(5, dtype=np.uint8).view(np.bool_)
a = np.zeros_like(b)
copy(a, b)
np.testing.assert_equal(a, np.array((False,) + (True,) * 4))
| TestMisc |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/experimental/base.py | {
"start": 1814,
"end": 2150
} | class ____(Generic[T]):
"""
Generic column-statistic.
Parameters
----------
value
Statistics value. Value will be None
if the statistics is unknown.
exact
Whether the statistics is known exactly.
"""
value: T | None = None
exact: bool = False
@dataclasses.dataclass
| ColumnStat |
python | django-extensions__django-extensions | django_extensions/collision_resolvers.py | {
"start": 3900,
"end": 4399
} | class ____(PathBasedCR):
"""
Collision resolver which transform full model name to alias by changing dots to underscores.
He also removes 'models' part of alias, because all models are in models.py files.
Model from last application in alphabetical order is selected.
""" # noqa: E501
def transform_import(self, module_path):
module, model = module_path.rsplit(".models", 1)
module_path = module + model
return module_path.replace(".", "_")
| FullPathCR |
python | jazzband__django-oauth-toolkit | oauth2_provider/views/generic.py | {
"start": 879,
"end": 1132
} | class ____(ClientProtectedResourceMixin, View):
"""View for protecting a resource with client-credentials method.
This involves allowing access tokens, Basic Auth and plain credentials in request body.
"""
pass
| ClientProtectedResourceView |
python | joke2k__faker | faker/providers/ssn/en_IN/__init__.py | {
"start": 77,
"end": 731
} | class ____(BaseProvider):
"""
Faker provider for Indian Identifiers
"""
aadhaar_id_formats = ("%##########",)
def aadhaar_id(self) -> str:
"""
Aadhaar is a 12 digit person identifier generated for residents of
India.
Details: https://en.wikipedia.org/wiki/Aadhaar
Official Website: https://uidai.gov.in/my-aadhaar/about-your-aadhaar.html
"""
aadhaar_digits = self.numerify(self.random_element(self.aadhaar_id_formats))
checksum = checksums.calculate_luhn(int(aadhaar_digits))
aadhaar_number = f"{aadhaar_digits}{checksum}"
return aadhaar_number
| Provider |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/utils.py | {
"start": 8497,
"end": 9638
} | class ____(ast.NodeVisitor):
"""Get the nonlocal variables accessed of a function."""
def __init__(self) -> None:
"""Create a FunctionNonLocals visitor."""
self.nonlocals: set[str] = set()
@override
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
"""Visit a function definition.
Args:
node: The node to visit.
"""
visitor = NonLocals()
visitor.visit(node)
self.nonlocals.update(visitor.loads - visitor.stores)
@override
def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
"""Visit an async function definition.
Args:
node: The node to visit.
"""
visitor = NonLocals()
visitor.visit(node)
self.nonlocals.update(visitor.loads - visitor.stores)
@override
def visit_Lambda(self, node: ast.Lambda) -> None:
"""Visit a lambda function.
Args:
node: The node to visit.
"""
visitor = NonLocals()
visitor.visit(node)
self.nonlocals.update(visitor.loads - visitor.stores)
| FunctionNonLocals |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-jira/integration_tests/fixtures/data_generator/streams.py | {
"start": 2947,
"end": 3346
} | class ____(Groups, GeneratorMixin):
"""
https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-groups/#api-rest-api-3-group-post
"""
def path(self, **kwargs) -> str:
return "group"
def generate(self):
for index in range(20):
payload = json.dumps({"name": f"Test group {index}"})
self.generate_record(payload)
| GroupsGenerator |
python | jazzband__django-waffle | waffle/tests/test_waffle.py | {
"start": 30938,
"end": 31350
} | class ____(TransactionTestMixin, TransactionTestCase):
def create_toggle(self):
return waffle.get_waffle_sample_model().objects.create(
name="transaction-sample-name", percent=0
)
def flip_toggle(self, sample):
sample.percent = 100
sample.save()
def toggle_is_active(self, sample):
return waffle.sample_is_active(sample.name)
| SampleTransactionTests |
python | google__pytype | pytype/load_pytd.py | {
"start": 13086,
"end": 31340
} | class ____:
"""A cache for loaded PyTD files.
Typically, you'll have one instance of this class, per module.
Attributes:
options: A config.Options object.
builtins: The builtins ast.
typing: The typing ast.
"""
def __init__(self, options, modules=None, missing_modules=()):
self.options = options
self._modules = _ModuleMap(options, modules)
self.builtins = self._modules["builtins"].ast
self.typing = self._modules["typing"].ast
self._module_loader = module_loader.ModuleLoader(options)
self._missing_modules = missing_modules
self._resolver = _Resolver(self.builtins)
self._late_type_loader = _LateTypeLoader(self)
self._import_name_cache = {} # performance cache
self._aliases = collections.defaultdict(dict)
self._prefixes = set()
# Paranoid verification that pytype.main properly checked the flags:
if options.imports_map is not None:
assert options.pythonpath == [""], options.pythonpath
@functools.cached_property
def _typeshed_loader(self):
return typeshed.TypeshedLoader(self._pyi_options, self._missing_modules)
@functools.cached_property
def _builtin_loader(self):
return builtin_stubs.BuiltinLoader(self._pyi_options)
@functools.cached_property
def _pyi_options(self):
return parser.PyiOptions.from_toplevel_options(self.options)
def get_default_ast(self):
return builtin_stubs.GetDefaultAst(
parser.PyiOptions.from_toplevel_options(self.options)
)
def save_to_pickle(self, filename):
"""Save to a pickle. See PickledPyiLoader.load_from_pickle for reverse."""
# We assume that the Loader is in a consistent state here. In particular, we
# assume that for every module in _modules, all the transitive dependencies
# have been loaded.
items = pickle_utils.PrepareModuleBundle(
((name, m.filename, m.ast) for name, m in sorted(self._modules.items()))
)
# Preparing an ast for pickling clears its class pointers, making it
# unsuitable for reuse, so we have to discard the builtins cache.
builtin_stubs.InvalidateCache()
pickle_utils.Save(
items, filename, compress=True, open_function=self.options.open_function
)
def _resolve_external_and_local_types(self, mod_ast, lookup_ast=None):
dependencies = self._resolver.collect_dependencies(mod_ast)
if dependencies:
lookup_ast = lookup_ast or mod_ast
self._load_ast_dependencies(dependencies, lookup_ast)
mod_ast = self._resolve_external_types(mod_ast, lookup_ast=lookup_ast)
mod_ast = self._resolver.resolve_local_types(mod_ast, lookup_ast=lookup_ast)
return mod_ast
def _create_empty(self, mod_info):
return self.load_module(
mod_info, mod_ast=pytd_utils.CreateModule(mod_info.module_name)
)
def load_file(self, module_name, filename, mod_ast=None):
"""Load a module from a filename."""
return self.load_module(ModuleInfo(module_name, filename), mod_ast=mod_ast)
def load_module(self, mod_info, mod_ast=None):
"""Load (or retrieve from cache) a module and resolve its dependencies."""
# TODO(mdemello): Should we do this in _ModuleMap.__setitem__? Also, should
# we only invalidate concatenated if existing = None?
self._modules.invalidate_concatenated()
# Check for an existing ast first
existing = self._modules.get_existing_ast(mod_info.module_name)
if existing:
return existing
if not mod_ast:
mod_ast = self._module_loader.load_ast(mod_info)
return self.process_module(mod_info, mod_ast)
def process_module(self, mod_info, mod_ast):
"""Create a module from a loaded ast and save it to the loader cache.
Args:
mod_info: The metadata of the module being imported.
mod_ast: The pytd.TypeDeclUnit representing the module.
Returns:
The ast (pytd.TypeDeclUnit) as represented in this loader.
"""
module_name = mod_info.module_name
module = Module(module_name, mod_info.filename, mod_ast)
# Builtins need to be resolved before the module is cached so that they are
# not mistaken for local types. External types can be left unresolved
# because they are unambiguous.
self._resolver.allow_singletons = False
module.ast = self._resolver.resolve_builtin_types(module.ast)
self._modules[module_name] = module
try:
self._resolver.allow_singletons = True
module.ast = self._resolve_external_and_local_types(module.ast)
# We need to resolve builtin singletons after we have made sure they are
# not shadowed by a local or a star import.
module.ast = self._resolver.resolve_builtin_types(module.ast)
self._resolver.allow_singletons = False
# Now that any imported TypeVar instances have been resolved, adjust type
# parameters in classes and functions.
module.ast = module.ast.Visit(visitors.AdjustTypeParameters())
# Now we can fill in internal cls pointers to ClassType nodes in the
# module. This code executes when the module is first loaded, which
# happens before any others use it to resolve dependencies, so there are
# no external pointers into the module at this point.
module_map = {"": module.ast, module_name: module.ast}
module.ast.Visit(visitors.FillInLocalPointers(module_map))
except:
# don't leave half-resolved modules around
del self._modules[module_name]
raise
if module_name:
self.add_module_prefixes(module_name)
return module.ast
def remove_name(self, module_name: str) -> None:
"""Removes a module from the cache, if it is present."""
if module_name in self._modules:
del self._modules[module_name]
if module_name in self._import_name_cache:
del self._import_name_cache[module_name]
def _try_import_prefix(self, name: str) -> _AST | None:
"""Try importing all prefixes of name, returning the first valid module."""
prefix = name
while "." in prefix:
prefix, _ = prefix.rsplit(".", 1)
ast = self._import_module_by_name(prefix)
if ast:
return ast
return None
def _load_ast_dependencies(
self, dependencies, lookup_ast, lookup_ast_name=None
):
"""Fill in all ClassType.cls pointers and load reexported modules."""
ast_name = lookup_ast_name or lookup_ast.name
for dep_name in dependencies:
name = self._resolver.resolve_module_alias(
dep_name, lookup_ast=lookup_ast, lookup_ast_name=lookup_ast_name
)
if dep_name != name:
# We have an alias. Store it in the aliases map.
self._aliases[ast_name][dep_name] = name
if name in self._modules and self._modules[name].ast:
dep_ast = self._modules[name].ast
else:
dep_ast = self._import_module_by_name(name)
if dep_ast is None:
dep_ast = self._try_import_prefix(name)
if dep_ast or f"{ast_name}.{name}" in lookup_ast:
# If any prefix is a valid module, then we'll assume that we're
# importing a nested class. If name is in lookup_ast, then it is a
# local reference and not an import at all.
continue
else:
self._module_loader.log_module_not_found(name)
try:
pytd.LookupItemRecursive(lookup_ast, name)
except KeyError as e:
raise BadDependencyError(
f"Can't find pyi for {name!r}", ast_name
) from e
# This is a dotted local reference, not an external reference.
continue
# If `name` is a package, try to load any base names not defined in
# __init__ as submodules.
if not self._modules[name].is_package() or "__getattr__" in dep_ast:
continue
for base_name in dependencies[dep_name]:
if base_name == "*":
continue
full_name = f"{name}.{base_name}"
# Check whether full_name is a submodule based on whether it is
# defined in the __init__ file.
assert isinstance(dep_ast, _AST)
attr = dep_ast.Get(full_name)
if attr is None:
# This hack is needed to support circular imports like the one here:
# https://github.com/python/typeshed/blob/875f0ca7fcc68e7bd6c9b807cdceeff8a6f734c8/stdlib/sqlite3/dbapi2.pyi#L258
# sqlite3.__init__ does a star import from sqlite3.dbapi2, and
# sqlite3.dbapi2 imports local names from the sqlite3 namespace to
# avoid name collisions.
maybe_star_import = dep_ast.Get(f"{name}.{ast_name}.*")
if (
isinstance(maybe_star_import, pytd.Alias)
and maybe_star_import.type.name == f"{ast_name}.*"
):
attr = lookup_ast.Get(f"{ast_name}.{base_name}")
# 'from . import submodule as submodule' produces
# Alias(submodule, NamedType(submodule)).
if attr is None or (
isinstance(attr, pytd.Alias) and attr.name == attr.type.name
):
if not self._import_module_by_name(full_name):
# Add logging to make debugging easier but otherwise ignore the
# result - resolve_external_types will raise a better error.
self._module_loader.log_module_not_found(full_name)
def _resolve_external_types(self, mod_ast, lookup_ast=None):
module_map = self._modules.get_module_map()
mod_name = lookup_ast and lookup_ast.name
if mod_name and mod_name not in module_map:
module_map[mod_name] = lookup_ast
mod_ast = self._resolver.resolve_external_types(
mod_ast, module_map, self._aliases, mod_name=mod_name
)
return mod_ast
def _resolve_classtype_pointers(self, mod_ast, *, lookup_ast=None):
module_map = self._modules.get_module_map()
module_map[""] = lookup_ast or mod_ast # The module itself (local lookup)
mod_ast.Visit(visitors.FillInLocalPointers(module_map))
def resolve_pytd(self, pytd_node, lookup_ast):
"""Resolve and verify pytd value, using the given ast for local lookup."""
# NOTE: Modules of dependencies will be loaded into the cache
pytd_node = self._resolver.resolve_builtin_types(
pytd_node, lookup_ast=lookup_ast
)
pytd_node = self._resolve_external_and_local_types(
pytd_node, lookup_ast=lookup_ast
)
self._resolve_classtype_pointers_for_all_modules()
self._resolve_classtype_pointers(pytd_node, lookup_ast=lookup_ast)
self._resolver.verify(pytd_node, mod_name=lookup_ast.name)
return pytd_node
def resolve_ast(self, ast):
"""Resolve the dependencies of an AST, without adding it to our modules."""
# NOTE: Modules of dependencies will be loaded into the cache
return self.resolve_pytd(ast, ast)
def _resolve_classtype_pointers_for_all_modules(self):
for module in self._modules.values():
if module.has_unresolved_pointers:
self._resolve_classtype_pointers(module.ast)
module.has_unresolved_pointers = False
def import_relative_name(self, name: str) -> _AST | None:
"""IMPORT_NAME with level=-1. A name relative to the current directory."""
if self.options.module_name is None:
raise ValueError("Attempting relative import in non-package.")
path = self.options.module_name.split(".")[:-1]
path.append(name)
return self.import_name(".".join(path))
def import_relative(self, level: int) -> _AST | None:
"""Import a module relative to our base module.
Args:
level: Relative level:
https://docs.python.org/2/library/functions.html#__import__
E.g.
1: "from . import abc"
2: "from .. import abc"
etc.
Since you'll use import_name() for -1 and 0, this function expects the
level to be >= 1.
Returns:
The parsed pytd. Instance of pytd.TypeDeclUnit. None if we can't find the
module.
Raises:
ValueError: If we don't know the name of the base module.
"""
assert level >= 1
if self.options.module_name is None:
raise ValueError("Attempting relative import in non-package.")
components = self.options.module_name.split(".")
sub_module = ".".join(components[0:-level])
return self.import_name(sub_module)
def import_name(self, module_name: str):
if module_name in self._import_name_cache:
return self._import_name_cache[module_name]
mod_ast = self._import_module_by_name(module_name)
if not mod_ast:
self._module_loader.log_module_not_found(module_name)
self._resolve_classtype_pointers_for_all_modules()
mod_ast = self.finish_and_verify_ast(mod_ast)
self._import_name_cache[module_name] = mod_ast
return mod_ast
def _resolve_module(self, name, aliases):
if name in aliases:
name = aliases[name]
while name not in self._modules:
if "." not in name:
break
name, _ = name.rsplit(".", 1)
return name
def finish_and_verify_ast(self, mod_ast):
"""Verify the ast, doing external type resolution first if necessary."""
if mod_ast:
try:
self._resolver.verify(mod_ast)
except (BadDependencyError, visitors.ContainerError) as e:
# In the case of a circular import, an external type may be left
# unresolved, so we re-resolve lookups in this module and its direct
# dependencies. Technically speaking, we should re-resolve all
# transitive imports, but lookups are expensive.
dependencies = self._resolver.collect_dependencies(mod_ast)
for k in dependencies:
k = self._resolve_module(k, self._aliases[mod_ast.name])
if k not in self._modules:
all_aliases = _merge_aliases(self._aliases)
k = self._resolve_module(k, all_aliases)
if k not in self._modules:
assert mod_ast
raise (
BadDependencyError(f"Can't find pyi for {k!r}", mod_ast.name)
) from e
self._modules[k].ast = self._resolve_external_types(
self._modules[k].ast
)
mod_ast = self._resolve_external_types(mod_ast)
# Circular imports can leave type params (e.g. ParamSpecArgs)
# unresolved. External type parameters are added to the AST in
# visitors.AdjustTypeParameters, after resolving local types. But those
# are needed to resolve e.g. `_P.args` references.
mod_ast = self._resolver.resolve_local_types(mod_ast)
self._resolver.verify(mod_ast)
return mod_ast
def add_module_prefixes(self, module_name):
for prefix in module_utils.get_all_prefixes(module_name):
self._prefixes.add(prefix)
def has_module_prefix(self, prefix):
return prefix in self._prefixes
def _load_builtin(self, namespace, module_name):
"""Load a pytd/pyi that ships with pytype or typeshed."""
loaders = []
# Try our own type definitions first, then typeshed's.
if namespace in ("builtins", "stdlib"):
loaders.append(self._builtin_loader)
if self.options.typeshed and namespace in ("stdlib", "third_party"):
loaders.append(self._typeshed_loader)
for loader in loaders:
filename, mod_ast = loader.load_module(namespace, module_name)
if mod_ast:
mod = ModuleInfo.internal_stub(module_name, filename)
return self.load_module(mod, mod_ast=mod_ast)
return None
def _import_module_by_name(self, module_name) -> _AST | None:
"""Load a name like 'sys' or 'foo.bar.baz'.
Args:
module_name: The name of the module. May contain dots.
Returns:
The parsed file, instance of pytd.TypeDeclUnit, or None if we
the module wasn't found.
"""
existing = self._modules.get_existing_ast(module_name)
if existing:
return existing
assert path_utils.sep not in module_name, (path_utils.sep, module_name)
log.debug("Trying to import %r", module_name)
# Builtin modules (but not standard library modules!) take precedence
# over modules in PYTHONPATH.
# Note: while typeshed no longer has a builtins subdir, the pytd
# tree still does, and order is important here.
mod = self._load_builtin("builtins", module_name)
if mod:
return mod
# Now try to retrieve an external module from the module loader.
mod_ast = None
default = None
mod_info = self._module_loader.find_import(module_name)
if mod_info:
if mod_info.file_exists:
# We have a concrete file the module loader can retrieve.
mod_ast = self.load_module(mod_info)
assert mod_ast is not None, mod_info.filename
else:
# Create an empty AST labelled with the info the module loader returned.
mod_ast = self._create_empty(mod_info)
if mod_info.is_default_pyi():
# Remove the default module from the cache; we will return it later if
# nothing else supplies the module AST.
default = self._modules.get(module_name)
del self._modules[module_name]
elif module_name in _ALWAYS_PREFER_TYPESHED:
del self._modules[module_name]
else:
return mod_ast
# The standard library is (typically) towards the end of PYTHONPATH.
mod = self._load_builtin("stdlib", module_name)
if mod:
return mod
# Third party modules from typeshed (typically site-packages) come last.
mod = self._load_builtin("third_party", module_name)
if mod:
return mod
# Now return the default module if we have found nothing better.
if mod_ast:
assert default
self._modules[module_name] = default
return mod_ast
return None
def concat_all(self):
return self._modules.concat_all()
def get_resolved_modules(self):
"""Gets a name -> ResolvedModule map of the loader's resolved modules."""
return self._modules.get_resolved_modules()
def lookup_pytd(self, module: str, name: str) -> pytd.Node:
ast = self.import_name(module)
assert ast, f"Module not found: {module}"
return ast.Lookup(f"{module}.{name}")
def load_late_type(self, late_type: pytd.LateType):
return self._late_type_loader.load_late_type(late_type)
def get_unused_imports_map_paths(self) -> set[str]:
return self._module_loader.get_unused_imports_map_paths()
| Loader |
python | pallets__quart | src/quart/utils.py | {
"start": 706,
"end": 5858
} | class ____(Exception):
pass
def file_path_to_path(*paths: FilePath) -> Path:
# Flask supports bytes paths
safe_paths: list[str | os.PathLike] = []
for path in paths:
if isinstance(path, bytes):
safe_paths.append(path.decode())
else:
safe_paths.append(path)
return Path(*safe_paths)
def run_sync(func: Callable[..., Any]) -> Callable[..., Coroutine[None, None, Any]]:
"""Ensure that the sync function is run within the event loop.
If the *func* is not a coroutine it will be wrapped such that
it runs in the default executor (use loop.set_default_executor
to change). This ensures that synchronous functions do not
block the event loop.
"""
@wraps(func)
async def _wrapper(*args: Any, **kwargs: Any) -> Any:
loop = asyncio.get_running_loop()
result = await loop.run_in_executor(
None, copy_context().run, partial(func, *args, **kwargs)
)
if inspect.isgenerator(result):
return run_sync_iterable(result)
else:
return result
_wrapper._quart_async_wrapper = True # type: ignore
return _wrapper
T = TypeVar("T")
def run_sync_iterable(iterable: Iterator[T]) -> AsyncIterator[T]:
async def _gen_wrapper() -> AsyncIterator[T]:
# Wrap the generator such that each iteration runs
# in the executor. Then rationalise the raised
# errors so that it ends.
def _inner() -> T:
# https://bugs.python.org/issue26221
# StopIteration errors are swallowed by the
# run_in_exector method
try:
return next(iterable)
except StopIteration as e:
raise StopAsyncIteration() from e
loop = asyncio.get_running_loop()
while True:
try:
yield await loop.run_in_executor(None, copy_context().run, _inner)
except StopAsyncIteration:
return
return _gen_wrapper()
def encode_headers(headers: Headers) -> list[tuple[bytes, bytes]]:
return [(key.lower().encode(), value.encode()) for key, value in headers.items()]
def decode_headers(headers: Iterable[tuple[bytes, bytes]]) -> Headers:
return Headers([(key.decode(), value.decode()) for key, value in headers])
async def observe_changes(
sleep: Callable[[float], Awaitable[Any]], shutdown_event: Event
) -> None:
last_updates: dict[Path, float] = {}
for module in list(sys.modules.values()):
filename = getattr(module, "__file__", None)
if filename is None:
continue
path = Path(filename)
try:
last_updates[Path(filename)] = path.stat().st_mtime
except (FileNotFoundError, NotADirectoryError):
pass
while not shutdown_event.is_set():
await sleep(1)
for index, (path, last_mtime) in enumerate(last_updates.items()):
if index % 10 == 0:
# Yield to the event loop
await sleep(0)
try:
mtime = path.stat().st_mtime
except FileNotFoundError as e:
# File deleted
raise MustReloadError() from e
else:
if mtime > last_mtime:
raise MustReloadError()
else:
last_updates[path] = mtime
def restart() -> None:
# Restart this process (only safe for dev/debug)
executable = sys.executable
script_path = Path(sys.argv[0]).resolve()
args = sys.argv[1:]
main_package = sys.modules["__main__"].__package__
if main_package is None:
# Executed by filename
if platform.system() == "Windows":
if not script_path.exists() and script_path.with_suffix(".exe").exists():
# quart run
executable = str(script_path.with_suffix(".exe"))
else:
# python run.py
args = [str(script_path), *args]
else:
if script_path.is_file() and os.access(script_path, os.X_OK):
# hypercorn run:app --reload
executable = str(script_path)
else:
# python run.py
args = [str(script_path), *args]
else:
# Executed as a module e.g. python -m run
module = script_path.stem
import_name = main_package
if module != "__main__":
import_name = f"{main_package}.{module}"
args[:0] = ["-m", import_name.lstrip(".")]
os.execv(executable, [executable] + args)
async def cancel_tasks(tasks: set[asyncio.Task]) -> None:
# Cancel any pending, and wait for the cancellation to
# complete i.e. finish any remaining work.
for task in tasks:
task.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
raise_task_exceptions(tasks)
def raise_task_exceptions(tasks: set[asyncio.Task]) -> None:
# Raise any unexpected exceptions
for task in tasks:
if not task.cancelled() and task.exception() is not None:
raise task.exception()
| MustReloadError |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/psycopg2.py | {
"start": 20781,
"end": 20869
} | class ____(_Psycopg2Range):
_psycopg2_range_cls = "NumericRange"
| _Psycopg2NumericRange |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/cx_oracle.py | {
"start": 22470,
"end": 22543
} | class ____(_OracleNumericCommon, sqltypes.Numeric):
pass
| _OracleNumeric |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_managed_kafka.py | {
"start": 12470,
"end": 13774
} | class ____:
@mock.patch(MANAGED_KAFKA_PATH.format("types.Topic.to_dict"))
@mock.patch(MANAGED_KAFKA_PATH.format("ManagedKafkaHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = ManagedKafkaUpdateTopicOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
project_id=GCP_PROJECT,
location=GCP_LOCATION,
cluster_id=TEST_CLUSTER_ID,
topic_id=TEST_TOPIC_ID,
topic=TEST_UPDATED_TOPIC,
update_mask=TEST_TOPIC_UPDATE_MASK,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.update_topic.assert_called_once_with(
project_id=GCP_PROJECT,
location=GCP_LOCATION,
cluster_id=TEST_CLUSTER_ID,
topic_id=TEST_TOPIC_ID,
topic=TEST_UPDATED_TOPIC,
update_mask=TEST_TOPIC_UPDATE_MASK,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
| TestManagedKafkaUpdateTopicOperator |
python | mwaskom__seaborn | seaborn/relational.py | {
"start": 7023,
"end": 7244
} | class ____(VectorPlotter):
wide_structure = {
"x": "@index", "y": "@values", "hue": "@columns", "style": "@columns",
}
# TODO where best to define default parameters?
sort = True
| _RelationalPlotter |
python | getsentry__sentry | src/sentry/grouping/parameterization.py | {
"start": 9148,
"end": 9513
} | class ____:
"""
Represents a callable that can be used to modify a string, which can give
us more flexibility than just using regex.
"""
name: str # name of the pattern (also used as group name in combined regex)
apply: Callable[[str], tuple[str, int]] # function for modifying the input string
counter: int = 0
| ParameterizationCallable |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 198198,
"end": 200588
} | class ____(Operation):
def __init__(self, axes=2, *, name=None):
super().__init__(name=name)
self.axes = axes
def call(self, x1, x2):
return backend.numpy.tensordot(x1, x2, axes=self.axes)
def compute_output_spec(self, x1, x2):
x1_shape = list(getattr(x1, "shape", []))
x2_shape = list(getattr(x2, "shape", []))
dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
)
if not isinstance(self.axes, int):
x1_select_shape = [x1_shape[ax] for ax in self.axes[0]]
x2_select_shape = [x2_shape[ax] for ax in self.axes[1]]
if not shape_equal(
x1_select_shape, x2_select_shape, allow_none=True
):
raise ValueError(
"Shape mismatch on `x1[axes[0]]` and `x2[axes[1]]`, "
f"received {x1_select_shape} and {x2_select_shape}."
)
for ax in self.axes[0]:
x1_shape[ax] = -1
for ax in self.axes[1]:
x2_shape[ax] = -1
x1_shape = list(filter((-1).__ne__, x1_shape))
x2_shape = list(filter((-1).__ne__, x2_shape))
output_shape = x1_shape + x2_shape
return KerasTensor(output_shape, dtype=dtype)
if self.axes <= 0:
output_shape = x1_shape + x2_shape
else:
output_shape = x1_shape[: -self.axes] + x2_shape[self.axes :]
return KerasTensor(output_shape, dtype=dtype)
@keras_export(["keras.ops.tensordot", "keras.ops.numpy.tensordot"])
def tensordot(x1, x2, axes=2):
"""Compute the tensor dot product along specified axes.
Args:
x1: First tensor.
x2: Second tensor.
axes: - If an integer, N, sum over the last N axes of `x1` and the
first N axes of `x2` in order. The sizes of the corresponding
axes must match.
- Or, a list of axes to be summed over, first sequence applying
to `x1`, second to `x2`. Both sequences must be of the
same length.
Returns:
The tensor dot product of the inputs.
"""
if any_symbolic_tensors((x1, x2)):
return Tensordot(axes=axes).symbolic_call(x1, x2)
return backend.numpy.tensordot(x1, x2, axes=axes)
| Tensordot |
python | doocs__leetcode | solution/0000-0099/0017.Letter Combinations of a Phone Number/Solution.py | {
"start": 0,
"end": 335
} | class ____:
def letterCombinations(self, digits: str) -> List[str]:
if not digits:
return []
d = ["abc", "def", "ghi", "jkl", "mno", "pqrs", "tuv", "wxyz"]
ans = [""]
for i in digits:
s = d[int(i) - 2]
ans = [a + b for a in ans for b in s]
return ans
| Solution |
python | astropy__astropy | astropy/io/votable/tree.py | {
"start": 19800,
"end": 25290
} | class ____(SimpleElementWithContent, _IDProperty, _XtypeProperty, _UtypeProperty):
"""
INFO_ elements: arbitrary key-value pairs for extensions to the standard.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_element_name = "INFO"
_attr_list_11 = ["ID", "name", "value"]
_attr_list_12 = _attr_list_11 + ["xtype", "ref", "unit", "ucd", "utype"]
_utype_in_v1_2 = True
def __init__(
self,
ID=None,
name=None,
value=None,
id=None,
xtype=None,
ref=None,
unit=None,
ucd=None,
utype=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElementWithContent.__init__(self)
self.ID = resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)
self.name = name
self.value = value
self.xtype = xtype
self.ref = ref
self.unit = unit
self.ucd = ucd
self.utype = utype
if config.get("version_1_2_or_later"):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if xtype is not None:
warn_unknown_attrs("INFO", ["xtype"], config, pos)
if ref is not None:
warn_unknown_attrs("INFO", ["ref"], config, pos)
if unit is not None:
warn_unknown_attrs("INFO", ["unit"], config, pos)
if ucd is not None:
warn_unknown_attrs("INFO", ["ucd"], config, pos)
if utype is not None:
warn_unknown_attrs("INFO", ["utype"], config, pos)
warn_unknown_attrs("INFO", extra.keys(), config, pos)
@property
def name(self):
"""[*required*] The key of the key-value pair."""
return self._name
@name.setter
def name(self, name):
if name is None:
warn_or_raise(W35, W35, "name", self._config, self._pos)
xmlutil.check_token(name, "name", self._config, self._pos)
self._name = name
@property
def value(self):
"""
[*required*] The value of the key-value pair. (Always stored
as a string or unicode string).
"""
return self._value
@value.setter
def value(self, value):
if value is None:
warn_or_raise(W35, W35, "value", self._config, self._pos)
check_string(value, "value", self._config, self._pos)
self._value = value
@property
def content(self):
"""The content inside the INFO element."""
return self._content
@content.setter
def content(self, content):
check_string(content, "content", self._config, self._pos)
self._content = content
@content.deleter
def content(self):
self._content = None
@property
def ref(self):
"""
Refer to another INFO_ element by ID_, defined previously in
the document.
"""
return self._ref
@ref.setter
def ref(self, ref):
if ref is not None and not self._config.get("version_1_2_or_later"):
warn_or_raise(W28, W28, ("ref", "INFO", "1.2"), self._config, self._pos)
xmlutil.check_id(ref, "ref", self._config, self._pos)
# TODO: actually apply the reference
# if ref is not None:
# try:
# other = self._votable.get_values_by_id(ref, before=self)
# except KeyError:
# vo_raise(
# "VALUES ref='%s', which has not already been defined." %
# self.ref, self._config, self._pos, KeyError)
# self.null = other.null
# self.type = other.type
# self.min = other.min
# self.min_inclusive = other.min_inclusive
# self.max = other.max
# self.max_inclusive = other.max_inclusive
# self._options[:] = other.options
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def unit(self):
"""A string specifying the units_ for the INFO_."""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
return
from astropy import units as u
if not self._config.get("version_1_2_or_later"):
warn_or_raise(W28, W28, ("unit", "INFO", "1.2"), self._config, self._pos)
# First, parse the unit in the default way, so that we can
# still emit a warning if the unit is not to spec.
default_format = _get_default_unit_format(self._config)
unit_obj = u.Unit(unit, format=default_format, parse_strict="silent")
if isinstance(unit_obj, u.UnrecognizedUnit):
warn_or_raise(W50, W50, (unit,), self._config, self._pos)
format = _get_unit_format(self._config)
if format != default_format:
unit_obj = u.Unit(unit, format=format, parse_strict="silent")
self._unit = unit_obj
@unit.deleter
def unit(self):
self._unit = None
def to_xml(self, w, **kwargs):
attrib = w.object_attrs(self, self._attr_list)
if "unit" in attrib:
attrib["unit"] = self.unit.to_string("cds")
w.element(self._element_name, self._content, attrib=attrib)
| Info |
python | sympy__sympy | sympy/functions/special/bessel.py | {
"start": 67470,
"end": 68611
} | class ____(DefinedFunction):
"""
Helper function to make the $\\mathrm{besselk}(nu, z)$
function tractable for the Gruntz algorithm.
"""
def _eval_aseries(self, n, args0, x, logx):
from sympy.functions.combinatorial.factorials import RisingFactorial
from sympy.series.order import Order
point = args0[1]
if point in [S.Infinity, S.NegativeInfinity]:
nu, z = self.args
l = [((RisingFactorial(Rational(2*nu - 1, 2), k)*RisingFactorial(
Rational(2*nu + 1, 2), k))/((-2)**(k)*z**(Rational(2*k + 1, 2))*factorial(k))) for k in range(n)]
return sqrt(pi/(2))*(Add(*l)) + Order(1/z**(Rational(2*n + 1, 2)), x)
return super()._eval_aseries(n, args0, x, logx)
def _eval_rewrite_as_intractable(self,nu, z, **kwargs):
return exp(z)*besselk(nu, z)
def _eval_nseries(self, x, n, logx, cdir=0):
x0 = self.args[0].limit(x, 0)
if x0.is_zero:
f = self._eval_rewrite_as_intractable(*self.args)
return f._eval_nseries(x, n, logx)
return super()._eval_nseries(x, n, logx)
| _besselk |
python | getsentry__sentry | src/sentry/issues/endpoints/group_reprocessing.py | {
"start": 338,
"end": 1684
} | class ____(GroupEndpoint):
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
def post(self, request: Request, group) -> Response:
"""
Reprocess a group
`````````````````
This endpoint triggers reprocessing for all events in a group.
:pparam string issue_id: the numeric ID of the issue to reprocess. The
reprocessed events will be assigned to a new numeric ID. See comments
in sentry.reprocessing2.
:auth: required
"""
max_events = request.data.get("maxEvents")
if max_events:
max_events = int(max_events)
if max_events <= 0:
return self.respond({"error": "maxEvents must be at least 1"}, status=400)
else:
max_events = None
remaining_events = request.data.get("remainingEvents")
if remaining_events not in ("delete", "keep"):
return self.respond({"error": "remainingEvents must be delete or keep"}, status=400)
reprocess_group.delay(
project_id=group.project_id,
group_id=group.id,
max_events=max_events,
acting_user_id=getattr(request.user, "id", None),
remaining_events=remaining_events,
)
return self.respond(status=200)
| GroupReprocessingEndpoint |
python | numba__numba | numba/tests/test_llvm_version_check.py | {
"start": 47,
"end": 1284
} | class ____(unittest.TestCase):
def test_llvmlite_version(self):
# test the system it's running on
import llvmlite
import numba
self.assertTrue(numba.__version__)
llvmlite_version = llvmlite.__version__
def cleanup():
llvmlite.__version__ = llvmlite_version
self.addCleanup(cleanup)
# explicitly test all 3 cases of version string
ver = numba._min_llvmlite_version
version_pass = '%d.%d.%d' % ver
git_version_pass = '%d.%d.%d-10-g92584ed' % ver
rc_version_pass = '%d.%d.%drc1' % (ver[0], ver[1], ver[2] + 1)
version_fail = '%d.%d.0' % (ver[0], ver[1] - 1)
git_version_fail = '%d.%d.9-10-g92584ed' % (ver[0], ver[1] - 1)
ver_pass = (version_pass, git_version_pass, rc_version_pass)
ver_fail = (version_fail, git_version_fail)
for v in ver_pass:
llvmlite.__version__ = v
importlib.reload(numba)
self.assertTrue(numba.__version__)
for v in ver_fail:
with self.assertRaises(ImportError):
llvmlite.__version__ = v
importlib.reload(numba)
if __name__ == '__main__':
unittest.main()
| TestLlvmVersion |
python | charliermarsh__ruff | scripts/update_schemastore.py | {
"start": 914,
"end": 984
} | class ____(NamedTuple):
fork: str
upstream: str
| SchemastoreRepos |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/plan/inputs.py | {
"start": 9111,
"end": 13867
} | class ____(StepInputSource, IHaveNew):
"""This step input source is the output of a previous step.
Source handle may refer to graph in case of input mapping.
"""
step_output_handle: StepOutputHandle
fan_in: bool
# deprecated, preserved for back-compat
node_handle: NodeHandle
input_name: str
def __new__(
cls,
step_output_handle: StepOutputHandle,
fan_in: bool,
# deprecated, preserved for back-compat
node_handle: Optional[NodeHandle] = None,
input_name: Optional[str] = None,
):
return super().__new__(
cls,
step_output_handle=step_output_handle,
fan_in=fan_in,
# add placeholder values for back-compat
node_handle=node_handle or NodeHandle("", None),
input_name=input_name or "",
)
@property
def step_key_dependencies(self) -> set[str]:
return {self.step_output_handle.step_key}
@property
def step_output_handle_dependencies(self) -> Sequence[StepOutputHandle]:
return [self.step_output_handle]
def get_load_context(
self,
step_context: "StepExecutionContext",
input_def: InputDefinition,
io_manager_key: Optional[str] = None,
) -> "InputContext":
resolved_io_manager_key = (
step_context.execution_plan.get_manager_key(
self.step_output_handle, step_context.job_def
)
if io_manager_key is None
else io_manager_key
)
resource_config = step_context.resolved_run_config.resources[resolved_io_manager_key].config
resources = build_resources_for_manager(resolved_io_manager_key, step_context)
solid_config = step_context.resolved_run_config.ops.get(str(step_context.node_handle))
config_data = solid_config.inputs.get(input_def.name) if solid_config else None
return step_context.for_input_manager(
input_def.name,
config_data,
input_def.metadata,
input_def.dagster_type,
self.step_output_handle,
resource_config,
resources,
)
def load_input_object(
self,
step_context: "StepExecutionContext",
input_def: InputDefinition,
) -> Iterator[object]:
from dagster._core.events import DagsterEvent
from dagster._core.storage.input_manager import InputManager
source_handle = self.step_output_handle
if input_def.input_manager_key is not None:
manager_key = input_def.input_manager_key
input_manager = getattr(step_context.resources, manager_key)
check.invariant(
isinstance(input_manager, InputManager),
f'Input "{input_def.name}" for step "{step_context.step.key}" is depending on '
f'the manager "{manager_key}" to load it, but it is not an InputManager. '
"Please ensure that the resource returned for resource key "
f'"{manager_key}" is an InputManager.',
)
else:
manager_key = step_context.execution_plan.get_manager_key(
source_handle, step_context.job_def
)
input_manager = step_context.get_io_manager(source_handle)
check.invariant(
isinstance(input_manager, IOManager),
f'Input "{input_def.name}" for step "{step_context.step.key}" is depending on '
f'the manager of upstream output "{source_handle.output_name}" from step '
f'"{source_handle.step_key}" to load it, but that manager is not an IOManager. '
"Please ensure that the resource returned for resource key "
f'"{manager_key}" is an IOManager.',
)
load_input_context = self.get_load_context(
step_context, input_def, io_manager_key=manager_key
)
yield from _load_input_with_input_manager(input_manager, load_input_context)
metadata = {
**load_input_context.definition_metadata,
**load_input_context.consume_logged_metadata(),
}
yield DagsterEvent.loaded_input(
step_context,
input_name=input_def.name,
manager_key=manager_key,
upstream_output_name=source_handle.output_name,
upstream_step_key=source_handle.step_key,
metadata=metadata,
)
def required_resource_keys(
self, _job_def: JobDefinition, op_handle: NodeHandle, op_input_name: str
) -> set[str]:
return set()
@whitelist_for_serdes(storage_field_names={"node_handle": "solid_handle"})
@record
| FromStepOutput |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 27781,
"end": 28434
} | class ____(BaseModel, extra="forbid"):
x: "Expression" = Field(..., description="")
target: Optional["Expression"] = Field(
default=None, description="The target value to start decaying from. Defaults to 0."
)
scale: Optional[float] = Field(
default=None,
description="The scale factor of the decay, in terms of `x`. Defaults to 1.0. Must be a non-zero positive number.",
)
midpoint: Optional[float] = Field(
default=None,
description="The midpoint of the decay. Should be between 0 and 1.Defaults to 0.5. Output will be this value when `|x - target| == scale`.",
)
| DecayParamsExpression |
python | pydata__xarray | xarray/namedarray/parallelcompat.py | {
"start": 6172,
"end": 28119
} | class ____(ABC, Generic[T_ChunkedArray]):
"""
Interface between a particular parallel computing framework and xarray.
This abstract base class must be subclassed by libraries implementing chunked array types, and
registered via the ``chunkmanagers`` entrypoint.
Abstract methods on this class must be implemented, whereas non-abstract methods are only required in order to
enable a subset of xarray functionality, and by default will raise a ``NotImplementedError`` if called.
Attributes
----------
array_cls
Type of the array class this parallel computing framework provides.
Parallel frameworks need to provide an array class that supports the array API standard.
This attribute is used for array instance type checking at runtime.
"""
array_cls: type[T_ChunkedArray]
available: bool = True
@abstractmethod
def __init__(self) -> None:
"""Used to set the array_cls attribute at import time."""
raise NotImplementedError()
def is_chunked_array(self, data: duckarray[Any, Any]) -> bool:
"""
Check if the given object is an instance of this type of chunked array.
Compares against the type stored in the array_cls attribute by default.
Parameters
----------
data : Any
Returns
-------
is_chunked : bool
See Also
--------
dask.is_dask_collection
"""
return isinstance(data, self.array_cls)
@abstractmethod
def chunks(self, data: T_ChunkedArray) -> _NormalizedChunks:
"""
Return the current chunks of the given array.
Returns chunks explicitly as a tuple of tuple of ints.
Used internally by xarray objects' .chunks and .chunksizes properties.
Parameters
----------
data : chunked array
Returns
-------
chunks : tuple[tuple[int, ...], ...]
See Also
--------
dask.array.Array.chunks
cubed.Array.chunks
"""
raise NotImplementedError()
@abstractmethod
def normalize_chunks(
self,
chunks: _Chunks | _NormalizedChunks,
shape: _ShapeType | None = None,
limit: int | None = None,
dtype: _DType | None = None,
previous_chunks: _NormalizedChunks | None = None,
) -> _NormalizedChunks:
"""
Normalize given chunking pattern into an explicit tuple of tuples representation.
Exposed primarily because different chunking backends may want to make different decisions about how to
automatically chunk along dimensions not given explicitly in the input chunks.
Called internally by xarray.open_dataset.
Parameters
----------
chunks : tuple, int, dict, or string
The chunks to be normalized.
shape : Tuple[int]
The shape of the array
limit : int (optional)
The maximum block size to target in bytes,
if freedom is given to choose
dtype : np.dtype
previous_chunks : Tuple[Tuple[int]], optional
Chunks from a previous array that we should use for inspiration when
rechunking dimensions automatically.
See Also
--------
dask.array.core.normalize_chunks
"""
raise NotImplementedError()
@abstractmethod
def from_array(
self, data: duckarray[Any, Any], chunks: _Chunks, **kwargs: Any
) -> T_ChunkedArray:
"""
Create a chunked array from a non-chunked numpy-like array.
Generally input should have a ``.shape``, ``.ndim``, ``.dtype`` and support numpy-style slicing.
Called when the .chunk method is called on an xarray object that is not already chunked.
Also called within open_dataset (when chunks is not None) to create a chunked array from
an xarray lazily indexed array.
Parameters
----------
data : array_like
chunks : int, tuple
How to chunk the array.
See Also
--------
dask.array.from_array
cubed.from_array
"""
raise NotImplementedError()
def rechunk(
self,
data: T_ChunkedArray,
chunks: _NormalizedChunks | tuple[int, ...] | _Chunks,
**kwargs: Any,
) -> Any:
"""
Changes the chunking pattern of the given array.
Called when the .chunk method is called on an xarray object that is already chunked.
Parameters
----------
data : dask array
Array to be rechunked.
chunks : int, tuple, dict or str, optional
The new block dimensions to create. -1 indicates the full size of the
corresponding dimension. Default is "auto" which automatically
determines chunk sizes.
Returns
-------
chunked array
See Also
--------
dask.array.Array.rechunk
cubed.Array.rechunk
"""
from xarray.core.common import _contains_cftime_datetimes
from xarray.namedarray.utils import _get_chunk
if _contains_cftime_datetimes(data):
chunks2 = _get_chunk(data, chunks, self, preferred_chunks={}) # type: ignore[arg-type]
else:
chunks2 = chunks # type: ignore[assignment]
return data.rechunk(chunks2, **kwargs)
@abstractmethod
def compute(
self, *data: T_ChunkedArray | Any, **kwargs: Any
) -> tuple[np.ndarray[Any, _DType_co], ...]:
"""
Computes one or more chunked arrays, returning them as eager numpy arrays.
Called anytime something needs to computed, including multiple arrays at once.
Used by `.compute`, `.persist`, `.values`.
Parameters
----------
*data : object
Any number of objects. If an object is an instance of the chunked array type, it is computed
and the in-memory result returned as a numpy array. All other types should be passed through unchanged.
Returns
-------
objs
The input, but with all chunked arrays now computed.
See Also
--------
dask.compute
cubed.compute
"""
raise NotImplementedError()
def shuffle(
self, x: T_ChunkedArray, indexer: list[list[int]], axis: int, chunks: T_Chunks
) -> T_ChunkedArray:
raise NotImplementedError()
def persist(
self, *data: T_ChunkedArray | Any, **kwargs: Any
) -> tuple[T_ChunkedArray | Any, ...]:
"""
Persist one or more chunked arrays in memory.
Parameters
----------
*data : object
Any number of objects. If an object is an instance of the chunked array type, it is persisted
as a chunked array in memory. All other types should be passed through unchanged.
Returns
-------
objs
The input, but with all chunked arrays now persisted in memory.
See Also
--------
dask.persist
"""
raise NotImplementedError()
@property
def array_api(self) -> Any:
"""
Return the array_api namespace following the python array API standard.
See https://data-apis.org/array-api/latest/ . Currently used to access the array API function
``full_like``, which is called within the xarray constructors ``xarray.full_like``, ``xarray.ones_like``,
``xarray.zeros_like``, etc.
See Also
--------
dask.array
cubed.array_api
"""
raise NotImplementedError()
def reduction(
self,
arr: T_ChunkedArray,
func: Callable[..., Any],
combine_func: Callable[..., Any] | None = None,
aggregate_func: Callable[..., Any] | None = None,
axis: int | Sequence[int] | None = None,
dtype: _DType_co | None = None,
keepdims: bool = False,
) -> T_ChunkedArray:
"""
A general version of array reductions along one or more axes.
Used inside some reductions like nanfirst, which is used by ``groupby.first``.
Parameters
----------
arr : chunked array
Data to be reduced along one or more axes.
func : Callable(x_chunk, axis, keepdims)
First function to be executed when resolving the dask graph.
This function is applied in parallel to all original chunks of x.
See below for function parameters.
combine_func : Callable(x_chunk, axis, keepdims), optional
Function used for intermediate recursive aggregation (see
split_every below). If omitted, it defaults to aggregate_func.
aggregate_func : Callable(x_chunk, axis, keepdims)
Last function to be executed, producing the final output. It is always invoked, even when the reduced
Array counts a single chunk along the reduced axes.
axis : int or sequence of ints, optional
Axis or axes to aggregate upon. If omitted, aggregate along all axes.
dtype : np.dtype
data type of output. This argument was previously optional, but
leaving as ``None`` will now raise an exception.
keepdims : boolean, optional
Whether the reduction function should preserve the reduced axes,
leaving them at size ``output_size``, or remove them.
Returns
-------
chunked array
See Also
--------
dask.array.reduction
cubed.core.reduction
"""
raise NotImplementedError()
def scan(
self,
func: Callable[..., Any],
binop: Callable[..., Any],
ident: float,
arr: T_ChunkedArray,
axis: int | None = None,
dtype: _DType_co | None = None,
**kwargs: Any,
) -> T_ChunkedArray:
"""
General version of a 1D scan, also known as a cumulative array reduction.
Used in ``ffill`` and ``bfill`` in xarray.
Parameters
----------
func: callable
Cumulative function like np.cumsum or np.cumprod
binop: callable
Associated binary operator like ``np.cumsum->add`` or ``np.cumprod->mul``
ident: Number
Associated identity like ``np.cumsum->0`` or ``np.cumprod->1``
arr: dask Array
axis: int, optional
dtype: dtype
Returns
-------
Chunked array
See also
--------
dask.array.cumreduction
"""
raise NotImplementedError()
@abstractmethod
def apply_gufunc(
self,
func: Callable[..., Any],
signature: str,
*args: Any,
axes: Sequence[tuple[int, ...]] | None = None,
keepdims: bool = False,
output_dtypes: Sequence[_DType_co] | None = None,
vectorize: bool | None = None,
**kwargs: Any,
) -> Any:
"""
Apply a generalized ufunc or similar python function to arrays.
``signature`` determines if the function consumes or produces core
dimensions. The remaining dimensions in given input arrays (``*args``)
are considered loop dimensions and are required to broadcast
naturally against each other.
In other terms, this function is like ``np.vectorize``, but for
the blocks of chunked arrays. If the function itself shall also
be vectorized use ``vectorize=True`` for convenience.
Called inside ``xarray.apply_ufunc``, which is called internally for most xarray operations.
Therefore this method must be implemented for the vast majority of xarray computations to be supported.
Parameters
----------
func : callable
Function to call like ``func(*args, **kwargs)`` on input arrays
(``*args``) that returns an array or tuple of arrays. If multiple
arguments with non-matching dimensions are supplied, this function is
expected to vectorize (broadcast) over axes of positional arguments in
the style of NumPy universal functions [1]_ (if this is not the case,
set ``vectorize=True``). If this function returns multiple outputs,
``output_core_dims`` has to be set as well.
signature: string
Specifies what core dimensions are consumed and produced by ``func``.
According to the specification of numpy.gufunc signature [2]_
*args : numeric
Input arrays or scalars to the callable function.
axes: List of tuples, optional, keyword only
A list of tuples with indices of axes a generalized ufunc should operate on.
For instance, for a signature of ``"(i,j),(j,k)->(i,k)"`` appropriate for
matrix multiplication, the base elements are two-dimensional matrices
and these are taken to be stored in the two last axes of each argument. The
corresponding axes keyword would be ``[(-2, -1), (-2, -1), (-2, -1)]``.
For simplicity, for generalized ufuncs that operate on 1-dimensional arrays
(vectors), a single integer is accepted instead of a single-element tuple,
and for generalized ufuncs for which all outputs are scalars, the output
tuples can be omitted.
keepdims: bool, optional, keyword only
If this is set to True, axes which are reduced over will be left in the result as
a dimension with size one, so that the result will broadcast correctly against the
inputs. This option can only be used for generalized ufuncs that operate on inputs
that all have the same number of core dimensions and with outputs that have no core
dimensions , i.e., with signatures like ``"(i),(i)->()"`` or ``"(m,m)->()"``.
If used, the location of the dimensions in the output can be controlled with axes
and axis.
output_dtypes : Optional, dtype or list of dtypes, keyword only
Valid numpy dtype specification or list thereof.
If not given, a call of ``func`` with a small set of data
is performed in order to try to automatically determine the
output dtypes.
vectorize: bool, keyword only
If set to ``True``, ``np.vectorize`` is applied to ``func`` for
convenience. Defaults to ``False``.
**kwargs : dict
Extra keyword arguments to pass to `func`
Returns
-------
Single chunked array or tuple of chunked arrays
See Also
--------
dask.array.gufunc.apply_gufunc
cubed.apply_gufunc
References
----------
.. [1] https://docs.scipy.org/doc/numpy/reference/ufuncs.html
.. [2] https://docs.scipy.org/doc/numpy/reference/c-api/generalized-ufuncs.html
"""
raise NotImplementedError()
def map_blocks(
self,
func: Callable[..., Any],
*args: Any,
dtype: _DType_co | None = None,
chunks: tuple[int, ...] | None = None,
drop_axis: int | Sequence[int] | None = None,
new_axis: int | Sequence[int] | None = None,
**kwargs: Any,
) -> Any:
"""
Map a function across all blocks of a chunked array.
Called in elementwise operations, but notably not (currently) called within xarray.map_blocks.
Parameters
----------
func : callable
Function to apply to every block in the array.
If ``func`` accepts ``block_info=`` or ``block_id=``
as keyword arguments, these will be passed dictionaries
containing information about input and output chunks/arrays
during computation. See examples for details.
args : dask arrays or other objects
dtype : np.dtype, optional
The ``dtype`` of the output array. It is recommended to provide this.
If not provided, will be inferred by applying the function to a small
set of fake data.
chunks : tuple, optional
Chunk shape of resulting blocks if the function does not preserve
shape. If not provided, the resulting array is assumed to have the same
block structure as the first input array.
drop_axis : number or iterable, optional
Dimensions lost by the function.
new_axis : number or iterable, optional
New dimensions created by the function. Note that these are applied
after ``drop_axis`` (if present).
**kwargs :
Other keyword arguments to pass to function. Values must be constants
(not dask.arrays)
See Also
--------
dask.array.map_blocks
cubed.map_blocks
"""
raise NotImplementedError()
def blockwise(
self,
func: Callable[..., Any],
out_ind: Iterable[Any],
*args: Any, # can't type this as mypy assumes args are all same type, but dask blockwise args alternate types
adjust_chunks: dict[Any, Callable[..., Any]] | None = None,
new_axes: dict[Any, int] | None = None,
align_arrays: bool = True,
**kwargs: Any,
) -> Any:
"""
Tensor operation: Generalized inner and outer products.
A broad class of blocked algorithms and patterns can be specified with a
concise multi-index notation. The ``blockwise`` function applies an in-memory
function across multiple blocks of multiple inputs in a variety of ways.
Many chunked array operations are special cases of blockwise including
elementwise, broadcasting, reductions, tensordot, and transpose.
Currently only called explicitly in xarray when performing multidimensional interpolation.
Parameters
----------
func : callable
Function to apply to individual tuples of blocks
out_ind : iterable
Block pattern of the output, something like 'ijk' or (1, 2, 3)
*args : sequence of Array, index pairs
You may also pass literal arguments, accompanied by None index
e.g. (x, 'ij', y, 'jk', z, 'i', some_literal, None)
**kwargs : dict
Extra keyword arguments to pass to function
adjust_chunks : dict
Dictionary mapping index to function to be applied to chunk sizes
new_axes : dict, keyword only
New indexes and their dimension lengths
align_arrays: bool
Whether or not to align chunks along equally sized dimensions when
multiple arrays are provided. This allows for larger chunks in some
arrays to be broken into smaller ones that match chunk sizes in other
arrays such that they are compatible for block function mapping. If
this is false, then an error will be thrown if arrays do not already
have the same number of blocks in each dimension.
See Also
--------
dask.array.blockwise
cubed.core.blockwise
"""
raise NotImplementedError()
def unify_chunks(
self,
*args: Any, # can't type this as mypy assumes args are all same type, but dask unify_chunks args alternate types
**kwargs: Any,
) -> tuple[dict[str, _NormalizedChunks], list[T_ChunkedArray]]:
"""
Unify chunks across a sequence of arrays.
Called by xarray.unify_chunks.
Parameters
----------
*args: sequence of Array, index pairs
Sequence like (x, 'ij', y, 'jk', z, 'i')
See Also
--------
dask.array.core.unify_chunks
cubed.core.unify_chunks
"""
raise NotImplementedError()
def store(
self,
sources: T_ChunkedArray | Sequence[T_ChunkedArray],
targets: Any,
**kwargs: dict[str, Any],
) -> Any:
"""
Store chunked arrays in array-like objects, overwriting data in target.
This stores chunked arrays into object that supports numpy-style setitem
indexing (e.g. a Zarr Store). Allows storing values chunk by chunk so that it does not have to
fill up memory. For best performance you likely want to align the block size of
the storage target with the block size of your array.
Used when writing to any registered xarray I/O backend.
Parameters
----------
sources: Array or collection of Arrays
targets: array-like or collection of array-likes
These should support setitem syntax ``target[10:20] = ...``.
If sources is a single item, targets must be a single item; if sources is a
collection of arrays, targets must be a matching collection.
kwargs:
Parameters passed to compute/persist (only used if compute=True)
See Also
--------
dask.array.store
cubed.store
"""
raise NotImplementedError()
def get_auto_chunk_size(
self,
) -> int:
"""
Get the default chunk size for a variable.
This is used to determine the chunk size when opening a dataset with
``chunks="auto"`` or when rechunking an array with ``chunks="auto"``.
Parameters
----------
target_chunksize : int, optional
The target chunk size in bytes. If not provided, a default value is used.
Returns
-------
chunk_size : int
The chunk size in bytes.
"""
raise NotImplementedError(
"For 'auto' rechunking of cftime arrays, get_auto_chunk_size must be implemented by the chunk manager"
)
| ChunkManagerEntrypoint |
python | sanic-org__sanic | sanic/cli/arguments.py | {
"start": 1440,
"end": 2588
} | class ____(Group):
name = None
def attach(self):
self.container.add_argument(
"--version",
action="version",
version=f"Sanic {__version__}; Routing {__routing_version__}",
)
self.container.add_argument(
"target",
help=(
"Path to your Sanic app instance.\n"
"\tExample: path.to.server:app\n"
"If running a Simple Server, path to directory to serve.\n"
"\tExample: ./\n"
"Additionally, this can be a path to a factory function\n"
"that returns a Sanic app instance.\n"
"\tExample: path.to.server:create_app\n"
),
)
self.container.add_argument(
"action",
nargs="?",
default="serve",
choices=[
"serve",
"exec",
],
help=(
"Action to perform.\n"
"\tserve: Run the Sanic app\n"
"\texec: Execute a command in the Sanic app context\n"
),
)
| GeneralGroup |
python | astropy__astropy | astropy/visualization/stretch.py | {
"start": 2450,
"end": 4446
} | class ____(BaseStretch):
"""
A linear stretch with a slope and offset.
The stretch is given by:
.. math::
y = slope * x + intercept
Parameters
----------
slope : float, optional
The ``slope`` parameter used in the above formula. Default is 1.
intercept : float, optional
The ``intercept`` parameter used in the above formula. Default is 0.
Examples
--------
.. plot::
:show-source-link:
import numpy as np
from astropy.visualization import LinearStretch
from matplotlib import pyplot as plt
fig, ax = plt.subplots(figsize=(5, 5))
x = np.linspace(0, 1, 100)
slopes = [1, 0.5, 1.3, 1.4, 2.0]
intercepts = [0, 0.0, -0.4, 0., 0.2]
for slope, intercept in zip(slopes, intercepts):
stretch = LinearStretch(slope, intercept)
label = f'{slope=}, {intercept=}'
ax.plot(x, stretch(x, clip=True), label=label)
ax.axis('equal')
ax.plot(x, x, ls='dotted', color='k', alpha=0.3)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_xlabel('Input Value')
ax.set_ylabel('Output Value')
ax.set_title(stretch.__class__.__name__)
ax.legend(loc='lower right', fontsize=8)
"""
def __init__(self, slope=1, intercept=0):
super().__init__()
self.slope = slope
self.intercept = intercept
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
if self.slope != 1:
np.multiply(values, self.slope, out=values)
if self.intercept != 0:
np.add(values, self.intercept, out=values)
if clip:
np.clip(values, 0, 1, out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return LinearStretch(1.0 / self.slope, -self.intercept / self.slope)
| LinearStretch |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 17456,
"end": 18044
} | class ____(DelegatingLexer):
"""
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `JavascriptLexer`.
.. versionadded:: 0.6
"""
name = 'JavaScript+Myghty'
aliases = ['js+myghty', 'javascript+myghty']
mimetypes = ['application/x-javascript+myghty',
'text/x-javascript+myghty',
'text/javascript+mygthy']
def __init__(self, **options):
super(MyghtyJavascriptLexer, self).__init__(JavascriptLexer,
MyghtyLexer, **options)
| MyghtyJavascriptLexer |
python | pytorch__pytorch | test/onnx/model_defs/op_test.py | {
"start": 67,
"end": 462
} | class ____(nn.Module):
def __init__(self, num_classes=1000):
super().__init__()
self.features = nn.Sequential(
nn.LeakyReLU(0.02),
nn.BatchNorm2d(3),
nn.AvgPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=False),
)
def forward(self, x):
output = self.features(x)
return output.view(-1, 1).squeeze(1)
| DummyNet |
python | pytorch__pytorch | torch/cuda/__init__.py | {
"start": 19015,
"end": 19366
} | class ____:
def __init__(self, index: int):
self.idx = index
self.prev_idx = -1
def __enter__(self):
self.prev_idx = torch.cuda._exchange_device(self.idx)
def __exit__(self, type: Any, value: Any, traceback: Any):
self.idx = torch.cuda._maybe_exchange_device(self.prev_idx)
return False
| _DeviceGuard |
python | pypa__warehouse | warehouse/db.py | {
"start": 2203,
"end": 2318
} | class ____(Exception): ...
# The Global metadata object.
metadata = sqlalchemy.MetaData()
| DatabaseNotAvailableError |
python | python-poetry__poetry | tests/types.py | {
"start": 1452,
"end": 1738
} | class ____(Protocol):
def __call__(
self,
command: str,
poetry: Poetry | None = None,
installer: Installer | None = None,
executor: Executor | None = None,
environment: Env | None = None,
) -> CommandTester: ...
| CommandTesterFactory |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/signal/spectral_ops_test.py | {
"start": 1324,
"end": 16162
} | class ____(test.TestCase, parameterized.TestCase):
@staticmethod
def _np_hann_periodic_window(length):
if length == 1:
return np.ones(1)
odd = length % 2
if not odd:
length += 1
window = 0.5 - 0.5 * np.cos(2.0 * np.pi * np.arange(length) / (length - 1))
if not odd:
window = window[:-1]
return window
@staticmethod
def _np_frame(data, window_length, hop_length):
num_frames = 1 + int(np.floor((len(data) - window_length) // hop_length))
shape = (num_frames, window_length)
strides = (data.strides[0] * hop_length, data.strides[0])
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
@staticmethod
def _np_stft(data, fft_length, hop_length, window_length):
frames = SpectralOpsTest._np_frame(data, window_length, hop_length)
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return np.fft.rfft(frames * window, fft_length)
@staticmethod
def _np_inverse_stft(stft, fft_length, hop_length, window_length):
frames = np.fft.irfft(stft, fft_length)
# Pad or truncate frames's inner dimension to window_length.
frames = frames[..., :window_length]
frames = np.pad(frames, [[0, 0]] * (frames.ndim - 1) +
[[0, max(0, window_length - frames.shape[-1])]], "constant")
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return SpectralOpsTest._np_overlap_add(frames * window, hop_length)
@staticmethod
def _np_overlap_add(stft, hop_length):
num_frames, window_length = np.shape(stft)
# Output length will be one complete window, plus another hop_length's
# worth of points for each additional window.
output_length = window_length + (num_frames - 1) * hop_length
output = np.zeros(output_length)
for i in range(num_frames):
output[i * hop_length:i * hop_length + window_length] += stft[i,]
return output
def _compare(self, signal, frame_length, frame_step, fft_length, tol):
actual_stft = spectral_ops.stft(
signal, frame_length, frame_step, fft_length, pad_end=False)
signal_ph = array_ops.placeholder_with_default(signal, shape=signal.shape)
actual_stft_from_ph = spectral_ops.stft(
signal_ph, frame_length, frame_step, fft_length, pad_end=False)
actual_inverse_stft = spectral_ops.inverse_stft(
actual_stft, frame_length, frame_step, fft_length)
actual_stft, actual_stft_from_ph, actual_inverse_stft = self.evaluate(
[actual_stft, actual_stft_from_ph, actual_inverse_stft])
actual_stft_ph = array_ops.placeholder_with_default(
actual_stft, shape=actual_stft.shape)
actual_inverse_stft_from_ph = self.evaluate(
spectral_ops.inverse_stft(
actual_stft_ph, frame_length, frame_step, fft_length))
# Confirm that there is no difference in output when shape/rank is fully
# unknown or known.
self.assertAllClose(actual_stft, actual_stft_from_ph)
self.assertAllClose(actual_inverse_stft, actual_inverse_stft_from_ph)
expected_stft = SpectralOpsTest._np_stft(
signal, fft_length, frame_step, frame_length)
self.assertAllClose(expected_stft, actual_stft, rtol=tol, atol=tol)
expected_inverse_stft = SpectralOpsTest._np_inverse_stft(
expected_stft, fft_length, frame_step, frame_length)
self.assertAllClose(
expected_inverse_stft, actual_inverse_stft, rtol=tol, atol=tol)
def test_shapes(self):
signal = np.zeros((512,)).astype(np.float32)
# If fft_length is not provided, the smallest enclosing power of 2 of
# frame_length (8) is used.
stft = spectral_ops.stft(signal, frame_length=7, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
fft_length=16, pad_end=True)
self.assertAllEqual([64, 9], stft.shape.as_list())
self.assertAllEqual([64, 9], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=16, frame_step=8,
fft_length=8, pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = np.zeros((32, 9)).astype(np.complex64)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length=8,
fft_length=16, frame_step=8)
expected_length = (stft.shape[0] - 1) * 8 + 8
self.assertAllEqual([256], inverse_stft.shape.as_list())
self.assertAllEqual([expected_length], self.evaluate(inverse_stft).shape)
@parameterized.parameters(
(512, 64, 32, 64, np.float32, 1e-4),
(512, 64, 32, 64, np.float64, 1e-8),
(512, 64, 64, 64, np.float32, 1e-4),
(512, 64, 64, 64, np.float64, 1e-8),
(512, 72, 64, 64, np.float32, 1e-4),
(512, 72, 64, 64, np.float64, 1e-8),
(512, 64, 25, 64, np.float32, 1e-4),
(512, 64, 25, 64, np.float64, 1e-8),
(512, 25, 15, 36, np.float32, 1e-4),
(512, 25, 15, 36, np.float64, 1e-8),
(123, 23, 5, 42, np.float32, 1e-4),
(123, 23, 5, 42, np.float64, 1e-8))
def test_stft_and_inverse_stft(self, signal_length, frame_length,
frame_step, fft_length, np_rtype, tol):
"""Test that spectral_ops.stft/inverse_stft match a NumPy implementation."""
signal = np.random.random(signal_length).astype(np_rtype)
self._compare(signal, frame_length, frame_step, fft_length, tol)
@parameterized.parameters(
# 87.5% overlap.
(4096, 256, 32, 256, np.float32, 1e-5, 1e-6),
(4096, 256, 32, 256, np.float64, 1e-8, 1e-8),
# 75% overlap.
(4096, 256, 64, 256, np.float32, 1e-5, 1e-6),
(4096, 256, 64, 256, np.float64, 1e-8, 1e-8),
# Odd frame hop.
(4096, 128, 25, 128, np.float32, 1e-3, 1e-6),
(4096, 128, 25, 128, np.float64, 5e-4, 1e-8),
# Odd frame length.
(4096, 127, 32, 128, np.float32, 1e-3, 1e-6),
(4096, 127, 32, 128, np.float64, 1e-3, 1e-8),
# 50% overlap.
(4096, 128, 64, 128, np.float32, 0.4, 1e-6),
(4096, 128, 64, 128, np.float64, 0.4, 1e-8))
def test_stft_round_trip(self, signal_length, frame_length, frame_step,
fft_length, np_rtype, threshold,
corrected_threshold):
# Generate a random white Gaussian signal.
signal = np.random.normal(size=signal_length).astype(np_rtype)
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,
pad_end=False)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,
fft_length)
inverse_stft_corrected = spectral_ops.inverse_stft(
stft, frame_length, frame_step, fft_length,
window_fn=spectral_ops.inverse_stft_window_fn(frame_step))
inverse_stft, inverse_stft_corrected = self.evaluate(
[inverse_stft, inverse_stft_corrected])
# Truncate signal to the size of inverse stft.
signal = signal[:inverse_stft.shape[0]]
# Ignore the frame_length samples at either edge.
signal = signal[frame_length:-frame_length]
inverse_stft = inverse_stft[frame_length:-frame_length]
inverse_stft_corrected = inverse_stft_corrected[
frame_length:-frame_length]
# Check that the inverse and original signal are close up to a scale
# factor.
inverse_stft_scaled = inverse_stft / np.mean(np.abs(inverse_stft))
signal_scaled = signal / np.mean(np.abs(signal))
self.assertLess(np.std(inverse_stft_scaled - signal_scaled), threshold)
# Check that the inverse with correction and original signal are close.
self.assertLess(np.std(inverse_stft_corrected - signal),
corrected_threshold)
@parameterized.parameters(
(256, 32),
(256, 64),
(128, 25),
(127, 32),
(128, 64),
(64, 64),
(64, 128),
)
def test_inverse_stft_window_fn(self, frame_length, frame_step):
"""Test that inverse_stft_window_fn has unit gain at each window phase."""
hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
hann_window, inverse_window = self.evaluate([hann_window, inverse_window])
# Expect unit gain at each phase of the window.
product_window = hann_window * inverse_window
for i in range(frame_step):
hann_window_i = hann_window[i::frame_step]
inverse_window_i = inverse_window[i::frame_step]
# Skip if both windows are 0 (can happen for frame_length <= frame_step).
if (hann_window_i == 0.0).all() and (inverse_window_i == 0.0).all():
continue
self.assertAllClose(1.0, np.sum(product_window[i::frame_step]))
@parameterized.parameters((256, 64), (128, 32))
def test_inverse_stft_window_fn_special_case(self, frame_length, frame_step):
"""Test inverse_stft_window_fn in special overlap = 3/4 case."""
# Cases in which frame_length is an integer multiple of 4 * frame_step are
# special because they allow exact reproduction of the waveform with a
# squared Hann window (Hann window in both forward and reverse transforms).
# In the case where frame_length = 4 * frame_step, that combination
# produces a constant gain of 1.5, and so the corrected window will be the
# Hann window / 1.5.
hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
self.assertAllClose(hann_window, inverse_window * 1.5)
@staticmethod
def _compute_stft_gradient(signal, frame_length=32, frame_step=16,
fft_length=32):
"""Computes the gradient of the STFT with respect to `signal`."""
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length)
magnitude_stft = math_ops.abs(stft)
loss = math_ops.reduce_sum(magnitude_stft)
return gradients_impl.gradients([loss], [signal])[0]
def test_gradients(self):
"""Test that spectral_ops.stft has a working gradient."""
# TODO(rjryan): Update gradient tests for Eager.
if context.executing_eagerly():
return
with self.session() as sess:
signal_length = 512
# An all-zero signal has all zero gradients with respect to the sum of the
# magnitude STFT.
empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32)
empty_signal_gradient = sess.run(
self._compute_stft_gradient(empty_signal))
self.assertTrue((empty_signal_gradient == 0.0).all())
# A sinusoid will have non-zero components of its gradient with respect to
# the sum of the magnitude STFT.
sinusoid = math_ops.sin(
2 * np.pi * math_ops.linspace(0.0, 1.0, signal_length))
sinusoid_gradient = self.evaluate(self._compute_stft_gradient(sinusoid))
self.assertFalse((sinusoid_gradient == 0.0).all())
@parameterized.parameters(
(64, 16, 8, 16, np.float32, 2e-3, 5e-4),
(64, 16, 8, 16, np.float64, 1e-8, 1e-8),
(64, 16, 16, 16, np.float32, 2e-3, 5e-4),
(64, 16, 16, 16, np.float64, 1e-8, 1e-8),
(64, 16, 7, 16, np.float32, 2e-3, 5e-4),
(64, 16, 7, 16, np.float64, 1e-8, 1e-8),
(64, 7, 4, 9, np.float32, 2e-3, 5e-4),
(64, 7, 4, 9, np.float64, 1e-8, 1e-8),
(29, 5, 1, 10, np.float32, 2e-3, 5e-4),
(29, 5, 1, 10, np.float64, 1e-8, 1e-8))
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message="On ROCm, this fails with mismatches at some locations "
"(possibly due to peculiarities of rocFFT - investigate)")
def test_gradients_numerical(self, signal_length, frame_length, frame_step,
fft_length, np_rtype, forward_tol, backward_tol):
# TODO(rjryan): Investigate why STFT gradient error is so high.
signal = np.random.rand(signal_length).astype(np_rtype) * 2 - 1
def forward(signal):
return spectral_ops.stft(
signal, frame_length, frame_step, fft_length, pad_end=False)
((f_jacob_t,), (f_jacob_n,)) = gradient_checker_v2.compute_gradient(
forward, [signal])
self.assertAllClose(f_jacob_t, f_jacob_n,
rtol=forward_tol, atol=forward_tol)
def backward(stft):
return spectral_ops.inverse_stft(
stft, frame_length, frame_step, fft_length)
stft = forward(signal)
((b_jacob_t,), (b_jacob_n,)) = gradient_checker_v2.compute_gradient(
backward, [stft])
self.assertAllClose(b_jacob_t, b_jacob_n,
rtol=backward_tol, atol=backward_tol)
@parameterized.parameters(
itertools.product(
(4000,),
(256,),
(np.float32, np.float64),
("ortho", None),
("vorbis", "kaiser_bessel_derived", None),
(False, True)))
def test_mdct_round_trip(self, signal_length, frame_length, np_rtype,
norm, window_type, pad_end):
if np_rtype == np.float32:
tol = 1e-5
else:
if window_type == "kaiser_bessel_derived":
tol = 1e-6
else:
tol = 1e-8
# Generate a random white Gaussian signal.
signal = np.random.normal(size=signal_length).astype(np_rtype)
if window_type == "vorbis":
window_fn = window_ops.vorbis_window
elif window_type == "kaiser_bessel_derived":
window_fn = window_ops.kaiser_bessel_derived_window
elif window_type is None:
window_fn = None
mdct = spectral_ops.mdct(signal, frame_length, norm=norm,
window_fn=window_fn, pad_end=pad_end)
inverse_mdct = spectral_ops.inverse_mdct(mdct, norm=norm,
window_fn=window_fn)
inverse_mdct = self.evaluate(inverse_mdct)
# Truncate signal and inverse_mdct to their minimum length.
min_length = np.minimum(signal.shape[0], inverse_mdct.shape[0])
# Ignore the half_len samples at either edge.
half_len = frame_length // 2
signal = signal[half_len:min_length-half_len]
inverse_mdct = inverse_mdct[half_len:min_length-half_len]
# Check that the inverse and original signal are close.
self.assertAllClose(inverse_mdct, signal, atol=tol, rtol=tol)
if __name__ == "__main__":
test.main()
| SpectralOpsTest |
python | great-expectations__great_expectations | great_expectations/data_context/cloud_constants.py | {
"start": 283,
"end": 500
} | class ____(str, Enum):
BASE_URL = "GX_CLOUD_BASE_URL"
ORGANIZATION_ID = "GX_CLOUD_ORGANIZATION_ID"
ACCESS_TOKEN = "GX_CLOUD_ACCESS_TOKEN"
WORKSPACE_ID = "GX_CLOUD_WORKSPACE_ID"
| GXCloudEnvironmentVariable |
python | doocs__leetcode | solution/2900-2999/2981.Find Longest Special Substring That Occurs Thrice I/Solution.py | {
"start": 0,
"end": 615
} | class ____:
def maximumLength(self, s: str) -> int:
def check(x: int) -> bool:
cnt = defaultdict(int)
i = 0
while i < n:
j = i + 1
while j < n and s[j] == s[i]:
j += 1
cnt[s[i]] += max(0, j - i - x + 1)
i = j
return max(cnt.values()) >= 3
n = len(s)
l, r = 0, n
while l < r:
mid = (l + r + 1) >> 1
if check(mid):
l = mid
else:
r = mid - 1
return -1 if l == 0 else l
| Solution |
python | ray-project__ray | python/ray/tests/test_actor_retry_2.py | {
"start": 2033,
"end": 11991
} | class ____:
"""
Same as TroubleMaker, just all methods are async.
"""
def __init__(self, *, counter_key: Optional[str] = None):
self._counter_key = counter_key
@ray.method(max_task_retries=5, retry_exceptions=[MyError])
async def may_raise_n_times(self, counter, n):
c = await counter.increment.remote(self._counter_key)
print(f"may_raise_n_times, n = {n}, count = {c}")
if c < n:
print(f"method raises in {c} th call, want {n} times")
raise MyError()
return c
@ray.method(retry_exceptions=[MyError])
async def raise_or_exit(self, counter, actions):
c = await counter.increment.remote(self._counter_key)
action = "return" if c >= len(actions) else actions[c]
print(f"raise_or_exit, action = {action}, count = {c}")
if action == "raise":
raise MyError()
elif action == "exit":
# import signal
# sys.exit(1) -> hang
# ray.actor.exit_actor() -> failed, no retry
# os.kill(os.getpid(), signal.SIGTERM) -> ignored, continued to return
# os.kill(os.getpid(), signal.SIGKILL) -> retries
os._exit(0)
return -42
else:
return c
@ray.method(num_returns="streaming") # retry_exceptions=None aka False.
async def yield_or_raise(self, counter, actions):
while True:
c = await counter.increment.remote(self._counter_key)
a = actions[c]
if isinstance(a, BaseException):
raise a
else:
yield a
if c == len(actions) - 1:
# don't over call counter. Only call #yield and #raise times.
return
def test_generator_method_no_retry_without_retry_exceptions(ray_start_regular_shared):
counter = Counter.remote()
trouble_maker = AsyncTroubleMaker.remote()
gen = trouble_maker.yield_or_raise.remote(
counter,
[
# First round: 1 then raise
1,
MyError(),
# No retry, no second round
1,
2,
],
)
assert ray.get(next(gen)) == 1
with pytest.raises(MyError):
ray.get(next(gen))
with pytest.raises(StopIteration):
ray.get(next(gen))
assert ray.get(counter.get_count.remote()) == 2
def test_generator_method_retry_exact_times(ray_start_regular_shared):
counter = Counter.remote()
trouble_maker = AsyncTroubleMaker.remote()
# Should retry out max_task_retries=3 times
gen = trouble_maker.yield_or_raise.options(retry_exceptions=[MyError]).remote(
counter,
[
# First round
1,
MyError(),
# retry 1
1,
MyError(),
# retry 2
1,
MyError(),
# retry 3
1,
2,
3,
],
)
assert ray.get(next(gen)) == 1
assert ray.get(next(gen)) == 2
assert ray.get(next(gen)) == 3
with pytest.raises(StopIteration):
ray.get(next(gen))
assert ray.get(counter.get_count.remote()) == 9
def test_generator_method_does_not_over_retry(ray_start_regular_shared):
counter = Counter.remote()
trouble_maker = AsyncTroubleMaker.remote()
# Should retry out max_task_retries=3 times
gen = trouble_maker.yield_or_raise.options(retry_exceptions=[MyError]).remote(
counter,
[
# First round
1,
MyError(),
# retry 1
1,
MyError(),
# retry 2,
1,
MyError(),
# retry 3
1,
MyError(),
# no retry 4!
1,
2,
],
)
assert ray.get(next(gen)) == 1
with pytest.raises(MyError):
ray.get(next(gen))
with pytest.raises(StopIteration):
ray.get(next(gen))
assert ray.get(counter.get_count.remote()) == 8
@pytest.mark.parametrize(
"actions",
[
["exit", "exit"],
["exit", "raise"],
["raise", "exit"],
["raise", "raise"],
],
ids=lambda lst: ",".join(lst), # test case show name
)
@pytest.mark.parametrize(
"max_retries_and_restarts", [-1, 2], ids=lambda r: f"max_retries_and_restarts({r})"
)
def test_method_raise_and_exit(
actions, max_retries_and_restarts, ray_start_regular_shared
):
"""
Test we can endure a mix of raises and exits. Note the number of exits we can endure
is subject to max_restarts.
The retry behavior should work for Async actors and Threaded actors.
The retry behavior should work if the max_task_retries or max_restarts are -1
(infinite retry).
"""
# NOTE(edoakes): we test on all three types of actors in parallel to reduce the
# time taken to run the test in CI.
counter = Counter.remote()
sync_actor = TroubleMaker.options(max_restarts=max_retries_and_restarts).remote(
counter_key="sync",
)
async_actor = AsyncTroubleMaker.options(
max_restarts=max_retries_and_restarts
).remote(
counter_key="async",
)
threaded_actor = TroubleMaker.options(
max_restarts=max_retries_and_restarts, max_concurrency=2
).remote(counter_key="threaded")
assert ray.get(
[
actor.raise_or_exit.options(
max_task_retries=max_retries_and_restarts
).remote(counter, actions)
for actor in [sync_actor, async_actor, threaded_actor]
]
) == [2, 2, 2]
# Should expect 3 total tries from each actor: 1 initial + 2 retries
assert ray.get(
[counter.get_count.remote(option) for option in ["sync", "async", "threaded"]]
) == [3, 3, 3]
@pytest.mark.parametrize(
"actions_and_error",
[
(["raise", "raise", "raise"], MyError),
(["exit", "raise", "raise"], MyError),
(["raise", "exit", "raise"], MyError),
# Last try is exit, the actor restarted.
(["raise", "raise", "exit"], ray.exceptions.ActorUnavailableError),
# Last try is exit, the actor is dead (exceeded max_restarts).
(["raise", "exit", "exit"], ray.exceptions.ActorDiedError),
],
ids=lambda p: ",".join(p[0]), # test case show name
)
def test_method_raise_and_exit_no_over_retry(
actions_and_error, ray_start_regular_shared
):
"""
Test we do not over retry.
The retry behavior should work for Async actors and Threaded actors.
The retry behavior should work if the max_task_retries or max_restarts are -1
(infinite retry).
"""
max_restarts = 1
max_task_retries = 2
actions, error = actions_and_error
# NOTE(edoakes): we test on all three types of actors in parallel to reduce the
# time taken to run the test in CI.
counter = Counter.remote()
sync_actor = TroubleMaker.options(max_restarts=max_restarts).remote(
counter_key="sync",
)
async_actor = AsyncTroubleMaker.options(max_restarts=max_restarts).remote(
counter_key="async",
)
threaded_actor = TroubleMaker.options(
max_restarts=max_restarts, max_concurrency=2
).remote(counter_key="threaded")
for ref in [
a.raise_or_exit.options(max_task_retries=max_task_retries).remote(
counter, actions
)
for a in [sync_actor, async_actor, threaded_actor]
]:
with pytest.raises(error):
ray.get(ref)
# 3 = 1 initial + 2 retries (with the 1 restart included)
assert ray.get(
[counter.get_count.remote(key) for key in ["sync", "async", "threaded"]]
) == [3, 3, 3]
def test_task_retries_on_exit(ray_start_regular_shared):
"""Sanity check that task retries work when the actor exits."""
counter = Counter.remote()
sync_actor = TroubleMaker.options(max_restarts=2).remote(counter_key="sync")
async_actor = AsyncTroubleMaker.options(max_restarts=2).remote(counter_key="async")
for ref in [
a.raise_or_exit.options(max_task_retries=2).remote(
counter, ["exit", "exit", "exit"]
)
for a in [sync_actor, async_actor]
]:
with pytest.raises(ray.exceptions.RayActorError):
ray.get(ref)
# 3 = 1 initial + 2 retries (with the 2 restarts included)
assert ray.get([counter.get_count.remote(key) for key in ["sync", "async"]]) == [
3,
3,
]
def test_retry_dependent_task_on_same_actor(ray_start_regular_shared):
"""
1. Create an actor
2. Submit an actor task (one).
3. Submit another actor task (two) that depends on the output of one.
4. Allow the first attempt of one to fail.
5. Expect the second attempt of one to be run, and for two to be unblocked.
The goal of this test is to make sure later actor tasks with dependencies on
earlier ones don't result in deadlock when the earlier tasks need to be retried.
See https://github.com/ray-project/ray/pull/54034 for more context.
"""
@ray.remote
class Actor:
def __init__(self):
self._counter = 0
@ray.method(max_task_retries=1, retry_exceptions=[MyError])
def one(self, signal_actor):
ray.get(signal_actor.wait.remote())
self._counter += 1
# Fail on the first invocation.
if self._counter <= 1:
raise MyError()
return 1
def two(self, one_output):
return 2
signal_actor = SignalActor.remote()
actor = Actor.remote()
one_output_ref = actor.one.remote(signal_actor)
two_output_ref = actor.two.remote(one_output_ref)
# Unblock so the first attempt can fail and the second attempt gets submitted.
ray.get(signal_actor.send.remote())
assert ray.get(two_output_ref) == 2
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| AsyncTroubleMaker |
python | pennersr__django-allauth | allauth/socialaccount/providers/yandex/views.py | {
"start": 181,
"end": 1032
} | class ____(OAuth2Adapter):
provider_id = "yandex"
access_token_url = "https://oauth.yandex.ru/token" # nosec
authorize_url = "https://oauth.yandex.com/authorize"
profile_url = "https://login.yandex.ru/info"
def complete_login(self, request, app, token, **kwargs):
resp = (
get_adapter()
.get_requests_session()
.get(
self.profile_url,
params={"format": "json"},
headers={"Authorization": f"OAuth {token.token}"},
)
)
resp.raise_for_status()
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(YandexOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(YandexOAuth2Adapter)
| YandexOAuth2Adapter |
python | tiangolo__fastapi | fastapi/dependencies/utils.py | {
"start": 20715,
"end": 38641
} | class ____:
values: Dict[str, Any]
errors: List[Any]
background_tasks: Optional[StarletteBackgroundTasks]
response: Response
dependency_cache: Dict[DependencyCacheKey, Any]
async def solve_dependencies(
*,
request: Union[Request, WebSocket],
dependant: Dependant,
body: Optional[Union[Dict[str, Any], FormData]] = None,
background_tasks: Optional[StarletteBackgroundTasks] = None,
response: Optional[Response] = None,
dependency_overrides_provider: Optional[Any] = None,
dependency_cache: Optional[Dict[DependencyCacheKey, Any]] = None,
# TODO: remove this parameter later, no longer used, not removing it yet as some
# people might be monkey patching this function (although that's not supported)
async_exit_stack: AsyncExitStack,
embed_body_fields: bool,
) -> SolvedDependency:
request_astack = request.scope.get("fastapi_inner_astack")
assert isinstance(request_astack, AsyncExitStack), (
"fastapi_inner_astack not found in request scope"
)
function_astack = request.scope.get("fastapi_function_astack")
assert isinstance(function_astack, AsyncExitStack), (
"fastapi_function_astack not found in request scope"
)
values: Dict[str, Any] = {}
errors: List[Any] = []
if response is None:
response = Response()
del response.headers["content-length"]
response.status_code = None # type: ignore
if dependency_cache is None:
dependency_cache = {}
for sub_dependant in dependant.dependencies:
sub_dependant.call = cast(Callable[..., Any], sub_dependant.call)
call = sub_dependant.call
use_sub_dependant = sub_dependant
if (
dependency_overrides_provider
and dependency_overrides_provider.dependency_overrides
):
original_call = sub_dependant.call
call = getattr(
dependency_overrides_provider, "dependency_overrides", {}
).get(original_call, original_call)
use_path: str = sub_dependant.path # type: ignore
use_sub_dependant = get_dependant(
path=use_path,
call=call,
name=sub_dependant.name,
security_scopes=sub_dependant.security_scopes,
scope=sub_dependant.scope,
)
solved_result = await solve_dependencies(
request=request,
dependant=use_sub_dependant,
body=body,
background_tasks=background_tasks,
response=response,
dependency_overrides_provider=dependency_overrides_provider,
dependency_cache=dependency_cache,
async_exit_stack=async_exit_stack,
embed_body_fields=embed_body_fields,
)
background_tasks = solved_result.background_tasks
if solved_result.errors:
errors.extend(solved_result.errors)
continue
if sub_dependant.use_cache and sub_dependant.cache_key in dependency_cache:
solved = dependency_cache[sub_dependant.cache_key]
elif (
use_sub_dependant.is_gen_callable or use_sub_dependant.is_async_gen_callable
):
use_astack = request_astack
if sub_dependant.scope == "function":
use_astack = function_astack
solved = await _solve_generator(
dependant=use_sub_dependant,
stack=use_astack,
sub_values=solved_result.values,
)
elif use_sub_dependant.is_coroutine_callable:
solved = await call(**solved_result.values)
else:
solved = await run_in_threadpool(call, **solved_result.values)
if sub_dependant.name is not None:
values[sub_dependant.name] = solved
if sub_dependant.cache_key not in dependency_cache:
dependency_cache[sub_dependant.cache_key] = solved
path_values, path_errors = request_params_to_args(
dependant.path_params, request.path_params
)
query_values, query_errors = request_params_to_args(
dependant.query_params, request.query_params
)
header_values, header_errors = request_params_to_args(
dependant.header_params, request.headers
)
cookie_values, cookie_errors = request_params_to_args(
dependant.cookie_params, request.cookies
)
values.update(path_values)
values.update(query_values)
values.update(header_values)
values.update(cookie_values)
errors += path_errors + query_errors + header_errors + cookie_errors
if dependant.body_params:
(
body_values,
body_errors,
) = await request_body_to_args( # body_params checked above
body_fields=dependant.body_params,
received_body=body,
embed_body_fields=embed_body_fields,
)
values.update(body_values)
errors.extend(body_errors)
if dependant.http_connection_param_name:
values[dependant.http_connection_param_name] = request
if dependant.request_param_name and isinstance(request, Request):
values[dependant.request_param_name] = request
elif dependant.websocket_param_name and isinstance(request, WebSocket):
values[dependant.websocket_param_name] = request
if dependant.background_tasks_param_name:
if background_tasks is None:
background_tasks = BackgroundTasks()
values[dependant.background_tasks_param_name] = background_tasks
if dependant.response_param_name:
values[dependant.response_param_name] = response
if dependant.security_scopes_param_name:
values[dependant.security_scopes_param_name] = SecurityScopes(
scopes=dependant.security_scopes
)
return SolvedDependency(
values=values,
errors=errors,
background_tasks=background_tasks,
response=response,
dependency_cache=dependency_cache,
)
def _validate_value_with_model_field(
*, field: ModelField, value: Any, values: Dict[str, Any], loc: Tuple[str, ...]
) -> Tuple[Any, List[Any]]:
if value is None:
if field.required:
return None, [get_missing_field_error(loc=loc)]
else:
return deepcopy(field.default), []
v_, errors_ = field.validate(value, values, loc=loc)
if _is_error_wrapper(errors_): # type: ignore[arg-type]
return None, [errors_]
elif isinstance(errors_, list):
new_errors = may_v1._regenerate_error_with_loc(errors=errors_, loc_prefix=())
return None, new_errors
else:
return v_, []
def _get_multidict_value(
field: ModelField, values: Mapping[str, Any], alias: Union[str, None] = None
) -> Any:
alias = alias or field.alias
if is_sequence_field(field) and isinstance(values, (ImmutableMultiDict, Headers)):
value = values.getlist(alias)
else:
value = values.get(alias, None)
if (
value is None
or (
isinstance(field.field_info, (params.Form, temp_pydantic_v1_params.Form))
and isinstance(value, str) # For type checks
and value == ""
)
or (is_sequence_field(field) and len(value) == 0)
):
if field.required:
return
else:
return deepcopy(field.default)
return value
def request_params_to_args(
fields: Sequence[ModelField],
received_params: Union[Mapping[str, Any], QueryParams, Headers],
) -> Tuple[Dict[str, Any], List[Any]]:
values: Dict[str, Any] = {}
errors: List[Dict[str, Any]] = []
if not fields:
return values, errors
first_field = fields[0]
fields_to_extract = fields
single_not_embedded_field = False
default_convert_underscores = True
if len(fields) == 1 and lenient_issubclass(first_field.type_, BaseModel):
fields_to_extract = get_cached_model_fields(first_field.type_)
single_not_embedded_field = True
# If headers are in a Pydantic model, the way to disable convert_underscores
# would be with Header(convert_underscores=False) at the Pydantic model level
default_convert_underscores = getattr(
first_field.field_info, "convert_underscores", True
)
params_to_process: Dict[str, Any] = {}
processed_keys = set()
for field in fields_to_extract:
alias = None
if isinstance(received_params, Headers):
# Handle fields extracted from a Pydantic Model for a header, each field
# doesn't have a FieldInfo of type Header with the default convert_underscores=True
convert_underscores = getattr(
field.field_info, "convert_underscores", default_convert_underscores
)
if convert_underscores:
alias = (
field.alias
if field.alias != field.name
else field.name.replace("_", "-")
)
value = _get_multidict_value(field, received_params, alias=alias)
if value is not None:
params_to_process[field.name] = value
processed_keys.add(alias or field.alias)
processed_keys.add(field.name)
for key, value in received_params.items():
if key not in processed_keys:
params_to_process[key] = value
if single_not_embedded_field:
field_info = first_field.field_info
assert isinstance(field_info, (params.Param, temp_pydantic_v1_params.Param)), (
"Params must be subclasses of Param"
)
loc: Tuple[str, ...] = (field_info.in_.value,)
v_, errors_ = _validate_value_with_model_field(
field=first_field, value=params_to_process, values=values, loc=loc
)
return {first_field.name: v_}, errors_
for field in fields:
value = _get_multidict_value(field, received_params)
field_info = field.field_info
assert isinstance(field_info, (params.Param, temp_pydantic_v1_params.Param)), (
"Params must be subclasses of Param"
)
loc = (field_info.in_.value, field.alias)
v_, errors_ = _validate_value_with_model_field(
field=field, value=value, values=values, loc=loc
)
if errors_:
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
def is_union_of_base_models(field_type: Any) -> bool:
"""Check if field type is a Union where all members are BaseModel subclasses."""
from fastapi.types import UnionType
origin = get_origin(field_type)
# Check if it's a Union type (covers both typing.Union and types.UnionType in Python 3.10+)
if origin is not Union and origin is not UnionType:
return False
union_args = get_args(field_type)
for arg in union_args:
if not _is_model_class(arg):
return False
return True
def _should_embed_body_fields(fields: List[ModelField]) -> bool:
if not fields:
return False
# More than one dependency could have the same field, it would show up as multiple
# fields but it's the same one, so count them by name
body_param_names_set = {field.name for field in fields}
# A top level field has to be a single field, not multiple
if len(body_param_names_set) > 1:
return True
first_field = fields[0]
# If it explicitly specifies it is embedded, it has to be embedded
if getattr(first_field.field_info, "embed", None):
return True
# If it's a Form (or File) field, it has to be a BaseModel (or a union of BaseModels) to be top level
# otherwise it has to be embedded, so that the key value pair can be extracted
if (
isinstance(first_field.field_info, (params.Form, temp_pydantic_v1_params.Form))
and not _is_model_class(first_field.type_)
and not is_union_of_base_models(first_field.type_)
):
return True
return False
async def _extract_form_body(
body_fields: List[ModelField],
received_body: FormData,
) -> Dict[str, Any]:
values = {}
for field in body_fields:
value = _get_multidict_value(field, received_body)
field_info = field.field_info
if (
isinstance(field_info, (params.File, temp_pydantic_v1_params.File))
and is_bytes_field(field)
and isinstance(value, UploadFile)
):
value = await value.read()
elif (
is_bytes_sequence_field(field)
and isinstance(field_info, (params.File, temp_pydantic_v1_params.File))
and value_is_sequence(value)
):
# For types
assert isinstance(value, sequence_types) # type: ignore[arg-type]
results: List[Union[bytes, str]] = []
async def process_fn(
fn: Callable[[], Coroutine[Any, Any, Any]],
) -> None:
result = await fn()
results.append(result) # noqa: B023
async with anyio.create_task_group() as tg:
for sub_value in value:
tg.start_soon(process_fn, sub_value.read)
value = serialize_sequence_value(field=field, value=results)
if value is not None:
values[field.alias] = value
for key, value in received_body.items():
if key not in values:
values[key] = value
return values
async def request_body_to_args(
body_fields: List[ModelField],
received_body: Optional[Union[Dict[str, Any], FormData]],
embed_body_fields: bool,
) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]:
values: Dict[str, Any] = {}
errors: List[Dict[str, Any]] = []
assert body_fields, "request_body_to_args() should be called with fields"
single_not_embedded_field = len(body_fields) == 1 and not embed_body_fields
first_field = body_fields[0]
body_to_process = received_body
fields_to_extract: List[ModelField] = body_fields
if (
single_not_embedded_field
and _is_model_class(first_field.type_)
and isinstance(received_body, FormData)
):
fields_to_extract = get_cached_model_fields(first_field.type_)
if isinstance(received_body, FormData):
body_to_process = await _extract_form_body(fields_to_extract, received_body)
if single_not_embedded_field:
loc: Tuple[str, ...] = ("body",)
v_, errors_ = _validate_value_with_model_field(
field=first_field, value=body_to_process, values=values, loc=loc
)
return {first_field.name: v_}, errors_
for field in body_fields:
loc = ("body", field.alias)
value: Optional[Any] = None
if body_to_process is not None:
try:
value = body_to_process.get(field.alias)
# If the received body is a list, not a dict
except AttributeError:
errors.append(get_missing_field_error(loc))
continue
v_, errors_ = _validate_value_with_model_field(
field=field, value=value, values=values, loc=loc
)
if errors_:
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
def get_body_field(
*, flat_dependant: Dependant, name: str, embed_body_fields: bool
) -> Optional[ModelField]:
"""
Get a ModelField representing the request body for a path operation, combining
all body parameters into a single field if necessary.
Used to check if it's form data (with `isinstance(body_field, params.Form)`)
or JSON and to generate the JSON Schema for a request body.
This is **not** used to validate/parse the request body, that's done with each
individual body parameter.
"""
if not flat_dependant.body_params:
return None
first_param = flat_dependant.body_params[0]
if not embed_body_fields:
return first_param
model_name = "Body_" + name
BodyModel = create_body_model(
fields=flat_dependant.body_params, model_name=model_name
)
required = any(True for f in flat_dependant.body_params if f.required)
BodyFieldInfo_kwargs: Dict[str, Any] = {
"annotation": BodyModel,
"alias": "body",
}
if not required:
BodyFieldInfo_kwargs["default"] = None
if any(isinstance(f.field_info, params.File) for f in flat_dependant.body_params):
BodyFieldInfo: Type[params.Body] = params.File
elif any(
isinstance(f.field_info, temp_pydantic_v1_params.File)
for f in flat_dependant.body_params
):
BodyFieldInfo: Type[temp_pydantic_v1_params.Body] = temp_pydantic_v1_params.File # type: ignore[no-redef]
elif any(isinstance(f.field_info, params.Form) for f in flat_dependant.body_params):
BodyFieldInfo = params.Form
elif any(
isinstance(f.field_info, temp_pydantic_v1_params.Form)
for f in flat_dependant.body_params
):
BodyFieldInfo = temp_pydantic_v1_params.Form # type: ignore[assignment]
else:
if annotation_is_pydantic_v1(BodyModel):
BodyFieldInfo = temp_pydantic_v1_params.Body # type: ignore[assignment]
else:
BodyFieldInfo = params.Body
body_param_media_types = [
f.field_info.media_type
for f in flat_dependant.body_params
if isinstance(f.field_info, (params.Body, temp_pydantic_v1_params.Body))
]
if len(set(body_param_media_types)) == 1:
BodyFieldInfo_kwargs["media_type"] = body_param_media_types[0]
final_field = create_model_field(
name="body",
type_=BodyModel,
required=required,
alias="body",
field_info=BodyFieldInfo(**BodyFieldInfo_kwargs),
)
return final_field
| SolvedDependency |
python | lxml__lxml | src/lxml/html/tests/test_feedparser_data.py | {
"start": 656,
"end": 3172
} | class ____(unittest.TestCase):
def __init__(self, filename):
self.filename = filename
unittest.TestCase.__init__(self)
def parse(self):
with open(self.filename) as f:
headers = Message(f)
c = f.read()
if not c.strip():
c = headers.get_payload()
if not headers.keys():
raise Exception(
"File %s has no headers" % self.filename)
self.description = headers['Description']
self.expect = headers.get('Expect', '')
self.ignore = headers.get('Ignore')
self.options = [
o.strip() for o in headers.get('Options', '').split(',')
if o.strip()]
parts = bar_re.split(c)
self.input = parts[0].rstrip() + '\n'
if parts[1:]:
self.expect = parts[1].rstrip() + '\n'
else:
self.expect = None
def runTest(self):
self.parse()
if self.ignore:
# We've marked this test to be ignored.
return
kw = {}
for name in self.options:
if name.startswith('-'):
kw[name[1:]] = False
else:
kw[name] = True
if kw.get('clean', True):
transformed = Cleaner(**kw).clean_html(self.input)
else:
transformed = self.input
assert self.expect is not None, (
"No expected output in %s" % self.filename)
checker = LHTMLOutputChecker()
if not checker.check_output(self.expect, transformed, 0):
result = checker.output_difference(
DummyInput(want=self.expect), transformed, 0)
#result += '\noptions: %s %r' % (', '.join(self.options), kw)
#result += repr(transformed)
raise Exception("\n"+result)
def shortDescription(self):
return self.filename
def test_suite():
suite = unittest.TestSuite()
if not html_clean_available:
print("Skipping tests in feedparser_data - external lxml_html_clean package is not installed")
return suite
for dir in feed_dirs:
for fn in os.listdir(dir):
fn = os.path.join(dir, fn)
if fn.endswith('.data'):
case = FeedTestCase(fn)
suite.addTests([case])
# This is my lazy way of stopping on first error:
try:
case.runTest()
except:
break
return suite
| FeedTestCase |
python | huggingface__transformers | src/transformers/models/camembert/tokenization_camembert.py | {
"start": 1154,
"end": 7538
} | class ____(TokenizersBackend):
"""
Construct a "fast" CamemBERT tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
[`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (`list[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
Additional special tokens used by the tokenizer.
add_prefix_space (`bool`, *optional*, defaults to `True`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word.
vocab_file (`str`, *optional*):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
vocab (`dict`, *optional*):
Custom vocabulary dictionary. If not provided, vocabulary is loaded from vocab_file.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
def __init__(
self,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
additional_special_tokens=None,
add_prefix_space=True,
vocab_file=None,
vocab=None,
**kwargs,
):
self.vocab_file = vocab_file
self.add_prefix_space = add_prefix_space
mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
if additional_special_tokens is None:
additional_special_tokens = ["<s>NOTUSED", "</s>NOTUSED", "<unk>NOTUSED"]
if vocab is not None and isinstance(vocab, list):
self._vocab = list(vocab)
unk_index = next(i for i, (tok, _) in enumerate(self._vocab) if tok == str(unk_token))
self._tokenizer = Tokenizer(Unigram(self._vocab, unk_id=unk_index, byte_fallback=False))
else:
self._vocab = [
("<s>NOTUSED", 0.0),
(str(pad_token), 0.0),
("</s>NOTUSED", 0.0),
(str(unk_token), 0.0),
("<unk>NOTUSED", -100),
(str(mask_token), 0.0),
]
self._tokenizer = Tokenizer(Unigram(self._vocab, unk_id=3, byte_fallback=False))
self._tokenizer.normalizer = normalizers.Sequence(
[
normalizers.Replace("\n", " "),
normalizers.Replace("\r", " "),
normalizers.Replace("\t", " "),
normalizers.Strip(left=False, right=True),
normalizers.Replace(Regex(" {2,}"), "▁"),
]
)
prepend_scheme = "always" if add_prefix_space else "never"
self._tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement="▁", prepend_scheme=prepend_scheme)
self._tokenizer.decoder = decoders.Metaspace(replacement="▁", prepend_scheme=prepend_scheme)
tokenizer_object = self._tokenizer
super().__init__(
tokenizer_object=tokenizer_object,
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
cls_token=cls_token,
unk_token=unk_token,
pad_token=pad_token,
mask_token=mask_token,
additional_special_tokens=additional_special_tokens,
add_prefix_space=add_prefix_space,
**kwargs,
)
# always adds BOS/EOS with "</s> </s>" separator for pairs
self._tokenizer.post_processor = processors.TemplateProcessing(
single=f"{self.bos_token} $A {self.eos_token}",
pair=f"{self.bos_token} $A {self.eos_token} {self.eos_token} $B {self.eos_token}",
special_tokens=[
(self.bos_token, self.bos_token_id),
(self.eos_token, self.eos_token_id),
],
)
__all__ = ["CamembertTokenizer"]
| CamembertTokenizer |
python | tensorflow__tensorflow | tensorflow/dtensor/python/input_util.py | {
"start": 15142,
"end": 27727
} | class ____(dataset_ops.UnaryUnchangedStructureDataset):
"""A dataset of DTensors.
DTensorDataset encapsulates a `tf.data.Dataset` whose elements are
automatically packed and returned as DTensors based on a given mesh and
layouts.
"""
def __init__(self,
dataset: data_types.DatasetV2,
*,
mesh: layout_lib.Mesh,
layouts: Any,
global_batch_size: int,
dataset_already_batched: bool = False,
batch_dim: Optional[str] = None,
prefetch: Optional[int] = None,
tf_data_service_config: Optional[TFDataServiceConfig] = None):
"""Creates a DTensorDataset.
DTensorDataset automatically handles distribution of the dataset elements to
each client's devices. It can be used to create an iterator that returns
DTensors of the input data on each iteration.
DTensorDataset works best with unbatched datasets. It takes the mesh and the
provided layouts to automatically calculate how to batch the input locally
for each replica.
If the provided dataset is already batched according to the per-replica
batch size, then `dataset_already_batched` must be set and DTensorDataset
will check that the batch size is consistent with the intended
`global_batch_size` using the layout information. Each replica receives a
separate slice of the global batch, thus the per-replica batch size can be
computed as the global batch size divided by the number of model replicas.
For a DTensor mesh, the number of replicas is equal to the size of the
mesh's batch dimension.
Note: `tf.experimental.dtensor.DTensorDataset` instances do *not* implement
the full interface of `tf.data.Dataset`. It only supports two usages we will
mention below: iteration and `element_spec`. We don't support any other APIs
to transform or inspect the dataset.
TODO(b/223275517): add support for input datasets that are already batched
to the global batch size.
Args:
dataset: a `tf.data.Dataset` object.
mesh: the DTensor mesh to place the dataset batches on.
layouts: a structure of DTensor layouts to be applied to the input dataset
values. This can be a single layout or (possibly nested) tuples or
dictionaries of layouts, and the structure must match the structure of
the dataset. Either all or none of the layouts should be sharded on the
batch dimension; having only a subset of layouts batch sharded will not
work and raises a ValueError.
global_batch_size: the desired global batch size.
dataset_already_batched: must be set only if the dataset is already
batched to the per-replica batch size. The batched dataset must have
`drop_remainder=True` set since DTensor requires static shapes for
slicing the input tensors.
batch_dim: the mesh dimension on which the input's batch dimension is
sharded. Set to None if the input layouts do not shard on the batch
dimension.
prefetch: number of batches to prefetch using Dataset.prefetch.
tf_data_service_config: if operating in multi-client mode, this config
specifies the tf.data service configuration to use.
Raises:
ValueError: on any of the following situations,
1. if the structures and ranks of layouts and the dataset do not match.
2. if the shapes in the dataset's spec are not fully defined.
3. if batch_dim is specified and all layouts are not batch-sharded.
4. if per_replica_batch_size is specified for an already batched Dataset
but it does not match the expected per-replica size based on the
provided mesh.
TypeError: if type of structures of layouts and the dataset do not match.
"""
super().__init__(dataset, dataset_ops.to_variant(dataset))
# TODO(b/271162918): fix multi-client use case.
if tf_data_service_config is not None:
raise NotImplementedError(
'Multi-client DTensorDataset is currently not supported.'
' Check b/271162918.')
self._mesh = mesh
self._layouts = layouts
self._batch_dim = batch_dim
self._prefetch = prefetch
self._tf_data_service_config = tf_data_service_config
nest.assert_same_structure(dataset.element_spec, layouts)
flattened_layouts = nest.flatten(layouts)
flattened_elem_spec = nest.flatten(dataset.element_spec)
if batch_dim:
self.num_global_replicas = mesh.dim_size(batch_dim)
self._local_replica_ids = list(
dict.fromkeys(
[loc[batch_dim] for loc in mesh.local_device_locations()]))
for layout in flattened_layouts:
if batch_dim != layout.sharding_specs[0]:
raise ValueError(
('batch_dim %s was specified but at least one layout did not '
'contain it: %s') % (batch_dim, layout))
else:
# Only one replica since there is no sharding on the batch dimension.
self.num_global_replicas = 1
self._local_replica_ids = [0]
# Validate layout and element spec compatibility, and raise ValueError if
# invalid.
_validate_input(
flattened_layouts,
flattened_elem_spec,
dataset_already_batched=dataset_already_batched)
expected_batch_size = global_batch_size // self.num_global_replicas
if not dataset_already_batched:
self._batched_dataset = dataset.batch(
expected_batch_size, drop_remainder=True)
else:
per_replica_batch_size = flattened_elem_spec[0].shape.as_list()[0]
if per_replica_batch_size != expected_batch_size:
raise ValueError(
('per_replica_batch_size does not matched expected size based on '
'the mesh, got %d but expected %d.') %
(per_replica_batch_size, expected_batch_size))
self._batched_dataset = dataset
# Construct a global element spec of the dataset.
flattened_global_elem_spec = []
batch_tensor_shape = tensor_shape.as_shape([global_batch_size])
for elem_spec in nest.flatten(self._batched_dataset.element_spec):
new_elem_spec = tensor_spec.TensorSpec(
shape=operator.concat(batch_tensor_shape, elem_spec.shape[1:]),
dtype=elem_spec.dtype,
name=elem_spec.name)
flattened_global_elem_spec.append(new_elem_spec)
self._global_element_spec = nest.pack_sequence_as(
dataset.element_spec, flattened_global_elem_spec)
num_global_devices_per_replica = config.num_global_devices(
mesh.device_type()) // self.num_global_replicas
self._num_local_replicas = len(self._local_replica_ids)
self._num_local_devices_per_replica = mesh.num_local_devices(
) // self._num_local_replicas
# The number of clients each replica is split over.
self._num_clients_per_replica = (
num_global_devices_per_replica // self._num_local_devices_per_replica)
# In the case where a replica is split across multiple clients, an offset
# needs to be added to the index used by the partitioning logic such that
# the local devices on that client can be correctly matched to slices of the
# input tensor(s). If replicas are wholly contained within a client, then
# this offset is always 0.
self._partition_offset = (config.client_id() % self._num_clients_per_replica
) * self._num_local_devices_per_replica
# Helper data structures used in partitioning the dataset tensors.
self._all_shard_counts = [
_shard_counts(layout, batch_dim) for layout in flattened_layouts
]
self._index_matrices = [
_index_matrix(layout, elem_spec)
for layout, elem_spec in zip(flattened_layouts, flattened_elem_spec)
]
def __iter__(self):
datasets: List[Tuple[int, data_types.DatasetV2]] = []
# Start with the batched the dataset.
local_dataset = self._batched_dataset
if self._batch_dim is not None:
if self._num_clients_per_replica > 1:
# If a replica is split over multiple clients then each batch needs to
# be repeated before distribution as many times as there are clients
# corresponding to that replica.
local_dataset = self._repeat_batch(local_dataset,
self._num_clients_per_replica)
sharding_policy = data_service_ops.ShardingPolicy.DATA
else:
# Replicas are unique to each client, so FILE based sharding can be used
# which is more performant since each worker does not need to read the
# entire dataset.
sharding_policy = data_service_ops.ShardingPolicy.FILE
else:
# No batch dimension sharding specified so disable dataset sharding during
# the distribute step.
sharding_policy = data_service_ops.ShardingPolicy.OFF
# Apply distribution here (if specified) so all remaining transformations
# are executed locally.
if self._tf_data_service_config is not None:
local_dataset = local_dataset.apply(
data_service_ops.distribute(
processing_mode=sharding_policy,
service=self._tf_data_service_config.dispatcher_address,
job_name=f'{self._tf_data_service_config.job_name}_{config.client_id()}',
target_workers='LOCAL'))
for local_replica_idx, replica_id in enumerate(self._local_replica_ids):
# Select the shard for the corresponding replica.
dataset = distribute._AutoShardDataset(
local_dataset,
num_workers=self._num_local_replicas,
index=local_replica_idx,
num_replicas=self.num_global_replicas)
# Repeat each batch for each local device in the replica.
dataset = self._repeat_batch(dataset, self._num_local_devices_per_replica)
# Slice each shard further for all non-batch dim shards. If there is no
# non-batch dim sharding, this slice is essentially a no-op.
dataset = self._partition(dataset)
# Apply prefetch as the last step. Since each batch is repeated, the
# number of elements to prefetch has to be scaled by the same size.
if self._prefetch is not None:
dataset = dataset.prefetch(
self._prefetch * self._num_local_devices_per_replica)
datasets.append((replica_id, dataset))
# Convert the datasets into iterators placed on the host.
d_iterator_resource = _pack_iterator_resource_dtensor(
datasets=datasets,
layouts=self._layouts,
mesh=self._mesh,
num_local_devices_per_replica=self._num_local_devices_per_replica)
return _DTensorIterator(
dtensor_components=(d_iterator_resource,),
global_element_spec=self._global_element_spec,
layouts=self._layouts)
def _repeat_batch(self, dataset, repeats):
if repeats == 1:
# Remove this shortcut if tf.data can optimize this away.
return dataset
def repeat(*x):
return dataset_ops.DatasetV2.from_tensors(x).repeat(repeats)
return dataset.flat_map(repeat)
def _partition(self, dataset):
"""Slices each dataset element on any sharded non-batch dimension."""
if self._num_local_devices_per_replica == 1 and self._partition_offset == 0:
# Remove this shortcut if tf.data can optimize this away.
return dataset
# TODO(b/223275517): decouple from self and make testable.
def slice_batch(index, batch):
flattened_batch = nest.flatten(batch)
flattened_output = []
norm_index = math_ops.cast(
index % self._num_local_devices_per_replica, dtype=dtypes.int32)
norm_index += self._partition_offset
coords = self._mesh.coords(norm_index)
coords = array_ops.reshape(coords, (1, -1))
for element, shard_counts, idx_matrix in zip(flattened_batch,
self._all_shard_counts,
self._index_matrices):
indexes = math_ops.matmul(coords, idx_matrix)
start = array_ops.reshape(indexes, (-1,))
size = array_ops.shape_v2(
element, out_type=dtypes.int32) // shard_counts
flattened_output.append(
array_ops.slice(element, begin=start, size=size))
return nest.pack_sequence_as(batch, flattened_output)
enumerated_dataset = dataset.enumerate()
partitioned_dataset = enumerated_dataset.map(slice_batch)
return partitioned_dataset
@property
def element_spec(self):
return self._global_element_spec
| DTensorDataset |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py | {
"start": 22906,
"end": 24236
} | class ____(TypeDefinition):
__slots__ = ('loc', 'name', 'interfaces', 'directives', 'fields',)
_fields = ('name', 'interfaces', 'fields',)
def __init__(self, name, fields, interfaces=None, loc=None, directives=None):
self.loc = loc
self.name = name
self.interfaces = interfaces
self.fields = fields
self.directives = directives
def __eq__(self, other):
return (
self is other or (
isinstance(other, ObjectTypeDefinition) and
# self.loc == other.loc and
self.name == other.name and
self.interfaces == other.interfaces and
self.fields == other.fields and
self.directives == other.directives
)
)
def __repr__(self):
return ('ObjectTypeDefinition('
'name={self.name!r}'
', interfaces={self.interfaces!r}'
', fields={self.fields!r}'
', directives={self.directives!r}'
')').format(self=self)
def __copy__(self):
return type(self)(
self.name,
self.fields,
self.interfaces,
self.loc,
self.directives,
)
def __hash__(self):
return id(self)
| ObjectTypeDefinition |
python | dagster-io__dagster | python_modules/libraries/dagster-pandas/dagster_pandas/constraints.py | {
"start": 39920,
"end": 40706
} | class ____(ColumnConstraint):
"""A column constraint that ensures all values in a pandas column are not null."""
def __init__(self):
description = "No Null values allowed."
super().__init__(error_description=description, markdown_description=description)
def validate(self, dataframe, column_name):
rows_with_null_columns = dataframe[dataframe[column_name].isna()]
if not rows_with_null_columns.empty:
raise ColumnConstraintViolationException(
constraint_name=self.name,
constraint_description=self.error_description,
column_name=column_name,
offending_rows=self.get_offending_row_pairs(rows_with_null_columns, column_name),
)
| NonNullableColumnConstraint |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/pipelines/pipeline.py | {
"start": 46465,
"end": 47795
} | class ____(graphene.ObjectType):
name = graphene.NonNull(graphene.String)
solidSelection = graphene.List(graphene.NonNull(graphene.String))
runConfigYaml = graphene.NonNull(graphene.String)
mode = graphene.NonNull(graphene.String)
tags = non_null_list(GraphenePipelineTag)
class Meta:
name = "PipelinePreset"
def __init__(self, active_preset_data, pipeline_name):
super().__init__()
self._active_preset_data = check.inst_param(
active_preset_data, "active_preset_data", PresetSnap
)
self._job_name = check.str_param(pipeline_name, "pipeline_name")
def resolve_name(self, _graphene_info: ResolveInfo):
return self._active_preset_data.name
def resolve_solidSelection(self, _graphene_info: ResolveInfo):
return self._active_preset_data.op_selection
def resolve_runConfigYaml(self, _graphene_info: ResolveInfo):
return dump_run_config_yaml(self._active_preset_data.run_config) or ""
def resolve_mode(self, _graphene_info: ResolveInfo):
return self._active_preset_data.mode
def resolve_tags(self, _graphene_info: ResolveInfo):
return [
GraphenePipelineTag(key=key, value=value)
for key, value in self._active_preset_data.tags.items()
]
| GraphenePipelinePreset |
python | google__jax | jax/_src/config.py | {
"start": 45321,
"end": 56922
} | class ____(enum.StrEnum):
ALLOW = 'allow'
WARN = 'warn'
ERROR = 'error'
legacy_prng_key = enum_class_state(
name='jax_legacy_prng_key',
enum_class=LegacyPrngKeyState,
default=LegacyPrngKeyState.ALLOW,
help=('Specify the behavior when raw PRNG keys are passed to '
'jax.random APIs.')
)
enable_custom_prng = bool_state(
name='jax_enable_custom_prng',
default=False,
upgrade=True,
help=('Enables an internal upgrade that allows one to define custom '
'pseudo-random number generator implementations.'))
default_prng_impl = enum_state(
name='jax_default_prng_impl',
enum_values=['threefry2x32', 'rbg', 'unsafe_rbg'],
default='threefry2x32',
help=('Select the default PRNG implementation, used when one is not '
'explicitly provided at seeding time.'))
threefry_partitionable = bool_state(
name='jax_threefry_partitionable',
default=True,
upgrade=True,
help=('Enables internal threefry PRNG implementation changes that '
'render it automatically partitionable in some cases. Without this '
'flag, using the standard jax.random pseudo-random number generation '
'may result in extraneous communication and/or redundant distributed '
'computation. With this flag, the communication overheads disappear '
'in some cases.'),
include_in_jit_key=True,
include_in_trace_context=True)
threefry_gpu_kernel_lowering = bool_state(
name='jax_threefry_gpu_kernel_lowering',
default=False,
help=('On GPU, lower threefry PRNG operations to a kernel implementation. '
'This makes compile times faster at a potential runtime memory '
'cost.'),
include_in_jit_key=True,
include_in_trace_context=True)
use_direct_linearize = bool_state(
name='jax_use_direct_linearize',
default=True,
help=('Use direct linearization instead JVP followed by partial eval'),
include_in_jit_key=True,
include_in_trace_context=True)
use_simplified_jaxpr_constants = bool_state(
name='jax_use_simplified_jaxpr_constants',
default=False,
help=('Enable a simplification of the handling of closed-over constants '
'in Jaxpr. The value `True` enables the new behavior. '
'This flag will exist only briefly, while we transition '
'users. See https://github.com/jax-ml/jax/pull/29679.'
'DO NOT RELY ON THIS FLAG.'),
include_in_jit_key=True,
include_in_trace_context=True)
# This config is temporary and should go away since this is a user problem.
# If they don't want 1 sized mesh axis names to show up in sharding and vma
# bits on ShapedArray, then their mesh (which they pass to set_mesh) should not
# contain those axes at all.
remove_size_one_mesh_axis_from_type = bool_state(
name='jax_remove_size_one_mesh_axis_from_type',
default=False,
help="Removes mesh axes of size 1 from ShapedArray.sharding and vma",
include_in_jit_key=True,
include_in_trace_context=True)
# TODO make it so people don't use this, this is internal...
_check_vma = bool_state(
name='check_vma',
default=False,
help='internal implementation detail of shard_map, DO NOT USE',
include_in_jit_key=True,
include_in_trace_context=True)
softmax_custom_jvp = bool_state(
name='jax_softmax_custom_jvp',
default=False,
upgrade=True,
help=('Use a new custom_jvp rule for jax.nn.softmax. The new rule should '
'improve memory usage and stability. Set True to use new '
'behavior. See https://github.com/jax-ml/jax/pull/15677'),
include_in_jit_key=True,
include_in_trace_context=True)
enable_custom_vjp_by_custom_transpose = bool_state(
name='jax_enable_custom_vjp_by_custom_transpose',
default=False,
upgrade=True,
help=('Enables an internal upgrade that implements `jax.custom_vjp` by '
'reduction to `jax.custom_jvp` and `jax.custom_transpose`.'))
raise_persistent_cache_errors = bool_state(
name='jax_raise_persistent_cache_errors',
default=False,
help=('If true, exceptions raised when reading or writing to the '
'persistent compilation cache will be allowed through, halting '
'program execution if not manually caught. If false, exceptions are '
'caught and raised as warnings, allowing program execution to '
'continue. Defaults to false so cache bugs or intermittent issues '
'are non-fatal.'))
persistent_cache_min_compile_time_secs = float_state(
name='jax_persistent_cache_min_compile_time_secs',
default=1.,
help=('The minimum compile time of a computation to be written to the '
'persistent compilation cache. This threshold can be raised to '
'decrease the number of entries written to the cache.'))
persistent_cache_min_entry_size_bytes = int_state(
name='jax_persistent_cache_min_entry_size_bytes',
default=0,
help=('The minimum size (in bytes) of an entry that will be cached in the '
'persistent compilation cache: '
'* -1: disable the size restriction and prevent overrides. '
'* Leave at default (0) to allow for overrides. The override will '
' typically ensure that the minimum size is optimal for the '
' filesystem being used for the cache. '
'* > 0: the actual minimum size desired; no overrides.'))
# TODO: Change default to all
persistent_cache_enable_xla_caches = optional_string_state(
name='jax_persistent_cache_enable_xla_caches',
default='xla_gpu_per_fusion_autotune_cache_dir',
help=('When the persistent cache is enabled, additional XLA caching will '
'also be enabled automatically. This option can be used to configure'
'which XLA caching methods will be enabled.'),
)
compilation_cache_include_metadata_in_key = bool_state(
name='jax_compilation_cache_include_metadata_in_key',
default=False,
help=(
'Include metadata, such as file names and line numbers, in the'
' compilation cache key. If false, the cache will still get hits even'
' if functions or files are moved, etc. However, it means that'
' executables loaded from the cache may have stale metadata, which'
' may show up in, e.g., profiles.'
),
)
hlo_source_file_canonicalization_regex = optional_string_state(
name='jax_hlo_source_file_canonicalization_regex',
default=None,
help=('Used to canonicalize the source_path metadata of HLO instructions '
'by removing the given regex. If set, re.sub() is called on each '
'source_file with the given regex, and all matches are removed. '
'This can be used to avoid spurious cache misses when using the '
'persistent compilation cache, which includes HLO metadata in the '
'cache key.'),
include_in_trace_context=True)
include_full_tracebacks_in_locations = bool_state(
name='jax_include_full_tracebacks_in_locations',
default=True,
help=(
'Include Python tracebacks in MLIR locations in IR emitted by JAX.'
),
)
traceback_in_locations_limit = int_state(
name='jax_traceback_in_locations_limit',
default=10,
help=(
'Limit the number of frames at the Python traceback frames included in '
'MLIR locations. If set to the negative value, traceback will not be '
'limited.'
),
)
share_binary_between_hosts = bool_state(
name='jax_share_binary_between_hosts',
default=False,
help=(
'If set to True, the compiled module will be shared between hosts '
'directly.'
),
)
share_binary_between_hosts_timeout_ms = int_state(
name='jax_share_binary_between_hosts_timeout_ms',
default=20 * 60 * 1000,
help='Timeout for the compiled module share.',
)
enable_pgle = bool_state(
name='jax_enable_pgle',
default=False,
help=(
'If set to True and the property jax_pgle_profiling_runs is set to '
'greater than 0, the modules will be recompiled after running specified '
'number times with collected data provided to the profile guided latency '
'estimator.'
),
include_in_jit_key=True,
include_in_trace_context=True,
)
pgle_profiling_runs = int_state(
name='jax_pgle_profiling_runs',
default=3,
help=(
'Amount of times module should be profiled before recompilation when '
'PGLE is used.'
),
include_in_jit_key=True,
include_in_trace_context=True,
)
pgle_aggregation_percentile = int_state(
name='jax_pgle_aggregation_percentile',
default=90,
help='Percentile used to aggregate performance data between devices when '
'PGLE is used.',
)
enable_compilation_cache = bool_state(
name='jax_enable_compilation_cache',
default=True,
help=('If set to False, the compilation cache will be disabled regardless '
'of whether set_cache_dir() was called. If set to True, the '
'path could be set to a default value or via a call to '
'set_cache_dir().'),
)
compilation_cache_dir = optional_string_state(
name='jax_compilation_cache_dir',
default=None,
help=('Path for the cache. '
'Precedence: '
'1. A call to compilation_cache.set_cache_dir(). '
'2. The value of this flag set in the command line or by default.'),
)
compilation_cache_expect_pgle = bool_state(
name='jax_compilation_cache_expect_pgle',
default=False,
help=('If set to True, compilation cache entries that were compiled with '
'profile data (i.e. PGLE was enabled and the requisite number of '
'executions were profiled) will be preferentially loaded, even if '
'PGLE is not currently enabled. A warning will be printed when no '
'preferred cache entry is found.')
)
compilation_cache_max_size = int_state(
name='jax_compilation_cache_max_size',
default=-1,
help=('The maximum size (in bytes) allowed for the persistent compilation '
'cache. When set, the least recently accessed cache entry(s) '
'will be deleted once the total cache directory size '
'exceeds the specified limit. '
'Caching will be disabled if this value is set to 0. A '
'special value of -1 indicates no limit, allowing the cache '
'size to grow indefinitely.'),
)
remove_custom_partitioning_ptr_from_cache_key = bool_state(
name='jax_remove_custom_partitioning_ptr_from_cache_key',
default=False,
help=('If set to True, remove the custom partitioning pointer '
'present in the precompiled stableHLO before hashing '
'during cache key computation. This is a potentially '
'unsafe flag to set and only users who are sure of '
'what they are trying to achieve should set it.'),
)
def _default_dtype_bits_deprecation(new_val):
if new_val != '64':
deprecations.warn(
'default-dtype-bits-config',
(
'The jax_default_dtype_bits configuration is deprecated in JAX v0.7.1'
' and will be removed in JAX v0.9.0.'
),
stacklevel=4
)
default_dtype_bits = enum_state(
name='jax_default_dtype_bits',
enum_values=['32', '64'],
default='64',
help=('[deprecated]. This flag was an experiment in allowing users to specify the'
' default bit width. It was never fully supported or tested. It will '
' have no effect after JAX v0.9.0, and be removed entirely in JAX v0.10.0.'),
extra_validator=_default_dtype_bits_deprecation)
| LegacyPrngKeyState |
python | matplotlib__matplotlib | lib/matplotlib/hatch.py | {
"start": 1544,
"end": 2160
} | class ____(HatchPatternBase):
def __init__(self, hatch, density):
self.num_lines = int((hatch.count('|') + hatch.count('+')) * density)
self.num_vertices = self.num_lines * 2
def set_vertices_and_codes(self, vertices, codes):
steps, stepsize = np.linspace(0.0, 1.0, self.num_lines, False,
retstep=True)
steps += stepsize / 2.
vertices[0::2, 0] = steps
vertices[0::2, 1] = 0.0
vertices[1::2, 0] = steps
vertices[1::2, 1] = 1.0
codes[0::2] = Path.MOVETO
codes[1::2] = Path.LINETO
| VerticalHatch |
python | kamyu104__LeetCode-Solutions | Python/student-attendance-record-i.py | {
"start": 29,
"end": 443
} | class ____(object):
def checkRecord(self, s):
"""
:type s: str
:rtype: bool
"""
count_A = 0
for i in xrange(len(s)):
if s[i] == 'A':
count_A += 1
if count_A == 2:
return False
if i < len(s) - 2 and s[i] == s[i+1] == s[i+2] == 'L':
return False
return True
| Solution |
python | doocs__leetcode | solution/3200-3299/3250.Find the Count of Monotonic Pairs I/Solution.py | {
"start": 0,
"end": 519
} | class ____:
def countOfPairs(self, nums: List[int]) -> int:
mod = 10**9 + 7
n, m = len(nums), max(nums)
f = [[0] * (m + 1) for _ in range(n)]
for j in range(nums[0] + 1):
f[0][j] = 1
for i in range(1, n):
s = list(accumulate(f[i - 1]))
for j in range(nums[i] + 1):
k = min(j, j + nums[i - 1] - nums[i])
if k >= 0:
f[i][j] = s[k] % mod
return sum(f[-1][: nums[-1] + 1]) % mod
| Solution |
python | realpython__materials | django-vue-graphql/source_code_final/back_end/blog/schema.py | {
"start": 219,
"end": 304
} | class ____(DjangoObjectType):
class Meta:
model = models.Profile
| AuthorType |
python | huggingface__transformers | src/transformers/models/glm4v_moe/modeling_glm4v_moe.py | {
"start": 24822,
"end": 26766
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: Glm4vMoeTextConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Glm4vMoeTextAttention(config=config, layer_idx=layer_idx)
if layer_idx >= config.first_k_dense_replace:
self.mlp = Glm4vMoeTextMoE(config)
else:
self.mlp = Glm4vMoeTextMLP(config)
self.input_layernorm = Glm4vMoeTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Glm4vMoeTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
| Glm4vMoeTextDecoderLayer |
python | sqlalchemy__sqlalchemy | test/sql/test_from_linter.py | {
"start": 774,
"end": 14108
} | class ____(fixtures.TablesTest):
@classmethod
def define_tables(cls, metadata):
Table("table_a", metadata, Column("col_a", Integer, primary_key=True))
Table("table_b", metadata, Column("col_b", Integer, primary_key=True))
Table("table_c", metadata, Column("col_c", Integer, primary_key=True))
Table("table_d", metadata, Column("col_d", Integer, primary_key=True))
def setup_test(self):
self.a = self.tables.table_a
self.b = self.tables.table_b
self.c = self.tables.table_c
self.d = self.tables.table_d
@testing.variation(
"what_to_clone", ["nothing", "fromclause", "whereclause", "both"]
)
def test_cloned_aliases(self, what_to_clone):
a1 = self.a.alias()
b1 = self.b.alias()
c = self.c
j1 = a1.join(b1, a1.c.col_a == b1.c.col_b)
j1_from = j1
b1_where = b1
if what_to_clone.fromclause or what_to_clone.both:
a1c = a1._clone()
b1c = b1._clone()
j1_from = a1c.join(b1c, a1c.c.col_a == b1c.c.col_b)
if what_to_clone.whereclause or what_to_clone.both:
b1_where = b1_where._clone()
query = (
select(c)
.select_from(c, j1_from)
.where(b1_where.c.col_b == c.c.col_c)
)
for start in None, c:
froms, start = find_unmatching_froms(query, start)
assert not froms
def test_everything_is_connected(self):
query = (
select(self.a)
.select_from(self.a.join(self.b, self.a.c.col_a == self.b.c.col_b))
.select_from(self.c)
.select_from(self.d)
.where(self.d.c.col_d == self.b.c.col_b)
.where(self.c.c.col_c == self.d.c.col_d)
.where(self.c.c.col_c == 5)
)
froms, start = find_unmatching_froms(query)
assert not froms
for start in self.a, self.b, self.c, self.d:
froms, start = find_unmatching_froms(query, start)
assert not froms
def test_plain_cartesian(self):
query = select(self.a).where(self.b.c.col_b == 5)
froms, start = find_unmatching_froms(query, self.a)
assert start == self.a
assert froms == {self.b}
froms, start = find_unmatching_froms(query, self.b)
assert start == self.b
assert froms == {self.a}
@testing.combinations(("lateral",), ("cartesian",), ("join",))
def test_lateral_subqueries(self, control):
"""
.. sourcecode:: sql
test=> create table a (id integer);
CREATE TABLE
test=> create table b (id integer);
CREATE TABLE
test=> insert into a(id) values (1), (2), (3);
INSERT 0 3
test=> insert into b(id) values (1), (2), (3);
INSERT 0 3
test=> select * from (select id from a) as a1,
lateral (select id from b where id=a1.id) as b1;
id | id
----+----
1 | 1
2 | 2
3 | 3
(3 rows)
"""
p1 = select(self.a).subquery()
p2 = select(self.b).where(self.b.c.col_b == p1.c.col_a).subquery()
if control == "lateral":
p2 = p2.lateral()
query = select(p1, p2)
if control == "join":
query = query.join_from(p1, p2, p1.c.col_a == p2.c.col_b)
froms, start = find_unmatching_froms(query, p1)
if control == "cartesian":
assert start is p1
assert froms == {p2}
else:
assert start is None
assert froms is None
froms, start = find_unmatching_froms(query, p2)
if control == "cartesian":
assert start is p2
assert froms == {p1}
else:
assert start is None
assert froms is None
def test_lateral_subqueries_w_joins(self):
p1 = select(self.a).subquery()
p2 = (
select(self.b)
.where(self.b.c.col_b == p1.c.col_a)
.subquery()
.lateral()
)
p3 = (
select(self.c)
.where(self.c.c.col_c == p1.c.col_a)
.subquery()
.lateral()
)
query = select(p1, p2, p3).join_from(p1, p2, true()).join(p3, true())
for p in (p1, p2, p3):
froms, start = find_unmatching_froms(query, p)
assert start is None
assert froms is None
def test_lateral_subqueries_ok_do_we_still_find_cartesians(self):
p1 = select(self.a).subquery()
p3 = select(self.a).subquery()
p2 = select(self.b).where(self.b.c.col_b == p3.c.col_a).subquery()
p2 = p2.lateral()
query = select(p1, p2, p3)
froms, start = find_unmatching_froms(query, p1)
assert start is p1
assert froms == {p2, p3}
froms, start = find_unmatching_froms(query, p2)
assert start is p2
assert froms == {p1}
froms, start = find_unmatching_froms(query, p3)
assert start is p3
assert froms == {p1}
@testing.variation("additional_transformation", ["alias", "none"])
@testing.variation("joins_implicitly", [True, False])
@testing.variation(
"type_", ["table_valued", "table_valued_derived", "column_valued"]
)
def test_fn_valued(
self, joins_implicitly, additional_transformation, type_
):
"""test #7845, #9009"""
my_table = table(
"tbl",
column("id", Integer),
column("data", JSON()),
)
sub_dict = my_table.c.data["d"]
if type_.table_valued or type_.table_valued_derived:
tv = func.json_each(sub_dict)
tv = tv.table_valued("key", joins_implicitly=joins_implicitly)
if type_.table_valued_derived:
tv = tv.render_derived(name="tv", with_types=True)
if additional_transformation.alias:
tv = tv.alias()
has_key = tv.c.key == "f"
stmt = select(my_table.c.id).where(has_key)
elif type_.column_valued:
tv = func.json_array_elements(sub_dict)
if additional_transformation.alias:
tv = tv.alias(joins_implicitly=joins_implicitly).column
else:
tv = tv.column_valued("key", joins_implicitly=joins_implicitly)
stmt = select(my_table.c.id, tv)
else:
type_.fail()
froms, start = find_unmatching_froms(stmt, my_table)
if joins_implicitly:
is_(start, None)
is_(froms, None)
elif type_.column_valued:
assert start == my_table
assert froms == {tv.scalar_alias}
elif type_.table_valued or type_.table_valued_derived:
assert start == my_table
assert froms == {tv}
else:
type_.fail()
def test_count_non_eq_comparison_operators(self):
query = select(self.a).where(self.a.c.col_a > self.b.c.col_b)
froms, start = find_unmatching_froms(query, self.a)
is_(start, None)
is_(froms, None)
def test_dont_count_non_comparison_operators(self):
query = select(self.a).where(self.a.c.col_a + self.b.c.col_b == 5)
froms, start = find_unmatching_froms(query, self.a)
assert start == self.a
assert froms == {self.b}
def test_disconnect_between_ab_cd(self):
query = (
select(self.a)
.select_from(self.a.join(self.b, self.a.c.col_a == self.b.c.col_b))
.select_from(self.c)
.select_from(self.d)
.where(self.c.c.col_c == self.d.c.col_d)
.where(self.c.c.col_c == 5)
)
for start in self.a, self.b:
froms, start = find_unmatching_froms(query, start)
assert start == start
assert froms == {self.c, self.d}
for start in self.c, self.d:
froms, start = find_unmatching_froms(query, start)
assert start == start
assert froms == {self.a, self.b}
def test_c_and_d_both_disconnected(self):
query = (
select(self.a)
.select_from(self.a.join(self.b, self.a.c.col_a == self.b.c.col_b))
.where(self.c.c.col_c == 5)
.where(self.d.c.col_d == 10)
)
for start in self.a, self.b:
froms, start = find_unmatching_froms(query, start)
assert start == start
assert froms == {self.c, self.d}
froms, start = find_unmatching_froms(query, self.c)
assert start == self.c
assert froms == {self.a, self.b, self.d}
froms, start = find_unmatching_froms(query, self.d)
assert start == self.d
assert froms == {self.a, self.b, self.c}
def test_now_connected(self):
query = (
select(self.a)
.select_from(self.a.join(self.b, self.a.c.col_a == self.b.c.col_b))
.select_from(self.c.join(self.d, self.c.c.col_c == self.d.c.col_d))
.where(self.c.c.col_c == self.b.c.col_b)
.where(self.c.c.col_c == 5)
.where(self.d.c.col_d == 10)
)
froms, start = find_unmatching_froms(query)
assert not froms
for start in self.a, self.b, self.c, self.d:
froms, start = find_unmatching_froms(query, start)
assert not froms
def test_disconnected_subquery(self):
subq = (
select(self.a).where(self.a.c.col_a == self.b.c.col_b).subquery()
)
stmt = select(self.c).select_from(subq)
froms, start = find_unmatching_froms(stmt, self.c)
assert start == self.c
assert froms == {subq}
froms, start = find_unmatching_froms(stmt, subq)
assert start == subq
assert froms == {self.c}
def test_now_connect_it(self):
subq = (
select(self.a).where(self.a.c.col_a == self.b.c.col_b).subquery()
)
stmt = (
select(self.c)
.select_from(subq)
.where(self.c.c.col_c == subq.c.col_a)
)
froms, start = find_unmatching_froms(stmt)
assert not froms
for start in self.c, subq:
froms, start = find_unmatching_froms(stmt, start)
assert not froms
def test_right_nested_join_without_issue(self):
query = select(self.a).select_from(
self.a.join(
self.b.join(self.c, self.b.c.col_b == self.c.c.col_c),
self.a.c.col_a == self.b.c.col_b,
)
)
froms, start = find_unmatching_froms(query)
assert not froms
for start in self.a, self.b, self.c:
froms, start = find_unmatching_froms(query, start)
assert not froms
def test_join_on_true(self):
# test that a join(a, b) counts a->b as an edge even if there isn't
# actually a join condition. this essentially allows a cartesian
# product to be added explicitly.
query = select(self.a).select_from(self.a.join(self.b, true()))
froms, start = find_unmatching_froms(query)
assert not froms
def test_join_on_true_muti_levels(self):
"""test #6886"""
# test that a join(a, b).join(c) counts b->c as an edge even if there
# isn't actually a join condition. this essentially allows a cartesian
# product to be added explicitly.
query = select(self.a, self.b, self.c).select_from(
self.a.join(self.b, true()).join(self.c, true())
)
froms, start = find_unmatching_froms(query)
assert not froms
def test_right_nested_join_with_an_issue(self):
query = (
select(self.a)
.select_from(
self.a.join(
self.b.join(self.c, self.b.c.col_b == self.c.c.col_c),
self.a.c.col_a == self.b.c.col_b,
)
)
.where(self.d.c.col_d == 5)
)
for start in self.a, self.b, self.c:
froms, start = find_unmatching_froms(query, start)
assert start == start
assert froms == {self.d}
froms, start = find_unmatching_froms(query, self.d)
assert start == self.d
assert froms == {self.a, self.b, self.c}
def test_no_froms(self):
query = select(1)
froms, start = find_unmatching_froms(query)
assert not froms
@testing.variation("dml", ["update", "delete"])
@testing.combinations(
(False, False), (True, False), (True, True), argnames="twotable,error"
)
def test_dml(self, dml, twotable, error):
if dml.update:
stmt = update(self.a)
elif dml.delete:
stmt = delete(self.a)
else:
dml.fail()
stmt = stmt.where(self.a.c.col_a == "a1")
if twotable:
stmt = stmt.where(self.b.c.col_b == "a1")
if not error:
stmt = stmt.where(self.b.c.col_b == self.a.c.col_a)
froms, _ = find_unmatching_froms(stmt)
if error:
assert froms
else:
assert not froms
| TestFindUnmatchingFroms |
python | h5py__h5py | h5py/_hl/group.py | {
"start": 29629,
"end": 29812
} | class ____:
"""
Represents a hard link in an HDF5 file. Provided only so that
Group.get works in a sensible way. Has no other function.
"""
pass
| HardLink |
python | uqfoundation__dill | dill/tests/test_abc.py | {
"start": 988,
"end": 4227
} | class ____(OneTwoThree):
def __init__(self):
self._bar = None
def foo(self):
return "Instance Method FOO"
@property
def bar(self):
return self._bar
@bar.setter
def bar(self, value):
self._bar = value
@classmethod
def cfoo(cls):
return "Class Method CFOO"
@staticmethod
def sfoo():
return "Static Method SFOO"
def test_abc_non_local():
assert dill.copy(OneTwoThree) is not OneTwoThree
assert dill.copy(EasyAsAbc) is not EasyAsAbc
with warnings.catch_warnings():
warnings.simplefilter("ignore", dill.PicklingWarning)
assert dill.copy(OneTwoThree, byref=True) is OneTwoThree
assert dill.copy(EasyAsAbc, byref=True) is EasyAsAbc
instance = EasyAsAbc()
# Set a property that StockPickle can't preserve
instance.bar = lambda x: x**2
depickled = dill.copy(instance)
assert type(depickled) is type(instance) #NOTE: issue #612, test_abc_local
#NOTE: dill.copy of local (or non-local) classes should (not) be the same?
assert type(depickled.bar) is FunctionType
assert depickled.bar(3) == 9
assert depickled.sfoo() == "Static Method SFOO"
assert depickled.cfoo() == "Class Method CFOO"
assert depickled.foo() == "Instance Method FOO"
def test_abc_local():
"""
Test using locally scoped ABC class
"""
class LocalABC(ABC):
@abc.abstractmethod
def foo(self):
pass
def baz(self):
return repr(self)
labc = dill.copy(LocalABC)
assert labc is not LocalABC
assert type(labc) is type(LocalABC)
#NOTE: dill.copy of local (or non-local) classes should (not) be the same?
# <class '__main__.LocalABC'>
# <class '__main__.test_abc_local.<locals>.LocalABC'>
class Real(labc):
def foo(self):
return "True!"
def baz(self):
return "My " + super(Real, self).baz()
real = Real()
assert real.foo() == "True!"
try:
labc()
except TypeError as e:
# Expected error
pass
else:
print('Failed to raise type error')
assert False
labc2, pik = dill.copy((labc, Real()))
assert 'Real' == type(pik).__name__
assert '.Real' in type(pik).__qualname__
assert type(pik) is not Real
assert labc2 is not LocalABC
assert labc2 is not labc
assert isinstance(pik, labc2)
assert not isinstance(pik, labc)
assert not isinstance(pik, LocalABC)
assert pik.baz() == "My " + repr(pik)
def test_meta_local_no_cache():
"""
Test calling metaclass and cache registration
"""
LocalMetaABC = abc.ABCMeta('LocalMetaABC', (), {})
class ClassyClass:
pass
class KlassyClass:
pass
LocalMetaABC.register(ClassyClass)
assert not issubclass(KlassyClass, LocalMetaABC)
assert issubclass(ClassyClass, LocalMetaABC)
res = dill.dumps((LocalMetaABC, ClassyClass, KlassyClass))
lmabc, cc, kc = dill.loads(res)
assert type(lmabc) == type(LocalMetaABC)
assert not issubclass(kc, lmabc)
assert issubclass(cc, lmabc)
if __name__ == '__main__':
test_abc_non_local()
test_abc_local()
test_meta_local_no_cache()
| EasyAsAbc |
python | openai__openai-python | src/openai/resources/realtime/calls.py | {
"start": 31936,
"end": 32531
} | class ____:
def __init__(self, calls: Calls) -> None:
self._calls = calls
self.create = to_custom_streamed_response_wrapper(
calls.create,
StreamedBinaryAPIResponse,
)
self.accept = to_streamed_response_wrapper(
calls.accept,
)
self.hangup = to_streamed_response_wrapper(
calls.hangup,
)
self.refer = to_streamed_response_wrapper(
calls.refer,
)
self.reject = to_streamed_response_wrapper(
calls.reject,
)
| CallsWithStreamingResponse |
python | kamyu104__LeetCode-Solutions | Python/the-dining-philosophers.py | {
"start": 48,
"end": 995
} | class ____(object):
def __init__(self):
self._l = [threading.Lock() for _ in xrange(5)]
# call the functions directly to execute, for example, eat()
def wantsToEat(self, philosopher, pickLeftFork, pickRightFork, eat, putLeftFork, putRightFork):
"""
:type philosopher: int
:type pickLeftFork: method
:type pickRightFork: method
:type eat: method
:type putLeftFork: method
:type putRightFork: method
:rtype: void
"""
left, right = philosopher, (philosopher+4)%5
first, second = left, right
if philosopher%2 == 0:
first, second = left, right
else:
first, second = right, left
with self._l[first]:
with self._l[second]:
pickLeftFork()
pickRightFork()
eat()
putLeftFork()
putRightFork()
| DiningPhilosophers |
python | kamyu104__LeetCode-Solutions | Python/minimum-score-after-removals-on-a-tree.py | {
"start": 4362,
"end": 5897
} | class ____(object):
def minimumScore(self, nums, edges):
"""
:type nums: List[int]
:type edges: List[List[int]]
:rtype: int
"""
def iter_dfs(nums, adj, u, p):
result = []
stk = [(1, (u, p, [0]))]
while stk:
step, args = stk.pop()
if step == 1:
u, p, ret = args
new_rets = []
stk.append((2, (u, new_rets, ret)))
for v in adj[u]:
if v == p:
continue
new_rets.append([0])
stk.append((1, (v, u, new_rets[-1])))
elif step == 2:
u, new_rets, ret = args
ret[0] = nums[u]
for x in new_rets:
ret[0] ^= x[0]
result.append(ret[0])
return result
adj = [[] for _ in xrange(len(nums))]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
total = reduce(lambda x, y: x^y, nums)
result = float("inf")
for u, v in edges:
for candidates in (iter_dfs(nums, adj, u, v), iter_dfs(nums, adj, v, u)):
total2 = candidates.pop()
for x in candidates:
a, b, c = total^total2, x, total2^x
result = min(result, max(a, b, c)-min(a, b, c))
return result
| Solution4 |
python | pytorch__pytorch | test/distributed/test_dist2.py | {
"start": 8091,
"end": 8654
} | class ____(Dist2MultiProcessTestCase):
@property
def device(self) -> torch.device:
return torch.device("cpu")
@requires_gloo()
def new_group(self) -> torch.distributed.ProcessGroup:
os.environ["RANK"] = str(self.rank)
os.environ["WORLD_SIZE"] = str(self.world_size)
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = "29500"
return dist2.new_group(
backend="gloo",
timeout=timedelta(seconds=60),
device=self.device,
)
| ProcessGroupGlooTest |
python | kamyu104__LeetCode-Solutions | Python/maximum-manhattan-distance-after-k-changes.py | {
"start": 38,
"end": 525
} | class ____(object):
def maxDistance(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
result = x = y = 0
for i, c in enumerate(s, 1):
if c == 'E':
x += 1
elif c == 'W':
x -= 1
elif c == 'N':
y += 1
elif c == 'S':
y -= 1
result = max(result, min(abs(x)+abs(y)+2*k, i))
return result
| Solution |
python | astropy__astropy | astropy/wcs/tests/test_wcs.py | {
"start": 1693,
"end": 3152
} | class ____:
def setup_method(self):
# get the list of the hdr files that we want to test
self._file_list = list(get_pkg_data_filenames("data/maps", pattern="*.hdr"))
def test_consistency(self):
# Check to see that we actually have the list we expect, so that we
# do not get in a situation where the list is empty or incomplete and
# the tests still seem to pass correctly.
# how many do we expect to see?
n_data_files = 28
assert len(self._file_list) == n_data_files, (
f"test_spectra has wrong number data files: found {len(self._file_list)},"
f" expected {n_data_files}"
)
def test_maps(self):
for filename in self._file_list:
# use the base name of the file, so we get more useful messages
# for failing tests.
filename = os.path.basename(filename)
# Now find the associated file in the installed wcs test directory.
header = get_pkg_data_contents(
os.path.join("data", "maps", filename), encoding="binary"
)
# finally run the test.
wcsobj = wcs.WCS(header)
world = wcsobj.wcs_pix2world([[97, 97]], 1)
assert_array_almost_equal(world, [[285.0, -66.25]], decimal=1)
pix = wcsobj.wcs_world2pix([[285.0, -66.25]], 1)
assert_array_almost_equal(pix, [[97, 97]], decimal=0)
| TestMaps |
python | astropy__astropy | astropy/nddata/mixins/ndio.py | {
"start": 234,
"end": 1880
} | class ____(registry.UnifiedReadWrite):
"""Read and parse gridded N-dimensional data and return as an NDData-derived
object.
This function provides the NDDataBase interface to the astropy unified I/O
layer. This allows easily reading a file in the supported data formats,
for example::
>>> from astropy.nddata import CCDData
>>> dat = CCDData.read('image.fits')
Get help on the available readers for ``CCDData`` using the``help()`` method::
>>> CCDData.read.help() # Get help reading CCDData and list supported formats
>>> CCDData.read.help('fits') # Get detailed help on CCDData FITS reader
>>> CCDData.read.list_formats() # Print list of available formats
For more information see:
- https://docs.astropy.org/en/stable/nddata
- https://docs.astropy.org/en/stable/io/unified.html
Parameters
----------
*args : tuple, optional
Positional arguments passed through to data reader. If supplied the
first argument is the input filename.
format : str, optional
File format specifier.
cache : bool, optional
Caching behavior if file is a URL.
**kwargs : dict, optional
Keyword arguments passed through to data reader.
Returns
-------
out : `NDData` subclass
NDData-basd object corresponding to file contents
Notes
-----
"""
def __init__(self, instance, cls):
super().__init__(instance, cls, "read", registry=None)
# uses default global registry
def __call__(self, *args, **kwargs):
return self.registry.read(self._cls, *args, **kwargs)
| NDDataRead |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_dag_tags.py | {
"start": 4790,
"end": 8206
} | class ____(TestDagEndpoint):
"""Unit tests for Get DAG Tags."""
@pytest.mark.parametrize(
("query_params", "expected_status_code", "expected_dag_tags", "expected_total_entries"),
[
# test with offset, limit, and without any tag_name_pattern
(
{},
200,
[
"example",
"tag_1",
"tag_2",
],
3,
),
(
{"offset": 1},
200,
[
"tag_1",
"tag_2",
],
3,
),
(
{"limit": 2},
200,
[
"example",
"tag_1",
],
3,
),
(
{"offset": 1, "limit": 2},
200,
[
"tag_1",
"tag_2",
],
3,
),
# test with tag_name_pattern
(
{"tag_name_pattern": "invalid"},
200,
[],
0,
),
(
{"tag_name_pattern": "1"},
200,
["tag_1"],
1,
),
(
{"tag_name_pattern": "tag%"},
200,
["tag_1", "tag_2"],
2,
),
# test order_by
(
{"order_by": "-name"},
200,
["tag_2", "tag_1", "example"],
3,
),
# test all query params
(
{"tag_name_pattern": "t%", "order_by": "-name", "offset": 1, "limit": 1},
200,
["tag_1"],
2,
),
(
{"tag_name_pattern": "~", "offset": 1, "limit": 2},
200,
["tag_1", "tag_2"],
3,
),
# test invalid query params
(
{"order_by": "dag_id"},
400,
None,
None,
),
(
{"order_by": "-dag_id"},
400,
None,
None,
),
],
)
def test_get_dag_tags(
self, test_client, query_params, expected_status_code, expected_dag_tags, expected_total_entries
):
with assert_queries_count(3 if expected_status_code == 200 else 2):
response = test_client.get("/dagTags", params=query_params)
assert response.status_code == expected_status_code
if expected_status_code != 200:
return
res_json = response.json()
expected = {
"tags": expected_dag_tags,
"total_entries": expected_total_entries,
}
assert res_json == expected
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get("/dagTags")
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.get("/dagTags")
assert response.status_code == 403
| TestDagTags |
python | django__django | tests/migrations/test_executor.py | {
"start": 707,
"end": 34807
} | class ____(MigrationTestBase):
"""
Tests the migration executor (full end-to-end running).
Bear in mind that if these are failing you should fix the other
test failures first, as they may be propagating into here.
"""
available_apps = [
"migrations",
"migrations2",
"django.contrib.auth",
"django.contrib.contenttypes",
]
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_run(self):
"""
Tests running a simple set of migrations.
"""
executor = MigrationExecutor(connection)
# Let's look at the plan first and make sure it's up to scratch
plan = executor.migration_plan([("migrations", "0002_second")])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
(executor.loader.graph.nodes["migrations", "0002_second"], False),
],
)
# Were the tables there before?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
# Alright, let's try running it
executor.migrate([("migrations", "0002_second")])
# Are the tables there now?
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_book")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Alright, let's undo what we did
plan = executor.migration_plan([("migrations", None)])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0002_second"], True),
(executor.loader.graph.nodes["migrations", "0001_initial"], True),
],
)
executor.migrate([("migrations", None)])
# Are the tables gone?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_run_with_squashed(self):
"""
Tests running a squashed migration from zero (should ignore what it
replaces)
"""
executor = MigrationExecutor(connection)
# Check our leaf node is the squashed one
leaves = [
key for key in executor.loader.graph.leaf_nodes() if key[0] == "migrations"
]
self.assertEqual(leaves, [("migrations", "0001_squashed_0002")])
# Check the plan
plan = executor.migration_plan([("migrations", "0001_squashed_0002")])
self.assertEqual(
plan,
[
(
executor.loader.graph.nodes["migrations", "0001_squashed_0002"],
False,
),
],
)
# Were the tables there before?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
# Alright, let's try running it
executor.migrate([("migrations", "0001_squashed_0002")])
# Are the tables there now?
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_book")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Alright, let's undo what we did. Should also just use squashed.
plan = executor.migration_plan([("migrations", None)])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_squashed_0002"], True),
],
)
executor.migrate([("migrations", None)])
# Are the tables gone?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"},
)
def test_migrate_backward_to_squashed_migration(self):
executor = MigrationExecutor(connection)
try:
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
executor.migrate([("migrations", "0001_squashed_0002")])
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_book")
executor.loader.build_graph()
# Migrate backward to a squashed migration.
executor.migrate([("migrations", "0001_initial")])
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_book")
finally:
# Unmigrate everything.
executor = MigrationExecutor(connection)
executor.migrate([("migrations", None)])
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_non_atomic"}
)
def test_non_atomic_migration(self):
"""
Applying a non-atomic migration works as expected.
"""
executor = MigrationExecutor(connection)
with self.assertRaisesMessage(RuntimeError, "Abort migration"):
executor.migrate([("migrations", "0001_initial")])
self.assertTableExists("migrations_publisher")
migrations_apps = executor.loader.project_state(
("migrations", "0001_initial")
).apps
Publisher = migrations_apps.get_model("migrations", "Publisher")
self.assertTrue(Publisher.objects.exists())
self.assertTableNotExists("migrations_book")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_atomic_operation"}
)
def test_atomic_operation_in_non_atomic_migration(self):
"""
An atomic operation is properly rolled back inside a non-atomic
migration.
"""
executor = MigrationExecutor(connection)
with self.assertRaisesMessage(RuntimeError, "Abort migration"):
executor.migrate([("migrations", "0001_initial")])
migrations_apps = executor.loader.project_state(
("migrations", "0001_initial")
).apps
Editor = migrations_apps.get_model("migrations", "Editor")
self.assertFalse(Editor.objects.exists())
# Record previous migration as successful.
executor.migrate([("migrations", "0001_initial")], fake=True)
# Rebuild the graph to reflect the new DB state.
executor.loader.build_graph()
# Migrating backwards is also atomic.
with self.assertRaisesMessage(RuntimeError, "Abort migration"):
executor.migrate([("migrations", None)])
self.assertFalse(Editor.objects.exists())
@override_settings(
    MIGRATION_MODULES={
        "migrations": "migrations.test_migrations",
        "migrations2": "migrations2.test_migrations_2",
    }
)
def test_empty_plan(self) -> None:
    """
    Re-planning a full migration of a fully-migrated set doesn't
    perform spurious unmigrations and remigrations.

    There was previously a bug where the executor just always performed the
    backwards plan for applied migrations - which even for the most recent
    migration in an app, might include other, dependent apps, and these
    were being unmigrated.
    """
    # Make the initial plan, check it
    executor = MigrationExecutor(connection)
    plan = executor.migration_plan(
        [
            ("migrations", "0002_second"),
            ("migrations2", "0001_initial"),
        ]
    )
    self.assertEqual(
        plan,
        [
            (executor.loader.graph.nodes["migrations", "0001_initial"], False),
            (executor.loader.graph.nodes["migrations", "0002_second"], False),
            (executor.loader.graph.nodes["migrations2", "0001_initial"], False),
        ],
    )
    # Fake-apply all migrations
    executor.migrate(
        [("migrations", "0002_second"), ("migrations2", "0001_initial")], fake=True
    )
    # Rebuild the graph to reflect the new DB state
    executor.loader.build_graph()
    # Now plan a second time and make sure it's empty
    plan = executor.migration_plan(
        [
            ("migrations", "0002_second"),
            ("migrations2", "0001_initial"),
        ]
    )
    self.assertEqual(plan, [])
    # The resulting state should include applied migrations.
    state = executor.migrate(
        [
            ("migrations", "0002_second"),
            ("migrations2", "0001_initial"),
        ]
    )
    self.assertIn(("migrations", "book"), state.models)
    self.assertIn(("migrations", "author"), state.models)
    self.assertIn(("migrations2", "otherauthor"), state.models)
    # Erase all the fake records
    executor.recorder.record_unapplied("migrations2", "0001_initial")
    executor.recorder.record_unapplied("migrations", "0002_second")
    executor.recorder.record_unapplied("migrations", "0001_initial")
@override_settings(
    MIGRATION_MODULES={
        "migrations": "migrations.test_migrations",
        "migrations2": "migrations2.test_migrations_2_no_deps",
    }
)
def test_mixed_plan_not_supported(self) -> None:
    """
    Although the MigrationExecutor interfaces allows for mixed migration
    plans (combined forwards and backwards migrations) this is not
    supported.
    """
    # Prepare for mixed plan
    executor = MigrationExecutor(connection)
    plan = executor.migration_plan([("migrations", "0002_second")])
    self.assertEqual(
        plan,
        [
            (executor.loader.graph.nodes["migrations", "0001_initial"], False),
            (executor.loader.graph.nodes["migrations", "0002_second"], False),
        ],
    )
    executor.migrate(None, plan)
    # Rebuild the graph to reflect the new DB state
    executor.loader.build_graph()
    self.assertIn(
        ("migrations", "0001_initial"), executor.loader.applied_migrations
    )
    self.assertIn(("migrations", "0002_second"), executor.loader.applied_migrations)
    self.assertNotIn(
        ("migrations2", "0001_initial"), executor.loader.applied_migrations
    )
    # Generate mixed plan: backwards for "migrations", forwards for
    # "migrations2".
    plan = executor.migration_plan(
        [
            ("migrations", None),
            ("migrations2", "0001_initial"),
        ]
    )
    msg = (
        "Migration plans with both forwards and backwards migrations are "
        "not supported. Please split your migration process into separate "
        "plans of only forwards OR backwards migrations."
    )
    with self.assertRaisesMessage(InvalidMigrationPlan, msg) as cm:
        executor.migrate(None, plan)
    # The exception carries the offending plan as its second argument.
    self.assertEqual(
        cm.exception.args[1],
        [
            (executor.loader.graph.nodes["migrations", "0002_second"], True),
            (executor.loader.graph.nodes["migrations", "0001_initial"], True),
            (executor.loader.graph.nodes["migrations2", "0001_initial"], False),
        ],
    )
    # Rebuild the graph to reflect the new DB state
    executor.loader.build_graph()
    executor.migrate(
        [
            ("migrations", None),
            ("migrations2", None),
        ]
    )
    # Are the tables gone?
    self.assertTableNotExists("migrations_author")
    self.assertTableNotExists("migrations_book")
    self.assertTableNotExists("migrations2_otherauthor")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_soft_apply(self) -> None:
    """
    Tests detection of initial migrations already having been applied.
    """
    state = {"faked": None}

    def fake_storer(phase, migration=None, fake=None):
        # Progress callback: remember whether the last operation was faked.
        state["faked"] = fake

    executor = MigrationExecutor(connection, progress_callback=fake_storer)
    # Were the tables there before?
    self.assertTableNotExists("migrations_author")
    self.assertTableNotExists("migrations_tribble")
    # Run it normally
    self.assertEqual(
        executor.migration_plan([("migrations", "0001_initial")]),
        [
            (executor.loader.graph.nodes["migrations", "0001_initial"], False),
        ],
    )
    executor.migrate([("migrations", "0001_initial")])
    # Are the tables there now?
    self.assertTableExists("migrations_author")
    self.assertTableExists("migrations_tribble")
    # We shouldn't have faked that one
    self.assertIs(state["faked"], False)
    # Rebuild the graph to reflect the new DB state
    executor.loader.build_graph()
    # Fake-reverse that
    executor.migrate([("migrations", None)], fake=True)
    # Are the tables still there?
    self.assertTableExists("migrations_author")
    self.assertTableExists("migrations_tribble")
    # Make sure that was faked
    self.assertIs(state["faked"], True)
    # Finally, migrate forwards; this should fake-apply our initial
    # migration
    executor.loader.build_graph()
    self.assertEqual(
        executor.migration_plan([("migrations", "0001_initial")]),
        [
            (executor.loader.graph.nodes["migrations", "0001_initial"], False),
        ],
    )
    # Applying the migration should raise a database level error
    # because we haven't given the --fake-initial option
    with self.assertRaises(DatabaseError):
        executor.migrate([("migrations", "0001_initial")])
    # Reset the faked state
    state = {"faked": None}
    # Allow faking of initial CreateModel operations
    executor.migrate([("migrations", "0001_initial")], fake_initial=True)
    self.assertIs(state["faked"], True)
    # And migrate back to clean up the database
    executor.loader.build_graph()
    executor.migrate([("migrations", None)])
    self.assertTableNotExists("migrations_author")
    self.assertTableNotExists("migrations_tribble")
@override_settings(
    MIGRATION_MODULES={
        "migrations": "migrations.test_migrations_custom_user",
        "django.contrib.auth": "django.contrib.auth.migrations",
    },
    AUTH_USER_MODEL="migrations.Author",
)
def test_custom_user(self) -> None:
    """
    Regression test for #22325 - references to a custom user model defined
    in the same app are not resolved correctly.
    """
    with isolate_lru_cache(global_apps.get_swappable_settings_name):
        executor = MigrationExecutor(connection)
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        # Migrate forwards
        executor.migrate([("migrations", "0001_initial")])
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        # The soft-application detection works.
        # Change table_names to not return auth_user during this as it
        # wouldn't be there in a normal run, and ensure migrations.Author
        # exists in the global app registry temporarily.
        old_table_names = connection.introspection.table_names
        connection.introspection.table_names = lambda c: [
            x for x in old_table_names(c) if x != "auth_user"
        ]
        migrations_apps = executor.loader.project_state(
            ("migrations", "0001_initial"),
        ).apps
        global_apps.get_app_config("migrations").models["author"] = (
            migrations_apps.get_model("migrations", "author")
        )
        try:
            migration = executor.loader.get_migration("auth", "0001_initial")
            self.assertIs(executor.detect_soft_applied(None, migration)[0], True)
        finally:
            # Always restore the patched introspection and app registry.
            connection.introspection.table_names = old_table_names
            del global_apps.get_app_config("migrations").models["author"]
        # Migrate back to clean up the database.
        executor.loader.build_graph()
        executor.migrate([("migrations", None)])
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
@override_settings(
    MIGRATION_MODULES={
        "migrations": "migrations.test_add_many_to_many_field_initial",
    },
)
def test_detect_soft_applied_add_field_manytomanyfield(self) -> None:
    """
    executor.detect_soft_applied() detects ManyToManyField tables from an
    AddField operation. This checks the case of AddField in a migration
    with other operations (0001) and the case of AddField in its own
    migration (0002).
    """
    tables = [
        # from 0001
        "migrations_project",
        "migrations_task",
        "migrations_project_tasks",
        # from 0002
        "migrations_task_projects",
    ]
    executor = MigrationExecutor(connection)
    # Create the tables for 0001 but make it look like the migration hasn't
    # been applied.
    executor.migrate([("migrations", "0001_initial")])
    executor.migrate([("migrations", None)], fake=True)
    for table in tables[:3]:
        self.assertTableExists(table)
    # Table detection sees 0001 is applied but not 0002.
    migration = executor.loader.get_migration("migrations", "0001_initial")
    self.assertIs(executor.detect_soft_applied(None, migration)[0], True)
    migration = executor.loader.get_migration("migrations", "0002_initial")
    self.assertIs(executor.detect_soft_applied(None, migration)[0], False)
    # Create the tables for both migrations but make it look like neither
    # has been applied.
    executor.loader.build_graph()
    executor.migrate([("migrations", "0001_initial")], fake=True)
    executor.migrate([("migrations", "0002_initial")])
    executor.loader.build_graph()
    executor.migrate([("migrations", None)], fake=True)
    # Table detection sees 0002 is applied.
    migration = executor.loader.get_migration("migrations", "0002_initial")
    self.assertIs(executor.detect_soft_applied(None, migration)[0], True)
    # Leave the tables for 0001 except the many-to-many table. That missing
    # table should cause detect_soft_applied() to return False.
    with connection.schema_editor() as editor:
        for table in tables[2:]:
            editor.execute(editor.sql_delete_table % {"table": table})
    migration = executor.loader.get_migration("migrations", "0001_initial")
    self.assertIs(executor.detect_soft_applied(None, migration)[0], False)
    # Cleanup by removing the remaining tables.
    with connection.schema_editor() as editor:
        for table in tables[:2]:
            editor.execute(editor.sql_delete_table % {"table": table})
    for table in tables:
        self.assertTableNotExists(table)
@override_settings(
    INSTALLED_APPS=[
        "migrations.migrations_test_apps.lookuperror_a",
        "migrations.migrations_test_apps.lookuperror_b",
        "migrations.migrations_test_apps.lookuperror_c",
    ]
)
def test_unrelated_model_lookups_forwards(self) -> None:
    """
    #24123 - All models of apps already applied which are
    unrelated to the first app being applied are part of the initial model
    state.
    """
    try:
        executor = MigrationExecutor(connection)
        self.assertTableNotExists("lookuperror_a_a1")
        self.assertTableNotExists("lookuperror_b_b1")
        self.assertTableNotExists("lookuperror_c_c1")
        executor.migrate([("lookuperror_b", "0003_b3")])
        self.assertTableExists("lookuperror_b_b3")
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Migrate forwards -- This led to a lookup LookupErrors because
        # lookuperror_b.B2 is already applied
        executor.migrate(
            [
                ("lookuperror_a", "0004_a4"),
                ("lookuperror_c", "0003_c3"),
            ]
        )
        self.assertTableExists("lookuperror_a_a4")
        self.assertTableExists("lookuperror_c_c3")
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
    finally:
        # Cleanup
        executor.migrate(
            [
                ("lookuperror_a", None),
                ("lookuperror_b", None),
                ("lookuperror_c", None),
            ]
        )
        self.assertTableNotExists("lookuperror_a_a1")
        self.assertTableNotExists("lookuperror_b_b1")
        self.assertTableNotExists("lookuperror_c_c1")
@override_settings(
    INSTALLED_APPS=[
        "migrations.migrations_test_apps.lookuperror_a",
        "migrations.migrations_test_apps.lookuperror_b",
        "migrations.migrations_test_apps.lookuperror_c",
    ]
)
def test_unrelated_model_lookups_backwards(self) -> None:
    """
    #24123 - All models of apps being unapplied which are
    unrelated to the first app being unapplied are part of the initial
    model state.
    """
    try:
        executor = MigrationExecutor(connection)
        self.assertTableNotExists("lookuperror_a_a1")
        self.assertTableNotExists("lookuperror_b_b1")
        self.assertTableNotExists("lookuperror_c_c1")
        executor.migrate(
            [
                ("lookuperror_a", "0004_a4"),
                ("lookuperror_b", "0003_b3"),
                ("lookuperror_c", "0003_c3"),
            ]
        )
        self.assertTableExists("lookuperror_b_b3")
        self.assertTableExists("lookuperror_a_a4")
        self.assertTableExists("lookuperror_c_c3")
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Migrate backwards -- This led to a lookup LookupErrors because
        # lookuperror_b.B2 is not in the initial state (unrelated to app c)
        executor.migrate([("lookuperror_a", None)])
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
    finally:
        # Cleanup
        executor.migrate([("lookuperror_b", None), ("lookuperror_c", None)])
        self.assertTableNotExists("lookuperror_a_a1")
        self.assertTableNotExists("lookuperror_b_b1")
        self.assertTableNotExists("lookuperror_c_c1")
@override_settings(
    INSTALLED_APPS=[
        "migrations.migrations_test_apps.mutate_state_a",
        "migrations.migrations_test_apps.mutate_state_b",
    ]
)
def test_unrelated_applied_migrations_mutate_state(self) -> None:
    """
    #26647 - Unrelated applied migrations should be part of the final
    state in both directions.
    """
    executor = MigrationExecutor(connection)
    executor.migrate(
        [
            ("mutate_state_b", "0002_add_field"),
        ]
    )
    # Migrate forward.
    executor.loader.build_graph()
    state = executor.migrate(
        [
            ("mutate_state_a", "0001_initial"),
        ]
    )
    # The field added by the already-applied mutate_state_b migration is
    # present in the returned state.
    self.assertIn("added", state.models["mutate_state_b", "b"].fields)
    executor.loader.build_graph()
    # Migrate backward.
    state = executor.migrate(
        [
            ("mutate_state_a", None),
        ]
    )
    self.assertIn("added", state.models["mutate_state_b", "b"].fields)
    executor.migrate(
        [
            ("mutate_state_b", None),
        ]
    )
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_process_callback(self) -> None:
    """
    #24129 - Tests callback process
    """
    call_args_list = []

    def callback(*args):
        # Record every progress-callback invocation for later comparison.
        call_args_list.append(args)

    executor = MigrationExecutor(connection, progress_callback=callback)
    # Were the tables there before?
    self.assertTableNotExists("migrations_author")
    self.assertTableNotExists("migrations_tribble")
    executor.migrate(
        [
            ("migrations", "0001_initial"),
            ("migrations", "0002_second"),
        ]
    )
    # Rebuild the graph to reflect the new DB state
    executor.loader.build_graph()
    executor.migrate(
        [
            ("migrations", None),
            ("migrations", None),
        ]
    )
    self.assertTableNotExists("migrations_author")
    self.assertTableNotExists("migrations_tribble")
    migrations = executor.loader.graph.nodes
    expected = [
        ("render_start",),
        ("render_success",),
        ("apply_start", migrations["migrations", "0001_initial"], False),
        ("apply_success", migrations["migrations", "0001_initial"], False),
        ("apply_start", migrations["migrations", "0002_second"], False),
        ("apply_success", migrations["migrations", "0002_second"], False),
        ("render_start",),
        ("render_success",),
        ("unapply_start", migrations["migrations", "0002_second"], False),
        ("unapply_success", migrations["migrations", "0002_second"], False),
        ("unapply_start", migrations["migrations", "0001_initial"], False),
        ("unapply_success", migrations["migrations", "0001_initial"], False),
    ]
    self.assertEqual(call_args_list, expected)
@override_settings(
    INSTALLED_APPS=[
        "migrations.migrations_test_apps.alter_fk.author_app",
        "migrations.migrations_test_apps.alter_fk.book_app",
    ]
)
def test_alter_id_type_with_fk(self) -> None:
    """
    Altering the type of a primary key that is referenced by a foreign
    key in another app applies cleanly.
    """
    try:
        executor = MigrationExecutor(connection)
        self.assertTableNotExists("author_app_author")
        self.assertTableNotExists("book_app_book")
        # Apply initial migrations
        executor.migrate(
            [
                ("author_app", "0001_initial"),
                ("book_app", "0001_initial"),
            ]
        )
        self.assertTableExists("author_app_author")
        self.assertTableExists("book_app_book")
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Apply PK type alteration
        executor.migrate([("author_app", "0002_alter_id")])
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
    finally:
        # We can't simply unapply the migrations here because there is no
        # implicit cast from VARCHAR to INT on the database level.
        with connection.schema_editor() as editor:
            editor.execute(editor.sql_delete_table % {"table": "book_app_book"})
            editor.execute(editor.sql_delete_table % {"table": "author_app_author"})
        self.assertTableNotExists("author_app_author")
        self.assertTableNotExists("book_app_book")
        executor.migrate([("author_app", None)], fake=True)
@override_settings(
    MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_apply_all_replaced_marks_replacement_as_applied(self) -> None:
    """
    Applying all replaced migrations marks replacement as applied (#24628).
    """
    recorder = MigrationRecorder(connection)
    # Place the database in a state where the replaced migrations are
    # partially applied: 0001 is applied, 0002 is not.
    recorder.record_applied("migrations", "0001_initial")
    executor = MigrationExecutor(connection)
    # Use fake because we don't actually have the first migration
    # applied, so the second will fail. And there's no need to actually
    # create/modify tables here, we're just testing the
    # MigrationRecord, which works the same with or without fake.
    executor.migrate([("migrations", "0002_second")], fake=True)
    # Because we've now applied 0001 and 0002 both, their squashed
    # replacement should be marked as applied.
    self.assertIn(
        ("migrations", "0001_squashed_0002"),
        recorder.applied_migrations(),
    )
@override_settings(
    MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_migrate_marks_replacement_applied_even_if_it_did_nothing(self) -> None:
    """
    A new squash migration will be marked as applied even if all its
    replaced migrations were previously already applied (#24628).
    """
    recorder = MigrationRecorder(connection)
    # Record all replaced migrations as applied
    recorder.record_applied("migrations", "0001_initial")
    recorder.record_applied("migrations", "0002_second")
    executor = MigrationExecutor(connection)
    executor.migrate([("migrations", "0001_squashed_0002")])
    # Because 0001 and 0002 are both applied, even though this migrate run
    # didn't apply anything new, their squashed replacement should be
    # marked as applied.
    self.assertIn(
        ("migrations", "0001_squashed_0002"),
        recorder.applied_migrations(),
    )
@override_settings(
    MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_migrate_marks_replacement_unapplied(self) -> None:
    """
    Unapplying a squashed migration also removes its record from the
    applied-migrations table.
    """
    executor = MigrationExecutor(connection)
    executor.migrate([("migrations", "0001_squashed_0002")])
    try:
        self.assertIn(
            ("migrations", "0001_squashed_0002"),
            executor.recorder.applied_migrations(),
        )
    finally:
        # Always migrate back so the database is clean for other tests.
        executor.loader.build_graph()
        executor.migrate([("migrations", None)])
    self.assertNotIn(
        ("migrations", "0001_squashed_0002"),
        executor.recorder.applied_migrations(),
    )
# When the feature is False, the operation and the record won't be
# performed in a transaction and the test will systematically pass.
@skipUnlessDBFeature("can_rollback_ddl")
def test_migrations_applied_and_recorded_atomically(self) -> None:
    """Migrations are applied and recorded atomically."""

    class Migration(migrations.Migration):
        # Minimal in-memory migration used to exercise apply_migration().
        operations = [
            migrations.CreateModel(
                "model",
                [
                    ("id", models.AutoField(primary_key=True)),
                ],
            ),
        ]

    executor = MigrationExecutor(connection)
    with mock.patch(
        "django.db.migrations.executor.MigrationExecutor.record_migration"
    ) as record_migration:
        record_migration.side_effect = RuntimeError("Recording migration failed.")
        with self.assertRaisesMessage(RuntimeError, "Recording migration failed."):
            executor.apply_migration(
                ProjectState(),
                Migration("0001_initial", "record_migration"),
            )
            executor.migrate([("migrations", "0001_initial")])
    # The migration isn't recorded as applied since it failed.
    migration_recorder = MigrationRecorder(connection)
    self.assertIs(
        migration_recorder.migration_qs.filter(
            app="record_migration",
            name="0001_initial",
        ).exists(),
        False,
    )
    # The schema change was rolled back along with the failed record.
    self.assertTableNotExists("record_migration_model")
def test_migrations_not_applied_on_deferred_sql_failure(self) -> None:
    """Migrations are not recorded if deferred SQL application fails."""

    class DeferredSQL:
        # Raises when the schema editor tries to render the deferred SQL.
        def __str__(self):
            raise DatabaseError("Failed to apply deferred SQL")

    class Migration(migrations.Migration):
        atomic = False

        def apply(self, project_state, schema_editor, collect_sql=False):
            schema_editor.deferred_sql.append(DeferredSQL())

    executor = MigrationExecutor(connection)
    with self.assertRaisesMessage(DatabaseError, "Failed to apply deferred SQL"):
        executor.apply_migration(
            ProjectState(),
            Migration("0001_initial", "deferred_sql"),
        )
    # The migration isn't recorded as applied since it failed.
    migration_recorder = MigrationRecorder(connection)
    self.assertIs(
        migration_recorder.migration_qs.filter(
            app="deferred_sql",
            name="0001_initial",
        ).exists(),
        False,
    )
@mock.patch.object(MigrationRecorder, "has_table", return_value=False)
def test_migrate_skips_schema_creation(self, mocked_has_table) -> None:
    """
    The django_migrations table is not created if there are no migrations
    to record.
    """
    executor = MigrationExecutor(connection)
    # 0 queries, since the query for has_table is being mocked.
    with self.assertNumQueries(0):
        executor.migrate([], plan=[])
| ExecutorTests |
python | getsentry__sentry | src/sentry/relay/config/metric_extraction.py | {
"start": 54208,
"end": 54604
class ____(TypedDict):
    """Configuration for a named group of globally defined metrics and/or tags."""

    #: Whether a group of globally defined metrics and/or tags is enabled by default for every project.
    #: This can be overridden in project configs.
    isEnabled: bool
    #: List of metrics to extract.
    metrics: NotRequired[list[MetricSpec]]
    #: List of tags to apply to previously extracted metrics.
    tags: NotRequired[list[TagMapping]]
| MetricExtractionGroup |
python | nedbat__coveragepy | coverage/python.py | {
"start": 4583,
"end": 8753
class ____(FileReporter):
    """Report support for a Python file."""

    def __init__(self, morf: TMorf, coverage: Coverage | None = None) -> None:
        self.coverage = coverage

        filename = source_for_morf(morf)

        fname = filename
        canonicalize = True
        if self.coverage is not None:
            if self.coverage.config.relative_files:
                canonicalize = False
        if canonicalize:
            fname = canonical_filename(filename)
        super().__init__(fname)

        if hasattr(morf, "__name__"):
            # A module object: derive a display name from the dotted module name.
            name = morf.__name__.replace(".", os.sep)
            if os.path.basename(filename).startswith("__init__."):
                name += os.sep + "__init__"
            name += ".py"
        else:
            name = relative_filename(filename)
        self.relname = name

        # Lazily-populated caches.
        self._source: str | None = None
        self._parser: PythonParser | None = None
        self._excluded = None

    def __repr__(self) -> str:
        return f"<PythonFileReporter {self.filename!r}>"

    def relative_filename(self) -> str:
        return self.relname

    @property
    def parser(self) -> PythonParser:
        """Lazily create a :class:`PythonParser`."""
        assert self.coverage is not None
        if self._parser is None:
            self._parser = PythonParser(
                filename=self.filename,
                exclude=self.coverage._exclude_regex("exclude"),
            )
            self._parser.parse_source()
        return self._parser

    def lines(self) -> set[TLineNo]:
        """Return the line numbers of statements in the file."""
        return self.parser.statements

    def multiline_map(self) -> dict[TLineNo, TLineNo]:
        """A map of line numbers to first-line in a multi-line statement."""
        return self.parser.multiline_map

    def excluded_lines(self) -> set[TLineNo]:
        """Return the line numbers of statements in the file."""
        return self.parser.excluded

    def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
        return self.parser.translate_lines(lines)

    def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
        return self.parser.translate_arcs(arcs)

    def no_branch_lines(self) -> set[TLineNo]:
        assert self.coverage is not None
        no_branch = self.parser.lines_matching(
            join_regex(self.coverage.config.partial_list + self.coverage.config.partial_always_list)
        )
        return no_branch

    def arcs(self) -> set[TArc]:
        return self.parser.arcs()

    def exit_counts(self) -> dict[TLineNo, int]:
        return self.parser.exit_counts()

    def missing_arc_description(
        self,
        start: TLineNo,
        end: TLineNo,
        executed_arcs: Iterable[TArc] | None = None,
    ) -> str:
        return self.parser.missing_arc_description(start, end)

    def arc_description(self, start: TLineNo, end: TLineNo) -> str:
        return self.parser.arc_description(start, end)

    def source(self) -> str:
        if self._source is None:
            self._source = get_python_source(self.filename)
        return self._source

    def should_be_python(self) -> bool:
        """Does it seem like this file should contain Python?

        This is used to decide if a file reported as part of the execution of
        a program was really likely to have contained Python in the first
        place.
        """
        # Get the file extension.
        _, ext = os.path.splitext(self.filename)

        # Anything named *.py* should be Python.
        if ext.startswith(".py"):
            return True
        # A file with no extension should be Python.
        if not ext:
            return True
        # Everything else is probably not Python.
        return False

    def source_token_lines(self) -> TSourceTokenLines:
        return source_token_lines(self.source())

    def code_regions(self) -> Iterable[CodeRegion]:
        return code_regions(self.source())

    def code_region_kinds(self) -> Iterable[tuple[str, str]]:
        return [
            ("function", "functions"),
            ("class", "classes"),
        ]
| PythonFileReporter |
python | pytorch__pytorch | .ci/lumen_cli/tests/test_app.py | {
"start": 173,
"end": 1554
class ____(unittest.TestCase):
    """Smoke tests for the lumen CLI argument parsing and build dispatch."""

    @patch("cli.build_cli.register_build.VllmBuildRunner.run", return_value=None)
    @patch("cli.build_cli.register_build.VllmBuildRunner.__init__", return_value=None)
    def test_cli_run_build_external(self, mock_init, mock_run):
        from cli.run import main  # import after patches if needed

        test_args = ["cli.run", "build", "external", "vllm"]
        with patch.object(sys, "argv", test_args):
            # argparse may call sys.exit on error; capture to avoid test aborts
            try:
                main()
            except SystemExit:
                pass

        mock_init.assert_called_once()  # got constructed
        mock_run.assert_called_once_with()  # run() called

    def test_build_help(self):
        test_args = ["cli.run", "build", "--help"]

        with patch.object(sys, "argv", test_args):
            stdout = io.StringIO()
            stderr = io.StringIO()

            # --help always raises SystemExit(0)
            with self.assertRaises(SystemExit) as cm:
                with redirect_stdout(stdout), redirect_stderr(stderr):
                    main()

            self.assertEqual(cm.exception.code, 0)

            output = stdout.getvalue()
            self.assertIn("usage", output)
            self.assertIn("external", output)
# Allow running this test module directly without a test runner.
if __name__ == "__main__":
    unittest.main()
| TestArgparseCLI |
python | google__jax | jax/_src/pallas/mosaic_gpu/helpers.py | {
"start": 1130,
"end": 13545
# NOTE(review): the docstring calls this a "container dataclass" — the
# @dataclasses.dataclass decorator presumably precedes this chunk; confirm.
class ____:
  """Container dataclass for loop iteration information.

  Attributes:
    index: The grid indices corresponding to the current loop iteration.
    local_index: The local iteration index.
    num_local_steps: The total number of local iterations to run. None
      if unknown.
  """
  index: tuple[jax.Array, ...]
  local_index: jax.Array | int
  num_local_steps: jax.Array | int | None
@overload
def nd_loop(
    grid: Sequence[int],
    *,
    collective_axes: Sequence[Hashable] | Hashable,
    tiling: Sequence[int] | None = None,
    init_carry: None = None
) -> Callable[[Callable[[NDLoopInfo], None]], None]:
  ...


@overload
def nd_loop(
    grid: Sequence[int],
    *,
    collective_axes: Sequence[Hashable] | Hashable,
    tiling: Sequence[int] | None = None,
    init_carry: _T
) -> Callable[[Callable[[NDLoopInfo, _T], _T]], _T]:
  ...


def nd_loop(grid, *, collective_axes, tiling=None, init_carry=None):
  """A loop over a multi-dimensional grid partitioned along the given axes.

  The body of the loop a single argument ``loop_info`` which is an NDLoopInfo
  object containing index and iteration information. However if a carry is
  specified, the body will expect a second keyword argument `carry` containing
  the loop carry.

  For example, if ``collective_axes`` is ``"x"`` with :func:`lax.axis_size`
  equal to 4 and the grid is (2, 3), the implementation would produce the
  following iteration order

  +-----------+--------+------------+
  | loop step | index  | axis index |
  +===========+========+============+
  |         0 | (0, 0) |          0 |
  +-----------+--------+------------+
  |         1 | (0, 1) |          1 |
  +-----------+--------+------------+
  |         2 | (0, 2) |          2 |
  +-----------+--------+------------+
  |         3 | (1, 0) |          3 |
  +-----------+--------+------------+
  |         4 | (1, 1) |          0 |
  +-----------+--------+------------+
  |         5 | (1, 2) |          1 |
  +-----------+--------+------------+

  which comes from partitioning the flat iteration space into chunks in an
  interleaved fashion wrt the ``"x"`` axis index.

  Note that in the example the total number of loop steps is not divisible
  by the axis size of ``"x"``, and thus for some ``"x"`` axis indices the
  loop will do one iteration less.

  +------------+------------------+
  | axis index | indices          |
  +============+==================+
  |          0 | (0, 0), (1, 1)   |
  +------------+------------------+
  |          1 | (0, 1), (1, 2)   |
  +------------+------------------+
  |          2 | (0, 2)           |
  +------------+------------------+
  |          3 | (1, 0)           |
  +------------+------------------+

  If ``init_carry`` is passed then ``nd_loop()`` will expect the body to
  take and return the carry. If it's ``None`` then no carry argument is
  expected.

  See also:
    - :func:`jax.experimental.pallas.loop`: A loop over a single dimension.
  """
  axis_index = lax.axis_index(collective_axes)
  axis_size = lax.axis_size(collective_axes)
  if tiling:
    if len(grid) != len(tiling):
      raise ValueError(f"{tiling=} and {grid=} must have same length.")
    if any(dim % tile != 0 for dim, tile in zip(grid, tiling, strict=True)):
      raise ValueError(f"Tiling {tiling} does not divide grid {grid}.")
    # Iterate over the tiled grid: tile indices first, intra-tile indices last.
    tile_grid = tuple(
        dim // tile for dim, tile in zip(grid, tiling, strict=True))
    grid = (*tile_grid, *tiling)
  grid_size = math.prod(grid)
  def decorator(body):
    def wrapper(wave_step, carry):
      nonlocal body
      # Flat step owned by this axis member at this wave: steps are dealt
      # out round-robin across the collective axis.
      step = wave_step * axis_size + axis_index
      # The loop below is conceptually ``jnp.unravel_index``, but it uses
      # ``lax`` APIs instead of ``jax.numpy`` to minimize the number of
      # primitives used.
      index = []
      for grid_dim in reversed(grid):
        grid_dim = lax.convert_element_type(grid_dim, step.dtype)
        index.append(lax.rem(step, grid_dim))
        step = lax.div(step, grid_dim)
      index.reverse()
      if tiling:
        # Recompute index as if the grid was not tiled.
        tile_indices, subtile_indices = index[:len(tiling)], index[len(tiling):]
        untiled_index = []
        for sub_idx, tile_idx, tile_dim in zip(
            subtile_indices, tile_indices, tiling, strict=True):
          untiled_index.append(sub_idx + tile_idx * tile_dim)
        index = untiled_index
      # ``upper`` is closed over; it is assigned below before ``wrapper``
      # is traced by ``fori_loop``, so the forward reference is safe.
      loop_info = NDLoopInfo(
          index=tuple(index),
          local_index=wave_step,
          num_local_steps=upper
      )
      if init_carry is None:
        body(loop_info)
      else:
        return body(loop_info, carry=carry)
    # Members with axis_index < remainder run one extra local step.
    upper = lax.div(grid_size, axis_size) + lax.convert_element_type(
        axis_index < grid_size % axis_size, axis_index.dtype
    )
    return lax.fori_loop(0, upper, wrapper, init_carry)
  return decorator
def format_tcgen05_sparse_metadata(meta):
  """Formats the sparse metadata for tcgen05.mma into the expected format.

  See https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen05-sparse-matrices-sparsity-selector-kind-f16-m128-256
  for the documentation of the required layouts. The array can be copied into
  SMEM, from where ``plgpu.async_copy_sparse_metadata_to_tmem`` can be used to
  copy it over to TMEM.

  Args:
    meta: a uint2 array of shape (M, K // 4, 2) holding the sparsity
      selectors. M must be divisible by 128 and the K extent by 64 for the
      reshape below to succeed.

  Returns:
    The metadata rearranged into shape (M // 128, K // 64, 128, 64), matching
    the layout expected by the TMEM copy (per the PTX docs linked above).
  """
  if meta.dtype != dtypes.uint2:
    raise ValueError(f"Expected metadata dtype to be uint2, got: {meta.dtype}")
  if meta.ndim != 3:
    raise ValueError(
        "Expected metadata to be 3-dimensional (M, K // 4, 2), but it is"
        f" {meta.ndim}D"
    )
  m, k, _2 = meta.shape
  if _2 != 2:
    raise ValueError(
        "Expected the trailing dimension of the metadata to be 2, got:"
        f" {meta.shape[-1]}"
    )
  # Each trailing pair covers 2 groups, so the flattened K extent is 2 * k.
  k *= 2
  return (
      meta.reshape(m // 128, 8, 2, 8, k // 64, 4, 2, 8)
      .transpose(0, 4, 1, 6, 3, 5, 2, 7)
      .reshape(m // 128, k // 64, 128, 64)
  )
def find_swizzle(minor_dim_bits: int, what: str = ""):
  """Picks the widest supported swizzle for a memory region.

  Candidate swizzles are tried from widest (128 bytes) to narrowest
  (16 bytes); the first whose bit width evenly divides the minor dimension
  is returned. The result is usually fed into ``plgpu`` transforms::

    transforms = (
        plgpu.TilingTransform((8, 8 * swizzle // elem_bits)),
        plgpu.SwizzleTransform(swizzle))
    )

  Args:
    minor_dim_bits: Bit count of the minor (last) dimension of the region,
      typically ``dim_size * jnp.finfo(dtype).bits``.
    what: Optional description of the operand, used to make the error
      message more informative.

  Returns:
    The swizzle width in bytes (one of 128, 64, 32, 16).

  Raises:
    ValueError: if no candidate swizzle divides ``minor_dim_bits``.
  """
  for candidate_bytes in (128, 64, 32, 16):
    if minor_dim_bits % (candidate_bytes * 8) == 0:
      return candidate_bytes
  suffix = f" for {what}" if what else ""
  raise ValueError(
      f"No valid out swizzle{suffix}: minor dimension has"
      f" {minor_dim_bits} bits, which is not a multiple of 128 (16 bytes)"
  )
def planar_snake(
    lin_idx: jax.Array, shape: tuple[int, int], minor_dim: int, tile_width: int
):
  """Maps a linear index into an index into ``shape``, preserving locality.

  The index space is covered by a "snaking" curve: the minor dimension is cut
  into tiles of ``tile_width`` elements, and consecutive tiles traverse the
  major dimension in opposite directions, so stepping from one tile to the
  next stays close in 2D space.

  For ``shape=(8, 8)``, ``minor_dim=0`` and ``tile_width=2`` the traversal
  order is::

     0  2  4  6  8 10 12 14
     1  3  5  7  9 11 13 15
    30 28 26 24 22 20 18 16
    31 29 27 25 23 21 19 17
    32 34 36 38 40 42 44 46
    33 35 37 39 41 43 45 47
    62 60 58 56 54 52 50 48
    63 61 59 57 55 53 51 49

  Each pair of rows above is one tile (``minor_dim=0``, ``tile_width=2``);
  note how the column order flips between consecutive tiles.
  """
  width = np.int32(tile_width)
  major_extent = np.int32(shape[1 - minor_dim])
  minor_extent = np.int32(shape[minor_dim])
  # Which tile along the minor dimension the linear index falls into.
  tile_row = lax.div(lin_idx, width * major_extent)

  def coords_in_tile(idx, w):
    # Tiles are (tile_width, major) when minor_dim == 0, (major, tile_width)
    # otherwise.
    minor_off = lax.rem(idx, w)
    major_off = lax.rem(lax.div(idx, w), major_extent)
    minor = tile_row * width + minor_off
    # Every other tile walks the major dimension backwards.
    major = lax.select(
        lax.rem(tile_row, np.int32(2)) == 0,
        major_off,
        major_extent - 1 - major_off,
    )
    return (minor, major) if minor_dim == 0 else (major, minor)

  full_tiles = shape[minor_dim] // width
  full_minor_extent = full_tiles * width
  full_elems = full_tiles * width * major_extent
  in_full_region = lin_idx < full_elems
  # A trailing, narrower tile (when tile_width doesn't divide the minor size)
  # is handled by re-deriving coordinates with the leftover width; the select
  # picks whichever answer applies.
  return jax.tree.map(
      functools.partial(jax.lax.select, in_full_region),
      coords_in_tile(lin_idx, width),
      coords_in_tile(lin_idx - full_elems, minor_extent - full_minor_extent),
  )
# Overload: no carry. The decorated body receives only the loop info and
# returns nothing; the loop itself evaluates to None.
@overload
def dynamic_scheduling_loop(
    grid_names: Sequence[Hashable],
    *,
    thread_axis: Hashable | None = None,
    init_carry: None = None
) -> Callable[[Callable[[NDLoopInfo], None]], None]:
  ...


# Overload: with a carry. The decorated body receives a ``carry`` keyword
# argument and must return the next carry; the loop evaluates to the final
# carry value.
@overload
def dynamic_scheduling_loop(
    grid_names: Sequence[Hashable],
    *,
    thread_axis: Hashable | None = None,
    init_carry: _T
) -> Callable[[Callable[[NDLoopInfo, _T], _T]], _T]:
  ...
def dynamic_scheduling_loop(
    grid_names,
    thread_axis = None,
    init_carry = None):
  """A loop over program instances using dynamic work scheduling.

  This loop will iterate through available program instances until all
  work has been scheduled. The kernel should be instantiated with a grid
  equal to the logical amount of work to be done (as opposed to a persistent
  kernel where the grid is set to the number of cores). Each core running
  this loop will continuously query the next available block of work and
  the loop will terminate when the entire grid has been scheduled.

  Example usage::

    @plgpu.dynamic_scheduling_loop(grid_names)
    def body(loop_info):
      work(loop_info.index)  # do work...

  Args:
    grid_names: The names of the axes in the grid.
    thread_axis: The name of the thread axis. This must be passed in if
      the kernel uses multiple threads.
    init_carry: An optional initial carry for the loop. If passed in, the
      body function should expect a ``carry`` keyword argument and return
      the next carry value.
  """
  # Each thread along thread_axis must arrive at the try-cancel barrier, so
  # size the barrier by the axis size (1 when the kernel is single-threaded).
  if thread_axis is not None:
    num_threads = lax.axis_size(thread_axis)
  else:
    num_threads = 1
  user_carry = init_carry
  def decorator(body):
    # First unit of work: the grid position this program instance was
    # statically launched with.
    grid_idx = tuple(lax.axis_index(axis_name) for axis_name in grid_names)
    success = True
    def _scoped(try_cancel_buffer, try_cancel_barrier):
      def try_cancel_cond(carry):
        # Keep looping while the last query reported that more work was
        # obtained.
        _, success, _, _ = carry
        return success
      def try_cancel_body(carry):
        grid_idx, _, wave_step, user_carry = carry
        # Two buffer/barrier slots, alternating by iteration parity.
        slot = lax.rem(wave_step, jnp.int32(2))
        # Request the next block of work up front; its completion is awaited
        # on the barrier only after the body runs, so the request overlaps
        # with the user's work.
        gpu_primitives.try_cluster_cancel(try_cancel_buffer.at[slot],
                                          try_cancel_barrier.at[slot])
        # num_local_steps is unknown under dynamic scheduling, hence None.
        loop_info = NDLoopInfo(
            index=grid_idx,
            local_index=wave_step,
            num_local_steps=None,
        )
        # Trace-time branch: the carry-less overload neither receives nor
        # returns a carry.
        if user_carry is None:
          body(loop_info)
        else:
          user_carry = body(loop_info, carry=user_carry)
        gpu_primitives.barrier_wait(try_cancel_barrier.at[slot])
        # Read back the next grid position and whether work was obtained.
        grid_idx, success = gpu_primitives.query_cluster_cancel(
            try_cancel_buffer.at[slot],
            grid_names=grid_names)
        return (grid_idx, success, wave_step + jnp.int32(1), user_carry)
      # Carry: (current grid index, keep-going flag, iteration count,
      # user carry). The statically assigned block is processed first.
      init_carry = (grid_idx, success, jnp.int32(0), user_carry)
      final_carry = lax.while_loop(
          try_cancel_cond,
          try_cancel_body,
          init_carry,
      )
      # Only surface the user's final carry when one was provided.
      if user_carry is not None:
        return final_carry[-1]
    # Allocate the double-buffered try-cancel result and its barriers for
    # the duration of the loop.
    return pallas_primitives.run_scoped(
        _scoped,
        try_cancel_buffer=gpu_core.TryClusterCancelResult(2),
        try_cancel_barrier=gpu_core.Barrier(num_arrivals=num_threads,
                                            num_barriers=2),
        collective_axes=thread_axis,
    )
  return decorator
| NDLoopInfo |
python | getsentry__sentry | src/sentry/replays/endpoints/data_export_notifications.py | {
"start": 467,
"end": 854
} | class ____(Endpoint):
"""PubSub notifications endpoint."""
owner = ApiOwner.REPLAY
publish_status = {"POST": ApiPublishStatus.PRIVATE}
permission_classes = (SentryIsAuthenticated,)
def post(self, request: Request) -> Response:
retry_transfer_job_run(request.data, request_run_transfer_job)
return Response("", status=200)
| DataExportNotificationsEndpoint |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.