language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | tests/sentry/users/api/endpoints/test_user_role_details.py | {
"start": 2672,
"end": 3699
} | class ____(UserUserRolesTest):
method = "DELETE"
def test_removes_role(self) -> None:
role = UserRole.objects.create(name="support", permissions=["broadcasts.admin"])
role.users.add(self.user)
role2 = UserRole.objects.create(name="admin", permissions=["users.admin"])
role2.users.add(self.user)
resp = self.get_response("me", "support")
assert resp.status_code == 204
assert not UserRole.objects.filter(users=self.user, name="support").exists()
assert UserRole.objects.filter(users=self.user, name="admin").exists()
def test_invalid_role(self) -> None:
UserRole.objects.create(name="other", permissions=["users.edit"])
resp = self.get_response("me", "blah")
assert resp.status_code == 404
def test_nonexistant_role(self) -> None:
UserRole.objects.create(name="support", permissions=["broadcasts.admin"])
resp = self.get_response("me", "support")
assert resp.status_code == 404
| UserUserRolesDeleteTest |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fragments.py | {
"start": 8544,
"end": 8645
} | class ____(GQLResult):
team: TeamFragment
role: RegistryRoleFragment
| TeamRegistryMemberFragment |
python | django__django | tests/model_forms/models.py | {
"start": 7821,
"end": 8108
} | class ____(models.Model):
title = models.CharField(max_length=40)
author = models.ForeignKey(Writer, models.SET_NULL, blank=True, null=True)
special_id = models.IntegerField(blank=True, null=True, unique=True)
class Meta:
unique_together = ("title", "author")
| Book |
python | PyCQA__pylint | tests/functional/u/used/used_before_assignment_typing.py | {
"start": 1664,
"end": 2490
} | class ____:
"""Type annotation or default values for first level methods can't refer to their own class"""
def incorrect_typing_method(
self, other: MyClass # <3.14:[undefined-variable]
) -> bool:
return self == other
def incorrect_nested_typing_method(
self, other: List[MyClass] # <3.14:[undefined-variable]
) -> bool:
return self == other[0]
def incorrect_default_method(
self, other=MyClass() # [undefined-variable]
) -> bool:
return self == other
def correct_string_typing_method(self, other: "MyClass") -> bool:
return self == other
def correct_inner_typing_method(self) -> bool:
def inner_method(self, other: MyClass) -> bool:
return self == other
return inner_method(self, MyClass())
| MyClass |
python | django__django | tests/admin_views/admin.py | {
"start": 3434,
"end": 3498
} | class ____(admin.TabularInline):
model = Chapter
| ChapterInline |
python | getsentry__sentry | tests/sentry/tasks/test_assemble.py | {
"start": 29386,
"end": 38423
} | class ____(TestCase):
def _create_bundle_and_bind_to_release(self, release, dist, bundle_id, indexing_state, date):
artifact_bundle = ArtifactBundle.objects.create(
organization_id=self.organization.id,
bundle_id=bundle_id,
file=File.objects.create(name="bundle.zip", type="artifact_bundle"),
artifact_count=10,
indexing_state=indexing_state,
date_uploaded=date,
date_added=date,
date_last_modified=date,
)
ReleaseArtifactBundle.objects.create(
organization_id=self.organization.id,
release_name=release,
dist_name=dist,
artifact_bundle=artifact_bundle,
date_added=date,
)
return artifact_bundle
def mock_assemble_result(self) -> AssembleResult:
bundle_file = self.create_artifact_bundle_zip(
fixture_path="artifact_bundle_debug_ids", project=self.project.id
)
blob1 = FileBlob.from_file_with_organization(ContentFile(bundle_file), self.organization)
total_checksum = sha1(bundle_file).hexdigest()
rv = assemble_file(
task=AssembleTask.ARTIFACT_BUNDLE,
org_or_project=self.organization,
name="bundle.zip",
checksum=total_checksum,
chunks=[blob1.checksum],
file_type="artifact.bundle",
)
assert rv is not None
return rv
@patch("sentry.tasks.assemble.index_artifact_bundles_for_release")
def test_index_if_needed_with_no_bundles(
self, index_artifact_bundles_for_release: MagicMock
) -> None:
release = "1.0"
dist = "android"
with ArtifactBundlePostAssembler(
assemble_result=self.mock_assemble_result(),
organization=self.organization,
release=release,
dist=dist,
project_ids=[],
) as post_assembler:
post_assembler._index_bundle_if_needed(
artifact_bundle=None,
release=release,
dist=dist,
)
index_artifact_bundles_for_release.assert_not_called()
@patch("sentry.tasks.assemble.index_artifact_bundles_for_release")
def test_index_if_needed_with_lower_bundles_than_threshold(
self, index_artifact_bundles_for_release
):
release = "1.0"
dist = "android"
self._create_bundle_and_bind_to_release(
release=release,
dist=dist,
bundle_id="2c5b367b-4fef-4db8-849d-b9e79607d630",
indexing_state=ArtifactBundleIndexingState.NOT_INDEXED.value,
date=datetime.now(UTC) - timedelta(hours=1),
)
with ArtifactBundlePostAssembler(
assemble_result=self.mock_assemble_result(),
organization=self.organization,
release=release,
dist=dist,
project_ids=[],
) as post_assembler:
post_assembler._index_bundle_if_needed(
artifact_bundle=None,
release=release,
dist=dist,
)
index_artifact_bundles_for_release.assert_not_called()
@patch("sentry.tasks.assemble.index_artifact_bundles_for_release")
def test_index_if_needed_with_higher_bundles_than_threshold(
self, index_artifact_bundles_for_release
):
release = "1.0"
dist = "android"
self._create_bundle_and_bind_to_release(
release=release,
dist=dist,
bundle_id="2c5b367b-4fef-4db8-849d-b9e79607d630",
indexing_state=ArtifactBundleIndexingState.NOT_INDEXED.value,
date=datetime.now(UTC) - timedelta(hours=2),
)
self._create_bundle_and_bind_to_release(
release=release,
dist=dist,
bundle_id="0cf678f2-0771-4e2f-8ace-d6cea8493f0c",
indexing_state=ArtifactBundleIndexingState.NOT_INDEXED.value,
date=datetime.now(UTC) - timedelta(hours=1),
)
artifact_bundle_3 = self._create_bundle_and_bind_to_release(
release=release,
dist=dist,
bundle_id="0cf678f2-0771-4e2f-8ace-d6cea8493f0d",
indexing_state=ArtifactBundleIndexingState.NOT_INDEXED.value,
date=datetime.now(UTC) - timedelta(hours=1),
)
with ArtifactBundlePostAssembler(
assemble_result=self.mock_assemble_result(),
organization=self.organization,
release=release,
dist=dist,
project_ids=[],
) as post_assembler:
post_assembler._index_bundle_if_needed(
artifact_bundle=artifact_bundle_3,
release=release,
dist=dist,
)
index_artifact_bundles_for_release.assert_called_with(
organization_id=self.organization.id,
artifact_bundles=[(artifact_bundle_3, mock.ANY)],
)
@patch("sentry.tasks.assemble.index_artifact_bundles_for_release")
def test_index_if_needed_with_bundles_already_indexed(
self, index_artifact_bundles_for_release: MagicMock
) -> None:
release = "1.0"
dist = "android"
self._create_bundle_and_bind_to_release(
release=release,
dist=dist,
bundle_id="2c5b367b-4fef-4db8-849d-b9e79607d630",
indexing_state=ArtifactBundleIndexingState.WAS_INDEXED.value,
date=datetime.now(UTC) - timedelta(hours=2),
)
self._create_bundle_and_bind_to_release(
release=release,
dist=dist,
bundle_id="0cf678f2-0771-4e2f-8ace-d6cea8493f0d",
indexing_state=ArtifactBundleIndexingState.WAS_INDEXED.value,
date=datetime.now(UTC) - timedelta(hours=1),
)
with ArtifactBundlePostAssembler(
assemble_result=self.mock_assemble_result(),
organization=self.organization,
release=release,
dist=dist,
project_ids=[],
) as post_assembler:
post_assembler._index_bundle_if_needed(artifact_bundle=None, release=release, dist=dist)
index_artifact_bundles_for_release.assert_not_called()
@patch("sentry.tasks.assemble.index_artifact_bundles_for_release")
def test_index_if_needed_with_newer_bundle_already_stored(
self, index_artifact_bundles_for_release
):
release = "1.0"
dist = "android"
artifact_bundle_1 = self._create_bundle_and_bind_to_release(
release=release,
dist=dist,
bundle_id="2c5b367b-4fef-4db8-849d-b9e79607d630",
indexing_state=ArtifactBundleIndexingState.NOT_INDEXED.value,
date=datetime.now(UTC) - timedelta(hours=1),
)
self._create_bundle_and_bind_to_release(
release=release,
dist=dist,
bundle_id="2c5b367b-4fef-4db8-849d-b9e79607d630",
indexing_state=ArtifactBundleIndexingState.NOT_INDEXED.value,
date=datetime.now(UTC) - timedelta(hours=2),
)
self._create_bundle_and_bind_to_release(
release=release,
dist=dist,
bundle_id="0cf678f2-0771-4e2f-8ace-d6cea8493f0d",
indexing_state=ArtifactBundleIndexingState.NOT_INDEXED.value,
# We simulate that this bundle is into the database but was created after the assembling of bundle 1 started
# its progress but did not finish.
date=datetime.now(UTC) + timedelta(hours=1),
)
with ArtifactBundlePostAssembler(
assemble_result=self.mock_assemble_result(),
organization=self.organization,
release=release,
dist=dist,
project_ids=[],
) as post_assembler:
post_assembler._index_bundle_if_needed(
artifact_bundle=artifact_bundle_1,
release=release,
dist=dist,
)
index_artifact_bundles_for_release.assert_called_with(
organization_id=self.organization.id,
artifact_bundles=[(artifact_bundle_1, mock.ANY)],
)
@use_redis_cluster()
def test_redis_assemble_status() -> None:
task = AssembleTask.DIF
project_id = uuid.uuid4().hex
checksum = uuid.uuid4().hex
# If it doesn't exist, it should return correct values.
assert get_assemble_status(task=task, scope=project_id, checksum=checksum) == (None, None)
# Test setter
set_assemble_status(task, project_id, checksum, ChunkFileState.CREATED, detail="cylons")
assert get_assemble_status(task=task, scope=project_id, checksum=checksum) == (
"created",
"cylons",
)
# Deleting should actually delete it.
delete_assemble_status(task, project_id, checksum=checksum)
assert get_assemble_status(task=task, scope=project_id, checksum=checksum) == (None, None)
| ArtifactBundleIndexingTest |
python | allegroai__clearml | clearml/automation/job.py | {
"start": 32064,
"end": 32560
} | class ____(BaseJob):
"""
Wrapper to an already running Task
"""
def __init__(self, existing_task: Union[Task, str]) -> None: # noqa
super(RunningJob, self).__init__()
self.task = existing_task if isinstance(existing_task, Task) else Task.get_task(task_id=existing_task)
self.task_started = bool(self.task.status != Task.TaskStatusEnum.created)
def force_set_is_cached(self, cached: bool) -> None:
self._is_cached_task = bool(cached)
| RunningJob |
python | plotly__plotly.py | plotly/graph_objs/treemap/_root.py | {
"start": 233,
"end": 2662
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "treemap"
_path_str = "treemap.root"
_valid_props = {"color"}
@property
def color(self):
"""
sets the color of the root node for a sunburst/treemap/icicle
trace. this has no effect when a colorscale is used to set the
markers.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def _prop_descriptions(self):
return """\
color
sets the color of the root node for a
sunburst/treemap/icicle trace. this has no effect when
a colorscale is used to set the markers.
"""
def __init__(self, arg=None, color=None, **kwargs):
"""
Construct a new Root object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.treemap.Root`
color
sets the color of the root node for a
sunburst/treemap/icicle trace. this has no effect when
a colorscale is used to set the markers.
Returns
-------
Root
"""
super().__init__("root")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.treemap.Root
constructor must be a dict or
an instance of :class:`plotly.graph_objs.treemap.Root`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Root |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_spanner.py | {
"start": 1506,
"end": 24466
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.spanner_hook_default_project_id = SpannerHook(gcp_conn_id="test")
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook.get_credentials")
@mock.patch("airflow.providers.google.cloud.hooks.spanner.Client")
def test_spanner_client_creation(self, mock_client, mock_get_creds):
result = self.spanner_hook_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
mock_client.assert_called_once_with(
project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
credentials=mock_get_creds.return_value,
client_info=CLIENT_INFO,
)
assert mock_client.return_value == result
assert self.spanner_hook_default_project_id._client == result
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_get_existing_instance(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
res = self.spanner_hook_default_project_id.get_instance(
instance_id=SPANNER_INSTANCE, project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
get_client.assert_called_once_with(project_id="example-project")
instance_method.assert_called_once_with(instance_id="instance")
assert res is not None
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_get_existing_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
res = self.spanner_hook_default_project_id.get_instance(
instance_id=SPANNER_INSTANCE, project_id="new-project"
)
get_client.assert_called_once_with(project_id="new-project")
instance_method.assert_called_once_with(instance_id="instance")
assert res is not None
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_create_instance(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
create_method = instance_method.return_value.create
create_method.return_value = False
res = self.spanner_hook_default_project_id.create_instance(
instance_id=SPANNER_INSTANCE,
configuration_name=SPANNER_CONFIGURATION,
node_count=1,
display_name=SPANNER_DATABASE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
get_client.assert_called_once_with(project_id="example-project")
instance_method.assert_called_once_with(
instance_id="instance",
configuration_name="configuration",
display_name="database-name",
node_count=1,
)
assert res is None
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_create_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
create_method = instance_method.return_value.create
create_method.return_value = False
res = self.spanner_hook_default_project_id.create_instance(
project_id="new-project",
instance_id=SPANNER_INSTANCE,
configuration_name=SPANNER_CONFIGURATION,
node_count=1,
display_name=SPANNER_DATABASE,
)
get_client.assert_called_once_with(project_id="new-project")
instance_method.assert_called_once_with(
instance_id="instance",
configuration_name="configuration",
display_name="database-name",
node_count=1,
)
assert res is None
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_update_instance(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
update_method = instance_method.return_value.update
update_method.return_value = False
res = self.spanner_hook_default_project_id.update_instance(
instance_id=SPANNER_INSTANCE,
configuration_name=SPANNER_CONFIGURATION,
node_count=2,
display_name=SPANNER_DATABASE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
get_client.assert_called_once_with(project_id="example-project")
instance_method.assert_called_once_with(
instance_id="instance",
configuration_name="configuration",
display_name="database-name",
node_count=2,
)
update_method.assert_called_once_with()
assert res is None
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_update_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
update_method = instance_method.return_value.update
update_method.return_value = False
res = self.spanner_hook_default_project_id.update_instance(
project_id="new-project",
instance_id=SPANNER_INSTANCE,
configuration_name=SPANNER_CONFIGURATION,
node_count=2,
display_name=SPANNER_DATABASE,
)
get_client.assert_called_once_with(project_id="new-project")
instance_method.assert_called_once_with(
instance_id="instance",
configuration_name="configuration",
display_name="database-name",
node_count=2,
)
update_method.assert_called_once_with()
assert res is None
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_delete_instance(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
delete_method = instance_method.return_value.delete
delete_method.return_value = False
res = self.spanner_hook_default_project_id.delete_instance(
instance_id=SPANNER_INSTANCE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
get_client.assert_called_once_with(project_id="example-project")
instance_method.assert_called_once_with("instance")
delete_method.assert_called_once_with()
assert res is None
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_delete_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
delete_method = instance_method.return_value.delete
delete_method.return_value = False
res = self.spanner_hook_default_project_id.delete_instance(
project_id="new-project", instance_id=SPANNER_INSTANCE
)
get_client.assert_called_once_with(project_id="new-project")
instance_method.assert_called_once_with("instance")
delete_method.assert_called_once_with()
assert res is None
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_get_database(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_exists_method = instance_method.return_value.exists
database_exists_method.return_value = True
res = self.spanner_hook_default_project_id.get_database(
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
get_client.assert_called_once_with(project_id="example-project")
instance_method.assert_called_once_with(instance_id="instance")
database_method.assert_called_once_with(database_id="database-name")
database_exists_method.assert_called_once_with()
assert res is not None
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_get_database_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_exists_method = instance_method.return_value.exists
database_exists_method.return_value = True
res = self.spanner_hook_default_project_id.get_database(
project_id="new-project", instance_id=SPANNER_INSTANCE, database_id=SPANNER_DATABASE
)
get_client.assert_called_once_with(project_id="new-project")
instance_method.assert_called_once_with(instance_id="instance")
database_method.assert_called_once_with(database_id="database-name")
database_exists_method.assert_called_once_with()
assert res is not None
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_create_database(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_create_method = database_method.return_value.create
res = self.spanner_hook_default_project_id.create_database(
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
ddl_statements=[],
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
get_client.assert_called_once_with(project_id="example-project")
instance_method.assert_called_once_with(instance_id="instance")
database_method.assert_called_once_with(database_id="database-name", ddl_statements=[])
database_create_method.assert_called_once_with()
assert res is None
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_create_database_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_create_method = database_method.return_value.create
res = self.spanner_hook_default_project_id.create_database(
project_id="new-project",
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
ddl_statements=[],
)
get_client.assert_called_once_with(project_id="new-project")
instance_method.assert_called_once_with(instance_id="instance")
database_method.assert_called_once_with(database_id="database-name", ddl_statements=[])
database_create_method.assert_called_once_with()
assert res is None
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_update_database(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_update_ddl_method = database_method.return_value.update_ddl
res = self.spanner_hook_default_project_id.update_database(
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
ddl_statements=[],
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
get_client.assert_called_once_with(project_id="example-project")
instance_method.assert_called_once_with(instance_id="instance")
database_method.assert_called_once_with(database_id="database-name")
database_update_ddl_method.assert_called_once_with(ddl_statements=[], operation_id=None)
assert res is None
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_update_database_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_update_ddl_method = database_method.return_value.update_ddl
res = self.spanner_hook_default_project_id.update_database(
project_id="new-project",
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
ddl_statements=[],
)
get_client.assert_called_once_with(project_id="new-project")
instance_method.assert_called_once_with(instance_id="instance")
database_method.assert_called_once_with(database_id="database-name")
database_update_ddl_method.assert_called_once_with(ddl_statements=[], operation_id=None)
assert res is None
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_delete_database(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_drop_method = database_method.return_value.drop
database_exists_method = database_method.return_value.exists
database_exists_method.return_value = True
res = self.spanner_hook_default_project_id.delete_database(
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
get_client.assert_called_once_with(project_id="example-project")
instance_method.assert_called_once_with(instance_id="instance")
database_method.assert_called_once_with(database_id="database-name")
database_exists_method.assert_called_once_with()
database_drop_method.assert_called_once_with()
assert res
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_delete_database_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
database_drop_method = database_method.return_value.drop
database_exists_method = database_method.return_value.exists
database_exists_method.return_value = True
res = self.spanner_hook_default_project_id.delete_database(
project_id="new-project", instance_id=SPANNER_INSTANCE, database_id=SPANNER_DATABASE
)
get_client.assert_called_once_with(project_id="new-project")
instance_method.assert_called_once_with(instance_id="instance")
database_method.assert_called_once_with(database_id="database-name")
database_exists_method.assert_called_once_with()
database_drop_method.assert_called_once_with()
assert res
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_execute_dml(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
run_in_transaction_method = database_method.return_value.run_in_transaction
res = self.spanner_hook_default_project_id.execute_dml(
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
queries=[""],
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
get_client.assert_called_once_with(project_id="example-project")
instance_method.assert_called_once_with(instance_id="instance")
database_method.assert_called_once_with(database_id="database-name")
run_in_transaction_method.assert_called_once_with(mock.ANY)
assert res == []
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_execute_dml_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
database_method = instance_method.return_value.database
run_in_transaction_method = database_method.return_value.run_in_transaction
res = self.spanner_hook_default_project_id.execute_dml(
project_id="new-project", instance_id=SPANNER_INSTANCE, database_id=SPANNER_DATABASE, queries=[""]
)
get_client.assert_called_once_with(project_id="new-project")
instance_method.assert_called_once_with(instance_id="instance")
database_method.assert_called_once_with(database_id="database-name")
run_in_transaction_method.assert_called_once_with(mock.ANY)
assert res == []
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_execute_dml_oqueries_row_count(self, get_client):
pass
@pytest.mark.parametrize(
("returned_items", "expected_counts"),
[
pytest.param(
[
("DELETE FROM T WHERE archived = TRUE", 5),
("SELECT * FROM T", 42),
("UPDATE U SET flag = FALSE WHERE x = 1", 3),
],
[5, 3],
),
pytest.param(
[
("DELETE FROM Logs WHERE created_at < '2024-01-01'", 7),
],
[7],
),
pytest.param(
[
(
"UPDATE Accounts SET active=false WHERE last_login < DATE_SUB(CURRENT_DATE(), INTERVAL 365 DAY)",
11,
),
("DELETE FROM Sessions WHERE expires_at < CURRENT_TIMESTAMP()", 23),
],
[11, 23],
),
pytest.param(
[
("SELECT COUNT(*) FROM Users", 50000),
("SELECT * FROM BigTable", 123456),
],
[],
),
pytest.param(
[],
[],
),
],
)
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_execute_dml_parametrized(self, get_client, returned_items, expected_counts):
instance_method = get_client.return_value.instance
database_method = instance_method.return_value.database
run_in_tx = database_method.return_value.run_in_transaction
returned_mapping = OrderedDict(returned_items)
run_in_tx.return_value = returned_mapping
res = self.spanner_hook_default_project_id.execute_dml(
instance_id=SPANNER_INSTANCE,
database_id=SPANNER_DATABASE,
queries=[sql for sql, _ in returned_items],
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
assert res == expected_counts
def test_get_uri(self):
self.spanner_hook_default_project_id._get_conn_params = MagicMock(return_value=SPANNER_CONN_PARAMS)
uri = self.spanner_hook_default_project_id.get_uri()
assert (
uri
== f"spanner+spanner:///projects/{SPANNER_PROJECT_ID}/instances/{SPANNER_INSTANCE}/databases/{SPANNER_DATABASE}"
)
@mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client")
def test_get_sqlalchemy_engine(self, get_client):
self.spanner_hook_default_project_id._get_conn_params = MagicMock(return_value=SPANNER_CONN_PARAMS)
engine = self.spanner_hook_default_project_id.get_sqlalchemy_engine()
assert isinstance(engine, sqlalchemy.engine.Engine)
assert engine.name == "spanner+spanner"
| TestGcpSpannerHookDefaultProjectId |
python | astropy__astropy | astropy/io/ascii/basic.py | {
"start": 6547,
"end": 8846
} | class ____(Basic):
"""CSV (comma-separated-values) table.
This file format may contain rows with fewer entries than the number of
columns, a situation that occurs in output from some spreadsheet editors.
The missing entries are marked as masked in the output table.
Masked values (indicated by an empty '' field value when reading) are
written out in the same way with an empty ('') field. This is different
from the typical default for `astropy.io.ascii` in which missing values are
indicated by ``--``.
By default leading or trailing whitespace in column names is stripped. If
you pass ``strip_column_names=False`` then this is disabled.
Since the `CSV format <https://tools.ietf.org/html/rfc4180>`_ does not
formally support comments, any comments defined for the table via
``tbl.meta['comments']`` are ignored by default. If you would still like to
write those comments then include a keyword ``comment='#'`` to the
``write()`` call.
Example::
num,ra,dec,radius,mag
1,32.23222,10.1211
2,38.12321,-88.1321,2.2,17.0
"""
_format_name = "csv"
_io_registry_format_aliases = ["csv"]
_io_registry_can_write = True
_io_registry_suffix = ".csv"
_description = "Comma-separated-values"
header_class = CsvHeader
data_class = CsvData
def __init__(self, *, strip_column_names=True):
super().__init__()
if not strip_column_names:
self.header.splitter.process_val = None
def inconsistent_handler(self, str_vals, ncols):
"""
Adjust row if it is too short.
If a data row is shorter than the header, add empty values to make it the
right length.
Note that this will *not* be called if the row already matches the header.
Parameters
----------
str_vals : list
A list of value strings from the current row of the table.
ncols : int
The expected number of entries from the table header.
Returns
-------
str_vals : list
List of strings to be parsed into data entries in the output table.
"""
if len(str_vals) < ncols:
str_vals.extend((ncols - len(str_vals)) * [""])
return str_vals
| Csv |
python | tensorflow__tensorflow | tensorflow/python/distribute/combinations.py | {
"start": 8200,
"end": 10884
} | class ____(combinations_lib.TestCombination):
"""Allow to request TPU hardware and skip non-TPU combinations.
This class expects test_combinations to be generated with `NamedDistribution`
wrapping instances of `tf.distribute.Strategy`.
Optionally, the `required_tpus` parameter is supported. TPU hardware is
required, if its argument is `True` or > 0.
Optionally, the `use_cloud_tpu` parameter is supported. If TPU hardware is
required by `required_tpus`, it specifically must be a Cloud TPU (specified
with `--tpu`) if `use_cloud_tpu` is `True`.
Attributes:
TPU_TEST: The environment is considered to have TPU hardware available if
the name of the program contains "test_tpu".
"""
TPU_TEST = False
if sys.argv:
TPU_TEST = "test_tpu" in sys.argv[0]
def should_execute_combination(self, kwargs):
distributions = [
v for v in kwargs.values() if isinstance(v, NamedDistribution)
]
# TODO(isaprykin): Migrate all tests away from using 'required_tpu' in favor
# of 'required_tpus'.
if "required_tpus" in kwargs and "required_tpu" in kwargs:
raise ValueError("Do not use `required_tpu`. Both `required_tpus` and "
"`required_tpu` were specified.")
required_tpus = kwargs.get("required_tpus", None) or kwargs.get(
"required_tpu", None)
if distributions and required_tpus:
raise ValueError("Do not use `required_tpus` and arguments of type "
"NamedDistribution together.")
# TODO(isaprykin): Add support for a particular number of TPUs. Right now
# it's binary.
number_of_required_tpus = max([required_tpus or 0] +
[d.required_tpu or 0 for d in distributions])
use_cloud_tpu = any([kwargs.get("use_cloud_tpu")] +
[d.use_cloud_tpu for d in distributions])
tpu = hasattr(flags.FLAGS, "tpu") and flags.FLAGS.tpu or ""
if not number_of_required_tpus and TPUCombination.TPU_TEST:
return (False, "Test that doesn't require TPUs.")
if number_of_required_tpus and not TPUCombination.TPU_TEST:
return (False, "Test requires a TPU, but it's not available.")
if use_cloud_tpu and not tpu:
return (False, "Test requires a Cloud TPU, but none specified.")
if not use_cloud_tpu and tpu:
return (False, "Test requires local TPU, but Cloud TPU specified.")
return (True, None)
def parameter_modifiers(self):
return [
combinations_lib.OptionalParameter("required_tpus"),
combinations_lib.OptionalParameter("required_tpu"),
combinations_lib.OptionalParameter("use_cloud_tpu"),
]
| TPUCombination |
python | django__django | tests/admin_views/models.py | {
"start": 15489,
"end": 15669
} | class ____(models.Model):
name = models.CharField(max_length=25)
one = models.ForeignKey(CyclicOne, models.CASCADE)
def __str__(self):
return self.name
| CyclicTwo |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-astra/destination_astra/config.py | {
"start": 2177,
"end": 2250
} | class ____(VectorDBConfigModel):
indexing: AstraIndexingModel
| ConfigModel |
python | PyCQA__pylint | tests/functional/m/membership_protocol.py | {
"start": 866,
"end": 987
} | class ____:
def __iter__(self):
return iter((1, 2, 3))
3 in CustomIterable()
# old-style iterable
| CustomIterable |
python | mlflow__mlflow | mlflow/server/auth/db/models.py | {
"start": 2514,
"end": 3213
} | class ____(Base):
__tablename__ = "scorer_permissions"
id = Column(Integer(), primary_key=True)
experiment_id = Column(String(255), nullable=False)
scorer_name = Column(String(256), nullable=False)
user_id = Column(Integer, ForeignKey("users.id"), nullable=False)
permission = Column(String(255))
__table_args__ = (
UniqueConstraint("experiment_id", "scorer_name", "user_id", name="unique_scorer_user"),
)
def to_mlflow_entity(self):
return ScorerPermission(
experiment_id=self.experiment_id,
scorer_name=self.scorer_name,
user_id=self.user_id,
permission=self.permission,
)
| SqlScorerPermission |
python | pydata__xarray | asv_bench/benchmarks/rolling.py | {
"start": 2590,
"end": 2967
} | class ____(Rolling):
def setup(self, *args, **kwargs):
requires_dask()
# TODO: Lazily skipped in CI as it is very demanding and slow.
# Improve times and remove errors.
_skip_slow()
super().setup(**kwargs)
self.ds = self.ds.chunk({"x": 100, "y": 50, "t": 50})
self.da_long = self.da_long.chunk({"x": 10000})
| RollingDask |
python | facelessuser__pymdown-extensions | pymdownx/caret.py | {
"start": 5127,
"end": 5329
} | class ____(util.PatternSequenceProcessor):
"""Just superscript processor."""
PATTERNS = [
util.PatSeqItem(re.compile(SUP, re.DOTALL | re.UNICODE), 'single', 'sup')
]
| CaretSupProcessor |
python | pypa__warehouse | warehouse/metrics/interfaces.py | {
"start": 78,
"end": 2678
} | class ____(Interface):
def gauge(metric, value, tags=None, sample_rate=1):
"""
Record the value of a gauge, optionally setting a list of tags and a
sample rate.
"""
def increment(metric, value=1, tags=None, sample_rate=1):
"""
Increment a counter, optionally setting a value, tags and a sample
rate.
"""
def decrement(metric, value=1, tags=None, sample_rate=1):
"""
Decrement a counter, optionally setting a value, tags and a sample
rate.
"""
def histogram(metric, value, tags=None, sample_rate=1):
"""
Sample a histogram value, optionally setting tags and a sample rate.
"""
def distribution(metric, value, tags=None, sample_rate=1):
"""
Send a global distribution value, optionally setting tags and a sample rate.
"""
def timing(metric, value, tags=None, sample_rate=1):
"""
Record a timing, optionally setting tags and a sample rate.
"""
def timed(metric=None, tags=None, sample_rate=1, use_ms=None):
"""
A decorator or context manager that will measure the distribution of a
function's/context's run time. Optionally specify a list of tags or a
sample rate. If the metric is not defined as a decorator, the module
name and function name will be used. The metric is required as a context
manager.
::
@IMetricService.timed('user.query.time', sample_rate=0.5)
def get_user(user_id):
# Do what you need to ...
pass
# Is equivalent to ...
with IMetricService.timed('user.query.time', sample_rate=0.5):
# Do what you need to ...
pass
# Is equivalent to ...
start = time.time()
try:
get_user(user_id)
finally:
IMetricService.timing('user.query.time', time.time() - start)
"""
def set(metric, value, tags=None, sample_rate=1):
"""
Sample a set value.
"""
def event(
title,
text,
alert_type=None,
aggregation_key=None,
source_type_name=None,
date_happened=None,
priority=None,
tags=None,
hostname=None,
):
"""
Send an event.
"""
def service_check(
check_name, status, tags=None, timestamp=None, hostname=None, message=None
):
"""
Send a service check run.
"""
| IMetricsService |
python | tartley__colorama | colorama/winterm.py | {
"start": 247,
"end": 408
} | class ____:
BLACK = 0
BLUE = 1
GREEN = 2
CYAN = 3
RED = 4
MAGENTA = 5
YELLOW = 6
GREY = 7
# from wincon.h
| WinColor |
python | conda__conda | conda/cli/condarc.py | {
"start": 1887,
"end": 2402
} | class ____:
"""Sentinel value to indicate a missing configuration key.
This is used by ConfigurationFile.get_key() to distinguish between a key that
doesn't exist and a key that exists but has a None value.
"""
def __repr__(self):
return "<MISSING>"
def __bool__(self):
return False
def __eq__(self, other):
return isinstance(other, _MissingSentinel)
def __hash__(self):
return hash(_MissingSentinel)
MISSING = _MissingSentinel()
| _MissingSentinel |
python | huggingface__transformers | src/transformers/models/camembert/modular_camembert.py | {
"start": 1532,
"end": 1624
} | class ____(RobertaPreTrainedModel):
base_model_prefix = "roberta"
| CamembertPreTrainedModel |
python | tensorflow__tensorflow | tensorflow/python/framework/py_context_manager_test.py | {
"start": 917,
"end": 1820
} | class ____(object):
def __init__(self, behavior="basic"):
self.log = []
self.behavior = behavior
def __enter__(self):
self.log.append("__enter__()")
if self.behavior == "raise_from_enter":
raise ValueError("exception in __enter__")
return "var"
def __exit__(self, ex_type, ex_value, ex_tb):
self.log.append("__exit__(%s, %s, %s)" % (ex_type, ex_value, ex_tb))
if self.behavior == "raise_from_exit":
raise ValueError("exception in __exit__")
if self.behavior == "suppress_exception":
return True
# Expected log when the body doesn't raise an exception.
NO_EXCEPTION_LOG = """\
__enter__()
body('var')
__exit__(None, None, None)"""
# Expected log when the body does raise an exception. (Regular expression.)
EXCEPTION_LOG = """\
__enter__\\(\\)
body\\('var'\\)
__exit__\\(<class 'ValueError'>, Foo, <traceback object.*>\\)"""
| TestContextManager |
python | allegroai__clearml | clearml/backend_api/services/v2_23/projects.py | {
"start": 147088,
"end": 148181
} | class ____(Response):
"""
Response of projects.move endpoint.
:param moved: The number of projects moved
:type moved: int
"""
_service = "projects"
_action = "move"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"moved": {
"description": "The number of projects moved",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, moved: Optional[int] = None, **kwargs: Any) -> None:
super(MoveResponse, self).__init__(**kwargs)
self.moved = moved
@schema_property("moved")
def moved(self) -> Optional[int]:
return self._property_moved
@moved.setter
def moved(self, value: Optional[int]) -> None:
if value is None:
self._property_moved = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "moved", six.integer_types)
self._property_moved = value
| MoveResponse |
python | ansible__ansible | lib/ansible/plugins/lookup/fileglob.py | {
"start": 1829,
"end": 3056
} | class ____(LookupBase):
def run(self, terms, variables=None, **kwargs):
ret = []
for term in terms:
term_file = os.path.basename(term)
found_paths = []
if term_file != term:
found_paths.append(self.find_file_in_search_path(variables, 'files', os.path.dirname(term)))
else:
# no dir, just file, so use paths and 'files' paths instead
if 'ansible_search_path' in variables:
paths = variables['ansible_search_path']
else:
paths = [self.get_basedir(variables)]
for p in paths:
found_paths.append(os.path.join(p, 'files'))
found_paths.append(p)
for dwimmed_path in found_paths:
if dwimmed_path:
globbed = glob.glob(to_bytes(os.path.join(dwimmed_path, term_file), errors='surrogate_or_strict'))
term_results = [to_text(g, errors='surrogate_or_strict') for g in globbed if os.path.isfile(g)]
if term_results:
ret.extend(term_results)
break
return ret
| LookupModule |
python | django__django | tests/test_utils/test_serializemixin.py | {
"start": 90,
"end": 450
} | class ____(SimpleTestCase):
def test_init_without_lockfile(self):
msg = (
"ExampleTests.lockfile isn't set. Set it to a unique value in the "
"base class."
)
with self.assertRaisesMessage(ValueError, msg):
class ExampleTests(SerializeMixin, SimpleTestCase):
pass
| TestSerializeMixin |
python | mlflow__mlflow | mlflow/server/auth/entities.py | {
"start": 0,
"end": 2883
} | class ____:
def __init__(
self,
id_,
username,
password_hash,
is_admin,
experiment_permissions=None,
registered_model_permissions=None,
scorer_permissions=None,
):
self._id = id_
self._username = username
self._password_hash = password_hash
self._is_admin = is_admin
self._experiment_permissions = experiment_permissions
self._registered_model_permissions = registered_model_permissions
self._scorer_permissions = scorer_permissions
@property
def id(self):
return self._id
@property
def username(self):
return self._username
@property
def password_hash(self):
return self._password_hash
@property
def is_admin(self):
return self._is_admin
@is_admin.setter
def is_admin(self, is_admin):
self._is_admin = is_admin
@property
def experiment_permissions(self):
return self._experiment_permissions
@experiment_permissions.setter
def experiment_permissions(self, experiment_permissions):
self._experiment_permissions = experiment_permissions
@property
def registered_model_permissions(self):
return self._registered_model_permissions
@registered_model_permissions.setter
def registered_model_permissions(self, registered_model_permissions):
self._registered_model_permissions = registered_model_permissions
@property
def scorer_permissions(self):
return self._scorer_permissions
@scorer_permissions.setter
def scorer_permissions(self, scorer_permissions):
self._scorer_permissions = scorer_permissions
def to_json(self):
return {
"id": self.id,
"username": self.username,
"is_admin": self.is_admin,
"experiment_permissions": [p.to_json() for p in self.experiment_permissions],
"registered_model_permissions": [
p.to_json() for p in self.registered_model_permissions
],
"scorer_permissions": [p.to_json() for p in self.scorer_permissions],
}
@classmethod
def from_json(cls, dictionary):
return cls(
id_=dictionary["id"],
username=dictionary["username"],
password_hash="REDACTED",
is_admin=dictionary["is_admin"],
experiment_permissions=[
ExperimentPermission.from_json(p) for p in dictionary["experiment_permissions"]
],
registered_model_permissions=[
RegisteredModelPermission.from_json(p)
for p in dictionary["registered_model_permissions"]
],
scorer_permissions=[
ScorerPermission.from_json(p) for p in dictionary["scorer_permissions"]
],
)
| User |
python | docker__docker-py | tests/integration/api_container_test.py | {
"start": 32635,
"end": 36520
} | class ____(BaseAPIIntegrationTest):
def test_logs(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
TEST_IMG, f'echo {snippet}'
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
exitcode = self.client.wait(id)['StatusCode']
assert exitcode == 0
logs = self.client.logs(id)
assert logs == f"{snippet}\n".encode(encoding='ascii')
def test_logs_tail_option(self):
snippet = '''Line1
Line2'''
container = self.client.create_container(
TEST_IMG, f'echo "{snippet}"'
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
exitcode = self.client.wait(id)['StatusCode']
assert exitcode == 0
logs = self.client.logs(id, tail=1)
assert logs == 'Line2\n'.encode(encoding='ascii')
def test_logs_streaming_and_follow(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
TEST_IMG, f'echo {snippet}'
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
logs = b''
for chunk in self.client.logs(id, stream=True, follow=True):
logs += chunk
exitcode = self.client.wait(id)['StatusCode']
assert exitcode == 0
assert logs == f"{snippet}\n".encode(encoding='ascii')
@pytest.mark.timeout(5)
@pytest.mark.skipif(os.environ.get('DOCKER_HOST', '').startswith('ssh://'),
reason='No cancellable streams over SSH')
def test_logs_streaming_and_follow_and_cancel(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
TEST_IMG, f'sh -c "echo \\"{snippet}\\" && sleep 3"'
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
logs = b''
generator = self.client.logs(id, stream=True, follow=True)
threading.Timer(1, generator.close).start()
for chunk in generator:
logs += chunk
assert logs == f"{snippet}\n".encode(encoding='ascii')
def test_logs_with_dict_instead_of_id(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
TEST_IMG, f'echo {snippet}'
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
exitcode = self.client.wait(id)['StatusCode']
assert exitcode == 0
logs = self.client.logs(container)
assert logs == f"{snippet}\n".encode(encoding='ascii')
def test_logs_with_tail_0(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
TEST_IMG, f'echo "{snippet}"'
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
exitcode = self.client.wait(id)['StatusCode']
assert exitcode == 0
logs = self.client.logs(id, tail=0)
assert logs == ''.encode(encoding='ascii')
@requires_api_version('1.35')
def test_logs_with_until(self):
snippet = 'Shanghai Teahouse (Hong Meiling)'
container = self.client.create_container(
TEST_IMG, f'echo "{snippet}"'
)
self.tmp_containers.append(container)
self.client.start(container)
exitcode = self.client.wait(container)['StatusCode']
assert exitcode == 0
logs_until_1 = self.client.logs(container, until=1)
assert logs_until_1 == b''
logs_until_now = self.client.logs(container, datetime.now())
assert logs_until_now == f"{snippet}\n".encode(encoding='ascii')
| LogsTest |
python | openai__gym | gym/error.py | {
"start": 5390,
"end": 5545
} | class ____(Exception):
"""The space is a custom gym.Space instance, and is not supported by `AsyncVectorEnv` with `shared_memory=True`."""
| CustomSpaceError |
python | PyCQA__pylint | tests/functional/r/regression_02/regression_protected_access.py | {
"start": 209,
"end": 436
} | class ____:
"""Test class"""
@staticmethod
def _a_private_method():
"""Private method that references the class itself"""
return MySecondClass.MyClass._a_private_method() # [protected-access]
| MyClass |
python | keras-team__keras | keras/src/layers/preprocessing/stft_spectrogram.py | {
"start": 290,
"end": 15058
} | class ____(layers.Layer):
"""Layer to compute the Short-Time Fourier Transform (STFT) on a 1D signal.
A layer that computes Spectrograms of the input signal to produce
a spectrogram. This layers utilizes Short-Time Fourier Transform (STFT) by
The layer computes Spectrograms based on STFT by utilizing convolution
kernels, which allows parallelization on GPUs and trainable kernels for
fine-tuning support. This layer allows different modes of output
(e.g., log-scaled magnitude, phase, power spectral density, etc.) and
provides flexibility in windowing, padding, and scaling options for the
STFT calculation.
Examples:
Apply it as a non-trainable preprocessing layer on 3 audio tracks of
1 channel, 10 seconds and sampled at 16 kHz.
>>> layer = keras.layers.STFTSpectrogram(
... mode='log',
... frame_length=256,
... frame_step=128, # 50% overlap
... fft_length=512,
... window="hann",
... padding="valid",
... trainable=False, # non-trainable, preprocessing only
... )
>>> layer(keras.random.uniform(shape=(3, 160000, 1))).shape
(3, 1249, 257)
Apply it as a trainable processing layer on 3 stereo audio tracks of
2 channels, 10 seconds and sampled at 16 kHz. This is initialized as the
non-trainable layer, but then can be trained jointly within a model.
>>> layer = keras.layers.STFTSpectrogram(
... mode='log',
... frame_length=256,
... frame_step=128, # 50% overlap
... fft_length=512,
... window="hamming", # hamming windowing function
... padding="same", # padding to preserve the time dimension
... trainable=True, # trainable, this is the default in keras
... )
>>> layer(keras.random.uniform(shape=(3, 160000, 2))).shape
(3, 1250, 514)
Similar to the last example, but add an extra dimension so the output is
an image to be used with image models. We apply this here on a signal of
3 input channels to output an image tensor, hence is directly applicable
with an image model.
>>> layer = keras.layers.STFTSpectrogram(
... mode='log',
... frame_length=256,
... frame_step=128,
... fft_length=512,
... padding="same",
... expand_dims=True, # this adds the extra dimension
... )
>>> layer(keras.random.uniform(shape=(3, 160000, 3))).shape
(3, 1250, 257, 3)
Args:
mode: String, the output type of the spectrogram. Can be one of
`"log"`, `"magnitude`", `"psd"`, `"real`", `"imag`", `"angle`",
`"stft`". Defaults to `"log`".
frame_length: Integer, The length of each frame (window) for STFT in
samples. Defaults to 256.
frame_step: Integer, the step size (hop length) between
consecutive frames. If not provided, defaults to half the
frame_length. Defaults to `frame_length // 2`.
fft_length: Integer, the size of frequency bins used in the Fast-Fourier
Transform (FFT) to apply to each frame. Should be greater than or
equal to `frame_length`. Recommended to be a power of two. Defaults
to the smallest power of two that is greater than or equal
to `frame_length`.
window: (String or array_like), the windowing function to apply to each
frame. Can be `"hann`" (default), `"hamming`", or a custom window
provided as an array_like.
periodic: Boolean, if True, the window function will be treated as
periodic. Defaults to `False`.
scaling: String, type of scaling applied to the window. Can be
`"density`", `"spectrum`", or None. Default is `"density`".
padding: String, padding strategy. Can be `"valid`" or `"same`".
Defaults to `"valid"`.
expand_dims: Boolean, if True, will expand the output into spectrograms
into two dimensions to be compatible with image models.
Defaults to `False`.
data_format: String, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, weight)`. Defaults to `"channels_last"`.
Raises:
ValueError: If an invalid value is provided for `"mode`", `"scaling`",
`"padding`", or other input arguments.
TypeError: If the input data type is not one of `"float16`",
`"float32`", or `"float64`".
Input shape:
A 3D tensor of shape `(batch_size, time_length, input_channels)`, if
`data_format=="channels_last"`, and of shape
`(batch_size, input_channels, time_length)` if
`data_format=="channels_first"`, where `time_length` is the length of
the input signal, and `input_channels` is the number of input channels.
The same kernels are applied to each channel independently.
Output shape:
If `data_format=="channels_first" and not expand_dims`, a 3D tensor:
`(batch_size, input_channels * freq_channels, new_time_length)`
If `data_format=="channels_last" and not expand_dims`, a 3D tensor:
`(batch_size, new_time_length, input_channels * freq_channels)`
If `data_format=="channels_first" and expand_dims`, a 4D tensor:
`(batch_size, input_channels, new_time_length, freq_channels)`
If `data_format=="channels_last" and expand_dims`, a 4D tensor:
`(batch_size, new_time_length, freq_channels, input_channels)`
where `new_time_length` depends on the padding, and `freq_channels` is
the number of FFT bins `(fft_length // 2 + 1)`.
"""
def __init__(
self,
mode="log",
frame_length=256,
frame_step=None,
fft_length=None,
window="hann",
periodic=False,
scaling="density",
padding="valid",
expand_dims=False,
data_format=None,
**kwargs,
):
if frame_step is not None and (
frame_step > frame_length or frame_step < 1
):
raise ValueError(
"`frame_step` should be a positive integer not greater than "
f"`frame_length`. Received frame_step={frame_step}, "
f"frame_length={frame_length}"
)
if fft_length is not None and fft_length < frame_length:
raise ValueError(
"`fft_length` should be not less than `frame_length`. "
f"Received fft_length={fft_length}, frame_length={frame_length}"
)
if fft_length is not None and (fft_length & -fft_length) != fft_length:
warnings.warn(
"`fft_length` is recommended to be a power of two. "
f"Received fft_length={fft_length}"
)
all_modes = ["log", "magnitude", "psd", "real", "imag", "angle", "stft"]
if mode not in all_modes:
raise ValueError(
"Output mode is invalid, it must be one of "
f"{', '.join(all_modes)}. Received: mode={mode}"
)
if scaling is not None and scaling not in ["density", "spectrum"]:
raise ValueError(
"Scaling is invalid, it must be `None`, 'density' "
f"or 'spectrum'. Received scaling={scaling}"
)
if padding not in ["valid", "same"]:
raise ValueError(
"Padding is invalid, it should be 'valid', 'same'. "
f"Received: padding={padding}"
)
if isinstance(window, str):
# throws an exception for invalid window function
scipy.signal.get_window(window, 1)
super().__init__(**kwargs)
self.mode = mode
self.frame_length = frame_length
self.frame_step = frame_step
self._frame_step = frame_step or self.frame_length // 2
self.fft_length = fft_length
self._fft_length = fft_length or (
2 ** int(math.ceil(math.log2(frame_length)))
)
self.window = window
self.periodic = periodic
self.scaling = scaling
self.padding = padding
self.expand_dims = expand_dims
self.data_format = backend.standardize_data_format(data_format)
self.input_spec = layers.input_spec.InputSpec(ndim=3)
def build(self, input_shape):
shape = (self.frame_length, 1, self._fft_length // 2 + 1)
if self.mode != "imag":
self.real_kernel = self.add_weight(
name="real_kernel",
shape=shape,
initializer=initializers.STFT(
"real", self.window, self.scaling, self.periodic
),
)
if self.mode != "real":
self.imag_kernel = self.add_weight(
name="imag_kernel",
shape=shape,
initializer=initializers.STFT(
"imag", self.window, self.scaling, self.periodic
),
)
def _adjust_shapes(self, outputs):
_, channels, freq_channels, time_seq = ops.shape(outputs)
batch_size = -1
if self.data_format == "channels_last":
if self.expand_dims:
outputs = ops.transpose(outputs, [0, 3, 2, 1])
# [batch_size, time_seq, freq_channels, input_channels]
else:
outputs = ops.reshape(
outputs,
[batch_size, channels * freq_channels, time_seq],
)
# [batch_size, input_channels * freq_channels, time_seq]
outputs = ops.transpose(outputs, [0, 2, 1])
else:
if self.expand_dims:
outputs = ops.transpose(outputs, [0, 1, 3, 2])
# [batch_size, channels, time_seq, freq_channels]
else:
outputs = ops.reshape(
outputs,
[batch_size, channels * freq_channels, time_seq],
)
return outputs
def _apply_conv(self, inputs, kernel):
if self.data_format == "channels_last":
_, time_seq, channels = ops.shape(inputs)
inputs = ops.transpose(inputs, [0, 2, 1])
inputs = ops.reshape(inputs, [-1, time_seq, 1])
else:
_, channels, time_seq = ops.shape(inputs)
inputs = ops.reshape(inputs, [-1, 1, time_seq])
outputs = ops.conv(
inputs,
ops.cast(kernel, backend.standardize_dtype(inputs.dtype)),
padding=self.padding,
strides=self._frame_step,
data_format=self.data_format,
)
batch_size = -1
if self.data_format == "channels_last":
_, time_seq, freq_channels = ops.shape(outputs)
outputs = ops.transpose(outputs, [0, 2, 1])
outputs = ops.reshape(
outputs,
[batch_size, channels, freq_channels, time_seq],
)
else:
_, freq_channels, time_seq = ops.shape(outputs)
outputs = ops.reshape(
outputs,
[batch_size, channels, freq_channels, time_seq],
)
return outputs
def call(self, inputs):
dtype = inputs.dtype
if backend.standardize_dtype(dtype) not in {
"float16",
"float32",
"float64",
}:
raise TypeError(
"Invalid input type. Expected `float16`, `float32` or "
f"`float64`. Received: input type={dtype}"
)
real_signal = None
imag_signal = None
power = None
if self.mode != "imag":
real_signal = self._apply_conv(inputs, self.real_kernel)
if self.mode != "real":
imag_signal = self._apply_conv(inputs, self.imag_kernel)
if self.mode == "real":
return self._adjust_shapes(real_signal)
elif self.mode == "imag":
return self._adjust_shapes(imag_signal)
elif self.mode == "angle":
return self._adjust_shapes(ops.arctan2(imag_signal, real_signal))
elif self.mode == "stft":
return self._adjust_shapes(
ops.concatenate([real_signal, imag_signal], axis=2)
)
else:
power = ops.square(real_signal) + ops.square(imag_signal)
if self.mode == "psd":
return self._adjust_shapes(
power
+ ops.pad(
power[:, :, 1:-1, :], [[0, 0], [0, 0], [1, 1], [0, 0]]
)
)
linear_stft = self._adjust_shapes(
ops.sqrt(ops.maximum(power, backend.epsilon()))
)
if self.mode == "magnitude":
return linear_stft
else:
return ops.log(ops.maximum(linear_stft, backend.epsilon()))
def compute_output_shape(self, input_shape):
if self.data_format == "channels_last":
channels = input_shape[-1]
else:
channels = input_shape[1]
freq_channels = self._fft_length // 2 + 1
if self.mode == "stft":
freq_channels *= 2
shape = ops.operation_utils.compute_conv_output_shape(
input_shape,
freq_channels * channels,
(self.frame_length,),
strides=self._frame_step,
padding=self.padding,
data_format=self.data_format,
)
if self.data_format == "channels_last":
batch_size, time_seq, _ = shape
else:
batch_size, _, time_seq = shape
if self.expand_dims:
if self.data_format == "channels_last":
return (batch_size, time_seq, freq_channels, channels)
else:
return (batch_size, channels, time_seq, freq_channels)
return shape
def get_config(self):
config = super().get_config()
config.update(
{
"mode": self.mode,
"frame_length": self.frame_length,
"frame_step": self.frame_step,
"fft_length": self.fft_length,
"window": self.window,
"periodic": self.periodic,
"scaling": self.scaling,
"padding": self.padding,
"data_format": self.data_format,
"expand_dims": self.expand_dims,
}
)
return config
| STFTSpectrogram |
python | matplotlib__matplotlib | lib/matplotlib/colors.py | {
"start": 121111,
"end": 136333
} | class ____(Norm):
"""
A class which contains multiple scalar norms.
"""
def __init__(self, norms, vmin=None, vmax=None, clip=None):
"""
Parameters
----------
norms : list of (str or `Normalize`)
The constituent norms. The list must have a minimum length of 1.
vmin, vmax : None or list of (float or None)
Limits of the constituent norms.
If a list, one value is assigned to each of the constituent
norms.
If None, the limits of the constituent norms
are not changed.
clip : None or list of bools, default: None
Determines the behavior for mapping values outside the range
``[vmin, vmax]`` for the constituent norms.
If a list, each value is assigned to each of the constituent
norms.
If None, the behaviour of the constituent norms is not changed.
"""
if cbook.is_scalar_or_string(norms):
raise ValueError(
"MultiNorm must be assigned an iterable of norms, where each "
f"norm is of type `str`, or `Normalize`, not {type(norms)}")
if len(norms) < 1:
raise ValueError("MultiNorm must be assigned at least one norm")
def resolve(norm):
if isinstance(norm, str):
scale_cls = _api.check_getitem(scale._scale_mapping, norm=norm)
return mpl.colorizer._auto_norm_from_scale(scale_cls)()
elif isinstance(norm, Normalize):
return norm
else:
raise ValueError(
"Each norm assigned to MultiNorm must be "
f"of type `str`, or `Normalize`, not {type(norm)}")
self._norms = tuple(resolve(norm) for norm in norms)
self.callbacks = cbook.CallbackRegistry(signals=["changed"])
self.vmin = vmin
self.vmax = vmax
self.clip = clip
for n in self._norms:
n.callbacks.connect('changed', self._changed)
@property
def n_components(self):
"""Number of norms held by this `MultiNorm`."""
return len(self._norms)
@property
def norms(self):
"""The individual norms held by this `MultiNorm`."""
return self._norms
@property
def vmin(self):
"""The lower limit of each constituent norm."""
return tuple(n.vmin for n in self._norms)
@vmin.setter
def vmin(self, values):
if values is None:
return
if not np.iterable(values) or len(values) != self.n_components:
raise ValueError("*vmin* must have one component for each norm. "
f"Expected an iterable of length {self.n_components}, "
f"but got {values!r}")
with self.callbacks.blocked():
for norm, v in zip(self.norms, values):
norm.vmin = v
self._changed()
@property
def vmax(self):
"""The upper limit of each constituent norm."""
return tuple(n.vmax for n in self._norms)
@vmax.setter
def vmax(self, values):
if values is None:
return
if not np.iterable(values) or len(values) != self.n_components:
raise ValueError("*vmax* must have one component for each norm. "
f"Expected an iterable of length {self.n_components}, "
f"but got {values!r}")
with self.callbacks.blocked():
for norm, v in zip(self.norms, values):
norm.vmax = v
self._changed()
@property
def clip(self):
"""The clip behaviour of each constituent norm."""
return tuple(n.clip for n in self._norms)
@clip.setter
def clip(self, values):
if values is None:
return
if not np.iterable(values) or len(values) != self.n_components:
raise ValueError("*clip* must have one component for each norm. "
f"Expected an iterable of length {self.n_components}, "
f"but got {values!r}")
with self.callbacks.blocked():
for norm, v in zip(self.norms, values):
norm.clip = v
self._changed()
def _changed(self):
"""
Call this whenever the norm is changed to notify all the
callback listeners to the 'changed' signal.
"""
self.callbacks.process('changed')
def __call__(self, values, clip=None):
"""
Normalize the data and return the normalized data.
Each component of the input is normalized via the constituent norm.
Parameters
----------
values : array-like
The input data, as an iterable or a structured numpy array.
- If iterable, must be of length `n_components`. Each element can be a
scalar or array-like and is normalized through the corresponding norm.
- If structured array, must have `n_components` fields. Each field
is normalized through the corresponding norm.
clip : list of bools or None, optional
Determines the behavior for mapping values outside the range
``[vmin, vmax]``. See the description of the parameter *clip* in
`.Normalize`.
If ``None``, defaults to ``self.clip`` (which defaults to
``False``).
Returns
-------
tuple
Normalized input values
Notes
-----
If not already initialized, ``self.vmin`` and ``self.vmax`` are
initialized using ``self.autoscale_None(values)``.
"""
if clip is None:
clip = self.clip
if not np.iterable(clip) or len(clip) != self.n_components:
raise ValueError("*clip* must have one component for each norm. "
f"Expected an iterable of length {self.n_components}, "
f"but got {clip!r}")
values = self._iterable_components_in_data(values, self.n_components)
result = tuple(n(v, clip=c) for n, v, c in zip(self.norms, values, clip))
return result
def inverse(self, values):
"""
Map the normalized values (i.e., index in the colormap) back to data values.
Parameters
----------
values : array-like
The input data, as an iterable or a structured numpy array.
- If iterable, must be of length `n_components`. Each element can be a
scalar or array-like and is mapped through the corresponding norm.
- If structured array, must have `n_components` fields. Each field
is mapped through the the corresponding norm.
"""
values = self._iterable_components_in_data(values, self.n_components)
result = tuple(n.inverse(v) for n, v in zip(self.norms, values))
return result
def autoscale(self, A):
"""
For each constituent norm, set *vmin*, *vmax* to min, max of the corresponding
component in *A*.
Parameters
----------
A : array-like
The input data, as an iterable or a structured numpy array.
- If iterable, must be of length `n_components`. Each element
is used for the limits of one constituent norm.
- If structured array, must have `n_components` fields. Each field
is used for the limits of one constituent norm.
"""
with self.callbacks.blocked():
A = self._iterable_components_in_data(A, self.n_components)
for n, a in zip(self.norms, A):
n.autoscale(a)
self._changed()
def autoscale_None(self, A):
"""
If *vmin* or *vmax* are not set on any constituent norm,
use the min/max of the corresponding component in *A* to set them.
Parameters
----------
A : array-like
The input data, as an iterable or a structured numpy array.
- If iterable, must be of length `n_components`. Each element
is used for the limits of one constituent norm.
- If structured array, must have `n_components` fields. Each field
is used for the limits of one constituent norm.
"""
with self.callbacks.blocked():
A = self._iterable_components_in_data(A, self.n_components)
for n, a in zip(self.norms, A):
n.autoscale_None(a)
self._changed()
def scaled(self):
"""Return whether both *vmin* and *vmax* are set on all constituent norms."""
return all(n.scaled() for n in self.norms)
@staticmethod
def _iterable_components_in_data(data, n_components):
"""
Provides an iterable over the components contained in the data.
An input array with `n_components` fields is returned as a tuple of length n
referencing slices of the original array.
Parameters
----------
data : array-like
The input data, as an iterable or a structured numpy array.
- If iterable, must be of length `n_components`
- If structured array, must have `n_components` fields.
Returns
-------
tuple of np.ndarray
"""
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# structured array
if len(data.dtype.fields) != n_components:
raise ValueError(
"Structured array inputs to MultiNorm must have the same "
"number of fields as components in the MultiNorm. Expected "
f"{n_components}, but got {len(data.dtype.fields)} fields"
)
else:
return tuple(data[field] for field in data.dtype.names)
try:
n_elements = len(data)
except TypeError:
raise ValueError("MultiNorm expects a sequence with one element per "
f"component as input, but got {data!r} instead")
if n_elements != n_components:
if isinstance(data, np.ndarray) and data.shape[-1] == n_components:
if len(data.shape) == 2:
raise ValueError(
f"MultiNorm expects a sequence with one element per component. "
"You can use `data_transposed = data.T` "
"to convert the input data of shape "
f"{data.shape} to a compatible shape {data.shape[::-1]}")
else:
raise ValueError(
f"MultiNorm expects a sequence with one element per component. "
"You can use `data_as_list = [data[..., i] for i in "
"range(data.shape[-1])]` to convert the input data of shape "
f" {data.shape} to a compatible list")
raise ValueError(
"MultiNorm expects a sequence with one element per component. "
f"This MultiNorm has {n_components} components, but got a sequence "
f"with {n_elements} elements"
)
return tuple(data[i] for i in range(n_elements))
def rgb_to_hsv(arr):
"""
Convert an array of float RGB values (in the range [0, 1]) to HSV values.
Parameters
----------
arr : (..., 3) array-like
All values must be in the range [0, 1]
Returns
-------
(..., 3) `~numpy.ndarray`
Colors converted to HSV values in range [0, 1]
"""
arr = np.asarray(arr)
# check length of the last dimension, should be _some_ sort of rgb
if arr.shape[-1] != 3:
raise ValueError("Last dimension of input array must be 3; "
f"shape {arr.shape} was found.")
in_shape = arr.shape
arr = np.array(
arr, copy=False,
dtype=np.promote_types(arr.dtype, np.float32), # Don't work on ints.
ndmin=2, # In case input was 1D.
)
out = np.zeros_like(arr)
arr_max = arr.max(-1)
# Check if input is in the expected range
if np.any(arr_max > 1):
raise ValueError(
"Input array must be in the range [0, 1]. "
f"Found a maximum value of {arr_max.max()}"
)
if arr.min() < 0:
raise ValueError(
"Input array must be in the range [0, 1]. "
f"Found a minimum value of {arr.min()}"
)
ipos = arr_max > 0
delta = np.ptp(arr, -1)
s = np.zeros_like(delta)
s[ipos] = delta[ipos] / arr_max[ipos]
ipos = delta > 0
# red is max
idx = (arr[..., 0] == arr_max) & ipos
out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
# green is max
idx = (arr[..., 1] == arr_max) & ipos
out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
# blue is max
idx = (arr[..., 2] == arr_max) & ipos
out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]
out[..., 0] = (out[..., 0] / 6.0) % 1.0
out[..., 1] = s
out[..., 2] = arr_max
return out.reshape(in_shape)
def hsv_to_rgb(hsv):
"""
Convert HSV values to RGB.
Parameters
----------
hsv : (..., 3) array-like
All values assumed to be in range [0, 1]
Returns
-------
(..., 3) `~numpy.ndarray`
Colors converted to RGB values in range [0, 1]
"""
hsv = np.asarray(hsv)
# check length of the last dimension, should be _some_ sort of rgb
if hsv.shape[-1] != 3:
raise ValueError("Last dimension of input array must be 3; "
f"shape {hsv.shape} was found.")
in_shape = hsv.shape
hsv = np.array(
hsv, copy=False,
dtype=np.promote_types(hsv.dtype, np.float32), # Don't work on ints.
ndmin=2, # In case input was 1D.
)
h = hsv[..., 0]
s = hsv[..., 1]
v = hsv[..., 2]
r = np.empty_like(h)
g = np.empty_like(h)
b = np.empty_like(h)
i = (h * 6.0).astype(int)
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
idx = i % 6 == 0
r[idx] = v[idx]
g[idx] = t[idx]
b[idx] = p[idx]
idx = i == 1
r[idx] = q[idx]
g[idx] = v[idx]
b[idx] = p[idx]
idx = i == 2
r[idx] = p[idx]
g[idx] = v[idx]
b[idx] = t[idx]
idx = i == 3
r[idx] = p[idx]
g[idx] = q[idx]
b[idx] = v[idx]
idx = i == 4
r[idx] = t[idx]
g[idx] = p[idx]
b[idx] = v[idx]
idx = i == 5
r[idx] = v[idx]
g[idx] = p[idx]
b[idx] = q[idx]
idx = s == 0
r[idx] = v[idx]
g[idx] = v[idx]
b[idx] = v[idx]
rgb = np.stack([r, g, b], axis=-1)
return rgb.reshape(in_shape)
def _vector_magnitude(arr):
# things that don't work here:
# * np.linalg.norm: drops mask from ma.array
# * np.sum: drops mask from ma.array unless entire vector is masked
sum_sq = 0
for i in range(arr.shape[-1]):
sum_sq += arr[..., i, np.newaxis] ** 2
return np.sqrt(sum_sq)
| MultiNorm |
python | sphinx-doc__sphinx | tests/roots/test-root/autodoc_target.py | {
"start": 4466,
"end": 4624
} | class ____(enum.Enum):
"""this is enum class"""
#: doc for val1
val1 = 12
val2 = 23 #: doc for val2
val3 = 34
"""doc for val3"""
| EnumCls |
python | pytorch__pytorch | torch/_inductor/fx_passes/overlap_preserving_bucketer.py | {
"start": 3586,
"end": 32519
} | class ____:
"""
Buckets collective operations while preserving compute-collective overlap relationships.
Uses an augmented graph to track dependencies between compute and collective operations.
"""
def __init__(
self,
graph: fx.Graph,
collective_info: dict[fx.Node, CollectiveInfo],
node_ancestors: dict[fx.Node, OrderedSet[fx.Node]],
scheduled: OrderedSet[fx.Node],
max_bucket_memory_gb: float = 1.0,
max_coll_distance: int = 1000,
insert_overlap_deps: bool = False,
bucket_mode: BucketMode = "custom_ops_multidtype",
):
self.graph = graph
self.collective_info = collective_info
self.node_ancestors = node_ancestors
self.scheduled = scheduled
self.max_bucket_memory_gb = max_bucket_memory_gb
self.node_idx = {n: i for i, n in enumerate(scheduled)}
self.aug_graph = AugmentedGraphHelper(self.graph, self.node_ancestors)
self.max_coll_distance = max_coll_distance
self.insert_overlap_deps = insert_overlap_deps
self.bucket_mode = bucket_mode
self.node_to_event: dict[fx.Node, PGEvent] = {}
self.pg_to_timeline_head: dict[str, Optional[PGEvent]] = self.build_timelines()
self._add_hiding_interval_constraints()
def build_timelines(self) -> dict[str, Optional[PGEvent]]:
"Construct each process groups ordered series of event"
all_pgs: OrderedSet[str] = OrderedSet()
for start in self.collective_info:
pg = get_group_name(start)
all_pgs.add(pg)
pg_timeline: dict[str, Optional[PGEvent]] = {}
for pg in all_pgs:
pg_timeline[pg] = self.build_timeline(pg)
return pg_timeline
def build_timeline(self, pg: str) -> Optional[PGEvent]:
"""
Build a timeline of important events (starts, waits, hiding compute) for this process group
and constrain this ordering in the augmented graph.
Sequential dependencies are added between all events because NCCL collectives on the same
process group execute on the same CUDA stream, enforcing LIFO semantics where later-issued
collectives must complete before earlier ones can finish.
"""
head = None
prev_event = None
position = 0
hiding_nodes = OrderedSet()
for node in self.scheduled:
node_type = None
# Determine if this node is relevant for this PG
if node in self.collective_info and get_group_name(node) == pg:
node_type = "starts"
hiding_nodes |= self.collective_info[node].hiding_nodes
elif _schedulable_wait_node(node):
wait_input = node.args[0]
if isinstance(wait_input, fx.Node) and get_group_name(wait_input) == pg:
node_type = "waits"
elif is_compute_node(node) or node in hiding_nodes:
node_type = "compute"
if node_type is None:
continue
event = PGEvent(node=node, event_type=node_type, position=position) # type: ignore[arg-type]
event.insert_between(prev_event, None)
# Add sequential dependency to augmented graph
if prev_event:
self.aug_graph.add_extra_dep(n=event.node, dep=prev_event.node)
else:
head = event
prev_event = event
position += 1
return head
def _populate_node_to_event(self, pg: str) -> None:
"""Populate node_to_event mapping for a specific PG's timeline."""
self.node_to_event.clear()
head = self.pg_to_timeline_head[pg]
curr = head
while curr is not None:
self.node_to_event[curr.node] = curr
curr = curr.next
def _add_hiding_interval_constraints(self) -> None:
"""
Add hiding interval constraints: start -> compute -> wait.
"""
for start, info in self.collective_info.items():
if info.is_exposed:
continue
for hn in info.hiding_nodes:
# Enforce: start -> compute -> wait
self.aug_graph.add_extra_dep(n=hn, dep=start)
self.aug_graph.add_extra_dep(n=info.wait_node, dep=hn)
def bucket_collectives(self) -> None:
# Group collectives by PG first
pg_collectives: dict[str, OrderedSet[fx.Node]] = defaultdict(OrderedSet)
for start in self.collective_info:
pg = get_group_name(start)
pg_collectives[pg].add(start)
all_buckets: list[CollBucket] = []
for pg, collectives in pg_collectives.items():
# Populate node_to_event for this PG's timeline
self._populate_node_to_event(pg)
# Group by bucket key within this PG
grouped_collectives: dict[object, OrderedSet[fx.Node]] = defaultdict(
OrderedSet
)
for start in collectives:
key = bucket_key(start, self.bucket_mode)
if key is not None:
grouped_collectives[key].add(start)
# Find buckets for this PG
for key, collective_group in grouped_collectives.items():
bucket_log.debug(
"bucketing collective group with key %s: %s",
key,
[n.name for n in collective_group],
)
buckets = self._find_buckets(collective_group)
all_buckets.extend(buckets)
# Apply bucketing transformations
# Dependencies are tracked in aug_graph.extra_deps during bucketing
for coll_bucket in all_buckets:
if len(coll_bucket.collectives) <= 1:
continue
counters["inductor"]["collective_buckets"] += 1
self._apply_bucket(coll_bucket)
# Extract all dependencies from augmented graph
# This includes:
# - Sequential timeline deps (added during build_timeline)
# - Hiding interval deps (added during _add_hiding_interval_constraints)
# - All transferred deps from bucketing (transferred during _apply_bucket)
additional_deps = self.aug_graph.get_all_extra_deps()
# Apply topological sort with all dependencies
from torch._dynamo.graph_deduplication import _stable_topological_sort
for n, deps in additional_deps.items():
torch._check(
not n._erased, lambda: f"Erased node deps not transferred: {n}"
)
for d in deps:
torch._check(
not d._erased, lambda: f"Erased node deps not transferred: {d}"
)
_stable_topological_sort(self.graph, additional_deps)
# After topological sort, preserve dependencies using effect tokens
# Only preserve edges where NOT both nodes are collective starts or waits
if self.insert_overlap_deps:
filtered_deps: dict[fx.Node, OrderedSet[fx.Node]] = {}
for node, deps in additional_deps.items():
filtered_node_deps: OrderedSet[fx.Node] = OrderedSet()
# only preserve comm-comptue overlap for now, although we could more
# generally constrain
for dep in deps:
if not (is_collective_or_wait(node) and is_collective_or_wait(dep)):
filtered_node_deps.add(dep)
if filtered_node_deps:
filtered_deps[node] = filtered_node_deps
self._preserve_dependencies_with_tokens(filtered_deps)
self.graph.lint()
def _find_buckets(
self,
collective_group: OrderedSet[fx.Node],
) -> list[CollBucket]:
"""Find valid buckets within a group of similar collectives."""
max_bucket_bytes = int(self.max_bucket_memory_gb * 1024 * 1024 * 1024)
buckets = []
processed: OrderedSet[fx.Node] = OrderedSet()
# Sort collectives by node index for efficient distance checking
sorted_collectives = sorted(collective_group, key=lambda n: self.node_idx[n])
for i, start_node in enumerate(sorted_collectives):
if start_node in processed:
continue
# Initialize bucket with first collective
bucket_info = CollBucket(
collectives=[start_node],
total_bytes=self.collective_info[start_node].size_bytes,
)
processed.add(start_node)
# Check candidates in sorted order, break when beyond max distance
for candidate in sorted_collectives[i + 1 : i + 1 + self.max_coll_distance]:
if candidate in processed:
continue
candidate_bytes = self.collective_info[candidate].size_bytes
# proxy on memory use, if we see a too large bucket,
# dont look for another, later bucket
if bucket_info.total_bytes + candidate_bytes > max_bucket_bytes:
break
if self._can_add_to_bucket(bucket_info, candidate):
bucket_info.collectives.append(candidate)
bucket_info.total_bytes += candidate_bytes
processed.add(candidate)
if len(bucket_info.collectives) > 1:
buckets.append(bucket_info)
return buckets
def _ancestor_dep(self, n1: fx.Node, n2: fx.Node) -> bool:
"""Check if there's an ancestor relationship between two nodes."""
return n1 in self.node_ancestors[n2] or n2 in self.node_ancestors[n1]
def _get_intervals(
self, event: PGEvent
) -> tuple[Optional[tuple[int, int]], list[tuple[int, int]]]:
"""Get (execution_interval, hiding_intervals) for a collective event.
Returns:
(execution_interval, hiding_intervals) where:
- execution_interval is (start_pos, wait_pos) or None
- hiding_intervals is a list of (start_pos, compute_pos) tuples, one for each hiding node
Works for both start and wait events by looking up the collective info.
"""
# For start events, directly use the node
if event.is_start:
coll = event.node
# For wait events, look up the start node from the event's args
elif event.is_wait:
wait_input = event.node.args[0]
if not isinstance(wait_input, fx.Node):
return None, []
coll = wait_input
else:
return None, []
if coll not in self.collective_info:
return None, []
info = self.collective_info[coll]
start_event = self.node_to_event[coll]
wait_event = self.node_to_event[info.wait_node]
execution_interval = (start_event.position, wait_event.position)
hiding_intervals = []
if info.hiding_nodes:
for hiding_node in info.hiding_nodes:
hiding_intervals.append(
(
start_event.position,
self.node_to_event[hiding_node].position,
)
)
return execution_interval, hiding_intervals
def _preserves_hiding_intervals(
self,
bucket_info: CollBucket,
candidate: fx.Node,
start_pos: fx.Node,
wait_pos: fx.Node,
why: WhyNoBucket,
) -> bool:
"""
Check that (start_pos, wait_pos) doesn't violate any hiding intervals or collectives.
Collects all execution and hiding intervals in the affected timeline regions,
then checks:
1. All bucket hiding compute stays between new start/wait
2. No other collective's compute interval is enclosed by bucket execution interval
3. No other collective's execution interval encloses bucket compute intervals
"""
# Collect all collectives being bucketed
all_bucketed_colls = [candidate] + list(bucket_info.collectives)
all_bucketed_waits = [
self.collective_info[coll].wait_node for coll in all_bucketed_colls
]
# Collect hiding compute positions for the bucket
bucket_hiding_compute_positions = []
for coll in all_bucketed_colls:
for coll_hiding_node in self.collective_info[coll].hiding_nodes:
bucket_hiding_compute_positions.append(
self.node_to_event[coll_hiding_node].position
)
# Get new positions
new_start_event = self.node_to_event[start_pos]
new_wait_event = self.node_to_event[wait_pos]
# Check 1: All bucket hiding compute must be between new start and wait
for compute_pos in bucket_hiding_compute_positions:
if not (new_start_event.position < compute_pos < new_wait_event.position):
why(
"hiding compute at pos %d not between start %d and wait %d",
compute_pos,
new_start_event.position,
new_wait_event.position,
)
return False
def get_wait(n: fx.Node) -> fx.Node:
return self.collective_info[n].wait_node
def get_pos(n: fx.Node) -> int:
return self.node_to_event[n].position
latest_start_pos = max(get_pos(candidate), get_pos(bucket_info.collectives[0]))
earliest_wait_pos = min(
get_pos(get_wait(candidate)), get_pos(get_wait(bucket_info.collectives[0]))
)
# Bucket execution interval
bucket_execution_interval = (new_start_event.position, new_wait_event.position)
# Because collectives on the same PG operate under LIFO semantics,
# it's only possible for us to force an early realization of an unrelated collective
# by delaying a start or raising a wait.
# We search in the interval from old_start -> new_start, to see if would be
# forcing another collective to be realized prior to its hiding nodes.
# Similarly, we search from old_wait -> new_wait, in the reverse direction,
# to check the same thing.
execution_intervals = [bucket_execution_interval]
hiding_intervals = [
(bucket_execution_interval[0], pos)
for pos in bucket_hiding_compute_positions
]
curr_event = new_start_event.next
while curr_event is not None and curr_event.position < latest_start_pos:
if (
curr_event.node not in all_bucketed_colls
and curr_event.node not in all_bucketed_waits
):
exec_interval, hiding_interval_list = self._get_intervals(curr_event)
if exec_interval:
execution_intervals.append(exec_interval)
hiding_intervals.extend(hiding_interval_list)
curr_event = curr_event.next
curr_event = new_wait_event.prev
while curr_event is not None and curr_event.position > earliest_wait_pos:
if (
curr_event.node not in all_bucketed_colls
and curr_event.node not in all_bucketed_waits
):
exec_interval, hiding_interval_list = self._get_intervals(curr_event)
if exec_interval:
execution_intervals.append(exec_interval)
hiding_intervals.extend(hiding_interval_list)
curr_event = curr_event.prev
# Check: no hiding interval should be enclosed by any execution interval
def enclosed_interval(inner: tuple[int, int], outer: tuple[int, int]) -> bool:
return outer[0] < inner[0] and inner[1] < outer[1]
for hiding_interval in hiding_intervals:
for execution_interval in execution_intervals:
if enclosed_interval(hiding_interval, execution_interval):
why(
"hiding interval %s enclosed by execution interval %s",
hiding_interval,
execution_interval,
)
return False
return True
def remove_from_event(
self, node: fx.Node
) -> tuple[Optional[PGEvent], Optional[PGEvent]]:
"""Remove node from timeline and return (prev_event, next_event)."""
event = self.node_to_event[node]
assert not event.is_compute, "Cannot remove compute events from timeline"
prev_event, next_event = event.unlink()
# Remove augmented graph dependency
if prev_event:
self.aug_graph.remove_extra_dep(n=node, dep=prev_event.node)
if next_event:
self.aug_graph.remove_extra_dep(n=next_event.node, dep=node)
# Add bypass dependency
if prev_event and next_event:
self.aug_graph.add_extra_dep(n=next_event.node, dep=prev_event.node)
return prev_event, next_event
def restore_to_event(
self,
node: fx.Node,
prev_event: Optional[PGEvent],
next_event: Optional[PGEvent],
) -> None:
"""Restore node to timeline after failed merge attempt."""
event = self.node_to_event[node]
# Reinsert into linked list
event.insert_between(prev_event, next_event)
if prev_event:
self.aug_graph.add_extra_dep(n=node, dep=prev_event.node)
if next_event and not prev_event:
self.aug_graph.add_extra_dep(n=next_event.node, dep=node)
# Remove bypass dependency
if prev_event and next_event:
self.aug_graph.remove_extra_dep(n=next_event.node, dep=prev_event.node)
def _try_timeline_position(
self,
bucket_info: CollBucket,
candidate: fx.Node,
start_pos: fx.Node,
wait_pos: fx.Node,
why: WhyNoBucket,
) -> bool:
"""
Try a specific timeline position for the candidate.
Returns True if valid and merges are successful.
"""
candidate_info = self.collective_info[candidate]
candidate_wait = candidate_info.wait_node
# Quick check: does this violate hiding intervals?
if not self._preserves_hiding_intervals(
bucket_info, candidate, start_pos, wait_pos, why
):
return False
# Determine which start needs to move
existing_coll = bucket_info.collectives[0]
if start_pos == existing_coll:
start_to_move = candidate
else:
assert start_pos == candidate
start_to_move = existing_coll
# Remove start from timeline
start_prev, start_next = self.remove_from_event(start_to_move)
# Check if starts can be merged
if self.aug_graph.has_path(existing_coll, candidate) or self.aug_graph.has_path(
candidate, existing_coll
):
# Restore start constraints
self.restore_to_event(start_to_move, start_prev, start_next)
why("path exists between starts")
return False
# Merge starts
self.aug_graph.merge_to_set(existing_coll, candidate)
# Determine which wait needs to move
existing_wait = self.collective_info[existing_coll].wait_node
candidate_wait = self.collective_info[candidate].wait_node
if wait_pos == existing_wait:
wait_to_move = candidate_wait
else:
wait_to_move = existing_wait
# Remove wait from timeline
wait_prev, wait_next = self.remove_from_event(wait_to_move)
# Check if waits can be merged
if self.aug_graph.has_path(
existing_wait, candidate_wait
) or self.aug_graph.has_path(candidate_wait, existing_wait):
# Restore wait constraints
self.restore_to_event(wait_to_move, wait_prev, wait_next)
# Unmerge the start we just merged
self.aug_graph.unmerge_node(candidate)
# Restore start constraints
self.restore_to_event(start_to_move, start_prev, start_next)
why("path exists between waits")
return False
# Merge waits - success!
self.aug_graph.merge_to_set(existing_wait, candidate_wait)
# Update node_to_event for moved nodes
target_start_event = self.node_to_event[start_pos]
target_wait_event = self.node_to_event[wait_pos]
self.node_to_event[candidate] = target_start_event
self.node_to_event[candidate_wait] = target_wait_event
self.node_to_event[existing_coll] = target_start_event
self.node_to_event[existing_wait] = target_wait_event
return True
def _has_ancestor_conflicts(
self, bucket_info: CollBucket, candidate: fx.Node
) -> bool:
"""
Check if candidate has ancestor conflicts with bucket collectives.
Returns True if there are conflicts.
"""
candidate_info = self.collective_info[candidate]
candidate_wait = candidate_info.wait_node
for coll in bucket_info.collectives:
# Check if collectives are ancestors of each other
if self._ancestor_dep(coll, candidate):
return True
# Check if waits are ancestors of each other
coll_wait = self.collective_info[coll].wait_node
if self._ancestor_dep(candidate_wait, coll_wait):
return True
# Check if existing hiding node conflicts with candidate wait
for old_hiding_node in self.collective_info[coll].hiding_nodes:
if self._ancestor_dep(old_hiding_node, candidate_wait):
return True
# Check if candidate hiding node conflicts with existing wait
for new_hiding_node in candidate_info.hiding_nodes:
if self._ancestor_dep(new_hiding_node, coll_wait):
return True
return False
def _can_add_to_bucket(
self,
bucket_info: CollBucket,
candidate: fx.Node,
) -> bool:
"""
Check if candidate can be added to bucket without breaking comm/compute overlap.
Strategy: Try all timeline positions - combinations of [existing_start, candidate_start]
x [existing_wait, candidate_wait]. For each position, verify:
1. Hiding intervals preserved - for any (start, hiding_compute, wait) interval, no other
collective's (start, wait) pair falls between start and hiding_compute, which would
force realization and break overlap due to LIFO semantics
2. Topologically valid (no dependency cycles)
Return True if any timeline position satisfies both constraints.
"""
existing_coll = bucket_info.collectives[0]
why = WhyNoBucket(existing_coll, candidate)
candidate_info = self.collective_info[candidate]
# Step 1: Quick check using precomputed ancestors
# These ancestors are computed prior to adding augmented dependencies and not updated,
# so if any of these checks fail then the merge will not be topologically valid
# even ignoring comm/compute overlap
if self._has_ancestor_conflicts(bucket_info, candidate):
why("has ancestor conflicts")
return False
# Step 2: Try different rail positions
existing_wait = self.collective_info[existing_coll].wait_node
candidate_start = candidate
candidate_wait = candidate_info.wait_node
# Try combinations in order of likelihood to succeed
# (early start, later wait is most likely to work)
combinations = [
(
existing_coll,
candidate_wait,
), # Move candidate start early, keep wait late
(
existing_coll,
existing_wait,
), # Move candidate start early, move wait early
(candidate_start, candidate_wait), # Keep both in place
(candidate_start, existing_wait), # Keep start in place, move wait early
]
for i, (start_pos, wait_pos) in enumerate(combinations):
if self._try_timeline_position(
bucket_info, candidate, start_pos, wait_pos, why
):
bucket_log.debug(
"bucketed %s with %s using timeline position %d: (start=%s, wait=%s)",
candidate.name,
existing_coll.name,
i + 1,
start_pos.name,
wait_pos.name,
)
return True
why("all timeline positions failed")
return False
def _apply_bucket(self, bucket_info: CollBucket) -> None:
"""
Apply bucketing transformation.
Dependencies are added to aug_graph.extra_deps and transferred from old nodes.
"""
from torch._inductor.fx_passes.bucketing import (
is_all_reduce_tensor,
merge_all_gather_bucket,
merge_all_reduce_bucket,
merge_reduce_scatter_bucket,
)
bucket = bucket_info.collectives
# Collect old nodes BEFORE they're erased
old_starts = list(bucket)
old_waits = [self.collective_info[n].wait_node for n in bucket]
fused_convert_dtypes = []
for n in old_starts:
if has_mergeable_all_gather_convert_dtype(n):
fused_convert_dtypes.append(n.args[0])
# Find where to place the bucketed operations
next_node = bucket[0]
while next_node in bucket:
next_node = next_node.next
# Don't use wait_insertion_point - let merge functions place waits naturally
# The wait_insertion_point feature tries to move waits to a specific location,
# but this can cause issues when that location is one of the nodes being erased
# Create bucketed collective (this will erase old nodes)
if is_all_gather(bucket[0]):
new_nodes, replacements = merge_all_gather_bucket(
self.graph,
bucket,
insert_before=next_node,
mode="custom_ops",
)
elif is_all_reduce_tensor(bucket[0]):
new_nodes, replacements = merge_all_reduce_bucket(
self.graph,
bucket,
mode="custom_ops",
insert_before=next_node,
)
else:
assert is_reduce_scatter(bucket[0])
new_nodes, replacements = merge_reduce_scatter_bucket(
self.graph,
bucket,
insert_before=next_node,
mode="custom_ops",
)
# Get new nodes
new_waits = [n for n in new_nodes if _schedulable_wait_node(n)]
assert len(new_waits) == 1
new_wait = new_waits[0]
new_start = new_wait.args[0]
assert isinstance(new_start, fx.Node)
# Create mapping of all erased nodes to their replacements
erased_to_new = {}
for old_start in old_starts:
erased_to_new[old_start] = new_start
for old_wait in old_waits:
erased_to_new[old_wait] = new_wait
# Handle convert_element_type nodes that were fused and erased
# The bucketed operation may have a _pre_bucket op that handles dtype conversion
if fused_convert_dtypes:
# all gather bucketing may fuse in dtype conversion into the bucketing
# if so, we need to transfer hiding deps from the old dtype conversion
# to the new bucketing node
new_convert_dtypes_node = new_start.kwargs["out"]
assert isinstance(new_convert_dtypes_node, fx.Node)
assert (
new_convert_dtypes_node.target
== torch.ops.bucketing._pre_bucket_all_gather.default
)
for n in fused_convert_dtypes:
erased_to_new[n] = new_convert_dtypes_node
# Transfer all dependencies from old nodes to new nodes
self.aug_graph.transfer_erased_node_deps(erased_to_new)
def _preserve_dependencies_with_tokens(
self, additional_deps: dict[fx.Node, OrderedSet[fx.Node]]
) -> None:
"""
Preserve dependencies using effect tokens and with_effects higher-order op.
Uses the standalone token_dependencies utility for consistent behavior
across different overlap scheduling approaches.
"""
from torch._inductor.fx_passes.control_dependencies import (
preserve_node_ordering,
)
preserve_node_ordering(self.graph, additional_deps)
| OverlapPreservingBucketer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1302228,
"end": 1303152
} | class ____(sgqlc.types.Type, Node):
"""A Pinned Issue is a issue pinned to a repository's index page."""
__schema__ = github_schema
__field_names__ = ("database_id", "full_database_id", "issue", "pinned_by", "repository")
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
full_database_id = sgqlc.types.Field(BigInt, graphql_name="fullDatabaseId")
"""Identifies the primary key from the database as a BigInt."""
issue = sgqlc.types.Field(sgqlc.types.non_null(Issue), graphql_name="issue")
"""The issue that was pinned."""
pinned_by = sgqlc.types.Field(sgqlc.types.non_null(Actor), graphql_name="pinnedBy")
"""The actor that pinned this issue."""
repository = sgqlc.types.Field(sgqlc.types.non_null("Repository"), graphql_name="repository")
"""The repository that this issue was pinned to."""
| PinnedIssue |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 13847,
"end": 15626
} | class ____(TypedDict, total=False):
type: Required[Literal['function-wrap']]
function: Required[WrapSerializerFunction]
is_field_serializer: bool # default False
info_arg: bool # default False
schema: CoreSchema # if omitted, the schema on which this serializer is defined is used
return_schema: CoreSchema # if omitted, AnySchema is used
when_used: WhenUsed # default: 'always'
def wrap_serializer_function_ser_schema(
function: WrapSerializerFunction,
*,
is_field_serializer: bool | None = None,
info_arg: bool | None = None,
schema: CoreSchema | None = None,
return_schema: CoreSchema | None = None,
when_used: WhenUsed = 'always',
) -> WrapSerializerFunctionSerSchema:
"""
Returns a schema for serialization with a wrap function, can be either a "general" or "field" function.
Args:
function: The function to use for serialization
is_field_serializer: Whether the serializer is for a field, e.g. takes `model` as the first argument,
and `info` includes `field_name`
info_arg: Whether the function takes an `info` argument
schema: The schema to use for the inner serialization
return_schema: Schema to use for serializing return value
when_used: When the function should be called
"""
if when_used == 'always':
# just to avoid extra elements in schema, and to use the actual default defined in rust
when_used = None # type: ignore
return _dict_not_none(
type='function-wrap',
function=function,
is_field_serializer=is_field_serializer,
info_arg=info_arg,
schema=schema,
return_schema=return_schema,
when_used=when_used,
)
| WrapSerializerFunctionSerSchema |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_write_sheet_data.py | {
"start": 301,
"end": 785
} | class ____(unittest.TestCase):
"""
Test the Worksheet _write_sheet_data() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_sheet_data(self):
"""Test the _write_sheet_data() method"""
self.worksheet._write_sheet_data()
exp = """<sheetData/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteSheetData |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/pooling.py | {
"start": 45167,
"end": 47047
} | class ____(GlobalPooling3D):
"""Global Average pooling operation for 3D data.
Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
keepdims: A boolean, whether to keep the spatial dimensions or not.
If `keepdims` is `False` (default), the rank of the tensor is reduced
for spatial dimensions.
If `keepdims` is `True`, the spatial dimensions are retained with
length 1.
The behavior is the same as for `tf.reduce_mean` or `np.mean`.
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `keepdims`=False:
2D tensor with shape `(batch_size, channels)`.
- If `keepdims`=True:
- If `data_format='channels_last'`:
5D tensor with shape `(batch_size, 1, 1, 1, channels)`
- If `data_format='channels_first'`:
5D tensor with shape `(batch_size, channels, 1, 1, 1)`
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return backend.mean(inputs, axis=[1, 2, 3], keepdims=self.keepdims)
else:
return backend.mean(inputs, axis=[2, 3, 4], keepdims=self.keepdims)
| GlobalAveragePooling3D |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 117365,
"end": 118689
} | class ____(GeneratedAirbyteSource):
class OAuth20:
@public
def __init__(
self,
client_id: str,
client_secret: str,
access_token: str,
subdomain: Optional[str] = None,
):
self.auth_type = "oauth2.0"
self.subdomain = check.opt_str_param(subdomain, "subdomain")
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
self.access_token = check.str_param(access_token, "access_token")
class APIToken:
@public
def __init__(self, api_token: str):
self.auth_type = "api_token"
self.api_token = check.str_param(api_token, "api_token")
@public
def __init__(
self, name: str, credentials: Union["MondaySource.OAuth20", "MondaySource.APIToken"]
):
"""Airbyte Source for Monday.
Documentation can be found at https://docs.airbyte.com/integrations/sources/monday
Args:
name (str): The name of the destination.
"""
self.credentials = check.inst_param(
credentials, "credentials", (MondaySource.OAuth20, MondaySource.APIToken)
)
super().__init__("Monday", name)
| MondaySource |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-artifact-editor/tests/test_artifact_editor.py | {
"start": 235,
"end": 397
} | class ____(BaseModel):
"""Address model for testing nested objects."""
street: str
city: str
zipcode: str
country: Optional[str] = None
| Address |
python | getsentry__sentry | tests/sentry/workflow_engine/migration_helpers/test_migrate_alert_rule.py | {
"start": 38289,
"end": 40623
} | class ____(BaseMetricAlertMigrationTest):
def setUp(self) -> None:
self.metric_alert = self.create_alert_rule()
self.alert_rule_trigger = self.create_alert_rule_trigger(
alert_rule=self.metric_alert, label="critical", alert_threshold=200
)
self.create_migrated_metric_alert_objects(self.metric_alert)
(
self.critical_detector_trigger,
self.critical_action_filter,
self.critical_resolve_action_filter,
) = self.create_migrated_metric_alert_rule_trigger_objects(
self.alert_rule_trigger, DetectorPriorityLevel.HIGH, Condition.GREATER
)
self.resolve_detector_trigger = self.create_migrated_metric_alert_rule_resolve_objects(
self.metric_alert, 200, Condition.LESS_OR_EQUAL
)
def test_dual_update_metric_alert_threshold_type(self) -> None:
# This field affects the data conditions, but it lives on the alert rule.
updated_fields: dict[str, Any] = {}
updated_fields = {"threshold_type": AlertRuleThresholdType.BELOW.value}
self.metric_alert.update(**updated_fields)
dual_update_migrated_alert_rule(self.metric_alert)
self.critical_detector_trigger.refresh_from_db()
self.resolve_detector_trigger.refresh_from_db()
assert self.critical_detector_trigger.type == Condition.LESS
assert self.resolve_detector_trigger.type == Condition.GREATER_OR_EQUAL
def test_dual_update_metric_alert_resolve_threshold(self) -> None:
# This field affects the data conditions, but it lives on the alert rule.
updated_fields: dict[str, Any] = {}
updated_fields = {"resolve_threshold": 10}
self.metric_alert.update(**updated_fields)
dual_update_resolve_condition(self.metric_alert)
self.resolve_detector_trigger.refresh_from_db()
assert self.resolve_detector_trigger.comparison == 10
def test_dual_update_trigger_threshold(self) -> None:
updated_fields = {"alert_threshold": 314}
self.alert_rule_trigger.update(**updated_fields)
dual_update_migrated_alert_rule_trigger(self.alert_rule_trigger)
self.critical_detector_trigger.refresh_from_db()
assert self.critical_detector_trigger.comparison == 314
| DualUpdateAlertRuleTriggerTest |
python | pypa__build | src/build/_exceptions.py | {
"start": 69,
"end": 175
} | class ____(Exception):
"""
Exception raised by :class:`build.ProjectBuilder`.
"""
| BuildException |
python | readthedocs__readthedocs.org | readthedocs/organizations/models.py | {
"start": 9219,
"end": 9516
} | class ____(models.Model):
"""Intermediate table for Organization <-> User relationships."""
owner = models.ForeignKey(
User,
on_delete=models.CASCADE,
)
organization = models.ForeignKey(
Organization,
on_delete=models.CASCADE,
)
| OrganizationOwner |
python | squidfunk__mkdocs-material | material/plugins/tags/structure/mapping/manager/__init__.py | {
"start": 1638,
"end": 8402
} | class ____:
"""
A mapping manager.
The mapping manager is responsible for collecting all tags from the front
matter of pages, and for building a tag structure from them, nothing more.
"""
def __init__(self, config: TagsConfig):
"""
Initialize the mapping manager.
Arguments:
config: The configuration.
"""
self.config = config
self.format = TagSet(allowed = self.config.tags_allowed)
self.data = {}
def __repr__(self) -> str:
"""
Return a printable representation of the mapping manager.
Returns:
Printable representation.
"""
return _print(self)
def __iter__(self) -> Iterator[Mapping]:
"""
Iterate over mappings.
Yields:
The current mapping.
"""
return iter(self.data.values())
# -------------------------------------------------------------------------
config: TagsConfig
"""
The configuration.
"""
format: TagSet
"""
The mapping format.
This is the validator that is used to check if tags are valid, including
the tags in the front matter of pages, as well as the tags defined in the
configuration. Numbers and booleans are always converted to strings before
creating tags, and the allow list is checked as well, if given.
"""
data: dict[str, Mapping]
"""
The mappings.
"""
# -------------------------------------------------------------------------
def add(self, page: Page, markdown: str) -> Mapping | None:
"""
Add page.
This method is called by the tags plugin to retrieve all tags of a page.
It extracts all tags from the front matter of the given page, and adds
them to the mapping. If no tags are found, no mapping is created and
nothing is returned.
Note that this method is intended to be called with the page during the
`on_page_markdown` event, as it reads the front matter of a page. Also,
the Markdown must be explicitly passed, as we could otherwise run into
inconsistencies when other plugins modify the Markdown.
Arguments:
page: The page.
markdown: The page's Markdown.
Returns:
The mapping or nothing.
"""
assert isinstance(markdown, str)
# Return nothing if page doesn't have tags
tags = self.config.tags_name_property
if not page.meta.get(tags, []):
return
# Create mapping and associate with page
mapping = Mapping(page)
self.data[page.url] = mapping
# Retrieve and validate tags, and add to mapping
for tag in self.format.validate(page.meta[tags]):
mapping.tags.add(self._configure(tag))
# Return mapping
return mapping
def get(self, page: Page) -> Mapping | None:
"""
Get mapping for page, if any.
Arguments:
page: The page.
Returns:
The mapping or nothing.
"""
if page.url in self.data:
return self.data[page.url]
# -------------------------------------------------------------------------
def _configure(self, tag: Tag) -> Tag:
"""
Configure tag.
This method is called by the mapping manager to configure a tag for the
the tag structure. Depending on the configuration, the tag is expanded
into a hierarchy of tags, and can be marked as hidden if it is a shadow
tag, hiding it from mappings and listings when rendering.
Arguments:
tag: The tag.
Returns:
The configured tag.
"""
if self.config.tags_hierarchy:
return self._configure_hierarchy(tag)
else:
return self._configure_shadow(tag, tag.name)
def _configure_hierarchy(self, tag: Tag) -> Tag:
"""
Configure hierarchical tag.
Note that shadow tags that occur as part of a tag hierarchy propagate
their hidden state to all of their children.
Arguments:
tag: The tag.
Returns:
The configured tag.
"""
separator = self.config.tags_hierarchy_separator
root, *rest = tag.name.split(separator)
# Create tag root and hierarchy
tag = self._configure_shadow(Tag(root), root)
for name in rest:
tag = self._configure_shadow(Tag(
separator.join([tag.name, name]),
parent = tag, hidden = tag.hidden
), name)
# Return tag
return tag
def _configure_shadow(self, tag: Tag, name: str) -> Tag:
"""
Configure shadow tag.
Regardless of the configuration, tags are always marked as hidden if
they're classified as shadow tags, e.g., if their name matches the
configured shadow prefix or suffix, or if they're part of the list of
shadow tags. Whether they're displayed is decided before rendering.
The tag name must be passed separately, as it may be different from the
tag's name, e.g., when creating a tag hierarchy. In this case, the name
represents the part that was added to the tag, essentially the suffix.
The name is checked for shadow prefixes and suffixes.
Arguments:
tag: The tag.
name: The tag name.
Returns:
The configured tag.
"""
if not tag.hidden:
tag.hidden = tag in self.config.shadow_tags
# Check if tag matches shadow prefix, if defined
if not tag.hidden and self.config.shadow_tags_prefix:
tag.hidden = name.startswith(self.config.shadow_tags_prefix)
# Check if tag matches shadow suffix, if defined
if not tag.hidden and self.config.shadow_tags_suffix:
tag.hidden = name.endswith(self.config.shadow_tags_suffix)
# Return tag
return tag
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def _print(manager: MappingManager, indent: int = 0) -> str:
"""
Return a printable representation of a mapping manager.
Arguments:
manager: The mapping manager.
indent: The indentation level.
Returns:
Printable representation.
"""
lines: list[str] = []
lines.append(" " * indent + f"MappingManager()")
# Print mappings
for mapping in manager:
lines.append(" " * (indent + 2) + repr(mapping))
# Concatenate everything
return "\n".join(lines)
| MappingManager |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/random_rotation.py | {
"start": 394,
"end": 9635
} | class ____(BaseImagePreprocessingLayer):
"""A preprocessing layer which randomly rotates images during training.
This layer will apply random rotations to each image, filling empty space
according to `fill_mode`.
By default, random rotations are only applied during training.
At inference time, the layer does nothing. If you need to apply random
rotations at inference time, pass `training=True` when calling the layer.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype.
By default, the layer will output floats.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Args:
factor: a float represented as fraction of 2 Pi, or a tuple of size 2
representing lower and upper bound for rotating clockwise and
counter-clockwise. A positive values means rotating
counter clock-wise,
while a negative value means clock-wise.
When represented as a single
float, this value is used for both the upper and lower bound.
For instance, `factor=(-0.2, 0.3)`
results in an output rotation by a random
amount in the range `[-20% * 360, 30% * 360]`.
`factor=0.2` results in an
output rotating by a random amount
in the range `[-20% * 360, 20% * 360]`.
fill_mode: Points outside the boundaries of the input are filled
according to the given mode
(one of `{"constant", "reflect", "wrap", "nearest"}`).
- *reflect*: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about
the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)`
The input is extended by
filling all values beyond the edge with
the same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside
the boundaries when `fill_mode="constant"`.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
"""
_SUPPORTED_FILL_MODE = ("reflect", "wrap", "constant", "nearest")
_SUPPORTED_INTERPOLATION = ("nearest", "bilinear")
def __init__(
self,
factor,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
data_format=None,
**kwargs,
):
super().__init__(factor=factor, data_format=data_format, **kwargs)
self.seed = seed
self.generator = SeedGenerator(seed)
self.fill_mode = fill_mode
self.interpolation = interpolation
self.fill_value = fill_value
self.supports_jit = False
if self.fill_mode not in self._SUPPORTED_FILL_MODE:
raise NotImplementedError(
f"Unknown `fill_mode` {fill_mode}. Expected of one "
f"{self._SUPPORTED_FILL_MODE}."
)
if self.interpolation not in self._SUPPORTED_INTERPOLATION:
raise NotImplementedError(
f"Unknown `interpolation` {interpolation}. Expected of one "
f"{self._SUPPORTED_INTERPOLATION}."
)
def transform_images(self, images, transformation, training=True):
images = self.backend.cast(images, self.compute_dtype)
if training:
return self.backend.image.affine_transform(
images=images,
transform=transformation["rotation_matrix"],
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
data_format=self.data_format,
)
return images
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_bounding_boxes(
self,
bounding_boxes,
transformation,
training=True,
):
if training:
ops = self.backend
boxes = bounding_boxes["boxes"]
height = transformation["image_height"]
width = transformation["image_width"]
batch_size = transformation["batch_size"]
boxes = converters.affine_transform(
boxes=boxes,
angle=transformation["angle"],
translate_x=ops.numpy.zeros([batch_size]),
translate_y=ops.numpy.zeros([batch_size]),
scale=ops.numpy.ones([batch_size]),
shear_x=ops.numpy.zeros([batch_size]),
shear_y=ops.numpy.zeros([batch_size]),
height=height,
width=width,
)
bounding_boxes["boxes"] = boxes
bounding_boxes = converters.clip_to_image_size(
bounding_boxes,
height=height,
width=width,
bounding_box_format="xyxy",
)
bounding_boxes = converters.convert_format(
bounding_boxes,
source="xyxy",
target=self.bounding_box_format,
height=height,
width=width,
)
return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return self.transform_images(
segmentation_masks, transformation, training=training
)
def get_random_transformation(self, data, training=True, seed=None):
ops = self.backend
if not training:
return None
if isinstance(data, dict):
images = data["images"]
else:
images = data
shape = ops.core.shape(images)
if len(shape) == 4:
batch_size = shape[0]
if self.data_format == "channels_last":
image_height = shape[1]
image_width = shape[2]
else:
image_height = shape[2]
image_width = shape[3]
else:
batch_size = 1
if self.data_format == "channels_last":
image_height = shape[0]
image_width = shape[1]
else:
image_height = shape[1]
image_width = shape[2]
if seed is None:
seed = self._get_seed_generator(ops._backend)
lower = self.factor[0] * 360.0
upper = self.factor[1] * 360.0
angle = ops.random.uniform(
shape=(batch_size,),
minval=lower,
maxval=upper,
seed=seed,
)
center_x, center_y = 0.5, 0.5
rotation_matrix = self._compute_affine_matrix(
center_x=center_x,
center_y=center_y,
angle=angle,
translate_x=ops.numpy.zeros([batch_size]),
translate_y=ops.numpy.zeros([batch_size]),
scale=ops.numpy.ones([batch_size]),
shear_x=ops.numpy.zeros([batch_size]),
shear_y=ops.numpy.zeros([batch_size]),
height=image_height,
width=image_width,
)
if len(shape) == 3:
rotation_matrix = self.backend.numpy.squeeze(
rotation_matrix, axis=0
)
return {
"angle": angle,
"rotation_matrix": rotation_matrix,
"image_height": image_height,
"image_width": image_width,
"batch_size": batch_size,
}
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"factor": self.factor,
"data_format": self.data_format,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return {**base_config, **config}
| RandomRotation |
python | numpy__numpy | tools/swig/test/testTensor.py | {
"start": 12551,
"end": 12857
} | class ____(TensorTestCase):
def __init__(self, methodName="runTest"):
TensorTestCase.__init__(self, methodName)
self.typeStr = "ushort"
self.typeCode = "H"
self.result = int(self.result)
######################################################################
| ushortTestCase |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 482406,
"end": 483050
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("OrganizationEnterpriseOwnerEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(sgqlc.types.list_of("User"), graphql_name="nodes")
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| OrganizationEnterpriseOwnerConnection |
python | apache__thrift | test/py/TSimpleJSONProtocolTest.py | {
"start": 993,
"end": 3951
} | class ____(unittest.TestCase):
protocol_factory = TJSONProtocol.TSimpleJSONProtocolFactory()
def _assertDictEqual(self, a, b, msg=None):
if hasattr(self, 'assertDictEqual'):
# assertDictEqual only in Python 2.7. Depends on your machine.
self.assertDictEqual(a, b, msg)
return
# Substitute implementation not as good as unittest library's
self.assertEquals(len(a), len(b), msg)
for k, v in a.iteritems():
self.assertTrue(k in b, msg)
self.assertEquals(b.get(k), v, msg)
def _serialize(self, obj):
trans = TTransport.TMemoryBuffer()
prot = self.protocol_factory.getProtocol(trans)
obj.write(prot)
return trans.getvalue()
def _deserialize(self, objtype, data):
prot = self.protocol_factory.getProtocol(TTransport.TMemoryBuffer(data))
ret = objtype()
ret.read(prot)
return ret
def testWriteOnly(self):
self.assertRaises(NotImplementedError,
self._deserialize, VersioningTestV1, b'{}')
def testSimpleMessage(self):
v1obj = VersioningTestV1(
begin_in_both=12345,
old_string='aaa',
end_in_both=54321)
expected = dict(begin_in_both=v1obj.begin_in_both,
old_string=v1obj.old_string,
end_in_both=v1obj.end_in_both)
actual = json.loads(self._serialize(v1obj).decode('ascii'))
self._assertDictEqual(expected, actual)
def testComplicated(self):
v2obj = VersioningTestV2(
begin_in_both=12345,
newint=1,
newbyte=2,
newshort=3,
newlong=4,
newdouble=5.0,
newstruct=Bonk(message="Hello!", type=123),
newlist=[7, 8, 9],
newset=set([42, 1, 8]),
newmap={1: 2, 2: 3},
newstring="Hola!",
end_in_both=54321)
expected = dict(begin_in_both=v2obj.begin_in_both,
newint=v2obj.newint,
newbyte=v2obj.newbyte,
newshort=v2obj.newshort,
newlong=v2obj.newlong,
newdouble=v2obj.newdouble,
newstruct=dict(message=v2obj.newstruct.message,
type=v2obj.newstruct.type),
newlist=v2obj.newlist,
newset=list(v2obj.newset),
newmap=v2obj.newmap,
newstring=v2obj.newstring,
end_in_both=v2obj.end_in_both)
# Need to load/dump because map keys get escaped.
expected = json.loads(json.dumps(expected))
actual = json.loads(self._serialize(v2obj).decode('ascii'))
self._assertDictEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
| SimpleJSONProtocolTest |
python | hynek__structlog | src/structlog/stdlib.py | {
"start": 16284,
"end": 21787
} | class ____:
"""
Wraps a `BoundLogger` & exposes its logging methods as ``async`` versions.
Instead of blocking the program, they are run asynchronously in a thread
pool executor.
This means more computational overhead per log call. But it also means that
the processor chain (e.g. JSON serialization) and I/O won't block your
whole application.
Only available for Python 3.7 and later.
.. versionadded:: 20.2.0
.. versionchanged:: 20.2.0 fix _dispatch_to_sync contextvars usage
.. deprecated:: 23.1.0
Use the regular `BoundLogger` with its a-prefixed methods instead.
.. versionchanged:: 23.3.0
Callsite parameters are now also collected for async log methods.
"""
__slots__ = ("_loop", "sync_bl")
#: The wrapped synchronous logger. It is useful to be able to log
#: synchronously occasionally.
sync_bl: BoundLogger
_executor = None
_bound_logger_factory = BoundLogger
def __init__(
self,
logger: logging.Logger,
processors: Iterable[Processor],
context: Context,
*,
# Only as an optimization for binding!
_sync_bl: Any = None, # *vroom vroom* over purity.
_loop: Any = None,
):
if _sync_bl:
self.sync_bl = _sync_bl
self._loop = _loop
return
self.sync_bl = self._bound_logger_factory(
logger=logger, processors=processors, context=context
)
self._loop = asyncio.get_running_loop()
# Instances would've been correctly recognized as such, however the class
# not and we need the class in `structlog.configure()`.
@property
def _context(self) -> Context:
return self.sync_bl._context
def bind(self, **new_values: Any) -> Self:
return self.__class__(
# logger, processors and context are within sync_bl. These
# arguments are ignored if _sync_bl is passed. *vroom vroom* over
# purity.
logger=None, # type: ignore[arg-type]
processors=(),
context={},
_sync_bl=self.sync_bl.bind(**new_values),
_loop=self._loop,
)
def new(self, **new_values: Any) -> Self:
return self.__class__(
# c.f. comment in bind
logger=None, # type: ignore[arg-type]
processors=(),
context={},
_sync_bl=self.sync_bl.new(**new_values),
_loop=self._loop,
)
def unbind(self, *keys: str) -> Self:
return self.__class__(
# c.f. comment in bind
logger=None, # type: ignore[arg-type]
processors=(),
context={},
_sync_bl=self.sync_bl.unbind(*keys),
_loop=self._loop,
)
def try_unbind(self, *keys: str) -> Self:
return self.__class__(
# c.f. comment in bind
logger=None, # type: ignore[arg-type]
processors=(),
context={},
_sync_bl=self.sync_bl.try_unbind(*keys),
_loop=self._loop,
)
async def _dispatch_to_sync(
self,
meth: Callable[..., Any],
event: str,
args: tuple[Any, ...],
kw: dict[str, Any],
) -> None:
"""
Merge contextvars and log using the sync logger in a thread pool.
"""
scs_token = _ASYNC_CALLING_STACK.set(sys._getframe().f_back.f_back) # type: ignore[union-attr, arg-type, unused-ignore]
ctx = contextvars.copy_context()
try:
await asyncio.get_running_loop().run_in_executor(
self._executor,
lambda: ctx.run(lambda: meth(event, *args, **kw)),
)
finally:
_ASYNC_CALLING_STACK.reset(scs_token)
async def debug(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.debug, event, args, kw)
async def info(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.info, event, args, kw)
async def warning(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.warning, event, args, kw)
async def warn(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.warning, event, args, kw)
async def error(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.error, event, args, kw)
async def critical(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.critical, event, args, kw)
async def fatal(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.critical, event, args, kw)
async def exception(self, event: str, *args: Any, **kw: Any) -> None:
# To make `log.exception("foo") work, we have to check if the user
# passed an explicit exc_info and if not, supply our own.
ei = kw.pop("exc_info", None)
if ei is None and kw.get("exception") is None:
ei = sys.exc_info()
kw["exc_info"] = ei
await self._dispatch_to_sync(self.sync_bl.exception, event, args, kw)
async def log(self, level: Any, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(
partial(self.sync_bl.log, level), event, args, kw
)
| AsyncBoundLogger |
python | eventlet__eventlet | benchmarks/__init__.py | {
"start": 779,
"end": 6973
} | class ____:
func = None
name = ''
iters = 0
ns_per_op = 0
allocs_per_op = 0
mb_per_s = 0
def __init__(self, **kwargs):
for k, v in kwargs.items():
if not hasattr(self, k):
raise AttributeError(k)
setattr(self, k, v)
def __str__(self):
kvs = ', '.join('{}={}'.format(k, v) for k, v in self.__dict__.items() if not k.startswith('_'))
return 'Benchmark<{}>'.format(kvs)
__repr__ = __str__
def format_result(self, name_pad_to=64):
# format compatible with golang.org/x/tools/cmd/benchcmp
return "Benchmark_{b.name}{pad}\t{b.iters}\t{b.ns_per_op} ns/op".format(
b=self, pad=' ' * (name_pad_to + 1 - len(self.name)))
def run(self, repeat=5):
wrapper_time = _run_timeit(self.func, 0)
times = []
for _ in range(repeat):
t = _run_timeit(self.func, self.iters)
if t == 0.0:
raise Exception('{} time=0'.format(repr(self)))
times.append(t)
best_time = min(times) - wrapper_time
self.ns_per_op = int((best_time * 1e9) / self.iters)
def _run_timeit(func, number):
# common setup
gc.collect()
manager = getattr(func, '_benchmark_manager', None)
try:
# TODO collect allocations count, memory usage
# TODO collect custom MB/sec metric reported by benchmark
if manager is not None:
with manager(number) as ctx:
return timeit.Timer(lambda: func(ctx)).timeit(number=number)
else:
return timeit.Timer(func).timeit(number=number)
finally:
# common cleanup
eventlet.sleep(0.01)
def optimal_iters(func, target_time):
'''Find optimal number of iterations to run func closely >= target_time.
'''
iters = 1
target_time = float(target_time)
max_iters = int(getattr(func, '_benchmark_max_iters', 0))
# TODO automatically detect non-linear time growth
scale_factor = getattr(func, '_benchmark_scale_factor', 0.0)
for _ in range(10):
if max_iters and iters > max_iters:
return max_iters
# print('try iters={iters}'.format(**locals()))
t = _run_timeit(func, number=iters)
# print('... t={t}'.format(**locals()))
if t >= target_time:
return iters
if scale_factor:
iters *= scale_factor
continue
# following assumes and works well for linear complexity target functions
if t < (target_time / 2):
# roughly target half optimal time, ensure iterations keep increasing
iters = iters * (target_time / t / 2) + 1
# round up to nearest power of 10
iters = int(10 ** math.ceil(math.log10(iters)))
elif t < target_time:
# half/double dance is less prone to overshooting iterations
iters *= 2
raise Exception('could not find optimal iterations for time={} func={}'.format(target_time, repr(func)))
def collect(filter_fun):
# running `python benchmarks/__init__.py` or `python -m benchmarks`
# puts .../eventlet/benchmarks at top of sys.path, fix it to project root
if sys.path[0].endswith('/benchmarks'):
path = sys.path.pop(0)
correct = path.rsplit('/', 1)[0]
sys.path.insert(0, correct)
common_prefix = 'benchmark_'
result = []
# TODO step 1: put all toplevel benchmarking code under `if __name__ == '__main__'`
# TODO step 2: auto import benchmarks/*.py, remove whitelist below
# TODO step 3: convert existing benchmarks
for name in ('hub_timers', 'spawn'):
mod = importlib.import_module('benchmarks.' + name)
for name, obj in inspect.getmembers(mod):
if name.startswith(common_prefix) and inspect.isfunction(obj):
useful_name = name[len(common_prefix):]
if filter_fun(useful_name):
result.append(Benchmark(name=useful_name, func=obj))
return result
def noop(*a, **kw):
pass
def configure(manager=None, scale_factor=0.0, max_iters=0):
def wrapper(func):
func._benchmark_manager = manager
func._benchmark_scale_factor = scale_factor
func._benchmark_max_iters = max_iters
return func
return wrapper
def main():
cmdline = argparse.ArgumentParser(description='Run benchmarks')
cmdline.add_argument('-autotime', default=3.0, type=float, metavar='seconds',
help='''autoscale iterations close to this time per benchmark,
in seconds (default: %(default).1f)''')
cmdline.add_argument('-collect', default=False, action='store_true',
help='stop after collecting, useful for debugging this tool')
cmdline.add_argument('-filter', default='', metavar='regex',
help='process benchmarks matching regex (default: all)')
cmdline.add_argument('-iters', default=None, type=int, metavar='int',
help='force this number of iterations (default: auto)')
cmdline.add_argument('-repeat', default=5, type=int, metavar='int',
help='repeat each benchmark, report best result (default: %(default)d)')
args = cmdline.parse_args()
filter_re = re.compile(args.filter)
bs = collect(filter_re.search)
if args.filter and not bs:
# TODO stderr
print('error: no benchmarks matched by filter "{}"'.format(args.filter))
sys.exit(1)
if args.collect:
bs.sort(key=lambda b: b.name)
print('\n'.join(b.name for b in bs))
return
if not bs:
raise Exception('no benchmarks to run')
# execute in random order
random.shuffle(bs)
for b in bs:
b.iters = args.iters or optimal_iters(b.func, target_time=args.autotime)
b.run()
# print results in alphabetic order
max_name_len = max(len(b.name) for b in bs)
bs.sort(key=lambda b: b.name)
for b in bs:
print(b.format_result(name_pad_to=max_name_len))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit(1)
| Benchmark |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 49195,
"end": 49242
} | class ____(armpl_info):
pass
| lapack_armpl_info |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/slack/tests.py | {
"start": 238,
"end": 1031
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = SlackProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""{
"ok": true,
"sub": "U0R7JM",
"https://slack.com/user_id": "U0R7JM",
"https://slack.com/team_id": "T0R7GR",
"email": "krane@slack-corp.com",
"email_verified": true,
"date_email_verified": 1622128723,
"name": "krane",
"picture": "https://secure.gravatar.com/....png",
"given_name": "Bront",
"family_name": "Labradoodle",
"locale": "en-US",
"https://slack.com/team_name": "kraneflannel",
"https://slack.com/team_domain": "kraneflannel"
}""",
) # noqa
def get_expected_to_str(self):
return "krane@slack-corp.com"
| SlackOAuth2Tests |
python | joke2k__faker | faker/providers/lorem/zh_CN/__init__.py | {
"start": 68,
"end": 5093
} | class ____(LoremProvider):
"""Implement lorem provider for ``zh_CN`` locale."""
word_connector = ""
word_list = (
"一个",
"我们",
"时间",
"中国",
"可以",
"公司",
"没有",
"信息",
"下载",
"软件",
"注册",
"自己",
"产品",
"工作",
"论坛",
"企业",
"这个",
"他们",
"管理",
"已经",
"问题",
"内容",
"使用",
"进行",
"市场",
"服务",
"如果",
"系统",
"技术",
"发展",
"现在",
"作者",
"就是",
"网络",
"提供",
"相关",
"我的",
"文章",
"方式",
"电话",
"发表",
"所有",
"时候",
"因为",
"北京",
"有限",
"公司",
"什么",
"还是",
"开始",
"本站",
"发布",
"自己",
"支持",
"在线",
"国家",
"生活",
"联系",
"积分",
"主题",
"所以",
"不能",
"的人",
"上海",
"中心",
"世界",
"游戏",
"需要",
"价格",
"用户",
"通过",
"要求",
"不是",
"免费",
"个人",
"但是",
"地址",
"网站",
"情况",
"最后",
"设计",
"同时",
"这些",
"活动",
"手机",
"推荐",
"一些",
"主要",
"大家",
"发现",
"目前",
"文件",
"你的",
"不过",
"评论",
"生产",
"美国",
"图片",
"经济",
"功能",
"国际",
"的是",
"选择",
"其他",
"这样",
"会员",
"环境",
"来自",
"日期",
"成为",
"他的",
"最新",
"专业",
"一下",
"人员",
"任何",
"教育",
"资料",
"状态",
"都是",
"点击",
"为了",
"不会",
"出现",
"知道",
"社会",
"名称",
"而且",
"介绍",
"音乐",
"等级",
"可能",
"这种",
"建设",
"朋友",
"虽然",
"电子",
"资源",
"看到",
"精华",
"电影",
"如何",
"新闻",
"阅读",
"安全",
"全国",
"只有",
"回复",
"大学",
"学生",
"学习",
"关于",
"项目",
"不同",
"以及",
"有关",
"那么",
"开发",
"还有",
"只是",
"非常",
"研究",
"广告",
"首页",
"方法",
"希望",
"地方",
"也是",
"单位",
"怎么",
"应该",
"今天",
"以上",
"更新",
"帖子",
"显示",
"能力",
"电脑",
"记者",
"查看",
"位置",
"不要",
"由于",
"无法",
"详细",
"投资",
"是一",
"一般",
"进入",
"发生",
"这里",
"感觉",
"更多",
"你们",
"的话",
"起来",
"标准",
"一样",
"认为",
"女人",
"那个",
"设备",
"搜索",
"之后",
"然后",
"学校",
"销售",
"组织",
"说明",
"提高",
"为什",
"作品",
"或者",
"喜欢",
"东西",
"方面",
"简介",
"必须",
"经营",
"科技",
"作为",
"其中",
"运行",
"工程",
"解决",
"操作",
"经验",
"地区",
"重要",
"直接",
"登录",
"合作",
"结果",
"影响",
"这是",
"行业",
"对于",
"表示",
"程序",
"包括",
"留言",
"规定",
"处理",
"男人",
"各种",
"部门",
"数据",
"具有",
"商品",
"系列",
"大小",
"因此",
"关系",
"可是",
"比较",
"文化",
"一直",
"法律",
"这么",
"您的",
"城市",
"分析",
"基本",
"最大",
"类别",
"两个",
"日本",
"得到",
"一次",
"继续",
"成功",
"她的",
"责任",
"深圳",
"业务",
"欢迎",
"加入",
"能够",
"觉得",
"部分",
"中文",
"根据",
"人民",
"政府",
"控制",
"其实",
"之间",
"一种",
"威望",
"实现",
"语言",
"出来",
"谢谢",
"社区",
"品牌",
"是否",
"工具",
"完全",
"决定",
"很多",
"网上",
"事情",
"今年",
"国内",
"以后",
"制作",
"浏览",
"过程",
"完成",
"类型",
"来源",
"质量",
"有些",
"一起",
"当然",
"汽车",
"一点",
"帮助",
"增加",
"历史",
"以下",
"不断",
"应用",
"那些",
"密码",
"计划",
"如此",
"次数",
"到了",
"拥有",
"孩子",
"原因",
"参加",
"只要",
"报告",
"当前",
"客户",
"正在",
"注意",
"标题",
"空间",
"一定",
"一切",
"特别",
"全部",
"准备",
)
parts_of_speech: Dict[str, tuple] = {}
| Provider |
python | boto__boto3 | tests/functional/dynamodb/test_table.py | {
"start": 652,
"end": 4390
} | class ____(unittest.TestCase):
maxDiff = None
def setUp(self):
self.resource = boto3.resource('dynamodb', 'us-east-1')
self.table = self.resource.Table('mytable')
def test_resource_has_batch_writer_added(self):
assert hasattr(self.table, 'batch_writer')
def test_operation_without_output(self):
stubber = Stubber(self.table.meta.client)
stubber.add_response('tag_resource', {})
arn = 'arn:aws:dynamodb:us-west-2:123456789:table/mytable'
with stubber:
self.table.meta.client.tag_resource(
ResourceArn=arn, Tags=[{'Key': 'project', 'Value': 'val'}]
)
stubber.assert_no_pending_responses()
def test_batch_write_does_not_double_serialize(self):
# If multiple items reference the same Python object, the
# object does not get double-serialized.
# https://github.com/boto/boto3/issues/3474
used_twice = {'pkey': 'foo1', 'otherfield': {'foo': 1, 'bar': 2}}
batch_writer = self.table.batch_writer()
# The default Stubber compares the request payload to the
# "expected_params" before automatic serialization happens. This custom
# event handler uses the same technique as the Stubber to record the
# serialized request body, but later in the request lifecycle.
class LateStubber:
def __init__(self, client):
self.intercepted_request_body = None
client.meta.events.register_first(
'before-call.*.*',
self.late_request_interceptor,
)
def late_request_interceptor(self, event_name, params, **kwargs):
if self.intercepted_request_body is not None:
raise AssertionError(
'LateStubber was called more than once, but only one '
'request is expected'
)
body_str = params.get('body', b'').decode('utf-8')
try:
self.intercepted_request_body = json.loads(body_str)
except Exception:
raise AssertionError(
'Expected JSON request body, but failed to JSON decode'
)
late_stubber = LateStubber(self.table.meta.client)
with Stubber(self.table.meta.client) as stubber:
stubber.add_response(
'batch_write_item',
service_response={'UnprocessedItems': {}},
)
batch_writer.put_item(Item=used_twice)
batch_writer.put_item(Item=used_twice)
batch_writer._flush()
expected_request_body = {
'RequestItems': {
'mytable': [
{
'PutRequest': {
'Item': {
'pkey': {'S': 'foo1'},
'otherfield': {
'M': {'foo': {'N': '1'}, 'bar': {'N': '2'}}
},
}
}
},
{
'PutRequest': {
'Item': {
'pkey': {'S': 'foo1'},
'otherfield': {
'M': {'foo': {'N': '1'}, 'bar': {'N': '2'}}
},
}
}
},
]
}
}
assert late_stubber.intercepted_request_body == expected_request_body
| TestTableResourceCustomizations |
python | kamyu104__LeetCode-Solutions | Python/utf-8-validation.py | {
"start": 29,
"end": 644
} | class ____(object):
def validUtf8(self, data):
"""
:type data: List[int]
:rtype: bool
"""
count = 0
for c in data:
if count == 0:
if (c >> 5) == 0b110:
count = 1
elif (c >> 4) == 0b1110:
count = 2
elif (c >> 3) == 0b11110:
count = 3
elif (c >> 7):
return False
else:
if (c >> 6) != 0b10:
return False
count -= 1
return count == 0
| Solution |
python | getsentry__sentry-python | sentry_sdk/crons/decorator.py | {
"start": 592,
"end": 3913
} | class ____: # noqa: N801
"""
Decorator/context manager to capture checkin events for a monitor.
Usage (as decorator):
```
import sentry_sdk
app = Celery()
@app.task
@sentry_sdk.monitor(monitor_slug='my-fancy-slug')
def test(arg):
print(arg)
```
This does not have to be used with Celery, but if you do use it with celery,
put the `@sentry_sdk.monitor` decorator below Celery's `@app.task` decorator.
Usage (as context manager):
```
import sentry_sdk
def test(arg):
with sentry_sdk.monitor(monitor_slug='my-fancy-slug'):
print(arg)
```
"""
def __init__(self, monitor_slug=None, monitor_config=None):
# type: (Optional[str], Optional[MonitorConfig]) -> None
self.monitor_slug = monitor_slug
self.monitor_config = monitor_config
def __enter__(self):
# type: () -> None
self.start_timestamp = now()
self.check_in_id = capture_checkin(
monitor_slug=self.monitor_slug,
status=MonitorStatus.IN_PROGRESS,
monitor_config=self.monitor_config,
)
def __exit__(self, exc_type, exc_value, traceback):
# type: (Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]) -> None
duration_s = now() - self.start_timestamp
if exc_type is None and exc_value is None and traceback is None:
status = MonitorStatus.OK
else:
status = MonitorStatus.ERROR
capture_checkin(
monitor_slug=self.monitor_slug,
check_in_id=self.check_in_id,
status=status,
duration=duration_s,
monitor_config=self.monitor_config,
)
if TYPE_CHECKING:
@overload
def __call__(self, fn):
# type: (Callable[P, Awaitable[Any]]) -> Callable[P, Awaitable[Any]]
# Unfortunately, mypy does not give us any reliable way to type check the
# return value of an Awaitable (i.e. async function) for this overload,
# since calling iscouroutinefunction narrows the type to Callable[P, Awaitable[Any]].
...
@overload
def __call__(self, fn):
# type: (Callable[P, R]) -> Callable[P, R]
...
def __call__(
self,
fn, # type: Union[Callable[P, R], Callable[P, Awaitable[Any]]]
):
# type: (...) -> Union[Callable[P, R], Callable[P, Awaitable[Any]]]
if iscoroutinefunction(fn):
return self._async_wrapper(fn)
else:
if TYPE_CHECKING:
fn = cast("Callable[P, R]", fn)
return self._sync_wrapper(fn)
def _async_wrapper(self, fn):
# type: (Callable[P, Awaitable[Any]]) -> Callable[P, Awaitable[Any]]
@wraps(fn)
async def inner(*args: "P.args", **kwargs: "P.kwargs"):
# type: (...) -> R
with self:
return await fn(*args, **kwargs)
return inner
def _sync_wrapper(self, fn):
# type: (Callable[P, R]) -> Callable[P, R]
@wraps(fn)
def inner(*args: "P.args", **kwargs: "P.kwargs"):
# type: (...) -> R
with self:
return fn(*args, **kwargs)
return inner
| monitor |
python | numba__numba | numba/tests/test_api.py | {
"start": 131,
"end": 791
} | class ____(TestCase):
"""
Test the APIs exposed by the top-level `numba` module.
"""
def check_member(self, name):
self.assertTrue(hasattr(numba, name), name)
self.assertIn(name, numba.__all__)
@always_test
def test_numba_module(self):
# jit
self.check_member("jit")
self.check_member("vectorize")
self.check_member("guvectorize")
self.check_member("njit")
# errors
self.check_member("NumbaError")
self.check_member("TypingError")
# types
self.check_member("int32")
# misc
numba.__version__ # not in __all__
| TestNumbaModule |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 504423,
"end": 506892
} | class ____(Response):
"""
Response of tasks.started endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
:param started: Number of tasks started (0 or 1)
:type started: int
"""
_service = "tasks"
_action = "started"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"started": {
"description": "Number of tasks started (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated=None, fields=None, started=None, **kwargs):
super(StartedResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
self.started = started
@schema_property("updated")
def updated(self):
return self._property_updated
@updated.setter
def updated(self, value):
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self):
return self._property_fields
@fields.setter
def fields(self, value):
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
@schema_property("started")
def started(self):
return self._property_started
@started.setter
def started(self, value):
if value is None:
self._property_started = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "started", six.integer_types)
self._property_started = value
| StartedResponse |
python | ray-project__ray | python/ray/train/v2/_internal/execution/callback.py | {
"start": 2937,
"end": 5377
} | class ____(RayTrainCallback):
def after_controller_start(self, train_run_context: "TrainRunContext"):
"""Called immediately after `TrainController.run` is called,
before the control loop starts executing."""
pass
# TODO(matthewdeng): Revisit this callback interface for better extensibility.
# This hook was added for the specific use case of setting a `bundle_label_selector`
# for new worker groups (e.g., for TPU reservations). The current interface is
# tightly coupled to this purpose and limits its reuse for other use-cases.
def on_controller_start_worker_group(
self, *, scaling_config: ScalingConfig, num_workers: int
) -> Optional[Dict[str, str]]:
"""Called by the TrainController before the worker group is started.
This hook can be used to perform setup that modifies the worker group's
placement, such as reserving an accelerator slice.
Args:
scaling_config: The scaling configuration for the run.
num_workers: The number of workers to be started.
Returns:
An optional dictionary defining a `bundle_label_selector`
to gang schedule the worker group on the reserved TPU slice.
"""
return None
def before_controller_shutdown(self):
"""Called before `TrainController.run` exits,
after the control loop has exited."""
pass
def after_controller_state_update(
self,
previous_state: "TrainControllerState",
current_state: "TrainControllerState",
):
"""Called whenever the controller state is updated."""
pass
def before_controller_execute_failure_decision(
self,
failure_decision: "FailureDecision",
):
"""Called before the controller executes a failure decision."""
pass
def before_controller_execute_resize_decision(
self,
resize_decision: "ResizeDecision",
):
"""Called before the controller executes a resize decision."""
pass
def after_controller_finish(self, result: "Result"):
"""Called after the training run completes, providing access to the final result.
Args:
result: The final training result containing metrics and checkpoint.
"""
pass
# TODO: consider consolidating all metrics into one dict, possibly with UDF
@DeveloperAPI
| ControllerCallback |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/errors.py | {
"start": 16983,
"end": 17290
} | class ____(Exception):
"""This is part of the formal API for implementing post_process
methods on config types. Throw this error to indicate a
that post processing cannot happen, and that the user
must make a configuration and environment change in
order resolve.
"""
| PostProcessingError |
python | Netflix__metaflow | test/core/tests/resume_ubf_basic_foreach.py | {
"start": 72,
"end": 2177
} | class ____(MetaflowTest):
RESUME = True
PRIORITY = 1
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@steps(0, ["start"])
def step_start(self):
self.data = "start"
self.after = False
@steps(0, ["foreach-split-small"], required=True)
def split(self):
self.my_index = None
from metaflow.plugins import InternalTestUnboundedForeachInput
self.arr = InternalTestUnboundedForeachInput(range(2))
@tag("unbounded_test_foreach_internal")
@steps(0, ["foreach-inner-small"], required=True)
def inner(self):
# index must stay constant over multiple steps inside foreach
if self.my_index is None:
self.my_index = self.index
assert_equals(self.my_index, self.index)
assert_equals(self.input, self.arr[self.index])
self.my_input = self.input
@steps(0, ["foreach-join-small"], required=True)
def join(self, inputs):
if is_resumed():
self.data = "resume"
self.after = True
got = sorted([inp.my_input for inp in inputs])
assert_equals(list(range(2)), got)
else:
self.data = "run"
raise ResumeFromHere()
@steps(2, ["all"])
def step_all(self):
if self.after:
assert_equals("resume", self.data)
else:
assert_equals("start", self.data)
def check_results(self, flow, checker):
run = checker.get_run()
if type(checker).__name__ == "CliCheck":
# CliCheck doesn't support enlisting of tasks.
assert run is None
else:
assert run is not None
tasks = run["foreach_inner"].tasks()
task_list = list(tasks)
assert_equals(3, len(task_list))
assert_equals(1, len(list(run["foreach_inner"].control_tasks())))
| BasicUnboundedForeachResumeTest |
python | huggingface__transformers | tests/models/pegasus/test_modeling_pegasus.py | {
"start": 16782,
"end": 23998
} | class ____:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
d_model=16,
decoder_seq_length=7,
is_training=True,
is_decoder=True,
use_attention_mask=True,
use_cache=False,
use_labels=True,
decoder_start_token_id=2,
decoder_ffn_dim=32,
decoder_layers=2,
encoder_attention_heads=4,
decoder_attention_heads=4,
max_position_embeddings=30,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.d_model = d_model
self.hidden_size = d_model
self.num_hidden_layers = decoder_layers
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.num_attention_heads = decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.max_position_embeddings = max_position_embeddings
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.decoder_attention_idx = 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = PegasusConfig(
vocab_size=self.vocab_size,
d_model=self.d_model,
decoder_layers=self.decoder_layers,
num_hidden_layers=self.decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_attention_heads=self.encoder_attention_heads,
decoder_attention_heads=self.decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
max_position_embeddings=self.max_position_embeddings,
is_encoder_decoder=self.is_encoder_decoder,
)
return (
config,
input_ids,
attention_mask,
lm_labels,
)
def create_and_check_decoder_model_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
config.use_cache = True
model = PegasusDecoder(config=config).to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs["past_key_values"]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
model = PegasusDecoder(config=config).to(torch_device).eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(
next_tokens, attention_mask=attn_mask, past_key_values=past_key_values, use_cache=True
)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
| PegasusStandaloneDecoderModelTester |
python | pytorch__pytorch | test/torch_np/numpy_tests/linalg/test_linalg.py | {
"start": 18985,
"end": 19401
} | class ____(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
evalues, evectors = linalg.eig(a)
assert_allclose(
dot_generalized(a, evectors),
np.asarray(evectors) * np.asarray(evalues)[..., None, :],
rtol=get_rtol(evalues.dtype),
)
assert_(consistent_subclass(evectors, a))
@instantiate_parametrized_tests
| EigCases |
python | doocs__leetcode | solution/3400-3499/3452.Sum of Good Numbers/Solution.py | {
"start": 0,
"end": 322
} | class ____:
def sumOfGoodNumbers(self, nums: List[int], k: int) -> int:
ans = 0
for i, x in enumerate(nums):
if i >= k and x <= nums[i - k]:
continue
if i + k < len(nums) and x <= nums[i + k]:
continue
ans += x
return ans
| Solution |
python | gevent__gevent | src/gevent/_config.py | {
"start": 10663,
"end": 11308
} | class ____(ImportableSetting, Setting):
desc = """\
The kind of the loop we use.
On Windows, this defaults to libuv, while on
other platforms it defaults to libev.
"""
default = [
'libev-cext',
'libev-cffi',
'libuv-cffi',
] if not WIN else [
'libuv-cffi',
'libev-cext',
'libev-cffi',
]
shortname_map = { # pylint:disable=dict-init-mutate
'libev-cext': 'gevent.libev.corecext.loop',
'libev-cffi': 'gevent.libev.corecffi.loop',
'libuv-cffi': 'gevent.libuv.loop.loop',
}
shortname_map['libuv'] = shortname_map['libuv-cffi']
| Loop |
python | ZoranPandovski__al-go-rithms | search/TreeCommonAncestor/python/TreeCommonAncestor.py | {
"start": 0,
"end": 981
} | class ____:
# A Binary tree node
# Constructor to create a new node
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def lca(root, n1, n2):
# Base Case
if root is None:
return None
if(root.data > n1 and root.data > n2):
return lca(root.left, n1, n2)
if(root.data < n1 and root.data < n2):
return lca(root.right, n1, n2)
return root
if __name__ == "__main__":
root = Node(20)
root.left = Node(8)
root.right = Node(22)
root.left.left = Node(4)
root.left.right = Node(12)
root.left.right.left = Node(10)
root.left.right.right = Node(14)
n1 = 12 ; n2 = 18
t = lca(root, n1, n2)
print(f"LCA of {n1} and {n2} is {t.data}")
n1 = 16 ; n2 = 10
t = lca(root, n1, n2)
print(f"LCA of {n1} and {n2} is {t.data}")
n1 = 18 ; n2 = 22
t = lca(root, n1, n2)
print(f"LCA of {n1} and {n2} is {t.data}") | Node |
python | sphinx-doc__sphinx | sphinx/events.py | {
"start": 2050,
"end": 11808
} | class ____:
"""Event manager for Sphinx."""
def __init__(self, app: Sphinx) -> None:
self._app = app
self.events = core_events.copy()
self.listeners: dict[str, list[EventListener]] = defaultdict(list)
self.next_listener_id = 0
# pass through errors for debugging.
self._reraise_errors: bool = app.pdb
def add(self, name: str) -> None:
"""Register a custom Sphinx event."""
if name in self.events:
raise ExtensionError(__('Event %r already present') % name)
self.events[name] = ''
@property
def app(self) -> Sphinx:
_deprecation_warning(__name__, 'EventManager.app', remove=(11, 0))
return self._app
# ---- Core events -------------------------------------------------------
@overload
def connect(
self,
name: Literal['config-inited'],
callback: Callable[[Sphinx, Config], None],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['builder-inited'],
callback: Callable[[Sphinx], None],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['env-get-outdated'],
callback: Callable[
[Sphinx, BuildEnvironment, Set[str], Set[str], Set[str]], Sequence[str]
],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['env-before-read-docs'],
callback: Callable[[Sphinx, BuildEnvironment, list[str]], None],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['env-purge-doc'],
callback: Callable[[Sphinx, BuildEnvironment, str], None],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['source-read'],
callback: Callable[[Sphinx, str, list[str]], None],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['include-read'],
callback: Callable[[Sphinx, Path, str, list[str]], None],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['doctree-read'],
callback: Callable[[Sphinx, nodes.document], None],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['env-merge-info'],
callback: Callable[
[Sphinx, BuildEnvironment, Set[str], BuildEnvironment], None
],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['env-updated'],
callback: Callable[[Sphinx, BuildEnvironment], str],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['env-get-updated'],
callback: Callable[[Sphinx, BuildEnvironment], Iterable[str]],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['env-check-consistency'],
callback: Callable[[Sphinx, BuildEnvironment], None],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['write-started'],
callback: Callable[[Sphinx, Builder], None],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['doctree-resolved'],
callback: Callable[[Sphinx, nodes.document, str], None],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['missing-reference'],
callback: Callable[
[Sphinx, BuildEnvironment, addnodes.pending_xref, nodes.TextElement],
nodes.reference | None,
],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['warn-missing-reference'],
callback: Callable[[Sphinx, Domain, addnodes.pending_xref], bool | None],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['build-finished'],
callback: Callable[[Sphinx, Exception | None], None],
priority: int,
) -> int: ...
# ---- Events from builtin builders --------------------------------------
@overload
def connect(
self,
name: Literal['html-collect-pages'],
callback: Callable[[Sphinx], Iterable[tuple[str, dict[str, Any], str]]],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['html-page-context'],
callback: Callable[
[Sphinx, str, str, dict[str, Any], nodes.document], str | None
],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['linkcheck-process-uri'],
callback: Callable[[Sphinx, str], str | None],
priority: int,
) -> int: ...
# ---- Events from builtin extensions-- ----------------------------------
@overload
def connect(
self,
name: Literal['object-description-transform'],
callback: Callable[[Sphinx, str, str, addnodes.desc_content], None],
priority: int,
) -> int: ...
# ---- Events from first-party extensions --------------------------------
@overload
def connect(
self,
name: Literal['autodoc-process-docstring'],
callback: _AutodocProcessDocstringListener,
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['autodoc-before-process-signature'],
callback: Callable[[Sphinx, Any, bool], None],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['autodoc-process-signature'],
callback: _AutodocProcessSignatureListener,
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['autodoc-process-bases'],
callback: Callable[[Sphinx, str, Any, dict[str, bool], list[str]], None],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['autodoc-skip-member'],
callback: _AutodocSkipMemberListener,
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['todo-defined'],
callback: Callable[[Sphinx, todo_node], None],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['viewcode-find-source'],
callback: Callable[
[Sphinx, str],
tuple[str, dict[str, tuple[Literal['class', 'def', 'other'], int, int]]],
],
priority: int,
) -> int: ...
@overload
def connect(
self,
name: Literal['viewcode-follow-imported'],
callback: Callable[[Sphinx, str, str], str | None],
priority: int,
) -> int: ...
# ---- Catch-all ---------------------------------------------------------
@overload
def connect(
self,
name: str,
callback: Callable[..., Any],
priority: int,
) -> int: ...
def connect(self, name: str, callback: Callable[..., Any], priority: int) -> int:
"""Connect a handler to specific event."""
if name not in self.events:
raise ExtensionError(__('Unknown event name: %s') % name)
listener_id = self.next_listener_id
self.next_listener_id += 1
self.listeners[name].append(EventListener(listener_id, callback, priority))
return listener_id
def disconnect(self, listener_id: int) -> None:
"""Disconnect a handler."""
for listeners in self.listeners.values():
for listener in listeners.copy():
if listener.id == listener_id:
listeners.remove(listener)
def emit(
self,
name: str,
*args: Any,
allowed_exceptions: tuple[type[Exception], ...] = (),
) -> list[Any]:
"""Emit a Sphinx event."""
# not every object likes to be repr()'d (think
# random stuff coming via autodoc)
try:
repr_args = repr(args)
except Exception:
pass
else:
logger.debug('[app] emitting event: %r%s', name, repr_args)
results = []
listeners = sorted(self.listeners[name], key=attrgetter('priority'))
for listener in listeners:
try:
results.append(listener.handler(self._app, *args))
except allowed_exceptions:
# pass through the errors specified as *allowed_exceptions*
raise
except SphinxError:
raise
except Exception as exc:
if self._reraise_errors:
raise
modname = safe_getattr(listener.handler, '__module__', None)
raise ExtensionError(
__('Handler %r for event %r threw an exception')
% (listener.handler, name),
exc,
modname=modname,
) from exc
return results
def emit_firstresult(
self,
name: str,
*args: Any,
allowed_exceptions: tuple[type[Exception], ...] = (),
) -> Any:
"""Emit a Sphinx event and returns first result.
This returns the result of the first handler that doesn't return ``None``.
"""
for result in self.emit(name, *args, allowed_exceptions=allowed_exceptions):
if result is not None:
return result
return None
| EventManager |
python | mlflow__mlflow | mlflow/tracing/utils/search.py | {
"start": 1855,
"end": 2493
} | class ____:
"""
Wraps an iterator and allows peeking at the next element without consuming it.
"""
def __init__(self, it):
self.it = iter(it)
self._next = None
def __iter__(self):
return self
def __next__(self):
if self._next is not None:
next_value = self._next
self._next = None
return next_value
return next(self.it)
def peek(self):
if self._next is None:
try:
self._next = next(self.it)
except StopIteration:
return None
return self._next
| _PeekableIterator |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 570181,
"end": 570492
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("Reaction", graphql_name="node")
| ReactionEdge |
python | getsentry__sentry | src/sentry/api/base.py | {
"start": 6228,
"end": 7087
} | class ____(abc.ABC):
"""
Inherit from this class when adding mixin classes that call `Endpoint` methods. This allows typing to
work correctly
"""
@abc.abstractmethod
def create_audit_entry(
self, request: Request, transaction_id=None, *, data: dict[str, Any], **kwargs
):
pass
@abc.abstractmethod
def respond(self, context: object | None = None, **kwargs: Any) -> Response:
pass
@abc.abstractmethod
def paginate(
self,
request,
on_results=None,
paginator=None,
paginator_cls=Paginator,
default_per_page: int | None = None,
max_per_page: int | None = None,
cursor_cls=Cursor,
response_cls=Response,
response_kwargs=None,
count_hits=None,
**paginator_kwargs,
):
pass
| BaseEndpointMixin |
python | huggingface__transformers | tests/models/qwen2_vl/test_video_processing_qwen2_vl.py | {
"start": 1293,
"end": 4551
} | class ____:
def __init__(
self,
parent,
batch_size=5,
num_frames=8,
num_channels=3,
min_resolution=30,
max_resolution=80,
do_resize=True,
size=None,
do_normalize=True,
image_mean=OPENAI_CLIP_MEAN,
image_std=OPENAI_CLIP_STD,
do_convert_rgb=True,
temporal_patch_size=2,
patch_size=14,
min_pixels=20 * 20,
max_pixels=100 * 100,
merge_size=2,
):
size = size if size is not None else {"shortest_edge": 20}
self.parent = parent
self.batch_size = batch_size
self.num_frames = num_frames
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
self.temporal_patch_size = temporal_patch_size
self.patch_size = patch_size
self.min_pixels = min_pixels
self.max_pixels = max_pixels
self.merge_size = merge_size
def prepare_video_processor_dict(self):
return {
"do_resize": self.do_resize,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
"temporal_patch_size": self.temporal_patch_size,
"patch_size": self.patch_size,
"min_pixels": self.min_pixels,
"max_pixels": self.max_pixels,
"merge_size": self.merge_size,
}
@require_vision
def expected_output_video_shape(self, videos, num_frames=None):
num_frames = num_frames if num_frames is not None else self.num_frames
grid_t = num_frames // self.temporal_patch_size
hidden_dim = self.num_channels * self.temporal_patch_size * self.patch_size * self.patch_size
seq_len = 0
for video in videos:
if isinstance(video[0], Image.Image):
video = np.stack([np.array(frame) for frame in video])
height, width = get_image_size(video)
resized_height, resized_width = smart_resize(
height,
width,
factor=self.patch_size * self.merge_size,
min_pixels=self.min_pixels,
max_pixels=self.max_pixels,
)
grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size
seq_len += grid_t * grid_h * grid_w
return [seq_len, hidden_dim]
def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
videos = prepare_video_inputs(
batch_size=self.batch_size,
num_frames=self.num_frames,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
return_tensors=return_tensors,
)
return videos
@require_torch
@require_vision
| Qwen2VLVideoProcessingTester |
python | pydata__xarray | xarray/tests/test_plot.py | {
"start": 113659,
"end": 114554
} | class ____(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self) -> None:
"""
Create a DataArray with a time-axis that contains cftime.datetime
objects.
"""
# case for 1d array
data = np.random.rand(4, 12)
time = xr.date_range(
start="2017", periods=12, freq="1ME", calendar="noleap", use_cftime=True
)
darray = DataArray(data, dims=["x", "time"])
darray.coords["time"] = time
self.darray = darray
def test_cfdatetime_line_plot(self) -> None:
self.darray.isel(x=0).plot.line()
def test_cfdatetime_pcolormesh_plot(self) -> None:
self.darray.plot.pcolormesh()
def test_cfdatetime_contour_plot(self) -> None:
self.darray.plot.contour()
@requires_cftime
@pytest.mark.skipif(has_nc_time_axis, reason="nc_time_axis is installed")
| TestCFDatetimePlot |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_memorystore.py | {
"start": 1913,
"end": 6327
} | class ____(GoogleCloudBaseOperator):
"""
Creates a Redis instance based on the specified tier and memory size.
By default, the instance is accessible from the project's `default network
<https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreCreateInstanceOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: Required. The logical name of the Redis instance in the customer project with the
following restrictions:
- Must contain only lowercase letters, numbers, and hyphens.
- Must start with a letter.
- Must be between 1-40 characters.
- Must end with a number or a letter.
- Must be unique within the customer project / location
:param instance: Required. A Redis [Instance] resource
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.Instance`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"instance_id",
"instance",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (RedisInstanceDetailsLink(),)
def __init__(
self,
*,
location: str,
instance_id: str,
instance: dict | Instance,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.instance_id = instance_id
self.instance = instance
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"instance_id": self.instance_id,
"location_id": self.location,
}
def execute(self, context: Context):
hook = CloudMemorystoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.create_instance(
location=self.location,
instance_id=self.instance_id,
instance=self.instance,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
RedisInstanceDetailsLink.persist(
context=context,
project_id=self.project_id or hook.project_id,
)
return Instance.to_dict(result)
| CloudMemorystoreCreateInstanceOperator |
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_text.py | {
"start": 8130,
"end": 11271
} | class ____(nn.Module):
def __init__(self, config, is_causal=False, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.scaling = self.attention_head_size**-0.5
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.is_decoder = config.is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
# get all proj
query_layer = self.query(hidden_states).view(*hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(*hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*hidden_shape).transpose(1, 2)
if past_key_values is not None:
# decoder-only data2vec_text can have a simple dynamic cache for example
current_past_key_values = past_key_values
if isinstance(past_key_values, EncoderDecoderCache):
current_past_key_values = past_key_values.self_attention_cache
# save all key/value_layer to cache to be re-used for fast auto-regressive generation
key_layer, value_layer = current_past_key_values.update(
key_layer,
value_layer,
self.layer_idx,
{"cache_position": cache_position},
)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_layer,
key_layer,
value_layer,
attention_mask,
dropout=0.0 if not self.training else self.dropout.p,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
return attn_output, attn_weights
| Data2VecTextSelfAttention |
python | numba__numba | numba/tests/test_dicts.py | {
"start": 648,
"end": 6066
} | class ____(TestCase):
"""Testing `dict()` and `{}` usage that are redirected to
`numba.typed.Dict`.
"""
def test_use_dict(self):
# Test dict()
@njit
def foo():
d = dict()
d[1] = 2
return d
d = foo()
self.assertEqual(d, {1: 2})
def test_use_dict_iterable_args(self):
# Test dict(iterable)
@njit
def dict_iterable_1(a, b):
d = dict(zip(a, b))
return d
@njit
def dict_iterable_2():
# from python docs
return dict([('sape', 4139), ('guido', 4127), ('jack', 4098)])
inps = (
([1, 2, 3], [4, 5, 6]),
(np.arange(4), np.arange(4)),
([1, 2, 3], 'abc'),
([1, 2, 3, 4], 'abc'),
)
for a, b in inps:
d = dict_iterable_1(a, b)
self.assertEqual(d, dict(zip(a, b)))
self.assertEqual(dict_iterable_2(), dict_iterable_2.py_func())
def test_ctor_iterable_tuple(self):
@njit
def ctor():
return dict(((1, 2), (1, 2)))
expected = dict({1: 2})
got = ctor()
self.assertEqual(expected, got)
def test_unsupported_dict_usage(self):
# Test dict(dict())
from numba.core.typing.dictdecl import _message_dict_support
@njit
def ctor1():
d = dict()
d[1] = 2
return dict(d)
@njit
def ctor2():
return dict(((1, 2), (3, 'a')))
@njit
def ctor3():
return dict((('a', 'b', 'c'), ('d', 'e', 'f')))
@njit
def ctor4():
return dict((({}, 1), ({}, 2)))
_non_iter_args = "Non-iterable args used in dict(iterable)"
_dict_upd_item_len = "dictionary update sequence element has length 3;"
_unhashable_type = "Unhashable type"
inputs = [
(ctor1, TypingError, _message_dict_support),
(ctor2, TypingError, _non_iter_args),
(ctor3, TypingError, _dict_upd_item_len),
(ctor4, TypingError, _unhashable_type),
]
for func, exc, msg in inputs:
with self.assertRaises(exc) as raises:
func()
self.assertIn(msg, str(raises.exception))
def test_use_curlybraces(self):
# Test {} with empty args
@njit
def foo():
d = {}
d[1] = 2
return d
d = foo()
self.assertEqual(d, {1: 2})
def test_use_curlybraces_with_init1(self):
# Test {} with 1 item
@njit
def foo():
return {1: 2}
d = foo()
self.assertEqual(d, {1: 2})
def test_use_curlybraces_with_initmany(self):
# Test {} with many items
@njit
def foo():
return {1: 2.2, 3: 4.4, 5: 6.6}
d = foo()
self.assertEqual(d, {1: 2.2, 3: 4.4, 5: 6.6})
def test_curlybraces_init_with_coercion(self):
# Type coercion at dict init is tested
@njit
def foo():
return {1: 2.2, 3: 4, 5: 6}
self.assertEqual(foo(), foo.py_func())
def test_use_curlybraces_with_manyvar(self):
# Test using variable in {}
@njit
def foo(x, y):
return {x: 1, y: x + y}
x, y = 10, 20
self.assertEqual(foo(x, y), foo.py_func(x, y))
def test_mixed_curlybraces_and_dict(self):
# Test mixed use of {} and dict()
@njit
def foo():
k = dict()
k[1] = {1: 3}
k[2] = {4: 2}
return k
self.assertEqual(foo(), foo.py_func())
def test_dict_use_with_none_value(self):
# Test that NoneType cannot be used as value for Dict
@njit
def foo():
k = {1: None}
return k
with self.assertRaises(TypingError) as raises:
foo()
self.assertIn(
"Dict.value_type cannot be of type none",
str(raises.exception),
)
def test_dict_use_with_optional_value(self):
# Test that Optional cannot be used as value for Dict
@njit
def foo(choice):
optional = 2.5 if choice else None
k = {1: optional}
return k
with self.assertRaises(TypingError) as raises:
foo(True)
self.assertIn(
"Dict.value_type cannot be of type OptionalType(float64)",
str(raises.exception),
)
def test_dict_use_with_optional_key(self):
# Test that Optional cannot be used as a key for Dict
@njit
def foo(choice):
k = {2.5 if choice else None: 1}
return k
with self.assertRaises(TypingError) as raises:
foo(True)
self.assertIn(
"Dict.key_type cannot be of type OptionalType(float64)",
str(raises.exception),
)
def test_dict_use_with_none_key(self):
# Test that NoneType cannot be used as a key for Dict
@njit
def foo():
k = {None: 1}
return k
with self.assertRaises(TypingError) as raises:
foo()
self.assertIn(
"Dict.key_type cannot be of type none",
str(raises.exception),
)
if __name__ == '__main__':
unittest.main()
| TestCompiledDict |
python | tensorflow__tensorflow | tensorflow/python/framework/config_test.py | {
"start": 1964,
"end": 13771
} | class ____(test.TestCase, parameterized.TestCase):
@test_util.disable_eager_op_as_function('b/204320409')
@test_util.run_gpu_only
@reset_eager
def testDevicePolicy(self):
self.assertEqual(context.DEVICE_PLACEMENT_SILENT,
context.context().device_policy)
# If no op has been executed we should be able to set the device policy as
# well as any init-time configs.
config.set_intra_op_parallelism_threads(1)
config.set_device_policy('silent')
config.set_intra_op_parallelism_threads(2)
context.ensure_initialized()
def copy_tensor(dtype=dtypes.int32):
with ops.device('CPU:0'):
cpu_tensor = constant_op.constant(1, dtype=dtype)
gpu_tensor = cpu_tensor.gpu()
self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0)
config.set_device_policy('silent')
self.assertEqual(config.get_device_policy(), 'silent')
self.assertEqual(context.DEVICE_PLACEMENT_SILENT,
context.context().device_policy)
copy_tensor()
config.set_device_policy('silent_for_int32')
self.assertEqual(config.get_device_policy(), 'silent_for_int32')
self.assertEqual(context.DEVICE_PLACEMENT_SILENT_FOR_INT32,
context.context().device_policy)
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Tensors on conflicting devices'):
copy_tensor(dtypes.float32)
copy_tensor()
config.set_device_policy('warn')
self.assertEqual(config.get_device_policy(), 'warn')
self.assertEqual(context.DEVICE_PLACEMENT_WARN,
context.context().device_policy)
copy_tensor()
config.set_device_policy('explicit')
self.assertEqual(config.get_device_policy(), 'explicit')
self.assertEqual(context.DEVICE_PLACEMENT_EXPLICIT,
context.context().device_policy)
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Tensors on conflicting devices'):
copy_tensor()
config.set_device_policy(None)
self.assertEqual(config.get_device_policy(), 'silent')
@reset_eager
def testExecutionMode(self):
self.assertTrue(config.get_synchronous_execution())
self.assertEqual(context.SYNC, context.context().execution_mode)
# If no op has been executed we should be able to set the execution mode as
# well as any init-time configs.
config.set_intra_op_parallelism_threads(1)
config.set_synchronous_execution(False)
config.set_intra_op_parallelism_threads(2)
config.set_synchronous_execution(True)
self.assertTrue(config.get_synchronous_execution())
self.assertEqual(context.SYNC, context.context().execution_mode)
config.set_synchronous_execution(False)
self.assertFalse(config.get_synchronous_execution())
self.assertEqual(context.ASYNC, context.context().execution_mode)
@reset_eager
def testIntraOpParallelismThreads(self):
config.set_intra_op_parallelism_threads(10)
self.assertEqual(config.get_intra_op_parallelism_threads(),
context.context().intra_op_parallelism_threads)
context.ensure_initialized()
with self.assertRaises(RuntimeError):
config.set_intra_op_parallelism_threads(1)
config.set_intra_op_parallelism_threads(10)
@reset_eager
def testInterOpParallelismThreads(self):
config.set_inter_op_parallelism_threads(10)
self.assertEqual(config.get_inter_op_parallelism_threads(),
context.context().inter_op_parallelism_threads)
context.ensure_initialized()
with self.assertRaises(RuntimeError):
config.set_inter_op_parallelism_threads(1)
config.set_inter_op_parallelism_threads(10)
@test_util.run_gpu_only
@reset_eager
def testSoftPlacement(self):
if context.executing_eagerly():
self.assertTrue(config.get_soft_device_placement())
else:
self.assertFalse(config.get_soft_device_placement())
def test_attr():
with ops.device('/device:GPU:0'):
return test_ops.test_attr(T=dtypes.float32, name='test_attr')
config.set_soft_device_placement(True)
self.assertEqual(config.get_soft_device_placement(), True)
self.assertEqual(config.get_soft_device_placement(),
context.context().soft_device_placement)
# Since soft placement is enabled, the test_attr operation should fallback
# to CPU with pure eager execution as well as functions
test_attr()
def_function.function(test_attr)()
config.set_soft_device_placement(False)
self.assertEqual(config.get_soft_device_placement(), False)
self.assertEqual(config.get_soft_device_placement(),
context.context().soft_device_placement)
# Since soft placement is disabled, the test_attr operation should fail on
# GPU with pure eager execution as well as functions
with self.assertRaises(errors.InvalidArgumentError):
test_attr()
with self.assertRaises(errors.InvalidArgumentError):
def_function.function(test_attr)()
@reset_eager
def testLogDevicePlacement(self):
self.assertFalse(context.get_log_device_placement())
context.set_log_device_placement(True)
self.assertEqual(context.get_log_device_placement(), True)
self.assertEqual(context.get_log_device_placement(),
context.context().log_device_placement)
context.set_log_device_placement(False)
self.assertEqual(context.get_log_device_placement(), False)
self.assertEqual(context.get_log_device_placement(),
context.context().log_device_placement)
context.ensure_initialized()
# Changing the device placement should not throw an exception
context.set_log_device_placement(True)
@reset_eager
def testEnableMlirBridge(self):
# Default value of enable_mlir_bridge is false.
self.assertFalse(context.context().config.experimental.enable_mlir_bridge)
self.assertEqual(
context.context().config.experimental.mlir_bridge_rollout,
config_pb2.ConfigProto.Experimental.MLIR_BRIDGE_ROLLOUT_UNSPECIFIED)
# Tests enabling mlir bridge.
config.enable_mlir_bridge()
self.assertTrue(context.context().config.experimental.enable_mlir_bridge)
self.assertEqual(
context.context().config.experimental.mlir_bridge_rollout,
config_pb2.ConfigProto.Experimental.MLIR_BRIDGE_ROLLOUT_ENABLED)
# Tests disabling mlir bridge.
config.disable_mlir_bridge()
self.assertFalse(context.context().config.experimental.enable_mlir_bridge)
self.assertEqual(
context.context().config.experimental.mlir_bridge_rollout,
config_pb2.ConfigProto.Experimental.MLIR_BRIDGE_ROLLOUT_DISABLED)
@reset_eager
def testResetMlirFlags(self):
# Default value of enable_mlir_bridge is false.
self.assertFalse(context.context().config.experimental.enable_mlir_bridge)
self.assertEqual(
context.context().config.experimental.mlir_bridge_rollout,
config_pb2.ConfigProto.Experimental.MLIR_BRIDGE_ROLLOUT_UNSPECIFIED)
@test_util.run_gpu_only
@reset_eager
def testJit(self):
self.assertEqual(config.get_optimizer_jit(), '')
# the following function should cause Op fusion to occur. However, there is
# unfortunately no straightforward way to ensure this. We will just have to
# settle for creating a test that can trigger JIT.
@def_function.function
def fun(a, b):
c = a * b
d = c + a
return d
a = constant_op.constant([2., 2.])
b = constant_op.constant([2., 2.])
self.evaluate(fun(a, b))
config.set_optimizer_jit('autoclustering')
self.assertEqual(config.get_optimizer_jit(), 'autoclustering')
self.evaluate(fun(a, b))
config.set_optimizer_jit('')
self.assertEqual(config.get_optimizer_jit(), '')
self.evaluate(fun(a, b))
@parameterized.named_parameters(
('LayoutOptimizer', 'layout_optimizer'),
('ConstantFolding', 'constant_folding'),
('ShapeOptimization', 'shape_optimization'), ('Remapping', 'remapping'),
('ArithmeticOptimization', 'arithmetic_optimization'),
('DependencyOptimization', 'dependency_optimization'),
('LoopOptimization', 'loop_optimization'),
('FunctionOptimization', 'function_optimization'),
('DebugStripper', 'debug_stripper'),
('ScopedAllocatorOptimization', 'scoped_allocator_optimization'),
('ImplementationSelector', 'implementation_selector'),
('AutoMixedPrecision', 'auto_mixed_precision'))
@reset_eager
def testOptimizerToggleOption(self, field):
# TODO(b/128531235): Improve testing of option
options = config.get_optimizer_experimental_options()
self.assertIsNone(options.get(field))
config.set_optimizer_experimental_options({field: True})
options[field] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(context.context().get_optimizer_experimental_options(),
options)
config.set_optimizer_experimental_options({field: False})
options[field] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(context.context().get_optimizer_experimental_options(),
options)
@parameterized.named_parameters(
('DisableModelPruning', 'disable_model_pruning'),
('DisableMetaOptimizer', 'disable_meta_optimizer'))
@reset_eager
def testOptimizerBoolOption(self, field):
# TODO(b/128531235): Improve testing of option
options = config.get_optimizer_experimental_options()
self.assertFalse(options.get(field))
config.set_optimizer_experimental_options({field: True})
options[field] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(context.context().get_optimizer_experimental_options(),
options)
config.set_optimizer_experimental_options({field: False})
options[field] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(context.context().get_optimizer_experimental_options(),
options)
@test_util.run_gpu_only
@reset_eager
def testOptimizerToggleOptionPinToHost(self):
options = config.get_optimizer_experimental_options()
self.assertIsNone(options.get('pin_to_host_optimization'))
@def_function.function
def fun():
op = test_ops.device_placement_op()
return op
# Force optimizer to run for all graphs
config.set_optimizer_experimental_options({'min_graph_nodes': -1})
options['min_graph_nodes'] = -1
# Since pin to host is disabled, the operation should go on GPU
gpu = self.evaluate(fun())
self.assertIn(compat.as_bytes('GPU'), gpu)
config.set_optimizer_experimental_options(
{'pin_to_host_optimization': True})
options['pin_to_host_optimization'] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(context.context().get_optimizer_experimental_options(),
options)
# Since pin to host is enabled, the operation should go on CPU
cpu = self.evaluate(fun())
self.assertIn(compat.as_bytes('CPU'), cpu)
config.set_optimizer_experimental_options(
{'pin_to_host_optimization': False})
options['pin_to_host_optimization'] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(context.context().get_optimizer_experimental_options(),
options)
# Since pin to host is disabled again, the operation should go on GPU
gpu2 = self.evaluate(fun())
self.assertIn(compat.as_bytes('GPU'), gpu2)
| ConfigTest |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/inputs.py | {
"start": 5378,
"end": 5707
} | class ____(graphene.InputObjectType):
repositoryName = graphene.NonNull(graphene.String)
repositoryLocationName = graphene.NonNull(graphene.String)
class Meta:
description = """This type represents the fields necessary to identify a repository."""
name = "RepositorySelector"
| GrapheneRepositorySelector |
python | getsentry__sentry | src/sentry/api/endpoints/prompts_activity.py | {
"start": 808,
"end": 1336
} | class ____(serializers.Serializer):
feature = serializers.CharField(required=True)
status = serializers.ChoiceField(
choices=list(zip(VALID_STATUSES, VALID_STATUSES)), required=True
)
def validate_feature(self, value):
if value is None:
raise serializers.ValidationError("Must specify feature name")
if not prompt_config.has(value):
raise serializers.ValidationError("Not a valid feature prompt")
return value
@region_silo_endpoint
| PromptsActivitySerializer |
python | huggingface__transformers | src/transformers/models/hgnet_v2/configuration_hgnet_v2.py | {
"start": 1433,
"end": 8535
} | class ____(BackboneConfigMixin, PreTrainedConfig):
"""
This is the configuration class to store the configuration of a [`HGNetV2Backbone`]. It is used to instantiate a HGNet-V2
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of D-FINE-X-COCO B4 "[ustc-community/dfine_x_coco"](https://huggingface.co/ustc-community/dfine_x_coco").
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embedding_size (`int`, *optional*, defaults to 64):
Dimensionality (hidden size) for the embedding layer.
depths (`list[int]`, *optional*, defaults to `[3, 4, 6, 3]`):
Depth (number of layers) for each stage.
hidden_sizes (`list[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):
Dimensionality (hidden size) at each stage.
hidden_act (`str`, *optional*, defaults to `"relu"`):
The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
are supported.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
stem_channels (`list[int]`, *optional*, defaults to `[3, 32, 48]`):
Channel dimensions for the stem layers:
- First number (3) is input image channels
- Second number (32) is intermediate stem channels
- Third number (48) is output stem channels
stage_in_channels (`list[int]`, *optional*, defaults to `[48, 128, 512, 1024]`):
Input channel dimensions for each stage of the backbone.
This defines how many channels the input to each stage will have.
stage_mid_channels (`list[int]`, *optional*, defaults to `[48, 96, 192, 384]`):
Mid-channel dimensions for each stage of the backbone.
This defines the number of channels used in the intermediate layers of each stage.
stage_out_channels (`list[int]`, *optional*, defaults to `[128, 512, 1024, 2048]`):
Output channel dimensions for each stage of the backbone.
This defines how many channels the output of each stage will have.
stage_num_blocks (`list[int]`, *optional*, defaults to `[1, 1, 3, 1]`):
Number of blocks to be used in each stage of the backbone.
This controls the depth of each stage by specifying how many convolutional blocks to stack.
stage_downsample (`list[bool]`, *optional*, defaults to `[False, True, True, True]`):
Indicates whether to downsample the feature maps at each stage.
If `True`, the spatial dimensions of the feature maps will be reduced.
stage_light_block (`list[bool]`, *optional*, defaults to `[False, False, True, True]`):
Indicates whether to use light blocks in each stage.
Light blocks are a variant of convolutional blocks that may have fewer parameters.
stage_kernel_size (`list[int]`, *optional*, defaults to `[3, 3, 5, 5]`):
Kernel sizes for the convolutional layers in each stage.
stage_numb_of_layers (`list[int]`, *optional*, defaults to `[6, 6, 6, 6]`):
Number of layers to be used in each block of the stage.
use_learnable_affine_block (`bool`, *optional*, defaults to `False`):
Whether to use Learnable Affine Blocks (LAB) in the network.
LAB adds learnable scale and bias parameters after certain operations.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
"""
model_type = "hgnet_v2"
def __init__(
self,
num_channels=3,
embedding_size=64,
depths=[3, 4, 6, 3],
hidden_sizes=[256, 512, 1024, 2048],
hidden_act="relu",
out_features=None,
out_indices=None,
stem_channels=[3, 32, 48],
stage_in_channels=[48, 128, 512, 1024],
stage_mid_channels=[48, 96, 192, 384],
stage_out_channels=[128, 512, 1024, 2048],
stage_num_blocks=[1, 1, 3, 1],
stage_downsample=[False, True, True, True],
stage_light_block=[False, False, True, True],
stage_kernel_size=[3, 3, 5, 5],
stage_numb_of_layers=[6, 6, 6, 6],
use_learnable_affine_block=False,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.num_channels = num_channels
self.embedding_size = embedding_size
self.depths = depths
self.hidden_sizes = hidden_sizes
self.hidden_act = hidden_act
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
self.stem_channels = stem_channels
self.stage_in_channels = stage_in_channels
self.stage_mid_channels = stage_mid_channels
self.stage_out_channels = stage_out_channels
self.stage_num_blocks = stage_num_blocks
self.stage_downsample = stage_downsample
self.stage_light_block = stage_light_block
self.stage_kernel_size = stage_kernel_size
self.stage_numb_of_layers = stage_numb_of_layers
self.use_learnable_affine_block = use_learnable_affine_block
self.initializer_range = initializer_range
if not (
len(stage_in_channels)
== len(stage_mid_channels)
== len(stage_out_channels)
== len(stage_num_blocks)
== len(stage_downsample)
== len(stage_light_block)
== len(stage_kernel_size)
== len(stage_numb_of_layers)
):
raise ValueError("All stage configuration lists must have the same length.")
__all__ = ["HGNetV2Config"]
| HGNetV2Config |
python | ray-project__ray | release/long_running_tests/workloads/serve.py | {
"start": 1399,
"end": 4068
} | class ____:
@serve.batch(max_batch_size=MAX_BATCH_SIZE)
async def handle_batch(self, requests):
time.sleep(0.01)
return ["hi" for _ in range(len(requests))]
async def __call__(self, request):
return await self.handle_batch(request)
serve.run(Echo.bind(), route_prefix="/echo")
print("Warming up")
for _ in range(5):
resp = requests.get("http://127.0.0.1:8000/echo").text
print(resp)
time.sleep(0.5)
print("Started load testing with the following config: ")
print(f"num_connections: {NUM_CONNECTIONS}")
print(f"num_threads: {NUM_THREADS}")
print(f"time_per_cycle: {TIME_PER_CYCLE}")
while True:
proc = subprocess.Popen(
[
"wrk",
"-c",
str(NUM_CONNECTIONS),
"-t",
str(NUM_THREADS),
"-d",
TIME_PER_CYCLE,
"http://127.0.0.1:8000/echo",
],
stdout=PIPE,
stderr=PIPE,
)
proc.wait()
out, err = proc.communicate()
# Check if wrk command succeeded. If this happens repeatedly, the release test
# infrastructure will correctly fail the test with "Last update to results json
# was too long ago."
if proc.returncode != 0:
print("wrk failed with the following error: ")
print(err)
print("Will try again in 5 seconds")
time.sleep(5)
continue
# Sample wrk stdout:
#
# Running 10s test @ http://127.0.0.1:8000/echo
# 2 threads and 84 connections
# Thread Stats Avg Stdev Max +/- Stdev
# Latency 59.33ms 13.51ms 113.83ms 64.20%
# Req/Sec 709.16 61.73 848.00 78.50%
# 14133 requests in 10.02s, 2.08MB read
# Requests/sec: 1410.71
# Transfer/sec: 212.16KB
metrics_dict = {}
for line in out.decode().splitlines():
parsed = re.split(r"\s+", line.strip())
if parsed[0] == "Latency":
metrics_dict["latency_avg"] = parsed[1]
metrics_dict["latency_stdev"] = parsed[2]
metrics_dict["latency_max"] = parsed[3]
metrics_dict["latency_+/-_stdev"] = parsed[4]
elif parsed[0] == "Req/Sec":
metrics_dict["req/sec_avg"] = parsed[1]
metrics_dict["req/sec_stdev"] = parsed[2]
metrics_dict["req/sec_max"] = parsed[3]
metrics_dict["req/sec_+/-_stdev"] = parsed[4]
elif parsed[0] == "Requests/sec:":
metrics_dict["requests/sec"] = parsed[1]
elif parsed[0] == "Transfer/sec:":
metrics_dict["transfer/sec"] = parsed[1]
print(out.decode())
print(err.decode())
update_progress(metrics_dict)
| Echo |
python | weaviate__weaviate-python-client | weaviate/rbac/models.py | {
"start": 4127,
"end": 4350
} | class ____(str, _Action, Enum):
READ = "read_groups"
ASSIGN_AND_REVOKE = "assign_and_revoke_groups"
@staticmethod
def values() -> List[str]:
return [action.value for action in GroupAction]
| GroupAction |
python | huggingface__transformers | tests/models/owlv2/test_modeling_owlv2.py | {
"start": 23441,
"end": 35622
} | class ____(unittest.TestCase):
@slow
def test_inference(self):
model_name = "google/owlv2-base-patch16"
model = Owlv2Model.from_pretrained(model_name).to(torch_device)
image_processor = OwlViTImageProcessor.from_pretrained(model_name)
processor = OwlViTProcessor.from_pretrained(model_name, image_processor=image_processor)
image = prepare_img()
inputs = processor(
text=[["a photo of a cat", "a photo of a dog"]],
images=image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
self.assertEqual(
outputs.logits_per_image.shape,
torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
)
self.assertEqual(
outputs.logits_per_text.shape,
torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
expected_logits = torch.tensor([[-6.2229, -8.2601]], device=torch_device)
torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
@slow
def test_inference_interpolate_pos_encoding(self):
model_name = "google/owlv2-base-patch16"
model = Owlv2Model.from_pretrained(model_name).to(torch_device)
image_processor = OwlViTImageProcessor.from_pretrained(model_name)
processor = OwlViTProcessor.from_pretrained(model_name, image_processor=image_processor)
processor.image_processor.size = {"height": 1024, "width": 1024}
image = prepare_img()
inputs = processor(
text=[["a photo of a cat", "a photo of a dog"]],
images=image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs, interpolate_pos_encoding=True)
# verify the logits
self.assertEqual(
outputs.logits_per_image.shape,
torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
)
self.assertEqual(
outputs.logits_per_text.shape,
torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
expected_logits = torch.tensor([[-6.2520, -8.2970]], device=torch_device)
torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
expected_shape = torch.Size((1, 4097, 768))
self.assertEqual(outputs.vision_model_output.last_hidden_state.shape, expected_shape)
# Owlv2ForObjectDetection part.
model = Owlv2ForObjectDetection.from_pretrained(model_name).to(torch_device)
processor.image_processor.size = {"height": 1024, "width": 1024}
with torch.no_grad():
outputs = model(**inputs, interpolate_pos_encoding=True)
num_queries = int((inputs.pixel_values.shape[-1] / model.config.vision_config.patch_size) ** 2)
self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4)))
expected_slice_boxes = torch.tensor(
[[0.2407, 0.0553, 0.4636], [0.1082, 0.0494, 0.1861], [0.2459, 0.0527, 0.4398]]
).to(torch_device)
torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
model = Owlv2ForObjectDetection.from_pretrained(model_name).to(torch_device)
query_image = prepare_img()
inputs = processor(
images=image,
query_images=query_image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model.image_guided_detection(**inputs, interpolate_pos_encoding=True)
# No need to check the logits, we just check inference runs fine.
num_queries = int((inputs.pixel_values.shape[-1] / model.config.vision_config.patch_size) ** 2)
self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4)))
# Deactivate interpolate_pos_encoding on same model, and use default image size.
# Verify the dynamic change caused by the activation/deactivation of interpolate_pos_encoding of variables: self.sqrt_num_patches, self.box_bias from (OwlViTForObjectDetection).
image_processor = OwlViTImageProcessor.from_pretrained(model_name)
processor = OwlViTProcessor.from_pretrained(model_name, image_processor=image_processor)
image = prepare_img()
inputs = processor(
text=[["a photo of a cat", "a photo of a dog"]],
images=image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model(**inputs, interpolate_pos_encoding=False)
num_queries = int((inputs.pixel_values.shape[-1] // model.config.vision_config.patch_size) ** 2)
self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4)))
expected_default_box_bias = torch.tensor(
[
[-4.0717, -4.0717, -4.0717, -4.0717],
[-3.3644, -4.0717, -4.0717, -4.0717],
[-2.9425, -4.0717, -4.0717, -4.0717],
]
)
torch.testing.assert_close(model.box_bias[:3, :4], expected_default_box_bias, rtol=1e-4, atol=1e-4)
# Interpolate with any resolution size.
processor.image_processor.size = {"height": 1264, "width": 1024}
image = prepare_img()
inputs = processor(
text=[["a photo of a cat", "a photo of a dog"]],
images=image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model(**inputs, interpolate_pos_encoding=True)
num_queries = int(
(inputs.pixel_values.shape[-2] // model.config.vision_config.patch_size)
* (inputs.pixel_values.shape[-1] // model.config.vision_config.patch_size)
)
self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4)))
expected_slice_boxes = torch.tensor(
[[0.2438, 0.0945, 0.4675], [0.1361, 0.0431, 0.2406], [0.2465, 0.0428, 0.4429]]
).to(torch_device)
torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
query_image = prepare_img()
inputs = processor(
images=image,
query_images=query_image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model.image_guided_detection(**inputs, interpolate_pos_encoding=True)
# No need to check the logits, we just check inference runs fine.
num_queries = int(
(inputs.pixel_values.shape[-2] // model.config.vision_config.patch_size)
* (inputs.pixel_values.shape[-1] // model.config.vision_config.patch_size)
)
self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4)))
@slow
def test_inference_object_detection(self):
model_name = "google/owlv2-base-patch16"
model = Owlv2ForObjectDetection.from_pretrained(model_name).to(torch_device)
image_processor = OwlViTImageProcessor.from_pretrained(model_name)
processor = OwlViTProcessor.from_pretrained(model_name, image_processor=image_processor)
image = prepare_img()
text_labels = [["a photo of a cat", "a photo of a dog"]]
inputs = processor(
text=text_labels,
images=image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2)
self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4)))
expected_slice_logits = torch.tensor(
[[-21.413497, -21.612638], [-19.008193, -19.548841], [-20.958896, -21.382694]]
).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice_logits, rtol=1e-4, atol=1e-4)
expected_slice_boxes = torch.tensor(
[[0.241309, 0.051896, 0.453267], [0.139474, 0.045701, 0.250660], [0.233022, 0.050479, 0.427671]],
).to(torch_device)
torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
resulted_slice_boxes = outputs.pred_boxes[0, :3, :3]
max_diff = torch.max(torch.abs(resulted_slice_boxes - expected_slice_boxes)).item()
self.assertLess(max_diff, 3e-4)
# test post-processing
post_processed_output = processor.post_process_grounded_object_detection(outputs)
self.assertIsNone(post_processed_output[0]["text_labels"])
post_processed_output_with_text_labels = processor.post_process_grounded_object_detection(
outputs, text_labels=text_labels
)
objects_labels = post_processed_output_with_text_labels[0]["labels"].tolist()
self.assertListEqual(objects_labels, [0, 0])
objects_text_labels = post_processed_output_with_text_labels[0]["text_labels"]
self.assertIsNotNone(objects_text_labels)
self.assertListEqual(objects_text_labels, ["a photo of a cat", "a photo of a cat"])
@slow
def test_inference_one_shot_object_detection(self):
model_name = "google/owlv2-base-patch16"
model = Owlv2ForObjectDetection.from_pretrained(model_name).to(torch_device)
image_processor = OwlViTImageProcessor.from_pretrained(model_name)
processor = OwlViTProcessor.from_pretrained(model_name, image_processor=image_processor)
image = prepare_img()
query_image = prepare_img()
inputs = processor(
images=image,
query_images=query_image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model.image_guided_detection(**inputs)
num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2)
self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4)))
expected_slice_boxes = torch.tensor(
[[0.2413, 0.0519, 0.4533], [0.1395, 0.0457, 0.2507], [0.2330, 0.0505, 0.4277]],
).to(torch_device)
torch.testing.assert_close(outputs.target_pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
@slow
@require_torch_accelerator
@require_torch_fp16
def test_inference_one_shot_object_detection_fp16(self):
model_name = "google/owlv2-base-patch16"
model = Owlv2ForObjectDetection.from_pretrained(model_name, dtype=torch.float16).to(torch_device)
image_processor = OwlViTImageProcessor.from_pretrained(model_name)
processor = OwlViTProcessor.from_pretrained(model_name, image_processor=image_processor)
image = prepare_img()
query_image = prepare_img()
inputs = processor(
images=image,
query_images=query_image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model.image_guided_detection(**inputs)
# No need to check the logits, we just check inference runs fine.
num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2)
self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4)))
| Owlv2ModelIntegrationTest |
python | ray-project__ray | rllib/examples/_old_api_stack/models/shared_weights_model.py | {
"start": 3723,
"end": 5311
} | class ____(TFModelV2):
"""The "other" TFModelV2 using the same shared space as the one above."""
def __init__(
self, observation_space, action_space, num_outputs, model_config, name
):
super().__init__(
observation_space, action_space, num_outputs, model_config, name
)
inputs = tf.keras.layers.Input(observation_space.shape)
# Weights shared with SharedWeightsModel1.
with tf1.variable_scope(
tf1.VariableScope(tf1.AUTO_REUSE, "shared"),
reuse=tf1.AUTO_REUSE,
auxiliary_name_scope=False,
):
last_layer = tf.keras.layers.Dense(
units=64, activation=tf.nn.relu, name="fc1"
)(inputs)
output = tf.keras.layers.Dense(
units=num_outputs, activation=None, name="fc_out"
)(last_layer)
vf = tf.keras.layers.Dense(units=1, activation=None, name="value_out")(
last_layer
)
self.base_model = tf.keras.models.Model(inputs, [output, vf])
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
out, self._value_out = self.base_model(input_dict["obs"])
return out, []
@override(ModelV2)
def value_function(self):
return tf.reshape(self._value_out, [-1])
TORCH_GLOBAL_SHARED_LAYER = None
if torch:
# The global, shared layer to be used by both models.
TORCH_GLOBAL_SHARED_LAYER = SlimFC(
64,
64,
activation_fn=nn.ReLU,
initializer=torch.nn.init.xavier_uniform_,
)
| SharedWeightsModel2 |
python | tensorflow__tensorflow | tensorflow/python/autograph/tests/assertion_test.py | {
"start": 860,
"end": 1445
} | class ____(reference_test_base.TestCase):
def setUp(self):
super(ReferenceTest, self).setUp()
self.autograph_opts = tf.autograph.experimental.Feature.ASSERT_STATEMENTS
def test_basic(self):
self.assertFunctionMatchesEager(simple_assertion, 1)
self.assertFunctionMatchesEager(simple_assertion, tf.constant(1))
with self.assertRaises(AssertionError):
self.function(simple_assertion)(0)
with self.assertRaises(tf.errors.InvalidArgumentError):
self.function(simple_assertion)(tf.constant(0))
if __name__ == '__main__':
tf.test.main()
| ReferenceTest |
python | google__pytype | pytype/overlays/attr_overlay.py | {
"start": 2395,
"end": 9376
} | class ____(classgen.Decorator):
"""Base class for @attr.s and @attrs.define."""
def init_name(self, attr):
# attrs removes leading underscores from attrib names when generating kwargs
# for __init__.
return attr.name.lstrip("_")
def _handle_auto_attribs(
self, auto_attribs: bool | None, local_ops, cls_name: str
) -> tuple[bool | None | _NoChange, Any]:
del local_ops, cls_name # unused
# Why _NO_CHANGE instead of passing auto_attribs?
# Because result is going into potentially an OrderedDict, where
# writing even the same value might have a side effect (changing ordering).
return _NO_CHANGE, _ordering_for_auto_attrib(auto_attribs)
def decorate(self, node, cls):
"""Processes the attrib members of a class."""
# Collect classvars to convert them to attrs.
new_auto_attribs, ordering = self._handle_auto_attribs(
self.args[cls]["auto_attribs"],
self.ctx.vm.local_ops.get(cls.name, ()),
cls.name,
)
if new_auto_attribs is not _NO_CHANGE:
self.args[cls]["auto_attribs"] = new_auto_attribs
ordered_locals = classgen.get_class_locals(
cls.name, allow_methods=False, ordering=ordering, ctx=self.ctx
)
own_attrs = []
for name, local in ordered_locals.items():
typ, orig = local.get_type(node, name), local.orig
if is_attrib(orig):
attrib = orig.data[0]
attr = Attribute(
name=name,
typ=None,
init=attrib.init,
init_type=attrib.init_type,
kw_only=attrib.kw_only,
default=attrib.default,
)
if typ:
if attrib.type_source == TypeSource.TYPE:
# We cannot have both a type annotation and a type argument.
msg = "attr.ib cannot have both a 'type' arg and a type annotation."
self.ctx.errorlog.invalid_annotation(
self.ctx.vm.stack(), typ, details=msg
)
attr.typ = self.ctx.convert.unsolvable
elif attrib.type_source == TypeSource.CONVERTER:
msg = "attr.ib type has been assigned by the converter."
self.ctx.check_annotation_type_mismatch(
node,
name,
typ,
attrib.typ.instantiate(node),
local.stack,
allow_none=True,
details=msg,
)
attr.typ = typ
else:
# cls.members[name] has already been set via a type annotation
# Use the annotation type as the field type in all circumstances; if
# it conflicts with the `type` or `converter` args we will raise an
# error above, and if it is compatible but not identical we treat it
# as the type most expressive of the code's intent.
attr.typ = typ
else:
# Replace the attrib in the class dict with its type.
attr.typ = attrib.typ
classgen.add_member(node, cls, name, attr.typ)
if attrib.type_source == TypeSource.TYPE and isinstance(
cls, abstract.InterpreterClass
):
# Add the attrib to the class's __annotations__ dict.
annotations_dict = classgen.get_or_create_annotations_dict(
cls.members, self.ctx
)
annotations_dict.annotated_locals[name] = abstract_utils.Local(
node, None, attrib.typ, orig, self.ctx
)
# TODO(mdemello): This will raise a confusing 'annotation-type-mismatch'
# error even if the type has been set via the type or converter arg
# rather than a type annotation. We need a more general 'type-mismatch'
# error to cover overlays that do runtime type checking. We will work
# around it with a footnote for now.
msg = (
"Note: The 'assignment' here is the 'default' or 'factory' arg,"
" which conflicts with the field type (set via annotation or a"
" 'type' or 'converter' arg)."
)
self.ctx.check_annotation_type_mismatch(
node,
attr.name,
attr.typ,
attr.default,
local.stack,
allow_none=True,
details=msg,
)
own_attrs.append(attr)
elif self.args[cls]["auto_attribs"]:
if not match_classvar(typ):
self.ctx.check_annotation_type_mismatch(
node, name, typ, orig, local.stack, allow_none=True
)
attr = Attribute(
name=name, typ=typ, init=True, kw_only=False, default=orig
)
if not orig:
classgen.add_member(node, cls, name, typ)
own_attrs.append(attr)
cls.record_attr_ordering(own_attrs)
attrs = cls.compute_attr_metadata(own_attrs, "attr.s")
# Add an __init__.
# If the "init" parameter of decorator is False, instead an
# __attrs_init__ function is generated, which is what would have been
# generated for __init__ if the init parameter was True.
# This logic was added in attrs version 21.1.0
init_method_name = (
"__init__" if self.args[cls]["init"] else "__attrs_init__"
)
init_method = self.make_init(node, cls, attrs, init_method_name)
cls.members[init_method_name] = init_method
# Add the __attrs_attrs__ attribute, the presence of which `attr.has` uses
# to determine if an object has `attrs` attributes.
attr_types = self.ctx.convert.merge_values({attr.typ for attr in attrs})
generic_attribute = abstract.ParameterizedClass(
self.ctx.convert.lookup_value("attr", "Attribute"),
{abstract_utils.T: attr_types},
self.ctx,
)
attr_attribute_params = {abstract_utils.T: generic_attribute}
attr_attribute_type = abstract.ParameterizedClass(
self.ctx.convert.tuple_type, attr_attribute_params, self.ctx
)
classgen.add_member(node, cls, "__attrs_attrs__", attr_attribute_type)
annotations_dict = classgen.get_or_create_annotations_dict(
cls.members, self.ctx
)
annotations_dict.annotated_locals["__attrs_attrs__"] = abstract_utils.Local(
node, None, attr_attribute_type, None, self.ctx
)
if isinstance(cls, abstract.InterpreterClass):
# Replaces all decorators with equivalent behavior with @attr.s.
cls.decorators = [
d
for d in cls.decorators
if class_mixin.get_metadata_key(d) != "__attrs_attrs__"
] + ["attr.s"]
# Fix up type parameters in methods added by the decorator.
cls.update_method_type_params()
def to_metadata(self):
# For simplicity, we give all attrs decorators with the same behavior as
# attr.s the same tag.
args = self._current_args or self.DEFAULT_ARGS
return {
"tag": "attr.s",
"init": args["init"],
"kw_only": args["kw_only"],
"auto_attribs": args["auto_attribs"],
}
| AttrsBase |
python | openai__openai-python | src/openai/types/beta/realtime/error_event.py | {
"start": 664,
"end": 877
} | class ____(BaseModel):
error: Error
"""Details of the error."""
event_id: str
"""The unique ID of the server event."""
type: Literal["error"]
"""The event type, must be `error`."""
| ErrorEvent |
python | walkccc__LeetCode | solutions/2788. Split Strings by Separator/2788.py | {
"start": 0,
"end": 250
} | class ____:
def splitWordsBySeparator(
self,
words: list[str],
separator: str,
) -> list[str]:
return [splitWord
for word in words
for splitWord in word.split(separator)
if splitWord]
| Solution |
python | tornadoweb__tornado | tornado/test/ioloop_test.py | {
"start": 16180,
"end": 17419
} | class ____(unittest.TestCase):
def setUp(self):
setup_with_context_manager(self, ignore_deprecation())
self.io_loop = None # type: typing.Optional[IOLoop]
IOLoop.clear_current()
def tearDown(self):
if self.io_loop is not None:
self.io_loop.close()
def test_non_current(self):
self.io_loop = IOLoop(make_current=False)
# The new IOLoop is not initially made current.
self.assertIsNone(IOLoop.current(instance=False))
# Starting the IOLoop makes it current, and stopping the loop
# makes it non-current. This process is repeatable.
for i in range(3):
def f():
self.current_io_loop = IOLoop.current()
assert self.io_loop is not None
self.io_loop.stop()
self.io_loop.add_callback(f)
self.io_loop.start()
self.assertIs(self.current_io_loop, self.io_loop)
# Now that the loop is stopped, it is no longer current.
self.assertIsNone(IOLoop.current(instance=False))
def test_force_current(self):
self.io_loop = IOLoop(make_current=True)
self.assertIs(self.io_loop, IOLoop.current())
| TestIOLoopCurrent |
python | pytorch__pytorch | torch/_inductor/select_algorithm.py | {
"start": 152136,
"end": 158146
} | class ____:
"""
Wrapper around a grid function that allows either int or sympy inputs.
@SymbolicGridFn
def grid(x, meta, *, cdiv):
return cdiv(x, meta["BLOCK_X"])
"""
def __init__(self, fn: Callable[..., tuple[Any, Any, Any]]):
self.fn = fn
self.kwargs_int = {}
self.kwargs_sym = {}
params = inspect.signature(fn).parameters
for name, fn_sym, fn_int in [
("cdiv", CeilDiv, ceildiv),
("min", sympy.Min, min),
("max", sympy.Max, max),
]:
if name in params:
self.kwargs_int[name] = fn_int
self.kwargs_sym[name] = fn_sym
def __call__(self, *args, **kwargs) -> tuple[int, int, int]:
return self.fn(*args, **kwargs, **self.kwargs_int)
def sympy_call(self, *args, **kwargs):
return self.fn(*args, **kwargs, **self.kwargs_sym)
def _autotune_metadata(input_nodes):
"""Helper function to extract autotune metadata from input nodes."""
return {
"autotune_strides": ", ".join([str(n.get_stride()) for n in input_nodes]),
"autotune_dtypes": ", ".join([str(n.get_dtype()) for n in input_nodes]),
"autotune_shape": ", ".join(
["x".join(map(str, n.get_size())) for n in input_nodes]
),
"autotune_offset": ", ".join([str(n.get_layout().offset) for n in input_nodes]),
# TODO(coconutruben): replace this with taking KernelInputs as the
# argument, and extracting those out there directly
"autotune_strides_hinted": ", ".join(
[
str(
V.graph.sizevars.size_hints(
n.get_stride(),
fallback=config.unbacked_symint_fallback,
)
)
for n in input_nodes
]
),
"autotune_shape_hinted": ", ".join(
[
"x".join(
map(
str,
V.graph.sizevars.size_hints(
n.get_size(),
fallback=config.unbacked_symint_fallback,
),
)
)
for n in input_nodes
]
),
}
def _log_autotune_choices_stats(
event_name: str, timings: dict[ChoiceCaller, float]
) -> None:
"""Helper function to extract autotune metadata from benchmark results."""
if not timings:
return None
metadata: dict[str, Union[int, float, str]] = {
"num_choices": len(timings),
"num_triton_choices": len(
[c for c in timings if isinstance(c, TritonTemplateCaller)]
),
}
sorted_choices = sorted(timings, key=timings.__getitem__)
best_choice = sorted_choices[0]
metadata["best_kernel"] = best_choice.name
if best_choice.description:
metadata["best_kernel_desc"] = best_choice.description
metadata["best_time"] = timings[best_choice]
best_triton_pos = next(
(
i
for i, choice in enumerate(sorted_choices)
if isinstance(choice, TritonTemplateCaller)
),
None,
)
if best_triton_pos is not None:
metadata["best_triton_pos"] = best_triton_pos
best_triton_kernel = sorted_choices[best_triton_pos]
if best_triton_pos != 0:
metadata["best_triton_time"] = timings[best_triton_kernel]
metadata["best_triton_kernel"] = best_triton_kernel.name
if best_triton_kernel.description:
metadata["best_triton_kernel_desc"] = best_triton_kernel.description
payload = json.dumps(metadata)
get_chromium_event_logger().add_event_data(
event_name, autotune_choices_stats=payload
)
sys.stderr.write(f"Autotune Choices Stats:\n{payload}\n")
def _log_autotune_exceptions(
exceptions: list[tuple[ChoiceCaller, BaseException]],
) -> None:
"""Log autotune exceptions to chromium event logger."""
if not exceptions:
return
try:
pt2_compile_substack = get_chromium_event_logger().get_pt2_compile_substack()
if not pt2_compile_substack:
return
current_event = pt2_compile_substack[-1]
if not current_event.endswith("_template_precompiling"):
return
exception_details = []
for choice, exc in exceptions:
try:
choice_type = (
"triton" if isinstance(choice, TritonTemplateCaller) else "other"
)
data = {
"choice_type": choice_type,
"choice": choice.description,
"exception_message": str(exc),
}
exc_type_match = re.search(r"(\w+):", str(exc))
if exc_type_match:
data["exception"] = exc_type_match.group(1)
if "OutOfMemoryError" in str(exc):
required_match = re.search(r"Required: (\d+)", str(exc))
if required_match:
data["required_memory"] = required_match.group(1)
limit_match = re.search(r"Hardware limit:\s*(\d+)", str(exc))
if limit_match:
data["hardware_limit"] = limit_match.group(1)
exception_details.append(data)
except Exception:
# Don't let logging errors break the main flow
continue
if exception_details:
metadata = json.dumps({"exceptions": exception_details})
get_chromium_event_logger().try_add_event_data(
current_event, metadata=metadata
)
except Exception:
# Silently ignore logging errors to avoid breaking autotune
pass
# ensure lowering is imported so that `extern_kernels.*` is populated
from . import lowering # noqa: F401
| SymbolicGridFn |
python | scipy__scipy | scipy/optimize/tests/test_linesearch.py | {
"start": 1595,
"end": 11598
} | class ____:
# -- scalar functions; must have dphi(0.) < 0
def _scalar_func_1(self, s): # skip name check
if not hasattr(self.fcount, 'c'):
self.fcount.c = 0
self.fcount.c += 1
p = -s - s**3 + s**4
dp = -1 - 3*s**2 + 4*s**3
return p, dp
def _scalar_func_2(self, s): # skip name check
if not hasattr(self.fcount, 'c'):
self.fcount.c = 0
self.fcount.c += 1
p = np.exp(-4*s) + s**2
dp = -4*np.exp(-4*s) + 2*s
return p, dp
def _scalar_func_3(self, s): # skip name check
if not hasattr(self.fcount, 'c'):
self.fcount.c = 0
self.fcount.c += 1
p = -np.sin(10*s)
dp = -10*np.cos(10*s)
return p, dp
# -- n-d functions
def _line_func_1(self, x): # skip name check
if not hasattr(self.fcount, 'c'):
self.fcount.c = 0
self.fcount.c += 1
f = np.dot(x, x)
df = 2*x
return f, df
def _line_func_2(self, x): # skip name check
if not hasattr(self.fcount, 'c'):
self.fcount.c = 0
self.fcount.c += 1
f = np.dot(x, np.dot(self.A, x)) + 1
df = np.dot(self.A + self.A.T, x)
return f, df
# --
def setup_method(self):
self.scalar_funcs = []
self.line_funcs = []
self.N = 20
self.fcount = threading.local()
def bind_index(func, idx):
# Remember Python's closure semantics!
return lambda *a, **kw: func(*a, **kw)[idx]
for name in sorted(dir(self)):
if name.startswith('_scalar_func_'):
value = getattr(self, name)
self.scalar_funcs.append(
(name, bind_index(value, 0), bind_index(value, 1)))
elif name.startswith('_line_func_'):
value = getattr(self, name)
self.line_funcs.append(
(name, bind_index(value, 0), bind_index(value, 1)))
# the choice of seed affects whether the tests pass
rng = np.random.default_rng(1231892908)
self.A = rng.standard_normal((self.N, self.N))
def scalar_iter(self):
rng = np.random.default_rng(2231892908)
for name, phi, derphi in self.scalar_funcs:
for old_phi0 in rng.standard_normal(3):
yield name, phi, derphi, old_phi0
def line_iter(self):
rng = np.random.default_rng(2231892908)
for name, f, fprime in self.line_funcs:
k = 0
while k < 9:
x = rng.standard_normal(self.N)
p = rng.standard_normal(self.N)
if np.dot(p, fprime(x)) >= 0:
# always pick a descent direction
continue
k += 1
old_fv = float(rng.standard_normal())
yield name, f, fprime, x, p, old_fv
# -- Generic scalar searches
def test_scalar_search_wolfe1(self):
c = 0
for name, phi, derphi, old_phi0 in self.scalar_iter():
c += 1
s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0),
old_phi0, derphi(0))
assert_fp_equal(phi0, phi(0), name)
assert_fp_equal(phi1, phi(s), name)
assert_wolfe(s, phi, derphi, err_msg=name)
assert c > 3 # check that the iterator really works...
def test_scalar_search_wolfe2(self):
for name, phi, derphi, old_phi0 in self.scalar_iter():
s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2(
phi, derphi, phi(0), old_phi0, derphi(0))
assert_fp_equal(phi0, phi(0), name)
assert_fp_equal(phi1, phi(s), name)
if derphi1 is not None:
assert_fp_equal(derphi1, derphi(s), name)
assert_wolfe(s, phi, derphi, err_msg=f"{name} {old_phi0:g}")
def test_scalar_search_wolfe2_with_low_amax(self):
def phi(alpha):
return (alpha - 5) ** 2
def derphi(alpha):
return 2 * (alpha - 5)
alpha_star, _, _, derphi_star = ls.scalar_search_wolfe2(phi, derphi, amax=0.001)
assert alpha_star is None # Not converged
assert derphi_star is None # Not converged
def test_scalar_search_wolfe2_regression(self):
# Regression test for gh-12157
# This phi has its minimum at alpha=4/3 ~ 1.333.
def phi(alpha):
if alpha < 1:
return - 3*np.pi/2 * (alpha - 1)
else:
return np.cos(3*np.pi/2 * alpha - np.pi)
def derphi(alpha):
if alpha < 1:
return - 3*np.pi/2
else:
return - 3*np.pi/2 * np.sin(3*np.pi/2 * alpha - np.pi)
s, _, _, _ = ls.scalar_search_wolfe2(phi, derphi)
# Without the fix in gh-13073, the scalar_search_wolfe2
# returned s=2.0 instead.
assert s < 1.5
def test_scalar_search_armijo(self):
for name, phi, derphi, old_phi0 in self.scalar_iter():
s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0))
assert_fp_equal(phi1, phi(s), name)
assert_armijo(s, phi, err_msg=f"{name} {old_phi0:g}")
# -- Generic line searches
def test_line_search_wolfe1(self):
c = 0
smax = 100
for name, f, fprime, x, p, old_f in self.line_iter():
f0 = f(x)
g0 = fprime(x)
self.fcount.c = 0
s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p,
g0, f0, old_f,
amax=smax)
assert_equal(self.fcount.c, fc+gc)
assert_fp_equal(ofv, f(x))
if s is None:
continue
assert_fp_equal(fv, f(x + s*p))
assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)
if s < smax:
c += 1
assert_line_wolfe(x, p, s, f, fprime, err_msg=name)
assert c > 3 # check that the iterator really works...
def test_line_search_wolfe2(self):
c = 0
smax = 512
for name, f, fprime, x, p, old_f in self.line_iter():
f0 = f(x)
g0 = fprime(x)
self.fcount.c = 0
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
"The line search algorithm could not find a solution",
LineSearchWarning)
warnings.filterwarnings(
"ignore",
"The line search algorithm did not converge",
LineSearchWarning)
s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p,
g0, f0, old_f,
amax=smax)
assert_equal(self.fcount.c, fc+gc)
assert_fp_equal(ofv, f(x))
assert_fp_equal(fv, f(x + s*p))
if gv is not None:
assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)
if s < smax:
c += 1
assert_line_wolfe(x, p, s, f, fprime, err_msg=name)
assert c > 3 # check that the iterator really works...
def test_line_search_wolfe2_bounds(self):
# See gh-7475
# For this f and p, starting at a point on axis 0, the strong Wolfe
# condition 2 is met if and only if the step length s satisfies
# |x + s| <= c2 * |x|
def f(x):
return np.dot(x, x)
def fp(x):
return 2 * x
p = np.array([1, 0])
# Smallest s satisfying strong Wolfe conditions for these arguments is 30
x = -60 * p
c2 = 0.5
s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2)
assert_line_wolfe(x, p, s, f, fp)
with pytest.warns(LineSearchWarning):
s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p,
amax=29, c2=c2)
assert s is None
# s=30 will only be tried on the 6th iteration, so this won't converge
with pytest.warns(LineSearchWarning):
ls.line_search_wolfe2(f, fp, x, p, c2=c2, maxiter=5)
def test_line_search_armijo(self):
c = 0
for name, f, fprime, x, p, old_f in self.line_iter():
f0 = f(x)
g0 = fprime(x)
self.fcount.c = 0
s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0)
c += 1
assert_equal(self.fcount.c, fc)
assert_fp_equal(fv, f(x + s*p))
assert_line_armijo(x, p, s, f, err_msg=name)
assert c >= 9
# -- More specific tests
def test_armijo_terminate_1(self):
# Armijo should evaluate the function only once if the trial step
# is already suitable
count = [0]
def phi(s):
count[0] += 1
return -s + 0.01*s**2
s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1)
assert_equal(s, 1)
assert_equal(count[0], 2)
assert_armijo(s, phi)
def test_wolfe_terminate(self):
# wolfe1 and wolfe2 should also evaluate the function only a few
# times if the trial step is already suitable
def phi(s):
count[0] += 1
return -s + 0.05*s**2
def derphi(s):
count[0] += 1
return -1 + 0.05*2*s
for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]:
count = [0]
r = func(phi, derphi, phi(0), None, derphi(0))
assert r[0] is not None, (r, func)
assert count[0] <= 2 + 2, (count, func)
assert_wolfe(r[0], phi, derphi, err_msg=str(func))
| TestLineSearch |
python | chroma-core__chroma | chromadb/test/conftest.py | {
"start": 33488,
"end": 37456
} | class ____(Protocol):
def __call__(
self,
producer: Producer,
collection_id: UUID,
embeddings: Iterator[OperationRecord],
n: int,
) -> Tuple[Sequence[OperationRecord], Sequence[SeqId]]:
...
def produce_n_single(
producer: Producer,
collection_id: UUID,
embeddings: Iterator[OperationRecord],
n: int,
) -> Tuple[Sequence[OperationRecord], Sequence[SeqId]]:
submitted_embeddings = []
seq_ids = []
for _ in range(n):
e = next(embeddings)
seq_id = producer.submit_embedding(collection_id, e)
submitted_embeddings.append(e)
seq_ids.append(seq_id)
return submitted_embeddings, seq_ids
def produce_n_batch(
producer: Producer,
collection_id: UUID,
embeddings: Iterator[OperationRecord],
n: int,
) -> Tuple[Sequence[OperationRecord], Sequence[SeqId]]:
submitted_embeddings = []
seq_ids: Sequence[SeqId] = []
for _ in range(n):
e = next(embeddings)
submitted_embeddings.append(e)
seq_ids = producer.submit_embeddings(collection_id, submitted_embeddings)
return submitted_embeddings, seq_ids
def produce_fn_fixtures() -> List[ProducerFn]:
return [produce_n_single, produce_n_batch]
@pytest.fixture(scope="module", params=produce_fn_fixtures())
def produce_fns(
request: pytest.FixtureRequest,
) -> Generator[ProducerFn, None, None]:
yield request.param
def pytest_configure(config): # type: ignore
embeddings_queue._called_from_test = True
def is_client_in_process(client: ClientAPI) -> bool:
"""Returns True if the client is in-process (a SQLite client), False if it's out-of-process (a HTTP client)."""
return client.get_settings().chroma_server_http_port is None
@pytest.fixture(autouse=True)
def log_tests(request: pytest.FixtureRequest) -> Generator[None, None, None]:
"""Automatically logs the start and end of each test."""
test_name = request.node.name
logger.debug(f"Starting test: {test_name}")
# Yield control back to the test, allowing it to execute
yield
logger.debug(f"Finished test: {test_name}")
@pytest.fixture
def mock_embeddings() -> Callable[[Documents], Embeddings]:
"""Return mock embeddings for testing"""
def _mock_embeddings(input: Documents) -> Embeddings:
return [np.array([0.1, 0.2, 0.3], dtype=np.float32) for _ in input]
return _mock_embeddings
@pytest.fixture
def mock_common_deps(monkeypatch: MonkeyPatch) -> MonkeyPatch:
"""Mock common dependencies"""
# Create mock modules
mock_modules = {
"PIL": MagicMock(),
"torch": MagicMock(),
"openai": MagicMock(),
"cohere": MagicMock(),
"sentence_transformers": MagicMock(),
"ollama": MagicMock(),
"InstructorEmbedding": MagicMock(),
"voyageai": MagicMock(),
"text2vec": MagicMock(),
"open_clip": MagicMock(),
"boto3": MagicMock(),
}
# Mock all modules at once using monkeypatch.setitem
monkeypatch.setattr(sys, "modules", dict(sys.modules, **mock_modules))
# Mock submodules and attributes
mock_attributes = {
"PIL.Image": MagicMock(),
"sentence_transformers.SentenceTransformer": MagicMock(),
"ollama.Client": MagicMock(),
"InstructorEmbedding.INSTRUCTOR": MagicMock(),
"voyageai.Client": MagicMock(),
"text2vec.SentenceModel": MagicMock(),
}
# Setup OpenCLIP mock with specific behavior
mock_model = MagicMock()
mock_model.encode_text.return_value = np.array([[0.1, 0.2, 0.3]])
mock_model.encode_image.return_value = np.array([[0.1, 0.2, 0.3]])
mock_modules["open_clip"].create_model_and_transforms.return_value = (
mock_model,
MagicMock(),
mock_model,
)
# Mock all attributes
for path, mock in mock_attributes.items():
monkeypatch.setattr(path, mock, raising=False)
return monkeypatch
| ProducerFn |
python | doocs__leetcode | lcof2/剑指 Offer II 083. 没有重复元素集合的全排列/Solution.py | {
"start": 0,
"end": 509
} | class ____:
def permute(self, nums: List[int]) -> List[List[int]]:
n = len(nums)
res = []
path = [0] * n
used = [False] * n
def dfs(u):
if u == n:
res.append(path.copy())
return
for i in range(n):
if not used[i]:
path[u] = nums[i]
used[i] = True
dfs(u + 1)
used[i] = False
dfs(0)
return res
| Solution |
python | Textualize__textual | src/textual/app.py | {
"start": 7298,
"end": 7475
} | class ____(Exception):
"""Raised when an invalid theme is set."""
ReturnType = TypeVar("ReturnType")
CallThreadReturnType = TypeVar("CallThreadReturnType")
| InvalidThemeError |
python | PyCQA__pylint | tests/functional/i/iterable_context.py | {
"start": 2211,
"end": 2556
} | class ____:
access_requirements = None
def get_access_requirements(self):
return self.access_requirements
def dispatch(self, *_args, **_kwargs):
classes = self.get_access_requirements()
# no error should be emitted here
for requirement in classes:
print(requirement)
| ManagedAccessViewMixin |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 59259,
"end": 59438
} | class ____(ConstNode):
type = PyrexTypes.c_null_ptr_type
value = "NULL"
constant_result = 0
def get_constant_c_result_code(self):
return self.value
| NullNode |
python | huggingface__transformers | src/transformers/models/m2m_100/modeling_m2m_100.py | {
"start": 3015,
"end": 8710
} | class ____(nn.Module):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
super().__init__()
self.offset = 2
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
if hasattr(self, "weights"):
# in forward put the weights on the correct dtype and device of the param
emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
self.register_buffer("weights", emb_weights, persistent=False)
@staticmethod
def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
"""
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
"Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb.to(torch.get_default_dtype())
@torch.no_grad()
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
past_key_values_length: int = 0,
):
if input_ids is not None:
bsz, seq_len = input_ids.size()
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = self.create_position_ids_from_input_ids(
input_ids, self.padding_idx, past_key_values_length
).to(input_ids.device)
else:
bsz, seq_len = inputs_embeds.size()[:-1]
position_ids = self.create_position_ids_from_inputs_embeds(
inputs_embeds, past_key_values_length, self.padding_idx
)
# expand embeddings if needed
max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
if max_pos > self.weights.size(0):
self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()
@staticmethod
def create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length, padding_idx):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length
@staticmethod
# Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings.create_position_ids_from_input_ids
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
x: torch.Tensor x:
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->M2M100
| M2M100SinusoidalPositionalEmbedding |
python | sympy__sympy | sympy/functions/elementary/complexes.py | {
"start": 13639,
"end": 21703
} | class ____(DefinedFunction):
"""
Return the absolute value of the argument.
Explanation
===========
This is an extension of the built-in function ``abs()`` to accept symbolic
values. If you pass a SymPy expression to the built-in ``abs()``, it will
pass it automatically to ``Abs()``.
Examples
========
>>> from sympy import Abs, Symbol, S, I
>>> Abs(-1)
1
>>> x = Symbol('x', real=True)
>>> Abs(-x)
Abs(x)
>>> Abs(x**2)
x**2
>>> abs(-x) # The Python built-in
Abs(x)
>>> Abs(3*x + 2*I)
sqrt(9*x**2 + 4)
>>> Abs(8*I)
8
Note that the Python built-in will return either an Expr or int depending on
the argument::
>>> type(abs(-1))
<... 'int'>
>>> type(abs(S.NegativeOne))
<class 'sympy.core.numbers.One'>
Abs will always return a SymPy object.
Parameters
==========
arg : Expr
Real or complex expression.
Returns
=======
expr : Expr
Absolute value returned can be an expression or integer depending on
input arg.
See Also
========
sign, conjugate
"""
args: tuple[Expr]
is_extended_real = True
is_extended_negative = False
is_extended_nonnegative = True
unbranched = True
_singularities = True # non-holomorphic
def fdiff(self, argindex=1):
"""
Get the first derivative of the argument to Abs().
"""
if argindex == 1:
return sign(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy.simplify.simplify import signsimp
if hasattr(arg, '_eval_Abs'):
obj = arg._eval_Abs()
if obj is not None:
return obj
if not isinstance(arg, Expr):
raise TypeError("Bad argument type for Abs(): %s" % type(arg))
# handle what we can
arg = signsimp(arg, evaluate=False)
n, d = arg.as_numer_denom()
if d.free_symbols and not n.free_symbols:
return cls(n)/cls(d)
if arg.is_Mul:
known = []
unk = []
for t in arg.args:
if t.is_Pow and t.exp.is_integer and t.exp.is_negative:
bnew = cls(t.base)
if isinstance(bnew, cls):
unk.append(t)
else:
known.append(Pow(bnew, t.exp))
else:
tnew = cls(t)
if isinstance(tnew, cls):
unk.append(t)
else:
known.append(tnew)
known = Mul(*known)
unk = cls(Mul(*unk), evaluate=False) if unk else S.One
return known*unk
if arg is S.NaN:
return S.NaN
if arg is S.ComplexInfinity:
return oo
from sympy.functions.elementary.exponential import exp, log
if arg.is_Pow:
base, exponent = arg.as_base_exp()
if base.is_extended_real:
if exponent.is_integer:
if exponent.is_even:
return arg
if base is S.NegativeOne:
return S.One
return Abs(base)**exponent
if base.is_extended_nonnegative:
return base**re(exponent)
if base.is_extended_negative:
return (-base)**re(exponent)*exp(-pi*im(exponent))
return
elif not base.has(Symbol): # complex base
# express base**exponent as exp(exponent*log(base))
a, b = log(base).as_real_imag()
z = a + I*b
return exp(re(exponent*z))
if isinstance(arg, exp):
return exp(re(arg.args[0]))
if isinstance(arg, AppliedUndef):
if arg.is_positive:
return arg
elif arg.is_negative:
return -arg
return
if arg.is_Add and arg.has(oo, S.NegativeInfinity):
if any(a.is_infinite for a in arg.as_real_imag()):
return oo
if arg.is_zero:
return S.Zero
if arg.is_extended_nonnegative:
return arg
if arg.is_extended_nonpositive:
return -arg
if arg.is_imaginary:
arg2 = -I * arg
if arg2.is_extended_nonnegative:
return arg2
if arg.is_extended_real:
return
# reject result if all new conjugates are just wrappers around
# an expression that was already in the arg
conj = signsimp(arg.conjugate(), evaluate=False)
new_conj = conj.atoms(conjugate) - arg.atoms(conjugate)
if new_conj and all(arg.has(i.args[0]) for i in new_conj):
return
if arg != conj and arg != -conj:
ignore = arg.atoms(Abs)
abs_free_arg = arg.xreplace({i: Dummy(real=True) for i in ignore})
unk = [a for a in abs_free_arg.free_symbols if a.is_extended_real is None]
if not unk or not all(conj.has(conjugate(u)) for u in unk):
return sqrt(expand_mul(arg*conj))
def _eval_is_real(self):
if self.args[0].is_finite:
return True
def _eval_is_integer(self):
if self.args[0].is_extended_real:
return self.args[0].is_integer
def _eval_is_extended_nonzero(self):
return fuzzy_not(self._args[0].is_zero)
def _eval_is_zero(self):
return self._args[0].is_zero
def _eval_is_extended_positive(self):
return fuzzy_not(self._args[0].is_zero)
def _eval_is_rational(self):
if self.args[0].is_extended_real:
return self.args[0].is_rational
def _eval_is_even(self):
if self.args[0].is_extended_real:
return self.args[0].is_even
def _eval_is_odd(self):
if self.args[0].is_extended_real:
return self.args[0].is_odd
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
def _eval_power(self, exponent):
if self.args[0].is_extended_real and exponent.is_integer:
if exponent.is_even:
return self.args[0]**exponent
elif exponent is not S.NegativeOne and exponent.is_Integer:
return self.args[0]**(exponent - 1)*self
return
def _eval_nseries(self, x, n, logx, cdir=0):
from sympy.functions.elementary.exponential import log
direction = self.args[0].leadterm(x)[0]
if direction.has(log(x)):
direction = direction.subs(log(x), logx)
s = self.args[0]._eval_nseries(x, n=n, logx=logx)
return (sign(direction)*s).expand()
def _eval_derivative(self, x):
if self.args[0].is_extended_real or self.args[0].is_imaginary:
return Derivative(self.args[0], x, evaluate=True) \
* sign(conjugate(self.args[0]))
rv = (re(self.args[0]) * Derivative(re(self.args[0]), x,
evaluate=True) + im(self.args[0]) * Derivative(im(self.args[0]),
x, evaluate=True)) / Abs(self.args[0])
return rv.rewrite(sign)
def _eval_rewrite_as_Heaviside(self, arg, **kwargs):
# Note this only holds for real arg (since Heaviside is not defined
# for complex arguments).
from sympy.functions.special.delta_functions import Heaviside
if arg.is_extended_real:
return arg*(Heaviside(arg) - Heaviside(-arg))
def _eval_rewrite_as_Piecewise(self, arg, **kwargs):
if arg.is_extended_real:
return Piecewise((arg, arg >= 0), (-arg, True))
elif arg.is_imaginary:
return Piecewise((I*arg, I*arg >= 0), (-I*arg, True))
def _eval_rewrite_as_sign(self, arg, **kwargs):
return arg/sign(arg)
def _eval_rewrite_as_conjugate(self, arg, **kwargs):
return sqrt(arg*conjugate(arg))
| Abs |
python | django-extensions__django-extensions | django_extensions/management/jobs.py | {
"start": 711,
"end": 5408
} | class ____(BaseJob):
when = "yearly"
def my_import(name):
try:
imp = __import__(name)
except ImportError as err:
raise JobError("Failed to import %s with error %s" % (name, err))
mods = name.split(".")
if len(mods) > 1:
for mod in mods[1:]:
imp = getattr(imp, mod)
return imp
def find_jobs(jobs_dir):
try:
return sorted(
[
f[:-3]
for f in os.listdir(jobs_dir)
if not f.startswith("_") and f.endswith(".py")
]
)
except OSError:
return []
def find_job_module(app_name: str, when: Optional[str] = None) -> str:
"""Find the directory path to a job module."""
parts = app_name.split(".")
parts.append("jobs")
if when:
parts.append(when)
module_name = ".".join(parts)
module = importlib.import_module(module_name)
if not hasattr(module, "__path__"):
# module here is a non-package module, eg jobs.py
raise ImportError
return module.__path__[0]
def import_job(app_name, name, when=None):
jobmodule = "%s.jobs.%s%s" % (app_name, when and "%s." % when or "", name)
job_mod = my_import(jobmodule)
# todo: more friendly message for AttributeError if job_mod does not exist
try:
job = job_mod.Job
except AttributeError:
raise JobError(
"Job module %s does not contain class instance named 'Job'" % jobmodule
)
if when and not (job.when == when or job.when is None):
raise JobError("Job %s is not a %s job." % (jobmodule, when))
return job
def get_jobs(when=None, only_scheduled=False):
"""
Return a dictionary mapping of job names together with their respective
application class.
"""
# FIXME: HACK: make sure the project dir is on the path when executed as ./manage.py
try:
cpath = os.path.dirname(os.path.realpath(sys.argv[0]))
ppath = os.path.dirname(cpath)
if ppath not in sys.path:
sys.path.append(ppath)
except Exception:
pass
_jobs = {}
for app_name in [app.name for app in apps.get_app_configs()]:
scandirs = (
None,
"minutely",
"quarter_hourly",
"hourly",
"daily",
"weekly",
"monthly",
"yearly",
)
if when:
scandirs = None, when
for subdir in scandirs:
try:
path = find_job_module(app_name, subdir)
for name in find_jobs(path):
if (app_name, name) in _jobs:
raise JobError("Duplicate job %s" % name)
job = import_job(app_name, name, subdir)
if only_scheduled and job.when is None:
# only include jobs which are scheduled
continue
if when and job.when != when:
# generic job not in same schedule
continue
_jobs[(app_name, name)] = job
except ImportError:
# No job module -- continue scanning
pass
return _jobs
def get_job(app_name, job_name):
jobs = get_jobs()
if app_name:
return jobs[(app_name, job_name)]
else:
for a, j in jobs.keys():
if j == job_name:
return jobs[(a, j)]
raise KeyError("Job not found: %s" % job_name)
def print_jobs(
when=None,
only_scheduled=False,
show_when=True,
show_appname=False,
show_header=True,
):
jobmap = get_jobs(when, only_scheduled=only_scheduled)
print("Job List: %i jobs" % len(jobmap))
jlist = sorted(jobmap.keys())
if not jlist:
return
appname_spacer = "%%-%is" % max(len(e[0]) for e in jlist)
name_spacer = "%%-%is" % max(len(e[1]) for e in jlist)
when_spacer = "%%-%is" % max(len(e.when) for e in jobmap.values() if e.when)
if show_header:
line = " "
if show_appname:
line += appname_spacer % "appname" + " - "
line += name_spacer % "jobname"
if show_when:
line += " - " + when_spacer % "when"
line += " - help"
print(line)
print("-" * 80)
for app_name, job_name in jlist:
job = jobmap[(app_name, job_name)]
line = " "
if show_appname:
line += appname_spacer % app_name + " - "
line += name_spacer % job_name
if show_when:
line += " - " + when_spacer % (job.when and job.when or "")
line += " - " + job.help
print(line)
| YearlyJob |
python | scipy__scipy | scipy/stats/tests/test_stats.py | {
"start": 104261,
"end": 115355
} | class ____:
def test_empty(self, xp):
with eager_warns(SmallSampleWarning, match=too_small_1d_not_omit, xp=xp):
vals, counts = stats.mode(xp.asarray([]))
xp_assert_equal(vals, xp.asarray(xp.nan))
xp_assert_equal(counts, xp.asarray(0.))
def test_scalar(self):
vals, counts = stats.mode(4.)
assert_equal(vals, np.array([4.]))
assert_equal(counts, np.array([1]))
def test_basic(self, xp):
data1 = xp.asarray([3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6])
vals = stats.mode(data1)
xp_assert_equal(vals[0], xp.asarray(6))
xp_assert_equal(vals[1], xp.asarray(3))
def test_axes_keepdims(self, xp):
data1 = [10, 10, 30, 40]
data2 = [10, 10, 10, 10]
data3 = [20, 10, 20, 20]
data4 = [30, 30, 30, 30]
data5 = [40, 30, 30, 30]
arr = xp.asarray([data1, data2, data3, data4, data5])
vals = stats.mode(arr, axis=None, keepdims=True)
xp_assert_equal(vals[0], xp.asarray([[30]]))
xp_assert_equal(vals[1], xp.asarray([[8]]))
vals = stats.mode(arr, axis=0, keepdims=True)
xp_assert_equal(vals[0], xp.asarray([[10, 10, 30, 30]]))
xp_assert_equal(vals[1], xp.asarray([[2, 3, 3, 2]]))
vals = stats.mode(arr, axis=1, keepdims=True)
xp_assert_equal(vals[0], xp.asarray([[10], [10], [20], [30], [30]]))
xp_assert_equal(vals[1], xp.asarray([[2], [4], [3], [4], [3]]))
def test_axes(self, xp):
data1 = [10, 10, 30, 40]
data2 = [10, 10, 10, 10]
data3 = [20, 10, 20, 20]
data4 = [30, 30, 30, 30]
data5 = [40, 30, 30, 30]
arr = xp.asarray([data1, data2, data3, data4, data5])
vals = stats.mode(arr, axis=None)
xp_assert_equal(vals[0], xp.asarray(30))
xp_assert_equal(vals[1], xp.asarray(8))
vals = stats.mode(arr, axis=0)
xp_assert_equal(vals[0], xp.asarray([10, 10, 30, 30]))
xp_assert_equal(vals[1], xp.asarray([2, 3, 3, 2]))
vals = stats.mode(arr, axis=1)
xp_assert_equal(vals[0], xp.asarray([10, 10, 20, 30, 30]))
xp_assert_equal(vals[1], xp.asarray([2, 4, 3, 4, 3]))
@pytest.mark.parametrize('axis', range(-4, 0))
def test_negative_axes_gh_15375(self, axis, xp):
rng = np.random.default_rng(7090348401)
a = xp.asarray(rng.random((10, 11, 12, 13)))
res0 = stats.mode(a, axis=a.ndim+axis)
res1 = stats.mode(a, axis=axis)
xp_assert_equal(res0.mode, res1.mode)
xp_assert_equal(res0.count, res1.count)
def test_mode_result_attributes(self, xp):
data1 = xp.asarray([3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6])
data2 = xp.asarray([])
actual = stats.mode(data1)
attributes = ('mode', 'count')
check_named_results(actual, attributes, xp=xp)
with eager_warns(SmallSampleWarning, match=too_small_1d_not_omit, xp=xp):
actual2 = stats.mode(data2)
check_named_results(actual2, attributes, xp=xp)
def test_nan_propagate(self, xp):
data1 = xp.asarray([3, np.nan, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6])
actual = stats.mode(data1)
xp_assert_equal(actual[0], xp.asarray(6, dtype=data1.dtype))
xp_assert_equal(actual[1], xp.asarray(3))
@skip_xp_backends(eager_only=True, reason="lazy arrays don't do 'raise'.")
def test_nan_omit(self, xp):
data1 = xp.asarray([3, np.nan, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6])
res = stats.mode(data1, nan_policy='omit')
xp_assert_equal(res.mode, xp.asarray(6.))
xp_assert_equal(res.count, xp.asarray(3))
assert_raises(ValueError, stats.mode, data1, nan_policy='raise')
assert_raises(ValueError, stats.mode, data1, nan_policy='foobar')
@skip_xp_backends(eager_only=True, reason="lazy arrays don't do 'omit'.")
@pytest.mark.parametrize("data", [
    [3, 5, 1, 1, 3.],
    [3, np.nan, 5, 1, 1, 3],
    [3, 5, 1.],
    [3, np.nan, 5, 1],
])
@pytest.mark.parametrize('keepdims', [False, True])
def test_smallest_equal(self, data, keepdims, xp):
    """On ties, the smallest of the equally common values must be reported."""
    result = stats.mode(xp.asarray(data), nan_policy='omit', keepdims=keepdims)
    mode = result[0]
    if keepdims:
        # keepdims leaves a length-1 axis to peel off before comparing.
        mode = mode[0]
    xp_assert_equal(mode, xp.asarray(1.))
@pytest.mark.parametrize('axis', range(-3, 3))
def test_mode_shape_gh_9955(self, axis, xp):
    """With keepdims=False the reduced axis disappears from both outputs (gh-9955)."""
    sample = xp.asarray(np.random.default_rng(984213899).uniform(size=(3, 4, 5)))
    result = stats.mode(sample, axis=axis, keepdims=False)
    expected_shape = list(sample.shape)
    del expected_shape[axis]
    np.testing.assert_array_equal(result.mode.shape, expected_shape)
    np.testing.assert_array_equal(result.count.shape, expected_shape)
def test_nan_policy_propagate_gh_9815(self, xp):
    """nan_policy='propagate' treats np.nan like any other object (gh-9815)."""
    sample = xp.asarray([2, np.nan, 1, np.nan])
    result = stats.mode(sample)
    # The two NaNs form the most common "value", so the mode is NaN with count 2.
    assert xp.isnan(result.mode)
    assert result.count == 2
def test_keepdims_empty(self, xp):
    """Empty input: the reduced axis collapses or stays length-1 per keepdims."""
    empty = xp.zeros((1, 2, 3, 0))
    collapsed = stats.mode(empty, axis=1, keepdims=False)
    assert collapsed.mode.shape == collapsed.count.shape == (1, 3, 0)
    retained = stats.mode(empty, axis=1, keepdims=True)
    assert retained.mode.shape == retained.count.shape == (1, 1, 3, 0)
def test_keepdims_nonempty(self, xp):
    """keepdims behavior on non-empty data, including axis=None.

    Fix: the first parameter was misspelled ``selfself``; it must be
    ``self`` like every other method so the instance binds conventionally.
    """
    # nan_policy='propagate' (the default): NaNs stay in the slices.
    a = xp.asarray([[1, 3, 3, np.nan], [1, 1, np.nan, 1]])
    res = stats.mode(a, axis=1, keepdims=False)
    xp_assert_equal(res.mode, xp.asarray([3., 1.]))
    xp_assert_equal(res.count, xp.asarray([2, 3]))

    res = stats.mode(a, axis=1, keepdims=True)
    xp_assert_equal(res.mode, xp.asarray([[3.], [1.]]))
    xp_assert_equal(res.count, xp.asarray([[2], [3]]))

    # axis=None must agree with the mode of the raveled array.  (A redundant
    # ``a = xp.asarray(a)`` re-wrap was dropped: ``a`` is already an xp array.)
    res = stats.mode(a, axis=None, keepdims=False)
    ref = stats.mode(xp_ravel(a), keepdims=False)
    xp_assert_equal(res.mode, ref.mode)
    xp_assert_equal(res.count, ref.count)
    assert res.mode.shape == ref.mode.shape == ()

    res = stats.mode(a, axis=None, keepdims=True)
    ref = stats.mode(xp_ravel(a), keepdims=True)
    xp_assert_equal(xp_ravel(res.mode), xp_ravel(ref.mode))
    assert res.mode.shape == (1, 1)
    xp_assert_equal(xp_ravel(res.count), xp_ravel(ref.count))
    assert res.count.shape == (1, 1)
def test_keepdims_nan_omit(self):
    """keepdims combined with nan_policy='omit', including axis=None."""
    a = [[1, np.nan, np.nan, np.nan, 1],
         [np.nan, np.nan, np.nan, np.nan, 2],
         [1, 2, np.nan, 5, 5]]
    # Per-row reduction, with and without the kept axis.
    flat = stats.mode(a, axis=1, keepdims=False, nan_policy='omit')
    assert_array_equal(flat.mode, [1, 2, 5])
    assert_array_equal(flat.count, [2, 1, 2])

    kept = stats.mode(a, axis=1, keepdims=True, nan_policy='omit')
    assert_array_equal(kept.mode, [[1], [2], [5]])
    assert_array_equal(kept.count, [[2], [1], [2]])

    # axis=None must match the mode of the raveled array.
    a = np.array(a)
    res = stats.mode(a, axis=None, keepdims=False, nan_policy='omit')
    ref = stats.mode(a.ravel(), keepdims=False, nan_policy='omit')
    assert_array_equal(res, ref)
    assert res.mode.shape == ref.mode.shape == ()

    res = stats.mode(a, axis=None, keepdims=True, nan_policy='omit')
    ref = stats.mode(a.ravel(), keepdims=True, nan_policy='omit')
    assert_equal(res.mode.ravel(), ref.mode.ravel())
    assert res.mode.shape == (1, 1)
    assert_equal(res.count.ravel(), ref.count.ravel())
    assert res.count.shape == (1, 1)
@pytest.mark.parametrize("nan_policy", ['propagate', 'omit'])
def test_gh16955(self, nan_policy):
# Check that bug reported in gh-16955 is resolved
shape = (4, 3)
data = np.ones(shape)
data[0, 0] = np.nan
res = stats.mode(a=data, axis=1, keepdims=False, nan_policy=nan_policy)
assert_array_equal(res.mode, [1, 1, 1, 1])
assert_array_equal(res.count, [2, 3, 3, 3])
# Test with input from gh-16595. Support for non-numeric input
# was deprecated, so check for the appropriate error.
my_dtype = np.dtype([('asdf', np.uint8), ('qwer', np.float64, (3,))])
test = np.zeros(10, dtype=my_dtype)
message = "Argument `a` is not....|An argument has dtype...|The DType..."
with pytest.raises(TypeError, match=message):
stats.mode(test, nan_policy=nan_policy)
def test_gh9955(self):
    # The behavior of mode with empty slices (whether the input was empty
    # or all elements were omitted) was inconsistent. Test that this is
    # resolved: the mode of an empty slice is NaN and the count is zero.
    with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
        res = stats.mode([])
    ref = (np.nan, 0)
    assert_equal(res, ref)

    # A slice that becomes empty only after omitting NaNs behaves the same.
    with pytest.warns(SmallSampleWarning, match=too_small_1d_omit):
        res = stats.mode([np.nan], nan_policy='omit')
    assert_equal(res, ref)

    # 2-D: under 'omit' the all-NaN row yields NaN mode with count 0 ...
    a = [[10., 20., 20.], [np.nan, np.nan, np.nan]]
    with pytest.warns(SmallSampleWarning, match=too_small_nd_omit):
        res = stats.mode(a, axis=1, nan_policy='omit')
    ref = ([20, np.nan], [2, 0])
    assert_equal(res, ref)

    # ... while 'propagate' counts the NaNs instead of dropping them.
    res = stats.mode(a, axis=1, nan_policy='propagate')
    ref = ([20, np.nan], [2, 3])
    assert_equal(res, ref)

    # Fully empty slices along the axis: NaN mode with zero count.
    z = np.array([[], []])
    with pytest.warns(SmallSampleWarning, match=too_small_nd_not_omit):
        res = stats.mode(z, axis=1)
    ref = ([np.nan, np.nan], [0, 0])
    assert_equal(res, ref)
@pytest.mark.filterwarnings('ignore::RuntimeWarning')  # np.mean warns
@pytest.mark.parametrize('z', [np.empty((0, 1, 2)), np.empty((1, 1, 2))])
def test_gh17214(self, z, xp):
    # gh-17214: with axis=None and keepdims=True the result must keep one
    # length-1 dimension per input dimension, matching np.mean's behavior.
    z = xp.asarray(z)
    if z.size == 0:
        # The empty variant additionally emits a small-sample warning.
        with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
            res = stats.mode(z, axis=None, keepdims=True)
    else:
        res = stats.mode(z, axis=None, keepdims=True)
    ref = xp.mean(z, axis=None, keepdims=True)
    assert res[0].shape == res[1].shape == ref.shape == (1, 1, 1)
def test_raise_non_numeric_gh18254(self):
    # gh-18254: non-numeric input must raise TypeError, both for an
    # array-like whose __array__ yields object dtype and for a plain
    # object array.
    class ArrLike:
        def __init__(self, x):
            self._x = x

        def __array__(self, dtype=None, copy=None):
            # Deliberately hands back an object-dtype array.
            return self._x.astype(object)

    # NOTE(review): the expected message differs depending on whether the
    # array-API path (SCIPY_ARRAY_API) is active.
    message = ("...only boolean and numerical dtypes..." if SCIPY_ARRAY_API
               else "Cannot interpret...")
    with pytest.raises(TypeError, match=message):
        stats.mode(ArrLike(np.arange(3)))
    message = ("...only boolean and numerical dtypes..." if SCIPY_ARRAY_API
               else "Argument `a` is not recognized as numeric.")
    with pytest.raises(TypeError, match=message):
        stats.mode(np.arange(3, dtype=object))
@make_xp_test_case(stats.sem)
| TestMode |
python | gevent__gevent | src/gevent/tests/test__socket_dns.py | {
"start": 29207,
"end": 30975
class ____(TestCase):
    """Resolution of internationalized (non-ASCII / IDNA) host names."""
    if PY2:
        # We expect these to raise UnicodeEncodeError, which is a
        # subclass of ValueError
        REAL_ERRORS = set(TestCase.REAL_ERRORS) - {ValueError,}

    if RESOLVER_ARES:
        def test_russian_getaddrinfo_http(self):
            # And somehow, test_russian_getaddrinfo_http (``getaddrinfo(name, 'http')``)
            # manages to work with recent versions of Python 2, but our preemptive encoding
            # to ASCII causes it to fail with the c-ares resolver; but only that one test out of
            # all of them.
            self.skipTest("ares fails to encode.")
# Register the Unicode spelling and its IDNA-encoded byte form as test cases.
#
# dns python can actually resolve these: it uses
# the 2008 version of idna encoding, whereas on Python 2,
# with the default resolver, it tries to encode to ascii and
# raises a UnicodeEncodeError. So we get different results.

# Starting 20221027, on GitHub Actions and *some* versions of Python,
# we started getting a different error result from our own resolver
# compared to the system. This is very weird because our own resolver
# calls the system. I can't reproduce locally. Perhaps the two
# different answers are because of caching? One from the real DNS
# server, one from the local resolver library? Hence
# require_equal_errors=False
# ('system:', "herror(2, 'Host name lookup failure')",
#  'gevent:', "herror(1, 'Unknown host')")
add(TestInternational, u'президент.рф', 'russian',
    skip=(PY2 and RESOLVER_DNSPYTHON),
    skip_reason="dnspython can actually resolve these",
    require_equal_errors=False)
add(TestInternational, u'президент.рф'.encode('idna'), 'idna',
    require_equal_errors=False)
@skipWithoutExternalNetwork("Tries to resolve and compare hostnames/addrinfo")
| TestInternational |
python | encode__django-rest-framework | rest_framework/pagination.py | {
"start": 12675,
"end": 20175
class ____(BasePagination):
    """
    A limit/offset based style. For example:
    http://api.example.org/accounts/?limit=100
    http://api.example.org/accounts/?offset=400&limit=100
    """
    # Page size falls back to the global PAGE_SIZE setting.
    default_limit = api_settings.PAGE_SIZE
    limit_query_param = 'limit'
    limit_query_description = _('Number of results to return per page.')
    offset_query_param = 'offset'
    offset_query_description = _('The initial index from which to return the results.')
    # Optional hard cap applied to client-supplied limits (None = uncapped).
    max_limit = None
    template = 'rest_framework/pagination/numbers.html'

    def paginate_queryset(self, queryset, request, view=None):
        """Slice ``queryset`` according to the request's limit/offset params.

        Side effects: stores ``request``, ``limit``, ``count`` and ``offset``
        on the instance for the link/schema/HTML helpers below.  Returns
        ``None`` when no limit applies (pagination disabled), an empty list
        when the offset is past the end, else the page as a list.
        """
        self.request = request
        self.limit = self.get_limit(request)
        if self.limit is None:
            return None

        self.count = self.get_count(queryset)
        self.offset = self.get_offset(request)
        # Show browsable-API page controls only when there is more than one page.
        if self.count > self.limit and self.template is not None:
            self.display_page_controls = True

        if self.count == 0 or self.offset > self.count:
            return []
        return list(queryset[self.offset:self.offset + self.limit])

    def get_paginated_response(self, data):
        """Wrap ``data`` in the standard count/next/previous/results envelope."""
        return Response({
            'count': self.count,
            'next': self.get_next_link(),
            'previous': self.get_previous_link(),
            'results': data
        })

    def get_paginated_response_schema(self, schema):
        """Return the OpenAPI schema for the paginated response envelope."""
        return {
            'type': 'object',
            'required': ['count', 'results'],
            'properties': {
                'count': {
                    'type': 'integer',
                    'example': 123,
                },
                'next': {
                    'type': 'string',
                    'nullable': True,
                    'format': 'uri',
                    'example': 'http://api.example.org/accounts/?{offset_param}=400&{limit_param}=100'.format(
                        offset_param=self.offset_query_param, limit_param=self.limit_query_param),
                },
                'previous': {
                    'type': 'string',
                    'nullable': True,
                    'format': 'uri',
                    'example': 'http://api.example.org/accounts/?{offset_param}=200&{limit_param}=100'.format(
                        offset_param=self.offset_query_param, limit_param=self.limit_query_param),
                },
                'results': schema,
            },
        }

    def get_limit(self, request):
        """Return the page size to use.

        The client's value is parsed as a positive int capped at
        ``max_limit``; missing or invalid values fall back to
        ``default_limit`` via the suppressed exceptions.
        """
        if self.limit_query_param:
            # strict=True presumably rejects a zero limit — see _positive_int.
            with contextlib.suppress(KeyError, ValueError):
                return _positive_int(
                    request.query_params[self.limit_query_param],
                    strict=True,
                    cutoff=self.max_limit
                )
        return self.default_limit

    def get_offset(self, request):
        """Return the requested offset, defaulting to 0 on missing/bad input."""
        try:
            return _positive_int(
                request.query_params[self.offset_query_param],
            )
        except (KeyError, ValueError):
            return 0

    def get_next_link(self):
        """URL of the next page, or ``None`` when this page reaches the end."""
        if self.offset + self.limit >= self.count:
            return None

        url = self.request.build_absolute_uri()
        url = replace_query_param(url, self.limit_query_param, self.limit)

        offset = self.offset + self.limit
        return replace_query_param(url, self.offset_query_param, offset)

    def get_previous_link(self):
        """URL of the previous page, or ``None`` when already at the start."""
        if self.offset <= 0:
            return None

        url = self.request.build_absolute_uri()
        url = replace_query_param(url, self.limit_query_param, self.limit)

        # The first page is canonicalised by dropping the offset parameter.
        if self.offset - self.limit <= 0:
            return remove_query_param(url, self.offset_query_param)

        offset = self.offset - self.limit
        return replace_query_param(url, self.offset_query_param, offset)

    def get_html_context(self):
        """Build the template context (page links) for the browsable API."""
        base_url = self.request.build_absolute_uri()

        if self.limit:
            current = _divide_with_ceil(self.offset, self.limit) + 1

            # The number of pages is a little bit fiddly.
            # We need to sum both the number of pages from current offset to end
            # plus the number of pages up to the current offset.
            # When offset is not strictly divisible by the limit then we may
            # end up introducing an extra page as an artifact.
            final = (
                _divide_with_ceil(self.count - self.offset, self.limit) +
                _divide_with_ceil(self.offset, self.limit)
            )
            final = max(final, 1)
        else:
            # No limit: everything is presented as a single page.
            current = 1
            final = 1

        if current > final:
            current = final

        def page_number_to_url(page_number):
            # Page 1 is canonicalised by removing the offset parameter.
            if page_number == 1:
                return remove_query_param(base_url, self.offset_query_param)
            else:
                offset = self.offset + ((page_number - current) * self.limit)
                return replace_query_param(base_url, self.offset_query_param, offset)

        page_numbers = _get_displayed_page_numbers(current, final)
        page_links = _get_page_links(page_numbers, current, page_number_to_url)

        return {
            'previous_url': self.get_previous_link(),
            'next_url': self.get_next_link(),
            'page_links': page_links
        }

    def to_html(self):
        """Render the pagination controls for the browsable API."""
        template = loader.get_template(self.template)
        context = self.get_html_context()
        return template.render(context)

    def get_count(self, queryset):
        """
        Determine an object count, supporting either querysets or regular lists.
        """
        try:
            return queryset.count()
        except (AttributeError, TypeError):
            return len(queryset)

    def get_schema_fields(self, view):
        """Deprecated coreapi schema fields for the limit/offset parameters."""
        assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
        if coreapi is not None:
            warnings.warn('CoreAPI compatibility is deprecated and will be removed in DRF 3.18', RemovedInDRF318Warning)
        assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'
        return [
            coreapi.Field(
                name=self.limit_query_param,
                required=False,
                location='query',
                schema=coreschema.Integer(
                    title='Limit',
                    description=force_str(self.limit_query_description)
                )
            ),
            coreapi.Field(
                name=self.offset_query_param,
                required=False,
                location='query',
                schema=coreschema.Integer(
                    title='Offset',
                    description=force_str(self.offset_query_description)
                )
            )
        ]

    def get_schema_operation_parameters(self, view):
        """OpenAPI query parameters describing the limit/offset controls."""
        parameters = [
            {
                'name': self.limit_query_param,
                'required': False,
                'in': 'query',
                'description': force_str(self.limit_query_description),
                'schema': {
                    'type': 'integer',
                },
            },
            {
                'name': self.offset_query_param,
                'required': False,
                'in': 'query',
                'description': force_str(self.offset_query_description),
                'schema': {
                    'type': 'integer',
                },
            },
        ]
        return parameters
| LimitOffsetPagination |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.