language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | tests/sentry/issues/endpoints/test_organization_group_index.py | {
"start": 3392,
"end": 114619
} | class ____(APITestCase, SnubaTestCase, SearchIssueTestMixin):
endpoint = "sentry-api-0-organization-group-index"
def setUp(self) -> None:
super().setUp()
self.min_ago = before_now(minutes=1)
def _parse_links(self, header: str) -> dict[str | None, dict[str, str | None]]:
# links come in {url: {...attrs}}, but we need {rel: {...attrs}}
links = {}
for url, attrs in parse_link_header(header).items():
links[attrs["rel"]] = attrs
attrs["href"] = url
return links
def get_response(self, *args: Organization, **kwargs: Any) -> Response:
if not args:
org = self.project.organization.slug
else:
org = args[0]
return super().get_response(org, **kwargs)
def test_sort_by_date_with_tag(self) -> None:
# XXX(dcramer): this tests a case where an ambiguous column name existed
event = self.store_event(
data={"event_id": "a" * 32, "timestamp": before_now(seconds=1).isoformat()},
project_id=self.project.id,
)
group = event.group
self.login_as(user=self.user)
response = self.get_success_response(sort_by="date", query="is:unresolved")
assert len(response.data) == 1
assert response.data[0]["id"] == str(group.id)
def test_query_for_archived(self) -> None:
event = self.store_event(
data={"event_id": "a" * 32, "timestamp": before_now(seconds=1).isoformat()},
project_id=self.project.id,
)
group = event.group
group.status = GroupStatus.IGNORED
group.substatus = None
group.save()
self.login_as(user=self.user)
response = self.get_success_response(sort_by="date", query="is:archived")
assert len(response.data) == 1
assert response.data[0]["id"] == str(group.id)
def test_sort_by_trends(self) -> None:
group = self.store_event(
data={
"timestamp": before_now(seconds=10).isoformat(),
"fingerprint": ["group-1"],
},
project_id=self.project.id,
).group
self.store_event(
data={
"timestamp": before_now(seconds=10).isoformat(),
"fingerprint": ["group-1"],
},
project_id=self.project.id,
)
self.store_event(
data={
"timestamp": before_now(hours=13).isoformat(),
"fingerprint": ["group-1"],
},
project_id=self.project.id,
)
group_2 = self.store_event(
data={
"timestamp": before_now(seconds=5).isoformat(),
"fingerprint": ["group-2"],
},
project_id=self.project.id,
).group
self.store_event(
data={
"timestamp": before_now(hours=13).isoformat(),
"fingerprint": ["group-2"],
},
project_id=self.project.id,
)
self.login_as(user=self.user)
aggregate_kwargs: dict[str, str] = {
"log_level": "3",
"has_stacktrace": "5",
"relative_volume": "1",
"event_halflife_hours": "4",
"issue_halflife_hours": "4",
"v2": "true",
"norm": "False",
}
response = self.get_success_response(
sort="trends",
query="is:unresolved",
limit=25,
start=before_now(days=1).isoformat(),
end=before_now(seconds=1).isoformat(),
**aggregate_kwargs,
)
assert len(response.data) == 2
assert [item["id"] for item in response.data] == [str(group.id), str(group_2.id)]
def test_sort_by_inbox(self) -> None:
group_1 = self.store_event(
data={
"event_id": "a" * 32,
"timestamp": before_now(seconds=1).isoformat(),
"fingerprint": ["group-1"],
},
project_id=self.project.id,
).group
inbox_1 = add_group_to_inbox(group_1, GroupInboxReason.NEW)
group_2 = self.store_event(
data={
"event_id": "a" * 32,
"timestamp": before_now(seconds=1).isoformat(),
"fingerprint": ["group-2"],
},
project_id=self.project.id,
).group
inbox_2 = add_group_to_inbox(group_2, GroupInboxReason.NEW)
inbox_2.update(date_added=inbox_1.date_added - timedelta(hours=1))
self.login_as(user=self.user)
response = self.get_success_response(
sort="inbox", query="is:unresolved is:for_review", limit=1
)
assert len(response.data) == 1
assert response.data[0]["id"] == str(group_1.id)
header_links = parse_link_header(response["Link"])
cursor = [link for link in header_links.values() if link["rel"] == "next"][0]["cursor"]
response = self.get_response(
sort="inbox", cursor=cursor, query="is:unresolved is:for_review", limit=1
)
assert [item["id"] for item in response.data] == [str(group_2.id)]
def test_sort_by_inbox_me_or_none(self) -> None:
group_1 = self.store_event(
data={
"event_id": "a" * 32,
"timestamp": before_now(seconds=1).isoformat(),
"fingerprint": ["group-1"],
},
project_id=self.project.id,
).group
inbox_1 = add_group_to_inbox(group_1, GroupInboxReason.NEW)
group_2 = self.store_event(
data={
"event_id": "b" * 32,
"timestamp": before_now(seconds=1).isoformat(),
"fingerprint": ["group-2"],
},
project_id=self.project.id,
).group
inbox_2 = add_group_to_inbox(group_2, GroupInboxReason.NEW)
inbox_2.update(date_added=inbox_1.date_added - timedelta(hours=1))
GroupOwner.objects.create(
group=group_2,
project=self.project,
organization=self.organization,
type=GroupOwnerType.OWNERSHIP_RULE.value,
user_id=self.user.id,
)
owner_by_other = self.store_event(
data={
"event_id": "c" * 32,
"timestamp": before_now(seconds=1).isoformat(),
"fingerprint": ["group-3"],
},
project_id=self.project.id,
).group
inbox_3 = add_group_to_inbox(owner_by_other, GroupInboxReason.NEW)
inbox_3.update(date_added=inbox_1.date_added - timedelta(hours=1))
other_user = self.create_user()
GroupOwner.objects.create(
group=owner_by_other,
project=self.project,
organization=self.organization,
type=GroupOwnerType.OWNERSHIP_RULE.value,
user_id=other_user.id,
)
owned_me_assigned_to_other = self.store_event(
data={
"event_id": "d" * 32,
"timestamp": before_now(seconds=1).isoformat(),
"fingerprint": ["group-4"],
},
project_id=self.project.id,
).group
inbox_4 = add_group_to_inbox(owned_me_assigned_to_other, GroupInboxReason.NEW)
inbox_4.update(date_added=inbox_1.date_added - timedelta(hours=1))
GroupAssignee.objects.assign(owned_me_assigned_to_other, other_user)
GroupOwner.objects.create(
group=owned_me_assigned_to_other,
project=self.project,
organization=self.organization,
type=GroupOwnerType.OWNERSHIP_RULE.value,
user_id=self.user.id,
)
unowned_assigned_to_other = self.store_event(
data={
"event_id": "e" * 32,
"timestamp": before_now(seconds=1).isoformat(),
"fingerprint": ["group-5"],
},
project_id=self.project.id,
).group
inbox_5 = add_group_to_inbox(unowned_assigned_to_other, GroupInboxReason.NEW)
inbox_5.update(date_added=inbox_1.date_added - timedelta(hours=1))
GroupAssignee.objects.assign(unowned_assigned_to_other, other_user)
self.login_as(user=self.user)
response = self.get_success_response(
sort="inbox",
query="is:unresolved is:for_review assigned_or_suggested:[me, none]",
limit=10,
)
assert [item["id"] for item in response.data] == [str(group_1.id), str(group_2.id)]
def test_trace_search(self) -> None:
event = self.store_event(
data={
"event_id": "a" * 32,
"timestamp": before_now(seconds=1).isoformat(),
"contexts": {
"trace": {
"parent_span_id": "8988cec7cc0779c1",
"type": "trace",
"op": "foobar",
"trace_id": "a7d67cf796774551a95be6543cacd459",
"span_id": "babaae0d4b7512d9",
"status": "ok",
}
},
},
project_id=self.project.id,
)
self.login_as(user=self.user)
response = self.get_success_response(
sort_by="date", query="is:unresolved trace:a7d67cf796774551a95be6543cacd459"
)
assert len(response.data) == 1
assert response.data[0]["id"] == str(event.group.id)
response = self.get_success_response(
sort_by="date",
query="is:unresolved trace:a7d67cf796774551a95be6543cacd459",
)
assert len(response.data) == 1
assert response.data[0]["id"] == str(event.group.id)
def test_with_all_projects(self) -> None:
# ensure there are two or more projects
self.create_project(organization=self.project.organization)
self.login_as(user=self.user)
response = self.get_success_response(project_id=[-1])
assert response.status_code == 200
def test_boolean_search_feature_flag(self) -> None:
self.login_as(user=self.user)
response = self.get_response(sort_by="date", query="title:hello OR title:goodbye")
assert response.status_code == 400
assert (
response.data["detail"]
== 'Error parsing search query: Boolean statements containing "OR" or "AND" are not supported in this search'
)
response = self.get_response(sort_by="date", query="title:hello AND title:goodbye")
assert response.status_code == 400
assert (
response.data["detail"]
== 'Error parsing search query: Boolean statements containing "OR" or "AND" are not supported in this search'
)
def test_invalid_query(self) -> None:
now = timezone.now()
self.create_group(last_seen=now - timedelta(seconds=1))
self.login_as(user=self.user)
response = self.get_response(sort_by="date", query="timesSeen:>1t")
assert response.status_code == 400
assert "Invalid number" in response.data["detail"]
def test_valid_numeric_query(self) -> None:
now = timezone.now()
self.create_group(last_seen=now - timedelta(seconds=1))
self.login_as(user=self.user)
response = self.get_response(sort_by="date", query="timesSeen:>1k")
assert response.status_code == 200
def test_invalid_sort_key(self) -> None:
now = timezone.now()
self.create_group(last_seen=now - timedelta(seconds=1))
self.login_as(user=self.user)
response = self.get_response(sort="meow", query="is:unresolved")
assert response.status_code == 400
def test_simple_pagination(self) -> None:
event1 = self.store_event(
data={"timestamp": before_now(seconds=2).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
group1 = event1.group
event2 = self.store_event(
data={"timestamp": before_now(seconds=1).isoformat(), "fingerprint": ["group-2"]},
project_id=self.project.id,
)
group2 = event2.group
self.login_as(user=self.user)
response = self.get_success_response(sort_by="date", limit=1)
assert len(response.data) == 1
assert response.data[0]["id"] == str(group2.id)
links = self._parse_links(response["Link"])
assert links["previous"]["results"] == "false"
assert links["next"]["results"] == "true"
assert links["next"]["href"] is not None
response = self.client.get(links["next"]["href"], format="json")
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]["id"] == str(group1.id)
links = self._parse_links(response["Link"])
assert links["previous"]["results"] == "true"
assert links["next"]["results"] == "false"
def test_stats_period(self) -> None:
# TODO(dcramer): this test really only checks if validation happens
# on groupStatsPeriod
now = timezone.now()
self.create_group(last_seen=now - timedelta(seconds=1))
self.create_group(last_seen=now)
self.login_as(user=self.user)
self.get_success_response(groupStatsPeriod="24h")
self.get_success_response(groupStatsPeriod="14d")
self.get_success_response(groupStatsPeriod="")
response = self.get_response(groupStatsPeriod="48h")
assert response.status_code == 400
def test_environment(self) -> None:
self.store_event(
data={
"fingerprint": ["put-me-in-group1"],
"timestamp": self.min_ago.isoformat(),
"environment": "production",
},
project_id=self.project.id,
)
self.store_event(
data={
"fingerprint": ["put-me-in-group2"],
"timestamp": self.min_ago.isoformat(),
"environment": "staging",
},
project_id=self.project.id,
)
self.login_as(user=self.user)
sleep(1)
response = self.get_success_response(environment="production")
assert len(response.data) == 1
response = self.get_response(environment="garbage")
assert response.status_code == 404
def test_project(self) -> None:
self.store_event(
data={
"fingerprint": ["put-me-in-group1"],
"timestamp": self.min_ago.isoformat(),
"environment": "production",
},
project_id=self.project.id,
)
project = self.project
self.login_as(user=self.user)
response = self.get_success_response(query=f"project:{project.slug}")
assert len(response.data) == 1
response = self.get_success_response(query=f"project:{project.slug}")
assert len(response.data) == 1
def test_auto_resolved(self) -> None:
project = self.project
project.update_option("sentry:resolve_age", 1)
self.store_event(
data={"event_id": "a" * 32, "timestamp": before_now(seconds=1).isoformat()},
project_id=project.id,
)
event2 = self.store_event(
data={"event_id": "b" * 32, "timestamp": before_now(seconds=1).isoformat()},
project_id=project.id,
)
group2 = event2.group
self.login_as(user=self.user)
response = self.get_success_response()
assert len(response.data) == 1
assert response.data[0]["id"] == str(group2.id)
def test_perf_issue(self) -> None:
perf_group = self.create_group(type=PerformanceNPlusOneGroupType.type_id)
self.login_as(user=self.user)
with self.feature(
{
"organizations:issue-search-allow-postgres-only-search": True,
}
):
response = self.get_success_response(query="issue.category:performance")
assert len(response.data) == 1
assert response.data[0]["id"] == str(perf_group.id)
def test_lookup_by_event_id(self) -> None:
project = self.project
project.update_option("sentry:resolve_age", 1)
event_id = "c" * 32
event = self.store_event(
data={"event_id": event_id, "timestamp": self.min_ago.isoformat()},
project_id=self.project.id,
)
self.login_as(user=self.user)
response = self.get_success_response(query="c" * 32)
assert response["X-Sentry-Direct-Hit"] == "1"
assert len(response.data) == 1
assert response.data[0]["id"] == str(event.group.id)
assert response.data[0]["matchingEventId"] == event_id
def test_lookup_by_event_id_incorrect_project_id(self) -> None:
self.store_event(
data={"event_id": "a" * 32, "timestamp": self.min_ago.isoformat()},
project_id=self.project.id,
)
event_id = "b" * 32
event = self.store_event(
data={"event_id": event_id, "timestamp": self.min_ago.isoformat()},
project_id=self.project.id,
)
other_project = self.create_project(teams=[self.team])
user = self.create_user()
self.create_member(organization=self.organization, teams=[self.team], user=user)
self.login_as(user=user)
response = self.get_success_response(query=event_id, project=[other_project.id])
assert response["X-Sentry-Direct-Hit"] == "1"
assert len(response.data) == 1
assert response.data[0]["id"] == str(event.group.id)
assert response.data[0]["matchingEventId"] == event_id
def test_lookup_by_event_id_with_whitespace(self) -> None:
project = self.project
project.update_option("sentry:resolve_age", 1)
event_id = "c" * 32
event = self.store_event(
data={"event_id": event_id, "timestamp": self.min_ago.isoformat()},
project_id=self.project.id,
)
self.login_as(user=self.user)
response = self.get_success_response(query=" {} ".format("c" * 32))
assert response["X-Sentry-Direct-Hit"] == "1"
assert len(response.data) == 1
assert response.data[0]["id"] == str(event.group.id)
assert response.data[0]["matchingEventId"] == event_id
def test_lookup_by_unknown_event_id(self) -> None:
project = self.project
project.update_option("sentry:resolve_age", 1)
self.create_group()
self.create_group()
self.login_as(user=self.user)
response = self.get_success_response(query="c" * 32)
assert len(response.data) == 0
def test_lookup_by_short_id(self) -> None:
group = self.group
short_id = group.qualified_short_id
self.login_as(user=self.user)
response = self.get_success_response(query=short_id, shortIdLookup=1)
assert len(response.data) == 1
assert response["X-Sentry-Direct-Hit"] == "1"
def test_lookup_by_short_id_alias(self) -> None:
event_id = "f" * 32
group = self.store_event(
data={"event_id": event_id, "timestamp": before_now(seconds=1).isoformat()},
project_id=self.project.id,
).group
short_id = group.qualified_short_id
self.login_as(user=self.user)
response = self.get_success_response(query=f"issue:{short_id}", shortIdLookup=1)
assert len(response.data) == 1
assert response["X-Sentry-Direct-Hit"] == "1"
def test_lookup_by_multiple_short_id_alias(self) -> None:
self.login_as(self.user)
project = self.project
project2 = self.create_project(name="baz", organization=project.organization)
event = self.store_event(
data={"timestamp": before_now(seconds=2).isoformat()},
project_id=project.id,
)
event2 = self.store_event(
data={"timestamp": before_now(seconds=1).isoformat()},
project_id=project2.id,
)
response = self.get_success_response(
query=f"issue:[{event.group.qualified_short_id},{event2.group.qualified_short_id}]",
shortIdLookup=1,
)
assert len(response.data) == 2
assert response.get("X-Sentry-Direct-Hit") != "1"
response = self.get_success_response(
query=f"issue:[{event.group.qualified_short_id},{event2.group.qualified_short_id}]",
shortIdLookup=1,
)
assert len(response.data) == 2
assert response.get("X-Sentry-Direct-Hit") != "1"
def test_lookup_by_short_id_ignores_project_list(self) -> None:
organization = self.create_organization()
project = self.create_project(organization=organization)
project2 = self.create_project(organization=organization)
group = self.create_group(project=project2)
user = self.create_user()
self.create_member(organization=organization, user=user)
short_id = group.qualified_short_id
self.login_as(user=user)
response = self.get_success_response(
organization.slug, project=project.id, query=short_id, shortIdLookup=1
)
assert len(response.data) == 1
assert response.get("X-Sentry-Direct-Hit") == "1"
def test_lookup_by_short_id_no_perms(self) -> None:
organization = self.create_organization()
project = self.create_project(organization=organization)
group = self.create_group(project=project)
user = self.create_user()
self.create_member(organization=organization, user=user, has_global_access=False)
short_id = group.qualified_short_id
self.login_as(user=user)
response = self.get_success_response(organization.slug, query=short_id, shortIdLookup=1)
assert len(response.data) == 0
assert response.get("X-Sentry-Direct-Hit") != "1"
def test_lookup_by_group_id(self) -> None:
self.login_as(user=self.user)
response = self.get_success_response(group=self.group.id)
assert len(response.data) == 1
assert response.data[0]["id"] == str(self.group.id)
group_2 = self.create_group()
response = self.get_success_response(group=[self.group.id, group_2.id])
assert {g["id"] for g in response.data} == {str(self.group.id), str(group_2.id)}
def test_lookup_by_group_id_no_perms(self) -> None:
organization = self.create_organization()
project = self.create_project(organization=organization)
group = self.create_group(project=project)
user = self.create_user()
self.create_member(organization=organization, user=user, has_global_access=False)
self.login_as(user=user)
response = self.get_response(group=[group.id])
assert response.status_code == 403
def test_lookup_by_first_release(self) -> None:
self.login_as(self.user)
project = self.project
project2 = self.create_project(name="baz", organization=project.organization)
release = Release.objects.create(organization=project.organization, version="12345")
release.add_project(project)
release.add_project(project2)
event = self.store_event(
data={"release": release.version, "timestamp": before_now(seconds=2).isoformat()},
project_id=project.id,
)
event2 = self.store_event(
data={"release": release.version, "timestamp": before_now(seconds=1).isoformat()},
project_id=project2.id,
)
response = self.get_success_response(**{"query": 'first-release:"%s"' % release.version})
issues = json.loads(response.content)
assert len(issues) == 2
assert int(issues[0]["id"]) == event2.group.id
assert int(issues[1]["id"]) == event.group.id
response = self.get_success_response(**{"query": 'first-release:"%s"' % release.version})
issues = json.loads(response.content)
assert len(issues) == 2
assert int(issues[0]["id"]) == event2.group.id
assert int(issues[1]["id"]) == event.group.id
def test_lookup_by_release(self) -> None:
self.login_as(self.user)
project = self.project
release = Release.objects.create(organization=project.organization, version="12345")
release.add_project(project)
event = self.store_event(
data={
"timestamp": before_now(seconds=1).isoformat(),
"tags": {"sentry:release": release.version},
},
project_id=project.id,
)
response = self.get_success_response(release=release.version)
issues = json.loads(response.content)
assert len(issues) == 1
assert int(issues[0]["id"]) == event.group.id
def test_release_package_in(self) -> None:
self.login_as(self.user)
project = self.project
release1 = Release.objects.create(organization=project.organization, version="foo@1.0.0.0")
release2 = Release.objects.create(organization=project.organization, version="bar@1.2.0.0")
release3 = Release.objects.create(organization=project.organization, version="cat@1.2.0.0")
release1.add_project(project)
release2.add_project(project)
event1 = self.store_event(
data={
"release": release1.version,
"timestamp": before_now(seconds=3).isoformat(),
"fingerprint": ["1"],
},
project_id=project.id,
)
event2 = self.store_event(
data={
"release": release2.version,
"timestamp": before_now(seconds=2).isoformat(),
"fingerprint": ["2"],
},
project_id=project.id,
)
self.store_event(
data={
"release": release3.version,
"timestamp": before_now(seconds=2).isoformat(),
"fingerprint": ["3"],
},
project_id=project.id,
)
response = self.get_success_response(**{"query": 'release.package:["foo", "bar"]'})
issues = json.loads(response.content)
assert len(issues) == 2
assert int(issues[0]["id"]) == event2.group.id
assert int(issues[1]["id"]) == event1.group.id
def test_lookup_by_release_wildcard(self) -> None:
self.login_as(self.user)
project = self.project
release = Release.objects.create(organization=project.organization, version="12345")
release.add_project(project)
event = self.store_event(
data={
"timestamp": before_now(seconds=1).isoformat(),
"tags": {"sentry:release": release.version},
},
project_id=project.id,
)
response = self.get_success_response(release=release.version[:3] + "*")
issues = json.loads(response.content)
assert len(issues) == 1
assert int(issues[0]["id"]) == event.group.id
def test_lookup_by_regressed_in_release(self) -> None:
self.login_as(self.user)
project = self.project
release = self.create_release()
event = self.store_event(
data={
"timestamp": before_now(seconds=1).isoformat(),
"tags": {"sentry:release": release.version},
},
project_id=project.id,
)
record_group_history(event.group, GroupHistoryStatus.REGRESSED, release=release)
response = self.get_success_response(query=f"regressed_in_release:{release.version}")
issues = json.loads(response.content)
assert [int(issue["id"]) for issue in issues] == [event.group.id]
def test_pending_delete_pending_merge_excluded(self) -> None:
events = []
for i in "abcd":
events.append(
self.store_event(
data={
"event_id": i * 32,
"fingerprint": [i],
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
)
events[0].group.update(status=GroupStatus.PENDING_DELETION, substatus=None)
events[2].group.update(status=GroupStatus.DELETION_IN_PROGRESS, substatus=None)
events[3].group.update(status=GroupStatus.PENDING_MERGE, substatus=None)
self.login_as(user=self.user)
response = self.get_success_response()
assert len(response.data) == 1
assert response.data[0]["id"] == str(events[1].group.id)
def test_filters_based_on_retention(self) -> None:
self.login_as(user=self.user)
self.create_group(last_seen=timezone.now() - timedelta(days=2))
with self.options({"system.event-retention-days": 1}):
response = self.get_success_response()
assert len(response.data) == 0
def test_token_auth(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
token = ApiToken.objects.create(user=self.user, scope_list=["event:read"])
response = self.client.get(
reverse("sentry-api-0-organization-group-index", args=[self.project.organization.slug]),
format="json",
HTTP_AUTHORIZATION=f"Bearer {token.token}",
)
assert response.status_code == 200, response.content
def test_date_range(self) -> None:
with self.options({"system.event-retention-days": 2}):
event = self.store_event(
data={"timestamp": before_now(hours=5).isoformat()}, project_id=self.project.id
)
group = event.group
self.login_as(user=self.user)
response = self.get_success_response(statsPeriod="6h")
assert len(response.data) == 1
assert response.data[0]["id"] == str(group.id)
response = self.get_success_response(statsPeriod="1h")
assert len(response.data) == 0
@patch("sentry.analytics.record")
def test_advanced_search_errors(self, mock_record: MagicMock) -> None:
self.login_as(user=self.user)
response = self.get_response(sort_by="date", query="!has:user")
assert response.status_code == 200, response.data
assert not any(
c[0][0] == "advanced_search.feature_gated" for c in mock_record.call_args_list
)
with self.feature({"organizations:advanced-search": False}):
response = self.get_response(sort_by="date", query="!has:user")
assert response.status_code == 400, response.data
assert (
"You need access to the advanced search feature to use negative "
"search" == response.data["detail"]
)
assert_last_analytics_event(
mock_record,
AdvancedSearchFeatureGateEvent(
user_id=self.user.id,
default_user_id=self.user.id,
organization_id=self.organization.id,
),
)
# This seems like a random override, but this test needed a way to override
# the orderby being sent to snuba for a certain call. This function has a simple
# return value and can be used to set variables in the snuba payload.
@patch("sentry.utils.snuba.get_query_params_to_update_for_projects")
def test_assigned_to_pagination(self, patched_params_update: MagicMock) -> None:
old_sample_size = options.get("snuba.search.hits-sample-size")
assert options.set("snuba.search.hits-sample-size", 1)
days = reversed(range(4))
self.login_as(user=self.user)
groups = []
for day in days:
patched_params_update.side_effect = [
(self.organization.id, {"project": [self.project.id]})
]
group = self.store_event(
data={
"timestamp": before_now(days=day).isoformat(),
"fingerprint": [f"group-{day}"],
},
project_id=self.project.id,
).group
groups.append(group)
assigned_groups = groups[:2]
for ag in assigned_groups:
ag.update(
status=GroupStatus.RESOLVED, resolved_at=before_now(seconds=5), substatus=None
)
GroupAssignee.objects.assign(ag, self.user)
# This side_effect is meant to override the `calculate_hits` snuba query specifically.
# If this test is failing it's because the -last_seen override is being applied to
# different snuba query.
def _my_patched_params(
query_params: SnubaQueryParams, **kwargs: Any
) -> tuple[int, dict[str, Any]]:
if query_params.aggregations == [
["uniq", "group_id", "total"],
["multiply(toUInt64(max(timestamp)), 1000)", "", "last_seen"],
]:
return (
self.organization.id,
{"project": [self.project.id], "orderby": ["-last_seen"]},
)
else:
return (self.organization.id, {"project": [self.project.id]})
patched_params_update.side_effect = _my_patched_params
response = self.get_response(limit=1, query=f"assigned:{self.user.email}")
assert len(response.data) == 1
assert response.data[0]["id"] == str(assigned_groups[1].id)
header_links = parse_link_header(response["Link"])
cursor = [link for link in header_links.values() if link["rel"] == "next"][0]["cursor"]
response = self.get_response(limit=1, cursor=cursor, query=f"assigned:{self.user.email}")
assert len(response.data) == 1
assert response.data[0]["id"] == str(assigned_groups[0].id)
assert options.set("snuba.search.hits-sample-size", old_sample_size)
@patch("sentry.search.snuba.executors.PostgresSnubaQueryExecutor.calculate_hits")
def test_hits_capped_when_overestimated(self, mock_calculate_hits: MagicMock) -> None:
"""
Test that when sampling overestimates the hit count and all results fit on one page,
the X-Hits header is capped to the actual number of results returned.
This prevents UI bugs like showing "(6-11) of 11" when there are only 6 results.
"""
self.login_as(user=self.user)
# Create 6 groups
groups = []
for i in range(6):
event = self.store_event(
data={
"timestamp": before_now(days=i).isoformat(),
"fingerprint": [f"group-{i}"],
},
project_id=self.project.id,
)
groups.append(event.group)
# Mock calculate_hits to return an overestimate (simulating sampling inaccuracy)
# This would happen when Snuba thinks there are 11 groups but Postgres only has 6
mock_calculate_hits.return_value = 11
# Make a request that returns all 6 groups on one page
response = self.get_success_response(limit=25, query="is:unresolved")
# Should return all 6 groups
assert len(response.data) == 6
# X-Hits should be corrected to 6, not the overestimated 11
assert response["X-Hits"] == "6"
# Verify no next page exists (we have all results)
links = self._parse_links(response["Link"])
assert links["next"]["results"] == "false"
def test_assigned_me_none(self) -> None:
self.login_as(user=self.user)
groups = []
for i in range(5):
group = self.store_event(
data={
"timestamp": before_now(minutes=10, days=i).isoformat(),
"fingerprint": [f"group-{i}"],
},
project_id=self.project.id,
).group
groups.append(group)
assigned_groups = groups[:2]
for ag in assigned_groups:
GroupAssignee.objects.assign(ag, self.user)
response = self.get_response(limit=10, query="assigned:me")
assert [row["id"] for row in response.data] == [str(g.id) for g in assigned_groups]
response = self.get_response(limit=10, query="assigned:[me, none]")
assert len(response.data) == 5
GroupAssignee.objects.assign(assigned_groups[1], self.create_user("other@user.com"))
sleep(1)
response = self.get_response(limit=10, query="assigned:[me, none]")
assert len(response.data) == 4
def test_seen_stats(self) -> None:
self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
before_now_300_seconds = before_now(seconds=300).isoformat()
before_now_350_seconds = before_now(seconds=350).isoformat()
event2 = self.store_event(
data={"timestamp": before_now_300_seconds, "fingerprint": ["group-2"]},
project_id=self.project.id,
)
group2 = event2.group
group2.first_seen = datetime.fromisoformat(before_now_350_seconds)
group2.times_seen = 55
group2.save()
before_now_250_seconds = before_now(seconds=250).replace(microsecond=0).isoformat()
self.store_event(
data={
"timestamp": before_now_250_seconds,
"fingerprint": ["group-2"],
"tags": {"server": "example.com", "trace": "meow", "message": "foo"},
},
project_id=self.project.id,
)
self.store_event(
data={
"timestamp": before_now(seconds=200).isoformat(),
"fingerprint": ["group-1"],
"tags": {"server": "example.com", "trace": "woof", "message": "foo"},
},
project_id=self.project.id,
)
before_now_150_seconds = before_now(seconds=150).replace(microsecond=0).isoformat()
self.store_event(
data={
"timestamp": before_now_150_seconds,
"fingerprint": ["group-2"],
"tags": {"trace": "ribbit", "server": "example.com"},
},
project_id=self.project.id,
)
before_now_100_seconds = before_now(seconds=100).replace(microsecond=0).isoformat()
self.store_event(
data={
"timestamp": before_now_100_seconds,
"fingerprint": ["group-2"],
"tags": {"message": "foo", "trace": "meow"},
},
project_id=self.project.id,
)
self.login_as(user=self.user)
response = self.get_response(sort_by="date", limit=10, query="server:example.com")
assert response.status_code == 200
assert len(response.data) == 2
assert int(response.data[0]["id"]) == group2.id
assert response.data[0]["lifetime"] is not None
assert response.data[0]["filtered"] is not None
assert response.data[0]["filtered"]["stats"] is not None
assert response.data[0]["lifetime"]["stats"] is None
assert response.data[0]["filtered"]["stats"] != response.data[0]["stats"]
assert response.data[0]["lifetime"]["firstSeen"] == datetime.fromisoformat(
before_now_350_seconds # Should match overridden value, not event value
)
assert response.data[0]["lifetime"]["lastSeen"] == datetime.fromisoformat(
before_now_100_seconds
)
assert response.data[0]["lifetime"]["count"] == "55"
assert response.data[0]["filtered"]["count"] == "2"
assert response.data[0]["filtered"]["firstSeen"] == datetime.fromisoformat(
before_now_250_seconds
)
assert response.data[0]["filtered"]["lastSeen"] == datetime.fromisoformat(
before_now_150_seconds
)
# Empty filter test:
response = self.get_response(sort_by="date", limit=10, query="")
assert response.status_code == 200
assert len(response.data) == 2
assert int(response.data[0]["id"]) == group2.id
assert response.data[0]["lifetime"] is not None
assert response.data[0]["filtered"] is None
assert response.data[0]["lifetime"]["stats"] is None
assert response.data[0]["lifetime"]["count"] == "55"
assert response.data[0]["lifetime"]["firstSeen"] == datetime.fromisoformat(
before_now_350_seconds # Should match overridden value, not event value
)
assert response.data[0]["lifetime"]["lastSeen"] == datetime.fromisoformat(
before_now_100_seconds
)
response = self.get_response(sort_by="date", limit=10, query="server:example.com")
assert response.status_code == 200
assert len(response.data) == 2
assert int(response.data[0]["id"]) == group2.id
    def test_semver_seen_stats(self) -> None:
        """Seen-stats respect semver release filters: ``lifetime`` spans every
        event of the group, while ``filtered`` only covers events whose
        release matches the query."""
        # Three semver-ordered releases; one event per release, all in the same group.
        release_1 = self.create_release(version="test@1.2.3")
        release_2 = self.create_release(version="test@1.2.4")
        release_3 = self.create_release(version="test@1.2.5")
        release_1_e_1 = self.store_event(
            data={
                "timestamp": before_now(minutes=5).replace(microsecond=0).isoformat(),
                "fingerprint": ["group-1"],
                "release": release_1.version,
            },
            project_id=self.project.id,
        )
        group_1 = release_1_e_1.group
        release_2_e_1 = self.store_event(
            data={
                "timestamp": before_now(minutes=3).replace(microsecond=0).isoformat(),
                "fingerprint": ["group-1"],
                "release": release_2.version,
            },
            project_id=self.project.id,
        )
        release_3_e_1 = self.store_event(
            data={
                "timestamp": before_now(minutes=1).replace(microsecond=0).isoformat(),
                "fingerprint": ["group-1"],
                "release": release_3.version,
            },
            project_id=self.project.id,
        )
        group_1.update(times_seen=3)
        self.login_as(user=self.user)
        # Exact version: filtered stats cover only the 1.2.3 event.
        response = self.get_success_response(
            sort_by="date", limit=10, query="release.version:1.2.3"
        )
        assert [int(row["id"]) for row in response.data] == [group_1.id]
        group_data = response.data[0]
        assert group_data["lifetime"]["firstSeen"] == release_1_e_1.datetime
        assert group_data["filtered"]["firstSeen"] == release_1_e_1.datetime
        assert group_data["lifetime"]["lastSeen"] == release_3_e_1.datetime
        assert group_data["filtered"]["lastSeen"] == release_1_e_1.datetime
        assert int(group_data["lifetime"]["count"]) == 3
        assert int(group_data["filtered"]["count"]) == 1
        # Range >=1.2.3 matches all three events, so filtered equals lifetime.
        response = self.get_success_response(
            sort_by="date", limit=10, query="release.version:>=1.2.3"
        )
        assert [int(row["id"]) for row in response.data] == [group_1.id]
        group_data = response.data[0]
        assert group_data["lifetime"]["firstSeen"] == release_1_e_1.datetime
        assert group_data["filtered"]["firstSeen"] == release_1_e_1.datetime
        assert group_data["lifetime"]["lastSeen"] == release_3_e_1.datetime
        assert group_data["filtered"]["lastSeen"] == release_3_e_1.datetime
        assert int(group_data["lifetime"]["count"]) == 3
        assert int(group_data["filtered"]["count"]) == 3
        # Explicit equality operator (=1.2.4): only the middle event is filtered.
        response = self.get_success_response(
            sort_by="date", limit=10, query="release.version:=1.2.4"
        )
        assert [int(row["id"]) for row in response.data] == [group_1.id]
        group_data = response.data[0]
        assert group_data["lifetime"]["firstSeen"] == release_1_e_1.datetime
        assert group_data["filtered"]["firstSeen"] == release_2_e_1.datetime
        assert group_data["lifetime"]["lastSeen"] == release_3_e_1.datetime
        assert group_data["filtered"]["lastSeen"] == release_2_e_1.datetime
        assert int(group_data["lifetime"]["count"]) == 3
        assert int(group_data["filtered"]["count"]) == 1
def test_inbox_search(self) -> None:
self.store_event(
data={
"timestamp": before_now(seconds=200).isoformat(),
"fingerprint": ["group-1"],
"tags": {"server": "example.com", "trace": "woof", "message": "foo"},
},
project_id=self.project.id,
)
event = self.store_event(
data={
"timestamp": before_now(seconds=200).isoformat(),
"fingerprint": ["group-2"],
"tags": {"server": "example.com", "trace": "woof", "message": "foo"},
},
project_id=self.project.id,
)
self.store_event(
data={
"timestamp": before_now(seconds=200).isoformat(),
"fingerprint": ["group-3"],
"tags": {"server": "example.com", "trace": "woof", "message": "foo"},
},
project_id=self.project.id,
)
add_group_to_inbox(event.group, GroupInboxReason.NEW)
self.login_as(user=self.user)
response = self.get_response(
sort_by="date", limit=10, query="is:unresolved is:for_review", expand=["inbox"]
)
assert response.status_code == 200
assert len(response.data) == 1
assert int(response.data[0]["id"]) == event.group.id
assert response.data[0]["inbox"] is not None
assert response.data[0]["inbox"]["reason"] == GroupInboxReason.NEW.value
def test_inbox_search_outside_retention(self) -> None:
self.login_as(user=self.user)
response = self.get_response(
sort="inbox",
limit=10,
query="is:unresolved is:for_review",
collapse="stats",
expand=["inbox", "owners"],
start=before_now(days=20).isoformat(),
end=before_now(days=15).isoformat(),
)
assert response.status_code == 200
assert len(response.data) == 0
    def test_assigned_or_suggested_search(self) -> None:
        """Exercises the ``assigned_or_suggested`` filter across ownership
        combinations: suggested owners (``GroupOwner``), explicit assignees
        (``GroupAssignee``), users vs. teams, the ``me`` and ``[me, none]``
        syntaxes, and explicit assignment overriding suggestions."""
        # Three events with tags but no ownership, plus two that will be assigned below.
        event = self.store_event(
            data={
                "timestamp": before_now(seconds=180).isoformat(),
                "fingerprint": ["group-1"],
                "tags": {"server": "example.com", "trace": "woof", "message": "foo"},
            },
            project_id=self.project.id,
        )
        event1 = self.store_event(
            data={
                "timestamp": before_now(seconds=185).isoformat(),
                "fingerprint": ["group-2"],
                "tags": {"server": "example.com", "trace": "woof", "message": "foo"},
            },
            project_id=self.project.id,
        )
        event2 = self.store_event(
            data={
                "timestamp": before_now(seconds=190).isoformat(),
                "fingerprint": ["group-3"],
                "tags": {"server": "example.com", "trace": "woof", "message": "foo"},
            },
            project_id=self.project.id,
        )
        assigned_event = self.store_event(
            data={
                "timestamp": before_now(seconds=195).isoformat(),
                "fingerprint": ["group-4"],
            },
            project_id=self.project.id,
        )
        assigned_to_other_event = self.store_event(
            data={
                "timestamp": before_now(seconds=195).isoformat(),
                "fingerprint": ["group-5"],
            },
            project_id=self.project.id,
        )
        self.login_as(user=self.user)
        # No ownership data yet: "me" matches nothing.
        response = self.get_response(sort_by="date", limit=10, query="assigned_or_suggested:me")
        assert response.status_code == 200
        assert len(response.data) == 0
        # Suggest self.user (GroupOwner) on two groups; both should now match "me".
        GroupOwner.objects.create(
            group=assigned_to_other_event.group,
            project=assigned_to_other_event.group.project,
            organization=assigned_to_other_event.group.project.organization,
            type=0,
            team_id=None,
            user_id=self.user.id,
        )
        GroupOwner.objects.create(
            group=event.group,
            project=event.group.project,
            organization=event.group.project.organization,
            type=0,
            team_id=None,
            user_id=self.user.id,
        )
        response = self.get_response(sort_by="date", limit=10, query="assigned_or_suggested:me")
        assert response.status_code == 200
        assert len(response.data) == 2
        assert int(response.data[0]["id"]) == event.group.id
        assert int(response.data[1]["id"]) == assigned_to_other_event.group.id
        # Because assigned_to_other_event is assigned to self.other_user, it should not show up in assigned_or_suggested search for anyone but self.other_user. (aka. they are now the only owner)
        other_user = self.create_user("other@user.com", is_superuser=False)
        GroupAssignee.objects.create(
            group=assigned_to_other_event.group,
            project=assigned_to_other_event.group.project,
            user_id=other_user.id,
        )
        response = self.get_response(sort_by="date", limit=10, query="assigned_or_suggested:me")
        assert response.status_code == 200
        assert len(response.data) == 1
        assert int(response.data[0]["id"]) == event.group.id
        # Searching by the explicit assignee's email finds the assigned group.
        response = self.get_response(
            sort_by="date", limit=10, query=f"assigned_or_suggested:{other_user.email}"
        )
        assert response.status_code == 200
        assert len(response.data) == 1
        assert int(response.data[0]["id"]) == assigned_to_other_event.group.id
        GroupAssignee.objects.create(
            group=assigned_event.group, project=assigned_event.group.project, user_id=self.user.id
        )
        # Searching by self.user's email returns both suggested and assigned groups.
        response = self.get_response(
            sort_by="date", limit=10, query=f"assigned_or_suggested:{self.user.email}"
        )
        assert response.status_code == 200
        assert len(response.data) == 2
        assert int(response.data[0]["id"]) == event.group.id
        assert int(response.data[1]["id"]) == assigned_event.group.id
        # Team has no ownership yet, so a team search is empty.
        response = self.get_response(
            sort_by="date", limit=10, query=f"assigned_or_suggested:#{self.team.slug}"
        )
        assert response.status_code == 200
        assert len(response.data) == 0
        GroupOwner.objects.create(
            group=event.group,
            project=event.group.project,
            organization=event.group.project.organization,
            type=0,
            team_id=self.team.id,
            user_id=None,
        )
        response = self.get_response(
            sort_by="date", limit=10, query=f"assigned_or_suggested:#{self.team.slug}"
        )
        assert response.status_code == 200
        assert len(response.data) == 1
        assert int(response.data[0]["id"]) == event.group.id
        # "[me, none]" unions my groups with completely unowned groups.
        response = self.get_response(
            sort_by="date", limit=10, query="assigned_or_suggested:[me, none]"
        )
        assert response.status_code == 200
        assert len(response.data) == 4
        assert int(response.data[0]["id"]) == event.group.id
        assert int(response.data[1]["id"]) == event1.group.id
        assert int(response.data[2]["id"]) == event2.group.id
        assert int(response.data[3]["id"]) == assigned_event.group.id
        # Suggesting event2 to someone else removes it from the "none" bucket.
        not_me = self.create_user(email="notme@sentry.io")
        GroupOwner.objects.create(
            group=event2.group,
            project=event2.group.project,
            organization=event2.group.project.organization,
            type=0,
            team_id=None,
            user_id=not_me.id,
        )
        response = self.get_response(
            sort_by="date", limit=10, query="assigned_or_suggested:[me, none]"
        )
        assert response.status_code == 200
        assert len(response.data) == 3
        assert int(response.data[0]["id"]) == event.group.id
        assert int(response.data[1]["id"]) == event1.group.id
        assert int(response.data[2]["id"]) == assigned_event.group.id
        GroupOwner.objects.create(
            group=event2.group,
            project=event2.group.project,
            organization=event2.group.project.organization,
            type=0,
            team_id=None,
            user_id=self.user.id,
        )
        # Should now include event2 as it has shared ownership.
        response = self.get_response(
            sort_by="date", limit=10, query="assigned_or_suggested:[me, none]"
        )
        assert response.status_code == 200
        assert len(response.data) == 4
        assert int(response.data[0]["id"]) == event.group.id
        assert int(response.data[1]["id"]) == event1.group.id
        assert int(response.data[2]["id"]) == event2.group.id
        assert int(response.data[3]["id"]) == assigned_event.group.id
        # Assign group to another user and now it shouldn't show up in owner search for this team.
        GroupAssignee.objects.create(
            group=event.group,
            project=event.group.project,
            user_id=other_user.id,
        )
        response = self.get_response(
            sort_by="date", limit=10, query=f"assigned_or_suggested:#{self.team.slug}"
        )
        assert response.status_code == 200
        assert len(response.data) == 0
    def test_semver(self) -> None:
        """Semver comparison operators (``>``, ``>=``, ``<``, ``!``) on the
        ``release.version`` alias select the expected groups, ordered by date."""
        # Three semver-ordered releases with two groups each.
        release_1 = self.create_release(version="test@1.2.3")
        release_2 = self.create_release(version="test@1.2.4")
        release_3 = self.create_release(version="test@1.2.5")
        release_1_g_1 = self.store_event(
            data={
                "timestamp": before_now(minutes=1).isoformat(),
                "fingerprint": ["group-1"],
                "release": release_1.version,
            },
            project_id=self.project.id,
        ).group.id
        release_1_g_2 = self.store_event(
            data={
                "timestamp": before_now(minutes=2).isoformat(),
                "fingerprint": ["group-2"],
                "release": release_1.version,
            },
            project_id=self.project.id,
        ).group.id
        release_2_g_1 = self.store_event(
            data={
                "timestamp": before_now(minutes=3).isoformat(),
                "fingerprint": ["group-3"],
                "release": release_2.version,
            },
            project_id=self.project.id,
        ).group.id
        release_2_g_2 = self.store_event(
            data={
                "timestamp": before_now(minutes=4).isoformat(),
                "fingerprint": ["group-4"],
                "release": release_2.version,
            },
            project_id=self.project.id,
        ).group.id
        release_3_g_1 = self.store_event(
            data={
                "timestamp": before_now(minutes=5).isoformat(),
                "fingerprint": ["group-5"],
                "release": release_3.version,
            },
            project_id=self.project.id,
        ).group.id
        release_3_g_2 = self.store_event(
            data={
                "timestamp": before_now(minutes=6).isoformat(),
                "fingerprint": ["group-6"],
                "release": release_3.version,
            },
            project_id=self.project.id,
        ).group.id
        self.login_as(user=self.user)
        # Strictly greater: excludes 1.2.3's groups.
        response = self.get_response(sort_by="date", limit=10, query=f"{SEMVER_ALIAS}:>1.2.3")
        assert response.status_code == 200, response.content
        assert [int(r["id"]) for r in response.data] == [
            release_2_g_1,
            release_2_g_2,
            release_3_g_1,
            release_3_g_2,
        ]
        # Greater-or-equal: includes all six groups.
        response = self.get_response(sort_by="date", limit=10, query=f"{SEMVER_ALIAS}:>=1.2.3")
        assert response.status_code == 200, response.content
        assert [int(r["id"]) for r in response.data] == [
            release_1_g_1,
            release_1_g_2,
            release_2_g_1,
            release_2_g_2,
            release_3_g_1,
            release_3_g_2,
        ]
        # Strictly less than 1.2.4: only 1.2.3's groups.
        response = self.get_response(sort_by="date", limit=10, query=f"{SEMVER_ALIAS}:<1.2.4")
        assert response.status_code == 200, response.content
        assert [int(r["id"]) for r in response.data] == [release_1_g_1, release_1_g_2]
        # No release is below 1.0.
        response = self.get_response(sort_by="date", limit=10, query=f"{SEMVER_ALIAS}:<1.0")
        assert response.status_code == 200, response.content
        assert [int(r["id"]) for r in response.data] == []
        # Negation: everything except 1.2.4's groups.
        response = self.get_response(sort_by="date", limit=10, query=f"!{SEMVER_ALIAS}:1.2.4")
        assert response.status_code == 200, response.content
        assert [int(r["id"]) for r in response.data] == [
            release_1_g_1,
            release_1_g_2,
            release_3_g_1,
            release_3_g_2,
        ]
    def test_release_stage(self) -> None:
        """The release-stage alias filters groups by their release's adoption
        state (adopted / replaced / low adoption), including negation and
        in-list syntax, scoped to an environment."""
        # A release that was adopted and then unadopted counts as "replaced".
        replaced_release = self.create_release(
            version="replaced_release",
            environments=[self.environment],
            adopted=timezone.now(),
            unadopted=timezone.now(),
        )
        # Adopted (never unadopted).
        adopted_release = self.create_release(
            version="adopted_release",
            environments=[self.environment],
            adopted=timezone.now(),
        )
        # Never adopted => "low adoption"; no events stored against it.
        self.create_release(version="not_adopted_release", environments=[self.environment])
        adopted_release_g_1 = self.store_event(
            data={
                "timestamp": before_now(minutes=1).isoformat(),
                "fingerprint": ["group-1"],
                "release": adopted_release.version,
                "environment": self.environment.name,
            },
            project_id=self.project.id,
        ).group.id
        adopted_release_g_2 = self.store_event(
            data={
                "timestamp": before_now(minutes=2).isoformat(),
                "fingerprint": ["group-2"],
                "release": adopted_release.version,
                "environment": self.environment.name,
            },
            project_id=self.project.id,
        ).group.id
        replaced_release_g_1 = self.store_event(
            data={
                "timestamp": before_now(minutes=3).isoformat(),
                "fingerprint": ["group-3"],
                "release": replaced_release.version,
                "environment": self.environment.name,
            },
            project_id=self.project.id,
        ).group.id
        replaced_release_g_2 = self.store_event(
            data={
                "timestamp": before_now(minutes=4).isoformat(),
                "fingerprint": ["group-4"],
                "release": replaced_release.version,
                "environment": self.environment.name,
            },
            project_id=self.project.id,
        ).group.id
        self.login_as(user=self.user)
        # stage:adopted matches only the adopted release's groups.
        response = self.get_response(
            sort_by="date",
            limit=10,
            query=f"{RELEASE_STAGE_ALIAS}:{ReleaseStages.ADOPTED.value}",
            environment=self.environment.name,
        )
        assert response.status_code == 200, response.content
        assert [int(r["id"]) for r in response.data] == [
            adopted_release_g_1,
            adopted_release_g_2,
        ]
        # Negated low-adoption matches both adopted and replaced groups.
        response = self.get_response(
            sort_by="date",
            limit=10,
            query=f"!{RELEASE_STAGE_ALIAS}:{ReleaseStages.LOW_ADOPTION.value}",
            environment=self.environment.name,
        )
        assert response.status_code == 200, response.content
        assert [int(r["id"]) for r in response.data] == [
            adopted_release_g_1,
            adopted_release_g_2,
            replaced_release_g_1,
            replaced_release_g_2,
        ]
        # In-list syntax: [adopted, replaced].
        response = self.get_response(
            sort_by="date",
            limit=10,
            query=f"{RELEASE_STAGE_ALIAS}:[{ReleaseStages.ADOPTED.value}, {ReleaseStages.REPLACED.value}]",
            environment=self.environment.name,
        )
        assert response.status_code == 200, response.content
        assert [int(r["id"]) for r in response.data] == [
            adopted_release_g_1,
            adopted_release_g_2,
            replaced_release_g_1,
            replaced_release_g_2,
        ]
        # Negated in-list: everything except low-adoption and replaced.
        response = self.get_response(
            sort_by="date",
            limit=10,
            query=f"!{RELEASE_STAGE_ALIAS}:[{ReleaseStages.LOW_ADOPTION.value}, {ReleaseStages.REPLACED.value}]",
            environment=self.environment.name,
        )
        assert response.status_code == 200, response.content
        assert [int(r["id"]) for r in response.data] == [
            adopted_release_g_1,
            adopted_release_g_2,
        ]
def test_semver_package(self) -> None:
release_1 = self.create_release(version="test@1.2.3")
release_2 = self.create_release(version="test2@1.2.4")
release_1_g_1 = self.store_event(
data={
"timestamp": before_now(minutes=1).isoformat(),
"fingerprint": ["group-1"],
"release": release_1.version,
},
project_id=self.project.id,
).group.id
release_1_g_2 = self.store_event(
data={
"timestamp": before_now(minutes=2).isoformat(),
"fingerprint": ["group-2"],
"release": release_1.version,
},
project_id=self.project.id,
).group.id
release_2_g_1 = self.store_event(
data={
"timestamp": before_now(minutes=3).isoformat(),
"fingerprint": ["group-3"],
"release": release_2.version,
},
project_id=self.project.id,
).group.id
self.login_as(user=self.user)
response = self.get_response(sort_by="date", limit=10, query=f"{SEMVER_PACKAGE_ALIAS}:test")
assert response.status_code == 200, response.content
assert [int(r["id"]) for r in response.data] == [
release_1_g_1,
release_1_g_2,
]
response = self.get_response(
sort_by="date", limit=10, query=f"{SEMVER_PACKAGE_ALIAS}:test2"
)
assert response.status_code == 200, response.content
assert [int(r["id"]) for r in response.data] == [
release_2_g_1,
]
    def test_semver_build(self) -> None:
        """Filtering by semver build number (the ``+123`` suffix) matches the
        right groups; in-list syntax is rejected with a 400."""
        release_1 = self.create_release(version="test@1.2.3+123")
        release_2 = self.create_release(version="test2@1.2.4+124")
        release_1_g_1 = self.store_event(
            data={
                "timestamp": before_now(minutes=1).isoformat(),
                "fingerprint": ["group-1"],
                "release": release_1.version,
            },
            project_id=self.project.id,
        ).group.id
        release_1_g_2 = self.store_event(
            data={
                "timestamp": before_now(minutes=2).isoformat(),
                "fingerprint": ["group-2"],
                "release": release_1.version,
            },
            project_id=self.project.id,
        ).group.id
        release_2_g_1 = self.store_event(
            data={
                "timestamp": before_now(minutes=3).isoformat(),
                "fingerprint": ["group-3"],
                "release": release_2.version,
            },
            project_id=self.project.id,
        ).group.id
        self.login_as(user=self.user)
        response = self.get_response(sort_by="date", limit=10, query=f"{SEMVER_BUILD_ALIAS}:123")
        assert response.status_code == 200, response.content
        assert [int(r["id"]) for r in response.data] == [
            release_1_g_1,
            release_1_g_2,
        ]
        response = self.get_response(sort_by="date", limit=10, query=f"{SEMVER_BUILD_ALIAS}:124")
        assert response.status_code == 200, response.content
        assert [int(r["id"]) for r in response.data] == [
            release_2_g_1,
        ]
        # In-list syntax is not supported for semver build filters.
        response = self.get_response(sort_by="date", limit=10, query=f"{SEMVER_BUILD_ALIAS}:[124]")
        assert response.status_code == 400, response.content
def test_aggregate_stats_regression_test(self) -> None:
self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
self.login_as(user=self.user)
response = self.get_response(
sort_by="date", limit=10, query="times_seen:>0 last_seen:-1h date:-1h"
)
assert response.status_code == 200
assert len(response.data) == 1
def test_skipped_fields(self) -> None:
event = self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
self.store_event(
data={
"timestamp": before_now(seconds=200).isoformat(),
"fingerprint": ["group-1"],
"tags": {"server": "example.com", "trace": "woof", "message": "foo"},
},
project_id=self.project.id,
)
query = "server:example.com"
query += " status:unresolved"
query += " first_seen:" + before_now(seconds=500).isoformat()
self.login_as(user=self.user)
response = self.get_response(sort_by="date", limit=10, query=query)
assert response.status_code == 200
assert len(response.data) == 1
assert int(response.data[0]["id"]) == event.group.id
assert response.data[0]["lifetime"] is not None
assert response.data[0]["filtered"] is not None
    def test_inbox_fields(self) -> None:
        """The ``inbox`` expand serializes the inbox reason and, when present,
        the snooze reason_details for each group."""
        event = self.store_event(
            data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
            project_id=self.project.id,
        )
        add_group_to_inbox(event.group, GroupInboxReason.NEW)
        query = "status:unresolved"
        self.login_as(user=self.user)
        # NEW reason has no reason_details.
        response = self.get_response(sort_by="date", limit=10, query=query, expand=["inbox"])
        assert response.status_code == 200
        assert len(response.data) == 1
        assert int(response.data[0]["id"]) == event.group.id
        assert response.data[0]["inbox"] is not None
        assert response.data[0]["inbox"]["reason"] == GroupInboxReason.NEW.value
        assert response.data[0]["inbox"]["reason_details"] is None
        # Re-add with UNIGNORED and snooze details; they should round-trip.
        remove_group_from_inbox(event.group)
        snooze_details: InboxReasonDetails = {
            "until": None,
            "count": 3,
            "window": None,
            "user_count": None,
            "user_window": 5,
        }
        add_group_to_inbox(event.group, GroupInboxReason.UNIGNORED, snooze_details)
        response = self.get_response(sort_by="date", limit=10, query=query, expand=["inbox"])
        assert response.status_code == 200
        assert len(response.data) == 1
        assert int(response.data[0]["id"]) == event.group.id
        assert response.data[0]["inbox"] is not None
        assert response.data[0]["inbox"]["reason"] == GroupInboxReason.UNIGNORED.value
        assert response.data[0]["inbox"]["reason_details"] == snooze_details
    def test_inbox_fields_issue_states(self) -> None:
        """Same as test_inbox_fields but using the ONGOING issue-state reason."""
        event = self.store_event(
            data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
            project_id=self.project.id,
        )
        add_group_to_inbox(event.group, GroupInboxReason.NEW)
        query = "status:unresolved"
        self.login_as(user=self.user)
        response = self.get_response(sort_by="date", limit=10, query=query, expand=["inbox"])
        assert response.status_code == 200
        assert len(response.data) == 1
        assert int(response.data[0]["id"]) == event.group.id
        assert response.data[0]["inbox"]["reason"] == GroupInboxReason.NEW.value
        # Re-add with ONGOING and snooze details; both should be serialized.
        remove_group_from_inbox(event.group)
        snooze_details: InboxReasonDetails = {
            "until": None,
            "count": 3,
            "window": None,
            "user_count": None,
            "user_window": 5,
        }
        add_group_to_inbox(event.group, GroupInboxReason.ONGOING, snooze_details)
        response = self.get_response(sort_by="date", limit=10, query=query, expand=["inbox"])
        assert response.status_code == 200
        assert len(response.data) == 1
        assert int(response.data[0]["id"]) == event.group.id
        assert response.data[0]["inbox"] is not None
        assert response.data[0]["inbox"]["reason"] == GroupInboxReason.ONGOING.value
        assert response.data[0]["inbox"]["reason_details"] == snooze_details
def test_expand_string(self) -> None:
event = self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
add_group_to_inbox(event.group, GroupInboxReason.NEW)
query = "status:unresolved"
self.login_as(user=self.user)
response = self.get_response(sort_by="date", limit=10, query=query, expand="inbox")
assert response.status_code == 200
assert len(response.data) == 1
assert int(response.data[0]["id"]) == event.group.id
assert response.data[0]["inbox"] is not None
assert response.data[0]["inbox"]["reason"] == GroupInboxReason.NEW.value
assert response.data[0]["inbox"]["reason_details"] is None
def test_expand_plugin_actions_and_issues(self) -> None:
event = self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
query = "status:unresolved"
self.login_as(user=self.user)
response = self.get_response(
sort_by="date", limit=10, query=query, expand=["pluginActions", "pluginIssues"]
)
assert response.status_code == 200
assert len(response.data) == 1
assert int(response.data[0]["id"]) == event.group.id
assert response.data[0]["pluginActions"] is not None
assert response.data[0]["pluginIssues"] is not None
# Test with no expand
response = self.get_response(sort_by="date", limit=10, query=query)
assert response.status_code == 200
assert len(response.data) == 1
assert int(response.data[0]["id"]) == event.group.id
assert "pluginActions" not in response.data[0]
assert "pluginIssues" not in response.data[0]
    def test_expand_integration_issues(self) -> None:
        """The ``integrationIssues`` expand lists external issues linked to a
        group via an integration, and is absent without the expand."""
        event = self.store_event(
            data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
            project_id=self.project.id,
        )
        query = "status:unresolved"
        self.login_as(user=self.user)
        # With no linked issues yet, the expanded field is still present.
        response = self.get_response(
            sort_by="date", limit=10, query=query, expand=["integrationIssues"]
        )
        assert response.status_code == 200
        assert len(response.data) == 1
        assert int(response.data[0]["id"]) == event.group.id
        assert response.data[0]["integrationIssues"] is not None
        # Test with no expand
        response = self.get_response(sort_by="date", limit=10, query=query)
        assert response.status_code == 200
        assert len(response.data) == 1
        assert int(response.data[0]["id"]) == event.group.id
        assert "integrationIssues" not in response.data[0]
        # Link two Jira issues to the group and expect both in the expand.
        integration_jira = self.create_integration(
            organization=event.group.organization,
            provider="jira",
            external_id="jira_external_id",
            name="Jira",
            metadata={"base_url": "https://example.com", "domain_name": "test/"},
        )
        external_issue_1 = self.create_integration_external_issue(
            group=event.group,
            integration=integration_jira,
            key="APP-123-JIRA",
            title="jira issue 1",
            description="this is an example description",
        )
        external_issue_2 = self.create_integration_external_issue(
            group=event.group,
            integration=integration_jira,
            key="APP-456-JIRA",
            title="jira issue 2",
            description="this is an example description",
        )
        response = self.get_response(
            sort_by="date", limit=10, query=query, expand=["integrationIssues"]
        )
        assert response.status_code == 200
        assert len(response.data[0]["integrationIssues"]) == 2
        assert response.data[0]["integrationIssues"][0]["title"] == external_issue_1.title
        assert response.data[0]["integrationIssues"][1]["title"] == external_issue_2.title
    def test_expand_sentry_app_issues(self) -> None:
        """The ``sentryAppIssues`` expand lists PlatformExternalIssues for the
        group only; issues belonging to other groups are excluded."""
        event = self.store_event(
            data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
            project_id=self.project.id,
        )
        query = "status:unresolved"
        self.login_as(user=self.user)
        # With no linked issues yet, the expanded field is still present.
        response = self.get_response(
            sort_by="date", limit=10, query=query, expand=["sentryAppIssues"]
        )
        assert response.status_code == 200
        assert len(response.data) == 1
        assert int(response.data[0]["id"]) == event.group.id
        assert response.data[0]["sentryAppIssues"] is not None
        # Test with no expand
        response = self.get_response(sort_by="date", limit=10, query=query)
        assert response.status_code == 200
        assert len(response.data) == 1
        assert int(response.data[0]["id"]) == event.group.id
        assert "sentryAppIssues" not in response.data[0]
        issue_1 = PlatformExternalIssue.objects.create(
            group_id=event.group.id,
            project_id=event.group.project.id,
            service_type="sentry-app",
            display_name="App#issue-1",
            web_url="https://example.com/app/issues/1",
        )
        issue_2 = PlatformExternalIssue.objects.create(
            group_id=event.group.id,
            project_id=event.group.project.id,
            service_type="sentry-app-2",
            display_name="App#issue-2",
            web_url="https://example.com/app/issues/1",
        )
        # Issue for an unrelated group (1234): must not appear in the results.
        PlatformExternalIssue.objects.create(
            group_id=1234,
            project_id=event.group.project.id,
            service_type="sentry-app-3",
            display_name="App#issue-1",
            web_url="https://example.com/app/issues/1",
        )
        response = self.get_response(
            sort_by="date", limit=10, query=query, expand=["sentryAppIssues"]
        )
        assert response.status_code == 200
        assert len(response.data[0]["sentryAppIssues"]) == 2
        assert response.data[0]["sentryAppIssues"][0]["issueId"] == str(issue_1.group_id)
        assert response.data[0]["sentryAppIssues"][1]["issueId"] == str(issue_2.group_id)
        assert response.data[0]["sentryAppIssues"][0]["displayName"] == issue_1.display_name
        assert response.data[0]["sentryAppIssues"][1]["displayName"] == issue_2.display_name
    @with_feature("organizations:event-attachments")
    def test_expand_latest_event_has_attachments(self) -> None:
        """The ``latestEventHasAttachments`` expand reflects whether the
        group's latest event has any EventAttachment rows."""
        event = self.store_event(
            data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
            project_id=self.project.id,
        )
        query = "status:unresolved"
        self.login_as(user=self.user)
        response = self.get_response(
            sort_by="date", limit=10, query=query, expand=["latestEventHasAttachments"]
        )
        assert response.status_code == 200
        assert len(response.data) == 1
        assert int(response.data[0]["id"]) == event.group.id
        # No attachments
        assert response.data[0]["latestEventHasAttachments"] is False
        # Test with no expand
        response = self.get_response(sort_by="date", limit=10, query=query)
        assert response.status_code == 200
        assert len(response.data) == 1
        assert int(response.data[0]["id"]) == event.group.id
        assert "latestEventHasAttachments" not in response.data[0]
        # Add 1 attachment
        EventAttachment.objects.create(
            group_id=event.group.id,
            event_id=event.event_id,
            project_id=event.project_id,
            name="hello.png",
            content_type="image/png",
        )
        response = self.get_response(
            sort_by="date", limit=10, query=query, expand=["latestEventHasAttachments"]
        )
        assert response.status_code == 200
        assert response.data[0]["latestEventHasAttachments"] is True
@with_feature("organizations:event-attachments")
@patch("sentry.models.Group.get_latest_event", return_value=None)
def test_expand_no_latest_event_has_no_attachments(self, mock_latest_event: MagicMock) -> None:
self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
query = "status:unresolved"
self.login_as(user=self.user)
response = self.get_response(
sort_by="date", limit=10, query=query, expand=["latestEventHasAttachments"]
)
assert response.status_code == 200
# Expand should not execute since there is no latest event
assert "latestEventHasAttachments" not in response.data[0]
    def test_expand_owners(self) -> None:
        """The ``owners`` expand serializes one entry per GroupOwner type
        (suspect commit / ownership rule / codeowners), skipping owner rows
        that have neither a user nor a team."""
        event = self.store_event(
            data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
            project_id=self.project.id,
        )
        query = "status:unresolved"
        self.login_as(user=self.user)
        # Test with no owner
        response = self.get_response(sort_by="date", limit=10, query=query, expand="owners")
        assert response.status_code == 200
        assert len(response.data) == 1
        assert int(response.data[0]["id"]) == event.group.id
        assert response.data[0]["owners"] is None
        # Test with owners
        GroupOwner.objects.create(
            group=event.group,
            project=event.project,
            organization=event.project.organization,
            type=GroupOwnerType.SUSPECT_COMMIT.value,
            user_id=self.user.id,
        )
        GroupOwner.objects.create(
            group=event.group,
            project=event.project,
            organization=event.project.organization,
            type=GroupOwnerType.OWNERSHIP_RULE.value,
            team=self.team,
        )
        GroupOwner.objects.create(
            group=event.group,
            project=event.project,
            organization=event.project.organization,
            type=GroupOwnerType.CODEOWNERS.value,
            team=self.team,
        )
        # Owner row with neither user nor team: should be ignored by the serializer.
        GroupOwner.objects.create(
            group=event.group,
            project=event.project,
            organization=event.project.organization,
            type=GroupOwnerType.SUSPECT_COMMIT.value,
            user_id=None,
            team=None,
        )
        response = self.get_response(sort_by="date", limit=10, query=query, expand="owners")
        assert response.status_code == 200
        assert len(response.data) == 1
        assert int(response.data[0]["id"]) == event.group.id
        assert response.data[0]["owners"] is not None
        assert len(response.data[0]["owners"]) == 3
        assert response.data[0]["owners"][0]["owner"] == f"user:{self.user.id}"
        assert response.data[0]["owners"][1]["owner"] == f"team:{self.team.id}"
        assert response.data[0]["owners"][2]["owner"] == f"team:{self.team.id}"
        assert (
            response.data[0]["owners"][0]["type"] == GROUP_OWNER_TYPE[GroupOwnerType.SUSPECT_COMMIT]
        )
        assert (
            response.data[0]["owners"][1]["type"] == GROUP_OWNER_TYPE[GroupOwnerType.OWNERSHIP_RULE]
        )
        assert response.data[0]["owners"][2]["type"] == GROUP_OWNER_TYPE[GroupOwnerType.CODEOWNERS]
def test_default_search(self) -> None:
event1 = self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
event2 = self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-2"]},
project_id=self.project.id,
)
event2.group.update(status=GroupStatus.RESOLVED, substatus=None)
self.login_as(user=self.user)
response = self.get_response(sort_by="date", limit=10, expand="inbox", collapse="stats")
assert response.status_code == 200
assert [int(r["id"]) for r in response.data] == [event1.group.id]
def test_collapse_stats(self) -> None:
event = self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
self.login_as(user=self.user)
response = self.get_response(
sort_by="date", limit=10, query="is:unresolved", expand="inbox", collapse="stats"
)
assert response.status_code == 200
assert len(response.data) == 1
assert int(response.data[0]["id"]) == event.group.id
assert "stats" not in response.data[0]
assert "firstSeen" not in response.data[0]
assert "lastSeen" not in response.data[0]
assert "count" not in response.data[0]
assert "userCount" not in response.data[0]
assert "lifetime" not in response.data[0]
assert "filtered" not in response.data[0]
def test_collapse_lifetime(self) -> None:
event = self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
self.login_as(user=self.user)
response = self.get_response(
sort_by="date", limit=10, query="is:unresolved", collapse="lifetime"
)
assert response.status_code == 200
assert len(response.data) == 1
assert int(response.data[0]["id"]) == event.group.id
assert "stats" in response.data[0]
assert "firstSeen" in response.data[0]
assert "lastSeen" in response.data[0]
assert "count" in response.data[0]
assert "lifetime" not in response.data[0]
assert "filtered" in response.data[0]
def test_collapse_filtered(self) -> None:
event = self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
self.login_as(user=self.user)
response = self.get_response(
sort_by="date", limit=10, query="is:unresolved", collapse="filtered"
)
assert response.status_code == 200
assert len(response.data) == 1
assert int(response.data[0]["id"]) == event.group.id
assert "stats" in response.data[0]
assert "firstSeen" in response.data[0]
assert "lastSeen" in response.data[0]
assert "count" in response.data[0]
assert "lifetime" in response.data[0]
assert "filtered" not in response.data[0]
def test_collapse_lifetime_and_filtered(self) -> None:
event = self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
self.login_as(user=self.user)
response = self.get_response(
sort_by="date", limit=10, query="is:unresolved", collapse=["filtered", "lifetime"]
)
assert response.status_code == 200
assert len(response.data) == 1
assert int(response.data[0]["id"]) == event.group.id
assert "stats" in response.data[0]
assert "firstSeen" in response.data[0]
assert "lastSeen" in response.data[0]
assert "count" in response.data[0]
assert "lifetime" not in response.data[0]
assert "filtered" not in response.data[0]
def test_collapse_base(self) -> None:
event = self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
self.login_as(user=self.user)
response = self.get_response(
sort_by="date", limit=10, query="is:unresolved", collapse=["base"]
)
assert response.status_code == 200
assert len(response.data) == 1
assert int(response.data[0]["id"]) == event.group.id
assert "title" not in response.data[0]
assert "hasSeen" not in response.data[0]
assert "stats" in response.data[0]
assert "firstSeen" in response.data[0]
assert "lastSeen" in response.data[0]
assert "count" in response.data[0]
assert "lifetime" in response.data[0]
assert "filtered" in response.data[0]
def test_collapse_stats_group_snooze_bug(self) -> None:
# There was a bug where we tried to access attributes on seen_stats if this feature is active
# but seen_stats could be null when we collapse stats.
event = self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
GroupSnooze.objects.create(
group=event.group,
user_count=10,
until=timezone.now() + timedelta(days=1),
count=10,
state={"times_seen": 0},
)
self.login_as(user=self.user)
# The presence of the group above with attached GroupSnooze would have previously caused this error.
response = self.get_response(
sort_by="date", limit=10, query="is:unresolved", expand="inbox", collapse="stats"
)
assert response.status_code == 200
assert len(response.data) == 1
assert int(response.data[0]["id"]) == event.group.id
def test_collapse_unhandled(self) -> None:
event = self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
self.login_as(user=self.user)
response = self.get_response(
sort_by="date", limit=10, query="is:unresolved", collapse=["unhandled"]
)
assert response.status_code == 200
assert len(response.data) == 1
assert int(response.data[0]["id"]) == event.group.id
assert "isUnhandled" not in response.data[0]
def test_selected_saved_search(self) -> None:
saved_search = SavedSearch.objects.create(
name="Saved Search",
query="ZeroDivisionError",
organization=self.organization,
owner_id=self.user.id,
)
event = self.store_event(
data={
"timestamp": before_now(seconds=500).isoformat(),
"fingerprint": ["group-1"],
"message": "ZeroDivisionError",
},
project_id=self.project.id,
)
self.store_event(
data={
"timestamp": before_now(seconds=500).isoformat(),
"fingerprint": ["group-2"],
"message": "TypeError",
},
project_id=self.project.id,
)
self.login_as(user=self.user)
response = self.get_response(
sort_by="date",
limit=10,
collapse=["unhandled"],
savedSearch=0,
searchId=saved_search.id,
)
assert response.status_code == 200
assert len(response.data) == 1
assert int(response.data[0]["id"]) == event.group.id
def test_pinned_saved_search(self) -> None:
SavedSearch.objects.create(
name="Saved Search",
query="ZeroDivisionError",
organization=self.organization,
owner_id=self.user.id,
visibility=Visibility.OWNER_PINNED,
)
event = self.store_event(
data={
"timestamp": before_now(seconds=500).isoformat(),
"fingerprint": ["group-1"],
"message": "ZeroDivisionError",
},
project_id=self.project.id,
)
self.store_event(
data={
"timestamp": before_now(seconds=500).isoformat(),
"fingerprint": ["group-2"],
"message": "TypeError",
},
project_id=self.project.id,
)
self.login_as(user=self.user)
response = self.get_response(
sort_by="date",
limit=10,
collapse=["unhandled"],
savedSearch=0,
)
assert response.status_code == 200
assert len(response.data) == 1
assert int(response.data[0]["id"]) == event.group.id
def test_pinned_saved_search_with_query(self) -> None:
SavedSearch.objects.create(
name="Saved Search",
query="TypeError",
organization=self.organization,
owner_id=self.user.id,
visibility=Visibility.OWNER_PINNED,
)
event = self.store_event(
data={
"timestamp": before_now(seconds=500).isoformat(),
"fingerprint": ["group-1"],
"message": "ZeroDivisionError",
},
project_id=self.project.id,
)
self.store_event(
data={
"timestamp": before_now(seconds=500).isoformat(),
"fingerprint": ["group-2"],
"message": "TypeError",
},
project_id=self.project.id,
)
self.login_as(user=self.user)
response = self.get_response(
sort_by="date",
limit=10,
collapse=["unhandled"],
query="ZeroDivisionError",
savedSearch=0,
)
assert response.status_code == 200
assert len(response.data) == 1
assert int(response.data[0]["id"]) == event.group.id
def test_query_status_and_substatus_overlapping(self) -> None:
event = self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
event.group.update(status=GroupStatus.UNRESOLVED, substatus=GroupSubStatus.ONGOING)
self.login_as(user=self.user)
get_query_response = functools.partial(
self.get_response, sort_by="date", limit=10, expand="inbox", collapse="stats"
)
response0 = get_query_response(
query="is:unresolved",
)
response1 = get_query_response(
query="is:ongoing"
) # (status=unresolved, substatus=(ongoing))
response2 = get_query_response(query="is:unresolved") # (status=unresolved, substatus=*)
response3 = get_query_response(
query="is:unresolved is:ongoing !is:regressed"
) # (status=unresolved, substatus=(ongoing, !regressed))
response4 = get_query_response(
query="is:unresolved is:ongoing !is:ignored"
) # (status=unresolved, substatus=(ongoing, !ignored))
response5 = get_query_response(
query="!is:regressed is:unresolved"
) # (status=unresolved, substatus=(!regressed))
response6 = get_query_response(
query="!is:archived_until_escalating"
) # (status=(!unresolved), substatus=(!archived_until_escalating))
assert (
response0.status_code
== response1.status_code
== response2.status_code
== response3.status_code
== response4.status_code
== response5.status_code
== response6.status_code
== 200
)
assert (
[int(r["id"]) for r in response0.data]
== [int(r["id"]) for r in response1.data]
== [int(r["id"]) for r in response2.data]
== [int(r["id"]) for r in response3.data]
== [int(r["id"]) for r in response4.data]
== [int(r["id"]) for r in response5.data]
== [int(r["id"]) for r in response6.data]
== [event.group.id]
)
def test_query_status_and_substatus_nonoverlapping(self) -> None:
event = self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
event.group.update(status=GroupStatus.UNRESOLVED, substatus=GroupSubStatus.ONGOING)
self.login_as(user=self.user)
get_query_response = functools.partial(
self.get_response, sort_by="date", limit=10, expand="inbox", collapse="stats"
)
response1 = get_query_response(query="is:escalating")
response2 = get_query_response(query="is:new")
response3 = get_query_response(query="is:regressed")
response4 = get_query_response(query="is:archived_forever")
response5 = get_query_response(query="is:archived_until_condition_met")
response6 = get_query_response(query="is:archived_until_escalating")
response7 = get_query_response(query="is:resolved")
response8 = get_query_response(query="is:ignored")
response9 = get_query_response(query="is:muted")
response10 = get_query_response(query="!is:unresolved")
assert (
response1.status_code
== response2.status_code
== response3.status_code
== response4.status_code
== response5.status_code
== response6.status_code
== response7.status_code
== response8.status_code
== response9.status_code
== response10.status_code
== 200
)
assert (
[int(r["id"]) for r in response1.data]
== [int(r["id"]) for r in response2.data]
== [int(r["id"]) for r in response3.data]
== [int(r["id"]) for r in response4.data]
== [int(r["id"]) for r in response5.data]
== [int(r["id"]) for r in response6.data]
== [int(r["id"]) for r in response7.data]
== [int(r["id"]) for r in response8.data]
== [int(r["id"]) for r in response9.data]
== [int(r["id"]) for r in response10.data]
== []
)
def test_query_detector_filter(self) -> None:
event = self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
group = event.group
event2 = self.store_event(
data={"timestamp": before_now(seconds=400).isoformat(), "fingerprint": ["group-2"]},
project_id=self.project.id,
)
assert event2.group.id != group.id
detector_id = 12345 # intentionally multi-digit
detector = self.create_detector(
id=detector_id,
name=f"Test Detector {detector_id}",
project=self.project,
type="error",
)
self.create_detector_group(
detector=detector,
group=group,
)
self.login_as(user=self.user)
# Query for the specific detector ID
response = self.get_response(sort_by="date", query=f"detector:{detector_id}")
assert response.status_code == 200
# Should return only the group associated with the detector
assert len(response.data) == 1
assert int(response.data[0]["id"]) == group.id
response_empty = self.get_response(sort_by="date", query="detector:99999")
assert response_empty.status_code == 200
assert len(response_empty.data) == 0
def test_first_seen_and_last_seen_filters(self) -> None:
self.login_as(user=self.user)
project = self.project
# Create 4 issues at different times
times = [
(before_now(hours=1), before_now(hours=1)), # Two events for issue 0
(before_now(hours=6), before_now(hours=3)), # Two events for issue 1
(before_now(hours=11), before_now(hours=10)), # Two events for issue 2
(before_now(hours=23), before_now(minutes=30)), # Two events for issue 3
]
for i, (time1, time2) in enumerate(times):
self.store_event(
data={
"timestamp": time1.isoformat(),
"message": f"Error {i}",
"fingerprint": [f"group-{i}"],
},
project_id=project.id,
)
self.store_event(
data={
"timestamp": time2.isoformat(),
"message": f"Error {i} - additional event",
"fingerprint": [f"group-{i}"],
},
project_id=project.id,
)
# Test firstSeen filter
twenty_four_hours_ago = before_now(hours=24).isoformat()
response = self.get_success_response(query=f"firstSeen:<{twenty_four_hours_ago}")
assert len(response.data) == 0
response = self.get_success_response(query="firstSeen:-24h")
assert len(response.data) == 4
# Test lastSeen filter
response = self.get_success_response(query="lastSeen:-6h")
assert len(response.data) == 3
response = self.get_success_response(query="lastSeen:-12h")
assert len(response.data) == 4
# Test lastSeen filter with an absolute date using before_now
absolute_date = before_now(days=1).isoformat() # Assuming 365 days before now as an example
response = self.get_success_response(query=f"lastSeen:>{absolute_date}")
assert len(response.data) == 4
response = self.get_success_response(query=f"lastSeen:<{absolute_date}")
assert len(response.data) == 0
def test_filter_by_bookmarked_by(self) -> None:
self.login_as(user=self.user)
project = self.project
user2 = self.create_user(email="user2@example.com")
# Create two issues, one bookmarked by each user
event1 = self.store_event(
data={
"timestamp": before_now(minutes=1).isoformat(),
"message": "Error 1",
"fingerprint": ["group-1"],
},
project_id=project.id,
)
group1 = event1.group
GroupBookmark.objects.create(user_id=self.user.id, group=group1, project_id=project.id)
event2 = self.store_event(
data={
"timestamp": before_now(minutes=1).isoformat(),
"message": "Error 2",
"fingerprint": ["group-2"],
},
project_id=project.id,
)
group2 = event2.group
GroupBookmark.objects.create(user_id=user2.id, group=group2, project_id=project.id)
# Filter by bookmarked_by the first user
response = self.get_success_response(query=f"bookmarked_by:{self.user.email}")
assert len(response.data) == 1
assert int(response.data[0]["id"]) == group1.id
# Filter by bookmarked_by the second user
response = self.get_success_response(query=f"bookmarked_by:{user2.email}")
assert len(response.data) == 1
assert int(response.data[0]["id"]) == group2.id
def test_filter_by_linked(self) -> None:
self.login_as(user=self.user)
project = self.project
# Create two issues, one linked and one not linked
event1 = self.store_event(
data={
"timestamp": before_now(minutes=1).isoformat(),
"message": "Error 1",
"fingerprint": ["group-1"],
},
project_id=project.id,
)
group1 = event1.group
GroupLink.objects.create(
group_id=group1.id,
project=project,
linked_type=GroupLink.LinkedType.issue,
linked_id=1,
)
event2 = self.store_event(
data={
"timestamp": before_now(minutes=1).isoformat(),
"message": "Error 2",
"fingerprint": ["group-2"],
},
project_id=project.id,
)
group2 = event2.group
# Filter by linked issues
response = self.get_success_response(query="is:linked")
assert len(response.data) == 1
assert int(response.data[0]["id"]) == group1.id
# Ensure the unlinked issue is not returned
response = self.get_success_response(query="is:unlinked")
assert len(response.data) == 1
assert int(response.data[0]["id"]) == group2.id
def test_filter_by_subscribed_by(self) -> None:
self.login_as(user=self.user)
project = self.project
# Create two issues, one subscribed by user1 and one not subscribed
event1 = self.store_event(
data={
"timestamp": before_now(minutes=1).isoformat(),
"message": "Error 1",
"fingerprint": ["group-1"],
},
project_id=project.id,
)
group1 = event1.group
GroupSubscription.objects.create(
user_id=self.user.id,
group=group1,
project=project,
is_active=True,
)
self.store_event(
data={
"timestamp": before_now(minutes=1).isoformat(),
"message": "Error 2",
"fingerprint": ["group-2"],
},
project_id=project.id,
)
# Filter by subscriptions
response = self.get_success_response(query=f"subscribed:{self.user.email}")
assert len(response.data) == 1
assert int(response.data[0]["id"]) == group1.id
# ensure we don't return ny results
response = self.get_success_response(query="subscribed:fake@fake.com")
assert len(response.data) == 0
def test_lookup_by_release_build(self) -> None:
for i in range(3):
j = 119 + i
self.create_release(version=f"steve@1.2.{i}+{j}")
self.login_as(self.user)
project = self.project
release = self.create_release(version="steve@1.2.7+123")
event = self.store_event(
data={
"timestamp": before_now(seconds=1).isoformat(),
"tags": {"sentry:release": release.version},
},
project_id=project.id,
)
response = self.get_success_response(query="release.build:123")
issues = json.loads(response.content)
assert len(issues) == 1
assert int(issues[0]["id"]) == event.group.id
response = self.get_success_response(query="release.build:122")
issues = json.loads(response.content)
assert len(issues) == 0
def test_error_main_thread_condition(self) -> None:
self.login_as(user=self.user)
project = self.project
# Simulate sending an event with main_thread set to true
event1 = self.store_event(
data={
"timestamp": before_now(seconds=1).isoformat(),
"message": "MainThreadError",
"exception": {
"values": [
{
"type": "Error",
"value": "Error in main thread",
"thread_id": 1,
}
]
},
"threads": {"values": [{"id": 1, "main": True}]},
},
project_id=project.id,
)
# Simulate sending an event with main_thread set to false
event2 = self.store_event(
data={
"timestamp": before_now(seconds=2).isoformat(),
"message": "WorkerThreadError",
"exception": {
"values": [
{
"type": "Error",
"value": "Error in worker thread",
"thread_id": 2,
}
]
},
"threads": {"values": [{"id": 2, "main": False}]},
},
project_id=project.id,
)
# Query for events where main_thread is true
response = self.get_success_response(query="error.main_thread:true")
issues = json.loads(response.content)
assert len(issues) == 1
assert int(issues[0]["id"]) == event1.group.id
# Query for events where main_thread is false
response = self.get_success_response(query="error.main_thread:false")
issues = json.loads(response.content)
assert len(issues) == 1
assert int(issues[0]["id"]) == event2.group.id
def test_feedback_filtered_by_default(self) -> None:
with Feature(
{
FeedbackGroup.build_visible_feature_name(): True,
FeedbackGroup.build_ingest_feature_name(): True,
}
):
event = self.store_event(
data={"event_id": uuid4().hex, "timestamp": before_now(seconds=1).isoformat()},
project_id=self.project.id,
)
assert event.group is not None
feedback_event = mock_feedback_event(self.project.id, before_now(seconds=1))
create_feedback_issue(
feedback_event, self.project, FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE
)
self.login_as(user=self.user)
res = self.get_success_response()
# test that the issue returned is NOT the feedback issue.
assert len(res.data) == 1
issue = res.data[0]
feedback_group = Group.objects.get(type=FeedbackGroup.type_id)
assert int(issue["id"]) != feedback_group.id
assert issue["issueCategory"] != "feedback"
def test_feedback_category_filter(self) -> None:
with Feature(
{
FeedbackGroup.build_visible_feature_name(): True,
FeedbackGroup.build_ingest_feature_name(): True,
}
):
event = self.store_event(
data={"event_id": uuid4().hex, "timestamp": before_now(seconds=1).isoformat()},
project_id=self.project.id,
)
assert event.group is not None
feedback_event = mock_feedback_event(self.project.id, before_now(seconds=1))
create_feedback_issue(
feedback_event, self.project, FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE
)
self.login_as(user=self.user)
res = self.get_success_response(query="issue.category:feedback")
# test that the issue returned IS the feedback issue.
assert len(res.data) == 1
issue = res.data[0]
feedback_group = Group.objects.get(type=FeedbackGroup.type_id)
assert int(issue["id"]) == feedback_group.id
assert issue["issueCategory"] == "feedback"
def test_flags_and_tags_query(self) -> None:
self.login_as(self.user)
project = self.project
self.store_event(
data={
"timestamp": before_now(seconds=1).isoformat(),
"contexts": {"flags": {"values": [{"flag": "test:flag", "result": True}]}},
},
project_id=project.id,
)
response = self.get_success_response(query="flags[test:flag]:true")
assert len(json.loads(response.content)) == 1
response = self.get_success_response(query="flags[test:flag]:false")
assert len(json.loads(response.content)) == 0
@patch(
"sentry.search.snuba.executors.PostgresSnubaQueryExecutor.query",
side_effect=PostgresSnubaQueryExecutor.query,
autospec=True,
)
def test_postgres_query_timeout(self, mock_query: MagicMock) -> None:
"""Test that a Postgres OperationalError with QueryCanceled pgcode becomes a 429 error
only when it's a statement timeout, and remains a 500 for user cancellation"""
class TimeoutError(OperationalError):
def __str__(self) -> str:
return "canceling statement due to statement timeout"
class UserCancelError(OperationalError):
def __str__(self) -> str:
return "canceling statement due to user request"
self.login_as(user=self.user)
mock_query.side_effect = TimeoutError()
response = self.get_response()
assert response.status_code == 429
assert (
response.data["detail"]
== "Query timeout. Please try with a smaller date range or fewer conditions."
)
mock_query.side_effect = UserCancelError()
response = self.get_response()
assert response.status_code == 500
def test_wildcard_operator_with_backslash(self) -> None:
self.login_as(user=self.user)
event = self.store_event(
data={
"timestamp": before_now(seconds=1).isoformat(),
"user": {
"id": "1",
"email": "foo@example.com",
"username": r"foo\bar",
"ip_address": "192.168.0.1",
},
},
project_id=self.project.id,
)
assert event.group
response = self.get_success_response(query=r"user.username:foo\bar")
assert len(response.data) == 1
assert response.data[0]["id"] == str(event.group.id)
response = self.get_success_response(query=r"user.username:*foo\\bar*")
assert len(response.data) == 1
assert response.data[0]["id"] == str(event.group.id)
response = self.get_success_response(query="user.username:\uf00dContains\uf00dfoo\\bar")
assert len(response.data) == 1
assert response.data[0]["id"] == str(event.group.id)
response = self.get_success_response(query="user.username:\uf00dStartsWith\uf00dfoo\\bar")
assert len(response.data) == 1
assert response.data[0]["id"] == str(event.group.id)
response = self.get_success_response(query="user.username:\uf00dEndsWith\uf00dfoo\\bar")
assert len(response.data) == 1
assert response.data[0]["id"] == str(event.group.id)
| GroupListTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol53.py | {
"start": 3324,
"end": 3520
} | class ____(Proto_ContraRecurs):
# This should generate a reportIncompatibleMethodOverride error.
def m[T: Impl_ContraGenericExplicit1](self: T, x: T) -> None: ...
| Impl_ContraGenericExplicit1 |
python | python-openxml__python-docx | tests/oxml/test__init__.py | {
"start": 4006,
"end": 4050
} | class ____(BaseOxmlElement):
pass
| CustElmCls |
python | pypa__warehouse | warehouse/organizations/models.py | {
"start": 22403,
"end": 24435
} | class ____(db.Model):
__tablename__ = "organization_manual_activations"
__repr__ = make_repr("organization_id", "seat_limit", "expires")
organization_id: Mapped[UUID] = mapped_column(
PG_UUID(as_uuid=True),
ForeignKey("organizations.id", ondelete="CASCADE"),
primary_key=True,
comment="Foreign key to organization",
)
organization: Mapped[Organization] = relationship(
back_populates="manual_activation"
)
seat_limit: Mapped[int] = mapped_column(
comment="Maximum number of organization members allowed"
)
expires: Mapped[datetime.date] = mapped_column(
comment="Expiration date for the manual activation"
)
created: Mapped[datetime_now] = mapped_column(
comment="Datetime when manual activation was created"
)
created_by_id: Mapped[UUID] = mapped_column(
PG_UUID(as_uuid=True),
ForeignKey("users.id"),
comment="Admin user who created the manual activation",
)
created_by: Mapped[User] = relationship()
id: Mapped[UUID] = mapped_column(
PG_UUID(as_uuid=True),
server_default=text("gen_random_uuid()"),
)
@property
def is_active(self) -> bool:
"""Check if manual activation is currently active (not expired)."""
return datetime.date.today() < self.expires
@property
def current_member_count(self) -> int:
"""Get the current number of organization members."""
# Use roles count instead of users relationship for more reliable counting
return len([role for role in self.organization.roles if role.user_id])
@property
def has_available_seats(self) -> bool:
"""Check if there are available seats for new members."""
return self.current_member_count < self.seat_limit
@property
def available_seats(self) -> int:
"""Get the number of available seats for new members."""
return max(0, self.seat_limit - self.current_member_count)
| OrganizationManualActivation |
python | pytorch__pytorch | tools/experimental/torchfuzz/operators/nn_functional.py | {
"start": 10620,
"end": 12209
} | class ____(Operator):
"""Operator for torch.nn.functional.dropout."""
def __init__(self):
super().__init__("torch.nn.functional.dropout")
@property
def torch_op_name(self) -> str | None:
"""Return the torch operation name."""
return "torch.nn.functional.dropout"
def can_produce(self, output_spec: Spec) -> bool:
"""Dropout can produce tensor outputs with floating point dtypes."""
if not isinstance(output_spec, TensorSpec):
return False
return is_float_dtype(output_spec.dtype)
def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]:
"""Generate input specs for dropout operation.
Dropout is element-wise, input shape matches output shape.
"""
if not isinstance(output_spec, TensorSpec):
raise ValueError("DropoutOperator can only produce TensorSpec outputs")
# Input tensor has same shape and dtype as output
input_spec = TensorSpec(
size=output_spec.size, stride=output_spec.stride, dtype=output_spec.dtype
)
return [input_spec]
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for dropout operation."""
if len(input_names) != 1:
raise ValueError("Dropout requires exactly 1 input")
input_name = input_names[0]
# Use training=False to make it deterministic for testing
return f"{output_name} = torch.nn.functional.dropout({input_name}, p=0.1, training=False)"
| DropoutOperator |
python | spyder-ide__spyder | spyder/plugins/variableexplorer/widgets/main_widget.py | {
"start": 2850,
"end": 25245
} | class ____(ShellConnectMainWidget):
# PluginMainWidget class constants
ENABLE_SPINNER = True
SHOW_MESSAGE_WHEN_EMPTY = True
IMAGE_WHEN_EMPTY = "variable-explorer"
MESSAGE_WHEN_EMPTY = _("No variables to show")
DESCRIPTION_WHEN_EMPTY = _(
"Run code in the Editor or IPython console to see any global "
"variables listed here for exploration and editing."
)
# Other class constants
INITIAL_FREE_MEMORY_TIME_TRIGGER = 60 * 1000 # ms
SECONDARY_FREE_MEMORY_TIME_TRIGGER = 180 * 1000 # ms
sig_open_preferences_requested = Signal()
"""
Signal to open the variable explorer preferences.
"""
sig_show_figure_requested = Signal(bytes, str, object)
"""
This is emitted to request that a figure be shown in the Plots plugin.
Parameters
----------
image: bytes
The image to show.
mime_type: str
The image's mime type.
shellwidget: ShellWidget
The shellwidget associated with the figure.
"""
def __init__(self, name=None, plugin=None, parent=None):
super().__init__(name, plugin, parent)
# Widgets
self.context_menu = None
self.empty_context_menu = None
self.filter_button = None
# Attributes
self._is_filter_button_checked = True
self.plots_plugin_enabled = False
# ---- PluginMainWidget API
# ------------------------------------------------------------------------
def get_title(self):
return _('Variable Explorer')
def setup(self):
# ---- Options menu actions
self.show_minmax_action = self.create_action(
VariableExplorerWidgetActions.ToggleMinMax,
text=_("Show arrays min/max"),
tip=_("Show minimum and maximum of arrays"),
toggled=True,
option='minmax'
)
# ---- Toolbar actions
import_data_action = self.create_action(
VariableExplorerWidgetActions.ImportData,
text=_('Import data'),
icon=self.create_icon('fileimport'),
triggered=lambda x: self.import_data(),
)
save_action = self.create_action(
VariableExplorerWidgetActions.SaveData,
text=_("Save data"),
icon=self.create_icon('filesave'),
triggered=lambda x: self.save_data(),
)
save_as_action = self.create_action(
VariableExplorerWidgetActions.SaveDataAs,
text=_("Save data as..."),
icon=self.create_icon('filesaveas'),
triggered=lambda x: self.save_data(),
)
reset_namespace_action = self.create_action(
VariableExplorerWidgetActions.ResetNamespace,
text=_("Remove all variables"),
icon=self.create_icon('editdelete'),
triggered=lambda x: self.reset_namespace(),
)
# ---- Context menu actions
resize_rows_action = self.create_action(
VariableExplorerWidgetActions.ResizeRowsAction,
text=_("Resize rows to contents"),
icon=self.create_icon('collapse_row'),
triggered=self.resize_rows
)
resize_columns_action = self.create_action(
VariableExplorerWidgetActions.ResizeColumnsAction,
_("Resize columns to contents"),
icon=self.create_icon('collapse_column'),
triggered=self.resize_columns
)
self.paste_action = self.create_action(
VariableExplorerContextMenuActions.PasteAction,
_("Paste"),
icon=self.create_icon('editpaste'),
triggered=self.paste
)
self.copy_action = self.create_action(
VariableExplorerContextMenuActions.CopyAction,
_("Copy"),
icon=self.create_icon('editcopy'),
triggered=self.copy
)
self.edit_action = self.create_action(
VariableExplorerContextMenuActions.EditAction,
_("Edit"),
icon=self.create_icon('edit'),
triggered=self.edit_item
)
self.plot_action = self.create_action(
VariableExplorerContextMenuActions.PlotAction,
_("Plot"),
icon=self.create_icon('plot'),
triggered=self.plot_item
)
self.plot_action.setVisible(False)
self.hist_action = self.create_action(
VariableExplorerContextMenuActions.HistogramAction,
_("Histogram"),
icon=self.create_icon('hist'),
triggered=self.histogram_item
)
self.hist_action.setVisible(False)
self.imshow_action = self.create_action(
VariableExplorerContextMenuActions.ImshowAction,
_("Show image"),
icon=self.create_icon('imshow'),
triggered=self.imshow_item
)
self.imshow_action.setVisible(False)
self.save_array_action = self.create_action(
VariableExplorerContextMenuActions.SaveArrayAction,
_("Save array"),
icon=self.create_icon('filesave'),
triggered=self.save_array
)
self.save_array_action.setVisible(False)
self.insert_action = self.create_action(
VariableExplorerContextMenuActions.InsertAction,
_("Insert"),
icon=self.create_icon('insert'),
triggered=self.insert_item
)
self.edit_filters = self.create_action(
VariableExplorerContextMenuActions.EditFiltersAction,
_("Edit filters"),
icon=self.create_icon('filter'),
triggered=self.sig_open_preferences_requested
)
self.remove_action = self.create_action(
VariableExplorerContextMenuActions.RemoveAction,
_("Remove"),
icon=self.create_icon('editdelete'),
triggered=self.remove_item
)
self.rename_action = self.create_action(
VariableExplorerContextMenuActions.RenameAction,
_("Rename"),
icon=self.create_icon('rename'),
triggered=self.rename_item
)
self.duplicate_action = self.create_action(
VariableExplorerContextMenuActions.DuplicateAction,
_("Duplicate"),
icon=self.create_icon('edit_add'),
triggered=self.duplicate_item
)
self.view_action = self.create_action(
VariableExplorerContextMenuActions.ViewAction,
_("View with the Object Explorer"),
icon=self.create_icon('outline_explorer'),
triggered=self.view_item
)
# Options menu
options_menu = self.get_options_menu()
for item in [self.exclude_private_action,
self.exclude_uppercase_action,
self.exclude_capitalized_action,
self.exclude_unsupported_action,
self.exclude_callables_and_modules_action,
self.show_minmax_action]:
self.add_item_to_menu(
item,
menu=options_menu,
section=VariableExplorerWidgetOptionsMenuSections.Display,
)
self._enable_filter_actions(self.get_conf('filter_on'))
# Resize
for item in [resize_rows_action, resize_columns_action]:
self.add_item_to_menu(
item,
menu=options_menu,
section=VariableExplorerWidgetOptionsMenuSections.Resize,
)
# Main toolbar
main_toolbar = self.get_main_toolbar()
for item in [import_data_action, save_action, save_as_action,
reset_namespace_action]:
self.add_item_to_toolbar(
item,
toolbar=main_toolbar,
section=VariableExplorerWidgetMainToolBarSections.Main,
)
save_action.setEnabled(False)
# Search, Filter and Refresh buttons are added in _setup()
# ---- Context menu to show when there are variables present
self.context_menu = self.create_menu(
VariableExplorerWidgetMenus.PopulatedContextMenu)
for item in [self.edit_action, self.copy_action, self.paste_action,
self.rename_action, self.remove_action,
self.save_array_action]:
self.add_item_to_menu(
item,
menu=self.context_menu,
section=VariableExplorerContextMenuSections.Edit,
)
for item in [self.insert_action, self.duplicate_action]:
self.add_item_to_menu(
item,
menu=self.context_menu,
section=VariableExplorerContextMenuSections.Insert,
)
for item in [self.edit_filters]:
self.add_item_to_menu(
item,
menu=self.context_menu,
section=VariableExplorerContextMenuSections.Filter,
)
for item in [self.view_action, self.plot_action, self.hist_action,
self.imshow_action]:
self.add_item_to_menu(
item,
menu=self.context_menu,
section=VariableExplorerContextMenuSections.View,
)
# ---- Context menu when the variable explorer is empty
self.empty_context_menu = self.create_menu(
VariableExplorerWidgetMenus.EmptyContextMenu)
for item in [self.insert_action, self.paste_action]:
self.add_item_to_menu(
item,
menu=self.empty_context_menu,
section=VariableExplorerContextMenuSections.Edit,
)
def _setup(self):
"""
Create options menu and adjacent toolbar buttons, etc.
This creates base actions related with Search, Filter and Refresh.
This calls the parent's method to setup default actions, create the
spinner and the options menu, and connect signals. After that, it adds
the Search, Filter and Refresh buttons between the spinner and the
options menu.
"""
super()._setup()
# ---- Base Options menu actions
self.exclude_private_action = self.create_action(
VariableExplorerWidgetActions.ToggleExcludePrivate,
text=_("Exclude private variables"),
tip=_("Exclude variables that start with an underscore"),
toggled=True,
option='exclude_private',
)
self.exclude_uppercase_action = self.create_action(
VariableExplorerWidgetActions.ToggleExcludeUpperCase,
text=_("Exclude all-uppercase variables"),
tip=_("Exclude variables whose name is uppercase"),
toggled=True,
option='exclude_uppercase',
)
self.exclude_capitalized_action = self.create_action(
VariableExplorerWidgetActions.ToggleExcludeCapitalized,
text=_("Exclude capitalized variables"),
tip=_("Exclude variables whose name starts with a capital "
"letter"),
toggled=True,
option='exclude_capitalized',
)
self.exclude_unsupported_action = self.create_action(
VariableExplorerWidgetActions.ToggleExcludeUnsupported,
text=_("Exclude unsupported data types"),
tip=_("Exclude references to data types that don't have "
"an specialized viewer or can't be edited."),
toggled=True,
option='exclude_unsupported',
)
self.exclude_callables_and_modules_action = self.create_action(
VariableExplorerWidgetActions.ToggleExcludeCallablesAndModules,
text=_("Exclude callables and modules"),
tip=_("Exclude references to functions, modules and "
"any other callable."),
toggled=True,
option='exclude_callables_and_modules'
)
# ---- Base Toolbar actions
self.search_action = self.create_action(
VariableExplorerWidgetActions.Search,
text=_("Search variable names and types"),
icon=self.create_icon('find'),
toggled=self.toggle_finder,
register_shortcut=True
)
self.refresh_action = self.create_action(
VariableExplorerWidgetActions.Refresh,
text=_("Refresh variables"),
icon=self.create_icon('refresh'),
triggered=self.refresh_table,
register_shortcut=True,
)
self.filter_button = self.create_action(
VariableExplorerWidgetActions.ToggleFilter,
text="",
icon=ima.icon('filter'),
toggled=self._enable_filter_actions,
option='filter_on',
tip=_("Filter variables")
)
self.filter_button.setCheckable(True)
self.filter_button.toggled.connect(self._set_filter_button_state)
for action in [
self.search_action,
self.filter_button,
self.refresh_action,
]:
self.add_corner_widget(action, before=self._options_button)
def update_actions(self):
"""Update the actions."""
if self.is_current_widget_error_message():
self._set_main_toolbar_state(False)
return
else:
self._set_main_toolbar_state(True)
action = self.get_action(VariableExplorerWidgetActions.ToggleMinMax)
action.setEnabled(is_module_installed('numpy'))
nsb = self.current_widget()
if nsb:
save_data_action = self.get_action(
VariableExplorerWidgetActions.SaveData)
save_data_action.setEnabled(nsb.filename is not None)
search_action = self.get_action(VariableExplorerWidgetActions.Search)
if nsb is None:
checked = False
else:
checked = nsb.finder_is_visible()
search_action.setChecked(checked)
@on_conf_change
def on_section_conf_change(self, section):
for index in range(self.count()):
widget = self._stack.widget(index)
if widget:
widget.setup()
def set_plots_plugin_enabled(self, value: bool):
"""
Change whether the Plots plugin is enabled.
This stores the information in this widget and propagates it to every
NamespaceBrowser.
"""
self.plots_plugin_enabled = value
for index in range(self.count()):
nsb = self._stack.widget(index)
if nsb:
nsb.plots_plugin_enabled = value
# ---- Stack accesors
# ------------------------------------------------------------------------
def switch_widget(self, nsb, old_nsb):
"""Set the current NamespaceBrowser."""
pass
# ---- Public API
# ------------------------------------------------------------------------
def create_new_widget(self, shellwidget):
"""Create new NamespaceBrowser."""
nsb = NamespaceBrowser(self)
nsb.sig_hide_finder_requested.connect(self.hide_finder)
nsb.sig_free_memory_requested.connect(self.free_memory)
nsb.sig_start_spinner_requested.connect(self.start_spinner)
nsb.sig_stop_spinner_requested.connect(self.stop_spinner)
nsb.sig_show_figure_requested.connect(self.sig_show_figure_requested)
nsb.sig_show_empty_message_requested.connect(
self.switch_empty_message
)
nsb.set_shellwidget(shellwidget)
nsb.plots_plugin_enabled = self.plots_plugin_enabled
nsb.setup()
self._set_actions_and_menus(nsb)
# To update the Variable Explorer after execution
shellwidget.sig_kernel_state_arrived.connect(nsb.update_view)
shellwidget.sig_config_spyder_kernel.connect(
nsb.set_namespace_view_settings
)
return nsb
def close_widget(self, nsb):
"""Close NamespaceBrowser."""
nsb.sig_hide_finder_requested.disconnect(self.hide_finder)
nsb.sig_free_memory_requested.disconnect(self.free_memory)
nsb.sig_start_spinner_requested.disconnect(self.start_spinner)
nsb.sig_stop_spinner_requested.disconnect(self.stop_spinner)
nsb.sig_show_figure_requested.disconnect(
self.sig_show_figure_requested)
nsb.shellwidget.sig_kernel_state_arrived.disconnect(nsb.update_view)
nsb.shellwidget.sig_config_spyder_kernel.disconnect(
nsb.set_namespace_view_settings
)
nsb.close()
nsb.setParent(None)
def import_data(self, filenames=None):
"""
Import data in current namespace.
"""
if not self.is_current_widget_error_message():
nsb = self.current_widget()
nsb.refresh_table()
nsb.import_data(filenames=filenames)
def save_data(self):
if not self.is_current_widget_error_message():
nsb = self.current_widget()
nsb.save_data()
self.update_actions()
def reset_namespace(self):
if not self.is_current_widget_error_message():
nsb = self.current_widget()
nsb.reset_namespace()
@Slot(bool)
def toggle_finder(self, checked):
"""Hide or show the finder."""
widget = self.current_widget()
if widget is None or self.is_current_widget_error_message():
return
widget.toggle_finder(checked)
@Slot()
def hide_finder(self):
"""Hide the finder."""
action = self.get_action(VariableExplorerWidgetActions.Search)
action.setChecked(False)
def refresh_table(self):
if not self.is_current_widget_error_message():
nsb = self.current_widget()
nsb.refresh_table()
@Slot()
def free_memory(self):
"""
Free memory signal.
"""
self.sig_free_memory_requested.emit()
QTimer.singleShot(self.INITIAL_FREE_MEMORY_TIME_TRIGGER,
self.sig_free_memory_requested)
QTimer.singleShot(self.SECONDARY_FREE_MEMORY_TIME_TRIGGER,
self.sig_free_memory_requested)
def resize_rows(self):
if self._current_editor is not None:
self._current_editor.resizeRowsToContents()
def resize_columns(self):
if self._current_editor is not None:
self._current_editor.resize_column_contents()
def paste(self):
self._current_editor.paste()
def copy(self):
self._current_editor.copy()
def edit_item(self):
self._current_editor.edit_item()
def plot_item(self):
self._current_editor.plot_item('plot')
def histogram_item(self):
self._current_editor.plot_item('hist')
def imshow_item(self):
self._current_editor.imshow_item()
def save_array(self):
self._current_editor.save_array()
def insert_item(self):
self._current_editor.insert_item(below=False)
def remove_item(self):
self._current_editor.remove_item()
def rename_item(self):
self._current_editor.rename_item()
def duplicate_item(self):
self._current_editor.duplicate_item()
def view_item(self):
self._current_editor.view_item()
# ---- Private API
# ------------------------------------------------------------------------
@property
def _current_editor(self):
editor = None
if not self.is_current_widget_error_message():
nsb = self.current_widget()
editor = nsb.editor
return editor
def _set_actions_and_menus(self, nsb):
"""
Set actions and menus created here and used by the namespace
browser editor.
Although this is not ideal, it's necessary to be able to use
the CollectionsEditor widget separately from this plugin.
"""
editor = nsb.editor
# Actions
editor.paste_action = self.paste_action
editor.copy_action = self.copy_action
editor.edit_action = self.edit_action
editor.plot_action = self.plot_action
editor.hist_action = self.hist_action
editor.imshow_action = self.imshow_action
editor.save_array_action = self.save_array_action
editor.insert_action = self.insert_action
editor.remove_action = self.remove_action
editor.rename_action = self.rename_action
editor.duplicate_action = self.duplicate_action
editor.view_action = self.view_action
# Menus
editor.menu = self.context_menu
editor.empty_ws_menu = self.empty_context_menu
# These actions are not used for dictionaries (so we don't need them
# for namespaces) but we have to create them so they can be used in
# several places in CollectionsEditor.
editor.insert_action_above = QAction()
editor.insert_action_below = QAction()
def _enable_filter_actions(self, value):
"""Handle the change of the filter state."""
self.exclude_private_action.setEnabled(value)
self.exclude_uppercase_action.setEnabled(value)
self.exclude_capitalized_action.setEnabled(value)
self.exclude_unsupported_action.setEnabled(value)
self.exclude_callables_and_modules_action.setEnabled(value)
def _set_main_toolbar_state(self, enabled):
"""Set main toolbar enabled state."""
main_toolbar = self.get_main_toolbar()
for action in main_toolbar.actions():
action.setEnabled(enabled)
# Adjustments for the filter button
if enabled:
# Restore state for active consoles
self.filter_button.setChecked(self._is_filter_button_checked)
else:
# Uncheck button for dead consoles if it's checked so that the
# toolbar looks good
if self.filter_button.isChecked():
self.filter_button.setChecked(False)
self._is_filter_button_checked = True
def _set_filter_button_state(self, checked):
"""Keep track of the filter button checked state."""
self._is_filter_button_checked = checked
| VariableExplorerWidget |
python | walkccc__LeetCode | solutions/2224. Minimum Number of Operations to Convert Time/2224.py | {
"start": 0,
"end": 322
} | class ____:
def convertTime(self, current: str, correct: str) -> int:
ops = [60, 15, 5, 1]
def getMinutes(s: str) -> int:
return int(s[:2]) * 60 + int(s[3:])
diff = getMinutes(correct) - getMinutes(current)
ans = 0
for op in ops:
ans += diff // op
diff %= op
return ans
| Solution |
python | PrefectHQ__prefect | src/prefect/exceptions.py | {
"start": 11984,
"end": 12292
} | class ____(PrefectException):
"""
Raised when an event exceeds the configured maximum size.
"""
def __init__(self, size: int, maximum: int):
super().__init__(f"Event is too large to emit ({size} > {maximum} bytes)")
self.size = size
self.maximum = maximum
| EventTooLarge |
python | nedbat__coveragepy | tests/test_config.py | {
"start": 22884,
"end": 36466
} | class ____(UsingModulesMixin, CoverageTest):
"""Tests of the config file settings in particular."""
# This sample file tries to use lots of variation of syntax...
# The {section} placeholder lets us nest these settings in another file.
LOTSA_SETTINGS = """\
# This is a settings file for coverage.py
[{section}run]
timid = yes
data_file = something_or_other.dat
branch = 1
cover_pylib = TRUE
parallel = on
concurrency = thread
; this omit is overridden by the omit from [report]
omit = twenty
source = myapp
source_pkgs = ned
source_dirs = cooldir
plugins =
plugins.a_plugin
plugins.another
debug = callers, pids , dataio
disable_warnings = abcd , efgh
[{section}report]
; these settings affect reporting.
exclude_lines =
if 0:
pragma:?\\s+no cover
another_tab
ignore_errors = TRUE
omit =
one, another, some_more,
yet_more
include = thirty
precision = 3
partial_branches =
pragma:?\\s+no branch
partial_branches_always =
if 0:
while True:
show_missing= TruE
skip_covered = TruE
skip_empty =TruE
include_namespace_packages = TRUE
[{section}html]
directory = c:\\tricky\\dir.somewhere
extra_css=something/extra.css
title = Title & nums # nums!
[{section}xml]
output=mycov.xml
package_depth = 17
[{section}paths]
source =
.
/home/ned/src/
other = other, /home/ned/other, c:\\Ned\\etc
[{section}plugins.a_plugin]
hello = world
; comments still work.
names = Jane/John/Jenny
[{section}json]
pretty_print = True
show_contexts = True
"""
# Just some sample setup.cfg text from the docs.
SETUP_CFG = """\
[bdist_rpm]
release = 1
packager = Jane Packager <janep@pysoft.com>
doc_files = CHANGES.txt
README.txt
USAGE.txt
doc/
examples/
"""
# Just some sample tox.ini text from the docs.
TOX_INI = """\
[tox]
envlist = py{26,27,33,34,35}-{c,py}tracer
skip_missing_interpreters = True
[testenv]
commands =
# Create tests/zipmods.zip
python igor.py zip_mods
"""
def assert_config_settings_are_correct(self, cov: Coverage) -> None:
"""Check that `cov` has all the settings from LOTSA_SETTINGS."""
assert cov.config.timid
assert cov.config.data_file == "something_or_other.dat"
assert cov.config.branch
assert cov.config.cover_pylib
assert cov.config.debug == ["callers", "pids", "dataio"]
assert cov.config.parallel
assert cov.config.concurrency == ["thread"]
assert cov.config.source == ["myapp"]
assert cov.config.source_pkgs == ["ned"]
assert cov.config.source_dirs == ["cooldir"]
assert cov.config.disable_warnings == ["abcd", "efgh"]
assert cov.get_exclude_list() == ["if 0:", r"pragma:?\s+no cover", "another_tab"]
assert cov.config.ignore_errors
assert cov.config.run_omit == ["twenty"]
assert cov.config.report_omit == ["one", "another", "some_more", "yet_more"]
assert cov.config.report_include == ["thirty"]
assert cov.config.precision == 3
assert cov.config.partial_list == [r"pragma:?\s+no branch"]
assert cov.config.partial_always_list == ["if 0:", "while True:"]
assert cov.config.plugins == ["plugins.a_plugin", "plugins.another"]
assert cov.config.show_missing
assert cov.config.skip_covered
assert cov.config.skip_empty
assert cov.config.html_dir == r"c:\tricky\dir.somewhere"
assert cov.config.extra_css == "something/extra.css"
assert cov.config.html_title == "Title & nums # nums!"
assert cov.config.xml_output == "mycov.xml"
assert cov.config.xml_package_depth == 17
assert cov.config.paths == {
"source": [".", "/home/ned/src/"],
"other": ["other", "/home/ned/other", "c:\\Ned\\etc"],
}
assert cov.config.get_plugin_options("plugins.a_plugin") == {
"hello": "world",
"names": "Jane/John/Jenny",
}
assert cov.config.get_plugin_options("plugins.another") == {}
assert cov.config.json_show_contexts is True
assert cov.config.json_pretty_print is True
assert cov.config.include_namespace_packages is True
def test_config_file_settings(self) -> None:
self.make_file(".coveragerc", self.LOTSA_SETTINGS.format(section=""))
cov = coverage.Coverage()
self.assert_config_settings_are_correct(cov)
def check_config_file_settings_in_other_file(self, fname: str, contents: str) -> None:
"""Check config will be read from another file, with prefixed sections."""
nested = self.LOTSA_SETTINGS.format(section="coverage:")
fname = self.make_file(fname, nested + "\n" + contents)
cov = coverage.Coverage()
self.assert_config_settings_are_correct(cov)
def test_config_file_settings_in_setupcfg(self) -> None:
self.check_config_file_settings_in_other_file("setup.cfg", self.SETUP_CFG)
def test_config_file_settings_in_toxini(self) -> None:
self.check_config_file_settings_in_other_file("tox.ini", self.TOX_INI)
def check_other_config_if_coveragerc_specified(self, fname: str, contents: str) -> None:
"""Check that config `fname` is read if .coveragerc is missing, but specified."""
nested = self.LOTSA_SETTINGS.format(section="coverage:")
self.make_file(fname, nested + "\n" + contents)
cov = coverage.Coverage(config_file=".coveragerc")
self.assert_config_settings_are_correct(cov)
def test_config_file_settings_in_setupcfg_if_coveragerc_specified(self) -> None:
self.check_other_config_if_coveragerc_specified("setup.cfg", self.SETUP_CFG)
def test_config_file_settings_in_tox_if_coveragerc_specified(self) -> None:
self.check_other_config_if_coveragerc_specified("tox.ini", self.TOX_INI)
def check_other_not_read_if_coveragerc(self, fname: str) -> None:
"""Check config `fname` is not read if .coveragerc exists."""
self.make_file(
".coveragerc",
"""\
[run]
include = foo
""",
)
self.make_file(
fname,
"""\
[coverage:run]
omit = bar
branch = true
""",
)
cov = coverage.Coverage()
assert cov.config.run_include == ["foo"]
assert cov.config.run_omit == []
assert cov.config.branch is False
def test_setupcfg_only_if_not_coveragerc(self) -> None:
self.check_other_not_read_if_coveragerc("setup.cfg")
def test_toxini_only_if_not_coveragerc(self) -> None:
self.check_other_not_read_if_coveragerc("tox.ini")
def check_other_config_need_prefixes(self, fname: str) -> None:
"""Check that `fname` sections won't be read if un-prefixed."""
self.make_file(
fname,
"""\
[run]
omit = bar
branch = true
""",
)
cov = coverage.Coverage()
assert cov.config.run_omit == []
assert cov.config.branch is False
def test_setupcfg_only_if_prefixed(self) -> None:
self.check_other_config_need_prefixes("setup.cfg")
def test_toxini_only_if_prefixed(self) -> None:
self.check_other_config_need_prefixes("tox.ini")
def test_tox_ini_even_if_setup_cfg(self) -> None:
# There's a setup.cfg, but no coverage settings in it, so tox.ini
# is read.
nested = self.LOTSA_SETTINGS.format(section="coverage:")
self.make_file("tox.ini", self.TOX_INI + "\n" + nested)
self.make_file("setup.cfg", self.SETUP_CFG)
cov = coverage.Coverage()
self.assert_config_settings_are_correct(cov)
def test_read_prefixed_sections_from_explicit_file(self) -> None:
# You can point to a tox.ini, and it will find [coverage:run] sections
nested = self.LOTSA_SETTINGS.format(section="coverage:")
self.make_file("tox.ini", self.TOX_INI + "\n" + nested)
cov = coverage.Coverage(config_file="tox.ini")
self.assert_config_settings_are_correct(cov)
def test_non_ascii(self) -> None:
self.make_file(
".coveragerc",
"""\
[report]
exclude_lines =
first
✘${TOX_ENVNAME}
third
[html]
title = tabblo & «ταБЬℓσ» # numbers
""",
)
self.set_environ("TOX_ENVNAME", "weirdo")
cov = coverage.Coverage()
assert cov.config.exclude_list == ["first", "✘weirdo", "third"]
assert cov.config.html_title == "tabblo & «ταБЬℓσ» # numbers"
@pytest.mark.parametrize("bad_file", ["nosuchfile.txt", "."])
def test_unreadable_config(self, bad_file: str) -> None:
# If a config file is explicitly specified, then it is an error for it
# to not be readable.
msg = f"Couldn't read {bad_file!r} as a config file"
with pytest.raises(ConfigError, match=msg):
coverage.Coverage(config_file=bad_file)
def test_nocoveragerc_file_when_specified(self) -> None:
cov = coverage.Coverage(config_file=".coveragerc")
assert not cov.config.timid
assert not cov.config.branch
assert cov.config.data_file == ".coverage"
def test_no_toml_installed_no_toml(self) -> None:
# Can't read a toml file that doesn't exist.
with mock.patch.object(coverage.tomlconfig, "has_tomllib", False):
msg = "Couldn't read 'cov.toml' as a config file"
with pytest.raises(ConfigError, match=msg):
coverage.Coverage(config_file="cov.toml")
@pytest.mark.skipif(env.PYVERSION >= (3, 11), reason="Python 3.11 has toml in stdlib")
def test_no_toml_installed_explicit_toml(self) -> None:
# Can't specify a toml config file if toml isn't installed.
self.make_file("cov.toml", "# A toml file!")
with mock.patch.object(coverage.tomlconfig, "has_tomllib", False):
msg = "Can't read 'cov.toml' without TOML support"
with pytest.raises(ConfigError, match=msg):
coverage.Coverage(config_file="cov.toml")
@pytest.mark.skipif(env.PYVERSION >= (3, 11), reason="Python 3.11 has toml in stdlib")
def test_no_toml_installed_pyproject_toml(self) -> None:
# Can't have coverage config in pyproject.toml without toml installed.
self.make_file(
"pyproject.toml",
"""\
# A toml file!
[tool.coverage.run]
xyzzy = 17
""",
)
with mock.patch.object(coverage.tomlconfig, "has_tomllib", False):
msg = "Can't read 'pyproject.toml' without TOML support"
with pytest.raises(ConfigError, match=msg):
coverage.Coverage()
@pytest.mark.skipif(env.PYVERSION >= (3, 11), reason="Python 3.11 has toml in stdlib")
def test_no_toml_installed_pyproject_toml_shorter_syntax(self) -> None:
# Can't have coverage config in pyproject.toml without toml installed.
self.make_file(
"pyproject.toml",
"""\
# A toml file!
[tool.coverage]
run.parallel = true
""",
)
with mock.patch.object(coverage.tomlconfig, "has_tomllib", False):
msg = "Can't read 'pyproject.toml' without TOML support"
with pytest.raises(ConfigError, match=msg):
coverage.Coverage()
@pytest.mark.skipif(env.PYVERSION >= (3, 11), reason="Python 3.11 has toml in stdlib")
def test_no_toml_installed_pyproject_no_coverage(self) -> None:
# It's ok to have non-coverage pyproject.toml without toml installed.
self.make_file(
"pyproject.toml",
"""\
# A toml file!
[tool.something]
xyzzy = 17
""",
)
with mock.patch.object(coverage.tomlconfig, "has_tomllib", False):
cov = coverage.Coverage()
# We get default settings:
assert not cov.config.timid
assert not cov.config.branch
assert cov.config.data_file == ".coverage"
def test_exceptions_from_missing_toml_things(self) -> None:
self.make_file(
"pyproject.toml",
"""\
[tool.coverage.run]
branch = true
""",
)
config = TomlConfigParser(False)
config.read("pyproject.toml")
with pytest.raises(ConfigError, match="No section: 'xyzzy'"):
config.options("xyzzy")
with pytest.raises(ConfigError, match="No section: 'xyzzy'"):
config.get("xyzzy", "foo")
with pytest.raises(ConfigError, match="No option 'foo' in section: 'tool.coverage.run'"):
config.get("run", "foo")
| ConfigFileTest |
python | pandas-dev__pandas | pandas/core/internals/blocks.py | {
"start": 62528,
"end": 72963
} | class ____(EABackedBlock):
"""
Block for holding extension types.
Notes
-----
This holds all 3rd-party extension array types. It's also the immediate
parent class for our internal extension types' blocks.
ExtensionArrays are limited to 1-D.
"""
values: ExtensionArray
def fillna(
self,
value,
limit: int | None = None,
inplace: bool = False,
) -> list[Block]:
if isinstance(self.dtype, (IntervalDtype, StringDtype)):
# Block.fillna handles coercion (test_fillna_interval)
if isinstance(self.dtype, IntervalDtype) and limit is not None:
raise ValueError("limit must be None")
return super().fillna(
value=value,
limit=limit,
inplace=inplace,
)
if self._can_hold_na and not self.values._hasna:
refs = self.refs
new_values = self.values
else:
copy, refs = self._get_refs_and_copy(inplace)
try:
new_values = self.values.fillna(value=value, limit=limit, copy=copy)
except TypeError:
# 3rd party EA that has not implemented copy keyword yet
refs = None
new_values = self.values.fillna(value=value, limit=limit)
# issue the warning *after* retrying, in case the TypeError
# was caused by an invalid fill_value
warnings.warn(
# GH#53278
"ExtensionArray.fillna added a 'copy' keyword in pandas "
"2.1.0. In a future version, ExtensionArray subclasses will "
"need to implement this keyword or an exception will be "
"raised. In the interim, the keyword is ignored by "
f"{type(self.values).__name__}.",
Pandas4Warning,
stacklevel=find_stack_level(),
)
return [self.make_block_same_class(new_values, refs=refs)]
@cache_readonly
def shape(self) -> Shape:
# TODO(EA2D): override unnecessary with 2D EAs
if self.ndim == 1:
return (len(self.values),)
return len(self._mgr_locs), len(self.values)
def iget(self, i: int | tuple[int, int] | tuple[slice, int]):
# In the case where we have a tuple[slice, int], the slice will always
# be slice(None)
# We _could_ make the annotation more specific, but mypy would
# complain about override mismatch:
# Literal[0] | tuple[Literal[0], int] | tuple[slice, int]
# Note: only reached with self.ndim == 2
if isinstance(i, tuple):
# TODO(EA2D): unnecessary with 2D EAs
col, loc = i
if not com.is_null_slice(col) and col != 0:
raise IndexError(f"{self} only contains one item")
if isinstance(col, slice):
# the is_null_slice check above assures that col is slice(None)
# so what we want is a view on all our columns and row loc
if loc < 0:
loc += len(self.values)
# Note: loc:loc+1 vs [[loc]] makes a difference when called
# from fast_xs because we want to get a view back.
return self.values[loc : loc + 1]
return self.values[loc]
else:
if i != 0:
raise IndexError(f"{self} only contains one item")
return self.values
def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None:
# When an ndarray, we should have locs.tolist() == [0]
# When a BlockPlacement we should have list(locs) == [0]
if copy:
self.values = self.values.copy()
self.values[:] = values
def _maybe_squeeze_arg(self, arg):
"""
If necessary, squeeze a (N, 1) ndarray to (N,)
"""
# e.g. if we are passed a 2D mask for putmask
if (
isinstance(arg, (np.ndarray, ExtensionArray))
and arg.ndim == self.values.ndim + 1
):
# TODO(EA2D): unnecessary with 2D EAs
assert arg.shape[1] == 1
# error: No overload variant of "__getitem__" of "ExtensionArray"
# matches argument type "Tuple[slice, int]"
arg = arg[:, 0] # type: ignore[call-overload]
elif isinstance(arg, ABCDataFrame):
# 2022-01-06 only reached for setitem
# TODO: should we avoid getting here with DataFrame?
assert arg.shape[1] == 1
arg = arg._ixs(0, axis=1)._values
return arg
def _unwrap_setitem_indexer(self, indexer):
"""
Adapt a 2D-indexer to our 1D values.
This is intended for 'setitem', not 'iget' or '_slice'.
"""
# TODO: ATM this doesn't work for iget/_slice, can we change that?
if isinstance(indexer, tuple) and len(indexer) == 2:
# TODO(EA2D): not needed with 2D EAs
# Should never have length > 2. Caller is responsible for checking.
# Length 1 is reached vis setitem_single_block and setitem_single_column
# each of which pass indexer=(pi,)
if all(isinstance(x, np.ndarray) and x.ndim == 2 for x in indexer):
# GH#44703 went through indexing.maybe_convert_ix
first, second = indexer
if not (
second.size == 1 and (second == 0).all() and first.shape[1] == 1
):
raise NotImplementedError(
"This should not be reached. Please report a bug at "
"github.com/pandas-dev/pandas/"
)
indexer = first[:, 0]
elif lib.is_integer(indexer[1]) and indexer[1] == 0:
# reached via setitem_single_block passing the whole indexer
indexer = indexer[0]
elif com.is_null_slice(indexer[1]):
indexer = indexer[0]
elif is_list_like(indexer[1]) and indexer[1][0] == 0:
indexer = indexer[0]
else:
raise NotImplementedError(
"This should not be reached. Please report a bug at "
"github.com/pandas-dev/pandas/"
)
return indexer
@property
def is_view(self) -> bool:
"""Extension arrays are never treated as views."""
return False
# error: Cannot override writeable attribute with read-only property
@cache_readonly
def is_numeric(self) -> bool: # type: ignore[override]
return self.values.dtype._is_numeric
def _slice(
self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp]
) -> ExtensionArray:
"""
Return a slice of my values.
Parameters
----------
slicer : slice, ndarray[int], or ndarray[bool]
Valid (non-reducing) indexer for self.values.
Returns
-------
ExtensionArray
"""
# Notes: ndarray[bool] is only reachable when via get_rows_with_mask, which
# is only for Series, i.e. self.ndim == 1.
# return same dims as we currently have
if self.ndim == 2:
# reached via getitem_block via _slice_take_blocks_ax0
# TODO(EA2D): won't be necessary with 2D EAs
if not isinstance(slicer, slice):
raise AssertionError(
"invalid slicing for a 1-ndim ExtensionArray", slicer
)
# GH#32959 only full-slicers along fake-dim0 are valid
# TODO(EA2D): won't be necessary with 2D EAs
# range(1) instead of self._mgr_locs to avoid exception on [::-1]
# see test_iloc_getitem_slice_negative_step_ea_block
new_locs = range(1)[slicer]
if not len(new_locs):
raise AssertionError(
"invalid slicing for a 1-ndim ExtensionArray", slicer
)
slicer = slice(None)
return self.values[slicer]
@final
def slice_block_rows(self, slicer: slice) -> Self:
"""
Perform __getitem__-like specialized to slicing along index.
"""
# GH#42787 in principle this is equivalent to values[..., slicer], but we don't
# require subclasses of ExtensionArray to support that form (for now).
new_values = self.values[slicer]
return type(self)(new_values, self._mgr_locs, ndim=self.ndim, refs=self.refs)
def _unstack(
self,
unstacker,
fill_value,
new_placement: npt.NDArray[np.intp],
needs_masking: npt.NDArray[np.bool_],
):
# ExtensionArray-safe unstack.
# We override Block._unstack, which unstacks directly on the
# values of the array. For EA-backed blocks, this would require
# converting to a 2-D ndarray of objects.
# Instead, we unstack an ndarray of integer positions, followed by
# a `take` on the actual values.
# Caller is responsible for ensuring self.shape[-1] == len(unstacker.index)
new_values, mask = unstacker.arange_result
# Note: these next two lines ensure that
# mask.sum() == sum(len(nb.mgr_locs) for nb in blocks)
# which the calling function needs in order to pass verify_integrity=False
# to the BlockManager constructor
new_values = new_values.T[mask]
new_placement = new_placement[mask]
# needs_masking[i] calculated once in BlockManager.unstack tells
# us if there are any -1s in the relevant indices. When False,
# that allows us to go through a faster path in 'take', among
# other things avoiding e.g. Categorical._validate_scalar.
blocks = [
# TODO: could cast to object depending on fill_value?
type(self)(
self.values.take(
indices, allow_fill=needs_masking[i], fill_value=fill_value
),
BlockPlacement(place),
ndim=2,
)
for i, (indices, place) in enumerate(
zip(new_values, new_placement, strict=True)
)
]
return blocks, mask
| ExtensionBlock |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 17723,
"end": 18914
} | class ____(ChainError):
"""
Raised when a contract is not found at an address.
"""
# TODO: In 0.9, pass in provider object directly (instead of network choice + name)
def __init__(self, address: "AddressType", has_explorer: bool, network_choice: str):
msg = f"Failed to get contract type for address '{address}'."
# NOTE: Network name is optional to avoid breaking change.
choice_parts = network_choice.split(":")
if len(choice_parts) > 1:
network_name = network_choice.split(":")[1]
else:
network_name = network_choice
if has_explorer:
msg += " Contract may need verification."
elif network_name != "local":
# Only bother mentioning explorer plugins if we are not the local network.
msg += (
f" Current network '{network_choice}' has no associated "
"explorer plugin. Try installing an explorer plugin using "
f"{click.style(text='ape plugins install etherscan', fg='green')}, "
"or using a network with explorer support."
)
super().__init__(msg)
| ContractNotFoundError |
python | redis__redis-py | redis/commands/search/querystring.py | {
"start": 5820,
"end": 5998
} | class ____(Node):
"""
Create an intersection node. All children need to be satisfied in order for
this node to evaluate as true
"""
JOINSTR = " "
| IntersectNode |
python | getsentry__sentry | src/sentry/hybridcloud/tasks/deliver_webhooks.py | {
"start": 2398,
"end": 23908
} | class ____(Exception):
"""
Used to signal an expected delivery failure.
"""
pass
@instrumented_task(
name="sentry.hybridcloud.tasks.deliver_webhooks.schedule_webhook_delivery",
namespace=hybridcloud_control_tasks,
processing_deadline_duration=30,
silo_mode=SiloMode.CONTROL,
)
def schedule_webhook_delivery() -> None:
"""
Find mailboxes that contain undelivered webhooks that were scheduled
to be delivered now or in the past.
Prioritizes webhooks based on provider importance.
Triggered frequently by task-scheduler.
"""
# Se use the replica for any read queries to webhook payload
WebhookPayloadReplica = WebhookPayload.objects.using_replica()
# The double call to .values() ensures that the group by includes mailbox_name
# but only id_min is selected
head_of_line = (
WebhookPayloadReplica.all()
.values("mailbox_name")
.annotate(id_min=Min("id"))
.values("id_min")
)
# Get any heads that are scheduled to run
# Use provider field directly, with default priority for null values
scheduled_mailboxes = (
WebhookPayloadReplica.filter(
schedule_for__lte=timezone.now(),
id__in=Subquery(head_of_line),
)
# Set priority value based on provider field
.annotate(
provider_priority=Case(
# For providers that match our priority list
*[
When(provider=provider, then=Value(priority))
for provider, priority in PROVIDER_PRIORITY.items()
],
# Default value for all other cases (including null providers)
default=Value(DEFAULT_PROVIDER_PRIORITY),
output_field=CharField(),
)
)
# Order by priority first (lowest number = highest priority), then ID
.order_by("provider_priority", "id").values("id", "mailbox_name")
)
metrics.distribution(
"hybridcloud.schedule_webhook_delivery.mailbox_count", scheduled_mailboxes.count()
)
for record in scheduled_mailboxes[:BATCH_SIZE]:
# Reschedule the records that we will attempt to deliver next.
# We update schedule_for in an attempt to minimize races for potentially in-flight batches.
mailbox_batch = (
WebhookPayloadReplica.filter(id__gte=record["id"], mailbox_name=record["mailbox_name"])
.order_by("id")
.values("id")[:MAX_MAILBOX_DRAIN]
)
updated_count = WebhookPayload.objects.filter(id__in=Subquery(mailbox_batch)).update(
schedule_for=timezone.now() + BATCH_SCHEDULE_OFFSET
)
# If we have 1/5 or more in a mailbox we should process in parallel as we're likely behind.
if updated_count >= int(MAX_MAILBOX_DRAIN / 5):
drain_mailbox_parallel.delay(record["id"])
else:
drain_mailbox.delay(record["id"])
@instrumented_task(
name="sentry.hybridcloud.tasks.deliver_webhooks.drain_mailbox",
namespace=hybridcloud_control_tasks,
processing_deadline_duration=300,
silo_mode=SiloMode.CONTROL,
)
def drain_mailbox(payload_id: int) -> None:
"""
Attempt deliver up to 50 webhooks from the mailbox that `id` is from.
Messages will be delivered in order until one fails or 50 are delivered.
Once messages have successfully been delivered or discarded, they are deleted.
"""
WebhookPayloadReplica = WebhookPayload.objects.using_replica()
try:
payload = WebhookPayloadReplica.get(id=payload_id)
except WebhookPayload.DoesNotExist:
# We could have hit a race condition. Since we've lost already return
# and let the other process continue, or a future process.
metrics.incr("hybridcloud.deliver_webhooks.delivery", tags={"outcome": "race"})
logger.info(
"deliver_webhook.potential_race",
extra={
"id": payload_id,
},
)
return
delivered = 0
deadline = timezone.now() + BATCH_SCHEDULE_OFFSET
while True:
# We have run until the end of our batch schedule delay. Break the loop so this worker can take another
# task.
if timezone.now() >= deadline:
logger.info(
"deliver_webhook.delivery_deadline",
extra={
"mailbox_name": payload.mailbox_name,
"delivered": delivered,
},
)
metrics.incr(
"hybridcloud.deliver_webhooks.delivery", tags={"outcome": "delivery_deadline"}
)
break
# Fetch records from the batch in slices of 100. This avoids reading
# redundant data should we hit an error and should help keep query duration low.
query = WebhookPayloadReplica.filter(
id__gte=payload.id, mailbox_name=payload.mailbox_name
).order_by("id")
batch_count = 0
for record in query[:100]:
batch_count += 1
try:
deliver_message(record)
delivered += 1
except DeliveryFailed:
metrics.incr("hybridcloud.deliver_webhooks.delivery", tags={"outcome": "retry"})
return
# No more messages to deliver
if batch_count < 1:
logger.debug(
"deliver_webhook.delivery_complete",
extra={
"mailbox_name": payload.mailbox_name,
"delivered": delivered,
},
)
return
@instrumented_task(
name="sentry.hybridcloud.tasks.deliver_webhooks.drain_mailbox_parallel",
namespace=hybridcloud_control_tasks,
processing_deadline_duration=180,
silo_mode=SiloMode.CONTROL,
)
def drain_mailbox_parallel(payload_id: int) -> None:
"""
Deliver messages from a mailbox in small parallel batches.
Parallel delivery sacrifices strict ordering for increased throughput.
Because of the sequential delivery in a mailbox we can't get higher throughput
by scheduling batches in parallel.
Messages will be delivered in small batches until one fails, the batch
delay timeout is reached, or a message with a schedule_for greater than
the current time is encountered. A message with a higher schedule_for value
indicates that we have hit the start of another batch that has been scheduled.
"""
try:
payload = WebhookPayload.objects.get(id=payload_id)
except WebhookPayload.DoesNotExist:
# We could have hit a race condition. Since we've lost already return
# and let the other process continue, or a future process.
metrics.incr("hybridcloud.deliver_webhooks.delivery", tags={"outcome": "race"})
logger.info(
"deliver_webhook_parallel.potential_race",
extra={
"id": payload_id,
},
)
return
# Remove batches payloads that have been backlogged for MAX_DELIVERY_AGE.
# Once payloads are this old they are low value, and we're better off prioritizing new work.
max_age = timezone.now() - MAX_DELIVERY_AGE
if payload.date_added < max_age:
# We delete chunks of stale messages using a subquery
# because postgres cannot do delete with limit
stale_query = WebhookPayload.objects.filter(
id__gte=payload.id,
mailbox_name=payload.mailbox_name,
date_added__lte=timezone.now() - MAX_DELIVERY_AGE,
).values("id")[:10000]
deleted, _ = WebhookPayload.objects.filter(id__in=stale_query).delete()
if deleted:
logger.info(
"deliver_webhook_parallel.max_age_discard",
extra={
"mailbox_name": payload.mailbox_name,
"deleted": deleted,
},
)
metrics.incr(
"hybridcloud.deliver_webhooks.delivery", amount=deleted, tags={"outcome": "max_age"}
)
worker_threads = options.get("hybridcloud.webhookpayload.worker_threads")
deadline = timezone.now() + BATCH_SCHEDULE_OFFSET
request_failed = False
delivered = 0
while True:
current_time = timezone.now()
# We have run until the end of our batch schedule delay. Break the loop so this worker can take another
# task.
if current_time >= deadline:
logger.info(
"deliver_webhook_parallel.delivery_deadline",
extra={
"mailbox_name": payload.mailbox_name,
"delivered": delivered,
},
)
metrics.incr(
"hybridcloud.deliver_webhooks.delivery", tags={"outcome": "delivery_deadline"}
)
break
# Fetch records from the batch in batch_size blocks. This avoids reading
# redundant data should we hit an error and should help keep query duration low.
query = WebhookPayload.objects.filter(
id__gte=payload.id, mailbox_name=payload.mailbox_name
).order_by("id")
# Use a threadpool to send requests concurrently
with ThreadPoolExecutor(max_workers=worker_threads) as threadpool:
futures = {
threadpool.submit(deliver_message_parallel, record)
for record in query[:worker_threads]
}
for future in as_completed(futures):
payload_record, err = future.result()
if err:
# Was this the final attempt? Failing on a final attempt shouldn't stop
# deliveries as we won't retry
if payload_record.attempts >= MAX_ATTEMPTS:
payload_record.delete()
metrics.incr(
"hybridcloud.deliver_webhooks.delivery",
tags={"outcome": "attempts_exceed"},
)
logger.info(
"deliver_webhook_parallel.discard",
extra={"id": payload_record.id, "attempts": payload_record.attempts},
)
else:
metrics.incr(
"hybridcloud.deliver_webhooks.delivery", tags={"outcome": "retry"}
)
payload_record.schedule_next_attempt()
request_failed = True
if not isinstance(err, DeliveryFailed):
raise err
else:
# Delivery was successful
payload_record.delete()
delivered += 1
duration = timezone.now() - payload_record.date_added
metrics.incr("hybridcloud.deliver_webhooks.delivery", tags={"outcome": "ok"})
metrics.timing(
"hybridcloud.deliver_webhooks.delivery_time", duration.total_seconds()
)
# We didn't have any more messages to deliver.
# Break out of this task so we can get a new one.
if len(futures) < 1:
logger.info(
"deliver_webhook_parallel.task_complete",
extra={
"mailbox_name": payload.mailbox_name,
"delivered": delivered,
},
)
break
# If a delivery failed we should stop processing this mailbox and try again later.
if request_failed:
logger.info(
"deliver_webhook_parallel.delivery_request_failed",
extra={
"mailbox_name": payload.mailbox_name,
"delivered": delivered,
},
)
return
def deliver_message_parallel(payload: WebhookPayload) -> tuple[WebhookPayload, Exception | None]:
try:
perform_request(payload)
return (payload, None)
except Exception as err:
return (payload, err)
def deliver_message(payload: WebhookPayload) -> None:
"""Deliver a message if it still has delivery attempts remaining"""
if payload.attempts >= MAX_ATTEMPTS:
payload.delete()
metrics.incr("hybridcloud.deliver_webhooks.delivery", tags={"outcome": "attempts_exceed"})
logger.info(
"deliver_webhook.discard", extra={"id": payload.id, "attempts": payload.attempts}
)
return
payload.schedule_next_attempt()
perform_request(payload)
payload.delete()
duration = timezone.now() - payload.date_added
metrics.timing("hybridcloud.deliver_webhooks.delivery_time", duration.total_seconds())
metrics.incr("hybridcloud.deliver_webhooks.delivery", tags={"outcome": "ok"})
def perform_request(payload: WebhookPayload) -> None:
destination_type = payload.destination_type
match destination_type:
case DestinationType.SENTRY_REGION:
assert payload.region_name is not None
region = get_region_by_name(name=payload.region_name)
perform_region_request(region, payload)
case DestinationType.CODECOV:
perform_codecov_request(payload)
def perform_region_request(region: Region, payload: WebhookPayload) -> None:
logging_context: dict[str, str | int] = {
"payload_id": payload.id,
"mailbox_name": payload.mailbox_name,
"attempt": payload.attempts,
}
try:
client = RegionSiloClient(region=region)
with metrics.timer(
"hybridcloud.deliver_webhooks.send_request",
tags={"destination_region": region.name},
):
logging_context["region"] = region.name
logging_context["request_method"] = payload.request_method
logging_context["request_path"] = payload.request_path
headers = orjson.loads(payload.request_headers)
response = client.request(
method=payload.request_method,
path=payload.request_path,
headers=headers,
# We need to send the body as raw bytes to avoid interfering with webhook signatures
data=payload.request_body.encode("utf-8"),
json=False,
)
logger.debug(
"deliver_webhooks.success",
extra={
"status": getattr(
response, "status_code", 204
), # Request returns empty dict instead of a response object when the code is a 204
**logging_context,
},
)
except ApiHostError as err:
metrics.incr(
"hybridcloud.deliver_webhooks.failure",
tags={"reason": "host_error", "destination_region": region.name},
)
with sentry_sdk.isolation_scope() as scope:
scope.set_context(
"region",
{
"name": region.name,
"id": region.category,
"address": region.address,
},
)
err_cause = err.__cause__
if err_cause is not None and isinstance(err_cause, RestrictedIPAddress):
# Region silos that are IP address restricted are actionable.
silo_client_err = SiloClientError("Region silo is IP address restricted")
silo_client_err.__cause__ = err
sentry_sdk.capture_exception(silo_client_err)
raise DeliveryFailed()
sentry_sdk.capture_exception(err)
logger.warning("deliver_webhooks.host_error", extra={"error": str(err), **logging_context})
raise DeliveryFailed() from err
except ApiConflictError as err:
metrics.incr(
"hybridcloud.deliver_webhooks.failure",
tags={"reason": "conflict", "destination_region": region.name},
)
logger.warning(
"deliver_webhooks.conflict_occurred",
extra={"conflict_text": err.text, **logging_context},
)
# We don't retry conflicts as those are explicit failure code to drop webhook.
except (ApiTimeoutError, ApiConnectionResetError) as err:
metrics.incr(
"hybridcloud.deliver_webhooks.failure",
tags={"reason": "timeout_reset", "destination_region": region.name},
)
logger.warning("deliver_webhooks.timeout_error", extra=logging_context)
raise DeliveryFailed() from err
except ApiError as err:
err_cause = err.__cause__
response_code = -1
if isinstance(err_cause, HTTPError):
orig_response: Response | None = err_cause.response
if orig_response is not None:
response_code = orig_response.status_code
# We need to retry on region 500s
if status.HTTP_500_INTERNAL_SERVER_ERROR <= response_code < 600:
raise DeliveryFailed() from err
# We don't retry 404 or 400 as they will fail again.
if response_code in {400, 401, 403, 404}:
reason = "not_found"
if response_code == 400:
reason = "bad_request"
elif response_code == 401:
reason = "unauthorized"
elif response_code == 403:
reason = "forbidden"
metrics.incr(
"hybridcloud.deliver_webhooks.failure",
tags={"reason": reason, "destination_region": region.name},
)
logger.info(
"deliver_webhooks.40x_error",
extra={"reason": reason, **logging_context},
)
return
# Other ApiErrors should be retried
metrics.incr(
"hybridcloud.deliver_webhooks.failure",
tags={"reason": "api_error", "destination_region": region.name},
)
logger.warning(
"deliver_webhooks.api_error",
extra={"error": str(err), "response_code": response_code, **logging_context},
)
raise DeliveryFailed() from err
def perform_codecov_request(payload: WebhookPayload) -> None:
"""
We don't retry forwarding Codecov requests for now. We want to prove out that it would work.
"""
logging_context: dict[str, str | int] = {
"payload_id": payload.id,
"mailbox_name": payload.mailbox_name,
"attempt": payload.attempts,
"request_method": payload.request_method,
"request_path": payload.request_path,
}
with metrics.timer(
"hybridcloud.deliver_webhooks.send_request_to_codecov",
):
# transform request to match what codecov is expecting
if payload.request_path.strip("/") != "extensions/github/webhook":
metrics.incr(
"hybridcloud.deliver_webhooks.send_request_to_codecov.unexpected_path",
)
logger.warning(
"deliver_webhooks.send_request_to_codecov.unexpected_path",
extra={"error": "unexpected path", **logging_context},
)
return
# hard coding this because the endpoint path is different from the original request
endpoint = "/webhooks/sentry"
try:
client = CodecovApiClient()
except ConfigurationError as err:
metrics.incr(
"hybridcloud.deliver_webhooks.send_request_to_codecov.configuration_error",
)
logger.warning(
"deliver_webhooks.send_request_to_codecov.configuration_error",
extra={"error": str(err), **logging_context},
)
return
try:
headers = orjson.loads(payload.request_headers)
except orjson.JSONDecodeError as err:
metrics.incr(
"hybridcloud.deliver_webhooks.send_request_to_codecov.json_decode_error",
)
logger.warning(
"deliver_webhooks.send_request_to_codecov.json_decode_error",
extra={"error": str(err), **logging_context},
)
return
try:
response = client.post(
endpoint=endpoint,
data=payload.request_body,
headers=clean_proxy_headers(headers),
)
if response.status_code != 200:
metrics.incr(
"hybridcloud.deliver_webhooks.send_request_to_codecov.failure",
)
logger.warning(
"deliver_webhooks.send_request_to_codecov.failure",
extra={
"error": "unexpected status code",
"status_code": response.status_code,
**logging_context,
},
)
return
except requests.exceptions.RequestException as err:
metrics.incr(
"hybridcloud.deliver_webhooks.send_request_to_codecov.failure",
)
logger.warning(
"deliver_webhooks.send_request_to_codecov.failure",
extra={"error": str(err), **logging_context},
)
return
| DeliveryFailed |
python | pypa__warehouse | warehouse/manage/forms.py | {
"start": 5181,
"end": 5461
} | class ____(PasswordMixin, NewPasswordMixin, wtforms.Form):
__params__ = ["password", "new_password", "password_confirm"]
def __init__(self, *args, user_service, **kwargs):
super().__init__(*args, **kwargs)
self.user_service = user_service
| ChangePasswordForm |
python | allegroai__clearml | clearml/binding/frameworks/catboost_bind.py | {
"start": 295,
"end": 5836
} | class ____(PatchBaseModelIO):
_current_task = None
__patched = None
__callback_cls = None
@staticmethod
def update_current_task(task: Framework, **kwargs: Any) -> None:
PatchCatBoostModelIO._current_task = task
if not task:
return
PatchCatBoostModelIO._patch_model_io()
PostImportHookPatching.add_on_import("catboost", PatchCatBoostModelIO._patch_model_io)
@staticmethod
def _patch_model_io() -> None:
if PatchCatBoostModelIO.__patched:
return
if "catboost" not in sys.modules:
return
PatchCatBoostModelIO.__patched = True
# noinspection PyBroadException
try:
from catboost import (
CatBoost,
CatBoostClassifier,
CatBoostRegressor,
CatBoostRanker,
)
CatBoost.save_model = _patched_call(CatBoost.save_model, PatchCatBoostModelIO._save)
CatBoost.load_model = _patched_call(CatBoost.load_model, PatchCatBoostModelIO._load)
PatchCatBoostModelIO.__callback_cls = PatchCatBoostModelIO._generate_training_callback_class()
CatBoost.fit = _patched_call(CatBoost.fit, PatchCatBoostModelIO._fit)
CatBoostClassifier.fit = _patched_call(CatBoostClassifier.fit, PatchCatBoostModelIO._fit)
CatBoostRegressor.fit = _patched_call(CatBoostRegressor.fit, PatchCatBoostModelIO._fit)
CatBoostRanker.fit = _patched_call(CatBoostRanker.fit, PatchCatBoostModelIO._fit)
except Exception as e:
logger = PatchCatBoostModelIO._current_task.get_logger()
logger.report_text("Failed patching Catboost. Exception is: '" + str(e) + "'")
@staticmethod
def _save(
original_fn: Callable,
obj: Any,
f: Union[str, IO],
*args: Any,
**kwargs: Any,
) -> Any:
# see https://catboost.ai/en/docs/concepts/python-reference_catboost_save_model
ret = original_fn(obj, f, *args, **kwargs)
if not PatchCatBoostModelIO._current_task:
return ret
if isinstance(f, six.string_types):
filename = f
else:
filename = None
# give the model a descriptive name based on the file name
# noinspection PyBroadException
try:
model_name = Path(filename).stem
except Exception:
model_name = None
WeightsFileHandler.create_output_model(
obj,
filename,
Framework.catboost,
PatchCatBoostModelIO._current_task,
singlefile=True,
model_name=model_name,
)
return ret
@staticmethod
def _load(original_fn: Callable, f: Union[str, Any], *args: Any, **kwargs: Any) -> Any:
# see https://catboost.ai/en/docs/concepts/python-reference_catboost_load_model
if not PatchCatBoostModelIO._current_task:
return original_fn(f, *args, **kwargs)
if isinstance(f, six.string_types):
filename = f
elif len(args) >= 1 and isinstance(args[0], six.string_types):
filename = args[0]
else:
filename = None
# register input model
empty = _Empty()
model = original_fn(f, *args, **kwargs)
WeightsFileHandler.restore_weights_file(empty, filename, Framework.catboost, PatchCatBoostModelIO._current_task)
if empty.trains_in_model:
# noinspection PyBroadException
try:
model.trains_in_model = empty.trains_in_model
except Exception:
pass
return model
@staticmethod
def _fit(original_fn: Callable, obj: Any, *args: Any, **kwargs: Any) -> Any:
if not PatchCatBoostModelIO._current_task:
return original_fn(obj, *args, **kwargs)
callbacks = kwargs.get("callbacks") or []
kwargs["callbacks"] = callbacks + [PatchCatBoostModelIO.__callback_cls(task=PatchCatBoostModelIO._current_task)]
# noinspection PyBroadException
try:
return original_fn(obj, *args, **kwargs)
except Exception:
logger = PatchCatBoostModelIO._current_task.get_logger()
logger.report_text(
"Catboost metrics logging is not supported for GPU. "
"See https://github.com/catboost/catboost/issues/1792"
)
del kwargs["callbacks"]
return original_fn(obj, *args, **kwargs)
@staticmethod
def _generate_training_callback_class() -> Any:
class ClearMLCallback:
_scalar_index_counter = 0
def __init__(self, task: Any) -> None:
self._logger = task.get_logger()
self._scalar_index = ClearMLCallback._scalar_index_counter
ClearMLCallback._scalar_index_counter += 1
def after_iteration(self, info: Any) -> bool:
info = vars(info)
iteration = info.get("iteration")
for title, metric in (info.get("metrics") or {}).items():
if self._scalar_index != 0:
title = "{} - {}".format(title, self._scalar_index)
for series, log in metric.items():
value = log[-1]
self._logger.report_scalar(title=title, series=series, value=value, iteration=iteration)
return True
return ClearMLCallback
| PatchCatBoostModelIO |
python | fluentpython__example-code-2e | 08-def-type-hints/birds/protocol/swan.py | {
"start": 54,
"end": 336
} | class ____: # <2>
def honk(self, repetitions: int) -> None: # <3>
print('Honk! ' * repetitions)
def swim(self) -> None: # <4>
pass
bella = Swan()
alert(bella) # <5>
| Swan |
python | donnemartin__system-design-primer | solutions/system_design/web_crawler/web_crawler_mapreduce.py | {
"start": 55,
"end": 494
} | class ____(MRJob):
def mapper(self, _, line):
yield line, 1
def reducer(self, key, values):
total = sum(values)
if total == 1:
yield key, total
def steps(self):
"""Run the map and reduce steps."""
return [
self.mr(mapper=self.mapper,
reducer=self.reducer)
]
if __name__ == '__main__':
RemoveDuplicateUrls.run()
| RemoveDuplicateUrls |
python | falconry__falcon | examples/things_advanced.py | {
"start": 357,
"end": 555
} | class ____(Exception):
@staticmethod
def handle(req, resp, ex, params):
# TODO: Log the error, clean up, etc. before raising
raise falcon.HTTPInternalServerError()
| StorageError |
python | astropy__astropy | astropy/io/ascii/cds.py | {
"start": 1269,
"end": 8942
} | class ____(core.BaseHeader):
_subfmt = "CDS"
col_type_map = {
"e": core.FloatType,
"f": core.FloatType,
"i": core.IntType,
"a": core.StrType,
}
"The ReadMe file to construct header from."
readme = None
def get_type_map_key(self, col):
match = re.match(r"\d*(\S)", col.raw_type.lower())
if not match:
raise ValueError(
f'Unrecognized {self._subfmt} format "{col.raw_type}" for column'
f'"{col.name}"'
)
return match.group(1)
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines`` for a CDS/MRT
header.
Parameters
----------
lines : list
List of table lines
"""
# Read header block for the table ``self.data.table_name`` from the read
# me file ``self.readme``.
if self.readme and self.data.table_name:
in_header = False
readme_inputter = core.BaseInputter()
f = readme_inputter.get_lines(self.readme)
# Header info is not in data lines but in a separate file.
lines = []
comment_lines = 0
for line in f:
line = line.strip()
if in_header:
lines.append(line)
if _is_section_delimiter(line):
comment_lines += 1
if comment_lines == 3:
break
else:
match = re.match(
r"Byte-by-byte Description of file: (?P<name>.+)$",
line,
re.IGNORECASE,
)
if match:
# Split 'name' in case in contains multiple files
names = [s for s in re.split("[, ]+", match.group("name")) if s]
# Iterate on names to find if one matches the tablename
# including wildcards.
for pattern in names:
if fnmatch.fnmatch(
self.data.table_name.removesuffix(".gz"), pattern
):
in_header = True
lines.append(line)
break
else:
raise core.InconsistentTableError(
f"Can't find table {self.data.table_name} in {self.readme}"
)
found_line = False
for i_col_def, line in enumerate(lines):
if re.match(r"Byte-by-byte Description", line, re.IGNORECASE):
found_line = True
elif found_line: # First line after list of file descriptions
i_col_def -= 1 # Set i_col_def to last description line
break
else:
raise ValueError('no line with "Byte-by-byte Description" found')
re_col_def = re.compile(
r"""\s*
(?P<start> \d+ \s* -)? \s*
(?P<end> \d+) \s+
(?P<format> [\w.]+) \s+
(?P<units> \S+) \s+
(?P<name> \S+)
(\s+ (?P<descr> \S.*))?""",
re.VERBOSE,
)
cols = []
for line in itertools.islice(lines, i_col_def + 4, None):
if _is_section_delimiter(line):
break
match = re_col_def.match(line)
if match:
col = core.Column(name=match.group("name"))
col.start = int(
re.sub(r'[-\s]', '', match.group('start') or match.group('end'))) - 1 # fmt: skip
col.end = int(match.group("end"))
unit = match.group("units")
if unit == "---":
col.unit = None # "---" is the marker for no unit in CDS/MRT table
else:
try:
col.unit = Unit(unit, format="cds", parse_strict="warn")
except UnitsWarning:
# catch when warnings are turned into errors so we can check
# whether this line is likely a multi-line description (see below)
col.unit = UnrecognizedUnit(unit)
col.description = (match.group("descr") or "").strip()
col.raw_type = match.group("format")
try:
col.type = self.get_col_type(col)
except ValueError:
# If parsing the format fails and the unit is unrecognized,
# then this line is likely a continuation of the previous col's
# description that happens to start with a number
if isinstance(col.unit, UnrecognizedUnit):
if len(cols[-1].description) > 0:
cols[-1].description += " "
cols[-1].description += line.strip()
continue
else:
if col.unit is not None:
# Because we may have ignored a UnitsWarning turned into an error
# we do this again so it can be raised again if it is a real error
col.unit = Unit(unit, format="cds", parse_strict="warn")
match = re.match(
# Matches limits specifier (eg []) that may or may not be
# present
r"(?P<limits>[\[\]] \S* [\[\]])?"
# Matches '?' directly
r"\?"
# Matches to nullval if and only if '=' is present
r"((?P<equal>=)(?P<nullval> \S*))?"
# Matches to order specifier: ('+', '-', '+=', '-=')
r"(?P<order>[-+]?[=]?)"
# Matches description text even even if no whitespace is
# present after '?'
r"(\s* (?P<descriptiontext> \S.*))?",
col.description,
re.VERBOSE,
)
if match:
col.description = (match.group("descriptiontext") or "").strip()
if issubclass(col.type, core.FloatType):
fillval = "nan"
else:
fillval = "0"
if match.group("nullval") == "-":
col.null = "---"
# CDS/MRT tables can use -, --, ---, or ---- to mark missing values
# see https://github.com/astropy/astropy/issues/1335
for i in [1, 2, 3, 4]:
self.data.fill_values.append(("-" * i, fillval, col.name))
else:
col.null = match.group("nullval")
if col.null is None:
col.null = ""
self.data.fill_values.append((col.null, fillval, col.name))
cols.append(col)
else: # could be a continuation of the previous col's description
if cols:
if len(cols[-1].description) > 0:
cols[-1].description += " "
cols[-1].description += line.strip()
else:
raise ValueError(f'Line "{line}" not parsable as CDS header')
self.names = [x.name for x in cols]
self.cols = cols
| CdsHeader |
python | kamyu104__LeetCode-Solutions | Python/remove-trailing-zeros-from-a-string.py | {
"start": 38,
"end": 253
} | class ____(object):
def removeTrailingZeros(self, num):
"""
:type num: str
:rtype: str
"""
return num[:next(i for i in reversed(xrange(len(num))) if num[i] != '0')+1]
| Solution |
python | getsentry__sentry | tests/sentry/models/test_projectownership.py | {
"start": 827,
"end": 30429
} | class ____(TestCase):
def setUp(self) -> None:
self.rpc_user = user_service.get_user(user_id=self.user.id)
self.user2 = self.create_user("bar@localhost", username="bar")
self.organization.member_set.create(user_id=self.user2.id)
self.team = self.create_team(
organization=self.organization, slug="tiger-team", members=[self.user]
)
self.team2 = self.create_team(
organization=self.organization, slug="dolphin-team", members=[self.user]
)
self.team3 = self.create_team(
organization=self.organization, slug="barracuda-team", members=[self.user2]
)
self.project = self.create_project(organization=self.organization, teams=[self.team])
self.project2 = self.create_project(
organization=self.organization, teams=[self.team, self.team2]
)
def python_event_data(self):
return {
"message": "Kaboom!",
"platform": "python",
"timestamp": before_now(seconds=10).isoformat(),
"stacktrace": {
"frames": [
{
"function": "handle_set_commits",
"abs_path": "/usr/src/sentry/src/sentry/api/foo.py",
"module": "sentry.api",
"in_app": True,
"lineno": 30,
"filename": "sentry/api/foo.py",
},
{
"function": "set_commits",
"abs_path": "/usr/src/sentry/src/sentry/models/release.py",
"module": "sentry.models.release",
"in_app": True,
"lineno": 39,
"filename": "sentry/models/release.py",
},
]
},
"tags": {"sentry:release": self.release.version},
}
def assert_ownership_equals(self, o1, o2):
# Ensure actors match
assert sorted(o1[0], key=actor_key) == sorted(o2[0], key=actor_key)
# Ensure rules match
assert sorted(o1[1]) == sorted(o2[1])
def test_get_owners_default(self) -> None:
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
assert ProjectOwnership.get_owners(self.project.id, {}) == ([], None)
def test_get_owners_no_record(self) -> None:
assert ProjectOwnership.get_owners(self.project.id, {}) == ([], None)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
assert ProjectOwnership.get_owners(self.project.id, {}) == ([], None)
def test_get_owners_basic(self) -> None:
rule_a = Rule(Matcher("path", "*.py"), [Owner("team", self.team.slug)])
rule_b = Rule(Matcher("path", "src/*"), [Owner("user", self.user.email)])
ProjectOwnership.objects.create(
project_id=self.project.id, schema=dump_schema([rule_a, rule_b]), fallthrough=True
)
# No data matches
assert ProjectOwnership.get_owners(self.project.id, {}) == ([], None)
# Match only rule_a
self.assert_ownership_equals(
ProjectOwnership.get_owners(
self.project.id, {"stacktrace": {"frames": [{"filename": "foo.py"}]}}
),
([Actor(id=self.team.id, actor_type=ActorType.TEAM)], [rule_a]),
)
# Match only rule_b
self.assert_ownership_equals(
ProjectOwnership.get_owners(
self.project.id, {"stacktrace": {"frames": [{"filename": "src/thing.txt"}]}}
),
([Actor(id=self.user.id, actor_type=ActorType.USER)], [rule_b]),
)
# Matches both rule_a and rule_b
self.assert_ownership_equals(
ProjectOwnership.get_owners(
self.project.id, {"stacktrace": {"frames": [{"filename": "src/foo.py"}]}}
),
(
[
Actor(id=self.team.id, actor_type=ActorType.TEAM),
Actor(id=self.user.id, actor_type=ActorType.USER),
],
[rule_a, rule_b],
),
)
# We should be ignoring the fallthrough flag
owner = ProjectOwnership.objects.get(project_id=self.project.id)
owner.fallthrough = False
owner.save()
assert ProjectOwnership.get_owners(
self.project.id, {"stacktrace": {"frames": [{"filename": "xxxx"}]}}
) == ([], None)
self.assert_ownership_equals(
ProjectOwnership.get_owners(
self.project.id, {"stacktrace": {"frames": [{"filename": "src/foo.py"}]}}
),
(
[
Actor(id=self.team.id, actor_type=ActorType.TEAM),
Actor(id=self.user.id, actor_type=ActorType.USER),
],
[rule_a, rule_b],
),
)
def test_get_owners_when_codeowners_exists_and_no_issueowners(self) -> None:
# This case will never exist bc we create a ProjectOwnership record if none exists when creating a ProjectCodeOwner record.
# We have this testcase for potential corrupt data.
self.code_mapping = self.create_code_mapping(project=self.project)
rule_a = Rule(Matcher("path", "*.js"), [Owner("team", self.team.slug)])
self.create_codeowners(
self.project,
self.code_mapping,
raw="*.js @tiger-team",
schema=dump_schema([rule_a]),
)
self.assert_ownership_equals(
ProjectOwnership.get_owners(
self.project.id, {"stacktrace": {"frames": [{"filename": "src/foo.js"}]}}
),
(
[Actor(id=self.team.id, actor_type=ActorType.TEAM)],
[rule_a],
),
)
def test_get_owners_when_codeowners_and_issueowners_exists(self) -> None:
self.code_mapping = self.create_code_mapping(project=self.project2)
rule_a = Rule(Matcher("path", "*.py"), [Owner("team", self.team.slug)])
rule_b = Rule(Matcher("path", "src/*"), [Owner("user", self.user.email)])
rule_c = Rule(Matcher("path", "*.py"), [Owner("team", self.team2.slug)])
ProjectOwnership.objects.create(
project_id=self.project2.id, schema=dump_schema([rule_a, rule_b]), fallthrough=True
)
self.create_codeowners(
self.project2, self.code_mapping, raw="*.py @tiger-team", schema=dump_schema([rule_c])
)
self.assert_ownership_equals(
ProjectOwnership.get_owners(
self.project2.id, {"stacktrace": {"frames": [{"filename": "api/foo.py"}]}}
),
(
[
Actor(id=self.team.id, actor_type=ActorType.TEAM),
Actor(id=self.team2.id, actor_type=ActorType.TEAM),
],
[rule_a, rule_c],
),
)
def test_get_issue_owners_no_codeowners_or_issueowners(self) -> None:
assert ProjectOwnership.get_issue_owners(self.project.id, {}) == []
def test_get_issue_owners_only_issueowners_exists(self) -> None:
rule_a = Rule(Matcher("path", "*.py"), [Owner("team", self.team.slug)])
rule_b = Rule(Matcher("path", "src/*"), [Owner("user", self.user.email)])
ProjectOwnership.objects.create(
project_id=self.project.id,
schema=dump_schema([rule_a, rule_b]),
)
# No data matches
assert ProjectOwnership.get_issue_owners(self.project.id, {}) == []
# Match on stacktrace
assert ProjectOwnership.get_issue_owners(
self.project.id,
{"stacktrace": {"frames": [{"filename": "foo.py"}]}},
) == [(rule_a, [self.team], OwnerRuleType.OWNERSHIP_RULE.value)]
def test_get_issue_owners_where_owner_is_not_in_project(self) -> None:
self.project_2 = self.create_project(organization=self.organization, teams=[self.team3])
rule_a = Rule(Matcher("path", "*.py"), [Owner("team", self.team.slug)])
rule_b = Rule(Matcher("path", "src/*"), [Owner("user", self.user2.email)])
ProjectOwnership.objects.create(
project_id=self.project.id,
schema=dump_schema([rule_a, rule_b]),
)
# Match on stacktrace but owner is not in the Project
assert (
ProjectOwnership.get_issue_owners(
self.project.id,
{"stacktrace": {"frames": [{"filename": "src/foo.js"}]}},
)
== []
)
def test_get_issue_owners_only_codeowners_exists_with_default_assignment_settings(self) -> None:
# This case will never exist bc we create a ProjectOwnership record if none exists when creating a ProjectCodeOwner record.
# We have this testcase for potential corrupt data.
self.code_mapping = self.create_code_mapping(project=self.project)
rule_a = Rule(Matcher("path", "*.js"), [Owner("team", self.team.slug)])
self.create_codeowners(
self.project,
self.code_mapping,
raw="*.js @tiger-team",
schema=dump_schema([rule_a]),
)
# No data matches
assert ProjectOwnership.get_issue_owners(self.project.id, {}) == []
# Match on stacktrace
assert ProjectOwnership.get_issue_owners(
self.project.id, {"stacktrace": {"frames": [{"filename": "foo.js"}]}}
) == [(rule_a, [self.team], OwnerRuleType.CODEOWNERS.value)]
def test_get_issue_owners_when_codeowners_and_issueowners_exists(self) -> None:
self.code_mapping = self.create_code_mapping(project=self.project2)
rule_a = Rule(Matcher("path", "*.py"), [Owner("team", self.team.slug)])
rule_b = Rule(Matcher("path", "src/foo.py"), [Owner("user", self.user.email)])
rule_c = Rule(Matcher("path", "*.py"), [Owner("team", self.team2.slug)])
ProjectOwnership.objects.create(
project_id=self.project2.id,
schema=dump_schema([rule_a, rule_b]),
fallthrough=True,
)
self.create_codeowners(
self.project2, self.code_mapping, raw="*.py @tiger-team", schema=dump_schema([rule_c])
)
assert ProjectOwnership.get_issue_owners(
self.project2.id, {"stacktrace": {"frames": [{"filename": "api/foo.py"}]}}
) == [
(rule_a, [self.team], OwnerRuleType.OWNERSHIP_RULE.value),
(rule_c, [self.team2], OwnerRuleType.CODEOWNERS.value),
]
# more than 2 matches
assert ProjectOwnership.get_issue_owners(
self.project2.id, {"stacktrace": {"frames": [{"filename": "src/foo.py"}]}}
) == [
(rule_b, [self.rpc_user], OwnerRuleType.OWNERSHIP_RULE.value),
(rule_a, [self.team], OwnerRuleType.OWNERSHIP_RULE.value),
]
def test_handle_auto_assignment_when_only_codeowners_exists(self) -> None:
self.code_mapping = self.create_code_mapping(project=self.project)
rule_c = Rule(Matcher("path", "*.py"), [Owner("team", self.team.slug)])
self.create_codeowners(
self.project, self.code_mapping, raw="*.py @tiger-team", schema=dump_schema([rule_c])
)
self.event = self.store_event(
data=self.python_event_data(),
project_id=self.project.id,
)
assert self.event.group is not None
GroupOwner.objects.create(
group=self.event.group,
type=GroupOwnerType.CODEOWNERS.value,
user_id=None,
team_id=self.team.id,
project=self.project,
organization=self.project.organization,
context={"rule": str(rule_c)},
)
ProjectOwnership.handle_auto_assignment(self.project.id, self.event)
assert len(GroupAssignee.objects.all()) == 1
assignee = GroupAssignee.objects.get(group=self.event.group)
assert assignee.team_id == self.team.id
def test_handle_auto_assignment_when_only_suspect_commit_exists_multiple_emails(self) -> None:
"""Test that if a user has 2 verified email addresses, the non-primary one is the commit author, and the project
is using the suspect committer auto assignment we correctly assign the issue to the user.
"""
self.ownership = ProjectOwnership.objects.create(
project_id=self.project2.id,
fallthrough=False,
auto_assignment=True,
suspect_committer_auto_assignment=True,
)
self.repo = Repository.objects.create(
organization_id=self.project2.organization.id,
name="example",
integration_id=self.integration.id,
)
self.second_email = self.create_useremail(
user=self.user2, email="hb@mysecondemail.com", is_verified=True
)
self.commit_author = self.create_commit_author(
project=self.project2, user=self.user2, email=self.second_email.email
)
self.commit = self.create_commit(
project=self.project2,
repo=self.repo,
author=self.commit_author,
key="asdfwreqr",
message="placeholder commit message",
)
self.event = self.store_event(
data=self.python_event_data(),
project_id=self.project2.id,
)
assert self.event.group is not None
GroupOwner.objects.create(
group=self.event.group,
type=GroupOwnerType.SUSPECT_COMMIT.value,
user_id=self.user2.id,
team_id=None,
project=self.project2,
organization=self.project2.organization,
context={"commitId": self.commit.id},
)
ProjectOwnership.handle_auto_assignment(self.project2.id, self.event)
assert len(GroupAssignee.objects.all()) == 1
assignee = GroupAssignee.objects.get(group=self.event.group)
assert assignee.user_id == self.user2.id
def test_handle_skip_auto_assignment(self) -> None:
"""Test that if an issue has already been manually assigned, we skip overriding the assignment
on a future event with auto-assignment.
"""
self.code_mapping = self.create_code_mapping(project=self.project)
rule_c = Rule(Matcher("path", "*.py"), [Owner("team", self.team.slug)])
self.create_codeowners(
self.project, self.code_mapping, raw="*.py @tiger-team", schema=dump_schema([rule_c])
)
self.event = self.store_event(
data=self.python_event_data(),
project_id=self.project.id,
)
assert self.event.group is not None
GroupOwner.objects.create(
group=self.event.group,
type=GroupOwnerType.CODEOWNERS.value,
user_id=None,
team_id=self.team.id,
project=self.project,
organization=self.project.organization,
context={"rule": str(rule_c)},
)
ProjectOwnership.handle_auto_assignment(self.project.id, self.event)
assert len(GroupAssignee.objects.all()) == 1
assignee = GroupAssignee.objects.get(group=self.event.group)
assert assignee.team_id == self.team.id
# manually assign the issue to someone else
assert self.event.group is not None
GroupAssignee.objects.assign(self.event.group, self.user)
# ensure the issue was not reassigned
ProjectOwnership.handle_auto_assignment(self.project.id, self.event)
assert len(GroupAssignee.objects.all()) == 1
assignee = GroupAssignee.objects.get(group=self.event.group)
assert assignee.user_id == self.user.id
@patch("sentry.models.GroupAssignee.objects.assign")
def test_handle_skip_auto_assignment_same_assignee(self, mock_assign: MagicMock) -> None:
"""Test that if an issue has already been assigned, we skip the assignment
on a future event with auto-assignment if the assignee won't change.
"""
self.code_mapping = self.create_code_mapping(project=self.project)
rule_c = Rule(Matcher("path", "*.py"), [Owner("team", self.team.slug)])
self.create_codeowners(
self.project, self.code_mapping, raw="*.py @tiger-team", schema=dump_schema([rule_c])
)
self.event = self.store_event(
data=self.python_event_data(),
project_id=self.project.id,
)
assert self.event.group is not None
GroupOwner.objects.create(
group=self.event.group,
type=GroupOwnerType.CODEOWNERS.value,
user_id=None,
team_id=self.team.id,
project=self.project,
organization=self.project.organization,
context={"rule": str(rule_c)},
)
GroupAssignee.objects.create(group=self.event.group, project=self.project, team=self.team)
# ensure we skip calling assign
ProjectOwnership.handle_auto_assignment(self.project.id, self.event)
mock_assign.assert_not_called()
def test_handle_auto_assignment_when_codeowners_and_issueowners_exists(self) -> None:
self.code_mapping = self.create_code_mapping(project=self.project2)
rule_a = Rule(Matcher("path", "*.py"), [Owner("team", self.team.slug)])
rule_b = Rule(Matcher("path", "src/*"), [Owner("user", self.user.email)])
rule_c = Rule(Matcher("path", "*.py"), [Owner("team", self.team2.slug)])
self.ownership = ProjectOwnership.objects.create(
project_id=self.project2.id,
schema=dump_schema([rule_a, rule_b]),
fallthrough=True,
auto_assignment=False,
suspect_committer_auto_assignment=False,
)
self.create_codeowners(
self.project2, self.code_mapping, raw="*.py @tiger-team", schema=dump_schema([rule_c])
)
self.event = self.store_event(
data=self.python_event_data(),
project_id=self.project2.id,
)
assert self.event.group is not None
GroupOwner.objects.create(
group=self.event.group,
type=GroupOwnerType.OWNERSHIP_RULE.value,
user_id=None,
team_id=self.team.id,
project=self.project2,
organization=self.project2.organization,
context={"rule": str(rule_a)},
)
GroupOwner.objects.create(
group=self.event.group,
type=GroupOwnerType.CODEOWNERS.value,
user_id=None,
team_id=self.team2.id,
project=self.project2,
organization=self.project2.organization,
context={"rule": str(rule_c)},
)
ProjectOwnership.handle_auto_assignment(self.project2.id, self.event)
assert len(GroupAssignee.objects.all()) == 0
# Turn on auto assignment
self.ownership.auto_assignment = True
self.ownership.suspect_committer_auto_assignment = True
self.ownership.save()
ProjectOwnership.handle_auto_assignment(self.project2.id, self.event)
assert len(GroupAssignee.objects.all()) == 1
assignee = GroupAssignee.objects.get(group=self.event.group)
assert assignee.team_id == self.team.id
def test_no_group_owner(self) -> None:
self.event = self.store_event(
data=self.python_event_data(),
project_id=self.project2.id,
)
ProjectOwnership.handle_auto_assignment(self.project2.id, self.event)
assert len(GroupAssignee.objects.all()) == 0
def test_handle_auto_assignment_when_suspect_committer_and_codeowners_and_issueowners_exists(
self,
):
self.repo = Repository.objects.create(
organization_id=self.organization.id,
name="example",
integration_id=self.integration.id,
)
self.code_mapping = self.create_code_mapping(
repo=self.repo,
project=self.project2,
)
self.commit_author = self.create_commit_author(project=self.project2, user=self.user2)
self.commit = self.create_commit(
project=self.project2,
repo=self.repo,
author=self.commit_author,
key="asdfwreqr",
message="placeholder commit message",
)
rule_a = Rule(Matcher("path", "*.py"), [Owner("team", self.team.slug)])
rule_b = Rule(Matcher("path", "src/*"), [Owner("user", self.user.email)])
rule_c = Rule(Matcher("path", "*.py"), [Owner("team", self.team3.slug)])
self.ownership = ProjectOwnership.objects.create(
project_id=self.project2.id,
schema=dump_schema([rule_a, rule_b]),
fallthrough=True,
auto_assignment=False,
suspect_committer_auto_assignment=False,
)
self.create_codeowners(
self.project2, self.code_mapping, raw="*.py @tiger-team", schema=dump_schema([rule_c])
)
self.event = self.store_event(
data=self.python_event_data(),
project_id=self.project2.id,
)
assert self.event.group is not None
GroupOwner.objects.create(
group=self.event.group,
project=self.project2,
user_id=self.user2.id,
team_id=None,
organization=self.project2.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
context={"commitId": self.commit.id},
)
GroupOwner.objects.create(
group=self.event.group,
type=GroupOwnerType.OWNERSHIP_RULE.value,
user_id=None,
team_id=self.team.id,
project=self.project2,
organization=self.project2.organization,
context={"rule": str(rule_a)},
)
GroupOwner.objects.create(
group=self.event.group,
type=GroupOwnerType.CODEOWNERS.value,
user_id=None,
team_id=self.team3.id,
project=self.project2,
organization=self.project.organization,
context={"rule": str(rule_c)},
)
ProjectOwnership.handle_auto_assignment(self.project2.id, self.event)
assert len(GroupAssignee.objects.all()) == 0
# Turn on auto assignment
self.ownership.auto_assignment = True
self.ownership.suspect_committer_auto_assignment = True
self.ownership.save()
ProjectOwnership.handle_auto_assignment(self.project2.id, self.event)
assert len(GroupAssignee.objects.all()) == 1
assignee = GroupAssignee.objects.get(group=self.event.group)
assert assignee.user_id == self.user2.id
def test_abs_path_when_filename_present(self) -> None:
frame = {
"filename": "computer.cpp",
"abs_path": "C:\\My\\Path\\computer.cpp",
}
rule = Rule(Matcher("path", "*My\\Path*"), [Owner("team", self.team.slug)])
ProjectOwnership.objects.create(
project_id=self.project.id, schema=dump_schema([rule]), fallthrough=True
)
assert ProjectOwnership.get_owners(
self.project.id, {"stacktrace": {"frames": [frame]}}
) == ([Actor(id=self.team.id, actor_type=ActorType.TEAM)], [rule])
def test_saves_without_either_auto_assignment_option(self) -> None:
self.group = self.create_group(project=self.project)
# Turn off all autoassignment
ProjectOwnership.objects.create(
project_id=self.project.id,
suspect_committer_auto_assignment=False,
auto_assignment=False,
)
assert ProjectOwnership.get_owners(self.project.id, {}) == ([], None)
def test_force_handle_auto_assignment(self) -> None:
# Run auto-assignment first
self.code_mapping = self.create_code_mapping(project=self.project)
rule_a = Rule(Matcher("path", "*.py"), [Owner("team", self.team.slug)])
self.create_codeowners(
self.project, self.code_mapping, raw="*.py @tiger-team", schema=dump_schema([rule_a])
)
self.event = self.store_event(
data=self.python_event_data(),
project_id=self.project.id,
)
assert self.event.group is not None
GroupOwner.objects.create(
group=self.event.group,
type=GroupOwnerType.CODEOWNERS.value,
user_id=None,
team_id=self.team.id,
project=self.project,
organization=self.project.organization,
context={"rule": str(rule_a)},
)
ProjectOwnership.handle_auto_assignment(self.project.id, self.event)
assert len(GroupAssignee.objects.all()) == 1
assignee = GroupAssignee.objects.get(group=self.event.group)
assert assignee.team_id == self.team.id
# Unassign the auto-assigned user
GroupAssignee.objects.deassign(self.event.group, self.user)
assert len(GroupAssignee.objects.all()) == 0
# Manually assign the group to someone else
GroupAssignee.objects.assign(self.event.group, self.user)
assert len(GroupAssignee.objects.all()) == 1
assignee = GroupAssignee.objects.get(group=self.event.group)
assert assignee.user_id == self.user.id
# Run force auto-assignment
ProjectOwnership.handle_auto_assignment(
self.project.id,
group=self.event.group,
force_autoassign=True,
)
assert len(GroupAssignee.objects.all()) == 1
assignee = GroupAssignee.objects.get(group=self.event.group)
assert assignee.team_id == self.team.id
def test_force_handle_auto_assignment_cache_check(self) -> None:
# Run auto-assignment first
self.code_mapping = self.create_code_mapping(project=self.project)
rule_a = Rule(Matcher("path", "*.py"), [Owner("team", self.team.slug)])
self.create_codeowners(
self.project, self.code_mapping, raw="*.py @tiger-team", schema=dump_schema([rule_a])
)
self.event = self.store_event(
data=self.python_event_data(),
project_id=self.project.id,
)
assert self.event.group is not None
GroupOwner.objects.create(
group=self.event.group,
type=GroupOwnerType.CODEOWNERS.value,
user_id=None,
team_id=self.team.id,
project=self.project,
organization=self.project.organization,
context={"rule": str(rule_a)},
)
ProjectOwnership.handle_auto_assignment(self.project.id, self.event)
assert len(GroupAssignee.objects.all()) == 1
assignee = GroupAssignee.objects.get(group=self.event.group)
assert assignee.team_id == self.team.id
def test_autoassignment_with_multiple_codeowners(self) -> None:
processing_team = self.create_team(
organization=self.organization, slug="processing-team", members=[self.user]
)
payment_team = self.create_team(
organization=self.organization, slug="payment-team", members=[self.user2]
)
project = self.create_project(
organization=self.organization, teams=[processing_team, payment_team], slug="rotation"
)
data = {
"stacktrace": {
"frames": [
{"abs_path": "/app/payment_service.rb", "in_app": True},
{"abs_path": "/app/processing_unit.rb", "in_app": True},
{"abs_path": "/app/processing_unit.rb", "in_app": True},
]
}
}
event = self.store_event(
data=data,
project_id=project.id,
)
rules = [
Rule(Matcher("codeowners", "*payment*"), [Owner("team", payment_team.slug)]),
Rule(
Matcher("codeowners", "/app/processing_unit.rb"),
[Owner("team", processing_team.slug)],
),
]
ProjectOwnership.objects.create(
project_id=project.id, schema=dump_schema(rules), fallthrough=True
)
assert len(ProjectOwnership.get_issue_owners(project.id, data)) == 2
# Order of group owners should be determined by `get_issue_owners` which has the correct order
group_owners = [
GroupOwner(
group=event.group,
type=GroupOwnerType.CODEOWNERS.value,
user_id=None,
team_id=processing_team.id,
project=project,
organization=project.organization,
context={"rule": str(rules[1])},
),
GroupOwner(
group=event.group,
type=GroupOwnerType.CODEOWNERS.value,
user_id=None,
team_id=payment_team.id,
project=project,
organization=project.organization,
context={"rule": str(rules[0])},
),
]
GroupOwner.objects.bulk_create(group_owners)
ProjectOwnership.handle_auto_assignment(project.id, event)
assert len(GroupAssignee.objects.all()) == 1
assignee = GroupAssignee.objects.get(group=event.group)
assert assignee.team_id == processing_team.id
| ProjectOwnershipTestCase |
python | huggingface__transformers | src/transformers/models/got_ocr2/configuration_got_ocr2.py | {
"start": 5502,
"end": 9286
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`GotOcr2ForConditionalGeneration`]. It is used to instantiate a
GotOcr2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of GOT-OCR-2.0.
e.g [stepfun-ai/GOT-OCR-2.0-hf](https://huggingface.co/stepfun-ai/GOT-OCR-2.0-hf)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
The config object or dictionary of the text backbone.
image_token_index (`int`, *optional*, defaults to 151859):
The image token index to encode the image prompt.
image_seq_length (`int`, *optional*, defaults to 576):
Sequence length of one image embedding.
pad_token_id (`int`, *optional*, defaults to -1):
Padding token id.
```python
>>> from transformers import GotOcr2ForConditionalGeneration, GotOcr2Config
>>> # Initializing a GotOcr2 style configuration
>>> configuration = GotOcr2Config()
>>> # Initializing a model from the Qwen2-VL-7B style configuration
>>> model = GotOcr2ForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "got_ocr2"
attribute_map = {
"image_token_id": "image_token_index",
}
sub_configs = {"text_config": AutoConfig, "vision_config": GotOcr2VisionConfig}
def __init__(
self,
vision_config: Optional[dict] = None,
text_config: Optional[dict] = None,
image_token_index: Optional[int] = 151859,
image_seq_length: Optional[int] = 576,
pad_token_id: Optional[int] = -1,
**kwargs,
):
self.image_token_index = image_token_index
self.image_seq_length = image_seq_length
self.pad_token_id = pad_token_id
if vision_config is None:
self.vision_config = GotOcr2VisionConfig()
elif isinstance(vision_config, dict):
self.vision_config = GotOcr2VisionConfig(**vision_config)
elif isinstance(vision_config, GotOcr2VisionConfig):
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config.get("model_type", "qwen2")
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["qwen2"](
vocab_size=151860,
hidden_size=1024,
intermediate_size=2816,
num_hidden_layers=24,
num_attention_heads=16,
num_key_value_heads=16,
hidden_act="silu",
max_position_embeddings=32768,
initializer_range=0.02,
rms_norm_eps=1e-6,
use_cache=True,
tie_word_embeddings=True,
rope_theta=1000000.0,
rope_parameters=None,
use_sliding_window=False,
sliding_window=4096,
max_window_layers=21,
attention_dropout=0.0,
)
self.text_config = text_config
super().__init__(**kwargs)
__all__ = ["GotOcr2VisionConfig", "GotOcr2Config"]
| GotOcr2Config |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 44199,
"end": 44848
} | class ____(dict, NonStrictDataModel):
"""
Task section params
"""
_schema = {
"additionalProperties": True,
"description": "Task section params",
"type": "object",
}
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.assert_isinstance(args, "section_params", dict, is_array=True)
kwargs.update(args)
self.assert_isinstance(kwargs.values(), "params", (ParamsItem, dict), is_array=True)
for k, v in kwargs.items():
if isinstance(v, dict):
kwargs[k] = ParamsItem(**v)
super(SectionParams, self).__init__(**kwargs)
| SectionParams |
python | scipy__scipy | scipy/linalg/tests/test_basic.py | {
"start": 81352,
"end": 83698
} | class ____:
def test_types(self):
for dtype in np.typecodes['AllFloat']:
x = np.array([1, 2, 3], dtype=dtype)
tol = max(1e-15, np.finfo(dtype).eps.real * 20)
assert_allclose(norm(x), np.sqrt(14), rtol=tol)
assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol)
for dtype in np.typecodes['Complex']:
x = np.array([1j, 2j, 3j], dtype=dtype)
tol = max(1e-15, np.finfo(dtype).eps.real * 20)
assert_allclose(norm(x), np.sqrt(14), rtol=tol)
assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol)
def test_overflow(self):
# unlike numpy's norm, this one is
# safer on overflow
a = array([1e20], dtype=float32)
assert_almost_equal(norm(a), a)
def test_stable(self):
# more stable than numpy's norm
a = array([1e4] + [1]*10000, dtype=float32)
try:
# snrm in double precision; we obtain the same as for float64
# -- large atol needed due to varying blas implementations
assert_allclose(norm(a) - 1e4, 0.5, atol=1e-2)
except AssertionError:
# snrm implemented in single precision, == np.linalg.norm result
msg = ": Result should equal either 0.0 or 0.5 (depending on " \
"implementation of snrm2)."
assert_almost_equal(norm(a) - 1e4, 0.0, err_msg=msg)
def test_zero_norm(self):
assert_equal(norm([1, 0, 3], 0), 2)
assert_equal(norm([1, 2, 3], 0), 3)
def test_axis_kwd(self):
a = np.array([[[2, 1], [3, 4]]] * 2, 'd')
assert_allclose(norm(a, axis=1), [[3.60555128, 4.12310563]] * 2)
assert_allclose(norm(a, 1, axis=1), [[5.] * 2] * 2)
def test_keepdims_kwd(self):
a = np.array([[[2, 1], [3, 4]]] * 2, 'd')
b = norm(a, axis=1, keepdims=True)
assert_allclose(b, [[[3.60555128, 4.12310563]]] * 2)
assert_(b.shape == (2, 1, 2))
assert_allclose(norm(a, 1, axis=2, keepdims=True), [[[3.], [7.]]] * 2)
@pytest.mark.skipif(not HAS_ILP64, reason="64-bit BLAS required")
def test_large_vector(self):
check_free_memory(free_mb=17000)
x = np.zeros([2**31], dtype=np.float64)
x[-1] = 1
res = norm(x)
del x
assert_allclose(res, 1.0)
| TestVectorNorms |
python | aimacode__aima-python | agents.py | {
"start": 28340,
"end": 36213
} | class ____(XYEnvironment):
pit_probability = 0.2 # Probability to spawn a pit in a location. (From Chapter 7.2)
# Room should be 4x4 grid of rooms. The extra 2 for walls
def __init__(self, agent_program, width=6, height=6):
super().__init__(width, height)
self.init_world(agent_program)
def init_world(self, program):
"""Spawn items in the world based on probabilities from the book"""
"WALLS"
self.add_walls()
"PITS"
for x in range(self.x_start, self.x_end):
for y in range(self.y_start, self.y_end):
if random.random() < self.pit_probability:
self.add_thing(Pit(), (x, y), True)
self.add_thing(Breeze(), (x - 1, y), True)
self.add_thing(Breeze(), (x, y - 1), True)
self.add_thing(Breeze(), (x + 1, y), True)
self.add_thing(Breeze(), (x, y + 1), True)
"WUMPUS"
w_x, w_y = self.random_location_inbounds(exclude=(1, 1))
self.add_thing(Wumpus(lambda x: ""), (w_x, w_y), True)
self.add_thing(Stench(), (w_x - 1, w_y), True)
self.add_thing(Stench(), (w_x + 1, w_y), True)
self.add_thing(Stench(), (w_x, w_y - 1), True)
self.add_thing(Stench(), (w_x, w_y + 1), True)
"GOLD"
self.add_thing(Gold(), self.random_location_inbounds(exclude=(1, 1)), True)
"AGENT"
self.add_thing(Explorer(program), (1, 1), True)
def get_world(self, show_walls=True):
"""Return the items in the world"""
result = []
x_start, y_start = (0, 0) if show_walls else (1, 1)
if show_walls:
x_end, y_end = self.width, self.height
else:
x_end, y_end = self.width - 1, self.height - 1
for x in range(x_start, x_end):
row = []
for y in range(y_start, y_end):
row.append(self.list_things_at((x, y)))
result.append(row)
return result
def percepts_from(self, agent, location, tclass=Thing):
"""Return percepts from a given location,
and replaces some items with percepts from chapter 7."""
thing_percepts = {
Gold: Glitter(),
Wall: Bump(),
Wumpus: Stench(),
Pit: Breeze()}
"""Agents don't need to get their percepts"""
thing_percepts[agent.__class__] = None
"""Gold only glitters in its cell"""
if location != agent.location:
thing_percepts[Gold] = None
result = [thing_percepts.get(thing.__class__, thing) for thing in self.things
if thing.location == location and isinstance(thing, tclass)]
return result if len(result) else [None]
def percept(self, agent):
"""Return things in adjacent (not diagonal) cells of the agent.
Result format: [Left, Right, Up, Down, Center / Current location]"""
x, y = agent.location
result = []
result.append(self.percepts_from(agent, (x - 1, y)))
result.append(self.percepts_from(agent, (x + 1, y)))
result.append(self.percepts_from(agent, (x, y - 1)))
result.append(self.percepts_from(agent, (x, y + 1)))
result.append(self.percepts_from(agent, (x, y)))
"""The wumpus gives out a loud scream once it's killed."""
wumpus = [thing for thing in self.things if isinstance(thing, Wumpus)]
if len(wumpus) and not wumpus[0].alive and not wumpus[0].screamed:
result[-1].append(Scream())
wumpus[0].screamed = True
return result
def execute_action(self, agent, action):
"""Modify the state of the environment based on the agent's actions.
Performance score taken directly out of the book."""
if isinstance(agent, Explorer) and self.in_danger(agent):
return
agent.bump = False
if action in ['TurnRight', 'TurnLeft', 'Forward', 'Grab']:
super().execute_action(agent, action)
agent.performance -= 1
elif action == 'Climb':
if agent.location == (1, 1): # Agent can only climb out of (1,1)
agent.performance += 1000 if Gold() in agent.holding else 0
self.delete_thing(agent)
elif action == 'Shoot':
"""The arrow travels straight down the path the agent is facing"""
if agent.has_arrow:
arrow_travel = agent.direction.move_forward(agent.location)
while self.is_inbounds(arrow_travel):
wumpus = [thing for thing in self.list_things_at(arrow_travel)
if isinstance(thing, Wumpus)]
if len(wumpus):
wumpus[0].alive = False
break
arrow_travel = agent.direction.move_forward(agent.location)
agent.has_arrow = False
def in_danger(self, agent):
"""Check if Explorer is in danger (Pit or Wumpus), if he is, kill him"""
for thing in self.list_things_at(agent.location):
if isinstance(thing, Pit) or (isinstance(thing, Wumpus) and thing.alive):
agent.alive = False
agent.performance -= 1000
agent.killed_by = thing.__class__.__name__
return True
return False
def is_done(self):
"""The game is over when the Explorer is killed
or if he climbs out of the cave only at (1,1)."""
explorer = [agent for agent in self.agents if isinstance(agent, Explorer)]
if len(explorer):
if explorer[0].alive:
return False
else:
print("Death by {} [-1000].".format(explorer[0].killed_by))
else:
print("Explorer climbed out {}."
.format("with Gold [+1000]!" if Gold() not in self.things else "without Gold [+0]"))
return True
# TODO: Arrow needs to be implemented
# ______________________________________________________________________________
def compare_agents(EnvFactory, AgentFactories, n=10, steps=1000):
"""See how well each of several agents do in n instances of an environment.
Pass in a factory (constructor) for environments, and several for agents.
Create n instances of the environment, and run each agent in copies of
each one for steps. Return a list of (agent, average-score) tuples.
>>> environment = TrivialVacuumEnvironment
>>> agents = [ModelBasedVacuumAgent, ReflexVacuumAgent]
>>> result = compare_agents(environment, agents)
>>> performance_ModelBasedVacuumAgent = result[0][1]
>>> performance_ReflexVacuumAgent = result[1][1]
>>> performance_ReflexVacuumAgent <= performance_ModelBasedVacuumAgent
True
"""
envs = [EnvFactory() for i in range(n)]
return [(A, test_agent(A, steps, copy.deepcopy(envs)))
for A in AgentFactories]
def test_agent(AgentFactory, steps, envs):
"""Return the mean score of running an agent in each of the envs, for steps
>>> def constant_prog(percept):
... return percept
...
>>> agent = Agent(constant_prog)
>>> result = agent.program(5)
>>> result == 5
True
"""
def score(env):
agent = AgentFactory()
env.add_thing(agent)
env.run(steps)
return agent.performance
return mean(map(score, envs))
# _________________________________________________________________________
__doc__ += """
>>> a = ReflexVacuumAgent()
>>> a.program((loc_A, 'Clean'))
'Right'
>>> a.program((loc_B, 'Clean'))
'Left'
>>> a.program((loc_A, 'Dirty'))
'Suck'
>>> a.program((loc_A, 'Dirty'))
'Suck'
>>> e = TrivialVacuumEnvironment()
>>> e.add_thing(ModelBasedVacuumAgent())
>>> e.run(5)
"""
| WumpusEnvironment |
python | wntrblm__nox | nox/_option_set.py | {
"start": 8564,
"end": 13626
} | class ____:
"""A set of options.
A high-level wrapper over ``argparse.ArgumentParser``. It allows for
introspection of options as well as quality-of-life features such as
finalization, callable defaults, and strongly typed namespaces for tests.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.parser_args = args
self.parser_kwargs = kwargs
self.options: dict[str, Option] = {}
self.groups: dict[str, OptionGroup] = {}
def add_options(self, *args: Option) -> None:
"""Adds a sequence of Options to the OptionSet.
Args:
args (Sequence[Options])
"""
for option in args:
self.options[option.name] = option
def add_groups(self, *args: OptionGroup) -> None:
"""Adds a sequence of OptionGroups to the OptionSet.
Args:
args (Sequence[OptionGroup])
"""
for option_group in args:
self.groups[option_group.name] = option_group
def parser(self) -> ArgumentParser:
"""Returns an ``ArgumentParser`` for this option set.
Generally, you won't use this directly. Instead, use
:func:`parse_args`.
"""
parser_kwargs = {"allow_abbrev": False, **self.parser_kwargs}
parser = argparse.ArgumentParser(*self.parser_args, **parser_kwargs)
groups = {
name: parser.add_argument_group(*option_group.args, **option_group.kwargs)
for name, option_group in self.groups.items()
}
for option in self.options.values():
if option.hidden:
continue
# Every option must have a group (except for hidden options)
if option.group is None:
msg = f"Option {option.name} must either have a group or be hidden."
raise ValueError(msg)
argument = groups[option.group.name].add_argument(
*option.flags, help=option.help, default=option.default, **option.kwargs
)
if option.completer:
argument.completer = option.completer # type: ignore[attr-defined]
return parser
def print_help(self) -> None:
return self.parser().print_help()
def _finalize_args(self, args: Namespace) -> None:
"""Does any necessary post-processing on arguments."""
for option in self.options.values():
# Handle hidden items.
if option.hidden and not hasattr(args, option.name):
setattr(args, option.name, option.default)
value = getattr(args, option.name)
# Handle options that have finalizer functions.
if option.finalizer_func:
setattr(args, option.name, option.finalizer_func(value, args))
def parse_args(self) -> Namespace:
parser = self.parser()
argcomplete.autocomplete(parser)
args = parser.parse_args()
try:
self._finalize_args(args)
except ArgumentError as err:
parser.error(str(err))
return args
def namespace(self, **kwargs: Any) -> argparse.Namespace:
"""Return a namespace that contains all of the options in this set.
kwargs can be used to set values and does so in a checked way - you
can not set an option that does not exist in the set. This is useful
for testing.
"""
args = {option.name: option.default for option in self.options.values()}
# Don't use update - validate that the keys actually exist so that
# we don't accidentally set non-existent options.
# don't bother with coverage here, this is effectively only ever
# used in tests.
for key, value in kwargs.items():
if key not in args:
msg = f"{key} is not an option."
raise KeyError(msg)
args[key] = value
return argparse.Namespace(**args)
def noxfile_namespace(self) -> NoxOptions:
"""Returns a namespace of options that can be set in the configuration
file."""
return NoxOptions(
**{
option.name: option.default
for option in self.options.values()
if option.noxfile
} # type: ignore[arg-type]
)
def merge_namespaces(
self, command_args: Namespace, noxfile_args: NoxOptions
) -> None:
"""Merges the command-line options with the Noxfile options."""
command_args_copy = Namespace(**vars(command_args))
for name, option in self.options.items():
if option.merge_func:
setattr(
command_args,
name,
option.merge_func(command_args_copy, noxfile_args),
)
elif option.noxfile:
value = getattr(command_args_copy, name, None) or getattr(
noxfile_args, name, None
)
setattr(command_args, name, value)
| OptionSet |
python | PyCQA__flake8 | src/flake8/processor.py | {
"start": 810,
"end": 16857
} | class ____:
"""Processes a file and holds state.
This processes a file by generating tokens, logical and physical lines,
and AST trees. This also provides a way of passing state about the file
to checks expecting that state. Any public attribute on this object can
be requested by a plugin. The known public attributes are:
- :attr:`blank_before`
- :attr:`blank_lines`
- :attr:`checker_state`
- :attr:`indent_char`
- :attr:`indent_level`
- :attr:`line_number`
- :attr:`logical_line`
- :attr:`max_line_length`
- :attr:`max_doc_length`
- :attr:`multiline`
- :attr:`noqa`
- :attr:`previous_indent_level`
- :attr:`previous_logical`
- :attr:`previous_unindented_logical_line`
- :attr:`tokens`
- :attr:`file_tokens`
- :attr:`total_lines`
- :attr:`verbose`
"""
#: always ``False``, included for compatibility
noqa = False
def __init__(
self,
filename: str,
options: argparse.Namespace,
lines: list[str] | None = None,
) -> None:
"""Initialize our file processor.
:param filename: Name of the file to process
"""
self.options = options
self.filename = filename
self.lines = lines if lines is not None else self.read_lines()
self.strip_utf_bom()
# Defaults for public attributes
#: Number of preceding blank lines
self.blank_before = 0
#: Number of blank lines
self.blank_lines = 0
#: Checker states for each plugin?
self._checker_states: dict[str, dict[Any, Any]] = {}
#: Current checker state
self.checker_state: dict[Any, Any] = {}
#: User provided option for hang closing
self.hang_closing = options.hang_closing
#: Character used for indentation
self.indent_char: str | None = None
#: Current level of indentation
self.indent_level = 0
#: Number of spaces used for indentation
self.indent_size = options.indent_size
#: Line number in the file
self.line_number = 0
#: Current logical line
self.logical_line = ""
#: Maximum line length as configured by the user
self.max_line_length = options.max_line_length
#: Maximum docstring / comment line length as configured by the user
self.max_doc_length = options.max_doc_length
#: Whether the current physical line is multiline
self.multiline = False
#: Previous level of indentation
self.previous_indent_level = 0
#: Previous logical line
self.previous_logical = ""
#: Previous unindented (i.e. top-level) logical line
self.previous_unindented_logical_line = ""
#: Current set of tokens
self.tokens: list[tokenize.TokenInfo] = []
#: Total number of lines in the file
self.total_lines = len(self.lines)
#: Verbosity level of Flake8
self.verbose = options.verbose
#: Statistics dictionary
self.statistics = {"logical lines": 0}
self._fstring_start = self._tstring_start = -1
@functools.cached_property
def file_tokens(self) -> list[tokenize.TokenInfo]:
"""Return the complete set of tokens for a file."""
line_iter = iter(self.lines)
return list(tokenize.generate_tokens(lambda: next(line_iter)))
def fstring_start(self, lineno: int) -> None: # pragma: >=3.12 cover
"""Signal the beginning of an fstring."""
self._fstring_start = lineno
def tstring_start(self, lineno: int) -> None: # pragma: >=3.14 cover
"""Signal the beginning of an tstring."""
self._tstring_start = lineno
def multiline_string(self, token: tokenize.TokenInfo) -> Generator[str]:
"""Iterate through the lines of a multiline string."""
if token.type == FSTRING_END: # pragma: >=3.12 cover
start = self._fstring_start
elif token.type == TSTRING_END: # pragma: >=3.14 cover
start = self._tstring_start
else:
start = token.start[0]
self.multiline = True
self.line_number = start
# intentionally don't include the last line, that line will be
# terminated later by a future end-of-line
for _ in range(start, token.end[0]):
yield self.lines[self.line_number - 1]
self.line_number += 1
self.multiline = False
def reset_blank_before(self) -> None:
"""Reset the blank_before attribute to zero."""
self.blank_before = 0
def delete_first_token(self) -> None:
"""Delete the first token in the list of tokens."""
del self.tokens[0]
def visited_new_blank_line(self) -> None:
"""Note that we visited a new blank line."""
self.blank_lines += 1
def update_state(self, mapping: _LogicalMapping) -> None:
"""Update the indent level based on the logical line mapping."""
(start_row, start_col) = mapping[0][1]
start_line = self.lines[start_row - 1]
self.indent_level = expand_indent(start_line[:start_col])
if self.blank_before < self.blank_lines:
self.blank_before = self.blank_lines
def update_checker_state_for(self, plugin: LoadedPlugin) -> None:
"""Update the checker_state attribute for the plugin."""
if "checker_state" in plugin.parameters:
self.checker_state = self._checker_states.setdefault(
plugin.entry_name, {},
)
def next_logical_line(self) -> None:
"""Record the previous logical line.
This also resets the tokens list and the blank_lines count.
"""
if self.logical_line:
self.previous_indent_level = self.indent_level
self.previous_logical = self.logical_line
if not self.indent_level:
self.previous_unindented_logical_line = self.logical_line
self.blank_lines = 0
self.tokens = []
def build_logical_line_tokens(self) -> _Logical: # noqa: C901
"""Build the mapping, comments, and logical line lists."""
logical = []
comments = []
mapping: _LogicalMapping = []
length = 0
previous_row = previous_column = None
for token_type, text, start, end, line in self.tokens:
if token_type in SKIP_TOKENS:
continue
if not mapping:
mapping = [(0, start)]
if token_type == tokenize.COMMENT:
comments.append(text)
continue
if token_type == tokenize.STRING:
text = mutate_string(text)
elif token_type in {
FSTRING_MIDDLE,
TSTRING_MIDDLE,
}: # pragma: >=3.12 cover # noqa: E501
# A curly brace in an FSTRING_MIDDLE token must be an escaped
# curly brace. Both 'text' and 'end' will account for the
# escaped version of the token (i.e. a single brace) rather
# than the raw double brace version, so we must counteract this
brace_offset = text.count("{") + text.count("}")
text = "x" * (len(text) + brace_offset)
end = (end[0], end[1] + brace_offset)
if previous_row is not None and previous_column is not None:
(start_row, start_column) = start
if previous_row != start_row:
row_index = previous_row - 1
column_index = previous_column - 1
previous_text = self.lines[row_index][column_index]
if previous_text == "," or (
previous_text not in "{[(" and text not in "}])"
):
text = f" {text}"
elif previous_column != start_column:
text = line[previous_column:start_column] + text
logical.append(text)
length += len(text)
mapping.append((length, end))
(previous_row, previous_column) = end
return comments, logical, mapping
def build_ast(self) -> ast.AST:
"""Build an abstract syntax tree from the list of lines."""
return ast.parse("".join(self.lines))
def build_logical_line(self) -> tuple[str, str, _LogicalMapping]:
"""Build a logical line from the current tokens list."""
comments, logical, mapping_list = self.build_logical_line_tokens()
joined_comments = "".join(comments)
self.logical_line = "".join(logical)
self.statistics["logical lines"] += 1
return joined_comments, self.logical_line, mapping_list
def keyword_arguments_for(
self,
parameters: dict[str, bool],
arguments: dict[str, Any],
) -> dict[str, Any]:
"""Generate the keyword arguments for a list of parameters."""
ret = {}
for param, required in parameters.items():
if param in arguments:
continue
try:
ret[param] = getattr(self, param)
except AttributeError:
if required:
raise
else:
LOG.warning(
'Plugin requested optional parameter "%s" '
"but this is not an available parameter.",
param,
)
return ret
def generate_tokens(self) -> Generator[tokenize.TokenInfo]:
"""Tokenize the file and yield the tokens."""
for token in tokenize.generate_tokens(self.next_line):
if token[2][0] > self.total_lines:
break
self.tokens.append(token)
yield token
def _noqa_line_range(self, min_line: int, max_line: int) -> dict[int, str]:
line_range = range(min_line, max_line + 1)
joined = "".join(self.lines[min_line - 1: max_line])
return dict.fromkeys(line_range, joined)
@functools.cached_property
def _noqa_line_mapping(self) -> dict[int, str]:
"""Map from line number to the line we'll search for `noqa` in."""
try:
file_tokens = self.file_tokens
except (tokenize.TokenError, SyntaxError):
# if we failed to parse the file tokens, we'll always fail in
# the future, so set this so the code does not try again
return {}
else:
ret = {}
min_line = len(self.lines) + 2
max_line = -1
for tp, _, (s_line, _), (e_line, _), _ in file_tokens:
if tp == tokenize.ENDMARKER or tp == tokenize.DEDENT:
continue
min_line = min(min_line, s_line)
max_line = max(max_line, e_line)
if tp in (tokenize.NL, tokenize.NEWLINE):
ret.update(self._noqa_line_range(min_line, max_line))
min_line = len(self.lines) + 2
max_line = -1
return ret
def noqa_line_for(self, line_number: int) -> str | None:
"""Retrieve the line which will be used to determine noqa."""
# NOTE(sigmavirus24): Some plugins choose to report errors for empty
# files on Line 1. In those cases, we shouldn't bother trying to
# retrieve a physical line (since none exist).
return self._noqa_line_mapping.get(line_number)
def next_line(self) -> str:
"""Get the next line from the list."""
if self.line_number >= self.total_lines:
return ""
line = self.lines[self.line_number]
self.line_number += 1
if self.indent_char is None and line[:1] in defaults.WHITESPACE:
self.indent_char = line[0]
return line
def read_lines(self) -> list[str]:
"""Read the lines for this file checker."""
if self.filename == "-":
self.filename = self.options.stdin_display_name or "stdin"
lines = self.read_lines_from_stdin()
else:
lines = self.read_lines_from_filename()
return lines
def read_lines_from_filename(self) -> list[str]:
"""Read the lines for a file."""
try:
with tokenize.open(self.filename) as fd:
return fd.readlines()
except (SyntaxError, UnicodeError):
# If we can't detect the codec with tokenize.detect_encoding, or
# the detected encoding is incorrect, just fallback to latin-1.
with open(self.filename, encoding="latin-1") as fd:
return fd.readlines()
def read_lines_from_stdin(self) -> list[str]:
"""Read the lines from standard in."""
return utils.stdin_get_lines()
def should_ignore_file(self) -> bool:
"""Check if ``flake8: noqa`` is in the file to be ignored.
:returns:
True if a line matches :attr:`defaults.NOQA_FILE`,
otherwise False
"""
if not self.options.disable_noqa and any(
defaults.NOQA_FILE.match(line) for line in self.lines
):
return True
elif any(defaults.NOQA_FILE.search(line) for line in self.lines):
LOG.warning(
"Detected `flake8: noqa` on line with code. To ignore an "
"error on a line use `noqa` instead.",
)
return False
else:
return False
def strip_utf_bom(self) -> None:
"""Strip the UTF bom from the lines of the file."""
if not self.lines:
# If we have nothing to analyze quit early
return
# If the first byte of the file is a UTF-8 BOM, strip it
if self.lines[0][:1] == "\uFEFF":
self.lines[0] = self.lines[0][1:]
elif self.lines[0][:3] == "\xEF\xBB\xBF":
self.lines[0] = self.lines[0][3:]
def is_eol_token(token: tokenize.TokenInfo) -> bool:
"""Check if the token is an end-of-line token."""
return token[0] in NEWLINE or token[4][token[3][1]:].lstrip() == "\\\n"
def is_multiline_string(token: tokenize.TokenInfo) -> bool:
"""Check if this is a multiline string."""
return token.type in {FSTRING_END, TSTRING_END} or (
token.type == tokenize.STRING and "\n" in token.string
)
def token_is_newline(token: tokenize.TokenInfo) -> bool:
"""Check if the token type is a newline token type."""
return token[0] in NEWLINE
def count_parentheses(current_parentheses_count: int, token_text: str) -> int:
"""Count the number of parentheses."""
if token_text in "([{": # nosec
return current_parentheses_count + 1
elif token_text in "}])": # nosec
return current_parentheses_count - 1
return current_parentheses_count
def expand_indent(line: str) -> int:
r"""Return the amount of indentation.
Tabs are expanded to the next multiple of 8.
>>> expand_indent(' ')
4
>>> expand_indent('\t')
8
>>> expand_indent(' \t')
8
>>> expand_indent(' \t')
16
"""
return len(line.expandtabs(8))
# NOTE(sigmavirus24): This was taken wholesale from
# https://github.com/PyCQA/pycodestyle. The in-line comments were edited to be
# more descriptive.
def mutate_string(text: str) -> str:
"""Replace contents with 'xxx' to prevent syntax matching.
>>> mutate_string('"abc"')
'"xxx"'
>>> mutate_string("'''abc'''")
"'''xxx'''"
>>> mutate_string("r'abc'")
"r'xxx'"
"""
# NOTE(sigmavirus24): If there are string modifiers (e.g., b, u, r)
# use the last "character" to determine if we're using single or double
# quotes and then find the first instance of it
start = text.index(text[-1]) + 1
end = len(text) - 1
# Check for triple-quoted strings
if text[-3:] in ('"""', "'''"):
start += 2
end -= 2
return text[:start] + "x" * (end - start) + text[end:]
| FileProcessor |
python | pypa__pipenv | pipenv/project.py | {
"start": 3138,
"end": 4052
} | class ____(json.JSONEncoder):
"""A specialized JSON encoder to convert loaded TOML data into a lock file.
This adds a few characteristics to the encoder:
* The JSON is always prettified with indents and spaces.
* TOMLKit's container elements are seamlessly encodable.
* The output is always UTF-8-encoded text, never binary, even on Python 2.
"""
def __init__(self):
super().__init__(indent=4, separators=(",", ": "), sort_keys=True)
def default(self, obj):
if isinstance(obj, Path):
obj = obj.as_posix()
return super().default(obj)
def encode(self, obj):
content = super().encode(obj)
if not isinstance(content, str):
content = content.decode("utf-8")
return content
def preferred_newlines(f):
if isinstance(f.newlines, str):
return f.newlines
return DEFAULT_NEWLINES
| _LockFileEncoder |
python | RaRe-Technologies__gensim | gensim/test/test_similarities.py | {
"start": 21254,
"end": 22952
} | class ____(_TestSimilarityABC):
def setUp(self):
self.cls = similarities.Similarity
def factoryMethod(self):
# Override factoryMethod.
return self.cls(None, CORPUS, num_features=len(DICTIONARY), shardsize=5)
def test_sharding(self):
for num_best in [None, 0, 1, 9, 1000]:
for shardsize in [1, 2, 9, 1000]:
self.testFull(num_best=num_best, shardsize=shardsize)
def test_reopen(self):
"""test re-opening partially full shards"""
index = similarities.Similarity(None, CORPUS[:5], num_features=len(DICTIONARY), shardsize=9)
_ = index[CORPUS[0]] # noqa:F841 forces shard close
index.add_documents(CORPUS[5:])
query = CORPUS[0]
sims = index[query]
expected = [(0, 0.99999994), (2, 0.28867513), (3, 0.23570226), (1, 0.23570226)]
expected = matutils.sparse2full(expected, len(index))
self.assertTrue(numpy.allclose(expected, sims))
index.destroy()
def test_mmap_compressed(self):
pass
# turns out this test doesn't exercise this because there are no arrays
# to be mmaped!
def test_chunksize(self):
index = self.cls(None, CORPUS, num_features=len(DICTIONARY), shardsize=5)
expected = [sim for sim in index]
index.chunksize = len(index) - 1
sims = [sim for sim in index]
self.assertTrue(numpy.allclose(expected, sims))
index.destroy()
def test_nlargest(self):
sims = ([(0, 0.8), (1, 0.2), (2, 0.0), (3, 0.0), (4, -0.1), (5, -0.15)],)
expected = [(0, 0.8), (1, 0.2), (5, -0.15)]
self.assertTrue(_nlargest(3, sims), expected)
| TestSimilarity |
python | dask__dask | dask/layers.py | {
"start": 1598,
"end": 1815
} | class ____(ArrayBlockwiseDep):
"""Produce chunk shapes given a chunk index"""
def __getitem__(self, idx: tuple[int, ...]):
return tuple(chunk[i] for i, chunk in zip(idx, self.chunks))
| ArrayChunkShapeDep |
python | tensorflow__tensorflow | tensorflow/python/tpu/feature_column_v2_test.py | {
"start": 12356,
"end": 12994
} | class ____(test.TestCase,
parameterized.TestCase):
@test_util.deprecated_graph_mode_only
def test_error_dense_shape_invalid(self):
categorical_column_input = fc_lib.categorical_column_with_identity(
key='inp', num_buckets=5)
with self.assertRaisesRegex(ValueError, 'tensor_core_shape must be size 2'):
tpu_fc.shared_embedding_columns_v2([categorical_column_input],
dimension=20,
tensor_core_shape=[None, 20, 15])
if __name__ == '__main__':
test.main()
| DeviceSpecificEmbeddingColumnTestV2 |
python | pytorch__pytorch | benchmarks/dynamo/pr_time_benchmarks/benchmarks/dynamo_inline.py | {
"start": 493,
"end": 697
} | class ____(nn.Module):
def __init__(self):
super().__init__()
self._n = 1000
def forward(self, x):
for _ in range(self._n):
x = fn9(x)
return x
| InlineMod |
python | ray-project__ray | python/ray/util/collective/types.py | {
"start": 3886,
"end": 3995
} | class ____:
root_rank = 0
root_tensor = 0
timeout_ms = unset_timeout_ms
@dataclass
| BroadcastOptions |
python | wandb__wandb | wandb/sdk/artifacts/storage_handlers/s3_handler.py | {
"start": 1362,
"end": 12853
} | class ____(StorageHandler):
_scheme: str
_cache: ArtifactFileCache
_s3: boto3.resources.base.ServiceResource | None
def __init__(self, scheme: str = "s3") -> None:
self._scheme = scheme
self._cache = get_artifact_file_cache()
self._s3 = None
def can_handle(self, parsed_url: ParseResult) -> bool:
return parsed_url.scheme == self._scheme
def init_boto(self) -> boto3.resources.base.ServiceResource:
if self._s3 is not None:
return self._s3
boto: boto3 = util.get_module(
"boto3",
required="s3:// references requires the boto3 library, run pip install wandb[aws]",
lazy=False,
)
from botocore.client import Config # type: ignore
s3_endpoint = os.getenv("AWS_S3_ENDPOINT_URL")
config = (
Config(s3={"addressing_style": "virtual"})
if s3_endpoint and self._is_coreweave_endpoint(s3_endpoint)
else None
)
self._s3 = boto.session.Session().resource(
"s3",
endpoint_url=s3_endpoint,
region_name=os.getenv("AWS_REGION"),
config=config,
)
self._botocore = util.get_module("botocore")
return self._s3
def _parse_uri(self, uri: str) -> tuple[str, str, str | None]:
url = urlparse(uri)
query = dict(parse_qsl(url.query))
bucket = url.netloc
key = url.path[1:] # strip leading slash
version = query.get("versionId")
return bucket, key, version
def load_path(
self,
manifest_entry: ArtifactManifestEntry,
local: bool = False,
) -> URIStr | FilePathStr:
if not local:
assert manifest_entry.ref is not None
return manifest_entry.ref
assert manifest_entry.ref is not None
path, hit, cache_open = self._cache.check_etag_obj_path(
URIStr(manifest_entry.ref),
ETag(manifest_entry.digest),
manifest_entry.size or 0,
)
if hit:
return path
self.init_boto()
assert self._s3 is not None # mypy: unwraps optionality
bucket, key, _ = self._parse_uri(manifest_entry.ref)
version = manifest_entry.extra.get("versionID")
extra_args = {}
if version:
obj_version = self._s3.ObjectVersion(bucket, key, version)
extra_args["VersionId"] = version
obj = obj_version.Object()
else:
obj = self._s3.Object(bucket, key)
try:
etag = (
obj_version.head()["ETag"][1:-1] # escape leading and trailing
if version
else self._etag_from_obj(obj)
)
except self._botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "404":
raise FileNotFoundError(
f"Unable to find {manifest_entry.path} at s3://{bucket}/{key}"
) from e
raise
if etag != manifest_entry.digest:
# Try to match the etag with some other version.
if version:
raise ValueError(
f"Digest mismatch for object {manifest_entry.ref} with version {version}: expected {manifest_entry.digest} but found {etag}"
)
obj = None
object_versions = self._s3.Bucket(bucket).object_versions.filter(Prefix=key)
for object_version in object_versions:
if manifest_entry.extra.get("etag") == self._etag_from_obj(
object_version
):
obj = object_version.Object()
extra_args["VersionId"] = object_version.version_id
break
if obj is None:
raise FileNotFoundError(
"Couldn't find object version for {}/{} matching etag {}".format(
bucket, key, manifest_entry.extra.get("etag")
)
)
with cache_open(mode="wb") as f:
obj.download_fileobj(f, ExtraArgs=extra_args)
return path
def store_path(
self,
artifact: Artifact,
path: URIStr | FilePathStr,
name: StrPath | None = None,
checksum: bool = True,
max_objects: int | None = None,
) -> list[ArtifactManifestEntry]:
self.init_boto()
assert self._s3 is not None # mypy: unwraps optionality
# The passed in path might have query string parameters.
# We only need to care about a subset, like version, when
# parsing. Once we have that, we can store the rest of the
# metadata in the artifact entry itself.
bucket, key, version = self._parse_uri(path)
path = URIStr(f"{self._scheme}://{bucket}/{key}")
max_objects = max_objects or DEFAULT_MAX_OBJECTS
if not checksum:
entry_path = name or (key if key != "" else bucket)
return [ArtifactManifestEntry(path=entry_path, ref=path, digest=path)]
# If an explicit version is specified, use that. Otherwise, use the head version.
objs = (
[self._s3.ObjectVersion(bucket, key, version).Object()]
if version
else [self._s3.Object(bucket, key)]
)
multi = False
if key != "":
try:
objs[0].load()
# S3 lacks true folders, but a folder key can reference a valid
# file, which prevents recursive uploads. Check whether the
# object's metadata marks it as a directory and perform a
# multi-file upload if so.
if "x-directory" in objs[0].content_type:
multi = True
except self._botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "404":
multi = True
else:
raise CommError(
f"Unable to connect to S3 ({e.response['Error']['Code']}): "
f"{e.response['Error']['Message']}. Check that your "
"authentication credentials are valid and that your region is "
"set correctly."
)
else:
multi = True
with TimedIf(multi):
if multi:
termlog(
f'Generating checksum for up to {max_objects} objects in "{bucket}/{key}"... ',
newline=False,
)
if key != "":
objs = (
self._s3.Bucket(bucket)
.objects.filter(Prefix=key)
.limit(max_objects)
)
else:
objs = self._s3.Bucket(bucket).objects.limit(max_objects)
# Weird iterator scoping makes us assign this to a local function
size = self._size_from_obj
entries = [
self._entry_from_obj(obj, path, name, prefix=key, multi=multi)
for obj in objs
if size(obj) > 0
]
if len(entries) > max_objects:
raise ValueError(
f"Exceeded {max_objects} objects tracked, pass max_objects to add_reference"
)
return entries
def _size_from_obj(self, obj: boto3.s3.Object | boto3.s3.ObjectSummary) -> int:
# ObjectSummary has size, Object has content_length
size: int
if hasattr(obj, "size"):
size = obj.size
else:
size = obj.content_length
return size
def _entry_from_obj(
self,
obj: boto3.s3.Object | boto3.s3.ObjectSummary,
path: str,
name: StrPath | None = None,
prefix: str = "",
multi: bool = False,
) -> ArtifactManifestEntry:
"""Create an ArtifactManifestEntry from an S3 object.
Args:
obj: The S3 object
path: The S3-style path (e.g.: "s3://bucket/file.txt")
name: The user assigned name, or None if not specified
prefix: The prefix to add (will be the same as `path` for directories)
multi: Whether or not this is a multi-object add.
"""
bucket, key, _ = self._parse_uri(path)
# Always use posix paths, since that's what S3 uses.
posix_key = PurePosixPath(obj.key) # the bucket key
posix_path = PurePosixPath(bucket) / key # the path, with the scheme stripped
posix_prefix = PurePosixPath(prefix) # the prefix, if adding a prefix
posix_name = PurePosixPath(name or "")
posix_ref = posix_path
if name is None:
# We're adding a directory (prefix), so calculate a relative path.
if str(posix_prefix) in str(posix_key) and posix_prefix != posix_key:
posix_name = posix_key.relative_to(posix_prefix)
posix_ref = posix_path / posix_name
else:
posix_name = PurePosixPath(posix_key.name)
posix_ref = posix_path
elif multi:
# We're adding a directory with a name override.
relpath = posix_key.relative_to(posix_prefix)
posix_name = posix_name / relpath
posix_ref = posix_path / relpath
return ArtifactManifestEntry(
path=posix_name,
ref=URIStr(f"{self._scheme}://{str(posix_ref)}"),
digest=ETag(self._etag_from_obj(obj)),
size=self._size_from_obj(obj),
extra=self._extra_from_obj(obj),
)
@staticmethod
def _etag_from_obj(obj: boto3.s3.Object | boto3.s3.ObjectSummary) -> ETag:
etag: ETag
etag = obj.e_tag[1:-1] # escape leading and trailing quote
return etag
def _extra_from_obj(
self, obj: boto3.s3.Object | boto3.s3.ObjectSummary
) -> dict[str, str]:
extra = {
"etag": obj.e_tag[1:-1], # escape leading and trailing quote
}
if not hasattr(obj, "version_id"):
# Convert ObjectSummary to Object to get the version_id.
obj = self._s3.Object(obj.bucket_name, obj.key) # type: ignore[union-attr]
if hasattr(obj, "version_id") and obj.version_id and obj.version_id != "null":
extra["versionID"] = obj.version_id
return extra
_CW_LEGACY_NETLOC_REGEX: re.Pattern[str] = re.compile(
r"""
# accelerated endpoints like "accel-object.<region>.coreweave.com"
accel-object\.[a-z0-9-]+\.coreweave\.com
|
# URLs like "object.<region>.coreweave.com"
object\.[a-z0-9-]+\.coreweave\.com
""",
flags=re.VERBOSE,
)
def _is_coreweave_endpoint(self, endpoint_url: str) -> bool:
if not (url := endpoint_url.strip().rstrip("/")):
return False
# Only http://cwlota.com is supported using HTTP
if url == "http://cwlota.com":
return True
# Enforce HTTPS otherwise
https_url = ensureprefix(url, "https://")
netloc = urlparse(https_url).netloc
return bool(
# Match for https://cwobject.com
(netloc == "cwobject.com")
or
# Check for legacy endpoints
self._CW_LEGACY_NETLOC_REGEX.fullmatch(netloc)
)
| S3Handler |
python | doocs__leetcode | solution/0300-0399/0366.Find Leaves of Binary Tree/Solution.py | {
"start": 192,
"end": 637
} | class ____:
def findLeaves(self, root: Optional[TreeNode]) -> List[List[int]]:
def dfs(root: Optional[TreeNode]) -> int:
if root is None:
return 0
l, r = dfs(root.left), dfs(root.right)
h = max(l, r)
if len(ans) == h:
ans.append([])
ans[h].append(root.val)
return h + 1
ans = []
dfs(root)
return ans
| Solution |
python | django-debug-toolbar__django-debug-toolbar | tests/panels/test_async_panel_compatibility.py | {
"start": 196,
"end": 247
} | class ____(Panel):
is_async = True
| MockAsyncPanel |
python | ansible__ansible | lib/ansible/modules/hostname.py | {
"start": 6803,
"end": 6964
} | class ____(FileStrategy):
"""
This is a SLES Hostname strategy class - it edits the
/etc/HOSTNAME file.
"""
FILE = '/etc/HOSTNAME'
| SLESStrategy |
python | django__django | tests/gis_tests/geo3d/models.py | {
"start": 959,
"end": 1112
} | class ____(NamedModel):
poly = models.PolygonField(dim=3, srid=32140)
class Meta:
required_db_features = {"supports_3d_storage"}
| Polygon3D |
python | mlflow__mlflow | mlflow/store/artifact/azure_data_lake_artifact_repo.py | {
"start": 2390,
"end": 12111
} | class ____(CloudArtifactRepository):
"""
Stores artifacts on Azure Data Lake Storage Gen2.
This repository is used with URIs of the form
``abfs[s]://file_system@account_name.dfs.core.windows.net/<path>/<path>``.
Args
credential: Azure credential (see options in https://learn.microsoft.com/en-us/python/api/azure-core/azure.core.credentials?view=azure-python)
to use to authenticate to storage
"""
def __init__(
self,
artifact_uri: str,
credential=None,
credential_refresh_def=None,
tracking_uri: str | None = None,
registry_uri: str | None = None,
) -> None:
super().__init__(artifact_uri, tracking_uri, registry_uri)
_DEFAULT_TIMEOUT = 600 # 10 minutes
self.write_timeout = MLFLOW_ARTIFACT_UPLOAD_DOWNLOAD_TIMEOUT.get() or _DEFAULT_TIMEOUT
self._parse_credentials(credential)
self._credential_refresh_def = credential_refresh_def
def _parse_credentials(self, credential):
(filesystem, account_name, domain_suffix, path) = _parse_abfss_uri(self.artifact_uri)
account_url = f"https://{account_name}.{domain_suffix}"
self.sas_token = ""
if credential is None:
if sas_token := os.environ.get("AZURE_STORAGE_SAS_TOKEN"):
self.sas_token = f"?{sas_token}"
account_url += self.sas_token
else:
from azure.identity import DefaultAzureCredential
credential = DefaultAzureCredential()
self.credential = credential
data_lake_client = _get_data_lake_client(
account_url=account_url, credential=self.credential
)
self.fs_client = data_lake_client.get_file_system_client(filesystem)
self.domain_suffix = domain_suffix
self.base_data_lake_directory = path
self.account_name = account_name
self.container = filesystem
def _refresh_credentials(self):
if not self._credential_refresh_def:
return self.fs_client
new_creds = self._credential_refresh_def()
self._parse_credentials(new_creds["credential"])
return self.fs_client
def log_artifact(self, local_file, artifact_path=None):
dest_path = self.base_data_lake_directory
if artifact_path:
dest_path = posixpath.join(dest_path, artifact_path)
local_file_path = os.path.abspath(local_file)
file_name = os.path.basename(local_file_path)
def try_func(creds):
dir_client = creds.get_directory_client(dest_path)
file_client = dir_client.get_file_client(file_name)
if os.path.getsize(local_file_path) == 0:
file_client.create_file()
else:
with open(local_file_path, "rb") as file:
file_client.upload_data(data=file, overwrite=True)
_retry_with_new_creds(
try_func=try_func, creds_func=self._refresh_credentials, orig_creds=self.fs_client
)
def list_artifacts(self, path=None):
directory_to_list = self.base_data_lake_directory
if path:
directory_to_list = posixpath.join(directory_to_list, path)
infos = []
for result in self.fs_client.get_paths(path=directory_to_list, recursive=False):
if (
directory_to_list == result.name
): # result isn't actually a child of the path we're interested in, so skip it
continue
if result.is_directory:
subdir = posixpath.relpath(path=result.name, start=self.base_data_lake_directory)
subdir = subdir.removesuffix("/")
infos.append(FileInfo(subdir, is_dir=True, file_size=None))
else:
file_name = posixpath.relpath(path=result.name, start=self.base_data_lake_directory)
infos.append(FileInfo(file_name, is_dir=False, file_size=result.content_length))
# The list_artifacts API expects us to return an empty list if the
# the path references a single file.
rel_path = directory_to_list[len(self.base_data_lake_directory) + 1 :]
if (len(infos) == 1) and not infos[0].is_dir and (infos[0].path == rel_path):
return []
return sorted(infos, key=lambda f: f.path)
def _download_from_cloud(self, remote_file_path, local_path):
remote_full_path = posixpath.join(self.base_data_lake_directory, remote_file_path)
base_dir = posixpath.dirname(remote_full_path)
def try_func(creds):
dir_client = creds.get_directory_client(base_dir)
filename = posixpath.basename(remote_full_path)
file_client = dir_client.get_file_client(filename)
with open(local_path, "wb") as file:
file_client.download_file().readinto(file)
_retry_with_new_creds(
try_func=try_func, creds_func=self._refresh_credentials, orig_creds=self.fs_client
)
def delete_artifacts(self, artifact_path=None):
raise NotImplementedError("This artifact repository does not support deleting artifacts")
def _upload_to_cloud(self, cloud_credential_info, src_file_path, artifact_file_path):
if (
MLFLOW_ENABLE_MULTIPART_UPLOAD.get()
and os.path.getsize(src_file_path) > MLFLOW_MULTIPART_UPLOAD_CHUNK_SIZE.get()
):
self._multipart_upload(cloud_credential_info, src_file_path, artifact_file_path)
else:
artifact_subdir = posixpath.dirname(artifact_file_path)
self.log_artifact(src_file_path, artifact_subdir)
def _retryable_adls_function(self, func, artifact_file_path, **kwargs):
# Attempt to call the passed function. Retry if the credentials have expired
try:
func(**kwargs)
except requests.HTTPError as e:
if e.response.status_code in [403]:
new_credentials = self._get_write_credential_infos([artifact_file_path])[0]
kwargs["sas_url"] = new_credentials.signed_uri
func(**kwargs)
else:
raise e
def _multipart_upload(self, credentials, src_file_path, artifact_file_path):
"""
Uploads a file to a given Azure storage location using the ADLS gen2 API.
"""
try:
headers = self._extract_headers_from_credentials(credentials.headers)
# try to create the file
self._retryable_adls_function(
func=put_adls_file_creation,
artifact_file_path=artifact_file_path,
sas_url=credentials.signed_uri,
headers=headers,
)
# next try to append the file
futures = {}
file_size = os.path.getsize(src_file_path)
num_chunks = _compute_num_chunks(
src_file_path, MLFLOW_MULTIPART_UPLOAD_CHUNK_SIZE.get()
)
use_single_part_upload = num_chunks == 1
for index in range(num_chunks):
start_byte = index * MLFLOW_MULTIPART_UPLOAD_CHUNK_SIZE.get()
future = self.chunk_thread_pool.submit(
self._retryable_adls_function,
func=patch_adls_file_upload,
artifact_file_path=artifact_file_path,
sas_url=credentials.signed_uri,
local_file=src_file_path,
start_byte=start_byte,
size=MLFLOW_MULTIPART_UPLOAD_CHUNK_SIZE.get(),
position=start_byte,
headers=headers,
is_single=use_single_part_upload,
)
futures[future] = index
_, errors = _complete_futures(futures, src_file_path)
if errors:
raise MlflowException(
f"Failed to upload at least one part of {artifact_file_path}. Errors: {errors}"
)
# finally try to flush the file
if not use_single_part_upload:
self._retryable_adls_function(
func=patch_adls_flush,
artifact_file_path=artifact_file_path,
sas_url=credentials.signed_uri,
position=file_size,
headers=headers,
)
except Exception as err:
raise MlflowException(err)
def _get_presigned_uri(self, artifact_file_path):
"""
Gets the presigned URL required to upload a file to or download a file from a given Azure
storage location.
Args:
artifact_file_path: Path of the file relative to the artifact repository root.
Returns:
a string presigned URL.
"""
sas_token = (
f"?{self.credential.signature}"
if hasattr(self.credential, "signature")
else self.sas_token
)
return (
f"https://{self.account_name}.{self.domain_suffix}/{self.container}/"
f"{self.base_data_lake_directory}/{artifact_file_path}{sas_token}"
)
def _get_write_credential_infos(self, remote_file_paths) -> list[ArtifactCredentialInfo]:
return [
ArtifactCredentialInfo(signed_uri=self._get_presigned_uri(path))
for path in remote_file_paths
]
def _get_read_credential_infos(self, remote_file_paths) -> list[ArtifactCredentialInfo]:
return [
ArtifactCredentialInfo(signed_uri=self._get_presigned_uri(path))
for path in remote_file_paths
]
| AzureDataLakeArtifactRepository |
python | joke2k__faker | faker/providers/job/bn_BD/__init__.py | {
"start": 41,
"end": 20843
} | class ____(JobProvider):
"""
Implement job provider for ``bn_BD`` locale.
"""
jobs = (
"একাডেমিক গ্রন্থাগারিক",
"আবাসন ব্যবস্থাপক",
"অ্যাকাউন্টেন্ট, চার্টার্ড",
"অ্যাকাউন্টেন্ট, চার্টার্ড সার্টিফাইড",
"অ্যাকাউন্টেন্ট, চার্টার্ড ম্যানেজমেন্ট",
"অ্যাকাউন্টেন্ট, চার্টার্ড পাবলিক ফাইন্যান্স",
"অ্যাকাউন্টিং টেকনিশিয়ান",
"অভিনেতা",
"অ্যাকচুরি",
"আকুপাংচারিস্ট",
"প্রশাসক",
"প্রশাসক, কলা",
"প্রশাসক, দাতব্য/স্বেচ্ছাসেবী সংস্থা",
"প্রশাসক, সিভিল সার্ভিস",
"প্রশাসক, শিক্ষা",
"প্রশাসক, স্থানীয় সরকার",
"প্রশাসক, ক্রীড়া",
"প্রাপ্তবয়স্ক নির্দেশিকা কর্মী",
"প্রাপ্তবয়স্ক নার্স",
"বিজ্ঞাপন অ্যাকাউন্ট এক্সিকিউটিভ",
"বিজ্ঞাপন অ্যাকাউন্ট পরিকল্পনাকারী",
"বিজ্ঞাপন শিল্প পরিচালক",
"বিজ্ঞাপন কপিরাইটার",
"পরামর্শ কর্মী",
"বৈমানিক প্রকৌশলী",
"কৃষি পরামর্শক",
"কৃষি প্রকৌশলী",
"সহায়তা কর্মী",
"এয়ার ব্রোকার",
"এয়ার কেবিন ক্রু",
"এয়ার ট্রাফিক কন্ট্রোলার",
"এয়ারলাইন পাইলট",
"অ্যাম্বুলেন্স ব্যক্তি",
"সুবিধা উদ্যানতত্ত্ববিদ",
"বিশ্লেষনমূলক রসায়নবিদ",
"পশু পুষ্টিবিদ",
"প্রাণী প্রযুক্তিবিদ",
"অ্যানিমেটর",
"অ্যাপ্লিকেশন ডেভেলপার",
"আর্বোরিকালচারিস্ট",
"প্রত্নতত্ত্ববিদ",
"স্থপতি",
"স্থাপত্য প্রযুক্তিবিদ",
"আর্কাইভিস্ট",
"সশস্ত্র বাহিনী লজিস্টিক/সাপোর্ট/প্রশাসনিক কর্মকর্তা",
"সশস্ত্র বাহিনীর অপারেশনাল অফিসার",
"সশস্ত্র বাহিনীর প্রযুক্তিগত কর্মকর্তা",
"সশস্ত্র বাহিনীর প্রশিক্ষণ ও শিক্ষা অফিসার",
"আর্ট গ্যালারি ম্যানেজার",
"শিল্প থেরাপিস্ট",
"শিল্পী",
"কলা প্রশাসক",
"কলা উন্নয়ন কর্মকর্তা",
"সহযোগী অধ্যাপক",
"জ্যোতির্বিজ্ঞানী",
"অডিওলজিক্যাল সায়েন্টিস্ট",
"অটোমোটিভ ইঞ্জিনিয়ার",
"ব্যাঙ্কার",
"বরিস্তা",
"ব্যারিস্টার",
"ব্যারিস্টার কেরানি",
"সেরা ছেলে",
"বায়োকেমিস্ট, ক্লিনিকাল",
"বায়োমেডিকেল প্রকৌশলী",
"বায়োমেডিকাল বিজ্ঞানী",
"বন্ড ব্যবসায়ী",
"পুস্তক বিক্রেতা",
"ব্রুইং টেকনোলজিস্ট",
"সম্প্রচার প্রকৌশলী",
"সম্প্রচার সাংবাদিক",
"সম্প্রচার উপস্থাপক",
"বিল্ডিং কন্ট্রোল সার্ভেয়ার",
"বিল্ডিং সার্ভিস ইঞ্জিনিয়ার",
"বিল্ডিং সার্ভেয়ার",
"ক্রেতা, শিল্প",
"ক্রেতা, খুচরা",
"কেবিনের নাবিক",
"কল সেন্টার ম্যানেজার",
"কেমেরা চালাক",
"ক্যারিয়ার উপদেষ্টা",
"ক্যারিয়ার ইনফরমেশন অফিসার",
"মানচিত্রকার",
"পরিবেশন ব্যবস্থাপক",
"সিরামিক ডিজাইনার",
"দাতব্য তহবিল সংগ্রহকারী",
"চ্যারিটি অফিসার",
"চার্টার্ড হিসাবরক্ষক",
"চার্টার্ড সার্টিফাইড অ্যাকাউন্ট্যান্ট",
"চার্টার্ড লিগ্যাল এক্সিকিউটিভ (ইংল্যান্ড এবং ওয়েলস)",
"চার্টার্ড ক্ষতি সমন্বয়কারী",
"চার্টার্ড ম্যানেজমেন্ট অ্যাকাউন্ট্যান্ট",
"চার্টার্ড পাবলিক ফাইন্যান্স অ্যাকাউন্ট্যান্ট",
"রাসায়নিক প্রকৌশলী",
"রসায়নবিদ, বিশ্লেষণাত্মক",
"প্রধান নির্বাহী কর্মকর্তা",
"প্রধান অর্থনৈতিক কর্মকর্তা",
"প্রধান বিপণন কর্মকর্তা",
"বাহিনী প্রধান",
"প্রধান পরিচালন কর্মকর্তা",
"প্রধান কৌশল কর্মকর্তা",
"মুখ্য প্রযুক্তিবিদ্যা অফিসার",
"শিশু সাইকোথেরাপিস্ট",
"চিরোপডিস্ট",
"রোগ চিকিৎসা বিশেষ",
"সিভিল ইঞ্জিনিয়ার, পরামর্শ",
"সিভিল ইঞ্জিনিয়ার, কন্ট্রাক্টিং",
"সিভিল সার্ভিস প্রশাসক",
"সিভিল সার্ভিস ফাস্ট স্ট্রিমার",
"দাবি পরিদর্শক/মূল্যায়নকারী",
"ক্লিনিক্যাল বায়োকেমিস্ট",
"ক্লিনিকাল সাইটোজেনেটিস্ট",
"ক্লিনিকাল ভ্রূণ বিশেষজ্ঞ",
"ক্লিনিকাল আণবিক জেনেটিসিস্ট",
"ক্লিনিকাল মনোবৈজ্ঞানিক",
"ক্লিনিক্যাল রিসার্চ অ্যাসোসিয়েট",
"ক্লিনিক্যাল সায়েন্টিস্ট, হিস্টোকম্প্যাটিবিলিটি এবং ইমিউনোজেনেটিক্স",
"পোশাক/টেক্সটাইল প্রযুক্তিবিদ",
"রঙ প্রযুক্তিবিদ",
"বাণিজ্যিক আর্ট গ্যালারি ম্যানেজার",
"বাণিজ্যিক উদ্যানতত্ত্ববিদ",
"বাণিজ্যিক/আবাসিক সার্ভেয়ার",
"কমিশনিং এডিটর",
"যোগাযোগ প্রকৌশলী",
"সম্প্রদায়িক শিল্প কর্মী",
"সমাজ উন্নয়ন কর্মী",
"সম্প্রদায় শিক্ষা অফিসার",
"কমিউনিটি ফার্মাসিস্ট",
"কোম্পানি সচিব",
"নিয়ন্ত্রক",
"কম্পিউটার গেম ডেভেলপার",
"কনফারেন্স সেন্টার ম্যানেজার",
"সংরক্ষণ কর্মকর্তা, ঐতিহাসিক ভবন",
"সংরক্ষণ কর্মকর্তা, প্রকৃতি",
"সংরক্ষক, আসবাবপত্র",
"সংরক্ষক, জাদুঘর/গ্যালারি",
"পরামর্শ সিভিল ইঞ্জিনিয়ার",
"কন্ট্রাক্টিং সিভিল ইঞ্জিনিয়ার",
"ঠিকাদার",
"নিয়ন্ত্রণ এবং যন্ত্র প্রকৌশলী",
"কপি",
"কপিরাইটার, বিজ্ঞাপন",
"কর্পোরেট ইনভেস্টমেন্ট ব্যাংকার",
"কর্পোরেট কোষাধ্যক্ষ",
"কাউন্সেলিং সাইকোলজিস্ট",
"কাউন্সেলর",
"কিউরেটর",
"গ্রাহক পরিষেবা ব্যবস্থাপক",
"সাইটোজেনেটিসিস্ট",
"নৃত্য আন্দোলনের সাইকোথেরাপিস্ট",
"নর্তকী",
"ডেটা প্রসেসিং ম্যানেজার",
"তথ্য বিজ্ঞানী",
"ডাটাবেস প্রশাসক",
"বিক্রেতা",
"দন্ত চিকিৎসক",
"ডিজাইনার, প্রস্ফুটিত কাচ/দাগযুক্ত কাচ",
"ডিজাইনার, সিরামিক/মৃৎপাত্র",
"ডিজাইনার, প্রদর্শনী/ডিসপ্লে",
"ডিজাইনার, ফ্যাশন/পোশাক",
"ডিজাইনার, আসবাবপত্র",
"ডিজাইনার, গ্রাফিক",
"ডিজাইনার, শিল্প/পণ্য",
"ডিজাইনার, অভ্যন্তরীণ/স্থানিক",
"ডিজাইনার, গহনা",
"ডিজাইনার, মাল্টিমিডিয়া",
"ডিজাইনার, টেলিভিশন/ফিল্ম সেট",
"ডিজাইনার, টেক্সটাইল",
"উন্নয়নকর্মী, সম্প্রদায়",
"উন্নয়ন কর্মী, আন্তর্জাতিক সাহায্য",
"ডায়াগনস্টিক রেডিওগ্রাফার",
"আহার বিশেষজ্ঞ",
"কূটনৈতিক সার্ভিস অপারেশনাল অফিসার",
"ডিসপেন্সিং অপটিশিয়ান",
"ডাক্তার, সাধারণ অনুশীলন",
"ডাক্তার, হাসপাতাল",
"নাট্য থেরাপিস্ট",
"তুরপুন প্রকৌশলী",
"প্রাথমিক বছরের শিক্ষক",
"বাস্তু বিশেষজ্ঞ",
"অর্থনীতিবিদ",
"সম্পাদক, কমিশনিং",
"সম্পাদক, ফিল্ম/ভিডিও",
"সম্পাদক, পত্রিকার বৈশিষ্ট্য",
"সম্পাদকীয় সহকারী",
"শিক্ষা প্রশাসক",
"শিক্ষা কর্মকর্তা, সম্প্রদায়",
"শিক্ষা কর্মকর্তা, পরিবেশ",
"শিক্ষা কর্মকর্তা, জাদুঘর",
"শিক্ষামূলক মনোবিজ্ঞানী",
"তড়িৎ প্রকৌশলী",
"ইলেকট্রনিক্স প্রকৌশলী",
"ভ্রুণ বিশেষজ্ঞ, ক্লিনিকাল",
"জরুরি পরিকল্পনা/ব্যবস্থাপনা কর্মকর্তা",
"শক্তি প্রকৌশলী",
"শক্তি ব্যবস্থাপক",
"ইঞ্জিনিয়ার, অ্যারোনটিক্যাল",
"প্রকৌশলী, কৃষি",
"ইঞ্জিনিয়ার, স্বয়ংচালিত",
"প্রকৌশলী, বায়োমেডিকেল",
"প্রকৌশলী, সম্প্রচার (অপারেশন)",
"প্রকৌশলী, বিল্ডিং পরিষেবা",
"প্রকৌশলী, রাসায়নিক",
"প্রকৌশলী, সিভিল (পরামর্শ)",
"প্রকৌশলী, সিভিল (চুক্তি)",
"প্রকৌশলী, যোগাযোগ",
"প্রকৌশলী, নিয়ন্ত্রণ এবং উপকরণ",
"ইঞ্জিনিয়ার, ড্রিলিং",
"প্রকৌশলী, বৈদ্যুতিক",
"প্রকৌশলী, ইলেকট্রনিক্স",
"প্রকৌশলী, শক্তি",
"প্রকৌশলী, জমি",
"প্রকৌশলী, রক্ষণাবেক্ষণ",
"প্রকৌশলী, রক্ষণাবেক্ষণ (আইটি)",
"ইঞ্জিনিয়ার, ম্যানুফ্যাকচারিং",
"ইঞ্জিনিয়ার, ম্যানুফ্যাকচারিং সিস্টেম",
"প্রকৌশলী, উপকরণ",
"ইঞ্জিনিয়ার, মাইনিং",
"প্রকৌশলী, পেট্রোলিয়াম",
"প্রকৌশলী, উৎপাদন",
"ইঞ্জিনিয়ার, সাইট",
"প্রকৌশলী, কাঠামোগত",
"প্রকৌশলী, প্রযুক্তিগত বিক্রয়",
"প্রকৌশলী, জল",
"প্রকৌশলী ভূতত্ত্ববিদ",
"বিদেশী ভাষার শিক্ষক হিসাবে ইংরেজি",
"দ্বিতীয় ভাষার শিক্ষক হিসাবে ইংরেজি",
"পরিবেশ পরামর্শক",
"পরিবেশ শিক্ষা কর্মকর্তা",
"পরিবেশগত স্বাস্থ্য অনুশীলনকারী",
"পরিবেশ ব্যবস্থাপক",
"সমতা এবং বৈচিত্র্য কর্মকর্তা",
"ইকুইটি ব্যবসায়ী",
"এরগনোমিস্ট",
"এস্টেট এজেন্ট",
"এস্টেট ম্যানেজার/ল্যান্ড এজেন্ট",
"ইভেন্ট সংগঠক",
"ব্যায়াম ফিজিওলজিস্ট",
"প্রদর্শনী ডিজাইনার",
"প্রদর্শনী অফিসার, জাদুঘর/গ্যালারি",
"সুবিধা ম্যানেজার",
"খামার ব্যবস্থাপক",
"ফ্যাশান ডিজাইনার",
"ফাস্ট ফুড রেস্টুরেন্ট ম্যানেজার",
"ক্ষেত্র সিসমোলজিস্ট",
"ফিল্ড ট্রায়াল অফিসার",
"চলচ্চিত্র/ভিডিও সম্পাদক",
"একটি বন্ধু পূর্ণ নাম লিখুন",
"আর্থিক নিয়ন্ত্রক",
"অর্থনৈতিক ব্যবস্থাপক",
"আর্থিক পরিকল্পক",
"আর্থিক ঝুঁকি বিশ্লেষক",
"আর্থিক ব্যবসায়ী",
"ভালো শিল্পী",
"অগ্নিনির্বাপক",
"মাছ খামার ম্যানেজার",
"মৎস্য কর্মকর্তা",
"ফিটনেস সেন্টার ম্যানেজার",
"খাদ্য প্রযুক্তিবিদ",
"ফরেন্সিক সাইকোলজিস্ট",
"ফরেনসিক বিজ্ঞানী",
"বন/উডল্যান্ড ম্যানেজার",
"মালবাহী ফরওয়ার্ডার",
"আসবাব সংরক্ষণকারী/পুনরুদ্ধারকারী",
"ফার্নিচার ডিজাইনার",
"আরো শিক্ষার প্রভাষক",
"ফিউচার ট্রেডার",
"গফার",
"গেমস ডেভেলপার",
"গার্মেন্টস/টেক্সটাইল টেকনোলজিস্ট",
"সাধারণ অনুশীলন ডাক্তার",
"জেনেটিসিস্ট, আণবিক",
"জিওকেমিস্ট",
"ভৌগলিক তথ্য সিস্টেম অফিসার",
"ভূতত্ত্ববিদ, প্রকৌশল",
"ভূতত্ত্ববিদ, ওয়েলসাইট",
"জিওফিজিক্যাল ডেটা প্রসেসর",
"জিওফিজিসিস্ট/ফিল্ড সিসমোলজিস্ট",
"ভূ-বিজ্ঞানী",
"গ্লাস ব্লোয়ার/ডিজাইনার",
"সরকারি সামাজিক গবেষণা কর্মকর্তা",
"গ্রাফিক ডিজাইনার",
"হেমাটোলজিস্ট",
"স্বাস্থ্য ও নিরাপত্তা উপদেষ্টা",
"স্বাস্থ্য ও নিরাপত্তা পরিদর্শক",
"স্বাস্থ্য পদার্থবিদ",
"স্বাস্থ্য প্রচার বিশেষজ্ঞ",
"স্বাস্থ্য পরিষেবা ব্যবস্থাপক",
"স্বাস্থ্য পরিদর্শক",
"ভেষজবিদ",
"হেরিটেজ ম্যানেজার",
"হারপেটোলজিস্ট",
"উচ্চ শিক্ষা ক্যারিয়ার উপদেষ্টা",
"উচ্চ শিক্ষার প্রভাষক",
"ঐতিহাসিক ভবন পরিদর্শক/সংরক্ষণ কর্মকর্তা",
"ছুটির প্রতিনিধি",
"হোমিওপ্যাথ",
"হর্টিকালচারাল কনসালট্যান্ট",
"হর্টিকালচারাল থেরাপিস্ট",
"হর্টিকালচারিস্ট, সুবিধা",
"হর্টিকালচারিস্ট, বাণিজ্যিক",
"হাসপাতাল ডাক্তার",
"হাসপাতাল ফার্মাসিস্ট",
"হোটেল ব্যবস্থাপক",
"হাউজিং ম্যানেজার/অফিসার",
"মানব সম্পদ উন্নয়ন কর্মকর্তা",
"হাইড্রোজোলজিস্ট",
"হাইড্রোগ্রাফিক সার্ভেয়ার",
"জলবিদ",
"ইলাস্ট্রেটর",
"অভিবাসন কর্মকর্তা",
"ইমিউনোলজিস্ট",
"শিল্প ক্রেতা",
"শিল্প/পণ্য ডিজাইনার",
"তথ্য কর্মকর্তা",
"তথ্য সিস্টেম ম্যানেজার",
"বীমা অ্যাকাউন্ট ম্যানেজার",
"বীমা ব্রোকার",
"বীমা দাবির হ্যান্ডলার",
"বীমা ঝুঁকি সার্ভেয়ার",
"বীমা আন্ডাররাইটার",
"গোয়েন্দা বিশ্লেষক",
"অভ্যন্তরীণ এবং স্থানিক ডিজাইনার",
"আন্তর্জাতিক সাহায্য/উন্নয়ন কর্মী",
"দোভাষী",
"বিনিয়োগ বিশ্লেষক",
"বিনিয়োগ ব্যাংকার, কর্পোরেট",
"বিনিয়োগ ব্যাংকার, অপারেশনাল",
"তথ্য ও প্রযুক্তি বিশেষজ্ঞ",
"আইটি বিক্রয় পেশাদার",
"আইটি প্রযুক্তিগত সহায়তা কর্মকর্তা",
"আইটি প্রশিক্ষক",
"গহনা ডিজাইনার",
"সাংবাদিক, সম্প্রচার",
"সাংবাদিক, পত্রিকা",
"সাংবাদিক, সংবাদপত্র",
"ভূমি",
"ভূমি/ভৌমিক জরিপকারী",
"আড়াআড়ি স্থপতি",
"আইনজীবী",
"লার্নিং অক্ষমতা নার্স",
"শিক্ষার পরামর্শদাতা",
"প্রভাষক, আরও শিক্ষা",
"প্রভাষক, উচ্চ শিক্ষা",
"আইনি নির্বাহী",
"আইনি সচিব",
"অবসর কেন্দ্রের ব্যবস্থাপক",
"লেক্সিকোগ্রাফার",
"গ্রন্থাগারিক, একাডেমিক",
"লাইব্রেরিয়ান, পাবলিক",
"লাইসেন্সপ্রাপ্ত পরিবাহক",
"লাইটিং টেকনিশিয়ান, ব্রডকাস্টিং/ফিল্ম/ভিডিও",
"লবিস্ট",
"স্থানীয় সরকার কর্মকর্তা",
"অবস্থান ম্যানেজার",
"লজিস্টিক এবং ডিস্ট্রিবিউশন ম্যানেজার",
"লস সমন্বয়কারী, চার্টার্ড",
"পত্রিকা বৈশিষ্ট্য সম্পাদক",
"ম্যাগাজিন সাংবাদিক",
"রক্ষণাবেক্ষণ প্রকৌশলী",
"বানান",
"ব্যবস্থাপনা পরামর্শক",
"ম্যানুফ্যাকচারিং ইঞ্জিনিয়ার",
"উৎপাদন সিস্টেম ইঞ্জিনিয়ার",
"সামুদ্রিক বিজ্ঞানী",
"বাজার গবেষক",
"বিপণন নির্বাহী",
"পদার্থ প্রকৌশলী",
"যন্ত্র কৌশলী",
"মিডিয়া ক্রেতা",
"মিডিয়া পরিকল্পনাকারী",
"চিকিৎসা চিত্রকর",
"চিকিৎসা পরীক্ষাগার বৈজ্ঞানিক কর্মকর্তা",
"চিকিৎসা পদার্থবিদ",
"চিকিৎসা বিক্রয় প্রতিনিধি",
"চিকিৎসা সচিব",
"মেডিকেল টেকনিক্যাল অফিসার",
"মানসিক স্বাস্থ্য সেবিকা",
"মার্চেন্ডাইজার, খুচরা",
"মার্চেন্ট নেভি অফিসার",
"ধাতুবিদ",
"আবহাওয়াবিদ",
"মাইক্রোবায়োলজিস্ট",
"ধাত্রী",
"খনিজ জরিপকারী",
"খনি প্রকৌশলী",
"মাডলগার",
"মাল্টিমিডিয়া প্রোগ্রামার",
"মাল্টিমিডিয়া বিশেষজ্ঞ",
"জাদুঘর শিক্ষা অফিসার",
"জাদুঘর/গ্যালারি সংরক্ষক",
"জাদুঘর/গ্যালারি কিউরেটর",
"জাদুঘর/গ্যালারি প্রদর্শনী কর্মকর্তা",
"মিউজিক থেরাপিস্ট",
"সঙ্গীত শিক্ষক",
"সঙ্গীতশিল্পী",
"প্রকৃতি সংরক্ষণ কর্মকর্তা",
"নৌ - স্থপতি",
"নেটওয়ার্ক ইঞ্জিনিয়ার",
"নিউরোসার্জন",
"সংবাদপত্রের সাংবাদিক",
"নার্স, প্রাপ্তবয়স্ক",
"নার্স, বাচ্চাদের",
"নার্স, শেখার অক্ষমতা",
"নার্স, মানসিক স্বাস্থ্য",
"পুষ্টি থেরাপিস্ট",
"পেশাগত স্বাস্থ্যবিদ",
"পেশাগত মনোবিজ্ঞানী",
"অকুপেশনাল থেরাপিস্ট",
"সমুদ্রবিদ",
"অফিস ব্যবস্থাপক",
"অনকোলজিস্ট",
"অপারেশনাল ইনভেস্টমেন্ট ব্যাংকার",
"অপারেশনাল গবেষক",
"অপারেশনস জিওলজিস্ট",
"চক্ষু বিশেষজ্ঞ",
"চক্ষু বিশেষজ্ঞ, বিতরণ",
"চক্ষু বিশেষজ্ঞ",
"অর্থোপটিস্ট",
"অস্টিওপ্যাথ",
"বাইরের কার্যক্রম/শিক্ষা ব্যবস্থাপক",
"পেডিয়াট্রিক নার্স",
"প্যারামেডিক",
"যাত্রী পরিবহন ব্যবস্থাপক",
"পেটেন্ট অ্যাটর্নি",
"পেটেন্ট পরীক্ষক",
"প্যাথলজিস্ট",
"পেনশন স্কিম ম্যানেজার",
"পেনশন পরামর্শক",
"ব্যক্তিগত সহকারী",
"নিজের অফিসার",
"পেট্রোলিয়াম প্রকৌশলী",
"ফার্মাসিস্ট, সম্প্রদায়",
"ফার্মাসিস্ট, হাসপাতাল",
"ফার্মাকোলজিস্ট",
"ফটোগ্রাফার",
"পদার্থবিদ, চিকিৎসা",
"শারীরবৃত্তীয় বিজ্ঞানী",
"ফিজিওথেরাপিস্ট",
"ফাইটোথেরাপিস্ট",
"পাইলট, এয়ারলাইন",
"পরিকল্পনা ও উন্নয়ন সার্ভেয়ার",
"উদ্ভিদ প্রজননকারী/জিনতত্ত্ববিদ",
"পডিয়াট্রিস্ট",
"পুলিশ অফিসার",
"রাজনীতিবিদ সহকারী",
"উপস্থাপক, সম্প্রচার",
"ফটো সাংবাদিক",
"প্রেস সাব",
"প্রাথমিক স্কুল শিক্ষক",
"প্রিন্ট প্রোডাকশন প্ল্যানার",
"প্রিন্টমেকার",
"কারাগার অফিসার",
"বেসরকারী সঙ্গীত শিক্ষক",
"পরিদর্শক",
"প্রযোজক, রেডিও",
"প্রযোজক, টেলিভিশন/ফিল্ম/ভিডিও",
"পণ্য ডিজাইনার",
"পণ্য ব্যবস্থাপক",
"পণ্য/প্রক্রিয়া উন্নয়ন বিজ্ঞানী",
"উৎপাদন সহকারী, রেডিও",
"প্রযোজনা সহকারী, টেলিভিশন",
"প্রযোজনা ডিজাইনার, থিয়েটার/টেলিভিশন/ফিল্ম",
"উৎপাদন প্রকৌশলী",
"উৎপাদন ব্যবস্থাপক",
"অধ্যাপক এমেরিটাস",
"প্রোগ্রাম গবেষক, সম্প্রচার/ফিল্ম/ভিডিও",
"প্রোগ্রামার, অ্যাপ্লিকেশন",
"প্রোগ্রামার, মাল্টিমিডিয়া",
"প্রোগ্রামার, সিস্টেম",
"প্রুফরিডার",
"সাইকিয়াট্রিক নার্স",
"সাইকিয়াট্রিস্ট",
"মনোবিজ্ঞানী, ক্লিনিকাল",
"মনোবিজ্ঞানী, কাউন্সেলিং",
"মনোবিজ্ঞানী, শিক্ষামূলক",
"মনোবিজ্ঞানী, ফরেনসিক",
"মনোবিজ্ঞানী, পেশাগত",
"মনোবিজ্ঞানী, কারাগার এবং প্রবেশন পরিষেবা",
"মনোবিজ্ঞানী, খেলাধুলা এবং ব্যায়াম",
"সাইকোথেরাপিস্ট",
"সাইকোথেরাপিস্ট, শিশু",
"সাইকোথেরাপিস্ট, নাচের আন্দোলন",
"পাবলিক অ্যাফেয়ার্স কনসালটেন্ট",
"পাবলিক হাউস ম্যানেজার",
"পাবলিক লাইব্রেরিয়ান",
"জনসংযোগ অ্যাকাউন্ট এক্সিকিউটিভ",
"জনসংযোগ কর্মকর্তা",
"প্রকাশিত অনুলিপি",
"পাবলিশিং রাইট ম্যানেজার",
"ক্রয় ব্যবস্থাপক",
"গুনগতমান ব্যবস্থাপক",
"পরিমাণ পরিমাপক",
"কোয়ারি ম্যানেজার",
"জাতি সম্পর্ক অফিসার",
"বিকিরণ সুরক্ষা অনুশীলনকারী",
"রেডিও সম্প্রচার সহকারী",
"রেডিও প্রযোজক",
"রেডিওগ্রাফার, ডায়াগনস্টিক",
"রেডিওগ্রাফার, থেরাপিউটিক",
"রেঞ্জার/ওয়ার্ডেন",
"রেকর্ড ম্যানেজার",
"নিয়োগ পরামর্শদাতা",
"রিসাইক্লিং অফিসার",
"নিয়ন্ত্রক বিষয়ক কর্মকর্তা",
"গবেষণা কর্মকর্তা, সরকার",
"গবেষণা কর্মকর্তা, রাজনৈতিক দল",
"গবেষণা কর্মকর্তা, ট্রেড ইউনিয়ন",
"গবেষণা বিজ্ঞানী (জীবন বিজ্ঞান)",
"গবেষণা বিজ্ঞানী (গণিত)",
"গবেষণা বিজ্ঞানী (চিকিৎসা)",
"গবেষণা বিজ্ঞানী (শারীরিক বিজ্ঞান)",
"রেস্টুরেন্ট ম্যানেজার",
"রেস্তোরাঁর ম্যানেজার, ফাস্ট ফুড",
"খুচরা ব্যাংকার",
"খুচরা ক্রেতা",
"খুচরা পরিচালক",
"খুচরা মার্চেন্ডাইজার",
"ঝুঁকি বিশ্লেষক",
"ঝুকি ব্যাবস্থাপক",
"রানার, সম্প্রচার/ফিল্ম/ভিডিও",
"পল্লী অনুশীলন সার্ভেয়ার",
"সেলস এক্সিকিউটিভ",
"বিক্রয় পেশাদার, আইটি",
"সেলস প্রমোশন অ্যাকাউন্ট এক্সিকিউটিভ",
"বিজ্ঞান লেখক",
"বৈজ্ঞানিক পরীক্ষাগার প্রযুক্তিবিদ",
"বিজ্ঞানী, অডিওলজিক্যাল",
"বিজ্ঞানী, বায়োমেডিকাল",
"বিজ্ঞানী, ক্লিনিকাল (হিস্টোকম্প্যাটিবিলিটি এবং ইমিউনোজেনেটিক্স)",
"বিজ্ঞানী, ফরেনসিক",
"বিজ্ঞানী, সামুদ্রিক",
"বিজ্ঞানী, শারীরবৃত্তীয়",
"বিজ্ঞানী, পণ্য/প্রক্রিয়া উন্নয়ন",
"বিজ্ঞানী, গবেষণা (জীবন বিজ্ঞান)",
"বিজ্ঞানী, গবেষণা (গণিত)",
"বিজ্ঞানী, গবেষণা (চিকিৎসা)",
"বিজ্ঞানী, গবেষণা (ভৌত বিজ্ঞান)",
"বিজ্ঞানী, জলের গুণমান",
"মাধ্যমিক বিদ্যালয়ের শিক্ষক",
"সচিব/প্রশাসক",
"সচিব, কোম্পানি",
"সিসমিক ইন্টারপ্রেটার",
"সিনিয়র ট্যাক্স পেশাদার/কর পরিদর্শক",
"সেট ডিজাইনার",
"জাহাজ ব্রোকার",
"প্রকল্প প্রকৌশলী",
"সামাজিক গবেষণা কর্মকর্তা, সরকার",
"সমাজ গবেষক",
"সমাজ কর্মী",
"সফটওয়্যার ইঞ্জিনিয়ার",
"মৃত্তিকা বিজ্ঞানী",
"উকিল",
"সলিসিটর, স্কটল্যান্ড",
"সাউন্ড টেকনিশিয়ান, ব্রডকাস্টিং/ফিল্ম/ভিডিও",
"বিশেষ শিক্ষাগত প্রয়োজন শিক্ষক",
"বিশেষ প্রভাব শিল্পী",
"স্পিচ অ্যান্ড ল্যাঙ্গুয়েজ থেরাপিস্ট",
"ক্রীড়া এবং ব্যায়াম মনোবিজ্ঞানী",
"ক্রীড়া প্রশাসক",
"ক্রীড়াশিক্ষক",
"ক্রীড়া উন্নয়ন কর্মকর্তা",
"ক্রীড়া থেরাপিস্ট",
"পর্যায় ম্যানেজার",
"পরিসংখ্যানবিদ",
"স্ট্রাকচারাল ইঞ্জিনিয়ার",
"সাব",
"সার্জন",
"জরিপকারী, বিল্ডিং",
"সার্ভেয়ার, বিল্ডিং কন্ট্রোল",
"সার্ভেয়ার, বাণিজ্যিক/আবাসিক",
"জরিপকারী, হাইড্রোগ্রাফিক",
"জরিপকারী, বীমা",
"জরিপকারী, ভূমি/জিওম্যাটিক্স",
"জরিপকারী, খনিজ",
"জরিপকারী, খনির",
"জরিপকারী, পরিকল্পনা ও উন্নয়ন",
"জরিপকারী, পরিমাণ",
"জরিপকারী, গ্রামীণ অনুশীলন",
"সিস্টেম বিশ্লেষক",
"সিস্টেম ডেভেলপার",
"কর উপদেষ্টা",
"কর পরিদর্শক",
"শিক্ষক, বয়স্ক শিক্ষা",
"শিক্ষক, প্রারম্ভিক বছর / প্রাক",
"শিক্ষক, ইংরেজি একটি বিদেশী ভাষা হিসাবে",
"শিক্ষক, সঙ্গীত",
"শিক্ষক, প্রাথমিক বিদ্যালয়",
"শিক্ষক, মাধ্যমিক বিদ্যালয়",
"শিক্ষক, বিশেষ শিক্ষাগত প্রয়োজন",
"পড়ানো ল্যাবরেটরি টেকনিশিয়ান",
"প্রযুক্তিগত লেখক",
"প্রযুক্তিগত ব্রুয়ার",
"প্রযুক্তিগত বিক্রয় প্রকৌশলী",
"টিইএফএল শিক্ষক",
"টেলিযোগাযোগ গবেষক",
"টেলিভিশন ক্যামেরা অপারেটর",
"টেলিভিশন ফ্লোর ম্যানেজার",
"টেলিভিশন প্রযোজনা সহকারী",
"টেলিভিশন/ফিল্ম/ভিডিও প্রযোজক",
"টেক্সটাইল ডিজাইনার",
"থিয়েটার ডিরেক্টর",
"থিয়েটার ম্যানেজার",
"থিয়েটার স্টেজ ম্যানেজার",
"থিম পার্ক ম্যানেজার",
"থেরাপিউটিক রেডিওগ্রাফার",
"থেরাপিস্ট, শিল্প",
"থেরাপিস্ট, নাটক",
"থেরাপিস্ট, উদ্যানবিদ্যা",
"থেরাপিস্ট, সঙ্গীত",
"থেরাপিস্ট, পুষ্টি",
"থেরাপিস্ট, পেশাগত",
"থেরাপিস্ট, বক্তৃতা এবং ভাষা",
"থেরাপিস্ট, খেলাধুলা",
"ট্যুর ব্যবস্থাপক",
"পর্যটন কর্মকর্তা",
"পর্যটন তথ্য কেন্দ্র ব্যবস্থাপক",
"শহর পরিকল্পনাকারী",
"বিষাক্ত বিশেষজ্ঞ",
"ট্রেড মার্ক অ্যাটর্নি",
"ট্রেড ইউনিয়ন গবেষণা কর্মকর্তা",
"ট্রেডিং স্ট্যান্ডার্ড অফিসার",
"প্রশিক্ষণ ও উন্নয়ন কর্মকর্তা",
"অনুবাদক",
"পরিবহন পরিকল্পনাকারী",
"ট্রাভেল এজেন্সি ম্যানেজার",
"বৃক্ষ সার্জন",
"পশুচিকিৎসক",
"ভিডিও এডিটর",
"ভিজ্যুয়াল মার্চেন্ডাইজার",
"স্বেচ্ছাসেবক সমন্বয়কারী",
"ওয়ার্ডেন/রেঞ্জার",
"গুদাম ম্যানেজার",
"বর্জ্য ব্যবস্থাপনা কর্মকর্তা",
"জল প্রকৌশলী",
"পানির গুণমান বিজ্ঞানী",
"ওয়েব ডিজাইনার",
"ওয়েলসাইট ভূতত্ত্ববিদ",
"লেখক",
"যুবকর্মী",
)
| Provider |
python | kamyu104__LeetCode-Solutions | Python/closest-nodes-queries-in-a-binary-search-tree.py | {
"start": 1395,
"end": 2234
} | class ____(object):
def closestNodes(self, root, queries):
"""
:type root: Optional[TreeNode]
:type queries: List[int]
:rtype: List[List[int]]
"""
def dfs(node):
if not node:
return
dfs(node.left)
inorder.append(node.val)
dfs(node.right)
inorder = []
dfs(root)
result = []
for q in queries:
i = bisect.bisect_left(inorder, q)
if i == len(inorder):
result.append([inorder[i-1], -1])
elif inorder[i] == q:
result.append([inorder[i], inorder[i]])
elif i-1 >= 0:
result.append([inorder[i-1], inorder[i]])
else:
result.append([-1, inorder[i]])
return result
| Solution2 |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/dimension.py | {
"start": 434,
"end": 6948
} | class ____:
"""
Specified dimension (width/height) of a user control or window.
The layout engine tries to honor the preferred size. If that is not
possible, because the terminal is larger or smaller, it tries to keep in
between min and max.
:param min: Minimum size.
:param max: Maximum size.
:param weight: For a VSplit/HSplit, the actual size will be determined
by taking the proportion of weights from all the children.
E.g. When there are two children, one with a weight of 1,
and the other with a weight of 2, the second will always be
twice as big as the first, if the min/max values allow it.
:param preferred: Preferred size.
"""
def __init__(
self,
min: int | None = None,
max: int | None = None,
weight: int | None = None,
preferred: int | None = None,
) -> None:
if weight is not None:
assert weight >= 0 # Also cannot be a float.
assert min is None or min >= 0
assert max is None or max >= 0
assert preferred is None or preferred >= 0
self.min_specified = min is not None
self.max_specified = max is not None
self.preferred_specified = preferred is not None
self.weight_specified = weight is not None
if min is None:
min = 0 # Smallest possible value.
if max is None: # 0-values are allowed, so use "is None"
max = 1000**10 # Something huge.
if preferred is None:
preferred = min
if weight is None:
weight = 1
self.min = min
self.max = max
self.preferred = preferred
self.weight = weight
# Don't allow situations where max < min. (This would be a bug.)
if max < min:
raise ValueError("Invalid Dimension: max < min.")
# Make sure that the 'preferred' size is always in the min..max range.
if self.preferred < self.min:
self.preferred = self.min
if self.preferred > self.max:
self.preferred = self.max
@classmethod
def exact(cls, amount: int) -> Dimension:
"""
Return a :class:`.Dimension` with an exact size. (min, max and
preferred set to ``amount``).
"""
return cls(min=amount, max=amount, preferred=amount)
@classmethod
def zero(cls) -> Dimension:
"""
Create a dimension that represents a zero size. (Used for 'invisible'
controls.)
"""
return cls.exact(amount=0)
def __repr__(self) -> str:
fields = []
if self.min_specified:
fields.append(f"min={self.min!r}")
if self.max_specified:
fields.append(f"max={self.max!r}")
if self.preferred_specified:
fields.append(f"preferred={self.preferred!r}")
if self.weight_specified:
fields.append(f"weight={self.weight!r}")
return "Dimension({})".format(", ".join(fields))
def sum_layout_dimensions(dimensions: list[Dimension]) -> Dimension:
"""
Sum a list of :class:`.Dimension` instances.
"""
min = sum(d.min for d in dimensions)
max = sum(d.max for d in dimensions)
preferred = sum(d.preferred for d in dimensions)
return Dimension(min=min, max=max, preferred=preferred)
def max_layout_dimensions(dimensions: list[Dimension]) -> Dimension:
"""
Take the maximum of a list of :class:`.Dimension` instances.
Used when we have a HSplit/VSplit, and we want to get the best width/height.)
"""
if not len(dimensions):
return Dimension.zero()
# If all dimensions are size zero. Return zero.
# (This is important for HSplit/VSplit, to report the right values to their
# parent when all children are invisible.)
if all(d.preferred == 0 and d.max == 0 for d in dimensions):
return Dimension.zero()
# Ignore empty dimensions. (They should not reduce the size of others.)
dimensions = [d for d in dimensions if d.preferred != 0 and d.max != 0]
if dimensions:
# Take the highest minimum dimension.
min_ = max(d.min for d in dimensions)
# For the maximum, we would prefer not to go larger than then smallest
# 'max' value, unless other dimensions have a bigger preferred value.
# This seems to work best:
# - We don't want that a widget with a small height in a VSplit would
# shrink other widgets in the split.
# If it doesn't work well enough, then it's up to the UI designer to
# explicitly pass dimensions.
max_ = min(d.max for d in dimensions)
max_ = max(max_, max(d.preferred for d in dimensions))
# Make sure that min>=max. In some scenarios, when certain min..max
# ranges don't have any overlap, we can end up in such an impossible
# situation. In that case, give priority to the max value.
# E.g. taking (1..5) and (8..9) would return (8..5). Instead take (8..8).
if min_ > max_:
max_ = min_
preferred = max(d.preferred for d in dimensions)
return Dimension(min=min_, max=max_, preferred=preferred)
else:
return Dimension()
# Anything that can be converted to a dimension.
AnyDimension = Union[
None, # None is a valid dimension that will fit anything.
int,
Dimension,
# Callable[[], 'AnyDimension'] # Recursive definition not supported by mypy.
Callable[[], Any],
]
def to_dimension(value: AnyDimension) -> Dimension:
"""
Turn the given object into a `Dimension` object.
"""
if value is None:
return Dimension()
if isinstance(value, int):
return Dimension.exact(value)
if isinstance(value, Dimension):
return value
if callable(value):
return to_dimension(value())
raise ValueError("Not an integer or Dimension object.")
def is_dimension(value: object) -> TypeGuard[AnyDimension]:
"""
Test whether the given value could be a valid dimension.
(For usage in an assertion. It's not guaranteed in case of a callable.)
"""
if value is None:
return True
if callable(value):
return True # Assume it's a callable that doesn't take arguments.
if isinstance(value, (int, Dimension)):
return True
return False
# Common alias.
D = Dimension
# For backward-compatibility.
LayoutDimension = Dimension
| Dimension |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 851239,
"end": 852871
} | class ____(sgqlc.types.Type):
"""The value of a pull request field in a Project item."""
__schema__ = github_schema
__field_names__ = ("field", "pull_requests")
field = sgqlc.types.Field(sgqlc.types.non_null("ProjectV2FieldConfiguration"), graphql_name="field")
"""The field that contains this value."""
pull_requests = sgqlc.types.Field(
"PullRequestConnection",
graphql_name="pullRequests",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(PullRequestOrder, graphql_name="orderBy", default={"field": "CREATED_AT", "direction": "ASC"}),
),
)
),
)
"""The pull requests for this field
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`PullRequestOrder`): Ordering options for pull
requests. (default: `{field: CREATED_AT, direction: ASC}`)
"""
| ProjectV2ItemFieldPullRequestValue |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 40499,
"end": 41746
} | class ____(PlotActionTool):
""" Abstract base class for zoom action tools. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
renderers = Either(Auto, List(Instance(DataRenderer)), default="auto", help="""
Restrict zoom to ranges used by the provided data renderers. If ``"auto"``
then all ranges provided by the cartesian frame will be used.
""")
# TODO ZoomInTool dimensions should probably be constrained to be the same as ZoomOutTool
dimensions = Enum(Dimensions, default="both", help="""
Which dimensions the zoom tool is constrained to act in. By default
the tool will zoom in any dimension, but can be configured to only
zoom horizontally across the width of the plot, or vertically across
the height of the plot.
""")
factor = Percent(default=0.1, help="""
Percentage of the range to zoom for each usage of the tool.
""")
level = NonNegative(Int, default=0, help="""
When working with composite scales (sub-coordinates), this property
allows to configure which set of ranges to scale. The default is to
scale top-level (frame) ranges.
""")
| ZoomBaseTool |
python | gevent__gevent | src/gevent/tests/test__ssl.py | {
"start": 1088,
"end": 5597
} | class ____(test__socket.TestTCP):
# To generate:
# openssl req -x509 -newkey rsa:4096 -keyout test_server.key -out test_server.crt -days 36500 -nodes -subj '/CN=localhost'
certfile = os.path.join(os.path.dirname(__file__), 'test_server.crt')
privfile = os.path.join(os.path.dirname(__file__), 'test_server.key')
# Python 2.x has socket.sslerror (which is an alias for
# ssl.SSLError); That's gone in Py3 though. In Python 2, most timeouts are raised
# as SSLError, but Python 3 raises the normal socket.timeout instead. So this has
# the effect of making TIMEOUT_ERROR be SSLError on Py2 and socket.timeout on Py3
# See https://bugs.python.org/issue10272.
# PyPy3 7.2 has a bug, though: it shares much of the SSL implementation with Python 2,
# and it unconditionally does `socket.sslerror = SSLError` when ssl is imported.
# So we can't rely on getattr/hasattr tests, we must be explicit.
TIMEOUT_ERROR = socket.timeout # pylint:disable=no-member
def _setup_listener(self):
listener, raw_listener = ssl_listener(self.privfile, self.certfile)
self._close_on_teardown(raw_listener)
return listener
def create_connection(self, *args, **kwargs): # pylint:disable=signature-differs
return self._close_on_teardown(
# pylint:disable=deprecated-method
wrap_socket(super(TestSSL, self).create_connection(*args, **kwargs)))
# The SSL library can take a long time to buffer the large amount of data we're trying
# to send, so we can't compare to the timeout values
_test_sendall_timeout_check_time = False
# The SSL layer has extra buffering, so test_sendall needs
# to send a very large amount to make it timeout
_test_sendall_data = data_sent = b'hello' * 100000000
test_sendall_array = greentest.skipOnMacOnCI("Sometimes misses data")(
greentest.skipOnManylinux("Sometimes misses data")(
test__socket.TestTCP.test_sendall_array
)
)
test_sendall_str = greentest.skipOnMacOnCI("Sometimes misses data")(
greentest.skipOnManylinux("Sometimes misses data")(
test__socket.TestTCP.test_sendall_str
)
)
@greentest.skipOnWindows("Not clear why we're skipping")
def test_ssl_sendall_timeout0(self):
# Issue #317: SSL_WRITE_PENDING in some corner cases
server_sock = []
acceptor = test__socket.Thread(target=lambda: server_sock.append(
# pylint:disable=no-member
self.listener.accept()))
client = self.create_connection()
client.setblocking(False)
try:
# Python 3 raises ssl.SSLWantWriteError; Python 2 simply *hangs*
# on non-blocking sockets because it's a simple loop around
# send(). Python 2.6 doesn't have SSLWantWriteError
expected = getattr(ssl, 'SSLWantWriteError', ssl.SSLError)
with self.assertRaises(expected):
client.sendall(self._test_sendall_data)
finally:
acceptor.join()
client.close()
server_sock[0][0].close()
# def test_fullduplex(self):
# try:
# super(TestSSL, self).test_fullduplex()
# except LoopExit:
# if greentest.LIBUV and greentest.WIN:
# # XXX: Unable to duplicate locally
# raise greentest.SkipTest("libuv on Windows sometimes raises LoopExit")
# raise
@greentest.ignores_leakcheck
@greentest.skipOnPy310("No longer raises SSLError")
def test_empty_send(self):
# Issue 719
# Sending empty bytes with the 'send' method raises
# ssl.SSLEOFError in the stdlib. PyPy 4.0 and CPython 2.6
# both just raise the superclass, ssl.SSLError.
# Ignored during leakchecks because the third or fourth iteration of the
# test hangs on CPython 2/posix for some reason, likely due to
# the use of _close_on_teardown keeping something alive longer than intended.
# cf test__makefile_ref
with self.assertRaises(ssl.SSLError):
super(TestSSL, self).test_empty_send()
@greentest.ignores_leakcheck
def test_sendall_nonblocking(self):
# Override; doesn't work with SSL sockets.
pass
@greentest.ignores_leakcheck
def test_connect_with_type_flags_ignored(self):
# Override; doesn't work with SSL sockets.
pass
if __name__ == '__main__':
greentest.main()
| TestSSL |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_getitem_test.py | {
"start": 4305,
"end": 26715
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
longMessage = True # Property in unittest.Testcase. pylint: disable=invalid-name
#=============================================================================
# RaggedTensor.__getitem__
#=============================================================================
def _TestGetItem(self, rt, slice_spec, expected, expected_shape=None):
"""Helper function for testing RaggedTensor.__getitem__.
Checks that calling `rt.__getitem__(slice_spec) returns the expected value.
Checks three different configurations for each slice spec:
* Call __getitem__ with the slice spec as-is (with int values)
* Call __getitem__ with int values in the slice spec wrapped in
`tf.constant()`.
* Call __getitem__ with int values in the slice spec wrapped in
`tf.compat.v1.placeholder()` (so value is not known at graph
construction time).
Args:
rt: The RaggedTensor to test.
slice_spec: The slice spec.
expected: The expected value of rt.__getitem__(slice_spec), as a python
list; or an exception class.
expected_shape: The expected shape for `rt.__getitem__(slice_spec)`.
"""
tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True)
tensor_slice_spec2 = _make_tensor_slice_spec(slice_spec, False)
value1 = rt.__getitem__(slice_spec)
value2 = rt.__getitem__(tensor_slice_spec1)
value3 = rt.__getitem__(tensor_slice_spec2)
self.assertAllEqual(value1, expected, 'slice_spec=%s' % (slice_spec,))
self.assertAllEqual(value2, expected, 'slice_spec=%s' % (slice_spec,))
self.assertAllEqual(value3, expected, 'slice_spec=%s' % (slice_spec,))
if expected_shape is not None:
value1.shape.assert_is_compatible_with(expected_shape)
value2.shape.assert_is_compatible_with(expected_shape)
value3.shape.assert_is_compatible_with(expected_shape)
def _TestGetItemException(self, rt, slice_spec, expected, message):
"""Helper function for testing RaggedTensor.__getitem__ exceptions."""
tensor_slice_spec = _make_tensor_slice_spec(slice_spec, True)
with self.assertRaisesRegex(expected, message):
self.evaluate(rt.__getitem__(slice_spec))
with self.assertRaisesRegex(expected, message):
self.evaluate(rt.__getitem__(tensor_slice_spec))
@parameterized.parameters(
# Tests for rt[i]
(SLICE_BUILDER[-5], EXAMPLE_RAGGED_TENSOR_2D[-5]),
(SLICE_BUILDER[-4], EXAMPLE_RAGGED_TENSOR_2D[-4]),
(SLICE_BUILDER[-1], EXAMPLE_RAGGED_TENSOR_2D[-1]),
(SLICE_BUILDER[0], EXAMPLE_RAGGED_TENSOR_2D[0]),
(SLICE_BUILDER[1], EXAMPLE_RAGGED_TENSOR_2D[1]),
(SLICE_BUILDER[4], EXAMPLE_RAGGED_TENSOR_2D[4]),
# Tests for rt[i:]
(SLICE_BUILDER[-6:], EXAMPLE_RAGGED_TENSOR_2D[-6:]),
(SLICE_BUILDER[-3:], EXAMPLE_RAGGED_TENSOR_2D[-3:]),
(SLICE_BUILDER[-1:], EXAMPLE_RAGGED_TENSOR_2D[-1:]),
(SLICE_BUILDER[0:], EXAMPLE_RAGGED_TENSOR_2D[0:]),
(SLICE_BUILDER[3:], EXAMPLE_RAGGED_TENSOR_2D[3:]),
(SLICE_BUILDER[5:], EXAMPLE_RAGGED_TENSOR_2D[5:]),
# Tests for rt[:j]
(SLICE_BUILDER[:-6], EXAMPLE_RAGGED_TENSOR_2D[:-6]),
(SLICE_BUILDER[:-3], EXAMPLE_RAGGED_TENSOR_2D[:-3]),
(SLICE_BUILDER[:-1], EXAMPLE_RAGGED_TENSOR_2D[:-1]),
(SLICE_BUILDER[:0], EXAMPLE_RAGGED_TENSOR_2D[:0]),
(SLICE_BUILDER[:3], EXAMPLE_RAGGED_TENSOR_2D[:3]),
(SLICE_BUILDER[:5], EXAMPLE_RAGGED_TENSOR_2D[:5]),
# Tests for rt[i:j]
(SLICE_BUILDER[0:3], EXAMPLE_RAGGED_TENSOR_2D[0:3]),
(SLICE_BUILDER[3:5], EXAMPLE_RAGGED_TENSOR_2D[3:5]),
(SLICE_BUILDER[-5:3], EXAMPLE_RAGGED_TENSOR_2D[-5:3]),
(SLICE_BUILDER[3:1], EXAMPLE_RAGGED_TENSOR_2D[3:1]),
(SLICE_BUILDER[-1:1], EXAMPLE_RAGGED_TENSOR_2D[-1:1]),
(SLICE_BUILDER[1:-1], EXAMPLE_RAGGED_TENSOR_2D[1:-1]),
# Tests for rt[i, j]
(SLICE_BUILDER[0, 1], EXAMPLE_RAGGED_TENSOR_2D[0][1]),
(SLICE_BUILDER[1, 2], EXAMPLE_RAGGED_TENSOR_2D[1][2]),
(SLICE_BUILDER[-1, 0], EXAMPLE_RAGGED_TENSOR_2D[-1][0]),
(SLICE_BUILDER[-3, 0], EXAMPLE_RAGGED_TENSOR_2D[-3][0]),
(SLICE_BUILDER[:], EXAMPLE_RAGGED_TENSOR_2D),
(SLICE_BUILDER[:, :], EXAMPLE_RAGGED_TENSOR_2D),
# Empty slice spec.
([], EXAMPLE_RAGGED_TENSOR_2D),
# Test for ellipsis
(SLICE_BUILDER[...], EXAMPLE_RAGGED_TENSOR_2D),
(SLICE_BUILDER[2, ...], EXAMPLE_RAGGED_TENSOR_2D[2]),
(SLICE_BUILDER[..., :], EXAMPLE_RAGGED_TENSOR_2D),
(SLICE_BUILDER[..., 2, 0], EXAMPLE_RAGGED_TENSOR_2D[2][0]),
(SLICE_BUILDER[2, ..., 0], EXAMPLE_RAGGED_TENSOR_2D[2][0]),
(SLICE_BUILDER[2, 0, ...], EXAMPLE_RAGGED_TENSOR_2D[2][0]),
# Test for array_ops.newaxis
(SLICE_BUILDER[array_ops.newaxis, :], [EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, array_ops.newaxis],
[[row] for row in EXAMPLE_RAGGED_TENSOR_2D]),
# Slicing inner ragged dimensions.
(SLICE_BUILDER[-1:,
1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_2D[-1:]]),
(SLICE_BUILDER[:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, -2:], [row[-2:] for row in EXAMPLE_RAGGED_TENSOR_2D]),
# Strided slices
(SLICE_BUILDER[::2], EXAMPLE_RAGGED_TENSOR_2D[::2]),
(SLICE_BUILDER[::-1], EXAMPLE_RAGGED_TENSOR_2D[::-1]),
(SLICE_BUILDER[::-2], EXAMPLE_RAGGED_TENSOR_2D[::-2]),
(SLICE_BUILDER[::-3], EXAMPLE_RAGGED_TENSOR_2D[::-3]),
(SLICE_BUILDER[:, ::2], [row[::2] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, ::-1], [row[::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, ::-2], [row[::-2] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, ::-3], [row[::-3] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, 2::-1],
[row[2::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, -1::-1],
[row[-1::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[..., -1::-1],
[row[-1::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, 2::-2],
[row[2::-2] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[::-1, ::-1],
[row[::-1] for row in EXAMPLE_RAGGED_TENSOR_2D[::-1]]),
) # pyformat: disable
def testWithRaggedRank1(self, slice_spec, expected):
"""Test that rt.__getitem__(slice_spec) == expected."""
# Ragged tensor
rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES,
EXAMPLE_RAGGED_TENSOR_2D_SPLITS)
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)
self._TestGetItem(rt, slice_spec, expected)
# pylint: disable=g-complex-comprehension
@parameterized.parameters([(start, stop)
for start in [-2, -1, None, 0, 1, 2]
for stop in [-2, -1, None, 0, 1, 2]])
def testWithStridedSlices(self, start, stop):
test_value = [[1, 2, 3, 4, 5], [6, 7], [8, 9, 10], [], [9],
[1, 2, 3, 4, 5, 6, 7, 8]]
rt = ragged_factory_ops.constant(test_value)
for step in [-3, -2, -1, 1, 2, 3]:
# Slice outer dimension
self.assertAllEqual(rt[start:stop:step], test_value[start:stop:step],
'slice=%s:%s:%s' % (start, stop, step))
# Slice inner dimension
self.assertAllEqual(rt[:, start:stop:step],
[row[start:stop:step] for row in test_value],
'slice=%s:%s:%s' % (start, stop, step))
# pylint: disable=invalid-slice-index
@parameterized.parameters(
# Tests for out-of-bound errors
(SLICE_BUILDER[5], (IndexError, ValueError, errors.InvalidArgumentError),
'.*out of bounds.*'),
(SLICE_BUILDER[-6], (IndexError, ValueError, errors.InvalidArgumentError),
'.*out of bounds.*'),
(SLICE_BUILDER[0, 2], (IndexError, ValueError,
errors.InvalidArgumentError), '.*out of bounds.*'),
(SLICE_BUILDER[3, 0], (IndexError, ValueError,
errors.InvalidArgumentError), '.*out of bounds.*'),
# Indexing into an inner ragged dimension
(SLICE_BUILDER[:, 3], ValueError,
'Cannot index into an inner ragged dimension'),
(SLICE_BUILDER[:1, 3], ValueError,
'Cannot index into an inner ragged dimension'),
(SLICE_BUILDER[..., 3], ValueError,
'Cannot index into an inner ragged dimension'),
# Tests for type errors
(SLICE_BUILDER[0.5], TypeError, re.escape(
tensor_getitem_override._SLICE_TYPE_ERROR)),
(SLICE_BUILDER[1:3:0.5], TypeError, re.escape(
tensor_getitem_override._SLICE_TYPE_ERROR)),
(SLICE_BUILDER[:, 1:3:0.5], TypeError,
'slice strides must be integers or None'),
(SLICE_BUILDER[:, 0.5:1.5], TypeError,
'slice offsets must be integers or None'),
(SLICE_BUILDER['foo'], TypeError, re.escape(
tensor_getitem_override._SLICE_TYPE_ERROR)),
(SLICE_BUILDER[:, 'foo':'foo'], TypeError,
'slice offsets must be integers or None'),
# Tests for other errors
(SLICE_BUILDER[..., 0, 0,
0], IndexError, 'Too many indices for RaggedTensor'),
)
def testErrorsWithRaggedRank1(self, slice_spec, expected, message):
"""Test that rt.__getitem__(slice_spec) == expected."""
# Ragged tensor
rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES,
EXAMPLE_RAGGED_TENSOR_2D_SPLITS)
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)
self._TestGetItemException(rt, slice_spec, expected, message)
@parameterized.parameters(
# Tests for rt[index, index, ...]
(SLICE_BUILDER[2, 0], EXAMPLE_RAGGED_TENSOR_4D[2][0]),
(SLICE_BUILDER[2, 0, 1], EXAMPLE_RAGGED_TENSOR_4D[2][0][1]),
(SLICE_BUILDER[2, 0, 1, 1], EXAMPLE_RAGGED_TENSOR_4D[2][0][1][1]),
(SLICE_BUILDER[2, 0, 1:], EXAMPLE_RAGGED_TENSOR_4D[2][0][1:]),
(SLICE_BUILDER[2, 0, 1:, 1:], [[16], [18]]),
(SLICE_BUILDER[2, 0, :, 1], [14, 16, 18]),
(SLICE_BUILDER[2, 0, 1, :], EXAMPLE_RAGGED_TENSOR_4D[2][0][1]),
# Tests for rt[index, slice, ...]
(SLICE_BUILDER[0, :], EXAMPLE_RAGGED_TENSOR_4D[0]),
(SLICE_BUILDER[1, :], EXAMPLE_RAGGED_TENSOR_4D[1]),
(SLICE_BUILDER[0, :, :, 1], [[2, 4, 6], [8, 10, 12]]),
(SLICE_BUILDER[1, :, :, 1], []),
(SLICE_BUILDER[2, :, :, 1], [[14, 16, 18]]),
(SLICE_BUILDER[3, :, :, 1], [[20]]),
# Tests for rt[slice, slice, ...]
(SLICE_BUILDER[:, :], EXAMPLE_RAGGED_TENSOR_4D),
(SLICE_BUILDER[:, :, :, 1], [[[2, 4, 6], [8, 10, 12]], [], [[14, 16, 18]],
[[20]]]),
(SLICE_BUILDER[1:, :, :, 1], [[], [[14, 16, 18]], [[20]]]),
(SLICE_BUILDER[-3:, :, :, 1], [[], [[14, 16, 18]], [[20]]]),
# Test for ellipsis
(SLICE_BUILDER[...], EXAMPLE_RAGGED_TENSOR_4D),
(SLICE_BUILDER[2, ...], EXAMPLE_RAGGED_TENSOR_4D[2]),
(SLICE_BUILDER[2, 0, ...], EXAMPLE_RAGGED_TENSOR_4D[2][0]),
(SLICE_BUILDER[..., 0], [[[1, 3, 5], [7, 9, 11]], [], [[13, 15, 17]],
[[19]]]),
(SLICE_BUILDER[2, ..., 0], [[13, 15, 17]]),
(SLICE_BUILDER[2, 0, ..., 0], [13, 15, 17]),
# Test for array_ops.newaxis
(SLICE_BUILDER[array_ops.newaxis, :], [EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, array_ops.newaxis],
[[row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
# Empty slice spec.
([], EXAMPLE_RAGGED_TENSOR_4D),
# Slicing inner ragged dimensions.
(SLICE_BUILDER[:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, -2:], [row[-2:] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, :-1],
[[v[:-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, 1:2],
[[v[1:2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[1:, 1:3, 1:2],
[[v[1:2] for v in row[1:3]] for row in EXAMPLE_RAGGED_TENSOR_4D[1:]]),
# Strided slices
(SLICE_BUILDER[::2], EXAMPLE_RAGGED_TENSOR_4D[::2]),
(SLICE_BUILDER[::-1], EXAMPLE_RAGGED_TENSOR_4D[::-1]),
(SLICE_BUILDER[::-2], EXAMPLE_RAGGED_TENSOR_4D[::-2]),
(SLICE_BUILDER[1::2], EXAMPLE_RAGGED_TENSOR_4D[1::2]),
(SLICE_BUILDER[:, ::2], [row[::2] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, 1::2], [row[1::2] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, ::2],
[[v[::2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, 1::2],
[[v[1::2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, ::-1],
[[v[::-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, ::-2],
[[v[::-2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[..., ::-1, :],
[[v[::-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[..., ::-1], [[[v[::-1] for v in col] for col in row]
for row in EXAMPLE_RAGGED_TENSOR_4D]),
) # pyformat: disable
def testWithRaggedRank2(self, slice_spec, expected):
"""Test that rt.__getitem__(slice_spec) == expected."""
rt = RaggedTensor.from_nested_row_splits(
EXAMPLE_RAGGED_TENSOR_4D_VALUES,
[EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_4D)
self._TestGetItem(rt, slice_spec, expected)
@parameterized.parameters(
# Test for errors in unsupported cases
(SLICE_BUILDER[:, 0], ValueError,
'Cannot index into an inner ragged dimension.'),
(SLICE_BUILDER[:, :, 0], ValueError,
'Cannot index into an inner ragged dimension.'),
# Test for out-of-bounds errors.
(SLICE_BUILDER[1, 0], (IndexError, ValueError,
errors.InvalidArgumentError), '.*out of bounds.*'),
(SLICE_BUILDER[0, 0, 3],
(IndexError, ValueError,
errors.InvalidArgumentError), '.*out of bounds.*'),
(SLICE_BUILDER[5], (IndexError, ValueError, errors.InvalidArgumentError),
'.*out of bounds.*'),
(SLICE_BUILDER[0, 5], (IndexError, ValueError,
errors.InvalidArgumentError), '.*out of bounds.*'),
)
def testErrorsWithRaggedRank2(self, slice_spec, expected, message):
"""Test that rt.__getitem__(slice_spec) == expected."""
rt = RaggedTensor.from_nested_row_splits(
EXAMPLE_RAGGED_TENSOR_4D_VALUES,
[EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_4D)
self._TestGetItemException(rt, slice_spec, expected, message)
@parameterized.parameters(
(SLICE_BUILDER[:], []),
(SLICE_BUILDER[2:], []),
(SLICE_BUILDER[:-3], []),
)
def testWithEmptyTensor(self, slice_spec, expected):
"""Test that rt.__getitem__(slice_spec) == expected."""
rt = RaggedTensor.from_row_splits([], [0])
self._TestGetItem(rt, slice_spec, expected)
@parameterized.parameters(
(SLICE_BUILDER[0], (IndexError, ValueError, errors.InvalidArgumentError),
'.*out of bounds.*'),
(SLICE_BUILDER[-1], (IndexError, ValueError, errors.InvalidArgumentError),
'.*out of bounds.*'),
)
def testErrorsWithEmptyTensor(self, slice_spec, expected, message):
"""Test that rt.__getitem__(slice_spec) == expected."""
rt = RaggedTensor.from_row_splits([], [0])
self._TestGetItemException(rt, slice_spec, expected, message)
@parameterized.parameters(
(SLICE_BUILDER[-4], EXAMPLE_RAGGED_TENSOR_2D[-4]),
(SLICE_BUILDER[0], EXAMPLE_RAGGED_TENSOR_2D[0]),
(SLICE_BUILDER[-3:], EXAMPLE_RAGGED_TENSOR_2D[-3:]),
(SLICE_BUILDER[:3], EXAMPLE_RAGGED_TENSOR_2D[:3]),
(SLICE_BUILDER[3:5], EXAMPLE_RAGGED_TENSOR_2D[3:5]),
(SLICE_BUILDER[0, 1], EXAMPLE_RAGGED_TENSOR_2D[0][1]),
(SLICE_BUILDER[-3, 0], EXAMPLE_RAGGED_TENSOR_2D[-3][0]),
)
def testWithPlaceholderShapes(self, slice_spec, expected):
"""Test that rt.__getitem__(slice_spec) == expected."""
# Intentionally use an unknown shape for `splits`, to force the code path
# that deals with having nrows unknown at graph construction time.
splits = constant_op.constant(
EXAMPLE_RAGGED_TENSOR_2D_SPLITS, dtype=dtypes.int64)
splits = array_ops.placeholder_with_default(splits, None)
rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES, splits)
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)
self._TestGetItem(rt, slice_spec, expected)
@parameterized.parameters(
(SLICE_BUILDER[..., 2], ValueError,
'Ellipsis not supported for unknown shape RaggedTensors'),)
def testErrorsWithPlaceholderShapes(self, slice_spec, expected, message):
"""Test that rt.__getitem__(slice_spec) == expected."""
if not context.executing_eagerly():
# Intentionally use an unknown shape for `values`.
values = array_ops.placeholder_with_default([0], None)
rt = RaggedTensor.from_row_splits(values, [0, 1])
self._TestGetItemException(rt, slice_spec, expected, message)
def testNewAxis(self):
# rt: [[[['a', 'b'], ['c', 'd']], [], [['e', 'f']]], []]
splits1 = [0, 3, 3]
splits2 = [0, 2, 2, 3]
values = constant_op.constant([['a', 'b'], ['c', 'd'], ['e', 'f']])
rt = RaggedTensor.from_nested_row_splits(values, [splits1, splits2])
rt_newaxis0 = rt[array_ops.newaxis]
rt_newaxis1 = rt[:, array_ops.newaxis]
rt_newaxis2 = rt[:, :, array_ops.newaxis]
rt_newaxis3 = rt[:, :, :, array_ops.newaxis]
rt_newaxis4 = rt[:, :, :, :, array_ops.newaxis]
self.assertAllEqual(
rt, [[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []])
self.assertAllEqual(
rt_newaxis0, [[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []]])
self.assertAllEqual(
rt_newaxis1,
[[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]]], [[]]])
self.assertAllEqual(
rt_newaxis2,
[[[[[b'a', b'b'], [b'c', b'd']]], [[]], [[[b'e', b'f']]]], []])
self.assertAllEqual(
rt_newaxis3,
[[[[[b'a', b'b']], [[b'c', b'd']]], [], [[[b'e', b'f']]]], []])
self.assertAllEqual(
rt_newaxis4,
[[[[[b'a'], [b'b']], [[b'c'], [b'd']]], [], [[[b'e'], [b'f']]]], []])
self.assertEqual(rt.ragged_rank, 2)
self.assertEqual(rt_newaxis0.ragged_rank, 3)
self.assertEqual(rt_newaxis1.ragged_rank, 3)
self.assertEqual(rt_newaxis2.ragged_rank, 3)
self.assertEqual(rt_newaxis3.ragged_rank, 2)
self.assertEqual(rt_newaxis4.ragged_rank, 2)
self.assertEqual(rt_newaxis0.shape.as_list(), [1, 2, None, None, 2])
self.assertEqual(rt_newaxis1.shape.as_list(), [2, 1, None, None, 2])
self.assertEqual(rt_newaxis2.shape.as_list(), [2, None, 1, None, 2])
self.assertEqual(rt_newaxis3.shape.as_list(), [2, None, None, 1, 2])
self.assertEqual(rt_newaxis4.shape.as_list(), [2, None, None, 2, 1])
@parameterized.parameters(
# EXAMPLE_RAGGED_TENSOR_3D.shape = [2, 3, None]
# Indexing into uniform_row_splits dimension:
(SLICE_BUILDER[:, 1], [r[1] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, None]),
(SLICE_BUILDER[:, 2], [r[2] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, None]),
(SLICE_BUILDER[:, -2], [r[-2] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, None]),
(SLICE_BUILDER[:, -3], [r[-3] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, None]),
(SLICE_BUILDER[1:, 2], [r[2] for r in EXAMPLE_RAGGED_TENSOR_3D[1:]],
[1, None]),
(SLICE_BUILDER[:, 1, 1:], [r[1][1:] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, None]),
(SLICE_BUILDER[1:, 1, 1:],
[r[1][1:] for r in EXAMPLE_RAGGED_TENSOR_3D[1:]],
[1, None]),
# Slicing uniform_row_splits dimension:
(SLICE_BUILDER[:, 2:], [r[2:] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 1, None]),
(SLICE_BUILDER[:, -2:], [r[-2:] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 2, None]),
(SLICE_BUILDER[:, :, 1:],
[[c[1:] for c in r] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 3, None]),
(SLICE_BUILDER[:, 5:], [r[5:] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 0, None]),
# Slicing uniform_row_splits dimension with a non-default step size:
(SLICE_BUILDER[:, ::2], [r[::2] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 2, None]),
(SLICE_BUILDER[:, ::-1], [r[::-1] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 3, None]),
) # pyformat: disable
def testWithUniformRowLength(self, slice_spec, expected, expected_shape):
"""Test that rt.__getitem__(slice_spec) == expected."""
rt = RaggedTensor.from_uniform_row_length(
RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_3D_VALUES,
EXAMPLE_RAGGED_TENSOR_3D_SPLITS),
EXAMPLE_RAGGED_TENSOR_3D_ROWLEN)
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_3D)
self.assertIsNot(rt.uniform_row_length, None)
self._TestGetItem(rt, slice_spec, expected, expected_shape)
# If the result is 3D, then check that it still has a uniform row length:
actual = rt.__getitem__(slice_spec) # pylint: disable=assignment-from-no-return
if actual.shape.rank == 3:
self.assertIsNot(actual.uniform_row_length, None)
self.assertAllEqual(actual.uniform_row_length, expected_shape[1])
@parameterized.parameters(
(SLICE_BUILDER[:, 3], errors.InvalidArgumentError, 'out of bounds'),
(SLICE_BUILDER[:, -4], errors.InvalidArgumentError, 'out of bounds'),
(SLICE_BUILDER[:, 10], errors.InvalidArgumentError, 'out of bounds'),
(SLICE_BUILDER[:, -10], errors.InvalidArgumentError, 'out of bounds'),
)
def testErrorsWithUniformRowLength(self, slice_spec, expected, message):
"""Test that rt.__getitem__(slice_spec) == expected."""
rt = RaggedTensor.from_uniform_row_length(
RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_3D_VALUES,
EXAMPLE_RAGGED_TENSOR_3D_SPLITS),
EXAMPLE_RAGGED_TENSOR_3D_ROWLEN)
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_3D)
self._TestGetItemException(rt, slice_spec, expected, message)
if __name__ == '__main__':
googletest.main()
| RaggedGetItemTest |
python | doocs__leetcode | solution/0400-0499/0487.Max Consecutive Ones II/Solution.py | {
"start": 0,
"end": 257
} | class ____:
def findMaxConsecutiveOnes(self, nums: List[int]) -> int:
l = cnt = 0
for x in nums:
cnt += x ^ 1
if cnt > 1:
cnt -= nums[l] ^ 1
l += 1
return len(nums) - l
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-adjust/source_adjust/source.py | {
"start": 347,
"end": 476
} | class ____(YamlDeclarativeSource):
def __init__(self):
super().__init__(**{"path_to_yaml": "manifest.yaml"})
| SourceAdjust |
python | celery__celery | t/smoke/conftest.py | {
"start": 1010,
"end": 5068
} | class ____(
TaskTermination,
WorkerKill,
WorkerRestart,
):
"""Optional operations that can be performed with different methods,
shared across the smoke tests suite.
Example Usage:
>>> class test_mysuite(SuiteOperations):
>>> def test_something(self):
>>> self.prepare_worker_with_conditions()
>>> assert condition are met
"""
@pytest.fixture
def default_worker_tasks(default_worker_tasks: set) -> set:
"""Use all of the integration and smoke suites tasks in the smoke tests workers."""
from t.integration import tasks as integration_tests_tasks
from t.smoke import tasks as smoke_tests_tasks
default_worker_tasks.add(integration_tests_tasks)
default_worker_tasks.add(smoke_tests_tasks)
return default_worker_tasks
# When using integration tests tasks that requires a Redis instance,
# we use pytest-celery to raise a dedicated Redis container for the smoke tests suite that is configured
# to be used by the integration tests tasks.
redis_command = RedisContainer.command()
redis_command.insert(1, "/usr/local/etc/redis/redis.conf")
redis_image = fetch(repository=REDIS_IMAGE)
redis_test_container: RedisContainer = container(
image="{redis_image.id}",
ports=REDIS_PORTS,
environment=REDIS_ENV,
network="{default_pytest_celery_network.name}",
wrapper_class=RedisContainer,
timeout=REDIS_CONTAINER_TIMEOUT,
command=redis_command,
volumes={
os.path.abspath("t/smoke/redis.conf"): {
"bind": "/usr/local/etc/redis/redis.conf",
"mode": "ro", # Mount as read-only
}
},
)
@pytest.fixture(autouse=True)
def set_redis_test_container(redis_test_container: RedisContainer):
"""Configure the Redis test container to be used by the integration tests tasks."""
# get_redis_connection(): will use these settings in the tests environment
os.environ["REDIS_HOST"] = "localhost"
os.environ["REDIS_PORT"] = str(redis_test_container.port)
@pytest.fixture
def default_worker_env(default_worker_env: dict, redis_test_container: RedisContainer) -> dict:
"""Add the Redis connection details to the worker environment."""
# get_redis_connection(): will use these settings when executing tasks in the worker
default_worker_env.update(
{
"REDIS_HOST": redis_test_container.hostname,
"REDIS_PORT": 6379,
**LOCALSTACK_CREDS,
}
)
return default_worker_env
@pytest.fixture(scope="session", autouse=True)
def set_aws_credentials():
os.environ.update(LOCALSTACK_CREDS)
@pytest.fixture
def default_worker_app(default_worker_app: Celery) -> Celery:
app = default_worker_app
if app.conf.broker_url and app.conf.broker_url.startswith("sqs"):
app.conf.broker_transport_options["region"] = LOCALSTACK_CREDS["AWS_DEFAULT_REGION"]
return app
# Override the default redis broker container from pytest-celery
default_redis_broker = container(
image="{default_redis_broker_image}",
ports=fxtr("default_redis_broker_ports"),
environment=fxtr("default_redis_broker_env"),
network="{default_pytest_celery_network.name}",
wrapper_class=RedisContainer,
timeout=REDIS_CONTAINER_TIMEOUT,
command=redis_command,
volumes={
os.path.abspath("t/smoke/redis.conf"): {
"bind": "/usr/local/etc/redis/redis.conf",
"mode": "ro", # Mount as read-only
}
},
)
# Override the default redis backend container from pytest-celery
default_redis_backend = container(
image="{default_redis_backend_image}",
ports=fxtr("default_redis_backend_ports"),
environment=fxtr("default_redis_backend_env"),
network="{default_pytest_celery_network.name}",
wrapper_class=RedisContainer,
timeout=REDIS_CONTAINER_TIMEOUT,
command=redis_command,
volumes={
os.path.abspath("t/smoke/redis.conf"): {
"bind": "/usr/local/etc/redis/redis.conf",
"mode": "ro", # Mount as read-only
}
},
)
| SuiteOperations |
python | pypa__hatch | src/hatch/env/plugin/interface.py | {
"start": 36280,
"end": 38317
} | class ____:
"""
This class represents a synchronized path between the local file system and a potentially remote environment.
"""
def __init__(self, env: EnvironmentInterface, *, local_path: Path, env_path: str):
self.__env = env
self.__local_path = local_path
self.__env_path = env_path
@property
def env(self) -> EnvironmentInterface:
"""
Returns the environment to which this context belongs.
"""
return self.__env
@property
def local_path(self) -> Path:
"""
Returns the local path to which this context refers as a path-like object.
"""
return self.__local_path
@property
def env_path(self) -> str:
"""
Returns the environment path to which this context refers as a string. The environment
may not be on the local file system.
"""
return self.__env_path
def join(self, relative_path: str) -> FileSystemContext:
"""
Returns a new instance of this class with the given relative path appended to the local and
environment paths.
This method should not need overwriting.
"""
local_path = self.local_path / relative_path
env_path = f"{self.env_path}{self.__env.sep.join(['', *os.path.normpath(relative_path).split(os.sep)])}"
return FileSystemContext(self.__env, local_path=local_path, env_path=env_path)
def sync_env(self):
"""
Synchronizes the [environment path](utilities.md#hatch.env.plugin.interface.FileSystemContext.env_path)
with the [local path](utilities.md#hatch.env.plugin.interface.FileSystemContext.local_path) as the source.
"""
def sync_local(self):
"""
Synchronizes the [local path](utilities.md#hatch.env.plugin.interface.FileSystemContext.local_path) as the
source with the [environment path](utilities.md#hatch.env.plugin.interface.FileSystemContext.env_path) as
the source.
"""
| FileSystemContext |
python | huggingface__transformers | src/transformers/models/esm/tokenization_esm.py | {
"start": 1038,
"end": 5380
} | class ____(PreTrainedTokenizer):
"""
Constructs an ESM tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
unk_token="<unk>",
cls_token="<cls>",
pad_token="<pad>",
mask_token="<mask>",
eos_token="<eos>",
**kwargs,
):
self.all_tokens = load_vocab_file(vocab_file)
self._id_to_token = dict(enumerate(self.all_tokens))
self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
super().__init__(
unk_token=unk_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
eos_token=eos_token,
**kwargs,
)
# TODO, all the tokens are added? But they are also part of the vocab... bit strange.
# none of them are special, but they all need special splitting.
self.unique_no_split_tokens = self.all_tokens
self._update_trie(self.unique_no_split_tokens)
def _convert_id_to_token(self, index: int) -> str:
return self._id_to_token.get(index, self.unk_token)
def _convert_token_to_id(self, token: str) -> int:
return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
def _tokenize(self, text, **kwargs):
return text.split()
def get_vocab(self):
base_vocab = self._token_to_id.copy()
base_vocab.update(self.added_tokens_encoder)
return base_vocab
def token_to_id(self, token: str) -> int:
return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
def id_to_token(self, index: int) -> str:
return self._id_to_token.get(index, self.unk_token)
def build_inputs_with_special_tokens(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
cls = [self.cls_token_id]
sep = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_1 is None:
if self.eos_token_id is None:
return cls + token_ids_0
else:
return cls + token_ids_0 + sep
elif self.eos_token_id is None:
raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
return cls + token_ids_0 + sep + token_ids_1 + sep # Multiple inputs always have an EOS token
def get_special_tokens_mask(
self, token_ids_0: list, token_ids_1: Optional[list] = None, already_has_special_tokens: bool = False
) -> list[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`list[int]`):
List of ids of the first sequence.
token_ids_1 (`list[int]`, *optional*):
List of ids of the second sequence.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
mask = [1] + ([0] * len(token_ids_0)) + [1]
if token_ids_1 is not None:
mask += [0] * len(token_ids_1) + [1]
return mask
def save_vocabulary(self, save_directory, filename_prefix):
vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
with open(vocab_file, "w") as f:
f.write("\n".join(self.all_tokens))
return (vocab_file,)
@property
def vocab_size(self) -> int:
return len(self.all_tokens)
__all__ = ["EsmTokenizer"]
| EsmTokenizer |
python | django__django | tests/sitemaps_tests/urls/http.py | {
"start": 855,
"end": 927
} | class ____(SimpleI18nSitemap):
alternates = True
| AlternatesI18nSitemap |
python | gabrielfalcao__HTTPretty | tests/functional/test_decorator.py | {
"start": 945,
"end": 1163
} | class ____(TestCase):
"""
Checks that the test methods in DecoratedNonUnitTest were decorated.
"""
def test_decorated(self):
DecoratedNonUnitTest().test_decorated()
@httprettified
| NonUnitTestTest |
python | pennersr__django-allauth | allauth/socialaccount/providers/gumroad/provider.py | {
"start": 343,
"end": 1111
} | class ____(OAuth2Provider):
id = "gumroad"
name = "Gumroad"
account_class = GumroadAccount
oauth2_adapter_class = GumroadOAuth2Adapter
def get_default_scope(self):
return ["edit_products"]
def extract_uid(self, data):
return str(data["user_id"])
def extract_common_fields(self, data):
try:
username = data["url"].split("https://gumroad.com/")[1]
except (KeyError, IndexError, AttributeError):
username = None
return dict(
username=username,
email=data.get("email"),
name=data.get("name"),
twitter_handle=data.get("twitter_handle"),
url=data.get("url"),
)
provider_classes = [GumroadProvider]
| GumroadProvider |
python | huggingface__transformers | src/transformers/models/d_fine/configuration_d_fine.py | {
"start": 1549,
"end": 21538
} | class ____(PreTrainedConfig):
"""
This is the configuration class to store the configuration of a [`DFineModel`]. It is used to instantiate a D-FINE
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of D-FINE-X-COCO "[ustc-community/dfine-xlarge-coco"](https://huggingface.co/ustc-community/dfine-xlarge-coco").
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
initializer_range (`float`, *optional*, defaults to 0.01):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_bias_prior_prob (`float`, *optional*):
The prior probability used by the bias initializer to initialize biases for `enc_score_head` and `class_embed`.
If `None`, `prior_prob` computed as `prior_prob = 1 / (num_labels + 1)` while initializing model weights.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
batch_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the batch normalization layers.
backbone_config (`Dict`, *optional*, defaults to `RTDetrResNetConfig()`):
The configuration of the backbone model.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
library.
freeze_backbone_batch_norms (`bool`, *optional*, defaults to `True`):
Whether to freeze the batch normalization layers in the backbone.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
encoder_hidden_dim (`int`, *optional*, defaults to 256):
Dimension of the layers in hybrid encoder.
encoder_in_channels (`list`, *optional*, defaults to `[512, 1024, 2048]`):
Multi level features input for encoder.
feat_strides (`list[int]`, *optional*, defaults to `[8, 16, 32]`):
Strides used in each feature map.
encoder_layers (`int`, *optional*, defaults to 1):
Total of layers to be used by the encoder.
encoder_ffn_dim (`int`, *optional*, defaults to 1024):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
encoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
dropout (`float`, *optional*, defaults to 0.0):
The ratio for all dropout layers.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
encode_proj_layers (`list[int]`, *optional*, defaults to `[2]`):
Indexes of the projected layers to be used in the encoder.
positional_encoding_temperature (`int`, *optional*, defaults to 10000):
The temperature parameter used to create the positional encodings.
encoder_activation_function (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
activation_function (`str`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the general layer. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
eval_size (`tuple[int, int]`, *optional*):
Height and width used to computes the effective height and width of the position embeddings after taking
into account the stride.
normalize_before (`bool`, *optional*, defaults to `False`):
Determine whether to apply layer normalization in the transformer encoder layer before self-attention and
feed-forward modules.
hidden_expansion (`float`, *optional*, defaults to 1.0):
Expansion ratio to enlarge the dimension size of RepVGGBlock and CSPRepLayer.
d_model (`int`, *optional*, defaults to 256):
Dimension of the layers exclude hybrid encoder.
num_queries (`int`, *optional*, defaults to 300):
Number of object queries.
decoder_in_channels (`list`, *optional*, defaults to `[256, 256, 256]`):
Multi level features dimension for decoder
decoder_ffn_dim (`int`, *optional*, defaults to 1024):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
num_feature_levels (`int`, *optional*, defaults to 3):
The number of input feature levels.
decoder_n_points (`int`, *optional*, defaults to 4):
The number of sampled keys in each feature level for each attention head in the decoder.
decoder_layers (`int`, *optional*, defaults to 6):
Number of decoder layers.
decoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_activation_function (`str`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the decoder. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
num_denoising (`int`, *optional*, defaults to 100):
The total number of denoising tasks or queries to be used for contrastive denoising.
label_noise_ratio (`float`, *optional*, defaults to 0.5):
The fraction of denoising labels to which random noise should be added.
box_noise_scale (`float`, *optional*, defaults to 1.0):
Scale or magnitude of noise to be added to the bounding boxes.
learn_initial_query (`bool`, *optional*, defaults to `False`):
Indicates whether the initial query embeddings for the decoder should be learned during training
anchor_image_size (`tuple[int, int]`, *optional*):
Height and width of the input image used during evaluation to generate the bounding box anchors. If None, automatic generate anchor is applied.
with_box_refine (`bool`, *optional*, defaults to `True`):
Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
based on the predictions from the previous layer.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the architecture has an encoder decoder structure.
matcher_alpha (`float`, *optional*, defaults to 0.25):
Parameter alpha used by the Hungarian Matcher.
matcher_gamma (`float`, *optional*, defaults to 2.0):
Parameter gamma used by the Hungarian Matcher.
matcher_class_cost (`float`, *optional*, defaults to 2.0):
The relative weight of the class loss used by the Hungarian Matcher.
matcher_bbox_cost (`float`, *optional*, defaults to 5.0):
The relative weight of the bounding box loss used by the Hungarian Matcher.
matcher_giou_cost (`float`, *optional*, defaults to 2.0):
The relative weight of the giou loss of used by the Hungarian Matcher.
use_focal_loss (`bool`, *optional*, defaults to `True`):
Parameter informing if focal focal should be used.
auxiliary_loss (`bool`, *optional*, defaults to `True`):
Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
focal_loss_alpha (`float`, *optional*, defaults to 0.75):
Parameter alpha used to compute the focal loss.
focal_loss_gamma (`float`, *optional*, defaults to 2.0):
Parameter gamma used to compute the focal loss.
weight_loss_vfl (`float`, *optional*, defaults to 1.0):
Relative weight of the varifocal loss in the object detection loss.
weight_loss_bbox (`float`, *optional*, defaults to 5.0):
Relative weight of the L1 bounding box loss in the object detection loss.
weight_loss_giou (`float`, *optional*, defaults to 2.0):
Relative weight of the generalized IoU loss in the object detection loss.
weight_loss_fgl (`float`, *optional*, defaults to 0.15):
Relative weight of the fine-grained localization loss in the object detection loss.
weight_loss_ddf (`float`, *optional*, defaults to 1.5):
Relative weight of the decoupled distillation focal loss in the object detection loss.
eos_coefficient (`float`, *optional*, defaults to 0.0001):
Relative classification weight of the 'no-object' class in the object detection loss.
eval_idx (`int`, *optional*, defaults to -1):
Index of the decoder layer to use for evaluation. If negative, counts from the end
(e.g., -1 means use the last layer). This allows for early prediction in the decoder
stack while still training later layers.
layer_scale (`float`, *optional*, defaults to `1.0`):
Scaling factor for the hidden dimension in later decoder layers. Used to adjust the
model capacity after the evaluation layer.
max_num_bins (`int`, *optional*, defaults to 32):
Maximum number of bins for the distribution-guided bounding box refinement.
Higher values allow for more fine-grained localization but increase computation.
reg_scale (`float`, *optional*, defaults to 4.0):
Scale factor for the regression distribution. Controls the range and granularity
of the bounding box refinement process.
depth_mult (`float`, *optional*, defaults to 1.0):
Multiplier for the number of blocks in RepNCSPELAN4 layers. Used to scale the model's
depth while maintaining its architecture.
top_prob_values (`int`, *optional*, defaults to 4):
Number of top probability values to consider from each corner's distribution.
lqe_hidden_dim (`int`, *optional*, defaults to 64):
Hidden dimension size for the Location Quality Estimator (LQE) network.
lqe_layers (`int`, *optional*, defaults to 2):
Number of layers in the Location Quality Estimator MLP.
decoder_offset_scale (`float`, *optional*, defaults to 0.5):
Offset scale used in deformable attention.
decoder_method (`str`, *optional*, defaults to `"default"`):
The method to use for the decoder: `"default"` or `"discrete"`.
up (`float`, *optional*, defaults to 0.5):
Controls the upper bounds of the Weighting Function.
"""
model_type = "d_fine"
sub_configs = {"backbone_config": AutoConfig}
layer_types = ["basic", "bottleneck"]
attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__(
self,
initializer_range=0.01,
initializer_bias_prior_prob=None,
layer_norm_eps=1e-5,
batch_norm_eps=1e-5,
# backbone
backbone_config=None,
backbone=None,
use_pretrained_backbone=False,
use_timm_backbone=False,
freeze_backbone_batch_norms=True,
backbone_kwargs=None,
# encoder HybridEncoder
encoder_hidden_dim=256,
encoder_in_channels=[512, 1024, 2048],
feat_strides=[8, 16, 32],
encoder_layers=1,
encoder_ffn_dim=1024,
encoder_attention_heads=8,
dropout=0.0,
activation_dropout=0.0,
encode_proj_layers=[2],
positional_encoding_temperature=10000,
encoder_activation_function="gelu",
activation_function="silu",
eval_size=None,
normalize_before=False,
hidden_expansion=1.0,
# decoder DFineTransformer
d_model=256,
num_queries=300,
decoder_in_channels=[256, 256, 256],
decoder_ffn_dim=1024,
num_feature_levels=3,
decoder_n_points=4,
decoder_layers=6,
decoder_attention_heads=8,
decoder_activation_function="relu",
attention_dropout=0.0,
num_denoising=100,
label_noise_ratio=0.5,
box_noise_scale=1.0,
learn_initial_query=False,
anchor_image_size=None,
with_box_refine=True,
is_encoder_decoder=True,
# Loss
matcher_alpha=0.25,
matcher_gamma=2.0,
matcher_class_cost=2.0,
matcher_bbox_cost=5.0,
matcher_giou_cost=2.0,
use_focal_loss=True,
auxiliary_loss=True,
focal_loss_alpha=0.75,
focal_loss_gamma=2.0,
weight_loss_vfl=1.0,
weight_loss_bbox=5.0,
weight_loss_giou=2.0,
weight_loss_fgl=0.15,
weight_loss_ddf=1.5,
eos_coefficient=1e-4,
eval_idx=-1,
layer_scale=1,
max_num_bins=32,
reg_scale=4.0,
depth_mult=1.0,
top_prob_values=4,
lqe_hidden_dim=64,
lqe_layers=2,
decoder_offset_scale=0.5,
decoder_method="default",
up=0.5,
**kwargs,
):
self.initializer_range = initializer_range
self.initializer_bias_prior_prob = initializer_bias_prior_prob
self.layer_norm_eps = layer_norm_eps
self.batch_norm_eps = batch_norm_eps
# backbone
if backbone_config is None and backbone is None:
logger.info(
"`backbone_config` and `backbone` are `None`. Initializing the config with the default `HGNet-V2` backbone."
)
backbone_model_type = "hgnet_v2"
config_class = CONFIG_MAPPING[backbone_model_type]
# this will map it to RTDetrResNetConfig
# note: we can instead create HGNetV2Config
# and we would need to create HGNetV2Backbone
backbone_config = config_class(
num_channels=3,
embedding_size=64,
hidden_sizes=[256, 512, 1024, 2048],
depths=[3, 4, 6, 3],
layer_type="bottleneck",
hidden_act="relu",
downsample_in_first_stage=False,
downsample_in_bottleneck=False,
out_features=None,
out_indices=[2, 3, 4],
)
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.pop("model_type")
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
verify_backbone_config_arguments(
use_timm_backbone=use_timm_backbone,
use_pretrained_backbone=use_pretrained_backbone,
backbone=backbone,
backbone_config=backbone_config,
backbone_kwargs=backbone_kwargs,
)
self.backbone_config = backbone_config
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = use_timm_backbone
self.freeze_backbone_batch_norms = freeze_backbone_batch_norms
self.backbone_kwargs = backbone_kwargs
# encoder
self.encoder_hidden_dim = encoder_hidden_dim
self.encoder_in_channels = encoder_in_channels
self.feat_strides = feat_strides
self.encoder_attention_heads = encoder_attention_heads
self.encoder_ffn_dim = encoder_ffn_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
self.encode_proj_layers = encode_proj_layers
self.encoder_layers = encoder_layers
self.positional_encoding_temperature = positional_encoding_temperature
self.eval_size = eval_size
self.normalize_before = normalize_before
self.encoder_activation_function = encoder_activation_function
self.activation_function = activation_function
self.hidden_expansion = hidden_expansion
# decoder
self.d_model = d_model
self.num_queries = num_queries
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_in_channels = decoder_in_channels
self.num_feature_levels = num_feature_levels
self.decoder_n_points = decoder_n_points
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.decoder_activation_function = decoder_activation_function
self.attention_dropout = attention_dropout
self.num_denoising = num_denoising
self.label_noise_ratio = label_noise_ratio
self.box_noise_scale = box_noise_scale
self.learn_initial_query = learn_initial_query
self.anchor_image_size = anchor_image_size
self.auxiliary_loss = auxiliary_loss
self.with_box_refine = with_box_refine
# Loss
self.matcher_alpha = matcher_alpha
self.matcher_gamma = matcher_gamma
self.matcher_class_cost = matcher_class_cost
self.matcher_bbox_cost = matcher_bbox_cost
self.matcher_giou_cost = matcher_giou_cost
self.use_focal_loss = use_focal_loss
self.focal_loss_alpha = focal_loss_alpha
self.focal_loss_gamma = focal_loss_gamma
self.weight_loss_vfl = weight_loss_vfl
self.weight_loss_bbox = weight_loss_bbox
self.weight_loss_giou = weight_loss_giou
self.weight_loss_fgl = weight_loss_fgl
self.weight_loss_ddf = weight_loss_ddf
self.eos_coefficient = eos_coefficient
# add the new attributes with the given values or defaults
self.eval_idx = eval_idx
self.layer_scale = layer_scale
self.max_num_bins = max_num_bins
self.reg_scale = reg_scale
self.depth_mult = depth_mult
self.decoder_offset_scale = decoder_offset_scale
self.decoder_method = decoder_method
self.top_prob_values = top_prob_values
self.lqe_hidden_dim = lqe_hidden_dim
self.lqe_layers = lqe_layers
self.up = up
if isinstance(self.decoder_n_points, list):
if len(self.decoder_n_points) != self.num_feature_levels:
raise ValueError(
f"Length of decoder_n_points list ({len(self.decoder_n_points)}) must match num_feature_levels ({self.num_feature_levels})."
)
head_dim = self.d_model // self.decoder_attention_heads
if head_dim * self.decoder_attention_heads != self.d_model:
raise ValueError(
f"Embedded dimension {self.d_model} must be divisible by decoder_attention_heads {self.decoder_attention_heads}"
)
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
self.tie_encoder_decoder = True
__all__ = ["DFineConfig"]
| DFineConfig |
python | huggingface__transformers | src/transformers/models/sam3_video/modeling_sam3_video.py | {
"start": 19025,
"end": 20341
} | class ____(ModelOutput):
r"""
object_ids (`list[int]`, *optional*):
List of object IDs being tracked in the current frame.
obj_id_to_mask (`dict[int, torch.FloatTensor]`, *optional*):
Dictionary mapping object IDs to their predicted low-resolution masks.
Each mask has shape `(1, H_low, W_low)`.
obj_id_to_score (`dict[int, float]`, *optional*):
Dictionary mapping object IDs to their detection scores.
obj_id_to_tracker_score (`dict[int, float]`, *optional*):
Dictionary mapping object IDs to their tracker scores for the current frame.
removed_obj_ids (`set[int]`, *optional*):
Set of object IDs that have been removed (e.g., via hotstart heuristics).
suppressed_obj_ids (`set[int]`, *optional*):
Set of object IDs that have been suppressed in the current frame.
frame_idx (`int`, *optional*):
The frame index of the video.
"""
object_ids: Optional[list[int]] = None
obj_id_to_mask: Optional[dict[int, torch.FloatTensor]] = None
obj_id_to_score: Optional[dict[int, float]] = None
obj_id_to_tracker_score: Optional[dict[int, float]] = None
removed_obj_ids: Optional[set[int]] = None
suppressed_obj_ids: Optional[set[int]] = None
frame_idx: Optional[int] = None
| Sam3VideoSegmentationOutput |
python | apache__airflow | providers/presto/tests/unit/presto/hooks/test_presto.py | {
"start": 2140,
"end": 8655
} | class ____:
@patch("airflow.providers.presto.hooks.presto.prestodb.auth.BasicAuthentication")
@patch("airflow.providers.presto.hooks.presto.prestodb.dbapi.connect")
@patch("airflow.providers.presto.hooks.presto.PrestoHook.get_connection")
def test_get_conn_basic_auth(self, mock_get_connection, mock_connect, mock_basic_auth):
mock_get_connection.return_value = Connection(
login="login", password="password", host="host", schema="hive"
)
conn = PrestoHook().get_conn()
mock_connect.assert_called_once_with(
catalog="hive",
host="host",
port=None,
http_headers=mock.ANY,
http_scheme="http",
schema="hive",
source="airflow",
user="login",
isolation_level=0,
auth=mock_basic_auth.return_value,
)
mock_basic_auth.assert_called_once_with("login", "password")
assert mock_connect.return_value == conn
@patch("airflow.providers.presto.hooks.presto.PrestoHook.get_connection")
def test_get_conn_invalid_auth(self, mock_get_connection):
mock_get_connection.return_value = Connection(
login="login",
password="password",
host="host",
schema="hive",
extra=json.dumps({"auth": "kerberos"}),
)
with pytest.raises(
AirflowException, match=re.escape("Kerberos authorization doesn't support password.")
):
PrestoHook().get_conn()
@patch("airflow.providers.presto.hooks.presto.prestodb.auth.KerberosAuthentication")
@patch("airflow.providers.presto.hooks.presto.prestodb.dbapi.connect")
@patch("airflow.providers.presto.hooks.presto.PrestoHook.get_connection")
def test_get_conn_kerberos_auth(self, mock_get_connection, mock_connect, mock_auth):
mock_get_connection.return_value = Connection(
login="login",
host="host",
schema="hive",
extra=json.dumps(
{
"auth": "kerberos",
"kerberos__config": "TEST_KERBEROS_CONFIG",
"kerberos__service_name": "TEST_SERVICE_NAME",
"kerberos__mutual_authentication": "TEST_MUTUAL_AUTHENTICATION",
"kerberos__force_preemptive": True,
"kerberos__hostname_override": "TEST_HOSTNAME_OVERRIDE",
"kerberos__sanitize_mutual_error_response": True,
"kerberos__principal": "TEST_PRINCIPAL",
"kerberos__delegate": "TEST_DELEGATE",
"kerberos__ca_bundle": "TEST_CA_BUNDLE",
}
),
)
conn = PrestoHook().get_conn()
mock_connect.assert_called_once_with(
catalog="hive",
host="host",
port=None,
http_headers=mock.ANY,
http_scheme="http",
schema="hive",
source="airflow",
user="login",
isolation_level=0,
auth=mock_auth.return_value,
)
mock_auth.assert_called_once_with(
ca_bundle="TEST_CA_BUNDLE",
config="TEST_KERBEROS_CONFIG",
delegate="TEST_DELEGATE",
force_preemptive=True,
hostname_override="TEST_HOSTNAME_OVERRIDE",
mutual_authentication="TEST_MUTUAL_AUTHENTICATION",
principal="TEST_PRINCIPAL",
sanitize_mutual_error_response=True,
service_name="TEST_SERVICE_NAME",
)
assert mock_connect.return_value == conn
@patch("airflow.providers.presto.hooks.presto.generate_presto_client_info")
@patch("airflow.providers.presto.hooks.presto.prestodb.auth.BasicAuthentication")
@patch("airflow.providers.presto.hooks.presto.prestodb.dbapi.connect")
@patch("airflow.providers.presto.hooks.presto.PrestoHook.get_connection")
def test_http_headers(
self,
mock_get_connection,
mock_connect,
mock_basic_auth,
mocked_generate_airflow_presto_client_info_header,
):
mock_get_connection.return_value = Connection(
login="login", password="password", host="host", schema="hive"
)
date_key = "logical_date" if AIRFLOW_V_3_0_PLUS else "execution_date"
client = json.dumps(
{
"dag_id": "dag-id",
date_key: "2022-01-01T00:00:00",
"task_id": "task-id",
"try_number": "1",
"dag_run_id": "dag-run-id",
"dag_owner": "dag-owner",
},
sort_keys=True,
)
http_headers = {"X-Presto-Client-Info": client}
mocked_generate_airflow_presto_client_info_header.return_value = http_headers["X-Presto-Client-Info"]
conn = PrestoHook().get_conn()
mock_connect.assert_called_once_with(
catalog="hive",
host="host",
port=None,
http_headers=http_headers,
http_scheme="http",
schema="hive",
source="airflow",
user="login",
isolation_level=0,
auth=mock_basic_auth.return_value,
)
mock_basic_auth.assert_called_once_with("login", "password")
assert mock_connect.return_value == conn
@pytest.mark.parametrize(
("current_verify", "expected_verify"),
[
("False", False),
("false", False),
("True", True),
("true", True),
("/tmp/cert.crt", "/tmp/cert.crt"),
],
)
def test_get_conn_verify(self, current_verify, expected_verify):
patcher_connect = patch("airflow.providers.presto.hooks.presto.prestodb.dbapi.connect")
patcher_get_connections = patch("airflow.providers.presto.hooks.presto.PrestoHook.get_connection")
with patcher_connect as mock_connect, patcher_get_connections as mock_get_connection:
mock_get_connection.return_value = Connection(
login="login", host="host", schema="hive", extra=json.dumps({"verify": current_verify})
)
mock_verify = mock.PropertyMock()
type(mock_connect.return_value._http_session).verify = mock_verify
conn = PrestoHook().get_conn()
mock_verify.assert_called_once_with(expected_verify)
assert mock_connect.return_value == conn
| TestPrestoHookConn |
python | django__django | django/contrib/gis/geos/base.py | {
"start": 106,
"end": 181
} | class ____(CPointerBase):
null_ptr_exception_class = GEOSException
| GEOSBase |
python | fastai__fastai | fastai/learner.py | {
"start": 22507,
"end": 23140
} | class ____(Metric):
"Average the values of `func` taking into account potential different batch sizes"
def __init__(self, func): self.func = func
def reset(self): self.total,self.count = 0.,0
def accumulate(self, learn):
bs = find_bs(learn.yb)
self.total += learn.to_detach(self.func(learn.pred, *learn.yb))*bs
self.count += bs
@property
def value(self): return self.total/self.count if self.count != 0 else None
@property
def name(self): return self.func.func.__name__ if hasattr(self.func, 'func') else self.func.__name__
# %% ../nbs/13a_learner.ipynb 122
| AvgMetric |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataform.py | {
"start": 46184,
"end": 49180
} | class ____(GoogleCloudBaseOperator):
"""
Install NPM dependencies in the provided workspace.
Requires "package.json" to be created in the workspace.
:param project_id: Required. The ID of the Google Cloud project where workspace located.
:param region: Required. The ID of the Google Cloud region where workspace located.
:param repository_id: Required. The ID of the Dataform repository where workspace located.
:param workspace_id: Required. The ID of the Dataform workspace.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"region",
"repository_id",
"workspace_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.workspace_id = workspace_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
response = hook.install_npm_packages(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workspace_id=self.workspace_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return InstallNpmPackagesResponse.to_dict(response)
| DataformInstallNpmPackagesOperator |
python | pypa__pip | src/pip/_vendor/urllib3/exceptions.py | {
"start": 146,
"end": 232
} | class ____(Exception):
"""Base exception used by this module."""
pass
| HTTPError |
python | pyparsing__pyparsing | examples/simpleBool.py | {
"start": 1685,
"end": 3148
} | class ____(BoolBinOp):
repr_symbol = "|"
eval_fn = any
# define keywords and simple infix notation grammar for boolean
# expressions
TRUE = Keyword("True")
FALSE = Keyword("False")
NOT = Keyword("not")
AND = Keyword("and")
OR = Keyword("or")
boolOperand = TRUE | FALSE | Word(alphas, max=1)
boolOperand.set_parse_action(BoolOperand).set_name("bool_operand")
# define expression, based on expression operand and
# list of operations in precedence order
boolExpr = infix_notation(
boolOperand,
[
(NOT, 1, OpAssoc.RIGHT, BoolNot),
(AND, 2, OpAssoc.LEFT, BoolAnd),
(OR, 2, OpAssoc.LEFT, BoolOr),
],
).set_name("boolean_expression")
if __name__ == "__main__":
p = True
q = False
r = True
tests = [
("p", True),
("q", False),
("p and q", False),
("p and not q", True),
("not not p", True),
("not(p and q)", True),
("q or not p and r", False),
("q or not p or not r", False),
("q or not (p and r)", False),
("p or q or r", True),
("p or q or r and False", True),
("(p or q or r) and False", False),
]
print("p =", p)
print("q =", q)
print("r =", r)
print()
for test_string, expected in tests:
res = boolExpr.parse_string(test_string)[0]
success = "PASS" if bool(res) == expected else "FAIL"
print(test_string, "\n", res, "=", bool(res), "\n", success, "\n")
| BoolOr |
python | pytorch__pytorch | test/jit/test_cuda.py | {
"start": 1078,
"end": 27908
} | class ____(JitTestCase):
"""
A suite of tests for the CUDA API in TorchScript.
"""
def tearDown(self):
gc.collect()
torch.cuda.empty_cache()
super().tearDown()
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_cuda_synchronize(self):
# Test device synchronization.
@torch.jit.script
def test_device_synchronize():
prev_current_device_index = torch.cuda.current_device()
torch.cuda.synchronize()
torch.cuda.synchronize("cuda")
torch.cuda.synchronize("cuda:0")
torch.cuda.synchronize(0)
torch.cuda.synchronize(torch.device("cuda:1"))
after_current_device_index = torch.cuda.current_device()
# Check if the current device index is same as the device index before
# synchronizing the device.
return prev_current_device_index == after_current_device_index
@torch.jit.script
def test_multi_device_synchronize():
torch.cuda.synchronize(torch.device("cuda:0"))
prev_current_device_index = torch.cuda.current_device()
torch.cuda.synchronize(1)
after_current_device_index = torch.cuda.current_device()
# Check if the current device index is same as the device index before
# synchronizing the device.
return prev_current_device_index == after_current_device_index
self.assertTrue(test_device_synchronize)
FileCheck().check("cuda::synchronize(").run(test_device_synchronize.graph)
self.assertTrue(test_multi_device_synchronize)
FileCheck().check("cuda::synchronize(").run(test_multi_device_synchronize.graph)
def test_stream_args(self):
# Test stream creation with default arguments
@torch.jit.script
def stream_default_args() -> bool:
s = torch.cuda.Stream()
return s.device_index() == torch.cuda.current_device()
@torch.jit.script
def stream_default_args_for_device() -> bool:
s = torch.cuda.Stream(priority=0)
return s.device_index() == torch.cuda.current_device()
@torch.jit.script
def stream_default_args_for_priority() -> bool:
d = torch.device("cuda:1")
s = torch.cuda.Stream(d)
return s.device_index() == 1
@torch.jit.script
def stream_args_all() -> bool:
d = torch.device("cuda:0")
s = torch.cuda.Stream(d, 0)
return s.device_index() == 0
self.assertTrue(stream_default_args)
self.assertTrue(stream_default_args_for_device)
self.assertTrue(stream_default_args_for_priority)
self.assertTrue(stream_args_all)
def test_event_args(self):
# Test Event creation with default arguments
@torch.jit.script
def event_default_args() -> bool:
e = torch.cuda.Event()
return e is not None
self.assertTrue(event_default_args)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_current_stream(self):
# Test current stream on the device and check if the stream device index
# matches with the device ID
@torch.jit.script
def fn():
device_index = torch.cuda.current_device()
device = torch.device("cuda:" + str(device_index))
s0 = torch.cuda.current_stream(device)
s1 = torch.cuda.current_stream(torch.device("cuda:1"))
s2 = torch.cuda.current_stream(torch.device("cuda:0"))
return s0.device_index(), s1.device_index(), s2.device_index()
d0, d1, d2 = fn()
# By default, the current device ID is 0.
self.assertEqual(0, d0)
self.assertEqual(1, d1)
self.assertEqual(0, d2)
self.assertEqual(d0, d2)
# Test current_stream API by passing device ID as an argument and
# and check if the stream device index matches with the device ID
@torch.jit.script
def fn_with_device_index_args():
device_index = torch.cuda.current_device()
s0 = torch.cuda.current_stream(device_index)
s1 = torch.cuda.current_stream(1)
s2 = torch.cuda.current_stream(0)
return s0.device_index(), s1.device_index(), s2.device_index()
d0, d1, d2 = fn_with_device_index_args()
# By default, the current device ID is 0.
self.assertEqual(0, d0)
self.assertEqual(1, d1)
self.assertEqual(0, d2)
self.assertEqual(d0, d2)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
@skipCUDANonDefaultStreamIf(True)
def test_streams_and_events(self):
# Test default_stream API by passing device ID as an argument and
# and check if the stream device index matches with the device ID
@torch.jit.script
def test_default_streams_with_device_index_args():
s0 = torch.cuda.default_stream(0)
s1 = torch.cuda.default_stream(1)
return s0.device_index(), s1.device_index()
d0, d1 = test_default_streams_with_device_index_args()
self.assertEqual(d0, 0)
self.assertEqual(d1, 1)
# This test checks for the default stream ID is set to 0 on the device
@torch.jit.script
def test_default_streams():
s0 = torch.cuda.default_stream(torch.device("cuda:0"))
s1 = torch.cuda.default_stream(torch.device("cuda:1"))
d = torch.device("cuda:1")
# Check the current stream id and default id are same
# on the current device. The current device id by default is 0
s2 = torch.cuda.current_stream(torch.device("cuda:0"))
check_s2 = s2.id() == s0.id()
check_d0 = torch.cuda.current_device() == s2.device_index()
# Set the current device to d1 and check if the stream
# has been set to the default stream on d1
with torch.cuda.device(d):
s3 = torch.cuda.current_stream(d)
check_s3 = s3.id() == s1.id()
check_d1 = torch.cuda.current_device() == s3.device_index()
# Check if the current device was reset to 0
is_device_d0 = torch.cuda.current_device() == s2.device_index()
return (
s0.device_index(),
s1.device_index(),
check_s2,
check_s3,
check_d0,
check_d1,
is_device_d0,
)
(
d0,
d1,
check_s2,
check_s3,
check_d0,
check_d1,
is_device_d0,
) = test_default_streams()
self.assertEqual(d0, 0)
self.assertEqual(d1, 1)
self.assertTrue(check_s2)
self.assertTrue(check_s3)
self.assertTrue(check_d0)
self.assertTrue(check_d1)
self.assertTrue(is_device_d0)
# This test checks if the Stream Context manager is a no op
# when the stream is none for `with torch.cuda.stream`
@torch.jit.script
def test_set_none_stream():
device_index = torch.cuda.current_device()
device = torch.device("cuda:" + str(device_index))
current_stream = torch.cuda.current_stream(device)
default_stream = torch.cuda.default_stream(device)
# When stream is none, check if this operation is a no-op
with torch.cuda.stream(None):
cur_device_index = torch.cuda.current_device()
is_device_index_same = cur_device_index == device_index
is_current_stream_same = (
torch.cuda.current_stream(device).id() == current_stream.id()
)
is_default_stream_same = (
torch.cuda.default_stream(device).id() == default_stream.id()
)
# Check if the device index, current stream and default streams have not changed
are_streams_same = (
is_device_index_same
and is_current_stream_same
and is_default_stream_same
)
return are_streams_same
self.assertTrue(test_set_none_stream())
# This test checks if the Device Context manager is a no op
# when the device is none for `with torch.cuda.device`
@torch.jit.script
def test_set_device_none():
device_index = torch.cuda.current_device()
# When device is none, check if this operation is a no-op
with torch.cuda.device(None):
# Check if the current device is the same
is_device_same = torch.cuda.current_device() == device_index
return is_device_same
self.assertTrue(test_set_device_none())
# Check if a CUDA JIT stream is created
# on the current_device
@torch.jit.script
def test_simple_stream():
device_index = torch.cuda.current_device()
s = torch.cuda.Stream()
return device_index == s.device_index()
self.assertTrue(test_simple_stream(), "Could not create Stream!")
# Class used to store results for the test: test_get_stream.
class Result(NamedTuple):
t1: torch.Tensor
t2: torch.Tensor
is_current_and_default_stream_same: bool
is_default_and_user_stream_not_same: bool
is_stream_set: bool
is_stream_reset: bool
default_stream_query: bool
default_stream_id: int
user_stream_id: int
# The test aims at checking different stream properties.
@torch.jit.script
def test_get_stream():
device_index = torch.cuda.current_device()
device = torch.device("cuda:" + str(device_index))
current_stream = torch.cuda.current_stream(device)
default_stream = torch.cuda.default_stream(device)
user_stream = torch.cuda.Stream()
# Check if the current and default streams are the same on the device
is_current_and_default_stream_same = (
current_stream.id() == default_stream.id()
)
# Check if user stream and default stream are not the same on the device
is_default_and_user_stream_not_same = (
default_stream.id() != user_stream.id()
)
with torch.cuda.stream(user_stream):
is_stream_set = (
torch.cuda.current_stream(device).id() == user_stream.id()
)
# Check if the stream was reset to current_stream
is_stream_reset = (
torch.cuda.current_stream(device).id() == current_stream.id()
)
tensor1 = torch.rand(10000, 10000, device="cuda")
tensor2 = torch.mm(tensor1, tensor1).to("cuda")
default_stream.synchronize()
default_stream_query = default_stream.query()
# Capture all the results in the class Result
res = Result(
tensor1,
tensor2,
is_current_and_default_stream_same,
is_default_and_user_stream_not_same,
is_stream_set,
is_stream_reset,
default_stream_query,
default_stream.id(),
user_stream.id(),
)
return res
result = test_get_stream()
self.assertEqual(torch.matmul(result.t1, result.t1), result.t2)
self.assertTrue(result.is_current_and_default_stream_same)
self.assertTrue(result.is_default_and_user_stream_not_same)
self.assertTrue(result.is_stream_set)
self.assertTrue(result.is_stream_reset)
self.assertTrue(result.default_stream_query)
self.assertEqual(
result.default_stream_id, 0
) # Check if the default stream ID is always 0
self.assertNotEqual(
result.user_stream_id, 0
) # Check if the user stream is always non zero
# Test the stream context manager. This test checks if the stream is switched
# to the user stream on using the stream context manager.
@torch.jit.script
def test_stream_context():
device_index = torch.cuda.current_device()
device = torch.device("cuda:" + str(device_index))
current_stream = torch.cuda.current_stream(device)
user_stream = torch.cuda.Stream()
A = torch.rand(1000, 1000, device="cuda")
with torch.cuda.stream(user_stream):
check = torch.cuda.current_stream(device).id() == user_stream.id()
B = torch.mm(A, A).to("cuda")
# Wait for B to be computed
user_stream.synchronize()
# Check if the stream has been reset on the current device
is_stream_reset = (
torch.cuda.current_stream(device).id() == current_stream.id()
)
return A, B, check, is_stream_reset
A, B, is_stream_set, is_stream_reset = test_stream_context()
self.assertEqual(torch.matmul(A, A), B)
self.assertTrue(
is_stream_set, "Error: Current stream was not set to user stream!"
)
self.assertTrue(
is_stream_reset, "Error: The stream was not restored to previous stream!"
)
# Test multiple nested streams. Check if the operations are computed as expected on the streams
# This test has been adapted from the eager mode tests available at test/test_cuda.py
@torch.jit.script
def test_multiple_stream():
prev_device_index = torch.cuda.current_device()
device = torch.device("cuda:" + str(prev_device_index))
prev_current_stream = torch.cuda.current_stream(device)
d1 = torch.device("cuda:0")
d2 = torch.device("cuda:1")
s1 = torch.cuda.Stream(d1, 0)
s2 = torch.cuda.Stream(d2, 0)
A = torch.rand(1000, 1000, device="cuda")
B = torch.rand(1000, 1000, device="cuda")
with torch.cuda.stream(s1):
C = torch.mm(A, A).to("cuda")
# Check if the stream and device have been set to s1
is_stream_s1 = torch.cuda.current_stream(d1).id() == s1.id()
is_device_s1 = torch.cuda.current_device() == s1.device_index()
with torch.cuda.stream(s2):
# Check if the stream and device have been set to s2
is_stream_s2 = torch.cuda.current_stream(d2).id() == s2.id()
is_device_s2 = torch.cuda.current_device() == s2.device_index()
D = torch.mm(B, B).to("cuda")
# Check if the stream and device have been set to s1
is_stream_s1_after = torch.cuda.current_stream(d1).id() == s1.id()
is_device_s1_after = torch.cuda.current_device() == s1.device_index()
# Wait for D to be computed
s2.synchronize()
# Wait for C to be computed on S1
s1.synchronize()
# Check if the stream and device has been restored to previous stream and device
is_device_current = torch.cuda.current_device() == prev_device_index
is_stream_current = (
torch.cuda.current_stream(device).id() == prev_current_stream.id()
)
check_stream = (
is_stream_s1
and is_stream_s2
and is_stream_s1_after
and is_stream_current
)
check_device = (
is_device_s1
and is_device_s2
and is_device_s1_after
and is_device_current
)
return A, B, C, D, check_stream, check_device
A, B, C, D, check_stream, check_device = test_multiple_stream()
self.assertEqual(torch.matmul(A, A), C)
self.assertEqual(torch.matmul(B, B), D)
self.assertTrue(check_stream)
self.assertTrue(check_device)
# Test multiple streams waiting on each other for the operations to be completed.
@torch.jit.script
def test_data_dependency_between_streams():
device_index = torch.cuda.current_device()
device = torch.device("cuda:" + str(device_index))
prev_current_stream = torch.cuda.current_stream(device)
d = torch.device("cuda:0")
s1 = torch.cuda.Stream(d, 0)
s2 = torch.cuda.Stream(d, 0)
event = torch.cuda.Event(False, False, False)
A = torch.rand(1000, 1000, device="cuda")
with torch.cuda.stream(s1):
is_stream_s1 = torch.cuda.current_stream(device).id() == s1.id()
B = torch.mm(A, A).to("cuda")
s1.record_event(event)
# Check if the current_stream is reset
is_current_stream_1 = (
torch.cuda.current_stream(device).id() == prev_current_stream.id()
)
# Wait for ops on s1 to be computed
s2.wait_event(event)
with torch.cuda.stream(s2):
is_stream_s2 = torch.cuda.current_stream(device).id() == s2.id()
C = torch.mm(B, B).to("cuda")
# Wait for C to be computed
s2.synchronize()
# Check if the current_stream is reset
is_current_stream_2 = (
torch.cuda.current_stream(device).id() == prev_current_stream.id()
)
check_stream = (
is_current_stream_1
and is_current_stream_2
and is_stream_s1
and is_stream_s2
)
return A, B, C, check_stream
A, B, C, check_stream = test_data_dependency_between_streams()
self.assertEqual(torch.matmul(A, A), B)
self.assertEqual(torch.matmul(B, B), C)
self.assertTrue(check_stream)
# Test a simple CUDA event. Test if the CUDA event was created successfully
@torch.jit.script
def test_simple_event():
e = torch.cuda.Event(True, False, False)
return e is not None
self.assertTrue(test_simple_event(), "Could not create CUDA Event!")
# Record the CUDA event for operation torch.mm on the current stream
# and then test if the elapsed time is greater than 0. This test is also
# an adaption from eager mode CUDA tests available at test/test_cuda.py
@torch.jit.script
def test_event():
device_index = torch.cuda.current_device()
device = torch.device("cuda:" + str(device_index))
stream = torch.cuda.current_stream(device)
event = torch.cuda.Event(True, False, False)
is_true_event_query = event.query()
start_event = torch.cuda.Event(True, False, False)
stream.record_event(start_event)
tensor1 = torch.rand(1000000000, 1000000000, device="cuda")
tensor2 = torch.mm(tensor1, tensor1).to("cuda")
stream.record_event(event)
event.synchronize()
is_again_true_event_query = event.query()
if not (is_true_event_query and is_again_true_event_query):
return -1.0
return start_event.elapsed_time(event)
self.assertGreater(test_event(), 0)
# Check for stream synchronization , when a large tensor multiplication is
# computed on the stream. The stream.query should be true once the synchronization is done
@torch.jit.script
def test_stream_synchronize() -> float:
device_index = torch.cuda.current_device()
s = torch.cuda.Stream()
e_tik = torch.cuda.Event(True, False, False)
e_tok = torch.cuda.Event(True, False, False)
e_tik.record(s)
tensor1 = torch.rand(1000000000, 1000000000, device="cuda")
with torch.cuda.stream(s):
tensor2 = torch.mm(tensor1, tensor1).to("cuda")
s.synchronize()
e_tok.record(s)
e_tok.synchronize()
if not s.query():
return -1.0
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
self.assertGreater(test_stream_synchronize(), 0)
# Test event synchronization for the event that records a stream doing
# a large tensor multiplication. Check if the elapsed time is greater than 0
# and the stream.query evaluates to true.
@torch.jit.script
def test_event_synchronize() -> float:
s = torch.cuda.Stream()
e_tik = torch.cuda.Event(True, False, False)
e_tok = torch.cuda.Event(True, False, False)
e_tik.record(s)
tensor1 = torch.rand(1000000000, 1000000000, device="cuda")
with torch.cuda.stream(s):
tensor = torch.mm(tensor1, tensor1).to("cuda")
s.record_event(e_tok)
e_tok.synchronize()
s.synchronize()
if not s.query():
return -1.0
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
self.assertGreater(test_event_synchronize(), 0)
# Test for event wait. Check if event waits for the all the operations on
# the stream to be done. Check for synchronizations and query on the streams
# and events. This test is adapted from eager mode tests for CUDA. Please refer
# test/test_cuda.py
@torch.jit.script
def test_event_wait() -> float:
device_index = torch.cuda.current_device()
device = torch.device("cuda:" + str(device_index))
s0 = torch.cuda.current_stream(device)
s1 = torch.cuda.Stream()
e_tik = torch.cuda.Event(True, True, False)
e_tok = torch.cuda.Event(True, True, False)
e_tik.record(s0)
tensor1 = torch.rand(1000000000, 1000000000, device="cuda")
with torch.cuda.stream(s0):
tensor2 = torch.mm(tensor1, tensor1).cuda()
e_sync = torch.cuda.Event(True, False, False)
e_sync.record(torch.cuda.current_stream(device))
e_sync.wait(s1)
with torch.cuda.stream(s1):
tensor3 = torch.rand(1000000000, 1000000000, device="cuda")
tensor4 = torch.mm(tensor3, tensor3).cuda()
s1.synchronize()
e_tok.record(torch.cuda.current_stream(device))
e_tok.synchronize()
s0.synchronize()
if not s0.query() or not s1.query() or not e_sync.query():
return -1.0
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
self.assertGreater(test_event_wait(), 0)
# Test for stream wait_event. Checks if the stream waits on the event
@torch.jit.script
def test_wait_event():
d1 = torch.device("cuda:1")
with torch.cuda.device(d1):
s0 = torch.cuda.current_stream(d1)
tensor1 = torch.rand(1000000000, 1000000000, device="cuda")
tensor2 = torch.mm(tensor1, tensor1).to("cuda")
e0 = torch.cuda.Event(False, False, False)
s0.record_event(e0)
s1 = torch.cuda.current_stream(torch.device("cuda:0"))
s1.wait_event(e0)
s1.synchronize()
return e0.query() and s0.query() and s1.query()
self.assertTrue(test_wait_event())
# Test if a scripted module with cuda streams can be saved, loaded and executed
def test_save_load(self):
class Model(torch.nn.Module):
def forward(self):
s = torch.cuda.Stream()
a = torch.rand(3, 4, device="cuda")
b = torch.rand(3, 4, device="cuda")
with torch.cuda.stream(s):
is_stream_s = torch.cuda.current_stream(s.device).id() == s.id()
c = torch.cat((a, b), 0).cuda()
s.synchronize()
return is_stream_s, a, b, c
model = Model()
# Script the model and save
script_model = torch.jit.script(model)
is_stream_s, a, b, c = script_model()
# Verify if the output is correct
self.assertTrue(is_stream_s)
self.assertEqual(torch.cat((a, b), 0), c)
# Save and load scripted model
load_model = self.getExportImportCopy(script_model)
is_stream_s, a_load, b_load, c_load = load_model()
self.assertTrue(is_stream_s)
self.assertEqual(torch.cat((a_load, b_load), 0), c_load)
# Make sure that cuda._exchange_device doesn't get DCE'ed
@unittest.skipIf(not TEST_CUDA, "Cuda not available")
def test__exchange_device_op(self):
def fn(device: int, tensor):
torch.cuda._exchange_device(device)
return tensor.cos().relu()
fn_s = torch.jit.script(fn)
# Just check the graph, don't run it. Otherwise, we'd need to
# run this test on a multi-gpu CI runner, which is overkill.
g = fn_s.graph
FileCheck().check("cuda::_exchange_device(").run(g)
torch._C._jit_pass_inline(g)
FileCheck().check("cuda::_exchange_device(").run(g)
# Make sure that cuda._maybe_exchange_device doesn't get DCE'ed
@unittest.skipIf(not TEST_CUDA, "Cuda not available")
def test__maybe_exchange_device_op(self):
def fn(device: int, tensor):
torch.cuda._maybe_exchange_device(device)
return tensor.cos().relu()
fn_s = torch.jit.script(fn)
# Just check the graph, don't run it. Otherwise, we'd need to
# run this test on a multi-gpu CI runner, which is overkill.
g = fn_s.graph
FileCheck().check("cuda::_maybe_exchange_device(").run(g)
torch._C._jit_pass_inline(g)
FileCheck().check("cuda::_maybe_exchange_device(").run(g)
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestCUDA |
python | tiangolo__fastapi | tests/test_jsonable_encoder.py | {
"start": 467,
"end": 543
} | class ____:
def __init__(self, name: str):
self.name = name
| Person |
python | kamyu104__LeetCode-Solutions | Python/unique-morse-code-words.py | {
"start": 64,
"end": 600
} | class ____(object):
def uniqueMorseRepresentations(self, words):
"""
:type words: List[str]
:rtype: int
"""
MORSE = [".-", "-...", "-.-.", "-..", ".", "..-.", "--.",
"....", "..", ".---", "-.-", ".-..", "--", "-.",
"---", ".--.", "--.-", ".-.", "...", "-", "..-",
"...-", ".--", "-..-", "-.--", "--.."]
lookup = {"".join(MORSE[ord(c) - ord('a')] for c in word) \
for word in words}
return len(lookup)
| Solution |
python | Netflix__metaflow | metaflow/plugins/kubernetes/kubernetes_job.py | {
"start": 1736,
"end": 16313
} | class ____(object):
def __init__(self, client, **kwargs):
self._client = client
self._kwargs = kwargs
def create_job_spec(self):
client = self._client.get()
# tmpfs variables
use_tmpfs = self._kwargs["use_tmpfs"]
tmpfs_size = self._kwargs["tmpfs_size"]
tmpfs_enabled = use_tmpfs or (tmpfs_size and not use_tmpfs)
shared_memory = (
int(self._kwargs["shared_memory"])
if self._kwargs["shared_memory"]
else None
)
qos_requests, qos_limits = qos_requests_and_limits(
self._kwargs["qos"],
self._kwargs["cpu"],
self._kwargs["memory"],
self._kwargs["disk"],
)
security_context = self._kwargs.get("security_context", {})
_security_context = {}
if security_context is not None and len(security_context) > 0:
_security_context = {
"security_context": client.V1SecurityContext(**security_context)
}
return client.V1JobSpec(
# Retries are handled by Metaflow when it is responsible for
# executing the flow. The responsibility is moved to Kubernetes
# when Argo Workflows is responsible for the execution.
backoff_limit=self._kwargs.get("retries", 0),
completions=self._kwargs.get("completions", 1),
ttl_seconds_after_finished=7
* 60
* 60 # Remove job after a week. TODO: Make this configurable
* 24,
template=client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(
annotations=self._kwargs.get("annotations", {}),
labels=self._kwargs.get("labels", {}),
namespace=self._kwargs["namespace"],
),
spec=client.V1PodSpec(
# Timeout is set on the pod and not the job (important!)
active_deadline_seconds=self._kwargs["timeout_in_seconds"],
# TODO (savin): Enable affinities for GPU scheduling.
# affinity=?,
containers=[
client.V1Container(
command=self._kwargs["command"],
termination_message_policy="FallbackToLogsOnError",
ports=(
[]
if self._kwargs["port"] is None
else [
client.V1ContainerPort(
container_port=int(self._kwargs["port"])
)
]
),
env=[
client.V1EnvVar(name=k, value=str(v))
for k, v in self._kwargs.get(
"environment_variables", {}
).items()
]
# And some downward API magic. Add (key, value)
# pairs below to make pod metadata available
# within Kubernetes container.
+ [
client.V1EnvVar(
name=k,
value_from=client.V1EnvVarSource(
field_ref=client.V1ObjectFieldSelector(
field_path=str(v)
)
),
)
for k, v in {
"METAFLOW_KUBERNETES_NAMESPACE": "metadata.namespace",
"METAFLOW_KUBERNETES_POD_NAMESPACE": "metadata.namespace",
"METAFLOW_KUBERNETES_POD_NAME": "metadata.name",
"METAFLOW_KUBERNETES_POD_ID": "metadata.uid",
"METAFLOW_KUBERNETES_SERVICE_ACCOUNT_NAME": "spec.serviceAccountName",
"METAFLOW_KUBERNETES_NODE_IP": "status.hostIP",
}.items()
]
+ [
client.V1EnvVar(name=k, value=str(v))
for k, v in inject_tracing_vars({}).items()
],
env_from=[
client.V1EnvFromSource(
secret_ref=client.V1SecretEnvSource(
name=str(k),
# optional=True
)
)
for k in list(self._kwargs.get("secrets", []))
+ KUBERNETES_SECRETS.split(",")
if k
],
image=self._kwargs["image"],
image_pull_policy=self._kwargs["image_pull_policy"],
name=self._kwargs["step_name"].replace("_", "-"),
resources=client.V1ResourceRequirements(
requests=qos_requests,
limits={
**qos_limits,
**{
"%s.com/gpu".lower()
% self._kwargs["gpu_vendor"]: str(
self._kwargs["gpu"]
)
for k in [0]
# Don't set GPU limits if gpu isn't specified.
if self._kwargs["gpu"] is not None
},
},
),
volume_mounts=(
[
client.V1VolumeMount(
mount_path=self._kwargs.get("tmpfs_path"),
name="tmpfs-ephemeral-volume",
)
]
if tmpfs_enabled
else []
)
+ (
[
client.V1VolumeMount(
mount_path="/dev/shm", name="dhsm"
)
]
if shared_memory
else []
)
+ (
[
client.V1VolumeMount(mount_path=path, name=claim)
for claim, path in self._kwargs[
"persistent_volume_claims"
].items()
]
if self._kwargs["persistent_volume_claims"] is not None
else []
),
**_security_context,
)
],
node_selector=self._kwargs.get("node_selector"),
image_pull_secrets=[
client.V1LocalObjectReference(secret)
for secret in self._kwargs.get("image_pull_secrets") or []
],
# TODO (savin): Support preemption policies
# preemption_policy=?,
#
# A Container in a Pod may fail for a number of
# reasons, such as because the process in it exited
# with a non-zero exit code, or the Container was
# killed due to OOM etc. If this happens, fail the pod
# and let Metaflow handle the retries.
restart_policy="Never",
service_account_name=self._kwargs["service_account"],
# Terminate the container immediately on SIGTERM
termination_grace_period_seconds=0,
tolerations=[
client.V1Toleration(**toleration)
for toleration in self._kwargs.get("tolerations") or []
],
volumes=(
[
client.V1Volume(
name="tmpfs-ephemeral-volume",
empty_dir=client.V1EmptyDirVolumeSource(
medium="Memory",
# Add default unit as ours differs from Kubernetes default.
size_limit="{}Mi".format(tmpfs_size),
),
)
]
if tmpfs_enabled
else []
)
+ (
[
client.V1Volume(
name="dhsm",
empty_dir=client.V1EmptyDirVolumeSource(
medium="Memory",
size_limit="{}Mi".format(shared_memory),
),
)
]
if shared_memory
else []
)
+ (
[
client.V1Volume(
name=claim,
persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
claim_name=claim
),
)
for claim in self._kwargs["persistent_volume_claims"].keys()
]
if self._kwargs["persistent_volume_claims"] is not None
else []
),
),
),
)
def create(self):
# A discerning eye would notice and question the choice of using the
# V1Job construct over the V1Pod construct given that we don't rely much
# on any of the V1Job semantics. The major reasons at the moment are -
# 1. It makes the Kubernetes UIs (Octant, Lens) a bit easier on
# the eyes, although even that can be questioned.
# 2. AWS Step Functions, at the moment (Apr' 22) only supports
# executing Jobs and not Pods as part of it's publicly declared
# API. When we ship the AWS Step Functions integration with EKS,
# it will hopefully lessen our workload.
#
# Note: This implementation ensures that there is only one unique Pod
# (unique UID) per Metaflow task attempt.
client = self._client.get()
self._job = client.V1Job(
api_version="batch/v1",
kind="Job",
metadata=client.V1ObjectMeta(
# Annotations are for humans
annotations=self._kwargs.get("annotations", {}),
# While labels are for Kubernetes
labels=self._kwargs.get("labels", {}),
generate_name=self._kwargs["generate_name"],
namespace=self._kwargs["namespace"], # Defaults to `default`
),
spec=self.create_job_spec(),
)
return self
def execute(self):
client = self._client.get()
try:
# TODO: Make job submission back-pressure aware. Currently
# there doesn't seem to be a kubernetes-native way to
# achieve the guarantees that we are seeking.
# https://github.com/kubernetes/enhancements/issues/1040
# Hopefully, we will be able to get creative with kube-batch
response = (
client.BatchV1Api()
.create_namespaced_job(
body=self._job, namespace=self._kwargs["namespace"]
)
.to_dict()
)
return RunningJob(
client=self._client,
name=response["metadata"]["name"],
uid=response["metadata"]["uid"],
namespace=response["metadata"]["namespace"],
)
except client.rest.ApiException as e:
raise KubernetesJobException(
"Unable to launch Kubernetes job.\n %s"
% (json.loads(e.body)["message"] if e.body is not None else e.reason)
)
def step_name(self, step_name):
self._kwargs["step_name"] = step_name
return self
def namespace(self, namespace):
self._kwargs["namespace"] = namespace
return self
def name(self, name):
self._kwargs["name"] = name
return self
def command(self, command):
self._kwargs["command"] = command
return self
def image(self, image):
self._kwargs["image"] = image
return self
def cpu(self, cpu):
self._kwargs["cpu"] = cpu
return self
def memory(self, mem):
self._kwargs["memory"] = mem
return self
def environment_variable(self, name, value):
# Never set to None
if value is None:
return self
self._kwargs["environment_variables"] = dict(
self._kwargs.get("environment_variables", {}), **{name: value}
)
return self
def label(self, name, value):
self._kwargs["labels"] = dict(self._kwargs.get("labels", {}), **{name: value})
return self
def annotation(self, name, value):
self._kwargs["annotations"] = dict(
self._kwargs.get("annotations", {}), **{name: value}
)
return self
| KubernetesJob |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/zs_responses/records/users_records_builder.py | {
"start": 196,
"end": 481
} | class ____(ZendeskSupportRecordBuilder):
@classmethod
def record(cls) -> "UsersRecordBuilder":
record_template = cls.extract_record("users", __file__, NestedPath(["users", 0]))
return cls(record_template, FieldPath("id"), FieldPath("updated_at"))
| UsersRecordBuilder |
python | allegroai__clearml | clearml/utilities/gpu/pynvml.py | {
"start": 165884,
"end": 166328
} | class ____(_PrintableStructure):
_fields_ = [
("clusterUuid", c_char * NVML_DEVICE_UUID_BUFFER_SIZE),
("status", _nvmlReturn_t),
("partitionId", c_uint32),
("state", _nvmlGpuFabricState_t)
]
def nvmlDeviceGetGpuFabricInfo(device, gpuFabricInfo):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuFabricInfo")
ret = fn(device, gpuFabricInfo)
_nvmlCheckReturn(ret)
return ret | c_nvmlGpuFabricInfo_t |
python | openai__openai-python | src/openai/resources/fine_tuning/checkpoints/checkpoints.py | {
"start": 3284,
"end": 3606
} | class ____:
def __init__(self, checkpoints: AsyncCheckpoints) -> None:
self._checkpoints = checkpoints
@cached_property
def permissions(self) -> AsyncPermissionsWithStreamingResponse:
return AsyncPermissionsWithStreamingResponse(self._checkpoints.permissions)
| AsyncCheckpointsWithStreamingResponse |
python | huggingface__transformers | src/transformers/models/align/modeling_align.py | {
"start": 2875,
"end": 6689
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Contrastive loss for image-text similarity.
logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
similarity scores.
logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
similarity scores.
text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
The text embeddings obtained by applying the projection layer to the pooled output of [`AlignTextModel`].
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
The output of [`AlignVisionModel`].
text_model_output (`BaseModelOutputWithPooling`):
The output of the [`AlignTextModel`].
vision_model_output (`BaseModelOutputWithPoolingAndNoAttention`):
The output of the [`AlignVisionModel`].
"""
loss: Optional[torch.FloatTensor] = None
logits_per_image: Optional[torch.FloatTensor] = None
logits_per_text: Optional[torch.FloatTensor] = None
text_embeds: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
text_model_output: BaseModelOutputWithPooling = None
vision_model_output: BaseModelOutputWithPoolingAndNoAttention = None
def to_tuple(self) -> tuple[Any]:
return tuple(
self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
for k in self.keys()
)
# contrastive loss function, adapted from
# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device), label_smoothing=0.1)
def align_loss(similarity: torch.Tensor) -> torch.Tensor:
caption_loss = contrastive_loss(similarity)
image_loss = contrastive_loss(similarity.t())
return (caption_loss + image_loss) / 2.0
# Copied from transformers.models.efficientnet.modeling_efficientnet.round_filters with EfficientNet->AlignVision
def round_filters(config: AlignVisionConfig, num_channels: int):
r"""
Round number of filters based on depth multiplier.
"""
divisor = config.depth_divisor
num_channels *= config.width_coefficient
new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_dim < 0.9 * num_channels:
new_dim += divisor
return int(new_dim)
# Copied from transformers.models.efficientnet.modeling_efficientnet.correct_pad
def correct_pad(kernel_size: Union[int, tuple], adjust: bool = True):
r"""
Utility function to get the tuple padding value for the depthwise convolution.
Args:
kernel_size (`int` or `tuple`):
Kernel size of the convolution layers.
adjust (`bool`, *optional*, defaults to `True`):
Adjusts padding value to apply to right and bottom sides of the input.
"""
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
correct = (kernel_size[0] // 2, kernel_size[1] // 2)
if adjust:
return (correct[1] - 1, correct[1], correct[0] - 1, correct[0])
else:
return (correct[1], correct[1], correct[0], correct[0])
# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetEmbeddings with EfficientNet->AlignVision
| AlignOutput |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/random_contrast.py | {
"start": 296,
"end": 5474
class ____(BaseImagePreprocessingLayer):
    """A preprocessing layer which randomly adjusts contrast during training.

    This layer will randomly adjust the contrast of an image or images
    by a random factor. Contrast is adjusted independently
    for each channel of each image during training.

    For each channel, this layer computes the mean of the image pixels in the
    channel and then adjusts each component `x` of each pixel to
    `(x - mean) * contrast_factor + mean`.

    Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
    in integer or floating point dtype.
    By default, the layer will output floats.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Input shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format.

    Output shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format.

    Args:
        factor: a positive float represented as fraction of value, or a tuple of
            size 2 representing lower and upper bound.
            When represented as a single float, lower = upper.
            The contrast factor will be randomly picked between
            `[1.0 - lower, 1.0 + upper]`. For any pixel x in the channel,
            the output will be `(x - mean) * factor + mean`
            where `mean` is the mean value of the channel.
        value_range: the range of values the incoming images will have.
            Represented as a two-number tuple written `[low, high]`. This is
            typically either `[0, 1]` or `[0, 255]` depending on how your
            preprocessing pipeline is set up.
        seed: Integer. Used to create a random seed.
    """

    # Valid (lower, upper) range for `factor`, enforced by `_set_factor`.
    _FACTOR_BOUNDS = (0, 1)

    def __init__(self, factor, value_range=(0, 255), seed=None, **kwargs):
        super().__init__(**kwargs)
        self._set_factor(factor)
        self.value_range = value_range
        self.seed = seed
        self.generator = SeedGenerator(seed)

    def get_random_transformation(self, data, training=True, seed=None):
        """Samples a contrast factor per image (or one factor for rank-3 input).

        Returns a dict with key `"contrast_factor"`; at inference time the
        factor is all-zeros (and `transform_images` ignores it anyway).
        """
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        images_shape = self.backend.shape(images)
        rank = len(images_shape)
        if rank == 3:
            factor_shape = (1, 1, 1)
        elif rank == 4:
            # Keep only the batch dim. This ensures the same adjustment
            # within one image, but a different adjustment across images.
            factor_shape = [images_shape[0], 1, 1, 1]
        else:
            raise ValueError(
                "Expected the input image to be rank 3 or 4. Received "
                f"inputs.shape={images_shape}"
            )
        if not training:
            return {"contrast_factor": self.backend.numpy.zeros(factor_shape)}
        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)
        factor = self.backend.random.uniform(
            shape=factor_shape,
            minval=1.0 - self.factor[0],
            maxval=1.0 + self.factor[1],
            seed=seed,
            dtype=self.compute_dtype,
        )
        return {"contrast_factor": factor}

    def transform_images(self, images, transformation, training=True):
        """Applies the sampled contrast factor and clips to `value_range`."""
        if training:
            contrast_factor = transformation["contrast_factor"]
            outputs = self._adjust_constrast(images, contrast_factor)
            outputs = self.backend.numpy.clip(
                outputs, self.value_range[0], self.value_range[1]
            )
            # Bug fix: the reshape result was previously discarded (a no-op
            # statement). Assign it so the output explicitly keeps the input's
            # shape.
            outputs = self.backend.numpy.reshape(
                outputs, self.backend.shape(images)
            )
            return outputs
        return images

    def transform_labels(self, labels, transformation, training=True):
        # Contrast changes pixel values only; labels pass through unchanged.
        return labels

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        # Geometry is untouched, so bounding boxes pass through unchanged.
        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        # Masks are label maps, not photometric data; pass through unchanged.
        return segmentation_masks

    def _adjust_constrast(self, inputs, contrast_factor):
        """Returns `(x - mean) * factor + mean` per channel.

        NOTE: the historical misspelling of the method name is kept for
        backward compatibility with any external callers/overrides.
        """
        if self.data_format == "channels_first":
            height_axis = -2
            width_axis = -1
        else:
            height_axis = -3
            width_axis = -2
        # reduce mean on height
        inp_mean = self.backend.numpy.mean(
            inputs, axis=height_axis, keepdims=True
        )
        # reduce mean on width
        inp_mean = self.backend.numpy.mean(
            inp_mean, axis=width_axis, keepdims=True
        )
        outputs = (inputs - inp_mean) * contrast_factor + inp_mean
        return outputs

    def compute_output_shape(self, input_shape):
        # Purely photometric transform: shape is preserved.
        return input_shape

    def get_config(self):
        config = {
            "factor": self.factor,
            "value_range": self.value_range,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return {**base_config, **config}
| RandomContrast |
python | sqlalchemy__sqlalchemy | test/engine/test_reflection.py | {
"start": 53815,
"end": 57978
} | class ____(fixtures.TablesTest):
    """Round-trip reflection tests for non-ASCII table/column/index names.

    Tables with accented and CJK identifiers are created (subject to backend
    capabilities) and then reflected back, verifying that `has_table`,
    `get_table_names`, and `get_indexes` all see the original names.
    """

    __sparse_driver_backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        """Defines test tables whose names depend on backend capabilities.

        Each entry is a ``(table_name, column_name, index_name)`` tuple; the
        chosen set is stashed on ``cls.names`` for the test methods to use.
        """
        # Pure-ASCII fallback: safe on every backend.
        no_multibyte_period = {("plain", "col_plain", "ix_plain")}
        # Names that break `has_table` on some drivers (see mssql note below).
        no_has_table = [
            (
                "no_has_table_1",
                "col_Unit\u00e9ble",
                "ix_Unit\u00e9ble",
            ),
            ("no_has_table_2", "col_\u6e2c\u8a66", "ix_\u6e2c\u8a66"),
        ]
        # All-lowercase variants for backends without case sensitivity.
        no_case_sensitivity = [
            (
                "\u6e2c\u8a66",
                "col_\u6e2c\u8a66",
                "ix_\u6e2c\u8a66",
            ),
            (
                "unit\u00e9ble",
                "col_unit\u00e9ble",
                "ix_unit\u00e9ble",
            ),
        ]
        # Full set: mixed-case accented plus CJK names.
        full = [
            (
                "Unit\u00e9ble",
                "col_Unit\u00e9ble",
                "ix_Unit\u00e9ble",
            ),
            (
                "\u6e2c\u8a66",
                "col_\u6e2c\u8a66",
                "ix_\u6e2c\u8a66",
            ),
        ]
        # as you can see, our options for this kind of thing
        # are really limited unless you're on PG or SQLite
        # forget about it on these backends
        if not testing.requires.unicode_ddl.enabled:
            names = no_multibyte_period
        # mysql can't handle casing usually
        elif (
            testing.against("mysql")
            and not testing.requires.mysql_fully_case_sensitive.enabled
        ):
            names = no_multibyte_period.union(no_case_sensitivity)
        # mssql + pyodbc + freetds can't compare multibyte names to
        # information_schema.tables.table_name
        elif testing.against("mssql"):
            names = no_multibyte_period.union(no_has_table)
        else:
            names = no_multibyte_period.union(full)
        for tname, cname, ixname in names:
            t = Table(
                tname,
                metadata,
                Column(
                    "id",
                    sa.Integer,
                    normalize_sequence(config, sa.Sequence(cname + "_id_seq")),
                    primary_key=True,
                ),
                Column(cname, Integer),
            )
            schema.Index(ixname, t.c[cname])
        cls.names = names

    @testing.requires.unicode_connections
    def test_has_table(self, connection):
        """`Inspector.has_table` must detect every created unicode name."""
        insp = inspect(connection)
        for tname, cname, ixname in self.names:
            assert insp.has_table(tname), "Can't detect name %s" % tname

    @testing.requires.unicode_connections
    def test_basic(self, connection):
        """Bulk reflection works under either NFC or NFD normalization."""
        # the 'convert_unicode' should not get in the way of the
        # reflection process.  reflect_table for oracle, postgresql
        # (others?) expect non-unicode strings in result sets/bind
        # params
        names = {rec[0] for rec in self.names}
        reflected = set(inspect(connection).get_table_names())
        if not names.issubset(reflected) and hasattr(unicodedata, "normalize"):
            # Python source files in the utf-8 coding seem to
            # normalize literals as NFC (and the above are
            # explicitly NFC).  Maybe this database normalizes NFD
            # on reflection.
            nfc = {unicodedata.normalize("NFC", n) for n in names}
            self.assert_(nfc == names)
            # Yep.  But still ensure that bulk reflection and
            # create/drop work with either normalization.
        r = MetaData()
        r.reflect(connection)
        r.drop_all(connection, checkfirst=False)
        r.create_all(connection, checkfirst=False)

    @testing.requires.unicode_connections
    def test_get_names(self, connection):
        """Reflected table names and index metadata match what was created."""
        inspector = inspect(connection)
        names = {tname: (cname, ixname) for tname, cname, ixname in self.names}
        for tname in inspector.get_table_names():
            assert tname in names
            eq_(
                [
                    (rec["name"], rec["column_names"][0])
                    for rec in inspector.get_indexes(tname)
                ],
                [(names[tname][1], names[tname][0])],
            )
| UnicodeReflectionTest |
python | django-import-export__django-import-export | tests/core/models.py | {
"start": 4983,
"end": 5427
class ____(models.Model):
    """A model which uses a UUID pk (issue 1274)."""

    # Primary key is a random UUID instead of an auto-increment integer.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(verbose_name="Book name", max_length=100)
    # Optional author link; deleting the author removes the book as well.
    author = models.ForeignKey(
        NamedAuthor,
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    categories = models.ManyToManyField(UUIDCategory, blank=True)

    def __str__(self):
        return f"{self.name}"
| UUIDBook |
python | django-extensions__django-extensions | tests/test_color.py | {
"start": 122,
"end": 664
class ____(SimpleTestCase):
    """Tests for the console-color style helpers."""

    def test_no_style(self):
        # The "no style" palette must return the text completely unchanged.
        with force_color_support:
            plain = "csv"
            result = color.no_style().MODULE_NAME(plain)
            self.assertEqual(plain, result)

    def test_color_style(self):
        # The color palette wraps the text in ANSI escape codes: the result
        # still contains the original text but is no longer equal to it.
        with force_color_support:
            word = "antigravity"
            colored = color.color_style().MODULE_NAME(word)
            self.assertIn(word, colored)
            self.assertNotEqual(word, colored)
| ColorTest |
python | scipy__scipy | scipy/linalg/tests/test_interpolative.py | {
"start": 2641,
"end": 8616
} | class ____:
    """Tests for `scipy.linalg.interpolative` (aliased as `pymatrixid`).

    The fixtures `A` (dense matrix), `L` (LinearOperator view of `A`), `eps`,
    `rank`, and `rng` are assumed to be provided elsewhere in this test
    module (pytest fixtures) — TODO confirm against the surrounding file.
    """

    @pytest.mark.parametrize(
        "rand,lin_op",
        [(False, False), (True, False), (True, True)])
    def test_real_id_fixed_precision(self, A, L, eps, rand, lin_op, rng):
        """ID at fixed precision reconstructs A to within `eps`."""
        # Test ID routines on a Hilbert matrix.
        A_or_L = A if not lin_op else L
        k, idx, proj = pymatrixid.interp_decomp(A_or_L, eps, rand=rand, rng=rng)
        B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
        assert_allclose(A, B, rtol=eps, atol=1e-08)

    @pytest.mark.parametrize(
        "rand,lin_op",
        [(False, False), (True, False), (True, True)])
    def test_real_id_fixed_rank(self, A, L, eps, rank, rand, lin_op, rng):
        """ID at fixed rank reconstructs A to within `eps`."""
        k = rank
        A_or_L = A if not lin_op else L
        idx, proj = pymatrixid.interp_decomp(A_or_L, k, rand=rand, rng=rng)
        B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
        assert_allclose(A, B, rtol=eps, atol=1e-08)

    @pytest.mark.parametrize("rand,lin_op", [(False, False)])
    def test_real_id_skel_and_interp_matrices(
            self, A, L, eps, rank, rand, lin_op, rng):
        """Skeleton matrix B and interpolation matrix P satisfy B @ P == A."""
        k = rank
        A_or_L = A if not lin_op else L
        idx, proj = pymatrixid.interp_decomp(A_or_L, k, rand=rand, rng=rng)
        P = pymatrixid.reconstruct_interp_matrix(idx, proj)
        B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
        assert_allclose(B, A[:, idx[:k]], rtol=eps, atol=1e-08)
        assert_allclose(B @ P, A, rtol=eps, atol=1e-08)

    @pytest.mark.parametrize(
        "rand,lin_op",
        [(False, False), (True, False), (True, True)])
    def test_svd_fixed_precision(self, A, L, eps, rand, lin_op, rng):
        """ID-based SVD at fixed precision reconstructs A."""
        A_or_L = A if not lin_op else L
        U, S, V = pymatrixid.svd(A_or_L, eps, rand=rand, rng=rng)
        B = U * S @ V.T.conj()
        assert_allclose(A, B, rtol=eps, atol=1e-08)

    @pytest.mark.parametrize(
        "rand,lin_op",
        [(False, False), (True, False), (True, True)])
    def test_svd_fixed_rank(self, A, L, eps, rank, rand, lin_op, rng):
        """ID-based SVD at fixed rank reconstructs A."""
        k = rank
        A_or_L = A if not lin_op else L
        U, S, V = pymatrixid.svd(A_or_L, k, rand=rand, rng=rng)
        B = U * S @ V.T.conj()
        assert_allclose(A, B, rtol=eps, atol=1e-08)

    def test_id_to_svd(self, A, eps, rank):
        """Converting an ID to an SVD reproduces A."""
        k = rank
        idx, proj = pymatrixid.interp_decomp(A, k, rand=False)
        U, S, V = pymatrixid.id_to_svd(A[:, idx[:k]], idx, proj)
        B = U * S @ V.T.conj()
        assert_allclose(A, B, rtol=eps, atol=1e-08)

    def test_estimate_spectral_norm(self, A, rng):
        """Estimated spectral norm matches the largest singular value."""
        s = svdvals(A)
        norm_2_est = pymatrixid.estimate_spectral_norm(A, rng=rng)
        assert_allclose(norm_2_est, s[0], rtol=1e-6, atol=1e-8)

    def test_estimate_spectral_norm_diff(self, A, rng):
        """Estimated norm of (A - B) matches svdvals of the difference."""
        B = A.copy()
        B[:, 0] *= 1.2
        s = svdvals(A - B)
        norm_2_est = pymatrixid.estimate_spectral_norm_diff(A, B, rng=rng)
        assert_allclose(norm_2_est, s[0], rtol=1e-6, atol=1e-8)

    def test_rank_estimates_array(self, A, rng):
        """Rank estimate for dense arrays brackets the true numerical rank."""
        B = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]], dtype=A.dtype)
        for M in [A, B]:
            rank_tol = 1e-9
            rank_np = np.linalg.matrix_rank(M, norm(M, 2) * rank_tol)
            rank_est = pymatrixid.estimate_rank(M, rank_tol, rng=rng)
            # The estimator may overshoot but never undershoots for arrays.
            assert_(rank_est >= rank_np)
            assert_(rank_est <= rank_np + 10)

    def test_rank_estimates_lin_op(self, A, rng):
        """Rank estimate through a LinearOperator stays near the true rank."""
        B = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]], dtype=A.dtype)
        for M in [A, B]:
            ML = aslinearoperator(M)
            rank_tol = 1e-9
            rank_np = np.linalg.matrix_rank(M, norm(M, 2) * rank_tol)
            rank_est = pymatrixid.estimate_rank(ML, rank_tol, rng=rng)
            # LinearOperator path is randomized, so allow slack both ways.
            assert_(rank_est >= rank_np - 4)
            assert_(rank_est <= rank_np + 4)

    def test_badcall(self):
        """float32 input is rejected (only float64/complex128 supported)."""
        A = hilbert(5).astype(np.float32)
        with assert_raises(ValueError):
            pymatrixid.interp_decomp(A, 1e-6, rand=False)

    def test_rank_too_large(self):
        # svd(array, k) should not segfault
        a = np.ones((4, 3))
        with assert_raises(ValueError):
            pymatrixid.svd(a, 4)

    def test_full_rank(self):
        """Full-rank input is handled in both fixed-precision and fixed-rank."""
        eps = 1.0e-12
        rng = np.random.default_rng(1234)
        # fixed precision
        A = rng.random((16, 8))
        k, idx, proj = pymatrixid.interp_decomp(A, eps)
        assert_equal(k, A.shape[1])
        P = pymatrixid.reconstruct_interp_matrix(idx, proj)
        B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
        assert_allclose(A, B @ P)
        # fixed rank
        idx, proj = pymatrixid.interp_decomp(A, k)
        P = pymatrixid.reconstruct_interp_matrix(idx, proj)
        B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
        assert_allclose(A, B @ P)

    @pytest.mark.parametrize("dtype", [np.float64, np.complex128])
    @pytest.mark.parametrize("rand", [True, False])
    @pytest.mark.parametrize("eps", [1, 0.1])
    def test_bug_9793(self, dtype, rand, eps):
        """Regression: interp_decomp must not mutate its input (gh-9793)."""
        A = np.array([[-1, -1, -1, 0, 0, 0],
                      [0, 0, 0, 1, 1, 1],
                      [1, 0, 0, 1, 0, 0],
                      [0, 1, 0, 0, 1, 0],
                      [0, 0, 1, 0, 0, 1]],
                     dtype=dtype, order="C")
        B = A.copy()
        interp_decomp(A.T, eps, rand=rand)
        assert_array_equal(A, B)

    def test_svd_aslinearoperator_shape_check(self):
        """Regression: svd of a LinearOperator returns correct shapes (gh-22451)."""
        # See gh-issue #22451
        rng = np.random.default_rng(1744580941832515)
        x = rng.uniform(size=[7, 5])
        xl = aslinearoperator(x)
        u, s, v = pymatrixid.svd(xl, 3)
        assert_equal(u.shape, (7, 3))
        assert_equal(s.shape, (3,))
        assert_equal(v.shape, (5, 3))
        x = rng.uniform(size=[4, 9])
        xl = aslinearoperator(x)
        u, s, v = pymatrixid.svd(xl, 2)
        assert_equal(u.shape, (4, 2))
        assert_equal(s.shape, (2,))
        assert_equal(v.shape, (9, 2))
| TestInterpolativeDecomposition |
python | tensorflow__tensorflow | tensorflow/python/distribute/failure_handling/failure_handling.py | {
"start": 15394,
"end": 56955
} | class ____(object):
# pylint: disable=line-too-long
"""Preemption and error handler for synchronous training.
Note: This API only supports use with
`tf.distribute.MultiWorkerMirroredStrategy` and `tf.distribute.TPUStrategy`.
A `PreemptionCheckpointHandler` coordinates all workers to save a checkpoint
upon receiving a preemption signal. It also helps disseminate application
error messages accurately among the cluster. When a
`PreemptionCheckpointHandler` object is created, it restores values from
the latest checkpoint file if any exists.
Right after the initialization, the object starts to watch out for termination
signal for any member in the cluster. If receiving a signal, the next time the
worker executes `PreemptionCheckpointHandler.run`, the
`PreemptionCheckpointHandler` will align all workers to save a checkpoint.
Then, if an `exit_fn` is configured via
`tf.distribute.experimental.TerminationConfig`, it will be invoked. Otherwise,
the process will simply exit and later the platform should restart it.
Note: We advise users of `tf.distribute.MultiWorkerMirroredStrategy` who
choose to configure their
own `exit_fn` in `tf.distribute.experimental.TerminationConfig` to include a
`sys.exit(CODE_OR_MESSAGE)` in the `exit_fn` so that after the restart, all
workers can initialize communication services correctly. For users of
`tf.distribute.TPUStrategy`, if they do not wish to do a cluster restart but
would like an in-process restart (i.e., keep the coordinator alive and re-do
the steps to connect to cluster, initialize TPU system, and make the
`TPUStrategy` object), they could configure the `exit_fn` to a no-op.
For users of `tf.distribute.MultiWorkerMirroredStrategy`, the core API is
`PreemptionCheckpointHandler.run`:
```python
strategy = tf.distribute.MultiWorkerMirroredStrategy()
trained_epoch = tf.Variable(initial_value=tf.constant(0, dtype=tf.dtypes.int64), name='epoch')
step_in_epoch = tf.Variable(initial_value=tf.constant(0, dtype=tf.dtypes.int64), name='step_in_epoch')
with strategy.scope():
dataset, model, optimizer = ...
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
model=model,
trained_epoch=trained_epoch,
step_in_epoch=step_in_epoch)
preemption_checkpoint_handler = tf.distribute.experimental.PreemptionCheckpointHandler(cluster_resolver, checkpoint, checkpoint_dir)
while trained_epoch.numpy() < NUM_EPOCH:
while step_in_epoch.numpy() < STEPS_PER_EPOCH:
# distributed_train_function contains a call to strategy.run.
loss += preemption_checkpoint_handler.run(distributed_train_function, args=(next(iterator),))
# For users of MultiWorkerMirroredStrategy, usually
# STEPS_PER_TRAIN_FUNCTION = 1.
step_in_epoch.assign_add(STEPS_PER_TRAIN_FUNCTION)
...
epoch.assign_add(1)
step_in_epoch.assign(0)
```
For users of `tf.distribute.TPUStrategy`, the core APIs are
`PreemptionCheckpointHandler.run` and
`PreemptionCheckpointHandler.watch_preemption_scope`:
```python
strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
# Rest of TPU init omitted, see documentation for TPUSTrategy.
with preemption_checkpoint_handler.watch_preemption_scope():
while trained_epoch.numpy() < NUM_EPOCH:
while step_in_epoch.numpy() < STEPS_PER_EPOCH:
# distributed_train_function contains a call to strategy.run.
loss += preemption_checkpoint_handler.run(distributed_train_function, args=(next(iterator),))
# For users of TPUStrategy, usually STEPS_PER_TRAIN_FUNCTION >> 1 since
# clustering multiple steps within a tf.function amortizes the overhead
# of launching a multi-device function on TPU Pod.
step_in_epoch.assign_add(STEPS_PER_TRAIN_FUNCTION)
...
epoch.assign_add(1)
step_in_epoch.assign(0)
```
Not all interruptions come with advance notice so that the
`PreemptionCheckpointHandler` can handle them, e.g., those caused by hardware
failure. For a user who saves checkpoints for these cases themselves outside
the `PreemptionCheckpointHandler`, if they are using a
`tf.train.CheckpointManager`, pass it as the
`checkpoint_or_checkpoint_manager` argument to the
`PreemptionCheckpointHandler`. If they do not have a
`tf.train.CheckpointManager` but are directly working with
`tf.train.Checkpoint`, we advise saving the checkpoints in the directory
that's passed as the `checkpoint_dir` argument. In this way, at the program
  beginning, `PreemptionCheckpointHandler` can restore the latest checkpoint
  from the directory, regardless of whether it was saved by the user themselves
  or by the `PreemptionCheckpointHandler` before the preemption happened.
**A note on the platform:**
`PreemptionCheckpointHandler` can only handle the kind of termination with
advance notice. For now, the API recognizes the termination signal for CPU,
GPU, and TPU on Google Borg and CPU and GPU on the Google Cloud Platform. In
these cases, `PreemptionCheckpointHandler` will automatically adopt the
correct preemption/maintenance notification detection mechanism. Users of
other platforms can configure a detection monitoring behavior through the
`tf.distribute.experimental.TerminationConfig`. Customization for the exit
behavior and grace period length could also be done here.
"""
# pylint: enable=line-too-long
  def __init__(self,
               cluster_resolver,
               checkpoint_or_checkpoint_manager,
               checkpoint_dir=None,
               termination_config=None):
    """Creates the `PreemptionCheckpointHandler`.

    Args:
      cluster_resolver: a `tf.distribute.cluster_resolver.ClusterResolver`
        object. You may also obtain it through the `cluster_resolver` attribute
        of the distribution strategy in use.
      checkpoint_or_checkpoint_manager: a `tf.train.CheckpointManager` or a
        `tf.train.Checkpoint`. If you are using a `tf.train.CheckpointManager`
        to manage checkpoints outside the `PreemptionCheckpointHandler` for
        backup purpose as well, pass it as `checkpoint_or_checkpoint_manager`
        argument. Otherwise, pass a `tf.train.Checkpoint` and the
        `PreemptionCheckpointHandler` will create
        a `tf.train.CheckpointManager` to manage it in the `checkpoint_dir`.
      checkpoint_dir: a directory where the `PreemptionCheckpointHandler` saves
        and restores checkpoints. When a `PreemptionCheckpointHandler` is
        created, the latest checkpoint in the `checkpoint_dir` will be restored.
        (This is not needed if a `tf.train.CheckpointManager` instead of a
        `tf.train.Checkpoint` is passed as the
        `checkpoint_or_checkpoint_manager` argument.)
      termination_config: optional, a
        `tf.distribute.experimental.TerminationConfig` object to configure for a
        platform other than Google Borg or GCP.
    """
    # TODO(wxinyi): Maybe make checkpoint_or_checkpoint_manager optional if
    # save_fn is passed. For now it's still useful for restore.
    # A bare Checkpoint has no directory of its own, so checkpoint_dir is
    # mandatory in that case.
    if isinstance(checkpoint_or_checkpoint_manager,
                  checkpoint_lib.Checkpoint) and not checkpoint_dir:
      raise errors.InvalidArgumentError('When a checkpoint is passed, a '
                                        'checkpoint_dir must be passed as well'
                                        '.')
    self._cluster_resolver = cluster_resolver
    self._termination_config = termination_config
    self._checkpoint_or_checkpoint_manager = checkpoint_or_checkpoint_manager
    self._checkpoint_dir = checkpoint_dir
    self._platform_device = failure_handling_util.detect_platform()

    # Fill in platform-specific defaults (watcher fn, exit fn, grace period,
    # save fn) for anything the user's TerminationConfig left unset.
    completed_termination_config = _complete_config_for_environment(
        self._platform_device, self._termination_config
    )
    self._termination_watcher_fn = (
        completed_termination_config.termination_watcher_fn
    )
    self._exit_fn = completed_termination_config.exit_fn
    self._grace_period = completed_termination_config.grace_period
    self._save_fn = completed_termination_config.save_fn

    self._local_mode = True
    if self._platform_device in (
        failure_handling_util.PlatformDevice.GCE_TPU,
        failure_handling_util.PlatformDevice.GCE_CPU,
    ):
      # While running MultiWorkerMirroredStrategy training with GPUs and CPUs
      # are the same on Borg, GCE CPU VM and GPU VM are different in terms
      # of live migration, grace period, etc. We can make it work upon request.
      logging.warning(
          'PreemptionCheckpointHandler does not support usage with '
          'TPU or CPU device on GCP.'
      )
    elif (
        self._platform_device
        == failure_handling_util.PlatformDevice.INTERNAL_TPU
    ):
      self._initialize_for_tpu_strategy()
    else:
      # MultiWorkerMirroredStrategy path; ParameterServerStrategy clusters
      # (those with a 'ps' job) are explicitly unsupported.
      if cluster_resolver and 'ps' in cluster_resolver.cluster_spec().as_dict():
        raise NotImplementedError(
            'PreemptionCheckpointHandler does not support'
            'usage with tf.distribute.experimental.ParameterServerStrategy.'
        )
      self._initialize_for_mirrored_and_multi_worker_mirrored()

    logging.info('PreemptionCheckpointHandler initialized or restored.')
def _initialize_for_tpu_strategy(self):
"""Makes configurations for using the handler with TPUStrategy."""
self._is_chief = True
self._poll_termination_signal_thread = None
self._cluster_wise_termination_watcher_thread = None
self._maybe_create_checkpoint_manager()
self._read_checkpoint_manager.restore_or_initialize()
self._run_counter = 0
  def _initialize_for_mirrored_and_multi_worker_mirrored(self):
    """Makes configurations and start watchers for MS, MWMS, or OneDevice."""
    if (
        not self._cluster_resolver
        or not self._cluster_resolver.cluster_spec().jobs
    ):
      # For MirroredStrategy, OneDeviceStrategy, and local-mode
      # MultiWorkerMirroredStrategy, an empty cluster spec is passed, and
      # coordination service is not enabled nor is it needed (since
      # it's used for cross-worker communication). Thus we will directly set
      # the worker id and is_chief properties and also skip the
      # uploading/reading from coordination service logic.
      self._local_mode = True
      self._id_in_cluster = 'single_worker'
      self._is_chief = True
    else:
      self._local_mode = False
      self._id_in_cluster = str(
          multi_worker_util.id_in_cluster(
              self._cluster_resolver.cluster_spec(),
              self._cluster_resolver.task_type,
              self._cluster_resolver.task_id))
      self._is_chief = multi_worker_util.is_chief(
          cluster_spec=self._cluster_resolver.cluster_spec(),
          task_type=self._cluster_resolver.task_type,
          task_id=self._cluster_resolver.task_id)
    # The number of calls to `PreemptionCheckpointHandler.run` when the latest
    # checkpoint was saved.
    self._checkpointed_runs = variables.Variable(
        initial_value=constant_op.constant(0, dtype=dtypes.int64),
        trainable=False,
        name=_ITERATION_VARIABLE)

    self._maybe_create_checkpoint_manager()

    # Attach the run-counter variable to the checkpoint objects (if not
    # already attached) so it is saved/restored alongside the model.
    if not hasattr(self._write_checkpoint_manager._checkpoint,  # pylint: disable=protected-access
                   _ITERATION_VARIABLE):
      setattr(self._write_checkpoint_manager._checkpoint, _ITERATION_VARIABLE,  # pylint: disable=protected-access
              self._checkpointed_runs)

    if not hasattr(self._read_checkpoint_manager._checkpoint,  # pylint: disable=protected-access
                   _ITERATION_VARIABLE):
      setattr(self._read_checkpoint_manager._checkpoint, _ITERATION_VARIABLE,  # pylint: disable=protected-access
              self._checkpointed_runs)

    self._read_checkpoint_manager.restore_or_initialize()

    # grace period countdown. Set to True for all workers once they finish
    # timing saving a checkpoint. Once entering this phase, new
    # preemption/maintenance notice will not be handled, since the whole cluster
    # goes down as the worker who first initiates the grace period goes down.
    self._final_checkpoint_countdown = False

    self._estimated_run_time = 0

    # An internal step counter that's restored to checkpointed_iterations when
    # training is restored. It increments by one every time
    # `PreemptionCheckpointHandler.run` is called. Note that in this case, the
    # user must pass a single-step training function to
    # `PreemptionCheckpointHandler.run` instead of a multiple-step one.
    self._run_counter = self._checkpointed_runs.numpy()

    # The worker itself has received a preemption signal.
    self._received_own_sigterm = threading.Event()

    # Some member (could be oneself) has received preemption signal, and the
    # step number to save a checkpoint has been aligned.
    self._received_checkpoint_step = threading.Event()

    distribute_lib.distribution_strategy_input_api_counter.get_cell(
        self._platform_device.name,
        'PreemptionCheckpointHandler').increase_by(1)

    if not self._local_mode:
      # When training is interrupted, we explicitly call the cleanup methods for
      # the thread watching for local worker's termination signal and the thread
      # watching for clusterwise information before we save a checkpoint and
      # exit. In the final chapter of the training where no interruption is
      # encountered, we rely on __del__ to clean up. However, there is no
      # guarantee when or whether __del__ is executed, thus we make the threads
      # daemon to avoid it preventing program from exit.
      self._cluster_wise_termination_watcher_thread = threading.Thread(
          target=self._watch_step_to_save_key,
          name='PeerTerminationWatcher-%s' % self._id_in_cluster,
          daemon=True)
      logging.info('Start watcher for peer\'s signal.')
      self._cluster_wise_termination_watcher_thread.start()
    else:
      self._cluster_wise_termination_watcher_thread = None

    self._poll_termination_signal_thread = None

    if self._termination_watcher_fn:
      self._start_polling_for_termination_signal()
    else:
      self._start_watching_for_signal()
def _maybe_create_checkpoint_manager(self):
"""Create CheckpointManager(s) if a checkpoint is passed else take it."""
if isinstance(self._checkpoint_or_checkpoint_manager,
checkpoint_management.CheckpointManager):
self._read_checkpoint_manager = self._checkpoint_or_checkpoint_manager
self._write_checkpoint_manager = self._checkpoint_or_checkpoint_manager
self._api_made_checkpoint_manager = False
else:
self._api_made_checkpoint_manager = True
# Make CheckpointManagers. MultiWorkerMirroredStrategy requires different
# setup on chief and on other workers.
self._read_checkpoint_manager = checkpoint_management.CheckpointManager(
self._checkpoint_or_checkpoint_manager,
directory=self._checkpoint_dir,
max_to_keep=1)
if self._is_chief:
self._write_checkpoint_manager = self._read_checkpoint_manager
else:
self._write_checkpoint_manager = (
checkpoint_management.CheckpointManager(
self._checkpoint_or_checkpoint_manager,
_non_chief_checkpoint_dir(self._checkpoint_dir,
self._cluster_resolver.task_id),
max_to_keep=1))
  def _start_watching_for_signal(self):
    """Installs a SIGTERM handler on this process (no polling thread)."""
    logging.info('Start watcher for local signal.')
    signal.signal(signal.SIGTERM, self._sigterm_handler_fn)
def _start_polling_for_termination_signal(self):
self._poll_termination_signal_thread_should_stop = threading.Event()
self._poll_termination_signal_thread = threading.Thread(
target=self._poll_termination_signal,
name='WorkerTerminationSignalWatcher-%s' % self._id_in_cluster,
daemon=True)
logging.info('Start polling for termination signal.')
self._poll_termination_signal_thread.start()
def _poll_termination_signal(self):
"""Poll maintenance notice and notify peers if receiving one."""
while True:
if self._poll_termination_signal_thread_should_stop.is_set(
) or self._final_checkpoint_countdown:
return
if self._termination_watcher_fn():
break
time.sleep(1)
self._maybe_set_received_own_sigterm()
def _maybe_set_received_own_sigterm(self):
"""Claim earliest preemption if no one else has done it before."""
if self._local_mode:
logging.info('Member %s has received termination notice.',
self._id_in_cluster)
self._received_own_sigterm_time = time.time()
self._received_own_sigterm.set()
return
try:
context.context().set_config_key_value(_PREEMPTION_WORKER_KEY,
self._id_in_cluster)
logging.info('Member %s has received termination notice.',
self._id_in_cluster)
self._received_own_sigterm_time = time.time()
self._received_own_sigterm.set()
# This is to handle the case that a worker has received termination
# notice but hasn't come to the next step to set the step key. Other
# workers might receive a termination notice too, and attempt to set the
# config key again, which causes this error. This can be safely ignored
# since checkpoint should be saved as early as the earliest call is made.
except errors.AlreadyExistsError:
logging.info(
(
'Member %s has received termination notice. But some other '
'worker has received it as well! Leaving'
' it to them to decide when to checkpoint. '
),
self._id_in_cluster,
)
return
def _stop_poll_termination_signal_thread(self):
if getattr(self, '_poll_termination_signal_thread', None):
self._poll_termination_signal_thread_should_stop.set()
self._poll_termination_signal_thread.join()
self._poll_termination_signal_thread = None
logging.info("Shut down watcher for one's own termination signal")
  def _stop_cluster_wise_termination_watcher_thread(self):
    """Stop the thread that is _watch_step_to_save_key."""
    if getattr(self, '_cluster_wise_termination_watcher_thread', None):
      # Publish sentinel values for both coordination-service keys so the
      # watcher thread's blocking reads return and it can exit.
      try:
        context.context().set_config_key_value(
            _INITIAL_RUN_COUNT_KEY, _STOP_WATCHING_CLUSTER_VALUE
        )
      except (errors.AlreadyExistsError, errors.UnavailableError):
        # We'll ignore any error in the process of setting this key. There
        # certainly will be a AlreadyExistError since all workers are trying to
        # push this key. Or some worker might have exited already, leading to a
        # errors.UnavailableError or errors.AbortedError.
        pass
      except Exception as e:  # pylint: disable=broad-except
        # We'll also ignore other errors since they are not important to the
        # process.
        logging.info('Ignoring error when shutting down '
                     '_stop_cluster_wise_termination_watcher_thread: ' + str(e))

      # NOTE: the `finally` below belongs to this second `try` only, so the
      # join always runs regardless of how setting the final-count key goes.
      try:
        context.context().set_config_key_value(_FINAL_RUN_COUNT_KEY,
                                               _STOP_WATCHING_CLUSTER_VALUE)
      except (errors.AlreadyExistsError, errors.UnavailableError):
        pass
      except Exception as e:  # pylint: disable=broad-except
        logging.info('Ignoring error when shutting down '
                     '_stop_cluster_wise_termination_watcher_thread: ' + str(e))
      finally:
        self._cluster_wise_termination_watcher_thread.join()
        self._cluster_wise_termination_watcher_thread = None
        logging.info('Shut down watcher for peer\'s termination signal.')
  def __del__(self):
    # Best-effort cleanup at garbage collection: stop both watcher threads.
    # They are created as daemon threads, so this is not required for process
    # exit, but it releases them earlier when the handler is collected.
    self._stop_cluster_wise_termination_watcher_thread()
    self._stop_poll_termination_signal_thread()
  @property
  @deprecated(None,
              'Track steps using a tf.Variable saved in checkpoint instead.')
  @doc_controls.do_not_generate_docs
  def total_run_calls(self):
    """Returns the number of times `PreemptionCheckpointHandler.run` is called.

    DEPRECATED: user should track total steps themselves, as this API provides
    little expressivity gain but could easily be misused and incurs extra
    synchronization cost for TPUStrategy users.

    This value tracks the number of all calls to
    `PreemptionCheckpointHandler.run` including those before the program is
    restarted and the training is restored, by saving and reading the value in
    the checkpoint. A user can compute their total number of iterations
    by `PreemptionCheckpointHandler.total_run_calls *
    number_of_steps_in_train_function`,
    while `number_of_steps_in_train_function` should be one for
    `tf.distribute.MultiWorkerMirroredStrategy` users. They can also use this
    value to infer the starting epoch and step after training restores, as shown
    in the example above.
    """
    if (self._platform_device ==
        failure_handling_util.PlatformDevice.INTERNAL_TPU):
      # On the TPU path the counter is not kept in sync with checkpoints, so
      # exposing it would be misleading.
      raise NotImplementedError('Please create variables saved in checkpoint '
                                'to keep track of steps and epochs.')
    return self._run_counter
  def run(self,
          distributed_train_function,
          *args,
          **kwargs):
    """Runs a training function with error and preemption handling.

    This function handles the preemption signal from any peer in the cluster by
    saving the training progress and exiting gracefully. It will
    also broadcast any program error encountered during the execution of
    `distributed_train_function` to all workers so that they can raise the same
    error.

    The `distributed_train_function` argument should be a distributed train
    function (i.e., containing a call to `tf.distribute.Strategy.run`). For
    `tf.distribute.MultiWorkerMirroredStrategy` users, we recommend passing in a
    single-step `distributed_train_function` to
    `PreemptionCheckpointHandler.run` so that the checkpoint can be saved in
    time in case a preemption signal or maintenance notice is sent.

    Besides the preemption and error handling part,
    `PreemptionCheckpointHandler.run(distributed_train_function, *args,
    **kwargs)` has the same effect and output as
    `distributed_train_function(*args, **kwargs)`. `distributed_train_function`
    can return either some or no result. The following is a shortened example:

    ```python
    @tf.function
    def distributed_train_step(iterator):
      # A distributed single-step training function.
      def step_fn(inputs):
        # A per-replica single-step training function.
        x, y = inputs
        ...
        return loss
      per_replica_losses = strategy.run(step_fn, args=(next(iterator),))
      return strategy.reduce(
          tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)

    for epoch in range(preemption_handler.total_run_calls // STEPS_PER_EPOCH,
                       EPOCHS_TO_RUN):
      iterator = iter(multi_worker_dataset)
      total_loss = 0.0
      num_batches = 0
      for step in range(preemption_handler.total_run_calls % STEPS_PER_EPOCH,
                        STEPS_PER_EPOCH):
        total_loss += preemption_handler.run(distributed_train_step)
        num_batches += 1
      train_loss = total_loss / num_batches
      print('Epoch: %d, train_loss: %f.' %(epoch.numpy(), train_loss))
      train_accuracy.reset_states()
    ```

    Args:
      distributed_train_function: A (single-step) distributed training function.
      *args: args for `distributed_train_function`.
      **kwargs: kwargs for `distributed_train_function`.

    Raises:
      Program error encountered by any member in the cluster while executing the
      `distributed_train_function`, or any error from the program error
      propagation process.

    Returns:
      Result of running the `distributed_train_function`.
    """
    # TODO(wxinyi): after we support use with TPUStrategy, we should expand the
    # API doc to state that `distributed_train_function` does not need to be a
    # single-step training function, since a multi-step host-training loop is
    # the dominant use case for TPU user. Besides, passing in a multi-step
    # `distributed_train_function` will require the user to track their own
    # training steps.
    if (
        self._platform_device
        == failure_handling_util.PlatformDevice.INTERNAL_TPU
    ):
      # TPU path: preemption is observed and handled by the coordinator.
      return self._run_for_tpu(distributed_train_function, *args, **kwargs)
    elif self._platform_device in (
        failure_handling_util.PlatformDevice.GCE_TPU,
        failure_handling_util.PlatformDevice.GCE_CPU,
    ):
      # Unsupported GCP device combinations (warned about in __init__):
      # no preemption handling, just run the training function directly.
      return distributed_train_function(*args, **kwargs)
    else:
      return self._run_for_multi_worker_mirrored(
          distributed_train_function, *args, **kwargs
      )
def _run_for_tpu(self, distributed_train_function, *args, **kwargs):
  """`PreemptionCheckpointHandler.run` implementation for TPUStrategy.

  Surfaces any pending preemption as an op error before delegating to the
  training function.
  """
  gen_check_preemption_op.check_preemption(preemption_key=PREEMPTION_KEY)
  return distributed_train_function(*args, **kwargs)
def _run_for_multi_worker_mirrored(
    self, distributed_train_function, *args, **kwargs
):
  """PreemptionCheckpointHandler.run implementation for MWMS.

  Checkpoints first if a preemption signal is pending, then runs the train
  function while maintaining a running average of per-call wall time (used
  to size the grace-period buffer). An `OpError` raised by the train
  function is reported to the whole cluster (unless running in local mode)
  and then re-raised.
  """
  try:
    # May save a checkpoint and exit here if a preemption signal was
    # received and this is the agreed step to save at.
    self._check_preemption_and_maybe_checkpoint()
    run_begin_time = time.time()
    result = distributed_train_function(*args, **kwargs)
    new_run_time = time.time() - run_begin_time
    self._run_counter += 1
    # Update the average run time with the new run (incremental mean).
    self._estimated_run_time = self._estimated_run_time + (
        new_run_time - self._estimated_run_time) / self._run_counter

  except errors.OpError as e:
    if not self._local_mode:
      logging.info('Propagating error to cluster: %r: %s', e, e)
      try:
        context.context().report_error_to_cluster(e.error_code, e.message)
      except Exception as ex:  # pylint: disable=broad-except
        # Best-effort propagation: never mask the original training error.
        logging.info('Ignoring error during error propagation: %r:%s', ex, ex)
    raise

  return result
# Disabling line-too-long check since we do not want to break the line when
# converted to public documentation.
# pylint: disable=line-too-long
def save_checkpoint_if_preempted(self, *args, **kwargs):
  """Saves a checkpoint if a preemption signal has been made available.

  This is an alternative API to the combination of
  `PreemptionCheckpointHandler.run` and
  `PreemptionCheckpointHandler.watch_preemption_scope`. It works for both
  `tf.distribute.MultiWorkerMirroredStrategy` and
  `tf.distribute.TPUStrategy`. However, **for TPUStrategy, this method will
  add a synchronization point between workers and the coordinator** and thus
  may have performance implication. If this is a concern, use the
  `watch_preemption_scope` + `run` combination instead.

  ```python
  strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
  # initialization omitted

  with strategy.scope():
    # Save in the checkpoint.
    trained_step = tf.Variable(initial_value=tf.constant(0, dtype=tf.dtypes.int64), name='trained_step', aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)

    checkpoint_manager = tf.train.CheckpointManager(checkpoint, directory, max_to_keep=1)
    preemption_handler = tf.distribute.experimental.PreemptionCheckpointHandler(cluster_resolver, checkpoint_manager)

  while trained_step.numpy() < NUM_STEPS:
    # Train STEPS_IN_FUNCTION steps at once.
    train_multi_step_function()
    trained_step.assign_add(STEPS_IN_FUNCTION)
    preemption_handler.save_checkpoint_if_preempted()
  ```

  Args:
    *args: args for `tf.train.CheckpointManager.save()` to save checkpoint.
    **kwargs: kwargs for `tf.train.CheckpointManager.save()` to save.
  """
  # pylint: enable=line-too-long
  platform = self._platform_device
  if platform == failure_handling_util.PlatformDevice.INTERNAL_TPU:
    try:
      with context.async_scope():
        gen_check_preemption_op.check_preemption(
            preemption_key=PREEMPTION_KEY)
    except errors.AbortedError as abort_error:
      preemption_payload = abort_error.experimental_payloads.get(
          b'type.googleapis.com/tensorflow.distributed_runtime.WorkerPreemption'
      )
      if not preemption_payload:
        raise
      logging.info('Clearing preemption error to save checkpoint...')
      context.async_clear_error()
      self._save_checkpoint(*args, **kwargs)
      # For TPU training, the default behavior is that it will block until
      # workers are down and returns with error.
      self._exit_fn()
  elif platform in (failure_handling_util.PlatformDevice.GCE_TPU,
                    failure_handling_util.PlatformDevice.GCE_CPU):
    # Unsupported platforms: nothing to do.
    return
  else:
    self._check_preemption_and_maybe_checkpoint(*args, **kwargs)
    self._run_counter += 1
    self._estimated_run_time = 0
@tf_contextlib.contextmanager
def watch_preemption_scope(self):
  """Syncs error and maybe save checkpoint for usage with TPUStrategy.

  Note: usage with `tf.distribute.MultiWorkerMirroredStrategy` does not need
  this API.

  Example usage:

  ```python
  with preemption_checkpoint_handler.watch_preemption_scope():
    while trained_step.numpy() < NUM_STEPS:
      # distributed_train_function contains a call to strategy.run.
      loss += preemption_checkpoint_handler.run(distributed_train_function, args=(next(iterator),))
      trained_step.assign_add(STEPS_PER_TRAIN_FUNCTION)
  ```

  In this workflow, `PreemptionCheckpointHandler.run` flags a received
  preemption signal, and this scope handles the signal by saving a
  checkpoint and then either exiting to restart or executing a user-passed
  `exit_fn` in `tf.distribute.experimental.TerminationConfig`. If no
  preemption signal is received inside the scope, exiting it ensures the
  completion of all async op and function execution and raises if async
  execution resulted in an error state.

  Yields:
    None
  """
  if self._platform_device == failure_handling_util.PlatformDevice.INTERNAL_TPU:
    try:
      with context.async_scope():
        yield
    except errors.AbortedError as abort_error:
      preemption_payload = abort_error.experimental_payloads.get(
          b'type.googleapis.com/tensorflow.distributed_runtime.WorkerPreemption'
      )
      if not preemption_payload:
        raise
      logging.info('Clearing preemption error to save checkpoint...')
      context.async_clear_error()
      self._save_checkpoint()
      self._exit_fn()
  else:
    try:
      yield
    except errors.OpError as e:
      if not self._local_mode:
        logging.info('Propagating error to cluster: %r: %s', e, e)
        try:
          context.context().report_error_to_cluster(e.error_code, e.message)
        except Exception as ex:  # pylint: disable=broad-except
          logging.info('Ignoring error during error propagation: %r:%s', ex, ex)
      raise
def _save_checkpoint(self, *args, **kwargs):
  """Saves the checkpoint (callers exit the program afterwards).

  Uses the user-supplied `save_fn` when one was configured, otherwise the
  handler's own checkpoint manager. Records wall time spent saving in
  `self._checkpoint_time`.
  """
  distribute_lib.distribution_strategy_input_api_counter.get_cell(
      self._platform_device.name,
      'PreemptionCheckpointHandler Saving Checkpoint').increase_by(1)
  logging.info('PreemptionCheckpointHandler: Starting saving a checkpoint.')

  if self._platform_device != failure_handling_util.PlatformDevice.INTERNAL_TPU:
    # Persist the run count so training can resume at the right step.
    self._checkpointed_runs.assign(self.total_run_calls)

  start_time = time.monotonic()
  with checkpoint_context.preemption_save_context():
    save = self._save_fn or self._write_checkpoint_manager.save
    save(*args, **kwargs)
  end_time = time.monotonic()

  logging.info('Checkpoint finished at path %s',
               self._write_checkpoint_manager.directory)
  self._checkpoint_time = end_time - start_time
def _check_preemption_and_maybe_checkpoint(self, *args, **kwargs):
  """Checkpoint if any worker has received a preemption signal.

  This function handles preemption signal reported by any worker in the
  cluster. The current implementation relies on the fact that all workers in a
  MultiWorkerMirroredStrategy training cluster have a step number difference
  maximum of 1.
  - If the signal comes from the worker itself (i.e., where this failure
  handler sits), the worker will notify all peers to checkpoint after they
  finish CURRENT_STEP+1 steps, where CURRENT_STEP is the step this worker has
  just finished. And the worker will wait for all peers to acknowledge that
  they have received its preemption signal and the final-step number before
  the worker proceeds on training the final step.
  - If the signal comes from another member in the cluster but NO final-step
  info is available, proceed on training, because it will be available after
  finishing the next step.
  - If the signal comes from some other member in the cluster, and final-step
  info is available, if the worker has not finished these steps yet, keep
  training; otherwise, checkpoint and exit with a cluster-recognized restart
  code.

  Args:
    *args: args for `tf.train.CheckpointManager.save()` to save checkpoint.
    **kwargs: kwargs for `tf.train.CheckpointManager.save()` to save.
  """
  if self._platform_device == failure_handling_util.PlatformDevice.INTERNAL_TPU:
    # On TPU a pending preemption surfaces as an op error; nothing else to
    # do on this path.
    gen_check_preemption_op.check_preemption(preemption_key=PREEMPTION_KEY)
    return

  # Select which config key carries the step-to-save: the final-call key
  # during a grace-period countdown, the initial key otherwise.
  if self._final_checkpoint_countdown:
    run_count_config_key = _FINAL_RUN_COUNT_KEY
  else:
    run_count_config_key = _INITIAL_RUN_COUNT_KEY

  if self._received_checkpoint_step.is_set():
    # A step-to-save has been agreed upon cluster-wide; save once this
    # worker reaches it.
    if self._step_to_checkpoint == str(self._run_counter):
      self._save_checkpoint(*args, **kwargs)

      if self._time_to_exit():
        self._stop_poll_termination_signal_thread()
        self._stop_cluster_wise_termination_watcher_thread()
        if self._api_made_checkpoint_manager and not self._is_chief:
          # Non-chief workers delete their temporary checkpoint directory.
          gfile.DeleteRecursively(
              os.path.dirname(self._write_checkpoint_manager.directory))
        logging.info(
            'PreemptionCheckpointHandler: checkpoint saved. Exiting.')
        self._exit_fn()
      else:
        logging.info('Continue training for the grace period.')
        self._final_checkpoint_countdown = True
        self._received_checkpoint_step.clear()

  elif self._received_own_sigterm.is_set():
    # Only the worker who gets termination signal first among the cluster
    # will enter this branch. The following will happen in chronological
    # order:
    # 1. The worker just receives a preemption signal and enters this branch
    # for the first time. It will set a step-to-checkpoint and let the cluster
    # know.
    # 2. If there is a long grace period, it will also set
    # _final_checkpoint_countdown, so that during this grace period, it will
    # re-enter this branch to check if grace period is ending.
    # 3. If it is, set a step-to-checkpoint key again.

    if self._final_checkpoint_countdown:
      if self._target_time_for_termination < time.time():
        logging.info(
            'Grace period almost ended. Final call to save a checkpoint!')
      else:
        return

    step_to_save_at = str(self._run_counter + 1)

    logging.info('Termination caught in main thread on preempted worker')

    if self._local_mode:
      self._step_to_checkpoint = step_to_save_at
      self._received_checkpoint_step.set()

    else:
      context.context().set_config_key_value(run_count_config_key,
                                             step_to_save_at)
      logging.info('%s set to %s', run_count_config_key, step_to_save_at)

      if not self._local_mode:
        # Block until every worker acknowledges the step-to-save key.
        worker_count = multi_worker_util.worker_count(
            self._cluster_resolver.cluster_spec(),
            self._cluster_resolver.task_type)
        for i in range(worker_count):
          context.context().get_config_key_value(
              f'{_ACKNOWLEDGE_KEY}_{run_count_config_key}_{i}')
          logging.info('Sigterm acknowledgement from replica %d received', i)

    self._setup_countdown_if_has_grace_period_and_not_already_counting_down()
def _time_to_exit(self):
"""Return whether to exit: exit if no grace period or grace period ends."""
# we should directly exit in either of the two cases:
# 1. if no grace period is provided;
# 2. if there is a grace period, and we're in countdown period. This,
# together with the fact that _received_checkpoint_step is set (again),
# means it's time to exit: when there is a grace period, a worker
# receives preemption signal and sets the step key. Then all workers
# receive the step key and set their local _received_checkpoint_step
# event, enters this branch in _check_preemption_and_maybe_checkpoint, make
# a checkpoint. Then they set _final_checkpoint_countdown to True, clear
# _received_checkpoint_step, and continue training. New preemption
# signals anywhere in the cluster will not be handled, because
# _PREEMPTION_WORKER_KEY is occupied. The only chance that
# _received_checkpoint_step gets set again is when the worker who has
# received the preemption signal earlier decide it's time to do a final
# checkpoint (by checking if it already passes
# _target_time_for_termination). It will upload a final step key. All
# workers receive this key and again set _received_checkpoint_step. So,
# if we found out that _received_checkpoint_step is set, and also
# _final_checkpoint_countdown is true, it's checkpoint and exit time.
return (self._grace_period <= 0) or self._final_checkpoint_countdown
def _setup_countdown_if_has_grace_period_and_not_already_counting_down(self):
"""Set up at the beginning of a countdown period for long grace period."""
if self._grace_period > 0 and not self._final_checkpoint_countdown:
# A factor to provide more buffer / inaccuracy.
# TODO(wxinyi): update buffer_factor as needed. Maybe deduct a constant.
buffer_factor = 3
# Timing by 2 since while the preempted worker needs to do 1 extra step
# when time_till_final_call <=0, other workers might need to do x step
# where 0<x<2
self._target_time_for_termination = (
self._received_own_sigterm_time + self._grace_period -
buffer_factor * self._estimated_run_time * 2)
def _sigterm_handler_fn(self, signum, frame):
"""Upload the to-be-preempted worker's id to coordination service."""
del signum, frame
self._maybe_set_received_own_sigterm()
def _watch_step_to_save_key(self):
  """Watch out for step-to-save config key and acknowledge.

  All workers, including the one to be preempted, execute this function to
  get step-to-save. Runs on a background thread; blocks on
  `get_config_key_value` until the preempted worker publishes a step (or a
  stop sentinel is published during shutdown).
  """
  step_value = context.context().get_config_key_value(_INITIAL_RUN_COUNT_KEY)

  # get_config_key_value does not return until it gets some result. Thus at
  # the time to clean up, we upload a _STOP_WATCHING_CLUSTER_VALUE as the
  # value so we can join the thread executing _watch_step_to_save_key.
  if step_value != _STOP_WATCHING_CLUSTER_VALUE:
    # This must be set before we set the ack key below, otherwise its value
    # in _check_preemption_and_maybe_checkpoint may be outdated.
    self._step_to_checkpoint = step_value
    self._received_checkpoint_step.set()

    ack_key = f'{_ACKNOWLEDGE_KEY}_{_INITIAL_RUN_COUNT_KEY}_{self._id_in_cluster}'
    context.context().set_config_key_value(ack_key, '1')
    logging.info(
        'PreemptionCheckpointHandler: %s set, '
        'preemption awareness acknowledged', ack_key)

    # If a positive grace_period is not configured, we get the
    # _INITIAL_RUN_COUNT_KEY and then we're done.
    # _check_preemption_and_maybe_checkpoint
    # will save a checkpoint and then exit. Otherwise, we need to move on to
    # wait for the _FINAL_RUN_COUNT_KEY, the one that the preempted worker
    # will set after we utilize the extended grace period to train, so that
    # a final checkpoint should be made right before the termination.
    if self._grace_period > 0:
      # Continue to wait until a final call is made.
      final_step_value = context.context().get_config_key_value(
          _FINAL_RUN_COUNT_KEY)
      if final_step_value != _STOP_WATCHING_CLUSTER_VALUE:
        ack_key = f'{_ACKNOWLEDGE_KEY}_{_FINAL_RUN_COUNT_KEY}_{self._id_in_cluster}'
        context.context().set_config_key_value(ack_key, '1')
        logging.info(
            'PreemptionCheckpointHandler: %s acknowledged, final '
            'checkpoint timing received.', ack_key)
        self._received_checkpoint_step.set()
        self._step_to_checkpoint = final_step_value
# TODO(wxinyi): remove this line after we move the Keras callback prototype and
# change gce test usage.
# Backward-compatible alias for the class's previous public name.
WorkerPreemptionHandler = PreemptionCheckpointHandler
| PreemptionCheckpointHandler |
python | plotly__plotly.py | plotly/graph_objs/_scattermap.py | {
"start": 215,
"end": 64763
} | class ____(_BaseTraceType):
# Dotted path of this object's parent in the figure hierarchy ("" = trace
# lives at the figure's top level).
_parent_path_str = ""
# Name of this trace type in the plotly.js schema.
_path_str = "scattermap"
# Property names accepted by this trace; used to validate constructor
# kwargs and attribute assignment.
_valid_props = {
    "below",
    "cluster",
    "connectgaps",
    "customdata",
    "customdatasrc",
    "fill",
    "fillcolor",
    "hoverinfo",
    "hoverinfosrc",
    "hoverlabel",
    "hovertemplate",
    "hovertemplatefallback",
    "hovertemplatesrc",
    "hovertext",
    "hovertextsrc",
    "ids",
    "idssrc",
    "lat",
    "latsrc",
    "legend",
    "legendgroup",
    "legendgrouptitle",
    "legendrank",
    "legendwidth",
    "line",
    "lon",
    "lonsrc",
    "marker",
    "meta",
    "metasrc",
    "mode",
    "name",
    "opacity",
    "selected",
    "selectedpoints",
    "showlegend",
    "stream",
    "subplot",
    "text",
    "textfont",
    "textposition",
    "textsrc",
    "texttemplate",
    "texttemplatefallback",
    "texttemplatesrc",
    "type",
    "uid",
    "uirevision",
    "unselected",
    "visible",
}
@property
def below(self):
    """
    ID of the map layer before which this scattermap trace's layers are
    inserted. By default they are inserted above all base layers; set to
    "''" to place them above every other layer. Accepts a string (a
    number is converted to a string).

    Returns
    -------
    str
    """
    return self["below"]


@below.setter
def below(self, val):
    self["below"] = val


@property
def cluster(self):
    """
    Marker-clustering options: an instance of
    :class:`plotly.graph_objs.scattermap.Cluster`, or a dict of
    properties passed to its constructor.

    Returns
    -------
    plotly.graph_objs.scattermap.Cluster
    """
    return self["cluster"]


@cluster.setter
def cluster(self, val):
    self["cluster"] = val


@property
def connectgaps(self):
    """
    Whether gaps (i.e. {nan} or missing values) in the provided data
    arrays are connected. Boolean.

    Returns
    -------
    bool
    """
    return self["connectgaps"]


@connectgaps.setter
def connectgaps(self, val):
    self["connectgaps"] = val


@property
def customdata(self):
    """
    Extra data assigned to each datum, useful when listening to hover,
    click and selection events. Accepts a tuple, list, numpy array, or
    pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["customdata"]


@customdata.setter
def customdata(self, val):
    self["customdata"] = val


@property
def customdatasrc(self):
    """
    Chart Studio Cloud source reference for `customdata`: a string or a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["customdatasrc"]


@customdatasrc.setter
def customdatasrc(self, val):
    self["customdatasrc"] = val
@property
def fill(self):
    """
    Area to fill with a solid color; use with `fillcolor` if not "none".
    "toself" connects the endpoints of the trace (or of each segment if
    it has gaps) into a closed shape. One of ['none', 'toself'].

    Returns
    -------
    Any
    """
    return self["fill"]


@fill.setter
def fill(self, val):
    self["fill"] = val


@property
def fillcolor(self):
    """
    Fill color. Defaults to a half-transparent variant of the line,
    marker, or marker-line color, whichever is available. Accepts hex
    ('#ff0000'), rgb/rgba, hsl/hsla, hsv/hsva strings, or a named CSS
    color (see https://plotly.com/python/css-colors/).

    Returns
    -------
    str
    """
    return self["fillcolor"]


@fillcolor.setter
def fillcolor(self, val):
    self["fillcolor"] = val


@property
def hoverinfo(self):
    """
    Which trace information appears on hover. With `none` or `skip`
    nothing is shown on hover, but `none` still fires click/hover
    events. Flaglist of ['lon', 'lat', 'text', 'name'] joined with '+'
    (e.g. 'lon+lat'), or exactly one of ['all', 'none', 'skip'];
    per-point arrays are also accepted.

    Returns
    -------
    Any|numpy.ndarray
    """
    return self["hoverinfo"]


@hoverinfo.setter
def hoverinfo(self, val):
    self["hoverinfo"] = val


@property
def hoverinfosrc(self):
    """
    Chart Studio Cloud source reference for `hoverinfo`: a string or a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["hoverinfosrc"]


@hoverinfosrc.setter
def hoverinfosrc(self, val):
    self["hoverinfosrc"] = val


@property
def hoverlabel(self):
    """
    Hover-label styling: an instance of
    :class:`plotly.graph_objs.scattermap.Hoverlabel`, or a dict of
    properties passed to its constructor.

    Returns
    -------
    plotly.graph_objs.scattermap.Hoverlabel
    """
    return self["hoverlabel"]


@hoverlabel.setter
def hoverlabel(self, val):
    self["hoverlabel"] = val
@property
def hovertemplate(self):
    """
    Template string for the hover box; overrides `hoverinfo`. Variables
    are inserted with %{variable} (e.g. "y: %{y}"); numbers use
    d3-format syntax %{variable:d3-format} and dates use
    d3-time-format syntax %{variable|d3-time-format}. Variables that
    can't be found are replaced with the specifier; undefined values
    use the fallback value. Event-data variables and all per-point
    (`arrayOk: true`) attributes are available. Content in an `<extra>`
    tag goes to the secondary box; use `<extra></extra>` to hide it.
    Accepts a string, a number, or a tuple/list/1-D numpy array of
    those.

    Returns
    -------
    str|numpy.ndarray
    """
    return self["hovertemplate"]


@hovertemplate.setter
def hovertemplate(self, val):
    self["hovertemplate"] = val


@property
def hovertemplatefallback(self):
    """
    Fallback string shown when a variable referenced in a template is
    missing. Passing the boolean 'false' displays the specifier with
    the missing variable instead. Accepts any type.

    Returns
    -------
    Any
    """
    return self["hovertemplatefallback"]


@hovertemplatefallback.setter
def hovertemplatefallback(self, val):
    self["hovertemplatefallback"] = val


@property
def hovertemplatesrc(self):
    """
    Chart Studio Cloud source reference for `hovertemplate`: a string
    or a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["hovertemplatesrc"]


@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
    self["hovertemplatesrc"] = val


@property
def hovertext(self):
    """
    Hover text for each (lon,lat) pair. A single string applies to all
    points; an array maps items in order to the trace's coordinates.
    Visible only when trace `hoverinfo` contains the "text" flag.
    Accepts a string, a number, or a tuple/list/1-D numpy array of
    those.

    Returns
    -------
    str|numpy.ndarray
    """
    return self["hovertext"]


@hovertext.setter
def hovertext(self, val):
    self["hovertext"] = val


@property
def hovertextsrc(self):
    """
    Chart Studio Cloud source reference for `hovertext`: a string or a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["hovertextsrc"]


@hovertextsrc.setter
def hovertextsrc(self, val):
    self["hovertextsrc"] = val
@property
def ids(self):
    """
    Id labels for each datum, used for object constancy of data points
    during animation. Should be an array of strings (not numbers or any
    other type). Accepts a tuple, list, numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["ids"]


@ids.setter
def ids(self, val):
    self["ids"] = val


@property
def idssrc(self):
    """
    Chart Studio Cloud source reference for `ids`: a string or a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["idssrc"]


@idssrc.setter
def idssrc(self, val):
    self["idssrc"] = val


@property
def lat(self):
    """
    Latitude coordinates (in degrees North). Accepts a tuple, list,
    numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["lat"]


@lat.setter
def lat(self, val):
    self["lat"] = val


@property
def latsrc(self):
    """
    Chart Studio Cloud source reference for `lat`: a string or a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["latsrc"]


@latsrc.setter
def latsrc(self, val):
    self["latsrc"] = val
@property
def legend(self):
    """
    Reference to the legend this trace is shown in: "legend",
    "legend2", "legend3", etc. Settings for these legends live in the
    layout under `layout.legend`, `layout.legend2`, etc. The value is
    the string 'legend' optionally followed by an integer >= 1.

    Returns
    -------
    str
    """
    return self["legend"]


@legend.setter
def legend(self, val):
    self["legend"] = val


@property
def legendgroup(self):
    """
    Legend group for this trace. Traces and shapes in the same group
    hide/show together when toggling legend items. Accepts a string (a
    number is converted to a string).

    Returns
    -------
    str
    """
    return self["legendgroup"]


@legendgroup.setter
def legendgroup(self, val):
    self["legendgroup"] = val


@property
def legendgrouptitle(self):
    """
    Legend-group title: an instance of
    :class:`plotly.graph_objs.scattermap.Legendgrouptitle`, or a dict
    of properties passed to its constructor.

    Returns
    -------
    plotly.graph_objs.scattermap.Legendgrouptitle
    """
    return self["legendgrouptitle"]


@legendgrouptitle.setter
def legendgrouptitle(self, val):
    self["legendgrouptitle"] = val


@property
def legendrank(self):
    """
    Legend rank for this trace. Smaller ranks are shown top/left (the
    reverse with "reversed" `legend.traceorder`). Default is 1000, so
    ranks < 1000 go before unranked items and ranks > 1000 after them;
    with unranked or equal ranks, shapes are displayed after traces,
    i.e. according to their order in data and layout. An int or float.

    Returns
    -------
    int|float
    """
    return self["legendrank"]


@legendrank.setter
def legendrank(self, val):
    self["legendrank"] = val


@property
def legendwidth(self):
    """
    Width (in px or fraction) of the legend for this trace. An int or
    float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["legendwidth"]


@legendwidth.setter
def legendwidth(self, val):
    self["legendwidth"] = val
@property
def line(self):
    """
    Line styling: an instance of
    :class:`plotly.graph_objs.scattermap.Line`, or a dict of properties
    passed to its constructor.

    Returns
    -------
    plotly.graph_objs.scattermap.Line
    """
    return self["line"]


@line.setter
def line(self, val):
    self["line"] = val


@property
def lon(self):
    """
    Longitude coordinates (in degrees East). Accepts a tuple, list,
    numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["lon"]


@lon.setter
def lon(self, val):
    self["lon"] = val


@property
def lonsrc(self):
    """
    Chart Studio Cloud source reference for `lon`: a string or a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["lonsrc"]


@lonsrc.setter
def lonsrc(self, val):
    self["lonsrc"] = val


@property
def marker(self):
    """
    Marker styling: an instance of
    :class:`plotly.graph_objs.scattermap.Marker`, or a dict of
    properties passed to its constructor.

    Returns
    -------
    plotly.graph_objs.scattermap.Marker
    """
    return self["marker"]


@marker.setter
def marker(self, val):
    self["marker"] = val


@property
def meta(self):
    """
    Extra meta information usable in this trace's text attributes
    (trace `name`; graph, axis and colorbar `title.text`; annotation
    `text`; `rangeselector`, `updatemenues` and `sliders` `label`
    text). In the same trace use `%{meta[i]}` with `i` the index or key
    of the `meta` item; in layout attributes use `%{data[n[.meta[i]}`
    with `n` the trace index. Accepts any type.

    Returns
    -------
    Any|numpy.ndarray
    """
    return self["meta"]


@meta.setter
def meta(self, val):
    self["meta"] = val


@property
def metasrc(self):
    """
    Chart Studio Cloud source reference for `meta`: a string or a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["metasrc"]


@metasrc.setter
def metasrc(self, val):
    self["metasrc"] = val
@property
def mode(self):
    """
    Drawing mode for this scatter trace. If `mode` includes "text", the
    `text` elements appear at the coordinates; otherwise they appear on
    hover. Flaglist of ['lines', 'markers', 'text'] joined with '+'
    (e.g. 'lines+markers'), or exactly 'none'.

    Returns
    -------
    Any
    """
    return self["mode"]


@mode.setter
def mode(self, val):
    self["mode"] = val


@property
def name(self):
    """
    Trace name, shown as the legend item and on hover. Accepts a string
    (a number is converted to a string).

    Returns
    -------
    str
    """
    return self["name"]


@name.setter
def name(self, val):
    self["name"] = val


@property
def opacity(self):
    """
    Opacity of the trace: an int or float in the interval [0, 1].

    Returns
    -------
    int|float
    """
    return self["opacity"]


@opacity.setter
def opacity(self, val):
    self["opacity"] = val


@property
def selected(self):
    """
    Selected-point styling: an instance of
    :class:`plotly.graph_objs.scattermap.Selected`, or a dict of
    properties passed to its constructor.

    Returns
    -------
    plotly.graph_objs.scattermap.Selected
    """
    return self["selected"]


@selected.setter
def selected(self, val):
    self["selected"] = val


@property
def selectedpoints(self):
    """
    Integer indices of selected points; only effective for traces that
    support selections. An empty array means an empty selection where
    `unselected` styling applies to all points; any non-array value
    means no selection, and `selected`/`unselected` styles have no
    effect. Accepts any type.

    Returns
    -------
    Any
    """
    return self["selectedpoints"]


@selectedpoints.setter
def selectedpoints(self, val):
    self["selectedpoints"] = val


@property
def showlegend(self):
    """
    Whether an item for this trace is shown in the legend. Boolean.

    Returns
    -------
    bool
    """
    return self["showlegend"]


@showlegend.setter
def showlegend(self, val):
    self["showlegend"] = val
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattermap.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Returns
-------
plotly.graph_objs.scattermap.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
@property
def subplot(self):
"""
Sets a reference between this trace's data coordinates and a
map subplot. If "map" (the default value), the data refer to
`layout.map`. If "map2", the data refer to `layout.map2`, and
so on.
The 'subplot' property is an identifier of a particular
subplot, of type 'map', that may be specified as the string 'map'
optionally followed by an integer >= 1
(e.g. 'map', 'map1', 'map2', 'map3', etc.)
Returns
-------
str
"""
return self["subplot"]
@subplot.setter
def subplot(self, val):
self["subplot"] = val
@property
def text(self):
"""
Sets text elements associated with each (lon,lat) pair If a
single string, the same string appears over all the data
points. If an array of string, the items are mapped in order to
the this trace's (lon,lat) coordinates. If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set, these
elements will be seen in the hover labels.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def textfont(self):
"""
Sets the icon text font (color=map.layer.paint.text-color,
size=map.layer.layout.text-size). Has an effect only when
`type` is set to "symbol".
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattermap.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Returns
-------
plotly.graph_objs.scattermap.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def textposition(self):
"""
Sets the positions of the `text` elements with respects to the
(x,y) coordinates.
The 'textposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top left', 'top center', 'top right', 'middle left',
'middle center', 'middle right', 'bottom left', 'bottom
center', 'bottom right']
Returns
-------
Any
"""
return self["textposition"]
@textposition.setter
def textposition(self, val):
self["textposition"] = val
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `text`.
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
@property
def texttemplate(self):
"""
Template string used for rendering the information text that
appears on points. Note that this will override `textinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. All attributes that can be
specified per-point (the ones that are `arrayOk: true`) are
available. Finally, the template string has access to variables
`lat`, `lon` and `text`.
The 'texttemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["texttemplate"]
@texttemplate.setter
def texttemplate(self, val):
self["texttemplate"] = val
@property
def texttemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'texttemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["texttemplatefallback"]
@texttemplatefallback.setter
def texttemplatefallback(self, val):
self["texttemplatefallback"] = val
@property
def texttemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
The 'texttemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["texttemplatesrc"]
@texttemplatesrc.setter
def texttemplatesrc(self, val):
self["texttemplatesrc"] = val
@property
def uid(self):
"""
Assign an id to this trace, Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def unselected(self):
"""
The 'unselected' property is an instance of Unselected
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattermap.Unselected`
- A dict of string/value properties that will be passed
to the Unselected constructor
Returns
-------
plotly.graph_objs.scattermap.Unselected
"""
return self["unselected"]
@unselected.setter
def unselected(self, val):
self["unselected"] = val
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
    @property
    def type(self):
        """Trace type identifier stored in the private props dict.

        Read-only (no setter); set to "scattermap" by the constructor.
        """
        return self._props["type"]
    @property
    def _prop_descriptions(self):
        """Per-property description text used to assemble the constructor
        docstring.

        NOTE: the literal's indentation and line wrapping are part of the
        returned string and must be preserved exactly.
        """
        return """\
        below
            Determines if this scattermap trace's layers are to be
            inserted before the layer with the specified ID. By
            default, scattermap layers are inserted above all the
            base layers. To place the scattermap layers above every
            other layer, set `below` to "''".
        cluster
            :class:`plotly.graph_objects.scattermap.Cluster`
            instance or dict with compatible properties
        connectgaps
            Determines whether or not gaps (i.e. {nan} or missing
            values) in the provided data arrays are connected.
        customdata
            Assigns extra data each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        fill
            Sets the area to fill with a solid color. Use with
            `fillcolor` if not "none". "toself" connects the
            endpoints of the trace (or each segment of the trace if
            it has gaps) into a closed shape.
        fillcolor
            Sets the fill color. Defaults to a half-transparent
            variant of the line color, marker color, or marker line
            color, whichever is available.
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.scattermap.Hoverlabel`
            instance or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. Variables that can't be found
            will be replaced with the specifier. For example, a
            template of "data: %{x}, %{y}" will result in a value
            of "data: 1, %{y}" if x is 1 and y is missing.
            Variables with an undefined value will be replaced with
            the fallback value. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, all attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Anything contained in tag `<extra>` is
            displayed in the secondary box, for example
            `<extra>%{fullData.name}</extra>`. To hide the
            secondary box completely, use an empty tag
            `<extra></extra>`.
        hovertemplatefallback
            Fallback string that's displayed when a variable
            referenced in a template is missing. If the boolean
            value 'false' is passed in, the specifier with the
            missing variable will be displayed.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Sets hover text elements associated with each (lon,lat)
            pair If a single string, the same string appears over
            all the data points. If an array of string, the items
            are mapped in order to the this trace's (lon,lat)
            coordinates. To be seen, trace `hoverinfo` must contain
            a "text" flag.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        lat
            Sets the latitude coordinates (in degrees North).
        latsrc
            Sets the source reference on Chart Studio Cloud for
            `lat`.
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgroup
            Sets the legend group for this trace. Traces and shapes
            part of the same legend group hide/show at the same
            time when toggling legend items.
        legendgrouptitle
            :class:`plotly.graph_objects.scattermap.Legendgrouptitl
            e` instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When having
            unranked or equal rank items shapes would be displayed
            after traces i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        line
            :class:`plotly.graph_objects.scattermap.Line` instance
            or dict with compatible properties
        lon
            Sets the longitude coordinates (in degrees East).
        lonsrc
            Sets the source reference on Chart Studio Cloud for
            `lon`.
        marker
            :class:`plotly.graph_objects.scattermap.Marker`
            instance or dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenues` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n[.meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        mode
            Determines the drawing mode for this scatter trace. If
            the provided `mode` includes "text" then the `text`
            elements appear at the coordinates. Otherwise, the
            `text` elements appear on hover.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        opacity
            Sets the opacity of the trace.
        selected
            :class:`plotly.graph_objects.scattermap.Selected`
            instance or dict with compatible properties
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas,
            any other non-array values means no selection all where
            the `selected` and `unselected` styles have no effect.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        stream
            :class:`plotly.graph_objects.scattermap.Stream`
            instance or dict with compatible properties
        subplot
            Sets a reference between this trace's data coordinates
            and a map subplot. If "map" (the default value), the
            data refer to `layout.map`. If "map2", the data refer
            to `layout.map2`, and so on.
        text
            Sets text elements associated with each (lon,lat) pair
            If a single string, the same string appears over all
            the data points. If an array of string, the items are
            mapped in order to the this trace's (lon,lat)
            coordinates. If trace `hoverinfo` contains a "text"
            flag and "hovertext" is not set, these elements will be
            seen in the hover labels.
        textfont
            Sets the icon text font (color=map.layer.paint.text-
            color, size=map.layer.layout.text-size). Has an effect
            only when `type` is set to "symbol".
        textposition
            Sets the positions of the `text` elements with respects
            to the (x,y) coordinates.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        texttemplate
            Template string used for rendering the information text
            that appears on points. Note that this will override
            `textinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. Variables that can't be found
            will be replaced with the specifier. For example, a
            template of "data: %{x}, %{y}" will result in a value
            of "data: 1, %{y}" if x is 1 and y is missing.
            Variables with an undefined value will be replaced with
            the fallback value. All attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Finally, the template string has access
            to variables `lat`, `lon` and `text`.
        texttemplatefallback
            Fallback string that's displayed when a variable
            referenced in a template is missing. If the boolean
            value 'false' is passed in, the specifier with the
            missing variable will be displayed.
        texttemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `texttemplate`.
        uid
            Assign an id to this trace, Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        unselected
            :class:`plotly.graph_objects.scattermap.Unselected`
            instance or dict with compatible properties
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        """
def __init__(
self,
arg=None,
below=None,
cluster=None,
connectgaps=None,
customdata=None,
customdatasrc=None,
fill=None,
fillcolor=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatefallback=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
lat=None,
latsrc=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
line=None,
lon=None,
lonsrc=None,
marker=None,
meta=None,
metasrc=None,
mode=None,
name=None,
opacity=None,
selected=None,
selectedpoints=None,
showlegend=None,
stream=None,
subplot=None,
text=None,
textfont=None,
textposition=None,
textsrc=None,
texttemplate=None,
texttemplatefallback=None,
texttemplatesrc=None,
uid=None,
uirevision=None,
unselected=None,
visible=None,
**kwargs,
):
"""
Construct a new Scattermap object
The data visualized as scatter point, lines or marker symbols
on a MapLibre GL geographic map is provided by
longitude/latitude pairs in `lon` and `lat`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Scattermap`
below
Determines if this scattermap trace's layers are to be
inserted before the layer with the specified ID. By
default, scattermap layers are inserted above all the
base layers. To place the scattermap layers above every
other layer, set `below` to "''".
cluster
:class:`plotly.graph_objects.scattermap.Cluster`
instance or dict with compatible properties
connectgaps
Determines whether or not gaps (i.e. {nan} or missing
values) in the provided data arrays are connected.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
fill
Sets the area to fill with a solid color. Use with
`fillcolor` if not "none". "toself" connects the
endpoints of the trace (or each segment of the trace if
it has gaps) into a closed shape.
fillcolor
Sets the fill color. Defaults to a half-transparent
variant of the line color, marker color, or marker line
color, whichever is available.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.scattermap.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Sets hover text elements associated with each (lon,lat)
pair If a single string, the same string appears over
all the data points. If an array of string, the items
are mapped in order to the this trace's (lon,lat)
coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
lat
Sets the latitude coordinates (in degrees North).
latsrc
Sets the source reference on Chart Studio Cloud for
`lat`.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.scattermap.Legendgrouptitl
e` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
line
:class:`plotly.graph_objects.scattermap.Line` instance
or dict with compatible properties
lon
Sets the longitude coordinates (in degrees East).
lonsrc
Sets the source reference on Chart Studio Cloud for
`lon`.
marker
:class:`plotly.graph_objects.scattermap.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
mode
Determines the drawing mode for this scatter trace. If
the provided `mode` includes "text" then the `text`
elements appear at the coordinates. Otherwise, the
`text` elements appear on hover.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
:class:`plotly.graph_objects.scattermap.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.scattermap.Stream`
instance or dict with compatible properties
subplot
Sets a reference between this trace's data coordinates
and a map subplot. If "map" (the default value), the
data refer to `layout.map`. If "map2", the data refer
to `layout.map2`, and so on.
text
Sets text elements associated with each (lon,lat) pair
If a single string, the same string appears over all
the data points. If an array of string, the items are
mapped in order to the this trace's (lon,lat)
coordinates. If trace `hoverinfo` contains a "text"
flag and "hovertext" is not set, these elements will be
seen in the hover labels.
textfont
Sets the icon text font (color=map.layer.paint.text-
color, size=map.layer.layout.text-size). Has an effect
only when `type` is set to "symbol".
textposition
Sets the positions of the `text` elements with respects
to the (x,y) coordinates.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
texttemplate
Template string used for rendering the information text
that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. All attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `lat`, `lon` and `text`.
texttemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.scattermap.Unselected`
instance or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Scattermap
"""
super().__init__("scattermap")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.Scattermap
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Scattermap`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("below", arg, below)
self._set_property("cluster", arg, cluster)
self._set_property("connectgaps", arg, connectgaps)
self._set_property("customdata", arg, customdata)
self._set_property("customdatasrc", arg, customdatasrc)
self._set_property("fill", arg, fill)
self._set_property("fillcolor", arg, fillcolor)
self._set_property("hoverinfo", arg, hoverinfo)
self._set_property("hoverinfosrc", arg, hoverinfosrc)
self._set_property("hoverlabel", arg, hoverlabel)
self._set_property("hovertemplate", arg, hovertemplate)
self._set_property("hovertemplatefallback", arg, hovertemplatefallback)
self._set_property("hovertemplatesrc", arg, hovertemplatesrc)
self._set_property("hovertext", arg, hovertext)
self._set_property("hovertextsrc", arg, hovertextsrc)
self._set_property("ids", arg, ids)
self._set_property("idssrc", arg, idssrc)
self._set_property("lat", arg, lat)
self._set_property("latsrc", arg, latsrc)
self._set_property("legend", arg, legend)
self._set_property("legendgroup", arg, legendgroup)
self._set_property("legendgrouptitle", arg, legendgrouptitle)
self._set_property("legendrank", arg, legendrank)
self._set_property("legendwidth", arg, legendwidth)
self._set_property("line", arg, line)
self._set_property("lon", arg, lon)
self._set_property("lonsrc", arg, lonsrc)
self._set_property("marker", arg, marker)
self._set_property("meta", arg, meta)
self._set_property("metasrc", arg, metasrc)
self._set_property("mode", arg, mode)
self._set_property("name", arg, name)
self._set_property("opacity", arg, opacity)
self._set_property("selected", arg, selected)
self._set_property("selectedpoints", arg, selectedpoints)
self._set_property("showlegend", arg, showlegend)
self._set_property("stream", arg, stream)
self._set_property("subplot", arg, subplot)
self._set_property("text", arg, text)
self._set_property("textfont", arg, textfont)
self._set_property("textposition", arg, textposition)
self._set_property("textsrc", arg, textsrc)
self._set_property("texttemplate", arg, texttemplate)
self._set_property("texttemplatefallback", arg, texttemplatefallback)
self._set_property("texttemplatesrc", arg, texttemplatesrc)
self._set_property("uid", arg, uid)
self._set_property("uirevision", arg, uirevision)
self._set_property("unselected", arg, unselected)
self._set_property("visible", arg, visible)
self._props["type"] = "scattermap"
arg.pop("type", None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Scattermap |
python | pandas-dev__pandas | pandas/tests/extension/test_common.py | {
"start": 894,
"end": 2205
} | class ____:
@pytest.mark.parametrize(
"values",
[
pd.Categorical([]),
pd.Categorical([]).dtype,
pd.Series(pd.Categorical([])),
DummyDtype(),
DummyArray(np.array([1, 2])),
],
)
def test_is_extension_array_dtype(self, values):
assert is_extension_array_dtype(values)
@pytest.mark.parametrize("values", [np.array([]), pd.Series(np.array([]))])
def test_is_not_extension_array_dtype(self, values):
assert not is_extension_array_dtype(values)
def test_astype():
arr = DummyArray(np.array([1, 2, 3]))
expected = np.array([1, 2, 3], dtype=object)
result = arr.astype(object)
tm.assert_numpy_array_equal(result, expected)
result = arr.astype("object")
tm.assert_numpy_array_equal(result, expected)
def test_astype_no_copy():
arr = DummyArray(np.array([1, 2, 3], dtype=np.int64))
result = arr.astype(arr.dtype, copy=False)
assert arr is result
result = arr.astype(arr.dtype)
assert arr is not result
@pytest.mark.parametrize("dtype", [dtypes.CategoricalDtype(), dtypes.IntervalDtype()])
def test_is_extension_array_dtype(dtype):
assert isinstance(dtype, dtypes.ExtensionDtype)
assert is_extension_array_dtype(dtype)
| TestExtensionArrayDtype |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_quicksight.py | {
"start": 1521,
"end": 3383
} | class ____:
def setup_method(self):
self.default_op_kwargs = {
"task_id": "quicksight_create",
"aws_conn_id": None,
"data_set_id": DATA_SET_ID,
"ingestion_id": INGESTION_ID,
}
def test_init(self):
self.default_op_kwargs.pop("aws_conn_id", None)
op = QuickSightCreateIngestionOperator(
**self.default_op_kwargs,
# Generic hooks parameters
aws_conn_id="fake-conn-id",
region_name="cn-north-1",
verify=False,
botocore_config={"read_timeout": 42},
)
assert op.hook.client_type == "quicksight"
assert op.hook.resource_type is None
assert op.hook.aws_conn_id == "fake-conn-id"
assert op.hook._region_name == "cn-north-1"
assert op.hook._verify is False
assert op.hook._config is not None
assert op.hook._config.read_timeout == 42
op = QuickSightCreateIngestionOperator(**self.default_op_kwargs)
assert op.hook.aws_conn_id == "aws_default"
assert op.hook._region_name is None
assert op.hook._verify is None
assert op.hook._config is None
@mock.patch.object(QuickSightHook, "create_ingestion", return_value=MOCK_RESPONSE)
def test_execute(self, mock_create_ingestion):
QuickSightCreateIngestionOperator(**self.default_op_kwargs).execute({})
mock_create_ingestion.assert_called_once_with(
data_set_id=DATA_SET_ID,
ingestion_id=INGESTION_ID,
ingestion_type="FULL_REFRESH",
wait_for_completion=True,
check_interval=30,
)
def test_template_fields(self):
operator = QuickSightCreateIngestionOperator(**self.default_op_kwargs)
validate_template_fields(operator)
| TestQuickSightCreateIngestionOperator |
python | dask__distributed | distributed/core.py | {
"start": 42656,
"end": 55194
} | class ____:
"""A maximum sized pool of Comm objects.
This provides a connect method that mirrors the normal distributed.connect
method, but provides connection sharing and tracks connection limits.
This object provides an ``rpc`` like interface::
>>> rpc = ConnectionPool(limit=512)
>>> scheduler = rpc('127.0.0.1:8786')
>>> workers = [rpc(address) for address in ...]
>>> info = await scheduler.identity()
It creates enough comms to satisfy concurrent connections to any
particular address::
>>> a, b = await asyncio.gather(scheduler.who_has(), scheduler.has_what())
It reuses existing comms so that we don't have to continuously reconnect.
It also maintains a comm limit to avoid "too many open file handle"
issues. Whenever this maximum is reached we clear out all idling comms.
If that doesn't do the trick then we wait until one of the occupied comms
closes.
Parameters
----------
limit: int
The number of open comms to maintain at once
deserialize: bool
Whether or not to deserialize data by default or pass it through
"""
_instances: ClassVar[weakref.WeakSet[ConnectionPool]] = weakref.WeakSet()
def __init__(
self,
limit: int = 512,
deserialize: bool = True,
serializers: list[str] | None = None,
allow_offload: bool = True,
deserializers: list[str] | None = None,
connection_args: dict[str, object] | None = None,
timeout: float | None = None,
server: object = None,
) -> None:
self.limit = limit # Max number of open comms
# Invariant: len(available) == open - active
self.available: defaultdict[str, set[Comm]] = defaultdict(set)
# Invariant: len(occupied) == active
self.occupied: defaultdict[str, set[Comm]] = defaultdict(set)
self.allow_offload = allow_offload
self.deserialize = deserialize
self.serializers = serializers
self.deserializers = deserializers if deserializers is not None else serializers
self.connection_args = connection_args or {}
self.timeout = timeout
self.server = weakref.ref(server) if server else None
self._created: weakref.WeakSet[Comm] = weakref.WeakSet()
self._instances.add(self)
# _n_connecting and _connecting have subtle different semantics. The set
# _connecting contains futures actively trying to establish a connection
# while the _n_connecting also accounts for connection attempts which
# are waiting due to the connection limit
self._connecting: defaultdict[str, set[Callable[[str], None]]] = defaultdict(
set
)
self._pending_count = 0
self._connecting_count = 0
self._connecting_close_timeout = 5
self.status = Status.init
def _validate(self) -> None:
"""
Validate important invariants of this class
Used only for testing / debugging
"""
assert self.semaphore._value == self.limit - self.open - self._n_connecting
@property
def active(self) -> int:
return sum(map(len, self.occupied.values()))
@property
def open(self) -> int:
return self.active + sum(map(len, self.available.values()))
def __repr__(self) -> str:
return "<ConnectionPool: open=%d, active=%d, connecting=%d>" % (
self.open,
self.active,
len(self._connecting),
)
def __call__(
self,
addr: str | tuple[str, int | None] | None = None,
ip: str | None = None,
port: int | None = None,
) -> PooledRPCCall:
"""Cached rpc objects"""
addr = addr_from_args(addr=addr, ip=ip, port=port)
return PooledRPCCall(
addr, self, serializers=self.serializers, deserializers=self.deserializers
)
def __await__(self) -> Generator[Any, Any, Self]:
async def _() -> Self:
await self.start()
return self
return _().__await__()
async def __aenter__(self):
await self
return self
async def __aexit__(self, *args):
await self.close()
return
async def start(self) -> None:
# Invariant: semaphore._value == limit - open - _n_connecting
self.semaphore = asyncio.Semaphore(self.limit)
self.status = Status.running
@property
def _n_connecting(self) -> int:
return self._connecting_count
async def _connect(self, addr: str, timeout: float | None = None) -> Comm:
self._pending_count += 1
try:
await self.semaphore.acquire()
try:
self._connecting_count += 1
comm = await connect(
addr,
timeout=timeout or self.timeout,
deserialize=self.deserialize,
**self.connection_args,
)
comm.name = "ConnectionPool"
comm._pool = weakref.ref(self)
comm.allow_offload = self.allow_offload
self._created.add(comm)
self.occupied[addr].add(comm)
return comm
except BaseException:
self.semaphore.release()
raise
finally:
self._connecting_count -= 1
finally:
self._pending_count -= 1
async def connect(self, addr: str, timeout: float | None = None) -> Comm:
"""
Get a Comm to the given address. For internal use.
"""
if self.status != Status.running:
raise RuntimeError("ConnectionPool is closed")
available = self.available[addr]
occupied = self.occupied[addr]
while available:
comm = available.pop()
if comm.closed():
self.semaphore.release()
else:
occupied.add(comm)
return comm
if self.semaphore.locked():
self.collect()
# on 3.11 this uses asyncio.timeout as a cancel scope to avoid having
# to track inner and outer CancelledError exceptions and correctly
# call .uncancel() when a CancelledError is caught.
if sys.version_info >= (3, 11):
reason: str | None = None
try:
async with asyncio.timeout(math.inf) as scope:
def cancel_timeout_cb(reason_: str) -> None:
nonlocal reason
reason = reason_
scope.reschedule(-1)
self._connecting[addr].add(cancel_timeout_cb)
try:
return await self._connect(addr=addr, timeout=timeout)
finally:
connecting = self._connecting[addr]
connecting.discard(cancel_timeout_cb)
if not connecting:
try:
del self._connecting[addr]
except KeyError:
pass
except TimeoutError:
if reason is None:
raise
raise CommClosedError(reason)
else:
# This construction is there to ensure that cancellation requests from
# the outside can be distinguished from cancellations of our own.
# Once the CommPool closes, we'll cancel the connect_attempt which will
# raise an OSError
# If the ``connect`` is cancelled from the outside, the Event.wait will
# be cancelled instead which we'll reraise as a CancelledError and allow
# it to propagate
connect_attempt = asyncio.create_task(self._connect(addr, timeout))
done = asyncio.Event()
reason = "ConnectionPool closing."
def cancel_task_cb(reason_: str) -> None:
nonlocal reason
reason = reason_
connect_attempt.cancel()
connecting = self._connecting[addr]
connecting.add(cancel_task_cb)
def callback(task: asyncio.Task[Comm]) -> None:
done.set()
connecting = self._connecting[addr]
connecting.discard(cancel_task_cb)
if not connecting:
try:
del self._connecting[addr]
except KeyError: # pragma: no cover
pass
connect_attempt.add_done_callback(callback)
try:
await done.wait()
except asyncio.CancelledError:
# This is an outside cancel attempt
connect_attempt.cancel()
await connect_attempt
raise
try:
return connect_attempt.result()
except asyncio.CancelledError:
if reason:
raise CommClosedError(reason)
raise
def reuse(self, addr: str, comm: Comm) -> None:
"""
Reuse an open communication to the given address. For internal use.
"""
# if the pool is asked to reuse a comm it does not know about, ignore
# this comm: just close it.
if comm not in self.occupied[addr]:
IOLoop.current().add_callback(comm.close)
else:
self.occupied[addr].remove(comm)
if comm.closed():
# Either the user passed the close=True parameter to send_recv, or
# the RPC call raised OSError or CancelledError
self.semaphore.release()
else:
self.available[addr].add(comm)
if self.semaphore.locked() and self._pending_count:
self.collect()
def collect(self) -> None:
"""
Collect open but unused communications, to allow opening other ones.
"""
logger.info(
"Collecting unused comms. open: %d, active: %d, connecting: %d",
self.open,
self.active,
len(self._connecting),
)
for comms in self.available.values():
for comm in comms:
IOLoop.current().add_callback(comm.close)
self.semaphore.release()
comms.clear()
def remove(self, addr: str, *, reason: str = "Address removed.") -> None:
"""
Remove all Comms to a given address.
"""
logger.debug("Removing comms to %s", addr)
if addr in self.available:
comms = self.available.pop(addr)
for comm in comms:
IOLoop.current().add_callback(comm.close)
self.semaphore.release()
if addr in self.occupied:
comms = self.occupied.pop(addr)
for comm in comms:
IOLoop.current().add_callback(comm.close)
self.semaphore.release()
if addr in self._connecting:
cbs = self._connecting[addr]
for cb in cbs:
cb(reason)
async def close(self) -> None:
"""
Close all communications
"""
self.status = Status.closed
for cbs in self._connecting.values():
for cb in cbs:
cb("ConnectionPool closing.")
for d in [self.available, self.occupied]:
comms = set()
while d:
comms.update(d.popitem()[1])
await asyncio.gather(
*(comm.close() for comm in comms), return_exceptions=True
)
for _ in comms:
self.semaphore.release()
start = time()
while self._connecting:
if time() - start > self._connecting_close_timeout:
logger.warning(
"Pending connections refuse to cancel. %d connections pending. Closing anyway.",
len(self._connecting),
)
break
await asyncio.sleep(0.01)
def coerce_to_address(o):
if isinstance(o, (list, tuple)):
o = unparse_host_port(*o)
return normalize_address(o)
def collect_causes(e: BaseException) -> list[BaseException]:
causes = []
while e.__cause__ is not None:
causes.append(e.__cause__)
e = e.__cause__
return causes
| ConnectionPool |
python | pypa__pipenv | pipenv/vendor/pipdeptree/_cli.py | {
"start": 606,
"end": 5205
} | class ____(ArgumentDefaultsHelpFormatter):
def __init__(self, prog: str) -> None:
super().__init__(prog, max_help_position=22, width=240)
def build_parser() -> ArgumentParser:
parser = ArgumentParser(description="Dependency tree of the installed python packages", formatter_class=_Formatter)
parser.add_argument("-v", "--version", action="version", version=f"{__version__}")
parser.add_argument(
"-w",
"--warn",
dest="warn",
type=WarningType,
nargs="?",
default="suppress",
action=EnumAction,
help=(
"warning control: suppress will show warnings but return 0 whether or not they are present; silence will "
"not show warnings at all and always return 0; fail will show warnings and return 1 if any are present"
),
)
select = parser.add_argument_group(title="select", description="choose what to render")
select.add_argument(
"--python",
default=sys.executable,
help=(
'Python interpreter to inspect. With "auto", it attempts to detect your virtual environment and fails if'
" it can't."
),
)
select.add_argument(
"-p",
"--packages",
help="comma separated list of packages to show - wildcards are supported, like 'somepackage.*'",
metavar="P",
)
select.add_argument(
"-e",
"--exclude",
help="comma separated list of packages to not show - wildcards are supported, like 'somepackage.*'. "
"(cannot combine with -p or -a)",
metavar="P",
)
select.add_argument("-a", "--all", action="store_true", help="list all deps at top level")
scope = select.add_mutually_exclusive_group()
scope.add_argument(
"-l",
"--local-only",
action="store_true",
help="if in a virtualenv that has global access do not show globally installed packages",
)
scope.add_argument("-u", "--user-only", action="store_true", help="only show installations in the user site dir")
render = parser.add_argument_group(
title="render",
description="choose how to render the dependency tree (by default will use text mode)",
)
render.add_argument("-f", "--freeze", action="store_true", help="print names so as to write freeze files")
render.add_argument(
"--encoding",
dest="encoding_type",
default=sys.stdout.encoding,
help="the encoding to use when writing to the output",
metavar="E",
)
render.add_argument(
"-d",
"--depth",
type=lambda x: int(x) if x.isdigit() and (int(x) >= 0) else parser.error("Depth must be a number that is >= 0"),
default=float("inf"),
help="limit the depth of the tree (text render only)",
metavar="D",
)
render.add_argument(
"-r",
"--reverse",
action="store_true",
default=False,
help=(
"render the dependency tree in the reverse fashion ie. the sub-dependencies are listed with the list of "
"packages that need them under them"
),
)
render.add_argument(
"--license",
action="store_true",
help="list the license(s) of a package (text render only)",
)
render_type = render.add_mutually_exclusive_group()
render_type.add_argument(
"-j",
"--json",
action="store_true",
default=False,
help="raw JSON - this will yield output that may be used by external tools",
)
render_type.add_argument(
"--json-tree",
action="store_true",
default=False,
help="nested JSON - mimics the text format layout",
)
render_type.add_argument(
"--mermaid",
action="store_true",
default=False,
help="https://mermaid.js.org flow diagram",
)
render_type.add_argument(
"--graph-output",
metavar="FMT",
dest="output_format",
help="Graphviz rendering with the value being the graphviz output e.g.: dot, jpeg, pdf, png, svg",
)
return parser
def get_options(args: Sequence[str] | None) -> Options:
parser = build_parser()
parsed_args = parser.parse_args(args)
if parsed_args.exclude and (parsed_args.all or parsed_args.packages):
return parser.error("cannot use --exclude with --packages or --all")
if parsed_args.license and parsed_args.freeze:
return parser.error("cannot use --license with --freeze")
return cast(Options, parsed_args)
| _Formatter |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis35.py | {
"start": 315,
"end": 1397
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis35.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [63008128, 62522496]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_y_axis({"line": {"none": True}})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | tests/forms_tests/tests/test_input_formats.py | {
"start": 8759,
"end": 12630
} | class ____(SimpleTestCase):
def test_timeField(self):
"TimeFields can parse dates in the default format"
f = forms.TimeField()
# Parse a time in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("1:30:05 PM")
# Parse a time in a valid format, get a parsed result
result = f.clean("13:30:05")
self.assertEqual(result, time(13, 30, 5))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid, but non-default format, get a parsed result
result = f.clean("13:30")
self.assertEqual(result, time(13, 30, 0))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:00")
def test_localized_timeField(self):
"""
Localized TimeFields in a non-localized environment act as unlocalized
widgets
"""
f = forms.TimeField()
# Parse a time in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("1:30:05 PM")
# Parse a time in a valid format, get a parsed result
result = f.clean("13:30:05")
self.assertEqual(result, time(13, 30, 5))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean("13:30")
self.assertEqual(result, time(13, 30, 0))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:00")
def test_timeField_with_inputformat(self):
"""
TimeFields with manually specified input formats can accept those
formats
"""
f = forms.TimeField(input_formats=["%I:%M:%S %p", "%I:%M %p"])
# Parse a time in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean("1:30:05 PM")
self.assertEqual(result, time(13, 30, 5))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean("1:30 PM")
self.assertEqual(result, time(13, 30, 0))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:00")
def test_localized_timeField_with_inputformat(self):
"""
Localized TimeFields with manually specified input formats can accept
those formats.
"""
f = forms.TimeField(input_formats=["%I:%M:%S %p", "%I:%M %p"], localize=True)
# Parse a time in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean("1:30:05 PM")
self.assertEqual(result, time(13, 30, 5))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean("1:30 PM")
self.assertEqual(result, time(13, 30, 0))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:00")
| SimpleTimeFormatTests |
python | mlflow__mlflow | tests/tracing/test_fluent.py | {
"start": 2259,
"end": 2851
} | class ____:
@mlflow.trace(output_reducer=lambda x: sum(x))
def predict_stream(self, x, y):
z = x + y
for i in range(z):
yield i
# Generator with a normal func
for i in range(z):
yield self.square(i)
# Nested generator
yield from self.generate_numbers(z)
@mlflow.trace
def square(self, t):
time.sleep(0.1)
return t**2
# No output_reducer -> record the list of outputs
@mlflow.trace
def generate_numbers(self, z):
for i in range(z):
yield i
| StreamTestModel |
python | doocs__leetcode | solution/1800-1899/1879.Minimum XOR Sum of Two Arrays/Solution3.py | {
"start": 0,
"end": 387
} | class ____:
def minimumXORSum(self, nums1: List[int], nums2: List[int]) -> int:
n = len(nums2)
f = [inf] * (1 << n)
f[0] = 0
for i in range(1, 1 << n):
k = i.bit_count() - 1
for j in range(n):
if i >> j & 1:
f[i] = min(f[i], f[i ^ (1 << j)] + (nums1[k] ^ nums2[j]))
return f[-1]
| Solution |
python | pydata__xarray | asv_bench/benchmarks/groupby.py | {
"start": 2450,
"end": 2941
} | class ____(GroupBy):
"""Run groupby tests using pandas DataFrame."""
def setup(self, *args, **kwargs):
# Skip testing in CI as it won't ever change in a commit:
_skip_slow()
super().setup(**kwargs)
self.ds1d = self.ds1d.to_dataframe()
self.ds1d_mean = self.ds1d.groupby("b").mean()
def time_binary_op_2d(self):
raise NotImplementedError
def peakmem_binary_op_2d(self):
raise NotImplementedError
| GroupByPandasDataFrame |
python | yandexdataschool__Practical_RL | week08_pomdp/env_pool.py | {
"start": 240,
"end": 4497
} | class ____(object):
def __init__(self, agent, make_env, n_parallel_games=1):
"""
A special class that handles training on multiple parallel sessions
and is capable of some auxilary actions like evaluating agent on one game session (See .evaluate()).
:param agent: Agent which interacts with the environment.
:param make_env: Factory that produces environments OR a name of the gymnasium environment.
:param n_games: Number of parallel games. One game by default.
:param max_size: Max pool size by default (if appending sessions). By default, pool is not constrained in size.
"""
# Create Atari games.
self.agent = agent
self.make_env = make_env
self.envs = [self.make_env() for _ in range(n_parallel_games)]
# Initial observations.
self.prev_observations = [env.reset()[0] for env in self.envs]
# Agent memory variables (if you use recurrent networks).
self.prev_memory_states = agent.get_initial_state(n_parallel_games)
# Whether particular session has just been terminated or truncated and needs
# restarting.
self.just_ended = [False] * len(self.envs)
def interact(self, n_steps=100, verbose=False):
"""Generate interaction sessions with ataries (Farama gymnasium Atari environments)
Sessions will have length n_steps. Each time one of games is finished, it is immediately getting reset
and this time is recorded in is_alive_log (See returned values).
:param n_steps: Length of an interaction.
:returns: observation_seq, action_seq, reward_seq, is_alive_seq
:rtype: a bunch of tensors [batch, tick, ...]
"""
def env_step(i, action):
if not self.just_ended[i]:
new_observation, cur_reward, terminated, truncated, info = \
self.envs[i].step(action)
if terminated or truncated:
# Game ends now, will finalize on next tick.
self.just_ended[i] = True
# note: is_alive=True in any case because environment is still
# alive (last tick alive) in our notation.
return new_observation, cur_reward, True, info
else:
# Reset environment, get new observation to be used on next
# tick.
new_observation = self.envs[i].reset()[0]
# Reset memory for new episode.
initial_memory_state = self.agent.get_initial_state(
batch_size=1)
for m_i in range(len(new_memory_states)):
new_memory_states[m_i][i] = initial_memory_state[m_i][0]
if verbose:
print("env %i reloaded" % i)
self.just_ended[i] = False
return new_observation, 0, False, {'end': True}
history_log = []
for i in range(n_steps - 1):
new_memory_states, readout = self.agent.step(
self.prev_memory_states, self.prev_observations)
actions = self.agent.sample_actions(readout)
new_observations, cur_rewards, is_alive, infos = zip(
*map(env_step, range(len(self.envs)), actions))
# Append data tuple for this tick.
history_log.append(
(self.prev_observations, actions, cur_rewards, is_alive))
self.prev_observations = new_observations
self.prev_memory_states = new_memory_states
# add last observation
dummy_actions = [0] * len(self.envs)
dummy_rewards = [0] * len(self.envs)
dummy_mask = [1] * len(self.envs)
history_log.append(
(self.prev_observations,
dummy_actions,
dummy_rewards,
dummy_mask))
# cast to numpy arrays, transpose from [time, batch, ...] to [batch,
# time, ...]
history_log = [
np.array(tensor).swapaxes(0, 1)
for tensor in zip(*history_log)
]
observation_seq, action_seq, reward_seq, is_alive_seq = history_log
return observation_seq, action_seq, reward_seq, is_alive_seq
| EnvPool |
python | pytorch__pytorch | torch/testing/_internal/optests/generate_tests.py | {
"start": 26700,
"end": 29458
} | class ____(Exception):
pass
def generate_repro(
test: str,
op: torch._ops.OpOverload,
args: tuple[Any, ...],
kwargs: dict[str, Any],
*,
save_data: bool,
dry_run: bool = False,
) -> str:
if save_data:
now = datetime.datetime.now()
path = os.path.join(tempfile.gettempdir(), "pytorch_opcheck_safe_to_delete")
unix_timestamp = datetime.datetime.timestamp(now) * 100000
filepath = os.path.join(path, f"repro_{unix_timestamp}.pt")
if not dry_run:
os.makedirs(path, exist_ok=True)
torch.save((args, kwargs), filepath)
args_kwargs = f'args, kwargs = torch.load("{filepath}")'
else:
args_kwargs = (
"# If you rerun your test with PYTORCH_OPCHECK_PRINT_BETTER_REPRO=1\n"
"# we will fill them in same (args, kwargs) as in your test\n"
"args = () # args to the operator\n"
"kwargs = {} # kwargs to the operator"
)
ns, name = op._schema.name.split("::")
overload = op._overloadname
repro_command = (
f"# =========================================================\n"
f"# BEGIN REPRO SCRIPT\n"
f"# =========================================================\n"
f"import torch\n"
f"from torch.testing._internal.optests import opcheck\n"
f"\n"
f"# Make sure you have loaded the library that contains the op\n"
f"# via an import or torch.ops.load_library(...)\n"
f"op = torch.ops.{ns}.{name}.{overload}\n"
f"\n"
f"{args_kwargs}\n"
f'opcheck(op, args, kwargs, test_utils="{test}")\n'
f"# =========================================================\n"
f"# END REPRO SCRIPT\n"
f"# =========================================================\n"
)
return repro_command
def resolve_unique_overload_or_throw(
op: torch._ops.OpOverloadPacket,
) -> torch._ops.OpOverload:
all_schemas = torch._C._jit_get_schemas_for_operator(op._qualified_op_name)
if len(all_schemas) != 1:
raise RuntimeError(
f"opcheck can only test operators without overloads. "
f"Got the following overloads for {op._qualified_op_name}: "
f"{[schema.overload_name for schema in all_schemas]}"
)
overload_name = all_schemas[0].overload_name
if overload_name == "":
return op.default
return getattr(op, overload_name)
DUMP_OPTIONS = {"indent": 2, "sort_keys": True}
FailuresDictData = dict[str, dict[str, dict[str, str]]]
VERSION = 1
DESCRIPTION = (
f"This is a dict containing failures for tests autogenerated by "
f"generate_opcheck_tests. "
f"For more details, please see {GDOC}"
)
| OpCheckError |
python | xlwings__xlwings | xlwings/_xlwindows.py | {
"start": 29539,
"end": 33767
} | class ____(base_classes.Sheet):
def __init__(self, xl):
self.xl = xl
@property
def api(self):
return self.xl
@property
def name(self):
return self.xl.Name
@name.setter
def name(self, value):
self.xl.Name = value
@property
def names(self):
return Names(xl=self.xl.Names)
@property
def book(self):
return Book(xl=self.xl.Parent)
@property
def index(self):
return self.xl.Index
def range(self, arg1, arg2=None):
if isinstance(arg1, Range):
xl1 = arg1.xl
elif isinstance(arg1, tuple):
if len(arg1) == 4:
row, col, nrows, ncols = arg1
return Range(xl=(self.xl, row, col, nrows, ncols))
if 0 in arg1:
raise IndexError(
"Attempted to access 0-based Range. "
"xlwings/Excel Ranges are 1-based."
)
xl1 = self.xl.Cells(arg1[0], arg1[1])
elif isinstance(arg1, numbers.Number) and isinstance(arg2, numbers.Number):
xl1 = self.xl.Cells(arg1, arg2)
arg2 = None
else:
xl1 = self.xl.Range(arg1)
if arg2 is None:
return Range(xl=xl1)
if isinstance(arg2, Range):
xl2 = arg2.xl
elif isinstance(arg2, tuple):
if 0 in arg2:
raise IndexError(
"Attempted to access 0-based Range. "
"xlwings/Excel Ranges are 1-based."
)
xl2 = self.xl.Cells(arg2[0], arg2[1])
else:
xl2 = self.xl.Range(arg2)
return Range(xl=self.xl.Range(xl1, xl2))
@property
def cells(self):
return Range(xl=self.xl.Cells)
def activate(self):
return self.xl.Activate()
def select(self):
return self.xl.Select()
def clear_contents(self):
self.xl.Cells.ClearContents()
def clear_formats(self):
self.xl.Cells.ClearFormats()
def clear(self):
self.xl.Cells.Clear()
def autofit(self, axis=None):
if axis == "rows" or axis == "r":
self.xl.Rows.AutoFit()
elif axis == "columns" or axis == "c":
self.xl.Columns.AutoFit()
elif axis is None:
self.xl.Rows.AutoFit()
self.xl.Columns.AutoFit()
def delete(self):
app = self.xl.Parent.Application
alerts_state = app.DisplayAlerts
app.DisplayAlerts = False
self.xl.Delete()
app.DisplayAlerts = alerts_state
def copy(self, before, after):
if before:
before = before.xl
if after:
after = after.xl
self.xl.Copy(Before=before, After=after)
@property
def charts(self):
return Charts(xl=self.xl.ChartObjects())
@property
def shapes(self):
return Shapes(xl=self.xl.Shapes)
@property
def tables(self):
return Tables(xl=self.xl.ListObjects)
@property
def pictures(self):
return Pictures(xl=self.xl.Pictures())
@property
def used_range(self):
return Range(xl=self.xl.UsedRange)
@property
def visible(self):
return self.xl.Visible
@visible.setter
def visible(self, value):
self.xl.Visible = value
@property
def page_setup(self):
return PageSetup(self.xl.PageSetup)
def to_html(self, path):
if not Path(path).is_absolute():
path = Path(".").resolve() / path
source_cell2 = self.used_range.address.split(":")
if len(source_cell2) == 2:
source = f"A1:{source_cell2[1]}"
else:
source = f"A1:{source_cell2[0]}"
self.book.xl.PublishObjects.Add(
SourceType=SourceType.xlSourceRange,
Filename=path,
Sheet=self.name,
Source=source,
HtmlType=HtmlType.xlHtmlStatic,
).Publish(True)
html_file = Path(path)
content = html_file.read_text()
html_file.write_text(
content.replace(
"align=center x:publishsource=", "align=left x:publishsource="
)
)
| Sheet |
python | python__mypy | mypy/reachability.py | {
"start": 12539,
"end": 13013
} | class ____(TraverserVisitor):
"""Visitor that sets is_mypy_only (which affects priority)."""
def visit_import(self, node: Import) -> None:
node.is_mypy_only = True
def visit_import_from(self, node: ImportFrom) -> None:
node.is_mypy_only = True
def visit_import_all(self, node: ImportAll) -> None:
node.is_mypy_only = True
def visit_func_def(self, node: FuncDef) -> None:
node.is_mypy_only = True
| MarkImportsMypyOnlyVisitor |
python | explosion__spaCy | spacy/language.py | {
"start": 2321,
"end": 4411
} | class ____:
"""Language data defaults, available via Language.Defaults. Can be
overwritten by language subclasses by defining their own subclasses of
Language.Defaults.
"""
config: Config = Config(section_order=CONFIG_SECTION_ORDER)
tokenizer_exceptions: Dict[str, List[dict]] = BASE_EXCEPTIONS
prefixes: Optional[Sequence[Union[str, Pattern]]] = TOKENIZER_PREFIXES
suffixes: Optional[Sequence[Union[str, Pattern]]] = TOKENIZER_SUFFIXES
infixes: Optional[Sequence[Union[str, Pattern]]] = TOKENIZER_INFIXES
token_match: Optional[Callable] = None
url_match: Optional[Callable] = URL_MATCH
syntax_iterators: Dict[str, Callable] = {}
lex_attr_getters: Dict[int, Callable[[str], Any]] = {}
stop_words: Set[str] = set()
writing_system = {"direction": "ltr", "has_case": True, "has_letters": True}
def create_tokenizer() -> Callable[["Language"], Tokenizer]:
"""Registered function to create a tokenizer. Returns a factory that takes
the nlp object and returns a Tokenizer instance using the language detaults.
"""
def tokenizer_factory(nlp: "Language") -> Tokenizer:
prefixes = nlp.Defaults.prefixes
suffixes = nlp.Defaults.suffixes
infixes = nlp.Defaults.infixes
prefix_search = util.compile_prefix_regex(prefixes).search if prefixes else None
suffix_search = util.compile_suffix_regex(suffixes).search if suffixes else None
infix_finditer = util.compile_infix_regex(infixes).finditer if infixes else None
return Tokenizer(
nlp.vocab,
rules=nlp.Defaults.tokenizer_exceptions,
prefix_search=prefix_search,
suffix_search=suffix_search,
infix_finditer=infix_finditer,
token_match=nlp.Defaults.token_match,
url_match=nlp.Defaults.url_match,
)
return tokenizer_factory
def load_lookups_data(lang, tables):
util.logger.debug("Loading lookups from spacy-lookups-data: %s", tables)
lookups = load_lookups(lang=lang, tables=tables)
return lookups
| BaseDefaults |
python | ansible__ansible | lib/ansible/module_utils/_internal/_datatag/__init__.py | {
"start": 30395,
"end": 31727
} | class ____(datetime.datetime, AnsibleTaggedObject):
__slots__ = _ANSIBLE_TAGGED_OBJECT_SLOTS
@classmethod
def _instance_factory(cls, value: datetime.datetime, tags_mapping: _AnsibleTagsMapping) -> _AnsibleTaggedDateTime:
instance = cls(
year=value.year,
month=value.month,
day=value.day,
hour=value.hour,
minute=value.minute,
second=value.second,
microsecond=value.microsecond,
tzinfo=value.tzinfo,
fold=value.fold,
)
instance._ansible_tags_mapping = tags_mapping
return instance
def _native_copy(self) -> datetime.datetime:
return datetime.datetime(
year=self.year,
month=self.month,
day=self.day,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond,
tzinfo=self.tzinfo,
fold=self.fold,
)
def __new__(cls, year, *args, **kwargs):
return super()._new(year, *args, **kwargs)
def __reduce_ex__(self, protocol: t.SupportsIndex) -> tuple:
return super()._reduce(super().__reduce_ex__(protocol))
def __repr__(self) -> str:
return self._native_copy().__repr__()
| _AnsibleTaggedDateTime |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 30519,
"end": 30653
} | class ____(BaseModel):
dag_id: str
run_id: str
type: Literal["GetTaskBreadcrumbs"] = "GetTaskBreadcrumbs"
| GetTaskBreadcrumbs |
python | astropy__astropy | astropy/time/core.py | {
"start": 127142,
"end": 129309
} | class ____(TypeError):
def __init__(self, left, right, op=None):
op_string = "" if op is None else f" for {op}"
super().__init__(
f"Unsupported operand type(s){op_string}: '{type(left).__name__}' "
f"and '{type(right).__name__}'"
)
def _check_leapsec():
global _LEAP_SECONDS_CHECK
if _LEAP_SECONDS_CHECK != _LeapSecondsCheck.DONE:
with _LEAP_SECONDS_LOCK:
# There are three ways we can get here:
# 1. First call (NOT_STARTED).
# 2. Re-entrant call (RUNNING). We skip the initialisation
# and don't worry about leap second errors.
# 3. Another thread which raced with the first call
# (RUNNING). The first thread has relinquished the
# lock to us, so initialization is complete.
if _LEAP_SECONDS_CHECK == _LeapSecondsCheck.NOT_STARTED:
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.RUNNING
update_leap_seconds()
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.DONE
def update_leap_seconds(files=None):
"""If the current ERFA leap second table is out of date, try to update it.
Uses `astropy.utils.iers.LeapSeconds.auto_open` to try to find an
up-to-date table. See that routine for the definition of "out of date".
In order to make it safe to call this any time, all exceptions are turned
into warnings,
Parameters
----------
files : list of path-like, optional
List of files/URLs to attempt to open. By default, uses defined by
`astropy.utils.iers.LeapSeconds.auto_open`, which includes the table
used by ERFA itself, so if that is up to date, nothing will happen.
Returns
-------
n_update : int
Number of items updated.
"""
try:
from astropy.utils import iers
table = iers.LeapSeconds.auto_open(files)
return erfa.leap_seconds.update(table)
except Exception as exc:
warn(
f"leap-second auto-update failed due to the following exception: {exc!r}",
AstropyWarning,
)
return 0
| OperandTypeError |
python | patrick-kidger__equinox | equinox/internal/_misc.py | {
"start": 335,
"end": 2259
} | class ____(type):
reverse_lookup: dict
def __new__(cls, name, bases, dict):
assert "reverse_lookup" not in dict
_dict = {}
reverse_lookup = []
i = 0
for key, value in dict.items():
if key.startswith("__") and key.endswith("__"):
_dict[key] = value
else:
_dict[key] = i
reverse_lookup.append(value)
i += 1
_dict["reverse_lookup"] = reverse_lookup
return super().__new__(cls, name, bases, _dict)
def __instancecheck__(cls, instance):
if is_array(instance):
return instance.shape == () and jnp.issubdtype(instance.dtype, jnp.integer)
else:
return isinstance(instance, int) or super().__instancecheck__(instance)
def __getitem__(cls, item):
return cls.reverse_lookup[item]
def __len__(cls):
return len(cls.reverse_lookup)
_X = TypeVar("_X")
def scan_trick(fn: Callable, intermediates: Sequence[Callable], init: _X) -> _X:
def body(carry, step):
out = fn(carry)
step = nonbatchable(step)
out = lax.switch(step, intermediates, out)
return out, None
intermediates = list(intermediates) + [lambda x: x]
out, _ = lax.scan(body, init, xs=jnp.arange(len(intermediates)))
return out
def eval_empty(fn: Callable, *inputs: PyTree[Any]) -> PyTree[Array]:
out = filter_eval_shape(fn, *inputs)
return jtu.tree_map(lambda x: jnp.empty(x.shape, x.dtype), out)
def eval_zero(fn: Callable, *inputs: PyTree[Any]) -> PyTree[Array]:
out = filter_eval_shape(fn, *inputs)
return jtu.tree_map(lambda x: jnp.zeros(x.shape, x.dtype), out)
def eval_full(fn: Callable, *inputs: PyTree[Any], fill_value: Scalar) -> PyTree[Any]:
out = filter_eval_shape(fn, *inputs)
return jtu.tree_map(lambda x: jnp.full(x.shape, fill_value, x.dtype), out)
| ContainerMeta |
python | tensorflow__tensorflow | tensorflow/python/saved_model/load_v1_in_v2.py | {
"start": 1867,
"end": 2933
} | class ____(resource.CapturableResource):
"""Represents an initialization operation restored from a SavedModel.
Without this object re-export of imported 1.x SavedModels would omit the
original SavedModel's initialization procedure.
Created when `tf.saved_model.load` loads a TF 1.x-style SavedModel with an
initialization op. This object holds a function that runs the
initialization. It does not require any manual user intervention;
`tf.saved_model.save` will see this object and automatically add it to the
exported SavedModel, and `tf.saved_model.load` runs the initialization
function automatically.
"""
def __init__(self, init_fn, asset_paths):
super(_Initializer, self).__init__()
self._asset_paths = asset_paths
self._init_fn = init_fn
def _create_resource(self):
# Return a constant here so that when re-saved, the traced `create_resource`
# has valid returns.
return constant_op.constant(1.0)
def _initialize(self):
return self._init_fn(*[path.asset_path for path in self._asset_paths])
| _Initializer |
python | wandb__wandb | tests/unit_tests/test_launch/test_runner/test_safe_watch.py | {
"start": 206,
"end": 2388
} | class ____:
"""Mock class for testing."""
def __init__(self):
self.is_alive = True
self.args = []
self.queue = []
async def stream(self, *args, **kwargs):
"""Simulate an input stream."""
self.args.append((args, kwargs))
while True:
if not self.is_alive:
break
if not self.queue:
continue
item = self.queue.pop(0)
if isinstance(item, Exception) or item is StopIteration:
raise item
yield item
def stop(self):
self.is_alive = False
def add(self, item):
self.queue.append(item)
def event_factory(resource_version):
"""Create an event."""
mock_event = MagicMock()
mock_event.get.return_value.metadata.resource_version = resource_version
return mock_event
# If this timeout fails it means that the SafeWatch is not breaking out of its
# loop after stop() is called.
@pytest.mark.timeout(60)
@pytest.mark.asyncio
@pytest.mark.xfail(reason="This test is flaky.")
async def test_safe_watch():
"""Test that safewatch wraps properly.
This unit test is designed to verify that the SafeWatch is properly wrapping
the watch object so that it continues to yield items even if the watch object
raises specific exceptions.
"""
watch = MockWatch()
item_1 = event_factory("1")
item_2 = event_factory("2")
item_3 = event_factory("3")
item_4 = event_factory("4")
watch.add(item_1)
watch.add(ProtocolError("test"))
watch.add(item_2)
watch.add(StopIteration)
watch.add(item_3)
watch.add(ApiException(410))
watch.add(item_4)
safe_watch = SafeWatch(watch)
stream = safe_watch.stream(None)
assert await stream.__anext__() == item_1
assert safe_watch._last_seen_resource_version == "1"
assert await stream.__anext__() == item_2
assert safe_watch._last_seen_resource_version == "2"
assert await stream.__anext__() == item_3
assert safe_watch._last_seen_resource_version == "3"
assert await stream.__anext__() == item_4
assert safe_watch._last_seen_resource_version == "4"
| MockWatch |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/ppo/trainer.py | {
"start": 1089,
"end": 8375
} | class ____(OnPolicyTrainer):
"""The PPOTrainer is an implementation of the PPO algorithm."""
def __init__(
self,
behavior_name: str,
reward_buff_cap: int,
trainer_settings: TrainerSettings,
training: bool,
load: bool,
seed: int,
artifact_path: str,
):
"""
Responsible for collecting experiences and training PPO model.
:param behavior_name: The name of the behavior associated with trainer config
:param reward_buff_cap: Max reward history to track in the reward buffer
:param trainer_settings: The parameters for the trainer.
:param training: Whether the trainer is set for training.
:param load: Whether the model should be loaded.
:param seed: The seed the model will be initialized with
:param artifact_path: The directory within which to store artifacts from this trainer.
"""
super().__init__(
behavior_name,
reward_buff_cap,
trainer_settings,
training,
load,
seed,
artifact_path,
)
self.hyperparameters: PPOSettings = cast(
PPOSettings, self.trainer_settings.hyperparameters
)
self.seed = seed
self.shared_critic = self.hyperparameters.shared_critic
self.policy: TorchPolicy = None # type: ignore
def _process_trajectory(self, trajectory: Trajectory) -> None:
"""
Takes a trajectory and processes it, putting it into the update buffer.
Processing involves calculating value and advantage targets for model updating step.
:param trajectory: The Trajectory tuple containing the steps to be processed.
"""
super()._process_trajectory(trajectory)
agent_id = trajectory.agent_id # All the agents should have the same ID
agent_buffer_trajectory = trajectory.to_agentbuffer()
# Check if we used group rewards, warn if so.
self._warn_if_group_reward(agent_buffer_trajectory)
# Update the normalization
if self.is_training:
self.policy.actor.update_normalization(agent_buffer_trajectory)
self.optimizer.critic.update_normalization(agent_buffer_trajectory)
# Get all value estimates
(
value_estimates,
value_next,
value_memories,
) = self.optimizer.get_trajectory_value_estimates(
agent_buffer_trajectory,
trajectory.next_obs,
trajectory.done_reached and not trajectory.interrupted,
)
if value_memories is not None:
agent_buffer_trajectory[BufferKey.CRITIC_MEMORY].set(value_memories)
for name, v in value_estimates.items():
agent_buffer_trajectory[RewardSignalUtil.value_estimates_key(name)].extend(
v
)
self._stats_reporter.add_stat(
f"Policy/{self.optimizer.reward_signals[name].name.capitalize()} Value Estimate",
np.mean(v),
)
# Evaluate all reward functions
self.collected_rewards["environment"][agent_id] += np.sum(
agent_buffer_trajectory[BufferKey.ENVIRONMENT_REWARDS]
)
for name, reward_signal in self.optimizer.reward_signals.items():
evaluate_result = (
reward_signal.evaluate(agent_buffer_trajectory) * reward_signal.strength
)
agent_buffer_trajectory[RewardSignalUtil.rewards_key(name)].extend(
evaluate_result
)
# Report the reward signals
self.collected_rewards[name][agent_id] += np.sum(evaluate_result)
# Compute GAE and returns
tmp_advantages = []
tmp_returns = []
for name in self.optimizer.reward_signals:
bootstrap_value = value_next[name]
local_rewards = agent_buffer_trajectory[
RewardSignalUtil.rewards_key(name)
].get_batch()
local_value_estimates = agent_buffer_trajectory[
RewardSignalUtil.value_estimates_key(name)
].get_batch()
local_advantage = get_gae(
rewards=local_rewards,
value_estimates=local_value_estimates,
value_next=bootstrap_value,
gamma=self.optimizer.reward_signals[name].gamma,
lambd=self.hyperparameters.lambd,
)
local_return = local_advantage + local_value_estimates
# This is later use as target for the different value estimates
agent_buffer_trajectory[RewardSignalUtil.returns_key(name)].set(
local_return
)
agent_buffer_trajectory[RewardSignalUtil.advantage_key(name)].set(
local_advantage
)
tmp_advantages.append(local_advantage)
tmp_returns.append(local_return)
# Get global advantages
global_advantages = list(
np.mean(np.array(tmp_advantages, dtype=np.float32), axis=0)
)
global_returns = list(np.mean(np.array(tmp_returns, dtype=np.float32), axis=0))
agent_buffer_trajectory[BufferKey.ADVANTAGES].set(global_advantages)
agent_buffer_trajectory[BufferKey.DISCOUNTED_RETURNS].set(global_returns)
self._append_to_update_buffer(agent_buffer_trajectory)
# If this was a terminal trajectory, append stats and reset reward collection
if trajectory.done_reached:
self._update_end_episode_stats(agent_id, self.optimizer)
def create_optimizer(self) -> TorchOptimizer:
return TorchPPOOptimizer( # type: ignore
cast(TorchPolicy, self.policy), self.trainer_settings # type: ignore
) # type: ignore
def create_policy(
self, parsed_behavior_id: BehaviorIdentifiers, behavior_spec: BehaviorSpec
) -> TorchPolicy:
"""
Creates a policy with a PyTorch backend and PPO hyperparameters
:param parsed_behavior_id:
:param behavior_spec: specifications for policy construction
:return policy
"""
actor_cls: Union[Type[SimpleActor], Type[SharedActorCritic]] = SimpleActor
actor_kwargs: Dict[str, Any] = {
"conditional_sigma": False,
"tanh_squash": False,
}
if self.shared_critic:
reward_signal_configs = self.trainer_settings.reward_signals
reward_signal_names = [
key.value for key, _ in reward_signal_configs.items()
]
actor_cls = SharedActorCritic
actor_kwargs.update({"stream_names": reward_signal_names})
policy = TorchPolicy(
self.seed,
behavior_spec,
self.trainer_settings.network_settings,
actor_cls,
actor_kwargs,
)
return policy
def get_policy(self, name_behavior_id: str) -> Policy:
"""
Gets policy from trainer associated with name_behavior_id
:param name_behavior_id: full identifier of policy
"""
return self.policy
@staticmethod
def get_trainer_name() -> str:
return TRAINER_NAME
| PPOTrainer |
python | huggingface__transformers | src/transformers/models/olmo3/modular_olmo3.py | {
"start": 9665,
"end": 12130
} | class ____(Olmo2Attention):
def __init__(self, config: Olmo3Config, layer_idx: int):
super().__init__(config, layer_idx=layer_idx)
assert config.layer_types is not None
self.attention_type = config.layer_types[layer_idx]
self.sliding_window = config.sliding_window if self.attention_type == "sliding_attention" else None
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_norm(self.q_proj(hidden_states))
key_states = self.k_norm(self.k_proj(hidden_states))
value_states = self.v_proj(hidden_states)
query_states = query_states.view(hidden_shape).transpose(1, 2)
key_states = key_states.view(hidden_shape).transpose(1, 2)
value_states = value_states.view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=self.sliding_window,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Olmo3Attention |
python | sympy__sympy | sympy/tensor/array/expressions/array_expressions.py | {
"start": 11018,
"end": 12945
} | class ____(_CodegenArrayAbstract):
r"""
Class for elementwise array additions.
"""
def __new__(cls, *args, **kwargs):
args = [_sympify(arg) for arg in args]
ranks = [get_rank(arg) for arg in args]
ranks = list(set(ranks))
if len(ranks) != 1:
raise ValueError("summing arrays of different ranks")
shapes = [arg.shape if hasattr(arg, "shape") else () for arg in args]
if len({i for i in shapes if i is not None}) > 1:
raise ValueError("mismatching shapes in addition")
canonicalize = kwargs.pop("canonicalize", False)
obj = Basic.__new__(cls, *args)
obj._subranks = ranks
if any(i is None for i in shapes):
obj._shape = None
else:
obj._shape = shapes[0]
if canonicalize:
return obj._canonicalize()
return obj
def _canonicalize(self):
args = self.args
# Flatten:
args = self._flatten_args(args)
shapes = [get_shape(arg) for arg in args]
args = [arg for arg in args if not isinstance(arg, (ZeroArray, ZeroMatrix))]
if len(args) == 0:
if any(i for i in shapes if i is None):
raise NotImplementedError("cannot handle addition of ZeroMatrix/ZeroArray and undefined shape object")
return ZeroArray(*shapes[0])
elif len(args) == 1:
return args[0]
return self.func(*args, canonicalize=False)
@classmethod
def _flatten_args(cls, args):
new_args = []
for arg in args:
if isinstance(arg, ArrayAdd):
new_args.extend(arg.args)
else:
new_args.append(arg)
return new_args
def as_explicit(self):
return reduce(
operator.add,
[arg.as_explicit() if hasattr(arg, "as_explicit") else arg for arg in self.args])
| ArrayAdd |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.