language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | tests/sentry/snuba/test_discover_query.py | {
"start": 992,
"end": 116651
} | class ____(SnubaTestCase, TestCase):
def setUp(self) -> None:
    """Create a baseline event, release, and environment plus default query params.

    Most tests query against ``self.event`` (message "oh no", tag key1=value1)
    through ``self.params``, a one-day window over ``self.project``.
    """
    super().setUp()
    self.environment = self.create_environment(self.project, name="prod")
    self.release = self.create_release(self.project, version="first-release")
    self.now = before_now()
    self.one_min_ago = before_now(minutes=1)
    self.two_min_ago = before_now(minutes=2)
    self.event_time = self.one_min_ago
    # Baseline event shared by most tests in this class.
    self.event = self.store_event(
        data={
            "message": "oh no",
            "release": "first-release",
            "environment": "prod",
            "platform": "python",
            "user": {"id": "99", "email": "bruce@example.com", "username": "brucew"},
            "timestamp": self.event_time.isoformat(),
            "tags": [["key1", "value1"]],
        },
        project_id=self.project.id,
    )
    # Default query window: last day, scoped to the test project.
    self.params = SnubaParams(
        start=before_now(days=1),
        end=self.now,
        projects=[self.project],
        organization=self.organization,
    )
def test_project_mapping(self) -> None:
    """The ``project`` column resolves the stored project id to its slug."""
    other_project = self.create_project(organization=self.organization)
    # Restrict the query scope to the new project only; the setUp event's
    # project is excluded.
    self.params.projects = [other_project]
    self.store_event(
        data={"message": "hello", "timestamp": self.one_min_ago.isoformat()},
        project_id=other_project.id,
    )
    result = discover.query(
        selected_columns=["project", "message"],
        query="",
        snuba_params=self.params,
        orderby=["project"],
        referrer="discover",
    )
    data = result["data"]
    assert len(data) == 1
    assert data[0]["project"] == other_project.slug
def test_sorting_project_name(self) -> None:
    """Ordering by ``project`` sorts rows by project slug, ascending."""
    projects = []
    # Deliberately created out of alphabetical order to prove sorting happens.
    for project_name in ["a" * 32, "z" * 32, "m" * 32]:
        other_project = self.create_project(organization=self.organization, slug=project_name)
        projects.append(other_project)
        self.store_event(
            data={"message": "ohh no", "timestamp": self.one_min_ago.isoformat()},
            project_id=other_project.id,
        )
    self.params.projects = projects
    result = discover.query(
        selected_columns=["project", "message"],
        query="",
        snuba_params=self.params,
        orderby=["project"],
        referrer="test_discover_query",
    )
    data = result["data"]
    assert len(data) == 3
    assert [item["project"] for item in data] == ["a" * 32, "m" * 32, "z" * 32]
def test_reverse_sorting_project_name(self) -> None:
    """Ordering by ``-project`` sorts rows by project slug, descending."""
    projects = []
    # Deliberately created out of alphabetical order to prove sorting happens.
    for project_name in ["a" * 32, "z" * 32, "m" * 32]:
        other_project = self.create_project(organization=self.organization, slug=project_name)
        projects.append(other_project)
        self.store_event(
            data={"message": "ohh no", "timestamp": self.one_min_ago.isoformat()},
            project_id=other_project.id,
        )
    self.params.projects = projects
    result = discover.query(
        selected_columns=["project", "message"],
        query="",
        snuba_params=self.params,
        orderby=["-project"],
        referrer="test_discover_query",
    )
    data = result["data"]
    assert len(data) == 3
    assert [item["project"] for item in data] == ["z" * 32, "m" * 32, "a" * 32]
def test_using_project_and_project_name(self) -> None:
    """``project`` and ``project.name`` can be selected together and sorted on."""
    projects = []
    for project_name in ["a" * 32, "z" * 32, "m" * 32]:
        other_project = self.create_project(organization=self.organization, slug=project_name)
        projects.append(other_project)
        self.store_event(
            data={"message": "ohh no", "timestamp": self.one_min_ago.isoformat()},
            project_id=other_project.id,
        )
    self.params.projects = projects
    result = discover.query(
        selected_columns=["project.name", "message", "project"],
        query="",
        snuba_params=self.params,
        orderby=["project.name"],
        referrer="test_discover_query",
    )
    data = result["data"]
    assert len(data) == 3
    assert [item["project.name"] for item in data] == [
        "a" * 32,
        "m" * 32,
        "z" * 32,
    ]
def test_missing_project(self) -> None:
    """Events whose project row was deleted are dropped from the results."""
    projects = []
    other_project = None
    for project_name in ["a" * 32, "z" * 32, "m" * 32]:
        other_project = self.create_project(organization=self.organization, slug=project_name)
        projects.append(other_project)
        self.store_event(
            data={"message": "ohh no", "timestamp": self.one_min_ago.isoformat()},
            project_id=other_project.id,
        )
    self.params.projects = projects
    # delete the last project so its missing
    if other_project is not None:
        other_project.delete()
    result = discover.query(
        selected_columns=["message", "project"],
        query="",
        snuba_params=self.params,
        orderby=["project"],
        referrer="test_discover_query",
    )
    data = result["data"]
    # The "m" * 32 project was deleted, so only two rows remain.
    assert len(data) == 2
    assert [item["project"] for item in data] == ["a" * 32, "z" * 32]
def test_issue_short_id_mapping(self) -> None:
    """``issue`` / ``issue.id`` columns and filters resolve to the same group."""
    tests = [
        ("issue", f"issue:{self.event.group.qualified_short_id}"),
        ("issue", f"issue.id:{self.event.group_id}"),
        ("issue.id", f"issue:{self.event.group.qualified_short_id}"),
        ("issue.id", f"issue.id:{self.event.group_id}"),
    ]
    for column, query in tests:
        result = discover.query(
            selected_columns=[column],
            query=query,
            referrer="discover",
            snuba_params=self.params,
        )
        data = result["data"]
        assert len(data) == 1
        # The query will translate `issue` into `issue.id`. Additional post processing
        # is required to insert the `issue` column.
        assert [item["issue.id"] for item in data] == [self.event.group_id]
def test_issue_filters(self) -> None:
    """``has:`` and list-style filters on issue/issue.id match the stored group."""
    tests = [
        "has:issue",
        "has:issue.id",
        f"issue:[{self.event.group.qualified_short_id}]",
        f"issue.id:[{self.event.group_id}]",
    ]
    for query in tests:
        result = discover.query(
            selected_columns=["issue", "issue.id"],
            query=query,
            snuba_params=self.params,
            referrer="discover",
        )
        data = result["data"]
        assert len(data) == 1
        # The query will translate `issue` into `issue.id`. Additional post processing
        # is required to insert the `issue` column.
        assert [item["issue.id"] for item in data] == [self.event.group_id]
def test_tags_orderby(self) -> None:
    """Sorting works on both bare tag keys and the ``tags[...]`` syntax."""
    # Second event gives key1 a second value so ordering is observable.
    self.event = self.store_event(
        data={
            "message": "oh no",
            "release": "first-release",
            "environment": "prod",
            "platform": "python",
            "user": {"id": "99", "email": "bruce@example.com", "username": "brucew"},
            "timestamp": self.event_time.isoformat(),
            "tags": [["key1", "value2"]],
        },
        project_id=self.project.id,
    )
    # (selected column, orderby expression, expected sorted values)
    tests = [
        ("key1", "key1", ["value1", "value2"]),
        ("key1", "-key1", ["value2", "value1"]),
        ("tags[key1]", "tags[key1]", ["value1", "value2"]),
        ("tags[key1]", "-tags[key1]", ["value2", "value1"]),
    ]
    for column, orderby, expected in tests:
        result = discover.query(
            selected_columns=[column],
            query="",
            snuba_params=self.params,
            orderby=[orderby],
            referrer="test_discover_query",
        )
        data = result["data"]
        assert len(data) == len(expected)
        assert [item[column] for item in data] == expected
def test_tags_filter(self) -> None:
    """Filtering on tags supports has/negation/exact/wildcard/list syntaxes."""
    # Second event gives key1 a second value so multi-row filters are observable.
    self.event = self.store_event(
        data={
            "message": "oh no",
            "release": "first-release",
            "environment": "prod",
            "platform": "python",
            "user": {"id": "99", "email": "bruce@example.com", "username": "brucew"},
            "timestamp": self.event_time.isoformat(),
            "tags": [["key1", "value2"]],
        },
        project_id=self.project.id,
    )
    # (selected column, query string, expected values in sorted order)
    tests: list[tuple[str, str, list[str]]] = [
        ("key1", "", ["value1", "value2"]),
        ("key1", "has:key1", ["value1", "value2"]),
        ("key1", "!has:key1", []),
        ("key1", "key1:value1", ["value1"]),
        ("key1", "key1:value2", ["value2"]),
        ("key1", 'key1:""', []),
        ("key1", "key1:value*", ["value1", "value2"]),
        ("key1", 'key1:["value1"]', ["value1"]),
        ("key1", 'key1:["value1", "value2"]', ["value1", "value2"]),
        ("tags[key1]", "", ["value1", "value2"]),
        # has does not work with tags[...] syntax
        # ("tags[key1]", 'has:"tags[key1]"', ["value1", "value2"]),
        # ("tags[key1]", '!has:"tags[key1]"', []),
        ("tags[key1]", "tags[key1]:value1", ["value1"]),
        ("tags[key1]", "tags[key1]:value2", ["value2"]),
        ("tags[key1]", 'tags[key1]:""', []),
        ("tags[key1]", "tags[key1]:value*", ["value1", "value2"]),
        ("tags[key1]", 'tags[key1]:["value1"]', ["value1"]),
        ("tags[key1]", 'tags[key1]:["value1", "value2"]', ["value1", "value2"]),
    ]
    for column, query, expected in tests:
        result = discover.query(
            selected_columns=[column],
            query=query,
            snuba_params=self.params,
            orderby=[column],
            referrer="test_discover_query",
        )
        data = result["data"]
        assert len(data) == len(expected), (column, query, expected)
        assert [item[column] for item in data] == expected
def test_tags_colliding_with_fields(self) -> None:
    """A tag named like a built-in field (``id``) only shadows it via tags[...]."""
    # This event carries a tag literally named "id" to force the collision.
    event = self.store_event(
        data={
            "message": "oh no",
            "release": "first-release",
            "environment": "prod",
            "platform": "python",
            "user": {"id": "99", "email": "bruce@example.com", "username": "brucew"},
            "timestamp": self.event_time.isoformat(),
            "tags": [["id", "new"]],
        },
        project_id=self.project.id,
    )
    # Bare "id" resolves to the event id field; "tags[id]" resolves to the tag.
    tests = [
        ("id", "", sorted([self.event.event_id, event.event_id])),
        ("id", f"id:{event.event_id}", [event.event_id]),
        ("tags[id]", "", ["", "new"]),
        ("tags[id]", "tags[id]:new", ["new"]),
    ]
    for column, query, expected in tests:
        result = discover.query(
            selected_columns=[column],
            query=query,
            snuba_params=self.params,
            orderby=[column],
            referrer="test_discover_query",
        )
        data = result["data"]
        assert len(data) == len(expected), (query, expected)
        assert [item[column] for item in data] == expected
def test_reverse_sorting_issue(self) -> None:
    """``issue.id`` can be sorted both ascending and descending."""
    # Second event creates a second group so ordering is observable.
    other_event = self.store_event(
        data={
            "message": "whoopsies",
            "release": "first-release",
            "environment": "prod",
            "platform": "python",
            "user": {"id": "99", "email": "bruce@example.com", "username": "brucew"},
            "timestamp": self.event_time.isoformat(),
        },
        project_id=self.project.id,
    )
    tests = [
        # issue is not sortable
        # "issue",
        "issue.id",
    ]
    for column in tests:
        for direction in ["", "-"]:
            result = discover.query(
                selected_columns=[column],
                query="",
                snuba_params=self.params,
                orderby=[f"{direction}{column}"],
                referrer="discover",
            )
            data = result["data"]
            assert len(data) == 2
            expected = [self.event.group_id, other_event.group_id]
            if direction == "-":
                expected.reverse()
            assert [item["issue.id"] for item in data] == expected
def test_timestamp_rounding_fields(self) -> None:
    """``timestamp.to_hour``/``timestamp.to_day`` truncate the event timestamp."""
    result = discover.query(
        selected_columns=["timestamp.to_hour", "timestamp.to_day"],
        query="",
        snuba_params=self.params,
        referrer="test_discover_query",
    )
    data = result["data"]
    assert len(data) == 1
    # Expected values: the event time truncated to the hour / day boundary.
    hour = self.event_time.replace(minute=0, second=0, microsecond=0)
    day = hour.replace(hour=0)
    assert [item["timestamp.to_hour"] for item in data] == [hour.isoformat()]
    assert [item["timestamp.to_day"] for item in data] == [day.isoformat()]
def test_timestamp_rounding_filters(self) -> None:
    """Filters on the truncated timestamp fields select the older event only."""
    one_day_ago = before_now(days=1)
    two_day_ago = before_now(days=2)
    three_day_ago = before_now(days=3)
    # Widen the window so the two-day-old event is in scope.
    self.params.start = three_day_ago
    self.store_event(
        data={
            "message": "oh no",
            "release": "first-release",
            "environment": "prod",
            "platform": "python",
            "user": {"id": "99", "email": "bruce@example.com", "username": "brucew"},
            "timestamp": two_day_ago.isoformat(),
        },
        project_id=self.project.id,
    )
    # Both conditions exclude the one-minute-old setUp event.
    result = discover.query(
        selected_columns=["timestamp.to_hour", "timestamp.to_day"],
        query=f"timestamp.to_hour:<{one_day_ago.isoformat()} timestamp.to_day:<{one_day_ago.isoformat()}",
        snuba_params=self.params,
        referrer="test_discover_query",
    )
    data = result["data"]
    assert len(data) == 1
    hour = two_day_ago.replace(minute=0, second=0, microsecond=0)
    day = hour.replace(hour=0)
    assert [item["timestamp.to_hour"] for item in data] == [hour.isoformat()]
    assert [item["timestamp.to_day"] for item in data] == [day.isoformat()]
def test_user_display(self) -> None:
    """``user.display`` falls back through email, username, id, then ip."""
    # `user.display` should give `username`
    self.store_event(
        data={
            "message": "oh no",
            "release": "first-release",
            "environment": "prod",
            "platform": "python",
            "user": {"username": "brucew", "id": "1234", "ip": "127.0.0.1"},
            "timestamp": self.event_time.isoformat(),
        },
        project_id=self.project.id,
    )
    # `user.display` should give `id`
    self.store_event(
        data={
            "message": "oh no",
            "release": "first-release",
            "environment": "prod",
            "platform": "python",
            "user": {"id": "1234", "ip": "127.0.0.1"},
            "timestamp": self.event_time.isoformat(),
        },
        project_id=self.project.id,
    )
    # `user.display` should give `ip`
    self.store_event(
        data={
            "message": "oh no",
            "release": "first-release",
            "environment": "prod",
            "platform": "python",
            "user": {"ip_address": "127.0.0.1"},
            "timestamp": self.event_time.isoformat(),
        },
        project_id=self.project.id,
    )
    result = discover.query(
        selected_columns=["user.display"],
        query="",
        snuba_params=self.params,
        referrer="test_discover_query",
    )
    data = result["data"]
    # Four rows: the setUp event (email) plus the three stored above.
    assert len(data) == 4
    assert {item["user.display"] for item in data} == {
        "bruce@example.com",
        "brucew",
        "1234",
        "127.0.0.1",
    }
def test_user_display_filter(self) -> None:
    """Filtering on ``user.display`` matches only the event with that value."""
    # `user.display` should give `username`
    self.store_event(
        data={
            "message": "oh no",
            "release": "first-release",
            "environment": "prod",
            "platform": "python",
            "user": {"username": "brucew", "ip": "127.0.0.1"},
            "timestamp": self.event_time.isoformat(),
        },
        project_id=self.project.id,
    )
    # Only the setUp event has user.display == its email.
    result = discover.query(
        selected_columns=["user.display"],
        query="has:user.display user.display:bruce@example.com",
        snuba_params=self.params,
        referrer="test_discover_query",
    )
    data = result["data"]
    assert len(data) == 1
    assert [item["user.display"] for item in data] == ["bruce@example.com"]
def test_message_orderby(self) -> None:
    """``message`` can be ordered ascending and descending."""
    # Second message so ordering is observable.
    self.event = self.store_event(
        data={
            "message": "oh yeah",
            "release": "first-release",
            "environment": "prod",
            "platform": "python",
            "user": {"id": "99", "email": "bruce@example.com", "username": "brucew"},
            "timestamp": self.event_time.isoformat(),
        },
        project_id=self.project.id,
    )
    tests = [
        ("message", ["oh no", "oh yeah"]),
        ("-message", ["oh yeah", "oh no"]),
    ]
    for orderby, expected in tests:
        result = discover.query(
            selected_columns=["message"],
            query="",
            snuba_params=self.params,
            orderby=[orderby],
            referrer="test_discover_query",
        )
        data = result["data"]
        assert len(data) == 2
        assert [item["message"] for item in data] == expected
def test_message_filter(self) -> None:
    """Message filtering supports exact/empty/has/wildcard/list syntaxes."""
    self.event = self.store_event(
        data={
            "message": "oh yeah",
            "release": "first-release",
            "environment": "prod",
            "platform": "python",
            "user": {"id": "99", "email": "bruce@example.com", "username": "brucew"},
            "timestamp": self.event_time.isoformat(),
        },
        project_id=self.project.id,
    )
    # (query string, expected messages in sorted order)
    tests: list[tuple[str, list[str]]] = [
        ('message:"oh no"', ["oh no"]),
        ('message:"oh yeah"', ["oh yeah"]),
        ('message:""', []),
        ("has:message", ["oh no", "oh yeah"]),
        ("!has:message", []),
        ("message:oh*", ["oh no", "oh yeah"]),
        ('message:"oh *"', ["oh no", "oh yeah"]),
        ('message:["oh meh"]', []),
        ('message:["oh yeah"]', ["oh yeah"]),
        ('message:["oh yeah", "oh no"]', ["oh no", "oh yeah"]),
    ]
    for query, expected in tests:
        result = discover.query(
            selected_columns=["message"],
            query=query,
            snuba_params=self.params,
            orderby=["message"],
            referrer="test_discover_query",
        )
        data = result["data"]
        assert len(data) == len(expected)
        assert [item["message"] for item in data] == expected
def test_team_key_transactions(self) -> None:
    """``team_key_transaction`` column/filters reflect per-team key transactions."""
    team1 = self.create_team(organization=self.organization, name="Team A")
    self.project.add_team(team1)
    team2 = self.create_team(organization=self.organization, name="Team B")
    self.project.add_team(team2)
    # One plain transaction plus one key transaction per team.
    transactions = ["/blah_transaction/"]
    key_transactions = [
        (team1, "/foo_transaction/"),
        (team2, "/zoo_transaction/"),
    ]
    for transaction in transactions:
        data = load_data(
            "transaction",
            timestamp=before_now(minutes=(5)),
        )
        data["transaction"] = transaction
        self.store_event(data, project_id=self.project.id)
    for team, transaction in key_transactions:
        data = load_data(
            "transaction",
            timestamp=before_now(minutes=(5)),
        )
        data["transaction"] = transaction
        self.store_event(data, project_id=self.project.id)
        TeamKeyTransaction.objects.create(
            organization=self.organization,
            transaction=transaction,
            project_team=ProjectTeam.objects.get(project=self.project, team=team),
        )
    # (query, [(transaction, team_key_transaction flag), ...] sorted by name)
    queries = [
        ("", [("/blah_transaction/", 0), ("/foo_transaction/", 1), ("/zoo_transaction/", 1)]),
        ("has:team_key_transaction", [("/foo_transaction/", 1), ("/zoo_transaction/", 1)]),
        ("!has:team_key_transaction", [("/blah_transaction/", 0)]),
        ("team_key_transaction:true", [("/foo_transaction/", 1), ("/zoo_transaction/", 1)]),
        ("team_key_transaction:false", [("/blah_transaction/", 0)]),
    ]
    for query, expected_results in queries:
        result = discover.query(
            selected_columns=["transaction", "team_key_transaction"],
            query=query,
            snuba_params=SnubaParams(
                start=before_now(minutes=10),
                end=before_now(minutes=2),
                projects=[self.project],
                organization=self.organization,
                teams=[team1, team2],
            ),
            referrer="test_discover_query",
        )
        data = result["data"]
        assert len(data) == len(expected_results)
        assert [
            (x["transaction"], x["team_key_transaction"])
            for x in sorted(data, key=lambda k: k["transaction"])
        ] == expected_results
@pytest.mark.xfail(reason="Started failing on ClickHouse 21.8")
def test_snql_wip_project_threshold_config(self) -> None:
    """``project_threshold_config`` resolves per-project and per-transaction
    threshold overrides, falling back to the default when none is configured."""
    # Project-level threshold for self.project: duration/100.
    ProjectTransactionThreshold.objects.create(
        project=self.project,
        organization=self.project.organization,
        threshold=100,
        metric=TransactionMetric.DURATION.value,
    )
    # Project-level threshold for project2: lcp/600.
    project2 = self.create_project()
    ProjectTransactionThreshold.objects.create(
        project=project2,
        organization=project2.organization,
        threshold=600,
        metric=TransactionMetric.LCP.value,
    )
    events = [
        ("a" * 10, 300),
        ("b" * 10, 300),
        ("c" * 10, 3000),
        ("d" * 10, 3000),
    ]
    for idx, event in enumerate(events):
        data = load_data(
            "transaction",
            timestamp=before_now(minutes=(3 + idx)),
            start_timestamp=before_now(minutes=(3 + idx), milliseconds=event[1]),
        )
        data["event_id"] = f"{idx}" * 32
        data["transaction"] = event[0]
        self.store_event(data, project_id=self.project.id)
        # Every other transaction gets a per-transaction override (duration/1000).
        if idx % 2:
            ProjectTransactionThresholdOverride.objects.create(
                transaction=event[0],
                project=self.project,
                organization=self.organization,
                threshold=1000,
                metric=TransactionMetric.DURATION.value,
            )
    data = load_data(
        "transaction", timestamp=before_now(minutes=3), start_timestamp=before_now(minutes=4)
    )
    data["transaction"] = "e" * 10
    self.store_event(data, project_id=project2.id)
    expected_transaction = ["a" * 10, "b" * 10, "c" * 10, "d" * 10, "e" * 10]
    expected_project_threshold_config = [
        ["duration", 100],
        ["duration", 1000],
        ["duration", 100],
        ["duration", 1000],
        ["lcp", 600],
    ]
    result = discover.query(
        selected_columns=["project", "transaction", "project_threshold_config"],
        query="",
        snuba_params=SnubaParams(
            start=before_now(minutes=10),
            end=before_now(minutes=2),
            projects=[self.project, project2],
            organization=self.organization,
        ),
        referrer="test_discover_query",
    )
    assert len(result["data"]) == 5
    sorted_data = sorted(result["data"], key=lambda k: k["transaction"])
    assert [row["transaction"] for row in sorted_data] == expected_transaction
    assert [row["project_threshold_config"][0] for row in sorted_data] == [
        r[0] for r in expected_project_threshold_config
    ]
    assert [row["project_threshold_config"][1] for row in sorted_data] == [
        r[1] for r in expected_project_threshold_config
    ]
    # Remove project2's threshold; its events should fall back to the default.
    ProjectTransactionThreshold.objects.filter(
        project=project2,
        organization=project2.organization,
    ).delete()
    expected_transaction = ["e" * 10]
    expected_project_threshold_config = [["duration", 300]]
    result = discover.query(
        selected_columns=["project", "transaction", "project_threshold_config"],
        query="",
        snuba_params=SnubaParams(
            start=before_now(minutes=10),
            end=before_now(minutes=2),
            projects=[project2],
            organization=self.organization,
        ),
        referrer="test_discover_query",
    )
    assert len(result["data"]) == 1
    sorted_data = sorted(result["data"], key=lambda k: k["transaction"])
    assert [row["transaction"] for row in sorted_data] == expected_transaction
    assert [row["project_threshold_config"][0] for row in sorted_data] == [
        r[0] for r in expected_project_threshold_config
    ]
    assert [row["project_threshold_config"][1] for row in sorted_data] == [
        r[1] for r in expected_project_threshold_config
    ]
def test_to_other_function(self) -> None:
    """``to_other(...)`` buckets rows into this/that (or custom labels)."""
    project = self.create_project()
    # Three events on the matching release, two on non-matching releases.
    for i in range(3):
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["transaction"] = f"/to_other/{i}"
        data["release"] = "aaaa"
        self.store_event(data, project_id=project.id)
    data = load_data("transaction", timestamp=before_now(minutes=5))
    data["transaction"] = "/to_other/y"
    data["release"] = "yyyy"
    self.store_event(data, project_id=project.id)
    data = load_data("transaction", timestamp=before_now(minutes=5))
    data["transaction"] = "/to_other/z"
    data["release"] = "zzzz"
    self.store_event(data, project_id=project.id)
    columns1 = ["transaction", 'to_other(release,"aaaa")']
    columns2 = ["transaction", 'to_other(release,"aaaa",old,new)']
    # (columns, query, expected labels in transaction order, result alias)
    test_cases = [
        (columns1, "", ["this", "this", "this", "that", "that"], "to_other_release__aaaa"),
        (columns2, "", ["new", "new", "new", "old", "old"], "to_other_release__aaaa__old_new"),
    ]
    for cols, query, expected, alias in test_cases:
        result = discover.query(
            selected_columns=cols,
            query=query,
            orderby=["transaction"],
            snuba_params=SnubaParams(
                start=before_now(minutes=10),
                end=before_now(minutes=2),
                projects=[project],
            ),
            referrer="test_discover_query",
        )
        data = result["data"]
        assert len(data) == len(expected)
        assert [x[alias] for x in data] == expected
def test_count_if_function(self) -> None:
    """``count_if(column, op, value)`` counts rows matching the comparison.

    Stores 3 events on release "aaaa" and one each on "bbbb"/"cccc", then
    checks equals/notEquals and less/lessOrEquals against the expected tallies.
    """
    # Loop variable is unused; `_` signals that only the repetition matters.
    for _ in range(3):
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["release"] = "aaaa"
        self.store_event(data, project_id=self.project.id)
    data = load_data("transaction", timestamp=before_now(minutes=5))
    data["release"] = "bbbb"
    self.store_event(data, project_id=self.project.id)
    data = load_data("transaction", timestamp=before_now(minutes=5))
    data["release"] = "cccc"
    self.store_event(data, project_id=self.project.id)
    columns1 = ["count()", "count_if(release,equals,aaaa)", "count_if(release,notEquals,aaaa)"]
    columns2 = ["count()", "count_if(release,less,bbbb)", "count_if(release,lessOrEquals,bbbb)"]
    # (selected columns, query, expected single aggregate row)
    test_cases = [
        (
            columns1,
            "",
            {
                "count": 5,
                "count_if_release_equals_aaaa": 3,
                "count_if_release_notEquals_aaaa": 2,
            },
        ),
        (
            columns2,
            "",
            {
                "count": 5,
                "count_if_release_less_bbbb": 3,
                "count_if_release_lessOrEquals_bbbb": 4,
            },
        ),
    ]
    for cols, query, expected in test_cases:
        result = discover.query(
            selected_columns=cols,
            query=query,
            snuba_params=SnubaParams(
                start=before_now(minutes=10),
                end=before_now(minutes=2),
                projects=[self.project],
            ),
            referrer="test_discover_query",
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0] == expected
def test_count_if_function_with_unicode(self) -> None:
    """``count_if`` works when the compared value contains non-ASCII text,
    and the result alias escapes the unicode code points."""
    unicode_phrase1 = "\u716e\u6211\u66f4\u591a\u7684\u98df\u7269\uff0c\u6211\u9913\u4e86"
    unicode_phrase2 = "\u53cd\u6b63\u611b\u60c5\u4e0d\u5c31\u90a3\u6837"
    # Loop variable is unused; `_` signals that only the repetition matters.
    for _ in range(3):
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["release"] = unicode_phrase1
        self.store_event(data, project_id=self.project.id)
    data = load_data("transaction", timestamp=before_now(minutes=5))
    data["release"] = unicode_phrase2
    self.store_event(data, project_id=self.project.id)
    columns1 = [
        "count()",
        f"count_if(release,equals,{unicode_phrase1})",
        f"count_if(release,notEquals,{unicode_phrase1})",
    ]
    # (selected columns, query, expected single aggregate row); the aliases
    # below are the escaped forms discover generates for the unicode value.
    test_cases = [
        (
            columns1,
            "",
            {
                "count": 4,
                "count_if_release_equals__u716e_u6211_u66f4_u591a_u7684_u98df_u7269_uff0c_u6211_u9913_u4e86": 3,
                "count_if_release_notEquals__u716e_u6211_u66f4_u591a_u7684_u98df_u7269_uff0c_u6211_u9913_u4e86": 1,
            },
        ),
    ]
    for cols, query, expected in test_cases:
        result = discover.query(
            selected_columns=cols,
            query=query,
            snuba_params=SnubaParams(
                start=before_now(minutes=10),
                end=before_now(minutes=2),
                projects=[self.project],
            ),
            referrer="test_discover_query",
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0] == expected
def test_failure_count_function(self) -> None:
    """``failure_count()`` counts failed transactions; aggregate conditions
    filter rows only when ``use_aggregate_conditions`` is enabled."""
    project = self.create_project()
    data = load_data("transaction", timestamp=before_now(minutes=5))
    data["transaction"] = "/failure_count/success"
    self.store_event(data, project_id=project.id)
    data = load_data("transaction", timestamp=before_now(minutes=5))
    data["transaction"] = "/failure_count/unknown"
    data["contexts"]["trace"]["status"] = "unknown_error"
    self.store_event(data, project_id=project.id)
    # Six distinct failing transactions, plus a second failure for /0.
    for i in range(6):
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["transaction"] = f"/failure_count/{i}"
        data["contexts"]["trace"]["status"] = "unauthenticated"
        self.store_event(data, project_id=project.id)
    data = load_data("transaction", timestamp=before_now(minutes=5))
    data["transaction"] = "/failure_count/0"
    data["contexts"]["trace"]["status"] = "unauthenticated"
    self.store_event(data, project_id=project.id)
    # (query, expected row count, use_aggregate_conditions)
    queries = [
        ("", 8, True),
        ("failure_count():>0", 7, True),
        ("failure_count():>0", 8, False),
    ]
    for query, expected_length, use_aggregate_conditions in queries:
        result = discover.query(
            selected_columns=["transaction", "failure_count()"],
            query=query,
            orderby=["transaction"],
            snuba_params=SnubaParams(
                start=before_now(minutes=10),
                end=before_now(minutes=2),
                projects=[project],
            ),
            use_aggregate_conditions=use_aggregate_conditions,
            referrer="discover",
        )
        data = result["data"]
        assert len(data) == expected_length
        # /failure_count/0 failed twice; the next transaction failed once.
        assert data[0]["failure_count"] == 2
        assert data[1]["failure_count"] == 1
def test_apdex_function(self) -> None:
    """``apdex()`` honors project/transaction thresholds; ``apdex(N)`` uses a
    fixed threshold instead."""
    project = self.create_project()
    # Project default: duration/400; /apdex/ace overridden to lcp/400.
    ProjectTransactionThreshold.objects.create(
        project=project,
        organization=project.organization,
        threshold=400,
        metric=TransactionMetric.DURATION.value,
    )
    ProjectTransactionThresholdOverride.objects.create(
        project=project,
        transaction="/apdex/ace",
        organization=project.organization,
        threshold=400,
        metric=TransactionMetric.LCP.value,
    )
    project2 = self.create_project()
    # (transaction suffix, duration in ms)
    events = [
        ("ace", 400),
        ("ace", 400),
        ("one", 400),
        ("one", 400),
        ("two", 3000),
        ("two", 3000),
        ("three", 300),
        ("three", 3000),
        ("zorp", 300),
        ("zorp", 3000),
    ]
    for idx, event in enumerate(events):
        data = load_data(
            "transaction",
            timestamp=before_now(minutes=(5 + idx)),
            start_timestamp=before_now(minutes=(5 + idx), milliseconds=event[1]),
        )
        data["measurements"]["lcp"]["value"] = 3000
        data["event_id"] = f"{idx}" * 32
        data["transaction"] = f"/apdex/{event[0]}"
        data["user"] = {"email": f"{idx}@example.com"}
        if event[0] == "zorp":
            self.store_event(data, project_id=project2.id)  # No custom thresholds for project2
        else:
            self.store_event(data, project_id=project.id)
    # (query, expected apdex per transaction sorted by name, columns, alias)
    queries = [
        ("", [0.5, 0.5, 0.25, 0.0, 0.25], ["apdex(100)"], "apdex_100"),
        ("", [0.0, 1.0, 0.5, 0.0, 0.5], ["apdex()"], "apdex"),
        ("apdex(100):<0.5", [0.25, 0.0, 0.25], ["apdex(100)"], "apdex_100"),
        ("apdex():>0", [1.0, 0.5, 0.5], ["apdex()"], "apdex"),
    ]
    for query, expected_apdex, col, alias in queries:
        result = discover.query(
            selected_columns=["transaction"] + col,
            query=query,
            orderby=["transaction"],
            referrer="discover",
            snuba_params=SnubaParams(
                start=before_now(minutes=30),
                end=before_now(minutes=2),
                projects=[project, project2],
                organization=self.organization,
            ),
            use_aggregate_conditions=True,
        )
        data = result["data"]
        assert len(data) == len(expected_apdex)
        assert [
            x[alias] for x in sorted(data, key=lambda k: k["transaction"])
        ] == expected_apdex
def test_count_miserable_function(self) -> None:
    """``count_miserable(user)`` counts users above the configured threshold;
    ``count_miserable(user, N)`` uses a fixed threshold instead."""
    project = self.create_project()
    # Project default: duration/400; /count_miserable/ace overridden to lcp/400.
    ProjectTransactionThreshold.objects.create(
        project=project,
        organization=project.organization,
        threshold=400,
        metric=TransactionMetric.DURATION.value,
    )
    ProjectTransactionThresholdOverride.objects.create(
        project=project,
        transaction="/count_miserable/ace",
        organization=project.organization,
        threshold=400,
        metric=TransactionMetric.LCP.value,
    )
    project2 = self.create_project()
    # (transaction suffix, duration in ms); each event has a distinct user.
    events = [
        ("ace", 400),
        ("ace", 400),
        ("one", 400),
        ("one", 400),
        ("two", 3000),
        ("two", 3000),
        ("three", 300),
        ("three", 3000),
        ("zorp", 300),
        ("zorp", 3000),
    ]
    for idx, event in enumerate(events):
        data = load_data(
            "transaction",
            timestamp=before_now(minutes=(5 + idx)),
            start_timestamp=before_now(minutes=(5 + idx), milliseconds=event[1]),
        )
        data["measurements"]["lcp"]["value"] = 3000
        data["event_id"] = f"{idx}" * 32
        data["transaction"] = f"/count_miserable/{event[0]}"
        data["user"] = {"email": f"{idx}@example.com"}
        if event[0] == "zorp":
            self.store_event(data, project_id=project2.id)  # No custom thresholds for project2
        else:
            self.store_event(data, project_id=project.id)
    # (query, expected counts per transaction sorted by name, columns, alias)
    queries = [
        (
            "",
            [0, 0, 1, 2, 1],
            ["count_miserable(user,100)"],
            "count_miserable_user_100",
        ),
        ("", [2, 0, 1, 2, 1], ["count_miserable(user)"], "count_miserable_user"),
        (
            "count_miserable(user,100):<2",
            [0, 0, 1, 1],
            ["count_miserable(user,100)"],
            "count_miserable_user_100",
        ),
        (
            "count_miserable(user):>0",
            [2, 1, 2, 1],
            ["count_miserable(user)"],
            "count_miserable_user",
        ),
    ]
    for query, expected_count_miserable, col, alias in queries:
        result = discover.query(
            selected_columns=["transaction"] + col,
            query=query,
            orderby=["transaction"],
            referrer="discover",
            snuba_params=SnubaParams(
                start=before_now(minutes=30),
                end=before_now(minutes=2),
                projects=[project, project2],
                organization=self.organization,
            ),
            use_aggregate_conditions=True,
        )
        data = result["data"]
        assert len(data) == len(expected_count_miserable)
        assert [
            x[alias] for x in sorted(data, key=lambda k: k["transaction"])
        ] == expected_count_miserable
def test_user_misery_function(self) -> None:
    """``user_misery()`` honors configured thresholds; ``user_misery(N)`` uses
    a fixed threshold. Float results are compared with an absolute tolerance.
    """
    project = self.create_project()
    # Project default: duration/400; /user_misery/ace overridden to lcp/400.
    ProjectTransactionThreshold.objects.create(
        project=project,
        organization=project.organization,
        threshold=400,
        metric=TransactionMetric.DURATION.value,
    )
    ProjectTransactionThresholdOverride.objects.create(
        project=project,
        transaction="/user_misery/ace",
        organization=project.organization,
        threshold=400,
        metric=TransactionMetric.LCP.value,
    )
    project2 = self.create_project()
    # (transaction suffix, duration in ms); each event has a distinct user.
    events = [
        ("ace", 400),
        ("ace", 400),
        ("one", 400),
        ("one", 400),
        ("two", 3000),
        ("two", 3000),
        ("three", 300),
        ("three", 3000),
        ("zorp", 300),
        ("zorp", 3000),
    ]
    for idx, event in enumerate(events):
        data = load_data(
            "transaction",
            timestamp=before_now(minutes=(5 + idx)),
            start_timestamp=before_now(minutes=(5 + idx), milliseconds=event[1]),
        )
        data["measurements"]["lcp"]["value"] = 3000
        data["event_id"] = f"{idx}" * 32
        data["transaction"] = f"/user_misery/{event[0]}"
        data["user"] = {"email": f"{idx}@example.com"}
        if event[0] == "zorp":
            self.store_event(data, project_id=project2.id)  # No custom thresholds for project2
        else:
            self.store_event(data, project_id=project.id)
    # (query, expected misery per transaction sorted by name, columns, alias)
    queries = [
        (
            "",
            [0.0492, 0.0492, 0.0575, 0.0659, 0.0575],
            ["user_misery(100)"],
            "user_misery_100",
        ),
        ("", [0.0659, 0.0492, 0.0575, 0.0659, 0.0575], ["user_misery()"], "user_misery"),
        (
            "user_misery(100):<0.06",
            [0.0492, 0.0492, 0.0575, 0.0575],
            ["user_misery(100)"],
            "user_misery_100",
        ),
        (
            "user_misery():>0.05",
            [0.0659, 0.0575, 0.0659, 0.0575],
            ["user_misery()"],
            "user_misery",
        ),
    ]

    # PEP 8 (E731): use a def instead of binding a lambda to a name.
    def similar(a: float, b: float) -> bool:
        # User misery is a ratio; accept small float deviations.
        return abs(a - b) < 0.001

    for query, expected_user_misery, col, alias in queries:
        result = discover.query(
            selected_columns=["transaction"] + col,
            referrer="discover",
            query=query,
            orderby=["transaction"],
            snuba_params=SnubaParams(
                start=before_now(minutes=30),
                end=before_now(minutes=2),
                projects=[project, project2],
                organization=self.organization,
            ),
            use_aggregate_conditions=True,
        )
        data = result["data"]
        assert len(data) == len(expected_user_misery)
        for i, misery in enumerate(sorted(data, key=lambda k: k["transaction"])):
            assert similar(misery[alias], expected_user_misery[i])
def test_count(self) -> None:
    """``count()`` aggregates per transaction; ``count():>N`` filters rows only
    when ``use_aggregate_conditions`` is enabled."""
    project = self.create_project()
    # Six events for one transaction, eight for another.
    for i in range(6):
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["transaction"] = "/count/6"
        self.store_event(data, project_id=project.id)
    for i in range(8):
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["transaction"] = "/count/8"
        self.store_event(data, project_id=project.id)
    # (query, expected row count, expected counts, use_aggregate_conditions)
    queries = [
        ("", 2, (6, 8), True),
        ("count():>6", 2, (6, 8), False),
        ("count():>6", 1, (8,), True),
    ]
    for query, expected_length, expected_counts, use_aggregate_conditions in queries:
        result = discover.query(
            selected_columns=["transaction", "count()"],
            query=query,
            orderby=["transaction"],
            snuba_params=SnubaParams(
                start=before_now(minutes=10),
                end=before_now(minutes=2),
                projects=[project],
            ),
            referrer="discover",
            use_aggregate_conditions=use_aggregate_conditions,
        )
        data = result["data"]
        assert len(data) == expected_length
        for index, count in enumerate(data):
            assert count["count"] == expected_counts[index]
    def test_compare_numeric_aggregate_function(self) -> None:
        """compare_numeric_aggregate() returns 1/0 for a comparison against an aggregate alias.

        No lcp measurements are stored, so p50(measurements.lcp) is empty and
        neither `greater` nor `less` than 2000 holds in the expected way; the
        expected values below pin the current behavior (1 for greater, 0 for less).
        """
        project = self.create_project()
        for i in range(6):
            data = load_data(
                "transaction",
                timestamp=before_now(minutes=3),
                start_timestamp=before_now(minutes=4 + i),
            )
            data["transaction"] = "/percentile"
            self.store_event(data, project_id=project.id)
        # (selected columns, query) pairs exercising both comparison directions.
        fields = [
            (
                [
                    "transaction",
                    "p50(measurements.lcp)",
                    "compare_numeric_aggregate(p50_measurements_lcp,greater,2000)",
                ],
                "",
            ),
            (
                [
                    "transaction",
                    "p50(measurements.lcp)",
                    "compare_numeric_aggregate(p50_measurements_lcp,less,2000)",
                ],
                "",
            ),
        ]
        expected_results = [
            ("compare_numeric_aggregate_p50_measurements_lcp_greater_2000", 1),
            ("compare_numeric_aggregate_p50_measurements_lcp_less_2000", 0),
        ]
        for i, test_case in enumerate(fields):
            selected, query = test_case
            result = discover.query(
                referrer="discover",
                selected_columns=selected,
                query=query,
                orderby=["transaction"],
                snuba_params=SnubaParams(
                    start=before_now(minutes=10),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                use_aggregate_conditions=True,
            )
            alias, expected_value = expected_results[i]
            data = result["data"]
            assert data[0][alias] == expected_value
    def test_last_seen(self) -> None:
        """last_seen() returns the newest event timestamp per group.

        Stores one recent event and six older ones on the same transaction,
        then checks the aggregate both as a column and as a condition
        (timestamp comparison only filters when aggregate conditions are on).
        """
        project = self.create_project()
        expected_timestamp = before_now(minutes=3)
        string_condition_timestamp = before_now(minutes=4).strftime("%Y-%m-%dT%H:%M:%S+00:00")
        data = load_data("transaction", timestamp=expected_timestamp)
        data["transaction"] = "/last_seen"
        self.store_event(data, project_id=project.id)
        for i in range(6):
            data = load_data("transaction", timestamp=before_now(minutes=i + 4))
            data["transaction"] = "/last_seen"
            self.store_event(data, project_id=project.id)
        # (query, expected row count, use_aggregate_conditions)
        queries = [
            ("", 1, True),
            (f"last_seen():>{string_condition_timestamp}", 1, True),
            ("last_seen():>0", 1, False),
        ]
        for query, expected_length, use_aggregate_conditions in queries:
            result = discover.query(
                selected_columns=["transaction", "last_seen()"],
                query=query,
                referrer="discover",
                orderby=["transaction"],
                snuba_params=SnubaParams(
                    start=before_now(minutes=10),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                use_aggregate_conditions=use_aggregate_conditions,
            )
            data = result["data"]
            assert len(data) == expected_length
            assert data[0]["last_seen"] == expected_timestamp.strftime("%Y-%m-%dT%H:%M:%S+00:00")
    def test_latest_event(self) -> None:
        """latest_event() returns the event id of the most recent event in the group."""
        project = self.create_project()
        expected_timestamp = before_now(minutes=3)
        data = load_data("transaction", timestamp=expected_timestamp)
        data["transaction"] = "/latest_event"
        stored_event = self.store_event(data, project_id=project.id)
        # Older events on the same transaction that must NOT be returned.
        for i in range(6):
            data = load_data("transaction", timestamp=before_now(minutes=i + 4))
            data["transaction"] = "/latest_event"
            self.store_event(data, project_id=project.id)
        result = discover.query(
            selected_columns=["transaction", "latest_event()"],
            query="",
            orderby=["transaction"],
            referrer="discover",
            snuba_params=SnubaParams(
                start=before_now(minutes=10),
                end=before_now(minutes=2),
                projects=[project],
            ),
            use_aggregate_conditions=False,
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0]["latest_event"] == stored_event.event_id
def test_failure_rate(self) -> None:
project = self.create_project()
for i in range(6):
data = load_data("transaction", timestamp=before_now(minutes=5))
data["transaction"] = "/failure_rate/over"
data["contexts"]["trace"]["status"] = "unauthenticated"
self.store_event(data, project_id=project.id)
for i in range(4):
data = load_data("transaction", timestamp=before_now(minutes=5))
data["transaction"] = "/failure_rate/over"
self.store_event(data, project_id=project.id)
for i in range(7):
data = load_data("transaction", timestamp=before_now(minutes=5))
data["transaction"] = "/failure_rate/under"
self.store_event(data, project_id=project.id)
for i in range(3):
data = load_data("transaction", timestamp=before_now(minutes=5))
data["transaction"] = "/failure_rate/under"
data["contexts"]["trace"]["status"] = "unauthenticated"
self.store_event(data, project_id=project.id)
queries = [
("", 2, True),
("failure_rate():>0.5", 1, True),
("failure_rate():>0.5", 2, False),
]
for query, expected_length, use_aggregate_conditions in queries:
result = discover.query(
selected_columns=["transaction", "failure_rate()"],
query=query,
orderby=["transaction"],
snuba_params=SnubaParams(
start=before_now(minutes=10),
end=before_now(minutes=2),
projects=[project],
),
use_aggregate_conditions=use_aggregate_conditions,
referrer="discover",
)
data = result["data"]
assert len(data) == expected_length
assert data[0]["failure_rate"] == 0.6
if expected_length > 1:
assert data[1]["failure_rate"] == 0.3
def _create_percentile_events(self, project):
for i in range(6):
start = before_now(minutes=3)
end = start - timedelta(minutes=1 + i)
data = load_data(
"transaction",
timestamp=start,
start_timestamp=end,
)
data["transaction"] = "/p50"
self.store_event(data, project_id=project.id)
    def test_percentile(self) -> None:
        """percentile() with arbitrary quantiles, as columns and as aggregate conditions.

        Durations are 1..6 minutes (see _create_percentile_events), so
        p0.7 == 270000ms and p0.5 == 210000ms.
        """
        project = self.create_project()
        self._create_percentile_events(project)
        # (query, expected row count, use_aggregate_conditions)
        queries = [
            ("", 1, True),
            ("percentile(transaction.duration, 0.7):>0", 1, False),
            ("percentile(transaction.duration, 0.7):>500000", 0, True),
            ("percentile(transaction.duration, 0.7):>100000", 1, True),
        ]
        for query, expected_length, use_aggregate_conditions in queries:
            result = discover.query(
                referrer="discover",
                selected_columns=[
                    "transaction",
                    "percentile(transaction.duration, 0.7)",
                    "percentile(transaction.duration, 0.5)",
                ],
                query=query,
                orderby=["transaction"],
                snuba_params=SnubaParams(
                    start=before_now(minutes=20),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                use_aggregate_conditions=use_aggregate_conditions,
            )
            data = result["data"]
            assert len(data) == expected_length
            if expected_length > 0:
                assert data[0]["percentile_transaction_duration_0_7"] == 270000
                assert data[0]["percentile_transaction_duration_0_5"] == 210000
    def test_p50(self) -> None:
        """p50() shorthand aggregate, as a column and as an aggregate condition."""
        project = self.create_project()
        self._create_percentile_events(project)
        # (query, expected row count, use_aggregate_conditions)
        queries = [
            ("", 1, True),
            ("p50(transaction.duration):>0", 1, False),
            ("p50(transaction.duration):>500000", 0, True),
            ("p50(transaction.duration):>100000", 1, True),
        ]
        for query, expected_length, use_aggregate_conditions in queries:
            result = discover.query(
                referrer="discover",
                selected_columns=[
                    "transaction",
                    "p50(transaction.duration)",
                ],
                query=query,
                orderby=["transaction"],
                snuba_params=SnubaParams(
                    start=before_now(minutes=20),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                use_aggregate_conditions=use_aggregate_conditions,
            )
            data = result["data"]
            assert len(data) == expected_length
            if expected_length > 0:
                assert data[0]["p50_transaction_duration"] == 210000
    def test_p75(self) -> None:
        """p75() shorthand aggregate, as a column and as an aggregate condition."""
        project = self.create_project()
        self._create_percentile_events(project)
        # (query, expected row count, use_aggregate_conditions)
        queries = [
            ("", 1, True),
            ("p75(transaction.duration):>0", 1, False),
            ("p75(transaction.duration):>500000", 0, True),
            ("p75(transaction.duration):>100000", 1, True),
        ]
        for query, expected_length, use_aggregate_conditions in queries:
            result = discover.query(
                selected_columns=[
                    "transaction",
                    "p75(transaction.duration)",
                ],
                query=query,
                orderby=["transaction"],
                snuba_params=SnubaParams(
                    start=before_now(minutes=20),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                use_aggregate_conditions=use_aggregate_conditions,
                referrer="discover",
            )
            data = result["data"]
            assert len(data) == expected_length
            if expected_length > 0:
                assert data[0]["p75_transaction_duration"] == 285000
    def test_p95(self) -> None:
        """p95() shorthand aggregate, as a column and as an aggregate condition."""
        project = self.create_project()
        self._create_percentile_events(project)
        # (query, expected row count, use_aggregate_conditions)
        queries = [
            ("", 1, True),
            ("p95(transaction.duration):>0", 1, False),
            ("p95(transaction.duration):>500000", 0, True),
            ("p95(transaction.duration):>100000", 1, True),
        ]
        for query, expected_length, use_aggregate_conditions in queries:
            result = discover.query(
                selected_columns=[
                    "transaction",
                    "p95(transaction.duration)",
                ],
                query=query,
                orderby=["transaction"],
                snuba_params=SnubaParams(
                    start=before_now(minutes=20),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                use_aggregate_conditions=use_aggregate_conditions,
                referrer="discover",
            )
            data = result["data"]
            assert len(data) == expected_length
            if expected_length > 0:
                assert data[0]["p95_transaction_duration"] == 345000
    def test_p99(self) -> None:
        """p99() shorthand aggregate, as a column and as an aggregate condition."""
        project = self.create_project()
        self._create_percentile_events(project)
        # (query, expected row count, use_aggregate_conditions)
        queries = [
            ("", 1, True),
            ("p99(transaction.duration):>0", 1, False),
            ("p99(transaction.duration):>500000", 0, True),
            ("p99(transaction.duration):>100000", 1, True),
        ]
        for query, expected_length, use_aggregate_conditions in queries:
            result = discover.query(
                selected_columns=[
                    "transaction",
                    "p99(transaction.duration)",
                ],
                query=query,
                orderby=["transaction"],
                snuba_params=SnubaParams(
                    start=before_now(minutes=20),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                use_aggregate_conditions=use_aggregate_conditions,
                referrer="discover",
            )
            data = result["data"]
            assert len(data) == expected_length
            if expected_length > 0:
                assert data[0]["p99_transaction_duration"] == 357000
    def test_p100(self) -> None:
        """p100() (maximum), as a column and as an aggregate condition."""
        project = self.create_project()
        self._create_percentile_events(project)
        # (query, expected row count, use_aggregate_conditions)
        queries = [
            ("", 1, True),
            ("p100(transaction.duration):>0", 1, False),
            ("p100(transaction.duration):>500000", 0, True),
            ("p100(transaction.duration):>100000", 1, True),
        ]
        for query, expected_length, use_aggregate_conditions in queries:
            result = discover.query(
                selected_columns=[
                    "transaction",
                    "p100(transaction.duration)",
                ],
                query=query,
                orderby=["transaction"],
                snuba_params=SnubaParams(
                    start=before_now(minutes=20),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                use_aggregate_conditions=use_aggregate_conditions,
                referrer="discover",
            )
            data = result["data"]
            assert len(data) == expected_length
            if expected_length > 0:
                assert data[0]["p100_transaction_duration"] == 360000
    def test_p100_with_measurement(self) -> None:
        """p100() over a derived measurement (frames_slow_rate).

        frames_slow / frames_total is 0.5 for every stored event (50*i / 100*i),
        so the maximum slow-frame rate is 0.5.
        """
        project = self.create_project()
        for i in range(6):
            data = load_data(
                "transaction",
                timestamp=before_now(minutes=3),
                start_timestamp=before_now(minutes=4 + i),
            )
            data["transaction"] = "/p100"
            data["measurements"]["frames_total"] = {"value": 100 * i}
            data["measurements"]["frames_slow"] = {"value": 50 * i}
            self.store_event(data, project_id=project.id)
        # (query, expected row count, use_aggregate_conditions)
        queries = [
            ("", 1, True),
            ("p100(measurements.frames_slow_rate):>0", 1, False),
            ("p100(measurements.frames_slow_rate):>0.6", 0, True),
            ("p100(measurements.frames_slow_rate):>0.4", 1, True),
        ]
        for query, expected_length, use_aggregate_conditions in queries:
            result = discover.query(
                selected_columns=[
                    "transaction",
                    "p100(measurements.frames_slow_rate)",
                ],
                query=query,
                orderby=["transaction"],
                snuba_params=SnubaParams(
                    start=before_now(minutes=20),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                use_aggregate_conditions=use_aggregate_conditions,
                referrer="discover",
            )
            data = result["data"]
            assert len(data) == expected_length
            if expected_length > 0:
                assert data[0]["p100_measurements_frames_slow_rate"] == 0.5
    def test_count_unique(self) -> None:
        """count_unique() over both a field alias (user.display) and a tag (foo).

        Three events with distinct user emails and two distinct "foo" tag
        values ("bar" once, "baz" twice).
        """
        for idx in range(3):
            data = load_data(
                "transaction",
                timestamp=before_now(minutes=3),
            )
            data["user"] = {"email": f"{idx}@example.com"}
            data["tags"] = {"foo": "bar" if idx < 1 else "baz"}
            self.store_event(data, project_id=self.project.id)
        result = discover.query(
            selected_columns=["count_unique(user.display)", "count_unique(foo)"],
            query="",
            snuba_params=SnubaParams(
                start=before_now(minutes=4),
                end=before_now(minutes=2),
                projects=[self.project],
            ),
            use_aggregate_conditions=True,
            referrer="discover",
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0]["count_unique_user_display"] == 3
        assert data[0]["count_unique_foo"] == 2
    def test_min_max(self) -> None:
        """Testing both min and max since they're so similar.

        Durations are 1, 2 and 3 minutes, so min == 60000ms and max == 180000ms.
        """
        for idx in range(3):
            start = before_now(minutes=3)
            end = start - timedelta(minutes=1 + idx)
            data = load_data(
                "transaction",
                timestamp=start,
                start_timestamp=end,
            )
            self.store_event(data, project_id=self.project.id)
        result = discover.query(
            selected_columns=[
                "min(transaction.duration)",
                "max(transaction.duration)",
            ],
            query="",
            snuba_params=SnubaParams(
                start=before_now(minutes=4),
                end=before_now(minutes=2),
                projects=[self.project],
            ),
            use_aggregate_conditions=True,
            referrer="discover",
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0]["min_transaction_duration"] == 60000
        assert data[0]["max_transaction_duration"] == 180000
    def test_stats_functions(self) -> None:
        """var/stddev/cov/corr aggregates over transaction.duration.

        Durations are 1, 2 and 3 minutes; each case is (column, result alias,
        expected value).
        """
        for idx in range(3):
            start = before_now(minutes=3)
            end = start - timedelta(minutes=1 + idx)
            data = load_data(
                "transaction",
                timestamp=start,
                start_timestamp=end,
            )
            self.store_event(data, project_id=self.project.id)
        queries = [
            ("var(transaction.duration)", "var_transaction_duration", 3600000000),
            ("stddev(transaction.duration)", "stddev_transaction_duration", 60000),
            # This is a nonsense cov&corr column, but gives us a consistent result for tests
            (
                "cov(transaction.duration,transaction.duration)",
                "cov_transaction_duration_transaction_duration",
                3600000000,
            ),
            (
                "corr(transaction.duration,transaction.duration)",
                "corr_transaction_duration_transaction_duration",
                1,
            ),
        ]
        for column, alias, expected in queries:
            result = discover.query(
                selected_columns=[column],
                query="",
                snuba_params=SnubaParams(
                    start=before_now(minutes=4),
                    end=before_now(minutes=2),
                    projects=[self.project],
                ),
                use_aggregate_conditions=True,
                referrer="discover",
            )
            data = result["data"]
            assert len(data) == 1, column
            assert data[0][alias] == expected, column
    def test_count_at_least(self) -> None:
        """count_at_least() counts events whose duration meets a threshold.

        One 1-minute event and two 2-minute events: all 3 are >= 60000ms,
        only 2 are >= 120000ms.
        """
        end = before_now(minutes=3)
        start_one_minute = end - timedelta(minutes=1)
        start_two_minute = end - timedelta(minutes=2)
        for idx in range(3):
            data = load_data(
                "transaction",
                timestamp=end,
                start_timestamp=start_one_minute if idx < 1 else start_two_minute,
            )
            self.store_event(data, project_id=self.project.id)
        result = discover.query(
            selected_columns=[
                "count_at_least(transaction.duration,60000)",
                "count_at_least(transaction.duration,120000)",
            ],
            query="",
            snuba_params=SnubaParams(
                start=before_now(minutes=4),
                end=before_now(minutes=2),
                projects=[self.project],
            ),
            use_aggregate_conditions=True,
            referrer="discover",
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0]["count_at_least_transaction_duration_60000"] == 3
        assert data[0]["count_at_least_transaction_duration_120000"] == 2
    def test_eps(self) -> None:
        """eps()/tps() events-per-second rates with explicit and default intervals.

        Six events over a 2-minute (120s) window: eps() == 6/120 == 0.05,
        eps(10) == 6/10 == 0.6, eps(60) == 6/60 == 0.1; tps() is an alias
        producing the same values.
        """
        project = self.create_project()
        for _ in range(6):
            data = load_data(
                "transaction",
                timestamp=before_now(minutes=3),
            )
            data["transaction"] = "/eps"
            self.store_event(data, project_id=project.id)
        # (query, expected row count, use_aggregate_conditions)
        queries = [
            ("", 1, True),
            ("eps():>1", 0, True),
            ("eps():>1", 1, False),
            ("eps(10):>0.5", 1, True),
            ("tps():>1", 0, True),
            ("tps():>1", 1, False),
            ("tps(10):>0.5", 1, True),
        ]
        for query, expected_length, use_aggregate_conditions in queries:
            result = discover.query(
                selected_columns=[
                    "transaction",
                    "eps()",
                    "eps(10)",
                    "eps(60)",
                    "tps()",
                    "tps(10)",
                    "tps(60)",
                ],
                query=query,
                orderby=["transaction"],
                snuba_params=SnubaParams(
                    start=before_now(minutes=4),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                use_aggregate_conditions=use_aggregate_conditions,
                referrer="discover",
            )
            data = result["data"]
            assert len(data) == expected_length
            if expected_length > 0:
                assert data[0]["eps"] == 0.05
                assert data[0]["eps_10"] == 0.6
                assert data[0]["eps_60"] == 0.1
                assert data[0]["tps"] == 0.05
                assert data[0]["tps_10"] == 0.6
                assert data[0]["tps_60"] == 0.1
    def test_epm(self) -> None:
        """epm()/tpm() events-per-minute rates with explicit and default intervals.

        Six events over a 2-minute window: epm() == 6/2 == 3,
        epm(10) == 6/(10/60) == 36, epm(60) == 6/1 == 6; tpm() is an alias
        producing the same values.
        """
        project = self.create_project()
        for _ in range(6):
            data = load_data(
                "transaction",
                timestamp=before_now(minutes=3),
            )
            data["transaction"] = "/epm"
            self.store_event(data, project_id=project.id)
        # (query, expected row count, use_aggregate_conditions)
        queries = [
            ("", 1, True),
            ("epm():>3", 0, True),
            ("epm():>3", 1, False),
            ("epm(10):>3", 1, True),
            ("tpm():>3", 0, True),
            ("tpm():>3", 1, False),
            ("tpm(10):>3", 1, True),
        ]
        for query, expected_length, use_aggregate_conditions in queries:
            result = discover.query(
                selected_columns=[
                    "transaction",
                    "epm()",
                    "epm(10)",
                    "epm(60)",
                    "tpm()",
                    "tpm(10)",
                    "tpm(60)",
                ],
                query=query,
                orderby=["transaction"],
                snuba_params=SnubaParams(
                    start=before_now(minutes=4),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                use_aggregate_conditions=use_aggregate_conditions,
                referrer="discover",
            )
            data = result["data"]
            assert len(data) == expected_length
            if expected_length > 0:
                assert data[0]["epm"] == 3
                assert data[0]["epm_10"] == 36.0
                assert data[0]["epm_60"] == 6
                assert data[0]["tpm"] == 3
                assert data[0]["tpm_10"] == 36.0
                assert data[0]["tpm_60"] == 6
def test_transaction_status(self) -> None:
data = load_data("transaction", timestamp=before_now(minutes=1))
data["transaction"] = "/test_transaction/success"
data["contexts"]["trace"]["status"] = "ok"
self.store_event(data, project_id=self.project.id)
data = load_data("transaction", timestamp=before_now(minutes=1))
data["transaction"] = "/test_transaction/aborted"
data["contexts"]["trace"]["status"] = "aborted"
self.store_event(data, project_id=self.project.id)
data = load_data("transaction", timestamp=before_now(minutes=1))
data["transaction"] = "/test_transaction/already_exists"
data["contexts"]["trace"]["status"] = "already_exists"
self.store_event(data, project_id=self.project.id)
result = discover.query(
selected_columns=["transaction.status"],
query="",
snuba_params=self.params,
referrer="discover",
)
data = result["data"]
assert len(data) == 3
assert {
data[0]["transaction.status"],
data[1]["transaction.status"],
data[2]["transaction.status"],
} == {0, 10, 6}
    def test_transaction_status_filter(self) -> None:
        """Filtering on transaction.status: exact, IN-list, negation, and has/!has."""
        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["transaction"] = "/test_transaction/success"
        data["contexts"]["trace"]["status"] = "ok"
        self.store_event(data, project_id=self.project.id)
        # Stored twice on purpose: two "ok" events vs one "already_exists".
        self.store_event(data, project_id=self.project.id)
        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["transaction"] = "/test_transaction/already_exists"
        data["contexts"]["trace"]["status"] = "already_exists"
        self.store_event(data, project_id=self.project.id)

        def run_query(query, expected_statuses, message):
            """Assert the query returns exactly the given status codes (order-insensitive)."""
            result = discover.query(
                selected_columns=["transaction.status"],
                query=query,
                snuba_params=self.params,
                referrer="discover",
            )
            data = result["data"]
            assert len(data) == len(expected_statuses), message
            assert sorted(item["transaction.status"] for item in data) == sorted(
                expected_statuses
            ), message

        run_query("has:transaction.status transaction.status:ok", [0, 0], "status 'ok'")
        run_query(
            "has:transaction.status transaction.status:[ok,already_exists]",
            [0, 0, 6],
            "status 'ok' or 'already_exists'",
        )
        run_query("has:transaction.status !transaction.status:ok", [6], "status not 'ok'")
        run_query(
            "has:transaction.status !transaction.status:already_exists",
            [0, 0],
            "status not 'already_exists'",
        )
        run_query(
            "has:transaction.status !transaction.status:[ok,already_exists]",
            [],
            "status not 'ok' and not 'already_exists'",
        )
        run_query("!has:transaction.status", [], "status nonexistant")
    def test_error_handled_alias(self) -> None:
        """error.handled alias: handled=True and handled=None both read as 1, False as 0."""
        data = load_data("android-ndk", timestamp=before_now(minutes=10))
        # (event_id, message, mechanism.handled) fixtures.
        events = (
            ("a" * 32, "not handled", False),
            ("b" * 32, "is handled", True),
            ("c" * 32, "undefined", None),
        )
        for event in events:
            data["event_id"] = event[0]
            data["logentry"] = {"formatted": event[1]}
            data["exception"]["values"][0]["value"] = event[1]
            data["exception"]["values"][0]["mechanism"]["handled"] = event[2]
            self.store_event(data=data, project_id=self.project.id)
        # (query, expected error.handled values after sorting below)
        queries: list[tuple[str, list[int]]] = [
            ("", [0, 1, 1]),
            ("error.handled:true", [1, 1]),
            ("!error.handled:true", [0]),
            ("has:error.handled", [1, 1]),
            ("has:error.handled error.handled:true", [1, 1]),
            ("error.handled:false", [0]),
            ("has:error.handled error.handled:false", []),
        ]
        for query, expected_data in queries:
            result = discover.query(
                selected_columns=["error.handled"],
                query=query,
                snuba_params=SnubaParams(
                    start=before_now(minutes=12),
                    end=before_now(minutes=8),
                    projects=[self.project],
                    organization=self.organization,
                ),
                referrer="discover",
            )
            data = result["data"]
            # Deterministic order: numeric values first, None last.
            data = sorted(data, key=lambda k: (k["error.handled"] is None, k["error.handled"]))
            assert len(data) == len(expected_data)
            assert [item["error.handled"] for item in data] == expected_data
    def test_error_unhandled_alias(self) -> None:
        """error.unhandled alias: only handled=False events match; None reads as handled."""
        data = load_data("android-ndk", timestamp=before_now(minutes=10))
        # (event_id, message, mechanism.handled) fixtures.
        events = (
            ("a" * 32, "not handled", False),
            ("b" * 32, "is handled", True),
            ("c" * 32, "undefined", None),
        )
        for event in events:
            data["event_id"] = event[0]
            data["logentry"] = {"formatted": event[1]}
            data["exception"]["values"][0]["value"] = event[1]
            data["exception"]["values"][0]["mechanism"]["handled"] = event[2]
            self.store_event(data=data, project_id=self.project.id)
        # (query, expected matching event ids, expected error.unhandled values)
        queries: list[tuple[str, list[str], list[int]]] = [
            ("error.unhandled:true", ["a" * 32], [1]),
            ("!error.unhandled:true", ["b" * 32, "c" * 32], [0, 0]),
            ("has:error.unhandled", ["a" * 32], [1]),
            ("!has:error.unhandled", ["b" * 32, "c" * 32], [0, 0]),
            ("has:error.unhandled error.unhandled:true", ["a" * 32], [1]),
            ("error.unhandled:false", ["b" * 32, "c" * 32], [0, 0]),
            ("has:error.unhandled error.unhandled:false", [], []),
        ]
        for query, expected_events, error_handled in queries:
            result = discover.query(
                selected_columns=["error.unhandled"],
                query=query,
                snuba_params=SnubaParams(
                    start=before_now(minutes=12),
                    end=before_now(minutes=8),
                    projects=[self.project],
                    organization=self.organization,
                ),
                referrer="discover",
            )
            data = result["data"]
            assert len(data) == len(expected_events)
            assert [item["error.unhandled"] for item in data] == error_handled
    def test_array_fields(self) -> None:
        """stack.filename is an array field: wildcard filters, has/!has, and IN syntax."""
        data = load_data("javascript")
        data["timestamp"] = before_now(minutes=10).isoformat()
        self.store_event(data=data, project_id=self.project.id)
        # Filenames present in the javascript fixture's stacktrace.
        expected_filenames = [
            "../../sentry/scripts/views.js",
            "../../sentry/scripts/views.js",
            "../../sentry/scripts/views.js",
            "raven.js",
        ]
        # (query, expected row count)
        queries = [
            ("", 1),
            ("stack.filename:*.js", 1),
            ("stack.filename:*.py", 0),
            ("has:stack.filename", 1),
            ("!has:stack.filename", 0),
        ]
        for query, expected_len in queries:
            result = discover.query(
                selected_columns=["stack.filename"],
                query=query,
                snuba_params=SnubaParams(
                    start=before_now(minutes=12),
                    end=before_now(minutes=8),
                    projects=[self.project],
                    organization=self.organization,
                ),
                referrer="discover",
            )
            data = result["data"]
            assert len(data) == expected_len
            if len(data) == 0:
                continue
            assert len(data[0]["stack.filename"]) == len(expected_filenames)
            assert sorted(data[0]["stack.filename"]) == expected_filenames

        # IN-list syntax matches when any array element matches.
        result = discover.query(
            selected_columns=["stack.filename"],
            query="stack.filename:[raven.js]",
            referrer="discover",
            snuba_params=SnubaParams(
                start=before_now(minutes=12),
                end=before_now(minutes=8),
                projects=[self.project],
                organization=self.organization,
            ),
        )
        data = result["data"]
        assert len(data) == 1
        assert len(data[0]["stack.filename"]) == len(expected_filenames)
        assert sorted(data[0]["stack.filename"]) == expected_filenames
    @pytest.mark.skip("setting snuba config is too slow")
    def test_spans_op_array_field(self) -> None:
        """has/!has filtering on the spans_op array field (skipped: slow snuba config)."""
        trace_context = {
            "parent_span_id": "8988cec7cc0779c1",
            "type": "trace",
            "op": "http.server",
            "trace_id": "a7d67cf796774551a95be6543cacd459",
            "span_id": "babaae0d4b7512d9",
            "status": "ok",
            "hash": "a" * 16,
            "exclusive_time": 1.2345,
        }
        data = load_data(
            "transaction", timestamp=before_now(minutes=10), trace_context=trace_context, spans=[]
        )
        self.store_event(data=data, project_id=self.project.id)
        # (query, expected row count)
        queries = [
            ("has:spans_op", 1),
            ("!has:spans_op", 0),
        ]
        for query, expected_len in queries:
            result = discover.query(
                selected_columns=["spans_op"],
                query=query,
                snuba_params=SnubaParams(
                    start=before_now(minutes=12),
                    end=before_now(minutes=8),
                    projects=[self.project],
                    organization=self.organization,
                ),
                referrer="discover",
            )
            data = result["data"]
            assert len(data) == expected_len
    def test_orderby_field_alias(self) -> None:
        """Ordering by the error.unhandled field alias, ascending and descending."""
        data = load_data("android-ndk", timestamp=before_now(minutes=10))
        events = (
            ("a" * 32, "not handled", False),
            ("b" * 32, "is handled", True),
            ("c" * 32, "undefined", None),
        )
        for event in events:
            data["event_id"] = event[0]
            data["transaction"] = event[0]
            data["logentry"] = {"formatted": event[1]}
            data["exception"]["values"][0]["value"] = event[1]
            data["exception"]["values"][0]["mechanism"]["handled"] = event[2]
            self.store_event(data=data, project_id=self.project.id)
        # NOTE(review): each orderby case appears twice — presumably the
        # duplicates were meant to cover a second spelling of the alias; confirm.
        queries = [
            (["error.unhandled"], [0, 0, 1]),
            (["error.unhandled"], [0, 0, 1]),
            (["-error.unhandled"], [1, 0, 0]),
            (["-error.unhandled"], [1, 0, 0]),
        ]
        for orderby, expected in queries:
            result = discover.query(
                selected_columns=["transaction", "error.unhandled"],
                query="",
                orderby=orderby,
                snuba_params=SnubaParams(
                    start=before_now(minutes=12),
                    end=before_now(minutes=8),
                    projects=[self.project],
                    organization=self.organization,
                ),
                referrer="discover",
            )
            data = result["data"]
            assert [x["error.unhandled"] for x in data] == expected
    def test_orderby_aggregate_function(self) -> None:
        """Ordering by failure_count(), in function form and by its result alias."""
        project = self.create_project()
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["transaction"] = "/failure_count/success"
        self.store_event(data, project_id=project.id)
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["transaction"] = "/failure_count/unknown"
        data["contexts"]["trace"]["status"] = "unknown_error"
        self.store_event(data, project_id=project.id)
        for i in range(6):
            data = load_data("transaction", timestamp=before_now(minutes=5))
            data["transaction"] = f"/failure_count/{i}"
            data["contexts"]["trace"]["status"] = "unauthenticated"
            self.store_event(data, project_id=project.id)
        # Second failed event on "/failure_count/0" so one group has count 2.
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["transaction"] = "/failure_count/0"
        data["contexts"]["trace"]["status"] = "unauthenticated"
        self.store_event(data, project_id=project.id)
        # NOTE(review): the function-form orderbys are listed twice —
        # presumably duplicates of an intended second spelling; confirm.
        orderbys = [
            (["failure_count()"], [0, 1, 1, 1, 1, 1, 1, 2]),
            (["failure_count()"], [0, 1, 1, 1, 1, 1, 1, 2]),
            (["-failure_count()"], [2, 1, 1, 1, 1, 1, 1, 0]),
            (["-failure_count()"], [2, 1, 1, 1, 1, 1, 1, 0]),
            (["failure_count"], [0, 1, 1, 1, 1, 1, 1, 2]),
            (["-failure_count"], [2, 1, 1, 1, 1, 1, 1, 0]),
        ]
        for orderby, expected in orderbys:
            result = discover.query(
                selected_columns=["transaction", "failure_count()"],
                query="",
                orderby=orderby,
                snuba_params=SnubaParams(
                    start=before_now(minutes=10),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                referrer="discover",
            )
            data = result["data"]
            assert [x["failure_count"] for x in data] == expected
    def test_field_aliasing_in_selected_columns(self) -> None:
        """Field aliases (project.id, user, release, timestamp.to_hour) resolve with typed meta."""
        result = discover.query(
            selected_columns=["project.id", "user", "release", "timestamp.to_hour"],
            query="",
            snuba_params=self.params,
            referrer="discover",
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0]["project.id"] == self.project.id
        assert data[0]["user"] == "id:99"
        assert data[0]["release"] == "first-release"
        # timestamp.to_hour truncates the event time to the hour.
        event_hour = self.event_time.replace(minute=0, second=0, microsecond=0)
        assert data[0]["timestamp.to_hour"] == event_hour.isoformat()
        assert len(result["meta"]["fields"]) == 4
        assert result["meta"]["fields"] == {
            "project.id": "integer",
            "user": "string",
            "release": "string",
            "timestamp.to_hour": "date",
        }
def test_field_alias_with_component(self) -> None:
result = discover.query(
selected_columns=["project.id", "user", "user.email"],
query="",
snuba_params=self.params,
referrer="discover",
)
data = result["data"]
assert len(data) == 1
assert data[0]["project.id"] == self.project.id
assert data[0]["user"] == "id:99"
assert data[0]["user.email"] == "bruce@example.com"
assert len(result["meta"]["fields"]) == 3
assert result["meta"]["fields"] == {
"project.id": "integer",
"user": "string",
"user.email": "string",
}
def test_field_aliasing_in_aggregate_functions_and_groupby(self) -> None:
result = discover.query(
selected_columns=["project.id", "count_unique(user.email)"],
query="",
snuba_params=self.params,
auto_fields=True,
referrer="discover",
)
data = result["data"]
assert len(data) == 1
assert data[0]["project.id"] == self.project.id
assert data[0]["count_unique_user_email"] == 1
def test_field_aliasing_in_conditions(self) -> None:
result = discover.query(
selected_columns=["project.id", "user.email"],
query="user.email:bruce@example.com",
snuba_params=self.params,
referrer="discover",
auto_fields=True,
)
data = result["data"]
assert len(data) == 1
assert data[0]["project.id"] == self.project.id
assert data[0]["user.email"] == "bruce@example.com"
    def test_auto_fields_simple_fields(self) -> None:
        """auto_fields adds id and project.name when only simple fields are selected."""
        result = discover.query(
            selected_columns=["user.email", "release"],
            referrer="discover",
            query="",
            snuba_params=self.params,
            auto_fields=True,
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0]["id"] == self.event.event_id
        assert data[0]["user.email"] == "bruce@example.com"
        assert data[0]["release"] == "first-release"
        assert data[0]["project.name"] == self.project.slug
        assert len(result["meta"]["fields"]) == 4
        assert result["meta"]["fields"] == {
            "user.email": "string",
            "release": "string",
            "id": "string",
            "project.name": "string",
        }
def test_auto_fields_aggregates(self) -> None:
result = discover.query(
selected_columns=["count_unique(user.email)"],
referrer="discover",
query="",
snuba_params=self.params,
auto_fields=True,
)
data = result["data"]
assert len(data) == 1
assert data[0]["count_unique_user_email"] == 1
    def test_release_condition(self) -> None:
        """release:<version> filters events to the matching release only."""
        # A freshly-created release has no events, so nothing matches.
        result = discover.query(
            selected_columns=["id", "message"],
            query=f"release:{self.create_release(self.project).version}",
            snuba_params=self.params,
            referrer="discover",
        )
        assert len(result["data"]) == 0

        # The fixture release from setUp has exactly one event.
        result = discover.query(
            selected_columns=["id", "message"],
            query=f"release:{self.release.version}",
            snuba_params=self.params,
            referrer="discover",
        )
        assert len(result["data"]) == 1
        data = result["data"]
        assert data[0]["id"] == self.event.event_id
        assert data[0]["message"] == self.event.message
        assert "event_id" not in data[0]
    def test_semver_condition(self) -> None:
        """Semver release queries support >, >=, < and negated exact match.

        Two events are stored on each of versions 1.2.3, 1.2.4 and 1.2.5;
        the negated query also matches the setUp event (non-semver release).
        """
        release_1 = self.create_release(version="test@1.2.3")
        release_2 = self.create_release(version="test@1.2.4")
        release_3 = self.create_release(version="test@1.2.5")

        release_1_e_1 = self.store_event(
            data={"release": release_1.version, "timestamp": self.one_min_ago.isoformat()},
            project_id=self.project.id,
        ).event_id
        release_1_e_2 = self.store_event(
            data={"release": release_1.version, "timestamp": self.one_min_ago.isoformat()},
            project_id=self.project.id,
        ).event_id
        release_2_e_1 = self.store_event(
            data={"release": release_2.version, "timestamp": self.one_min_ago.isoformat()},
            project_id=self.project.id,
        ).event_id
        release_2_e_2 = self.store_event(
            data={"release": release_2.version, "timestamp": self.one_min_ago.isoformat()},
            project_id=self.project.id,
        ).event_id
        release_3_e_1 = self.store_event(
            data={"release": release_3.version, "timestamp": self.one_min_ago.isoformat()},
            project_id=self.project.id,
        ).event_id
        release_3_e_2 = self.store_event(
            data={"release": release_3.version, "timestamp": self.one_min_ago.isoformat()},
            project_id=self.project.id,
        ).event_id

        # Strictly greater than 1.2.3.
        result = discover.query(
            selected_columns=["id"],
            query=f"{SEMVER_ALIAS}:>1.2.3",
            snuba_params=self.params,
            referrer="discover",
        )
        assert {r["id"] for r in result["data"]} == {
            release_2_e_1,
            release_2_e_2,
            release_3_e_1,
            release_3_e_2,
        }
        # Greater than or equal to 1.2.3 includes that version's events.
        result = discover.query(
            selected_columns=["id"],
            query=f"{SEMVER_ALIAS}:>=1.2.3",
            snuba_params=self.params,
            referrer="discover",
        )
        assert {r["id"] for r in result["data"]} == {
            release_1_e_1,
            release_1_e_2,
            release_2_e_1,
            release_2_e_2,
            release_3_e_1,
            release_3_e_2,
        }
        # Strictly less than 1.2.4.
        result = discover.query(
            selected_columns=["id"],
            query=f"{SEMVER_ALIAS}:<1.2.4",
            snuba_params=self.params,
            referrer="discover",
        )
        assert {r["id"] for r in result["data"]} == {release_1_e_1, release_1_e_2}
        # Negated exact match also picks up the setUp event on "first-release".
        result = discover.query(
            selected_columns=["id"],
            query=f"!{SEMVER_ALIAS}:1.2.3",
            snuba_params=self.params,
            referrer="discover",
        )
        assert {r["id"] for r in result["data"]} == {
            self.event.event_id,
            release_2_e_1,
            release_2_e_2,
            release_3_e_1,
            release_3_e_2,
        }
    def test_release_stage_condition(self) -> None:
        """Filter events by the release.stage alias (adopted / replaced / low_adoption).

        Stage is derived from per-environment adoption timestamps, so the
        query must be scoped to an environment.
        """
        # Adopted and then unadopted -> REPLACED stage.
        replaced_release = self.create_release(
            version="replaced_release",
            environments=[self.environment],
            adopted=timezone.now(),
            unadopted=timezone.now(),
        )
        # Adopted and never unadopted -> ADOPTED stage.
        adopted_release = self.create_release(
            version="adopted_release",
            environments=[self.environment],
            adopted=timezone.now(),
        )
        # Never adopted -> LOW_ADOPTION stage; no events stored for it.
        self.create_release(version="not_adopted_release", environments=[self.environment])
        adopted_release_e_1 = self.store_event(
            data={
                "release": adopted_release.version,
                "environment": self.environment.name,
                "timestamp": self.one_min_ago.isoformat(),
            },
            project_id=self.project.id,
        ).event_id
        adopted_release_e_2 = self.store_event(
            data={
                "release": adopted_release.version,
                "environment": self.environment.name,
                "timestamp": self.one_min_ago.isoformat(),
            },
            project_id=self.project.id,
        ).event_id
        replaced_release_e_1 = self.store_event(
            data={
                "release": replaced_release.version,
                "environment": self.environment.name,
                "timestamp": self.one_min_ago.isoformat(),
            },
            project_id=self.project.id,
        ).event_id
        replaced_release_e_2 = self.store_event(
            data={
                "release": replaced_release.version,
                "environment": self.environment.name,
                "timestamp": self.one_min_ago.isoformat(),
            },
            project_id=self.project.id,
        ).event_id
        # release.stage only makes sense per environment.
        self.params.environments = [self.environment]
        result = discover.query(
            selected_columns=["id"],
            query=f"{RELEASE_STAGE_ALIAS}:{ReleaseStages.ADOPTED.value}",
            snuba_params=self.params,
            referrer="discover",
        )
        assert {r["id"] for r in result["data"]} == {
            adopted_release_e_1,
            adopted_release_e_2,
        }
        # Negation of LOW_ADOPTION keeps both adopted and replaced events.
        result = discover.query(
            selected_columns=["id"],
            query=f"!{RELEASE_STAGE_ALIAS}:{ReleaseStages.LOW_ADOPTION.value}",
            snuba_params=self.params,
            referrer="discover",
        )
        assert {r["id"] for r in result["data"]} == {
            adopted_release_e_1,
            adopted_release_e_2,
            replaced_release_e_1,
            replaced_release_e_2,
        }
        # IN-list syntax over multiple stages.
        result = discover.query(
            selected_columns=["id"],
            query=f"{RELEASE_STAGE_ALIAS}:[{ReleaseStages.ADOPTED.value}, {ReleaseStages.REPLACED.value}]",
            snuba_params=self.params,
            referrer="discover",
        )
        assert {r["id"] for r in result["data"]} == {
            adopted_release_e_1,
            adopted_release_e_2,
            replaced_release_e_1,
            replaced_release_e_2,
        }
def test_semver_package_condition(self) -> None:
release_1 = self.create_release(version="test@1.2.3")
release_2 = self.create_release(version="test2@1.2.4")
release_1_e_1 = self.store_event(
data={"release": release_1.version, "timestamp": self.one_min_ago.isoformat()},
project_id=self.project.id,
).event_id
release_1_e_2 = self.store_event(
data={"release": release_1.version, "timestamp": self.one_min_ago.isoformat()},
project_id=self.project.id,
).event_id
release_2_e_1 = self.store_event(
data={"release": release_2.version, "timestamp": self.one_min_ago.isoformat()},
project_id=self.project.id,
).event_id
result = discover.query(
selected_columns=["id"],
referrer="discover",
query=f"{SEMVER_PACKAGE_ALIAS}:test",
snuba_params=self.params,
)
assert {r["id"] for r in result["data"]} == {
release_1_e_1,
release_1_e_2,
}
result = discover.query(
selected_columns=["id"],
query=f"{SEMVER_PACKAGE_ALIAS}:test2",
referrer="discover",
snuba_params=self.params,
)
assert {r["id"] for r in result["data"]} == {
release_2_e_1,
}
def test_semver_build_condition(self) -> None:
release_1 = self.create_release(version="test@1.2.3+123")
release_2 = self.create_release(version="test2@1.2.4+124")
release_1_e_1 = self.store_event(
data={"release": release_1.version, "timestamp": self.one_min_ago.isoformat()},
project_id=self.project.id,
).event_id
release_1_e_2 = self.store_event(
data={"release": release_1.version, "timestamp": self.one_min_ago.isoformat()},
project_id=self.project.id,
).event_id
release_2_e_1 = self.store_event(
data={"release": release_2.version, "timestamp": self.one_min_ago.isoformat()},
project_id=self.project.id,
).event_id
result = discover.query(
selected_columns=["id"],
query=f"{SEMVER_BUILD_ALIAS}:123",
snuba_params=self.params,
referrer="discover",
)
assert {r["id"] for r in result["data"]} == {
release_1_e_1,
release_1_e_2,
}
result = discover.query(
selected_columns=["id"],
query=f"{SEMVER_BUILD_ALIAS}:124",
snuba_params=self.params,
referrer="discover",
)
assert {r["id"] for r in result["data"]} == {
release_2_e_1,
}
result = discover.query(
selected_columns=["id"],
query=f"{SEMVER_BUILD_ALIAS}:>=123",
snuba_params=self.params,
referrer="discover",
)
assert {r["id"] for r in result["data"]} == {release_1_e_1, release_1_e_2, release_2_e_1}
def test_latest_release_condition(self) -> None:
result = discover.query(
selected_columns=["id", "message"],
query="release:latest",
snuba_params=self.params,
referrer="discover",
)
assert len(result["data"]) == 1
data = result["data"]
assert data[0]["id"] == self.event.event_id
assert data[0]["message"] == self.event.message
assert "event_id" not in data[0]
def test_environment_condition(self) -> None:
result = discover.query(
selected_columns=["id", "message"],
query=f"environment:{self.create_environment(self.project).name}",
snuba_params=self.params,
referrer="discover",
)
assert len(result["data"]) == 0
result = discover.query(
selected_columns=["id", "message"],
query=f"environment:{self.environment.name}",
snuba_params=self.params,
referrer="discover",
)
assert len(result["data"]) == 1
data = result["data"]
assert data[0]["id"] == self.event.event_id
assert data[0]["message"] == self.event.message
def test_conditional_filter(self) -> None:
project2 = self.create_project(organization=self.organization)
project3 = self.create_project(organization=self.organization)
self.store_event(
data={"message": "aaaaa", "timestamp": self.one_min_ago.isoformat()},
project_id=project2.id,
)
self.store_event(
data={"message": "bbbbb", "timestamp": self.one_min_ago.isoformat()},
project_id=project3.id,
)
result = discover.query(
selected_columns=["project", "message"],
query=f"project:{self.project.slug} OR project:{project2.slug}",
snuba_params=SnubaParams(
projects=[self.project, project2],
start=self.two_min_ago,
end=self.now,
),
orderby=["message"],
referrer="discover",
)
data = result["data"]
assert len(data) == 2
assert data[0]["project"] == project2.slug
assert data[1]["project"] == self.project.slug
    def test_nested_conditional_filter(self) -> None:
        """Parenthesized OR combined with an AND project filter is applied correctly."""
        project2 = self.create_project(organization=self.organization)
        self.store_event(
            data={"release": "a" * 32, "timestamp": self.one_min_ago.isoformat()},
            project_id=self.project.id,
        )
        self.event = self.store_event(
            data={"release": "b" * 32, "timestamp": self.one_min_ago.isoformat()},
            project_id=self.project.id,
        )
        # Release "c" fails the OR clause below.
        self.event = self.store_event(
            data={"release": "c" * 32, "timestamp": self.one_min_ago.isoformat()},
            project_id=self.project.id,
        )
        # Same release "a" but in project2 -> fails the AND project clause.
        self.event = self.store_event(
            data={"release": "a" * 32, "timestamp": self.one_min_ago.isoformat()},
            project_id=project2.id,
        )
        result = discover.query(
            selected_columns=["release"],
            query="(release:{} OR release:{}) AND project:{}".format(
                "a" * 32, "b" * 32, self.project.slug
            ),
            snuba_params=SnubaParams(
                projects=[self.project, project2],
                start=self.two_min_ago,
                end=self.now,
            ),
            orderby=["release"],
            referrer="discover",
        )
        data = result["data"]
        # Only releases "a" and "b" from self.project survive both clauses.
        assert len(data) == 2
        assert data[0]["release"] == "a" * 32
        assert data[1]["release"] == "b" * 32
    def test_conditions_with_special_columns(self) -> None:
        """Conditions work on special columns: title, message, and dotted/dashed tags."""
        for val in ["a", "b", "c"]:
            data = load_data("transaction")
            data["timestamp"] = self.one_min_ago.isoformat()
            data["transaction"] = val * 32
            data["logentry"] = {"formatted": val * 32}
            # Tag key deliberately contains dots and dashes to exercise quoting.
            data["tags"] = {"sub_customer.is-Enterprise-42": val * 32}
            self.store_event(data=data, project_id=self.project.id)
        result = discover.query(
            selected_columns=["title", "message"],
            query="event.type:transaction (title:{} OR message:{})".format("a" * 32, "b" * 32),
            snuba_params=SnubaParams(
                projects=[self.project],
                start=self.two_min_ago,
                end=self.now,
            ),
            orderby=["title"],
            referrer="discover",
        )
        data = result["data"]
        # "a" matches via title, "b" via message; "c" matches neither.
        assert len(data) == 2
        assert data[0]["title"] == "a" * 32
        assert data[1]["title"] == "b" * 32
        result = discover.query(
            selected_columns=["title", "sub_customer.is-Enterprise-42"],
            query="event.type:transaction (title:{} AND sub_customer.is-Enterprise-42:{})".format(
                "a" * 32, "a" * 32
            ),
            snuba_params=SnubaParams(
                projects=[self.project],
                start=self.two_min_ago,
                end=self.now,
            ),
            orderby=["title"],
            referrer="discover",
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0]["title"] == "a" * 32
        assert data[0]["sub_customer.is-Enterprise-42"] == "a" * 32
    def test_conditions_with_aggregates(self) -> None:
        """Mixing tag conditions with an aggregate condition (count() > N)."""
        # (tag value prefix, number of events to store per tag).
        events = [("a", 2), ("b", 3), ("c", 4)]
        for ev in events:
            val = ev[0] * 32
            for i in range(ev[1]):
                data = load_data("transaction")
                data["timestamp"] = self.one_min_ago.isoformat()
                data["transaction"] = f"{val}-{i}"
                data["logentry"] = {"formatted": val}
                data["tags"] = {"trek": val}
                self.store_event(data=data, project_id=self.project.id)
        result = discover.query(
            selected_columns=["trek", "count()"],
            query="event.type:transaction (trek:{} OR trek:{}) AND count():>2".format(
                "a" * 32, "b" * 32
            ),
            snuba_params=SnubaParams(
                projects=[self.project],
                start=self.two_min_ago,
                end=self.now,
            ),
            orderby=["trek"],
            use_aggregate_conditions=True,
            referrer="discover",
        )
        data = result["data"]
        # Only trek "b" satisfies both the OR clause and count() > 2
        # ("a" has only 2 events; "c" fails the tag filter).
        assert len(data) == 1
        assert data[0]["trek"] == "b" * 32
        assert data[0]["count"] == 3
    def test_conditions_with_nested_aggregates(self) -> None:
        """Aggregate conditions nested inside parenthesized groups are honored."""
        # (tag value prefix, number of events to store per tag).
        events = [("a", 2), ("b", 3), ("c", 4)]
        for ev in events:
            val = ev[0] * 32
            for i in range(ev[1]):
                data = load_data("transaction")
                data["timestamp"] = self.one_min_ago.isoformat()
                data["transaction"] = f"{val}-{i}"
                data["logentry"] = {"formatted": val}
                data["tags"] = {"trek": val}
                self.store_event(data=data, project_id=self.project.id)
        result = discover.query(
            selected_columns=["trek", "count()"],
            query="(event.type:transaction AND (trek:{} AND (transaction:*{}* AND count():>2)))".format(
                "b" * 32, "b" * 32
            ),
            snuba_params=SnubaParams(
                projects=[self.project],
                start=self.two_min_ago,
                end=self.now,
            ),
            orderby=["trek"],
            use_aggregate_conditions=True,
            referrer="discover",
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0]["trek"] == "b" * 32
        assert data[0]["count"] == 3
        # An aggregate used in a condition must also be a selected column;
        # the same query without count() in selected_columns must be rejected.
        with pytest.raises(InvalidSearchQuery) as err:
            discover.query(
                selected_columns=["trek", "transaction"],
                query="(event.type:transaction AND (trek:{} AND (transaction:*{}* AND count():>2)))".format(
                    "b" * 32, "b" * 32
                ),
                referrer="discover",
                snuba_params=SnubaParams(
                    projects=[self.project],
                    start=self.two_min_ago,
                    end=self.now,
                ),
                orderby=["trek"],
                use_aggregate_conditions=True,
            )
        assert "used in a condition but is not a selected column" in str(err)
    def test_conditions_with_timestamps(self) -> None:
        """Timestamp comparisons combined with OR filter the expected event buckets."""
        # (transaction name prefix, number of events); bucket t is stored
        # at now - (3*t + 1) seconds -> "a" at -1s, "b" at -4s, "c" at -7s.
        events = [("a", 1), ("b", 2), ("c", 3)]
        for t, ev in enumerate(events):
            val = ev[0] * 32
            for i in range(ev[1]):
                data = load_data("transaction", timestamp=self.now - timedelta(seconds=3 * t + 1))
                data["transaction"] = f"{val}"
                self.store_event(data=data, project_id=self.project.id)
        # Keep events older than now-5s ("c") or newer than now-3s ("a"),
        # excluding the middle bucket "b".
        results = discover.query(
            selected_columns=["transaction", "count()"],
            query="event.type:transaction AND (timestamp:<{} OR timestamp:>{})".format(
                (self.now - timedelta(seconds=5)).isoformat(),
                (self.now - timedelta(seconds=3)).isoformat(),
            ),
            snuba_params=SnubaParams(
                projects=[self.project],
                start=self.two_min_ago,
                end=self.now,
            ),
            orderby=["transaction"],
            use_aggregate_conditions=True,
            referrer="discover",
        )
        data = results["data"]
        assert len(data) == 2
        assert data[0]["transaction"] == "a" * 32
        assert data[0]["count"] == 1
        assert data[1]["transaction"] == "c" * 32
        assert data[1]["count"] == 3
def test_timestamp_rollup_filter(self) -> None:
event_hour = self.event_time.replace(minute=0, second=0)
result = discover.query(
selected_columns=["project.id", "user", "release"],
query="timestamp.to_hour:" + event_hour.isoformat(),
snuba_params=self.params,
referrer="discover",
)
data = result["data"]
assert len(data) == 1
assert data[0]["project.id"] == self.project.id
assert data[0]["user"] == "id:99"
assert data[0]["release"] == "first-release"
assert len(result["meta"]["fields"]) == 3
assert result["meta"]["fields"] == {
"project.id": "integer",
"user": "string",
"release": "string",
}
def test_count_with_or(self) -> None:
data = load_data("transaction", timestamp=before_now(seconds=3))
data["transaction"] = "a" * 32
self.store_event(data=data, project_id=self.project.id)
results = discover.query(
selected_columns=["transaction", "count()"],
query="event.type:transaction AND (count():<1 OR count():>0)",
snuba_params=self.params,
orderby=["transaction"],
use_aggregate_conditions=True,
referrer="discover",
)
data = results["data"]
assert len(data) == 1
assert data[0]["transaction"] == "a" * 32
assert data[0]["count"] == 1
def test_array_join(self) -> None:
data = load_data("transaction", timestamp=before_now(seconds=90))
data["measurements"] = {
"fp": {"value": 1000},
"fcp": {"value": 1000},
"lcp": {"value": 1000},
}
self.store_event(data=data, project_id=self.project.id)
results = discover.query(
selected_columns=["array_join(measurements_key)"],
query="",
snuba_params=self.params,
functions_acl=["array_join"],
referrer="discover",
)
assert {"fcp", "fp", "lcp"} == {
row["array_join_measurements_key"] for row in results["data"]
}
    def test_access_to_private_functions(self) -> None:
        """Private SnQL functions raise InvalidSearchQuery unless granted via functions_acl."""
        # using private functions directly without access should error
        with pytest.raises(InvalidSearchQuery, match="array_join: no access to private function"):
            discover.query(
                selected_columns=["array_join(tags.key)"],
                query="",
                snuba_params=SnubaParams(
                    projects=[self.project],
                    start=self.two_min_ago,
                    end=self.now,
                ),
                referrer="discover",
            )
        # using private functions in an aggregation without access should error
        with pytest.raises(InvalidSearchQuery, match="histogram: no access to private function"):
            for array_column in ARRAY_COLUMNS:
                discover.query(
                    selected_columns=[f"histogram({array_column}_value, 1,0,1)"],
                    query=f"histogram({array_column}_value, 1,0,1):>0",
                    snuba_params=SnubaParams(
                        projects=[self.project],
                        start=self.two_min_ago,
                        end=self.now,
                    ),
                    use_aggregate_conditions=True,
                    referrer="discover",
                )
        # using private functions in an aggregation without access should error
        # with auto aggregation on
        with pytest.raises(InvalidSearchQuery, match="histogram: no access to private function"):
            for array_column in ARRAY_COLUMNS:
                discover.query(
                    selected_columns=["count()"],
                    query=f"histogram({array_column}_value, 1,0,1):>0",
                    snuba_params=SnubaParams(
                        projects=[self.project],
                        start=self.two_min_ago,
                        end=self.now,
                    ),
                    referrer="discover",
                    auto_aggregations=True,
                    use_aggregate_conditions=True,
                )
def test_sum_array_combinator(self) -> None:
data = load_data("transaction", timestamp=before_now(seconds=3))
data["measurements"] = {
"fp": {"value": 1000},
"fcp": {"value": 1000},
"lcp": {"value": 1000},
}
self.store_event(data=data, project_id=self.project.id)
results = discover.query(
selected_columns=["sumArray(measurements_value)"],
query="",
snuba_params=self.params,
# make sure to opt in to gain access to the function
functions_acl=["sumArray"],
referrer="discover",
# -Array combinator is only supported in SnQL
)
assert results["data"][0]["sumArray_measurements_value"] == 3000.0
def test_any_function(self) -> None:
data = load_data("transaction", timestamp=before_now(seconds=3))
data["transaction"] = "a" * 32
self.store_event(data=data, project_id=self.project.id)
results = discover.query(
selected_columns=["count()", "any(transaction)", "any(user.id)"],
query="event.type:transaction",
snuba_params=SnubaParams(
projects=[self.project],
start=before_now(minutes=5),
end=before_now(seconds=1),
),
referrer="discover",
use_aggregate_conditions=True,
)
data = results["data"]
assert len(data) == 1
assert data[0]["any_transaction"] == "a" * 32
assert data[0]["any_user_id"] is None
assert data[0]["count"] == 1
    def test_offsets(self) -> None:
        """limit/offset paginate ordered results."""
        self.store_event(
            data={"message": "hello1", "timestamp": self.one_min_ago.isoformat()},
            project_id=self.project.id,
        )
        self.store_event(
            data={"message": "hello2", "timestamp": self.one_min_ago.isoformat()},
            project_id=self.project.id,
        )
        result = discover.query(
            selected_columns=["message"],
            query="",
            snuba_params=self.params,
            orderby=["message"],
            limit=1,
            offset=1,
            referrer="discover",
        )
        data = result["data"]
        assert len(data) == 1
        # because we're ordering by `message`, and offset by 1, the message should be `hello2`
        assert data[0]["message"] == "hello2"
def test_reflective_types(self) -> None:
results = discover.query(
selected_columns=[
"p50(measurements.lcp)",
"p50(measurements.foo)",
"p50(spans.foo)",
],
query="event.type:transaction",
snuba_params=self.params,
use_aggregate_conditions=True,
referrer="discover",
)
assert results["meta"]["fields"] == {
"p50_measurements_lcp": "duration",
"p50_measurements_foo": "number",
"p50_spans_foo": "duration",
}
def test_measurements(self) -> None:
event_data = load_data("transaction", timestamp=before_now(seconds=3))
self.store_event(data=event_data, project_id=self.project.id)
results = discover.query(
selected_columns=[
"measurements.fp",
"measurements.fcp",
"measurements.lcp",
"measurements.fid",
"measurements.cls",
"measurements.does_not_exist",
],
query="event.type:transaction",
snuba_params=self.params,
referrer="discover",
)
data = results["data"]
assert len(data) == 1
assert data[0]["measurements.fp"] == event_data["measurements"]["fp"]["value"]
assert data[0]["measurements.fcp"] == event_data["measurements"]["fcp"]["value"]
assert data[0]["measurements.lcp"] == event_data["measurements"]["lcp"]["value"]
assert data[0]["measurements.fid"] == event_data["measurements"]["fid"]["value"]
assert data[0]["measurements.cls"] == event_data["measurements"]["cls"]["value"]
assert data[0]["measurements.does_not_exist"] is None
def test_span_op_breakdowns(self) -> None:
event_data = load_data("transaction", timestamp=before_now(seconds=3))
self.store_event(data=event_data, project_id=self.project.id)
results = discover.query(
selected_columns=[
"spans.http",
"spans.db",
"spans.resource",
"spans.browser",
"spans.total.time",
"spans.does_not_exist",
],
query="event.type:transaction",
snuba_params=self.params,
referrer="discover",
)
data = results["data"]
assert len(data) == 1
span_ops = event_data["breakdowns"]["span_ops"]
assert data[0]["spans.http"] == span_ops["ops.http"]["value"]
assert data[0]["spans.db"] == span_ops["ops.db"]["value"]
assert data[0]["spans.resource"] == span_ops["ops.resource"]["value"]
assert data[0]["spans.browser"] == span_ops["ops.browser"]["value"]
assert data[0]["spans.total.time"] == span_ops["total.time"]["value"]
assert data[0]["spans.does_not_exist"] is None
def test_project_in_condition_with_or(self) -> None:
project2 = self.create_project(organization=self.organization)
event_data = load_data("transaction", timestamp=before_now(seconds=3))
self.store_event(data=event_data, project_id=project2.id)
expected = sorted([self.project.slug, project2.slug])
result = discover.query(
selected_columns=["project"],
query=f"project:{self.project.slug} or event.type:transaction",
snuba_params=SnubaParams(
projects=[self.project, project2],
start=self.two_min_ago,
end=self.now,
organization=self.organization,
),
orderby=["project"],
referrer="discover",
)
data = result["data"]
assert len(data) == len(expected)
assert [item["project"] for item in data] == expected
| DiscoverQueryIntegrationTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructorCallable2.py | {
"start": 355,
"end": 551
} | class ____:
def __init__(self, x: int) -> None:
pass
r1 = accepts_callable(Class1)
reveal_type(r1, expected_text="(x: int) -> Class1")
reveal_type(r1(1), expected_text="Class1")
| Class1 |
python | ray-project__ray | python/ray/data/tests/test_consumption.py | {
"start": 24361,
"end": 24845
} | class ____(CSVDatasource):
def __init__(self, paths, **csv_datasource_kwargs):
super().__init__(paths, **csv_datasource_kwargs)
self.counter = Counter.remote()
def _read_stream(self, f: "pa.NativeFile", path: str):
count = self.counter.increment.remote()
if ray.get(count) == 1:
raise ValueError("oops")
else:
for block in CSVDatasource._read_stream(self, f, path):
yield block
| FlakyCSVDatasource |
python | langchain-ai__langchain | libs/text-splitters/tests/unit_tests/test_text_splitters.py | {
"start": 1102,
"end": 23196
} | class ____:
def bar():
def foo():
def testing_func():
def bar():
"""
def test_character_text_splitter() -> None:
"""Test splitting by character count."""
text = "foo bar baz 123"
splitter = CharacterTextSplitter(separator=" ", chunk_size=7, chunk_overlap=3)
output = splitter.split_text(text)
expected_output = ["foo bar", "bar baz", "baz 123"]
assert output == expected_output
def test_character_text_splitter_empty_doc() -> None:
"""Test splitting by character count doesn't create empty documents."""
text = "foo bar"
splitter = CharacterTextSplitter(separator=" ", chunk_size=2, chunk_overlap=0)
output = splitter.split_text(text)
expected_output = ["foo", "bar"]
assert output == expected_output
def test_character_text_splitter_separtor_empty_doc() -> None:
"""Test edge cases are separators."""
text = "f b"
splitter = CharacterTextSplitter(separator=" ", chunk_size=2, chunk_overlap=0)
output = splitter.split_text(text)
expected_output = ["f", "b"]
assert output == expected_output
def test_character_text_splitter_long() -> None:
"""Test splitting by character count on long words."""
text = "foo bar baz a a"
splitter = CharacterTextSplitter(separator=" ", chunk_size=3, chunk_overlap=1)
output = splitter.split_text(text)
expected_output = ["foo", "bar", "baz", "a a"]
assert output == expected_output
def test_character_text_splitter_short_words_first() -> None:
"""Test splitting by character count when shorter words are first."""
text = "a a foo bar baz"
splitter = CharacterTextSplitter(separator=" ", chunk_size=3, chunk_overlap=1)
output = splitter.split_text(text)
expected_output = ["a a", "foo", "bar", "baz"]
assert output == expected_output
def test_character_text_splitter_longer_words() -> None:
"""Test splitting by characters when splits not found easily."""
text = "foo bar baz 123"
splitter = CharacterTextSplitter(separator=" ", chunk_size=1, chunk_overlap=1)
output = splitter.split_text(text)
expected_output = ["foo", "bar", "baz", "123"]
assert output == expected_output
@pytest.mark.parametrize(
("separator", "is_separator_regex"), [(re.escape("."), True), (".", False)]
)
def test_character_text_splitter_keep_separator_regex(
*, separator: str, is_separator_regex: bool
) -> None:
"""Test CharacterTextSplitter keep separator regex.
Test splitting by characters while keeping the separator
that is a regex special character.
"""
text = "foo.bar.baz.123"
splitter = CharacterTextSplitter(
separator=separator,
chunk_size=1,
chunk_overlap=0,
keep_separator=True,
is_separator_regex=is_separator_regex,
)
output = splitter.split_text(text)
expected_output = ["foo", ".bar", ".baz", ".123"]
assert output == expected_output
@pytest.mark.parametrize(
("separator", "is_separator_regex"), [(re.escape("."), True), (".", False)]
)
def test_character_text_splitter_keep_separator_regex_start(
*, separator: str, is_separator_regex: bool
) -> None:
"""Test CharacterTextSplitter keep separator regex and put at start.
Test splitting by characters while keeping the separator
that is a regex special character and placing it at the start of each chunk.
"""
text = "foo.bar.baz.123"
splitter = CharacterTextSplitter(
separator=separator,
chunk_size=1,
chunk_overlap=0,
keep_separator="start",
is_separator_regex=is_separator_regex,
)
output = splitter.split_text(text)
expected_output = ["foo", ".bar", ".baz", ".123"]
assert output == expected_output
@pytest.mark.parametrize(
("separator", "is_separator_regex"), [(re.escape("."), True), (".", False)]
)
def test_character_text_splitter_keep_separator_regex_end(
*, separator: str, is_separator_regex: bool
) -> None:
"""Test CharacterTextSplitter keep separator regex and put at end.
Test splitting by characters while keeping the separator
that is a regex special character and placing it at the end of each chunk.
"""
text = "foo.bar.baz.123"
splitter = CharacterTextSplitter(
separator=separator,
chunk_size=1,
chunk_overlap=0,
keep_separator="end",
is_separator_regex=is_separator_regex,
)
output = splitter.split_text(text)
expected_output = ["foo.", "bar.", "baz.", "123"]
assert output == expected_output
@pytest.mark.parametrize(
("separator", "is_separator_regex"), [(re.escape("."), True), (".", False)]
)
def test_character_text_splitter_discard_separator_regex(
*, separator: str, is_separator_regex: bool
) -> None:
"""Test CharacterTextSplitter discard separator regex.
Test splitting by characters discarding the separator
that is a regex special character.
"""
text = "foo.bar.baz.123"
splitter = CharacterTextSplitter(
separator=separator,
chunk_size=1,
chunk_overlap=0,
keep_separator=False,
is_separator_regex=is_separator_regex,
)
output = splitter.split_text(text)
expected_output = ["foo", "bar", "baz", "123"]
assert output == expected_output
def test_recursive_character_text_splitter_keep_separators() -> None:
split_tags = [",", "."]
query = "Apple,banana,orange and tomato."
# start
splitter = RecursiveCharacterTextSplitter(
chunk_size=10,
chunk_overlap=0,
separators=split_tags,
keep_separator="start",
)
result = splitter.split_text(query)
assert result == ["Apple", ",banana", ",orange and tomato", "."]
# end
splitter = RecursiveCharacterTextSplitter(
chunk_size=10,
chunk_overlap=0,
separators=split_tags,
keep_separator="end",
)
result = splitter.split_text(query)
assert result == ["Apple,", "banana,", "orange and tomato."]
def test_character_text_splitting_args() -> None:
"""Test invalid arguments."""
with pytest.raises(
ValueError,
match=re.escape(
"Got a larger chunk overlap (4) than chunk size (2), should be smaller."
),
):
CharacterTextSplitter(chunk_size=2, chunk_overlap=4)
for invalid_size in (0, -1):
with pytest.raises(ValueError, match="chunk_size must be > 0, got"):
CharacterTextSplitter(chunk_size=invalid_size)
with pytest.raises(ValueError, match="chunk_overlap must be >= 0, got -1"):
CharacterTextSplitter(chunk_size=2, chunk_overlap=-1)
def test_merge_splits() -> None:
"""Test merging splits with a given separator."""
splitter = CharacterTextSplitter(separator=" ", chunk_size=9, chunk_overlap=2)
splits = ["foo", "bar", "baz"]
expected_output = ["foo bar", "baz"]
output = splitter._merge_splits(splits, separator=" ")
assert output == expected_output
def test_create_documents() -> None:
"""Test create documents method."""
texts = ["foo bar", "baz"]
splitter = CharacterTextSplitter(separator=" ", chunk_size=3, chunk_overlap=0)
docs = splitter.create_documents(texts)
expected_docs = [
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
]
assert docs == expected_docs
def test_create_documents_with_metadata() -> None:
"""Test create documents with metadata method."""
texts = ["foo bar", "baz"]
splitter = CharacterTextSplitter(separator=" ", chunk_size=3, chunk_overlap=0)
docs = splitter.create_documents(texts, [{"source": "1"}, {"source": "2"}])
expected_docs = [
Document(page_content="foo", metadata={"source": "1"}),
Document(page_content="bar", metadata={"source": "1"}),
Document(page_content="baz", metadata={"source": "2"}),
]
assert docs == expected_docs
@pytest.mark.parametrize(
("splitter", "text", "expected_docs"),
[
(
CharacterTextSplitter(
separator=" ", chunk_size=7, chunk_overlap=3, add_start_index=True
),
"foo bar baz 123",
[
Document(page_content="foo bar", metadata={"start_index": 0}),
Document(page_content="bar baz", metadata={"start_index": 4}),
Document(page_content="baz 123", metadata={"start_index": 8}),
],
),
(
RecursiveCharacterTextSplitter(
chunk_size=6,
chunk_overlap=0,
separators=["\n\n", "\n", " ", ""],
add_start_index=True,
),
"w1 w1 w1 w1 w1 w1 w1 w1 w1",
[
Document(page_content="w1 w1", metadata={"start_index": 0}),
Document(page_content="w1 w1", metadata={"start_index": 6}),
Document(page_content="w1 w1", metadata={"start_index": 12}),
Document(page_content="w1 w1", metadata={"start_index": 18}),
Document(page_content="w1", metadata={"start_index": 24}),
],
),
],
)
def test_create_documents_with_start_index(
splitter: TextSplitter, text: str, expected_docs: list[Document]
) -> None:
"""Test create documents method."""
docs = splitter.create_documents([text])
assert docs == expected_docs
for doc in docs:
s_i = doc.metadata["start_index"]
assert text[s_i : s_i + len(doc.page_content)] == doc.page_content
def test_metadata_not_shallow() -> None:
"""Test that metadatas are not shallow."""
texts = ["foo bar"]
splitter = CharacterTextSplitter(separator=" ", chunk_size=3, chunk_overlap=0)
docs = splitter.create_documents(texts, [{"source": "1"}])
expected_docs = [
Document(page_content="foo", metadata={"source": "1"}),
Document(page_content="bar", metadata={"source": "1"}),
]
assert docs == expected_docs
docs[0].metadata["foo"] = 1
assert docs[0].metadata == {"source": "1", "foo": 1}
assert docs[1].metadata == {"source": "1"}
def test_iterative_text_splitter_keep_separator() -> None:
chunk_size = 5
output = __test_iterative_text_splitter(chunk_size=chunk_size, keep_separator=True)
assert output == [
"....5",
"X..3",
"Y...4",
"X....5",
"Y...",
]
def test_iterative_text_splitter_discard_separator() -> None:
chunk_size = 5
output = __test_iterative_text_splitter(chunk_size=chunk_size, keep_separator=False)
assert output == [
"....5",
"..3",
"...4",
"....5",
"...",
]
def __test_iterative_text_splitter(
*, chunk_size: int, keep_separator: bool
) -> list[str]:
chunk_size += 1 if keep_separator else 0
splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size,
chunk_overlap=0,
separators=["X", "Y"],
keep_separator=keep_separator,
)
text = "....5X..3Y...4X....5Y..."
output = splitter.split_text(text)
for chunk in output:
assert len(chunk) <= chunk_size, f"Chunk is larger than {chunk_size}"
return output
def test_iterative_text_splitter() -> None:
"""Test iterative text splitter."""
text = """Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f.
This is a weird text to write, but gotta test the splittingggg some how.
Bye!\n\n-H."""
splitter = RecursiveCharacterTextSplitter(chunk_size=10, chunk_overlap=1)
output = splitter.split_text(text)
expected_output = [
"Hi.",
"I'm",
"Harrison.",
"How? Are?",
"You?",
"Okay then",
"f f f f.",
"This is a",
"weird",
"text to",
"write,",
"but gotta",
"test the",
"splitting",
"gggg",
"some how.",
"Bye!",
"-H.",
]
assert output == expected_output
def test_split_documents() -> None:
"""Test split_documents."""
splitter = CharacterTextSplitter(separator="", chunk_size=1, chunk_overlap=0)
docs = [
Document(page_content="foo", metadata={"source": "1"}),
Document(page_content="bar", metadata={"source": "2"}),
Document(page_content="baz", metadata={"source": "1"}),
]
expected_output = [
Document(page_content="f", metadata={"source": "1"}),
Document(page_content="o", metadata={"source": "1"}),
Document(page_content="o", metadata={"source": "1"}),
Document(page_content="b", metadata={"source": "2"}),
Document(page_content="a", metadata={"source": "2"}),
Document(page_content="r", metadata={"source": "2"}),
Document(page_content="b", metadata={"source": "1"}),
Document(page_content="a", metadata={"source": "1"}),
Document(page_content="z", metadata={"source": "1"}),
]
assert splitter.split_documents(docs) == expected_output
def test_python_text_splitter() -> None:
splitter = PythonCodeTextSplitter(chunk_size=30, chunk_overlap=0)
splits = splitter.split_text(FAKE_PYTHON_TEXT)
split_0 = """class Foo:\n\n def bar():"""
split_1 = """def foo():"""
split_2 = """def testing_func():"""
split_3 = """def bar():"""
expected_splits = [split_0, split_1, split_2, split_3]
assert splits == expected_splits
FAKE_JSX_TEXT = """
import React from 'react';
import OtherComponent from './OtherComponent';
function MyComponent() {
const [count, setCount] = React.useState(0);
const handleClick = () => {
setCount(count + 1);
};
return (
<div>
<h1>Counter: {count}</h1>
<button onClick={handleClick}>
Increment
</button>
<OtherComponent />
</div>
);
}
export default MyComponent;
"""
def test_jsx_text_splitter() -> None:
splitter = JSFrameworkTextSplitter(chunk_size=30, chunk_overlap=0)
splits = splitter.split_text(FAKE_JSX_TEXT)
expected_splits = [
"\nimport React from 'react';\n"
"import OtherComponent from './OtherComponent';\n",
"\nfunction MyComponent() {\n const [count, setCount] = React.useState(0);",
"\n\n const handleClick = () => {\n setCount(count + 1);\n };",
"return (",
"<div>",
"<h1>Counter: {count}</h1>\n ",
"<button onClick={handleClick}>\n Increment\n </button>\n ",
"<OtherComponent />\n </div>\n );\n}\n",
"export default MyComponent;",
]
assert [s.strip() for s in splits] == [s.strip() for s in expected_splits]
FAKE_VUE_TEXT = """
<template>
<div>
<h1>{{ title }}</h1>
<button @click="increment">
Count is: {{ count }}
</button>
</div>
</template>
<script>
export default {
data() {
return {
title: 'Counter App',
count: 0
}
},
methods: {
increment() {
this.count++
}
}
}
</script>
<style>
button {
color: blue;
}
</style>
"""
def test_vue_text_splitter() -> None:
splitter = JSFrameworkTextSplitter(chunk_size=30, chunk_overlap=0)
splits = splitter.split_text(FAKE_VUE_TEXT)
expected_splits = [
"<template>",
"<div>",
"<h1>{{ title }}</h1>",
'<button @click="increment">\n Count is: {{ count }}\n'
" </button>\n </div>\n</template>",
"<script>",
"export",
" default {\n data() {\n return {\n title: 'Counter App',\n "
"count: 0\n }\n },\n methods: {\n increment() {\n "
"this.count++\n }\n }\n}\n</script>",
"<style>\nbutton {\n color: blue;\n}\n</style>",
]
assert [s.strip() for s in splits] == [s.strip() for s in expected_splits]
FAKE_SVELTE_TEXT = """
<script>
let count = 0
function increment() {
count += 1
}
</script>
<main>
<h1>Counter App</h1>
<button on:click={increment}>
Count is: {count}
</button>
</main>
<style>
button {
color: blue;
}
</style>
"""
def test_svelte_text_splitter() -> None:
splitter = JSFrameworkTextSplitter(chunk_size=30, chunk_overlap=0)
splits = splitter.split_text(FAKE_SVELTE_TEXT)
expected_splits = [
"<script>\n let count = 0",
"\n\n function increment() {\n count += 1\n }\n</script>",
"<main>",
"<h1>Counter App</h1>",
"<button on:click={increment}>\n Count is: {count}\n </button>\n</main>",
"<style>\n button {\n color: blue;\n }\n</style>",
]
assert [s.strip() for s in splits] == [s.strip() for s in expected_splits]
CHUNK_SIZE = 16
def test_python_code_splitter() -> None:
splitter = RecursiveCharacterTextSplitter.from_language(
Language.PYTHON, chunk_size=CHUNK_SIZE, chunk_overlap=0
)
code = """
def hello_world():
print("Hello, World!")
# Call the function
hello_world()
"""
chunks = splitter.split_text(code)
assert chunks == [
"def",
"hello_world():",
'print("Hello,',
'World!")',
"# Call the",
"function",
"hello_world()",
]
def test_golang_code_splitter() -> None:
splitter = RecursiveCharacterTextSplitter.from_language(
Language.GO, chunk_size=CHUNK_SIZE, chunk_overlap=0
)
code = """
package main
import "fmt"
func helloWorld() {
fmt.Println("Hello, World!")
}
func main() {
helloWorld()
}
"""
chunks = splitter.split_text(code)
assert chunks == [
"package main",
'import "fmt"',
"func",
"helloWorld() {",
'fmt.Println("He',
"llo,",
'World!")',
"}",
"func main() {",
"helloWorld()",
"}",
]
def test_rst_code_splitter() -> None:
splitter = RecursiveCharacterTextSplitter.from_language(
Language.RST, chunk_size=CHUNK_SIZE, chunk_overlap=0
)
code = """
Sample Document
===============
Section
-------
This is the content of the section.
Lists
-----
- Item 1
- Item 2
- Item 3
Comment
*******
Not a comment
.. This is a comment
"""
chunks = splitter.split_text(code)
assert chunks == [
"Sample Document",
"===============",
"Section",
"-------",
"This is the",
"content of the",
"section.",
"Lists",
"-----",
"- Item 1",
"- Item 2",
"- Item 3",
"Comment",
"*******",
"Not a comment",
".. This is a",
"comment",
]
# Special test for special characters
code = "harry\n***\nbabylon is"
chunks = splitter.split_text(code)
assert chunks == ["harry", "***\nbabylon is"]
def test_proto_file_splitter() -> None:
splitter = RecursiveCharacterTextSplitter.from_language(
Language.PROTO, chunk_size=CHUNK_SIZE, chunk_overlap=0
)
code = """
syntax = "proto3";
package example;
message Person {
string name = 1;
int32 age = 2;
repeated string hobbies = 3;
}
"""
chunks = splitter.split_text(code)
assert chunks == [
"syntax =",
'"proto3";',
"package",
"example;",
"message Person",
"{",
"string name",
"= 1;",
"int32 age =",
"2;",
"repeated",
"string hobbies",
"= 3;",
"}",
]
def test_javascript_code_splitter() -> None:
splitter = RecursiveCharacterTextSplitter.from_language(
Language.JS, chunk_size=CHUNK_SIZE, chunk_overlap=0
)
code = """
function helloWorld() {
console.log("Hello, World!");
}
// Call the function
helloWorld();
"""
chunks = splitter.split_text(code)
assert chunks == [
"function",
"helloWorld() {",
'console.log("He',
"llo,",
'World!");',
"}",
"// Call the",
"function",
"helloWorld();",
]
def test_cobol_code_splitter() -> None:
splitter = RecursiveCharacterTextSplitter.from_language(
Language.COBOL, chunk_size=CHUNK_SIZE, chunk_overlap=0
)
code = """
IDENTIFICATION DIVISION.
PROGRAM-ID. HelloWorld.
DATA DIVISION.
WORKING-STORAGE SECTION.
01 GREETING PIC X(12) VALUE 'Hello, World!'.
PROCEDURE DIVISION.
DISPLAY GREETING.
STOP RUN.
"""
chunks = splitter.split_text(code)
assert chunks == [
"IDENTIFICATION",
"DIVISION.",
"PROGRAM-ID.",
"HelloWorld.",
"DATA DIVISION.",
"WORKING-STORAGE",
"SECTION.",
"01 GREETING",
"PIC X(12)",
"VALUE 'Hello,",
"World!'.",
"PROCEDURE",
"DIVISION.",
"DISPLAY",
"GREETING.",
"STOP RUN.",
]
def test_typescript_code_splitter() -> None:
splitter = RecursiveCharacterTextSplitter.from_language(
Language.TS, chunk_size=CHUNK_SIZE, chunk_overlap=0
)
code = """
function helloWorld(): void {
console.log("Hello, World!");
}
// Call the function
helloWorld();
"""
chunks = splitter.split_text(code)
assert chunks == [
"function",
"helloWorld():",
"void {",
'console.log("He',
"llo,",
'World!");',
"}",
"// Call the",
"function",
"helloWorld();",
]
def test_java_code_splitter() -> None:
splitter = RecursiveCharacterTextSplitter.from_language(
Language.JAVA, chunk_size=CHUNK_SIZE, chunk_overlap=0
)
code = """
public class HelloWorld {
public static void main(String[] args) {
System.out.println("Hello, World!");
}
}
"""
chunks = splitter.split_text(code)
assert chunks == [
"public class",
"HelloWorld {",
"public",
"static void",
"main(String[]",
"args) {",
"System.out.prin",
'tln("Hello,',
'World!");',
"}\n}",
]
def test_kotlin_code_splitter() -> None:
splitter = RecursiveCharacterTextSplitter.from_language(
Language.KOTLIN, chunk_size=CHUNK_SIZE, chunk_overlap=0
)
code = """
| Foo |
python | PrefectHQ__prefect | src/integrations/prefect-docker/prefect_docker/host.py | {
"start": 643,
"end": 3854
} | class ____(Block):
"""
Block used to manage settings for interacting with a Docker host.
Attributes:
base_url: URL to the Docker server, e.g. `unix:///var/run/docker.sock`
or `tcp://127.0.0.1:1234`. If this is not set, the client will
be configured from environment variables.
version: The version of the API to use. Set to auto to
automatically detect the server's version.
timeout: Default timeout for API calls, in seconds.
max_pool_size: The maximum number of connections to save in the pool.
client_kwargs: Additional keyword arguments to pass to
`docker.from_env()` or `DockerClient`.
Examples:
Get a Docker Host client.
```python
from prefect_docker import DockerHost
docker_host = DockerHost(
base_url="tcp://127.0.0.1:1234",
max_pool_size=4
)
with docker_host.get_client() as client:
... # Use the client for Docker operations
```
"""
_block_type_name = "Docker Host"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/14a315b79990200db7341e42553e23650b34bb96-250x250.png" # noqa
_description = "Store settings for interacting with a Docker host."
base_url: Optional[str] = Field(
default=None,
description="URL to the Docker host.",
title="Base URL",
examples=["unix:///var/run/docker.sock"],
)
version: str = Field(default="auto", description="The version of the API to use")
timeout: Optional[int] = Field(
default=None, description="Default timeout for API calls, in seconds."
)
max_pool_size: Optional[int] = Field(
default=None,
description="The maximum number of connections to save in the pool.",
)
client_kwargs: Dict[str, Any] = Field(
default_factory=dict,
title="Additional Configuration",
description=(
"Additional keyword arguments to pass to "
"`docker.from_env()` or `DockerClient`."
),
)
def get_client(self) -> docker.DockerClient:
"""
Gets a Docker Client to communicate with a Docker host.
Returns:
A Docker Client.
"""
logger = get_run_logger()
client_kwargs = {
"version": self.version,
"timeout": self.timeout,
"max_pool_size": self.max_pool_size,
**self.client_kwargs,
}
client_kwargs = {
key: value for key, value in client_kwargs.items() if value is not None
}
if self.base_url is None:
logger.debug(
f"Creating a Docker client from "
f"environment variables, using {self.version} version."
)
client = _ContextManageableDockerClient.from_env(**client_kwargs)
else:
logger.debug(
f"Creating a Docker client to {self.base_url} "
f"using {self.version} version."
)
client = _ContextManageableDockerClient(
base_url=self.base_url, **client_kwargs
)
return client
| DockerHost |
python | doocs__leetcode | solution/0600-0699/0647.Palindromic Substrings/Solution.py | {
"start": 0,
"end": 293
} | class ____:
def countSubstrings(self, s: str) -> int:
ans, n = 0, len(s)
for k in range(n * 2 - 1):
i, j = k // 2, (k + 1) // 2
while ~i and j < n and s[i] == s[j]:
ans += 1
i, j = i - 1, j + 1
return ans
| Solution |
python | ApeWorX__ape | src/ape/types/private_mempool.py | {
"start": 2816,
"end": 3170
} | class ____(BaseModel):
"""
A new signed transaction.
"""
model_config = ConfigDict(populate_by_name=True)
tx: HexBytes
"""
Bytes of the signed transaction.
"""
can_revert: bool = Field(alias="canRevert")
"""
If true, the transaction can revert without the bundle being considered invalid.
"""
| BundleTxItem |
python | google__pytype | pytype/pytd/printer.py | {
"start": 1339,
"end": 4001
} | class ____:
"""Imports tracker."""
def __init__(self):
self.track_imports = True
self._typing = _TypingImports()
self._direct_imports: dict[_AliasType, _NameType] = {}
self._from_imports: dict[_NameType, dict[_AliasType, _NameType]] = {}
# Map from fully qualified import name to alias
self._reverse_alias_map: dict[_NameType, _AliasType] = {}
@property
def typing_members(self):
return self._typing.members
def add(self, full_name: str, alias: str | None = None):
"""Adds an import.
Examples:
-------------------------------------------------------
Import Statement | Method Call
-------------------------------------------------------
import abc | add('abc')
import abc as xyz | add('abc', 'xyz')
import foo.bar | add('foo.bar')
from foo import bar | add('foo.bar', 'bar')
from foo import bar as baz | add('foo.bar', 'baz')
Args:
full_name: The full name of the thing being imported.
alias: The name that the imported thing is assigned to.
"""
if not self.track_imports:
return
alias = alias or full_name
if "." not in full_name or full_name == alias and not alias.endswith(".*"):
self._direct_imports[alias] = full_name
else:
module, name = full_name.rsplit(".", 1)
if name == "*":
alias = "*"
if module == "typing":
self._typing.add(name, alias)
else:
self._from_imports.setdefault(module, {})[alias] = name
self._reverse_alias_map[full_name] = alias
def decrement_typing_count(self, member: str):
self._typing.decrement_count(member)
def get_alias(self, name: str):
if name.startswith("typing."):
return self._typing.members.get(name.removeprefix("typing."))
return self._reverse_alias_map.get(name)
def to_import_statements(self):
"""Converts self to import statements."""
imports = self._typing.to_import_statements()
for alias, module in self._direct_imports.items():
if alias == module:
imports.append(f"import {module}")
else:
imports.append(f"import {module} as {alias}")
for module, members in self._from_imports.items():
targets = ", ".join(
sorted(
f"{name} as {alias}" if alias != name else name
for alias, name in members.items()
)
)
imports.append(f"from {module} import {targets}")
# Sort import lines lexicographically and ensure import statements come
# before from-import statements.
return sorted(imports, key=lambda s: (s.startswith("from "), s))
| _Imports |
python | getsentry__sentry | tests/sentry/middleware/test_ratelimit_middleware.py | {
"start": 10603,
"end": 12809
} | class ____(TestCase):
def test_default_rate_limit_values(self) -> None:
"""Ensure that the default rate limits are called for endpoints without overrides"""
class TestEndpoint(Endpoint):
pass
view = TestEndpoint.as_view()
rate_limit_config = get_rate_limit_config(view.view_class)
assert get_rate_limit_value(
"GET", RateLimitCategory.IP, rate_limit_config
) == get_default_rate_limits_for_group("default", RateLimitCategory.IP)
assert get_rate_limit_value(
"POST", RateLimitCategory.ORGANIZATION, rate_limit_config
) == get_default_rate_limits_for_group("default", RateLimitCategory.ORGANIZATION)
assert get_rate_limit_value(
"DELETE", RateLimitCategory.USER, rate_limit_config
) == get_default_rate_limits_for_group("default", RateLimitCategory.USER)
def test_override_rate_limit(self) -> None:
"""Override one or more of the default rate limits"""
class TestEndpoint(Endpoint):
rate_limits = RateLimitConfig(
limit_overrides={
"GET": {RateLimitCategory.IP: RateLimit(limit=100, window=5)},
"POST": {RateLimitCategory.USER: RateLimit(limit=20, window=4)},
}
)
view = TestEndpoint.as_view()
rate_limit_config = get_rate_limit_config(view.view_class)
assert get_rate_limit_value("GET", RateLimitCategory.IP, rate_limit_config) == RateLimit(
100, 5
)
# get is not overriddent for user, hence we use the default
assert get_rate_limit_value(
"GET", RateLimitCategory.USER, rate_limit_config
) == get_default_rate_limits_for_group("default", category=RateLimitCategory.USER)
# get is not overriddent for IP, hence we use the default
assert get_rate_limit_value(
"POST", RateLimitCategory.IP, rate_limit_config
) == get_default_rate_limits_for_group("default", category=RateLimitCategory.IP)
assert get_rate_limit_value("POST", RateLimitCategory.USER, rate_limit_config) == RateLimit(
20, 4
)
| TestGetRateLimitValue |
python | mwaskom__seaborn | tests/test_regression.py | {
"start": 15807,
"end": 23576
} | class ____:
rs = np.random.RandomState(56)
df = pd.DataFrame(dict(x=rs.randn(90),
y=rs.randn(90) + 5,
z=rs.randint(0, 1, 90),
g=np.repeat(list("abc"), 30),
h=np.tile(list("xy"), 45),
u=np.tile(np.arange(6), 15)))
bw_err = rs.randn(6)[df.u.values]
df.y += bw_err
def test_regplot_basic(self):
f, ax = plt.subplots()
lm.regplot(x="x", y="y", data=self.df)
assert len(ax.lines) == 1
assert len(ax.collections) == 2
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
def test_regplot_selective(self):
f, ax = plt.subplots()
ax = lm.regplot(x="x", y="y", data=self.df, scatter=False, ax=ax)
assert len(ax.lines) == 1
assert len(ax.collections) == 1
ax.clear()
f, ax = plt.subplots()
ax = lm.regplot(x="x", y="y", data=self.df, fit_reg=False)
assert len(ax.lines) == 0
assert len(ax.collections) == 1
ax.clear()
f, ax = plt.subplots()
ax = lm.regplot(x="x", y="y", data=self.df, ci=None)
assert len(ax.lines) == 1
assert len(ax.collections) == 1
ax.clear()
def test_regplot_scatter_kws_alpha(self):
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5, 0.5]])
ax = lm.regplot(x="x", y="y", data=self.df,
scatter_kws={'color': color})
assert ax.collections[0]._alpha is None
assert ax.collections[0]._facecolors[0, 3] == 0.5
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5]])
ax = lm.regplot(x="x", y="y", data=self.df,
scatter_kws={'color': color})
assert ax.collections[0]._alpha == 0.8
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5]])
ax = lm.regplot(x="x", y="y", data=self.df,
scatter_kws={'color': color, 'alpha': 0.4})
assert ax.collections[0]._alpha == 0.4
f, ax = plt.subplots()
color = 'r'
ax = lm.regplot(x="x", y="y", data=self.df,
scatter_kws={'color': color})
assert ax.collections[0]._alpha == 0.8
f, ax = plt.subplots()
alpha = .3
ax = lm.regplot(x="x", y="y", data=self.df,
x_bins=5, fit_reg=False,
scatter_kws={"alpha": alpha})
for line in ax.lines:
assert line.get_alpha() == alpha
def test_regplot_binned(self):
ax = lm.regplot(x="x", y="y", data=self.df, x_bins=5)
assert len(ax.lines) == 6
assert len(ax.collections) == 2
def test_lmplot_no_data(self):
with pytest.raises(TypeError):
# keyword argument `data` is required
lm.lmplot(x="x", y="y")
def test_lmplot_basic(self):
g = lm.lmplot(x="x", y="y", data=self.df)
ax = g.axes[0, 0]
assert len(ax.lines) == 1
assert len(ax.collections) == 2
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
def test_lmplot_hue(self):
g = lm.lmplot(x="x", y="y", data=self.df, hue="h")
ax = g.axes[0, 0]
assert len(ax.lines) == 2
assert len(ax.collections) == 4
def test_lmplot_markers(self):
g1 = lm.lmplot(x="x", y="y", data=self.df, hue="h", markers="s")
assert g1.hue_kws == {"marker": ["s", "s"]}
g2 = lm.lmplot(x="x", y="y", data=self.df, hue="h", markers=["o", "s"])
assert g2.hue_kws == {"marker": ["o", "s"]}
with pytest.raises(ValueError):
lm.lmplot(x="x", y="y", data=self.df, hue="h",
markers=["o", "s", "d"])
def test_lmplot_marker_linewidths(self):
g = lm.lmplot(x="x", y="y", data=self.df, hue="h",
fit_reg=False, markers=["o", "+"])
c = g.axes[0, 0].collections
assert c[1].get_linewidths()[0] == mpl.rcParams["lines.linewidth"]
def test_lmplot_facets(self):
g = lm.lmplot(x="x", y="y", data=self.df, row="g", col="h")
assert g.axes.shape == (3, 2)
g = lm.lmplot(x="x", y="y", data=self.df, col="u", col_wrap=4)
assert g.axes.shape == (6,)
g = lm.lmplot(x="x", y="y", data=self.df, hue="h", col="u")
assert g.axes.shape == (1, 6)
def test_lmplot_hue_col_nolegend(self):
g = lm.lmplot(x="x", y="y", data=self.df, col="h", hue="h")
assert g._legend is None
def test_lmplot_scatter_kws(self):
g = lm.lmplot(x="x", y="y", hue="h", data=self.df, ci=None)
red_scatter, blue_scatter = g.axes[0, 0].collections
red, blue = color_palette(n_colors=2)
npt.assert_array_equal(red, red_scatter.get_facecolors()[0, :3])
npt.assert_array_equal(blue, blue_scatter.get_facecolors()[0, :3])
@pytest.mark.parametrize("sharex", [True, False])
def test_lmplot_facet_truncate(self, sharex):
g = lm.lmplot(
data=self.df, x="x", y="y", hue="g", col="h",
truncate=False, facet_kws=dict(sharex=sharex),
)
for ax in g.axes.flat:
for line in ax.lines:
xdata = line.get_xdata()
assert ax.get_xlim() == tuple(xdata[[0, -1]])
def test_lmplot_sharey(self):
df = pd.DataFrame(dict(
x=[0, 1, 2, 0, 1, 2],
y=[1, -1, 0, -100, 200, 0],
z=["a", "a", "a", "b", "b", "b"],
))
with pytest.warns(UserWarning):
g = lm.lmplot(data=df, x="x", y="y", col="z", sharey=False)
ax1, ax2 = g.axes.flat
assert ax1.get_ylim()[0] > ax2.get_ylim()[0]
assert ax1.get_ylim()[1] < ax2.get_ylim()[1]
def test_lmplot_facet_kws(self):
xlim = -4, 20
g = lm.lmplot(
data=self.df, x="x", y="y", col="h", facet_kws={"xlim": xlim}
)
for ax in g.axes.flat:
assert ax.get_xlim() == xlim
def test_residplot(self):
x, y = self.df.x, self.df.y
ax = lm.residplot(x=x, y=y)
resid = y - np.polyval(np.polyfit(x, y, 1), x)
x_plot, y_plot = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, x_plot)
npt.assert_array_almost_equal(resid, y_plot)
@pytest.mark.skipif(_no_statsmodels, reason="no statsmodels")
def test_residplot_lowess(self):
ax = lm.residplot(x="x", y="y", data=self.df, lowess=True)
assert len(ax.lines) == 2
x, y = ax.lines[1].get_xydata().T
npt.assert_array_equal(x, np.sort(self.df.x))
@pytest.mark.parametrize("option", ["robust", "lowess"])
@pytest.mark.skipif(not _no_statsmodels, reason="statsmodels installed")
def test_residplot_statsmodels_missing_errors(self, long_df, option):
with pytest.raises(RuntimeError, match=rf"`{option}=True` requires"):
lm.residplot(long_df, x="x", y="y", **{option: True})
def test_three_point_colors(self):
x, y = np.random.randn(2, 3)
ax = lm.regplot(x=x, y=y, color=(1, 0, 0))
color = ax.collections[0].get_facecolors()
npt.assert_almost_equal(color[0, :3],
(1, 0, 0))
def test_regplot_xlim(self):
f, ax = plt.subplots()
x, y1, y2 = np.random.randn(3, 50)
lm.regplot(x=x, y=y1, truncate=False)
lm.regplot(x=x, y=y2, truncate=False)
line1, line2 = ax.lines
assert np.array_equal(line1.get_xdata(), line2.get_xdata())
| TestRegressionPlots |
python | allegroai__clearml | clearml/backend_api/services/v2_23/projects.py | {
"start": 133990,
"end": 136561
} | class ____(Response):
"""
Response of projects.get_unique_metric_variants endpoint.
:param metrics: A list of metric variants reported for tasks in this project
:type metrics: Sequence[MetricVariantResult]
"""
_service = "projects"
_action = "get_unique_metric_variants"
_version = "2.23"
_schema = {
"definitions": {
"metric_variant_result": {
"properties": {
"metric": {
"description": "Metric name",
"type": ["string", "null"],
},
"metric_hash": {
"description": "Metric name hash. Used instead of the metric name when categorizing\n last metrics events in task objects.",
"type": ["string", "null"],
},
"variant": {
"description": "Variant name",
"type": ["string", "null"],
},
"variant_hash": {
"description": "Variant name hash. Used instead of the variant name when categorizing\n last metrics events in task objects.",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"metrics": {
"description": "A list of metric variants reported for tasks in this project",
"items": {"$ref": "#/definitions/metric_variant_result"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, metrics: Optional[List[Any]] = None, **kwargs: Any) -> None:
super(GetUniqueMetricVariantsResponse, self).__init__(**kwargs)
self.metrics = metrics
@schema_property("metrics")
def metrics(self) -> Optional[List[Any]]:
return self._property_metrics
@metrics.setter
def metrics(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_metrics = None
return
self.assert_isinstance(value, "metrics", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [MetricVariantResult.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "metrics", MetricVariantResult, is_array=True)
self._property_metrics = value
| GetUniqueMetricVariantsResponse |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 86804,
"end": 87060
} | class ____(BlockwiseTail):
def _task(self, name: Key, index: int) -> Task:
return Task(
name,
operator.getitem,
TaskRef((self.frame._name, index)),
slice(-self.n, None),
)
| BlockwiseTailIndex |
python | ray-project__ray | python/ray/tests/test_output.py | {
"start": 1545,
"end": 4989
} | class ____:
def __init__(self):
time.sleep(1)
# NOTE: We should save actor, otherwise it will be out of scope.
actors = [Foo.remote() for _ in range(30)]
for actor in actors:
try:
ray.get(actor.__ray_ready__.remote())
except ray.exceptions.OutOfMemoryError:
# When running the test on a small machine,
# some actors might be killed by the memory monitor.
# We just catch and ignore the error.
pass
"""
out_str = run_string_as_driver(script)
print(out_str)
assert "PYTHON worker processes have been started" in out_str, out_str
assert out_str.count("RAY_DEDUP_LOGS") == 1, out_str
assert "[repeated" in out_str, out_str
def test_logger_config_with_ray_init():
"""Test that the logger is correctly configured when ray.init is called."""
script = """
import ray
ray.init(num_cpus=1)
"""
out_str = run_string_as_driver(script)
assert "INFO" in out_str, out_str
assert len(out_str) != 0
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_spill_logs():
script = """
import ray
ray.init(object_store_memory=200e6)
x = []
for _ in range(10):
x.append(ray.put(bytes(100 * 1024 * 1024)))
"""
stdout_str, stderr_str = run_string_as_driver_stdout_stderr(
script, env={"RAY_verbose_spill_logs": "1"}
)
out_str = stdout_str + stderr_str
assert "Spilled " in out_str
stdout_str, stderr_str = run_string_as_driver_stdout_stderr(
script, env={"RAY_verbose_spill_logs": "0"}
)
out_str = stdout_str + stderr_str
assert "Spilled " not in out_str
def _hook(env):
return {"env_vars": {"HOOK_KEY": "HOOK_VALUE"}}
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
@pytest.mark.parametrize("skip_hook", [True, False])
def test_runtime_env_hook(skip_hook):
ray_init_snippet = "ray.init(_skip_env_hook=True)" if skip_hook else ""
script = f"""
import ray
import os
{ray_init_snippet}
@ray.remote
def f():
return os.environ.get("HOOK_KEY")
print(ray.get(f.remote()))
"""
proc = run_string_as_driver_nonblocking(
script, env={"RAY_RUNTIME_ENV_HOOK": "ray.tests.test_output._hook"}
)
out_str = proc.stdout.read().decode("ascii") + proc.stderr.read().decode("ascii")
print(out_str)
if skip_hook:
assert "HOOK_VALUE" not in out_str
else:
assert "HOOK_VALUE" in out_str
def test_env_hook_skipped_for_ray_client(start_cluster, monkeypatch):
monkeypatch.setenv("RAY_RUNTIME_ENV_HOOK", "ray.tests.test_output._hook")
cluster, address = start_cluster
ray.init(address)
@ray.remote
def f():
return os.environ.get("HOOK_KEY")
using_ray_client = address.startswith("ray://")
if using_ray_client:
assert ray.get(f.remote()) is None
else:
assert ray.get(f.remote()) == "HOOK_VALUE"
@pytest.mark.parametrize(
"ray_start_cluster_head_with_env_vars",
[
{
"num_cpus": 1,
"env_vars": {
"RAY_enable_autoscaler_v2": "0",
},
},
{
"num_cpus": 1,
"env_vars": {
"RAY_enable_autoscaler_v2": "1",
},
},
],
indirect=True,
)
def test_autoscaler_warn_infeasible(ray_start_cluster_head_with_env_vars):
script = """
import ray
@ray.remote(resources={{"does_not_exist": 1}})
| Foo |
python | pypa__pip | tests/unit/test_options.py | {
"start": 1148,
"end": 7213
} | class ____(AddFakeCommandMixin):
"""
Tests for confirming our option precedence:
cli -> environment -> subcommand config -> global config -> option
defaults
"""
def get_config_section(self, section: str) -> list[tuple[str, str]]:
config = {
"global": [("timeout", "-3")],
"fake": [("timeout", "-2")],
}
return config[section]
def get_config_section_global(self, section: str) -> list[tuple[str, str]]:
config: dict[str, list[tuple[str, str]]] = {
"global": [("timeout", "-3")],
"fake": [],
}
return config[section]
def test_env_override_default_int(self, monkeypatch: pytest.MonkeyPatch) -> None:
"""
Test that environment variable overrides an int option default.
"""
monkeypatch.setenv("PIP_TIMEOUT", "-1")
# FakeCommand intentionally returns the wrong type.
options, args = cast(tuple[Values, list[str]], main(["fake"]))
assert options.timeout == -1
@pytest.mark.parametrize("values", [["F1"], ["F1", "F2"]])
def test_env_override_default_append(
self, values: list[str], monkeypatch: pytest.MonkeyPatch
) -> None:
"""
Test that environment variable overrides an append option default.
"""
monkeypatch.setenv("PIP_FIND_LINKS", " ".join(values))
# FakeCommand intentionally returns the wrong type.
options, args = cast(tuple[Values, list[str]], main(["fake"]))
assert options.find_links == values
@pytest.mark.parametrize("choices", [["w"], ["s", "w"]])
def test_env_override_default_choice(
self, choices: list[str], monkeypatch: pytest.MonkeyPatch
) -> None:
"""
Test that environment variable overrides a choice option default.
"""
monkeypatch.setenv("PIP_EXISTS_ACTION", " ".join(choices))
# FakeCommand intentionally returns the wrong type.
options, args = cast(tuple[Values, list[str]], main(["fake"]))
assert options.exists_action == choices
@pytest.mark.parametrize("name", ["PIP_LOG_FILE", "PIP_LOCAL_LOG"])
def test_env_alias_override_default(
self, name: str, monkeypatch: pytest.MonkeyPatch
) -> None:
"""
When an option has multiple long forms, test that the technique of
using the env variable, "PIP_<long form>" works for all cases.
(e.g. PIP_LOG_FILE and PIP_LOCAL_LOG should all work)
"""
monkeypatch.setenv(name, "override.log")
# FakeCommand intentionally returns the wrong type.
options, args = cast(tuple[Values, list[str]], main(["fake"]))
assert options.log == "override.log"
def test_cli_override_environment(self, monkeypatch: pytest.MonkeyPatch) -> None:
"""
Test the cli overrides and environment variable
"""
monkeypatch.setenv("PIP_TIMEOUT", "-1")
# FakeCommand intentionally returns the wrong type.
options, args = cast(
tuple[Values, list[str]], main(["fake", "--timeout", "-2"])
)
assert options.timeout == -2
@pytest.mark.parametrize(
"pip_no_cache_dir",
[
# Enabling --no-cache-dir means no cache directory.
"1",
"true",
"on",
"yes",
# For historical / backwards compatibility reasons, we also disable
# the cache directory if provided a value that translates to 0.
"0",
"false",
"off",
"no",
],
)
def test_cache_dir__PIP_NO_CACHE_DIR(
self, pip_no_cache_dir: str, monkeypatch: pytest.MonkeyPatch
) -> None:
"""
Test setting the PIP_NO_CACHE_DIR environment variable without
passing any command-line flags.
"""
monkeypatch.setenv("PIP_NO_CACHE_DIR", pip_no_cache_dir)
# FakeCommand intentionally returns the wrong type.
options, args = cast(tuple[Values, list[str]], main(["fake"]))
assert options.cache_dir is False
@pytest.mark.parametrize("pip_no_cache_dir", ["yes", "no"])
def test_cache_dir__PIP_NO_CACHE_DIR__with_cache_dir(
self,
pip_no_cache_dir: str,
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""
Test setting PIP_NO_CACHE_DIR while also passing an explicit
--cache-dir value.
"""
monkeypatch.setenv("PIP_NO_CACHE_DIR", pip_no_cache_dir)
# FakeCommand intentionally returns the wrong type.
options, args = cast(
tuple[Values, list[str]], main(["--cache-dir", "/cache/dir", "fake"])
)
# The command-line flag takes precedence.
assert options.cache_dir == "/cache/dir"
@pytest.mark.parametrize("pip_no_cache_dir", ["yes", "no"])
def test_cache_dir__PIP_NO_CACHE_DIR__with_no_cache_dir(
self,
pip_no_cache_dir: str,
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""
Test setting PIP_NO_CACHE_DIR while also passing --no-cache-dir.
"""
monkeypatch.setenv("PIP_NO_CACHE_DIR", pip_no_cache_dir)
# FakeCommand intentionally returns the wrong type.
options, args = cast(tuple[Values, list[str]], main(["--no-cache-dir", "fake"]))
# The command-line flag should take precedence (which has the same
# value in this case).
assert options.cache_dir is False
def test_cache_dir__PIP_NO_CACHE_DIR_invalid__with_no_cache_dir(
self,
monkeypatch: pytest.MonkeyPatch,
capsys: pytest.CaptureFixture[str],
) -> None:
"""
Test setting PIP_NO_CACHE_DIR to an invalid value while also passing
--no-cache-dir.
"""
monkeypatch.setenv("PIP_NO_CACHE_DIR", "maybe")
expected_err = "--no-cache-dir error: invalid truth value 'maybe'"
with assert_option_error(capsys, expected=expected_err):
main(["--no-cache-dir", "fake"])
| TestOptionPrecedence |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/links/emr.py | {
"start": 1524,
"end": 4385
} | class ____(BaseAwsLink):
"""Helper class for constructing Amazon EMR Logs Link."""
name = "EMR Cluster Logs"
key = "emr_logs"
format_str = BASE_AWS_CONSOLE_LINK + "/s3/buckets/{log_uri}?region={region_name}&prefix={job_flow_id}/"
def format_link(self, **kwargs) -> str:
if not kwargs.get("log_uri"):
return ""
return super().format_link(**kwargs)
def get_serverless_log_uri(*, s3_log_uri: str, application_id: str, job_run_id: str) -> str:
"""
Retrieve the S3 URI to EMR Serverless Job logs.
Any EMR Serverless job may have a different S3 logging location (or none), which is an S3 URI.
The logging location is then {s3_uri}/applications/{application_id}/jobs/{job_run_id}.
"""
return f"{s3_log_uri}/applications/{application_id}/jobs/{job_run_id}"
def get_serverless_dashboard_url(
*,
aws_conn_id: str | None = None,
emr_serverless_client: boto3.client = None,
application_id: str,
job_run_id: str,
) -> ParseResult | None:
"""
Retrieve the URL to EMR Serverless dashboard.
The URL is a one-use, ephemeral link that expires in 1 hour and is accessible without authentication.
Either an AWS connection ID or existing EMR Serverless client must be passed.
If the connection ID is passed, a client is generated using that connection.
"""
if not exactly_one(aws_conn_id, emr_serverless_client):
raise AirflowException("Requires either an AWS connection ID or an EMR Serverless Client.")
if aws_conn_id:
# If get_dashboard_for_job_run fails for whatever reason, fail after 1 attempt
# so that the rest of the links load in a reasonable time frame.
hook = EmrServerlessHook(aws_conn_id=aws_conn_id, config={"retries": {"total_max_attempts": 1}})
emr_serverless_client = hook.conn
response = emr_serverless_client.get_dashboard_for_job_run(
applicationId=application_id, jobRunId=job_run_id
)
if "url" not in response:
return None
log_uri = urlparse(response["url"])
return log_uri
def get_log_uri(
*, cluster: dict[str, Any] | None = None, emr_client: boto3.client = None, job_flow_id: str | None = None
) -> str | None:
"""
Retrieve the S3 URI to the EMR Job logs.
Requires either the output of a describe_cluster call or both an EMR Client and a job_flow_id..
"""
if not exactly_one(bool(cluster), emr_client and job_flow_id):
raise AirflowException(
"Requires either the output of a describe_cluster call or both an EMR Client and a job_flow_id."
)
cluster_info = (cluster or emr_client.describe_cluster(ClusterId=job_flow_id))["Cluster"]
if "LogUri" not in cluster_info:
return None
log_uri = S3Hook.parse_s3_url(cluster_info["LogUri"])
return "/".join(log_uri)
| EmrLogsLink |
python | kamyu104__LeetCode-Solutions | Python/the-most-similar-path-in-a-graph.py | {
"start": 70,
"end": 987
} | class ____(object):
def mostSimilar(self, n, roads, names, targetPath):
"""
:type n: int
:type roads: List[List[int]]
:type names: List[str]
:type targetPath: List[str]
:rtype: List[int]
"""
adj = [[] for _ in xrange(n)]
for u, v in roads:
adj[u].append(v)
adj[v].append(u)
dp = [[0]*n for _ in xrange(len(targetPath)+1)]
for i in xrange(1, len(targetPath)+1):
for v in xrange(n):
dp[i][v] = (names[v] != targetPath[i-1]) + min(dp[i-1][u] for u in adj[v])
path = [dp[-1].index(min(dp[-1]))]
for i in reversed(xrange(2, len(targetPath)+1)):
for u in adj[path[-1]]:
if dp[i-1][u]+(names[path[-1]] != targetPath[i-1]) == dp[i][path[-1]]:
path.append(u)
break
return path[::-1]
| Solution |
python | tensorflow__tensorflow | tensorflow/python/profiler/pprof_profiler.py | {
"start": 4676,
"end": 6639
} | class ____(object):
"""Keeps track of `Location` protos for pprof profile.
`Locations` store information about function call locations.
"""
def __init__(self, functions):
"""Constructor.
Args:
functions: A `Functions` object.
"""
self._functions = functions
# Maps tuples in the form (file_path, called_function_name, line_number)
# to `Location` protos.
self._location_key_to_location = {}
def index_of(
self, file_path, line_number, called_function_name, called_file_path,
called_function_start_line):
"""Returns index of the location, adding the location if needed.
Args:
file_path: (string) Path to file that makes the call.
line_number: (integer) Call line number.
called_function_name: (string) Function name of the function called at
`file_path` and `line_number`.
called_file_path: (string) Path to file where the called function is
defined.
called_function_start_line: (integer) Start line number of called
function definition in `called_file_path` file.
Returns:
Index of location.
"""
location_key = (file_path, called_function_name, line_number)
if location_key in self._location_key_to_location:
location = self._location_key_to_location[location_key]
return location.id
else:
# Location indexes should start from 1
location_index = len(self._location_key_to_location) + 1
location = profile_pb2.Location()
location.id = location_index
self._location_key_to_location[location_key] = location
line = location.line.add()
line.function_id = self._functions.index_of(
called_file_path, called_function_name, called_function_start_line)
line.line = line_number
return location_index
def location_protos(self):
"""Returns list of `profile_pb2.Location` protos."""
return self._location_key_to_location.values()
| Locations |
python | readthedocs__readthedocs.org | readthedocs/sso/migrations/0003_allow_saml_with_old_dashboard.py | {
"start": 150,
"end": 712
} | class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("sso", "0002_add_saml_app"),
]
operations = [
migrations.AddField(
model_name="ssointegration",
name="using_old_dashboard",
field=models.BooleanField(
default=False,
null=True,
blank=True,
help_text="Whether the SSO integration is using the old dashboard for authentication. Mainly used for SAML integrations.",
),
),
]
| Migration |
python | scikit-learn__scikit-learn | sklearn/model_selection/_search.py | {
"start": 7557,
"end": 16353
} | class ____:
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary with parameters names (`str`) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
If a list of dicts is given, first a dict is sampled uniformly, and
then a parameter is sampled using that dict as above.
n_iter : int
Number of parameter settings that are produced.
random_state : int, RandomState instance or None, default=None
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Pass an int for reproducible output across multiple
function calls.
See :term:`Glossary <random_state>`.
Returns
-------
params : dict of str to any
**Yields** dictionaries mapping each estimator parameter to
as sampled value.
Examples
--------
>>> from sklearn.model_selection import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4,
... random_state=rng))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, *, random_state=None):
if not isinstance(param_distributions, (Mapping, Iterable)):
raise TypeError(
"Parameter distribution is not a dict or a list,"
f" got: {param_distributions!r} of type "
f"{type(param_distributions).__name__}"
)
if isinstance(param_distributions, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_distributions = [param_distributions]
for dist in param_distributions:
if not isinstance(dist, dict):
raise TypeError(
"Parameter distribution is not a dict ({!r})".format(dist)
)
for key in dist:
if not isinstance(dist[key], Iterable) and not hasattr(
dist[key], "rvs"
):
raise TypeError(
f"Parameter grid for parameter {key!r} is not iterable "
f"or a distribution (value={dist[key]})"
)
self.n_iter = n_iter
self.random_state = random_state
self.param_distributions = param_distributions
def _is_all_lists(self):
return all(
all(not hasattr(v, "rvs") for v in dist.values())
for dist in self.param_distributions
)
def __iter__(self):
rng = check_random_state(self.random_state)
# if all distributions are given as lists, we want to sample without
# replacement
if self._is_all_lists():
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
n_iter = self.n_iter
if grid_size < n_iter:
warnings.warn(
"The total space of parameters %d is smaller "
"than n_iter=%d. Running %d iterations. For exhaustive "
"searches, use GridSearchCV." % (grid_size, self.n_iter, grid_size),
UserWarning,
)
n_iter = grid_size
for i in sample_without_replacement(grid_size, n_iter, random_state=rng):
yield param_grid[i]
else:
for _ in range(self.n_iter):
dist = rng.choice(self.param_distributions)
# Always sort the keys of a dictionary, for reproducibility
items = sorted(dist.items())
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs(random_state=rng)
else:
params[k] = v[rng.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
if self._is_all_lists():
grid_size = len(ParameterGrid(self.param_distributions))
return min(self.n_iter, grid_size)
else:
return self.n_iter
def _check_refit(search_cv, attr):
if not search_cv.refit:
raise AttributeError(
f"This {type(search_cv).__name__} instance was initialized with "
f"`refit=False`. {attr} is available only after refitting on the best "
"parameters. You can refit an estimator manually using the "
"`best_params_` attribute"
)
def _search_estimator_has(attr):
"""Check if we can delegate a method to the underlying estimator.
Calling a prediction method will only be available if `refit=True`. In
such case, we check first the fitted best estimator. If it is not
fitted, we check the unfitted estimator.
Checking the unfitted estimator allows to use `hasattr` on the `SearchCV`
instance even before calling `fit`.
"""
def check(self):
_check_refit(self, attr)
if hasattr(self, "best_estimator_"):
# raise an AttributeError if `attr` does not exist
getattr(self.best_estimator_, attr)
return True
# raise an AttributeError if `attr` does not exist
getattr(self.estimator, attr)
return True
return check
def _yield_masked_array_for_each_param(candidate_params):
"""
Yield a masked array for each candidate param.
`candidate_params` is a sequence of params which were used in
a `GridSearchCV`. We use masked arrays for the results, as not
all params are necessarily present in each element of
`candidate_params`. For example, if using `GridSearchCV` with
a `SVC` model, then one might search over params like:
- kernel=["rbf"], gamma=[0.1, 1]
- kernel=["poly"], degree=[1, 2]
and then param `'gamma'` would not be present in entries of
`candidate_params` corresponding to `kernel='poly'`.
"""
n_candidates = len(candidate_params)
param_results = defaultdict(dict)
for cand_idx, params in enumerate(candidate_params):
for name, value in params.items():
param_results["param_%s" % name][cand_idx] = value
for key, param_result in param_results.items():
param_list = list(param_result.values())
try:
arr = np.array(param_list)
except ValueError:
# This can happen when param_list contains lists of different
# lengths, for example:
# param_list=[[1], [2, 3]]
arr_dtype = np.dtype(object)
else:
# There are two cases when we don't use the automatically inferred
# dtype when creating the array and we use object instead:
# - string dtype
# - when array.ndim > 1, that means that param_list was something
# like a list of same-size sequences, which gets turned into a
# multi-dimensional array but we want a 1d array
arr_dtype = arr.dtype if arr.dtype.kind != "U" and arr.ndim == 1 else object
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate (which may not contain all the params).
ma = MaskedArray(np.empty(n_candidates, dtype=arr_dtype), mask=True)
for index, value in param_result.items():
# Setting the value at an index unmasks that index
ma[index] = value
yield (key, ma)
| ParameterSampler |
python | kamyu104__LeetCode-Solutions | Python/count-pairs-that-form-a-complete-day-i.py | {
"start": 383,
"end": 638
} | class ____(object):
def countCompleteDayPairs(self, hours):
"""
:type hours: List[int]
:rtype: int
"""
return sum((hours[i]+hours[j])%24 == 0 for i in xrange(len(hours)-1) for j in xrange(i+1, len(hours)))
| Solution2 |
python | readthedocs__readthedocs.org | readthedocs/organizations/views/base.py | {
"start": 1353,
"end": 3048
} | class ____(SuccessMessageMixin, CheckOrganizationsEnabled):
"""
Mixin class that provides organization sublevel objects.
This mixin uses several class level variables
org_url_field
The URL kwarg name for the organization slug
admin_only
Boolean the dictacts access for organization owners only or just member
access
"""
org_url_field = "slug"
admin_only = True
def get_queryset(self):
"""Return queryset that returns organizations for user."""
return self.get_organization_queryset()
def get_organization_queryset(self):
"""
Return organizations queryset for the request user.
This will return organizations that the user has admin/owner access to
if :py:data:`admin_only` is True. Otherwise, this will return
organizations where the request user is a member of the team
"""
if self.admin_only:
return Organization.objects.for_admin_user(user=self.request.user)
return Organization.objects.for_user(user=self.request.user)
@lru_cache(maxsize=1)
def get_organization(self):
"""Return organization determined by url kwarg."""
if self.org_url_field not in self.kwargs:
return None
return get_object_or_404(
self.get_organization_queryset(),
slug=self.kwargs[self.org_url_field],
)
def get_context_data(self, **kwargs):
"""Add organization to context data."""
context = super().get_context_data(**kwargs)
organization = self.get_organization()
context["organization"] = organization
return context
| OrganizationMixin |
python | huggingface__transformers | src/transformers/models/owlvit/image_processing_owlvit_fast.py | {
"start": 1195,
"end": 8219
} | class ____(BaseImageProcessorFast):
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"height": 768, "width": 768}
default_to_square = True
crop_size = {"height": 768, "width": 768}
do_resize = True
do_center_crop = False
do_rescale = True
do_normalize = None
do_convert_rgb = None
model_input_names = ["pixel_values"]
# Copied from transformers.models.owlvit.image_processing_owlvit.OwlViTImageProcessor.post_process_object_detection
def post_process_object_detection(
self,
outputs: "OwlViTObjectDetectionOutput",
threshold: float = 0.1,
target_sizes: Optional[Union[TensorType, list[tuple]]] = None,
):
"""
Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format.
Args:
outputs ([`OwlViTObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.1):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, predictions will not be resized.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the following keys:
- "scores": The confidence scores for each predicted box on the image.
- "labels": Indexes of the classes predicted by the model on the image.
- "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format.
"""
batch_logits, batch_boxes = outputs.logits, outputs.pred_boxes
batch_size = len(batch_logits)
if target_sizes is not None and len(target_sizes) != batch_size:
raise ValueError("Make sure that you pass in as many target sizes as images")
# batch_logits of shape (batch_size, num_queries, num_classes)
batch_class_logits = torch.max(batch_logits, dim=-1)
batch_scores = torch.sigmoid(batch_class_logits.values)
batch_labels = batch_class_logits.indices
# Convert to [x0, y0, x1, y1] format
batch_boxes = center_to_corners_format(batch_boxes)
# Convert from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
batch_boxes = _scale_boxes(batch_boxes, target_sizes)
results = []
for scores, labels, boxes in zip(batch_scores, batch_labels, batch_boxes):
keep = scores > threshold
scores = scores[keep]
labels = labels[keep]
boxes = boxes[keep]
results.append({"scores": scores, "labels": labels, "boxes": boxes})
return results
# Copied from transformers.models.owlvit.image_processing_owlvit.OwlViTImageProcessor.post_process_image_guided_detection
def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_threshold=0.3, target_sizes=None):
"""
Converts the output of [`OwlViTForObjectDetection.image_guided_detection`] into the format expected by the COCO
api.
Args:
outputs ([`OwlViTImageGuidedObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.0):
Minimum confidence threshold to use to filter out predicted boxes.
nms_threshold (`float`, *optional*, defaults to 0.3):
IoU threshold for non-maximum suppression of overlapping boxes.
target_sizes (`torch.Tensor`, *optional*):
Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in
the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to
None, predictions will not be unnormalized.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model. All labels are set to None as
`OwlViTForObjectDetection.image_guided_detection` perform one-shot object detection.
"""
logits, target_boxes = outputs.logits, outputs.target_pred_boxes
if target_sizes is not None and len(logits) != len(target_sizes):
raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
if target_sizes is not None and target_sizes.shape[1] != 2:
raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
probs = torch.max(logits, dim=-1)
scores = torch.sigmoid(probs.values)
# Convert to [x0, y0, x1, y1] format
target_boxes = center_to_corners_format(target_boxes)
# Apply non-maximum suppression (NMS)
if nms_threshold < 1.0:
for idx in range(target_boxes.shape[0]):
for i in torch.argsort(-scores[idx]):
if not scores[idx][i]:
continue
ious = box_iou(target_boxes[idx][i, :].unsqueeze(0), target_boxes[idx])[0][0]
ious[i] = -1.0 # Mask self-IoU.
scores[idx][ious > nms_threshold] = 0.0
# Convert from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
target_boxes = _scale_boxes(target_boxes, target_sizes)
# Compute box display alphas based on prediction scores
results = []
alphas = torch.zeros_like(scores)
for idx in range(target_boxes.shape[0]):
# Select scores for boxes matching the current query:
query_scores = scores[idx]
if not query_scores.nonzero().numel():
continue
# Apply threshold on scores before scaling
query_scores[query_scores < threshold] = 0.0
# Scale box alpha such that the best box for each query has alpha 1.0 and the worst box has alpha 0.1.
# All other boxes will either belong to a different query, or will not be shown.
max_score = torch.max(query_scores) + 1e-6
query_alphas = (query_scores - (max_score * 0.1)) / (max_score * 0.9)
query_alphas = torch.clip(query_alphas, 0.0, 1.0)
alphas[idx] = query_alphas
mask = alphas[idx] > 0
box_scores = alphas[idx][mask]
boxes = target_boxes[idx][mask]
results.append({"scores": box_scores, "labels": None, "boxes": boxes})
return results
__all__ = ["OwlViTImageProcessorFast"]
| OwlViTImageProcessorFast |
python | huggingface__transformers | src/transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py | {
"start": 1371,
"end": 8345
} | class ____(BackboneConfigMixin, PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Dinov2WithRegistersModel`]. It is used to instantiate an
Dinov2WithRegisters model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the DINOv2 with Registers
[facebook/dinov2-with-registers-base](https://huggingface.co/facebook/dinov2-with-registers-base) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
mlp_ratio (`int`, *optional*, defaults to 4):
Ratio of the hidden size of the MLPs relative to the `hidden_size`.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
layerscale_value (`float`, *optional*, defaults to 1.0):
Initial value to use for layer scale.
drop_path_rate (`float`, *optional*, defaults to 0.0):
Stochastic depth rate per sample (when applied in the main path of residual layers).
use_swiglu_ffn (`bool`, *optional*, defaults to `False`):
Whether to use the SwiGLU feedforward neural network.
num_register_tokens (`int`, *optional*, defaults to 4):
Number of register tokens to use.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
apply_layernorm (`bool`, *optional*, defaults to `True`):
Whether to apply layer normalization to the feature maps in case the model is used as backbone.
reshape_hidden_states (`bool`, *optional*, defaults to `True`):
Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
seq_len, hidden_size)`.
Example:
```python
>>> from transformers import Dinov2WithRegistersConfig, Dinov2WithRegistersModel
>>> # Initializing a Dinov2WithRegisters base style configuration
>>> configuration = Dinov2WithRegistersConfig()
>>> # Initializing a model (with random weights) from the base style configuration
>>> model = Dinov2WithRegistersModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "dinov2_with_registers"
def __init__(
self,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
mlp_ratio=4,
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
layer_norm_eps=1e-6,
image_size=224,
patch_size=16,
num_channels=3,
qkv_bias=True,
layerscale_value=1.0,
drop_path_rate=0.0,
use_swiglu_ffn=False,
num_register_tokens=4,
out_features=None,
out_indices=None,
apply_layernorm=True,
reshape_hidden_states=True,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.mlp_ratio = mlp_ratio
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.layerscale_value = layerscale_value
self.drop_path_rate = drop_path_rate
self.use_swiglu_ffn = use_swiglu_ffn
self.num_register_tokens = num_register_tokens
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
self.apply_layernorm = apply_layernorm
self.reshape_hidden_states = reshape_hidden_states
__all__ = ["Dinov2WithRegistersConfig"]
| Dinov2WithRegistersConfig |
python | ray-project__ray | python/ray/dashboard/modules/aggregator/tests/test_ray_event_publisher.py | {
"start": 579,
"end": 1484
} | class ____(PublisherClientInterface):
"""Test implementation of PublisherClientInterface."""
def __init__(
self,
batch_size: int = 1,
side_effect=lambda batch: PublishStats(True, 1, 0),
):
self.batch_size = batch_size
self.publish_calls = []
self._side_effect = side_effect
async def publish(self, batch) -> PublishStats:
self.publish_calls.append(batch)
return self._side_effect(batch)
def count_num_events_in_batch(self, batch) -> int:
return self.batch_size
async def close(self) -> None:
pass
@pytest.fixture
def base_kwargs():
"""Common kwargs for publisher initialization."""
return {
"name": "test",
"max_retries": 2,
"initial_backoff": 0,
"max_backoff": 0,
"jitter_ratio": 0,
"enable_publisher_stats": True,
}
| MockPublisherClient |
python | nedbat__coveragepy | tests/test_python.py | {
"start": 487,
"end": 2174
} | class ____(CoverageTest):
"""Tests of `get_zip_bytes`."""
run_in_temp_dir = False
@pytest.mark.parametrize(
"encoding",
["utf-8", "gb2312", "hebrew", "shift_jis", "cp1252"],
)
def test_get_encoded_zip_files(self, encoding: str) -> None:
# See igor.py, do_zipmods, for the text of these files.
zip_file = "tests/zipmods.zip"
sys.path.append(zip_file) # So we can import the files.
filename = zip_file + "/encoded_" + encoding + ".py"
filename = os_sep(filename)
zip_data = get_zip_bytes(filename)
assert zip_data is not None
zip_text = zip_data.decode(encoding)
assert "All OK" in zip_text
# Run the code to see that we really got it encoded properly.
mod = __import__("encoded_" + encoding)
assert mod.encoding == encoding
def test_source_for_file(tmp_path: pathlib.Path) -> None:
src = str(tmp_path / "a.py")
assert source_for_file(src) == src
assert source_for_file(src + "c") == src
assert source_for_file(src + "o") == src
unknown = src + "FOO"
assert source_for_file(unknown) == unknown
@pytest.mark.skipif(not env.WINDOWS, reason="not windows")
def test_source_for_file_windows(tmp_path: pathlib.Path) -> None:
a_py = tmp_path / "a.py"
src = str(a_py)
# On windows if a pyw exists, it is an acceptable source
path_windows = tmp_path / "a.pyw"
path_windows.write_text("", encoding="utf-8")
assert str(path_windows) == source_for_file(src + "c")
# If both pyw and py exist, py is preferred
a_py.write_text("", encoding="utf-8")
assert source_for_file(src + "c") == src
| GetZipBytesTest |
python | walkccc__LeetCode | solutions/3531. Count Covered Buildings/3531.py | {
"start": 0,
"end": 601
} | class ____:
def countCoveredBuildings(self, n: int, buildings: list[list[int]]) -> int:
northernmost = [math.inf] * (n + 1)
southernmost = [0] * (n + 1)
westernmost = [math.inf] * (n + 1)
easternmost = [0] * (n + 1)
for x, y in buildings:
northernmost[x] = min(northernmost[x], y)
southernmost[x] = max(southernmost[x], y)
westernmost[y] = min(westernmost[y], x)
easternmost[y] = max(easternmost[y], x)
return sum(northernmost[x] < y < southernmost[x]
and westernmost[y] < x < easternmost[y]
for x, y in buildings)
| Solution |
python | google__jax | jax/_src/core.py | {
"start": 96591,
"end": 98962
} | class ____(effects.Effect):
pass
array_ref_effect = internal_mutable_array_effect = InternalMutableArrayEffect()
effects.control_flow_allowed_effects.add_type(InternalMutableArrayEffect)
effects.remat_allowed_effects.add_type(InternalMutableArrayEffect)
@ref_p.def_effectful_abstract_eval
def _ref_abstract_eval(init_aval, *, memory_space: Any, kind: Any):
from jax._src.state.types import AbstractRef # pytype: disable=import-error
return (AbstractRef(init_aval, memory_space=memory_space, kind=kind),
{internal_mutable_array_effect})
@ref_p.def_impl
def _ref_impl(init_val, *, memory_space: Any, kind: Any):
if memory_space is not None:
raise NotImplementedError(
"array ref with memory space only works inside of a `jit`.")
from jax._src.state.types import AbstractRef # pytype: disable=import-error
from jax._src.lax.lax import _array_copy # pytype: disable=import-error
aval = AbstractRef(typeof(init_val), kind=kind)
return Ref(aval, ArrayRefImpl(aval, _array_copy(init_val)))
def freeze(ref: Ref) -> Array:
"""Invalidate a given reference and return its final value.
For more information about mutable array references, refer to the
`Ref guide`_.
Args:
ref: A :class:`jax.ref.Ref` object.
Returns:
A :class:`jax.Array` containing the contents of ``ref``.
Examples:
>>> import jax
>>> ref = jax.new_ref(jax.numpy.arange(5))
>>> ref[3] = 100
>>> ref
Ref([ 0, 1, 2, 100, 4], dtype=int32)
>>> jax.ref.freeze(ref)
Array([ 0, 1, 2, 100, 4], dtype=int32)
.. _Ref guide: https://docs.jax.dev/en/latest/array_refs.html
"""
return freeze_p.bind(ref)
freeze_p = Primitive('freeze')
freeze_p.is_effectful = lambda params: True # type: ignore
freeze_p.ref_primitive = True
@freeze_p.def_effectful_abstract_eval
def freeze_abstract_eval(ref_aval):
return ref_aval.inner_aval, {internal_mutable_array_effect}
@freeze_p.def_impl
def _freeze_impl(ref):
return ref[()]
def accum_grad_in_ref(x):
return accum_grad_in_ref_p.bind(x)
accum_grad_in_ref_p = Primitive('accum_grad_in_ref')
accum_grad_in_ref_p.is_high = lambda *_: True # type: ignore
accum_grad_in_ref_p.to_lojax = lambda x: x # type: ignore
accum_grad_in_ref_p.def_abstract_eval(lambda x: x) # type: ignore
accum_grad_in_ref_p.def_impl(lambda x: x) # type: ignore
| InternalMutableArrayEffect |
python | django__django | tests/admin_views/tests.py | {
"start": 235199,
"end": 238385
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
cls.s1 = Section.objects.create(name="Test section")
def setUp(self):
self.client.force_login(self.superuser)
def test_admin_index(self):
"Check the never-cache status of the main index"
response = self.client.get(reverse("admin:index"))
self.assertEqual(get_max_age(response), 0)
def test_app_index(self):
"Check the never-cache status of an application index"
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertEqual(get_max_age(response), 0)
def test_model_index(self):
"Check the never-cache status of a model index"
response = self.client.get(reverse("admin:admin_views_fabric_changelist"))
self.assertEqual(get_max_age(response), 0)
def test_model_add(self):
"Check the never-cache status of a model add page"
response = self.client.get(reverse("admin:admin_views_fabric_add"))
self.assertEqual(get_max_age(response), 0)
def test_model_view(self):
"Check the never-cache status of a model edit page"
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(get_max_age(response), 0)
def test_model_history(self):
"Check the never-cache status of a model history page"
response = self.client.get(
reverse("admin:admin_views_section_history", args=(self.s1.pk,))
)
self.assertEqual(get_max_age(response), 0)
def test_model_delete(self):
"Check the never-cache status of a model delete page"
response = self.client.get(
reverse("admin:admin_views_section_delete", args=(self.s1.pk,))
)
self.assertEqual(get_max_age(response), 0)
def test_login(self):
"Check the never-cache status of login views"
self.client.logout()
response = self.client.get(reverse("admin:index"))
self.assertEqual(get_max_age(response), 0)
def test_logout(self):
"Check the never-cache status of logout view"
response = self.client.post(reverse("admin:logout"))
self.assertEqual(get_max_age(response), 0)
def test_password_change(self):
"Check the never-cache status of the password change view"
self.client.logout()
response = self.client.get(reverse("admin:password_change"))
self.assertIsNone(get_max_age(response))
def test_password_change_done(self):
"Check the never-cache status of the password change done view"
response = self.client.get(reverse("admin:password_change_done"))
self.assertIsNone(get_max_age(response))
def test_JS_i18n(self):
"Check the never-cache status of the JavaScript i18n view"
response = self.client.get(reverse("admin:jsi18n"))
self.assertIsNone(get_max_age(response))
@override_settings(ROOT_URLCONF="admin_views.urls")
| NeverCacheTests |
python | tensorflow__tensorflow | tensorflow/python/ops/nn_test.py | {
"start": 7196,
"end": 8665
} | class ____(test_lib.TestCase, parameterized.TestCase):
def _log_softmax(self, x):
assert len(x.shape) == 2
m = x.max(1)[:, np.newaxis]
u = x - m
return u - np.log(np.sum(np.exp(u), 1, keepdims=True))
def testLogSoftmax(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float32)
y_np = self._log_softmax(x_np)
x_tf = constant_op.constant(x_np)
y_tf = nn_ops.log_softmax_v2(x_tf)
y_tf_np = self.evaluate(y_tf)
eps = 1e-3
self.assertAllClose(y_tf_np, y_np, eps)
def testLogSoftmaxAxes(self):
arr = np.linspace(0., 1, 12).reshape(3, 4)
x_neg_axis = nn_ops.log_softmax_v2(arr, axis=-2)
y_pos_axis = nn_ops.log_softmax_v2(arr, axis=0)
z_gt_axis = nn_ops.log_softmax_v2(arr, axis=0)
x_neg_axis_tf = self.evaluate(x_neg_axis)
y_pos_axis_tf = self.evaluate(y_pos_axis)
z_gt_axis_tf = self.evaluate(z_gt_axis)
eps = 1e-3
self.assertAllClose(x_neg_axis_tf, y_pos_axis_tf, eps)
self.assertAllClose(y_pos_axis_tf, z_gt_axis_tf, eps)
@parameterized.parameters(((5, 10),), ((2, 3, 4),))
def testGradient(self, x_shape):
x_np = np.random.randn(*x_shape).astype(np.float64)
with self.cached_session():
x_tf = constant_op.constant(x_np)
theoretical, numerical = gradient_checker_v2.compute_gradient(
nn_ops.log_softmax_v2, [x_tf])
self.assertAllClose(theoretical, numerical)
@test_util.run_all_in_graph_and_eager_modes
| LogSoftmaxTest |
python | huggingface__transformers | src/transformers/models/camembert/modeling_camembert.py | {
"start": 16715,
"end": 17399
} | class ____(PreTrainedModel):
config_class = CamembertConfig
base_model_prefix = "roberta"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": CamembertLayer,
"attentions": CamembertSelfAttention,
"cross_attentions": CamembertCrossAttention,
}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, CamembertLMHead):
init.zeros_(module.bias)
| CamembertPreTrainedModel |
python | spack__spack | lib/spack/spack/util/executable.py | {
"start": 16870,
"end": 16984
} | class ____(spack.error.SpackError):
"""Raised when :class:`Executable` exits with an error code."""
| ProcessError |
python | etianen__django-reversion | tests/test_app/migrations/0001_initial.py | {
"start": 123,
"end": 5523
} | class ____(migrations.Migration):
initial = True
dependencies = [
('reversion', '0001_squashed_0004_auto_20160611_1202'),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='TestModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='v1', max_length=191)),
],
),
migrations.CreateModel(
name='TestModelEscapePK',
fields=[
('name', models.CharField(max_length=191, primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='TestModelInline',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('inline_name', models.CharField(default='v1', max_length=191)),
('test_model', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='test_app.testmodel')),
],
),
migrations.CreateModel(
name='TestModelRelated',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='v1', max_length=191)),
],
),
migrations.CreateModel(
name='TestModelWithNaturalKey',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='v1', max_length=191)),
],
),
migrations.CreateModel(
name='TestModelParent',
fields=[
('testmodel_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='test_app.testmodel')),
('parent_name', models.CharField(default='parent v1', max_length=191)),
],
bases=('test_app.testmodel',),
),
migrations.CreateModel(
name='TestModelThrough',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='v1', max_length=191)),
('test_model', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='test_app.testmodel')),
('test_model_related', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='test_app.testmodelrelated')),
],
),
migrations.CreateModel(
name='TestModelNestedInline',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nested_inline_name', models.CharField(default='v1', max_length=191)),
('test_model_inline', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='test_app.testmodelinline')),
],
),
migrations.CreateModel(
name='TestModelInlineByNaturalKey',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('test_model', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='test_app.testmodelwithnaturalkey')),
],
),
migrations.CreateModel(
name='TestModelGenericInline',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.IntegerField()),
('inline_name', models.CharField(default='v1', max_length=191)),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
],
),
migrations.AddField(
model_name='testmodel',
name='related',
field=models.ManyToManyField(blank=True, related_name='_testmodel_related_+', to='test_app.TestModelRelated'),
),
migrations.AddField(
model_name='testmodel',
name='related_through',
field=models.ManyToManyField(blank=True, related_name='_testmodel_related_through_+', through='test_app.TestModelThrough', to='test_app.TestModelRelated'),
),
migrations.CreateModel(
name='TestMeta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=191)),
('revision', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reversion.revision')),
],
),
migrations.CreateModel(
name='TestModelWithUniqueConstraint',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=191, unique=True)),
],
),
]
| Migration |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/assertions.py | {
"start": 25734,
"end": 30654
} | class ____:
def assert_result(self, result, class_, *objects):
result = list(result)
print(repr(result))
self.assert_list(result, class_, objects)
def assert_list(self, result, class_, list_):
self.assert_(
len(result) == len(list_),
"result list is not the same size as test list, "
+ "for class "
+ class_.__name__,
)
for i in range(0, len(list_)):
self.assert_row(class_, result[i], list_[i])
def assert_row(self, class_, rowobj, desc):
self.assert_(
rowobj.__class__ is class_, "item class is not " + repr(class_)
)
for key, value in desc.items():
if isinstance(value, tuple):
if isinstance(value[1], list):
self.assert_list(getattr(rowobj, key), value[0], value[1])
else:
self.assert_row(value[0], getattr(rowobj, key), value[1])
else:
self.assert_(
getattr(rowobj, key) == value,
"attribute %s value %s does not match %s"
% (key, getattr(rowobj, key), value),
)
def assert_unordered_result(self, result, cls, *expected):
"""As assert_result, but the order of objects is not considered.
The algorithm is very expensive but not a big deal for the small
numbers of rows that the test suite manipulates.
"""
class immutabledict(dict):
def __hash__(self):
return id(self)
found = util.IdentitySet(result)
expected = {immutabledict(e) for e in expected}
for wrong in filterfalse(lambda o: isinstance(o, cls), found):
fail(
'Unexpected type "%s", expected "%s"'
% (type(wrong).__name__, cls.__name__)
)
if len(found) != len(expected):
fail(
'Unexpected object count "%s", expected "%s"'
% (len(found), len(expected))
)
NOVALUE = object()
def _compare_item(obj, spec):
for key, value in spec.items():
if isinstance(value, tuple):
try:
self.assert_unordered_result(
getattr(obj, key), value[0], *value[1]
)
except AssertionError:
return False
else:
if getattr(obj, key, NOVALUE) != value:
return False
return True
for expected_item in expected:
for found_item in found:
if _compare_item(found_item, expected_item):
found.remove(found_item)
break
else:
fail(
"Expected %s instance with attributes %s not found."
% (cls.__name__, repr(expected_item))
)
return True
def sql_execution_asserter(self, db=None):
if db is None:
from . import db as db
return assertsql.assert_engine(db)
def assert_sql_execution(self, db, callable_, *rules):
with self.sql_execution_asserter(db) as asserter:
result = callable_()
asserter.assert_(*rules)
return result
def assert_sql(self, db, callable_, rules):
newrules = []
for rule in rules:
if isinstance(rule, dict):
newrule = assertsql.AllOf(
*[assertsql.CompiledSQL(k, v) for k, v in rule.items()]
)
else:
newrule = assertsql.CompiledSQL(*rule)
newrules.append(newrule)
return self.assert_sql_execution(db, callable_, *newrules)
def assert_sql_count(self, db, callable_, count):
return self.assert_sql_execution(
db, callable_, assertsql.CountStatements(count)
)
@contextlib.contextmanager
def assert_execution(self, db, *rules):
with self.sql_execution_asserter(db) as asserter:
yield
asserter.assert_(*rules)
def assert_statement_count(self, db, count):
return self.assert_execution(db, assertsql.CountStatements(count))
@contextlib.contextmanager
def assert_statement_count_multi_db(self, dbs, counts):
recs = [
(self.sql_execution_asserter(db), db, count)
for (db, count) in zip(dbs, counts)
]
asserters = []
for ctx, db, count in recs:
asserters.append(ctx.__enter__())
try:
yield
finally:
for asserter, (ctx, db, count) in zip(asserters, recs):
ctx.__exit__(None, None, None)
asserter.assert_(assertsql.CountStatements(count))
| AssertsExecutionResults |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/cache_key.py | {
"start": 2190,
"end": 14637
} | class ____:
"""Mixin for objects which can produce a cache key.
This class is usually in a hierarchy that starts with the
:class:`.HasTraverseInternals` base, but this is optional. Currently,
the class should be able to work on its own without including
:class:`.HasTraverseInternals`.
.. seealso::
:class:`.CacheKey`
:ref:`sql_caching`
"""
__slots__ = ()
_cache_key_traversal: _CacheKeyTraversalType = NO_CACHE
_is_has_cache_key = True
_hierarchy_supports_caching = True
"""private attribute which may be set to False to prevent the
inherit_cache warning from being emitted for a hierarchy of subclasses.
Currently applies to the :class:`.ExecutableDDLElement` hierarchy which
does not implement caching.
"""
inherit_cache: Optional[bool] = None
"""Indicate if this :class:`.HasCacheKey` instance should make use of the
cache key generation scheme used by its immediate superclass.
The attribute defaults to ``None``, which indicates that a construct has
not yet taken into account whether or not its appropriate for it to
participate in caching; this is functionally equivalent to setting the
value to ``False``, except that a warning is also emitted.
This flag can be set to ``True`` on a particular class, if the SQL that
corresponds to the object does not change based on attributes which
are local to this class, and not its superclass.
.. seealso::
:ref:`compilerext_caching` - General guideslines for setting the
:attr:`.HasCacheKey.inherit_cache` attribute for third-party or user
defined SQL constructs.
"""
__slots__ = ()
_generated_cache_key_traversal: Any
@classmethod
def _generate_cache_attrs(
cls,
) -> Union[_CacheKeyTraversalDispatchType, Literal[CacheConst.NO_CACHE]]:
"""generate cache key dispatcher for a new class.
This sets the _generated_cache_key_traversal attribute once called
so should only be called once per class.
"""
inherit_cache = cls.__dict__.get("inherit_cache", None)
inherit = bool(inherit_cache)
if inherit:
_cache_key_traversal = getattr(cls, "_cache_key_traversal", None)
if _cache_key_traversal is None:
try:
assert issubclass(cls, HasTraverseInternals)
_cache_key_traversal = cls._traverse_internals
except AttributeError:
cls._generated_cache_key_traversal = NO_CACHE
return NO_CACHE
assert _cache_key_traversal is not NO_CACHE, (
f"class {cls} has _cache_key_traversal=NO_CACHE, "
"which conflicts with inherit_cache=True"
)
# TODO: wouldn't we instead get this from our superclass?
# also, our superclass may not have this yet, but in any case,
# we'd generate for the superclass that has it. this is a little
# more complicated, so for the moment this is a little less
# efficient on startup but simpler.
return _cache_key_traversal_visitor.generate_dispatch(
cls,
_cache_key_traversal,
"_generated_cache_key_traversal",
)
else:
_cache_key_traversal = cls.__dict__.get(
"_cache_key_traversal", None
)
if _cache_key_traversal is None:
_cache_key_traversal = cls.__dict__.get(
"_traverse_internals", None
)
if _cache_key_traversal is None:
cls._generated_cache_key_traversal = NO_CACHE
if (
inherit_cache is None
and cls._hierarchy_supports_caching
):
util.warn(
"Class %s will not make use of SQL compilation "
"caching as it does not set the 'inherit_cache' "
"attribute to ``True``. This can have "
"significant performance implications including "
"some performance degradations in comparison to "
"prior SQLAlchemy versions. Set this attribute "
"to True if this object can make use of the cache "
"key generated by the superclass. Alternatively, "
"this attribute may be set to False which will "
"disable this warning." % (cls.__name__),
code="cprf",
)
return NO_CACHE
return _cache_key_traversal_visitor.generate_dispatch(
cls,
_cache_key_traversal,
"_generated_cache_key_traversal",
)
@util.preload_module("sqlalchemy.sql.elements")
def _gen_cache_key(
self, anon_map: anon_map, bindparams: List[BindParameter[Any]]
) -> Optional[Tuple[Any, ...]]:
"""return an optional cache key.
The cache key is a tuple which can contain any series of
objects that are hashable and also identifies
this object uniquely within the presence of a larger SQL expression
or statement, for the purposes of caching the resulting query.
The cache key should be based on the SQL compiled structure that would
ultimately be produced. That is, two structures that are composed in
exactly the same way should produce the same cache key; any difference
in the structures that would affect the SQL string or the type handlers
should result in a different cache key.
If a structure cannot produce a useful cache key, the NO_CACHE
symbol should be added to the anon_map and the method should
return None.
"""
cls = self.__class__
id_, found = anon_map.get_anon(self)
if found:
return (id_, cls)
dispatcher: Union[
Literal[CacheConst.NO_CACHE],
_CacheKeyTraversalDispatchType,
]
try:
dispatcher = cls.__dict__["_generated_cache_key_traversal"]
except KeyError:
# traversals.py -> _preconfigure_traversals()
# may be used to run these ahead of time, but
# is not enabled right now.
# this block will generate any remaining dispatchers.
dispatcher = cls._generate_cache_attrs()
if dispatcher is NO_CACHE:
anon_map[NO_CACHE] = True
return None
result: Tuple[Any, ...] = (id_, cls)
# inline of _cache_key_traversal_visitor.run_generated_dispatch()
for attrname, obj, meth in dispatcher(
self, _cache_key_traversal_visitor
):
if obj is not None:
# TODO: see if C code can help here as Python lacks an
# efficient switch construct
if meth is STATIC_CACHE_KEY:
sck = obj._static_cache_key
if sck is NO_CACHE:
anon_map[NO_CACHE] = True
return None
result += (attrname, sck)
elif meth is ANON_NAME:
elements = util.preloaded.sql_elements
if isinstance(obj, elements._anonymous_label):
obj = obj.apply_map(anon_map) # type: ignore
result += (attrname, obj)
elif meth is CALL_GEN_CACHE_KEY:
result += (
attrname,
obj._gen_cache_key(anon_map, bindparams),
)
# remaining cache functions are against
# Python tuples, dicts, lists, etc. so we can skip
# if they are empty
elif obj:
if meth is CACHE_IN_PLACE:
result += (attrname, obj)
elif meth is PROPAGATE_ATTRS:
result += (
attrname,
obj["compile_state_plugin"],
(
obj["plugin_subject"]._gen_cache_key(
anon_map, bindparams
)
if obj["plugin_subject"]
else None
),
)
elif meth is InternalTraversal.dp_annotations_key:
# obj is here is the _annotations dict. Table uses
# a memoized version of it. however in other cases,
# we generate it given anon_map as we may be from a
# Join, Aliased, etc.
# see #8790
if self._gen_static_annotations_cache_key: # type: ignore # noqa: E501
result += self._annotations_cache_key # type: ignore # noqa: E501
else:
result += self._gen_annotations_cache_key(anon_map) # type: ignore # noqa: E501
elif (
meth is InternalTraversal.dp_clauseelement_list
or meth is InternalTraversal.dp_clauseelement_tuple
or meth
is InternalTraversal.dp_memoized_select_entities
):
result += (
attrname,
tuple(
[
elem._gen_cache_key(anon_map, bindparams)
for elem in obj
]
),
)
else:
result += meth( # type: ignore
attrname, obj, self, anon_map, bindparams
)
return result
def _generate_cache_key(self) -> Optional[CacheKey]:
"""return a cache key.
The cache key is a tuple which can contain any series of
objects that are hashable and also identifies
this object uniquely within the presence of a larger SQL expression
or statement, for the purposes of caching the resulting query.
The cache key should be based on the SQL compiled structure that would
ultimately be produced. That is, two structures that are composed in
exactly the same way should produce the same cache key; any difference
in the structures that would affect the SQL string or the type handlers
should result in a different cache key.
The cache key returned by this method is an instance of
:class:`.CacheKey`, which consists of a tuple representing the
cache key, as well as a list of :class:`.BindParameter` objects
which are extracted from the expression. While two expressions
that produce identical cache key tuples will themselves generate
identical SQL strings, the list of :class:`.BindParameter` objects
indicates the bound values which may have different values in
each one; these bound parameters must be consulted in order to
execute the statement with the correct parameters.
a :class:`_expression.ClauseElement` structure that does not implement
a :meth:`._gen_cache_key` method and does not implement a
:attr:`.traverse_internals` attribute will not be cacheable; when
such an element is embedded into a larger structure, this method
will return None, indicating no cache key is available.
"""
bindparams: List[BindParameter[Any]] = []
_anon_map = anon_map()
key = self._gen_cache_key(_anon_map, bindparams)
if NO_CACHE in _anon_map:
return None
else:
assert key is not None
return CacheKey(
key,
bindparams,
_anon_map.get(CacheConst.PARAMS), # type: ignore[arg-type]
)
| HasCacheKey |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_type_checking/runtime_evaluated_base_classes_1.py | {
"start": 265,
"end": 321
} | class ____(pydantic.BaseModel):
x: datetime.datetime
| A |
python | pytorch__pytorch | test/dynamo/test_aot_compile.py | {
"start": 2677,
"end": 2882
} | class ____(torch.nn.Module):
def forward(self, x):
chunk = x.chunk(2, dim=-1)
y = chunk[0]
y_repeat = y.repeat_interleave(2, dim=-1)
return y_repeat
| RepeatInterleaveModule |
python | facebookresearch__faiss | demos/offline_ivf/dataset.py | {
"start": 2071,
"end": 5743
} | class ____:
def __init__(
self,
root: str,
file_descriptors: List[FileDescriptor],
d: int,
normalize: bool,
size: int,
):
assert os.path.exists(root)
self.root = root
self.file_descriptors = file_descriptors
self.d = d
self.normalize = normalize
self.size = size
self.file_offsets = [0]
t = 0
for f in self.file_descriptors:
xb = _memmap_vecs(
f"{self.root}/{f.name}", f.format, f.dtype, f.size, self.d
)
t += xb.shape[0]
self.file_offsets.append(t)
assert (
t == self.size
), "the sum of num of embeddings per file!=total num of embeddings"
def iterate(self, start: int, batch_size: int, dt: np.dtype):
buffer = np.empty(shape=(batch_size, self.d), dtype=dt)
rem = 0
for f in self.file_descriptors:
if start >= f.size:
start -= f.size
continue
logging.info(f"processing: {f.name}...")
xb = _memmap_vecs(
f"{self.root}/{f.name}",
f.format,
f.dtype,
f.size,
self.d,
)
if start > 0:
xb = xb[start:]
start = 0
req = min(batch_size - rem, xb.shape[0])
buffer[rem:rem + req] = xb[:req]
rem += req
if rem == batch_size:
if self.normalize:
faiss.normalize_L2(buffer)
yield buffer.copy()
rem = 0
for i in range(req, xb.shape[0], batch_size):
j = i + batch_size
if j <= xb.shape[0]:
tmp = xb[i:j].astype(dt)
if self.normalize:
faiss.normalize_L2(tmp)
yield tmp
else:
rem = xb.shape[0] - i
buffer[:rem] = xb[i:j]
if rem > 0:
tmp = buffer[:rem]
if self.normalize:
faiss.normalize_L2(tmp)
yield tmp
def get(self, idx: List[int]):
n = len(idx)
fidx = np.searchsorted(self.file_offsets, idx, "right")
res = np.empty(shape=(len(idx), self.d), dtype=np.float32)
for r, id, fid in zip(range(n), idx, fidx):
assert fid > 0 and fid <= len(self.file_descriptors), f"{fid}"
f = self.file_descriptors[fid - 1]
# deferring normalization until after reading the vec
vecs = _memmap_vecs(
f"{self.root}/{f.name}", f.format, f.dtype, f.size, self.d
)
i = id - self.file_offsets[fid - 1]
assert i >= 0 and i < vecs.shape[0]
res[r, :] = vecs[i] # TODO: find a faster way
if self.normalize:
faiss.normalize_L2(res)
return res
def sample(self, n, idx_fn, vecs_fn):
if vecs_fn and os.path.exists(vecs_fn):
vecs = np.load(vecs_fn)
assert vecs.shape == (n, self.d)
return vecs
if idx_fn and os.path.exists(idx_fn):
idx = np.load(idx_fn)
assert idx.size == n
else:
idx = np.array(sorted(random.sample(range(self.size), n)))
if idx_fn:
np.save(idx_fn, idx)
vecs = self.get(idx)
if vecs_fn:
np.save(vecs_fn, vecs)
return vecs
def get_first_n(self, n, dt):
assert n <= self.size
return next(self.iterate(0, n, dt))
| MultiFileVectorDataset |
python | huggingface__transformers | src/transformers/models/sam2/modeling_sam2.py | {
"start": 44208,
"end": 54306
} | class ____(nn.Module):
def __init__(self, config: Sam2MaskDecoderConfig):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.num_multimask_outputs = config.num_multimask_outputs
self.num_mask_tokens = config.num_multimask_outputs + 1
self.iou_token = nn.Embedding(1, self.hidden_size)
self.mask_tokens = nn.Embedding(self.num_mask_tokens, self.hidden_size)
self.transformer = Sam2TwoWayTransformer(config)
# should we create a new class for this?
self.upscale_conv1 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 4, kernel_size=2, stride=2)
self.upscale_conv2 = nn.ConvTranspose2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=2, stride=2)
self.upscale_layer_norm = Sam2LayerNorm(self.hidden_size // 4, data_format="channels_first")
self.activation = nn.GELU()
mlps_list = []
for _ in range(self.num_mask_tokens):
mlps_list += [Sam2FeedForward(self.hidden_size, self.hidden_size, self.hidden_size // 8, 3)]
self.output_hypernetworks_mlps = nn.ModuleList(mlps_list)
self.iou_prediction_head = Sam2FeedForward(
self.hidden_size,
config.iou_head_hidden_dim,
self.num_mask_tokens,
config.iou_head_depth,
sigmoid_output=True,
)
self.conv_s0 = nn.Conv2d(config.hidden_size, config.hidden_size // 8, kernel_size=1, stride=1)
self.conv_s1 = nn.Conv2d(config.hidden_size, config.hidden_size // 4, kernel_size=1, stride=1)
self.obj_score_token = nn.Embedding(1, self.hidden_size)
self.pred_obj_score_head = Sam2FeedForward(self.hidden_size, self.hidden_size, 1, 3)
self.dynamic_multimask_via_stability = config.dynamic_multimask_via_stability
self.dynamic_multimask_stability_delta = config.dynamic_multimask_stability_delta
self.dynamic_multimask_stability_thresh = config.dynamic_multimask_stability_thresh
def forward(
self,
image_embeddings: torch.Tensor,
image_positional_embeddings: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
multimask_output: bool,
high_resolution_features: list[torch.Tensor],
attention_similarity: Optional[torch.Tensor] = None,
target_embedding: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Predict masks given image and prompt embeddings.
Args:
image_embeddings (`torch.Tensor`):
The embeddings from the image encoder.
image_positional_embeddings (`torch.Tensor`):
Positional encoding with the shape of image_embeddings.
sparse_prompt_embeddings (`torch.Tensor`):
The embeddings of the points and boxes.
dense_prompt_embeddings (`torch.Tensor`):
The embeddings of the mask inputs.
multimask_output (`bool`):
Whether to return multiple masks or a single mask.
high_resolution_features (`list[torch.Tensor]`, *optional*):
The high-resolution features from the vision encoder.
attention_similarity (`torch.Tensor`, *optional*):
The attention similarity tensor.
target_embedding (`torch.Tensor`, *optional*):
The target embedding.
"""
batch_size, num_channels, height, width = image_embeddings.shape
point_batch_size = sparse_prompt_embeddings.shape[1]
# Concatenate output tokens
output_tokens = torch.cat(
[
self.obj_score_token.weight,
self.iou_token.weight,
self.mask_tokens.weight,
],
dim=0,
)
output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1)
if sparse_prompt_embeddings.shape[0] != 0:
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=2)
else:
tokens = output_tokens
point_embeddings = tokens.to(self.iou_token.weight.dtype)
# Expand per-image data in batch direction to be per-mask
image_embeddings = image_embeddings + dense_prompt_embeddings
image_embeddings = image_embeddings.repeat_interleave(point_batch_size, dim=0)
image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0)
# Run the transformer
point_embeddings, image_embeddings = self.transformer(
point_embeddings=point_embeddings,
image_embeddings=image_embeddings,
image_positional_embeddings=image_positional_embeddings,
attention_similarity=attention_similarity,
target_embedding=target_embedding,
**kwargs,
)
iou_token_out = point_embeddings[:, :, 1, :]
mask_tokens_out = point_embeddings[:, :, 2 : (2 + self.num_mask_tokens), :]
# Upscale mask embeddings and predict masks using the mask tokens
image_embeddings = image_embeddings.transpose(2, 3).view(
batch_size * point_batch_size, num_channels, height, width
)
feat_s0, feat_s1 = high_resolution_features
feat_s0 = feat_s0.repeat_interleave(point_batch_size, dim=0)
feat_s1 = feat_s1.repeat_interleave(point_batch_size, dim=0)
upscaled_embedding = self.upscale_conv1(image_embeddings) + feat_s1
upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding))
upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding) + feat_s0)
hyper_in_list: list[torch.Tensor] = []
for i in range(self.num_mask_tokens):
current_mlp = self.output_hypernetworks_mlps[i]
hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])]
hyper_in = torch.stack(hyper_in_list, dim=2)
_, num_channels, height, width = upscaled_embedding.shape
upscaled_embedding = upscaled_embedding.view(batch_size, point_batch_size, num_channels, height * width)
masks = (hyper_in @ upscaled_embedding).view(batch_size, point_batch_size, -1, height, width)
# Generate mask quality predictions
iou_pred = self.iou_prediction_head(iou_token_out)
object_score_logits = self.pred_obj_score_head(point_embeddings[:, :, 0, :])
# Select the correct mask or masks for output
if multimask_output:
mask_slice = slice(1, None)
masks = masks[:, :, mask_slice, :, :]
iou_pred = iou_pred[:, :, mask_slice]
elif self.dynamic_multimask_via_stability and not self.training:
mask_slice = slice(0, 1)
masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred)
else:
mask_slice = slice(0, 1)
masks = masks[:, :, mask_slice, :, :]
iou_pred = iou_pred[:, :, mask_slice]
sam_tokens_out = mask_tokens_out[:, :, mask_slice] # [b, 3, c] shape
return masks, iou_pred, sam_tokens_out, object_score_logits
def _get_stability_scores(self, mask_logits):
"""
Compute stability scores of the mask logits based on the IoU between upper and
lower thresholds.
"""
mask_logits = mask_logits.flatten(-2)
stability_delta = self.dynamic_multimask_stability_delta
area_i = torch.sum(mask_logits > stability_delta, dim=-1).float()
area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float()
stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0)
return stability_scores
def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
"""
When outputting a single mask, if the stability score from the current single-mask
output (based on output token 0) falls below a threshold, we instead select from
multi-mask outputs (based on output token 1~3) the mask with the highest predicted
IoU score. This is intended to ensure a valid mask for both clicking and tracking.
"""
# The best mask from multimask output tokens (1~3)
multimask_logits = all_mask_logits[:, :, 1:, :, :]
multimask_iou_scores = all_iou_scores[:, :, 1:]
best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1) # [B, P]
best_scores_inds_expanded = best_scores_inds.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
best_scores_inds_expanded = best_scores_inds_expanded.expand(
-1, -1, 1, multimask_logits.size(-2), multimask_logits.size(-1)
)
best_multimask_logits = torch.gather(multimask_logits, 2, best_scores_inds_expanded) # [B, P, 1, H, W]
best_multimask_iou_scores = torch.gather(multimask_iou_scores, 2, best_scores_inds.unsqueeze(-1)) # [B, P, 1]
# The mask from singlemask output token 0 and its stability score
singlemask_logits = all_mask_logits[:, :, 0:1, :, :]
singlemask_iou_scores = all_iou_scores[:, :, 0:1]
stability_scores = self._get_stability_scores(singlemask_logits)
is_stable = stability_scores >= self.dynamic_multimask_stability_thresh
# Dynamically fall back to best multimask output upon low stability scores.
mask_logits_out = torch.where(
is_stable[..., None, None].expand_as(singlemask_logits),
singlemask_logits,
best_multimask_logits,
)
iou_scores_out = torch.where(
is_stable.expand_as(singlemask_iou_scores),
singlemask_iou_scores,
best_multimask_iou_scores,
)
return mask_logits_out, iou_scores_out
@auto_docstring(
custom_intro="""
Segment Anything Model 2 (SAM 2) for generating segmentation masks, given an input image and
input points and labels, boxes, or masks.
"""
)
| Sam2MaskDecoder |
python | scipy__scipy | scipy/ndimage/tests/test_measurements.py | {
"start": 16033,
"end": 51403
} | class ____:
def test_label_default_dtype(self, xp):
test_array = np.random.rand(10, 10)
test_array = xp.asarray(test_array)
label, no_features = ndimage.label(test_array > 0.5)
assert label.dtype in (xp.int32, xp.int64)
# Shouldn't raise an exception
ndimage.find_objects(label)
def test_find_objects01(self, xp):
data = xp.ones([], dtype=xp.int64)
out = ndimage.find_objects(data)
assert out == [()]
def test_find_objects02(self, xp):
data = xp.zeros([], dtype=xp.int64)
out = ndimage.find_objects(data)
assert out == []
def test_find_objects03(self, xp):
data = xp.ones([1], dtype=xp.int64)
out = ndimage.find_objects(data)
assert out == [(slice(0, 1, None),)]
def test_find_objects04(self, xp):
data = xp.zeros([1], dtype=xp.int64)
out = ndimage.find_objects(data)
assert out == []
def test_find_objects05(self, xp):
data = xp.ones([5], dtype=xp.int64)
out = ndimage.find_objects(data)
assert out == [(slice(0, 5, None),)]
def test_find_objects06(self, xp):
data = xp.asarray([1, 0, 2, 2, 0, 3])
out = ndimage.find_objects(data)
assert out == [(slice(0, 1, None),),
(slice(2, 4, None),),
(slice(5, 6, None),)]
def test_find_objects07(self, xp):
data = xp.asarray([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
out = ndimage.find_objects(data)
assert out == []
def test_find_objects08(self, xp):
data = xp.asarray([[1, 0, 0, 0, 0, 0],
[0, 0, 2, 2, 0, 0],
[0, 0, 2, 2, 2, 0],
[3, 3, 0, 0, 0, 0],
[3, 3, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 0]])
out = ndimage.find_objects(data)
assert out == [(slice(0, 1, None), slice(0, 1, None)),
(slice(1, 3, None), slice(2, 5, None)),
(slice(3, 5, None), slice(0, 2, None)),
(slice(5, 6, None), slice(3, 5, None))]
def test_find_objects09(self, xp):
data = xp.asarray([[1, 0, 0, 0, 0, 0],
[0, 0, 2, 2, 0, 0],
[0, 0, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 0]])
out = ndimage.find_objects(data)
assert out == [(slice(0, 1, None), slice(0, 1, None)),
(slice(1, 3, None), slice(2, 5, None)),
None,
(slice(5, 6, None), slice(3, 5, None))]
@make_xp_test_case(ndimage.value_indices)
def test_value_indices01(xp):
"Test dictionary keys and entries"
data = xp.asarray([[1, 0, 0, 0, 0, 0],
[0, 0, 2, 2, 0, 0],
[0, 0, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 0]])
vi = ndimage.value_indices(data, ignore_value=0)
true_keys = [1, 2, 4]
assert list(vi.keys()) == true_keys
truevi = {k: xp.nonzero(data == k) for k in true_keys}
vi = ndimage.value_indices(data, ignore_value=0)
assert vi.keys() == truevi.keys()
for key in vi.keys():
assert len(vi[key]) == len(truevi[key])
for v, true_v in zip(vi[key], truevi[key]):
xp_assert_equal(v, true_v)
@make_xp_test_case(ndimage.value_indices)
def test_value_indices02(xp):
"Test input checking"
data = xp.zeros((5, 4), dtype=xp.float32)
msg = "Parameter 'arr' must be an integer array"
with assert_raises(ValueError, match=msg):
ndimage.value_indices(data)
@make_xp_test_case(ndimage.value_indices)
def test_value_indices03(xp):
"Test different input array shapes, from 1-D to 4-D"
for shape in [(36,), (18, 2), (3, 3, 4), (3, 3, 2, 2)]:
a = np.asarray((12*[1]+12*[2]+12*[3]), dtype=np.int32)
a = np.reshape(a, shape)
trueKeys = np.unique(a)
a = xp.asarray(a)
vi = ndimage.value_indices(a)
assert list(vi.keys()) == list(trueKeys)
for k in [int(x) for x in trueKeys]:
trueNdx = xp.nonzero(a == k)
assert len(vi[k]) == len(trueNdx)
for vik, true_vik in zip(vi[k], trueNdx):
xp_assert_equal(vik, true_vik)
@make_xp_test_case(ndimage.sum)
def test_sum01(xp):
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([], dtype=dtype)
output = ndimage.sum(input)
assert output == 0
@make_xp_test_case(ndimage.sum)
def test_sum02(xp):
for type in types:
dtype = getattr(xp, type)
input = xp.zeros([0, 4], dtype=dtype)
output = ndimage.sum(input)
assert output == 0
@make_xp_test_case(ndimage.sum)
def test_sum03(xp):
for type in types:
dtype = getattr(xp, type)
input = xp.ones([], dtype=dtype)
output = ndimage.sum(input)
assert_almost_equal(output, xp.asarray(1.0), check_0d=False)
@make_xp_test_case(ndimage.sum)
def test_sum04(xp):
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([1, 2], dtype=dtype)
output = ndimage.sum(input)
assert_almost_equal(output, xp.asarray(3.0), check_0d=False)
@make_xp_test_case(ndimage.sum)
def test_sum05(xp):
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output = ndimage.sum(input)
assert_almost_equal(output, xp.asarray(10.0), check_0d=False)
@make_xp_test_case(ndimage.sum)
def test_sum06(xp):
labels = np.asarray([], dtype=bool)
labels = xp.asarray(labels)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([], dtype=dtype)
output = ndimage.sum(input, labels=labels)
assert output == 0
@make_xp_test_case(ndimage.sum)
def test_sum07(xp):
labels = np.ones([0, 4], dtype=bool)
labels = xp.asarray(labels)
for type in types:
dtype = getattr(xp, type)
input = xp.zeros([0, 4], dtype=dtype)
output = ndimage.sum(input, labels=labels)
assert output == 0
@make_xp_test_case(ndimage.sum)
def test_sum08(xp):
labels = np.asarray([1, 0], dtype=bool)
labels = xp.asarray(labels)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([1, 2], dtype=dtype)
output = ndimage.sum(input, labels=labels)
assert output == 1
@make_xp_test_case(ndimage.sum)
def test_sum09(xp):
labels = np.asarray([1, 0], dtype=bool)
labels = xp.asarray(labels)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output = ndimage.sum(input, labels=labels)
assert_almost_equal(output, xp.asarray(4.0), check_0d=False)
@make_xp_test_case(ndimage.sum)
def test_sum10(xp):
labels = np.asarray([1, 0], dtype=bool)
input = np.asarray([[1, 2], [3, 4]], dtype=bool)
labels = xp.asarray(labels)
input = xp.asarray(input)
output = ndimage.sum(input, labels=labels)
assert_almost_equal(output, xp.asarray(2.0), check_0d=False)
@make_xp_test_case(ndimage.sum)
def test_sum11(xp):
labels = xp.asarray([1, 2], dtype=xp.int8)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output = ndimage.sum(input, labels=labels,
index=2)
assert_almost_equal(output, xp.asarray(6.0), check_0d=False)
@make_xp_test_case(ndimage.sum)
def test_sum12(xp):
labels = xp.asarray([[1, 2], [2, 4]], dtype=xp.int8)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output = ndimage.sum(input, labels=labels, index=xp.asarray([4, 8, 2]))
assert_array_almost_equal(output, xp.asarray([4.0, 0.0, 5.0]))
@make_xp_test_case(ndimage.sum)
def test_sum_labels(xp):
labels = xp.asarray([[1, 2], [2, 4]], dtype=xp.int8)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output_sum = ndimage.sum(input, labels=labels, index=xp.asarray([4, 8, 2]))
output_labels = ndimage.sum_labels(
input, labels=labels, index=xp.asarray([4, 8, 2]))
assert xp.all(output_sum == output_labels)
assert_array_almost_equal(output_labels, xp.asarray([4.0, 0.0, 5.0]))
@make_xp_test_case(ndimage.mean)
def test_mean01(xp):
labels = np.asarray([1, 0], dtype=bool)
labels = xp.asarray(labels)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output = ndimage.mean(input, labels=labels)
assert_almost_equal(output, xp.asarray(2.0), check_0d=False)
@make_xp_test_case(ndimage.mean)
def test_mean02(xp):
labels = np.asarray([1, 0], dtype=bool)
input = np.asarray([[1, 2], [3, 4]], dtype=bool)
labels = xp.asarray(labels)
input = xp.asarray(input)
output = ndimage.mean(input, labels=labels)
assert_almost_equal(output, xp.asarray(1.0), check_0d=False)
@make_xp_test_case(ndimage.mean)
def test_mean03(xp):
labels = xp.asarray([1, 2])
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output = ndimage.mean(input, labels=labels,
index=2)
assert_almost_equal(output, xp.asarray(3.0), check_0d=False)
@make_xp_test_case(ndimage.mean)
def test_mean04(xp):
labels = xp.asarray([[1, 2], [2, 4]], dtype=xp.int8)
with np.errstate(all='ignore'):
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output = ndimage.mean(input, labels=labels,
index=xp.asarray([4, 8, 2]))
# XXX: output[[0, 2]] does not work in array-api-strict; annoying
# assert_array_almost_equal(output[[0, 2]], xp.asarray([4.0, 2.5]))
assert output[0] == 4.0
assert output[2] == 2.5
assert xp.isnan(output[1])
@make_xp_test_case(ndimage.minimum)
def test_minimum01(xp):
labels = np.asarray([1, 0], dtype=bool)
labels = xp.asarray(labels)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output = ndimage.minimum(input, labels=labels)
assert_almost_equal(output, xp.asarray(1.0), check_0d=False)
@make_xp_test_case(ndimage.minimum)
def test_minimum02(xp):
labels = np.asarray([1, 0], dtype=bool)
input = np.asarray([[2, 2], [2, 4]], dtype=bool)
labels = xp.asarray(labels)
input = xp.asarray(input)
output = ndimage.minimum(input, labels=labels)
assert_almost_equal(output, xp.asarray(1.0), check_0d=False)
@make_xp_test_case(ndimage.minimum)
def test_minimum03(xp):
labels = xp.asarray([1, 2])
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output = ndimage.minimum(input, labels=labels,
index=2)
assert_almost_equal(output, xp.asarray(2.0), check_0d=False)
@make_xp_test_case(ndimage.minimum)
def test_minimum04(xp):
labels = xp.asarray([[1, 2], [2, 3]])
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output = ndimage.minimum(input, labels=labels,
index=xp.asarray([2, 3, 8]))
assert_array_almost_equal(output, xp.asarray([2.0, 4.0, 0.0]))
@make_xp_test_case(ndimage.maximum)
def test_maximum01(xp):
labels = np.asarray([1, 0], dtype=bool)
labels = xp.asarray(labels)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output = ndimage.maximum(input, labels=labels)
assert_almost_equal(output, xp.asarray(3.0), check_0d=False)
@make_xp_test_case(ndimage.maximum)
def test_maximum02(xp):
labels = np.asarray([1, 0], dtype=bool)
input = np.asarray([[2, 2], [2, 4]], dtype=bool)
labels = xp.asarray(labels)
input = xp.asarray(input)
output = ndimage.maximum(input, labels=labels)
assert_almost_equal(output, xp.asarray(1.0), check_0d=False)
@make_xp_test_case(ndimage.maximum)
def test_maximum03(xp):
labels = xp.asarray([1, 2])
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output = ndimage.maximum(input, labels=labels,
index=2)
assert_almost_equal(output, xp.asarray(4.0), check_0d=False)
@make_xp_test_case(ndimage.maximum)
def test_maximum04(xp):
labels = xp.asarray([[1, 2], [2, 3]])
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output = ndimage.maximum(input, labels=labels,
index=xp.asarray([2, 3, 8]))
assert_array_almost_equal(output, xp.asarray([3.0, 4.0, 0.0]))
@make_xp_test_case(ndimage.maximum)
def test_maximum05(xp):
# Regression test for ticket #501 (Trac)
x = xp.asarray([-3, -2, -1])
assert ndimage.maximum(x) == -1
@make_xp_test_case(ndimage.median)
def test_median01(xp):
a = xp.asarray([[1, 2, 0, 1],
[5, 3, 0, 4],
[0, 0, 0, 7],
[9, 3, 0, 0]])
labels = xp.asarray([[1, 1, 0, 2],
[1, 1, 0, 2],
[0, 0, 0, 2],
[3, 3, 0, 0]])
output = ndimage.median(a, labels=labels, index=xp.asarray([1, 2, 3]))
assert_array_almost_equal(output, xp.asarray([2.5, 4.0, 6.0]))
@make_xp_test_case(ndimage.median)
def test_median02(xp):
a = xp.asarray([[1, 2, 0, 1],
[5, 3, 0, 4],
[0, 0, 0, 7],
[9, 3, 0, 0]])
output = ndimage.median(a)
assert_almost_equal(output, xp.asarray(1.0), check_0d=False)
@make_xp_test_case(ndimage.median)
def test_median03(xp):
a = xp.asarray([[1, 2, 0, 1],
[5, 3, 0, 4],
[0, 0, 0, 7],
[9, 3, 0, 0]])
labels = xp.asarray([[1, 1, 0, 2],
[1, 1, 0, 2],
[0, 0, 0, 2],
[3, 3, 0, 0]])
output = ndimage.median(a, labels=labels)
assert_almost_equal(output, xp.asarray(3.0), check_0d=False)
@make_xp_test_case(ndimage.median)
def test_median_gh12836_bool(xp):
# test boolean addition fix on example from gh-12836
a = np.asarray([1, 1], dtype=bool)
a = xp.asarray(a)
output = ndimage.median(a, labels=xp.ones((2,)), index=xp.asarray([1]))
assert_array_almost_equal(output, xp.asarray([1.0]))
@make_xp_test_case(ndimage.median)
def test_median_no_int_overflow(xp):
# test integer overflow fix on example from gh-12836
a = xp.asarray([65, 70], dtype=xp.int8)
output = ndimage.median(a, labels=xp.ones((2,)), index=xp.asarray([1]))
assert_array_almost_equal(output, xp.asarray([67.5]))
@make_xp_test_case(ndimage.variance)
def test_variance01(xp):
with np.errstate(all='ignore'):
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([], dtype=dtype)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Mean of empty slice", RuntimeWarning)
output = ndimage.variance(input)
assert xp.isnan(output)
@make_xp_test_case(ndimage.variance)
def test_variance02(xp):
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([1], dtype=dtype)
output = ndimage.variance(input)
assert_almost_equal(output, xp.asarray(0.0), check_0d=False)
@make_xp_test_case(ndimage.variance)
def test_variance03(xp):
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([1, 3], dtype=dtype)
output = ndimage.variance(input)
assert_almost_equal(output, xp.asarray(1.0), check_0d=False)
@make_xp_test_case(ndimage.variance)
def test_variance04(xp):
input = np.asarray([1, 0], dtype=bool)
input = xp.asarray(input)
output = ndimage.variance(input)
assert_almost_equal(output, xp.asarray(0.25), check_0d=False)
@make_xp_test_case(ndimage.variance)
def test_variance05(xp):
labels = xp.asarray([2, 2, 3])
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([1, 3, 8], dtype=dtype)
output = ndimage.variance(input, labels, 2)
assert_almost_equal(output, xp.asarray(1.0), check_0d=False)
@make_xp_test_case(ndimage.variance)
def test_variance06(xp):
labels = xp.asarray([2, 2, 3, 3, 4])
with np.errstate(all='ignore'):
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([1, 3, 8, 10, 8], dtype=dtype)
output = ndimage.variance(input, labels, xp.asarray([2, 3, 4]))
assert_array_almost_equal(output, xp.asarray([1.0, 1.0, 0.0]))
@make_xp_test_case(ndimage.standard_deviation)
def test_standard_deviation01(xp):
with np.errstate(all='ignore'):
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([], dtype=dtype)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Mean of empty slice", RuntimeWarning)
output = ndimage.standard_deviation(input)
assert xp.isnan(output)
@make_xp_test_case(ndimage.standard_deviation)
def test_standard_deviation02(xp):
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([1], dtype=dtype)
output = ndimage.standard_deviation(input)
assert_almost_equal(output, xp.asarray(0.0), check_0d=False)
@make_xp_test_case(ndimage.standard_deviation)
def test_standard_deviation03(xp):
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([1, 3], dtype=dtype)
output = ndimage.standard_deviation(input)
assert_almost_equal(output, xp.asarray(1.0), check_0d=False)
@make_xp_test_case(ndimage.standard_deviation)
def test_standard_deviation04(xp):
input = np.asarray([1, 0], dtype=bool)
input = xp.asarray(input)
output = ndimage.standard_deviation(input)
assert_almost_equal(output, xp.asarray(0.5), check_0d=False)
@make_xp_test_case(ndimage.standard_deviation)
def test_standard_deviation05(xp):
labels = xp.asarray([2, 2, 3])
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([1, 3, 8], dtype=dtype)
output = ndimage.standard_deviation(input, labels, 2)
assert_almost_equal(output, xp.asarray(1.0), check_0d=False)
@make_xp_test_case(ndimage.standard_deviation)
def test_standard_deviation06(xp):
labels = xp.asarray([2, 2, 3, 3, 4])
with np.errstate(all='ignore'):
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([1, 3, 8, 10, 8], dtype=dtype)
output = ndimage.standard_deviation(
input, labels, xp.asarray([2, 3, 4])
)
assert_array_almost_equal(output, xp.asarray([1.0, 1.0, 0.0]))
@make_xp_test_case(ndimage.standard_deviation)
def test_standard_deviation07(xp):
labels = xp.asarray([1])
with np.errstate(all='ignore'):
for type in types:
if is_torch(xp) and type == 'uint8':
pytest.xfail("value cannot be converted to type uint8 "
"without overflow")
dtype = getattr(xp, type)
input = xp.asarray([-0.00619519], dtype=dtype)
output = ndimage.standard_deviation(input, labels, xp.asarray([1]))
assert_array_almost_equal(output, xp.asarray([0]))
@make_xp_test_case(ndimage.minimum_position)
def test_minimum_position01(xp):
labels = np.asarray([1, 0], dtype=bool)
labels = xp.asarray(labels)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output = ndimage.minimum_position(input, labels=labels)
assert output == (0, 0)
@make_xp_test_case(ndimage.minimum_position)
def test_minimum_position02(xp):
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[5, 4, 2, 5],
[3, 7, 0, 2],
[1, 5, 1, 1]], dtype=dtype)
output = ndimage.minimum_position(input)
assert output == (1, 2)
@make_xp_test_case(ndimage.minimum_position)
def test_minimum_position03(xp):
input = np.asarray([[5, 4, 2, 5],
[3, 7, 0, 2],
[1, 5, 1, 1]], dtype=bool)
input = xp.asarray(input)
output = ndimage.minimum_position(input)
assert output == (1, 2)
@make_xp_test_case(ndimage.minimum_position)
def test_minimum_position04(xp):
input = np.asarray([[5, 4, 2, 5],
[3, 7, 1, 2],
[1, 5, 1, 1]], dtype=bool)
input = xp.asarray(input)
output = ndimage.minimum_position(input)
assert output == (0, 0)
@make_xp_test_case(ndimage.minimum_position)
def test_minimum_position05(xp):
labels = xp.asarray([1, 2, 0, 4])
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[5, 4, 2, 5],
[3, 7, 0, 2],
[1, 5, 2, 3]], dtype=dtype)
output = ndimage.minimum_position(input, labels)
assert output == (2, 0)
@make_xp_test_case(ndimage.minimum_position)
def test_minimum_position06(xp):
labels = xp.asarray([1, 2, 3, 4])
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[5, 4, 2, 5],
[3, 7, 0, 2],
[1, 5, 1, 1]], dtype=dtype)
output = ndimage.minimum_position(input, labels, 2)
assert output == (0, 1)
@make_xp_test_case(ndimage.minimum_position)
def test_minimum_position07(xp):
labels = xp.asarray([1, 2, 3, 4])
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[5, 4, 2, 5],
[3, 7, 0, 2],
[1, 5, 1, 1]], dtype=dtype)
output = ndimage.minimum_position(input, labels,
xp.asarray([2, 3]))
assert output[0] == (0, 1)
assert output[1] == (1, 2)
@make_xp_test_case(ndimage.maximum_position)
def test_maximum_position01(xp):
labels = np.asarray([1, 0], dtype=bool)
labels = xp.asarray(labels)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output = ndimage.maximum_position(input,
labels=labels)
assert output == (1, 0)
@make_xp_test_case(ndimage.maximum_position)
def test_maximum_position02(xp):
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[5, 4, 2, 5],
[3, 7, 8, 2],
[1, 5, 1, 1]], dtype=dtype)
output = ndimage.maximum_position(input)
assert output == (1, 2)
@make_xp_test_case(ndimage.maximum_position)
def test_maximum_position03(xp):
input = np.asarray([[5, 4, 2, 5],
[3, 7, 8, 2],
[1, 5, 1, 1]], dtype=bool)
input = xp.asarray(input)
output = ndimage.maximum_position(input)
assert output == (0, 0)
@make_xp_test_case(ndimage.maximum_position)
def test_maximum_position04(xp):
labels = xp.asarray([1, 2, 0, 4])
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[5, 4, 2, 5],
[3, 7, 8, 2],
[1, 5, 1, 1]], dtype=dtype)
output = ndimage.maximum_position(input, labels)
assert output == (1, 1)
@make_xp_test_case(ndimage.maximum_position)
def test_maximum_position05(xp):
labels = xp.asarray([1, 2, 0, 4])
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[5, 4, 2, 5],
[3, 7, 8, 2],
[1, 5, 1, 1]], dtype=dtype)
output = ndimage.maximum_position(input, labels, 1)
assert output == (0, 0)
@make_xp_test_case(ndimage.maximum_position)
def test_maximum_position06(xp):
labels = xp.asarray([1, 2, 0, 4])
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[5, 4, 2, 5],
[3, 7, 8, 2],
[1, 5, 1, 1]], dtype=dtype)
output = ndimage.maximum_position(input, labels,
xp.asarray([1, 2]))
assert output[0] == (0, 0)
assert output[1] == (1, 1)
@make_xp_test_case(ndimage.maximum_position)
def test_maximum_position07(xp):
# Test float labels
labels = xp.asarray([1.0, 2.5, 0.0, 4.5])
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[5, 4, 2, 5],
[3, 7, 8, 2],
[1, 5, 1, 1]], dtype=dtype)
output = ndimage.maximum_position(input, labels,
xp.asarray([1.0, 4.5]))
assert output[0] == (0, 0)
assert output[1] == (0, 3)
@make_xp_test_case(ndimage.extrema, ndimage.minimum, ndimage.maximum,
ndimage.minimum_position, ndimage.maximum_position)
def test_extrema01(xp):
labels = np.asarray([1, 0], dtype=bool)
labels = xp.asarray(labels)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output1 = ndimage.extrema(input, labels=labels)
output2 = ndimage.minimum(input, labels=labels)
output3 = ndimage.maximum(input, labels=labels)
output4 = ndimage.minimum_position(input,
labels=labels)
output5 = ndimage.maximum_position(input,
labels=labels)
assert output1 == (output2, output3, output4, output5)
@make_xp_test_case(ndimage.extrema, ndimage.minimum, ndimage.maximum,
ndimage.minimum_position, ndimage.maximum_position)
def test_extrema02(xp):
labels = xp.asarray([1, 2])
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output1 = ndimage.extrema(input, labels=labels,
index=2)
output2 = ndimage.minimum(input, labels=labels,
index=2)
output3 = ndimage.maximum(input, labels=labels,
index=2)
output4 = ndimage.minimum_position(input,
labels=labels, index=2)
output5 = ndimage.maximum_position(input,
labels=labels, index=2)
assert output1 == (output2, output3, output4, output5)
@make_xp_test_case(ndimage.extrema, ndimage.minimum, ndimage.maximum,
ndimage.minimum_position, ndimage.maximum_position)
def test_extrema03(xp):
labels = xp.asarray([[1, 2], [2, 3]])
for type in types:
if is_torch(xp) and type in ("uint16", "uint32", "uint64"):
pytest.xfail("https://github.com/pytorch/pytorch/issues/58734")
dtype = getattr(xp, type)
input = xp.asarray([[1, 2], [3, 4]], dtype=dtype)
output1 = ndimage.extrema(input,
labels=labels,
index=xp.asarray([2, 3, 8]))
output2 = ndimage.minimum(input,
labels=labels,
index=xp.asarray([2, 3, 8]))
output3 = ndimage.maximum(input, labels=labels,
index=xp.asarray([2, 3, 8]))
output4 = ndimage.minimum_position(input,
labels=labels,
index=xp.asarray([2, 3, 8]))
output5 = ndimage.maximum_position(input,
labels=labels,
index=xp.asarray([2, 3, 8]))
assert_array_almost_equal(output1[0], output2)
assert_array_almost_equal(output1[1], output3)
assert output1[2] == output4
assert output1[3] == output5
@make_xp_test_case(ndimage.extrema, ndimage.minimum, ndimage.maximum,
ndimage.minimum_position, ndimage.maximum_position)
def test_extrema04(xp):
labels = xp.asarray([1, 2, 0, 4])
for type in types:
if is_torch(xp) and type in ("uint16", "uint32", "uint64"):
pytest.xfail("https://github.com/pytorch/pytorch/issues/58734")
dtype = getattr(xp, type)
input = xp.asarray([[5, 4, 2, 5],
[3, 7, 8, 2],
[1, 5, 1, 1]], dtype=dtype)
output1 = ndimage.extrema(input, labels, xp.asarray([1, 2]))
output2 = ndimage.minimum(input, labels, xp.asarray([1, 2]))
output3 = ndimage.maximum(input, labels, xp.asarray([1, 2]))
output4 = ndimage.minimum_position(input, labels,
xp.asarray([1, 2]))
output5 = ndimage.maximum_position(input, labels,
xp.asarray([1, 2]))
assert_array_almost_equal(output1[0], output2)
assert_array_almost_equal(output1[1], output3)
assert output1[2] == output4
assert output1[3] == output5
@make_xp_test_case(ndimage.center_of_mass)
def test_center_of_mass01(xp):
expected = (0.0, 0.0)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 0], [0, 0]], dtype=dtype)
output = ndimage.center_of_mass(input)
assert output == expected
@make_xp_test_case(ndimage.center_of_mass)
def test_center_of_mass02(xp):
expected = (1, 0)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[0, 0], [1, 0]], dtype=dtype)
output = ndimage.center_of_mass(input)
assert output == expected
@make_xp_test_case(ndimage.center_of_mass)
def test_center_of_mass03(xp):
expected = (0, 1)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[0, 1], [0, 0]], dtype=dtype)
output = ndimage.center_of_mass(input)
assert output == expected
@make_xp_test_case(ndimage.center_of_mass)
def test_center_of_mass04(xp):
expected = (1, 1)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[0, 0], [0, 1]], dtype=dtype)
output = ndimage.center_of_mass(input)
assert output == expected
@make_xp_test_case(ndimage.center_of_mass)
def test_center_of_mass05(xp):
expected = (0.5, 0.5)
for type in types:
dtype = getattr(xp, type)
input = xp.asarray([[1, 1], [1, 1]], dtype=dtype)
output = ndimage.center_of_mass(input)
assert output == expected
@make_xp_test_case(ndimage.center_of_mass)
def test_center_of_mass06(xp):
expected = (0.5, 0.5)
input = np.asarray([[1, 2], [3, 1]], dtype=bool)
input = xp.asarray(input)
output = ndimage.center_of_mass(input)
assert output == expected
@make_xp_test_case(ndimage.center_of_mass)
def test_center_of_mass07(xp):
labels = xp.asarray([1, 0])
expected = (0.5, 0.0)
input = np.asarray([[1, 2], [3, 1]], dtype=bool)
input = xp.asarray(input)
output = ndimage.center_of_mass(input, labels)
assert output == expected
@make_xp_test_case(ndimage.center_of_mass)
def test_center_of_mass08(xp):
labels = xp.asarray([1, 2])
expected = (0.5, 1.0)
input = np.asarray([[5, 2], [3, 1]], dtype=bool)
input = xp.asarray(input)
output = ndimage.center_of_mass(input, labels, 2)
assert output == expected
@make_xp_test_case(ndimage.center_of_mass)
def test_center_of_mass09(xp):
labels = xp.asarray((1, 2))
expected = xp.asarray([(0.5, 0.0), (0.5, 1.0)], dtype=xp.float64)
input = np.asarray([[1, 2], [1, 1]], dtype=bool)
input = xp.asarray(input)
output = ndimage.center_of_mass(input, labels, xp.asarray([1, 2]))
xp_assert_equal(xp.asarray(output), xp.asarray(expected))
@make_xp_test_case(ndimage.histogram)
def test_histogram01(xp):
expected = xp.ones(10)
input = xp.arange(10)
output = ndimage.histogram(input, 0, 10, 10)
assert_array_almost_equal(output, expected)
@make_xp_test_case(ndimage.histogram)
def test_histogram02(xp):
labels = xp.asarray([1, 1, 1, 1, 2, 2, 2, 2])
expected = xp.asarray([0, 2, 0, 1, 1])
input = xp.asarray([1, 1, 3, 4, 3, 3, 3, 3])
output = ndimage.histogram(input, 0, 4, 5, labels, 1)
assert_array_almost_equal(output, expected)
@skip_xp_backends(np_only=True, reason='object arrays')
@make_xp_test_case(ndimage.histogram)
def test_histogram03(xp):
labels = xp.asarray([1, 0, 1, 1, 2, 2, 2, 2])
expected1 = xp.asarray([0, 1, 0, 1, 1])
expected2 = xp.asarray([0, 0, 0, 3, 0])
input = xp.asarray([1, 1, 3, 4, 3, 5, 3, 3])
output = ndimage.histogram(input, 0, 4, 5, labels, (1, 2))
assert_array_almost_equal(output[0], expected1)
assert_array_almost_equal(output[1], expected2)
@make_xp_test_case(ndimage.mean, ndimage.variance, ndimage.standard_deviation,
ndimage.median, ndimage.minimum, ndimage.maximum)
def test_stat_funcs_2d(xp):
a = xp.asarray([[5, 6, 0, 0, 0], [8, 9, 0, 0, 0], [0, 0, 0, 3, 5]])
lbl = xp.asarray([[1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [0, 0, 0, 2, 2]])
mean = ndimage.mean(a, labels=lbl, index=xp.asarray([1, 2]))
xp_assert_equal(mean, xp.asarray([7.0, 4.0], dtype=xp.float64))
var = ndimage.variance(a, labels=lbl, index=xp.asarray([1, 2]))
xp_assert_equal(var, xp.asarray([2.5, 1.0], dtype=xp.float64))
std = ndimage.standard_deviation(a, labels=lbl, index=xp.asarray([1, 2]))
assert_array_almost_equal(std, xp.sqrt(xp.asarray([2.5, 1.0], dtype=xp.float64)))
med = ndimage.median(a, labels=lbl, index=xp.asarray([1, 2]))
xp_assert_equal(med, xp.asarray([7.0, 4.0], dtype=xp.float64))
min = ndimage.minimum(a, labels=lbl, index=xp.asarray([1, 2]))
xp_assert_equal(min, xp.asarray([5, 3]), check_dtype=False)
max = ndimage.maximum(a, labels=lbl, index=xp.asarray([1, 2]))
xp_assert_equal(max, xp.asarray([9, 5]), check_dtype=False)
@skip_xp_backends("cupy", reason="no watershed_ift on CuPy")
@make_xp_test_case(ndimage.watershed_ift)
| TestFindObjects |
python | jina-ai__jina | jina/enums.py | {
"start": 6868,
"end": 7286
} | class ____(BetterEnum):
"""Data input type in the request generator."""
AUTO = 0 # auto inference the input type from data (!WARN: could be slow as it relies on try-execept)
DOCUMENT = 1 # the input is a full document
CONTENT = 2 # the input is just the content of the document
DICT = 3 # the input is a dictionary representing a Document, needed while pydantic model not available
| DataInputType |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/enumerate_test.py | {
"start": 1267,
"end": 2118
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testEnumerate(self):
components = (["a", "b"], [1, 2], [37.0, 38])
start = constant_op.constant(20, dtype=dtypes.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(components).enumerate(
start)
self.assertEqual(dtypes.int64,
dataset_ops.get_legacy_output_types(dataset)[0])
dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset)
self.assertEqual((), dataset_output_shapes[0])
self.assertEqual([tensor_shape.TensorShape([])] * 3,
[shape for shape in dataset_output_shapes[1]])
self.assertDatasetProduces(dataset, [(20, (b"a", 1, 37.0)),
(21, (b"b", 2, 38.0))])
| EnumerateTest |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 14509,
"end": 14885
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc1 = torch.nn.LazyLinear(10)
self.relu1 = torch.nn.ReLU()
self.fc2 = torch.nn.LazyLinear(1)
self.relu2 = torch.nn.ReLU()
def forward(self, input):
x = self.relu1(self.fc1(input))
y = self.relu2(self.fc2(x))
return y
| LazyMLP |
python | PyCQA__pylint | tests/functional/u/unnecessary/unnecessary_dunder_call.py | {
"start": 1864,
"end": 2027
} | class ____:
def __init__(self, state):
self._state = state
def __eq__(self, other: Any) -> bool:
return self._state.__eq__(other)
| CustomState |
python | google__jax | tests/dtypes_test.py | {
"start": 33276,
"end": 34786
} | class ____(jtu.JaxTestCase):
@parameterized.parameters([True, False])
def test_extended_dtypes_at_rest(self, jit):
# Test a trivial isomorphic-to-float32 extended dtype working with EArray
from jax._src import core
from jax._src.interpreters import pxla
class foo(dtypes.extended): pass
class FooTyRules:
allow_conversion: bool = True
@staticmethod
def physical_element_aval(foo_dtype):
return core.ShapedArray((), dtypes.dtype('float32'))
@staticmethod
def global_sharded_result_handler(aval, out_sharding, committed):
phys_sharding = out_sharding # unlike KeyTyRules, assume same shape
phys_aval = core.physical_aval(aval)
phys_handler_maker = pxla.global_result_handlers[core.ShapedArray]
phys_handler = phys_handler_maker(phys_aval, phys_sharding, committed)
return lambda bufs: earray.EArray(aval, phys_handler(bufs))
@dataclasses.dataclass(frozen=True)
class FooTy(dtypes.ExtendedDType):
name: str = 'foo'
_rules: type = FooTyRules
type: type = foo
# Can we make one?
def f(x):
return jax.lax.convert_element_type(x, FooTy())
if jit:
f = jax.jit(f)
x = f(jnp.arange(3, dtype='float32')) # don't crash
self.assertIsInstance(x.dtype, FooTy)
# Can we consume one?
def g(x):
self.assertIsInstance(x.dtype, FooTy)
return x
if jit:
g = jax.jit(g)
y = g(x)
self.assertIsInstance(y.dtype, FooTy)
| EArrayTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_str.py | {
"start": 98,
"end": 151
} | class ____:
def __str__(self):
return 1
| Int |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_sys.py | {
"start": 6866,
"end": 53183
} | class ____(__TestCase):
def tearDown(self):
test.support.reap_children()
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# gh-125842: Windows uses 32-bit unsigned integers for exit codes
# so a -1 exit code is sometimes interpreted as 0xffff_ffff.
rc, out, err = assert_python_failure('-c', 'import sys; sys.exit(0xffff_ffff)')
self.assertIn(rc, (-1, 0xff, 0xffff_ffff))
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# Overflow results in a -1 exit code, which may be converted to 0xff
# or 0xffff_ffff.
rc, out, err = assert_python_failure('-c', 'import sys; sys.exit(2**128)')
self.assertIn(rc, (-1, 0xff, 0xffff_ffff))
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
@support.requires_subprocess()
def test_exit_codes_under_repl(self):
# GH-129900: SystemExit, or things that raised it, didn't
# get their return code propagated by the REPL
import tempfile
exit_ways = [
"exit",
"__import__('sys').exit",
"raise SystemExit"
]
for exitfunc in exit_ways:
for return_code in (0, 123):
with self.subTest(exitfunc=exitfunc, return_code=return_code):
with tempfile.TemporaryFile("w+") as stdin:
stdin.write(f"{exitfunc}({return_code})\n")
stdin.seek(0)
proc = subprocess.run([sys.executable], stdin=stdin)
self.assertEqual(proc.returncode, return_code)
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_getrecursionlimit(self):
limit = sys.getrecursionlimit()
self.assertIsInstance(limit, int)
self.assertGreater(limit, 1)
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
def test_setrecursionlimit(self):
old_limit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(10_005)
self.assertEqual(sys.getrecursionlimit(), 10_005)
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
finally:
sys.setrecursionlimit(old_limit)
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
old_limit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
# Issue #25274: The recursion limit is too low at the
# current recursion depth
continue
# Issue #5392: test stack overflow after hitting recursion
# limit twice
with self.assertRaises(RecursionError):
f()
with self.assertRaises(RecursionError):
f()
finally:
sys.setrecursionlimit(old_limit)
@test.support.cpython_only
def test_setrecursionlimit_to_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than limit.
old_limit = sys.getrecursionlimit()
try:
depth = support.get_recursion_depth()
with self.subTest(limit=sys.getrecursionlimit(), depth=depth):
# depth + 1 is OK
sys.setrecursionlimit(depth + 1)
# reset the limit to be able to call self.assertRaises()
# context manager
sys.setrecursionlimit(old_limit)
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(depth)
self.assertRegex(str(cm.exception),
"cannot set the recursion limit to [0-9]+ "
"at the recursion depth [0-9]+: "
"the limit is too low")
finally:
sys.setrecursionlimit(old_limit)
@unittest.skipUnless(support.Py_GIL_DISABLED, "only meaningful if the GIL is disabled")
@threading_helper.requires_working_threading()
def test_racing_recursion_limit(self):
from threading import Thread
def something_recursive():
def count(n):
if n > 0:
return count(n - 1) + 1
return 0
count(50)
def set_recursion_limit():
for limit in range(100, 200):
sys.setrecursionlimit(limit)
threads = []
for _ in range(5):
threads.append(Thread(target=set_recursion_limit))
for _ in range(5):
threads.append(Thread(target=something_recursive))
with threading_helper.catch_threading_exception() as cm:
with threading_helper.start_threads(threads):
pass
if cm.exc_value:
raise cm.exc_value
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here originally had to be a global in order for this test to pass
# while tracing with a python function. Tracing used to call
# PyFrame_FastToLocals, which would add a copy of any locals to the
# frame object, causing the ref count to increase by 2 instead of 1.
# While that no longer happens (due to PEP 667), this test case retains
# its original global-based implementation
# PEP 683's immortal objects also made this point moot, since the
# refcount for None doesn't change anyway. Maybe this test should be
# using a different constant value? (e.g. an integer)
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
# Singleton refcnts don't change
self.assertEqual(sys.getrefcount(None), c)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
@unittest.expectedFailure
def test_getframemodulename(self):
# Default depth gets ourselves
self.assertEqual(__name__, sys._getframemodulename())
self.assertEqual("unittest.case", sys._getframemodulename(1))
i = 0
f = sys._getframe(i)
while f:
self.assertEqual(
f.f_globals['__name__'],
sys._getframemodulename(i) or '__main__'
)
i += 1
f2 = f.f_back
try:
f = sys._getframe(i)
except ValueError:
break
self.assertIs(f, f2)
self.assertIsNone(sys._getframemodulename(i))
# sys._current_frames() is a CPython-only gimmick.
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
def test_current_frames(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
try:
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
finally:
# Reap the spawned thread.
leave_g.set()
t.join()
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
def test_current_exceptions(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
g_raised = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
while True:
try:
raise ValueError("oops")
except ValueError:
g_raised.set()
if leave_g.wait(timeout=support.LONG_TIMEOUT):
break
t = threading.Thread(target=f123)
t.start()
g_raised.wait(timeout=support.LONG_TIMEOUT)
try:
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_exceptions()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
self.assertEqual(None, d.pop(main_id))
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
exc_value = d.pop(thread_id)
stack = traceback.extract_stack(exc_value.__traceback__.tb_frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertTrue((sourceline.startswith("if leave_g.wait(") or
sourceline.startswith("g_raised.set()")))
finally:
# Reap the spawned thread.
leave_g.set()
t.join()
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
for arg in sys.argv:
self.assertIsInstance(arg, str)
self.assertIsInstance(sys.orig_argv, list)
for arg in sys.orig_argv:
self.assertIsInstance(arg, str)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 4)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertGreaterEqual(sys.int_info.default_max_str_digits, 500)
self.assertGreaterEqual(sys.int_info.str_digits_check_threshold, 100)
self.assertGreater(sys.int_info.default_max_str_digits,
sys.int_info.str_digits_check_threshold)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.int_info.default_max_str_digits, int)
self.assertIsInstance(sys.int_info.str_digits_check_threshold, int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash13", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
elif algo == 3:
self.assertEqual(sys.hash_info.algorithm, "siphash13")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash13", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.platlibdir, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'pthread-stubs', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
if sys.platform.startswith(("linux", "android", "freebsd")):
self.assertEqual(info.name, "pthread")
elif sys.platform == "win32":
self.assertEqual(info.name, "nt")
elif sys.platform == "emscripten":
self.assertIn(info.name, {"pthread", "pthread-stubs"})
elif sys.platform == "wasi":
self.assertEqual(info.name, "pthread-stubs")
@unittest.skipUnless(support.is_emscripten, "only available on Emscripten")
def test_emscripten_info(self):
self.assertEqual(len(sys._emscripten_info), 4)
self.assertIsInstance(sys._emscripten_info.emscripten_version, tuple)
self.assertIsInstance(sys._emscripten_info.runtime, (str, type(None)))
self.assertIsInstance(sys._emscripten_info.pthreads, bool)
self.assertIsInstance(sys._emscripten_info.shared_memory, bool)
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
has_is_interned = (test.support.check_impl_detail(cpython=True)
or hasattr(sys, '_is_interned'))
self.assertRaises(TypeError, sys.intern)
self.assertRaises(TypeError, sys.intern, b'abc')
if has_is_interned:
self.assertRaises(TypeError, sys._is_interned)
self.assertRaises(TypeError, sys._is_interned, b'abc')
s = "never interned before" + str(random.randrange(0, 10**9))
self.assertTrue(sys.intern(s) is s)
if has_is_interned:
self.assertIs(sys._is_interned(s), True)
s2 = s.swapcase().swapcase()
if has_is_interned:
self.assertIs(sys._is_interned(s2), False)
self.assertTrue(sys.intern(s2) is s)
if has_is_interned:
self.assertIs(sys._is_interned(s2), False)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
if has_is_interned:
self.assertIs(sys._is_interned(S("abc")), False)
@support.cpython_only
@requires_subinterpreters
def test_subinterp_intern_dynamically_allocated(self):
# Implementation detail: Dynamically allocated strings
# are distinct between interpreters
s = "never interned before" + str(random.randrange(0, 10**9))
t = sys.intern(s)
self.assertIs(t, s)
interp = interpreters.create()
interp.exec(textwrap.dedent(f'''
import sys
# set `s`, avoid parser interning & constant folding
s = str({s.encode()!r}, 'utf-8')
t = sys.intern(s)
assert id(t) != {id(s)}, (id(t), {id(s)})
assert id(t) != {id(t)}, (id(t), {id(t)})
'''))
@support.cpython_only
@requires_subinterpreters
def test_subinterp_intern_statically_allocated(self):
# Implementation detail: Statically allocated strings are shared
# between interpreters.
# See Tools/build/generate_global_objects.py for the list
# of strings that are always statically allocated.
for s in ('__init__', 'CANCELLED', '<module>', 'utf-8',
'{{', '', '\n', '_', 'x', '\0', '\N{CEDILLA}', '\xff',
):
with self.subTest(s=s):
t = sys.intern(s)
interp = interpreters.create()
interp.exec(textwrap.dedent(f'''
import sys
# set `s`, avoid parser interning & constant folding
s = str({s.encode()!r}, 'utf-8')
t = sys.intern(s)
assert id(t) == {id(t)}, (id(t), {id(t)})
'''))
@support.cpython_only
@requires_subinterpreters
def test_subinterp_intern_singleton(self):
# Implementation detail: singletons are used for 0- and 1-character
# latin1 strings.
for s in '', '\n', '_', 'x', '\0', '\N{CEDILLA}', '\xff':
with self.subTest(s=s):
interp = interpreters.create()
interp.exec(textwrap.dedent(f'''
import sys
# set `s`, avoid parser interning & constant folding
s = str({s.encode()!r}, 'utf-8')
assert id(s) == {id(s)}
t = sys.intern(s)
'''))
self.assertTrue(sys._is_interned(s))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize",
"dont_write_bytecode", "no_user_site", "no_site",
"ignore_environment", "verbose", "bytes_warning", "quiet",
"hash_randomization", "isolated", "dev_mode", "utf8_mode",
"warn_default_encoding", "safe_path", "int_max_str_digits")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr in ("dev_mode", "safe_path") else int
self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
arg = sys_attr
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type(arg)
with self.assertRaises(TypeError):
attr_type.__new__(attr_type, arg)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
@force_not_colorized
@support.requires_subprocess()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(os_helper.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
@support.requires_subprocess()
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % os_helper.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(os_helper.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@support.requires_subprocess()
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def check_locale_surrogateescape(self, locale):
out = self.c_locale_get_error_handler(locale, isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(locale, encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
# have no any effect
out = self.c_locale_get_error_handler(locale, encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
@support.requires_subprocess()
def test_c_locale_surrogateescape(self):
self.check_locale_surrogateescape('C')
@support.requires_subprocess()
def test_posix_locale_surrogateescape(self):
self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
# Output of sys._debugmallocstats() depends on configure flags.
# The sysconfig vars are not available on Windows.
if sys.platform != "win32":
with_freelists = sysconfig.get_config_var("WITH_FREELISTS")
with_pymalloc = sysconfig.get_config_var("WITH_PYMALLOC")
if with_freelists:
self.assertIn(b"free PyDictObjects", err)
if with_pymalloc:
self.assertIn(b'Small block threshold', err)
if not with_freelists and not with_pymalloc:
self.assertFalse(err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
try:
import _testinternalcapi
except ImportError:
with_pymalloc = support.with_pymalloc()
else:
try:
alloc_name = _testinternalcapi.pymem_getallocatorsname()
except RuntimeError as exc:
# "cannot get allocators name" (ex: tracemalloc is used)
with_pymalloc = True
else:
with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))
# Some sanity checks
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
def test_is_gil_enabled(self):
if support.Py_GIL_DISABLED:
self.assertIs(type(sys._is_gil_enabled()), bool)
else:
self.assertTrue(sys._is_gil_enabled())
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_issue20602(self):
# sys.flags and sys.float_info were wiped during shutdown.
code = """if 1:
import sys
class A:
def __del__(self, sys=sys):
print(sys.flags)
print(sys.float_info)
a = A()
"""
rc, out, err = assert_python_ok('-c', code)
out = out.splitlines()
self.assertIn(b'sys.flags', out[0])
self.assertIn(b'sys.float_info', out[1])
def test_sys_ignores_cleaning_up_user_data(self):
code = """if 1:
import struct, sys
class C:
def __init__(self):
self.pack = struct.pack
def __del__(self):
self.pack('I', -42)
sys.x = C()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
self.assertEqual(stdout.rstrip(), b"")
self.assertEqual(stderr.rstrip(), b"")
@unittest.skipUnless(sys.platform == "android", "Android only")
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
@force_not_colorized
@support.requires_subprocess()
def test_sys_tracebacklimit(self):
code = """if 1:
import sys
def f1():
1 / 0
def f2():
f1()
sys.tracebacklimit = %r
f2()
"""
def check(tracebacklimit, expected):
p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
stderr=subprocess.PIPE)
out = p.communicate()[1]
self.assertEqual(out.splitlines(), expected)
traceback = [
b'Traceback (most recent call last):',
b' File "<string>", line 8, in <module>',
b' f2()',
b' ~~^^',
b' File "<string>", line 6, in f2',
b' f1()',
b' ~~^^',
b' File "<string>", line 4, in f1',
b' 1 / 0',
b' ~~^~~',
b'ZeroDivisionError: division by zero'
]
check(10, traceback)
check(3, traceback)
check(2, traceback[:1] + traceback[4:])
check(1, traceback[:1] + traceback[7:])
check(0, [traceback[-1]])
check(-1, [traceback[-1]])
check(1<<1000, traceback)
check(-1<<1000, [traceback[-1]])
check(None, traceback)
def test_no_duplicates_in_meta_path(self):
self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
@unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
'needs sys._enablelegacywindowsfsencoding()')
def test__enablelegacywindowsfsencoding(self):
code = ('import sys',
'sys._enablelegacywindowsfsencoding()',
'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
rc, out, err = assert_python_ok('-c', '; '.join(code))
out = out.decode('ascii', 'replace').rstrip()
self.assertEqual(out, 'mbcs replace')
@support.requires_subprocess()
def test_orig_argv(self):
code = textwrap.dedent('''
import sys
print(sys.argv)
print(sys.orig_argv)
''')
args = [sys.executable, '-I', '-X', 'utf8', '-c', code, 'arg']
proc = subprocess.run(args, check=True, capture_output=True, text=True)
expected = [
repr(['-c', 'arg']), # sys.argv
repr(args), # sys.orig_argv
]
self.assertEqual(proc.stdout.rstrip().splitlines(), expected,
proc)
def test_module_names(self):
self.assertIsInstance(sys.stdlib_module_names, frozenset)
for name in sys.stdlib_module_names:
self.assertIsInstance(name, str)
def test_stdlib_dir(self):
os = import_helper.import_fresh_module('os')
marker = getattr(os, '__file__', None)
if marker and not os.path.exists(marker):
marker = None
expected = os.path.dirname(marker) if marker else None
self.assertEqual(os.path.normpath(sys._stdlib_dir),
os.path.normpath(expected))
@unittest.skipUnless(hasattr(sys, 'getobjects'), 'need sys.getobjects()')
def test_getobjects(self):
# sys.getobjects(0)
all_objects = sys.getobjects(0)
self.assertIsInstance(all_objects, list)
self.assertGreater(len(all_objects), 0)
# sys.getobjects(0, MyType)
class MyType:
pass
size = 100
my_objects = [MyType() for _ in range(size)]
get_objects = sys.getobjects(0, MyType)
self.assertEqual(len(get_objects), size)
for obj in get_objects:
self.assertIsInstance(obj, MyType)
# sys.getobjects(3, MyType)
get_objects = sys.getobjects(3, MyType)
self.assertEqual(len(get_objects), 3)
@unittest.skipUnless(hasattr(sys, '_stats_on'), 'need Py_STATS build')
def test_pystats(self):
# Call the functions, just check that they don't crash
# Cannot save/restore state.
sys._stats_on()
sys._stats_off()
sys._stats_clear()
sys._stats_dump()
@test.support.cpython_only
@unittest.skipUnless(hasattr(sys, 'abiflags'), 'need sys.abiflags')
def test_disable_gil_abi(self):
self.assertEqual('t' in sys.abiflags, support.Py_GIL_DISABLED)
@test.support.cpython_only
| SysModuleTest |
python | walkccc__LeetCode | solutions/2349. Design a Number Container System/2349.py | {
"start": 41,
"end": 699
} | class ____:
def __init__(self):
self.numberToIndices = collections.defaultdict(SortedSet)
self.indexToNumber = {}
def change(self, index: int, number: int) -> None:
if index in self.indexToNumber:
originalNumber = self.indexToNumber[index]
self.numberToIndices[originalNumber].remove(index)
if len(self.numberToIndices[originalNumber]) == 0:
del self.numberToIndices[originalNumber]
self.indexToNumber[index] = number
self.numberToIndices[number].add(index)
def find(self, number: int) -> int:
if number in self.numberToIndices:
return self.numberToIndices[number][0]
return -1
| NumberContainers |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 33315,
"end": 33869
} | class ____(StructModel):
def __init__(self, dmm, fe_type):
array_type = fe_type.array_type
dtype = array_type.dtype
ndim = array_type.ndim
members = [('array', array_type),
('pointers', types.EphemeralArray(types.CPointer(dtype), ndim)),
('indices', types.EphemeralArray(types.intp, ndim)),
('exhausted', types.EphemeralPointer(types.boolean)),
]
super(FlatIter, self).__init__(dmm, fe_type, members)
@register_default(types.UniTupleIter)
| FlatIter |
python | lazyprogrammer__machine_learning_examples | rl3/a2c/atari_wrappers.py | {
"start": 1222,
"end": 1868
} | class ____(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
| FireResetEnv |
python | spack__spack | lib/spack/spack/test/concretization/core.py | {
"start": 126524,
"end": 186501
} | class ____:
"""Collects tests on edge properties"""
@pytest.mark.parametrize(
"spec_str,expected_satisfies,expected_not_satisfies",
[
("conditional-edge", ["^zlib@2.0"], ["^zlib-api"]),
("conditional-edge~foo", ["^zlib@2.0"], ["^zlib-api"]),
(
"conditional-edge+foo",
["^zlib@1.0", "^zlib-api", "^[virtuals=zlib-api] zlib"],
["^[virtuals=mpi] zlib"],
),
],
)
def test_condition_triggered_by_edge_property(
self, spec_str, expected_satisfies, expected_not_satisfies
):
"""Tests that we can enforce constraints based on edge attributes"""
s = spack.concretize.concretize_one(spec_str)
for expected in expected_satisfies:
assert s.satisfies(expected), str(expected)
for not_expected in expected_not_satisfies:
assert not s.satisfies(not_expected), str(not_expected)
def test_virtuals_provided_together_but_only_one_required_in_dag(self):
"""Tests that we can use a provider that provides more than one virtual together,
and is providing only one, iff the others are not needed in the DAG.
o blas-only-client
| [virtual=blas]
o openblas (provides blas and lapack together)
"""
s = spack.concretize.concretize_one("blas-only-client ^openblas")
assert s.satisfies("^[virtuals=blas] openblas")
assert not s.satisfies("^[virtuals=blas,lapack] openblas")
def test_reusable_externals_match(mock_packages, tmp_path: pathlib.Path):
spec = Spec("mpich@4.1~debug build_system=generic arch=linux-ubuntu23.04-zen2 %gcc@13.1.0")
spec.external_path = str(tmp_path)
spec.external_modules = ["mpich/4.1"]
spec._mark_concrete()
assert spack.solver.reuse._is_reusable(
spec,
{
"mpich": {
"externals": [
{"spec": "mpich@4.1", "prefix": str(tmp_path), "modules": ["mpich/4.1"]}
]
}
},
local=False,
)
def test_reusable_externals_match_virtual(mock_packages, tmp_path: pathlib.Path):
spec = Spec("mpich@4.1~debug build_system=generic arch=linux-ubuntu23.04-zen2 %gcc@13.1.0")
spec.external_path = str(tmp_path)
spec.external_modules = ["mpich/4.1"]
spec._mark_concrete()
assert spack.solver.reuse._is_reusable(
spec,
{
"mpi": {
"externals": [
{"spec": "mpich@4.1", "prefix": str(tmp_path), "modules": ["mpich/4.1"]}
]
}
},
local=False,
)
def test_reusable_externals_different_prefix(mock_packages, tmp_path: pathlib.Path):
spec = Spec("mpich@4.1~debug build_system=generic arch=linux-ubuntu23.04-zen2 %gcc@13.1.0")
spec.external_path = "/other/path"
spec.external_modules = ["mpich/4.1"]
spec._mark_concrete()
assert not spack.solver.reuse._is_reusable(
spec,
{
"mpich": {
"externals": [
{"spec": "mpich@4.1", "prefix": str(tmp_path), "modules": ["mpich/4.1"]}
]
}
},
local=False,
)
@pytest.mark.parametrize("modules", [None, ["mpich/4.1", "libfabric/1.19"]])
def test_reusable_externals_different_modules(mock_packages, tmp_path: pathlib.Path, modules):
spec = Spec("mpich@4.1~debug build_system=generic arch=linux-ubuntu23.04-zen2 %gcc@13.1.0")
spec.external_path = str(tmp_path)
spec.external_modules = modules
spec._mark_concrete()
assert not spack.solver.reuse._is_reusable(
spec,
{
"mpich": {
"externals": [
{"spec": "mpich@4.1", "prefix": str(tmp_path), "modules": ["mpich/4.1"]}
]
}
},
local=False,
)
def test_reusable_externals_different_spec(mock_packages, tmp_path: pathlib.Path):
spec = Spec("mpich@4.1~debug build_system=generic arch=linux-ubuntu23.04-zen2 %gcc@13.1.0")
spec.external_path = str(tmp_path)
spec._mark_concrete()
assert not spack.solver.reuse._is_reusable(
spec,
{"mpich": {"externals": [{"spec": "mpich@4.1 +debug", "prefix": str(tmp_path)}]}},
local=False,
)
def test_concretization_version_order():
versions = [
(Version("develop"), {}),
(Version("1.0"), {}),
(Version("2.0"), {"deprecated": True}),
(Version("1.1"), {}),
(Version("1.1alpha1"), {}),
(Version("0.9"), {"preferred": True}),
]
result = [
v
for v, _ in sorted(
versions, key=spack.package_base.concretization_version_order, reverse=True
)
]
assert result == [
Version("0.9"), # preferred
Version("1.1"), # latest non-deprecated final version
Version("1.0"), # latest non-deprecated final version
Version("1.1alpha1"), # prereleases
Version("develop"), # likely development version
Version("2.0"), # deprecated
]
@pytest.mark.parametrize(
"roots,reuse_yaml,expected,not_expected,expected_length",
[
(
["mpileaks"],
{"roots": True, "include": ["^mpich"]},
["^mpich"],
["^mpich2", "^zmpi"],
# Reused from store + externals
2 + 15,
),
(
["mpileaks"],
{"roots": True, "include": ["externaltest"]},
["externaltest"],
["^mpich", "^mpich2", "^zmpi"],
# Reused from store + externals
1 + 15,
),
],
)
@pytest.mark.usefixtures("mutable_database", "mock_store", "do_not_check_runtimes_on_reuse")
@pytest.mark.not_on_windows("Expected length is different on Windows")
def test_filtering_reused_specs(
roots, reuse_yaml, expected, not_expected, expected_length, mutable_config
):
"""Tests that we can select which specs are to be reused, using constraints as filters"""
# Assume all specs have a runtime dependency
mutable_config.set("concretizer:reuse", reuse_yaml)
selector = spack.solver.asp.ReusableSpecsSelector(mutable_config)
specs = selector.reusable_specs(roots)
assert len(specs) == expected_length
for constraint in expected:
assert all(x.satisfies(constraint) for x in specs if not x.external)
for constraint in not_expected:
assert all(not x.satisfies(constraint) for x in specs if not x.external)
@pytest.mark.usefixtures("mutable_database", "mock_store")
@pytest.mark.parametrize(
"reuse_yaml,expected_length",
[
(
{"from": [{"type": "local"}]},
# Local store + externals
19 + 15,
),
(
{"from": [{"type": "buildcache"}]},
# Local store + externals
0 + 15,
),
],
)
@pytest.mark.not_on_windows("Expected length is different on Windows")
def test_selecting_reused_sources(
reuse_yaml, expected_length, mutable_config, do_not_check_runtimes_on_reuse
):
"""Tests that we can turn on/off sources of reusable specs"""
# Assume all specs have a runtime dependency
mutable_config.set("concretizer:reuse", reuse_yaml)
selector = spack.solver.asp.ReusableSpecsSelector(mutable_config)
specs = selector.reusable_specs(["mpileaks"])
assert len(specs) == expected_length
# Compiler wrapper is not reused, as it might have changed from previous installations
assert not [x for x in specs if x.name == "compiler-wrapper"]
@pytest.mark.parametrize(
"specs,include,exclude,expected",
[
# "foo" discarded by include rules (everything compiled with GCC)
(["cmake@3.27.9 %gcc", "foo %clang"], ["%gcc"], [], ["cmake@3.27.9 %gcc"]),
# "cmake" discarded by exclude rules (everything compiled with GCC but cmake)
(["cmake@3.27.9 %gcc", "foo %gcc"], ["%gcc"], ["cmake"], ["foo %gcc"]),
],
)
def test_spec_filters(specs, include, exclude, expected):
specs = [Spec(x) for x in specs]
expected = [Spec(x) for x in expected]
f = spack.solver.reuse.SpecFilter(
factory=lambda: specs, is_usable=lambda x: True, include=include, exclude=exclude
)
assert f.selected_specs() == expected
@pytest.mark.regression("38484")
def test_git_ref_version_can_be_reused(install_mockery, do_not_check_runtimes_on_reuse):
first_spec = spack.concretize.concretize_one(
spack.spec.Spec("git-ref-package@git.2.1.5=2.1.5~opt")
)
PackageInstaller([first_spec.package], fake=True, explicit=True).install()
with spack.config.override("concretizer:reuse", True):
# reproducer of the issue is that spack will solve when there is a change to the base spec
second_spec = spack.concretize.concretize_one(
spack.spec.Spec("git-ref-package@git.2.1.5=2.1.5+opt")
)
assert second_spec.dag_hash() != first_spec.dag_hash()
# we also want to confirm that reuse actually works so leave variant off to
# let solver reuse
third_spec = spack.spec.Spec("git-ref-package@git.2.1.5=2.1.5")
assert first_spec.satisfies(third_spec)
third_spec = spack.concretize.concretize_one(third_spec)
assert third_spec.dag_hash() == first_spec.dag_hash()
@pytest.mark.parametrize("standard_version", ["2.0.0", "2.1.5", "2.1.6"])
def test_reuse_prefers_standard_over_git_versions(
standard_version, install_mockery, do_not_check_runtimes_on_reuse
):
"""
order matters in this test. typically reuse would pick the highest versioned installed match
but we want to prefer the standard version over git ref based versions
so install git ref last and ensure it is not picked up by reuse
"""
standard_spec = spack.concretize.concretize_one(
spack.spec.Spec(f"git-ref-package@{standard_version}")
)
PackageInstaller([standard_spec.package], fake=True, explicit=True).install()
git_spec = spack.concretize.concretize_one("git-ref-package@git.2.1.5=2.1.5")
PackageInstaller([git_spec.package], fake=True, explicit=True).install()
with spack.config.override("concretizer:reuse", True):
test_spec = spack.concretize.concretize_one("git-ref-package@2")
assert git_spec.dag_hash() != test_spec.dag_hash()
assert standard_spec.dag_hash() == test_spec.dag_hash()
@pytest.mark.parametrize("unify", [True, "when_possible", False])
def test_spec_unification(unify, mutable_config, mock_packages):
spack.config.set("concretizer:unify", unify)
a = "pkg-a"
a_restricted = "pkg-a^pkg-b foo=baz"
b = "pkg-b foo=none"
unrestricted = spack.cmd.parse_specs([a, b], concretize=True)
a_concrete_unrestricted = [s for s in unrestricted if s.name == "pkg-a"][0]
b_concrete_unrestricted = [s for s in unrestricted if s.name == "pkg-b"][0]
assert (a_concrete_unrestricted["pkg-b"] == b_concrete_unrestricted) == (unify is not False)
maybe_fails = pytest.raises if unify is True else spack.llnl.util.lang.nullcontext
with maybe_fails(spack.solver.asp.UnsatisfiableSpecError):
_ = spack.cmd.parse_specs([a_restricted, b], concretize=True)
@pytest.mark.usefixtures("mutable_config", "mock_packages", "do_not_check_runtimes_on_reuse")
@pytest.mark.parametrize(
"spec_str, error_type",
[
(f"git-ref-package@main commit={'a' * 40}", None),
(f"git-ref-package@main commit={'a' * 39}", AssertionError),
(f"git-ref-package@2.1.6 commit={'a' * 40}", spack.error.UnsatisfiableSpecError),
(f"git-ref-package@git.2.1.6=2.1.6 commit={'a' * 40}", None),
(f"git-ref-package@git.{'a' * 40}=2.1.6 commit={'a' * 40}", None),
],
)
def test_spec_containing_commit_variant(spec_str, error_type):
spec = spack.spec.Spec(spec_str)
if error_type is None:
spack.concretize.concretize_one(spec)
else:
with pytest.raises(error_type):
spack.concretize.concretize_one(spec)
@pytest.mark.usefixtures("mutable_config", "mock_packages", "do_not_check_runtimes_on_reuse")
@pytest.mark.parametrize(
"spec_str",
[
f"git-test-commit@git.main commit={'a' * 40}",
f"git-test-commit@git.v1.0 commit={'a' * 40}",
"git-test-commit@{sha} commit={sha}",
"git-test-commit@{sha} commit=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
],
)
def test_spec_with_commit_interacts_with_lookup(mock_git_version_info, monkeypatch, spec_str):
# This test will be short lived. Technically we could do further checks with a Lookup
# but skipping impl since we are going to deprecate
repo_path, filename, commits = mock_git_version_info
file_url = pathlib.Path(repo_path).as_uri()
monkeypatch.setattr(spack.package_base.PackageBase, "git", file_url, raising=False)
spec = spack.spec.Spec(spec_str.format(sha=commits[-1]))
spack.concretize.concretize_one(spec)
@pytest.mark.usefixtures("mutable_config", "mock_packages", "do_not_check_runtimes_on_reuse")
@pytest.mark.parametrize("version_str", [f"git.{'a' * 40}=main", "git.2.1.5=main"])
def test_relationship_git_versions_and_commit_variant(version_str):
"""
Confirm that GitVersions auto assign and populates the commit variant correctly
"""
# This should be a short lived test and can be deleted when we remove GitVersions
spec = spack.spec.Spec(f"git-ref-package@{version_str}")
spec = spack.concretize.concretize_one(spec)
if spec.version.commit_sha:
assert spec.version.commit_sha == spec.variants["commit"].value
else:
assert "commit" not in spec.variants
@pytest.mark.usefixtures("install_mockery", "do_not_check_runtimes_on_reuse")
def test_abstract_commit_spec_reuse():
commit = "abcd" * 10
spec_str_1 = f"git-ref-package@develop commit={commit}"
spec_str_2 = f"git-ref-package commit={commit}"
spec1 = spack.concretize.concretize_one(spec_str_1)
PackageInstaller([spec1.package], fake=True, explicit=True).install()
with spack.config.override("concretizer:reuse", True):
spec2 = spack.concretize.concretize_one(spec_str_2)
assert spec2.dag_hash() == spec1.dag_hash()
@pytest.mark.usefixtures("install_mockery", "do_not_check_runtimes_on_reuse")
@pytest.mark.parametrize(
"installed_commit, incoming_commit, reusable",
[("a" * 40, "b" * 40, False), (None, "b" * 40, False), ("a" * 40, None, True)],
)
def test_commit_variant_can_be_reused(installed_commit, incoming_commit, reusable):
# install a non-default variant to test if reuse picks it
if installed_commit:
spec_str_1 = f"git-ref-package@develop commit={installed_commit} ~opt"
else:
spec_str_1 = "git-ref-package@develop ~opt"
if incoming_commit:
spec_str_2 = f"git-ref-package@develop commit={incoming_commit}"
else:
spec_str_2 = "git-ref-package@develop"
spec1 = spack.concretize.concretize_one(spack.spec.Spec(spec_str_1))
PackageInstaller([spec1.package], fake=True, explicit=True).install()
with spack.config.override("concretizer:reuse", True):
spec2 = spack.spec.Spec(spec_str_2)
spec2 = spack.concretize.concretize_one(spec2)
assert (spec1.dag_hash() == spec2.dag_hash()) == reusable
@pytest.mark.regression("42679")
@pytest.mark.parametrize("compiler_str", ["gcc@=9.4.0", "gcc@=9.4.0-foo"])
def test_selecting_compiler_with_suffix(mutable_config, mock_packages, compiler_str):
"""Tests that we can select compilers whose versions differ only for a suffix."""
packages_yaml = syaml.load_config(
"""
packages:
gcc:
externals:
- spec: "gcc@9.4.0-foo languages='c,c++'"
prefix: /path
extra_attributes:
compilers:
c: /path/bin/gcc
cxx: /path/bin/g++
"""
)
mutable_config.set("packages", packages_yaml["packages"])
s = spack.concretize.concretize_one(f"libelf %{compiler_str}")
assert s["c"].satisfies(compiler_str)
def test_duplicate_compiler_in_externals(mutable_config, mock_packages):
"""Tests that having duplicate compilers in packages.yaml do not raise and error."""
packages_yaml = syaml.load_config(
"""
packages:
gcc:
externals:
- spec: "gcc@9.4.0 languages='c,c++'"
prefix: /path
extra_attributes:
compilers:
c: /path/bin/gcc
cxx: /path/bin/g++
- spec: "gcc@9.4.0 languages='c,c++'"
prefix: /path
extra_attributes:
compilers:
c: /path/bin/gcc
cxx: /path/bin/g++
"""
)
mutable_config.set("packages", packages_yaml["packages"])
s = spack.concretize.concretize_one("libelf %gcc@9.4")
assert s["c"].satisfies("gcc@9.4.0")
@pytest.mark.parametrize(
"spec_str,expected",
[
("gcc@14 %gcc@9.4.0", ["gcc@14", "%c,cxx=gcc@9.4.0", "^gcc-runtime@9.4.0"]),
# If we don't specify a compiler, we should get the default compiler which is gcc
("gcc@14", ["gcc@14", "%c,cxx=gcc@10", "^gcc-runtime@10"]),
],
)
def test_compiler_can_depend_on_themselves_to_build(
spec_str, expected, default_mock_concretization
):
"""Tests that a compiler can depend on "itself" to bootstrap."""
s = default_mock_concretization(spec_str)
assert not s.external
for c in expected:
assert s.satisfies(c)
def test_compiler_attribute_is_tolerated_in_externals(
mutable_config, mock_packages, tmp_path: pathlib.Path
):
"""Tests that we don't error out if an external specifies a compiler in the old way,
provided that a suitable external compiler exists.
"""
packages_yaml = syaml.load_config(
f"""
packages:
cmake:
externals:
- spec: "cmake@3.27.4 %gcc@10"
prefix: {tmp_path}
buildable: false
"""
)
mutable_config.set("packages", packages_yaml["packages"])
s = spack.concretize.concretize_one("cmake")
assert s.external and s.external_path == str(tmp_path)
def test_compiler_can_be_built_with_other_compilers(config, mock_packages):
"""Tests that a compiler can be built also with another compiler."""
s = spack.concretize.concretize_one("llvm@18 +clang %gcc")
assert s.satisfies("llvm@18")
c_compiler = s.dependencies(virtuals=("c",))
assert len(c_compiler) == 1 and c_compiler[0].satisfies("gcc@10")
@pytest.mark.parametrize(
"spec_str,expected",
[
# Only one compiler is in the DAG, so pick the external associated with it
("dyninst %clang", "clang"),
("dyninst %gcc", "gcc"),
# Both compilers are in the DAG, so pick the best external according to other criteria
("dyninst %clang ^libdwarf%gcc", "clang"),
("dyninst %gcc ^libdwarf%clang", "clang"),
],
)
def test_compiler_match_for_externals_is_taken_into_account(
spec_str, expected, mutable_config, mock_packages, tmp_path: pathlib.Path
):
"""Tests that compiler annotation for externals are somehow taken into account for a match"""
packages_yaml = syaml.load_config(
f"""
packages:
libelf:
externals:
- spec: "libelf@0.8.12 %gcc@10"
prefix: {tmp_path / 'gcc'}
- spec: "libelf@0.8.13 %clang"
prefix: {tmp_path / 'clang'}
"""
)
mutable_config.set("packages", packages_yaml["packages"])
s = spack.concretize.concretize_one(spec_str)
libelf = s["libelf"]
assert libelf.external and libelf.external_path == str(tmp_path / expected)
@pytest.mark.parametrize(
"spec_str,expected",
[
# Only one compiler is in the DAG, so pick the external associated with it
("dyninst %gcc@10", "libelf-gcc10"),
("dyninst %gcc@9", "libelf-gcc9"),
# Both compilers are in the DAG, so pick the best external according to other criteria
("dyninst %gcc@10 ^libdwarf%gcc@9", "libelf-gcc9"),
],
)
def test_compiler_match_for_externals_with_versions(
spec_str, expected, mutable_config, mock_packages, tmp_path: pathlib.Path
):
"""Tests that version constraints are taken into account for compiler annotations
on externals
"""
packages_yaml = syaml.load_config(
f"""
packages:
libelf:
buildable: false
externals:
- spec: "libelf@0.8.12 %gcc@10"
prefix: {tmp_path / 'libelf-gcc10'}
- spec: "libelf@0.8.13 %gcc@9.4.0"
prefix: {tmp_path / 'libelf-gcc9'}
"""
)
mutable_config.set("packages", packages_yaml["packages"])
s = spack.concretize.concretize_one(spec_str)
libelf = s["libelf"]
assert libelf.external and libelf.external_path == str(tmp_path / expected)
def test_specifying_compilers_with_virtuals_syntax(default_mock_concretization):
"""Tests that we can pin compilers to nodes using the %[virtuals=...] syntax"""
# clang will be used for both C and C++, since they are provided together
mpich = default_mock_concretization("mpich %[virtuals=fortran] gcc %clang")
assert mpich["fortran"].satisfies("gcc")
assert mpich["c"].satisfies("llvm")
assert mpich["cxx"].satisfies("llvm")
# gcc is the default compiler
mpileaks = default_mock_concretization(
"mpileaks ^libdwarf %gcc ^mpich %[virtuals=fortran] gcc %clang"
)
assert mpileaks["c"].satisfies("gcc")
libdwarf = mpileaks["libdwarf"]
assert libdwarf["c"].satisfies("gcc")
assert libdwarf["c"].satisfies("gcc")
mpich = mpileaks["mpi"]
assert mpich["fortran"].satisfies("gcc")
assert mpich["c"].satisfies("llvm")
assert mpich["cxx"].satisfies("llvm")
@pytest.mark.regression("49847")
@pytest.mark.xfail(sys.platform == "win32", reason="issues with install mockery")
def test_reuse_when_input_specifies_build_dep(install_mockery, do_not_check_runtimes_on_reuse):
"""Test that we can reuse a spec when specifying build dependencies in the input"""
pkgb_old = spack.concretize.concretize_one(spack.spec.Spec("pkg-b@0.9 %gcc@9"))
PackageInstaller([pkgb_old.package], fake=True, explicit=True).install()
with spack.config.override("concretizer:reuse", True):
result = spack.concretize.concretize_one("pkg-b %gcc")
assert pkgb_old.dag_hash() == result.dag_hash()
result = spack.concretize.concretize_one("pkg-a ^pkg-b %gcc@9")
assert pkgb_old.dag_hash() == result["pkg-b"].dag_hash()
assert result.satisfies("%gcc@9")
result = spack.concretize.concretize_one("pkg-a %gcc@10 ^pkg-b %gcc@9")
assert pkgb_old.dag_hash() == result["pkg-b"].dag_hash()
@pytest.mark.regression("49847")
def test_reuse_when_requiring_build_dep(
install_mockery, do_not_check_runtimes_on_reuse, mutable_config
):
"""Test that we can reuse a spec when specifying build dependencies in requirements"""
mutable_config.set("packages:all:require", "%gcc")
pkgb_old = spack.concretize.concretize_one(spack.spec.Spec("pkg-b@0.9"))
PackageInstaller([pkgb_old.package], fake=True, explicit=True).install()
with spack.config.override("concretizer:reuse", True):
result = spack.concretize.concretize_one("pkg-b")
assert pkgb_old.dag_hash() == result.dag_hash(), result.tree()
@pytest.mark.regression("50167")
def test_input_analysis_and_conditional_requirements(default_mock_concretization):
"""Tests that input analysis doesn't account for conditional requirement
to discard possible dependencies.
If the requirement is conditional, and impossible to achieve on the current
platform, the valid search space is still the complement of the condition that
activates the requirement.
"""
libceed = default_mock_concretization("libceed")
assert libceed["libxsmm"].satisfies("@main")
assert libceed["libxsmm"].satisfies("platform=test")
@pytest.mark.parametrize(
"compiler_str,expected,not_expected",
[
# Compilers are matched to some other external, so the compiler that picked is concrete
("gcc@10", ["%gcc", "%gcc@10"], ["%clang", "%gcc@9"]),
("gcc@9.4.0", ["%gcc", "%gcc@9"], ["%clang", "%gcc@10"]),
("clang", ["%clang", "%llvm+clang"], ["%gcc", "%gcc@9", "%gcc@10"]),
],
)
@pytest.mark.regression("49841")
def test_installing_external_with_compilers_directly(
compiler_str, expected, not_expected, mutable_config, mock_packages, tmp_path: pathlib.Path
):
"""Tests that version constraints are taken into account for compiler annotations
on externals
"""
spec_str = f"libelf@0.8.12 %{compiler_str}"
packages_yaml = syaml.load_config(
f"""
packages:
libelf:
buildable: false
externals:
- spec: {spec_str}
prefix: {tmp_path / 'libelf'}
"""
)
mutable_config.set("packages", packages_yaml["packages"])
s = spack.concretize.concretize_one(spec_str)
assert s.external
assert all(s.satisfies(c) for c in expected)
assert all(not s.satisfies(c) for c in not_expected)
@pytest.mark.regression("49841")
def test_using_externals_with_compilers(mutable_config, mock_packages, tmp_path: pathlib.Path):
"""Tests that version constraints are taken into account for compiler annotations
on externals, even imposed as transitive deps.
"""
packages_yaml = syaml.load_config(
f"""
packages:
libelf:
buildable: false
externals:
- spec: libelf@0.8.12 %gcc@10
prefix: {tmp_path / 'libelf'}
"""
)
mutable_config.set("packages", packages_yaml["packages"])
with pytest.raises(spack.error.SpackError):
spack.concretize.concretize_one("dyninst%gcc@10.2.1 ^libelf@0.8.12 %gcc@:9")
s = spack.concretize.concretize_one("dyninst%gcc@10.2.1 ^libelf@0.8.12 %gcc@10:")
libelf = s["libelf"]
assert libelf.external and libelf.satisfies("%gcc")
@pytest.mark.regression("50161")
def test_installed_compiler_and_better_external(
install_mockery, do_not_check_runtimes_on_reuse, mutable_config
):
"""Tests that we always prefer a higher-priority external compiler, when we have a
lower-priority compiler installed, and we try to concretize a spec without specifying
the compiler dependency.
"""
pkg_b = spack.concretize.concretize_one(spack.spec.Spec("pkg-b %clang"))
PackageInstaller([pkg_b.package], fake=True, explicit=True).install()
with spack.config.override("concretizer:reuse", False):
pkg_a = spack.concretize.concretize_one("pkg-a")
assert pkg_a["c"].satisfies("gcc@10"), pkg_a.tree()
assert pkg_a["pkg-b"]["c"].satisfies("gcc@10")
with spack.config.override("concretizer:reuse", False):
mpileaks = spack.concretize.concretize_one("mpileaks")
assert mpileaks.satisfies("%gcc@10")
@pytest.mark.regression("50006")
def test_concrete_multi_valued_variants_in_externals(
mutable_config, mock_packages, tmp_path: pathlib.Path
):
"""Tests that concrete multivalued variants in externals cannot be extended with additional
values when concretizing.
"""
packages_yaml = syaml.load_config(
f"""
packages:
gcc:
buildable: false
externals:
- spec: gcc@12.1.0 languages:='c,c++'
prefix: {tmp_path / 'gcc-12'}
extra_attributes:
compilers:
c: {tmp_path / 'gcc-12'}/bin/gcc
cxx: {tmp_path / 'gcc-12'}/bin/g++
- spec: gcc@14.1.0 languages:=fortran
prefix: {tmp_path / 'gcc-14'}
extra_attributes:
compilers:
fortran: {tmp_path / 'gcc-14'}/bin/gfortran
"""
)
mutable_config.set("packages", packages_yaml["packages"])
with pytest.raises(spack.solver.asp.UnsatisfiableSpecError):
spack.concretize.concretize_one("pkg-b %gcc@14")
s = spack.concretize.concretize_one("pkg-b %gcc")
assert s["c"].satisfies("gcc@12.1.0"), s.tree()
assert s["c"].external
assert s["c"].satisfies("languages=c,c++") and not s["c"].satisfies("languages=fortran")
def test_concrete_multi_valued_in_input_specs(default_mock_concretization):
    """Tests that we can use := to specify exactly multivalued variants in input specs."""
    result = default_mock_concretization("gcc languages:=fortran")
    # gcc itself is built from source here, while its C compiler is an external
    assert result["c"].external and not result.external
    assert result.satisfies("languages:=fortran")
    # := pins the variant exactly: no other language values may be present
    for other_language in ("languages=c", "languages=c++"):
        assert not result.satisfies(other_language)
def test_concrete_multi_valued_variants_in_requirements(mutable_config, mock_packages):
    """Tests that concrete multivalued variants can be imposed by requirements."""
    packages_yaml = syaml.load_config(
        """
packages:
  pkg-a:
    require:
    - libs:=static
"""
    )
    mutable_config.set("packages", packages_yaml["packages"])
    # libs:=static pins the variant exactly, so any value set containing "shared" is unsat.
    # NOTE(review): original indentation was lost; both calls are assumed to sit inside the
    # pytest.raises block (safe under either original layout) -- confirm against upstream
    with pytest.raises(spack.solver.asp.UnsatisfiableSpecError):
        spack.concretize.concretize_one("pkg-a libs=shared")
        spack.concretize.concretize_one("pkg-a libs=shared,static")
    s = spack.concretize.concretize_one("pkg-a")
    assert s.satisfies("libs:=static")
    assert not s.satisfies("libs=shared")
def test_concrete_multi_valued_variants_in_depends_on(default_mock_concretization):
    """Tests the use of := in depends_on directives"""
    # The dependency pins foo:=a,b exactly (see the assertion below), so any request
    # that includes "c" conflicts with it.
    # NOTE(review): original indentation was lost; all three calls are assumed to sit
    # inside the pytest.raises block -- confirm against upstream
    with pytest.raises(spack.solver.asp.UnsatisfiableSpecError):
        default_mock_concretization("gmt-concrete-mv-dependency ^mvdefaults foo:=c")
        default_mock_concretization("gmt-concrete-mv-dependency ^mvdefaults foo:=a,c")
        default_mock_concretization("gmt-concrete-mv-dependency ^mvdefaults foo:=b,c")
    s = default_mock_concretization("gmt-concrete-mv-dependency")
    assert s.satisfies("^mvdefaults foo:=a,b"), s.tree()
    assert not s.satisfies("^mvdefaults foo=c")
def test_concrete_multi_valued_variants_when_args(default_mock_concretization):
    """Tests the use of := in conflicts and when= arguments"""
    # conflicts("foo:=a,b", when="@0.9") only matches the exact value set {a, b}
    with pytest.raises(spack.solver.asp.UnsatisfiableSpecError):
        default_mock_concretization("mvdefaults@0.9 foo:=a,b")
    for variant_request in ("foo:=a", "foo:=a,b,c", "foo:=a,c", "foo:=b,c"):
        concrete = default_mock_concretization(f"mvdefaults@0.9 {variant_request}")
        assert concrete.satisfies(variant_request)

    # depends_on("pkg-b", when="foo:=b,c") triggers only on the exact set {b, c}
    concrete = default_mock_concretization("mvdefaults foo:=b,c")
    assert concrete.satisfies("^pkg-b")
    for variant_request in ("foo:=a", "foo:=a,b,c", "foo:=a,b", "foo:=a,c"):
        concrete = default_mock_concretization(f"mvdefaults {variant_request}")
        assert not concrete.satisfies("^pkg-b")
@pytest.mark.usefixtures("mock_packages")
@pytest.mark.parametrize(
    "constraint_in_yaml,unsat_request,sat_request",
    [
        # Arch parts
        pytest.param(
            "target=x86_64",
            "target=core2",
            "target=x86_64",
            marks=pytest.mark.skipif(
                platform.machine() != "x86_64", reason="only valid for x86_64"
            ),
        ),
        pytest.param(
            "target=core2",
            "target=x86_64",
            "target=core2",
            marks=pytest.mark.skipif(
                platform.machine() != "x86_64", reason="only valid for x86_64"
            ),
        ),
        ("os=debian6", "os=redhat6", "os=debian6"),
        ("platform=test", "platform=linux", "platform=test"),
        # Variants
        ("~lld", "+lld", "~lld"),
        ("+lld", "~lld", "+lld"),
    ],
)
def test_spec_parts_on_fresh_compilers(
    constraint_in_yaml, unsat_request, sat_request, mutable_config, tmp_path: pathlib.Path
):
    """Tests that spec parts like targets and variants in `%<package> target=<target> <variants>`
    are associated with `package` for `%` just as they would be for `^`, when we concretize
    without reusing.
    """
    # NOTE(review): YAML literal indentation reconstructed -- confirm against upstream
    packages_yaml = syaml.load_config(
        f"""
packages:
  llvm::
    buildable: false
    externals:
    - spec: "llvm@20 +clang {constraint_in_yaml}"
      prefix: {tmp_path / 'llvm-20'}
"""
    )
    mutable_config.set("packages", packages_yaml["packages"])
    # Check the abstract spec is formed correctly: the parts after % attach to llvm
    abstract_spec = Spec(f"pkg-a %llvm@20 +clang {unsat_request}")
    assert abstract_spec["llvm"].satisfies(f"@20 +clang {unsat_request}")
    # Check that we can't concretize the spec, since llvm is not buildable
    with pytest.raises(spack.solver.asp.UnsatisfiableSpecError):
        spack.concretize.concretize_one(abstract_spec)
    # Check we can instead concretize if we use the correct constraint
    s = spack.concretize.concretize_one(f"pkg-a %llvm@20 +clang {sat_request}")
    assert s["c"].external and s["c"].satisfies(f"@20 +clang {sat_request}")
@pytest.mark.usefixtures("mock_packages", "mutable_database")
@pytest.mark.parametrize(
    "constraint_in_yaml,unsat_request,sat_request",
    [
        # Arch parts
        pytest.param(
            "target=x86_64",
            "target=core2",
            "target=x86_64",
            marks=pytest.mark.skipif(
                platform.machine() != "x86_64", reason="only valid for x86_64"
            ),
        ),
        pytest.param(
            "target=core2",
            "target=x86_64",
            "target=core2",
            marks=pytest.mark.skipif(
                platform.machine() != "x86_64", reason="only valid for x86_64"
            ),
        ),
        ("os=debian6", "os=redhat6", "os=debian6"),
        ("platform=test", "platform=linux", "platform=test"),
        # Variants
        ("~lld", "+lld", "~lld"),
        ("+lld", "~lld", "+lld"),
    ],
)
def test_spec_parts_on_reused_compilers(
    constraint_in_yaml, unsat_request, sat_request, mutable_config, tmp_path: pathlib.Path
):
    """Tests that requests of the form <package>%<compiler> <requests> are considered for reused
    specs, even though build dependency are not part of the ASP problem.
    """
    # NOTE(review): YAML literal indentation reconstructed -- confirm against upstream
    packages_yaml = syaml.load_config(
        f"""
packages:
  c:
    require: llvm
  cxx:
    require: llvm
  llvm::
    buildable: false
    externals:
    - spec: "llvm+clang@20 {constraint_in_yaml}"
      prefix: {tmp_path / 'llvm-20'}
  mpileaks:
    buildable: true
"""
    )
    mutable_config.set("packages", packages_yaml["packages"])
    # Install the spec
    installed_spec = spack.concretize.concretize_one(f"mpileaks %llvm@20 {sat_request}")
    PackageInstaller([installed_spec.package], fake=True, explicit=True).install()
    # Make mpileaks not buildable, so only the installed spec can satisfy the request
    mutable_config.set("packages:mpileaks:buildable", False)
    # Check we can't concretize with the unsat request...
    with pytest.raises(spack.solver.asp.UnsatisfiableSpecError):
        spack.concretize.concretize_one(f"mpileaks %llvm@20 {unsat_request}")
    # ...but we can with the original constraint
    with spack.config.override("concretizer:reuse", True):
        s = spack.concretize.concretize_one(f"mpileaks %llvm@20 {sat_request}")
    assert s.dag_hash() == installed_spec.dag_hash()
def test_use_compiler_by_hash(mock_packages, mutable_database, mutable_config):
    """Tests that we can reuse an installed compiler specifying its hash"""
    # Install a gcc, then request it back by its DAG hash via the %gcc/<hash> syntax
    compiler = spack.concretize.concretize_one("gcc@14.0")
    PackageInstaller([compiler.package], fake=True, explicit=True).install()
    compiler_hash = compiler.dag_hash()
    with spack.config.override("concretizer:reuse", True):
        result = spack.concretize.concretize_one(f"mpileaks %gcc/{compiler_hash}")
    assert result["c"].dag_hash() == compiler_hash
@pytest.mark.parametrize(
    "spec_str,expected,not_expected",
    [
        # Simple build requirement on gcc, as a provider for c
        (
            "mpileaks %gcc",
            ["%[deptypes=build] gcc"],
            ["%[deptypes=link] gcc", "%[deptypes=run] gcc"],
        ),
        # Require mpich as a direct dependency of mpileaks
        (
            "mpileaks %[deptypes=link] mpich",
            ["%[deptypes=build,link] mpich", "^callpath%[deptypes=build,link] mpich"],
            ["%[deptypes=run] mpich"],
        ),
        (
            "mpileaks %[deptypes=link] mpich+debug",  # non-default variant
            ["%[deptypes=build,link] mpich+debug"],
            ["% mpich~debug"],
        ),
        # Require mpich as a direct dependency of two nodes, with compatible constraints
        (
            "mpileaks %mpich+debug ^callpath %mpich@3.0.3",  # non-default variant
            [
                "%[deptypes=build,link] mpich@3.0.3+debug",
                "^callpath %[deptypes=build,link] mpich@3.0.3+debug",
            ],
            ["%mpich~debug"],
        ),
        # Package that has a conditional link dependency on a compiler
        ("emacs +native", ["%[virtuals=c deptypes=build,link] gcc"], []),
        ("emacs +native %gcc", ["%[virtuals=c deptypes=build,link] gcc"], []),
        ("emacs +native %[virtuals=c] gcc", ["%[virtuals=c deptypes=build,link] gcc"], []),
        # Package that depends on llvm as a library and also needs C and C++ compilers
        (
            "llvm-client",
            ["%[virtuals=c,cxx deptypes=build] gcc", "%[deptypes=build,link] llvm"],
            ["%c=llvm"],
        ),
        (
            "llvm-client %c,cxx=gcc",
            ["%[virtuals=c,cxx deptypes=build] gcc", "%[deptypes=build,link] llvm"],
            ["%c=llvm"],
        ),
        ("llvm-client %c,cxx=llvm", ["%[virtuals=c,cxx deptypes=build,link] llvm"], ["%gcc"]),
    ],
)
def test_specifying_direct_dependencies(
    spec_str, expected, not_expected, default_mock_concretization
):
    """Tests solving % in different scenarios, either for runtime or buildtime dependencies."""
    concrete_spec = default_mock_concretization(spec_str)
    # Each case lists constraints that must (and must not) hold on the solved spec
    for c in expected:
        assert concrete_spec.satisfies(c)
    for c in not_expected:
        assert not concrete_spec.satisfies(c)
@pytest.mark.parametrize(
    "spec_str,conditional_spec,expected",
    [
        # Abstract spec is False, cause the set of possible solutions in the rhs is smaller
        ("mpich", "%[when=+debug] llvm", (False, True)),
        # Abstract spec is True, since we know the condition never applies
        ("mpich~debug", "%[when=+debug] llvm", (True, True)),
        # In this case we know the condition applies
        ("mpich+debug", "%[when=+debug] llvm", (False, False)),
        ("mpich+debug %llvm+clang", "%[when=+debug] llvm", (True, True)),
        ("mpich+debug", "%[when=+debug] gcc", (False, True)),
        # Conditional specs on the lhs
        ("mpich %[when=+debug] gcc", "mpich %gcc", (False, True)),
        ("mpich %[when=+debug] gcc", "mpich %llvm", (False, False)),
        ("mpich %[when=+debug] gcc", "mpich %[when=+debug] gcc", (True, True)),
        ("mpileaks ^[when=+opt] callpath@0.9", "mpileaks ^callpath@1.0", (False, True)),
        ("mpileaks ^[when=+opt] callpath@1.0", "mpileaks ^callpath@1.0", (False, True)),
        ("mpileaks ^[when=+opt] callpath@1.0", "mpileaks ^[when=+opt] callpath@1.0", (True, True)),
        # Conditional specs on both sides
        (
            "mpileaks ^[when=+opt] callpath@1.0",
            "mpileaks ^[when=+opt+debug] callpath@1.0",
            (True, True),
        ),
        (
            "mpileaks ^[when=+opt+debug] callpath@1.0",
            "mpileaks ^[when=+opt] callpath@1.0",
            (False, True),
        ),
        (
            "mpileaks ^[when=+opt] callpath@1.0",
            "mpileaks ^[when=~debug] callpath@1.0",
            (False, True),
        ),
        # Different conditional specs associated with different nodes in the DAG, where one does
        # not apply since the condition is not met
        (
            "mpileaks %[when='%mpi' virtuals=mpi] zmpi ^libelf %[when='%mpi' virtuals=mpi] mpich",
            "mpileaks %[virtuals=mpi] zmpi",
            (False, True),
        ),
        (
            "mpileaks %[when='%mpi' virtuals=mpi] mpich ^libelf %[when='%mpi' virtuals=mpi] zmpi",
            "mpileaks %[virtuals=mpi] mpich",
            (False, True),
        ),
    ],
)
def test_satisfies_conditional_spec(
    spec_str, conditional_spec, expected, default_mock_concretization
):
    """Tests satisfies semantic when testing an abstract spec and its concretized counterpart
    with a conditional spec.
    """
    abstract_spec = Spec(spec_str)
    concrete_spec = default_mock_concretization(spec_str)
    # expected carries (satisfies-as-abstract, satisfies-as-concrete)
    expected_abstract, expected_concrete = expected
    assert abstract_spec.satisfies(conditional_spec) is expected_abstract
    assert concrete_spec.satisfies(conditional_spec) is expected_concrete
    # A concrete spec always satisfies the abstract spec it was concretized from
    assert concrete_spec.satisfies(abstract_spec)
@pytest.mark.not_on_windows("Tests use linux paths")
@pytest.mark.regression("51001")
def test_selecting_externals_with_compilers_as_root(mutable_config, mock_packages):
    """Tests that we can select externals that have a compiler in their spec, even when
    they are root.
    """
    # NOTE(review): YAML literal indentation reconstructed -- confirm against upstream
    packages_yaml = syaml.load_config(
        """
packages:
  gcc::
    externals:
    - spec: "gcc@9.4.0 languages='c,c++'"
      prefix: /path
      extra_attributes:
        compilers:
          c: /path/bin/gcc
          cxx: /path/bin/g++
  llvm::
    buildable: false
    externals:
    - spec: "llvm@20 +clang"
      prefix: /path
      extra_attributes:
        compilers:
          c: /path/bin/gcc
          cxx: /path/bin/g++
  mpich:
    buildable: false
    externals:
    - spec: "mpich@3.4.3 %gcc"
      prefix: /path/mpich/gcc
    - spec: "mpich@3.4.3 %clang"
      prefix: /path/mpich/clang
"""
    )
    mutable_config.set("packages", packages_yaml["packages"])
    # Select mpich as the root spec: the %compiler part must pick the matching external
    s = spack.concretize.concretize_one("mpich %clang")
    assert s.external
    assert s.prefix == "/path/mpich/clang"
    s = spack.concretize.concretize_one("mpich %gcc")
    assert s.external
    assert s.prefix == "/path/mpich/gcc"
    # Select mpich as a dependency
    s = spack.concretize.concretize_one("mpileaks ^mpi=mpich %clang")
    assert s["mpi"].external
    assert s["mpi"].prefix == "/path/mpich/clang"
    s = spack.concretize.concretize_one("mpileaks ^mpi=mpich %gcc")
    assert s["mpi"].external
    assert s["mpi"].prefix == "/path/mpich/gcc"
@pytest.mark.not_on_windows("Tests use linux paths")
@pytest.mark.regression("51001")
@pytest.mark.parametrize(
    "external_compiler,spec_str", [("gcc@8", "mpich %gcc@8.4"), ("gcc@8.4.0", "mpich %gcc@8")]
)
def test_selecting_externals_with_compilers_and_versions(
    external_compiler, spec_str, mutable_config, mock_packages
):
    """Tests different scenarios of having a compiler specified with a version constraint, either
    in the input spec or in the external spec.
    """
    # NOTE(review): YAML literal indentation reconstructed -- confirm against upstream
    packages_yaml = syaml.load_config(
        f"""
packages:
  gcc:
    externals:
    - spec: "gcc@8.4.0 languages='c,c++'"
      prefix: /path
      extra_attributes:
        compilers:
          c: /path/bin/gcc
          cxx: /path/bin/g++
  mpich:
    buildable: false
    externals:
    - spec: "mpich@3.4.3 %{external_compiler}"
      prefix: /path/mpich/gcc
    - spec: "mpich@3.4.3 %clang"
      prefix: /path/mpich/clang
"""
    )
    mutable_config.set("packages", packages_yaml["packages"])
    # The gcc-built external must be picked whichever side carries the finer version
    s = spack.concretize.concretize_one(spec_str)
    assert s.external
    assert s.prefix == "/path/mpich/gcc"
@pytest.mark.regression("51001")
@pytest.mark.parametrize(
    "external_compiler,spec_str,error_match",
    [
        # Compiler is underspecified
        ("gcc", "mpich %gcc", "there are multiple external specs"),
        ("gcc@9", "mpich %gcc", "there are multiple external specs"),
        # Compiler does not exist
        ("%oneapi", "mpich %gcc@8", "there is no"),
    ],
)
def test_errors_when_specifying_externals_with_compilers(
    external_compiler, spec_str, error_match, mutable_config, mock_packages
):
    """Tests different errors that can occur in an external spec with a compiler specified."""
    # NOTE(review): YAML literal indentation reconstructed -- confirm against upstream
    packages_yaml = syaml.load_config(
        f"""
packages:
  mpich:
    buildable: false
    externals:
    - spec: "mpich@3.4.3 %{external_compiler}"
      prefix: /path/mpich/gcc
    - spec: "mpich@3.4.3 %clang"
      prefix: /path/mpich/clang
"""
    )
    mutable_config.set("packages", packages_yaml["packages"])
    # Each case must fail with the parametrized error message
    with pytest.raises(ExternalDependencyError, match=error_match):
        _ = spack.concretize.concretize_one(spec_str)
@pytest.mark.regression("51146,51067")
def test_caret_in_input_cannot_set_transitive_build_dependencies(default_mock_concretization):
    """Tests that a caret in the input spec does not set transitive build dependencies, and errors
    with an appropriate message.
    """
    # gmake would only be a transitive build dependency here, so ^gmake must be rejected
    with pytest.raises(spack.solver.asp.UnsatisfiableSpecError, match="transitive 'link' or"):
        default_mock_concretization("multivalue-variant ^gmake")
@pytest.mark.regression("51167")
@pytest.mark.require_provenance
def test_commit_variant_enters_the_hash(mutable_config, mock_packages, monkeypatch):
    """Tests that an implicit commit variant, obtained from resolving the commit sha of a branch,
    enters the hash of the spec.
    """
    first_call = True

    def _mock_resolve(spec) -> None:
        # Late-binding closure: first_call is read at call time, so flipping the flag
        # below makes the second concretization resolve to a different sha.
        if first_call:
            spec.variants["commit"] = spack.variant.SingleValuedVariant("commit", f"{'b' * 40}")
            return
        spec.variants["commit"] = spack.variant.SingleValuedVariant("commit", f"{'a' * 40}")

    monkeypatch.setattr(spack.package_base.PackageBase, "_resolve_git_provenance", _mock_resolve)
    before = spack.concretize.concretize_one("git-ref-package@develop")
    first_call = False
    after = spack.concretize.concretize_one("git-ref-package@develop")
    assert before.package.needs_commit(before.version)
    assert before.satisfies(f"commit={'b' * 40}")
    assert after.satisfies(f"commit={'a' * 40}")
    # Same abstract spec, different commit => different hash
    assert before.dag_hash() != after.dag_hash()
@pytest.mark.regression("51180")
def test_reuse_with_mixed_compilers(mutable_config, mock_packages):
    """Tests that potentially reusing a spec with a mixed compiler set, will not interfere
    with a request on one of the languages for the same package.
    """
    # NOTE(review): YAML literal indentation reconstructed -- confirm against upstream
    packages_yaml = syaml.load_config(
        """
packages:
  gcc:
    externals:
    - spec: "gcc@15.1 languages='c,c++,fortran'"
      prefix: /path1
      extra_attributes:
        compilers:
          c: /path1/bin/gcc
          cxx: /path1/bin/g++
          fortran: /path1/bin/gfortran
  llvm:
    externals:
    - spec: "llvm@20 +flang+clang"
      prefix: /path2
      extra_attributes:
        compilers:
          c: /path2/bin/clang
          cxx: /path2/bin/clang++
          fortran: /path2/bin/flang
"""
    )
    mutable_config.set("packages", packages_yaml["packages"])
    # Build a mixed-compiler spec and offer it for reuse
    s = spack.concretize.concretize_one("openblas %c=gcc %fortran=llvm")
    reusable_specs = list(s.traverse(root=True))
    root_specs = [Spec("openblas %fortran=gcc")]
    with spack.config.override("concretizer:reuse", True):
        solver = spack.solver.asp.Solver()
        setup = spack.solver.asp.SpackSolverSetup()
        result, _, _ = solver.driver.solve(setup, root_specs, reuse=reusable_specs)
    assert len(result.specs) == 1
    r = result.specs[0]
    # The %fortran=gcc request must win over reusing the mixed-compiler spec
    assert r.satisfies("openblas %fortran=gcc")
    assert r.dag_hash() != s.dag_hash()
@pytest.mark.regression("51224")
def test_when_possible_above_all(mutable_config, mock_packages):
    """Tests that the criterion to solve as many specs as possible is above all other criteria."""
    requested = [Spec("pkg-a"), Spec("pkg-b")]
    solver = spack.solver.asp.Solver()
    for round_result in solver.solve_in_rounds(requested):
        # The highest-ranked criterion (max == first of a reverse sort) must be the
        # "concretize as many input specs as possible" one
        top_criterion = max(round_result.criteria)
        assert top_criterion.name == "number of input specs not concretized"
def test_concretization_cache_roundtrip(
    mock_packages, use_concretization_cache, monkeypatch, mutable_config
):
    """Tests whether we can write the results of a clingo solve to the cache
    and load the same spec request from the cache to produce identical specs"""
    assert spack.config.get("concretizer:concretization_cache:enable")
    # run one standard concretization to populate the cache and the setup method
    # memoization
    h = spack.concretize.concretize_one("hdf5")

    # ASP output should be stable, concretizing the same spec
    # should have the same problem output

    # assert that we're not storing any new cache entries
    def _ensure_no_store(self, problem: str, result, statistics, test=False):
        # always throw, we never want to reach this code path
        assert False, "Concretization cache hit expected"

    # Assert that we're actually hitting the cache.
    # Keep a reference to the real fetch before it is monkeypatched below.
    cache_fetch = spack.solver.asp.ConcretizationCache.fetch

    def _ensure_cache_hits(self, problem: str):
        result, statistics = cache_fetch(self, problem)
        assert result, "Expected successful concretization cache hit"
        assert statistics, "Expected statistics to be non null on cache hit"
        return result, statistics

    monkeypatch.setattr(spack.solver.asp.ConcretizationCache, "store", _ensure_no_store)
    monkeypatch.setattr(spack.solver.asp.ConcretizationCache, "fetch", _ensure_cache_hits)
    # ensure subsequent concretizations of the same spec produce the same spec
    # object
    for _ in range(5):
        assert h == spack.concretize.concretize_one("hdf5")
def test_concretization_cache_roundtrip_result(use_concretization_cache):
    """Ensure the concretization cache doesn't change Solver Result objects."""
    request = [Spec("hdf5")]
    solver = spack.solver.asp.Solver()
    # The second solve is served from the cache; both results must compare equal
    first, second = solver.solve(request), solver.solve(request)
    assert first == second
def test_concretization_cache_count_cleanup(use_concretization_cache, mutable_config):
    """Tests to ensure we are cleaning the cache when we should be respective to the
    number of entries allowed in the cache"""
    conc_cache_dir = use_concretization_cache
    spack.config.set("concretizer:concretization_cache:entry_limit", 1000)

    def names():
        # Non-hidden regular files currently present in the cache directory.
        # (set comprehension instead of set(genexpr) -- ruff C401)
        return {
            x.name
            for x in conc_cache_dir.iterdir()
            if not x.is_dir() and not x.name.startswith(".")
        }

    assert len(names()) == 0
    # Fill the cache up to its entry limit with fake entries
    for i in range(1000):
        name = spack.util.hash.b32_hash(f"mock_cache_file_{i}")
        (conc_cache_dir / name).touch()
    before = names()
    assert len(before) == 1000
    # cleanup should be run after the 1,001st execution
    spack.concretize.concretize_one("hdf5")
    # ensure that half the elements were removed and that one more was created
    after = names()
    assert len(after) == 501
    assert len(after - before) == 1  # one additional hash added by 1001st concretization
def test_concretization_cache_uncompressed_entry(use_concretization_cache, monkeypatch):
    # fetch must transparently handle cache entries written as plain (uncompressed) JSON

    def _store(self, problem, result, statistics):
        cache_path = self._cache_path_from_problem(problem)
        # NOTE(review): original indentation was lost; the write is assumed to happen
        # inside the write transaction -- confirm against upstream
        with self.write_transaction(cache_path) as exists:
            if exists:
                return
            try:
                # "x" mode + FileExistsError guard: never clobber a concurrent writer
                with open(cache_path, "x", encoding="utf-8") as cache_entry:
                    cache_dict = {"results": result.to_dict(), "statistics": statistics}
                    cache_entry.write(json.dumps(cache_dict))
            except FileExistsError:
                pass

    monkeypatch.setattr(spack.solver.asp.ConcretizationCache, "store", _store)
    # Store the results in plaintext
    spack.concretize.concretize_one("zlib")
    # Ensure fetch can handle the plaintext cache entry
    spack.concretize.concretize_one("zlib")
@pytest.mark.parametrize(
    "asp_file",
    [
        "concretize.lp",
        "heuristic.lp",
        "display.lp",
        "direct_dependency.lp",
        "when_possible.lp",
        "libc_compatibility.lp",
        "os_compatibility.lp",
        "splices.lp",
    ],
)
def test_concretization_cache_asp_canonicalization(asp_file):
    """Stripping an ASP program must remove only blank lines and `%` comments."""
    path = os.path.join(os.path.dirname(spack.solver.asp.__file__), asp_file)
    with open(path, "r", encoding="utf-8") as f:
        # iterate the file directly instead of readlines() -- same lines, no extra list
        original = [line.strip() for line in f]
    stripped = spack.solver.asp.strip_asp_problem(original)
    # Every line removed by the diff must be empty ("-") or a comment ("-%...").
    # Pass a generator straight to all() instead of building a list (ruff C419),
    # and don't materialize the diff.
    assert all(
        line == "-" or line.startswith("-%")
        for line in difflib.unified_diff(original, stripped)
        if line.startswith("-") and not line.startswith("---")
    )
@pytest.mark.parametrize(
    "node_completion,expected,not_expected",
    [
        ("architecture_only", ["+clang", "~flang", "platform=test"], ["lld=*"]),
        (
            "default_variants",
            ["+clang", "~flang", "+lld", "platform=test"],
            ["~clang", "+flang", "~lld"],
        ),
    ],
)
def test_external_node_completion_from_config(
    node_completion, expected, not_expected, mutable_config, mock_packages
):
    """Tests the different options for external node completion in the configuration file."""
    mutable_config.set("concretizer:externals:completion", node_completion)
    concrete = spack.concretize.concretize_one("llvm")
    assert concrete.external
    # Explicit loops (rather than all()) so a failure pinpoints the offending constraint
    for constraint in expected:
        assert concrete.satisfies(constraint)
    for constraint in not_expected:
        assert not concrete.satisfies(constraint)
@pytest.mark.parametrize(
    "spec_str,packages_yaml,expected",
    [
        (
            "mpileaks",
            # NOTE(review): YAML literal indentation reconstructed -- confirm upstream
            """
packages:
  mpileaks:
    externals:
    - spec: "mpileaks@2.3~debug+opt"
      prefix: /user/path
      dependencies:
      - id: callpath_id
        deptypes: link
      - id: mpich_id
        deptypes:
        - "build"
        - "link"
        virtuals: "mpi"
  callpath:
    externals:
    - spec: "callpath@1.0"
      prefix: /user/path
      id: callpath_id
      dependencies:
      - id: mpich_id
        deptypes:
        - "build"
        - "link"
        virtuals: "mpi"
  mpich:
    externals:
    - spec: "mpich@3.0.4"
      prefix: /user/path
      id: mpich_id
""",
            [
                "%mpi=mpich@3.0.4",
                "^callpath %mpi=mpich@3.0.4",
                "%[deptypes=link] callpath",
                "%[deptypes=build,link] mpich",
            ],
        ),
        # Same, but using `spec:` instead of `id:` for dependencies
        (
            "mpileaks",
            """
packages:
  mpileaks:
    externals:
    - spec: "mpileaks@2.3~debug+opt"
      prefix: /user/path
      dependencies:
      - spec: callpath
        deptypes: link
      - spec: mpich
        virtuals: "mpi"
  callpath:
    externals:
    - spec: "callpath@1.0"
      prefix: /user/path
      dependencies:
      - spec: mpich
        virtuals: "mpi"
  mpich:
    externals:
    - spec: "mpich@3.0.4"
      prefix: /user/path
""",
            [
                "%mpi=mpich@3.0.4",
                "^callpath %mpi=mpich@3.0.4",
                "%[deptypes=link] callpath",
                "%[deptypes=build,link] mpich",
            ],
        ),
    ],
)
def test_external_specs_with_dependencies(
    spec_str, packages_yaml, expected, mutable_config, mock_packages
):
    """Tests that we can reconstruct external specs with dependencies."""
    configuration = syaml.load_config(packages_yaml)
    mutable_config.set("packages", configuration["packages"])
    s = spack.concretize.concretize_one(spec_str)
    # The whole DAG must come from externals, with the declared edges reconstructed
    assert all(node.external for node in s.traverse())
    assert all(s.satisfies(c) for c in expected)
@pytest.mark.parametrize(
    "default_target,expected",
    [
        # Specific target requested
        ("x86_64_v3", ["callpath target=x86_64_v3", "^mpich target=x86_64_v3"]),
        # With ranges, be conservative by default
        (":x86_64_v3", ["callpath target=x86_64", "^mpich target=x86_64"]),
        ("x86_64:x86_64_v3", ["callpath target=x86_64", "^mpich target=x86_64"]),
        ("x86_64:", ["callpath target=x86_64", "^mpich target=x86_64"]),
    ],
)
@pytest.mark.skipif(
    spack.vendor.archspec.cpu.host().family != "x86_64", reason="test data for x86_64"
)
def test_target_requirements(default_target, expected, mutable_config, mock_packages):
    """Tests different scenarios where targets might be constrained by configuration and are not
    specified in external specs
    """
    # NOTE(review): YAML literal indentation reconstructed -- confirm against upstream
    configuration = syaml.load_config(
        f"""
packages:
  all:
    require:
    - "target={default_target}"
  callpath:
    buildable: false
    externals:
    - spec: "callpath@1.0"
      prefix: /user/path
      id: callpath_id
      dependencies:
      - id: mpich_id
        deptypes:
        - "build"
        - "link"
        virtuals: "mpi"
  mpich:
    externals:
    - spec: "mpich@3.0.4"
      prefix: /user/path
      id: mpich_id
"""
    )
    mutable_config.set("packages", configuration["packages"])
    s = spack.concretize.concretize_one("callpath")
    assert s.external
    assert all(s.satisfies(x) for x in expected), s.tree()
@pytest.mark.parametrize(
    "spec_str,inline,yaml",
    [
        (
            "cmake-client",
            # Inline form: dependency expressed with % in the external spec string
            """
packages:
  cmake-client:
    externals:
    - spec: cmake-client@1.0 %cmake
      prefix: /mock
  cmake:
    externals:
    - spec: cmake@3.23.0
      prefix: /mock
""",
            # Structured form: same dependency under a dedicated `dependencies:` key
            """
packages:
  cmake-client:
    externals:
    - spec: cmake-client@1.0
      prefix: /mock
      dependencies:
      - spec: cmake
  cmake:
    externals:
    - spec: cmake@3.23.0
      prefix: /mock
""",
        ),
        (
            "mpileaks",
            """
packages:
  mpileaks:
    externals:
    - spec: "mpileaks@2.3~debug+opt %mpi=mpich %[deptypes=link] callpath"
      prefix: /user/path
  callpath:
    externals:
    - spec: "callpath@1.0 %mpi=mpich"
      prefix: /user/path
  mpich:
    externals:
    - spec: "mpich@3.0.4"
      prefix: /user/path
""",
            """
packages:
  mpileaks:
    externals:
    - spec: "mpileaks@2.3~debug+opt"
      prefix: /user/path
      dependencies:
      - spec: callpath
        deptypes: link
      - spec: mpich
        virtuals: "mpi"
  callpath:
    externals:
    - spec: "callpath@1.0"
      prefix: /user/path
      dependencies:
      - spec: mpich
        virtuals: "mpi"
  mpich:
    externals:
    - spec: "mpich@3.0.4"
      prefix: /user/path
""",
        ),
    ],
)
def test_external_inline_equivalent_to_yaml(spec_str, inline, yaml, mutable_config, mock_packages):
    """Tests that the inline syntax for external specs is equivalent to the YAML syntax."""
    # NOTE(review): YAML literal indentation reconstructed -- confirm against upstream
    configuration = syaml.load_config(inline)
    mutable_config.set("packages", configuration["packages"])
    inline_spec = spack.concretize.concretize_one(spec_str)
    configuration = syaml.load_config(yaml)
    mutable_config.set("packages", configuration["packages"])
    yaml_spec = spack.concretize.concretize_one(spec_str)
    assert inline_spec == yaml_spec
@pytest.mark.regression("51556")
def test_reusing_gcc_same_version_different_libcs(monkeypatch, mutable_config, mock_packages):
    """Tests that Spack can solve for specs when it reuses 2 GCCs at the same version,
    but injecting different libcs.
    """
    # Two externals at the same gcc version, distinguished only by os
    # NOTE(review): YAML literal indentation reconstructed -- confirm against upstream
    packages_yaml = syaml.load_config(
        """
packages:
  gcc:
    externals:
    - spec: "gcc@12.3.0 languages='c,c++,fortran' os=debian6"
      prefix: /path
      extra_attributes:
        compilers:
          c: /path/bin/gcc
          cxx: /path/bin/g++
          fortran: /path/bin/gfortran
    - spec: "gcc@12.3.0 languages='c,c++,fortran' os=redhat6"
      prefix: /path
      extra_attributes:
        compilers:
          c: /path/bin/gcc
          cxx: /path/bin/g++
          fortran: /path/bin/gfortran
"""
    )
    mutable_config.set("packages", packages_yaml["packages"])
    mutable_config.set("concretizer:reuse", True)

    def _mock_libc(self):
        # Pretend each external gcc links against a different libc
        if self.spec.satisfies("os=debian6"):
            return spack.spec.Spec("glibc@=2.31", external_path="/rocky9/path")
        return spack.spec.Spec("glibc@=2.28", external_path="/rocky8/path")

    monkeypatch.setattr(
        spack.compilers.libraries.CompilerPropertyDetector, "default_libc", _mock_libc
    )
    # This should not raise
    mpileaks = spack.concretize.concretize_one("mpileaks %c=gcc@12")
    assert mpileaks.satisfies("%c=gcc@12")
| TestConcretizeEdges |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/analyzer_cli_test.py | {
"start": 64874,
"end": 78874
} | class ____(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
  """Build a small graph with control dependencies, run it under the debugger,
  and create an analyzer CLI registry over the resulting dump."""
  cls._dump_root = tempfile.mkdtemp()

  cls._is_gpu_available = test.is_gpu_available()
  if cls._is_gpu_available:
    gpu_name = test_util.gpu_device_name()
    cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
  else:
    cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0"

  with session.Session(config=no_rewrite_session_config()) as sess:
    x_init_val = np.array([5.0, 3.0])
    x_init = constant_op.constant(x_init_val, shape=[2])
    x = variable_v1.VariableV1(x_init, name="control_deps/x")

    y = math_ops.add(x, x, name="control_deps/y")
    # ctrl_dep_y/z add explicit control edges on top of the data edges
    y = control_flow_ops.with_dependencies(
        [x], y, name="control_deps/ctrl_dep_y")

    z = math_ops.multiply(x, y, name="control_deps/z")
    z = control_flow_ops.with_dependencies(
        [x, y], z, name="control_deps/ctrl_dep_z")

    x.initializer.run()

    # Watch every node so the dump contains the whole partition graph
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls="file://%s" % cls._dump_root)

    # Invoke Session.run().
    run_metadata = config_pb2.RunMetadata()
    sess.run(z, options=run_options, run_metadata=run_metadata)

  debug_dump = debug_data.DebugDumpDir(
      cls._dump_root, partition_graphs=run_metadata.partition_graphs)

  # Construct the analyzer and command handler registry.
  _, cls._registry = create_analyzer_cli(debug_dump)
@classmethod
def tearDownClass(cls):
  # Tear down the temporary dump directory created in setUpClass.
  file_io.delete_recursively(cls._dump_root)
def testNodeInfoWithControlDependencies(self):
  """node_info must list control inputs/recipients with working menu shortcuts."""
  # Call node_info on a node with control inputs.
  out = self._registry.dispatch_command("node_info",
                                        ["control_deps/ctrl_dep_y"])
  assert_node_attribute_lines(self, out, "control_deps/ctrl_dep_y",
                              "Identity", self._main_device,
                              [("AddV2", "control_deps/y")],
                              [("VariableV2", "control_deps/x")],
                              [("Mul", "control_deps/z")],
                              [("Identity", "control_deps/ctrl_dep_z")])

  # Call node info on a node with control recipients.
  out = self._registry.dispatch_command("ni", ["control_deps/x"])
  assert_node_attribute_lines(self, out, "control_deps/x", "VariableV2",
                              self._main_device, [], [],
                              [("Identity", "control_deps/x/read")],
                              [("Identity", "control_deps/ctrl_dep_y"),
                               ("Identity", "control_deps/ctrl_dep_z")])

  # Verify the menu items (command shortcuts) in the output.
  check_menu_item(self, out, 10,
                  len(out.lines[10]) - len("control_deps/x/read"),
                  len(out.lines[10]), "ni -a -d -t control_deps/x/read")
  # The two control recipients may appear in either order on lines 13/14.
  if out.lines[13].endswith("control_deps/ctrl_dep_y"):
    y_line = 13
    z_line = 14
  else:
    y_line = 14
    z_line = 13
  check_menu_item(self, out, y_line,
                  len(out.lines[y_line]) - len("control_deps/ctrl_dep_y"),
                  len(out.lines[y_line]),
                  "ni -a -d -t control_deps/ctrl_dep_y")
  check_menu_item(self, out, z_line,
                  len(out.lines[z_line]) - len("control_deps/ctrl_dep_z"),
                  len(out.lines[z_line]),
                  "ni -a -d -t control_deps/ctrl_dep_z")
def testListInputsNonRecursiveNoControl(self):
  """List inputs non-recursively, without any control inputs."""

  # Do not include node op types.
  node_name = "control_deps/z"
  out = self._registry.dispatch_command("list_inputs", [node_name])

  self.assertEqual([
      "Inputs to node \"%s\" (Depth limit = 1):" % node_name,
      "|- (1) control_deps/x/read", "| |- ...",
      "|- (1) control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
      "  (d): recursion depth = d."
  ], out.lines)

  # Include node op types (-t flag adds the [Op] annotation and legend line).
  out = self._registry.dispatch_command("li", ["-t", node_name])

  self.assertEqual([
      "Inputs to node \"%s\" (Depth limit = 1):" % node_name,
      "|- (1) [Identity] control_deps/x/read", "| |- ...",
      "|- (1) [Identity] control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
      "  (d): recursion depth = d.", "  [Op]: Input node has op type Op."
  ], out.lines)
  check_main_menu(
      self,
      out,
      list_tensors_enabled=True,
      node_info_node_name=node_name,
      print_tensor_node_name=node_name,
      list_outputs_node_name=node_name)

  # Verify that the node name has bold attribute.
  self.assertEqual([(16, 16 + len(node_name), "bold")], out.font_attr_segs[0])

  # Verify the menu items (command shortcuts) in the output.
  check_menu_item(self, out, 1,
                  len(out.lines[1]) - len("control_deps/x/read"),
                  len(out.lines[1]), "li -c -r control_deps/x/read")
  check_menu_item(self, out, 3,
                  len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
                  len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
def testListInputsNonRecursiveNoControlUsingTensorName(self):
"""List inputs using the name of an output tensor of the node."""
# Do not include node op types.
node_name = "control_deps/z"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command("list_inputs", [tensor_name])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 1):" % node_name,
"|- (1) control_deps/x/read", "| |- ...",
"|- (1) control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
" (d): recursion depth = d."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/x/read"),
len(out.lines[1]), "li -c -r control_deps/x/read")
check_menu_item(self, out, 3,
len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
def testListInputsNonRecursiveWithControls(self):
"""List inputs non-recursively, with control inputs."""
node_name = "control_deps/ctrl_dep_z"
out = self._registry.dispatch_command("li", ["-t", node_name, "-c"])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 1, " % node_name +
"control inputs included):", "|- (1) [Mul] control_deps/z", "| |- ...",
"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y", "| |- ...",
"|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
" (d): recursion depth = d.", " (Ctrl): Control input.",
" [Op]: Input node has op type Op."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/z"),
len(out.lines[1]), "li -c -r control_deps/z")
check_menu_item(self, out, 3,
len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
check_menu_item(self, out, 5,
len(out.lines[5]) - len("control_deps/x"),
len(out.lines[5]), "li -c -r control_deps/x")
def testListInputsRecursiveWithControls(self):
"""List inputs recursively, with control inputs."""
node_name = "control_deps/ctrl_dep_z"
out = self._registry.dispatch_command("li", ["-c", "-r", "-t", node_name])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 20, " % node_name +
"control inputs included):", "|- (1) [Mul] control_deps/z",
"| |- (2) [Identity] control_deps/x/read",
"| | |- (3) [VariableV2] control_deps/x",
"| |- (2) [Identity] control_deps/ctrl_dep_y",
"| |- (3) [AddV2] control_deps/y",
"| | |- (4) [Identity] control_deps/x/read",
"| | | |- (5) [VariableV2] control_deps/x",
"| | |- (4) [Identity] control_deps/x/read",
"| | |- (5) [VariableV2] control_deps/x",
"| |- (3) (Ctrl) [VariableV2] control_deps/x",
"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
"| |- (2) [AddV2] control_deps/y",
"| | |- (3) [Identity] control_deps/x/read",
"| | | |- (4) [VariableV2] control_deps/x",
"| | |- (3) [Identity] control_deps/x/read",
"| | |- (4) [VariableV2] control_deps/x",
"| |- (2) (Ctrl) [VariableV2] control_deps/x",
"|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
" (d): recursion depth = d.", " (Ctrl): Control input.",
" [Op]: Input node has op type Op."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/z"),
len(out.lines[1]), "li -c -r control_deps/z")
check_menu_item(self, out, 11,
len(out.lines[11]) - len("control_deps/ctrl_dep_y"),
len(out.lines[11]), "li -c -r control_deps/ctrl_dep_y")
check_menu_item(self, out, 18,
len(out.lines[18]) - len("control_deps/x"),
len(out.lines[18]), "li -c -r control_deps/x")
def testListInputsRecursiveWithControlsWithDepthLimit(self):
"""List inputs recursively, with control inputs and a depth limit."""
node_name = "control_deps/ctrl_dep_z"
out = self._registry.dispatch_command(
"li", ["-c", "-r", "-t", "-d", "2", node_name])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 2, " % node_name +
"control inputs included):", "|- (1) [Mul] control_deps/z",
"| |- (2) [Identity] control_deps/x/read", "| | |- ...",
"| |- (2) [Identity] control_deps/ctrl_dep_y", "| |- ...",
"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
"| |- (2) [AddV2] control_deps/y", "| | |- ...",
"| |- (2) (Ctrl) [VariableV2] control_deps/x",
"|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
" (d): recursion depth = d.", " (Ctrl): Control input.",
" [Op]: Input node has op type Op."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/z"),
len(out.lines[1]), "li -c -r control_deps/z")
check_menu_item(self, out, 10,
len(out.lines[10]) - len("control_deps/x"),
len(out.lines[10]), "li -c -r control_deps/x")
def testListInputsNodeWithoutInputs(self):
"""List the inputs to a node without any input."""
node_name = "control_deps/x"
out = self._registry.dispatch_command("li", ["-c", "-r", "-t", node_name])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 20, control " % node_name +
"inputs included):", " [None]", "", "Legend:",
" (d): recursion depth = d.", " (Ctrl): Control input.",
" [Op]: Input node has op type Op."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testListInputsNonexistentNode(self):
out = self._registry.dispatch_command(
"list_inputs", ["control_deps/z/foo"])
self.assertEqual([
"ERROR: There is no node named \"control_deps/z/foo\" in the "
"partition graphs"], out.lines)
def testListRecipientsRecursiveWithControlsWithDepthLimit(self):
"""List recipients recursively, with control inputs and a depth limit."""
out = self._registry.dispatch_command(
"lo", ["-c", "-r", "-t", "-d", "1", "control_deps/x"])
self.assertEqual([
"Recipients of node \"control_deps/x\" (Depth limit = 1, control "
"recipients included):",
"|- (1) [Identity] control_deps/x/read",
"| |- ...",
"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
"| |- ...",
"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_z",
"", "Legend:", " (d): recursion depth = d.",
" (Ctrl): Control input.",
" [Op]: Input node has op type Op."], out.lines)
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/x/read"),
len(out.lines[1]), "lo -c -r control_deps/x/read")
check_menu_item(self, out, 3,
len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
len(out.lines[3]), "lo -c -r control_deps/ctrl_dep_y")
check_menu_item(self, out, 5,
len(out.lines[5]) - len("control_deps/ctrl_dep_z"),
len(out.lines[5]), "lo -c -r control_deps/ctrl_dep_z")
# Verify the bold attribute of the node name.
self.assertEqual([(20, 20 + len("control_deps/x"), "bold")],
out.font_attr_segs[0])
@test_util.run_v1_only("b/120545219")
| AnalyzerCLIControlDepTest |
python | getsentry__sentry | src/sentry/issues/endpoints/organization_eventid.py | {
"start": 1126,
"end": 1345
} | class ____(TypedDict):
organizationSlug: str
projectSlug: str
groupId: str
eventId: str
event: EventSerializerResponse
@region_silo_endpoint
@extend_schema(tags=["Organizations"])
| EventIdLookupResponse |
python | kamyu104__LeetCode-Solutions | Python/cut-off-trees-for-golf-event.py | {
"start": 1984,
"end": 3320
} | class ____(object):
def cutOffTree(self, forest):
"""
:type forest: List[List[int]]
:rtype: int
"""
def minStep(p1, p2):
min_steps = 0
lookup = {p1}
q = collections.deque([p1])
while q:
size = len(q)
for _ in xrange(size):
(i, j) = q.popleft()
if (i, j) == p2:
return min_steps
for i, j in (i+1, j), (i-1, j), (i, j+1), (i, j-1):
if not (0 <= i < m and 0 <= j < n and forest[i][j] and (i, j) not in lookup):
continue
q.append((i, j))
lookup.add((i, j))
min_steps += 1
return -1
m, n = len(forest), len(forest[0])
min_heap = []
for i in xrange(m):
for j in xrange(n):
if forest[i][j] > 1:
heapq.heappush(min_heap, (forest[i][j], (i, j)))
start = (0, 0)
result = 0
while min_heap:
tree = heapq.heappop(min_heap)
step = minStep(start, tree[1])
if step < 0:
return -1
result += step
start = tree[1]
return result
| Solution_TLE |
python | nedbat__coveragepy | tests/plugin2.py | {
"start": 770,
"end": 1105
} | class ____(CoveragePlugin):
"""A file tracer plugin for testing."""
def file_tracer(self, filename: str) -> FileTracer | None:
if "render.py" in filename:
return RenderFileTracer()
return None
def file_reporter(self, filename: str) -> FileReporter:
return MyFileReporter(filename)
| Plugin |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 191124,
"end": 192281
} | class ____(Binding):
"""
BindCheckbox schema wrapper.
Parameters
----------
input : Literal['checkbox']
debounce : float
If defined, delays event handling until the specified milliseconds have elapsed
since the last event was fired.
element : str, :class:`Element`
An optional CSS selector string indicating the parent element to which the input
element should be added. By default, all input elements are added within the parent
container of the Vega view.
name : str
By default, the signal name is used to label input elements. This ``name`` property
can be used instead to specify a custom label for the bound signal.
"""
_schema = {"$ref": "#/definitions/BindCheckbox"}
def __init__(
self,
input: Optional[Literal["checkbox"]] = Undefined,
debounce: Optional[float] = Undefined,
element: Optional[str | SchemaBase] = Undefined,
name: Optional[str] = Undefined,
**kwds,
):
super().__init__(
input=input, debounce=debounce, element=element, name=name, **kwds
)
| BindCheckbox |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/np_random_test.py | {
"start": 2337,
"end": 2759
} | class ____(RandomTestBase):
def setUp(self):
self.np_func = np_random.randn
self.onp_func = onp.random.randn
super(RandNTest, self).setUp()
@parameterized.parameters((), (2), (2, 3))
def test_float64(self, *dims):
self._test(*dims)
@parameterized.parameters((), (2), ((2,)), (2, 3))
def test_float32(self, *dims):
self._test(*dims, allow_float64=False, onp_dtype=np_dtypes.float32)
| RandNTest |
python | pytorch__pytorch | torch/testing/_internal/distributed/_shard/test_common.py | {
"start": 131,
"end": 1219
} | class ____(nn.Module):
def __init__(self, linear_size, rank=None, dtype=torch.float32):
super().__init__()
self.fc1 = nn.Linear(*linear_size[0], dtype=dtype)
self.gelu = nn.GELU()
self.fc2 = nn.Linear(*linear_size[1], dtype=dtype)
if rank is not None:
self.fc1.cuda(rank)
self.fc2.cuda(rank)
def forward(self, inp):
return self.fc2(self.gelu(self.fc1(inp)))
def get_weights(self):
if isinstance(self.fc1.weight, ShardedTensor):
weight1 = self.fc1.weight.local_tensor()
else:
weight1 = self.fc1.weight
if isinstance(self.fc2.weight, ShardedTensor):
weight2 = self.fc2.weight.local_tensor()
else:
weight2 = self.fc2.weight
return (weight1, weight2)
def get_biases(self):
return (self.fc1.bias, self.fc2.bias)
def get_weight_grads(self):
return (self.fc1.weight.grad, self.fc2.weight.grad)
def get_bias_grads(self):
return (self.fc1.bias.grad, self.fc2.bias.grad)
| SimpleMegatronLM |
python | ray-project__ray | python/ray/air/tests/mocked_wandb_integration.py | {
"start": 637,
"end": 746
} | class ____:
args: list
kwargs: dict
exclude: list
logs: list
config: dict
| LoggingActorState |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/destinations.py | {
"start": 28636,
"end": 30917
} | class ____(GeneratedAirbyteDestination):
class None_:
@public
def __init__(
self,
):
self.method = "none"
class ApiKeySecret:
@public
def __init__(self, apiKeyId: str, apiKeySecret: str):
self.method = "secret"
self.apiKeyId = check.str_param(apiKeyId, "apiKeyId")
self.apiKeySecret = check.str_param(apiKeySecret, "apiKeySecret")
class UsernamePassword:
@public
def __init__(self, username: str, password: str):
self.method = "basic"
self.username = check.str_param(username, "username")
self.password = check.str_param(password, "password")
@public
def __init__(
self,
name: str,
endpoint: str,
authenticationMethod: Union[
"ElasticsearchDestination.None_",
"ElasticsearchDestination.ApiKeySecret",
"ElasticsearchDestination.UsernamePassword",
],
upsert: Optional[bool] = None,
):
r"""Airbyte Destination for Elasticsearch.
Documentation can be found at https://docs.airbyte.com/integrations/destinations/elasticsearch
Args:
name (str): The name of the destination.
endpoint (str): The full url of the Elasticsearch server
upsert (Optional[bool]): If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the elasticsearch doc id. Does not support composite primary keys.
authenticationMethod (Union[ElasticsearchDestination.None\\_, ElasticsearchDestination.ApiKeySecret, ElasticsearchDestination.UsernamePassword]): The type of authentication to be used
"""
self.endpoint = check.str_param(endpoint, "endpoint")
self.upsert = check.opt_bool_param(upsert, "upsert")
self.authenticationMethod = check.inst_param(
authenticationMethod,
"authenticationMethod",
(
ElasticsearchDestination.None_,
ElasticsearchDestination.ApiKeySecret,
ElasticsearchDestination.UsernamePassword,
),
)
super().__init__("Elasticsearch", name)
| ElasticsearchDestination |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol15.py | {
"start": 307,
"end": 474
} | class ____:
@property
def f(self: T) -> T:
return self
def m(self, item: T, callback: Callable[[T], str]) -> str: ...
x: Proto = Concrete()
| Concrete |
python | gevent__gevent | src/greentest/3.9/test_asyncore.py | {
"start": 10999,
"end": 13189
} | class ____(unittest.TestCase):
def setUp(self):
self.d = b"It's not dead, it's sleeping!"
with open(support.TESTFN, 'wb') as file:
file.write(self.d)
def tearDown(self):
support.unlink(support.TESTFN)
def test_recv(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
w = asyncore.file_wrapper(fd)
os.close(fd)
self.assertNotEqual(w.fd, fd)
self.assertNotEqual(w.fileno(), fd)
self.assertEqual(w.recv(13), b"It's not dead")
self.assertEqual(w.read(6), b", it's")
w.close()
self.assertRaises(OSError, w.read, 1)
def test_send(self):
d1 = b"Come again?"
d2 = b"I want to buy some cheese."
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_APPEND)
w = asyncore.file_wrapper(fd)
os.close(fd)
w.write(d1)
w.send(d2)
w.close()
with open(support.TESTFN, 'rb') as file:
self.assertEqual(file.read(), self.d + d1 + d2)
@unittest.skipUnless(hasattr(asyncore, 'file_dispatcher'),
'asyncore.file_dispatcher required')
def test_dispatcher(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
data = []
class FileDispatcher(asyncore.file_dispatcher):
def handle_read(self):
data.append(self.recv(29))
s = FileDispatcher(fd)
os.close(fd)
asyncore.loop(timeout=0.01, use_poll=True, count=2)
self.assertEqual(b"".join(data), self.d)
def test_resource_warning(self):
# Issue #11453
fd = os.open(support.TESTFN, os.O_RDONLY)
f = asyncore.file_wrapper(fd)
os.close(fd)
with support.check_warnings(('', ResourceWarning)):
f = None
support.gc_collect()
def test_close_twice(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
f = asyncore.file_wrapper(fd)
os.close(fd)
os.close(f.fd) # file_wrapper dupped fd
with self.assertRaises(OSError):
f.close()
self.assertEqual(f.fd, -1)
# calling close twice should not fail
f.close()
| FileWrapperTest |
python | run-llama__llama_index | llama-index-core/llama_index/core/data_structs/data_structs.py | {
"start": 8210,
"end": 8391
} | class ____(IndexStruct):
"""Empty index."""
@classmethod
def get_type(cls) -> IndexStructType:
"""Get type."""
return IndexStructType.EMPTY
| EmptyIndexStruct |
python | pytorch__pytorch | torch/_inductor/codegen/cpp.py | {
"start": 221606,
"end": 224527
} | class ____:
def __init__(self):
super().__init__()
self.args = KernelArgs()
self.loops_code = BracesBuffer()
self.ws = WorkSharing(self.loops_code)
self.stack = contextlib.ExitStack()
self.stack.enter_context(self.ws)
self.scheduled_nodes = []
def new_kernel(self, cls, *args):
return cls(self.args, parallel_num_threads(), *args)
def finalize_kernel(self, new_kernel, nodes):
self.scheduled_nodes += nodes
code = self.loops_code
ws = self.ws
new_kernel.codegen_loops(code, ws)
def get_num_args(self):
arg_defs, _call_args, _arg_types = self.args.cpp_argdefs()
args_num = len(arg_defs)
return args_num
def codegen_group(self, name=None) -> str:
self.stack.close()
if not self.scheduled_nodes:
return ""
code = BracesBuffer()
# 1. Include header files
# TODO: support kernel profile on other platforms
enable_kernel_profile = config.cpp.enable_kernel_profile and sys.platform in [
"linux",
"win32",
]
if enable_kernel_profile:
code.writelines(["#include <torch/csrc/inductor/aoti_runtime/utils.h>"])
code.writeline("#include <torch/csrc/inductor/cpp_prefix.h>")
# 2. Function definition
kernel_decl_name = str(Placeholder.KERNEL_NAME) if name is None else name
kernel_name = str(Placeholder.DESCRIPTIVE_NAME) if name is None else name
arg_defs, _, _ = self.args.cpp_argdefs()
arg_defs = ",\n".ljust(25).join(arg_defs)
func_export_decl = get_export_declaration()
inline_attr = (
"C10_ALWAYS_INLINE_ATTRIBUTE" if config.cpp.force_inline_kernel else ""
)
code.writeline(
f'extern "C" {func_export_decl} void {inline_attr} {kernel_decl_name}({arg_defs})'
)
# 3. Function body
with code.indent():
if enable_kernel_profile:
graph_id = V.graph.graph_id
prefix = "graph_" + str(graph_id) + "_" if graph_id is not None else ""
code.writelines(
[
(
"torch::aot_inductor::RAIIAtenRecordFunctionHandle "
f'record_{prefix + kernel_name}_("{prefix + kernel_name}", nullptr);'
)
]
)
for old, new in self.args.aliases():
code.writeline(f"auto {old} = {new};")
code.splice(self.loops_code)
return code.getvalue()
def call_kernel(self, wrapper, kernel_name):
_, call_args, arg_types = self.args.cpp_argdefs()
wrapper.generate_kernel_call(
kernel_name,
call_args,
triton=False,
arg_types=arg_types,
)
| KernelGroup |
python | PrefectHQ__prefect | src/prefect/server/database/orm_models.py | {
"start": 34840,
"end": 35163
} | class ____(Base):
name: Mapped[str]
parent_block_schema_id: Mapped[uuid.UUID] = mapped_column(
sa.ForeignKey("block_schema.id", ondelete="cascade")
)
reference_block_schema_id: Mapped[uuid.UUID] = mapped_column(
sa.ForeignKey("block_schema.id", ondelete="cascade")
)
| BlockSchemaReference |
python | getsentry__sentry | src/sentry/api/endpoints/project_profiling_profile.py | {
"start": 2993,
"end": 3598
} | class ____(ProjectProfilingBaseEndpoint):
def get(
self, request: Request, project: Project, profiler_id: str, chunk_id: str
) -> HttpResponse:
if not features.has(
"organizations:continuous-profiling", project.organization, actor=request.user
):
return Response(status=404)
kwargs: dict[str, Any] = {
"method": "GET",
"path": f"/organizations/{project.organization_id}/projects/{project.id}/raw_chunks/{profiler_id}/{chunk_id}",
}
return proxy_profiling_service(**kwargs)
| ProjectProfilingRawChunkEndpoint |
python | tensorflow__tensorflow | tensorflow/python/util/type_annotations_test.py | {
"start": 958,
"end": 2799
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.parameters([
(typing.Union[int, float], 'Union'),
(typing.Tuple[int, ...], 'Tuple'),
(typing.Tuple[int, float, float], 'Tuple'),
(typing.Mapping[int, float], 'Mapping'),
(typing.Union[typing.Tuple[int], typing.Tuple[int, ...]], 'Union'),
# These predicates return False for Generic types w/ no parameters:
(typing.Union, None),
(typing.Tuple, None),
(typing.Mapping, None),
(int, None),
(12, None),
])
def testGenericTypePredicates(self, tp, expected):
self.assertEqual(
type_annotations.is_generic_union(tp), expected == 'Union')
self.assertEqual(
type_annotations.is_generic_tuple(tp), expected == 'Tuple')
self.assertEqual(
type_annotations.is_generic_mapping(tp), expected == 'Mapping')
@parameterized.parameters([
(typing.Union[int, float], (int, float)),
(typing.Tuple[int, ...], (int, Ellipsis)),
(typing.Tuple[int, float, float], (
int,
float,
float,
)),
(typing.Mapping[int, float], (int, float)),
(typing.Union[typing.Tuple[int],
typing.Tuple[int,
...]], (typing.Tuple[int], typing.Tuple[int,
...])),
])
def testGetGenericTypeArgs(self, tp, expected):
self.assertEqual(type_annotations.get_generic_type_args(tp), expected)
def testIsForwardRef(self):
tp = typing.Union['B', int]
tp_args = type_annotations.get_generic_type_args(tp)
self.assertTrue(type_annotations.is_forward_ref(tp_args[0]))
self.assertFalse(type_annotations.is_forward_ref(tp_args[1]))
if __name__ == '__main__':
googletest.main()
| TypeAnnotationsTest |
python | astropy__astropy | astropy/logger.py | {
"start": 4377,
"end": 17990
} | class ____(Logger):
"""
This class is used to set up the Astropy logging.
The main functionality added by this class over the built-in
logging.Logger class is the ability to keep track of the origin of the
messages, the ability to enable logging of warnings.warn calls and
exceptions, and the addition of colorized output and context managers to
easily capture messages to a file or list.
"""
def makeRecord(
self,
name,
level,
pathname,
lineno,
msg,
args,
exc_info,
func=None,
extra=None,
sinfo=None,
):
if extra is None:
extra = {}
if "origin" not in extra:
current_module = find_current_module(1, finddiff=[True, "logging"])
if current_module is not None:
extra["origin"] = current_module.__name__
else:
extra["origin"] = "unknown"
return Logger.makeRecord(
self,
name,
level,
pathname,
lineno,
msg,
args,
exc_info,
func=func,
extra=extra,
sinfo=sinfo,
)
_showwarning_orig = None
def _showwarning(self, *args, **kwargs):
# Bail out if we are not catching a warning from Astropy
if not isinstance(args[0], AstropyWarning):
return self._showwarning_orig(*args, **kwargs)
warning = args[0]
# Deliberately not using isinstance here: We want to display
# the class name only when it's not the default class,
# AstropyWarning. The name of subclasses of AstropyWarning should
# be displayed.
if type(warning) not in (AstropyWarning, AstropyUserWarning):
message = f"{warning.__class__.__name__}: {args[0]}"
else:
message = str(args[0])
mod_path = args[2]
# Now that we have the module's path, we look through sys.modules to
# find the module object and thus the fully-package-specified module
# name. The module.__file__ is the original source file name.
mod_name = None
mod_path = Path(mod_path).with_suffix("")
for mod in sys.modules.values():
try:
# Believe it or not this can fail in some cases:
# https://github.com/astropy/astropy/issues/2671
path = Path(getattr(mod, "__file__", "")).with_suffix("")
except Exception:
continue
if path == mod_path:
mod_name = mod.__name__
break
if mod_name is not None:
self.warning(message, extra={"origin": mod_name})
else:
self.warning(message)
def warnings_logging_enabled(self):
return self._showwarning_orig is not None
def enable_warnings_logging(self):
"""
Enable logging of warnings.warn() calls.
Once called, any subsequent calls to ``warnings.warn()`` are
redirected to this logger and emitted with level ``WARN``. Note that
this replaces the output from ``warnings.warn``.
This can be disabled with ``disable_warnings_logging``.
"""
if self.warnings_logging_enabled():
raise LoggingError("Warnings logging has already been enabled")
self._showwarning_orig = warnings.showwarning
warnings.showwarning = self._showwarning
def disable_warnings_logging(self):
"""
Disable logging of warnings.warn() calls.
Once called, any subsequent calls to ``warnings.warn()`` are no longer
redirected to this logger.
This can be re-enabled with ``enable_warnings_logging``.
"""
if not self.warnings_logging_enabled():
raise LoggingError("Warnings logging has not been enabled")
if warnings.showwarning != self._showwarning:
raise LoggingError(
"Cannot disable warnings logging: "
"warnings.showwarning was not set by this "
"logger, or has been overridden"
)
warnings.showwarning = self._showwarning_orig
self._showwarning_orig = None
_excepthook_orig = None
def _excepthook(self, etype, value, traceback):
if traceback is None:
mod = None
else:
tb = traceback
while tb.tb_next is not None:
tb = tb.tb_next
mod = inspect.getmodule(tb)
# include the error type in the message.
if len(value.args) > 0:
message = f"{etype.__name__}: {str(value)}"
else:
message = str(etype.__name__)
if mod is not None:
self.error(message, extra={"origin": mod.__name__})
else:
self.error(message)
self._excepthook_orig(etype, value, traceback)
def exception_logging_enabled(self):
"""
Determine if the exception-logging mechanism is enabled.
Returns
-------
exclog : bool
True if exception logging is on, False if not.
"""
if _WITHIN_IPYTHON:
from IPython import get_ipython
return _AstLogIPYExc in get_ipython().custom_exceptions
else:
return self._excepthook_orig is not None
def enable_exception_logging(self):
"""
Enable logging of exceptions.
Once called, any uncaught exceptions will be emitted with level
``ERROR`` by this logger, before being raised.
This can be disabled with ``disable_exception_logging``.
"""
if self.exception_logging_enabled():
raise LoggingError("Exception logging has already been enabled")
if _WITHIN_IPYTHON:
# IPython has its own way of dealing with excepthook
from IPython import get_ipython
# We need to locally define the function here, because IPython
# actually makes this a member function of their own class
def ipy_exc_handler(ipyshell, etype, evalue, tb, tb_offset=None):
# First use our excepthook
self._excepthook(etype, evalue, tb)
# Now also do IPython's traceback
ipyshell.showtraceback((etype, evalue, tb), tb_offset=tb_offset)
# now register the function with IPython
# note that we include _AstLogIPYExc so `disable_exception_logging`
# knows that it's disabling the right thing
get_ipython().set_custom_exc(
(BaseException, _AstLogIPYExc), ipy_exc_handler
)
# and set self._excepthook_orig to a no-op
self._excepthook_orig = lambda etype, evalue, tb: None
else:
# standard python interpreter
self._excepthook_orig = sys.excepthook
sys.excepthook = self._excepthook
def disable_exception_logging(self):
"""
Disable logging of exceptions.
Once called, any uncaught exceptions will no longer be emitted by this
logger.
This can be re-enabled with ``enable_exception_logging``.
"""
if not self.exception_logging_enabled():
raise LoggingError("Exception logging has not been enabled")
if _WITHIN_IPYTHON:
# IPython has its own way of dealing with exceptions
from IPython import get_ipython
get_ipython().set_custom_exc((), None)
else:
# standard python interpreter
if sys.excepthook != self._excepthook:
raise LoggingError(
"Cannot disable exception logging: "
"sys.excepthook was not set by this logger, "
"or has been overridden"
)
sys.excepthook = self._excepthook_orig
self._excepthook_orig = None
def enable_color(self):
"""
Enable colorized output.
"""
_conf.use_color = True
def disable_color(self):
"""
Disable colorized output.
"""
_conf.use_color = False
@contextmanager
def log_to_file(self, filename, filter_level=None, filter_origin=None):
"""
Context manager to temporarily log messages to a file.
Parameters
----------
filename : str
The file to log messages to.
filter_level : str
If set, any log messages less important than ``filter_level`` will
not be output to the file. Note that this is in addition to the
top-level filtering for the logger, so if the logger has level
'INFO', then setting ``filter_level`` to ``INFO`` or ``DEBUG``
will have no effect, since these messages are already filtered
out.
filter_origin : str
If set, only log messages with an origin starting with
``filter_origin`` will be output to the file.
Notes
-----
By default, the logger already outputs log messages to a file set in
the Astropy configuration file. Using this context manager does not
stop log messages from being output to that file, nor does it stop log
messages from being printed to standard output.
Examples
--------
The context manager is used as::
with logger.log_to_file('myfile.log'):
# your code here
"""
encoding = conf.log_file_encoding if conf.log_file_encoding else None
fh = logging.FileHandler(filename, encoding=encoding)
if filter_level is not None:
fh.setLevel(filter_level)
if filter_origin is not None:
fh.addFilter(FilterOrigin(filter_origin))
f = logging.Formatter(conf.log_file_format)
fh.setFormatter(f)
self.addHandler(fh)
yield
fh.close()
self.removeHandler(fh)
@contextmanager
def log_to_list(self, filter_level=None, filter_origin=None):
"""
Context manager to temporarily log messages to a list.
Parameters
----------
filename : str
The file to log messages to.
filter_level : str
If set, any log messages less important than ``filter_level`` will
not be output to the file. Note that this is in addition to the
top-level filtering for the logger, so if the logger has level
'INFO', then setting ``filter_level`` to ``INFO`` or ``DEBUG``
will have no effect, since these messages are already filtered
out.
filter_origin : str
If set, only log messages with an origin starting with
``filter_origin`` will be output to the file.
Notes
-----
Using this context manager does not stop log messages from being
output to standard output.
Examples
--------
The context manager is used as::
with logger.log_to_list() as log_list:
# your code here
"""
lh = ListHandler()
if filter_level is not None:
lh.setLevel(filter_level)
if filter_origin is not None:
lh.addFilter(FilterOrigin(filter_origin))
self.addHandler(lh)
yield lh.log_list
self.removeHandler(lh)
def _set_defaults(self):
"""
Reset logger to its initial state.
"""
# Reset any previously installed hooks
if self.warnings_logging_enabled():
self.disable_warnings_logging()
if self.exception_logging_enabled():
self.disable_exception_logging()
# Remove all previous handlers
for handler in self.handlers[:]:
self.removeHandler(handler)
# Set levels
self.setLevel(conf.log_level)
# Set up the stdout handler
sh = StreamHandler()
self.addHandler(sh)
# Set up the main log file handler if requested (but this might fail if
# configuration directory or log file is not writeable).
if conf.log_to_file:
log_file_path = conf.log_file_path
# "None" as a string because it comes from config
try:
_ASTROPY_TEST_ # noqa: B018
testing_mode = True
except NameError:
testing_mode = False
try:
if log_file_path == "" or testing_mode:
log_file_path = (
_config.get_config_dir_path("astropy") / "astropy.log"
)
else:
log_file_path = Path(log_file_path).expanduser()
encoding = conf.log_file_encoding if conf.log_file_encoding else None
fh = logging.FileHandler(log_file_path, encoding=encoding)
except OSError as e:
warnings.warn(
f"log file {log_file_path!r} could not be opened for writing:"
f" {str(e)}",
RuntimeWarning,
)
else:
formatter = logging.Formatter(conf.log_file_format)
fh.setFormatter(formatter)
fh.setLevel(conf.log_file_level)
self.addHandler(fh)
if conf.log_warnings:
self.enable_warnings_logging()
if conf.log_exceptions:
self.enable_exception_logging()
| AstropyLogger |
python | django__django | django/db/migrations/serializer.py | {
"start": 8888,
"end": 9245
} | class ____(BaseSerializer):
def serialize(self):
from django.db.migrations.writer import OperationWriter
string, imports = OperationWriter(self.value, indentation=0).serialize()
# Nested operation, trailing comma is handled in upper
# OperationWriter._write()
return string.rstrip(","), imports
| OperationSerializer |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-box/llama_index/readers/box/BoxReaderAIPrompt/base.py | {
"start": 495,
"end": 4671
} | class ____(BoxReaderBase):
"""
A reader class for loading data from Box files using a custom AI prompt.
This class inherits from the `BaseReader` class and allows specifying a
custom AI prompt for data extraction. It utilizes the provided BoxClient object
to interact with the Box API and extracts data based on the prompt.
Attributes:
_box_client (BoxClient): An authenticated Box client object used
for interacting with the Box API.
"""
_box_client: BoxClient
@classmethod
def class_name(cls) -> str:
return "BoxReaderAIPrompt"
def __init__(self, box_client: BoxClient):
super().__init__(box_client=box_client)
# def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]:
def load_data(
self,
ai_prompt: str,
file_ids: Optional[List[str]] = None,
folder_id: Optional[str] = None,
is_recursive: bool = False,
individual_document_prompt: bool = True,
) -> List[Document]:
"""
Extracts data from Box files using a custom AI prompt and creates Document objects.
This method utilizes a user-provided AI prompt to potentially extract
more specific data from the Box files compared to pre-configured AI
services like Box AI Extract. It then creates Document objects containing
the extracted data along with file metadata.
Args:
ai_prompt (str): The custom AI prompt that specifies what data to
extract from the files.
file_ids (Optional[List[str]], optional): A list of Box file IDs
to extract data from. If provided, folder_id is ignored.
Defaults to None.
folder_id (Optional[str], optional): The ID of the Box folder to
extract data from. If provided, along with is_recursive set to
True, retrieves data from sub-folders as well. Defaults to None.
is_recursive (bool, optional): If True and folder_id is provided,
extracts data from sub-folders within the specified folder.
Defaults to False.
individual_document_prompt (bool, optional): If True, applies the
provided AI prompt to each document individually. If False,
all documents are used for context to the answer.
Defaults to True.
Returns:
List[Document]: A list of Document objects containing the extracted
data and file metadata.
"""
# Connect to Box
box_check_connection(self._box_client)
docs: List[Document] = []
box_files: List[File] = []
# get box files information
if file_ids is not None:
box_files.extend(
get_box_files_details(box_client=self._box_client, file_ids=file_ids)
)
elif folder_id is not None:
box_files.extend(
get_box_folder_files_details(
box_client=self._box_client,
folder_id=folder_id,
is_recursive=is_recursive,
)
)
box_files = get_ai_response_from_box_files(
box_client=self._box_client,
box_files=box_files,
ai_prompt=ai_prompt,
individual_document_prompt=individual_document_prompt,
)
for file in box_files:
doc = box_file_to_llama_document(file)
doc.text = file.ai_response if file.ai_response else ""
doc.metadata["ai_prompt"] = file.ai_prompt
doc.metadata["ai_response"] = file.ai_response if file.ai_response else ""
docs.append(doc)
return docs
def load_resource(self, box_file_id: str, ai_prompt: str) -> List[Document]:
"""
Load data from a specific resource.
Args:
resource (str): The resource identifier.
Returns:
List[Document]: A list of documents loaded from the resource.
"""
return self.load_data(file_ids=[box_file_id], ai_prompt=ai_prompt)
| BoxReaderAIPrompt |
python | pytorch__pytorch | torch/utils/data/sampler.py | {
"start": 6869,
"end": 7519
} | class ____(Sampler[int]):
r"""Samples elements randomly from a given list of indices, without replacement.
Args:
indices (sequence): a sequence of indices
generator (Generator): Generator used in sampling.
"""
indices: Sequence[int]
def __init__(self, indices: Sequence[int], generator=None) -> None:
self.indices = indices
self.generator = generator
def __iter__(self) -> Iterator[int]:
for i in torch.randperm(len(self.indices), generator=self.generator).tolist():
yield self.indices[i]
def __len__(self) -> int:
return len(self.indices)
| SubsetRandomSampler |
python | pallets__jinja | tests/test_bytecode_cache.py | {
"start": 644,
"end": 1068
} | class ____:
class Error(Exception):
pass
key = None
value = None
timeout = None
def get(self, key):
return self.value
def set(self, key, value, timeout=None):
self.key = key
self.value = value
self.timeout = timeout
def get_side_effect(self, key):
raise self.Error()
def set_side_effect(self, *args):
raise self.Error()
| MockMemcached |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 27836,
"end": 28190
} | class ____(object):
def nextToken(self):
pass
def __iter__(self):
return TokenStreamIterator(self)
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### TokenStreamIterator ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
| TokenStream |
python | cython__cython | pyximport/pyximport.py | {
"start": 13606,
"end": 18510
} | class ____(object):
build_dir=True
build_in_temp=True
setup_args={} #None
def _have_importers():
has_py_importer = False
has_pyx_importer = False
for importer in sys.meta_path:
if isinstance(importer, PyxImportMetaFinder):
if isinstance(importer, PyImportMetaFinder):
has_py_importer = True
else:
has_pyx_importer = True
return has_py_importer, has_pyx_importer
def install(pyximport=True, pyimport=False, build_dir=None, build_in_temp=True,
setup_args=None, reload_support=False,
load_py_module_on_import_failure=False, inplace=False,
language_level=None):
""" Main entry point for pyxinstall.
Call this to install the ``.pyx`` import hook in
your meta-path for a single Python process. If you want it to be
installed whenever you use Python, add it to your ``sitecustomize``
(as described above).
:param pyximport: If set to False, does not try to import ``.pyx`` files.
:param pyimport: You can pass ``pyimport=True`` to also
install the ``.py`` import hook
in your meta-path. Note, however, that it is rather experimental,
will not work at all for some ``.py`` files and packages, and will
heavily slow down your imports due to search and compilation.
Use at your own risk.
:param build_dir: By default, compiled modules will end up in a ``.pyxbld``
directory in the user's home directory. Passing a different path
as ``build_dir`` will override this.
:param build_in_temp: If ``False``, will produce the C files locally. Working
with complex dependencies and debugging becomes more easy. This
can principally interfere with existing files of the same name.
:param setup_args: Dict of arguments for Distribution.
See ``distutils.core.setup()``.
:param reload_support: Enables support for dynamic
``reload(my_module)``, e.g. after a change in the Cython code.
Additional files ``<so_path>.reloadNN`` may arise on that account, when
the previously loaded module file cannot be overwritten.
:param load_py_module_on_import_failure: If the compilation of a ``.py``
file succeeds, but the subsequent import fails for some reason,
retry the import with the normal ``.py`` module instead of the
compiled module. Note that this may lead to unpredictable results
for modules that change the system state during their import, as
the second import will rerun these modifications in whatever state
the system was left after the import of the compiled module
failed.
:param inplace: Install the compiled module
(``.so`` for Linux and Mac / ``.pyd`` for Windows)
next to the source file.
:param language_level: The source language level to use: 2 or 3.
The default is to use the language level of the current Python
runtime for .py files and Py2 for ``.pyx`` files.
"""
if setup_args is None:
setup_args = {}
if not build_dir:
build_dir = os.path.join(os.path.expanduser('~'), '.pyxbld')
global pyxargs
pyxargs = PyxArgs() #$pycheck_no
pyxargs.build_dir = build_dir
pyxargs.build_in_temp = build_in_temp
pyxargs.setup_args = (setup_args or {}).copy()
pyxargs.reload_support = reload_support
pyxargs.load_py_module_on_import_failure = load_py_module_on_import_failure
has_py_importer, has_pyx_importer = _have_importers()
py_importer, pyx_importer = None, None
if pyimport and not has_py_importer:
py_importer = PyImportMetaFinder(pyxbuild_dir=build_dir, inplace=inplace,
language_level=language_level)
# make sure we import Cython before we install the import hook
import Cython.Compiler.Main, Cython.Compiler.Pipeline, Cython.Compiler.Optimize
sys.meta_path.insert(0, py_importer)
if pyximport and not has_pyx_importer:
pyx_importer = PyxImportMetaFinder(pyxbuild_dir=build_dir, inplace=inplace,
language_level=language_level)
sys.meta_path.append(pyx_importer)
return py_importer, pyx_importer
def uninstall(py_importer, pyx_importer):
"""
Uninstall an import hook.
"""
try:
sys.meta_path.remove(py_importer)
except ValueError:
pass
try:
sys.meta_path.remove(pyx_importer)
except ValueError:
pass
# MAIN
def show_docs():
import __main__
__main__.__name__ = mod_name
for name in dir(__main__):
item = getattr(__main__, name)
try:
setattr(item, "__module__", mod_name)
except (AttributeError, TypeError):
pass
help(__main__)
if __name__ == '__main__':
show_docs()
| PyxArgs |
python | encode__django-rest-framework | tests/test_filters.py | {
"start": 15962,
"end": 17248
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
b1 = Blog.objects.create(name='Blog 1')
b2 = Blog.objects.create(name='Blog 2')
# Multiple entries on Lennon published in 1979 - distinct should deduplicate
Entry.objects.create(blog=b1, headline='Something about Lennon', pub_date=datetime.date(1979, 1, 1))
Entry.objects.create(blog=b1, headline='Another thing about Lennon', pub_date=datetime.date(1979, 6, 1))
# Entry on Lennon *and* a separate entry in 1979 - should not match
Entry.objects.create(blog=b2, headline='Something unrelated', pub_date=datetime.date(1979, 1, 1))
Entry.objects.create(blog=b2, headline='Retrospective on Lennon', pub_date=datetime.date(1990, 6, 1))
def test_multiple_filter_conditions(self):
class SearchListView(generics.ListAPIView):
queryset = Blog.objects.all()
serializer_class = BlogSerializer
filter_backends = (filters.SearchFilter,)
search_fields = ('=name', 'entry__headline', '=entry__pub_date__year')
view = SearchListView.as_view()
request = factory.get('/', {'search': 'Lennon,1979'})
response = view(request)
assert len(response.data) == 1
| SearchFilterToManyTests |
python | viewflow__viewflow | tests/json/test_json__boolean.py | {
"start": 233,
"end": 753
} | class ____(TestCase):
def test_crud(self):
model = BooleanFieldModel(boolean_field=False)
self.assertIsInstance(
model._meta.get_field('boolean_field'),
models.BooleanField
)
self.assertEqual(model.data, {
'boolean_field': False
})
model.save()
model = BooleanFieldModel.objects.get()
self.assertEqual(model.data, {
'boolean_field': False
})
self.assertEqual(model.boolean_field, False)
| Test |
python | automl__auto-sklearn | test/test_pipeline/components/data_preprocessing/test_numerical_imputation.py | {
"start": 247,
"end": 1401
} | class ____(PreprocessingTestCase):
def test_default_configuration(self):
transformations = []
for i in range(2):
transformation, original = _test_preprocessing(NumericalImputation)
self.assertEqual(transformation.shape, original.shape)
self.assertTrue((transformation == original).all())
transformations.append(transformation)
if len(transformations) > 1:
self.assertTrue((transformations[-1] == transformations[-2]).all())
def test_default_configuration_sparse_data(self):
transformations = []
transformation, original = _test_preprocessing(
NumericalImputation, make_sparse=True
)
self.assertEqual(transformation.shape, original.shape)
self.assertTrue((transformation.data == original.data).all())
self.assertIsInstance(transformation, sparse.csc_matrix)
transformations.append(transformation)
def test_preprocessing_dtype(self):
super(NumericalImputationTest, self)._test_preprocessing_dtype(
NumericalImputation, add_NaNs=True
)
| NumericalImputationTest |
python | networkx__networkx | networkx/algorithms/flow/tests/test_maxflow.py | {
"start": 12741,
"end": 17526
} | class ____:
def setup_method(self):
G = nx.DiGraph()
G.add_edge("x", "a", capacity=3.0)
G.add_edge("x", "b", capacity=1.0)
G.add_edge("a", "c", capacity=3.0)
G.add_edge("b", "c", capacity=5.0)
G.add_edge("b", "d", capacity=4.0)
G.add_edge("d", "e", capacity=2.0)
G.add_edge("c", "y", capacity=2.0)
G.add_edge("e", "y", capacity=3.0)
self.G = G
H = nx.DiGraph()
H.add_edge(0, 1, capacity=1.0)
H.add_edge(1, 2, capacity=1.0)
self.H = H
def test_flow_func_not_callable(self):
elements = ["this_should_be_callable", 10, {1, 2, 3}]
G = nx.Graph()
G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity")
for flow_func in interface_funcs:
for element in elements:
pytest.raises(nx.NetworkXError, flow_func, G, 0, 1, flow_func=element)
pytest.raises(nx.NetworkXError, flow_func, G, 0, 1, flow_func=element)
def test_flow_func_parameters(self):
G = self.G
fv = 3.0
for interface_func in interface_funcs:
for flow_func in flow_funcs:
errmsg = (
f"Assertion failed in function: {flow_func.__name__} "
f"in interface {interface_func.__name__}"
)
result = interface_func(G, "x", "y", flow_func=flow_func)
if interface_func in max_min_funcs:
result = result[0]
assert fv == result, errmsg
def test_minimum_cut_no_cutoff(self):
G = self.G
pytest.raises(
nx.NetworkXError,
nx.minimum_cut,
G,
"x",
"y",
flow_func=preflow_push,
cutoff=1.0,
)
pytest.raises(
nx.NetworkXError,
nx.minimum_cut_value,
G,
"x",
"y",
flow_func=preflow_push,
cutoff=1.0,
)
def test_kwargs(self):
G = self.H
fv = 1.0
to_test = (
(shortest_augmenting_path, {"two_phase": True}),
(preflow_push, {"global_relabel_freq": 5}),
)
for interface_func in interface_funcs:
for flow_func, kwargs in to_test:
errmsg = (
f"Assertion failed in function: {flow_func.__name__} "
f"in interface {interface_func.__name__}"
)
result = interface_func(G, 0, 2, flow_func=flow_func, **kwargs)
if interface_func in max_min_funcs:
result = result[0]
assert fv == result, errmsg
def test_kwargs_default_flow_func(self):
G = self.H
for interface_func in interface_funcs:
pytest.raises(
nx.NetworkXError, interface_func, G, 0, 1, global_relabel_freq=2
)
def test_reusing_residual(self):
G = self.G
fv = 3.0
s, t = "x", "y"
R = build_residual_network(G, "capacity")
for interface_func in interface_funcs:
for flow_func in flow_funcs:
errmsg = (
f"Assertion failed in function: {flow_func.__name__} "
f"in interface {interface_func.__name__}"
)
for i in range(3):
result = interface_func(
G, "x", "y", flow_func=flow_func, residual=R
)
if interface_func in max_min_funcs:
result = result[0]
assert fv == result, errmsg
# Tests specific to one algorithm
def test_preflow_push_global_relabel_freq():
G = nx.DiGraph()
G.add_edge(1, 2, capacity=1)
R = preflow_push(G, 1, 2, global_relabel_freq=None)
assert R.graph["flow_value"] == 1
pytest.raises(nx.NetworkXError, preflow_push, G, 1, 2, global_relabel_freq=-1)
def test_preflow_push_makes_enough_space():
# From ticket #1542
G = nx.DiGraph()
nx.add_path(G, [0, 1, 3], capacity=1)
nx.add_path(G, [1, 2, 3], capacity=1)
R = preflow_push(G, 0, 3, value_only=False)
assert R.graph["flow_value"] == 1
def test_shortest_augmenting_path_two_phase():
k = 5
p = 1000
G = nx.DiGraph()
for i in range(k):
G.add_edge("s", (i, 0), capacity=1)
nx.add_path(G, ((i, j) for j in range(p)), capacity=1)
G.add_edge((i, p - 1), "t", capacity=1)
R = shortest_augmenting_path(G, "s", "t", two_phase=True)
assert R.graph["flow_value"] == k
R = shortest_augmenting_path(G, "s", "t", two_phase=False)
assert R.graph["flow_value"] == k
| TestMaxFlowMinCutInterface |
python | django-extensions__django-extensions | tests/management/commands/test_reset_db.py | {
"start": 3200,
"end": 6180
} | class ____(TestCase):
"""Tests for reset_db command and mysql engine."""
@mock.patch("sys.stdout", new_callable=StringIO)
@mock.patch("django_extensions.management.commands.reset_db.input")
def test_should_cancel_reset_db_if_input_is_different_than_yes(
self, m_input, m_stdout
):
m_input.return_value = "no"
call_command("reset_db")
self.assertEqual("Reset cancelled.\n", m_stdout.getvalue())
@mock.patch("sys.stdout", new_callable=StringIO)
def test_should_drop_and_create_database_with_characterset_utf8_and_print_success_messsage(
self, m_stdout
):
m_database = mock.MagicMock()
m_database.__spec__ = mock.Mock()
m_connection = mock.Mock()
m_database.connect.return_value = m_connection
expected_calls = [
mock.call("DROP DATABASE IF EXISTS `test_db`"),
mock.call("CREATE DATABASE `test_db` CHARACTER SET utf8"),
]
with mock.patch.dict("sys.modules", MySQLdb=m_database):
call_command("reset_db", "--noinput", verbosity=2)
m_database.connect.assert_called_once_with(
host="127.0.0.1", passwd="bar", user="foo"
)
m_connection.query.assert_has_calls(expected_calls, any_order=False)
self.assertEqual("Reset successful.\n", m_stdout.getvalue())
@override_settings(
DATABASES={
"default": {
"ENGINE": "django.db.backends.mysql",
"NAME": "test_db",
"USER": "foo",
"PASSWORD": "bar",
"HOST": "/var/run/mysqld/mysql.sock",
"PORT": "3306",
},
}
)
@mock.patch("sys.stdout", new_callable=StringIO)
def test_should_drop_and_create_database_without_characterset_and_print_success_messsage(
self, m_stdout
):
m_database = mock.MagicMock()
m_database.__spec__ = mock.Mock()
m_connection = mock.Mock()
m_database.connect.return_value = m_connection
expected_calls = [
mock.call("DROP DATABASE IF EXISTS `test_db`"),
mock.call("CREATE DATABASE `test_db`"),
]
with mock.patch.dict("sys.modules", MySQLdb=m_database):
call_command("reset_db", "--noinput", "--no-utf8", verbosity=2)
m_database.connect.assert_called_once_with(
passwd="bar",
port=3306,
unix_socket="/var/run/mysqld/mysql.sock",
user="foo",
)
m_connection.query.assert_has_calls(expected_calls, any_order=False)
self.assertEqual("Reset successful.\n", m_stdout.getvalue())
@override_settings(
DATABASES={
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "test_db",
"USER": "foo",
"PASSWORD": "bar",
"HOST": "127.0.0.1",
"PORT": "5432",
}
}
)
| ResetDbMysqlTests |
python | django__django | tests/one_to_one/models.py | {
"start": 2328,
"end": 2508
} | class ____(models.Model):
target = models.OneToOneField(
Target, models.CASCADE, to_field="name", primary_key=True
)
# Test related objects visibility.
| ToFieldPointer |
python | run-llama__llama_index | llama-index-integrations/postprocessor/llama-index-postprocessor-rankgpt-rerank/llama_index/postprocessor/rankgpt_rerank/base.py | {
"start": 846,
"end": 7070
} | class ____(BaseNodePostprocessor):
"""RankGPT-based reranker."""
top_n: int = Field(default=5, description="Top N nodes to return from reranking.")
llm: Optional[LLM] = None
verbose: bool = Field(
default=False, description="Whether to print intermediate steps."
)
rankgpt_rerank_prompt: BasePromptTemplate = Field(
description="rankGPT rerank prompt."
)
def __init__(
self,
top_n: int = 5,
llm: Optional[LLM] = None,
verbose: bool = False,
rankgpt_rerank_prompt: Optional[BasePromptTemplate] = None,
):
rankgpt_rerank_prompt = rankgpt_rerank_prompt or RANKGPT_RERANK_PROMPT
super().__init__(
verbose=verbose,
llm=llm,
top_n=top_n,
rankgpt_rerank_prompt=rankgpt_rerank_prompt,
)
@classmethod
def class_name(cls) -> str:
return "RankGPTRerank"
def _ensure_llm(self) -> None:
if not self.llm:
try:
from llama_index.llms.openai import OpenAI
self.llm = OpenAI(model="gpt-3.5-turbo-16k")
except ImportError:
raise RuntimeError(
"OpenAI LLM is not available. Please install `llama-index-llms-openai` "
"or provide an alternative LLM instance."
)
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
query=query_bundle,
nodes=nodes,
top_n=self.top_n,
model_name=self.llm.metadata.model_name,
)
)
if query_bundle is None:
raise ValueError("Query bundle must be provided.")
items = {
"query": query_bundle.query_str,
"hits": [
{"content": node.get_content(metadata_mode=MetadataMode.EMBED)}
for node in nodes
],
}
messages = self.create_permutation_instruction(item=items)
permutation = self.run_llm(messages=messages)
if permutation.message is not None and permutation.message.content is not None:
rerank_ranks = self._receive_permutation(
items, str(permutation.message.content)
)
if self.verbose:
print_text(f"After Reranking, new rank list for nodes: {rerank_ranks}")
initial_results: List[NodeWithScore] = []
for idx in rerank_ranks:
initial_results.append(
NodeWithScore(node=nodes[idx].node, score=nodes[idx].score)
)
dispatcher.event(ReRankEndEvent(nodes=initial_results[: self.top_n]))
return initial_results[: self.top_n]
else:
dispatcher.event(ReRankEndEvent(nodes=nodes[: self.top_n]))
return nodes[: self.top_n]
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {"rankgpt_rerank_prompt": self.rankgpt_rerank_prompt}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "rankgpt_rerank_prompt" in prompts:
self.rankgpt_rerank_prompt = prompts["rankgpt_rerank_prompt"]
def _get_prefix_prompt(self, query: str, num: int) -> List[ChatMessage]:
return [
ChatMessage(
role="system",
content="You are RankGPT, an intelligent assistant that can rank passages based on their relevancy to the query.",
),
ChatMessage(
role="user",
content=f"I will provide you with {num} passages, each indicated by number identifier []. \nRank the passages based on their relevance to query: {query}.",
),
ChatMessage(role="assistant", content="Okay, please provide the passages."),
]
def _get_post_prompt(self, query: str, num: int) -> str:
return self.rankgpt_rerank_prompt.format(query=query, num=num)
def create_permutation_instruction(self, item: Dict[str, Any]) -> List[ChatMessage]:
query = item["query"]
num = len(item["hits"])
messages = self._get_prefix_prompt(query, num)
rank = 0
for hit in item["hits"]:
rank += 1
content = hit["content"]
content = content.replace("Title: Content: ", "")
content = content.strip()
# For Japanese should cut by character: content = content[:int(max_length)]
content = " ".join(content.split()[:300])
messages.append(ChatMessage(role="user", content=f"[{rank}] {content}"))
messages.append(
ChatMessage(role="assistant", content=f"Received passage [{rank}].")
)
messages.append(
ChatMessage(role="user", content=self._get_post_prompt(query, num))
)
return messages
def run_llm(self, messages: Sequence[ChatMessage]) -> ChatResponse:
self._ensure_llm()
return self.llm.chat(messages)
def _clean_response(self, response: str) -> str:
new_response = ""
for c in response:
if not c.isdigit():
new_response += " "
else:
new_response += c
return new_response.strip()
def _remove_duplicate(self, response: List[int]) -> List[int]:
new_response = []
for c in response:
if c not in new_response:
new_response.append(c)
return new_response
def _receive_permutation(self, item: Dict[str, Any], permutation: str) -> List[int]:
rank_end = len(item["hits"])
response = self._clean_response(permutation)
response_list = [int(x) - 1 for x in response.split()]
response_list = self._remove_duplicate(response_list)
response_list = [ss for ss in response_list if ss in range(rank_end)]
return response_list + [
tt for tt in range(rank_end) if tt not in response_list
] # add the rest of the rank
| RankGPTRerank |
python | huggingface__transformers | src/transformers/models/bart/modeling_bart.py | {
"start": 3491,
"end": 5018
} | class ____(nn.Embedding):
"""
This module overrides nn.Embeddings' forward by multiplying with embeddings scale.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.embed_scale = embed_scale
def forward(self, input_ids: torch.Tensor):
return super().forward(input_ids) * self.embed_scale
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| BartScaledWordEmbedding |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_int.py | {
"start": 35024,
"end": 39077
} | class ____(__TestCase):
# Tests of the functions in _pylong.py. Those get used when the
# number of digits in the input values are large enough.
def setUp(self):
super().setUp()
self._previous_limit = sys.get_int_max_str_digits()
sys.set_int_max_str_digits(0)
def tearDown(self):
sys.set_int_max_str_digits(self._previous_limit)
super().tearDown()
def _test_pylong_int_to_decimal(self, n, suffix):
s = str(n)
self.assertEqual(s[-10:], suffix)
s2 = str(-n)
self.assertEqual(s2, '-' + s)
s3 = '%d' % n
self.assertEqual(s3, s)
s4 = b'%d' % n
self.assertEqual(s4, s.encode('ascii'))
def test_pylong_int_to_decimal(self):
self._test_pylong_int_to_decimal((1 << 100_000), '9883109376')
self._test_pylong_int_to_decimal((1 << 100_000) - 1, '9883109375')
self._test_pylong_int_to_decimal(10**30_000, '0000000000')
self._test_pylong_int_to_decimal(10**30_000 - 1, '9999999999')
self._test_pylong_int_to_decimal(3**60_000, '9313200001')
@support.requires_resource('cpu')
def test_pylong_int_to_decimal_2(self):
self._test_pylong_int_to_decimal(2**1_000_000, '2747109376')
self._test_pylong_int_to_decimal(10**300_000, '0000000000')
self._test_pylong_int_to_decimal(3**600_000, '3132000001')
def test_pylong_int_divmod(self):
n = (1 << 100_000)
a, b = divmod(n*3 + 1, n)
assert a == 3 and b == 1
def test_pylong_str_to_int(self):
v1 = 1 << 100_000
s = str(v1)
v2 = int(s)
assert v1 == v2
v3 = int(' -' + s)
assert -v1 == v3
v4 = int(' +' + s + ' ')
assert v1 == v4
with self.assertRaises(ValueError) as err:
int(s + 'z')
with self.assertRaises(ValueError) as err:
int(s + '_')
with self.assertRaises(ValueError) as err:
int('_' + s)
@support.cpython_only # tests implementation details of CPython.
@unittest.skipUnless(_pylong, "_pylong module required")
@mock.patch.object(_pylong, "int_to_decimal_string")
def test_pylong_misbehavior_error_path_to_str(
self, mock_int_to_str):
with support.adjust_int_max_str_digits(20_000):
big_value = int('7'*19_999)
mock_int_to_str.return_value = None # not a str
with self.assertRaises(TypeError) as ctx:
str(big_value)
self.assertIn('_pylong.int_to_decimal_string did not',
str(ctx.exception))
mock_int_to_str.side_effect = RuntimeError("testABC")
with self.assertRaises(RuntimeError):
str(big_value)
@support.cpython_only # tests implementation details of CPython.
@unittest.skipUnless(_pylong, "_pylong module required")
@mock.patch.object(_pylong, "int_from_string")
def test_pylong_misbehavior_error_path_from_str(
self, mock_int_from_str):
big_value = '7'*19_999
with support.adjust_int_max_str_digits(20_000):
mock_int_from_str.return_value = b'not an int'
with self.assertRaises(TypeError) as ctx:
int(big_value)
self.assertIn('_pylong.int_from_string did not',
str(ctx.exception))
mock_int_from_str.side_effect = RuntimeError("test123")
with self.assertRaises(RuntimeError):
int(big_value)
def test_pylong_roundtrip(self):
from random import randrange, getrandbits
bits = 5000
while bits <= 1_000_000:
bits += randrange(-100, 101) # break bitlength patterns
hibit = 1 << (bits - 1)
n = hibit | getrandbits(bits - 1)
assert n.bit_length() == bits
sn = str(n)
self.assertFalse(sn.startswith('0'))
self.assertEqual(n, int(sn))
bits <<= 1
if __name__ == "__main__":
run_tests()
| PyLongModuleTests |
python | more-itertools__more-itertools | tests/test_recipes.py | {
"start": 15805,
"end": 16441
} | class ____(TestCase):
"""Tests for ``first_true()``"""
def test_something_true(self):
"""Test with no keywords"""
self.assertEqual(mi.first_true(range(10)), 1)
def test_nothing_true(self):
"""Test default return value."""
self.assertIsNone(mi.first_true([0, 0, 0]))
def test_default(self):
"""Test with a default keyword"""
self.assertEqual(mi.first_true([0, 0, 0], default='!'), '!')
def test_pred(self):
"""Test with a custom predicate"""
self.assertEqual(
mi.first_true([2, 4, 6], pred=lambda x: x % 3 == 0), 6
)
| FirstTrueTests |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 215459,
"end": 215760
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("CWE", graphql_name="node")
| CWEEdge |
python | getsentry__sentry | src/sentry/integrations/discord/message_builder/issues.py | {
"start": 1609,
"end": 6887
} | class ____(DiscordMessageBuilder):
def __init__(
self,
group: Group,
event: GroupEvent | None = None,
tags: set[str] | None = None,
rules: list[Rule] | None = None,
link_to_event: bool = False,
issue_details: bool = False,
notification: ProjectNotification | None = None,
) -> None:
self.group = group
self.event = event
self.tags = tags
self.rules = rules
self.link_to_event = link_to_event
self.issue_details = issue_details
self.notification = notification
def build(self, notification_uuid: str | None = None) -> DiscordMessage:
project = Project.objects.get_from_cache(id=self.group.project_id)
event_for_tags = self.event or self.group.get_latest_event()
timestamp = (
max(self.group.last_seen, self.event.datetime) if self.event else self.group.last_seen
)
obj: Group | GroupEvent = self.event if self.event is not None else self.group
rule_id = None
rule_environment_id = None
if self.rules:
rule_environment_id = self.rules[0].environment_id
if features.has("organizations:workflow-engine-ui-links", self.group.organization):
rule_id = int(get_key_from_rule_data(self.rules[0], "workflow_id"))
elif should_fire_workflow_actions(self.group.organization, self.group.type):
rule_id = int(get_key_from_rule_data(self.rules[0], "legacy_rule_id"))
else:
rule_id = self.rules[0].id
url = None
if features.has("organizations:workflow-engine-ui-links", self.group.organization):
url = get_title_link_workflow_engine_ui(
self.group,
self.event,
self.link_to_event,
self.issue_details,
self.notification,
ExternalProviders.DISCORD,
rule_id,
rule_environment_id,
notification_uuid=notification_uuid,
)
else:
url = get_title_link(
self.group,
self.event,
self.link_to_event,
self.issue_details,
self.notification,
ExternalProviders.DISCORD,
rule_id,
rule_environment_id,
notification_uuid=notification_uuid,
)
embeds = [
DiscordMessageEmbed(
title=build_attachment_title(obj),
description=build_attachment_text(self.group, self.event) or None,
url=url,
color=LEVEL_TO_COLOR[get_color(event_for_tags, self.notification, self.group)],
# We can't embed urls in Discord embed footers.
footer=DiscordMessageEmbedFooter(
build_footer(
group=self.group,
project=project,
url_format="{text}",
rules=self.rules,
)
),
fields=build_tag_fields(event_for_tags, self.tags),
timestamp=timestamp,
)
]
components = build_components(self.group, project)
return self._build(embeds=embeds, components=components)
def build_tag_fields(
event_for_tags: GroupEvent | None, tags: set[str] | None = None
) -> list[DiscordMessageEmbedField]:
fields: list[DiscordMessageEmbedField] = []
if tags:
event_tags = event_for_tags.tags if event_for_tags else []
for key, value in event_tags:
std_key = tagstore.backend.get_standardized_key(key)
if std_key not in tags:
continue
labeled_value = tagstore.backend.get_tag_value_label(key, value)
fields.append(
DiscordMessageEmbedField(
std_key,
labeled_value,
inline=True,
)
)
return fields
def build_components(
    group: Group,
    project: Project,
) -> list[DiscordMessageComponent]:
    """Build the single action row (resolve / archive / assign buttons) for an issue message.

    Each button's ``custom_id`` encodes both the action and the group id so the
    interaction handler can route the click back to this group. The resolve and
    archive buttons are swapped for their inverse action based on group status.
    """
    archive_button = DiscordButton(
        custom_id=f"{CustomIds.ARCHIVE}:{group.id}",
        label="Archive",
    )
    resolve_button = DiscordButton(
        custom_id=f"{CustomIds.RESOLVE_DIALOG}:{group.id}", label="Resolve..."
    )
    assign_button = DiscordButton(
        custom_id=f"{CustomIds.ASSIGN_DIALOG}:{group.id}", label="Assign..."
    )

    status = group.get_status()

    if not project.flags.has_releases:
        # Without releases there is no "resolve in release" dialog; use plain resolve.
        resolve_button = DiscordButton(
            custom_id=f"{CustomIds.RESOLVE}:{group.id}",
            label="Resolve",
        )
    if status == GroupStatus.RESOLVED:
        resolve_button = DiscordButton(
            custom_id=f"{CustomIds.UNRESOLVE}:{group.id}",
            label="Unresolve",
        )
    if status == GroupStatus.IGNORED:
        archive_button = DiscordButton(
            custom_id=f"{CustomIds.MARK_ONGOING}:{group.id}",
            label="Mark as Ongoing",
        )

    return [
        DiscordActionRow(components=[resolve_button, archive_button, assign_button]),
    ]
| DiscordIssuesMessageBuilder |
python | pydantic__pydantic | tests/benchmarks/test_discriminated_unions.py | {
"start": 150,
"end": 239
class ____(BaseModel):
    """A state variant whose payload is another (possibly nested) state."""

    # Discriminator value identifying this variant of the tagged union.
    state_type: Literal['nested']
    substate: AnyState
| NestedState |
python | redis__redis-py | redis/commands/search/query.py | {
"start": 109,
"end": 11316
class ____:
    """
    Query is used to build complex queries that have more parameters than just
    the query string. The query string is set in the constructor, and other
    options have setter functions.

    The setter functions return the query object so they can be chained.
    i.e. `Query("foo").verbatim().filter(...)` etc.
    """

    def __init__(self, query_string: str) -> None:
        """
        Create a new query object.
        The query string is set in the constructor, and other options have
        setter functions.
        """
        self._query_string: str = query_string
        self._offset: int = 0
        self._num: int = 10
        self._no_content: bool = False
        self._no_stopwords: bool = False
        self._fields: Optional[List[str]] = None
        self._verbatim: bool = False
        self._with_payloads: bool = False
        self._with_scores: bool = False
        self._scorer: Optional[str] = None
        self._filters: List = list()
        self._ids: Optional[Tuple[str, ...]] = None
        # -1 means SLOP was not requested (see _get_args_tags).
        self._slop: int = -1
        self._timeout: Optional[float] = None
        self._in_order: bool = False
        self._sortby: Optional[SortbyField] = None
        self._return_fields: List = []
        self._return_fields_decode_as: dict = {}
        self._summarize_fields: List = []
        self._highlight_fields: List = []
        self._language: Optional[str] = None
        self._expander: Optional[str] = None
        self._dialect: int = DEFAULT_DIALECT

    def query_string(self) -> str:
        """Return the query string of this query only."""
        return self._query_string

    def limit_ids(self, *ids) -> "Query":
        """Limit the results to a specific set of pre-known document
        ids of any length."""
        self._ids = ids
        return self

    def return_fields(self, *fields) -> "Query":
        """Add fields to return fields."""
        for field in fields:
            self.return_field(field)
        return self

    def return_field(
        self,
        field: str,
        as_field: Optional[str] = None,
        decode_field: Optional[bool] = True,
        encoding: Optional[str] = "utf8",
    ) -> "Query":
        """
        Add a field to the list of fields to return.

        - **field**: The field to include in query results
        - **as_field**: The alias for the field
        - **decode_field**: Whether to decode the field from bytes to string
        - **encoding**: The encoding to use when decoding the field
        """
        self._return_fields.append(field)
        # None marks the field as "do not decode"; otherwise the encoding to use.
        self._return_fields_decode_as[field] = encoding if decode_field else None
        if as_field is not None:
            # list += tuple extends in place, producing ... field "AS" alias ...
            self._return_fields += ("AS", as_field)
        return self

    def _mk_field_list(self, fields: Optional[Union[List[str], str]]) -> List:
        """Normalize a single field name or a list of names into a list."""
        if not fields:
            return []
        return [fields] if isinstance(fields, str) else list(fields)

    def summarize(
        self,
        fields: Optional[List] = None,
        context_len: Optional[int] = None,
        num_frags: Optional[int] = None,
        sep: Optional[str] = None,
    ) -> "Query":
        """
        Return an abridged format of the field, containing only the segments of
        the field that contain the matching term(s).

        If `fields` is specified, then only the mentioned fields are
        summarized; otherwise, all results are summarized.

        Server-side defaults are used for each option (except `fields`)
        if not specified

        - **fields** List of fields to summarize. All fields are summarized
        if not specified
        - **context_len** Amount of context to include with each fragment
        - **num_frags** Number of fragments per document
        - **sep** Separator string to separate fragments
        """
        args = ["SUMMARIZE"]
        fields = self._mk_field_list(fields)
        if fields:
            args += ["FIELDS", str(len(fields))] + fields

        if context_len is not None:
            args += ["LEN", str(context_len)]
        if num_frags is not None:
            args += ["FRAGS", str(num_frags)]
        if sep is not None:
            args += ["SEPARATOR", sep]

        self._summarize_fields = args
        return self

    def highlight(
        self, fields: Optional[List[str]] = None, tags: Optional[List[str]] = None
    ) -> "Query":
        """
        Apply specified markup to matched term(s) within the returned field(s).

        - **fields** If specified, then only those mentioned fields are
        highlighted, otherwise all fields are highlighted
        - **tags** A list of two strings to surround the match.
        """
        args = ["HIGHLIGHT"]
        fields = self._mk_field_list(fields)
        if fields:
            args += ["FIELDS", str(len(fields))] + fields
        if tags:
            args += ["TAGS"] + list(tags)

        self._highlight_fields = args
        return self

    def language(self, language: str) -> "Query":
        """
        Analyze the query as being in the specified language.

        :param language: The language (e.g. `chinese` or `english`)
        """
        self._language = language
        return self

    def slop(self, slop: int) -> "Query":
        """Allow a maximum of N intervening non-matched terms between
        phrase terms (0 means exact phrase).
        """
        self._slop = slop
        return self

    def timeout(self, timeout: float) -> "Query":
        """overrides the timeout parameter of the module"""
        self._timeout = timeout
        return self

    def in_order(self) -> "Query":
        """
        Match only documents where the query terms appear in
        the same order in the document.
        i.e., for the query "hello world", we do not match "world hello"
        """
        self._in_order = True
        return self

    def scorer(self, scorer: str) -> "Query":
        """
        Use a different scoring function to evaluate document relevance.
        Default is `TFIDF`.
        Since Redis 8.0 default was changed to BM25STD.

        :param scorer: The scoring function to use
                       (e.g. `TFIDF.DOCNORM` or `BM25`)
        """
        self._scorer = scorer
        return self

    def get_args(self) -> List[Union[str, int, float]]:
        """Format the redis arguments for this query and return them."""
        args: List[Union[str, int, float]] = [self._query_string]
        args += self._get_args_tags()
        args += self._summarize_fields + self._highlight_fields
        # LIMIT is always emitted last, using the configured paging.
        args += ["LIMIT", self._offset, self._num]
        return args

    def _get_args_tags(self) -> List[Union[str, int, float]]:
        """Serialize every configured option (except SUMMARIZE/HIGHLIGHT/LIMIT)
        into the FT.SEARCH argument list, preserving a fixed option order."""
        args: List[Union[str, int, float]] = []
        if self._no_content:
            args.append("NOCONTENT")
        if self._fields:
            args.append("INFIELDS")
            args.append(len(self._fields))
            args += self._fields
        if self._verbatim:
            args.append("VERBATIM")
        if self._no_stopwords:
            args.append("NOSTOPWORDS")
        if self._filters:
            for flt in self._filters:
                if not isinstance(flt, Filter):
                    raise AttributeError("Did not receive a Filter object.")
                args += flt.args
        if self._with_payloads:
            args.append("WITHPAYLOADS")
        if self._scorer:
            args += ["SCORER", self._scorer]
        if self._with_scores:
            args.append("WITHSCORES")
        if self._ids:
            args.append("INKEYS")
            args.append(len(self._ids))
            args += self._ids
        if self._slop >= 0:
            args += ["SLOP", self._slop]
        if self._timeout is not None:
            args += ["TIMEOUT", self._timeout]
        if self._in_order:
            args.append("INORDER")
        if self._return_fields:
            args.append("RETURN")
            args.append(len(self._return_fields))
            args += self._return_fields
        if self._sortby:
            if not isinstance(self._sortby, SortbyField):
                raise AttributeError("Did not receive a SortByField.")
            args.append("SORTBY")
            args += self._sortby.args
        if self._language:
            args += ["LANGUAGE", self._language]
        if self._expander:
            args += ["EXPANDER", self._expander]
        if self._dialect:
            args += ["DIALECT", self._dialect]

        return args

    def paging(self, offset: int, num: int) -> "Query":
        """
        Set the paging for the query (defaults to 0..10).

        - **offset**: Paging offset for the results. Defaults to 0
        - **num**: How many results do we want
        """
        self._offset = offset
        self._num = num
        return self

    def verbatim(self) -> "Query":
        """Set the query to be verbatim, i.e., use no query expansion
        or stemming.
        """
        self._verbatim = True
        return self

    def no_content(self) -> "Query":
        """Set the query to only return ids and not the document content."""
        self._no_content = True
        return self

    def no_stopwords(self) -> "Query":
        """
        Prevent the query from being filtered for stopwords.
        Only useful in very big queries that you are certain contain
        no stopwords.
        """
        self._no_stopwords = True
        return self

    def with_payloads(self) -> "Query":
        """Ask the engine to return document payloads."""
        self._with_payloads = True
        return self

    def with_scores(self) -> "Query":
        """Ask the engine to return document search scores."""
        self._with_scores = True
        return self

    def limit_fields(self, *fields: str) -> "Query":
        """
        Limit the search to specific TEXT fields only.

        - **fields**: Each element should be a string, case sensitive field name
        from the defined schema.
        """
        self._fields = list(fields)
        return self

    def add_filter(self, flt: "Filter") -> "Query":
        """
        Add a numeric or geo filter to the query.
        **Currently, only one of each filter is supported by the engine**

        - **flt**: A NumericFilter or GeoFilter object, used on a
        corresponding field
        """
        self._filters.append(flt)
        return self

    def sort_by(self, field: str, asc: bool = True) -> "Query":
        """
        Add a sortby field to the query.

        - **field** - the name of the field to sort by
        - **asc** - when `True`, sorting will be done in ascending order
        """
        self._sortby = SortbyField(field, asc)
        return self

    def expander(self, expander: str) -> "Query":
        """
        Add an expander field to the query.

        - **expander** - the name of the expander
        """
        self._expander = expander
        return self

    def dialect(self, dialect: int) -> "Query":
        """
        Add a dialect field to the query.

        - **dialect** - dialect version to execute the query under
        """
        self._dialect = dialect
        return self
| Query |
python | dagster-io__dagster | docs/sphinx/_ext/sphinx-mdx-builder/sphinxcontrib/mdxbuilder/writers/mdx.py | {
"start": 706,
"end": 3889
class ____(textwrap.TextWrapper):
    """Custom subclass that uses a different word separator regex."""

    wordsep_re = re.compile(
        r"(\s+|"  # any whitespace
        r"(?<=\s)(?::[a-z-]+:)?`\S+|"  # interpreted text start
        r"[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|"  # hyphenated words
        r"(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))"
    )  # em-dash

    def _wrap_chunks(self, chunks: list[str]) -> list[str]:
        """The original _wrap_chunks uses len() to calculate width.

        This method respects wide/fullwidth characters for width adjustment.
        """
        lines: list[str] = []
        if self.width <= 0:
            raise ValueError(f"invalid width {self.width!r} (must be > 0)")

        # Consume chunks from the end of the (reversed) list for O(1) pops.
        chunks.reverse()

        while chunks:
            cur_line = []
            cur_len = 0

            if lines:
                indent = self.subsequent_indent
            else:
                indent = self.initial_indent

            # Available width accounts for the display width of the indent.
            width = self.width - column_width(indent)

            if self.drop_whitespace and chunks[-1].strip() == "" and lines:
                del chunks[-1]

            while chunks:
                line = column_width(chunks[-1])

                if cur_len + line <= width:
                    cur_line.append(chunks.pop())
                    cur_len += line
                else:
                    break

            # A single chunk wider than the line needs to be broken apart.
            if chunks and column_width(chunks[-1]) > width:
                self._handle_long_word(chunks, cur_line, cur_len, width)

            if self.drop_whitespace and cur_line and cur_line[-1].strip() == "":
                del cur_line[-1]

            if cur_line:
                lines.append(indent + "".join(cur_line))

        return lines

    def _break_word(self, word: str, space_left: int) -> tuple[str, str]:
        """Break line by unicode width instead of len(word)."""
        total = 0
        for i, c in enumerate(word):
            total += column_width(c)
            if total > space_left:
                return word[: i - 1], word[i - 1 :]
        return word, ""

    def _split(self, text: str) -> list[str]:
        """Override original method that only split by 'wordsep_re'.

        This '_split' splits wide-characters into chunks by one character.
        """

        def split(t: str) -> list[str]:
            # NOTE(review): `TextWrapper` here named the enclosing class in the
            # original source; it delegates to textwrap.TextWrapper._split.
            return super(TextWrapper, self)._split(t)

        chunks: list[str] = []
        for chunk in split(text):
            # Group consecutive characters by display width; wide characters
            # (width != 1) become one chunk per character.
            for w, g in groupby(chunk, column_width):
                if w == 1:
                    chunks.extend(split("".join(g)))
                else:
                    chunks.extend(list(g))
        return chunks

    def _handle_long_word(
        self, reversed_chunks: list[str], cur_line: list[str], cur_len: int, width: int
    ) -> None:
        """Override original method for using self._break_word() instead of slice."""
        space_left = max(width - cur_len, 1)

        if self.break_long_words:
            line, rest = self._break_word(reversed_chunks[-1], space_left)
            cur_line.append(line)
            reversed_chunks[-1] = rest
        elif not cur_line:
            cur_line.append(reversed_chunks.pop())
| TextWrapper |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 14668,
"end": 14952
class ____(TestCase):
    """Tests for ``consumer()``"""

    def test_consumer(self):
        # The decorator must prime the generator so send() works immediately.
        @mi.consumer
        def eater():
            while True:
                x = yield  # noqa

        e = eater()
        e.send('hi')  # without @consumer, would raise TypeError
| ConsumerTests |
python | google__jax | tests/jaxpr_effects_test.py | {
"start": 20793,
"end": 22607
class ____(jtu.JaxTestCase):
    """Tests for how lowerable/unlowerable and ordered/unordered effects
    behave under ``jax.pmap``."""

    def test_cannot_pmap_unlowerable_effect(self):
        def f(x):
            # abc is not lowerable
            effect_p.bind(effect='abc')
            return x

        with self.assertRaisesRegex(
            ValueError, "Cannot lower jaxpr with effects: {'abc'}"):
            jax.pmap(f)(jnp.arange(jax.local_device_count()))

    def test_cannot_pmap_ordered_effect(self):
        def f(x):
            # foo is lowerable and ordered
            effect_p.bind(effect=foo_effect)
            return x

        # The expected error depends on whether pmap lowers via shard_map.
        if config.pmap_shmap_merge.value:
            if jax.device_count() == 1:
                self.skipTest("This test won't raise with 1 device.")
            if jtu.device_under_test() == "gpu":
                self.skipTest("Test does not raise under GPU.")
            if jtu.device_under_test() == "tpu" and jtu.get_tpu_version() > 3:
                self.skipTest("Test does not raise under TPU v4+.")
            regex = r"The following ordered effects are not supported for more than 1 device: \[foo\]"
        else:
            regex = "Ordered effects not supported in `pmap`."
        with self.assertRaisesRegex(
            ValueError, regex):
            jax.pmap(f)(jnp.arange(jax.local_device_count()))

    def test_can_pmap_unordered_effect(self):
        def f(x):
            # bar is lowerable and unordered
            effect_p.bind(effect=bar_effect)
            return x

        jax.pmap(f)(jnp.arange(jax.local_device_count()))

    def test_can_pmap_unordered_callback(self):
        if jax.device_count() < 2:
            raise unittest.SkipTest("Test requires >= 2 devices.")
        log = set()

        def log_value(x):
            log.add(int(x))
            return ()

        @jax.pmap
        def f(x):
            callback_p.bind(
                x, callback=log_value, effect=unordered_log_effect, out_avals=())
            return x + 1

        f(jnp.arange(2)).block_until_ready()
        jax.effects_barrier()
        self.assertSetEqual({0, 1}, log)
| ParallelEffectsTest |
python | huggingface__transformers | tests/trainer/test_trainer_distributed_worker_seed.py | {
"start": 738,
"end": 1046
class ____(Dataset):
    """Fixed-size dataset of 64 samples; each access draws one fresh value
    from each of the three RNGs (stdlib ``random``, numpy, torch) so RNG
    seeding behavior can be observed per sample."""

    def __init__(self) -> None:
        # Constant number of samples; index values are otherwise unused.
        self.length = 64

    def __len__(self) -> int:
        return self.length

    def __getitem__(self, i) -> dict[str, torch.Tensor]:
        # Annotation fixed: the method returns a dict of tensors, not an int.
        x = random.random()
        y = np.random.random()
        z = torch.rand([]).item()
        return {"x": torch.tensor([x, y, z])}
| DummyDataset |
python | walkccc__LeetCode | solutions/296. Best Meeting Point/296.py | {
"start": 0,
"end": 616
} | class ____:
def minTotalDistance(self, grid: list[list[int]]) -> int:
    """Return the minimal total Manhattan distance for all homes to meet.

    The Manhattan metric separates into independent 1-D problems along rows
    and columns; each 1-D cost is minimized at the median, and for a sorted
    coordinate list that cost equals the sum of (max - min) over matched
    outer pairs.
    """
    rows = len(grid)
    cols = len(grid[0])

    # Row coordinates of every home, generated in non-decreasing order.
    row_coords = [r for r in range(rows) for c in range(cols) if grid[r][c]]
    # Column coordinates of every home, also generated in non-decreasing order.
    col_coords = [c for c in range(cols) for r in range(rows) if grid[r][c]]

    def paired_span_sum(coords: list[int]) -> int:
        # Sum of distances to the median for an already-sorted list.
        total = 0
        lo, hi = 0, len(coords) - 1
        while lo < hi:
            total += coords[hi] - coords[lo]
            lo += 1
            hi -= 1
        return total

    return paired_span_sum(row_coords) + paired_span_sum(col_coords)
| Solution |
python | doocs__leetcode | solution/2800-2899/2865.Beautiful Towers I/Solution.py | {
"start": 0,
"end": 460
} | class ____:
def maximumSumOfHeights(self, maxHeights: List[int]) -> int:
    """Return the maximum total height of a "mountain" tower arrangement.

    Tries every index as the peak; heights on each side are clamped by the
    running minimum of the per-index limits while walking away from the
    peak. O(n^2) time, O(1) extra space.
    """
    n = len(maxHeights)
    best = 0
    for peak in range(n):
        cap = maxHeights[peak]
        total = cap
        # Walk left from the peak: heights may never rise nor exceed limits.
        for j in range(peak - 1, -1, -1):
            cap = min(cap, maxHeights[j])
            total += cap
        # Walk right from the peak under the same constraint.
        cap = maxHeights[peak]
        for j in range(peak + 1, n):
            cap = min(cap, maxHeights[j])
            total += cap
        best = max(best, total)
    return best
| Solution |
python | ansible__ansible | lib/ansible/utils/display.py | {
"start": 5643,
"end": 11978
class ____(logging.Filter):
    """
    This is a filter which injects the current user as the 'user' attribute on each record. We need to add this filter
    to all logger handlers so that 3rd party libraries won't print an exception due to user not being defined.
    """

    # Resolved once at class-definition time, not per record.
    try:
        username = getpass.getuser()
    except (ImportError, KeyError, OSError):
        # deprecated: description='only OSError is required for Python 3.13+' python_version='3.12'
        # people like to make containers w/o actual valid passwd/shadow and use host uids
        username = 'uid=%s' % os.getuid()

    def filter(self, record):
        # Always passes the record through; only annotates it with the user.
        record.user = FilterUserInjector.username
        return True
# Module-level logger; stays None unless DEFAULT_LOG_PATH is configured and writable.
logger = None
# TODO: make this a callback event instead
if getattr(C, 'DEFAULT_LOG_PATH'):
    path = C.DEFAULT_LOG_PATH
    if path and (os.path.exists(path) and os.access(path, os.W_OK)) or os.access(os.path.dirname(path), os.W_OK):
        if not os.path.isdir(path):
            # NOTE: level is kept at INFO to avoid security disclosures caused by certain libraries when using DEBUG
            logging.basicConfig(filename=path, level=logging.INFO,  # DO NOT set to logging.DEBUG
                                format='%(asctime)s p=%(process)d u=%(user)s n=%(name)s %(levelname)s| %(message)s')
            logger = logging.getLogger('ansible')
            for handler in logging.root.handlers:
                handler.addFilter(FilterBlackList(getattr(C, 'DEFAULT_LOG_FILTER', [])))
                handler.addFilter(FilterUserInjector())
        else:
            print(f"[WARNING]: DEFAULT_LOG_PATH can not be a directory '{path}', aborting", file=sys.stderr)
    else:
        print(f"[WARNING]: log file at '{path}' is not writeable and we cannot create it, aborting\n", file=sys.stderr)

# map color to log levels, in order of priority (low to high)
color_to_log_level = {C.COLOR_DEBUG: logging.DEBUG,
                      C.COLOR_VERBOSE: logging.INFO,
                      C.COLOR_OK: logging.INFO,
                      C.COLOR_INCLUDED: logging.INFO,
                      C.COLOR_CHANGED: logging.INFO,
                      C.COLOR_SKIP: logging.WARNING,
                      C.COLOR_DEPRECATE: logging.WARNING,
                      C.COLOR_WARN: logging.WARNING,
                      C.COLOR_UNREACHABLE: logging.ERROR,
                      C.COLOR_ERROR: logging.ERROR}

# Candidate locations for the cowsay binary across platforms.
b_COW_PATHS = (
    b"/usr/bin/cowsay",
    b"/usr/games/cowsay",
    b"/usr/local/bin/cowsay",  # BSD path for cowsay
    b"/opt/local/bin/cowsay",  # MacPorts path for cowsay
)
def _synchronize_textiowrapper(tio: t.TextIO, lock: threading.RLock):
    """
    This decorator ensures that the supplied RLock is held before invoking the wrapped methods.
    It is intended to prevent background threads from holding the Python stdout/stderr buffer lock on a file object during a fork.
    Since background threads are abandoned in child forks, locks they hold are orphaned in a locked state.
    Attempts to acquire an orphaned lock in this state will block forever, effectively hanging the child process on stdout/stderr writes.

    The shared lock is permanently disabled immediately after a fork.
    This prevents hangs in early post-fork code (e.g., stdio writes from pydevd, coverage, etc.) before user code has resumed and released the lock.
    """

    def _wrap_with_lock(f, lock):
        def disable_lock():
            nonlocal lock
            # Replace the lock with a no-op context manager in the child process.
            lock = contextlib.nullcontext()

        os.register_at_fork(after_in_child=disable_lock)

        @wraps(f)
        def locking_wrapper(*args, **kwargs):
            with lock:
                return f(*args, **kwargs)

        return locking_wrapper

    buffer = tio.buffer

    # monkeypatching the underlying file-like object isn't great, but likely safer than subclassing
    buffer.write = _wrap_with_lock(buffer.write, lock)  # type: ignore[method-assign]
    buffer.flush = _wrap_with_lock(buffer.flush, lock)  # type: ignore[method-assign]
def setraw(fd: int, when: int = termios.TCSAFLUSH) -> None:
    """Put terminal into a raw mode.

    Copied from ``tty`` from CPython 3.11.0, and modified to not remove OPOST from OFLAG

    OPOST is kept to prevent an issue with multi line prompts from being corrupted now that display
    is proxied via the queue from forks. The problem is a race condition, in that we proxy the display
    over the fork, but before it can be displayed, this plugin will have continued executing, potentially
    setting stdout and stdin to raw which remove output post processing that commonly converts NL to CRLF
    """
    mode = termios.tcgetattr(fd)
    # Disable break interrupt, CR->NL mapping, parity checking, bit stripping and flow control.
    mode[tty.IFLAG] = mode[tty.IFLAG] & ~(termios.BRKINT | termios.ICRNL | termios.INPCK | termios.ISTRIP | termios.IXON)
    # NOTE(review): this clears OPOST even though the docstring says OPOST is
    # kept — confirm which behavior is intended before relying on it.
    mode[tty.OFLAG] = mode[tty.OFLAG] & ~(termios.OPOST)
    # 8-bit characters, no parity.
    mode[tty.CFLAG] = mode[tty.CFLAG] & ~(termios.CSIZE | termios.PARENB)
    mode[tty.CFLAG] = mode[tty.CFLAG] | termios.CS8
    # Disable echo, canonical input, extended processing and signal characters.
    mode[tty.LFLAG] = mode[tty.LFLAG] & ~(termios.ECHO | termios.ICANON | termios.IEXTEN | termios.ISIG)
    # Read returns after 1 byte with no inter-byte timeout.
    mode[tty.CC][termios.VMIN] = 1
    mode[tty.CC][termios.VTIME] = 0
    termios.tcsetattr(fd, when, mode)
def clear_line(stdout: t.BinaryIO) -> None:
    """Move the cursor to the beginning of the line and clear to end of line."""
    stdout.write(b'\x1b[%s' % MOVE_TO_BOL)
    stdout.write(b'\x1b[%s' % CLEAR_TO_EOL)
def setup_prompt(stdin_fd: int, stdout_fd: int, seconds: int, echo: bool) -> None:
    """Put the prompt terminal into raw mode, optionally keeping local echo on.

    NOTE(review): ``seconds`` is unused in this function — presumably consumed
    by the caller's timeout handling; confirm before removing.
    """
    setraw(stdin_fd)

    # Only set stdout to raw mode if it is a TTY. This is needed when redirecting
    # stdout to a file since a file cannot be set to raw mode.
    if os.isatty(stdout_fd):
        setraw(stdout_fd)

    if echo:
        # Re-enable local echo, which setraw() just turned off.
        new_settings = termios.tcgetattr(stdin_fd)
        new_settings[3] = new_settings[3] | termios.ECHO
        termios.tcsetattr(stdin_fd, termios.TCSANOW, new_settings)
def setupterm() -> None:
    """Initialize curses terminal capabilities, degrading gracefully when unavailable."""
    # Nest the try except since curses.error is not available if curses did not import
    try:
        curses.setupterm()
    except (curses.error, TypeError, io.UnsupportedOperation):
        global HAS_CURSES
        HAS_CURSES = False
    else:
        global MOVE_TO_BOL
        global CLEAR_TO_EOL
        # curses.tigetstr() returns None in some circumstances
        MOVE_TO_BOL = curses.tigetstr('cr') or MOVE_TO_BOL
        CLEAR_TO_EOL = curses.tigetstr('el') or CLEAR_TO_EOL
| FilterUserInjector |
python | google__jax | tests/pallas/mosaic_gpu_test.py | {
"start": 213559,
"end": 215019
class ____(PallasSm90ATest):
    # WGMMA

    def test_stage6(self):
        """Pipelined 128x128 f16 matmul using WGMMA accumulators, tiled 2x2."""
        self.skip_if_wg_semantics()  # `fa.optimization_barrier` does not support f16 arrays.

        m_block = n_block = 64
        k_block = 32
        x = jnp.arange(128 * 128, dtype=jnp.float16).reshape(128, 128)

        @functools.partial(
            self.kernel, out_shape=x, grid=(2, 2), grid_names=("m", "n")
        )
        def kernel(l_ref, r_ref, o_ref):
            def compute(_, l_smem, r_smem, o_smem):
                def do_wgmma(acc_ref):
                    plgpu.wgmma(acc_ref, l_smem, r_smem)
                    return acc_ref[...]

                # Accumulate each k-step's partial product into the output tile.
                o_smem[...] += pl.run_scoped(do_wgmma, plgpu.ACC((m_block, n_block), jnp.float16))

            m = lax.axis_index("m")
            n = lax.axis_index("n")
            transforms = self.default_transforms(swizzle=64, dtype=jnp.float16)
            plgpu.emit_pipeline(
                compute,
                grid=(l_ref.shape[1] // k_block,),
                in_specs=[
                    plgpu.BlockSpec(
                        (m_block, k_block), lambda k: (m, k), transforms=transforms
                    ),
                    plgpu.BlockSpec(
                        (k_block, n_block), lambda k: (k, n), transforms=transforms
                    ),
                ],
                out_specs=[
                    plgpu.BlockSpec(
                        (m_block, n_block), lambda k: (m, n), transforms=transforms
                    )
                ],
            )(l_ref, r_ref, o_ref)

        np.testing.assert_allclose(kernel(x, x), x @ x)

    # TODO(apaszke): Clusters and multicast
| ExamplesSm90ATest |
python | joke2k__faker | faker/providers/date_time/th_TH/__init__.py | {
"start": 9706,
"end": 11663
class ____(DateParseTypeProvider):
    """Thai locale date/time provider supporting Thai digits and Buddhist-era years."""

    def date(
        self,
        pattern: str = "%-d %b %Y",
        end_datetime: Optional[DateParseType] = None,
        thai_digit: bool = False,
        buddhist_era: bool = True,
    ) -> str:
        """
        Get a date string between January 1, 1970 and now.

        :param pattern: format
        :param end_datetime: datetime
        :param thai_digit: use Thai digit or not (default: False)
        :param buddhist_era: use Buddhist era or not (default: True)

        :example: '08 พ.ย. 2563'
        :example: '๐๘ พ.ย. 2563' (thai_digit = True)
        :example: '8 พฤศิจกายน 2020' (pattern: str = "%-d %B %Y", buddhist_era = False)
        """
        return thai_strftime(
            self.date_time(end_datetime=end_datetime),
            pattern,
            thai_digit,
            buddhist_era,
        )

    def time(
        self,
        pattern: str = "%H:%M:%S",
        end_datetime: Optional[DateParseType] = None,
        thai_digit: bool = False,
    ) -> str:
        """
        Get a time string (24h format by default).

        :param pattern: format
        :param end_datetime: datetime
        :param thai_digit: use Thai digit or not (default: False)

        :example: '15:02:34'
        :example: '๑๕:๐๒:๓๔' (thai_digit = True)
        """
        return thai_strftime(
            self.date_time(end_datetime=end_datetime),
            pattern,
            thai_digit,
        )

    def century(self, thai_digit: bool = False, buddhist_era: bool = True) -> str:
        """
        Get a century number as a string.

        :param thai_digit: use Thai digit or not (default: False)
        :param buddhist_era: use Buddhist era or not (default: True)

        :example: '20'
        """
        # The Buddhist era runs ahead of the common era, so more centuries apply.
        end_century = 22
        if buddhist_era:
            end_century = 26
        text = str(self.random_element(range(1, end_century)))
        if thai_digit:
            text = text.translate(_HA_TH_DIGITS)
        return text
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/async/guestbook.py | {
"start": 977,
"end": 1153
class ____(ndb.Model):
    """A guestbook message, linked to the account that wrote it."""

    text = ndb.StringProperty()
    # Creation timestamp, set automatically on first put().
    when = ndb.DateTimeProperty(auto_now_add=True)
    author = ndb.KeyProperty(kind=Account)  # references Account
| Message |
python | kamyu104__LeetCode-Solutions | Python/mark-elements-on-array-by-performing-queries.py | {
"start": 56,
"end": 930
} | class ____(object):
def unmarkedSumArray(self, nums, queries):
    """
    :type nums: List[int]
    :type queries: List[List[int]]
    :rtype: List[int]

    For each query [i, k]: mark index i (if not already marked), then mark
    the k smallest unmarked values (ties broken by lowest index), and record
    the sum of the values that remain unmarked.

    Uses a min-heap of (value, index); entries for already-marked indices
    are discarded lazily when popped. Time O((n + sum(k)) log n), space O(n).

    Fixed for Python 3: the original used Python 2's ``xrange``, which no
    longer exists; ``range`` is the direct replacement.
    """
    total = sum(nums)
    lookup = [False] * len(nums)  # lookup[i] is True once index i is marked
    min_heap = [(x, i) for i, x in enumerate(nums)]
    heapq.heapify(min_heap)
    result = []
    for i, k in queries:
        if not lookup[i]:
            lookup[i] = True
            total -= nums[i]
        for _ in range(k):  # was xrange(k); range() works on Python 2 and 3
            # Pop until one unmarked element is found and marked.
            while min_heap:
                x, i = heapq.heappop(min_heap)
                if lookup[i]:
                    continue  # stale heap entry: index already marked
                lookup[i] = True
                total -= x
                break
            if not min_heap:
                break
        result.append(total)
    return result
python | django__django | tests/user_commands/management/commands/hal.py | {
"start": 68,
"end": 1062
class ____(BaseCommand):
    help = "Useless command."

    def add_arguments(self, parser):
        """Register the optional positional app labels and the --empty flag."""
        parser.add_argument(
            "args",
            metavar="app_label",
            nargs="*",
            help="Specify the app label(s) to works on.",
        )
        parser.add_argument("--empty", action="store_true", help="Do nothing.")

    def handle(self, *app_labels, **options):
        """Echo HAL quotes; error out on missing or option-like app labels."""
        app_labels = set(app_labels)

        if options["empty"]:
            self.stdout.write()
            self.stdout.write("Dave, I can't do that.")
            return

        if not app_labels:
            raise CommandError("I'm sorry Dave, I'm afraid I can't do that.")

        # raise an error if some --parameter is flowing from options to args
        for app_label in app_labels:
            if app_label.startswith("--"):
                raise CommandError("Sorry, Dave, I can't let you do that.")

        self.stdout.write("Dave, my mind is going. I can feel it. I can feel it.")
python | allegroai__clearml | clearml/backend_api/services/v2_23/dataviews.py | {
"start": 74883,
"end": 76400
class ____(Request):
    """
    Delete a dataview

    :param dataview: Dataview ID
    :type dataview: str
    :param force: Allow deletion of the published dataview
    :type force: bool
    """

    _service = "dataviews"
    _action = "delete"
    _version = "2.23"
    # JSON schema describing the request payload; used for validation.
    _schema = {
        "definitions": {},
        "properties": {
            "dataview": {"description": "Datatview ID", "type": "string"},
            "force": {
                "default": False,
                "description": "Allow deletion of the published dataview",
                "type": "boolean",
            },
        },
        "required": ["dataview"],
        "type": "object",
    }

    def __init__(self, dataview, force=False, **kwargs):
        super(DeleteRequest, self).__init__(**kwargs)
        self.dataview = dataview
        self.force = force

    @schema_property("dataview")
    def dataview(self):
        return self._property_dataview

    @dataview.setter
    def dataview(self, value):
        # None clears the property; otherwise it must be a string.
        if value is None:
            self._property_dataview = None
            return

        self.assert_isinstance(value, "dataview", six.string_types)
        self._property_dataview = value

    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        # None clears the property; otherwise it must be a bool.
        if value is None:
            self._property_force = None
            return

        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.