language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | PrefectHQ__prefect | tests/server/orchestration/api/test_deployments.py | {
"start": 89665,
"end": 100997
} | class ____:
@pytest.fixture
async def flows(self, session):
flow_1 = await models.flows.create_flow(
session=session,
flow=schemas.core.Flow(name="my-flow-1"),
)
flow_2 = await models.flows.create_flow(
session=session,
flow=schemas.core.Flow(name="my-flow-2"),
)
await session.commit()
return flow_1, flow_2
@pytest.fixture(autouse=True)
async def deployments(
self,
session,
flows,
):
flow_1, flow_2 = flows
deployment_1 = await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="My Deployment X",
flow_id=flow_1.id,
),
)
deployment_2 = await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="My Deployment Y",
flow_id=flow_2.id,
),
)
await session.commit()
return deployment_1, deployment_2
@pytest.fixture(autouse=True)
async def flow_runs(
self,
session,
deployments,
):
deployment_1, deployment_2 = deployments
# flow run 1 is in a SCHEDULED state 5 minutes ago
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=deployment_1.flow_id,
deployment_id=deployment_1.id,
flow_version="0.1",
state=schemas.states.State(
type=schemas.states.StateType.SCHEDULED,
timestamp=now_fn("UTC") - datetime.timedelta(minutes=5),
state_details=dict(
scheduled_time=now_fn("UTC") - datetime.timedelta(minutes=5)
),
),
),
)
# flow run 2 is in a SCHEDULED state 1 minute ago for deployment 1
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=deployment_1.flow_id,
deployment_id=deployment_1.id,
flow_version="0.1",
tags=["tb12", "goat"],
state=schemas.states.State(
type=schemas.states.StateType.SCHEDULED,
timestamp=now_fn("UTC") - datetime.timedelta(minutes=1),
state_details=dict(
scheduled_time=now_fn("UTC") - datetime.timedelta(minutes=1)
),
),
),
)
# flow run 3 is in a SCHEDULED state 1 minute ago for deployment 2
flow_run_3 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=deployment_2.flow_id,
deployment_id=deployment_2.id,
flow_version="0.1",
tags=["tb12", "goat"],
state=schemas.states.State(
type=schemas.states.StateType.SCHEDULED,
timestamp=now_fn("UTC") - datetime.timedelta(minutes=1),
state_details=dict(
scheduled_time=now_fn("UTC") - datetime.timedelta(minutes=1)
),
),
),
)
await session.commit()
return flow_run_1, flow_run_2, flow_run_3
async def test_get_scheduled_runs_for_a_deployment(
self,
ephemeral_client_with_lifespan,
deployments,
flow_runs,
):
deployment_1, _deployment_2 = deployments
response = await ephemeral_client_with_lifespan.post(
"/deployments/get_scheduled_flow_runs",
json=dict(deployment_ids=[str(deployment_1.id)]),
)
assert response.status_code == 200
assert {res["id"] for res in response.json()} == {
str(flow_run.id) for flow_run in flow_runs[:2]
}
async for attempt in retry_asserts(max_attempts=10, delay=0.5):
with attempt:
assert_status_events(deployment_1.name, ["prefect.deployment.ready"])
async def test_get_scheduled_runs_for_multiple_deployments(
self,
ephemeral_client_with_lifespan,
deployments,
flow_runs,
):
deployment_1, deployment_2 = deployments
response = await ephemeral_client_with_lifespan.post(
"/deployments/get_scheduled_flow_runs",
json=dict(deployment_ids=[str(deployment_1.id), str(deployment_2.id)]),
)
assert response.status_code == 200
assert {res["id"] for res in response.json()} == {
str(flow_run.id) for flow_run in flow_runs
}
async for attempt in retry_asserts(max_attempts=10, delay=0.5):
with attempt:
assert_status_events(deployment_1.name, ["prefect.deployment.ready"])
assert_status_events(deployment_2.name, ["prefect.deployment.ready"])
async def test_get_scheduled_runs_respects_limit(
self,
hosted_api_client,
flow_runs,
deployments,
):
deployment_1, _deployment_2 = deployments
response = await hosted_api_client.post(
"/deployments/get_scheduled_flow_runs",
json=dict(deployment_ids=[str(deployment_1.id)], limit=1),
)
assert response.status_code == 200
assert {res["id"] for res in response.json()} == {str(flow_runs[0].id)}
# limit should still be constrained by Orion settings though
response = await hosted_api_client.post(
"/deployments/get_scheduled_flow_runs",
json=dict(limit=9001),
)
assert response.status_code == 422
async def test_get_scheduled_runs_respects_scheduled_before(
self,
hosted_api_client,
flow_runs,
deployments,
):
deployment_1, _deployment_2 = deployments
# picks up one of the runs for the first deployment, but not the other
response = await hosted_api_client.post(
"/deployments/get_scheduled_flow_runs",
json=dict(
deployment_ids=[str(deployment_1.id)],
scheduled_before=str(now_fn("UTC") - datetime.timedelta(minutes=2)),
),
)
assert response.status_code == 200
assert {res["id"] for res in response.json()} == {str(flow_runs[0].id)}
async def test_get_scheduled_runs_sort_order(
self,
hosted_api_client,
flow_runs,
deployments,
):
"""Should sort by next scheduled start time ascending"""
deployment_1, deployment_2 = deployments
response = await hosted_api_client.post(
"/deployments/get_scheduled_flow_runs",
json=dict(deployment_ids=[str(deployment_1.id), str(deployment_2.id)]),
)
assert response.status_code == 200
assert [res["id"] for res in response.json()] == [
str(flow_run.id) for flow_run in flow_runs[:3]
]
async def test_get_scheduled_flow_runs_updates_last_polled_time_and_status(
self,
hosted_api_client,
flow_runs,
deployments,
):
deployment_1, deployment_2 = deployments
response1 = await hosted_api_client.get(f"/deployments/{deployment_1.id}")
assert response1.status_code == 200
assert response1.json()["last_polled"] is None
assert response1.json()["status"] == "NOT_READY"
response2 = await hosted_api_client.get(f"/deployments/{deployment_2.id}")
assert response2.status_code == 200
assert response2.json()["last_polled"] is None
assert response2.json()["status"] == "NOT_READY"
updated_response = await hosted_api_client.post(
"/deployments/get_scheduled_flow_runs",
json=dict(deployment_ids=[str(deployment_1.id)]),
)
assert updated_response.status_code == 200
async for attempt in retry_asserts(max_attempts=10, delay=0.5):
with attempt:
updated_response_deployment_1 = await hosted_api_client.get(
f"/deployments/{deployment_1.id}"
)
assert updated_response_deployment_1.status_code == 200
assert updated_response_deployment_1.json()["last_polled"] is not None
assert (
updated_response_deployment_1.json()["last_polled"]
> (now_fn("UTC") - datetime.timedelta(minutes=1)).isoformat()
)
assert updated_response_deployment_1.json()["status"] == "READY"
same_response_deployment_2 = await hosted_api_client.get(
f"/deployments/{deployment_2.id}"
)
assert same_response_deployment_2.status_code == 200
assert same_response_deployment_2.json()["last_polled"] is None
assert same_response_deployment_2.json()["status"] == "NOT_READY"
async def test_get_scheduled_flow_runs_updates_last_polled_time_and_status_multiple_deployments(
self,
hosted_api_client,
flow_runs,
deployments,
):
deployment_1, deployment_2 = deployments
response_1 = await hosted_api_client.get(f"/deployments/{deployment_1.id}")
assert response_1.status_code == 200
assert response_1.json()["last_polled"] is None
assert response_1.json()["status"] == "NOT_READY"
response_2 = await hosted_api_client.get(f"/deployments/{deployment_2.id}")
assert response_2.status_code == 200
assert response_2.json()["last_polled"] is None
assert response_2.json()["status"] == "NOT_READY"
updated_response = await hosted_api_client.post(
"/deployments/get_scheduled_flow_runs",
json=dict(deployment_ids=[str(deployment_1.id), str(deployment_2.id)]),
)
assert updated_response.status_code == 200
async for attempt in retry_asserts(max_attempts=10, delay=0.5):
with attempt:
updated_response_1 = await hosted_api_client.get(
f"/deployments/{deployment_1.id}"
)
assert updated_response_1.status_code == 200
assert updated_response_1.json()["last_polled"] is not None
assert (
updated_response_1.json()["last_polled"]
> (now_fn("UTC") - datetime.timedelta(minutes=1)).isoformat()
)
assert updated_response_1.json()["status"] == "READY"
updated_response_2 = await hosted_api_client.get(
f"/deployments/{deployment_2.id}"
)
assert updated_response_2.status_code == 200
assert (
updated_response_2.json()["last_polled"]
> (now_fn("UTC") - datetime.timedelta(minutes=1)).isoformat()
)
assert updated_response_2.json()["status"] == "READY"
| TestGetScheduledFlowRuns |
python | scrapy__scrapy | tests/test_feedexport.py | {
"start": 79888,
"end": 95577
} | class ____(TestFeedExportBase):
_file_mark = "_%(batch_time)s_#%(batch_id)02d_"
async def run_and_export(
self, spider_cls: type[Spider], settings: dict[str, Any]
) -> dict[str, list[bytes]]:
"""Run spider with specified settings; return exported data."""
FEEDS = settings.get("FEEDS") or {}
settings["FEEDS"] = {
build_url(file_path): feed for file_path, feed in FEEDS.items()
}
content: defaultdict[str, list[bytes]] = defaultdict(list)
spider_cls.start_urls = [self.mockserver.url("/")]
crawler = get_crawler(spider_cls, settings)
await maybe_deferred_to_future(crawler.crawl())
for path, feed in FEEDS.items():
dir_name = Path(path).parent
if not dir_name.exists():
content[feed["format"]] = []
continue
for file in sorted(dir_name.iterdir()):
content[feed["format"]].append(file.read_bytes())
return content
async def assertExportedJsonLines(self, items, rows, settings=None):
settings = settings or {}
settings.update(
{
"FEEDS": {
self._random_temp_filename() / "jl" / self._file_mark: {
"format": "jl"
},
},
}
)
batch_size = Settings(settings).getint("FEED_EXPORT_BATCH_ITEM_COUNT")
rows = [{k: v for k, v in row.items() if v} for row in rows]
data = await self.exported_data(items, settings)
for batch in data["jl"]:
got_batch = [
json.loads(to_unicode(batch_item)) for batch_item in batch.splitlines()
]
expected_batch, rows = rows[:batch_size], rows[batch_size:]
assert got_batch == expected_batch
async def assertExportedCsv(self, items, header, rows, settings=None):
settings = settings or {}
settings.update(
{
"FEEDS": {
self._random_temp_filename() / "csv" / self._file_mark: {
"format": "csv"
},
},
}
)
batch_size = Settings(settings).getint("FEED_EXPORT_BATCH_ITEM_COUNT")
data = await self.exported_data(items, settings)
for batch in data["csv"]:
got_batch = csv.DictReader(to_unicode(batch).splitlines())
assert list(header) == got_batch.fieldnames
expected_batch, rows = rows[:batch_size], rows[batch_size:]
assert list(got_batch) == expected_batch
async def assertExportedXml(self, items, rows, settings=None):
settings = settings or {}
settings.update(
{
"FEEDS": {
self._random_temp_filename() / "xml" / self._file_mark: {
"format": "xml"
},
},
}
)
batch_size = Settings(settings).getint("FEED_EXPORT_BATCH_ITEM_COUNT")
rows = [{k: v for k, v in row.items() if v} for row in rows]
data = await self.exported_data(items, settings)
for batch in data["xml"]:
root = lxml.etree.fromstring(batch)
got_batch = [{e.tag: e.text for e in it} for it in root.findall("item")]
expected_batch, rows = rows[:batch_size], rows[batch_size:]
assert got_batch == expected_batch
async def assertExportedMultiple(self, items, rows, settings=None):
settings = settings or {}
settings.update(
{
"FEEDS": {
self._random_temp_filename() / "xml" / self._file_mark: {
"format": "xml"
},
self._random_temp_filename() / "json" / self._file_mark: {
"format": "json"
},
},
}
)
batch_size = Settings(settings).getint("FEED_EXPORT_BATCH_ITEM_COUNT")
rows = [{k: v for k, v in row.items() if v} for row in rows]
data = await self.exported_data(items, settings)
# XML
xml_rows = rows.copy()
for batch in data["xml"]:
root = lxml.etree.fromstring(batch)
got_batch = [{e.tag: e.text for e in it} for it in root.findall("item")]
expected_batch, xml_rows = xml_rows[:batch_size], xml_rows[batch_size:]
assert got_batch == expected_batch
# JSON
json_rows = rows.copy()
for batch in data["json"]:
got_batch = json.loads(batch.decode("utf-8"))
expected_batch, json_rows = json_rows[:batch_size], json_rows[batch_size:]
assert got_batch == expected_batch
async def assertExportedPickle(self, items, rows, settings=None):
settings = settings or {}
settings.update(
{
"FEEDS": {
self._random_temp_filename() / "pickle" / self._file_mark: {
"format": "pickle"
},
},
}
)
batch_size = Settings(settings).getint("FEED_EXPORT_BATCH_ITEM_COUNT")
rows = [{k: v for k, v in row.items() if v} for row in rows]
data = await self.exported_data(items, settings)
for batch in data["pickle"]:
got_batch = self._load_until_eof(batch, load_func=pickle.load)
expected_batch, rows = rows[:batch_size], rows[batch_size:]
assert got_batch == expected_batch
async def assertExportedMarshal(self, items, rows, settings=None):
settings = settings or {}
settings.update(
{
"FEEDS": {
self._random_temp_filename() / "marshal" / self._file_mark: {
"format": "marshal"
},
},
}
)
batch_size = Settings(settings).getint("FEED_EXPORT_BATCH_ITEM_COUNT")
rows = [{k: v for k, v in row.items() if v} for row in rows]
data = await self.exported_data(items, settings)
for batch in data["marshal"]:
got_batch = self._load_until_eof(batch, load_func=marshal.load)
expected_batch, rows = rows[:batch_size], rows[batch_size:]
assert got_batch == expected_batch
@deferred_f_from_coro_f
async def test_export_items(self):
"""Test partial deliveries in all supported formats"""
items = [
self.MyItem({"foo": "bar1", "egg": "spam1"}),
self.MyItem({"foo": "bar2", "egg": "spam2", "baz": "quux2"}),
self.MyItem({"foo": "bar3", "baz": "quux3"}),
]
rows = [
{"egg": "spam1", "foo": "bar1", "baz": ""},
{"egg": "spam2", "foo": "bar2", "baz": "quux2"},
{"foo": "bar3", "baz": "quux3", "egg": ""},
]
settings = {"FEED_EXPORT_BATCH_ITEM_COUNT": 2}
header = self.MyItem.fields.keys()
await self.assertExported(items, header, rows, settings=settings)
def test_wrong_path(self):
"""If path is without %(batch_time)s and %(batch_id) an exception must be raised"""
settings = {
"FEEDS": {
self._random_temp_filename(): {"format": "xml"},
},
"FEED_EXPORT_BATCH_ITEM_COUNT": 1,
}
crawler = get_crawler(settings_dict=settings)
with pytest.raises(NotConfigured):
FeedExporter(crawler)
@deferred_f_from_coro_f
async def test_export_no_items_not_store_empty(self):
for fmt in ("json", "jsonlines", "xml", "csv"):
settings = {
"FEEDS": {
self._random_temp_filename() / fmt / self._file_mark: {
"format": fmt
},
},
"FEED_EXPORT_BATCH_ITEM_COUNT": 1,
"FEED_STORE_EMPTY": False,
}
data = await self.exported_no_data(settings)
data = dict(data)
assert len(data[fmt]) == 0
@deferred_f_from_coro_f
async def test_export_no_items_store_empty(self):
formats = (
("json", b"[]"),
("jsonlines", b""),
("xml", b'<?xml version="1.0" encoding="utf-8"?>\n<items></items>'),
("csv", b""),
)
for fmt, expctd in formats:
settings = {
"FEEDS": {
self._random_temp_filename() / fmt / self._file_mark: {
"format": fmt
},
},
"FEED_STORE_EMPTY": True,
"FEED_EXPORT_INDENT": None,
"FEED_EXPORT_BATCH_ITEM_COUNT": 1,
}
data = await self.exported_no_data(settings)
data = dict(data)
assert data[fmt][0] == expctd
@deferred_f_from_coro_f
async def test_export_multiple_configs(self):
items = [
{"foo": "FOO", "bar": "BAR"},
{"foo": "FOO1", "bar": "BAR1"},
]
formats = {
"json": [
b'[\n{"bar": "BAR"}\n]',
b'[\n{"bar": "BAR1"}\n]',
],
"xml": [
(
b'<?xml version="1.0" encoding="latin-1"?>\n'
b"<items>\n <item>\n <foo>FOO</foo>\n </item>\n</items>"
),
(
b'<?xml version="1.0" encoding="latin-1"?>\n'
b"<items>\n <item>\n <foo>FOO1</foo>\n </item>\n</items>"
),
],
"csv": [
b"foo,bar\r\nFOO,BAR\r\n",
b"foo,bar\r\nFOO1,BAR1\r\n",
],
}
settings = {
"FEEDS": {
self._random_temp_filename() / "json" / self._file_mark: {
"format": "json",
"indent": 0,
"fields": ["bar"],
"encoding": "utf-8",
},
self._random_temp_filename() / "xml" / self._file_mark: {
"format": "xml",
"indent": 2,
"fields": ["foo"],
"encoding": "latin-1",
},
self._random_temp_filename() / "csv" / self._file_mark: {
"format": "csv",
"indent": None,
"fields": ["foo", "bar"],
"encoding": "utf-8",
},
},
"FEED_EXPORT_BATCH_ITEM_COUNT": 1,
}
data = await self.exported_data(items, settings)
for fmt, expected in formats.items():
for expected_batch, got_batch in zip(expected, data[fmt], strict=False):
assert got_batch == expected_batch
@deferred_f_from_coro_f
async def test_batch_item_count_feeds_setting(self):
items = [{"foo": "FOO"}, {"foo": "FOO1"}]
formats = {
"json": [
b'[{"foo": "FOO"}]',
b'[{"foo": "FOO1"}]',
],
}
settings = {
"FEEDS": {
self._random_temp_filename() / "json" / self._file_mark: {
"format": "json",
"indent": None,
"encoding": "utf-8",
"batch_item_count": 1,
},
},
}
data = await self.exported_data(items, settings)
for fmt, expected in formats.items():
for expected_batch, got_batch in zip(expected, data[fmt], strict=False):
assert got_batch == expected_batch
@deferred_f_from_coro_f
async def test_batch_path_differ(self):
"""
Test that the name of all batch files differ from each other.
So %(batch_id)d replaced with the current id.
"""
items = [
self.MyItem({"foo": "bar1", "egg": "spam1"}),
self.MyItem({"foo": "bar2", "egg": "spam2", "baz": "quux2"}),
self.MyItem({"foo": "bar3", "baz": "quux3"}),
]
settings = {
"FEEDS": {
self._random_temp_filename() / "%(batch_id)d": {
"format": "json",
},
},
"FEED_EXPORT_BATCH_ITEM_COUNT": 1,
}
data = await self.exported_data(items, settings)
assert len(items) == len(data["json"])
@inlineCallbacks
def test_stats_batch_file_success(self):
settings = {
"FEEDS": {
build_url(
str(self._random_temp_filename() / "json" / self._file_mark)
): {
"format": "json",
}
},
"FEED_EXPORT_BATCH_ITEM_COUNT": 1,
}
crawler = get_crawler(ItemSpider, settings)
yield crawler.crawl(total=2, mockserver=self.mockserver)
assert "feedexport/success_count/FileFeedStorage" in crawler.stats.get_stats()
assert crawler.stats.get_value("feedexport/success_count/FileFeedStorage") == 12
@pytest.mark.requires_boto3
@inlineCallbacks
def test_s3_export(self):
bucket = "mybucket"
items = [
self.MyItem({"foo": "bar1", "egg": "spam1"}),
self.MyItem({"foo": "bar2", "egg": "spam2", "baz": "quux2"}),
self.MyItem({"foo": "bar3", "baz": "quux3"}),
]
class CustomS3FeedStorage(S3FeedStorage):
stubs = []
def open(self, *args, **kwargs):
from botocore import __version__ as botocore_version # noqa: PLC0415
from botocore.stub import ANY, Stubber # noqa: PLC0415
expected_params = {
"Body": ANY,
"Bucket": bucket,
"Key": ANY,
}
if Version(botocore_version) >= Version("1.36.0"):
expected_params["ChecksumAlgorithm"] = ANY
stub = Stubber(self.s3_client)
stub.activate()
CustomS3FeedStorage.stubs.append(stub)
stub.add_response(
"put_object",
expected_params=expected_params,
service_response={},
)
return super().open(*args, **kwargs)
key = "export.csv"
uri = f"s3://{bucket}/{key}/%(batch_id)d.json"
batch_item_count = 1
settings = {
"AWS_ACCESS_KEY_ID": "access_key",
"AWS_SECRET_ACCESS_KEY": "secret_key",
"FEED_EXPORT_BATCH_ITEM_COUNT": batch_item_count,
"FEED_STORAGES": {
"s3": CustomS3FeedStorage,
},
"FEEDS": {
uri: {
"format": "json",
},
},
}
crawler = get_crawler(settings_dict=settings)
storage = S3FeedStorage.from_crawler(crawler, uri)
verifyObject(IFeedStorage, storage)
class TestSpider(scrapy.Spider):
name = "testspider"
def parse(self, response):
yield from items
TestSpider.start_urls = [self.mockserver.url("/")]
crawler = get_crawler(TestSpider, settings)
yield crawler.crawl()
assert len(CustomS3FeedStorage.stubs) == len(items)
for stub in CustomS3FeedStorage.stubs[:-1]:
stub.assert_no_pending_responses()
# Test that the FeedExporer sends the feed_exporter_closed and feed_slot_closed signals
| TestBatchDeliveries |
python | doocs__leetcode | lcof2/剑指 Offer II 015. 字符串中的所有变位词/Solution2.py | {
"start": 0,
"end": 771
} | class ____:
def findAnagrams(self, s: str, p: str) -> List[int]:
m, n = len(s), len(p)
if m < n:
return []
cnt = Counter()
for a, b in zip(s, p):
cnt[a] += 1
cnt[b] -= 1
diff = sum(x != 0 for x in cnt.values())
ans = []
if diff == 0:
ans.append(0)
for i in range(n, m):
a, b = s[i - n], s[i]
if cnt[a] == 0:
diff += 1
cnt[a] -= 1
if cnt[a] == 0:
diff -= 1
if cnt[b] == 0:
diff += 1
cnt[b] += 1
if cnt[b] == 0:
diff -= 1
if diff == 0:
ans.append(i - n + 1)
return ans
| Solution |
python | geekcomputers__Python | Snake Game Using Turtle/snake.py | {
"start": 270,
"end": 2512
} | class ____:
""" This class creates a snake body and contains methods for movement and extension. """
def __init__(self):
self.segments = []
self.create_snake()
self.head = self.segments[0]
def create_snake(self):
""" Creates the initial snake body. """
for position in STARTING_POSITIONS:
self.add_segment(position)
self.segments[0].color(colors.FIRST_SEGMENT_COLOR)
def add_segment(self, position):
""" Adds a new segment to the snake. """
new_segment = Turtle(shape="square")
new_segment.penup()
new_segment.goto(position)
new_segment.color(colors.BODY_COLOR)
self.segments.append(new_segment)
def extend(self):
""" Adds a new segment to the snake's tail. """
self.add_segment(self.segments[-1].position())
self.segments[0].color(colors.FIRST_SEGMENT_COLOR)
def move(self):
""" Moves the snake forward by moving each segment to the position of the one in front."""
for i in range(len(self.segments) - 1, 0, -1):
x = self.segments[i - 1].xcor()
y = self.segments[i - 1].ycor()
self.segments[i].goto(x, y)
self.head.forward(MOVE_DISTANCE)
def reset(self):
"""Hides the old snake and creates a new one for restarting the game."""
for segment in self.segments:
segment.hideturtle()
self.segments.clear()
self.create_snake()
self.head = self.segments[0]
def up(self):
"""Turns the snake's head upwards, preventing it from reversing."""
if self.head.heading() != DOWN:
self.head.setheading(UP)
def down(self):
"""Turns the snake's head downwards, preventing it from reversing."""
if self.head.heading() != UP:
self.head.setheading(DOWN)
def left(self):
"""Turns the snake's head to the left, preventing it from reversing."""
if self.head.heading() != RIGHT:
self.head.setheading(LEFT)
def right(self):
"""Turns the snake's head to the right, preventing it from reversing."""
if self.head.heading() != LEFT:
self.head.setheading(RIGHT)
| Snake |
python | psf__requests | tests/testserver/server.py | {
"start": 3845,
"end": 5147
} | class ____(Server):
def __init__(
self,
*,
handler=None,
host="localhost",
port=0,
requests_to_handle=1,
wait_to_close_event=None,
cert_chain=None,
keyfile=None,
mutual_tls=False,
cacert=None,
):
super().__init__(
handler=handler,
host=host,
port=port,
requests_to_handle=requests_to_handle,
wait_to_close_event=wait_to_close_event,
)
self.cert_chain = cert_chain
self.keyfile = keyfile
self.ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.ssl_context.load_cert_chain(self.cert_chain, keyfile=self.keyfile)
self.mutual_tls = mutual_tls
self.cacert = cacert
if mutual_tls:
# For simplicity, we're going to assume that the client cert is
# issued by the same CA as our Server certificate
self.ssl_context.verify_mode = ssl.CERT_OPTIONAL
self.ssl_context.load_verify_locations(self.cacert)
def _create_socket_and_bind(self):
sock = socket.socket()
sock = self.ssl_context.wrap_socket(sock, server_side=True)
sock.bind((self.host, self.port))
sock.listen()
return sock
| TLSServer |
python | huggingface__transformers | tests/models/clip/test_tokenization_clip.py | {
"start": 228,
"end": 3822
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "openai/clip-vit-base-patch32"
tokenizer_class = CLIPTokenizer
integration_expected_tokens = ['this</w>', 'is</w>', 'a</w>', 'test</w>', 'ðŁĺĬ</w>', 'i</w>', 'was</w>', 'born</w>', 'in</w>', '9</w>', '2</w>', '0</w>', '0</w>', '0</w>', ',</w>', 'and</w>', 'this</w>', 'is</w>', 'fal', 's', 'é</w>', '.</w>', 'çĶŁ', 'æ', '´', '»', 'ç', 'ļ', 'Ħ', '羣', 'è', '°', 'Ľ', 'æĺ', '¯</w>', 'hi</w>', 'hello</w>', 'hi</w>', 'hello</w>', 'hello</w>', '<</w>', 's</w>', '></w>', 'hi</w>', '<</w>', 's</w>', '></w>', 'there</w>', 'the</w>', 'following</w>', 'string</w>', 'should</w>', 'be</w>', 'properly</w>', 'en', 'coded</w>', ':</w>', 'hello</w>', '.</w>', 'but</w>', 'ird</w>', 'and</w>', 'à¸', 'Ľ</w>', 'ี</w>', 'ird</w>', 'à¸Ķ</w>', 'hey</w>', 'how</w>', 'are</w>', 'you</w>', 'doing</w>'] # fmt: skip
integration_expected_token_ids = [589, 533, 320, 1628, 3020, 328, 739, 2683, 530, 280, 273, 271, 271, 271, 267, 537, 589, 533, 2778, 82, 4166, 269, 33375, 162, 112, 119, 163, 248, 226, 41570, 164, 108, 249, 42891, 363, 1883, 3306, 1883, 3306, 3306, 283, 338, 285, 1883, 283, 338, 285, 997, 518, 3473, 9696, 1535, 655, 12560, 524, 33703, 281, 3306, 269, 767, 2770, 537, 1777, 505, 20278, 2770, 38825, 2189, 829, 631, 592, 1960] # fmt: skip
expected_tokens_from_ids = ['this</w>', 'is</w>', 'a</w>', 'test</w>', 'ðŁĺĬ</w>', 'i</w>', 'was</w>', 'born</w>', 'in</w>', '9</w>', '2</w>', '0</w>', '0</w>', '0</w>', ',</w>', 'and</w>', 'this</w>', 'is</w>', 'fal', 's', 'é</w>', '.</w>', 'çĶŁ', 'æ', '´', '»', 'ç', 'ļ', 'Ħ', '羣', 'è', '°', 'Ľ', 'æĺ', '¯</w>', 'hi</w>', 'hello</w>', 'hi</w>', 'hello</w>', 'hello</w>', '<</w>', 's</w>', '></w>', 'hi</w>', '<</w>', 's</w>', '></w>', 'there</w>', 'the</w>', 'following</w>', 'string</w>', 'should</w>', 'be</w>', 'properly</w>', 'en', 'coded</w>', ':</w>', 'hello</w>', '.</w>', 'but</w>', 'ird</w>', 'and</w>', 'à¸', 'Ľ</w>', 'ี</w>', 'ird</w>', 'à¸Ķ</w>', 'hey</w>', 'how</w>', 'are</w>', 'you</w>', 'doing</w>'] # fmt: skip
integration_expected_decoded_text = "this is a test 😊 i was born in 9 2 0 0 0 , and this is falsé . 生活的真谛是 hi hello hi hello hello < s > hi < s > there the following string should be properly encoded : hello . but ird and ป ี ird ด hey how are you doing"
@classmethod
def setUpClass(cls):
super().setUpClass()
from_pretrained_id = "openai/clip-vit-base-patch32"
tokenizer = CLIPTokenizer.from_pretrained(from_pretrained_id)
tokenizer.pad_token = getattr(tokenizer, "pad_token", None) or getattr(tokenizer, "eos_token", None)
tokenizer.save_pretrained(cls.tmpdirname)
vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: skip
cls.vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges_raw = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
cls.special_tokens_map = {"unk_token": "<unk>"}
cls.merges = []
for line in merges_raw:
line = line.strip()
if line and not line.startswith("#"):
cls.merges.append(tuple(line.split()))
tokenizer_from_vocab = CLIPTokenizer(vocab=cls.vocab_tokens, merges=cls.merges)
cls.tokenizers = [tokenizer, tokenizer_from_vocab]
def test_padding_to_multiple_of(self):
self.skipTest("Skipping padding to multiple of test bc vocab is too small.")
| CLIPTokenizationTest |
python | django__django | django/core/files/storage/__init__.py | {
"start": 445,
"end": 622
} | class ____(LazyObject):
def _setup(self):
self._wrapped = storages[DEFAULT_STORAGE_ALIAS]
storages = StorageHandler()
default_storage = DefaultStorage()
| DefaultStorage |
python | pytransitions__transitions | tests/test_states.py | {
"start": 7206,
"end": 7780
} | class ____(TestDiagramsLockedNested):
def setUp(self):
machine_cls = MachineFactory.get_predefined(locked=True, nested=True, graph=True)
@add_state_features(Error, Timeout, Volatile)
class CustomMachine(machine_cls): # type: ignore
pass
super(TestStatesDiagramsLockedNested, self).setUp()
self.machine_cls = CustomMachine
def test_nested_notebook(self):
# test will create a custom state machine already. This will cause errors when inherited.
self.assertTrue(True)
| TestStatesDiagramsLockedNested |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/elements.py | {
"start": 136343,
"end": 137563
} | class ____(ColumnElement[Any]):
"""Represent SQL for a Python array-slice object.
This is not a specific SQL construct at this level, but
may be interpreted by specific dialects, e.g. PostgreSQL.
"""
__visit_name__ = "slice"
_traverse_internals: _TraverseInternalsType = [
("start", InternalTraversal.dp_clauseelement),
("stop", InternalTraversal.dp_clauseelement),
("step", InternalTraversal.dp_clauseelement),
]
def __init__(self, start, stop, step, _name=None):
self.start = coercions.expect(
roles.ExpressionElementRole,
start,
name=_name,
type_=type_api.INTEGERTYPE,
)
self.stop = coercions.expect(
roles.ExpressionElementRole,
stop,
name=_name,
type_=type_api.INTEGERTYPE,
)
self.step = coercions.expect(
roles.ExpressionElementRole,
step,
name=_name,
type_=type_api.INTEGERTYPE,
)
self.type = type_api.NULLTYPE
def self_group(self, against: Optional[OperatorType] = None) -> Self:
assert against is operator.getitem
return self
| Slice |
python | Netflix__metaflow | metaflow/procpoll.py | {
"start": 32,
"end": 225
} | class ____(object):
def __init__(self, fd, can_read=False, is_terminated=False):
self.fd = fd
self.can_read = can_read
self.is_terminated = is_terminated
| ProcPollEvent |
python | ansible__ansible | lib/ansible/_internal/_errors/_handler.py | {
"start": 724,
"end": 1045
} | class ____(BaseException):
"""Internal flow control exception for skipping code blocks within a `Skippable` context manager."""
def __init__(self) -> None:
super().__init__('Skipping ignored action due to use of `skip_on_ignore`. It is a bug to encounter this message outside of debugging.')
| _SkipException |
python | allegroai__clearml | clearml/utilities/pyhocon/config_parser.py | {
"start": 1795,
"end": 6137
} | class ____(object):
@classmethod
def parse_file(cls, filename, encoding='utf-8', required=True, resolve=True, unresolved_value=DEFAULT_SUBSTITUTION):
"""Parse file
:param filename: filename
:type filename: basestring
:param encoding: file encoding
:type encoding: basestring
:param required: If true, raises an exception if can't load file
:type required: boolean
:param resolve: if true, resolve substitutions
:type resolve: boolean
:param unresolved_value: assigned value value to unresolved substitution.
If overriden with a default value, it will replace all unresolved value to the default value.
If it is set to to pyhocon.STR_SUBSTITUTION then it will replace the value by its
substitution expression (e.g., ${x})
:type unresolved_value: class
:return: Config object
:type return: Config
"""
try:
with codecs.open(filename, 'r', encoding=encoding) as fd:
content = fd.read()
return cls.parse_string(content, os.path.dirname(filename), resolve, unresolved_value)
except IOError as e:
if required:
raise e
logger.warn('Cannot include file %s. File does not exist or cannot be read.', filename)
return []
@classmethod
def parse_URL(cls, url, timeout=None, resolve=True, required=False, unresolved_value=DEFAULT_SUBSTITUTION):
"""Parse URL
:param url: url to parse
:type url: basestring
:param resolve: if true, resolve substitutions
:type resolve: boolean
:param unresolved_value: assigned value value to unresolved substitution.
If overriden with a default value, it will replace all unresolved value to the default value.
If it is set to to pyhocon.STR_SUBSTITUTION then it will replace the value by
its substitution expression (e.g., ${x})
:type unresolved_value: boolean
:return: Config object or []
:type return: Config or list
"""
socket_timeout = socket._GLOBAL_DEFAULT_TIMEOUT if timeout is None else timeout
try:
with contextlib.closing(urlopen(url, timeout=socket_timeout)) as fd:
content = fd.read() if use_urllib2 else fd.read().decode('utf-8')
return cls.parse_string(content, os.path.dirname(url), resolve, unresolved_value)
except (HTTPError, URLError) as e:
logger.warn('Cannot include url %s. Resource is inaccessible.', url)
if required:
raise e
else:
return []
@classmethod
def parse_string(cls, content, basedir=None, resolve=True, unresolved_value=DEFAULT_SUBSTITUTION):
"""Parse URL
:param content: content to parse
:type content: basestring
:param resolve: If true, resolve substitutions
:param resolve: if true, resolve substitutions
:type resolve: boolean
:param unresolved_value: assigned value value to unresolved substitution.
If overriden with a default value, it will replace all unresolved value to the default value.
If it is set to to pyhocon.STR_SUBSTITUTION then it will replace the value by
its substitution expression (e.g., ${x})
:type unresolved_value: boolean
:return: Config object
:type return: Config
"""
return ConfigParser().parse(content, basedir, resolve, unresolved_value)
@classmethod
def from_dict(cls, dictionary, root=False):
"""Convert dictionary (and ordered dictionary) into a ConfigTree
:param dictionary: dictionary to convert
:type dictionary: dict
:return: Config object
:type return: Config
"""
def create_tree(value):
if isinstance(value, dict):
res = ConfigTree(root=root)
for key, child_value in value.items():
res.put(key, create_tree(child_value))
return res
if isinstance(value, list):
return [create_tree(v) for v in value]
else:
return value
return create_tree(dictionary)
| ConfigFactory |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-homecoming-of-a-robot-in-a-grid.py | {
"start": 33,
"end": 531
} | class ____(object):
def minCost(self, startPos, homePos, rowCosts, colCosts):
"""
:type startPos: List[int]
:type homePos: List[int]
:type rowCosts: List[int]
:type colCosts: List[int]
:rtype: int
"""
[x0, y0], [x1, y1] = startPos, homePos
return (sum(rowCosts[i] for i in xrange(min(x0, x1), max(x0, x1)+1))-rowCosts[x0]) + \
(sum(colCosts[i] for i in xrange(min(y0, y1), max(y0, y1)+1))-colCosts[y0])
| Solution |
python | langchain-ai__langchain | libs/partners/anthropic/langchain_anthropic/middleware/anthropic_tools.py | {
"start": 36263,
"end": 37670
} | class ____(_FilesystemClaudeFileToolMiddleware):
"""Filesystem-based text editor tool middleware.
Provides Anthropic's `text_editor` tool using local filesystem for storage.
User handles persistence via volumes, git, or other mechanisms.
Example:
```python
from langchain.agents import create_agent
from langchain.agents.middleware import FilesystemTextEditorToolMiddleware
agent = create_agent(
model=model,
tools=[],
middleware=[FilesystemTextEditorToolMiddleware(root_path="/workspace")],
)
```
"""
def __init__(
self,
*,
root_path: str,
allowed_prefixes: list[str] | None = None,
max_file_size_mb: int = 10,
) -> None:
"""Initialize the text editor middleware.
Args:
root_path: Root directory for file operations.
allowed_prefixes: Optional list of allowed virtual path prefixes.
Defaults to `['/']`.
max_file_size_mb: Maximum file size in MB
Defaults to `10`.
"""
super().__init__(
tool_type=TEXT_EDITOR_TOOL_TYPE,
tool_name=TEXT_EDITOR_TOOL_NAME,
root_path=root_path,
allowed_prefixes=allowed_prefixes,
max_file_size_mb=max_file_size_mb,
)
| FilesystemClaudeTextEditorMiddleware |
python | encode__httpx | httpx/_utils.py | {
"start": 4111,
"end": 8285
} | class ____:
"""
A utility class currently used for making lookups against proxy keys...
# Wildcard matching...
>>> pattern = URLPattern("all://")
>>> pattern.matches(httpx.URL("http://example.com"))
True
# Witch scheme matching...
>>> pattern = URLPattern("https://")
>>> pattern.matches(httpx.URL("https://example.com"))
True
>>> pattern.matches(httpx.URL("http://example.com"))
False
# With domain matching...
>>> pattern = URLPattern("https://example.com")
>>> pattern.matches(httpx.URL("https://example.com"))
True
>>> pattern.matches(httpx.URL("http://example.com"))
False
>>> pattern.matches(httpx.URL("https://other.com"))
False
# Wildcard scheme, with domain matching...
>>> pattern = URLPattern("all://example.com")
>>> pattern.matches(httpx.URL("https://example.com"))
True
>>> pattern.matches(httpx.URL("http://example.com"))
True
>>> pattern.matches(httpx.URL("https://other.com"))
False
# With port matching...
>>> pattern = URLPattern("https://example.com:1234")
>>> pattern.matches(httpx.URL("https://example.com:1234"))
True
>>> pattern.matches(httpx.URL("https://example.com"))
False
"""
def __init__(self, pattern: str) -> None:
from ._urls import URL
if pattern and ":" not in pattern:
raise ValueError(
f"Proxy keys should use proper URL forms rather "
f"than plain scheme strings. "
f'Instead of "{pattern}", use "{pattern}://"'
)
url = URL(pattern)
self.pattern = pattern
self.scheme = "" if url.scheme == "all" else url.scheme
self.host = "" if url.host == "*" else url.host
self.port = url.port
if not url.host or url.host == "*":
self.host_regex: typing.Pattern[str] | None = None
elif url.host.startswith("*."):
# *.example.com should match "www.example.com", but not "example.com"
domain = re.escape(url.host[2:])
self.host_regex = re.compile(f"^.+\\.{domain}$")
elif url.host.startswith("*"):
# *example.com should match "www.example.com" and "example.com"
domain = re.escape(url.host[1:])
self.host_regex = re.compile(f"^(.+\\.)?{domain}$")
else:
# example.com should match "example.com" but not "www.example.com"
domain = re.escape(url.host)
self.host_regex = re.compile(f"^{domain}$")
def matches(self, other: URL) -> bool:
if self.scheme and self.scheme != other.scheme:
return False
if (
self.host
and self.host_regex is not None
and not self.host_regex.match(other.host)
):
return False
if self.port is not None and self.port != other.port:
return False
return True
@property
def priority(self) -> tuple[int, int, int]:
"""
The priority allows URLPattern instances to be sortable, so that
we can match from most specific to least specific.
"""
# URLs with a port should take priority over URLs without a port.
port_priority = 0 if self.port is not None else 1
# Longer hostnames should match first.
host_priority = -len(self.host)
# Longer schemes should match first.
scheme_priority = -len(self.scheme)
return (port_priority, host_priority, scheme_priority)
def __hash__(self) -> int:
return hash(self.pattern)
def __lt__(self, other: URLPattern) -> bool:
return self.priority < other.priority
def __eq__(self, other: typing.Any) -> bool:
return isinstance(other, URLPattern) and self.pattern == other.pattern
def is_ipv4_hostname(hostname: str) -> bool:
try:
ipaddress.IPv4Address(hostname.split("/")[0])
except Exception:
return False
return True
def is_ipv6_hostname(hostname: str) -> bool:
try:
ipaddress.IPv6Address(hostname.split("/")[0])
except Exception:
return False
return True
| URLPattern |
python | pydata__xarray | xarray/coding/cftimeindex.py | {
"start": 7651,
"end": 30761
} | class ____(pd.Index):
"""Custom Index for working with CF calendars and dates
All elements of a CFTimeIndex must be cftime.datetime objects.
Parameters
----------
data : array or CFTimeIndex
Sequence of cftime.datetime objects to use in index
name : str, default: None
Name of the resulting index
See Also
--------
date_range
"""
_data: np.ndarray
year = _field_accessor("year", "The year of the datetime")
month = _field_accessor("month", "The month of the datetime")
day = _field_accessor("day", "The days of the datetime")
hour = _field_accessor("hour", "The hours of the datetime")
minute = _field_accessor("minute", "The minutes of the datetime")
second = _field_accessor("second", "The seconds of the datetime")
microsecond = _field_accessor("microsecond", "The microseconds of the datetime")
dayofyear = _field_accessor(
"dayofyr", "The ordinal day of year of the datetime", "1.0.2.1"
)
dayofweek = _field_accessor("dayofwk", "The day of week of the datetime", "1.0.2.1")
days_in_month = _field_accessor(
"daysinmonth", "The number of days in the month of the datetime", "1.1.0.0"
)
date_type = property(get_date_type)
def __new__(cls, data, name=None, **kwargs):
assert_all_valid_date_type(data)
if name is None and hasattr(data, "name"):
name = data.name
result = object.__new__(cls)
result._data = np.array(data, dtype="O")
result.name = name
result._cache = {}
return result
def __repr__(self):
"""
Return a string representation for this object.
"""
klass_name = type(self).__name__
display_width = OPTIONS["display_width"]
offset = len(klass_name) + 2
if len(self) <= ITEMS_IN_REPR_MAX_ELSE_ELLIPSIS:
datastr = format_times(
self.values, display_width, offset=offset, first_row_offset=0
)
else:
front_str = format_times(
self.values[:REPR_ELLIPSIS_SHOW_ITEMS_FRONT_END],
display_width,
offset=offset,
first_row_offset=0,
last_row_end=",",
)
end_str = format_times(
self.values[-REPR_ELLIPSIS_SHOW_ITEMS_FRONT_END:],
display_width,
offset=offset,
first_row_offset=offset,
)
datastr = "\n".join([front_str, f"{' ' * offset}...", end_str])
attrs_str = format_attrs(self)
# oneliner only if smaller than display_width
full_repr_str = f"{klass_name}([{datastr}], {attrs_str})"
if len(full_repr_str) > display_width:
# if attrs_str too long, one per line
if len(attrs_str) >= display_width - offset:
attrs_str = attrs_str.replace(",", f",\n{' ' * (offset - 2)}")
full_repr_str = (
f"{klass_name}([{datastr}],\n{' ' * (offset - 1)}{attrs_str})"
)
return full_repr_str
def _partial_date_slice(self, resolution, parsed):
"""Adapted from
pandas.tseries.index.DatetimeIndex._partial_date_slice
Note that when using a CFTimeIndex, if a partial-date selection
returns a single element, it will never be converted to a scalar
coordinate; this is in slight contrast to the behavior when using
a DatetimeIndex, which sometimes will return a DataArray with a scalar
coordinate depending on the resolution of the datetimes used in
defining the index. For example:
>>> from cftime import DatetimeNoLeap
>>> da = xr.DataArray(
... [1, 2],
... coords=[[DatetimeNoLeap(2001, 1, 1), DatetimeNoLeap(2001, 2, 1)]],
... dims=["time"],
... )
>>> da.sel(time="2001-01-01")
<xarray.DataArray (time: 1)> Size: 8B
array([1])
Coordinates:
* time (time) object 8B 2001-01-01 00:00:00
>>> da = xr.DataArray(
... [1, 2],
... coords=[[pd.Timestamp(2001, 1, 1), pd.Timestamp(2001, 2, 1)]],
... dims=["time"],
... )
>>> da.sel(time="2001-01-01")
<xarray.DataArray ()> Size: 8B
array(1)
Coordinates:
time datetime64[ns] 8B 2001-01-01
>>> da = xr.DataArray(
... [1, 2],
... coords=[[pd.Timestamp(2001, 1, 1, 1), pd.Timestamp(2001, 2, 1)]],
... dims=["time"],
... )
>>> da.sel(time="2001-01-01")
<xarray.DataArray (time: 1)> Size: 8B
array([1])
Coordinates:
* time (time) datetime64[ns] 8B 2001-01-01T01:00:00
"""
start, end = _parsed_string_to_bounds(self.date_type, resolution, parsed)
times = self._data
if self.is_monotonic_increasing:
if len(times) and (
(start < times[0] and end < times[0])
or (start > times[-1] and end > times[-1])
):
# we are out of range
raise KeyError
# a monotonic (sorted) series can be sliced
left = times.searchsorted(start, side="left")
right = times.searchsorted(end, side="right")
return slice(left, right)
lhs_mask = times >= start
rhs_mask = times <= end
return np.flatnonzero(lhs_mask & rhs_mask)
def _get_string_slice(self, key):
"""Adapted from pandas.tseries.index.DatetimeIndex._get_string_slice"""
parsed, resolution = _parse_iso8601(self.date_type, key)
try:
loc = self._partial_date_slice(resolution, parsed)
except KeyError as err:
raise KeyError(key) from err
return loc
def _get_nearest_indexer(self, target, limit, tolerance):
"""Adapted from pandas.Index._get_nearest_indexer"""
left_indexer = self.get_indexer(target, "pad", limit=limit)
right_indexer = self.get_indexer(target, "backfill", limit=limit)
left_distances = abs(self.values[left_indexer] - target.values)
right_distances = abs(self.values[right_indexer] - target.values)
if self.is_monotonic_increasing:
condition = (left_distances < right_distances) | (right_indexer == -1)
else:
condition = (left_distances <= right_distances) | (right_indexer == -1)
indexer = np.where(condition, left_indexer, right_indexer)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target, indexer, tolerance)
return indexer
def _filter_indexer_tolerance(self, target, indexer, tolerance):
"""Adapted from pandas.Index._filter_indexer_tolerance"""
if isinstance(target, pd.Index):
distance = abs(self.values[indexer] - target.values)
else:
distance = abs(self.values[indexer] - target)
indexer = np.where(distance <= tolerance, indexer, -1)
return indexer
def get_loc(self, key):
"""Adapted from pandas.tseries.index.DatetimeIndex.get_loc"""
if isinstance(key, str):
return self._get_string_slice(key)
else:
return super().get_loc(key)
def _maybe_cast_slice_bound(self, label, side):
"""Adapted from
pandas.tseries.index.DatetimeIndex._maybe_cast_slice_bound
"""
if not isinstance(label, str):
return label
parsed, resolution = _parse_iso8601(self.date_type, label)
start, end = _parsed_string_to_bounds(self.date_type, resolution, parsed)
if self.is_monotonic_decreasing and len(self) > 1:
return end if side == "left" else start
return start if side == "left" else end
# TODO: Add ability to use integer range outside of iloc?
# e.g. series[1:5].
def get_value(self, series, key):
"""Adapted from pandas.tseries.index.DatetimeIndex.get_value"""
if np.asarray(key).dtype == np.dtype(bool):
return series.iloc[key]
elif isinstance(key, slice):
return series.iloc[self.slice_indexer(key.start, key.stop, key.step)]
else:
return series.iloc[self.get_loc(key)]
def __contains__(self, key: Any) -> bool:
"""Adapted from
pandas.tseries.base.DatetimeIndexOpsMixin.__contains__"""
try:
result = self.get_loc(key)
return (
is_scalar(result)
or isinstance(result, slice)
or (isinstance(result, np.ndarray) and result.size > 0)
)
except (KeyError, TypeError, ValueError):
return False
def contains(self, key: Any) -> bool:
"""Needed for .loc based partial-string indexing"""
return self.__contains__(key)
def shift( # type: ignore[override,unused-ignore]
self,
periods: int | float,
freq: str | timedelta | BaseCFTimeOffset | None = None,
) -> Self:
"""Shift the CFTimeIndex a multiple of the given frequency.
See the documentation for :py:func:`~xarray.date_range` for a
complete listing of valid frequency strings.
Parameters
----------
periods : int, float if freq of days or below
Periods to shift by
freq : str, datetime.timedelta or BaseCFTimeOffset
A frequency string or datetime.timedelta object to shift by
Returns
-------
CFTimeIndex
See Also
--------
pandas.DatetimeIndex.shift
Examples
--------
>>> index = xr.date_range("2000", periods=1, freq="ME", use_cftime=True)
>>> index
CFTimeIndex([2000-01-31 00:00:00],
dtype='object', length=1, calendar='standard', freq=None)
>>> index.shift(1, "ME")
CFTimeIndex([2000-02-29 00:00:00],
dtype='object', length=1, calendar='standard', freq=None)
>>> index.shift(1.5, "24h")
CFTimeIndex([2000-02-01 12:00:00],
dtype='object', length=1, calendar='standard', freq=None)
"""
from xarray.coding.cftime_offsets import BaseCFTimeOffset
if freq is None:
# None type is required to be compatible with base pd.Index class
raise TypeError(
f"`freq` argument cannot be None for {type(self).__name__}.shift"
)
if isinstance(freq, timedelta):
return self + periods * freq
if isinstance(freq, str | BaseCFTimeOffset):
from xarray.coding.cftime_offsets import to_offset
return self + periods * to_offset(freq)
raise TypeError(
f"'freq' must be of type str or datetime.timedelta, got {type(freq)}."
)
def __add__(self, other) -> Self:
if isinstance(other, pd.TimedeltaIndex):
other = other.to_pytimedelta()
return type(self)(np.array(self) + other)
def __radd__(self, other) -> Self:
if isinstance(other, pd.TimedeltaIndex):
other = other.to_pytimedelta()
return type(self)(other + np.array(self))
def __sub__(self, other):
if _contains_datetime_timedeltas(other):
return type(self)(np.array(self) - other)
if isinstance(other, pd.TimedeltaIndex):
return type(self)(np.array(self) - other.to_pytimedelta())
if _contains_cftime_datetimes(np.array(other)):
try:
return pd.TimedeltaIndex(np.array(self) - np.array(other))
except OUT_OF_BOUNDS_TIMEDELTA_ERRORS as err:
raise ValueError(
"The time difference exceeds the range of values "
"that can be expressed at the nanosecond resolution."
) from err
return NotImplemented
def __rsub__(self, other):
try:
return pd.TimedeltaIndex(other - np.array(self))
except OUT_OF_BOUNDS_TIMEDELTA_ERRORS as err:
raise ValueError(
"The time difference exceeds the range of values "
"that can be expressed at the nanosecond resolution."
) from err
def to_datetimeindex(
self, unsafe: bool = False, time_unit: PDDatetimeUnitOptions | None = None
) -> pd.DatetimeIndex:
"""If possible, convert this index to a pandas.DatetimeIndex.
Parameters
----------
unsafe : bool
Flag to turn off calendar mismatch warnings (default ``False``).
time_unit : str
Time resolution of resulting DatetimeIndex. Can be one of `"s"`,
``"ms"``, ``"us"``, or ``"ns"`` (default ``"ns"``).
Returns
-------
pandas.DatetimeIndex
Raises
------
ValueError
If the CFTimeIndex contains dates that are not possible in the
standard calendar or outside the range representable by the
specified ``time_unit``.
Warns
-----
RuntimeWarning
If converting from a non-standard calendar, or a Gregorian
calendar with dates prior to the reform (1582-10-15).
Warnings
--------
Note that for non-proleptic Gregorian calendars, this will change the
calendar type of the index. In that case the result of this method
should be used with caution.
Examples
--------
>>> times = xr.date_range(
... "2000", periods=2, calendar="gregorian", use_cftime=True
... )
>>> times
CFTimeIndex([2000-01-01 00:00:00, 2000-01-02 00:00:00],
dtype='object', length=2, calendar='standard', freq=None)
>>> times.to_datetimeindex(time_unit="ns")
DatetimeIndex(['2000-01-01', '2000-01-02'], dtype='datetime64[ns]', freq=None)
"""
if not self._data.size:
return pd.DatetimeIndex([])
if time_unit is None:
emit_user_level_warning(
"In a future version of xarray to_datetimeindex will default "
"to returning a 'us'-resolution DatetimeIndex instead of a "
"'ns'-resolution DatetimeIndex. This warning can be silenced "
"by explicitly passing the `time_unit` keyword argument.",
FutureWarning,
)
time_unit = "ns"
nptimes = cftime_to_nptime(self, time_unit=time_unit)
calendar = infer_calendar_name(self)
if calendar not in _STANDARD_CALENDARS and not unsafe:
emit_user_level_warning(
"Converting a CFTimeIndex with dates from a non-standard "
f"calendar, {calendar!r}, to a pandas.DatetimeIndex, which "
"uses dates from the standard calendar. This may lead to "
"subtle errors in operations that depend on the length of "
"time between dates.",
RuntimeWarning,
)
if calendar == "standard" and not unsafe:
reform_date = self.date_type(1582, 10, 15)
if self.min() < reform_date:
emit_user_level_warning(
"Converting a CFTimeIndex with dates from a Gregorian "
"calendar that fall before the reform date of 1582-10-15 "
"to a pandas.DatetimeIndex. During this time period the "
"Gregorian calendar and the proleptic Gregorian calendar "
"of the DatetimeIndex do not exactly align. This warning "
"can be silenced by setting unsafe=True.",
RuntimeWarning,
)
return pd.DatetimeIndex(nptimes)
def strftime(self, date_format):
"""
Return an Index of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format doc
<https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior>`__
Parameters
----------
date_format : str
Date format string (e.g. "%Y-%m-%d")
Returns
-------
pandas.Index
Index of formatted strings
Examples
--------
>>> rng = xr.date_range(
... start="2000",
... periods=5,
... freq="2MS",
... calendar="noleap",
... use_cftime=True,
... )
>>> rng.strftime("%B %d, %Y, %r")
Index(['January 01, 2000, 12:00:00 AM', 'March 01, 2000, 12:00:00 AM',
'May 01, 2000, 12:00:00 AM', 'July 01, 2000, 12:00:00 AM',
'September 01, 2000, 12:00:00 AM'],
dtype='object')
"""
return pd.Index([date.strftime(date_format) for date in self._data])
@property
def asi8(self):
"""Convert to integers with units of microseconds since 1970-01-01."""
from xarray.core.resample_cftime import exact_cftime_datetime_difference
if not self._data.size:
return np.array([], dtype=np.int64)
epoch = self.date_type(1970, 1, 1)
return np.array(
[
_total_microseconds(exact_cftime_datetime_difference(epoch, date))
for date in self.values
],
dtype=np.int64,
)
@property
def calendar(self):
"""The calendar used by the datetimes in the index."""
if not self._data.size:
return None
return infer_calendar_name(self)
@property
def freq(self):
"""The frequency used by the dates in the index."""
from xarray.coding.frequencies import infer_freq
# min 3 elemtents required to determine freq
if self._data.size < 3:
return None
return infer_freq(self)
def _round_via_method(self, freq, method):
"""Round dates using a specified method."""
from xarray.coding.cftime_offsets import CFTIME_TICKS, Day, to_offset
if not self._data.size:
return CFTimeIndex(np.array(self))
offset = to_offset(freq)
if isinstance(offset, Day):
# Following pandas, "In the 'round' context, Day unambiguously
# means 24h, not calendar-day"
offset_as_timedelta = timedelta(days=offset.n)
elif isinstance(offset, CFTIME_TICKS):
offset_as_timedelta = offset.as_timedelta()
else:
raise ValueError(f"{offset} is a non-fixed frequency")
unit = _total_microseconds(offset_as_timedelta)
values = self.asi8
rounded = method(values, unit)
return _cftimeindex_from_i8(rounded, self.date_type, self.name)
def floor(self, freq):
"""Round dates down to fixed frequency.
Parameters
----------
freq : str
The frequency level to round the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end). See `frequency
aliases <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_
for a list of possible values.
Returns
-------
CFTimeIndex
"""
return self._round_via_method(freq, _floor_int)
def ceil(self, freq):
"""Round dates up to fixed frequency.
Parameters
----------
freq : str
The frequency level to round the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end). See `frequency
aliases <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_
for a list of possible values.
Returns
-------
CFTimeIndex
"""
return self._round_via_method(freq, _ceil_int)
def round(self, freq):
"""Round dates to a fixed frequency.
Parameters
----------
freq : str
The frequency level to round the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end). See `frequency
aliases <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_
for a list of possible values.
Returns
-------
CFTimeIndex
"""
return self._round_via_method(freq, _round_to_nearest_half_even)
@property
def is_leap_year(self):
if TYPE_CHECKING:
import cftime
else:
cftime = attempt_import("cftime")
func = np.vectorize(cftime.is_leap_year)
return func(self.year, calendar=self.calendar)
def _parse_array_of_cftime_strings(strings, date_type):
"""Create a numpy array from an array of strings.
For use in generating dates from strings for use with interp. Assumes the
array is either 0-dimensional or 1-dimensional.
Parameters
----------
strings : array of strings
Strings to convert to dates
date_type : cftime.datetime type
Calendar type to use for dates
Returns
-------
np.array
"""
return np.array([_parse_iso8601(date_type, s)[0] for s in strings.ravel()]).reshape(
strings.shape
)
def _contains_datetime_timedeltas(array):
"""Check if an input array contains datetime.timedelta objects."""
array = np.atleast_1d(array)
return isinstance(array[0], timedelta)
def _cftimeindex_from_i8(values, date_type, name):
"""Construct a CFTimeIndex from an array of integers.
Parameters
----------
values : np.array
Integers representing microseconds since 1970-01-01.
date_type : cftime.datetime
Type of date for the index.
name : str
Name of the index.
Returns
-------
CFTimeIndex
"""
epoch = date_type(1970, 1, 1)
dates = np.array([epoch + timedelta(microseconds=int(value)) for value in values])
return CFTimeIndex(dates, name=name)
def _total_microseconds(delta):
"""Compute the total number of microseconds of a datetime.timedelta.
Parameters
----------
delta : datetime.timedelta
Input timedelta.
Returns
-------
int
"""
return delta / timedelta(microseconds=1)
def _floor_int(values, unit):
"""Copied from pandas."""
return values - np.remainder(values, unit)
def _ceil_int(values, unit):
"""Copied from pandas."""
return values + np.remainder(-values, unit)
def _round_to_nearest_half_even(values, unit):
"""Copied from pandas."""
if unit % 2:
return _ceil_int(values - unit // 2, unit)
quotient, remainder = np.divmod(values, unit)
mask = np.logical_or(
remainder > (unit // 2), np.logical_and(remainder == (unit // 2), quotient % 2)
)
quotient[mask] += 1
return quotient * unit
| CFTimeIndex |
python | Netflix__metaflow | metaflow/exception.py | {
"start": 4475,
"end": 5161
} | class ____(MetaflowException):
headline = "Missing artifacts in merge"
def __init__(self, msg, unhandled):
super(MissingInMergeArtifactsException, self).__init__(msg)
self.artifact_names = unhandled
# Import any exceptions defined by a Metaflow extensions packages
try:
from metaflow.extension_support import get_modules, multiload_globals
multiload_globals(get_modules("exceptions"), globals())
finally:
# Erase all temporary names to avoid leaking things
for _n in ["get_modules", "multiload_globals"]:
try:
del globals()[_n]
except KeyError:
pass
del globals()["_n"]
| MissingInMergeArtifactsException |
python | kamyu104__LeetCode-Solutions | Python/minimum-time-to-collect-all-apples-in-a-tree.py | {
"start": 1901,
"end": 3076
} | class ____(object):
def minTime(self, n, edges, hasApple):
"""
:type n: int
:type edges: List[List[int]]
:type hasApple: List[bool]
:rtype: int
"""
graph = collections.defaultdict(list)
for u, v in edges:
graph[u].append(v)
graph[v].append(u)
result = [0]
s = [(1, (-1, 0, result))]
while s:
step, params = s.pop()
if step == 1:
par, node, ret = params
tmp = [int(hasApple[node])]
s.append((3, (tmp, ret)))
for nei in reversed(graph[node]):
if nei == par:
continue
new_ret = [0]
s.append((2, (new_ret, tmp, ret)))
s.append((1, (node, nei, new_ret)))
elif step == 2:
new_ret, tmp, ret = params
ret[0] += new_ret[0]
tmp[0] |= bool(new_ret[0])
else:
tmp, ret = params
ret[0] += tmp[0]
return 2*max(result[0]-1, 0)
# Time: O(n)
# Space: O(n)
| Solution2 |
python | dagster-io__dagster | python_modules/libraries/dagster-k8s/dagster_k8s/pipes.py | {
"start": 2310,
"end": 13704
} | class ____(PipesMessageReader):
"""Message reader that reads messages from kubernetes pod logs."""
@contextmanager
def read_messages(
self,
handler: PipesMessageHandler,
) -> Iterator[PipesParams]:
self._handler = handler
try:
yield {PipesDefaultMessageWriter.STDIO_KEY: PipesDefaultMessageWriter.STDERR}
finally:
self._handler = None
def _get_consume_logs_request_timeout(self) -> Optional[int]:
request_timeout_env_var = os.getenv("DAGSTER_PIPES_K8S_CONSUME_POD_LOGS_REQUEST_TIMEOUT")
if request_timeout_env_var:
return int(request_timeout_env_var)
return DEFAULT_DAGSTER_PIPES_K8S_CONSUME_POD_LOGS_REQUEST_TIMEOUT
def consume_pod_logs(
self,
context: Union[OpExecutionContext, AssetExecutionContext],
core_api: kubernetes.client.CoreV1Api,
pod_name: str,
namespace: str,
):
last_seen_timestamp = None
catching_up_after_retry = False
request_timeout = self._get_consume_logs_request_timeout()
retries_remaining = int(
os.getenv(
"DAGSTER_PIPES_K8S_CONSUME_POD_LOGS_RETRIES", str(DEFAULT_CONSUME_POD_LOGS_RETRIES)
)
)
handler = check.not_none(
self._handler, "can only consume logs within scope of context manager"
)
while True:
# On retry, re-connect to the log stream for new messages since the last seen timestamp
# (with a buffer to ensure none are missed). The messages are deduplicated by timestamp below.
if last_seen_timestamp:
since_seconds = int(
max(time.time() - parse_time_string(last_seen_timestamp).timestamp(), 0)
+ int(os.getenv("DAGSTER_PIPES_K8S_CONSUME_POD_LOGS_BUFFER_SECONDS", "300"))
)
else:
since_seconds = None
try:
for log_item in _process_log_stream(
core_api.read_namespaced_pod_log(
pod_name,
namespace,
follow=True,
timestamps=True,
since_seconds=since_seconds,
_preload_content=False, # avoid JSON processing
_request_timeout=request_timeout,
)
):
timestamp = log_item.timestamp
message = log_item.log
if (
catching_up_after_retry
and timestamp
and last_seen_timestamp
and timestamp <= last_seen_timestamp
):
# This is a log that we've already seen before from before we retried
continue
else:
catching_up_after_retry = False
extract_message_or_forward_to_stdout(handler, message)
if timestamp:
last_seen_timestamp = (
max(last_seen_timestamp, timestamp)
if last_seen_timestamp
else timestamp
)
return
except Exception as e:
# Expected read timeouts can occur for long-running pods if a request timeout is set
expected_read_timeout = isinstance(e, ReadTimeoutError) and request_timeout
if expected_read_timeout:
# Expected so doesn't need to be logged to event log, but write to stdout
# for visibility
logging.getLogger("dagster").info("Re-connecting to pod logs stream")
else:
if retries_remaining == 0:
raise
retries_remaining -= 1
context.log.warning(
f"Error consuming pod logs. {retries_remaining} retr{('y' if retries_remaining == 1 else 'ies')} remaining",
exc_info=True,
)
catching_up_after_retry = True
@contextmanager
def async_consume_pod_logs(
self,
context: Union[OpExecutionContext, AssetExecutionContext],
core_api: kubernetes.client.CoreV1Api,
pod_name: str,
namespace: str,
) -> Generator:
"""Consume all logs from all containers within the pod.
Args:
context (Union[OpExecutionContext, AssetExecutionContext]): The execution context.
core_api: The k8s core API.
pod_name: The pod to collect logs from.
namespace: The namespace to collect logs from.
"""
handler = check.not_none(
self._handler, "can only consume logs within scope of context manager"
)
pods = core_api.list_namespaced_pod(
namespace=namespace, field_selector=f"metadata.name={pod_name}"
).items
containers = []
# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#containerstatus-v1-core
for pod in pods:
if pod.status.init_container_statuses:
containers.extend(
[
container_status.name
for container_status in pod.status.init_container_statuses
]
)
if pod.status.container_statuses:
containers.extend(
[container_status.name for container_status in pod.status.container_statuses]
)
pod_exit_event = threading.Event()
logger = context.log.getChild("consume_pod_logs")
logger.setLevel(logging.WARNING)
with merge_streams(
streams={
f"{pod_name}:{container}": self._extract_logs(
pod_exit_event=pod_exit_event,
read_namespaced_pod_log=core_api.read_namespaced_pod_log,
list_namespaced_pod=core_api.list_namespaced_pod,
pod_name=pod_name,
namespace=namespace,
container=container,
logger=logger.getChild(f"_extract_logs({container})"),
)
for container in containers
},
log_handler=lambda log_line: extract_message_or_forward_to_stdout(handler, log_line),
stream_processor=_process_log_stream,
logger=logger,
):
yield
logger.info("Setting the pod exit event to do the cleanup of the streams")
pod_exit_event.set()
def _extract_logs(
self,
pod_exit_event: threading.Event,
read_namespaced_pod_log: Callable,
list_namespaced_pod: Callable,
pod_name: str,
namespace: str,
container: str,
logger: logging.Logger,
max_attempts: int = 3,
sleep_between_attempts: float = 0.5,
sleeper: Callable = time.sleep,
) -> Generator:
"""Return the streams of the Kubernetes logs with the appropriate buffer time.
Args:
pod_exit_event (threading.Event): The threading event that indicates to the
log reading thread that the pod has exited
read_namespaced_pod_log (kubernetes.client.CoreV1Api): The Kubernetes CoreV1Api client function for reading
logs.
list_namespaced_pod (kubernetes.client.CoreV1Api): The Kubernetes CoreV1Api client function for listing
pods and their state.
pod_name (str): The name of the Pipes Pod
namespace (str): The namespace the pod lives in.
container (str): The container to read logs from.
logger (logging.Logger): A logger instance for diagnostic logs.
max_attempts (int): The number of attempts to read logs in the beginning in
case we get a failure due to pod still starting.
sleep_between_attempts (float): Sleep between attempts in the beginning.
sleeper (Callable): The time.sleep equivalent.
Yields:
The Kubernetes pod log stream generator
"""
# Yield the actual stream here to hide implementation detail from caller
# If readiness/liveness probes aren't configured
# pods can reach the "Ready" state from the API perspective
# but still reject incoming communication
attempt = 0
# Keyword arguments shared by every read_namespaced_pod_log call below.
common_args = {
"name": pod_name,
"namespace": namespace,
"container": container,
"_preload_content": False, # avoid JSON processing
"timestamps": True, # Include timestamps for ordering and deduplication
"follow": True,
}
# Attempt to get the stream for the first time
# NOTE(review): only the PodInitializing/400 branch increments `attempt`; a
# persistent ApiException of any other status would make this loop spin
# without backoff or exit — confirm that is intended.
while attempt < max_attempts:
try:
yield read_namespaced_pod_log(since_seconds=3600, **common_args).stream()
break
except kubernetes.client.ApiException as e:
# e.status is normally an int; the "400" string is checked defensively.
if e.status in ["400", 400] and "PodInitializing" in str(e):
# PodInitializing cannot accept log consumption
sleeper(sleep_between_attempts)
sleep_between_attempts *= 2 # exponential backoff
attempt += 1
continue
# After stream is initially yielded in above loop this while loop is a safeguard against the
# stream ending while the pod has not exitted. If so, we need to refresh the stream.
while not pod_exit_event.is_set():
# List the pods now and then use the status to decide whether we should exit
pods = list_namespaced_pod(
namespace=namespace, field_selector=f"metadata.name={pod_name}"
).items
try:
# since_seconds=5 deliberately overlaps the previous stream; duplicates
# are removed downstream thanks to the timestamps requested above.
yield read_namespaced_pod_log(since_seconds=5, **common_args).stream()
except Exception:
logger.exception(f"{container}: exception in getting logs")
break
# The logs are still available once the pod has exited and the above call will succeed, we add this extra
# statement where we will exit if the status of the container was terminated before we read the logs. That
# ensures that we get all of the logs (the merge_streams will deduplicate them) and we don't waste CPU
# cycles whilst trying to get more logs.
pod = pods[0] if pods else None
if pod is None:
break
all_statuses = []
all_statuses.extend(pod.status.init_container_statuses or [])
all_statuses.extend(pod.status.container_statuses or [])
if not all_statuses:
break
# Stop refreshing once our container has reached the terminated state.
state_by_name = {status.name: status.state for status in all_statuses}
if state_by_name[container].terminated is not None:
break
def no_messages_debug_text(self) -> str:
    """Describe how this reader looked for Pipes messages (used in debug output)."""
    explanation = (
        "Attempted to read messages by extracting them from "
        "kubernetes pod logs directly."
    )
    return explanation
| PipesK8sPodLogsMessageReader |
python | joerick__pyinstrument | pyinstrument/renderers/base.py | {
"start": 268,
"end": 889
class ____:
    """
    Abstract base class for renderers.
    """

    # Renderer output file extension without dot prefix; defaults to "txt".
    output_file_extension: str = "txt"

    # Whether the output of this renderer is binary data; defaults to False.
    output_is_binary: bool = False

    def __init__(self):
        pass

    def render(self, session: Session) -> str:
        """
        Return a string that contains the rendered form of `frame`.
        """
        raise NotImplementedError()


class MisconfigurationError(Exception):
    pass
| Renderer |
python | walkccc__LeetCode | solutions/3442. Maximum Difference Between Even and Odd Frequency I/3442.py | {
"start": 0,
"end": 324
} | class ____:
def maxDifference(self, s: str) -> int:
count = collections.Counter(s)
maxOdd = max((freq for freq in count.values()
if freq % 2 == 1), default=0)
minEven = min((freq for freq in count.values()
if freq % 2 == 0), default=len(s))
return maxOdd - minEven
| Solution |
python | dask__distributed | distributed/http/utils.py | {
"start": 183,
"end": 1184
class ____(web.RequestHandler):
# Base handler for the dashboard HTTP routes: stores a reference to the
# running server and any extra template context passed in via the route kwargs.
def initialize(self, dask_server=None, extra=None):
self.server = dask_server
self.extra = extra or {}
def get_template_path(self):
# Templates live next to this module, under ./templates.
return os.path.join(dirname, "templates")
def redirect(path):
    """Build and return a handler class that redirects every GET to *path*."""

    class Redirect(RequestHandler):
        def get(self):
            self.redirect(path)

    return Redirect
def get_handlers(server, modules: list[str], prefix="/"): # type: ignore[no-untyped-def]
# Collect the `routes` lists exported by each module and return them as
# tornado-style (url, handler_class, kwargs) tuples mounted under `prefix`.
# Normalize the prefix to the form "/.../" (leading and trailing slash).
prefix = prefix or ""
prefix = "/" + prefix.strip("/")
if not prefix.endswith("/"):
prefix = prefix + "/"
_routes = []
for module_name in modules:
module = importlib.import_module(module_name)
_routes.extend(module.routes)
routes = []
for url, cls, kwargs in _routes:
# Only inject the server reference for handlers whose initialize()
# actually accepts a `dask_server` keyword.
if has_keyword(cls.initialize, "dask_server"):
kwargs = toolz.assoc(kwargs, "dask_server", server)
routes.append((prefix + url.lstrip("/"), cls, kwargs))
return routes
| RequestHandler |
python | huggingface__transformers | src/transformers/models/table_transformer/modeling_table_transformer.py | {
"start": 3522,
"end": 4437
class ____(Seq2SeqModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, sequence_length, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):
Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a
layernorm.
"""
# Per the docstring above, populated only when config.auxiliary_loss is enabled.
intermediate_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`TableTransformerForObjectDetection`].
"""
)
# Copied from transformers.models.detr.modeling_detr.DetrObjectDetectionOutput with Detr->TableTransformer,DetrImageProcessor->DetrImageProcessor
| TableTransformerModelOutput |
python | apache__airflow | airflow-core/src/airflow/metrics/validators.py | {
"start": 10528,
"end": 10837
class ____(ListValidator):
"""Match the provided strings anywhere in the metric name."""
def test(self, name: str) -> bool:
# Delegate to the base class's substring/pattern matching when an
# allow-list was configured.
if self.validate_list is not None:
return super()._has_pattern_match(name)
return True # default is all metrics are allowed
| PatternAllowListValidator |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image14.py | {
"start": 315,
"end": 998
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image14.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
# Resize rows/columns around the image so the anchoring math is exercised.
worksheet.set_row(1, 4.5)
worksheet.set_row(2, 35.25)
worksheet.set_column("C:E", 3.29)
worksheet.set_column("F:F", 10.71)
worksheet.insert_image("C2", self.image_dir + "logo.png")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | lepture__mistune | src/mistune/directives/_base.py | {
"start": 1655,
"end": 3660
class ____(metaclass=ABCMeta):
"""Abstract base for directive plugins: routes parsed directive types to the
handler registered for them and installs the block-level parser on a Markdown
instance."""
parser: Type[DirectiveParser]
directive_pattern: Optional[str] = None
def __init__(self, plugins: List["DirectivePlugin"]):
# Maps a directive type name to the callable that turns a regex match
# into one or more tokens.
self._methods: Dict[
str,
Callable[
["BlockParser", Match[str], "BlockState"],
Union[Dict[str, Any], List[Dict[str, Any]]],
],
] = {}
self.__plugins = plugins
def register(
self,
name: str,
fn: Callable[
["BlockParser", Match[str], "BlockState"],
Union[Dict[str, Any], List[Dict[str, Any]]],
],
) -> None:
# Register the handler for directive type `name` (last one wins).
self._methods[name] = fn
def parse_method(
self, block: "BlockParser", m: Match[str], state: "BlockState"
) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
# Dispatch to the registered handler; unknown types and handler
# ValueErrors both degrade to a "block_error" token instead of raising.
_type = self.parser.parse_type(m)
method = self._methods.get(_type)
if method:
try:
token = method(block, m, state)
except ValueError as e:
token = {"type": "block_error", "raw": str(e)}
else:
text = m.group(0)
token = {
"type": "block_error",
"raw": text,
}
if isinstance(token, list):
for tok in token:
state.append_token(tok)
else:
state.append_token(token)
return token
@abstractmethod
def parse_directive(self, block: "BlockParser", m: Match[str], state: "BlockState") -> Optional[int]:
raise NotImplementedError()
def register_block_parser(self, md: "Markdown", before: Optional[str] = None) -> None:
md.block.register(
self.parser.name,
self.directive_pattern,
self.parse_directive,
before=before,
)
def __call__(self, markdown: "Markdown") -> None:
# Plugin entry point: give each sub-plugin our parser and let it hook
# itself into the Markdown instance.
for plugin in self.__plugins:
plugin.parser = self.parser
plugin(self, markdown)
| BaseDirective |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 35084,
"end": 35283
class ____(sgqlc.types.Enum):
"""
The access level a user or team can have on a repository, from the
GitHub GraphQL schema: one of ADMIN, MAINTAIN, READ, TRIAGE or WRITE.
"""
__schema__ = graphql_schema
__choices__ = ("ADMIN", "MAINTAIN", "READ", "TRIAGE", "WRITE")
| RepositoryPermission |
python | getsentry__sentry | tests/sentry/projects/project_rules/test_updater.py | {
"start": 760,
"end": 8177
class ____(TestCase):
"""Tests for ProjectRuleUpdater: per-field updates on a legacy Rule and the
dual write that keeps the migrated workflow-engine records in sync."""
def setUp(self) -> None:
self.user = self.create_user()
self.org = self.create_organization(name="bloop", owner=self.user)
self.project = self.create_project(
teams=[self.create_team()], name="foo", fire_project_created=True
)
self.rule = self.create_project_rule(project=self.project)
self.updater = ProjectRuleUpdater(rule=self.rule, project=self.project)
def test_update_name(self) -> None:
self.updater.name = "Cool New Rule"
self.updater.run()
assert self.rule.label == "Cool New Rule"
def test_update_owner(self) -> None:
# Owner can be a user, then a team, then cleared entirely.
self.updater.owner = Actor.from_id(user_id=self.user.id)
self.updater.run()
with assume_test_silo_mode_of(User):
self.user = User.objects.get(id=self.user.id)
assert (self.rule.owner_user_id, self.rule.owner_team_id) == (self.user.id, None)
team = self.create_team()
self.updater.owner = Actor.from_id(team_id=team.id)
self.updater.run()
assert (self.rule.owner_user_id, self.rule.owner_team_id) == (None, team.id)
self.updater.owner = None
self.updater.run()
assert self.rule.owner_team_id is None
assert self.rule.owner_user_id is None
def test_update_environment(self) -> None:
self.updater.environment = 3
self.updater.run()
assert self.rule.environment_id == 3
def test_update_environment_when_none(self) -> None:
# Running the updater with no environment set clears an existing one.
self.rule.environment_id = 3
self.rule.save()
assert self.rule.environment_id == 3
self.updater.run()
assert self.rule.environment_id is None
def test_update_project(self) -> None:
project2 = self.create_project(organization=self.org)
self.updater.project = project2
self.updater.run()
assert self.rule.project == project2
def test_update_actions(self) -> None:
# The stored action drops the display-only "name" key.
self.updater.actions = [
{
"id": "sentry.rules.actions.notify_event.NotifyEventAction",
"name": "Send a notification (for all legacy integrations)",
}
]
self.updater.run()
assert self.rule.data["actions"] == [
{
"id": "sentry.rules.actions.notify_event.NotifyEventAction",
}
]
def test_update_action_match(self) -> None:
self.updater.action_match = "any"
self.updater.run()
assert self.rule.data["action_match"] == "any"
def test_update_filter_match(self) -> None:
self.updater.filter_match = "any"
self.updater.run()
assert self.rule.data["filter_match"] == "any"
def test_update_conditions(self) -> None:
self.updater.conditions = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
"key": "foo",
"match": "eq",
"value": "bar",
}
]
self.updater.run()
assert self.rule.data["conditions"] == [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
"key": "foo",
"match": "eq",
"value": "bar",
}
]
def test_update_frequency(self) -> None:
self.updater.frequency = 5
self.updater.run()
assert self.rule.data["frequency"] == 5
def test_dual_update_workflow_engine(self) -> None:
# Migrate first, then verify an update propagates to the detector,
# workflow, condition groups and action created by the migration.
IssueAlertMigrator(self.rule, user_id=self.user.id).run()
conditions = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
"key": "foo",
"match": "eq",
"value": "bar",
},
{
"id": "sentry.rules.filters.tagged_event.TaggedEventFilter",
"key": "foo",
"match": "is",
},
]
new_user_id = self.create_user().id
ProjectRuleUpdater(
rule=self.rule,
name="Updated Rule",
owner=Actor.from_id(new_user_id),
project=self.project,
action_match="all",
filter_match="any",
conditions=conditions,
environment=None,
actions=[
{
"id": "sentry.rules.actions.notify_event.NotifyEventAction",
"name": "Send a notification (for all legacy integrations)",
}
],
frequency=5,
).run()
alert_rule_detector = AlertRuleDetector.objects.get(rule_id=self.rule.id)
alert_rule_workflow = AlertRuleWorkflow.objects.get(rule_id=self.rule.id)
detector = alert_rule_detector.detector
assert detector.project_id == self.project.id
assert detector.type == ErrorGroupType.slug
workflow = alert_rule_workflow.workflow
assert workflow.config["frequency"] == 5
assert workflow.owner_user_id == new_user_id
assert workflow.owner_team_id is None
assert workflow.environment is None
when_dcg = workflow.when_condition_group
assert when_dcg
assert when_dcg.logic_type == "all"
assert len(when_dcg.conditions.all()) == 1
data_condition = list(when_dcg.conditions.all())[0]
assert data_condition.type == Condition.FIRST_SEEN_EVENT
action_filter = WorkflowDataConditionGroup.objects.get(workflow=workflow).condition_group
assert action_filter.logic_type == "any-short"
assert len(action_filter.conditions.all()) == 1
data_condition = list(action_filter.conditions.all())[0]
assert data_condition.type == Condition.TAGGED_EVENT
assert data_condition.comparison == {"key": "foo", "match": "is"}
action = DataConditionGroupAction.objects.get(condition_group=action_filter).action
assert action.type == Action.Type.PLUGIN
def test_dual_create_workflow_engine__errors_on_invalid_conditions(self) -> None:
# Invalid conditions must make the update fail and leave the rule untouched.
IssueAlertMigrator(self.rule, user_id=self.user.id).run()
conditions = [
{
"interval": "1h",
"id": EventFrequencyCondition.id,
"value": "-1",
"comparisonType": "asdf",
},
{
"id": "sentry.rules.filters.tagged_event.TaggedEventFilter",
"key": "foo",
"match": "is",
},
]
new_user_id = self.create_user().id
with pytest.raises(Exception):
ProjectRuleUpdater(
rule=self.rule,
name="Updated Rule",
owner=Actor.from_id(new_user_id),
project=self.project,
action_match="all",
filter_match="any",
conditions=conditions,
environment=None,
actions=[
{
"id": "sentry.rules.actions.notify_event.NotifyEventAction",
"name": "Send a notification (for all legacy integrations)",
}
],
frequency=5,
).run()
not_updated_rule = Rule.objects.get(id=self.rule.id)
assert not_updated_rule == self.rule
| TestUpdater |
python | conda__conda | conda/base/constants.py | {
"start": 6054,
"end": 6571
class ____(EnumMeta):
# Metaclass that makes the enum lookup forgiving: string inputs are typified
# first, and the booleans True/False are mapped to the "flexible"/DISABLED
# members before the normal Enum lookup runs.
def __call__(cls, value, *args, **kwargs):
try:
return super().__call__(value, *args, **kwargs)
except ValueError:
if isinstance(value, str):
from ..auxlib.type_coercion import typify
# e.g. "true"/"false" strings become real booleans here.
value = typify(value)
if value is True:
value = "flexible"
elif value is False:
value = cls.DISABLED
return super().__call__(value, *args, **kwargs)
| ChannelPriorityMeta |
python | MongoEngine__mongoengine | mongoengine/fields.py | {
"start": 80440,
"end": 81043
class ____(GeoJsonBaseField):
"""A GeoJSON field storing list of Polygons.
The data is represented as:
.. code-block:: js
{'type' : 'MultiPolygon' ,
'coordinates' : [[
[[x1, y1], [x1, y1] ... [xn, yn]],
[[x1, y1], [x1, y1] ... [xn, yn]]
], [
[[x1, y1], [x1, y1] ... [xn, yn]],
[[x1, y1], [x1, y1] ... [xn, yn]]
]
}
You can either pass a dict with the full information or a list
of Polygons.
Requires mongodb >= 2.6
"""
# GeoJSON geometry type tag; presumably consumed by GeoJsonBaseField
# validation/serialization — confirm against the base class.
_type = "MultiPolygon"
| MultiPolygonField |
python | ray-project__ray | rllib/offline/estimators/tests/test_ope_math.py | {
"start": 464,
"end": 3076
class ____(TorchPolicyV2):
"""A fake policy used in test ope math to emulate a target policy that is better
and worse than the random behavioral policy.
In case of an improved policy, we assign higher probs to those actions that
attained a higher reward and lower probs to those actions that attained a lower
reward. We do the reverse in case of a worse policy.
"""
def __init__(self, observation_space, action_space, sample_batch, improved=True):
self.sample_batch = sample_batch
self.improved = improved
self.config = {}
# things that are needed for FQE Torch Model
# NOTE(review): `self.model = ...` assigns the Ellipsis object as a
# placeholder — it is never used as a real model in these tests.
self.model = ...
self.observation_space = observation_space
self.action_space = action_space
self.device = "cpu"
def action_distribution_fn(self, model, obs_batch=None, **kwargs):
# used in DM and DR (FQE torch model to be precise)
dist_class = TorchCategorical
# The first observation feature encodes the index into sample_batch.
inds = obs_batch[SampleBatch.OBS][:, 0]
old_rewards = self.sample_batch[SampleBatch.REWARDS][inds]
old_actions = self.sample_batch[SampleBatch.ACTIONS][inds]
dist_inputs = torch.ones((len(inds), self.action_space.n), dtype=torch.float32)
# add 0.5 to the action that gave a good reward (2) and subtract 0.5 from the
# action that gave a bad reward (1)
# to achieve this I can just subtract 1.5 from old_reward
delta = old_rewards - 1.5
if not self.improved:
# reverse the logic for a worse policy
delta = -delta
dist_inputs[torch.arange(len(inds)), old_actions] = (
dist_inputs[torch.arange(len(inds)), old_actions] + delta
).float()
return dist_inputs, dist_class, None
def compute_log_likelihoods(
self,
actions,
obs_batch,
*args,
**kwargs,
):
# used in IS and WIS
inds = obs_batch[:, 0]
old_probs = self.sample_batch[SampleBatch.ACTION_PROB][inds]
old_rewards = self.sample_batch[SampleBatch.REWARDS][inds]
if self.improved:
# assign 50 percent higher prob to those that gave a good reward and 50
# percent lower prob to those that gave a bad reward
# rewards are 1 or 2 in this case
new_probs = (old_rewards == 2) * 1.5 * old_probs + (
old_rewards == 1
) * 0.5 * old_probs
else:
new_probs = (old_rewards == 2) * 0.5 * old_probs + (
old_rewards == 1
) * 1.5 * old_probs
return np.log(new_probs)
| FakePolicy |
python | paramiko__paramiko | paramiko/ssh_gss.py | {
"start": 8211,
"end": 15038
class ____(_SSH_GSSAuth):
"""
Implementation of the GSS-API MIT Kerberos Authentication for SSH2,
using the older (unmaintained) python-gssapi package.
:see: `.GSSAuth`
"""
def __init__(self, auth_method, gss_deleg_creds):
"""
:param str auth_method: The name of the SSH authentication mechanism
(gssapi-with-mic or gss-keyex)
:param bool gss_deleg_creds: Delegate client credentials or not
"""
_SSH_GSSAuth.__init__(self, auth_method, gss_deleg_creds)
# The only difference between the two flag sets is C_DELEG_FLAG,
# which requests credential delegation to the server.
if self._gss_deleg_creds:
self._gss_flags = (
gssapi.C_PROT_READY_FLAG,
gssapi.C_INTEG_FLAG,
gssapi.C_MUTUAL_FLAG,
gssapi.C_DELEG_FLAG,
)
else:
self._gss_flags = (
gssapi.C_PROT_READY_FLAG,
gssapi.C_INTEG_FLAG,
gssapi.C_MUTUAL_FLAG,
)
def ssh_init_sec_context(
self, target, desired_mech=None, username=None, recv_token=None
):
"""
Initialize a GSS-API context.
:param str username: The name of the user who attempts to login
:param str target: The hostname of the target to connect to
:param str desired_mech: The negotiated GSS-API mechanism
("pseudo negotiated" mechanism, because we
support just the krb5 mechanism :-))
:param str recv_token: The GSS-API token received from the Server
:raises:
`.SSHException` -- Is raised if the desired mechanism of the client
is not supported
:return: A ``String`` if the GSS-API has returned a token or
``None`` if no token was returned
"""
# Local import: pyasn1 is only needed to DER-decode the negotiated
# mechanism OID.
from pyasn1.codec.der import decoder
self._username = username
self._gss_host = target
targ_name = gssapi.Name(
"host@" + self._gss_host, gssapi.C_NT_HOSTBASED_SERVICE
)
ctx = gssapi.Context()
ctx.flags = self._gss_flags
if desired_mech is None:
krb5_mech = gssapi.OID.mech_from_string(self._krb5_mech)
else:
mech, __ = decoder.decode(desired_mech)
if mech.__str__() != self._krb5_mech:
raise SSHException("Unsupported mechanism OID.")
else:
krb5_mech = gssapi.OID.mech_from_string(self._krb5_mech)
token = None
try:
if recv_token is None:
# First call: create the client context, then step with no token.
self._gss_ctxt = gssapi.InitContext(
peer_name=targ_name,
mech_type=krb5_mech,
req_flags=ctx.flags,
)
token = self._gss_ctxt.step(token)
else:
token = self._gss_ctxt.step(recv_token)
except gssapi.GSSException:
message = "{} Target: {}".format(sys.exc_info()[1], self._gss_host)
raise gssapi.GSSException(message)
self._gss_ctxt_status = self._gss_ctxt.established
return token
def ssh_get_mic(self, session_id, gss_kex=False):
"""
Create the MIC token for a SSH2 message.
:param str session_id: The SSH session ID
:param bool gss_kex: Generate the MIC for GSS-API Key Exchange or not
:return: gssapi-with-mic:
Returns the MIC token from GSS-API for the message we created
with ``_ssh_build_mic``.
gssapi-keyex:
Returns the MIC token from GSS-API with the SSH session ID as
message.
"""
self._session_id = session_id
if not gss_kex:
mic_field = self._ssh_build_mic(
self._session_id,
self._username,
self._service,
self._auth_method,
)
mic_token = self._gss_ctxt.get_mic(mic_field)
else:
# for key exchange with gssapi-keyex
mic_token = self._gss_srv_ctxt.get_mic(self._session_id)
return mic_token
def ssh_accept_sec_context(self, hostname, recv_token, username=None):
"""
Accept a GSS-API context (server mode).
:param str hostname: The servers hostname
:param str username: The name of the user who attempts to login
:param str recv_token: The GSS-API Token received from the server,
if it's not the initial call.
:return: A ``String`` if the GSS-API has returned a token or ``None``
if no token was returned
"""
# hostname and username are not required for GSSAPI, but for SSPI
self._gss_host = hostname
self._username = username
if self._gss_srv_ctxt is None:
self._gss_srv_ctxt = gssapi.AcceptContext()
token = self._gss_srv_ctxt.step(recv_token)
self._gss_srv_ctxt_status = self._gss_srv_ctxt.established
return token
def ssh_check_mic(self, mic_token, session_id, username=None):
"""
Verify the MIC token for a SSH2 message.
:param str mic_token: The MIC token received from the client
:param str session_id: The SSH session ID
:param str username: The name of the user who attempts to login
:return: None if the MIC check was successful
:raises: ``gssapi.GSSException`` -- if the MIC check failed
"""
self._session_id = session_id
self._username = username
# A username means we are verifying a client's MIC (server mode).
if self._username is not None:
# server mode
mic_field = self._ssh_build_mic(
self._session_id,
self._username,
self._service,
self._auth_method,
)
self._gss_srv_ctxt.verify_mic(mic_field, mic_token)
else:
# for key exchange with gssapi-keyex
# client mode
self._gss_ctxt.verify_mic(self._session_id, mic_token)
@property
def credentials_delegated(self):
"""
Checks if credentials are delegated (server mode).
:return: ``True`` if credentials are delegated, otherwise ``False``
"""
if self._gss_srv_ctxt.delegated_cred is not None:
return True
return False
def save_client_creds(self, client_token):
"""
Save the Client token in a file. This is used by the SSH server
to store the client credentials if credentials are delegated
(server mode).
:param str client_token: The GSS-API token received form the client
:raises:
``NotImplementedError`` -- Credential delegation is currently not
supported in server mode
"""
raise NotImplementedError
| _SSH_GSSAPI_OLD |
python | sympy__sympy | sympy/simplify/hyperexpand.py | {
"start": 25619,
"end": 30552
class ____:
"""
This class represents hypergeometric formulae.
Explanation
===========
Its data members are:
- z, the argument
- closed_form, the closed form expression
- symbols, the free symbols (parameters) in the formula
- func, the function
- B, C, M (see _compute_basis)
Examples
========
>>> from sympy.abc import a, b, z
>>> from sympy.simplify.hyperexpand import Formula, Hyper_Function
>>> func = Hyper_Function((a/2, a/3 + b, (1+a)/2), (a, b, (a+b)/7))
>>> f = Formula(func, z, None, [a, b])
"""
def _compute_basis(self, closed_form):
"""
Compute a set of functions B=(f1, ..., fn), a nxn matrix M
and a 1xn matrix C such that:
closed_form = C B
z d/dz B = M B.
"""
afactors = [_x + a for a in self.func.ap]
bfactors = [_x + b - 1 for b in self.func.bq]
expr = _x*Mul(*bfactors) - self.z*Mul(*afactors)
poly = Poly(expr, _x)
n = poly.degree() - 1
# B holds closed_form and its first n scaled derivatives (z d/dz)^k.
b = [closed_form]
for _ in range(n):
b.append(self.z*b[-1].diff(self.z))
self.B = Matrix(b)
self.C = Matrix([[1] + [0]*n])
# M is assembled in companion-matrix form: a shifted identity plus a
# final row built from the normalized polynomial coefficients.
m = eye(n)
m = m.col_insert(0, zeros(n, 1))
l = poly.all_coeffs()[1:]
l.reverse()
self.M = m.row_insert(n, -Matrix([l])/poly.all_coeffs()[0])
def __init__(self, func, z, res, symbols, B=None, C=None, M=None):
z = sympify(z)
res = sympify(res)
symbols = [x for x in sympify(symbols) if func.has(x)]
self.z = z
self.symbols = symbols
self.B = B
self.C = C
self.M = M
self.func = func
# TODO with symbolic parameters, it could be advantageous
# (for prettier answers) to compute a basis only *after*
# instantiation
if res is not None:
self._compute_basis(res)
@property
def closed_form(self):
return reduce(lambda s,m: s+m[0]*m[1], zip(self.C, self.B), S.Zero)
def find_instantiations(self, func):
"""
Find substitutions of the free symbols that match ``func``.
Return the substitution dictionaries as a list. Note that the returned
instantiations need not actually match, or be valid!
"""
from sympy.solvers import solve
ap = func.ap
bq = func.bq
if len(ap) != len(self.func.ap) or len(bq) != len(self.func.bq):
raise TypeError('Cannot instantiate other number of parameters')
symbol_values = []
for a in self.symbols:
if a in self.func.ap.args:
symbol_values.append(ap)
elif a in self.func.bq.args:
symbol_values.append(bq)
else:
raise ValueError("At least one of the parameters of the "
"formula must be equal to %s" % (a,))
base_repl = [dict(list(zip(self.symbols, values)))
for values in product(*symbol_values)]
# Bucket parameters by their value mod 1, for both target and formula.
abuckets, bbuckets = [sift(params, _mod1) for params in [ap, bq]]
# NOTE(review): a_inv / b_inv are computed but not referenced below —
# confirm they are still needed.
a_inv, b_inv = [{a: len(vals) for a, vals in bucket.items()}
for bucket in [abuckets, bbuckets]]
critical_values = [[0] for _ in self.symbols]
result = []
_n = Dummy()
for repl in base_repl:
symb_a, symb_b = [sift(params, lambda x: _mod1(x.xreplace(repl)))
for params in [self.func.ap, self.func.bq]]
for bucket, obucket in [(abuckets, symb_a), (bbuckets, symb_b)]:
for mod in set(list(bucket.keys()) + list(obucket.keys())):
if (mod not in bucket) or (mod not in obucket) \
or len(bucket[mod]) != len(obucket[mod]):
break
for a, vals in zip(self.symbols, critical_values):
if repl[a].free_symbols:
continue
exprs = [expr for expr in obucket[mod] if expr.has(a)]
repl0 = repl.copy()
repl0[a] += _n
for expr in exprs:
for target in bucket[mod]:
n0, = solve(expr.xreplace(repl0) - target, _n)
if n0.free_symbols:
raise ValueError("Value should not be true")
vals.append(n0)
else:
# Only reached when no bucket mismatch broke out of the loop.
values = []
for a, vals in zip(self.symbols, critical_values):
a0 = repl[a]
min_ = floor(min(vals))
max_ = ceiling(max(vals))
values.append([a0 + n for n in range(min_, max_ + 1)])
result.extend(dict(list(zip(self.symbols, l))) for l in product(*values))
return result
| Formula |
python | doocs__leetcode | solution/0700-0799/0746.Min Cost Climbing Stairs/Solution.py | {
"start": 0,
"end": 273
} | class ____:
def minCostClimbingStairs(self, cost: List[int]) -> int:
@cache
def dfs(i: int) -> int:
if i >= len(cost):
return 0
return cost[i] + min(dfs(i + 1), dfs(i + 2))
return min(dfs(0), dfs(1))
| Solution |
python | pydantic__pydantic | tests/mypy/modules/frozen_field.py | {
"start": 40,
"end": 132
class ____(BaseModel):
# Frozen field: reassignment after construction is a type/validation error.
a: int = Field(default=1, frozen=True)
# NOTE(review): this module looks like a mypy-plugin test fixture — the
# assignment below is intentionally invalid and expected to be flagged.
foo = Foo()
foo.a = 2
| Foo |
python | ray-project__ray | release/train_tests/benchmark/image_classification/parquet/factory.py | {
"start": 2468,
"end": 4832
class ____(
ImageClassificationTorchDataLoaderFactory, S3ParquetReader
):
"""Factory for creating PyTorch DataLoaders for Parquet image classification.
Features:
- Parquet file reading with row count-based distribution
- Worker-based file distribution for balanced workloads
- Row limits per worker for controlled processing
- Dataset instance caching for efficiency
"""
def __init__(
self, benchmark_config: BenchmarkConfig, data_dirs: Dict[str, str]
) -> None:
"""Initialize factory with benchmark configuration.
Args:
benchmark_config: Configuration for benchmark parameters
"""
super().__init__(benchmark_config)
S3ParquetReader.__init__(
self
) # Initialize S3ParquetReader to set up _s3_client
self.train_url = data_dirs[DatasetKey.TRAIN]
# Lazily-built cache so repeated calls return the same dataset objects.
self._cached_datasets: Optional[Dict[str, IterableDataset]] = None
def get_iterable_datasets(self) -> Dict[str, IterableDataset]:
"""Get train and validation datasets with worker-specific configurations.
Returns:
Dictionary containing:
- "train": Training dataset with random transforms
- "val": Validation dataset without transforms
"""
if self._cached_datasets is not None:
return self._cached_datasets
# Get row limits for workers and total processing
(
limit_training_rows_per_worker,
limit_validation_rows_per_worker,
) = self._get_worker_row_limits()
# Create training dataset
train_file_urls = self._get_file_urls(self.train_url)
train_ds = S3ParquetImageIterableDataset(
file_urls=train_file_urls,
random_transforms=True,
limit_rows_per_worker=limit_training_rows_per_worker,
)
# Create validation dataset
# NOTE(review): validation reuses the training files (no held-out split);
# only the transforms and row limits differ — confirm this is intended.
val_file_urls = train_file_urls
val_ds = S3ParquetImageIterableDataset(
file_urls=val_file_urls,
random_transforms=False,
limit_rows_per_worker=limit_validation_rows_per_worker,
)
self._cached_datasets = {
DatasetKey.TRAIN: train_ds,
DatasetKey.VALID: val_ds,
}
return self._cached_datasets
| ImageClassificationParquetTorchDataLoaderFactory |
python | marshmallow-code__apispec | tests/test_ext_marshmallow_openapi.py | {
"start": 15183,
"end": 15290
class ____(Schema):
"""Marshmallow schema for a pet: a name plus a list of nested categories."""
category = fields.Nested(CategorySchema, many=True)
name = fields.Str()
| PetSchema |
python | eventlet__eventlet | eventlet/zipkin/_thrift/zipkinCore/ttypes.py | {
"start": 722,
"end": 3264
class ____:
"""
Attributes:
- ipv4
- port
- service_name
"""
# NOTE(review): this appears to be auto-generated Thrift serialization code
# from the Python 2 era (note dict.iteritems in __repr__); regenerate from
# the .thrift IDL rather than editing by hand.
thrift_spec = (
None, # 0
(1, TType.I32, 'ipv4', None, None, ), # 1
(2, TType.I16, 'port', None, None, ), # 2
(3, TType.STRING, 'service_name', None, None, ), # 3
)
def __init__(self, ipv4=None, port=None, service_name=None,):
self.ipv4 = ipv4
self.port = port
self.service_name = service_name
def read(self, iprot):
# Fast path: let the C extension decode directly from the transport.
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.ipv4 = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I16:
self.port = iprot.readI16();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.service_name = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
# Fast path mirror of read(): encode via the C extension when available.
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Endpoint')
if self.ipv4 is not None:
oprot.writeFieldBegin('ipv4', TType.I32, 1)
oprot.writeI32(self.ipv4)
oprot.writeFieldEnd()
if self.port is not None:
oprot.writeFieldBegin('port', TType.I16, 2)
oprot.writeI16(self.port)
oprot.writeFieldEnd()
if self.service_name is not None:
oprot.writeFieldBegin('service_name', TType.STRING, 3)
oprot.writeString(self.service_name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| Endpoint |
python | huggingface__transformers | src/transformers/models/conditional_detr/modular_conditional_detr.py | {
"start": 290,
"end": 3261
} | class ____(DetrImageProcessorFast):
def post_process_object_detection(
self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, list[tuple]] = None, top_k: int = 100
):
"""
Converts the raw output of [`ConditionalDetrForObjectDetection`] into final bounding boxes in (top_left_x,
top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
Args:
outputs ([`ConditionalDetrObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
top_k (`int`, *optional*, defaults to 100):
Keep only top k bounding boxes before filtering by thresholding.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model.
"""
out_logits, out_bbox = outputs.logits, outputs.pred_boxes
if target_sizes is not None:
if len(out_logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
prob = out_logits.sigmoid()
prob = prob.view(out_logits.shape[0], -1)
k_value = min(top_k, prob.size(1))
topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)
scores = topk_values
topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
labels = topk_indexes % out_logits.shape[2]
boxes = center_to_corners_format(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
# and from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
if isinstance(target_sizes, list):
img_h = torch.Tensor([i[0] for i in target_sizes])
img_w = torch.Tensor([i[1] for i in target_sizes])
else:
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
boxes = boxes * scale_fct[:, None, :]
results = []
for s, l, b in zip(scores, labels, boxes):
score = s[s > threshold]
label = l[s > threshold]
box = b[s > threshold]
results.append({"scores": score, "labels": label, "boxes": box})
return results
__all__ = ["ConditionalDetrImageProcessorFast"]
| ConditionalDetrImageProcessorFast |
python | huggingface__transformers | src/transformers/models/levit/modeling_levit.py | {
"start": 22699,
"end": 24682
} | class ____(LevitPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.num_labels = config.num_labels
self.levit = LevitModel(config)
# Classifier head
self.classifier = (
LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels)
if config.num_labels > 0
else torch.nn.Identity()
)
self.classifier_distill = (
LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels)
if config.num_labels > 0
else torch.nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, LevitForImageClassificationWithTeacherOutput]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.levit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
sequence_output = sequence_output.mean(1)
cls_logits, distill_logits = self.classifier(sequence_output), self.classifier_distill(sequence_output)
logits = (cls_logits + distill_logits) / 2
if not return_dict:
output = (logits, cls_logits, distill_logits) + outputs[2:]
return output
return LevitForImageClassificationWithTeacherOutput(
logits=logits,
cls_logits=cls_logits,
distillation_logits=distill_logits,
hidden_states=outputs.hidden_states,
)
__all__ = [
"LevitForImageClassification",
"LevitForImageClassificationWithTeacher",
"LevitModel",
"LevitPreTrainedModel",
]
| LevitForImageClassificationWithTeacher |
python | PyCQA__pylint | tests/functional/u/useless/useless_object_inheritance.py | {
"start": 645,
"end": 687
} | class ____(B): # positive test case
pass
| G |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 14030,
"end": 14455
} | class ____(_GenerativeProvider):
generative: Union[GenerativeSearches, _EnumLikeStr] = Field(
default=GenerativeSearches.CONTEXTUALAI, frozen=True, exclude=True
)
model: Optional[str]
temperature: Optional[float]
topP: Optional[float]
maxNewTokens: Optional[int]
systemPrompt: Optional[str]
avoidCommentary: Optional[bool]
knowledge: Optional[List[str]]
| _GenerativeContextualAIConfig |
python | pytorch__pytorch | torch/nn/utils/prune.py | {
"start": 18213,
"end": 19069
} | class ____(BasePruningMethod):
r"""Utility pruning method that does not prune any units but generates the pruning parametrization with a mask of ones."""
PRUNING_TYPE = "unstructured"
def compute_mask(self, t, default_mask):
mask = default_mask
return mask
@classmethod
def apply(cls, module, name): # type: ignore[override]
r"""Add pruning on the fly and reparametrization of a tensor.
Adds the forward pre-hook that enables pruning on the fly and
the reparametrization of a tensor in terms of the original tensor
and the pruning mask.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
"""
return super().apply(module, name)
| Identity |
python | huggingface__transformers | tests/models/gemma3n/test_modeling_gemma3n.py | {
"start": 4510,
"end": 9594
} | class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (Gemma3nAudioEncoder,) if is_torch_available() else ()
test_missing_keys = False
is_generative = False
_is_stateful = True
main_input_name = "audio_mel"
def setUp(self):
self.model_tester = Gemma3nAudioModelTester(self)
self.config_tester = ConfigTester(self, config_class=Gemma3nAudioConfig, hidden_size=37)
torch.manual_seed(0)
# The following values are golden outputs from a deterministic run of the components.
# They are used to ensure that changes to the code do not alter the numerical output.
# Generated with seeds np.random.seed(0) and torch.manual_seed(0).
self.expected_input_features_shape = (2, 48, 32)
self.expected_input_features_slice = np.array([-5.733152, -5.337127, -4.916284, -4.378989, -3.7622747])
self.expected_input_features_mask_shape = (2, 48)
self.expected_input_features_mask_slice = np.array([True, True, True, True, False])
self.expected_encoder_output_shape = (2, 3, 32)
self.expected_encoder_output_slice = torch.tensor([-0.4159, 0.6459, 0.6305, 2.2902, 0.9683])
self.expected_encoder_mask_shape = (2, 3)
self.expected_encoder_mask_slice = torch.tensor([False, False, True])
# Prepare a shared feature extractor and raw audio for the tests
self.feature_extractor = Gemma3nAudioFeatureExtractor(**self.model_tester.get_feature_extractor_config())
np.random.seed(0)
raw_speech_1 = np.sin(2 * np.pi * 440 * np.linspace(0, 1, self.model_tester.raw_audio_length)).astype(
np.float32
)
raw_speech_2 = np.random.randn(self.model_tester.raw_audio_length // 2).astype(np.float32)
self.raw_speech = [raw_speech_1, raw_speech_2]
@unittest.skip("Audio encoder does not support attention output")
def test_attention_outputs(self):
pass
@unittest.skip("Audio encoder does not support hidden state output")
def test_hidden_states_output(self):
pass
@unittest.skip("Audio encoder returns a tuple, not a ModelOutput object, skipping equivalence test.")
def test_model_outputs_equivalence(self):
pass
@unittest.skip("Audio encoder does not support retaining gradients on hidden states/attentions.")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip("Audio encoder does not have a concept of token embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip("Audio encoder does not have a concept of token embeddings")
def test_resize_tokens_embeddings(self):
pass
@unittest.skip("This model has a complex downsampling scheme that is hard to test with the generic batching test.")
def test_batching_equivalence(self):
pass
def test_feature_extractor(self):
"""
Tests the feature extractor's output against pre-computed golden values.
This ensures the NumPy-based audio preprocessing is correct and consistent.
"""
audio_inputs = self.feature_extractor(
self.raw_speech, padding="longest", pad_to_multiple_of=128, return_tensors="np"
)
input_features = audio_inputs["input_features"]
self.assertEqual(input_features.shape, self.expected_input_features_shape)
np.testing.assert_allclose(input_features[0, 0, :5], self.expected_input_features_slice, rtol=1e-5, atol=1e-5)
input_features_mask = audio_inputs["input_features_mask"]
self.assertEqual(input_features_mask.shape, self.expected_input_features_mask_shape)
# The second audio sample is shorter (22 frames vs 48), so its mask should become False at index 22
np.testing.assert_array_equal(input_features_mask[1, 21:26], self.expected_input_features_mask_slice)
def test_audio_encoder(self):
"""
Tests the audio encoder's forward pass against pre-computed golden values.
This ensures the PyTorch-based audio encoding model is correct and consistent.
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = Gemma3nAudioEncoder(config).to(torch_device).eval()
with torch.no_grad():
encoder_output, encoder_mask = model(**inputs_dict)
# Check output encodings
self.assertEqual(encoder_output.shape, self.expected_encoder_output_shape)
torch.testing.assert_close(
encoder_output[0, 0, :5], self.expected_encoder_output_slice.to(torch_device), rtol=1e-4, atol=1e-4
)
# Check output mask (True means padded)
# Second sample has 22 feature frames. After downsampling by 4 (conv) -> 5 frames. After downsampling by 4 (reduction) -> 1 frame.
# So the mask should be [False, True, True]
self.assertEqual(encoder_mask.shape, self.expected_encoder_mask_shape)
torch.testing.assert_close(encoder_mask[1, :], self.expected_encoder_mask_slice.to(torch_device))
| Gemma3nAudioModelTest |
python | sympy__sympy | sympy/stats/frv_types.py | {
"start": 7243,
"end": 9872
} | class ____(SingleFiniteDistribution):
_argnames = ('p', 'succ', 'fail')
@staticmethod
def check(p, succ, fail):
_value_check((p >= 0, p <= 1),
"p should be in range [0, 1].")
@property
def set(self):
return {self.succ, self.fail}
def pmf(self, x):
if isinstance(self.succ, Symbol) and isinstance(self.fail, Symbol):
return Piecewise((self.p, x == self.succ),
(1 - self.p, x == self.fail),
(S.Zero, True))
return Piecewise((self.p, Eq(x, self.succ)),
(1 - self.p, Eq(x, self.fail)),
(S.Zero, True))
def Bernoulli(name, p, succ=1, fail=0):
r"""
Create a Finite Random Variable representing a Bernoulli process.
Parameters
==========
p : Rational number between 0 and 1
Represents probability of success
succ : Integer/symbol/string
Represents event of success
fail : Integer/symbol/string
Represents event of failure
Examples
========
>>> from sympy.stats import Bernoulli, density
>>> from sympy import S
>>> X = Bernoulli('X', S(3)/4) # 1-0 Bernoulli variable, probability = 3/4
>>> density(X).dict
{0: 1/4, 1: 3/4}
>>> X = Bernoulli('X', S.Half, 'Heads', 'Tails') # A fair coin toss
>>> density(X).dict
{Heads: 1/2, Tails: 1/2}
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Bernoulli_distribution
.. [2] https://mathworld.wolfram.com/BernoulliDistribution.html
"""
return rv(name, BernoulliDistribution, p, succ, fail)
def Coin(name, p=S.Half):
r"""
Create a Finite Random Variable representing a Coin toss.
This is an equivalent of a Bernoulli random variable with
"H" and "T" as success and failure events respectively.
Parameters
==========
p : Rational Number between 0 and 1
Represents probability of getting "Heads", by default is Half
Examples
========
>>> from sympy.stats import Coin, density
>>> from sympy import Rational
>>> C = Coin('C') # A fair coin toss
>>> density(C).dict
{H: 1/2, T: 1/2}
>>> C2 = Coin('C2', Rational(3, 5)) # An unfair coin
>>> density(C2).dict
{H: 3/5, T: 2/5}
Returns
=======
RandomSymbol
See Also
========
sympy.stats.Binomial
References
==========
.. [1] https://en.wikipedia.org/wiki/Coin_flipping
"""
return rv(name, BernoulliDistribution, p, 'H', 'T')
| BernoulliDistribution |
python | django__django | tests/view_tests/views.py | {
"start": 10837,
"end": 14203
} | class ____:
@sensitive_variables("sauce")
def method(self, request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's
# source is displayed in the exception report.
cooked_eggs = "".join(["s", "c", "r", "a", "m", "b", "l", "e", "d"]) # NOQA
sauce = "".join( # NOQA
["w", "o", "r", "c", "e", "s", "t", "e", "r", "s", "h", "i", "r", "e"]
)
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
@sensitive_variables("sauce")
async def async_method(self, request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's
# source is displayed in the exception report.
cooked_eggs = "".join(["s", "c", "r", "a", "m", "b", "l", "e", "d"]) # NOQA
sauce = "".join( # NOQA
["w", "o", "r", "c", "e", "s", "t", "e", "r", "s", "h", "i", "r", "e"]
)
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
@sensitive_variables("sauce")
async def _async_method_inner(self, request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's
# source is displayed in the exception report.
cooked_eggs = "".join(["s", "c", "r", "a", "m", "b", "l", "e", "d"]) # NOQA
sauce = "".join( # NOQA
["w", "o", "r", "c", "e", "s", "t", "e", "r", "s", "h", "i", "r", "e"]
)
raise Exception
async def async_method_nested(self, request):
try:
await self._async_method_inner(request)
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
def sensitive_method_view(request):
return Klass().method(request)
async def async_sensitive_method_view(request):
return await Klass().async_method(request)
async def async_sensitive_method_view_nested(request):
return await Klass().async_method_nested(request)
@sensitive_variables("sauce")
@sensitive_post_parameters("bacon-key", "sausage-key")
def multivalue_dict_key_error(request):
cooked_eggs = "".join(["s", "c", "r", "a", "m", "b", "l", "e", "d"]) # NOQA
sauce = "".join( # NOQA
["w", "o", "r", "c", "e", "s", "t", "e", "r", "s", "h", "i", "r", "e"]
)
try:
request.POST["bar"]
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
def json_response_view(request):
return JsonResponse(
{
"a": [1, 2, 3],
"foo": {"bar": "baz"},
# Make sure datetime and Decimal objects would be serialized
# properly
"timestamp": datetime.datetime(2013, 5, 19, 20),
"value": decimal.Decimal("3.14"),
}
)
| Klass |
python | getsentry__sentry | src/sentry/notifications/notifications/activity/base.py | {
"start": 1114,
"end": 2728
} | class ____(ProjectNotification, abc.ABC):
metrics_key = "activity"
notification_setting_type_enum = NotificationSettingEnum.WORKFLOW
template_path = "sentry/emails/activity/generic"
def __init__(self, activity: Activity) -> None:
super().__init__(activity.project)
self.activity = activity
@property
@abc.abstractmethod
def title(self) -> str:
"""The header for Workflow notifications."""
def get_base_context(self) -> MutableMapping[str, Any]:
"""The most basic context shared by every notification type."""
return {
"data": self.activity.data,
"title": self.title,
"project": self.project,
"project_link": self.get_project_link(),
**super().get_base_context(),
}
def get_recipient_context(
self, recipient: Actor, extra_context: Mapping[str, Any]
) -> MutableMapping[str, Any]:
context = super().get_recipient_context(recipient, extra_context)
return {**context, **get_reason_context(context)}
@property
def reference(self) -> Model | None:
return self.activity
@abc.abstractmethod
def get_context(self) -> MutableMapping[str, Any]:
pass
@abc.abstractmethod
def get_participants_with_group_subscription_reason(self) -> ParticipantMap:
pass
def send(self) -> None:
return send_activity_notification(self)
def get_log_params(self, recipient: Actor) -> Mapping[str, Any]:
return {"activity": self.activity, **super().get_log_params(recipient)}
| ActivityNotification |
python | encode__httpx | httpx/_auth.py | {
"start": 442,
"end": 3191
} | class ____:
"""
Base class for all authentication schemes.
To implement a custom authentication scheme, subclass `Auth` and override
the `.auth_flow()` method.
If the authentication scheme does I/O such as disk access or network calls, or uses
synchronization primitives such as locks, you should override `.sync_auth_flow()`
and/or `.async_auth_flow()` instead of `.auth_flow()` to provide specialized
implementations that will be used by `Client` and `AsyncClient` respectively.
"""
requires_request_body = False
requires_response_body = False
def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:
"""
Execute the authentication flow.
To dispatch a request, `yield` it:
```
yield request
```
The client will `.send()` the response back into the flow generator. You can
access it like so:
```
response = yield request
```
A `return` (or reaching the end of the generator) will result in the
client returning the last response obtained from the server.
You can dispatch as many requests as is necessary.
"""
yield request
def sync_auth_flow(
self, request: Request
) -> typing.Generator[Request, Response, None]:
"""
Execute the authentication flow synchronously.
By default, this defers to `.auth_flow()`. You should override this method
when the authentication scheme does I/O and/or uses concurrency primitives.
"""
if self.requires_request_body:
request.read()
flow = self.auth_flow(request)
request = next(flow)
while True:
response = yield request
if self.requires_response_body:
response.read()
try:
request = flow.send(response)
except StopIteration:
break
async def async_auth_flow(
self, request: Request
) -> typing.AsyncGenerator[Request, Response]:
"""
Execute the authentication flow asynchronously.
By default, this defers to `.auth_flow()`. You should override this method
when the authentication scheme does I/O and/or uses concurrency primitives.
"""
if self.requires_request_body:
await request.aread()
flow = self.auth_flow(request)
request = next(flow)
while True:
response = yield request
if self.requires_response_body:
await response.aread()
try:
request = flow.send(response)
except StopIteration:
break
| Auth |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/dist_autograd_test.py | {
"start": 48237,
"end": 96857
} | class ____(CommonDistAutogradTest):
@dist_init
def test_autograd_context(self):
# Verify max possible id.
max_auto_increment = 281474976710655
self.assertEqual(
max_auto_increment + (self.worker_id << 48), dist_autograd._get_max_id()
)
context_ids = []
for _ in range(200):
with dist_autograd.context() as context_id:
self.assertEqual(
context_id,
dist_autograd._retrieve_context(context_id)._context_id(),
)
# First 16 bits should be worker_id.
self.assertEqual(self.worker_id, context_id >> 48)
context_ids.append(context_id)
for context_id in context_ids:
with self.assertRaisesRegex(
RuntimeError,
f"Could not find autograd context with id: {context_id}",
):
dist_autograd._retrieve_context(context_id)
@dist_init
def test_nested_context(self):
with (
dist_autograd.context(),
self.assertRaisesRegex(
RuntimeError, "Already have an autograd context id for this thread"
),
dist_autograd.context(),
):
pass
@dist_init
def test_graph_for_builtin_call(self):
self._test_graph(torch.add, ExecMode.RPC_SYNC, False)
@dist_init
def test_graph_for_python_call(self):
self._test_graph(my_py_add, ExecMode.RPC_SYNC, False)
@dist_init
def test_graph_for_builtin_remote_call(self):
self._test_graph(torch.add, ExecMode.REMOTE, False)
@dist_init
def test_graph_for_python_remote_call(self):
self._test_graph(my_py_add, ExecMode.REMOTE, False)
@dist_init
def test_graph_for_py_nested_call(self):
self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC, False)
@dist_init
def test_graph_for_py_nested_remote_call(self):
self._test_graph_for_py_nested_call(ExecMode.REMOTE, False)
@dist_init
def test_graph_for_py_nested_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC, False)
@dist_init
def test_graph_for_py_nested_remote_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE, False)
@dist_init
def test_no_graph_with_tensors_not_require_grad(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC, False)
@dist_init
def test_no_graph_with_tensors_not_require_grad_remote(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE, False)
def _test_grad_only_on_return_value(self, exec_mode):
initialize_pg(self.file_init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), ret_requires_grad)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(worker_name(dst_rank), ret_requires_grad).to_here()
else:
raise ValueError(f"Unrecognized ExecMode {exec_mode}")
dist_autograd.backward(context_id, [ret.sum()])
rpc.rpc_sync(worker_name(dst_rank), _set_rpc_done, args=(context_id, 1))
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
grads = dist_autograd.get_gradients(ctx_ids[1])
self.assertEqual(1, len(grads))
self.assertIn(requires_grad_tensor, grads)
self.assertEqual(torch.ones_like(ret), grads[requires_grad_tensor])
# due to the above get_gradients call, ensure that dist autograd
# contexts aren't cleaned up until all workers exit context managers
dist.barrier()
@dist_init
def test_grad_only_on_return_value(self):
self._test_grad_only_on_return_value(ExecMode.RPC_SYNC)
@dist_init
def test_grad_only_on_return_value_remote(self):
self._test_grad_only_on_return_value(ExecMode.REMOTE)
@dist_init
def test_rpc_complex_args(self):
self._test_rpc_complex_args(ExecMode.RPC_SYNC, False)
@dist_init
def test_remote_complex_args(self):
self._test_rpc_complex_args(ExecMode.REMOTE, False)
@dist_init
def test_context_cleanup_tensor_with_grad(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_context_cleanup_tensor_no_grad(self):
t1 = torch.ones(3, 3, requires_grad=False)
self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add)
@dist_init
def test_context_cleanup_no_tensors(self):
self.context_cleanup_test_helper(rpc_args=(1, 1), func=my_scalar_add)
@dist_init
def test_context_cleanup_nested_rpc(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
dst_rank = (self.rank + 1) % self.world_size
args = (t1, t2, dst_rank, self.world_size, 0)
self.context_cleanup_test_helper(
rpc_args=args, func=my_py_nested_call, nested=True
)
@dist_init
def test_worker_ids_recorded(self):
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
# if no tensors require grad, we should still record worker_ids, as
# the autograd context ID is still passed to other workers.
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
rpc.rpc_sync(worker_name(dst_rank), _set_rpc_done, args=(context_id, 1))
# all worker_ids in dst_ranks should be recorded.
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
# worker_ids should be recorded when tensors do require grad
t1.requires_grad = True
t2.requires_grad = True
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
rpc.rpc_sync(worker_name(dst_rank), _set_rpc_done, args=(context_id, 1))
# all worker_ids in dst_ranks should be recorded.
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
@dist_init
def test_dist_autograd_profiling(self):
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(3, 3, requires_grad=True)
loss = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
).sum()
with torch.autograd.profiler.profile() as p:
dist_autograd.backward(context_id, [loss])
function_events = p.function_events
def get_event(partial_key):
return next(event for event in function_events if partial_key in event.name)
send_event = get_event("SendRpcBackward")
recv_event = get_event("RecvRpcBackward")
backward_event = get_event("torch::distributed::autograd::backward")
# There should be at least 1 send and recv_events each, corresponding to send/recv functions executed.
self.assertEqual(send_event.count, 1)
self.assertEqual(recv_event.count, 1)
# The CPU total for backward event should be great than send and recv, since
# applying those functions in the backwards pass is a subset of the entire backward pass.
self.assertGreater(backward_event.cpu_time_total, send_event.cpu_time_total)
self.assertGreater(backward_event.cpu_time_total, recv_event.cpu_time_total)
@dist_init
def test_error_in_context(self):
with dist_autograd.context():
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(6, 6, requires_grad=True)
with self.assertRaises(RuntimeError):
# This should throw an error since matrix sizes don't match.
rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
@dist_init
def test_backward_no_grad_on_tensor(self):
self._backward_no_grad_on_tensor(
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
False,
)
@dist_init
def test_backward_simple(self):
self._backward_simple(
self._next_rank(),
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False,
)
@dist_init
def test_backward_simple_self(self):
self._backward_simple(
self.rank,
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False,
)
@dist_init
def test_backward_rref(self):
callee = worker_name(self._next_rank())
rref_owner = callee
self._backward_rref(
callee,
rref_owner,
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False,
)
@dist_init
def test_backward_rref_multi(self):
if self.rank > 0:
callee = "worker0"
rref_owner = callee
self._backward_rref(
callee,
rref_owner,
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False,
)
@dist_init
def test_backward_rref_nested(self):
callee = worker_name((self.rank + 1) % self.world_size)
rref_owner = worker_name((self.rank + 2) % self.world_size)
self._backward_rref(
callee,
rref_owner,
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False,
)
@dist_init
def test_trainer_ps(self):
self._test_trainer_ps(create_tensor, _run_trainer, False)
@dist_init
def test_trainer_ps_torchscript_functions(self):
# TODO, need more investigation
# there is rref leak when shutting down, suspect it is because
# ref as arg is passed to pybind boundary, and the ref is not garbage
# collected by python when calling shutdown()
import torch.distributed.rpc.api as api
api._ignore_rref_leak = True
self._test_trainer_ps(
create_torchscript_tensor, _run_trainer_torchscript, False
)
@dist_init
def test_backward_multiple_round_trips(self):
self._backward_multiple_round_trips(
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3)),
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3)),
torch.rand((3, 3), requires_grad=True),
None,
False,
)
@dist_init
def test_backward_different_tensor_dims(self):
local_grads = None
t1 = torch.rand((4, 6), requires_grad=True)
t2 = torch.rand((6, 5))
t3 = torch.rand((5, 7), requires_grad=True)
t4 = torch.rand((7, 9))
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
val = self._exec_func(exec_mode, torch.matmul, t1, t2)
val = self._exec_func(exec_mode, torch.linalg.multi_dot, (val, t3, t4))
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t2, t3, t4
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_unused_tensors(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
s = self._exec_func(exec_mode, torch.stack, (t1, t2, t3))
val = self._exec_func(
exec_mode,
torch.matmul,
torch.narrow(s, 0, 0, 1),
torch.narrow(s, 0, 2, 1),
)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_multiple_output_tensors(self):
local_grads = None
t = torch.rand((10, 2), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
tensor_list = self._exec_func(exec_mode, torch.split, t, 2)
t1 = tensor_list[0]
t2 = tensor_list[2]
t3 = tensor_list[4]
val = self._exec_func(exec_mode, torch.linalg.multi_dot, (t1, t2, t3))
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t
)
local_grads = ret if ret else local_grads
def _run_test_backward_unused_send_function_in_thread(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
# We don't use the result of an RPC function, as a result the
# backward pass would hang in the "FAST" mode.
rpc.rpc_sync(worker_name(self._next_rank()), torch.add, args=(t1, t2))
val = torch.mul(t1, t2)
# Run backward, this would hang forever.
dist_autograd.backward(context_id, [val.sum()])
@dist_init
def test_backward_unused_send_function(self):
# Run the test in a thread which would never finish.
t = threading.Thread(
target=self._run_test_backward_unused_send_function_in_thread
)
t.daemon = True
t.start()
t.join(10) # Wait for 10s.
# Verify thread is still alive (indicating backward hasn't completed yet).
self.assertTrue(t.is_alive())
@dist_init
def test_backward_autograd_engine_error(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
# Perform some ops before error simulation.
tmp = (t1 + t2) * (t1 + t2)
t3 = SimulateBackwardError.apply(tmp)
# Run multiple round trips across different nodes and verify the
# original node receives an error thrown on a node deep in the chain.
val = rpc.rpc_sync(worker_name(self._next_rank()), torch.add, args=(t2, t3))
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.mul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.div, args=(val, t2)
)
with self.assertRaisesRegex(
RuntimeError, "Error on Node [0-9]+: Simulate error on backward pass"
):
# Run backwards, and validate we receive an error.
dist_autograd.backward(context_id, [val.sum()])
    @dist_init(clean_shutdown=False)
    @skip_but_pass_in_sandcastle_if(
        IS_MACOS,
        "Test is flaky on MacOS since libuv error handling is not as robust as TCP",
    )
    def test_backward_node_failure(self):
        """Backward must fail cleanly when peer nodes die mid-run.

        Odd ranks return early (dying abruptly, since clean_shutdown=False);
        even ranks wait for them to die and then verify that backward()
        raises a shutdown-related error.
        """
        rpc._set_rpc_timeout(5)  # 5 seconds
        # The process group is used only for the barrier below.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        with dist_autograd.context() as context_id:
            t1 = torch.rand((3, 3), requires_grad=True)
            t2 = torch.rand((3, 3), requires_grad=True)
            res = rpc.rpc_sync(worker_name(self._next_rank()), torch.add, args=(t1, t2))
            # Wait for all RPCs to be done.
            dist.barrier()
            # Kill all odd rank nodes.
            if self.rank % 2 == 0:
                shutdown_error_regex = self.get_shutdown_error_regex()
                # Wait for all other nodes to die.
                for rank in range(self.world_size):
                    if rank % 2 != 0:
                        wait_until_node_failure(rank, shutdown_error_regex)
                # Shutdown sequence is not very well defined and as a result
                # we might see any error given by get_shutdown_error_regex()
                with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
                    # Run backwards, and validate we receive an error since all
                    # other nodes are dead.
                    dist_autograd.backward(context_id, [res.sum()])
            else:
                # Exit all other nodes.
                pass
@dist_init
def test_backward_without_context(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
f"Could not find autograd context with id: {context_id}",
):
res = rpc.rpc_sync(worker_name(self._next_rank()), torch.add, args=(t1, t2))
dist_autograd.backward(context_id, [res.sum()])
@dist_init
def test_backward_without_rpc(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.add(t1, t2)
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(torch.ones(3, 3), grads[t1])
self.assertEqual(torch.ones(3, 3), grads[t2])
    @dist_init
    def test_backward_invalid_args(self):
        """Exercise the argument validation of dist_autograd.backward()."""
        with dist_autograd.context() as context_id:
            # Roots must be a list of tensors and context_id an int.
            with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
                dist_autograd.backward(context_id, None)
            with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
                dist_autograd.backward(None, None)
            # At least one root tensor is required.
            with self.assertRaisesRegex(
                RuntimeError, "No tensors provided for gradient computation"
            ):
                dist_autograd.backward(context_id, [])
            # Each root must require grad ...
            with self.assertRaisesRegex(RuntimeError, "requires_grad not set on"):
                t = torch.rand(3, 3)
                dist_autograd.backward(context_id, [t])
            # ... be a scalar ...
            with self.assertRaisesRegex(
                RuntimeError, "is not a scalar, all roots need to be scalar"
            ):
                t = torch.rand(3, 3, requires_grad=True)
                dist_autograd.backward(context_id, [t])
            # ... and have a grad_fn (a fresh leaf tensor has none).
            with self.assertRaisesRegex(
                RuntimeError, "does not have a valid gradient function"
            ):
                t = torch.rand(1, requires_grad=True)
                dist_autograd.backward(context_id, [t])
    @dist_init
    def test_backward_multiple_roots(self):
        """Backward with several scalar roots matches local autograd."""
        local_grads = None
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = torch.rand((3, 3), requires_grad=True)
        # The LOCAL pass runs first so _verify_backwards can record reference
        # grads and compare the RPC_SYNC pass against them.
        for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
            with dist_autograd.context() as context_id:
                r1 = self._exec_func(exec_mode, torch.add, t1, t2).sum()
                r2 = self._exec_func(exec_mode, torch.mul, t1, t2).sum()
                r3 = self._exec_func(exec_mode, torch.cos, t1).sum()
                r4 = self._exec_func(exec_mode, torch.div, t1, t2).sum()
                local_grads = self._verify_backwards(
                    exec_mode, [r1, r2, r3, r4], context_id, local_grads, t1, t2
                )
@dist_init
def test_backward_different_dtypes(self):
self._backward_different_dtypes(
torch.rand((3, 3), requires_grad=True, dtype=torch.float32),
torch.rand((3, 3), requires_grad=True, dtype=torch.float64),
False,
)
@dist_init
def test_backward_simple_python_udf(self):
self._backward_simple_python_udf(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False,
)
@dist_init
def test_backward_simple_script_call(self):
self._backward_simple_script_call(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False,
)
@staticmethod
def _complex_python_udf(t1, t2):
t3 = torch.nn.functional.linear(t1, t2)
t4 = torch.nn.functional.linear(t2, t3)
t5 = torch.nn.functional.linear(t3, t4)
return torch.linalg.multi_dot([t1, t2, t3, t4, t5])
    @dist_init
    def test_backward_complex_python_udf(self):
        """Backward through _complex_python_udf matches local execution."""
        # Run the same code locally and with dist autograd and verify gradients
        # are same.
        local_grads = None
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = torch.rand((3, 3), requires_grad=True)
        # LOCAL runs first to record reference grads for the REMOTE pass.
        for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
            with dist_autograd.context() as context_id:
                ret = self._exec_func(
                    exec_mode, DistAutogradTest._complex_python_udf, t1, t2
                )
                loss = ret.sum()
                local_grads = self._verify_backwards(
                    exec_mode, [loss], context_id, local_grads, t1, t2
                )
@staticmethod
def _python_udf_with_backward_error(t1, t2):
t3 = t1 + t2
t4 = SimulateBackwardError.apply(t3)
return torch.linalg.multi_dot([t1, t2, t3, t4])
@staticmethod
def _nested_rpc_call_backward_error(t1, t2, dst):
t1 = t1 * t2
t2 = t1 + t2
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2),
)
return torch.linalg.multi_dot([t1, t2, res])
    @dist_init
    def test_backward_python_udf_error(self):
        """A backward error raised inside a nested RPC UDF propagates home."""
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = torch.rand((3, 3), requires_grad=True)
        with dist_autograd.context() as context_id:
            # Two hops: the next rank runs the nested call, which itself RPCs
            # the UDF whose backward raises SimulateBackwardError.
            loss = rpc.rpc_sync(
                worker_name(self._next_rank()),
                DistAutogradTest._nested_rpc_call_backward_error,
                args=(t1, t2, self._next_rank()),
            )
            with self.assertRaisesRegex(
                RuntimeError, "Simulate error on backward pass"
            ):
                dist_autograd.backward(context_id, [loss.sum()])
_backward_done = False
    @dist_init(clean_shutdown=False)
    @skip_but_pass_in_sandcastle_if(
        IS_MACOS,
        "Test is flaky on MacOS since libuv error handling is not as robust as TCP",
    )
    def test_backward_node_failure_python_udf(self):
        """Killing the last hop of a nested RPC chain must fail backward on
        rank 0; ranks coordinate through the c10d store because the RPC
        framework may already be broken once rank 2 dies."""
        # Set a short timeout to quickly time out failed RPCs.
        rpc._set_rpc_timeout(5)  # 5 seconds
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        with dist_autograd.context() as context_id:
            t1 = torch.rand((3, 3), requires_grad=True)
            t2 = torch.rand((3, 3), requires_grad=True)
            dst = self._next_rank()
            res = rpc.rpc_sync(
                worker_name(dst),
                my_py_nested_call,
                args=(t1, t2, dst, self.world_size, 1),
            )
            dist.barrier()
            # Kill rank 2 (last hop of nested rpc) and verify rank 0 receives an error.
            if self.rank == 2:
                return
            store = dist.distributed_c10d._get_default_store()
            if self.rank == 0:
                # Wait for rank 2 to die.
                shutdown_error_regex = self.get_shutdown_error_regex()
                wait_until_node_failure(2, shutdown_error_regex)
                # Shutdown sequence is not very well defined and as a result
                # we might see any error given by get_shutdown_error_regex().
                with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
                    # Run backwards, and validate we receive an error since rank 2 is dead.
                    dist_autograd.backward(context_id, [res.sum()])
                # Mark rank 0 is done in the store, since the RPC framework on
                # some nodes might be broken at this point.
                store.set("test_backward_node_failure_python_udf_rank0_done", "True")
            else:
                # Wait for backward to finish on rank 0.
                store.wait(
                    ["test_backward_node_failure_python_udf_rank0_done"],
                    timedelta(seconds=10),
                )
@staticmethod
def _nested_python_udf(t1, t2, dst):
t3 = t1 * t2
t4 = t1 + t2
res = rpc.rpc_sync(worker_name(dst), my_py_add, args=(t3, t4))
return t1 * t2 * t3 * t4 * res
@dist_init
def test_backwards_nested_python_udf(self):
# Run equivalent of _nested_python_udf locally.
self._backwards_nested_python_udf(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False,
)
_test_clean_context_backward_context_id = None
    class MyBackwardFunc(Function):
        """Identity in forward; its backward releases the shared autograd
        context to simulate a context being cleaned up mid-backward."""
        @staticmethod
        def forward(ctx, input):
            return input
        @staticmethod
        @once_differentiable
        def backward(ctx, input):
            # The owning test publishes the context id through a class
            # attribute since this static hook cannot see the test instance.
            assert DistAutogradTest._test_clean_context_backward_context_id is not None
            # Release the context to simulate error (use barrier before releasing
            # context to ensure all nodes execute the backward function).
            dist.barrier()
            dist_autograd._release_context(
                DistAutogradTest._test_clean_context_backward_context_id
            )
            # Verify all contexts are cleaned up.
            assert _all_contexts_cleaned_up()
            return input
    @dist_init
    def test_clean_context_during_backward(self):
        """
        This test simulates the situation where the 'backward' call might throw
        an exception locally which would lead to the autograd context being
        cleaned up if we're using the context manager. As a result, the autograd
        context might be cleaned up while some threads are still using the
        autograd context.
        It is fine for the 'backward' call to throw an exception in this test,
        but the process should not crash.
        """
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Create the context manually (not via the context manager) so that
        # MyBackwardFunc.backward can release it mid-pass.
        context = dist_autograd._new_context()
        context_id = context._context_id()
        DistAutogradTest._test_clean_context_backward_context_id = context_id
        # Send the context id to all nodes.
        for i in range(self.world_size):
            if i != self.rank:
                rank_distance = (i - self.rank + self.world_size) % self.world_size
                rpc.rpc_sync(
                    worker_name(i),
                    _set_rpc_done,
                    args=(context_id, rank_distance),
                )
        dist.barrier()
        # Verify all context ids have been received.
        self.assertEqual(self.world_size - 1, len(known_context_ids))
        t1 = torch.rand((3, 3), requires_grad=True)
        # Build a long chain of remote adds to populate send functions.
        for _ in range(100):
            dst = self._next_rank()
            t1 = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t1))
        # Call MyBackwardFunc as the first op of the backward pass to
        # ensure we release the context early in the backward pass.
        t1 = DistAutogradTest.MyBackwardFunc.apply(t1)
        self.assertEqual(100, len(context._send_functions()))
        context_id = 100  # dummy context_id
        with self.assertRaisesRegex(
            RuntimeError,
            f"Could not find autograd context with id: {context_id}",
        ):
            dist_autograd.backward(context_id, [t1.sum()])
        # HACK: Killing workers since otherwise the autograd engine gets stuck on
        # other nodes. The proper fix would be addressing:
        # https://github.com/pytorch/pytorch/issues/27643, which would inform
        # other nodes about the failure.
        # The autograd engine gets stuck on other nodes since they're waiting to
        # receive gradients from the node that received an error (and as a
        # result it didn't execute the rest of the graph).
        dist.barrier()
        rpc.shutdown(graceful=False)
        sys.exit(0)
@classmethod
def _call_remote_embedding(cls, embedding_rref, input, offsets, per_sample_weights):
embedding = embedding_rref.local_value()
return embedding(input, offsets, per_sample_weights)
@classmethod
def _get_grad(cls, embedding_rref, context_id):
embedding = embedding_rref.local_value()
grad_map = dist_autograd.get_gradients(context_id)
return grad_map[embedding.weight]
@classmethod
def _mixed_requires_grad_operaton(cls, t1, t2):
if t2.requires_grad:
return t1 - t2
else:
return t1 * t2
@dist_init
def test_mixed_requires_grad(self):
self._mixed_requires_grad(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=False),
False,
)
    class TestDebugInfoFunc(Function):
        """Identity whose backward samples dist_autograd debug info while
        other nodes are still blocked in their backward passes."""
        @staticmethod
        def forward(ctx, input):
            return input
        @staticmethod
        @once_differentiable
        def backward(ctx, input):
            debug_info = dist_autograd._get_debug_info()
            assert debug_info is not None
            backward_passes = int(debug_info["num_current_backward_passes"])
            # Hard to validate exact numbers because of the distributed nature.
            # We can't use a barrier() here since that would block the single
            # CPU thread available for autograd and can cause deadlocks.
            assert backward_passes >= 1 and backward_passes <= 4
            return input
    @dist_init
    def test_debug_info(self):
        """Exercise dist_autograd._get_debug_info() during and after a
        multi-node backward pass."""
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = torch.rand((3, 3), requires_grad=True)
        with dist_autograd.context() as context_id:
            i = 0
            res = {}
            res[i] = t1
            # First fan-out: one remote add per peer rank.
            for rank in range(self.world_size):
                if rank != self.rank:
                    res[i + 1] = rpc.rpc_sync(
                        worker_name(rank), torch.add, args=(res[i], t2)
                    )
                    i += 1
            # Call custom function in middle of backward pass to ensure all
            # nodes are still waiting on a backward().
            res[i + 1] = DistAutogradTest.TestDebugInfoFunc.apply(res[i])
            i += 1
            # Second fan-out after the probe function.
            for rank in range(self.world_size):
                if rank != self.rank:
                    res[i + 1] = rpc.rpc_sync(
                        worker_name(rank), torch.add, args=(res[i], t2)
                    )
                    i += 1
            dist_autograd.backward(context_id, [res[i].sum()])
            debug_info = dist_autograd._get_debug_info()
            num_autograd_context = int(debug_info["num_autograd_contexts"])
            # Need at least one context and not more than 4.
            self.assertTrue(num_autograd_context >= 1 and num_autograd_context <= 4)
        # Tell every peer this rank's context is done so cleanup can proceed.
        for rd in range(self.world_size - 1):
            rpc.rpc_sync(
                worker_name((self.rank + rd + 1) % self.world_size),
                _set_rpc_done,
                args=(context_id, rd + 1),
            )
        dist.barrier()
        # Validate information
        debug_info = dist_autograd._get_debug_info()
        assert debug_info is not None
        self.assertEqual(0, int(debug_info["num_current_backward_passes"]))
        # only have `num_current_backward_passes` and `num_autograd contexts`
        self.assertTrue(len(debug_info) == 2)
        self.assertTrue(_all_contexts_cleaned_up())
        # All contexts should be cleaned up.
        debug_info = dist_autograd._get_debug_info()
        self.assertEqual(0, int(debug_info["num_autograd_contexts"]))
@staticmethod
def _workload_thread():
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = rpc.rpc_sync("worker0", torch.add, args=(t1, t2))
t4 = rpc.rpc_sync("worker0", torch.mul, args=(t2, t3))
t5 = rpc.rpc_sync("worker0", torch.matmul, args=(t3, t4))
t6 = rpc.rpc_sync("worker0", torch.add, args=(t4, t5))
dist_autograd.backward(context_id, [t6.sum()])
    @dist_init
    def test_async_dist_autograd(self):
        """
        This test ensures async processing for distributed autograd works
        appropriately. This is achieved by spawning multiple threads and
        hammering a single node with a lot of backward() calls.
        """
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        if self.rank != 0:
            # All other ranks schedule work on rank 0.
            threads = []
            for _ in range(20):
                t = threading.Thread(target=DistAutogradTest._workload_thread)
                t.start()
                threads.append(t)
            for thread in threads:
                thread.join()
        # Rank 0 only serves requests; the barrier keeps it alive until every
        # client rank has finished its workload threads.
        dist.barrier()
    @dist_init
    def test_backward_accumulate_grads(self):
        """Running backward twice accumulates grads identically for local
        autograd and dist_autograd."""
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = torch.rand((3, 3), requires_grad=True)
        with dist_autograd.context() as context_id:
            t3 = torch.matmul(t1, t2)
            # Run backward twice.
            torch.autograd.backward([t3.sum()], retain_graph=True)
            torch.autograd.backward([t3.sum()])
            # Same computation over RPC.
            t3 = rpc.rpc_sync(
                worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
            )
            # Run backward twice.
            dist_autograd.backward(context_id, [t3.sum()], retain_graph=True)
            dist_autograd.backward(context_id, [t3.sum()])
            # Verify the gradients are same for local and remote execution.
            grads = dist_autograd.get_gradients(context_id)
            self.assertEqual(2, len(grads))
            self.assertIn(t1, grads)
            self.assertIn(t2, grads)
            # t1.grad/t2.grad hold the doubly-accumulated local grads.
            self.assertEqual(t1.grad, grads[t1])
            self.assertEqual(t2.grad, grads[t2])
@staticmethod
def _test_nested_backward_accumulate_grads(t1, t2, dst_rank):
return rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
@dist_init
def test_nested_backward_accumulate_grads(self):
self._nested_backward_accumulate_grads(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False,
)
@dist_init
def test_multiple_backward(self):
self._multiple_backward(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False,
)
    @dist_init(clean_shutdown=False)
    def test_multiple_backward_with_errors(self):
        """backward() can be retried after failures and recovers once the
        simulated error is disabled."""
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = torch.rand((3, 3), requires_grad=True)
        with dist_autograd.context() as context_id:
            loss = rpc.rpc_sync(
                f"worker{self._next_rank()}",
                DistAutogradTest._python_udf_with_backward_error,
                args=(t1, t2),
            ).sum()
            try:
                # Run backward in a loop multiple times.
                for i in range(100):
                    if i < 50:
                        # First 50 iterations: the injected error fires.
                        with self.assertRaisesRegex(
                            RuntimeError, "Simulate error on backward pass"
                        ):
                            dist_autograd.backward(
                                context_id, [loss], retain_graph=True
                            )
                    elif i > 50:
                        # Recovered from error.
                        dist_autograd.backward(context_id, [loss], retain_graph=True)
                    else:
                        # i == 50: flip the error flag in lockstep on all ranks.
                        dist.barrier()
                        SimulateBackwardError._simulate_error = False
                        dist.barrier()
            finally:
                # Sync before resetting flag.
                dist.barrier()
                # Reset the flag.
                SimulateBackwardError._simulate_error = True
    @dist_init
    def test_backward_verify_hooks(self):
        """Tensor hooks registered locally apply during distributed backward."""
        t1 = torch.ones((3, 3), requires_grad=True)
        # Double the gradient.
        t1.register_hook(lambda grad: grad * 2)
        t2 = torch.ones((3, 3), requires_grad=True)
        local_grads = None
        # LOCAL runs first to record reference grads for the RPC/remote passes.
        for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
            with dist_autograd.context() as context_id:
                ret = self._exec_func(exec_mode, torch.matmul, t1, t2)
                loss = ret.sum()
                ret = self._verify_backwards(
                    exec_mode, [loss], context_id, local_grads, t1, t2
                )
                local_grads = ret if ret else local_grads
    @dist_init
    def test_no_grad_copy(self):
        """
        Similar to test in test_autograd.py.
        Compares data pointers captured inside backward() against the final
        accumulated grads to check when dist_autograd can reuse the incoming
        grad buffer instead of cloning it.
        """
        # create autograd function that saves grad pointer as class static
        class MyFunc(Function):
            static_grad_ptr = None
            @staticmethod
            def forward(ctx, inp1, inp2):
                return inp1 + inp2
            @staticmethod
            def backward(ctx, grad):
                MyFunc.static_grad_ptr = grad.data_ptr()
                # The same grad tensor is returned for both inputs.
                return grad, grad
        class MyFuncSingleGrad(Function):
            static_grad_ptr = None
            @staticmethod
            def forward(ctx, inp):
                return inp
            @staticmethod
            def backward(ctx, grad):
                MyFuncSingleGrad.static_grad_ptr = grad.data_ptr()
                return grad
        class NonContGradFunc(Function):
            @staticmethod
            def forward(ctx, inp1):
                ctx.size = inp1.size()
                return torch.tensor([1.0])
            @staticmethod
            def backward(ctx, grad):
                # Expanded (non-contiguous) grad forces a copy downstream.
                return torch.ones(1).expand(ctx.size)
        a = torch.randn(5, 6, requires_grad=True)
        b = torch.randn(5, 6, requires_grad=True)
        # non-contiguous grad should be copied
        with dist_autograd.context() as context_id:
            dist_autograd.backward(
                context_id, [NonContGradFunc.apply(MyFunc.apply(a, b))]
            )
            grads = dist_autograd.get_gradients(context_id)
            self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
            self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
        # test case that should trigger no copy for a
        with dist_autograd.context() as context_id:
            dist_autograd.backward(context_id, [MyFuncSingleGrad.apply(a)[1][0]])
            grads = dist_autograd.get_gradients(context_id)
            p_g = MyFuncSingleGrad.static_grad_ptr
            p_a = grads[a].data_ptr()
            # Verify there was no clone.
            self.assertTrue(p_a == p_g)
        # Test case that should trigger copy for both of a,b. This is
        # different in the distributed autograd case since we hold
        # a reference to all grads in a vector until all accumulation is done.
        with dist_autograd.context() as context_id:
            dist_autograd.backward(context_id, [MyFunc.apply(a, b)[1][0]])
            grads = dist_autograd.get_gradients(context_id)
            p_g = MyFunc.static_grad_ptr
            p_a = grads[a].data_ptr()
            p_b = grads[b].data_ptr()
            # check a,b uses different grad buffer
            self.assertFalse(p_a == p_b)
            # both should be copied.
            self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
            self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
    @dist_init
    def test_no_grad_copy_sparse(self):
        """Sparse-gradient variant of test_no_grad_copy: checks when sparse
        grads are reused vs. cloned during accumulation."""
        # create autograd function that saves grad pointer as class static
        class MyFunc(Function):
            static_grad_ptr = None
            @staticmethod
            def forward(ctx, inp):
                return inp
            @staticmethod
            def backward(ctx, grad):
                MyFunc.static_grad_ptr = grad._values().data_ptr()
                return grad
        class NonContGradFunc(Function):
            static_grad_ptr = None
            @staticmethod
            def forward(ctx, inp1, inp2):
                return inp1 + inp2
            @staticmethod
            def backward(ctx, grad):
                # Create a sparse tensor with non-contiguous indices and values
                # and return as grad.
                v = torch.rand(1, 3)
                i = torch.ones(1, 1, dtype=torch.long)
                nv = v.expand(8, 3)
                ni = i.expand(1, 8)
                ngrad = torch.sparse_coo_tensor(ni, nv, (10, 3), dtype=torch.float32)
                NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
                return ngrad, ngrad
        a = torch.randn(10, 3, requires_grad=True)
        b = torch.randn(10, 3, requires_grad=True)
        input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
        offsets = torch.tensor([0, 4])
        import torch.nn.functional as F
        # test case that should trigger no copy for a.
        with dist_autograd.context() as context_id:
            emb_matrix = MyFunc.apply(a)
            loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
            dist_autograd.backward(context_id, [loss], retain_graph=True)
            grads = dist_autograd.get_gradients(context_id)
            p_g = MyFunc.static_grad_ptr
            p_a = grads[a]._values().data_ptr()
            # check a uses the same buffer
            self.assertTrue(p_a == p_g)
            # Run backwards multiple times.
            for _ in range(10):
                dist_autograd.backward(context_id, [loss], retain_graph=True)
        # non-contiguous indices and value, we should trigger a copy.
        with dist_autograd.context() as context_id:
            emb_matrix = NonContGradFunc.apply(a, b)
            loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
            dist_autograd.backward(context_id, [loss], retain_graph=True)
            grads = dist_autograd.get_gradients(context_id)
            p_g = NonContGradFunc.static_grad_ptr
            p_a = grads[a]._values().data_ptr()
            p_b = grads[b]._values().data_ptr()
            # check a,b uses different grad buffer
            self.assertFalse(p_a == p_b)
            # Verify we cloned both grads.
            self.assertFalse(p_a == p_g)
            self.assertFalse(p_b == p_g)
            # Run backwards multiple times to verify accumulation.
            for _ in range(10):
                dist_autograd.backward(context_id, [loss], retain_graph=True)
    @dist_init
    def test_grad_copy_sparse_indices_extra_ref(self):
        """Holding view references to a sparse grad's indices/values must not
        prevent dist_autograd from reusing the grad buffer."""
        # create autograd function that saves grad pointer as class static
        class MyFunc(Function):
            static_grad_ptr = None
            static_grad_indices_ref = None
            static_grad_values_ref = None
            @staticmethod
            def forward(ctx, inp):
                return inp
            @staticmethod
            def backward(ctx, grad):
                MyFunc.static_grad_ptr = grad._values().data_ptr()
                # indices() and values() return views, so holding onto
                # references of them would not increment refcount of indices
                # and values inside the sparse tensor.
                MyFunc.static_grad_indices_ref = grad._indices()
                MyFunc.static_grad_values_ref = grad._values()
                return grad
        a = torch.randn(10, 3, requires_grad=True)
        input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
        offsets = torch.tensor([0, 4])
        import torch.nn.functional as F
        with dist_autograd.context() as context_id:
            emb_matrix = MyFunc.apply(a)
            loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
            dist_autograd.backward(context_id, [loss], retain_graph=True)
            grads = dist_autograd.get_gradients(context_id)
            p_g = MyFunc.static_grad_ptr
            p_a = grads[a]._values().data_ptr()
            self.assertIsNotNone(MyFunc.static_grad_indices_ref)
            self.assertIsNotNone(MyFunc.static_grad_values_ref)
            # grad would be stolen, since static_grad_indices_ref and
            # static_grad_values_ref are holding onto views and don't bump the
            # refcount.
            self.assertTrue(p_g == p_a)
    @dist_init
    def test_post_hooks(self):
        """Post hooks on AccumulateGrad nodes run during distributed backward."""
        self.hook_called_times = 0
        def post_hook_add_one(output_grads, input_grads):
            self.hook_called_times += 1
            return output_grads
        def post_hook_add_two(output_grads, input_grads):
            self.hook_called_times += 2
            return output_grads
        t = torch.rand(10, 10, requires_grad=True)
        a = t + t
        # Register post hooks
        accumulate_grad_0 = a.grad_fn.next_functions[0][0]
        accumulate_grad_0.register_hook(post_hook_add_one)
        accumulate_grad_0.register_hook(post_hook_add_two)
        accumulate_grad_1 = a.grad_fn.next_functions[1][0]
        accumulate_grad_1.register_hook(post_hook_add_two)
        with dist_autograd.context() as context_id:
            loss = a.sum()
            dist_autograd.backward(context_id, [loss])
            # 1 + 2 from the first node, plus 2 from the second.
            self.assertEqual(5, self.hook_called_times)
            grads = dist_autograd.get_gradients(context_id)
            self.assertEqual(1, len(grads))
            self.assertTrue(t in grads)
@staticmethod
def _slow_add(t1, t2):
time.sleep(1)
t3 = t1 + t2
t3.requires_grad = True
return t3
    @dist_init
    def test_thread_local_context_id(self):
        """The current dist_autograd context id must propagate across server
        threads when a backward continuation runs on an rpc.remote thread."""
        t1 = torch.rand((3, 3))
        t2 = torch.rand((3, 3))
        # Local reference: compute t3.grad with plain autograd.
        t3 = t1 + t2
        t3.requires_grad = True
        t3.sum().backward()
        dst = worker_name((self.rank + 1) % self.world_size)
        rref = rpc.remote(dst, DistAutogradTest._slow_add, args=(t1, t2))
        with dist_autograd.context() as context_id:
            loss = rref.to_here().sum()
            # due to slow add, the continuation of this backward pass will be
            # invoked by the previous rpc.remote thread which does not have a
            # valid context_id. So, this can test whether we propagate
            # thread_local states properly when jumping across threads on the
            # server side.
            dist_autograd.backward(context_id, [loss])
            # Owner-side grad must equal the locally computed t3.grad.
            self.assertTrue(
                rpc.rpc_sync(
                    dst, _compare_owner_value, args=(context_id, rref, t3.grad)
                )
            )
| DistAutogradTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultClass4.py | {
"start": 206,
"end": 242
} | class ____(Generic[P, R]): ...
| ParentA |
python | tensorflow__tensorflow | tensorflow/python/ops/control_flow_v2_func_graphs.py | {
"start": 825,
"end": 1618
} | class ____(func_graph.FuncGraph):
"""Contains control flow-specific FuncGraph logic."""
def __init__(self, *args, **kwargs):
super(ControlFlowFuncGraph, self).__init__(*args, **kwargs)
outer_graph = self.outer_graph
# Unlike tf.function, control flow FuncGraphs are generally created one per
# op. This means hard-coding any outer device scopes in the body (rather
# than inspecting the call-time placement of the control flow op) makes
# sense.
self._device_function_stack = outer_graph._device_function_stack.copy() # pylint: disable=protected-access
self.is_control_flow_graph = True
if ops.executing_eagerly_outside_functions():
func_graph.override_func_graph_name_scope(
self, self.outer_graph.get_name_scope())
| ControlFlowFuncGraph |
python | django__django | tests/queries/models.py | {
"start": 5895,
"end": 6086
} | class ____(models.Model):
name = models.CharField(max_length=10, primary_key=True)
extra = models.CharField(max_length=10)
class Meta:
ordering = ["name", "extra"]
| CustomPk |
python | kennethreitz__tablib | src/tablib/formats/_json.py | {
"start": 303,
"end": 1476
} | class ____:
title = 'json'
extensions = ('json', 'jsn')
@classmethod
def export_set(cls, dataset):
"""Returns JSON representation of Dataset."""
return json.dumps(dataset.dict, default=serialize_objects_handler)
@classmethod
def export_book(cls, databook):
"""Returns JSON representation of Databook."""
return json.dumps(databook._package(), default=serialize_objects_handler)
@classmethod
def import_set(cls, dset, in_stream):
"""Returns dataset from JSON stream."""
dset.wipe()
dset.dict = json.load(in_stream)
@classmethod
def import_book(cls, dbook, in_stream):
"""Returns databook from JSON stream."""
dbook.wipe()
for sheet in json.load(in_stream):
data = tablib.Dataset()
data.title = sheet['title']
data.dict = sheet['data']
dbook.add_sheet(data)
@classmethod
def detect(cls, stream):
"""Returns True if given stream is valid JSON."""
try:
json.load(stream)
return True
except (TypeError, ValueError):
return False
| JSONFormat |
python | rushter__MLAlgorithms | mla/neuralnet/constraints.py | {
"start": 584,
"end": 762
} | class ____(Constraint):
def __init__(self, axis=0):
self.axis = axis
def clip(self, p):
return p / (EPSILON + np.sqrt(np.sum(p**2, axis=self.axis)))
| UnitNorm |
python | joblib__joblib | joblib/_dask.py | {
"start": 3052,
"end": 3868
} | class ____:
"""dask-compatible wrapper that executes a batch of tasks"""
def __init__(self, tasks):
# collect some metadata from the tasks to ease Batch calls
# introspection when debugging
self._num_tasks, self._mixed, self._funcname = _make_tasks_summary(tasks)
def __call__(self, tasks=None):
results = []
with parallel_config(backend="dask"):
for func, args, kwargs in tasks:
results.append(func(*args, **kwargs))
return results
def __repr__(self):
descr = f"batch_of_{self._funcname}_{self._num_tasks}_calls"
if self._mixed:
descr = "mixed_" + descr
return descr
def _joblib_probe_task():
# Noop used by the joblib connector to probe when workers are ready.
pass
| Batch |
python | walkccc__LeetCode | solutions/1415. The k-th Lexicographical String of All Happy Strings of Length n/1415.py | {
"start": 0,
"end": 331
} | class ____:
def getHappyString(self, n: int, k: int) -> str:
nextLetters = {'a': 'bc', 'b': 'ac', 'c': 'ab'}
q = collections.deque(['a', 'b', 'c'])
while len(q[0]) != n:
u = q.popleft()
for nextLetter in nextLetters[u[-1]]:
q.append(u + nextLetter)
return '' if len(q) < k else q[k - 1]
| Solution |
python | gevent__gevent | src/gevent/tests/test__refcount_core.py | {
"start": 271,
"end": 600
} | class ____(greentest.TestCase):
def test(self):
from gevent import socket
s = socket.socket()
r = weakref.ref(s)
s.close()
del s
self.assertIsNone(r())
assert weakref.ref(Dummy())() is None or hasattr(sys, 'pypy_version_info')
if __name__ == '__main__':
greentest.main()
| Test |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-recharge/source_recharge/components/datetime_based_cursor.py | {
"start": 452,
"end": 2828
} | class ____(DatetimeBasedCursor):
"""
Override for the default `DatetimeBasedCursor`.
`get_request_params()` - to guarantee the records are returned in `ASC` order.
Currently the `HttpRequester` couldn't handle the case when,
we need to omit all other `request_params` but `next_page_token` param,
typically when the `CursorPagination` straregy is applied.
We should have the `request_parameters` structure like this, or similar to either keep or omit the parameter,
based on the paginated result:
```
HttpRequester:
...
request_parameters:
# The `sort_by` param, will be omitted intentionaly on the paginated result
- sort_by: "updated_at-asc"
ignore_on_pagination: true
# the `some_other_param` param, will be kept on the paginated result
- some_other_param: "string_value"
ignore_on_pagination: false
```
Because there is a `ignore_stream_slicer_parameters_on_paginated_requests` set to True for the `SimpleRetriever`,
we are able to omit everthing what we pass from the `DatetimeBasedCursor.get_request_params()` having the initial request as expected,
all subsequent requests are made based on Paginated Results.
"""
def __post_init__(self, parameters: Mapping[str, Any]) -> None:
super().__post_init__(parameters=parameters)
def get_request_params(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, Any]:
"""
The override to add additional param to the api request to guarantee the `ASC` records order.
Background:
There is no possability to pass multiple request params from the YAML for the incremental streams,
in addition to the `start_time_option` or similar, having them ignored those additional params,
when we have `next_page_token`, which must be the single param to be passed to satisfy the API requirements.
"""
params = super().get_request_params(
stream_state=stream_state,
stream_slice=stream_slice,
next_page_token=next_page_token,
)
params["sort_by"] = "updated_at-asc"
return params
| RechargeDateTimeBasedCursor |
python | tensorflow__tensorflow | tensorflow/python/autograph/tests/call_to_numpy_function_test.py | {
"start": 991,
"end": 1152
} | class ____(reference_test_base.TestCase):
def test_basic(self):
self.assertFunctionMatchesEager(f)
if __name__ == '__main__':
tf.test.main()
| ReferenceTest |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 554843,
"end": 596944
} | class ____(VegaLiteSchema):
"""
LegendConfig schema wrapper.
Parameters
----------
aria : bool, dict, :class:`ExprRef`
A boolean flag indicating if `ARIA attributes
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ should be
included (SVG output only). If ``false``, the "aria-hidden" attribute will be set on
the output SVG group, removing the legend from the ARIA accessibility tree.
**Default value:** ``true``
clipHeight : dict, float, :class:`ExprRef`
The height in pixels to clip symbol legend entries and limit their size.
columnPadding : dict, float, :class:`ExprRef`
The horizontal padding in pixels between symbol legend entries.
**Default value:** ``10``.
columns : dict, float, :class:`ExprRef`
The number of columns in which to arrange symbol legend entries. A value of ``0`` or
lower indicates a single row with one column per entry.
cornerRadius : dict, float, :class:`ExprRef`
Corner radius for the full legend.
description : str, dict, :class:`ExprRef`
A text description of this legend for `ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If the ``aria`` property is true, for SVG output the `"aria-label" attribute
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/ARIA_Techniques/Using_the_aria-label_attribute>`__
will be set to this description. If the description is unspecified it will be
automatically generated.
direction : :class:`Orientation`, Literal['horizontal', 'vertical']
The direction of the legend, one of ``"vertical"`` or ``"horizontal"``.
**Default value:**
* For top-/bottom-``orient``ed legends, ``"horizontal"``
* For left-/right-``orient``ed legends, ``"vertical"``
* For top/bottom-left/right-``orient``ed legends, ``"horizontal"`` for gradient
legends and ``"vertical"`` for symbol legends.
disable : bool
Disable legend by default
fillColor : str, dict, :class:`Color`, :class:`ExprRef`, :class:`HexColor`, :class:`ColorName`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 
'yellowgreen', 'rebeccapurple'], None
Background fill color for the full legend.
gradientDirection : dict, :class:`ExprRef`, :class:`Orientation`, Literal['horizontal', 'vertical']
The default direction (``"horizontal"`` or ``"vertical"``) for gradient legends.
**Default value:** ``"vertical"``.
gradientHorizontalMaxLength : float
Max legend length for a horizontal gradient when ``config.legend.gradientLength`` is
undefined.
**Default value:** ``200``
gradientHorizontalMinLength : float
Min legend length for a horizontal gradient when ``config.legend.gradientLength`` is
undefined.
**Default value:** ``100``
gradientLabelLimit : dict, float, :class:`ExprRef`
The maximum allowed length in pixels of color ramp gradient labels.
gradientLabelOffset : dict, float, :class:`ExprRef`
Vertical offset in pixels for color ramp gradient labels.
**Default value:** ``2``.
gradientLength : dict, float, :class:`ExprRef`
The length in pixels of the primary axis of a color gradient. This value corresponds
to the height of a vertical gradient or the width of a horizontal gradient.
**Default value:** ``200``.
gradientOpacity : dict, float, :class:`ExprRef`
Opacity of the color gradient.
gradientStrokeColor : str, dict, :class:`Color`, :class:`ExprRef`, :class:`HexColor`, :class:`ColorName`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 
'yellowgreen', 'rebeccapurple'], None
The color of the gradient stroke, can be in hex color code or regular color name.
**Default value:** ``"lightGray"``.
gradientStrokeWidth : dict, float, :class:`ExprRef`
The width of the gradient stroke, in pixels.
**Default value:** ``0``.
gradientThickness : dict, float, :class:`ExprRef`
The thickness in pixels of the color gradient. This value corresponds to the width
of a vertical gradient or the height of a horizontal gradient.
**Default value:** ``16``.
gradientVerticalMaxLength : float
Max legend length for a vertical gradient when ``config.legend.gradientLength`` is
undefined.
**Default value:** ``200``
gradientVerticalMinLength : float
Min legend length for a vertical gradient when ``config.legend.gradientLength`` is
undefined.
**Default value:** ``100``
gridAlign : dict, :class:`ExprRef`, :class:`LayoutAlign`, Literal['all', 'each', 'none']
The alignment to apply to symbol legends rows and columns. The supported string
values are ``"all"``, ``"each"`` (the default), and ``none``. For more information,
see the `grid layout documentation <https://vega.github.io/vega/docs/layout>`__.
**Default value:** ``"each"``.
labelAlign : dict, :class:`Align`, :class:`ExprRef`, Literal['left', 'center', 'right']
The alignment of the legend label, can be left, center, or right.
labelBaseline : dict, :class:`ExprRef`, :class:`Baseline`, :class:`TextBaseline`, Literal['alphabetic', 'line-bottom', 'line-top', 'top', 'middle', 'bottom']
The position of the baseline of legend label, can be ``"top"``, ``"middle"``,
``"bottom"``, or ``"alphabetic"``.
**Default value:** ``"middle"``.
labelColor : str, dict, :class:`Color`, :class:`ExprRef`, :class:`HexColor`, :class:`ColorName`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 
'yellowgreen', 'rebeccapurple'], None
The color of the legend label, can be in hex color code or regular color name.
labelFont : str, dict, :class:`ExprRef`
The font of the legend label.
labelFontSize : dict, float, :class:`ExprRef`
The font size of legend label.
**Default value:** ``10``.
labelFontStyle : str, dict, :class:`ExprRef`, :class:`FontStyle`
The font style of legend label.
labelFontWeight : dict, :class:`ExprRef`, :class:`FontWeight`, Literal['normal', 'bold', 'lighter', 'bolder', 100, 200, 300, 400, 500, 600, 700, 800, 900]
The font weight of legend label.
labelLimit : dict, float, :class:`ExprRef`
Maximum allowed pixel width of legend tick labels.
**Default value:** ``160``.
labelOffset : dict, float, :class:`ExprRef`
The offset of the legend label.
**Default value:** ``4``.
labelOpacity : dict, float, :class:`ExprRef`
Opacity of labels.
labelOverlap : bool, dict, :class:`ExprRef`, :class:`LabelOverlap`, Literal['greedy', 'parity']
The strategy to use for resolving overlap of labels in gradient legends. If
``false``, no overlap reduction is attempted. If set to ``true`` or ``"parity"``, a
strategy of removing every other label is used. If set to ``"greedy"``, a linear
scan of the labels is performed, removing any label that overlaps with the last
visible label (this often works better for log-scaled axes).
**Default value:** ``"greedy"`` for log scales otherwise ``true``.
labelPadding : dict, float, :class:`ExprRef`
Padding in pixels between the legend and legend labels.
labelSeparation : dict, float, :class:`ExprRef`
The minimum separation that must be between label bounding boxes for them to be
considered non-overlapping (default ``0``). This property is ignored if
*labelOverlap* resolution is not enabled.
layout : dict, :class:`ExprRef`
legendX : dict, float, :class:`ExprRef`
Custom x-position for legend with orient "none".
legendY : dict, float, :class:`ExprRef`
Custom y-position for legend with orient "none".
offset : dict, float, :class:`ExprRef`
The offset in pixels by which to displace the legend from the data rectangle and
axes.
**Default value:** ``18``.
orient : :class:`LegendOrient`, Literal['none', 'left', 'right', 'top', 'bottom', 'top-left', 'top-right', 'bottom-left', 'bottom-right']
The orientation of the legend, which determines how the legend is positioned within
the scene. One of ``"left"``, ``"right"``, ``"top"``, ``"bottom"``, ``"top-left"``,
``"top-right"``, ``"bottom-left"``, ``"bottom-right"``, ``"none"``.
**Default value:** ``"right"``
padding : dict, float, :class:`ExprRef`
The padding between the border and content of the legend group.
**Default value:** ``0``.
rowPadding : dict, float, :class:`ExprRef`
The vertical padding in pixels between symbol legend entries.
**Default value:** ``2``.
strokeColor : str, dict, :class:`Color`, :class:`ExprRef`, :class:`HexColor`, :class:`ColorName`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 
'yellowgreen', 'rebeccapurple'], None
Border stroke color for the full legend.
strokeDash : dict, Sequence[float], :class:`ExprRef`
Border stroke dash pattern for the full legend.
strokeWidth : dict, float, :class:`ExprRef`
Border stroke width for the full legend.
symbolBaseFillColor : str, dict, :class:`Color`, :class:`ExprRef`, :class:`HexColor`, :class:`ColorName`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 
'yellowgreen', 'rebeccapurple'], None
Default fill color for legend symbols. Only applied if there is no ``"fill"`` scale
color encoding for the legend.
**Default value:** ``"transparent"``.
symbolBaseStrokeColor : str, dict, :class:`Color`, :class:`ExprRef`, :class:`HexColor`, :class:`ColorName`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 
'yellowgreen', 'rebeccapurple'], None
Default stroke color for legend symbols. Only applied if there is no ``"fill"``
scale color encoding for the legend.
**Default value:** ``"gray"``.
symbolDash : dict, Sequence[float], :class:`ExprRef`
An array of alternating [stroke, space] lengths for dashed symbol strokes.
symbolDashOffset : dict, float, :class:`ExprRef`
The pixel offset at which to start drawing with the symbol stroke dash array.
symbolDirection : dict, :class:`ExprRef`, :class:`Orientation`, Literal['horizontal', 'vertical']
The default direction (``"horizontal"`` or ``"vertical"``) for symbol legends.
**Default value:** ``"vertical"``.
symbolFillColor : str, dict, :class:`Color`, :class:`ExprRef`, :class:`HexColor`, :class:`ColorName`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 
'yellowgreen', 'rebeccapurple'], None
The color of the legend symbol,
symbolLimit : dict, float, :class:`ExprRef`
The maximum number of allowed entries for a symbol legend. Additional entries will
be dropped.
symbolOffset : dict, float, :class:`ExprRef`
Horizontal pixel offset for legend symbols.
**Default value:** ``0``.
symbolOpacity : dict, float, :class:`ExprRef`
Opacity of the legend symbols.
symbolSize : dict, float, :class:`ExprRef`
The size of the legend symbol, in pixels.
**Default value:** ``100``.
symbolStrokeColor : str, dict, :class:`Color`, :class:`ExprRef`, :class:`HexColor`, :class:`ColorName`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 
'yellowgreen', 'rebeccapurple'], None
Stroke color for legend symbols.
symbolStrokeWidth : dict, float, :class:`ExprRef`
The width of the symbol's stroke.
**Default value:** ``1.5``.
symbolType : str, dict, :class:`ExprRef`, :class:`SymbolShape`
The symbol shape. One of the plotting shapes ``circle`` (default), ``square``,
``cross``, ``diamond``, ``triangle-up``, ``triangle-down``, ``triangle-right``, or
``triangle-left``, the line symbol ``stroke``, or one of the centered directional
shapes ``arrow``, ``wedge``, or ``triangle``. Alternatively, a custom `SVG path
string <https://developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths>`__ can be
provided. For correct sizing, custom shape paths should be defined within a square
bounding box with coordinates ranging from -1 to 1 along both the x and y
dimensions.
**Default value:** ``"circle"``.
tickCount : dict, float, :class:`ExprRef`, :class:`TickCount`, :class:`TimeInterval`, :class:`TimeIntervalStep`, Literal['millisecond', 'second', 'minute', 'hour', 'day', 'week', 'month', 'year']
The desired number of tick values for quantitative legends.
title : None
Set to null to disable title for the axis, legend, or header.
titleAlign : dict, :class:`Align`, :class:`ExprRef`, Literal['left', 'center', 'right']
Horizontal text alignment for legend titles.
**Default value:** ``"left"``.
titleAnchor : dict, :class:`ExprRef`, :class:`TitleAnchor`, Literal[None, 'start', 'middle', 'end']
Text anchor position for placing legend titles.
titleBaseline : dict, :class:`ExprRef`, :class:`Baseline`, :class:`TextBaseline`, Literal['alphabetic', 'line-bottom', 'line-top', 'top', 'middle', 'bottom']
Vertical text baseline for legend titles. One of ``"alphabetic"`` (default),
``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, or ``"line-bottom"``. The
``"line-top"`` and ``"line-bottom"`` values operate similarly to ``"top"`` and
``"bottom"``, but are calculated relative to the *lineHeight* rather than *fontSize*
alone.
**Default value:** ``"top"``.
titleColor : str, dict, :class:`Color`, :class:`ExprRef`, :class:`HexColor`, :class:`ColorName`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 
'yellowgreen', 'rebeccapurple'], None
The color of the legend title, can be in hex color code or regular color name.
titleFont : str, dict, :class:`ExprRef`
The font of the legend title.
titleFontSize : dict, float, :class:`ExprRef`
The font size of the legend title.
titleFontStyle : str, dict, :class:`ExprRef`, :class:`FontStyle`
The font style of the legend title.
titleFontWeight : dict, :class:`ExprRef`, :class:`FontWeight`, Literal['normal', 'bold', 'lighter', 'bolder', 100, 200, 300, 400, 500, 600, 700, 800, 900]
The font weight of the legend title. This can be either a string (e.g ``"bold"``,
``"normal"``) or a number (``100``, ``200``, ``300``, ..., ``900`` where
``"normal"`` = ``400`` and ``"bold"`` = ``700``).
titleLimit : dict, float, :class:`ExprRef`
Maximum allowed pixel width of legend titles.
**Default value:** ``180``.
titleLineHeight : dict, float, :class:`ExprRef`
Line height in pixels for multi-line title text or title text with ``"line-top"`` or
``"line-bottom"`` baseline.
titleOpacity : dict, float, :class:`ExprRef`
Opacity of the legend title.
titleOrient : dict, :class:`Orient`, :class:`ExprRef`, Literal['left', 'right', 'top', 'bottom']
Orientation of the legend title.
titlePadding : dict, float, :class:`ExprRef`
The padding, in pixels, between title and legend.
**Default value:** ``5``.
unselectedOpacity : float
The opacity of unselected legend entries.
**Default value:** 0.35.
zindex : dict, float, :class:`ExprRef`
The integer z-index indicating the layering of the legend group relative to other
axis, mark, and legend groups.
"""
_schema = {"$ref": "#/definitions/LegendConfig"}
def __init__(
self,
aria: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
clipHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
columnPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
columns: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
description: Optional[str | Parameter | SchemaBase | Map] = Undefined,
direction: Optional[SchemaBase | Orientation_T] = Undefined,
disable: Optional[bool] = Undefined,
fillColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
gradientDirection: Optional[
Parameter | SchemaBase | Map | Orientation_T
] = Undefined,
gradientHorizontalMaxLength: Optional[float] = Undefined,
gradientHorizontalMinLength: Optional[float] = Undefined,
gradientLabelLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientLabelOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientLength: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientStrokeColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
gradientStrokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientThickness: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientVerticalMaxLength: Optional[float] = Undefined,
gradientVerticalMinLength: Optional[float] = Undefined,
gridAlign: Optional[Parameter | SchemaBase | Map | LayoutAlign_T] = Undefined,
labelAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
labelBaseline: Optional[
Parameter | SchemaBase | Map | TextBaseline_T
] = Undefined,
labelColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
labelFont: Optional[str | Parameter | SchemaBase | Map] = Undefined,
labelFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
labelFontWeight: Optional[
Parameter | SchemaBase | Map | FontWeight_T
] = Undefined,
labelLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOverlap: Optional[
bool | Parameter | SchemaBase | Literal["greedy", "parity"] | Map
] = Undefined,
labelPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelSeparation: Optional[float | Parameter | SchemaBase | Map] = Undefined,
layout: Optional[Parameter | SchemaBase | Map] = Undefined,
legendX: Optional[float | Parameter | SchemaBase | Map] = Undefined,
legendY: Optional[float | Parameter | SchemaBase | Map] = Undefined,
offset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
orient: Optional[SchemaBase | LegendOrient_T] = Undefined,
padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
rowPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
strokeDash: Optional[
Parameter | SchemaBase | Sequence[float] | Map
] = Undefined,
strokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolBaseFillColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
symbolBaseStrokeColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
symbolDash: Optional[
Parameter | SchemaBase | Sequence[float] | Map
] = Undefined,
symbolDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolDirection: Optional[
Parameter | SchemaBase | Map | Orientation_T
] = Undefined,
symbolFillColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
symbolLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolStrokeColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
symbolStrokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolType: Optional[str | Parameter | SchemaBase | Map] = Undefined,
tickCount: Optional[
float | Parameter | SchemaBase | Map | TimeInterval_T
] = Undefined,
title: Optional[None] = Undefined,
titleAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
titleAnchor: Optional[Parameter | SchemaBase | Map | TitleAnchor_T] = Undefined,
titleBaseline: Optional[
Parameter | SchemaBase | Map | TextBaseline_T
] = Undefined,
titleColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
titleFont: Optional[str | Parameter | SchemaBase | Map] = Undefined,
titleFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
titleFontWeight: Optional[
Parameter | SchemaBase | Map | FontWeight_T
] = Undefined,
titleLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleLineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleOrient: Optional[Parameter | SchemaBase | Map | Orient_T] = Undefined,
titlePadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
unselectedOpacity: Optional[float] = Undefined,
zindex: Optional[float | Parameter | SchemaBase | Map] = Undefined,
**kwds,
):
super().__init__(
aria=aria,
clipHeight=clipHeight,
columnPadding=columnPadding,
columns=columns,
cornerRadius=cornerRadius,
description=description,
direction=direction,
disable=disable,
fillColor=fillColor,
gradientDirection=gradientDirection,
gradientHorizontalMaxLength=gradientHorizontalMaxLength,
gradientHorizontalMinLength=gradientHorizontalMinLength,
gradientLabelLimit=gradientLabelLimit,
gradientLabelOffset=gradientLabelOffset,
gradientLength=gradientLength,
gradientOpacity=gradientOpacity,
gradientStrokeColor=gradientStrokeColor,
gradientStrokeWidth=gradientStrokeWidth,
gradientThickness=gradientThickness,
gradientVerticalMaxLength=gradientVerticalMaxLength,
gradientVerticalMinLength=gradientVerticalMinLength,
gridAlign=gridAlign,
labelAlign=labelAlign,
labelBaseline=labelBaseline,
labelColor=labelColor,
labelFont=labelFont,
labelFontSize=labelFontSize,
labelFontStyle=labelFontStyle,
labelFontWeight=labelFontWeight,
labelLimit=labelLimit,
labelOffset=labelOffset,
labelOpacity=labelOpacity,
labelOverlap=labelOverlap,
labelPadding=labelPadding,
labelSeparation=labelSeparation,
layout=layout,
legendX=legendX,
legendY=legendY,
offset=offset,
orient=orient,
padding=padding,
rowPadding=rowPadding,
strokeColor=strokeColor,
strokeDash=strokeDash,
strokeWidth=strokeWidth,
symbolBaseFillColor=symbolBaseFillColor,
symbolBaseStrokeColor=symbolBaseStrokeColor,
symbolDash=symbolDash,
symbolDashOffset=symbolDashOffset,
symbolDirection=symbolDirection,
symbolFillColor=symbolFillColor,
symbolLimit=symbolLimit,
symbolOffset=symbolOffset,
symbolOpacity=symbolOpacity,
symbolSize=symbolSize,
symbolStrokeColor=symbolStrokeColor,
symbolStrokeWidth=symbolStrokeWidth,
symbolType=symbolType,
tickCount=tickCount,
title=title,
titleAlign=titleAlign,
titleAnchor=titleAnchor,
titleBaseline=titleBaseline,
titleColor=titleColor,
titleFont=titleFont,
titleFontSize=titleFontSize,
titleFontStyle=titleFontStyle,
titleFontWeight=titleFontWeight,
titleLimit=titleLimit,
titleLineHeight=titleLineHeight,
titleOpacity=titleOpacity,
titleOrient=titleOrient,
titlePadding=titlePadding,
unselectedOpacity=unselectedOpacity,
zindex=zindex,
**kwds,
)
| LegendConfig |
python | ray-project__ray | python/ray/train/v2/_internal/execution/context.py | {
"start": 2735,
"end": 3530
} | class ____:
"""Holds the execution context for the current worker process.
Every worker process has a single execution context accessed via the
`TrainContext`, which includes the training thread that is actually
running the user code.
"""
# A shared synchronization actor that helps broadcast data across ranks.
synchronization_actor: SynchronizationActor
# A queue that receives training results from the user training code.
# `ray.train.report` in user code populates this queue.
result_queue: Queue
# The thread launcher that runs the user training loop.
training_thread_runner: "ThreadRunner"
# The callbacks that are run in the worker train context.
train_context_callbacks: List["TrainContextCallback"]
@dataclass
| ExecutionContext |
python | pydata__xarray | xarray/tests/test_units.py | {
"start": 46678,
"end": 72845
} | class ____:
@pytest.mark.parametrize(
"func",
(
method("all"),
method("any"),
method("argmax", dim="x"),
method("argmin", dim="x"),
method("argsort"),
method("cumprod"),
method("cumsum"),
method("max"),
method("mean"),
method("median"),
method("min"),
method("prod"),
method("std"),
method("sum"),
method("var"),
),
ids=repr,
)
def test_aggregation(self, func, dtype):
array = np.linspace(0, 1, 10).astype(dtype) * (
unit_registry.m if func.name != "cumprod" else unit_registry.dimensionless
)
variable = xr.Variable("x", array)
numpy_kwargs = func.kwargs.copy()
if "dim" in func.kwargs:
numpy_kwargs["axis"] = variable.get_axis_num(numpy_kwargs.pop("dim"))
units = extract_units(func(array, **numpy_kwargs))
expected = attach_units(func(strip_units(variable)), units)
actual = func(variable)
assert_units_equal(expected, actual)
assert_allclose(expected, actual)
def test_aggregate_complex(self):
variable = xr.Variable("x", [1, 2j, np.nan] * unit_registry.m)
expected = xr.Variable((), (0.5 + 1j) * unit_registry.m)
actual = variable.mean()
assert_units_equal(expected, actual)
assert_allclose(expected, actual)
@pytest.mark.parametrize(
"func",
(
method("astype", np.float32),
method("conj"),
method("conjugate"),
method("clip", min=2, max=7),
),
ids=repr,
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_numpy_methods(self, func, unit, error, dtype):
array = np.linspace(0, 1, 10).astype(dtype) * unit_registry.m
variable = xr.Variable("x", array)
args = [
item * unit if isinstance(item, int | float | list) else item
for item in func.args
]
kwargs = {
key: value * unit if isinstance(value, int | float | list) else value
for key, value in func.kwargs.items()
}
if error is not None and func.name in ("searchsorted", "clip"):
with pytest.raises(error):
func(variable, *args, **kwargs)
return
converted_args = [
strip_units(convert_units(item, {None: unit_registry.m})) for item in args
]
converted_kwargs = {
key: strip_units(convert_units(value, {None: unit_registry.m}))
for key, value in kwargs.items()
}
units = extract_units(func(array, *args, **kwargs))
expected = attach_units(
func(strip_units(variable), *converted_args, **converted_kwargs), units
)
actual = func(variable, *args, **kwargs)
assert_units_equal(expected, actual)
assert_allclose(expected, actual)
@pytest.mark.parametrize(
"func", (method("item", 5), method("searchsorted", 5)), ids=repr
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_raw_numpy_methods(self, func, unit, error, dtype):
array = np.linspace(0, 1, 10).astype(dtype) * unit_registry.m
variable = xr.Variable("x", array)
args = [
(
item * unit
if isinstance(item, int | float | list) and func.name != "item"
else item
)
for item in func.args
]
kwargs = {
key: (
value * unit
if isinstance(value, int | float | list) and func.name != "item"
else value
)
for key, value in func.kwargs.items()
}
if error is not None and func.name != "item":
with pytest.raises(error):
func(variable, *args, **kwargs)
return
converted_args = [
(
strip_units(convert_units(item, {None: unit_registry.m}))
if func.name != "item"
else item
)
for item in args
]
converted_kwargs = {
key: (
strip_units(convert_units(value, {None: unit_registry.m}))
if func.name != "item"
else value
)
for key, value in kwargs.items()
}
units = extract_units(func(array, *args, **kwargs))
expected = attach_units(
func(strip_units(variable), *converted_args, **converted_kwargs), units
)
actual = func(variable, *args, **kwargs)
assert_units_equal(expected, actual)
assert_duckarray_allclose(expected, actual)
@pytest.mark.parametrize(
"func", (method("isnull"), method("notnull"), method("count")), ids=repr
)
def test_missing_value_detection(self, func):
array = (
np.array(
[
[1.4, 2.3, np.nan, 7.2],
[np.nan, 9.7, np.nan, np.nan],
[2.1, np.nan, np.nan, 4.6],
[9.9, np.nan, 7.2, 9.1],
]
)
* unit_registry.degK
)
variable = xr.Variable(("x", "y"), array)
expected = func(strip_units(variable))
actual = func(variable)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_missing_value_fillna(self, unit, error):
value = 10
array = (
np.array(
[
[1.4, 2.3, np.nan, 7.2],
[np.nan, 9.7, np.nan, np.nan],
[2.1, np.nan, np.nan, 4.6],
[9.9, np.nan, 7.2, 9.1],
]
)
* unit_registry.m
)
variable = xr.Variable(("x", "y"), array)
fill_value = value * unit
if error is not None:
with pytest.raises(error):
variable.fillna(value=fill_value)
return
expected = attach_units(
strip_units(variable).fillna(
value=fill_value.to(unit_registry.m).magnitude
),
extract_units(variable),
)
actual = variable.fillna(value=fill_value)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(
unit_registry.cm,
id="compatible_unit",
),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
@pytest.mark.parametrize(
"convert_data",
(
pytest.param(False, id="no_conversion"),
pytest.param(True, id="with_conversion"),
),
)
@pytest.mark.parametrize(
"func",
(
method("equals"),
pytest.param(
method("identical"),
marks=pytest.mark.skip(reason="behavior of identical is undecided"),
),
),
ids=repr,
)
def test_comparisons(self, func, unit, convert_data, dtype):
array = np.linspace(0, 1, 9).astype(dtype)
quantity1 = array * unit_registry.m
variable = xr.Variable("x", quantity1)
if convert_data and is_compatible(unit_registry.m, unit):
quantity2 = convert_units(array * unit_registry.m, {None: unit})
else:
quantity2 = array * unit
other = xr.Variable("x", quantity2)
expected = func(
strip_units(variable),
strip_units(
convert_units(other, extract_units(variable))
if is_compatible(unit_registry.m, unit)
else other
),
)
if func.name == "identical":
expected &= extract_units(variable) == extract_units(other)
else:
expected &= all(
compatible_mappings(
extract_units(variable), extract_units(other)
).values()
)
actual = func(variable, other)
assert expected == actual
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
def test_broadcast_equals(self, unit, dtype):
base_unit = unit_registry.m
left_array = np.ones(shape=(2, 2), dtype=dtype) * base_unit
value = (
(1 * base_unit).to(unit).magnitude if is_compatible(unit, base_unit) else 1
)
right_array = np.full(shape=(2,), fill_value=value, dtype=dtype) * unit
left = xr.Variable(("x", "y"), left_array)
right = xr.Variable("x", right_array)
units = {
**extract_units(left),
**({} if is_compatible(unit, base_unit) else {None: None}),
}
expected = strip_units(left).broadcast_equals(
strip_units(convert_units(right, units))
) & is_compatible(unit, base_unit)
actual = left.broadcast_equals(right)
assert expected == actual
@pytest.mark.parametrize("dask", [False, pytest.param(True, marks=[requires_dask])])
@pytest.mark.parametrize(
["variable", "indexers"],
(
pytest.param(
xr.Variable("x", np.linspace(0, 5, 10)),
{"x": 4},
id="single value-single indexer",
),
pytest.param(
xr.Variable("x", np.linspace(0, 5, 10)),
{"x": [5, 2, 9, 1]},
id="multiple values-single indexer",
),
pytest.param(
xr.Variable(("x", "y"), np.linspace(0, 5, 20).reshape(4, 5)),
{"x": 1, "y": 4},
id="single value-multiple indexers",
),
pytest.param(
xr.Variable(("x", "y"), np.linspace(0, 5, 20).reshape(4, 5)),
{"x": [0, 1, 2], "y": [0, 2, 4]},
id="multiple values-multiple indexers",
),
),
)
def test_isel(self, variable, indexers, dask, dtype):
if dask:
variable = variable.chunk(dict.fromkeys(variable.dims, 2))
quantified = xr.Variable(
variable.dims, variable.data.astype(dtype) * unit_registry.s
)
expected = attach_units(
strip_units(quantified).isel(indexers), extract_units(quantified)
)
actual = quantified.isel(indexers)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
@pytest.mark.parametrize(
"func",
(
function(lambda x, *_: +x, function_label="unary_plus"),
function(lambda x, *_: -x, function_label="unary_minus"),
function(lambda x, *_: abs(x), function_label="absolute"),
function(lambda x, y: x + y, function_label="sum"),
function(lambda x, y: y + x, function_label="commutative_sum"),
function(lambda x, y: x * y, function_label="product"),
function(lambda x, y: y * x, function_label="commutative_product"),
),
ids=repr,
)
def test_1d_math(self, func, unit, error, dtype):
base_unit = unit_registry.m
array = np.arange(5).astype(dtype) * base_unit
variable = xr.Variable("x", array)
values = np.ones(5)
y = values * unit
if error is not None and func.name in ("sum", "commutative_sum"):
with pytest.raises(error):
func(variable, y)
return
units = extract_units(func(array, y))
if all(compatible_mappings(units, extract_units(y)).values()):
converted_y = convert_units(y, units)
else:
converted_y = y
if all(compatible_mappings(units, extract_units(variable)).values()):
converted_variable = convert_units(variable, units)
else:
converted_variable = variable
expected = attach_units(
func(strip_units(converted_variable), strip_units(converted_y)), units
)
actual = func(variable, y)
assert_units_equal(expected, actual)
assert_allclose(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
@pytest.mark.parametrize(
"func", (method("where"), method("_getitem_with_mask")), ids=repr
)
def test_masking(self, func, unit, error, dtype):
base_unit = unit_registry.m
array = np.linspace(0, 5, 10).astype(dtype) * base_unit
variable = xr.Variable("x", array)
cond = np.array([True, False] * 5)
other = -1 * unit
if error is not None:
with pytest.raises(error):
func(variable, cond, other)
return
expected = attach_units(
func(
strip_units(variable),
cond,
strip_units(
convert_units(
other,
(
{None: base_unit}
if is_compatible(base_unit, unit)
else {None: None}
),
)
),
),
extract_units(variable),
)
actual = func(variable, cond, other)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.parametrize("dim", ("x", "y", "z", "t", "all"))
def test_squeeze(self, dim, dtype):
shape = (2, 1, 3, 1, 1, 2)
names = list("abcdef")
dim_lengths = dict(zip(names, shape, strict=True))
array = np.ones(shape=shape) * unit_registry.m
variable = xr.Variable(names, array)
kwargs = {"dim": dim} if dim != "all" and dim_lengths.get(dim, 0) == 1 else {}
expected = attach_units(
strip_units(variable).squeeze(**kwargs), extract_units(variable)
)
actual = variable.squeeze(**kwargs)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.parametrize("compute_backend", ["numbagg", None], indirect=True)
@pytest.mark.parametrize(
"func",
(
method("coarsen", windows={"y": 2}, func=np.mean),
method("quantile", q=[0.25, 0.75]),
pytest.param(
method("rank", dim="x"),
marks=pytest.mark.skip(reason="rank not implemented for non-ndarray"),
),
method("roll", {"x": 2}),
pytest.param(
method("rolling_window", "x", 3, "window"),
marks=pytest.mark.xfail(reason="converts to ndarray"),
),
method("reduce", np.std, "x"),
method("round", 2),
method("shift", {"x": -2}),
method("transpose", "y", "x"),
),
ids=repr,
)
def test_computation(self, func, dtype, compute_backend):
base_unit = unit_registry.m
array = np.linspace(0, 5, 5 * 10).reshape(5, 10).astype(dtype) * base_unit
variable = xr.Variable(("x", "y"), array)
expected = attach_units(func(strip_units(variable)), extract_units(variable))
actual = func(variable)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_searchsorted(self, unit, error, dtype):
base_unit = unit_registry.m
array = np.linspace(0, 5, 10).astype(dtype) * base_unit
variable = xr.Variable("x", array)
value = 0 * unit
if error is not None:
with pytest.raises(error):
variable.searchsorted(value) # type: ignore[attr-defined]
return
expected = strip_units(variable).searchsorted(
strip_units(convert_units(value, {None: base_unit}))
)
actual = variable.searchsorted(value) # type: ignore[attr-defined]
assert_units_equal(expected, actual)
np.testing.assert_allclose(expected, actual)
def test_stack(self, dtype):
array = np.linspace(0, 5, 3 * 10).reshape(3, 10).astype(dtype) * unit_registry.m
variable = xr.Variable(("x", "y"), array)
expected = attach_units(
strip_units(variable).stack(z=("x", "y")), extract_units(variable)
)
actual = variable.stack(z=("x", "y"))
assert_units_equal(expected, actual)
assert_identical(expected, actual)
def test_unstack(self, dtype):
array = np.linspace(0, 5, 3 * 10).astype(dtype) * unit_registry.m
variable = xr.Variable("z", array)
expected = attach_units(
strip_units(variable).unstack(z={"x": 3, "y": 10}), extract_units(variable)
)
actual = variable.unstack(z={"x": 3, "y": 10})
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_concat(self, unit, error, dtype):
array1 = (
np.linspace(0, 5, 9 * 10).reshape(3, 6, 5).astype(dtype) * unit_registry.m
)
array2 = np.linspace(5, 10, 10 * 3).reshape(3, 2, 5).astype(dtype) * unit
variable = xr.Variable(("x", "y", "z"), array1)
other = xr.Variable(("x", "y", "z"), array2)
if error is not None:
with pytest.raises(error):
xr.Variable.concat([variable, other], dim="y")
return
units = extract_units(variable)
expected = attach_units(
xr.Variable.concat(
[strip_units(variable), strip_units(convert_units(other, units))],
dim="y",
),
units,
)
actual = xr.Variable.concat([variable, other], dim="y")
assert_units_equal(expected, actual)
assert_identical(expected, actual)
def test_set_dims(self, dtype):
array = np.linspace(0, 5, 3 * 10).reshape(3, 10).astype(dtype) * unit_registry.m
variable = xr.Variable(("x", "y"), array)
dims = {"z": 6, "x": 3, "a": 1, "b": 4, "y": 10}
expected = attach_units(
strip_units(variable).set_dims(dims), extract_units(variable)
)
actual = variable.set_dims(dims)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
def test_copy(self, dtype):
array = np.linspace(0, 5, 10).astype(dtype) * unit_registry.m
other = np.arange(10).astype(dtype) * unit_registry.s
variable = xr.Variable("x", array)
expected = attach_units(
strip_units(variable).copy(data=strip_units(other)), extract_units(other)
)
actual = variable.copy(data=other)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
def test_no_conflicts(self, unit, dtype):
base_unit = unit_registry.m
array1 = (
np.array(
[
[6.3, 0.3, 0.45],
[np.nan, 0.3, 0.3],
[3.7, np.nan, 0.2],
[9.43, 0.3, 0.7],
]
)
* base_unit
)
array2 = np.array([np.nan, 0.3, np.nan]) * unit
variable = xr.Variable(("x", "y"), array1)
other = xr.Variable("y", array2)
expected = strip_units(variable).no_conflicts(
strip_units(
convert_units(
other, {None: base_unit if is_compatible(base_unit, unit) else None}
)
)
) & is_compatible(base_unit, unit)
actual = variable.no_conflicts(other)
assert expected == actual
@pytest.mark.parametrize(
"mode",
[
"constant",
"mean",
"median",
"reflect",
"edge",
"linear_ramp",
"maximum",
"minimum",
"symmetric",
"wrap",
],
)
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad(self, mode, xr_arg, np_arg):
data = np.arange(4 * 3 * 2).reshape(4, 3, 2) * unit_registry.m
v = xr.Variable(["x", "y", "z"], data)
expected = attach_units(
strip_units(v).pad(mode=mode, **xr_arg),
extract_units(v),
)
actual = v.pad(mode=mode, **xr_arg)
assert_units_equal(expected, actual)
assert_equal(actual, expected)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_pad_unit_constant_value(self, unit, error, dtype):
array = np.linspace(0, 5, 3 * 10).reshape(3, 10).astype(dtype) * unit_registry.m
variable = xr.Variable(("x", "y"), array)
fill_value = -100 * unit
func = method("pad", mode="constant", x=(2, 3), y=(1, 4))
if error is not None:
with pytest.raises(error):
func(variable, constant_values=fill_value)
return
units = extract_units(variable)
expected = attach_units(
func(
strip_units(variable),
constant_values=strip_units(convert_units(fill_value, units)),
),
units,
)
actual = func(variable, constant_values=fill_value)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
| TestVariable |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/rpc_test.py | {
"start": 160946,
"end": 181748
} | class ____(RpcAgentTestFixture, RpcTestCommon):
def test_mismatched_type_for_options(self):
# An exception should be raised if the options are not an instance of
# TensorPipeRpcBackendOptions.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(
TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`"
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc_backend_options,
)
def test_infer_backend_from_options(self):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.init_method, _transports=tp_transports()
)
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent)
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_set_and_get_num_worker_threads(self):
NUM_THREADS = 27
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=NUM_THREADS,
_transports=tp_transports(),
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_set_default_timeout(self):
# Set a high timeout since it doesn't affect test runtime and ensures
# the test doesn't erroneously timeout due to slow machines.
timeout = 100
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout,
_transports=tp_transports(),
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
default_timeout = rpc.get_rpc_timeout()
self.assertEqual(default_timeout, timeout)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_options_throw_on_timedelta_timeout(self):
from datetime import timedelta
timeout = timedelta()
# Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails
with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout,
)
@dist_init
def _test_rref_get_type_timeout(self, blocking):
# Test where we try to get the type of a RRef from an owner, but RRef
# creation is slower than timeout passed into _get_type.
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.5
expected_err = self.get_timeout_error_regex()
# Blocking: blocks on inline call
if blocking:
with self.assertRaisesRegex(RuntimeError, expected_err):
slow_rref._get_type(timeout=timeout, blocking=blocking)
# Non-blocking: blocks on wait
else:
fut = slow_rref._get_type(timeout=timeout, blocking=blocking)
with self.assertRaisesRegex(RuntimeError, expected_err):
fut.wait()
# FIXME We wait until the remote completed creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
def test_rref_get_type_timeout_blocking(self):
self._test_rref_get_type_timeout(blocking=True)
def test_rref_get_type_timeout_non_blocking(self):
self._test_rref_get_type_timeout(blocking=False)
    @dist_init
    def test_op_with_invalid_args(self):
        """Invoking an overloaded torch operator over RPC with no args fails schema matching."""
        dst = worker_name((self.rank + 1) % self.world_size)
        with self.assertRaisesRegex(
            RuntimeError,
            "Overloaded torch operator invoked from Python failed to match any schema",
        ):
            # torch.add requires at least two tensor/scalar arguments.
            rpc.rpc_sync(dst, torch.add, args=())
    def _test_rref_proxy_timeout(self, rref_proxy_api):
        """Helper: RRef proxy calls (rpc_sync/rpc_async/remote) honor their timeout.

        Covers two cases: a slow remote method on an already-created RRef, and
        a proxy call on an RRef whose owner-side creation itself exceeds the
        timeout (which blocks inside rref._get_type before a future exists).
        """
        dst_rank = (self.rank + 1) % self.world_size
        dst = worker_name(dst_rank)
        rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2),))
        # Ensure RRef is created on remote node.
        rref.to_here()
        rref_api = getattr(rref, rref_proxy_api)
        self.assertTrue(
            rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}"
        )
        expected_error = self.get_timeout_error_regex()
        timeout = 2
        with self.assertRaisesRegex(RuntimeError, expected_error):
            result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2))
            # rpc_sync raises inline; the async/remote variants raise on wait().
            if rref_api == rref.rpc_async:
                result.wait()
            elif rref_api == rref.remote:
                result._get_future().wait()
        # Case where rpc.remote() is stuck and exceeds timeout
        slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
        timeout = 0.01
        rref_api = getattr(slow_rref, rref_proxy_api)
        # Note that even when we call rref.rpc_async() in this case, we
        # time out in future creation, not waiting for future. This is because
        # rref proxy function calls rref._get_type before returning future,
        # which blocks on the RRef being created on owner node, until the
        # specified timeout.
        with self.assertRaisesRegex(RuntimeError, expected_error):
            result = rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2))
            # rpc_async returns immediately and surface a timeout through wait()
            if rref_api == slow_rref.rpc_async:
                result.wait()
        # FIXME We wait until the remote completed creating the OwnerRRef
        # because there's currently a race if we shut down RPC before that.
        slow_rref.to_here()
@dist_init
def test_rref_proxy_timeout(self):
for rpc_api in ["rpc_sync", "rpc_async", "remote"]:
self._test_rref_proxy_timeout(rpc_api)
@dist_init
def test_send_to_rank_sparse(self):
dst_rank = (self.rank + 1) % self.world_size
# Test sparse tensor
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
x = build_sparse_tensor()
y = build_sparse_tensor()
expected_tensor = x + y
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y))
self.assertEqual(expected_tensor, ret)
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
x = build_sparse_tensor(coalesce=True)
y = build_sparse_tensor(coalesce=True)
expected_tensor = x + y
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y))
self.assertEqual(expected_tensor, ret)
@dist_init
def test_self_py_udf_remote_sparse(self):
self._self_py_udf_remote(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
)
    @dist_init
    def test_self_remote_rref_as_rpc_arg_sparse(self):
        """Pass a locally-owned RRef as an RPC argument, with sparse tensors, to the next rank."""
        dst = worker_name((self.rank + 1) % self.world_size)
        self._self_remote_rref_as_rpc_arg(
            dst, build_sparse_tensor(), build_sparse_tensor(), build_sparse_tensor()
        )
@dist_init
def test_self_remote_rref_as_self_rpc_arg_sparse(self):
self._self_remote_rref_as_rpc_arg(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
)
    @dist_init
    def test_self_remote_rref_as_remote_arg_sparse(self):
        """Pass a locally-owned RRef as an rpc.remote() argument, with sparse tensors."""
        dst = worker_name((self.rank + 1) % self.world_size)
        self._self_remote_rref_as_remote_arg(
            dst, build_sparse_tensor(), build_sparse_tensor(), build_sparse_tensor()
        )
@dist_init
def test_self_remote_rref_as_self_remote_arg_sparse(self):
self._self_remote_rref_as_remote_arg(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
)
    def test_world_size_one_sparse(self):
        """Run the world-size-one helper with sparse tensor inputs."""
        self._world_size_one(build_sparse_tensor(), build_sparse_tensor())
    @dist_init
    def test_multi_rpc_sparse(self):
        """Run the multi-RPC helper in sparse mode (True selects sparse tensors)."""
        self._multi_rpc(True)
    def test_wait_all_workers_sparse(self):
        """wait-all-workers shutdown path with an in-flight sparse heavy RPC."""
        self._wait_all_workers(heavy_rpc_sparse, build_sparse_tensor())
    def test_wait_all_workers_twice_sparse(self):
        """Calling the wait-all-workers path twice with a sparse heavy RPC."""
        self._wait_all_workers_twice(heavy_rpc_sparse, build_sparse_tensor())
@dist_init
def test_py_sparse_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [build_sparse_tensor(), build_sparse_tensor()]
ret = rpc.rpc_sync(worker_name(dst_rank), my_container_sum, args=(a,))
self.assertEqual(ret, my_container_sum(a))
    @dist_init
    def test_nested_rpc_sparse(self):
        """Nested RPC (an RPC that issues another RPC) with sparse tensors."""
        self._nested_rpc(nested_rpc_sparse, build_sparse_tensor() * 2)
    @dist_init
    def test_stress_heavy_rpc_sparse(self):
        """Stress the RPC layer with 20 repeated heavy sparse-tensor calls."""
        self._stress_test_rpc(
            heavy_rpc_sparse, repeat=20, args=(build_sparse_tensor(),)
        )
    @dist_init
    def test_builtin_remote_ret_sparse(self):
        """Builtin op via rpc.remote with sparse args; expected result is x + y (i.e. 2x)."""
        self._builtin_remote_ret(
            build_sparse_tensor(), build_sparse_tensor(), build_sparse_tensor() * 2
        )
    @dist_init
    def test_builtin_remote_self_sparse(self):
        """Builtin op via rpc.remote to self with sparse args; expected result is x + y."""
        self._builtin_remote_self(
            build_sparse_tensor(), build_sparse_tensor(), build_sparse_tensor() * 2
        )
    @dist_init
    def test_multi_builtin_remote_ret_sparse(self):
        """Multiple concurrent remote builtin calls (positional args) in sparse mode."""
        self._test_multi_remote_call(torch.add, True, args_fn=RpcTest._multi_args_fn)
    @dist_init
    def test_multi_py_udf_remote_sparse(self):
        """Multiple concurrent remote Python-UDF calls (keyword args) in sparse mode."""
        self._test_multi_remote_call(
            my_function, True, kwargs_fn=RpcTest._multi_kwargs_fn
        )
    @dist_init
    def test_py_rref_args_sparse(self):
        """RRefs as UDF arguments with sparse tensors; expected sum of four inputs (4x)."""
        self._py_rref_args(
            build_sparse_tensor(),
            build_sparse_tensor(),
            build_sparse_tensor(),
            build_sparse_tensor(),
            build_sparse_tensor() * 4,
        )
    @dist_init
    def test_py_rref_args_user_share_sparse(self):
        """User-shared RRefs as UDF arguments; expected sum of six sparse inputs (6x)."""
        self._py_rref_args_user_share(
            build_sparse_tensor(),
            build_sparse_tensor(),
            build_sparse_tensor(),
            build_sparse_tensor(),
            build_sparse_tensor(),
            build_sparse_tensor(),
            build_sparse_tensor() * 6,
        )
    @dist_init
    def test_py_rpc_rref_args_sparse(self):
        """RRefs passed through rpc_sync as arguments; expected sum of six sparse inputs (6x)."""
        self._py_rpc_rref_args(
            build_sparse_tensor(),
            build_sparse_tensor(),
            build_sparse_tensor(),
            build_sparse_tensor(),
            build_sparse_tensor(),
            build_sparse_tensor(),
            build_sparse_tensor() * 6,
        )
    @dist_init
    def test_nested_remote_sparse(self):
        """Nested rpc.remote calls with sparse tensors; expects the sum of two inputs."""
        self._nested_remote(
            nested_remote_sparse, build_sparse_tensor() + build_sparse_tensor()
        )
    @dist_init
    def test_nested_rref_sparse(self):
        """Nested RRef creation with sparse tensors (both expected values are 2x)."""
        self._nested_rref(
            nested_rref_sparse, build_sparse_tensor() * 2, build_sparse_tensor() * 2
        )
    @dist_init
    def test_nested_rref_stress_sparse(self):
        """Stress variant of the nested-RRef test with sparse tensors."""
        self._nested_rref_stress(
            nested_rref_sparse, build_sparse_tensor() * 2, build_sparse_tensor() * 2
        )
    @dist_init
    def test_my_parameter_server_sparse(self):
        """Parameter-server scenario in sparse mode (True selects sparse tensors)."""
        self._my_parameter_server(True)
    # Test init_rpc without world_size argument
    @dist_init(setup_rpc=False)
    def test_dynamic_rpc_init_rpc(self):
        """init_rpc without world_size joins as a dynamic member and shuts down cleanly."""
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            rpc_backend_options=self.rpc_backend_options,
        )
        rpc.shutdown()
    # Dynamic RPC new ranks communicate with existing ranks
    @dist_init(setup_rpc=False)
    def test_dynamic_rpc_new_rank_can_communicated_with_existing_rank(self):
        """Dynamically-joined ranks can rpc_sync to a rank that joined earlier.

        A gloo process group is used only for the barriers that order the
        joins; rank 0 must be fully initialized before the others join.
        """
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        if self.rank == 0:
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                rpc_backend_options=self.rpc_backend_options,
            )
        # Rank 0 will be initialized with RPC after this barrier
        dist.barrier()
        if self.rank != 0:
            # Newly joined ranks will be able to communicate with rank 0, since that was created first
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                rpc_backend_options=self.rpc_backend_options,
            )
            result = rpc.rpc_sync(
                worker_name(0), torch.add, args=(torch.tensor(1), torch.tensor(1))
            )
            self.assertEqual(torch.add(torch.tensor(1), torch.tensor(1)), result)
        # Barrier to ensure that all rpc_sync calls are finished
        dist.barrier()
        rpc.shutdown()
    # Dynamic RPC existing ranks can communicate with new ranks
    @dist_init(setup_rpc=False)
    def test_dynamic_rpc_existing_rank_can_communicate_with_new_rank(self):
        """The first-joined rank can rpc_sync to every dynamically-joined rank.

        Barriers enforce the ordering: rank 0 initializes first, the rest join,
        then rank 0 issues RPCs to each of them.
        """
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        if self.rank == 0:
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                rpc_backend_options=self.rpc_backend_options,
            )
        # Rank 0 will be initialized with RPC after this barrier
        dist.barrier()
        # Rest of ranks join after barrier
        if self.rank != 0:
            # Newly joined ranks will be able to communicate with rank 0, since that was created first
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                rpc_backend_options=self.rpc_backend_options,
            )
        dist.barrier()
        if self.rank == 0:
            for i in range(1, self.world_size):
                result = rpc.rpc_sync(
                    worker_name(i), torch.add, args=(torch.tensor(1), torch.tensor(1))
                )
                self.assertEqual(torch.add(torch.tensor(1), torch.tensor(1)), result)
        # Barrier to ensure that all rpc_sync calls are finished
        dist.barrier()
        rpc.shutdown()
    # Dynamic RPC existing ranks can communicate with new ranks using CUDA rpc
    @skip_if_lt_x_gpu(2)
    @dist_init(setup_rpc=False)
    def test_dynamic_rpc_existing_rank_can_communicate_with_new_rank_cuda(self):
        """CUDA variant of the dynamic-join test; the actual CUDA RPC checks are
        currently disabled (see the TODO below), so this only exercises init,
        device-map setup, and shutdown."""
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        if self.rank == 0:
            options = self.rpc_backend_options
            for i in range(1, self.world_size):
                dst = worker_name(i)
                # Map local GPU 1 -> remote GPU 0 and local GPU 0 -> remote GPU 1.
                options.set_device_map(dst, {1: 0})
                options.set_device_map(dst, {0: 1})
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                rpc_backend_options=options,
            )
        # Rank 0 will be initialized with RPC after this barrier
        dist.barrier()
        # Rest of ranks join after barrier
        if self.rank != 0:
            # Newly joined ranks will be able to communicate with rank 0, since that was created first
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                rpc_backend_options=self.rpc_backend_options,
            )
        # TODO: Cuda RPC is failing due to:
        # terminate called after throwing an instance of 'c10::Error'
        # what():  0 <= device && static_cast<size_t>(device) < device_allocator.size()
        # INTERNAL ASSERT FAILED at "../c10/cuda/CUDACachingAllocator.cpp":1937,
        # please report a bug to PyTorch. Allocator not initialized for device 1: did you call init?
        # dist.barrier()
        # if self.rank == 0:
        #     for i in range(1, self.world_size):
        #         x = torch.ones(2)
        #         result_on_device_0 = rpc.rpc_sync(worker_name(i), torch.add, args=(x.to(0), 1))
        #         result_on_device_1 = rpc.rpc_sync(worker_name(i), torch.add, args=(x.to(1), 1))
        #         self.assertEqual(torch.add(torch.ones(2), 1), result_on_device_0)
        #         self.assertEqual(torch.device('cuda:0'), result_on_device_0.device)
        #         self.assertEqual(torch.add(torch.ones(2), 1), result_on_device_1)
        #         self.assertEqual(torch.device('cuda:1'), result_on_device_1.device)
        # Barrier to ensure that all rpc_sync calls are finished
        dist.barrier()
        rpc.shutdown()
    @dist_init(setup_rpc=False)
    def test_dynamic_rpc_init_rpc_without_rank(self):
        """Omitting rank raises a ValueError for file, env, and tcp init methods."""
        # default initialization uses file init
        with self.assertRaisesRegex(ValueError, "rank parameter missing"):
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rpc_backend_options=self.rpc_backend_options,
            )
        # env init
        with self.assertRaisesRegex(ValueError, "environment variable RANK expected"):
            rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method="env://")
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rpc_backend_options=rpc_backend_options,
            )
        # tcp init
        with self.assertRaisesRegex(ValueError, "rank parameter missing"):
            rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
                init_method="tcp://127.0.0.1:23456"
            )
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rpc_backend_options=rpc_backend_options,
            )
    @dist_init(setup_rpc=False)
    def test_dynamic_and_static_init_rpc_together(self):
        """A dynamic member cannot join an RPC group that was statically initialized."""
        # Initialize a static rpc group with size = self.world_size - 1
        dist.init_process_group(
            backend="gloo",
            init_method=self.file_init_method,
            rank=self.rank,
            world_size=self.world_size,
        )
        world_size_minus_one = self.world_size - 1
        if self.rank < world_size_minus_one:
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                world_size=world_size_minus_one,
                rpc_backend_options=self.rpc_backend_options,
            )
        dist.barrier()
        # Attempt to add an additional dynamic group member
        if self.rank == world_size_minus_one:
            # Expect error message to be thrown
            # NOTE(review): the backslash continuation embeds the next line's
            # leading whitespace into the regex pattern — verify this matches
            # the actual error message's whitespace.
            with self.assertRaisesRegex(
                RuntimeError,
                "RPC group mixes statically and dynamically\
 initialized members which is not supported.",
            ):
                rpc.init_rpc(
                    name=worker_name(self.rank),
                    backend=self.rpc_backend,
                    rank=self.rank,
                    rpc_backend_options=self.rpc_backend_options,
                )
| TensorPipeAgentRpcTest |
python | davidhalter__jedi | test/refactor.py | {
"start": 405,
"end": 3608
} | class ____(object):
def __init__(self, name, code, line_nr, index, path, kwargs, type_, desired_result):
self.name = name
self._code = code
self._line_nr = line_nr
self._index = index
self._path = path
self._kwargs = kwargs
self.type = type_
self._desired_result = desired_result
def get_desired_result(self):
if platform.system().lower() == 'windows' and self.type == 'diff':
# Windows uses backslashes to separate paths.
lines = split_lines(self._desired_result, keepends=True)
for i, line in enumerate(lines):
if re.search(' import_tree/', line):
lines[i] = line.replace('/', '\\')
return ''.join(lines)
return self._desired_result
@property
def refactor_type(self):
f_name = os.path.basename(self._path)
return f_name.replace('.py', '')
def refactor(self, environment):
project = jedi.Project(os.path.join(test_dir, 'refactor'))
script = jedi.Script(self._code, path=self._path, project=project, environment=environment)
refactor_func = getattr(script, self.refactor_type)
return refactor_func(self._line_nr, self._index, **self._kwargs)
def __repr__(self):
return '<%s: %s:%s>' % (self.__class__.__name__,
self.name, self._line_nr - 1)
def _collect_file_tests(code, path, lines_to_execute):
r = r'^# -{5,} ?([^\n]*)\n((?:(?!\n# \+{5,}).)*\n)' \
r'# \+{5,}\n((?:(?!\n# -{5,}).)*\n)'
match = None
for match in re.finditer(r, code, re.DOTALL | re.MULTILINE):
name = match.group(1).strip()
first = match.group(2)
second = match.group(3)
# get the line with the position of the operation
p = re.match(r'((?:(?!#\?).)*)#\? (\d*)( error| text|) ?([^\n]*)', first, re.DOTALL)
if p is None:
raise Exception("Please add a test start.")
continue
until = p.group(1)
index = int(p.group(2))
type_ = p.group(3).strip() or 'diff'
if p.group(4):
kwargs = eval(p.group(4))
else:
kwargs = {}
line_nr = until.count('\n') + 2
if lines_to_execute and line_nr - 1 not in lines_to_execute:
continue
yield RefactoringCase(name, first, line_nr, index, path, kwargs, type_, second)
if match is None:
raise Exception(f"Didn't match any test for {path}, {code!r}")
if match.end() != len(code):
raise Exception(f"Didn't match until the end of the file in {path}")
def collect_dir_tests(base_dir, test_files):
for f_name in os.listdir(base_dir):
files_to_execute = [a for a in test_files.items() if a[0] in f_name]
lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, [])
if f_name.endswith(".py") and (not test_files or files_to_execute):
path = os.path.join(base_dir, f_name)
with open(path, newline='') as f:
code = f.read()
for case in _collect_file_tests(code, path, lines_to_execute):
yield case
| RefactoringCase |
python | ipython__ipython | IPython/utils/text.py | {
"start": 12859,
"end": 14901
} | class ____(Formatter):
"""A String Formatter that allows evaluation of simple expressions.
Any time a format key is not found in the kwargs,
it will be tried as an expression in the kwargs namespace.
Note that this version allows slicing using [1:2], so you cannot specify
a format string. Use :class:`EvalFormatter` to permit format strings.
Examples
--------
::
In [1]: f = FullEvalFormatter()
In [2]: f.format('{n//4}', n=8)
Out[2]: '2'
In [3]: f.format('{list(range(5))[2:4]}')
Out[3]: '[2, 3]'
In [4]: f.format('{3*2}')
Out[4]: '6'
"""
# copied from Formatter._vformat with minor changes to allow eval
# and replace the format_spec code with slicing
def vformat(
self, format_string: str, args: Sequence[Any], kwargs: Mapping[str, Any]
) -> str:
result = []
conversion: Optional[str]
for literal_text, field_name, format_spec, conversion in self.parse(
format_string
):
# output the literal text
if literal_text:
result.append(literal_text)
# if there's a field, output it
if field_name is not None:
# this is some markup, find the object and do
# the formatting
if format_spec:
# override format spec, to allow slicing:
field_name = ':'.join([field_name, format_spec])
# eval the contents of the field for the object
# to be formatted
obj = eval(field_name, dict(kwargs))
# do any conversion on the resulting object
# type issue in typeshed, fined in https://github.com/python/typeshed/pull/11377
obj = self.convert_field(obj, conversion)
# format the object and append to the result
result.append(self.format_field(obj, ''))
return ''.join(result)
| FullEvalFormatter |
python | readthedocs__readthedocs.org | readthedocs/search/views.py | {
"start": 837,
"end": 1260
} | class ____(View):
"""
Search view of the ``search`` tab.
This redirects to the main search now.
Query params:
- q: search term
"""
http_method_names = ["get"]
def get(self, request, project_slug):
query = request.GET.get("q", "")
url = reverse("search") + "?" + urlencode({"q": f"project:{project_slug} {query}"})
return HttpResponseRedirect(url)
| ProjectSearchView |
python | apache__airflow | airflow-core/src/airflow/models/taskreschedule.py | {
"start": 1372,
"end": 3224
} | class ____(Base):
"""TaskReschedule tracks rescheduled task instances."""
__tablename__ = "task_reschedule"
id: Mapped[int] = mapped_column(Integer, primary_key=True)
ti_id: Mapped[str] = mapped_column(
String(36).with_variant(postgresql.UUID(as_uuid=False), "postgresql"),
ForeignKey("task_instance.id", ondelete="CASCADE", name="task_reschedule_ti_fkey"),
nullable=False,
)
start_date: Mapped[datetime.datetime] = mapped_column(UtcDateTime, nullable=False)
end_date: Mapped[datetime.datetime] = mapped_column(UtcDateTime, nullable=False)
duration: Mapped[int] = mapped_column(Integer, nullable=False)
reschedule_date: Mapped[datetime.datetime] = mapped_column(UtcDateTime, nullable=False)
task_instance = relationship(
"TaskInstance", primaryjoin="TaskReschedule.ti_id == foreign(TaskInstance.id)", uselist=False
)
def __init__(
self,
ti_id: uuid.UUID | str,
start_date: datetime.datetime,
end_date: datetime.datetime,
reschedule_date: datetime.datetime,
) -> None:
self.ti_id = str(ti_id)
self.start_date = start_date
self.end_date = end_date
self.reschedule_date = reschedule_date
self.duration = int((self.end_date - self.start_date).total_seconds())
@classmethod
def stmt_for_task_instance(
cls,
ti: TaskInstance,
*,
descending: bool = False,
) -> Select:
"""
Statement for task reschedules for a given task instance.
:param ti: the task instance to find task reschedules for
:param descending: If True then records are returned in descending order
:meta private:
"""
return select(cls).where(cls.ti_id == ti.id).order_by(desc(cls.id) if descending else asc(cls.id))
| TaskReschedule |
python | huggingface__transformers | src/transformers/models/arcee/modeling_arcee.py | {
"start": 21533,
"end": 21686
} | class ____(GenericForSequenceClassification, ArceePreTrainedModel):
pass
@auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
| ArceeForSequenceClassification |
python | ansible__ansible | lib/ansible/executor/task_queue_manager.py | {
"start": 3489,
"end": 4051
} | class ____(Exception):
def __init__(self, result):
self.result = result
def _resolve_callback_option_variables(callback: CallbackBase, variables: dict[str, object], templar: TemplateEngine) -> None:
"""Set callback plugin options using documented variables."""
callback_variables = {
var_name: variables[var_name]
for var_name in C.config.get_plugin_vars(callback.plugin_type, callback._load_name)
if var_name in variables
}
callback.set_options(var_options=templar.template(callback_variables))
| AnsibleEndPlay |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/pooling.py | {
"start": 14584,
"end": 19650
} | class ____(Pooling2D):
"""Max pooling operation for 2D spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the maximum value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output,
when using the `"valid"` padding option, has a spatial shape
(number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = math.floor((input_shape - 1) / strides) + 1`
For example, for `strides=(1, 1)` and `padding="valid"`:
>>> x = tf.constant([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = tf.reshape(x, [1, 3, 3, 1])
>>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(1, 1), padding='valid')
>>> max_pool_2d(x)
<tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy=
array([[[[5.],
[6.]],
[[8.],
[9.]]]], dtype=float32)>
For example, for `strides=(2, 2)` and `padding="valid"`:
>>> x = tf.constant([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = tf.reshape(x, [1, 3, 4, 1])
>>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(2, 2), padding='valid')
>>> max_pool_2d(x)
<tf.Tensor: shape=(1, 1, 2, 1), dtype=float32, numpy=
array([[[[6.],
[8.]]]], dtype=float32)>
Usage Example:
>>> input_image = tf.constant([[[[1.], [1.], [2.], [4.]],
... [[2.], [2.], [3.], [2.]],
... [[4.], [1.], [1.], [1.]],
... [[2.], [2.], [1.], [4.]]]])
>>> output = tf.constant([[[[1], [0]],
... [[0], [1]]]])
>>> model = tf.keras.models.Sequential()
>>> model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
... input_shape=(4, 4, 1)))
>>> model.compile('adam', 'mean_squared_error')
>>> model.predict(input_image, steps=1)
array([[[[2.],
[4.]],
[[4.],
[4.]]]], dtype=float32)
For example, for stride=(1, 1) and padding="same":
>>> x = tf.constant([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = tf.reshape(x, [1, 3, 3, 1])
>>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(1, 1), padding='same')
>>> max_pool_2d(x)
<tf.Tensor: shape=(1, 3, 3, 1), dtype=float32, numpy=
array([[[[5.],
[6.],
[6.]],
[[8.],
[9.],
[9.]],
[[8.],
[9.],
[9.]]]], dtype=float32)>
Args:
pool_size: integer or tuple of 2 integers,
window size over which to take the maximum.
`(2, 2)` will take the max value over a 2x2 pooling window.
If only one integer is specified, the same window length
will be used for both dimensions.
strides: Integer, tuple of 2 integers, or None.
Strides values. Specifies how far the pooling window moves
for each pooling step. If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`.
Returns:
A tensor of rank 4 representing the maximum pooled values. See above for
output shape.
"""
def __init__(self,
pool_size=(2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(MaxPooling2D, self).__init__(
nn.max_pool,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
| MaxPooling2D |
python | tensorflow__tensorflow | tensorflow/compiler/mlir/quantization/tensorflow/python/representative_dataset_test.py | {
"start": 1535,
"end": 9847
} | class ____(test.TestCase):
"""Tests functions for representative datasets."""
def _assert_tensorlike_all_close(
self,
sess: session.Session,
tensorlike_value_1: core.TensorLike,
tensorlike_value_2: core.TensorLike,
) -> None:
"""Asserts that two different TensorLike values are "all close".
Args:
sess: Session instance used to evaluate any tf.Tensors.
tensorlike_value_1: A TensorLike value.
tensorlike_value_2: A TensorLike value.
"""
if isinstance(tensorlike_value_1, core.Tensor):
tensorlike_value_1 = tensorlike_value_1.eval(session=sess)
if isinstance(tensorlike_value_2, core.Tensor):
tensorlike_value_2 = tensorlike_value_2.eval(session=sess)
self.assertAllClose(tensorlike_value_1, tensorlike_value_2)
def _assert_sample_values_all_close(
self,
sess: session.Session,
repr_ds_1: repr_dataset.RepresentativeDataset,
repr_ds_2: repr_dataset.RepresentativeDataset,
) -> None:
"""Asserts that the sample values are "all close" between the two datasets.
This assumes that the order of corresponding samples is preserved and the
size of the two datasets are equal.
Args:
sess: Session instance used to evaluate any tf.Tensors.
repr_ds_1: A RepresentativeDataset.
repr_ds_2: A RepresentativeDataset.
"""
for sample_1, sample_2 in zip(repr_ds_1, repr_ds_2):
self.assertCountEqual(sample_1.keys(), sample_2.keys())
for input_key in sample_1:
self._assert_tensorlike_all_close(
sess, sample_1[input_key], sample_2[input_key]
)
def test_not_implemented_saver(self):
with self.assertRaisesRegex(
NotImplementedError, '"save" is not implemented.'
):
repr_dataset.RepresentativeDatasetSaver().save(representative_dataset={})
def test_not_implemented_loader(self):
with self.assertRaisesRegex(
NotImplementedError, '"load" is not implemented.'
):
repr_dataset.RepresentativeDatasetLoader().load()
@test_util.deprecated_graph_mode_only
def test_replace_tensors_by_numpy_ndarrays_with_tensor_list(self):
num_samples = 8
samples = [
np.random.uniform(low=-1.0, high=1.0, size=(3, 3)).astype('f4')
for _ in range(num_samples)
]
repr_ds: repr_dataset.RepresentativeDataset = [
{
'input_tensor': ops.convert_to_tensor(sample),
}
for sample in samples
]
with self.session() as sess:
new_repr_ds = repr_dataset.replace_tensors_by_numpy_ndarrays(
repr_ds, sess
)
# The resulting dataset should not contain any tf.Tensors.
self.assertFalse(any(map(_contains_tensor, new_repr_ds)))
self._assert_sample_values_all_close(sess, repr_ds, new_repr_ds)
@test_util.deprecated_graph_mode_only
def test_replace_tensors_by_numpy_ndarrays_with_tensor_generator(self):
num_samples = 8
samples = [
np.random.uniform(low=-1.0, high=1.0, size=(1, 4)).astype('f4')
for _ in range(num_samples)
]
def data_gen() -> repr_dataset.RepresentativeDataset:
for sample in samples:
yield {'input_tensor': ops.convert_to_tensor(sample)}
with self.session() as sess:
new_repr_ds = repr_dataset.replace_tensors_by_numpy_ndarrays(
data_gen(), sess
)
# The resulting dataset should not contain any tf.Tensors.
self.assertFalse(any(map(_contains_tensor, new_repr_ds)))
self._assert_sample_values_all_close(sess, data_gen(), new_repr_ds)
@test_util.deprecated_graph_mode_only
def test_replace_tensors_by_numpy_ndarrays_is_noop_when_no_tensor(self):
# Fill the representative dataset with np.ndarrays only.
repr_ds: repr_dataset.RepresentativeDataset = [
{
'input_tensor': np.random.uniform(low=-1.0, high=1.0, size=(4, 3)),
}
for _ in range(8)
]
with self.session() as sess:
new_repr_ds = repr_dataset.replace_tensors_by_numpy_ndarrays(
repr_ds, sess
)
# The resulting dataset should not contain any tf.Tensors.
self.assertFalse(any(map(_contains_tensor, new_repr_ds)))
self._assert_sample_values_all_close(sess, repr_ds, new_repr_ds)
@test_util.deprecated_graph_mode_only
def test_replace_tensors_by_numpy_ndarrays_mixed_tensor_and_ndarray(self):
num_tensors = 4
samples = [
np.random.uniform(low=-1.0, high=1.0, size=(3, 3)).astype('f4')
for _ in range(num_tensors)
]
repr_ds: repr_dataset.RepresentativeDataset = [
{
'tensor_key': ops.convert_to_tensor(sample),
}
for sample in samples
]
# Extend the representative dataset with np.ndarrays.
repr_ds.extend([
{'tensor_key': np.random.uniform(low=-1.0, high=1.0, size=(3, 3))}
for _ in range(4)
])
random.shuffle(repr_ds)
with self.session() as sess:
new_repr_ds = repr_dataset.replace_tensors_by_numpy_ndarrays(
repr_ds, sess
)
# The resulting dataset should not contain any tf.Tensors.
self.assertFalse(any(map(_contains_tensor, new_repr_ds)))
self._assert_sample_values_all_close(sess, repr_ds, new_repr_ds)
def test_get_num_samples_returns_num_samples_when_list(self):
num_samples = 8
repr_ds = [
{'input': np.random.uniform(low=-1.0, high=1.0, size=(1, 2))}
for _ in range(num_samples)
]
self.assertEqual(repr_dataset.get_num_samples(repr_ds), num_samples)
def test_get_num_samples_returns_none_for_generator(self):
num_samples = 8
def data_gen() -> repr_dataset.RepresentativeDataset:
for _ in range(num_samples):
yield {
'input_tensor': np.random.uniform(low=-1.0, high=1.0, size=(1, 4))
}
repr_ds = data_gen()
self.assertIsNone(repr_dataset.get_num_samples(repr_ds))
# Make sure that the __next__() is never called during the
# get_num_samples call.
self.assertLen(list(repr_ds), num_samples)
def test_get_num_samples_returns_none_when_len_raises_error(self):
class LenRaisingError:
"""A test-only class that raises an error when len() is called.
This mocks the behavior of an Iterator whose size cannot be determined.
One example is `tf.data.Dataset` whose samples are generated by a
Generator.
"""
def __len__(self):
raise ValueError(
'You cannot take the len() of instance of LenRaisingError.'
)
self.assertIsNone(repr_dataset.get_num_samples(LenRaisingError()))
@test_util.deprecated_graph_mode_only
def test_create_feed_dict_from_input_data(self):
signature_def = meta_graph_pb2.SignatureDef(
inputs={'input_tensor': meta_graph_pb2.TensorInfo(name='input:0')}
)
rng = np.random.default_rng(seed=14)
input_tensor_value = rng.random(size=(2, 2))
sample = {'input_tensor': input_tensor_value}
feed_dict = repr_dataset.create_feed_dict_from_input_data(
sample, signature_def
)
self.assertLen(feed_dict, 1)
self.assertIn('input:0', feed_dict)
self.assertAllEqual(feed_dict['input:0'], input_tensor_value)
@test_util.deprecated_graph_mode_only
def test_create_feed_dict_from_input_data_core_tensors(self):
signature_def = meta_graph_pb2.SignatureDef(
inputs={'input_tensor': meta_graph_pb2.TensorInfo(name='input:0')}
)
with self.session():
input_tensor = constant_op.constant([1, 2, 3, 4, 5, 6])
sample = {'input_tensor': input_tensor}
feed_dict = repr_dataset.create_feed_dict_from_input_data(
sample, signature_def
)
input_tensor_data = input_tensor.eval()
self.assertLen(feed_dict, 1)
self.assertIn('input:0', feed_dict)
self.assertIsInstance(feed_dict['input:0'], np.ndarray)
self.assertAllEqual(feed_dict['input:0'], input_tensor_data)
@test_util.deprecated_graph_mode_only
def test_create_feed_dict_from_input_data_empty(self):
signature_def = meta_graph_pb2.SignatureDef(
inputs={'input_tensor': meta_graph_pb2.TensorInfo(name='input:0')}
)
sample = {}
feed_dict = repr_dataset.create_feed_dict_from_input_data(
sample, signature_def
)
self.assertEmpty(feed_dict)
| RepresentativeDatasetTest |
python | pytorch__pytorch | test/fx/test_gradual_type.py | {
"start": 4399,
"end": 40789
} | class ____(TestCase):
def test_type_check_add_with_broadcast(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2, 3, Dyn)), y: TensorType((2, 3, 4))):
return torch.add(x, y)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
tc.type_check()
expected_ph_types = [
TensorType((1, 2, 3, Dyn)),
TensorType((2, 3, 4)),
TensorType((1, 2, 3, Dyn)),
TensorType((1, 2, 3, Dyn)),
]
expected_iter = iter(expected_ph_types)
for n in symbolic_traced.graph.nodes:
if n.op == "call_function":
assert n.meta["broadcast"]
assert n.type == next(expected_iter)
def test_type_check_add_with_scalar(self):
class M(torch.nn.Module):
def forward(self, x: int, y: TensorType((2, 3, 4))):
return torch.add(x, y)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
tc.type_check()
expected_ph_types = [
int,
TensorType((2, 3, 4)),
TensorType((2, 3, 4)),
TensorType((2, 3, 4)),
]
expected_iter = iter(expected_ph_types)
for n in symbolic_traced.graph.nodes:
assert n.type == next(expected_iter)
def test_type_check_add_false(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2, 3, Dyn)), y: TensorType((1, 2, 3))):
return torch.add(x, y)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
with self.assertRaises(TypeError):
tc.type_check()
def test_type_check_add_true(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2, Dyn)), y: TensorType((1, 2, 3))):
return torch.add(x, y)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
self.assertTrue(tc.type_check())
expected_ph_types = [TensorType((1, 2, Dyn)), TensorType((1, 2, 3))]
expected_iter = iter(expected_ph_types)
for n in symbolic_traced.graph.nodes:
if n.op == "placeholder":
assert n.type == next(expected_iter)
if n.op == "output":
assert n.type == TensorType((1, 2, Dyn))
def test_type_check_reshape_true(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 6))):
return torch.reshape(x, [1, 2, 3])
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
self.assertTrue(tc.type_check())
for n in symbolic_traced.graph.nodes:
if n.op == "placeholder":
assert n.type == TensorType((1, 6))
if n.op == "call_function":
assert n.type == TensorType((1, 2, 3))
if n.op == "output":
assert n.type == TensorType((1, 2, 3))
def test_type_check_reshape_false(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 5))):
return torch.reshape(x, [1, 2, 3])
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
with self.assertRaises(TypeError):
tc.type_check()
def test_type_check_reshape_dyn_false(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 5))):
return torch.reshape(x, [1, 2, -1])
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
with self.assertRaises(TypeError):
tc.type_check()
def test_type_check_reshape_dyn_true(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 15))):
return torch.reshape(x, [1, 5, -1])
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
self.assertTrue(tc.type_check())
def test_type_check_reshape_dyn_true_param_false(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((Dyn, 5))):
return torch.reshape(x, [1, 2, -1])
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
with self.assertRaises(TypeError):
tc.type_check()
def test_type_check_transpose_true(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2, 3, 5))):
return torch.transpose(x, 0, 1)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
self.assertTrue(tc.type_check())
for n in symbolic_traced.graph.nodes:
if n.op == "call_function":
assert n.type == TensorType([2, 1, 3, 5])
if n.op == "output":
assert n.type == TensorType([2, 1, 3, 5])
if n.op == "x":
assert n.placeholder == TensorType([1, 2, 3, 5])
def test_type_check_transpose_False(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2, 3, 5))):
return torch.transpose(x, 0, 10)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
with self.assertRaises(TypeError):
tc.type_check()
def test_type_check_batch_norm_2D(self):
class BasicBlock(torch.nn.Module):
def __init__(self, inplanes, planes):
super().__init__()
norm_layer = torch.nn.BatchNorm2d
self.bn1 = norm_layer(planes)
def forward(self, x: TensorType((2, 2, 5, 4))):
identity = x
out: TensorType((2, 2, Dyn, 4)) = self.bn1(x)
out += identity
return out
B = BasicBlock(2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in graph.nodes:
if n.op == "placeholder":
assert n.type == TensorType((2, 2, 5, 4))
if n.op == "output":
assert n.type == TensorType((2, 2, 5, 4))
if n.op == "call_module":
assert n.type == TensorType((2, 2, 5, 4))
if n.op == "call_function":
assert n.type == TensorType((2, 2, 5, 4))
def test_type_check_batch_norm_2D_false(self):
class BasicBlock(torch.nn.Module):
def __init__(self, inplanes, planes):
super().__init__()
norm_layer = torch.nn.BatchNorm2d
self.bn1 = norm_layer(planes)
def forward(self, x: TensorType((2, 2, 5))):
identity = x
out: TensorType((2, 2, Dyn, 4)) = self.bn1(x)
out += identity
return out
B = BasicBlock(2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
with self.assertRaises(TypeError):
tc.type_check()
def test_type_check_batch_norm_2D_broadcast(self):
class BasicBlock(torch.nn.Module):
def __init__(self, inplanes, planes):
super().__init__()
norm_layer = torch.nn.BatchNorm2d
self.bn1 = norm_layer(planes)
def forward(self, x: Dyn):
identity = x
out: TensorType((2, 2, Dyn, 4)) = self.bn1(x)
out += identity
return out
B = BasicBlock(2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in graph.nodes:
if n.op == "placeholder":
assert n.type == TensorType((Dyn, Dyn, Dyn, Dyn))
if n.op == "call_function":
assert n.type == TensorType((Dyn, Dyn, Dyn, Dyn))
if n.op == "output":
assert n.type == TensorType((Dyn, Dyn, Dyn, Dyn))
if n.op == "call_module":
assert n.type == TensorType((2, 2, Dyn, 4))
B = BasicBlock(1, 1)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
with self.assertRaises(TypeError):
tc.type_check()
def test_type_check_conv2D(self):
class BasicBlock(torch.nn.Module):
def __init__(self, inplanes, planes, stride=1):
super().__init__()
norm_layer = torch.nn.BatchNorm2d
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
def forward(self, x: Dyn):
identity = x
out: TensorType((2, 2, Dyn, 4)) = self.conv1(x)
out += identity
return out
B = BasicBlock(2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in graph.nodes:
if n.op == "placeholder":
assert n.type == TensorType((Dyn, Dyn, Dyn, Dyn))
if n.op == "call_function":
assert n.type == TensorType((Dyn, Dyn, Dyn, Dyn))
if n.op == "output":
assert n.type == TensorType((Dyn, Dyn, Dyn, Dyn))
if n.op == "call_module":
assert n.type == TensorType((2, 2, Dyn, 4))
def test_type_check_conv2D_2(self):
class BasicBlock(torch.nn.Module):
def __init__(self, inplanes, planes, stride=1):
super().__init__()
norm_layer = torch.nn.BatchNorm2d
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
def forward(self, x: TensorType((5, 2, 3, 4))):
identity = x
out = self.conv1(x)
out += identity
return out
B = BasicBlock(2, 2)
b = B.forward(torch.rand(5, 2, 3, 4))
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
t = TensorType((5, 2, 3, 4))
for n in graph.nodes:
if n.op == "placeholder":
assert n.type == t
if n.op == "call_function":
assert n.type == t
if n.op == "output":
assert torch.Size(n.type.__args__) == b.shape
if n.op == "call_module":
assert n.type == t
B = BasicBlock(1, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
with self.assertRaises(TypeError):
tc.type_check()
def test_type_check_conv2D_2_fully_static(self):
annotation_list = [
(1, 2, 3, 5),
(2, 5, 6, 9),
(10, 15, 13, 14),
(10, Dyn, 13, 14),
(Dyn, Dyn, Dyn, 3),
]
input_list = [
(1, 2, 3, 5),
(2, 5, 6, 9),
(10, 15, 13, 14),
(10, 15, 13, 14),
(1, 2, 2, 3),
]
intermediate_types = [
(1, Dyn, Dyn, 7),
(2, Dyn, 4, 6),
(10, 15, Dyn, 5),
(10, 15, 7, 7),
(1, Dyn, Dyn, Dyn),
]
in_planes_list = [2, 5, 15, 15, 2]
stride_list = [1, 2, 3, 2, 2]
out_planes_list = [2, 5, 15, 15, 2]
groups_list = [1, 5, 5, 5, 2]
dilation_list = [1, 2, 3, 3, 3]
padding_list = [1, 2, 3, 3, 3]
kernel_size_list = [1, 2, 3, 3, 3]
output_types = [
(1, 2, Dyn, 7),
(2, 5, 4, 6),
(10, 15, Dyn, 5),
(10, 15, 7, 7),
(1, 2, Dyn, Dyn),
]
for i in range(5):
annotation = annotation_list[i]
input = input_list[i]
in_planes = in_planes_list[i]
stride = stride_list[i]
out_planes = out_planes_list[i]
groups = groups_list[i]
dilation = dilation_list[i]
padding = padding_list[i]
kernel_size = kernel_size_list[i]
intermediate_type = intermediate_types[i]
class BasicBlock(torch.nn.Module):
def __init__(
self,
in_planes,
out_planes,
kernel_size,
stride,
padding,
groups,
dilation,
):
super().__init__()
self.conv1 = torch.nn.Conv2d(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups,
bias=False,
dilation=dilation,
)
def forward(self, x):
out = self.conv1(x)
return out
B = BasicBlock(
in_planes, out_planes, kernel_size, stride, padding, groups, dilation
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
# annotate our argument
for n in graph.nodes:
if n.op == "placeholder":
n.type = TensorType(annotation)
b = B.forward(torch.rand(input))
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in graph.nodes:
if n.op == "output":
assert is_consistent(n.type, TensorType(b.size()))
# test with intermediate annotations
class BasicBlock(torch.nn.Module):
def __init__(
self,
in_planes,
out_planes,
kernel_size,
stride,
padding,
groups,
dilation,
):
super().__init__()
self.conv1 = torch.nn.Conv2d(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups,
bias=False,
dilation=dilation,
)
def forward(self, x):
out = self.conv1(x)
return out
B = BasicBlock(
in_planes, out_planes, kernel_size, stride, padding, groups, dilation
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
# populate our intermediate notes
for n in traced.graph.nodes:
if n.op == "call_module":
n.type = TensorType(intermediate_type)
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in traced.graph.nodes:
if n.op == "output":
assert n.type == TensorType(output_types[i])
assert is_consistent(n.type, TensorType(b.size()))
def test_typecheck_basicblock(self):
class BasicBlock(torch.nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
):
super().__init__()
norm_layer = torch.nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError(
"BasicBlock only supports groups=1 and base_width=64"
)
if dilation > 1:
raise NotImplementedError(
"Dilation > 1 not supported in BasicBlock"
)
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = torch.nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x: TensorType((2, 2, 4, 5))):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
B = BasicBlock(2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in traced.graph.nodes:
if n.target == "output":
assert isinstance(n.type, TensorType)
assert (
torch.Size(n.type.__args__)
== B.forward(torch.rand(2, 2, 4, 5)).size()
)
def test_type_check_conv2D_maxpool2d_flatten(self):
class BasicBlock(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = torch.nn.Conv2d(3, 6, 5)
self.pool = torch.nn.MaxPool2d(2, 2)
self.conv2 = torch.nn.Conv2d(6, 16, 5)
self.fc1 = torch.nn.Linear(5, 120)
self.pool2 = torch.nn.AdaptiveAvgPool2d((6, 7))
def forward(self, x: TensorType((4, 3, 32, 32))):
out = self.conv1(x)
out = self.pool(out)
out = self.conv2(out)
out = self.pool(out)
out = self.fc1(out)
out = self.pool2(out)
out = torch.flatten(out, 1)
return out
B = BasicBlock()
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
expected_ph_types = [
TensorType((4, 3, 32, 32)),
TensorType((4, 6, 28, 28)),
TensorType((4, 6, 14, 14)),
TensorType((4, 16, 10, 10)),
TensorType((4, 16, 5, 5)),
TensorType((4, 16, 5, 120)),
TensorType((4, 16, 6, 7)),
TensorType((4, 672)),
TensorType((4, 672)),
]
expected_iter = iter(expected_ph_types)
traced.graph.eliminate_dead_code()
for n in traced.graph.nodes:
assert n.type == next(expected_iter)
def test_type_check_flatten(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2, 3, 5, Dyn))):
return torch.flatten(x, 1, 2)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
tc.type_check()
for n in symbolic_traced.graph.nodes:
if n.op == "output":
assert n.type == TensorType((1, 6, 5, Dyn))
def test_type_check_flatten_2(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, Dyn, 3, 5, Dyn))):
return torch.flatten(x, 1, 2)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
tc.type_check()
for n in symbolic_traced.graph.nodes:
if n.op == "output":
assert n.type == TensorType((1, Dyn, 5, Dyn))
def test_type_check_flatten3(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((2, 3, 4, 5))):
return torch.flatten(x, start_dim=1, end_dim=3)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
tc.type_check()
for n in symbolic_traced.graph.nodes:
if n.op == "output":
assert n.type == TensorType((2, 60))
r = Refine(symbolic_traced)
r.refine()
c = r.constraints
assert c == [Equality(2, 2)]
def test_type_typechecl_maxpool2d_3dinput(self):
class BasicBlock(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.pool = torch.nn.MaxPool2d(5, 8)
def forward(self, x: TensorType((64, 8, 8))):
out = self.pool(x)
return out
B = BasicBlock()
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in traced.graph.nodes:
if n.target == "output":
assert n.type == TensorType((64, 1, 1))
def test_type_maxpool2d_fully_static(self):
annotation_list = [
(Dyn, Dyn, 3, 5),
(2, 5, 6, 9),
(10, 15, 13, 14),
(10, Dyn, 13, 14),
(Dyn, Dyn, Dyn, 10),
]
input_list = [
(1, 2, 3, 5),
(2, 5, 6, 9),
(10, 15, 13, 14),
(10, 15, 13, 14),
(2, 2, 10, 10),
]
intermediate_types = [
(1, 2, Dyn, Dyn),
(2, Dyn, 2, 4),
(10, 15, Dyn, 2),
(10, 15, 2, 3),
(2, Dyn, Dyn, Dyn),
]
stride_list = [1, 2, 3, 2, 1]
dilation_list = [1, 2, 3, 3, 2]
padding_list = [1, 2, 3, 3, 1]
kernel_size_list = [2, 4, 6, 6, 3]
output_types = [
(1, 2, 4, 6),
(2, 5, 2, 4),
(10, 15, 2, 2),
(10, 15, 2, 3),
(2, Dyn, Dyn, 8),
]
for i in range(5):
annotation = annotation_list[i]
input = input_list[i]
stride = stride_list[i]
dilation = dilation_list[i]
padding = padding_list[i]
kernel_size = kernel_size_list[i]
intermediate_type = intermediate_types[i]
class BasicBlock(torch.nn.Module):
def __init__(self, kernel_size, stride, padding, dilation):
super().__init__()
self.pool = torch.nn.MaxPool2d(
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
return_indices=False,
ceil_mode=False,
)
def forward(self, x):
out = self.pool(x)
return out
B = BasicBlock(kernel_size, stride, padding, dilation)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
# annotate our argument
for n in graph.nodes:
if n.op == "placeholder":
n.type = TensorType(annotation)
b = B.forward(torch.rand(input))
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in graph.nodes:
if n.op == "output":
assert is_consistent(n.type, TensorType(b.size()))
# test with intermediate annotations
class BasicBlock(torch.nn.Module):
def __init__(self, kernel_size, stride, padding, dilation):
super().__init__()
self.pool = torch.nn.MaxPool2d(
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
return_indices=False,
ceil_mode=False,
)
def forward(self, x):
out = self.pool(x)
return out
B = BasicBlock(kernel_size, stride, padding, dilation)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
# annotate our argument
for n in graph.nodes:
if n.op == "placeholder":
n.type = TensorType(annotation)
# populate our intermediate notes
for n in traced.graph.nodes:
if n.op == "call_module":
n.type = TensorType(intermediate_type)
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in traced.graph.nodes:
if n.op == "output":
assert n.type == TensorType(output_types[i])
assert is_consistent(n.type, TensorType(b.size()))
def test_flatten_fully_static(self):
annotation_list = [
Dyn,
TensorType((2, 5, 6, 9)),
TensorType((10, 15, 13, 14)),
TensorType((10, Dyn, 13, 14)),
TensorType((Dyn, Dyn, Dyn, 10)),
]
input_list = [
(1, 2, 3, 5),
(2, 5, 6, 9),
(10, 15, 13, 14),
(10, 15, 13, 14),
(2, 2, 10, 10),
]
intermediate_list = [ # noqa: F841
Dyn,
(2, 5, 6, 9),
(10, 15, 13, 14),
(10, 15, 13, 14),
(2, 2, 10, 10),
]
start_dim = [1, 2, 1, 2, 0]
end_dim = [1, 3, 3, 3, -2]
for i in range(5):
annotation = annotation_list[i]
input = input_list[i]
# intermediate_type = intermediate_list[i]
class BasicBlock(torch.nn.Module):
def __init__(self, start, end):
super().__init__()
self.start = start
self.end = end
def forward(self, x):
out = torch.flatten(x, self.start, self.end)
return out
B = BasicBlock(start_dim[i], end_dim[i])
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
# annotate our argument
for n in graph.nodes:
if n.op == "placeholder":
n.type = annotation
b = B.forward(torch.rand(input))
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in graph.nodes:
if n.op == "output":
assert is_consistent(n.type, TensorType(b.size()))
@skipIfNoTorchVision
def test_resnet50(self):
gm_run = symbolic_trace(resnet50())
sample_input = torch.randn(1, 3, 224, 224)
# run our nodes
ShapeProp(gm_run).propagate(sample_input)
gm_static = symbolic_trace(resnet50())
for n in gm_static.graph.nodes:
n.type = None
g = GraphTypeChecker({}, gm_static)
g.type_check()
gm_static.graph.eliminate_dead_code()
gm_run.graph.eliminate_dead_code()
# here we are checking for consistency with fully dynamic nodes
for n1, n2 in zip(gm_static.graph.nodes, gm_run.graph.nodes):
assert is_consistent(n1.type, TensorType(n2.meta["tensor_meta"].shape))
# here we give the same input as to runtime
gm_static_with_types = symbolic_trace(resnet50())
# we initialize our placeholder
for n in gm_static_with_types.graph.nodes:
if n.op == "placeholder":
n.type = TensorType((1, 3, 224, 224))
g = GraphTypeChecker({}, gm_static_with_types)
g.type_check()
for n1, n2 in zip(gm_static_with_types.graph.nodes, gm_run.graph.nodes):
assert n1.type == TensorType(n2.meta["tensor_meta"].shape)
# apply shape inference to graph and check
# that the batch size is equal across all layers
infer_symbolic_types(gm_static)
batch_sizes = set()
gm_static.graph.eliminate_dead_code()
for n in gm_static.graph.nodes:
assert isinstance(n.type, TensorType)
batch_sizes.add(n.type.__args__[0])
assert len(batch_sizes) == 1
def test_type_check_batch_norm_symbolic(self):
class BasicBlock(torch.nn.Module):
def __init__(self, inplanes, planes):
super().__init__()
norm_layer = torch.nn.BatchNorm2d
self.bn1 = norm_layer(planes)
def forward(self, x: Dyn):
identity = x
out: TensorType((2, 2, Dyn, 4)) = self.bn1(x)
out += identity
return out
B = BasicBlock(2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
infer_symbolic_types(traced)
my_types = iter(
[
TensorType[(2, 2, sympy.symbols("~7"), 4)],
TensorType[(2, 2, sympy.symbols("~7"), 4)],
TensorType[(2, 2, sympy.symbols("~7"), 4)],
TensorType[(2, 2, sympy.symbols("~7"), 4)],
]
)
for n in graph.nodes:
assert n.type == next(my_types)
def test_symbolic_add_with_broadcast(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2, 3, Dyn)), y: TensorType((2, 3, 4))):
return torch.add(x, y)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
tc.type_check()
infer_symbolic_types(symbolic_traced)
r = Refine(symbolic_traced)
r.refine()
assert r.constraints == [Equality(1, 1), Equality(2, 2), Equality(3, 3)]
# note that there is no equality constraint between dyn and 4 because
# dyn could be 4 or 1
infer_symbolic_types(symbolic_traced)
expected_ph_types = [
TensorType((1, 2, 3, sympy.symbols("~0"))),
TensorType((2, 3, 4)),
TensorType((1, 2, 3, sympy.symbols("~1"))),
TensorType((1, 2, 3, sympy.symbols("~1"))),
]
expected_iter = iter(expected_ph_types)
for n in symbolic_traced.graph.nodes:
assert n.type == next(expected_iter)
def test_symbolic_add_with_broadcast_2(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2)), y: TensorType((Dyn, 2))):
return torch.add(x, y)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
tc.type_check()
infer_symbolic_types(symbolic_traced)
r = Refine(symbolic_traced)
r.refine()
expected_ph_types = [
TensorType((1, 2)),
TensorType((sympy.symbols("~1"), 2)),
TensorType((sympy.symbols("~1"), 2)),
TensorType((sympy.symbols("~1"), 2)),
]
expected_iter = iter(expected_ph_types)
for n in symbolic_traced.graph.nodes:
assert n.type == next(expected_iter)
def test_type_check_conv2D_types(self):
class BasicBlock(torch.nn.Module):
def __init__(self, inplanes, planes, stride=1):
super().__init__()
norm_layer = torch.nn.BatchNorm2d
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
def forward(self, x: Dyn):
identity = x
out: TensorType((2, 2, Dyn, 4)) = self.conv1(x)
out += identity
return out
B = BasicBlock(2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
infer_symbolic_types(traced)
for n in traced.graph.nodes:
if n.op == "call_module":
assert isinstance(n.type.__args__[2], sympy.floor)
assert isinstance(n.type.__args__[3], sympy.floor)
def test_type_check_symbolic_inferenceconv2D_maxpool2d_flatten(self):
class BasicBlock(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = torch.nn.Conv2d(3, 6, 5)
self.pool = torch.nn.MaxPool2d(2, 2)
self.conv2 = torch.nn.Conv2d(6, 16, 5)
self.fc1 = torch.nn.Linear(5, 120)
self.pool2 = torch.nn.AdaptiveAvgPool2d((6, 7))
def forward(self, x: TensorType((4, 3, Dyn, Dyn))):
out = self.conv1(x)
out = self.pool(out)
out = self.conv2(out)
out = self.pool(out)
out = self.fc1(out)
out = self.pool2(out)
out = torch.flatten(out, 1)
return out
B = BasicBlock()
ast_rewriter = RewritingTracer() # noqa: F841
traced = symbolic_trace(B)
tc = GraphTypeChecker({}, traced)
tc.type_check()
infer_symbolic_types(traced)
for n in traced.graph.nodes:
if n.target == "conv1":
assert n.type == TensorType(
(
4,
6,
sympy.floor(sympy.symbols("~0") - 4),
sympy.floor(sympy.symbols("~1") - 4),
)
)
elif n.target == "conv2":
assert n.type == TensorType(
(
4,
16,
sympy.floor(sympy.symbols("~4") - 4),
sympy.floor(sympy.symbols("~5") - 4),
)
)
if __name__ == "__main__":
raise_on_run_directly("test/test_fx.py")
| TypeCheckerTest |
python | dagster-io__dagster | python_modules/dagster-pipes/dagster_pipes/__init__.py | {
"start": 30408,
"end": 33221
} | class ____(PipesMessageWriter):
"""Message writer that writes messages to either a file or the stdout or stderr stream.
The write location is configured by the params received by the writer. If the params include a
key `path`, then messages will be written to a file at the specified path. If the params instead
include a key `stdio`, then messages then the corresponding value must specify either `stderr`
or `stdout`, and messages will be written to the selected stream.
"""
FILE_PATH_KEY = "path"
STDIO_KEY = "stdio"
BUFFERED_STDIO_KEY = "buffered_stdio"
STDERR = "stderr"
STDOUT = "stdout"
INCLUDE_STDIO_IN_MESSAGES_KEY: str = "include_stdio_in_messages"
@contextmanager
def open(self, params: PipesParams) -> Iterator[PipesMessageWriterChannel]:
if self.FILE_PATH_KEY in params:
path = _assert_env_param_type(params, self.FILE_PATH_KEY, str, self.__class__)
channel = PipesFileMessageWriterChannel(path)
if params.get(self.INCLUDE_STDIO_IN_MESSAGES_KEY):
log_writer = PipesDefaultLogWriter(message_channel=channel)
maybe_open_log_writer = log_writer.open(
params.get(PipesLogWriter.LOG_WRITER_KEY, {})
)
else:
maybe_open_log_writer = nullcontext()
with maybe_open_log_writer:
yield channel
elif self.STDIO_KEY in params:
stream = _assert_env_param_type(params, self.STDIO_KEY, str, self.__class__)
if stream not in (self.STDERR, self.STDOUT):
raise DagsterPipesError(
f'Invalid value for key "std", expected "{self.STDERR}" or "{self.STDOUT}" but'
f" received {stream}"
)
target = sys.stderr if stream == self.STDERR else sys.stdout
yield PipesStreamMessageWriterChannel(target)
elif self.BUFFERED_STDIO_KEY in params:
stream = _assert_env_param_type(params, self.BUFFERED_STDIO_KEY, str, self.__class__)
if stream not in (self.STDERR, self.STDOUT):
raise DagsterPipesError(
f'Invalid value for key "std", expected "{self.STDERR}" or "{self.STDOUT}" but'
f" received {stream}"
)
target = sys.stderr if stream == self.STDERR else sys.stdout
channel = PipesBufferedStreamMessageWriterChannel(target)
try:
yield channel
finally:
channel.flush()
else:
raise DagsterPipesError(
f'Invalid params for {self.__class__.__name__}, expected key "path" or "std",'
f" received {params}"
)
| PipesDefaultMessageWriter |
python | tensorflow__tensorflow | tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py | {
"start": 170069,
"end": 172356
} | class ____(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_sum(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(
reduction_axes, np.ndarray
):
reduction_axes = tuple(reduction_axes)
return np.sum(x, axis=reduction_axes, keepdims=keepdims)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as _:
v = math_ops.reduce_sum([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
def testFloat32(self):
for _ in range(5):
size_x = int(2 ** np.random.uniform(0, 15))
size_y = int(2 ** np.random.uniform(0, 15))
if size_x * size_y > 1e7:
size_y = int(1e7 / size_x)
if size_x % 2:
size_x = size_x + 1
if size_y % 2:
size_y = size_y + 1
arr = np.ones([size_x, size_y], dtype=np.float32)
col_sum = np.sum(arr, axis=0)
row_sum = np.sum(arr, axis=1)
full_sum = np.sum(arr, axis=-1, keepdims=True)
with self.cached_session(use_gpu=True) as _:
tf_row_sum = self._tf_reduce(arr, 1, False)
tf_col_sum = self._tf_reduce(arr, 0, False)
tf_full_sum = self._tf_reduce(arr, -1, keepdims=True)
tf_out_col = self.evaluate(tf_col_sum)
tf_out_row = self.evaluate(tf_row_sum)
tf_out_full = self.evaluate(tf_full_sum)
self.assertAllClose(col_sum, tf_out_col)
self.assertAllClose(row_sum, tf_out_row)
self.assertAllClose(full_sum, tf_out_full)
for size_x in [4, 16, 32]:
for size_y in [4, 16, 32]:
for size_z in [4, 16, 32]:
arr = np.ones([size_x, size_y, size_z], dtype=np.float32)
sum_y = np.sum(arr, axis=1)
sum_xz = np.sum(arr, axis=(0, 2))
with self.cached_session(use_gpu=True) as _:
tf_sum_xz = self._tf_reduce(arr, [0, 2], False)
tf_sum_y = self._tf_reduce(arr, 1, False)
tf_out_sum_xz, tf_out_sum_y = self.evaluate([tf_sum_xz, tf_sum_y])
self.assertAllClose(sum_y, tf_out_sum_y)
self.assertAllClose(sum_xz, tf_out_sum_xz)
| SumReductionTest |
python | scikit-learn__scikit-learn | sklearn/decomposition/_sparse_pca.py | {
"start": 4804,
"end": 10770
} | class ____(_BaseSparsePCA):
"""Sparse Principal Components Analysis (SparsePCA).
Finds the set of sparse components that can optimally reconstruct
the data. The amount of sparseness is controllable by the coefficient
of the L1 penalty, given by the parameter alpha.
Read more in the :ref:`User Guide <SparsePCA>`.
Parameters
----------
n_components : int, default=None
Number of sparse atoms to extract. If None, then ``n_components``
is set to ``n_features``.
alpha : float, default=1
Sparsity controlling parameter. Higher values lead to sparser
components.
ridge_alpha : float, default=0.01
Amount of ridge shrinkage to apply in order to improve
conditioning when calling the transform method.
max_iter : int, default=1000
Maximum number of iterations to perform.
tol : float, default=1e-8
Tolerance for the stopping condition.
method : {'lars', 'cd'}, default='lars'
Method to be used for optimization.
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
U_init : ndarray of shape (n_samples, n_components), default=None
Initial values for the loadings for warm restart scenarios. Only used
if `U_init` and `V_init` are not None.
V_init : ndarray of shape (n_components, n_features), default=None
Initial values for the components for warm restart scenarios. Only used
if `U_init` and `V_init` are not None.
verbose : int or bool, default=False
Controls the verbosity; the higher, the more messages. Defaults to 0.
random_state : int, RandomState instance or None, default=None
Used during dictionary learning. Pass an int for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Sparse components extracted from the data.
error_ : ndarray
Vector of errors at each iteration.
n_components_ : int
Estimated number of components.
.. versionadded:: 0.23
n_iter_ : int
Number of iterations run.
mean_ : ndarray of shape (n_features,)
Per-feature empirical mean, estimated from the training set.
Equal to ``X.mean(axis=0)``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
PCA : Principal Component Analysis implementation.
MiniBatchSparsePCA : Mini batch variant of `SparsePCA` that is faster but less
accurate.
DictionaryLearning : Generic dictionary learning problem using a sparse code.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.decomposition import SparsePCA
>>> X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0)
>>> transformer = SparsePCA(n_components=5, random_state=0)
>>> transformer.fit(X)
SparsePCA(...)
>>> X_transformed = transformer.transform(X)
>>> X_transformed.shape
(200, 5)
>>> # most values in the components_ are zero (sparsity)
>>> np.mean(transformer.components_ == 0)
np.float64(0.9666)
"""
_parameter_constraints: dict = {
**_BaseSparsePCA._parameter_constraints,
"U_init": [None, np.ndarray],
"V_init": [None, np.ndarray],
}
def __init__(
    self,
    n_components=None,
    *,
    alpha=1,
    ridge_alpha=0.01,
    max_iter=1000,
    tol=1e-8,
    method="lars",
    n_jobs=None,
    U_init=None,
    V_init=None,
    verbose=False,
    random_state=None,
):
    # All shared estimator configuration is handled by the base class;
    # only the warm-start matrices U_init/V_init are specific to this
    # subclass (see _fit below).
    super().__init__(
        n_components=n_components,
        alpha=alpha,
        ridge_alpha=ridge_alpha,
        max_iter=max_iter,
        tol=tol,
        method=method,
        n_jobs=n_jobs,
        verbose=verbose,
        random_state=random_state,
    )
    # Stored unmodified (scikit-learn convention: __init__ only records
    # parameters); they are transposed and forwarded in `_fit`.
    self.U_init = U_init
    self.V_init = V_init
def _fit(self, X, n_components, random_state):
    """Specialized `fit` for SparsePCA.

    Solves the sparse-PCA problem as a dictionary-learning problem on the
    transposed data, so the learned sparse code becomes the components.
    """
    # V_init is documented above as (n_components, n_features); both
    # warm-start matrices are transposed to match the orientation
    # dict_learning expects when given X.T.
    code_init = self.V_init.T if self.V_init is not None else None
    dict_init = self.U_init.T if self.U_init is not None else None
    code, dictionary, E, self.n_iter_ = dict_learning(
        X.T,
        n_components,
        alpha=self.alpha,
        tol=self.tol,
        max_iter=self.max_iter,
        method=self.method,
        n_jobs=self.n_jobs,
        verbose=self.verbose,
        random_state=random_state,
        code_init=code_init,
        dict_init=dict_init,
        return_n_iter=True,
    )
    # flip eigenvectors' sign to enforce deterministic output
    code, dictionary = svd_flip(code, dictionary, u_based_decision=True)
    self.components_ = code.T
    # Normalize each component to unit L2 norm; all-zero components keep
    # norm 1 in the divisor to avoid division by zero.
    components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis]
    components_norm[components_norm == 0] = 1
    self.components_ /= components_norm
    self.n_components_ = len(self.components_)
    self.error_ = E
    return self
| SparsePCA |
python | numba__numba | numba/core/untyped_passes.py | {
"start": 73512,
"end": 74871
class ____(FunctionPass):
    """Replace existing raise statements by dynamic raises in Numba IR.

    For every `raise Exc(args)` whose exception value resolves to a call
    expression, the static Raise/TryRaise instruction is replaced with its
    Dynamic counterpart carrying the exception type and (possibly
    non-constant) argument values.
    """
    _name = "Rewrite dynamic raises"

    def __init__(self):
        FunctionPass.__init__(self)

    def run_pass(self, state):
        # Returns True iff at least one raise was rewritten, so the pass
        # manager knows whether the IR changed.
        func_ir = state.func_ir
        changed = False
        for block in func_ir.blocks.values():
            for raise_ in block.find_insts((ir.Raise, ir.TryRaise)):
                # The raised value must be defined by a call expression
                # (e.g. `raise Exc(args)`); otherwise leave the raise alone.
                call_inst = guard(get_definition, func_ir, raise_.exception)
                if call_inst is None:
                    continue
                # NOTE(review): infer_constant on the callee is unguarded —
                # presumably the exception class is always inferable as a
                # constant by this point; confirm.
                exc_type = func_ir.infer_constant(call_inst.func.name)
                exc_args = []
                for exc_arg in call_inst.args:
                    try:
                        const = func_ir.infer_constant(exc_arg)
                        exc_args.append(const)
                    except consts.ConstantInferenceError:
                        # Non-constant arguments are kept as IR values so
                        # the raise remains dynamic.
                        exc_args.append(exc_arg)
                loc = raise_.loc
                # Preserve the try/non-try flavor of the original raise.
                cls = {
                    ir.TryRaise: ir.DynamicTryRaise,
                    ir.Raise: ir.DynamicRaise,
                }[type(raise_)]
                dyn_raise = cls(exc_type, tuple(exc_args), loc)
                # Swap in place: insert the dynamic raise then drop the
                # original instruction.
                block.insert_after(dyn_raise, raise_)
                block.remove(raise_)
                changed = True
        return changed
| RewriteDynamicRaises |
python | plotly__plotly.py | plotly/graph_objs/indicator/title/_font.py | {
"start": 233,
"end": 9883
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "indicator.title"
_path_str = "indicator.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Set the font used to display the title
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.indicator.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.indicator.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.indicator.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | realpython__materials | queue/src/graph.py | {
"start": 178,
"end": 4375
class ____(NamedTuple):
    """Immutable record for one city node loaded from the DOT graph."""

    name: str
    country: str
    year: int | None
    latitude: float
    longitude: float

    @classmethod
    def from_dict(cls, attrs):
        """Build an instance from a DOT node's attribute mapping.

        Raises KeyError/ValueError if expected attributes are missing or
        not parseable.
        """
        return cls(
            name=attrs["xlabel"],
            country=attrs["country"],
            # `or None` maps a zero year to None, i.e. "0" in the data
            # means "year unknown".
            year=int(attrs["year"]) or None,
            latitude=float(attrs["latitude"]),
            longitude=float(attrs["longitude"]),
        )
def load_graph(filename, node_factory):
    """Read a DOT file and return (name -> node mapping, undirected graph).

    `node_factory` converts a DOT attribute dict into a node object
    (e.g. `City.from_dict`); edge attribute dicts are carried over as
    edge weights.
    """
    graph = nx.nx_agraph.read_dot(filename)
    nodes = {
        name: node_factory(attributes)
        for name, attributes in graph.nodes(data=True)
    }
    # Re-key the edges by the factory-built node objects instead of the
    # raw DOT node names.
    return nodes, nx.Graph(
        (nodes[name1], nodes[name2], weights)
        for name1, name2, weights in graph.edges(data=True)
    )
def breadth_first_traverse(graph, source, order_by=None):
    """Yield nodes reachable from `source` in breadth-first order.

    `order_by` optionally sorts each node's neighbors before they are
    enqueued, making the traversal order deterministic.
    """
    queue = Queue(source)
    visited = {source}
    while queue:
        yield (node := queue.dequeue())
        neighbors = list(graph.neighbors(node))
        if order_by:
            neighbors.sort(key=order_by)
        for neighbor in neighbors:
            if neighbor not in visited:
                # Mark on enqueue (not dequeue) so each node is queued
                # at most once.
                visited.add(neighbor)
                queue.enqueue(neighbor)
def breadth_first_search(graph, source, predicate, order_by=None):
    """Return the first node matching `predicate` in BFS order, else None."""
    return search(breadth_first_traverse, graph, source, predicate, order_by)
def shortest_path(graph, source, destination, order_by=None):
    """Return the fewest-hops path as a list of nodes, or None.

    Runs a BFS from `source`, recording each node's predecessor, and
    retraces the path as soon as `destination` is first enqueued.
    """
    queue = Queue(source)
    visited = {source}
    previous = {}
    while queue:
        node = queue.dequeue()
        neighbors = list(graph.neighbors(node))
        if order_by:
            neighbors.sort(key=order_by)
        for neighbor in neighbors:
            if neighbor not in visited:
                visited.add(neighbor)
                queue.enqueue(neighbor)
                previous[neighbor] = node
                if neighbor == destination:
                    return retrace(previous, source, destination)
    # Falls through (returns None) when destination is unreachable.
    # NOTE(review): source == destination also returns None here, since
    # the destination is never discovered as a neighbor — confirm intended.
def retrace(previous, source, destination):
    """Rebuild the source -> destination path from a predecessor map.

    Walks backwards from `destination` through `previous` and returns the
    path as a list, or None if the predecessor chain never reaches
    `source`.
    """
    path = [destination]
    while path[-1] != source:
        predecessor = previous.get(path[-1])
        if predecessor is None:
            # Broken chain: destination is not reachable from source.
            return None
        path.append(predecessor)
    path.reverse()
    return path
def connected(graph, source, destination):
    """True iff a path exists from `source` to `destination`."""
    return shortest_path(graph, source, destination) is not None
def depth_first_traverse(graph, source, order_by=None):
    """Yield nodes in depth-first order using an explicit stack.

    Neighbors are pushed in reverse so that, with `order_by`, they are
    popped (and therefore visited) in sorted order.
    """
    stack = Stack(source)
    visited = set()
    while stack:
        # Unlike BFS, a node may sit on the stack several times; skip
        # anything already yielded.
        if (node := stack.dequeue()) not in visited:
            yield node
            visited.add(node)
            neighbors = list(graph.neighbors(node))
            if order_by:
                neighbors.sort(key=order_by)
            for neighbor in reversed(neighbors):
                stack.enqueue(neighbor)
def recursive_depth_first_traverse(graph, source, order_by=None):
    """Yield nodes in depth-first pre-order using recursive generators.

    Neighbors are expanded in `order_by`-sorted order when a key function
    is supplied; each node is yielded exactly once.
    """
    seen = set()

    def _walk(current):
        yield current
        seen.add(current)
        adjacent = list(graph.neighbors(current))
        if order_by:
            adjacent.sort(key=order_by)
        for candidate in adjacent:
            if candidate not in seen:
                yield from _walk(candidate)

    return _walk(source)
def depth_first_search(graph, source, predicate, order_by=None):
    """Return the first node matching `predicate` in DFS order, else None."""
    return search(depth_first_traverse, graph, source, predicate, order_by)
def search(traverse, graph, source, predicate, order_by=None):
    """Return the first traversed node satisfying `predicate`, else None.

    `traverse` is a generator factory such as `breadth_first_traverse` or
    `depth_first_traverse`; traversal stops as soon as a match is found.
    """
    matches = (
        node for node in traverse(graph, source, order_by) if predicate(node)
    )
    return next(matches, None)
def dijkstra_shortest_path(graph, source, destination, weight_factory):
    """Return the minimum-weight path from `source` to `destination`.

    `weight_factory` maps an edge's attribute dict to a numeric weight.
    Returns None (via retrace) when `destination` is unreachable.
    """
    previous = {}
    visited = set()
    # Priority queue of tentative distances; decrease-key is done through
    # item assignment (MutableMinHeap is a project type).
    unvisited = MutableMinHeap()
    for node in graph.nodes:
        unvisited[node] = infinity
    unvisited[source] = 0
    while unvisited:
        # Settle the closest unvisited node.
        visited.add(node := unvisited.dequeue())
        for neighbor, weights in graph[node].items():
            if neighbor not in visited:
                weight = weight_factory(weights)
                # NOTE(review): reads unvisited[node] after node was
                # dequeued — assumes the heap still exposes settled
                # distances by key; confirm against MutableMinHeap.
                new_distance = unvisited[node] + weight
                if new_distance < unvisited[neighbor]:
                    unvisited[neighbor] = new_distance
                    previous[neighbor] = node
    return retrace(previous, source, destination)
| City |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/src/connectors_qa/checks/packaging.py | {
"start": 5175,
"end": 7462
class ____(PackagingCheck):
    """Check that metadata.yaml and pyproject.toml declare the same license."""

    name = f"Connector license in {consts.METADATA_FILE_NAME} and {consts.PYPROJECT_FILE_NAME} file must match"
    description = f"Connectors license in {consts.METADATA_FILE_NAME} and {consts.PYPROJECT_FILE_NAME} file must match. This is to ensure that all connectors are consistently licensed."
    # Only Python-based connectors are expected to ship a pyproject.toml.
    applies_to_connector_languages = [
        ConnectorLanguage.PYTHON,
        ConnectorLanguage.LOW_CODE,
    ]

    def _run(self, connector: Connector) -> CheckResult:
        # Fail fast on each precondition: missing metadata license,
        # missing pyproject, unparseable pyproject, missing poetry license.
        metadata_license = get(connector.metadata, "license")
        if metadata_license is None:
            return self.fail(
                connector=connector,
                message=f"License is missing in the {consts.METADATA_FILE_NAME} file",
            )
        if not (connector.code_directory / consts.PYPROJECT_FILE_NAME).exists():
            return self.fail(
                connector=connector,
                message=f"{consts.PYPROJECT_FILE_NAME} file is missing",
            )
        try:
            pyproject = toml.load((connector.code_directory / consts.PYPROJECT_FILE_NAME))
        except toml.TomlDecodeError:
            return self.fail(
                connector=connector,
                message=f"{consts.PYPROJECT_FILE_NAME} is invalid toml file",
            )
        poetry_license = get(pyproject, "tool.poetry.license")
        if poetry_license is None:
            return self.fail(
                connector=connector,
                message=f"Connector is missing license in {consts.PYPROJECT_FILE_NAME}. Please add it",
            )
        # Case-insensitive comparison: "MIT" and "mit" are the same license.
        if poetry_license.lower() != metadata_license.lower():
            return self.fail(
                connector=connector,
                message=f"Connector is licensed under {poetry_license} in {consts.PYPROJECT_FILE_NAME}, but licensed under {metadata_license} in {consts.METADATA_FILE_NAME}. These two files have to be consistent",
            )
        return self.pass_(
            connector=connector,
            message=f"License in {consts.METADATA_FILE_NAME} and {consts.PYPROJECT_FILE_NAME} file match",
        )
# TODO if more metadata.yaml to pyproject.toml field matching has to be done then create a generic class for this type of checks
| CheckConnectorLicenseMatchInPyproject |
python | google__jax | jax/experimental/mosaic/gpu/layout_inference.py | {
"start": 18017,
"end": 61009
class ____:
  """Holds context information used for deriving a constraint system."""

  # A map of `ValueSite` to the variable that it is associated with.
  variable_for_value_site: dict[ValueSite, cs.Variable] = dataclasses.field(
      default_factory=dict, init=False
  )
  # A map of `cs.Variable` to all the `ValueSite`s that it is associated with.
  value_sites_for_variable: ValueSitesForVariable = (
      dataclasses.field(default_factory=dict, init=False)
  )

  def update(self, mapping: ValueSitesForVariable) -> None:
    """Merges `mapping` into both indices, keeping them in sync.

    Value-site lists for an already-known variable are extended; a value
    site must never be registered under two variables (asserted below).
    """
    for variable, value_sites in mapping.items():
      if variable in self.value_sites_for_variable:
        self.value_sites_for_variable[variable].extend(value_sites)
      else:
        self.value_sites_for_variable[variable] = value_sites
      for value_site in value_sites:
        assert value_site not in self.variable_for_value_site
        self.variable_for_value_site[value_site] = variable

  def producer_ref(self, operand: ValueSite) -> cs.Variable:
    """Returns the producer reference variable for the given operand."""
    # Resolves the operand to the result site that produced it
    # (`producer_result` is a module-level helper) and looks up its
    # registered variable.
    return self.variable_for_value_site[producer_result(operand)]
ValueSitesForVariable = dict[cs.Variable, list[ValueSite]]
# A constraint system derivation rule is a function that takes an MLIR operation
# and returns a constraint system, a mapping from variables to value site
# identifiers, and a list of hints.
#
# The intended meaning of the mapping is that, for each identifier in the list
# keyed by a given variable, the MLIR operand/result/argument corresponding to
# that identifier has the same layout as the variable.
#
# A `ConstraintSystemDerivationRule` must return a mapping such that the
# identifier corresponding to each value site must appear in the mapping,
# and each identifier in the mapping must be keyed by exactly one variable.
# Lastly, the mapping must only refer to variables and
# operands/results/arguments that correspond to the given operation.
ConstraintSystemDerivationRule = Callable[
[DerivationContext, ir.OpView],
tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]],
]
_constraint_system_derivation_rules: dict[
str, ConstraintSystemDerivationRule
] = {}
def _add_constraint_system_derivation_rule(op: type[ir.OpView]):
  """Decorator that registers a derivation rule for the given op class.

  The rule is stored under the op's OPERATION_NAME and returned unchanged,
  so the decorated function remains directly callable.
  """
  def wrapper(rule: ConstraintSystemDerivationRule):
    if op is not None:
      _constraint_system_derivation_rules[op.OPERATION_NAME] = rule  # pytype: disable=attribute-error
    return rule
  return wrapper
def is_vector(v: ir.Value) -> bool:
  """Returns True iff `v` has an MLIR vector type."""
  return ir.VectorType.isinstance(v.type)
def _is_smem_ref(v: ir.Value) -> bool:
  """Returns True iff `v` is a memref living in shared memory (SMEM)."""
  return ir.MemRefType.isinstance(v.type) and utils.is_smem_ref(v)
def _is_tmem_ref(v: ir.Value) -> bool:
  """Returns True iff `v` is a memref living in tensor memory (TMEM)."""
  return ir.MemRefType.isinstance(v.type) and utils.is_tmem_ref(v)
def _pointwise_op_constraint_system(
    ctx: DerivationContext,
    op: ir.OpView,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Constraint system for elementwise ops: all vector operands and results
  share a single layout variable.

  The variable is keyed on the last value site (the result); no extra
  constraints or hints are needed.
  """
  del ctx
  all_value_sites = vector_value_sites(op)
  variable = cs.Variable(all_value_sites[-1])
  return cs.ConstraintSystem(), {variable: all_value_sites}, []
for op in [
arith.AddIOp,
arith.AddFOp,
arith.AndIOp,
arith.BitcastOp,
arith.CmpFOp,
arith.CmpIOp,
arith.ExtFOp,
arith.ExtSIOp,
arith.ExtUIOp,
arith.FPToSIOp,
arith.FPToUIOp,
arith.MaximumFOp,
arith.MaxUIOp,
arith.MaxSIOp,
arith.MinimumFOp,
arith.MinUIOp,
arith.MinSIOp,
arith.MulIOp,
arith.MulFOp,
arith.OrIOp,
arith.FloorDivSIOp,
arith.DivUIOp,
arith.DivFOp,
arith.RemUIOp,
arith.RemSIOp,
arith.RemFOp,
arith.SIToFPOp,
arith.UIToFPOp,
arith.SubIOp,
arith.SubFOp,
arith.TruncFOp,
arith.TruncIOp,
arith.XOrIOp,
mlir_math.ExpOp,
mlir_math.Exp2Op,
mlir_math.LogOp,
mlir_math.RsqrtOp,
mlir_math.TanhOp,
]:
_add_constraint_system_derivation_rule(op)(_pointwise_op_constraint_system)
@_add_constraint_system_derivation_rule(mgpu.VectorLoadOp)
def _vector_load_constraint_system(
ctx: DerivationContext,
op: mgpu.VectorLoadOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
# TODO(b/447079781): Investigate whether we should check for contiguous
# strides here. An initial implementation of this failed the
# test_gmem_to_smem_with_multiple_smem_indexers_and_transforms test, but
# we should confirm that this is properly supported.
# Registers
dest = ValueSite(op, VariableType.RESULT, 0)
dest_var = cs.Variable(dest)
value_sites_for_variable = {dest_var: [dest]}
constraints = [cs.NotOfType(dest_var, fa.WGSplatFragLayout)]
# SMEM
if utils.is_smem_ref(op.source):
source = ValueSite(op, VariableType.OPERAND, 0)
source_var = ctx.producer_ref(source)
value_sites_for_variable[source_var] = [source]
shape = tuple(ir.MemRefType(op.source.type).shape)
constraints.append(cs.IsTransferable(source_var, dest_var, shape))
system = cs.ConstraintSystem(constraints=constraints)
return system, value_sites_for_variable, []
@_add_constraint_system_derivation_rule(mgpu.VectorStoreOp)
def _vector_store_constraint_system(
ctx: DerivationContext,
op: mgpu.VectorStoreOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
# TODO(b/447079781): Investigate whether we should check for contiguous
# strides here. An initial implementaiton of this failed the
# test_gmem_to_smem_with_multiple_smem_indexers_and_transforms test, but
# we should confirm that this is properly supported.
# Registers
value = ValueSite(op, VariableType.OPERAND, 0)
value_var = cs.Variable(value)
value_sites_for_variable = {value_var: [value]}
# SMEM
constraints = []
if utils.is_smem_ref(op.destination):
dest = ValueSite(op, VariableType.OPERAND, 1)
dest_var = ctx.producer_ref(dest)
value_sites_for_variable[dest_var] = [dest]
shape = tuple(ir.MemRefType(op.destination.type).shape)
constraints.append(cs.IsTransferable(value_var, dest_var, shape))
system = cs.ConstraintSystem(constraints=constraints)
return system, value_sites_for_variable, []
@_add_constraint_system_derivation_rule(mgpu.DebugPrintOp)
def _debug_print_constraint_system(
ctx: DerivationContext,
op: mgpu.DebugPrintOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
del ctx
value = ValueSite(op, VariableType.OPERAND, 0)
return cs.ConstraintSystem(), {cs.Variable(value): [value]}, []
@_add_constraint_system_derivation_rule(mgpu.PrintLayoutOp)
def _print_layout_constraint_system(
ctx: DerivationContext,
op: mgpu.PrintLayoutOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
value = ValueSite(op, VariableType.OPERAND, 0)
var = cs.Variable(value) if is_vector(op.value) else ctx.producer_ref(value)
return cs.ConstraintSystem(), {var: [value]}, []
@_add_constraint_system_derivation_rule(mgpu.BroadcastedIotaOp)
def _broadcasted_iota_constraint_system(
ctx: DerivationContext,
op: mgpu.BroadcastedIotaOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
del ctx
value = ValueSite(op, VariableType.RESULT, 0)
var = cs.Variable(value)
constraints = [cs.NotOfType(var, fa.WGSplatFragLayout)]
return cs.ConstraintSystem(constraints=constraints), {var: [value]}, []
@_add_constraint_system_derivation_rule(mgpu.OptimizationBarrierOp)
def _optimization_barrier_constraint_system(
ctx: DerivationContext,
op: ir.OpView,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
del ctx
value_sites_for_variable: ValueSitesForVariable = {}
for i, operand in enumerate(op.operands):
if not is_vector(operand):
continue
variable = cs.Variable(ValueSite(op, VariableType.OPERAND, i))
value_sites_for_variable[variable] = [
ValueSite(op, VariableType.OPERAND, i),
ValueSite(op, VariableType.RESULT, i)
]
return cs.ConstraintSystem(), value_sites_for_variable, []
@_add_constraint_system_derivation_rule(vector.BroadcastOp)
def _vector_splat_constraint_system(
ctx: DerivationContext,
op: ir.OpView,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
del ctx
result = ValueSite(op, VariableType.RESULT, 0)
variable = cs.Variable(result)
layout = fa.WGSplatFragLayout(tuple(cast(ir.ShapedType, op.result.type).shape))
system = cs.ConstraintSystem(
assignments={variable: cs.RegisterLayout(layout)}
)
return system, {variable: [result]}, []
@_add_constraint_system_derivation_rule(arith.ConstantOp)
def _constant_constraint_system(
ctx: DerivationContext,
constant_op: arith.ConstantOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
del ctx
value = constant_op.value
result = ValueSite(constant_op, VariableType.RESULT, 0)
variable = cs.Variable(result)
shape = tuple(ir.ShapedType(constant_op.result.type).shape)
if (
ir.DenseElementsAttr.isinstance(value)
and ir.DenseElementsAttr(value).is_splat
):
layout = fa.WGSplatFragLayout(shape=shape)
system = cs.ConstraintSystem(
assignments={variable: cs.RegisterLayout(layout)}
)
else:
constant_is_not_splat = cs.NotOfType(variable, fa.WGSplatFragLayout)
system = cs.ConstraintSystem(constraints=[constant_is_not_splat])
return system, {variable: [result]}, []
def _terminator(
    block: ir.Block, expected_terminator: type[ir.OpView]
) -> ir.OpView:
  """Returns the terminator of the given block.

  Checks that the terminator is of the expected type.
  """
  # The terminator is by definition the last operation in the block.
  terminator = block.operations[len(block.operations) - 1]
  assert isinstance(terminator, expected_terminator)
  return terminator.opview
@_add_constraint_system_derivation_rule(scf.ForOp)
def _for_constraint_system(
ctx: DerivationContext,
op: scf.ForOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
[block] = op.region.blocks
yield_op = _terminator(block, scf.YieldOp)
value_sites_for_variable: ValueSitesForVariable = {}
# Account for the lower bound, upper bound, and step of the loop, which appear
# in the operands but not in the results.
num_leading_args = 3
for index, o in enumerate(op.operands):
if not is_vector(o) and not _is_smem_ref(o):
continue
result_index = index - num_leading_args
arg_index = index - num_leading_args + 1 # Account for the induction var.
operand = ValueSite(op, VariableType.OPERAND, index)
arg = ValueSite(op, VariableType.ARGUMENT, arg_index, region_index=0)
result = ValueSite(op, VariableType.RESULT, result_index)
yield_operand = ValueSite(
yield_op, VariableType.OPERAND, result_index
)
var = cs.Variable(operand) if is_vector(o) else ctx.producer_ref(operand)
value_sites_for_variable[var] = [operand, arg, result, yield_operand]
return cs.ConstraintSystem(), value_sites_for_variable, []
def prime_decomposition(n: int) -> list[int]:
  """Returns the prime factorization of `n` as a list of ints.

  Each prime appears as many times as its multiplicity, in ascending
  order (e.g. 12 -> [2, 2, 3]). Intended for small `n`, where trial
  division is plenty fast.
  """
  factors = []
  candidate = 2
  remaining = n
  while candidate * candidate <= remaining:
    if remaining % candidate:
      candidate += 1
    else:
      remaining //= candidate
      factors.append(candidate)
  # Whatever is left above sqrt(original remaining) is itself prime.
  if remaining != 1:
    factors.append(remaining)
  return factors
# TODO(bchetioui): let's see if we need to parametrize this by depth.
def dynamic_gcd(a: int, b: ir.Value) -> int:
  """Conservatively computes gcd(a, b) for a dynamic MLIR integer `b`.

  If `b` is a compile-time constant the exact gcd is returned. Otherwise
  the result is the largest divisor of `a` that `b` is statically known to
  be divisible by (per `utils.is_known_divisible`), a valid divisor of the
  true gcd.
  """
  if a <= 0:
    raise ValueError("a must be strictly positive")
  if not ir.IntegerType.isinstance(b.type) and not ir.IndexType.isinstance(b.type):
    raise ValueError(f"Expected an integer dynamic value, got a {b.type}")
  if isinstance(b.owner, ir.Operation) and isinstance(b.owner.opview, arith.ConstantOp):
    return math.gcd(a, b.owner.opview.literal_value)
  running_gcd = 1
  # Grow the divisor one prime factor at a time; each step only needs a
  # single divisibility fact about `b`.
  for factor in prime_decomposition(a):
    if utils.is_known_divisible(b, running_gcd * factor):
      running_gcd *= factor
  return running_gcd
@_add_constraint_system_derivation_rule(scf.WhileOp)
def _while_constraint_system(
ctx: DerivationContext,
op: scf.WhileOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
del ctx
[before_block] = op.before.blocks
[after_block] = op.after.blocks
cond_op = _terminator(before_block, scf.ConditionOp)
yield_op = _terminator(after_block, scf.YieldOp)
value_sites_for_variable: ValueSitesForVariable = {}
for value_site in vector_value_sites(op):
idx = value_site.index
match value_site.type:
case VariableType.OPERAND:
arg = ValueSite(op, VariableType.ARGUMENT, idx, region_index=0)
yield_operand = ValueSite(yield_op, VariableType.OPERAND, idx)
value_sites_for_variable[cs.Variable(value_site)] = [
value_site,
arg,
yield_operand,
]
case VariableType.RESULT:
# Increment by 1 to account for the conditional.
cond_operand = ValueSite(cond_op, VariableType.OPERAND, idx + 1)
arg = ValueSite(op, VariableType.ARGUMENT, idx, region_index=1)
value_sites_for_variable[cs.Variable(value_site)] = [
value_site,
arg,
cond_operand,
]
case _ as never:
assert_never(never) # pytype: disable=wrong-arg-types
return cs.ConstraintSystem(), value_sites_for_variable, []
@_add_constraint_system_derivation_rule(scf.IndexSwitchOp)
def _index_switch_constraint_system(
ctx: DerivationContext,
op: scf.IndexSwitchOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
del ctx
value_sites_for_variable: ValueSitesForVariable = {
cs.Variable(o): [o] for o in vector_value_sites(op)
}
for region in op.regions:
[block] = region.blocks
yield_op = _terminator(block, scf.YieldOp)
for value_site in value_sites_for_variable.keys():
assert value_site.key.type == VariableType.RESULT
yield_operand = ValueSite(
yield_op, VariableType.OPERAND, value_site.key.index
)
value_sites_for_variable[value_site].append(yield_operand)
return cs.ConstraintSystem(), value_sites_for_variable, []
@_add_constraint_system_derivation_rule(mgpu.LayoutCastOp)
def _layout_cast_constraint_system(
ctx: DerivationContext,
op: mgpu.LayoutCastOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
del ctx
operand = ValueSite(op, VariableType.OPERAND, 0)
result = ValueSite(op, VariableType.RESULT, 0)
variable = cs.Variable(operand)
out_layout = cs.RegisterLayout(layouts_lib.from_layout_attr(op.new_layout))
return (
cs.ConstraintSystem(assignments={variable: out_layout}),
{variable: [operand, result]},
[],
)
def _infer_tiling_for_mma_ref(
ref_ty: ir.MemRefType, max_swizzle: mgpu.SwizzlingMode
) -> tuple[int, int]:
element_bytewidth = utils.bytewidth(ref_ty.element_type)
strides, _ = ref_ty.get_strides_and_offset()
min_dim_index = np.argmin(strides)
minor_dim = ref_ty.shape[min_dim_index]
# Try tiling with all swizzling modes starting from the largest one.
for swizzle in [
mgpu.SwizzlingMode.k128ByteSwizzle,
mgpu.SwizzlingMode.k64ByteSwizzle,
mgpu.SwizzlingMode.k32ByteSwizzle,
mgpu.SwizzlingMode.kNoSwizzle,
]:
if swizzle > max_swizzle:
continue
swizzle_elems = swizzle // element_bytewidth
if minor_dim % swizzle_elems == 0:
minor_tiling = swizzle_elems
break
else:
# No valid tile transform can be inferred.
raise ValueError(f"{ref_ty.shape} is not a valid WGMMA shape")
major_tiling = 8
transposed = min_dim_index != len(strides) - 1
if transposed:
tiling = (minor_tiling, major_tiling)
else:
tiling = (major_tiling, minor_tiling)
return tiling
def _infer_wgmma_tiling(
a_type: ir.Type, b_type: ir.MemRefType
) -> tuple[tuple[int, int] | None, tuple[int, int]]:
"""Infers the tiling for a (if in SMEM) and b of a WGMMAOp.
If both a and b are in SMEM, this function infers tilings that have matching
swizzle values.
"""
b_tiling = _infer_tiling_for_mma_ref(
b_type, max_swizzle=mgpu.SwizzlingMode.k128ByteSwizzle
)
b_swizzle = _compute_swizzle(b_type, lc.TileTransform(b_tiling))
if not ir.MemRefType.isinstance(a_type):
return None, b_tiling
a_tiling = _infer_tiling_for_mma_ref(
cast(ir.MemRefType, a_type), max_swizzle=b_swizzle
)
a_swizzle = _compute_swizzle(a_type, lc.TileTransform(a_tiling))
if a_swizzle != b_swizzle:
# The swizzle for a and b has to match. This is not a fundamental
# limitation, rather the lowering doesn't currently support it.
b_tiling = _infer_tiling_for_mma_ref(b_type, max_swizzle=a_swizzle)
b_swizzle = _compute_swizzle(b_type, lc.TileTransform(b_tiling))
assert a_swizzle == b_swizzle
return a_tiling, b_tiling
@_add_constraint_system_derivation_rule(mgpu.WGMMAOp)
def _wgmma_constraint_system(
ctx: DerivationContext,
op: mgpu.WGMMAOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
assignments: dict[cs.Variable, cs.Constant] = {}
value_sites_for_variable: ValueSitesForVariable = {}
acc_out = ValueSite(op, VariableType.RESULT, 0)
acc_in = ValueSite(op, VariableType.OPERAND, 0)
acc_var = cs.Variable(acc_out)
assignments[acc_var] = cs.RegisterLayout(fa.WGMMA_LAYOUT)
value_sites_for_variable[acc_var] = [acc_in, acc_out]
a_tiling, b_tiling = _infer_wgmma_tiling(op.a.type, op.b.type)
b = ValueSite(op, VariableType.OPERAND, 2)
b_var = ctx.producer_ref(b)
assignments[b_var] = cs.SMEMTiling(lc.TileTransform(b_tiling))
value_sites_for_variable[b_var] = [b]
a = ValueSite(op, VariableType.OPERAND, 1)
if _is_smem_ref(op.a):
a_var = ctx.producer_ref(a)
assignments[a_var] = cs.SMEMTiling(lc.TileTransform(a_tiling))
else:
assert a_tiling is None
a_var = cs.Variable(a)
if ir.IntegerType.get_signless(8) == ir.VectorType(op.a.type).element_type:
assignments[a_var] = cs.RegisterLayout(fa.WGMMA_LAYOUT_8BIT)
else:
assignments[a_var] = cs.RegisterLayout(fa.WGMMA_LAYOUT)
value_sites_for_variable[a_var] = [a]
return cs.ConstraintSystem(assignments), value_sites_for_variable, []
@_add_constraint_system_derivation_rule(vector.BroadcastOp)
def _vector_broadcast_constraint_system(
    ctx: DerivationContext,
    op: vector.BroadcastOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Derives the constraint system for a scalar-to-vector broadcast.

  The result is assigned a splat register layout matching its shape.
  """
  del ctx
  # Broadcasts from vector sources are expected to go through
  # mgpu.BroadcastInDimOp instead, so only scalar sources are handled here.
  if ir.ShapedType.isinstance(op.source.type):
    raise NotImplementedError("Only vector broadcasts from scalars are supported.")
  result_site = ValueSite(op, VariableType.RESULT, 0)
  result_var = cs.Variable(result_site)
  splat_layout = fa.WGSplatFragLayout(tuple(op.result.type.shape))
  system = cs.ConstraintSystem(
      assignments={result_var: cs.RegisterLayout(splat_layout)}
  )
  return system, {result_var: [result_site]}, []
@_add_constraint_system_derivation_rule(vector.ReductionOp)
def _vector_reduction_constraint_system(
    ctx: DerivationContext,
    op: vector.ReductionOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Registers a variable for the reduced vector; imposes no constraints."""
  del ctx
  source_site = ValueSite(op, VariableType.OPERAND, 0)
  source_var = cs.Variable(source_site)
  return cs.ConstraintSystem(), {source_var: [source_site]}, []
def _reduction_constraint_and_hint(
    larger: cs.Variable,
    smaller: cs.Variable,
    larger_shape: tuple[int, ...],
    reduction_dims: tuple[int, ...],
) -> tuple[cs.Constraint, Hint]:
  """Relates a `smaller` variable to the reduction of a `larger` one.

  Returns an equality constraint `smaller == reduce(larger, reduction_dims)`
  and a hint suggesting `larger` as a broadcast of `smaller`.
  """
  # Dimensions of `larger` that survive the reduction.
  kept_dims = []
  for dim in range(len(larger_shape)):
    if dim not in reduction_dims:
      kept_dims.append(dim)
  # There are always many options for broadcasting a layout, so we can only
  # derive a broadcast hint in the larger -> smaller direction.
  broadcast_hint = Hint(
      variable=larger,
      expression=cs.BroadcastInDim(smaller, tuple(kept_dims), larger_shape),
  )
  constraint = cs.Equals(lhs=smaller, rhs=cs.Reduce(larger, reduction_dims))
  return constraint, broadcast_hint
@_add_constraint_system_derivation_rule(vector.MultiDimReductionOp)
def _multi_dim_reduction_constraint_system(
    ctx: DerivationContext,
    op: vector.MultiDimReductionOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Derives the constraint system for a multi-dimensional reduction.

  Constrains the output layout to be the reduction of the source layout
  along `op.reduction_dims`, and hints the source layout as a broadcast of
  the output layout. The accumulator operand shares the output's variable.
  """
  del ctx
  source = ValueSite(op, VariableType.OPERAND, 0)
  acc = ValueSite(op, VariableType.OPERAND, 1)
  out = ValueSite(op, VariableType.RESULT, 0)
  source_variable = cs.Variable(source)
  out_variable = cs.Variable(out)
  reduction_constraint, broadcast_hint = _reduction_constraint_and_hint(
      source_variable,
      out_variable,
      tuple(ir.ShapedType(op.source.type).shape),
      tuple(op.reduction_dims),
  )
  # TODO(bchetioui): in the future, we may need to add rules that prevent
  # strided layouts from being chosen---since trying to reduce a strided layout
  # may cause us to raise an Exception at the moment.
  return (
      cs.ConstraintSystem(constraints=[reduction_constraint]),
      {source_variable: [source], out_variable: [acc, out]},
      [broadcast_hint],
  )
@_add_constraint_system_derivation_rule(mgpu.BroadcastInDimOp)
def _broadcast_in_dim_constraint_system(
    ctx: DerivationContext,
    op: mgpu.BroadcastInDimOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Derives the constraint system for a broadcast_in_dim op.

  Broadcasting is modeled as the inverse of a reduction: the source layout
  must equal the result layout reduced along the dimensions that are *not*
  in `op.broadcast_dimensions`, and the result layout is hinted to be a
  broadcast of the source layout.
  """
  del ctx
  out_variable = cs.Variable(ValueSite(op, VariableType.RESULT, 0))
  source_variable = cs.Variable(ValueSite(op, VariableType.OPERAND, 0))
  out_shape = tuple(cast(ir.ShapedType, op.result.type).shape)
  # The result dimensions that the source does not map to.
  reduction_dims = tuple(
      i for i in range(len(out_shape)) if i not in op.broadcast_dimensions
  )
  reduction_constraint, broadcast_hint = _reduction_constraint_and_hint(
      out_variable, source_variable, out_shape, reduction_dims
  )
  return (
      cs.ConstraintSystem(constraints=[reduction_constraint]),
      {
          source_variable: [source_variable.key],
          out_variable: [out_variable.key],
      },
      [broadcast_hint],
  )
@_add_constraint_system_derivation_rule(vector.ShapeCastOp)
def _shape_cast_constraint_system(
    ctx: DerivationContext, op: vector.ShapeCastOp
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Derives the constraint system for a vector shape_cast (reshape)."""
  del ctx
  in_shape = tuple(cast(ir.ShapedType, op.source.type).shape)
  out_shape = tuple(cast(ir.ShapedType, op.result.type).shape)
  in_variable = cs.Variable(ValueSite(op, VariableType.OPERAND, 0))
  out_variable = cs.Variable(ValueSite(op, VariableType.RESULT, 0))
  # Here, we are in a case where we are stating
  #
  #   out_variable = reshape(in_variable, in_shape, out_shape).
  #
  # Thanks to the symmetric property of reshape, we can also issue a constraint
  # in the other direction, i.e.
  #
  #   in_variable = reshape(out_variable, out_shape, in_shape)
  #
  # in order to be able to figure out an assignment for `in_variable` if we
  # happen to know `out_variable`. If we only issue the first constraint, then
  # we will not be able to figure out an assignment for `in_variable` if we
  # only know `out_variable`, even though their relationship is fully
  # determined.
  in_to_out = cs.Reshape(
      in_variable, source_shape=in_shape, target_shape=out_shape
  )
  out_to_in = cs.Reshape(
      out_variable, source_shape=out_shape, target_shape=in_shape
  )
  return (
      cs.ConstraintSystem(
          constraints=[
              cs.Equals(lhs=out_variable, rhs=in_to_out),
              cs.Equals(lhs=in_variable, rhs=out_to_in),
          ],
      ),
      {in_variable: [in_variable.key], out_variable: [out_variable.key]},
      [],
  )
@_add_constraint_system_derivation_rule(vector.ExtractStridedSliceOp)
def _extract_strided_slice_constraint_system(
    ctx: DerivationContext, op: vector.ExtractStridedSliceOp
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Derives the constraint system for extracting a (unit-stride) slice.

  The shared source/result layout must divide the slice offsets, and must
  not be a splat or strided layout (slicing those is not supported yet).
  """
  del ctx
  if any(ir.IntegerAttr(s).value != 1 for s in op.strides):
    raise NotImplementedError("`strides` must contain only 1s.")
  operand = ValueSite(op, VariableType.OPERAND, 0)
  result = ValueSite(op, VariableType.RESULT, 0)
  variable = cs.Variable(operand)
  offsets = tuple(ir.IntegerAttr(o).value for o in op.offsets)
  constraints = [
      cs.Divides(variable, offsets),
      # TODO(allanrenucci): Remove once vectors with splat and strided layouts
      # can be sliced.
      cs.NotOfType(variable, fa.WGSplatFragLayout),
      cs.NotOfType(variable, fa.WGStridedFragLayout),
  ]
  return (
      cs.ConstraintSystem(constraints=constraints),
      # We use a single variable because lowering does not support two different
      # layouts for `source` and `result`.
      {variable: [operand, result]},
      [],
  )
@_add_constraint_system_derivation_rule(mgpu.CustomPrimitiveOp)
def _custom_primitive_constraint_system(
    ctx: DerivationContext,
    op: mgpu.CustomPrimitiveOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Derives the constraint system for a custom primitive op.

  Custom primitives carry explicit `in_layouts`, `in_transforms`, and
  `out_layouts` attributes; this rule turns those into hard assignments.
  The attribute iterators below are consumed in operand/result order, so
  each vector operand pops one `in_layouts` entry and each SMEM ref operand
  pops one `in_transforms` entry.
  """
  assignments: dict[cs.Variable, cs.Constant] = {}
  constraints: list[cs.Constraint] = []
  in_layouts = iter(op.in_layouts)
  in_transforms = iter(op.in_transforms)
  variables: list[cs.Variable] = []
  for i, operand in enumerate(op.operands):
    if is_vector(operand):
      v = cs.Variable(ValueSite(op, VariableType.OPERAND, i))
      variables.append(v)
      assignments[v] = cs.RegisterLayout(
          layouts_lib.from_layout_attr(next(in_layouts))
      )
    elif _is_smem_ref(operand):
      # Here we need to create a new variable, even though it is equal to the
      # source operand. This is because we directly assign the new variable and
      # if we did that to the source there could be conflicting assignments.
      # For example, the same ref could be passed into the custom op twice with
      # different transforms, which needs to yield an unsatisfiable system.
      #
      # TODO(b/447079781): Consider creating the final constraint system using
      # __and__ and potentially returning Unsatisfiable() directly if there is
      # a conflict between the assignments.
      value_site = ValueSite(op, VariableType.OPERAND, i)
      source_var = ctx.producer_ref(value_site)
      v = cs.Variable(value_site)
      constraints.append(cs.Equals(lhs=source_var, rhs=v))
      variables.append(v)
      transforms = next(in_transforms)
      ref_ty = value_site.value.type
      tiling = _extract_smem_tiling_from_custom_transform_attrs(ref_ty, transforms)
      assignments[v] = tiling
  out_layouts = iter(op.out_layouts)
  for i, result in enumerate(op.results):
    if ir.VectorType.isinstance(result.type):
      v = cs.Variable(ValueSite(op, VariableType.RESULT, i))
      variables.append(v)
      assignments[v] = cs.RegisterLayout(
          layouts_lib.from_layout_attr(next(out_layouts))
      )
  return (
      cs.ConstraintSystem(assignments, constraints),
      {v: [v.key] for v in variables},
      [],
  )
def _tmem_layout_from_layout_attr(
    layout_attr: mgpu.TiledLayout,
) -> tcgen05.TMEMLayout:
  """Converts a tiled layout attribute into the equivalent TMEM layout."""
  register_layout = layouts_lib.from_layout_attr(layout_attr)
  assert isinstance(register_layout, fa.TiledLayout)
  return tcgen05.TMEMLayout(
      register_layout.tiling,
      register_layout.warp_dims,
      register_layout.lane_dims,
      register_layout.vector_dim,
  )
@_add_constraint_system_derivation_rule(mgpu.TmemLayoutCastOp)
def _tmem_layout_cast_constraint_system(
    ctx: DerivationContext,
    op: mgpu.TmemLayoutCastOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Pins the shared operand/result variable to the layout named by the cast."""
  in_site = ValueSite(op, VariableType.OPERAND, 0)
  out_site = ValueSite(op, VariableType.RESULT, 0)
  shared_var = ctx.producer_ref(in_site)
  new_layout = cs.TMEMLayout(_tmem_layout_from_layout_attr(op.new_layout))
  system = cs.ConstraintSystem(assignments={shared_var: new_layout})
  return system, {shared_var: [in_site, out_site]}, []
@_add_constraint_system_derivation_rule(mgpu.TmemAllocOp)
def _tmem_alloc_constraint_system(
    ctx: DerivationContext,
    op: mgpu.TmemAllocOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Derives the constraint system for a TMEM allocation.

  The SMEM operand is assigned no tiling, and the allocated result gets a
  default TMEM layout as a hint only, so a better layout can still be
  inferred from the result's users.
  """
  del ctx
  result = ValueSite(op, VariableType.RESULT, 0)
  result_var = cs.Variable(result)
  layout = tcgen05._infer_tmem_layout(
      tuple(op.result.type.shape), op.collective, packing=1
  )
  in_smem = ValueSite(op, VariableType.OPERAND, 0)
  in_smem_var = cs.Variable(in_smem)
  assignments: dict[cs.Variable, cs.Constant] = {
      in_smem_var: cs.SMEMTiling(None)
  }
  operands_for_variable = {result_var: [result], in_smem_var: [in_smem]}
  # This is a hint, not a hard constraint. This will be the default layout if
  # none can be inferred.
  hint = Hint(result_var, cs.TMEMLayout(layout))
  system = cs.ConstraintSystem(assignments=assignments)
  return system, operands_for_variable, [hint]
@_add_constraint_system_derivation_rule(mgpu.TmemDeallocOp)
def _tmem_dealloc_constraint_system(
    ctx: DerivationContext,
    op: mgpu.TmemDeallocOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Tracks the deallocated ref's variable; imposes no constraints."""
  ref_site = ValueSite(op, VariableType.OPERAND, 0)
  ref_var = ctx.producer_ref(ref_site)
  value_sites: ValueSitesForVariable = {ref_var: [ref_site]}
  return cs.ConstraintSystem(), value_sites, []
@_add_constraint_system_derivation_rule(mgpu.TcGen05MMAOp)
def _tcgen05_mma_constraint_system(
    ctx: DerivationContext,
    op: mgpu.TcGen05MMAOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Derives the constraint system for a tcgen05 MMA op.

  The accumulator always lives in TMEM and gets an inferred TMEM layout.
  `a` may live in TMEM (packed layout derived from its element bitwidth) or
  in SMEM (inferred MMA tiling); `b` always gets an inferred SMEM tiling,
  with its maximum swizzle reduced in the M=64 non-collective case.
  """
  assignments: dict[cs.Variable, cs.Constant] = {}
  operands_for_variable: ValueSitesForVariable = {}
  # TMEM
  acc = ValueSite(op, VariableType.OPERAND, 0)
  acc_variable = ctx.producer_ref(acc)
  acc_type = ir.ShapedType(op.accumulator.type)
  acc_layout = tcgen05._infer_tmem_layout(
      tuple(acc_type.shape), op.collective, packing=1
  )
  assignments[acc_variable] = cs.TMEMLayout(acc_layout)
  operands_for_variable[acc_variable] = [acc]
  if _is_tmem_ref(op.a):
    a = ValueSite(op, VariableType.OPERAND, 1)
    a_type = ir.ShapedType(op.a.type)
    a_var = ctx.producer_ref(a)
    # Pack as many elements as fit in a 32-bit word.
    packing = 32 // utils.bitwidth(a_type.element_type)
    a_layout = tcgen05._infer_tmem_layout(
        tuple(a_type.shape), op.collective, packing
    )
    assignments[a_var] = cs.TMEMLayout(a_layout)
    operands_for_variable[a_var] = [a]
  # SMEM
  M = op.accumulator.type.shape[0]
  if M == 64 and not op.collective.value:
    # We can't split N into groups if we would partition it below the tile size.
    N = op.b.type.shape[1]
    element_type_bitwidth = utils.bitwidth(op.b.type.element_type)
    n_lane_groups = 2
    # Pick the largest swizzle whose tile still fits in N / n_lane_groups.
    max_b_swizzle = next(
        s
        for s in reversed(mgpu.SwizzlingMode)
        if 8 * s // element_type_bitwidth <= N // n_lane_groups
    )
  else:
    max_b_swizzle = mgpu.SwizzlingMode.k128ByteSwizzle
  b_tiling = _infer_tiling_for_mma_ref(ir.MemRefType(op.b.type), max_b_swizzle)
  b = ValueSite(op, VariableType.OPERAND, 2)
  b_var = ctx.producer_ref(b)
  assignments[b_var] = cs.SMEMTiling(lc.TileTransform(b_tiling))
  operands_for_variable[b_var] = [b]
  if _is_smem_ref(op.a):
    a_tiling = _infer_tiling_for_mma_ref(
        ir.MemRefType(op.a.type),
        max_swizzle=mgpu.SwizzlingMode.k128ByteSwizzle,
    )
    a = ValueSite(op, VariableType.OPERAND, 1)
    a_var = ctx.producer_ref(a)
    assignments[a_var] = cs.SMEMTiling(lc.TileTransform(a_tiling))
    operands_for_variable[a_var] = [a]
  return cs.ConstraintSystem(assignments=assignments), operands_for_variable, []
@_add_constraint_system_derivation_rule(mgpu.AsyncLoadTmemOp)
def _async_load_tmem_constraint_system(
    ctx: DerivationContext,
    op: mgpu.AsyncLoadTmemOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Constrains the TMEM source and the load's result to transferable layouts."""
  src_site = ValueSite(op, VariableType.OPERAND, 0)
  dst_site = ValueSite(op, VariableType.RESULT, 0)
  src_var = ctx.producer_ref(src_site)
  dst_var = cs.Variable(dst_site)
  shape = tuple(ir.ShapedType(op.source.type).shape)
  transfer_constraint = cs.IsTransferable(src_var, dst_var, shape)
  system = cs.ConstraintSystem(constraints=[transfer_constraint])
  return system, {src_var: [src_site], dst_var: [dst_site]}, []
@_add_constraint_system_derivation_rule(mgpu.SliceTmemOp)
def _slice_tmem_constraint_system(
    ctx: DerivationContext,
    op: mgpu.SliceTmemOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Registers variables for the sliced ref and its result; no constraints."""
  in_site = ValueSite(op, VariableType.OPERAND, 0)
  out_site = ValueSite(op, VariableType.RESULT, 0)
  in_var = ctx.producer_ref(in_site)
  out_var = cs.Variable(out_site)
  return cs.ConstraintSystem(), {in_var: [in_site], out_var: [out_site]}, []
@_add_constraint_system_derivation_rule(mgpu.AsyncStoreTmemOp)
def _async_store_tmem_constraint_system(
    ctx: DerivationContext,
    op: mgpu.AsyncStoreTmemOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Constrains the stored value and the TMEM destination to transferable layouts."""
  src_site = ValueSite(op, VariableType.OPERAND, 0)
  dst_site = ValueSite(op, VariableType.OPERAND, 1)
  src_var = cs.Variable(src_site)
  dst_var = ctx.producer_ref(dst_site)
  shape = tuple(ir.ShapedType(op.source.type).shape)
  transfer_constraint = cs.IsTransferable(src_var, dst_var, shape)
  system = cs.ConstraintSystem(constraints=[transfer_constraint])
  return system, {src_var: [src_site], dst_var: [dst_site]}, []
@_add_constraint_system_derivation_rule(mgpu.SliceSMEMOp)
def _slice_smem_constraint_system(
    ctx: DerivationContext,
    op: mgpu.SliceSMEMOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Registers a variable for the sliced SMEM result; imposes no constraints."""
  del ctx
  result_site = ValueSite(op, VariableType.RESULT, 0)
  result_var = cs.Variable(result_site)
  return cs.ConstraintSystem(), {result_var: [result_site]}, []
@_add_constraint_system_derivation_rule(memref.SubViewOp)
def _memref_subview_constraint_system(
    ctx: DerivationContext,
    op: memref.SubViewOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Derives the constraint system for a (unit-stride) memref subview.

  Source and result share one variable; its tiling must divide, in each
  kept dimension, the gcd of the slice size, the source extent, and the
  slice offset (via `dynamic_gcd` for dynamic offsets).
  """
  source = ValueSite(op, VariableType.OPERAND, 0)
  dest = ValueSite(op, VariableType.RESULT, 0)
  source_dest_var = ctx.producer_ref(source)
  if any(s != 1 for s in op.static_strides):
    raise NotImplementedError(
        f"Only unit strides are supported but got {op.static_strides}."
    )
  # Collect all the constraints from all dimensions.
  tiling_multiple = []
  # Tracks which entry of `op.offsets` holds the next dynamic offset value.
  dynamic_offset_index = 0
  for i, size in enumerate(op.static_sizes):
    offset = op.static_offsets[i]
    if offset == ir.ShapedType.get_dynamic_size():
      offset = op.offsets[dynamic_offset_index]
      dynamic_offset_index += 1
    # Drop all dimensions up to and including the last dynamic size. Dynamic
    # sizes are not supported yet.
    #
    # Supporting dynamic sizes here can be done analogously to how dynamic
    # offsets are supported. The reason we don't support dynamic sizes now is
    # because the lowering does not yet support them.
    if ir.ShapedType.is_dynamic_size(size):
      tiling_multiple = []
    else:
      src_type = ir.MemRefType(op.source.type)
      divisibility_constraint = math.gcd(size, src_type.shape[i])
      if isinstance(offset, int):
        divisibility_constraint = math.gcd(divisibility_constraint, offset)
      else:
        # The offset is an SSA value; fold it in symbolically.
        divisibility_constraint = dynamic_gcd(divisibility_constraint, offset)
      tiling_multiple.append(divisibility_constraint)
  constraints = [cs.Divides(source_dest_var, tuple(tiling_multiple))]
  system = cs.ConstraintSystem(constraints=constraints)
  return system, {source_dest_var: [source, dest]}, []
@_add_constraint_system_derivation_rule(memref.CastOp)
def _memref_cast_op_constraint_system(
    ctx: DerivationContext,
    op: memref.CastOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Makes the cast's source and result share a single variable."""
  src_site = ValueSite(op, VariableType.OPERAND, 0)
  dst_site = ValueSite(op, VariableType.RESULT, 0)
  shared_var = ctx.producer_ref(src_site)
  return cs.ConstraintSystem(), {shared_var: [src_site, dst_site]}, []
@_add_constraint_system_derivation_rule(memref.TransposeOp)
def _memref_transpose_op_constraint_system(
    ctx: DerivationContext,
    op: memref.TransposeOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Derives the constraint system for a 2D memref transpose.

  If the op leaves the strides unchanged (identity permutation), source and
  result share one variable. Otherwise, their variables are constrained to
  be transposes of each other, with the constraint issued in both
  directions so either side can be solved from the other.
  """
  in_ty = ir.MemRefType(op.in_.type)
  if len(in_ty.shape) != 2:
    raise NotImplementedError(f"Only 2D memrefs are supported, got {in_ty}")
  in_strides, _ = in_ty.get_strides_and_offset()
  out_strides, _ = ir.MemRefType(op.result.type).get_strides_and_offset()
  transpose = in_strides != out_strides
  source = ValueSite(op, VariableType.OPERAND, 0)
  dest = ValueSite(op, VariableType.RESULT, 0)
  source_var = ctx.producer_ref(source)
  if not transpose:
    return (cs.ConstraintSystem(), {source_var: [source, dest]}, [])
  dest_var = cs.Variable(dest)
  constraints = [
      cs.Equals(cs.Transpose(source_var), dest_var),
      cs.Equals(source_var, cs.Transpose(dest_var)),
  ]
  system = cs.ConstraintSystem(constraints=constraints)
  return system, {source_var: [source], dest_var: [dest]}, []
# `memref.load` and `memref.store` are used to load barrier phases which are
# scalars---the rule needn't do anything interesting, but we need to have it.
@_add_constraint_system_derivation_rule(memref.LoadOp)
@_add_constraint_system_derivation_rule(memref.StoreOp)
def _memref_load_store_op_constraint_system(
    ctx: DerivationContext,
    op: memref.LoadOp | memref.StoreOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  del ctx
  ref_shape = ir.MemRefType(op.memref.type).shape
  # Only rank-0 or single-element refs are handled.
  if ref_shape and ref_shape != [1]:
    raise NotImplementedError(
        f"Only scalar memrefs are supported, got {ref_shape}"
    )
  # The ref is the first operand of a load, but the second operand of a
  # store (the stored value comes first).
  if isinstance(op, memref.LoadOp):
    ref_op_index = 0
  else:
    ref_op_index = 1
  ref_site = ValueSite(op, VariableType.OPERAND, ref_op_index)
  ref_var = cs.Variable(ref_site)
  # A scalar ref never needs a tiling transform.
  assignments: dict[cs.Variable, cs.Constant] = {ref_var: cs.SMEMTiling(None)}
  system = cs.ConstraintSystem(assignments=assignments)
  return system, {ref_var: [ref_site]}, []
def _extract_smem_tiling_from_custom_transform_attrs(
ref_type: ir.MemRefType,
transform_attrs: ir.ArrayAttr,
) -> cs.SMEMTiling:
transforms = [layouts_lib.from_transform_attr(x) for x in transform_attrs]
match transforms:
case []:
tile_transform = None
swizzle = None
case [lc.TileTransform() as t]:
tile_transform = t
swizzle = None
case [lc.TileTransform() as t, mgpu.SwizzlingMode() as s]:
tile_transform = t
swizzle = s
case _:
raise NotImplementedError(f"Unsupported transforms {transforms}")
if swizzle is not None:
computed_swizzle = _compute_swizzle(ref_type, tile_transform)
if computed_swizzle != swizzle:
raise NotImplementedError(
f"Cannot honor caller-provided swizzle {swizzle} that is different "
f"from the computed swizle {computed_swizzle} for type {ref_type}."
)
return cs.SMEMTiling(tile_transform)
@_add_constraint_system_derivation_rule(mgpu.WithTransformsOp)
def _with_transforms_constraint_system(
    ctx: DerivationContext,
    op: mgpu.WithTransformsOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Pins the ref's tiling to the transforms carried by the op."""
  ref_site = ValueSite(op, VariableType.OPERAND, 0)
  result_site = ValueSite(op, VariableType.RESULT, 0)
  shared_var = ctx.producer_ref(ref_site)
  smem_tiling = _extract_smem_tiling_from_custom_transform_attrs(
      op.ref.type, op.transforms
  )
  system = cs.ConstraintSystem(assignments={shared_var: smem_tiling})
  return system, {shared_var: [ref_site, result_site]}, []
@_add_constraint_system_derivation_rule(mgpu.AsyncLoadOp)
@_add_constraint_system_derivation_rule(mgpu.AsyncStoreOp)
def _async_load_store_constraint_system(
    ctx: DerivationContext,
    op: mgpu.AsyncLoadOp | mgpu.AsyncStoreOp,
) -> tuple[cs.ConstraintSystem, ValueSitesForVariable, list[Hint]]:
  """Derives the constraint system for an async GMEM<->SMEM copy.

  The SMEM ref's tiling must divide, per dimension, the gcd of the slice
  length and the slice index (via `dynamic_gcd` for dynamic indices).
  """
  tiling_multiple = []
  for size, index in zip(op.slice_lengths, op.indices, strict=True):
    if size == -1:
      # This dimension does not appear in the final smem memref shape.
      continue
    tiling_multiple.append(dynamic_gcd(size, index))
  # The SMEM ref is operand 1 of a load (destination) and operand 0 of a
  # store (source).
  operand_index = 1 if isinstance(op, mgpu.AsyncLoadOp) else 0
  operand = ValueSite(op, VariableType.OPERAND, operand_index)
  var = ctx.producer_ref(operand)
  constraints = [cs.Divides(expr=var, tiling_multiple=tuple(tiling_multiple))]
  return cs.ConstraintSystem(constraints=constraints), {var: [operand]}, []
def _ensure_all_layouts_are_set(op: ir.OpView) -> None:
  """Checks that `op` carries the layout/transform attributes it requires."""
  # (predicate, value filter, attribute suffix, human-readable value type)
  checks = (
      (inference_utils.should_have_layout, is_vector, "layouts", "vector"),
      (inference_utils.should_have_tmem_layout, _is_tmem_ref, "tmem_layouts",
       "TMEM ref"),
      (inference_utils.should_have_transforms,
       inference_utils.is_transformable_smem_memref, "transforms", "SMEM ref"),
  )
  for should_have, filter_fn, attr_suffix, value_type in checks:
    if should_have(op):
      _ensure_right_number_of_layouts(filter_fn, attr_suffix, value_type, op)
def _ensure_right_number_of_layouts(
    filter_fn: Callable[[ir.Value], bool],
    attr_suffix: str,
    value_type: str,
    op: ir.OpView,
) -> None:
  """Ensures that the right number of in/out layouts are provided for an op.

  Layouts here can be vector layouts, TMEM layouts, or SMEM transforms.

  Args:
    filter_fn: selects the operands/results that require a layout.
    attr_suffix: suffix of the layout attributes, e.g. "layouts" selects
      "in_layouts" and "out_layouts".
    value_type: human-readable description of the values selected by
      `filter_fn`, used in error messages.
    op: the op whose layout attributes are checked.

  Raises:
    ValueError: if the number of in (resp. out) layout attributes does not
      match the number of matching operands (resp. results).
  """
  def layouts(attr: str):
    # A missing attribute is treated as an empty list of layouts.
    return op.attributes[attr] if attr in op.attributes else []

  in_layouts = layouts(f"in_{attr_suffix}")
  out_layouts = layouts(f"out_{attr_suffix}")
  num_matching_operands = sum(map(filter_fn, op.operands))
  if len(in_layouts) != num_matching_operands:
    raise ValueError(
        f"Expected the same number of in_{attr_suffix} ({len(in_layouts)}) as "
        f"{value_type} operands ({num_matching_operands}). op=\n {op}"
    )
  num_matching_results = sum(map(filter_fn, op.results))
  if len(out_layouts) != num_matching_results:
    raise ValueError(
        f"Expected the same number of out_{attr_suffix} ({len(out_layouts)}) "
        f"as {value_type} results ({num_matching_results}). op=\n {op}"
    )
def _compute_swizzle(
    type: ir.Type, tile_transform: lc.TileTransform | None
) -> mgpu.SwizzlingMode:
  """Computes the swizzle mode given a tiling transform and a data type.

  The swizzle is the byte size of the tile along the minormost (smallest
  stride) tiled dimension. When no tiling is given, no swizzle is applied.

  Raises:
    ValueError: if `type` is not a MemRefType, or if the tile rank exceeds
      the ref's rank.
  """
  if tile_transform is None:
    # TODO(b/447079781): Revisit if this is the behavior we want.
    return mgpu.SwizzlingMode.kNoSwizzle
  if not ir.MemRefType.isinstance(type):
    raise ValueError(f"Expected a MemRefType, got {type}.")
  ref_ty = ir.MemRefType(type)
  strides, _ = ref_ty.get_strides_and_offset()
  tiling = tile_transform.tiling
  if len(tiling) > len(strides):
    raise ValueError(
        f"The tile rank ({len(tiling)}) cannot be greater than the ref's rank"
        f" ({len(strides)})."
    )
  # The tiling applies to the minormost dimensions of the ref; pick the tile
  # size of the dimension with the smallest stride among those.
  minor_tiling = tiling[np.argmin(strides[-len(tiling):])]
  swizzle = minor_tiling * utils.bytewidth(ref_ty.element_type)
  # The byte size must match one of the hardware-supported swizzle modes.
  assert swizzle in (
      mgpu.SwizzlingMode.k128ByteSwizzle,
      mgpu.SwizzlingMode.k64ByteSwizzle,
      mgpu.SwizzlingMode.k32ByteSwizzle,
      mgpu.SwizzlingMode.kNoSwizzle,
  )
  return mgpu.SwizzlingMode(swizzle)
@dataclasses.dataclass(frozen=True)
| DerivationContext |
python | redis__redis-py | redis/asyncio/client.py | {
"start": 29265,
"end": 29415
} | class ____(TypedDict):
time: float
db: int
client_address: str
client_port: str
client_type: str
command: str
| MonitorCommandInfo |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 224057,
"end": 224439
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "project_v2_item")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
project_v2_item = sgqlc.types.Field("ProjectV2Item", graphql_name="projectV2Item")
| ClearProjectV2ItemFieldValuePayload |
python | kubernetes-client__python | kubernetes/client/models/v1_gce_persistent_disk_volume_source.py | {
"start": 383,
"end": 8034
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'fs_type': 'str',
'partition': 'int',
'pd_name': 'str',
'read_only': 'bool'
}
attribute_map = {
'fs_type': 'fsType',
'partition': 'partition',
'pd_name': 'pdName',
'read_only': 'readOnly'
}
def __init__(self, fs_type=None, partition=None, pd_name=None, read_only=None, local_vars_configuration=None): # noqa: E501
"""V1GCEPersistentDiskVolumeSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._fs_type = None
self._partition = None
self._pd_name = None
self._read_only = None
self.discriminator = None
if fs_type is not None:
self.fs_type = fs_type
if partition is not None:
self.partition = partition
self.pd_name = pd_name
if read_only is not None:
self.read_only = read_only
@property
def fs_type(self):
"""Gets the fs_type of this V1GCEPersistentDiskVolumeSource. # noqa: E501
fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk # noqa: E501
:return: The fs_type of this V1GCEPersistentDiskVolumeSource. # noqa: E501
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""Sets the fs_type of this V1GCEPersistentDiskVolumeSource.
fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk # noqa: E501
:param fs_type: The fs_type of this V1GCEPersistentDiskVolumeSource. # noqa: E501
:type: str
"""
self._fs_type = fs_type
@property
def partition(self):
"""Gets the partition of this V1GCEPersistentDiskVolumeSource. # noqa: E501
partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk # noqa: E501
:return: The partition of this V1GCEPersistentDiskVolumeSource. # noqa: E501
:rtype: int
"""
return self._partition
@partition.setter
def partition(self, partition):
"""Sets the partition of this V1GCEPersistentDiskVolumeSource.
partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk # noqa: E501
:param partition: The partition of this V1GCEPersistentDiskVolumeSource. # noqa: E501
:type: int
"""
self._partition = partition
@property
def pd_name(self):
"""Gets the pd_name of this V1GCEPersistentDiskVolumeSource. # noqa: E501
pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk # noqa: E501
:return: The pd_name of this V1GCEPersistentDiskVolumeSource. # noqa: E501
:rtype: str
"""
return self._pd_name
@pd_name.setter
def pd_name(self, pd_name):
"""Sets the pd_name of this V1GCEPersistentDiskVolumeSource.
pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk # noqa: E501
:param pd_name: The pd_name of this V1GCEPersistentDiskVolumeSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and pd_name is None: # noqa: E501
raise ValueError("Invalid value for `pd_name`, must not be `None`") # noqa: E501
self._pd_name = pd_name
@property
def read_only(self):
"""Gets the read_only of this V1GCEPersistentDiskVolumeSource. # noqa: E501
readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk # noqa: E501
:return: The read_only of this V1GCEPersistentDiskVolumeSource. # noqa: E501
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""Sets the read_only of this V1GCEPersistentDiskVolumeSource.
readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk # noqa: E501
:param read_only: The read_only of this V1GCEPersistentDiskVolumeSource. # noqa: E501
:type: bool
"""
self._read_only = read_only
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1GCEPersistentDiskVolumeSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1GCEPersistentDiskVolumeSource):
return True
return self.to_dict() != other.to_dict()
| V1GCEPersistentDiskVolumeSource |
python | pytorch__pytorch | test/test_prims.py | {
"start": 16009,
"end": 18137
} | class ____(TestCase):
@ops([op for op in op_db if op.supports_varargs], dtypes=OpDTypes.any_one)
def test_decomposition_method_vararg(self, device, dtype, op):
# some ops have vararg variants for the methods. this tests it.
# we don't have tests for varargs in OpInfo, so we need to
# improvise this a bit.
# The rule for general functions (the special cases being e.g. tensor
# creation functions taking shapes) is that things can be vararg
# if the method has only one argument of sequence type.
# e.g. permute can be called on a 3d tensor t as t.permute(0, 2, 1)
# as well as t.permute([0, 2, 1])
# when the signature in native_functions.yaml
# shows arguments Tensor self, IntList dims
# we might need to adjust things for the factory functions or
# have them do their own test
from torch.fx.experimental.proxy_tensor import make_fx
from torch._prims.context import TorchRefsMode
# filter out empty tuple as that cannot be the varargs
sample_inputs = (si for si in op.sample_inputs(device, dtype, requires_grad=False)
if (si.args[-1] if si.args else si.input))
# just run one test, we assume there is a suitable one in the tests
sample_input = next(sample_inputs)
all_args = (sample_input.input,) + sample_input.args
# in general, the methods take varargs and not (always?) the function
# variants, the exception to this rule are the factory functions
if op.is_factory_function:
fn = op.op
else:
fn = op.method_variant
with TorchRefsMode():
gm = make_fx(fn)(*all_args[:-1], *all_args[-1])
# in case we add random factory functions
torch.manual_seed(1)
res = gm(*all_args[:-1], *all_args[-1])
torch.manual_seed(1)
expected = fn(*all_args[:-1], *all_args[-1])
self.assertEqual(res, expected)
instantiate_device_type_tests(TestDecomp, globals())
if __name__ == "__main__":
run_tests()
| TestDecomp |
python | getsentry__sentry | src/sentry/notifications/notification_action/group_type_notification_registry/handlers/metric_alert_registry_handler.py | {
"start": 764,
"end": 2715
} | class ____(LegacyRegistryHandler):
@staticmethod
def handle_workflow_action(job: WorkflowEventData, action: Action, detector: Detector) -> None:
try:
handler = metric_alert_handler_registry.get(action.type)
handler.invoke_legacy_registry(job, action, detector)
except NoRegistrationExistsError:
logger.exception(
"No metric alert handler found for action type: %s",
action.type,
extra={"action_id": action.id},
)
raise
except Exception:
logger.exception(
"Error invoking metric alert handler",
extra={"action_id": action.id},
)
raise
@staticmethod
def target(action: Action) -> OrganizationMember | Team | str | None:
target_identifier = action.config.get("target_identifier")
if target_identifier is None:
return None
target_type = action.config.get("target_type")
if target_type == ActionTarget.USER.value:
dcga = DataConditionGroupAction.objects.get(action=action)
try:
return OrganizationMember.objects.get(
user_id=int(target_identifier),
organization=dcga.condition_group.organization,
)
except OrganizationMember.DoesNotExist:
# user is no longer a member of the organization
pass
elif target_type == ActionTarget.TEAM.value:
try:
return Team.objects.get(id=int(target_identifier))
except Team.DoesNotExist:
pass
elif target_type == ActionTarget.SPECIFIC.value:
# TODO: This is only for email. We should have a way of validating that it's
# ok to contact this email.
return target_identifier
return None
| MetricAlertRegistryHandler |
python | apache__airflow | providers/hashicorp/src/airflow/providers/hashicorp/_internal_client/vault_client.py | {
"start": 1481,
"end": 24683
} | class ____(LoggingMixin):
"""
Retrieves Authenticated client from Hashicorp Vault.
This is purely internal class promoting authentication code reuse between the Hook and the
SecretBackend, it should not be used directly in Airflow DAGs. Use VaultBackend for backend
integration and Hook in case you want to communicate with VaultHook using standard Airflow
Connection definition.
:param url: Base URL for the Vault instance being addressed.
:param auth_type: Authentication Type for Vault. Default is ``token``. Available values are in
('approle', 'aws_iam', 'azure', 'github', 'gcp', 'kubernetes', 'ldap', 'radius', 'token', 'userpass')
:param auth_mount_point: It can be used to define mount_point for authentication chosen
Default depends on the authentication method used.
:param mount_point: The "path" the secret engine was mounted on. Default is "secret". Note that
this mount_point is not used for authentication if authentication is done via a
different engine. For authentication mount_points see, auth_mount_point.
:param kv_engine_version: Selects the version of the engine to run (``1`` or ``2``, default: ``2``).
:param token: Authentication token to include in requests sent to Vault
(for ``token`` and ``github`` auth_type).
:param token_path: path to file containing authentication token to include in requests sent to Vault
(for ``token`` and ``github`` auth_type).
:param username: Username for Authentication (for ``ldap`` and ``userpass`` auth_types).
:param password: Password for Authentication (for ``ldap`` and ``userpass`` auth_types).
:param key_id: Key ID for Authentication (for ``aws_iam`` and ''azure`` auth_type).
:param secret_id: Secret ID for Authentication (for ``approle``, ``aws_iam`` and ``azure`` auth_types).
:param role_id: Role ID for Authentication (for ``approle``, ``aws_iam`` auth_types).
:param assume_role_kwargs: AWS assume role param.
See AWS STS Docs:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sts/client/assume_role.html
:param region: AWS region for STS API calls. Inferred from the boto3 client configuration if not provided
(for ``aws_iam`` auth_type).
:param kubernetes_role: Role for Authentication (for ``kubernetes`` auth_type).
:param kubernetes_jwt_path: Path for kubernetes jwt token (for ``kubernetes`` auth_type, default:
``/var/run/secrets/kubernetes.io/serviceaccount/token``).
:param gcp_key_path: Path to Google Cloud Service Account key file (JSON) (for ``gcp`` auth_type).
Mutually exclusive with gcp_keyfile_dict
:param gcp_keyfile_dict: Dictionary of keyfile parameters. (for ``gcp`` auth_type).
Mutually exclusive with gcp_key_path
:param gcp_scopes: Comma-separated string containing OAuth2 scopes (for ``gcp`` auth_type).
:param azure_tenant_id: The tenant id for the Azure Active Directory (for ``azure`` auth_type).
:param azure_resource: The configured URL for the application registered in Azure Active Directory
(for ``azure`` auth_type).
:param radius_host: Host for radius (for ``radius`` auth_type).
:param radius_secret: Secret for radius (for ``radius`` auth_type).
:param radius_port: Port for radius (for ``radius`` auth_type).
"""
def __init__(
self,
url: str | None = None,
auth_type: str = "token",
auth_mount_point: str | None = None,
mount_point: str | None = "secret",
kv_engine_version: int | None = None,
token: str | None = None,
token_path: str | None = None,
username: str | None = None,
password: str | None = None,
key_id: str | None = None,
secret_id: str | None = None,
assume_role_kwargs: dict | None = None,
role_id: str | None = None,
region: str | None = None,
kubernetes_role: str | None = None,
kubernetes_jwt_path: str | None = "/var/run/secrets/kubernetes.io/serviceaccount/token",
gcp_key_path: str | None = None,
gcp_keyfile_dict: dict | None = None,
gcp_scopes: str | None = None,
azure_tenant_id: str | None = None,
azure_resource: str | None = None,
radius_host: str | None = None,
radius_secret: str | None = None,
radius_port: int | None = None,
**kwargs,
):
super().__init__()
if kv_engine_version and kv_engine_version not in VALID_KV_VERSIONS:
raise VaultError(
f"The version is not supported: {kv_engine_version}. It should be one of {VALID_KV_VERSIONS}"
)
if auth_type not in VALID_AUTH_TYPES:
raise VaultError(
f"The auth_type is not supported: {auth_type}. It should be one of {VALID_AUTH_TYPES}"
)
if auth_type == "token" and not token and not token_path and "VAULT_TOKEN" not in os.environ:
raise VaultError("The 'token' authentication type requires 'token' or 'token_path'")
if auth_type == "github" and not token and not token_path:
raise VaultError("The 'github' authentication type requires 'token' or 'token_path'")
if auth_type == "approle" and not role_id:
raise VaultError("The 'approle' authentication type requires 'role_id'")
if auth_type == "kubernetes":
if not kubernetes_role:
raise VaultError("The 'kubernetes' authentication type requires 'kubernetes_role'")
if not kubernetes_jwt_path:
raise VaultError("The 'kubernetes' authentication type requires 'kubernetes_jwt_path'")
if auth_type == "azure":
if not azure_resource:
raise VaultError("The 'azure' authentication type requires 'azure_resource'")
if not azure_tenant_id:
raise VaultError("The 'azure' authentication type requires 'azure_tenant_id'")
if auth_type == "radius":
if not radius_host:
raise VaultError("The 'radius' authentication type requires 'radius_host'")
if not radius_secret:
raise VaultError("The 'radius' authentication type requires 'radius_secret'")
if auth_type == "gcp":
if not gcp_scopes:
raise VaultError("The 'gcp' authentication type requires 'gcp_scopes'")
if not role_id:
raise VaultError("The 'gcp' authentication type requires 'role_id'")
if not gcp_key_path and not gcp_keyfile_dict:
raise VaultError(
"The 'gcp' authentication type requires 'gcp_key_path' or 'gcp_keyfile_dict'"
)
self.kv_engine_version = kv_engine_version or 2
self.url = url
self.auth_type = auth_type
self.kwargs = kwargs
self.token = token or os.getenv("VAULT_TOKEN", None)
self.token_path = token_path
self.auth_mount_point = auth_mount_point
self.mount_point = mount_point
self.username = username
self.password = password
self.key_id = key_id
self.secret_id = secret_id
self.role_id = role_id
self.assume_role_kwargs = assume_role_kwargs
self.region = region
self.kubernetes_role = kubernetes_role
self.kubernetes_jwt_path = kubernetes_jwt_path
self.gcp_key_path = gcp_key_path
self.gcp_keyfile_dict = gcp_keyfile_dict
self.gcp_scopes = gcp_scopes
self.azure_tenant_id = azure_tenant_id
self.azure_resource = azure_resource
self.radius_host = radius_host
self.radius_secret = radius_secret
self.radius_port = radius_port
@property
def client(self):
"""
Checks that it is still authenticated to Vault and invalidates the cache if this is not the case.
:return: Vault Client
"""
if not self._client.is_authenticated():
# Invalidate the cache:
# https://github.com/pydanny/cached-property#invalidating-the-cache
self.__dict__.pop("_client", None)
return self._client
@cached_property
def _client(self) -> hvac.Client:
"""
Return an authenticated Hashicorp Vault client.
:return: Vault Client
"""
if "session" not in self.kwargs:
# If no session object provide one with retry as per hvac documentation:
# https://hvac.readthedocs.io/en/stable/advanced_usage.html#retrying-failed-requests
adapter = HTTPAdapter(
max_retries=Retry(
total=3,
backoff_factor=0.1,
status_forcelist=[412, 500, 502, 503],
raise_on_status=False,
)
)
session = Session()
session.mount("http://", adapter)
session.mount("https://", adapter)
session.verify = self.kwargs.get("verify", session.verify)
session.cert = self.kwargs.get("cert", session.cert)
session.proxies = self.kwargs.get("proxies", session.proxies)
self.kwargs["session"] = session
_client = hvac.Client(url=self.url, **self.kwargs)
if self.auth_type == "approle":
self._auth_approle(_client)
elif self.auth_type == "aws_iam":
self._auth_aws_iam(_client)
elif self.auth_type == "azure":
self._auth_azure(_client)
elif self.auth_type == "gcp":
self._auth_gcp(_client)
elif self.auth_type == "github":
self._auth_github(_client)
elif self.auth_type == "kubernetes":
self._auth_kubernetes(_client)
elif self.auth_type == "ldap":
self._auth_ldap(_client)
elif self.auth_type == "radius":
self._auth_radius(_client)
elif self.auth_type == "token":
self._set_token(_client)
elif self.auth_type == "userpass":
self._auth_userpass(_client)
else:
raise VaultError(f"Authentication type '{self.auth_type}' not supported")
if _client.is_authenticated():
return _client
raise VaultError("Vault Authentication Error!")
def _auth_userpass(self, _client: hvac.Client) -> None:
if self.auth_mount_point:
_client.auth.userpass.login(
username=self.username, password=self.password, mount_point=self.auth_mount_point
)
else:
_client.auth.userpass.login(username=self.username, password=self.password)
def _auth_radius(self, _client: hvac.Client) -> None:
if self.auth_mount_point:
_client.auth.radius.configure(
host=self.radius_host,
secret=self.radius_secret,
port=self.radius_port,
mount_point=self.auth_mount_point,
)
else:
_client.auth.radius.configure(
host=self.radius_host, secret=self.radius_secret, port=self.radius_port
)
def _auth_ldap(self, _client: hvac.Client) -> None:
if self.auth_mount_point:
_client.auth.ldap.login(
username=self.username, password=self.password, mount_point=self.auth_mount_point
)
else:
_client.auth.ldap.login(username=self.username, password=self.password)
def _auth_kubernetes(self, _client: hvac.Client) -> None:
if not self.kubernetes_jwt_path:
raise VaultError("The kubernetes_jwt_path should be set here. This should not happen.")
with open(self.kubernetes_jwt_path) as f:
jwt = f.read().strip()
if self.auth_mount_point:
Kubernetes(_client.adapter).login(
role=self.kubernetes_role, jwt=jwt, mount_point=self.auth_mount_point
)
else:
Kubernetes(_client.adapter).login(role=self.kubernetes_role, jwt=jwt)
def _auth_github(self, _client: hvac.Client) -> None:
if self.auth_mount_point:
_client.auth.github.login(token=self.token, mount_point=self.auth_mount_point)
else:
_client.auth.github.login(token=self.token)
def _auth_gcp(self, _client: hvac.Client) -> None:
from airflow.providers.google.cloud.utils.credentials_provider import (
_get_scopes,
get_credentials_and_project_id,
)
scopes = _get_scopes(self.gcp_scopes)
credentials, project_id = get_credentials_and_project_id(
key_path=self.gcp_key_path, keyfile_dict=self.gcp_keyfile_dict, scopes=scopes
)
import json
import time
import googleapiclient
if self.gcp_keyfile_dict:
creds = self.gcp_keyfile_dict
elif self.gcp_key_path:
with open(self.gcp_key_path) as f:
creds = json.load(f)
service_account = creds["client_email"]
# Generate a payload for subsequent "signJwt()" call
# Reference: https://googleapis.dev/python/google-auth/latest/reference/google.auth.jwt.html#google.auth.jwt.Credentials
now = int(time.time())
expires = now + 900 # 15 mins in seconds, can't be longer.
payload = {"iat": now, "exp": expires, "sub": credentials, "aud": f"vault/{self.role_id}"}
body = {"payload": json.dumps(payload)}
name = f"projects/{project_id}/serviceAccounts/{service_account}"
# Perform the GCP API call
iam = googleapiclient.discovery.build("iam", "v1", credentials=credentials)
request = iam.projects().serviceAccounts().signJwt(name=name, body=body)
resp = request.execute()
jwt = resp["signedJwt"]
if self.auth_mount_point:
_client.auth.gcp.login(role=self.role_id, jwt=jwt, mount_point=self.auth_mount_point)
else:
_client.auth.gcp.login(role=self.role_id, jwt=jwt)
def _auth_azure(self, _client: hvac.Client) -> None:
if self.auth_mount_point:
_client.auth.azure.configure(
tenant_id=self.azure_tenant_id,
resource=self.azure_resource,
client_id=self.key_id,
client_secret=self.secret_id,
mount_point=self.auth_mount_point,
)
else:
_client.auth.azure.configure(
tenant_id=self.azure_tenant_id,
resource=self.azure_resource,
client_id=self.key_id,
client_secret=self.secret_id,
)
def _auth_aws_iam(self, _client: hvac.Client) -> None:
if self.key_id and self.secret_id:
auth_args = {
"access_key": self.key_id,
"secret_key": self.secret_id,
}
else:
import boto3
if self.assume_role_kwargs:
sts_client = boto3.client("sts")
credentials = sts_client.assume_role(**self.assume_role_kwargs)
auth_args = {
"access_key": credentials["Credentials"]["AccessKeyId"],
"secret_key": credentials["Credentials"]["SecretAccessKey"],
"session_token": credentials["Credentials"]["SessionToken"],
"region": sts_client.meta.region_name,
}
else:
session = boto3.Session()
credentials = session.get_credentials()
auth_args = {
"access_key": credentials.access_key,
"secret_key": credentials.secret_key,
"session_token": credentials.token,
"region": session.region_name,
}
if self.auth_mount_point:
auth_args["mount_point"] = self.auth_mount_point
if self.region:
auth_args["region"] = self.region
if self.role_id:
auth_args["role"] = self.role_id
_client.auth.aws.iam_login(**auth_args)
def _auth_approle(self, _client: hvac.Client) -> None:
if self.auth_mount_point:
_client.auth.approle.login(
role_id=self.role_id, secret_id=self.secret_id, mount_point=self.auth_mount_point
)
else:
_client.auth.approle.login(role_id=self.role_id, secret_id=self.secret_id)
def _set_token(self, _client: hvac.Client) -> None:
if self.token_path:
with open(self.token_path) as f:
_client.token = f.read().strip()
else:
_client.token = self.token
def _parse_secret_path(self, secret_path: str) -> tuple[str, str]:
if not self.mount_point:
split_secret_path = secret_path.split("/", 1)
if len(split_secret_path) < 2:
raise InvalidPath
return split_secret_path[0], split_secret_path[1]
return self.mount_point, secret_path
def get_secret(self, secret_path: str, secret_version: int | None = None) -> dict | None:
"""
Get secret value from the KV engine.
:param secret_path: The path of the secret.
:param secret_version: Specifies the version of Secret to return. If not set, the latest
version is returned. (Can only be used in case of version 2 of KV).
See https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v1.html
and https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v2.html for details.
:return: secret stored in the vault as a dictionary
"""
mount_point = None
try:
mount_point, secret_path = self._parse_secret_path(secret_path)
if self.kv_engine_version == 1:
if secret_version:
raise VaultError("Secret version can only be used with version 2 of the KV engine")
response = self.client.secrets.kv.v1.read_secret(path=secret_path, mount_point=mount_point)
else:
response = self.client.secrets.kv.v2.read_secret_version(
path=secret_path,
mount_point=mount_point,
version=secret_version,
raise_on_deleted_version=True,
)
except InvalidPath:
self.log.debug("Secret not found %s with mount point %s", secret_path, mount_point)
return None
return_data = response["data"] if self.kv_engine_version == 1 else response["data"]["data"]
return return_data
def get_secret_metadata(self, secret_path: str) -> dict | None:
"""
Read secret metadata (including versions) from the engine. It is only valid for KV version 2.
:param secret_path: The path of the secret.
:return: secret metadata. This is a Dict containing metadata for the secret.
See https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v2.html for details.
"""
if self.kv_engine_version == 1:
raise VaultError("Metadata might only be used with version 2 of the KV engine.")
mount_point = None
try:
mount_point, secret_path = self._parse_secret_path(secret_path)
return self.client.secrets.kv.v2.read_secret_metadata(path=secret_path, mount_point=mount_point)
except InvalidPath:
self.log.debug("Secret not found %s with mount point %s", secret_path, mount_point)
return None
def get_secret_including_metadata(
self, secret_path: str, secret_version: int | None = None
) -> dict | None:
"""
Read secret including metadata. It is only valid for KV version 2.
See https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v2.html for details.
:param secret_path: The path of the secret.
:param secret_version: Specifies the version of Secret to return. If not set, the latest
version is returned. (Can only be used in case of version 2 of KV).
:return: The key info. This is a Dict with "data" mapping keeping secret
and "metadata" mapping keeping metadata of the secret.
"""
if self.kv_engine_version == 1:
raise VaultError("Metadata might only be used with version 2 of the KV engine.")
mount_point = None
try:
mount_point, secret_path = self._parse_secret_path(secret_path)
return self.client.secrets.kv.v2.read_secret_version(
path=secret_path,
mount_point=mount_point,
version=secret_version,
raise_on_deleted_version=True,
)
except InvalidPath:
self.log.debug(
"Secret not found %s with mount point %s and version %s",
secret_path,
mount_point,
secret_version,
)
return None
def create_or_update_secret(
self, secret_path: str, secret: dict, method: str | None = None, cas: int | None = None
) -> Response:
"""
Create or updates secret.
:param secret_path: The path of the secret.
:param secret: Secret to create or update for the path specified
:param method: Optional parameter to explicitly request a POST (create) or PUT (update) request to
the selected kv secret engine. If no argument is provided for this parameter, hvac attempts to
intelligently determine which method is appropriate. Only valid for KV engine version 1
:param cas: Set the "cas" value to use a Check-And-Set operation. If not set the write will be
allowed. If set to 0 a write will only be allowed if the key doesn't exist.
If the index is non-zero the write will only be allowed if the key's current version
matches the version specified in the cas parameter. Only valid for KV engine version 2.
:return: The response of the create_or_update_secret request.
See https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v1.html
and https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v2.html for details.
"""
if self.kv_engine_version == 2 and method:
raise VaultError("The method parameter is only valid for version 1")
if self.kv_engine_version == 1 and cas:
raise VaultError("The cas parameter is only valid for version 2")
mount_point, secret_path = self._parse_secret_path(secret_path)
if self.kv_engine_version == 1:
response = self.client.secrets.kv.v1.create_or_update_secret(
path=secret_path, secret=secret, mount_point=mount_point, method=method
)
else:
response = self.client.secrets.kv.v2.create_or_update_secret(
path=secret_path, secret=secret, mount_point=mount_point, cas=cas
)
return response
| _VaultClient |
python | pandas-dev__pandas | asv_bench/benchmarks/frame_ctor.py | {
"start": 2751,
"end": 2930
} | class ____:
def setup(self):
N = 100000
self.data = np.random.randn(N)
def time_frame_from_ndarray(self):
self.df = DataFrame(self.data)
| FromNDArray |
python | pytorch__pytorch | test/dynamo/test_autograd_function.py | {
"start": 5540,
"end": 5722
} | class ____(torch.nn.Module):
def __init__(self, func):
super().__init__()
self.f = func.apply
def forward(self, x):
return self.f(x)
| ModuleWithGradFunc |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/external_non_default_variant/package.py | {
"start": 216,
"end": 621
} | class ____(Package):
"""An external that is registered with a non-default value"""
homepage = "http://www.python.org"
url = "http://www.python.org/ftp/python/3.8.7/Python-3.8.7.tgz"
version("3.8.7", md5="be78e48cdfc1a7ad90efff146dce6cfe")
variant("foo", default=True, description="just a variant")
variant("bar", default=True, description="just a variant")
| ExternalNonDefaultVariant |
python | encode__django-rest-framework | tests/authentication/test_authentication.py | {
"start": 18632,
"end": 18781
} | class ____(BaseTokenAuthTests, TestCase):
model = Token
path = '/customkeywordtoken/'
header_prefix = 'Bearer '
| CustomKeywordTokenAuthTests |
python | Textualize__textual | src/textual/rlock.py | {
"start": 83,
"end": 1657
} | class ____:
"""A re-entrant asyncio lock."""
def __init__(self) -> None:
self._owner: Task | None = None
self._count = 0
self._lock = Lock()
async def acquire(self) -> None:
"""Wait until the lock can be acquired."""
task = current_task()
assert task is not None
if self._owner is None or self._owner is not task:
await self._lock.acquire()
self._owner = task
self._count += 1
def release(self) -> None:
"""Release a previously acquired lock."""
task = current_task()
assert task is not None
self._count -= 1
if self._count < 0:
# Should not occur if every acquire as a release
raise RuntimeError("RLock.release called too many times")
if self._owner is task:
if not self._count:
self._owner = None
self._lock.release()
@property
def is_locked(self):
"""Return True if lock is acquired."""
return self._lock.locked()
async def __aenter__(self) -> None:
"""Asynchronous context manager to acquire and release lock."""
await self.acquire()
async def __aexit__(self, _type, _value, _traceback) -> None:
"""Exit the context manager."""
self.release()
if __name__ == "__main__":
from asyncio import Lock
async def locks():
lock = RLock()
async with lock:
async with lock:
print("Hello")
import asyncio
asyncio.run(locks())
| RLock |
python | pydantic__pydantic | tests/benchmarks/basemodel_eq_performance.py | {
"start": 6554,
"end": 8364
} | class ____(pydantic.BaseModel, frozen=True):
def __eq__(self, other: Any) -> bool:
if isinstance(other, pydantic.BaseModel):
# When comparing instances of generic types for equality, as long as all field values are equal,
# only require their generic origin types to be equal, rather than exact type equality.
# This prevents headaches like MyGeneric(x=1) != MyGeneric[Any](x=1).
self_type = self.__pydantic_generic_metadata__['origin'] or self.__class__
other_type = other.__pydantic_generic_metadata__['origin'] or other.__class__
# Perform common checks first
if not (
self_type == other_type
and self.__pydantic_private__ == other.__pydantic_private__
and self.__pydantic_extra__ == other.__pydantic_extra__
):
return False
# Fix GH-7444 by comparing only pydantic fields
# We provide a fast-path for performance: __dict__ comparison is *much* faster
# See tests/benchmarks/test_basemodel_eq_performances.py and GH-7825 for benchmarks
if self.__dict__ == other.__dict__:
# If the check above passes, then pydantic fields are equal, we can return early
return True
else:
# Else, we need to perform a more detailed, costlier comparison
model_fields = type(self).model_fields.keys()
getter = operator.itemgetter(*model_fields) if model_fields else lambda _: None
return getter(_SafeGetItemProxy(self.__dict__)) == getter(_SafeGetItemProxy(other.__dict__))
else:
return NotImplemented # delegate to the other item in the comparison
| SafeItemGetterEqModelFastPath |
python | django__django | tests/managers_regress/models.py | {
"start": 2446,
"end": 2697
} | class ____(models.Model):
test_gfk = GenericRelation(
"RelationModel", content_type_field="gfk_ctype", object_id_field="gfk_id"
)
exact = models.BooleanField(null=True)
def __str__(self):
return str(self.pk)
| RelatedModel |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/document.py | {
"start": 2307,
"end": 40539
} | class ____:
"""
This is a immutable class around the text and cursor position, and contains
methods for querying this data, e.g. to give the text before the cursor.
This class is usually instantiated by a :class:`~prompt_toolkit.buffer.Buffer`
object, and accessed as the `document` property of that class.
:param text: string
:param cursor_position: int
:param selection: :class:`.SelectionState`
"""
__slots__ = ("_text", "_cursor_position", "_selection", "_cache")
def __init__(
self,
text: str = "",
cursor_position: int | None = None,
selection: SelectionState | None = None,
) -> None:
# Check cursor position. It can also be right after the end. (Where we
# insert text.)
assert cursor_position is None or cursor_position <= len(text), AssertionError(
f"cursor_position={cursor_position!r}, len_text={len(text)!r}"
)
# By default, if no cursor position was given, make sure to put the
# cursor position is at the end of the document. This is what makes
# sense in most places.
if cursor_position is None:
cursor_position = len(text)
# Keep these attributes private. A `Document` really has to be
# considered to be immutable, because otherwise the caching will break
# things. Because of that, we wrap these into read-only properties.
self._text = text
self._cursor_position = cursor_position
self._selection = selection
# Cache for lines/indexes. (Shared with other Document instances that
# contain the same text.
try:
self._cache = _text_to_document_cache[self.text]
except KeyError:
self._cache = _DocumentCache()
_text_to_document_cache[self.text] = self._cache
# XX: For some reason, above, we can't use 'WeakValueDictionary.setdefault'.
# This fails in Pypy3. `self._cache` becomes None, because that's what
# 'setdefault' returns.
# self._cache = _text_to_document_cache.setdefault(self.text, _DocumentCache())
# assert self._cache
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.text!r}, {self.cursor_position!r})"
def __eq__(self, other: object) -> bool:
if not isinstance(other, Document):
return False
return (
self.text == other.text
and self.cursor_position == other.cursor_position
and self.selection == other.selection
)
@property
def text(self) -> str:
"The document text."
return self._text
@property
def cursor_position(self) -> int:
"The document cursor position."
return self._cursor_position
@property
def selection(self) -> SelectionState | None:
":class:`.SelectionState` object."
return self._selection
@property
def current_char(self) -> str:
"""Return character under cursor or an empty string."""
return self._get_char_relative_to_cursor(0) or ""
@property
def char_before_cursor(self) -> str:
"""Return character before the cursor or an empty string."""
return self._get_char_relative_to_cursor(-1) or ""
@property
def text_before_cursor(self) -> str:
return self.text[: self.cursor_position :]
@property
def text_after_cursor(self) -> str:
return self.text[self.cursor_position :]
@property
def current_line_before_cursor(self) -> str:
"""Text from the start of the line until the cursor."""
_, _, text = self.text_before_cursor.rpartition("\n")
return text
@property
def current_line_after_cursor(self) -> str:
"""Text from the cursor until the end of the line."""
text, _, _ = self.text_after_cursor.partition("\n")
return text
@property
def lines(self) -> list[str]:
"""
Array of all the lines.
"""
# Cache, because this one is reused very often.
if self._cache.lines is None:
self._cache.lines = _ImmutableLineList(self.text.split("\n"))
return self._cache.lines
@property
def _line_start_indexes(self) -> list[int]:
"""
Array pointing to the start indexes of all the lines.
"""
# Cache, because this is often reused. (If it is used, it's often used
# many times. And this has to be fast for editing big documents!)
if self._cache.line_indexes is None:
# Create list of line lengths.
line_lengths = map(len, self.lines)
# Calculate cumulative sums.
indexes = [0]
append = indexes.append
pos = 0
for line_length in line_lengths:
pos += line_length + 1
append(pos)
# Remove the last item. (This is not a new line.)
if len(indexes) > 1:
indexes.pop()
self._cache.line_indexes = indexes
return self._cache.line_indexes
@property
def lines_from_current(self) -> list[str]:
"""
Array of the lines starting from the current line, until the last line.
"""
return self.lines[self.cursor_position_row :]
@property
def line_count(self) -> int:
r"""Return the number of lines in this document. If the document ends
with a trailing \n, that counts as the beginning of a new line."""
return len(self.lines)
@property
def current_line(self) -> str:
"""Return the text on the line where the cursor is. (when the input
consists of just one line, it equals `text`."""
return self.current_line_before_cursor + self.current_line_after_cursor
@property
def leading_whitespace_in_current_line(self) -> str:
"""The leading whitespace in the left margin of the current line."""
current_line = self.current_line
length = len(current_line) - len(current_line.lstrip())
return current_line[:length]
def _get_char_relative_to_cursor(self, offset: int = 0) -> str:
"""
Return character relative to cursor position, or empty string
"""
try:
return self.text[self.cursor_position + offset]
except IndexError:
return ""
@property
def on_first_line(self) -> bool:
"""
True when we are at the first line.
"""
return self.cursor_position_row == 0
@property
def on_last_line(self) -> bool:
"""
True when we are at the last line.
"""
return self.cursor_position_row == self.line_count - 1
@property
def cursor_position_row(self) -> int:
"""
Current row. (0-based.)
"""
row, _ = self._find_line_start_index(self.cursor_position)
return row
@property
def cursor_position_col(self) -> int:
"""
Current column. (0-based.)
"""
# (Don't use self.text_before_cursor to calculate this. Creating
# substrings and doing rsplit is too expensive for getting the cursor
# position.)
_, line_start_index = self._find_line_start_index(self.cursor_position)
return self.cursor_position - line_start_index
def _find_line_start_index(self, index: int) -> tuple[int, int]:
"""
For the index of a character at a certain line, calculate the index of
the first character on that line.
Return (row, index) tuple.
"""
indexes = self._line_start_indexes
pos = bisect.bisect_right(indexes, index) - 1
return pos, indexes[pos]
def translate_index_to_position(self, index: int) -> tuple[int, int]:
"""
Given an index for the text, return the corresponding (row, col) tuple.
(0-based. Returns (0, 0) for index=0.)
"""
# Find start of this line.
row, row_index = self._find_line_start_index(index)
col = index - row_index
return row, col
def translate_row_col_to_index(self, row: int, col: int) -> int:
"""
Given a (row, col) tuple, return the corresponding index.
(Row and col params are 0-based.)
Negative row/col values are turned into zero.
"""
try:
result = self._line_start_indexes[row]
line = self.lines[row]
except IndexError:
if row < 0:
result = self._line_start_indexes[0]
line = self.lines[0]
else:
result = self._line_start_indexes[-1]
line = self.lines[-1]
result += max(0, min(col, len(line)))
# Keep in range. (len(self.text) is included, because the cursor can be
# right after the end of the text as well.)
result = max(0, min(result, len(self.text)))
return result
@property
def is_cursor_at_the_end(self) -> bool:
"""True when the cursor is at the end of the text."""
return self.cursor_position == len(self.text)
@property
def is_cursor_at_the_end_of_line(self) -> bool:
"""True when the cursor is at the end of this line."""
return self.current_char in ("\n", "")
def has_match_at_current_position(self, sub: str) -> bool:
"""
`True` when this substring is found at the cursor position.
"""
return self.text.find(sub, self.cursor_position) == self.cursor_position
def find(
self,
sub: str,
in_current_line: bool = False,
include_current_position: bool = False,
ignore_case: bool = False,
count: int = 1,
) -> int | None:
"""
Find `text` after the cursor, return position relative to the cursor
position. Return `None` if nothing was found.
:param count: Find the n-th occurrence.
"""
assert isinstance(ignore_case, bool)
if in_current_line:
text = self.current_line_after_cursor
else:
text = self.text_after_cursor
if not include_current_position:
if len(text) == 0:
return None # (Otherwise, we always get a match for the empty string.)
else:
text = text[1:]
flags = re.IGNORECASE if ignore_case else 0
iterator = re.finditer(re.escape(sub), text, flags)
try:
for i, match in enumerate(iterator):
if i + 1 == count:
if include_current_position:
return match.start(0)
else:
return match.start(0) + 1
except StopIteration:
pass
return None
def find_all(self, sub: str, ignore_case: bool = False) -> list[int]:
"""
Find all occurrences of the substring. Return a list of absolute
positions in the document.
"""
flags = re.IGNORECASE if ignore_case else 0
return [a.start() for a in re.finditer(re.escape(sub), self.text, flags)]
def find_backwards(
self,
sub: str,
in_current_line: bool = False,
ignore_case: bool = False,
count: int = 1,
) -> int | None:
"""
Find `text` before the cursor, return position relative to the cursor
position. Return `None` if nothing was found.
:param count: Find the n-th occurrence.
"""
if in_current_line:
before_cursor = self.current_line_before_cursor[::-1]
else:
before_cursor = self.text_before_cursor[::-1]
flags = re.IGNORECASE if ignore_case else 0
iterator = re.finditer(re.escape(sub[::-1]), before_cursor, flags)
try:
for i, match in enumerate(iterator):
if i + 1 == count:
return -match.start(0) - len(sub)
except StopIteration:
pass
return None
    def get_word_before_cursor(
        self, WORD: bool = False, pattern: Pattern[str] | None = None
    ) -> str:
        """
        Give the word before the cursor.
        If we have whitespace before the cursor this returns an empty string.

        :param WORD: Use whitespace-delimited "WORD" semantics instead of
            word-character semantics.
        :param pattern: (None or compiled regex). When given, use this regex
            pattern.
        """
        if self._is_word_before_cursor_complete(WORD=WORD, pattern=pattern):
            # Space before the cursor or no text before cursor.
            return ""

        text_before_cursor = self.text_before_cursor
        # `start` is a negative offset from the cursor to the word start.
        start = self.find_start_of_previous_word(WORD=WORD, pattern=pattern) or 0

        return text_before_cursor[len(text_before_cursor) + start :]
    def _is_word_before_cursor_complete(
        self, WORD: bool = False, pattern: Pattern[str] | None = None
    ) -> bool:
        # True when there is no (partial) word directly before the cursor:
        # either there is no text at all, or the last character is whitespace.
        if self.text_before_cursor == "" or self.text_before_cursor[-1:].isspace():
            return True
        if pattern:
            # With a custom pattern, "complete" means the pattern does not
            # match immediately before the cursor at all.
            return self.find_start_of_previous_word(WORD=WORD, pattern=pattern) is None
        return False
def find_start_of_previous_word(
self, count: int = 1, WORD: bool = False, pattern: Pattern[str] | None = None
) -> int | None:
"""
Return an index relative to the cursor position pointing to the start
of the previous word. Return `None` if nothing was found.
:param pattern: (None or compiled regex). When given, use this regex
pattern.
"""
assert not (WORD and pattern)
# Reverse the text before the cursor, in order to do an efficient
# backwards search.
text_before_cursor = self.text_before_cursor[::-1]
if pattern:
regex = pattern
elif WORD:
regex = _FIND_BIG_WORD_RE
else:
regex = _FIND_WORD_RE
iterator = regex.finditer(text_before_cursor)
try:
for i, match in enumerate(iterator):
if i + 1 == count:
return -match.end(0)
except StopIteration:
pass
return None
    def find_boundaries_of_current_word(
        self,
        WORD: bool = False,
        include_leading_whitespace: bool = False,
        include_trailing_whitespace: bool = False,
    ) -> tuple[int, int]:
        """
        Return the relative boundaries (startpos, endpos) of the current word under the
        cursor. (This is at the current line, because line boundaries obviously
        don't belong to any word.)
        If not on a word, this returns (0,0)
        """
        # The two halves of the word are matched independently: the prefix is
        # reversed so the "before" regex can anchor at the cursor.
        text_before_cursor = self.current_line_before_cursor[::-1]
        text_after_cursor = self.current_line_after_cursor

        def get_regex(include_whitespace: bool) -> Pattern[str]:
            # Pick the regex variant for (WORD semantics, whitespace mode).
            return {
                (False, False): _FIND_CURRENT_WORD_RE,
                (False, True): _FIND_CURRENT_WORD_INCLUDE_TRAILING_WHITESPACE_RE,
                (True, False): _FIND_CURRENT_BIG_WORD_RE,
                (True, True): _FIND_CURRENT_BIG_WORD_INCLUDE_TRAILING_WHITESPACE_RE,
            }[(WORD, include_whitespace)]

        match_before = get_regex(include_leading_whitespace).search(text_before_cursor)
        match_after = get_regex(include_trailing_whitespace).search(text_after_cursor)

        # When there is a match before and after, and we're not looking for
        # WORDs, make sure that both the part before and after the cursor are
        # either in the [a-zA-Z_] alphabet or not. Otherwise, drop the part
        # before the cursor.
        if not WORD and match_before and match_after:
            c1 = self.text[self.cursor_position - 1]
            c2 = self.text[self.cursor_position]
            alphabet = string.ascii_letters + "0123456789_"

            if (c1 in alphabet) != (c2 in alphabet):
                match_before = None

        return (
            -match_before.end(1) if match_before else 0,
            match_after.end(1) if match_after else 0,
        )
def get_word_under_cursor(self, WORD: bool = False) -> str:
"""
Return the word, currently below the cursor.
This returns an empty string when the cursor is on a whitespace region.
"""
start, end = self.find_boundaries_of_current_word(WORD=WORD)
return self.text[self.cursor_position + start : self.cursor_position + end]
def find_next_word_beginning(
self, count: int = 1, WORD: bool = False
) -> int | None:
"""
Return an index relative to the cursor position pointing to the start
of the next word. Return `None` if nothing was found.
"""
if count < 0:
return self.find_previous_word_beginning(count=-count, WORD=WORD)
regex = _FIND_BIG_WORD_RE if WORD else _FIND_WORD_RE
iterator = regex.finditer(self.text_after_cursor)
try:
for i, match in enumerate(iterator):
# Take first match, unless it's the word on which we're right now.
if i == 0 and match.start(1) == 0:
count += 1
if i + 1 == count:
return match.start(1)
except StopIteration:
pass
return None
def find_next_word_ending(
self, include_current_position: bool = False, count: int = 1, WORD: bool = False
) -> int | None:
"""
Return an index relative to the cursor position pointing to the end
of the next word. Return `None` if nothing was found.
"""
if count < 0:
return self.find_previous_word_ending(count=-count, WORD=WORD)
if include_current_position:
text = self.text_after_cursor
else:
text = self.text_after_cursor[1:]
regex = _FIND_BIG_WORD_RE if WORD else _FIND_WORD_RE
iterable = regex.finditer(text)
try:
for i, match in enumerate(iterable):
if i + 1 == count:
value = match.end(1)
if include_current_position:
return value
else:
return value + 1
except StopIteration:
pass
return None
def find_previous_word_beginning(
self, count: int = 1, WORD: bool = False
) -> int | None:
"""
Return an index relative to the cursor position pointing to the start
of the previous word. Return `None` if nothing was found.
"""
if count < 0:
return self.find_next_word_beginning(count=-count, WORD=WORD)
regex = _FIND_BIG_WORD_RE if WORD else _FIND_WORD_RE
iterator = regex.finditer(self.text_before_cursor[::-1])
try:
for i, match in enumerate(iterator):
if i + 1 == count:
return -match.end(1)
except StopIteration:
pass
return None
def find_previous_word_ending(
self, count: int = 1, WORD: bool = False
) -> int | None:
"""
Return an index relative to the cursor position pointing to the end
of the previous word. Return `None` if nothing was found.
"""
if count < 0:
return self.find_next_word_ending(count=-count, WORD=WORD)
text_before_cursor = self.text_after_cursor[:1] + self.text_before_cursor[::-1]
regex = _FIND_BIG_WORD_RE if WORD else _FIND_WORD_RE
iterator = regex.finditer(text_before_cursor)
try:
for i, match in enumerate(iterator):
# Take first match, unless it's the word on which we're right now.
if i == 0 and match.start(1) == 0:
count += 1
if i + 1 == count:
return -match.start(1) + 1
except StopIteration:
pass
return None
    def find_next_matching_line(
        self, match_func: Callable[[str], bool], count: int = 1
    ) -> int | None:
        """
        Look downwards for empty lines.
        Return the line index, relative to the current line.

        :param match_func: Predicate deciding whether a line "matches".
        :param count: Stop after this many matching lines.
        """
        result = None

        # Note: if fewer than `count` lines match, the *last* match found is
        # returned instead of None.
        for index, line in enumerate(self.lines[self.cursor_position_row + 1 :]):
            if match_func(line):
                result = 1 + index
                count -= 1

            if count == 0:
                break

        return result
    def find_previous_matching_line(
        self, match_func: Callable[[str], bool], count: int = 1
    ) -> int | None:
        """
        Look upwards for empty lines.
        Return the line index, relative to the current line.

        :param match_func: Predicate deciding whether a line "matches".
        :param count: Stop after this many matching lines.
        """
        result = None

        # Scan the lines above the cursor, nearest first. If fewer than
        # `count` lines match, the *last* match found is returned, not None.
        for index, line in enumerate(self.lines[: self.cursor_position_row][::-1]):
            if match_func(line):
                result = -1 - index
                count -= 1

            if count == 0:
                break

        return result
def get_cursor_left_position(self, count: int = 1) -> int:
"""
Relative position for cursor left.
"""
if count < 0:
return self.get_cursor_right_position(-count)
return -min(self.cursor_position_col, count)
def get_cursor_right_position(self, count: int = 1) -> int:
"""
Relative position for cursor_right.
"""
if count < 0:
return self.get_cursor_left_position(-count)
return min(count, len(self.current_line_after_cursor))
    def get_cursor_up_position(
        self, count: int = 1, preferred_column: int | None = None
    ) -> int:
        """
        Return the relative cursor position (character index) where we would be if the
        user pressed the arrow-up button.

        :param preferred_column: When given, go to this column instead of
                                 staying at the current column.
        """
        assert count >= 1
        column = (
            self.cursor_position_col if preferred_column is None else preferred_column
        )
        # Clamp the target row at 0; translate_row_col_to_index additionally
        # clamps the column to the target line's length.
        return (
            self.translate_row_col_to_index(
                max(0, self.cursor_position_row - count), column
            )
            - self.cursor_position
        )
    def get_cursor_down_position(
        self, count: int = 1, preferred_column: int | None = None
    ) -> int:
        """
        Return the relative cursor position (character index) where we would be if the
        user pressed the arrow-down button.

        :param preferred_column: When given, go to this column instead of
                                 staying at the current column.
        """
        assert count >= 1
        column = (
            self.cursor_position_col if preferred_column is None else preferred_column
        )
        # Rows beyond the last line and columns beyond the target line's
        # length are clamped inside translate_row_col_to_index.
        return (
            self.translate_row_col_to_index(self.cursor_position_row + count, column)
            - self.cursor_position
        )
def find_enclosing_bracket_right(
self, left_ch: str, right_ch: str, end_pos: int | None = None
) -> int | None:
"""
Find the right bracket enclosing current position. Return the relative
position to the cursor position.
When `end_pos` is given, don't look past the position.
"""
if self.current_char == right_ch:
return 0
if end_pos is None:
end_pos = len(self.text)
else:
end_pos = min(len(self.text), end_pos)
stack = 1
# Look forward.
for i in range(self.cursor_position + 1, end_pos):
c = self.text[i]
if c == left_ch:
stack += 1
elif c == right_ch:
stack -= 1
if stack == 0:
return i - self.cursor_position
return None
def find_enclosing_bracket_left(
self, left_ch: str, right_ch: str, start_pos: int | None = None
) -> int | None:
"""
Find the left bracket enclosing current position. Return the relative
position to the cursor position.
When `start_pos` is given, don't look past the position.
"""
if self.current_char == left_ch:
return 0
if start_pos is None:
start_pos = 0
else:
start_pos = max(0, start_pos)
stack = 1
# Look backward.
for i in range(self.cursor_position - 1, start_pos - 1, -1):
c = self.text[i]
if c == right_ch:
stack += 1
elif c == left_ch:
stack -= 1
if stack == 0:
return i - self.cursor_position
return None
def find_matching_bracket_position(
self, start_pos: int | None = None, end_pos: int | None = None
) -> int:
"""
Return relative cursor position of matching [, (, { or < bracket.
When `start_pos` or `end_pos` are given. Don't look past the positions.
"""
# Look for a match.
for pair in "()", "[]", "{}", "<>":
A = pair[0]
B = pair[1]
if self.current_char == A:
return self.find_enclosing_bracket_right(A, B, end_pos=end_pos) or 0
elif self.current_char == B:
return self.find_enclosing_bracket_left(A, B, start_pos=start_pos) or 0
return 0
def get_start_of_document_position(self) -> int:
"""Relative position for the start of the document."""
return -self.cursor_position
def get_end_of_document_position(self) -> int:
"""Relative position for the end of the document."""
return len(self.text) - self.cursor_position
def get_start_of_line_position(self, after_whitespace: bool = False) -> int:
"""Relative position for the start of this line."""
if after_whitespace:
current_line = self.current_line
return (
len(current_line)
- len(current_line.lstrip())
- self.cursor_position_col
)
else:
return -len(self.current_line_before_cursor)
def get_end_of_line_position(self) -> int:
"""Relative position for the end of this line."""
return len(self.current_line_after_cursor)
def last_non_blank_of_current_line_position(self) -> int:
"""
Relative position for the last non blank character of this line.
"""
return len(self.current_line.rstrip()) - self.cursor_position_col - 1
def get_column_cursor_position(self, column: int) -> int:
"""
Return the relative cursor position for this column at the current
line. (It will stay between the boundaries of the line in case of a
larger number.)
"""
line_length = len(self.current_line)
current_column = self.cursor_position_col
column = max(0, min(line_length, column))
return column - current_column
def selection_range(
self,
) -> tuple[
int, int
]: # XXX: shouldn't this return `None` if there is no selection???
"""
Return (from, to) tuple of the selection.
start and end position are included.
This doesn't take the selection type into account. Use
`selection_ranges` instead.
"""
if self.selection:
from_, to = sorted(
[self.cursor_position, self.selection.original_cursor_position]
)
else:
from_, to = self.cursor_position, self.cursor_position
return from_, to
    def selection_ranges(self) -> Iterable[tuple[int, int]]:
        """
        Return a list of `(from, to)` tuples for the selection or none if
        nothing was selected. The upper boundary is not included.

        This will yield several (from, to) tuples in case of a BLOCK selection.
        This will return zero ranges, like (8,8) for empty lines in a block
        selection.
        """
        if self.selection:
            from_, to = sorted(
                [self.cursor_position, self.selection.original_cursor_position]
            )

            if self.selection.type == SelectionType.BLOCK:
                # Rectangular selection: yield one range per covered row,
                # bounded by the sorted column pair.
                from_line, from_column = self.translate_index_to_position(from_)
                to_line, to_column = self.translate_index_to_position(to)
                from_column, to_column = sorted([from_column, to_column])
                lines = self.lines

                if vi_mode():
                    to_column += 1

                for l in range(from_line, to_line + 1):
                    line_length = len(lines[l])

                    # Rows shorter than the left column produce no range.
                    if from_column <= line_length:
                        yield (
                            self.translate_row_col_to_index(l, from_column),
                            self.translate_row_col_to_index(
                                l, min(line_length, to_column)
                            ),
                        )
            else:
                # In case of a LINES selection, go to the start/end of the lines.
                if self.selection.type == SelectionType.LINES:
                    from_ = max(0, self.text.rfind("\n", 0, from_) + 1)

                    if self.text.find("\n", to) >= 0:
                        to = self.text.find("\n", to)
                    else:
                        to = len(self.text) - 1

                # In Vi mode, the upper boundary is always included. For Emacs,
                # that's not the case.
                if vi_mode():
                    to += 1

                yield from_, to
    def selection_range_at_line(self, row: int) -> tuple[int, int] | None:
        """
        If the selection spans a portion of the given line, return a (from, to) tuple.

        The returned upper boundary is not included in the selection, so
        `(0, 0)` is an empty selection. `(0, 1)`, is a one character selection.

        Returns None if the selection doesn't cover this line at all.
        """
        if self.selection:
            line = self.lines[row]

            row_start = self.translate_row_col_to_index(row, 0)
            row_end = self.translate_row_col_to_index(row, len(line))

            from_, to = sorted(
                [self.cursor_position, self.selection.original_cursor_position]
            )

            # Take the intersection of the current line and the selection.
            intersection_start = max(row_start, from_)
            intersection_end = min(row_end, to)

            if intersection_start <= intersection_end:
                if self.selection.type == SelectionType.LINES:
                    # A LINES selection covers the whole row.
                    intersection_start = row_start
                    intersection_end = row_end

                elif self.selection.type == SelectionType.BLOCK:
                    # A BLOCK selection is clipped to the selected column span.
                    _, col1 = self.translate_index_to_position(from_)
                    _, col2 = self.translate_index_to_position(to)
                    col1, col2 = sorted([col1, col2])

                    if col1 > len(line):
                        return None  # Block selection doesn't cross this line.

                    intersection_start = self.translate_row_col_to_index(row, col1)
                    intersection_end = self.translate_row_col_to_index(row, col2)

                _, from_column = self.translate_index_to_position(intersection_start)
                _, to_column = self.translate_index_to_position(intersection_end)

                # In Vi mode, the upper boundary is always included. For Emacs
                # mode, that's not the case.
                if vi_mode():
                    to_column += 1

                return from_column, to_column
        return None
    def cut_selection(self) -> tuple[Document, ClipboardData]:
        """
        Return a (:class:`.Document`, :class:`.ClipboardData`) tuple, where the
        document represents the new document when the selection is cut, and the
        clipboard data, represents whatever has to be put on the clipboard.
        """
        if self.selection:
            cut_parts = []
            remaining_parts = []
            new_cursor_position = self.cursor_position

            last_to = 0
            for from_, to in self.selection_ranges():
                if last_to == 0:
                    # The cursor ends up at the start of the first cut range.
                    new_cursor_position = from_

                remaining_parts.append(self.text[last_to:from_])
                cut_parts.append(self.text[from_:to])
                last_to = to

            remaining_parts.append(self.text[last_to:])

            # BLOCK selections yield one range per row; joining with "\n"
            # reassembles them as lines (a no-op for a single range).
            cut_text = "\n".join(cut_parts)
            remaining_text = "".join(remaining_parts)

            # In case of a LINES selection, don't include the trailing newline.
            if self.selection.type == SelectionType.LINES and cut_text.endswith("\n"):
                cut_text = cut_text[:-1]

            return (
                Document(text=remaining_text, cursor_position=new_cursor_position),
                ClipboardData(cut_text, self.selection.type),
            )
        else:
            # Nothing selected: document unchanged, empty clipboard payload.
            return self, ClipboardData("")
    def paste_clipboard_data(
        self,
        data: ClipboardData,
        paste_mode: PasteMode = PasteMode.EMACS,
        count: int = 1,
    ) -> Document:
        """
        Return a new :class:`.Document` instance which contains the result if
        we would paste this data at the current cursor position.

        :param paste_mode: Where to paste. (Before/after/emacs.)
        :param count: When >1, Paste multiple times.
        """
        before = paste_mode == PasteMode.VI_BEFORE
        after = paste_mode == PasteMode.VI_AFTER

        if data.type == SelectionType.CHARACTERS:
            # Character-wise paste: insert inline, before or after the
            # character under the cursor.
            if after:
                new_text = (
                    self.text[: self.cursor_position + 1]
                    + data.text * count
                    + self.text[self.cursor_position + 1 :]
                )
            else:
                new_text = (
                    self.text_before_cursor + data.text * count + self.text_after_cursor
                )

            new_cursor_position = self.cursor_position + len(data.text) * count
            if before:
                new_cursor_position -= 1

        elif data.type == SelectionType.LINES:
            # Line-wise paste: insert whole lines above (before) or below the
            # current row.
            l = self.cursor_position_row
            if before:
                lines = self.lines[:l] + [data.text] * count + self.lines[l:]
                new_text = "\n".join(lines)
                new_cursor_position = len("".join(self.lines[:l])) + l
            else:
                lines = self.lines[: l + 1] + [data.text] * count + self.lines[l + 1 :]
                new_cursor_position = len("".join(self.lines[: l + 1])) + l + 1
                new_text = "\n".join(lines)

        elif data.type == SelectionType.BLOCK:
            # Rectangular paste: splice each pasted row into the matching line
            # at the cursor column, padding short lines with spaces (ljust) and
            # appending empty lines when the block extends past the document.
            lines = self.lines[:]
            start_line = self.cursor_position_row
            start_column = self.cursor_position_col + (0 if before else 1)

            for i, line in enumerate(data.text.split("\n")):
                index = i + start_line
                if index >= len(lines):
                    lines.append("")

                lines[index] = lines[index].ljust(start_column)
                lines[index] = (
                    lines[index][:start_column]
                    + line * count
                    + lines[index][start_column:]
                )

            new_text = "\n".join(lines)
            new_cursor_position = self.cursor_position + (0 if before else 1)

        return Document(text=new_text, cursor_position=new_cursor_position)
def empty_line_count_at_the_end(self) -> int:
"""
Return number of empty lines at the end of the document.
"""
count = 0
for line in self.lines[::-1]:
if not line or line.isspace():
count += 1
else:
break
return count
    def start_of_paragraph(self, count: int = 1, before: bool = False) -> int:
        """
        Return the start of the current paragraph. (Relative cursor position.)

        :param count: Skip this many paragraph boundaries upwards.
        :param before: When True, land on the blank boundary line itself
            instead of the line after it.
        """

        def match_func(text: str) -> bool:
            # A paragraph boundary is an empty or whitespace-only line.
            return not text or text.isspace()

        line_index = self.find_previous_matching_line(
            match_func=match_func, count=count
        )

        if line_index:
            add = 0 if before else 1
            return min(0, self.get_cursor_up_position(count=-line_index) + add)
        else:
            # No boundary above: move to the very start of the document.
            return -self.cursor_position
    def end_of_paragraph(self, count: int = 1, after: bool = False) -> int:
        """
        Return the end of the current paragraph. (Relative cursor position.)

        :param count: Skip this many paragraph boundaries downwards.
        :param after: When True, land on the blank boundary line itself
            instead of the line before it.
        """

        def match_func(text: str) -> bool:
            # A paragraph boundary is an empty or whitespace-only line.
            return not text or text.isspace()

        line_index = self.find_next_matching_line(match_func=match_func, count=count)

        if line_index:
            add = 0 if after else 1
            return max(0, self.get_cursor_down_position(count=line_index) - add)
        else:
            # No boundary below: move to the very end of the document.
            return len(self.text_after_cursor)
# Modifiers.
def insert_after(self, text: str) -> Document:
"""
Create a new document, with this text inserted after the buffer.
It keeps selection ranges and cursor position in sync.
"""
return Document(
text=self.text + text,
cursor_position=self.cursor_position,
selection=self.selection,
)
    def insert_before(self, text: str) -> Document:
        """
        Create a new document, with this text inserted before the buffer.
        It keeps selection ranges and cursor position in sync.
        """
        selection_state = self.selection

        # Prepending shifts every existing position by len(text); move the
        # selection anchor along with the cursor.
        if selection_state:
            selection_state = SelectionState(
                original_cursor_position=selection_state.original_cursor_position
                + len(text),
                type=selection_state.type,
            )

        return Document(
            text=text + self.text,
            cursor_position=self.cursor_position + len(text),
            selection=selection_state,
        )
| Document |
python | pypa__setuptools | pkg_resources/tests/test_resources.py | {
"start": 26764,
"end": 31252
} | class ____:
ns_str = "__import__('pkg_resources').declare_namespace(__name__)\n"
@pytest.fixture
def symlinked_tmpdir(self, tmpdir):
"""
Where available, return the tempdir as a symlink,
which as revealed in #231 is more fragile than
a natural tempdir.
"""
if not hasattr(os, 'symlink'):
yield str(tmpdir)
return
link_name = str(tmpdir) + '-linked'
os.symlink(str(tmpdir), link_name)
try:
yield type(tmpdir)(link_name)
finally:
os.unlink(link_name)
@pytest.fixture(autouse=True)
def patched_path(self, tmpdir):
"""
Patch sys.path to include the 'site-pkgs' dir. Also
restore pkg_resources._namespace_packages to its
former state.
"""
saved_ns_pkgs = pkg_resources._namespace_packages.copy()
saved_sys_path = sys.path[:]
site_pkgs = tmpdir.mkdir('site-pkgs')
sys.path.append(str(site_pkgs))
try:
yield
finally:
pkg_resources._namespace_packages = saved_ns_pkgs
sys.path = saved_sys_path
issue591 = pytest.mark.xfail(platform.system() == 'Windows', reason="#591")
@issue591
def test_two_levels_deep(self, symlinked_tmpdir):
"""
Test nested namespace packages
Create namespace packages in the following tree :
site-packages-1/pkg1/pkg2
site-packages-2/pkg1/pkg2
Check both are in the _namespace_packages dict and that their __path__
is correct
"""
real_tmpdir = symlinked_tmpdir.realpath()
tmpdir = symlinked_tmpdir
sys.path.append(str(tmpdir / 'site-pkgs2'))
site_dirs = tmpdir / 'site-pkgs', tmpdir / 'site-pkgs2'
for site in site_dirs:
pkg1 = site / 'pkg1'
pkg2 = pkg1 / 'pkg2'
pkg2.ensure_dir()
(pkg1 / '__init__.py').write_text(self.ns_str, encoding='utf-8')
(pkg2 / '__init__.py').write_text(self.ns_str, encoding='utf-8')
with pytest.warns(DeprecationWarning, match="pkg_resources.declare_namespace"):
import pkg1 # pyright: ignore[reportMissingImports] # Temporary package for test
assert "pkg1" in pkg_resources._namespace_packages
# attempt to import pkg2 from site-pkgs2
with pytest.warns(DeprecationWarning, match="pkg_resources.declare_namespace"):
import pkg1.pkg2 # pyright: ignore[reportMissingImports] # Temporary package for test
# check the _namespace_packages dict
assert "pkg1.pkg2" in pkg_resources._namespace_packages
assert pkg_resources._namespace_packages["pkg1"] == ["pkg1.pkg2"]
# check the __path__ attribute contains both paths
expected = [
str(real_tmpdir / "site-pkgs" / "pkg1" / "pkg2"),
str(real_tmpdir / "site-pkgs2" / "pkg1" / "pkg2"),
]
assert pkg1.pkg2.__path__ == expected
@issue591
def test_path_order(self, symlinked_tmpdir):
"""
Test that if multiple versions of the same namespace package subpackage
are on different sys.path entries, that only the one earliest on
sys.path is imported, and that the namespace package's __path__ is in
the correct order.
Regression test for https://github.com/pypa/setuptools/issues/207
"""
tmpdir = symlinked_tmpdir
site_dirs = (
tmpdir / "site-pkgs",
tmpdir / "site-pkgs2",
tmpdir / "site-pkgs3",
)
vers_str = "__version__ = %r"
for number, site in enumerate(site_dirs, 1):
if number > 1:
sys.path.append(str(site))
nspkg = site / 'nspkg'
subpkg = nspkg / 'subpkg'
subpkg.ensure_dir()
(nspkg / '__init__.py').write_text(self.ns_str, encoding='utf-8')
(subpkg / '__init__.py').write_text(vers_str % number, encoding='utf-8')
with pytest.warns(DeprecationWarning, match="pkg_resources.declare_namespace"):
import nspkg # pyright: ignore[reportMissingImports] # Temporary package for test
import nspkg.subpkg # pyright: ignore[reportMissingImports] # Temporary package for test
expected = [str(site.realpath() / 'nspkg') for site in site_dirs]
assert nspkg.__path__ == expected
assert nspkg.subpkg.__version__ == 1
| TestNamespaces |
python | pandas-dev__pandas | pandas/tests/indexes/test_index_new.py | {
"start": 12834,
"end": 14235
} | class ____:
# Test passing different arraylike values to pd.Index
@pytest.mark.parametrize("klass", [Index, DatetimeIndex])
def test_constructor_from_series_dt64(self, klass):
stamps = [Timestamp("20110101"), Timestamp("20120101"), Timestamp("20130101")]
expected = DatetimeIndex(stamps)
ser = Series(stamps)
result = klass(ser)
tm.assert_index_equal(result, expected)
def test_constructor_no_pandas_array(self):
ser = Series([1, 2, 3])
result = Index(ser.array)
expected = Index([1, 2, 3])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"array",
[
np.arange(5),
np.array(["a", "b", "c"]),
date_range("2000-01-01", periods=3).values,
],
)
def test_constructor_ndarray_like(self, array):
# GH#5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike:
def __init__(self, array) -> None:
self.array = array
def __array__(self, dtype=None, copy=None) -> np.ndarray:
return self.array
expected = Index(array)
result = Index(ArrayLike(array))
tm.assert_index_equal(result, expected)
| TestIndexConstructorUnwrapping |
python | huggingface__transformers | src/transformers/models/seamless_m4t/modeling_seamless_m4t.py | {
"start": 27594,
"end": 30319
} | class ____(GradientCheckpointingLayer):
"""Conformer block based on https://huggingface.co/papers/2005.08100."""
# Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerEncoderLayer.__init__ with Wav2Vec2->SeamlessM4T, attention_dropout->speech_encoder_dropout, torch.nn->nn
def __init__(self, config):
super().__init__()
embed_dim = config.hidden_size
dropout = config.speech_encoder_dropout
# Feed-forward 1
self.ffn1_layer_norm = nn.LayerNorm(embed_dim)
self.ffn1 = SeamlessM4TConformerFeedForward(config)
# Self-Attention
self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
self.self_attn_dropout = nn.Dropout(dropout)
self.self_attn = SeamlessM4TConformerSelfAttention(config)
# Conformer Convolution
self.conv_module = SeamlessM4TConformerConvolutionModule(config)
# Feed-forward 2
self.ffn2_layer_norm = nn.LayerNorm(embed_dim)
self.ffn2 = SeamlessM4TConformerFeedForward(config)
self.final_layer_norm = nn.LayerNorm(embed_dim)
def forward(
self,
hidden_states,
attention_mask: Optional[torch.Tensor] = None,
relative_position_embeddings: Optional[torch.Tensor] = None,
output_attentions: bool = False,
conv_attention_mask: Optional[torch.Tensor] = None,
):
# 1. Feed-Forward 1 layer
residual = hidden_states
hidden_states = self.ffn1_layer_norm(hidden_states)
hidden_states = self.ffn1(hidden_states)
hidden_states = hidden_states * 0.5 + residual
residual = hidden_states
# 2. Self-Attention layer
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weigts = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
relative_position_embeddings=relative_position_embeddings,
output_attentions=output_attentions,
)
hidden_states = self.self_attn_dropout(hidden_states)
hidden_states = hidden_states + residual
# 3. Convolutional Layer
residual = hidden_states
hidden_states = self.conv_module(hidden_states, attention_mask=conv_attention_mask)
hidden_states = residual + hidden_states
# 4. Feed-Forward 2 Layer
residual = hidden_states
hidden_states = self.ffn2_layer_norm(hidden_states)
hidden_states = self.ffn2(hidden_states)
hidden_states = hidden_states * 0.5 + residual
hidden_states = self.final_layer_norm(hidden_states)
return hidden_states, attn_weigts
| SeamlessM4TConformerEncoderLayer |
python | astropy__astropy | astropy/io/ascii/fastbasic.py | {
"start": 194,
"end": 8443
} | class ____(metaclass=core.MetaBaseReader):
"""
This class is intended to handle the same format addressed by the
ordinary :class:`Basic` writer, but it acts as a wrapper for underlying C
code and is therefore much faster. Unlike the other ASCII readers and
writers, this class is not very extensible and is restricted
by optimization requirements.
"""
_format_name = "fast_basic"
_description = "Basic table with custom delimiter using the fast C engine"
_fast = True
fill_extra_cols = False
guessing = False
strict_names = False
def __init__(self, default_kwargs={}, **user_kwargs):
# Make sure user does not set header_start to None for a reader
# that expects a non-None value (i.e. a number >= 0). This mimics
# what happens in the Basic reader.
if (
default_kwargs.get("header_start", 0) is not None
and user_kwargs.get("header_start", 0) is None
):
raise ValueError("header_start cannot be set to None for this Reader")
# Set up kwargs and copy any user kwargs. Use deepcopy user kwargs
# since they may contain a dict item which would end up as a ref to the
# original and get munged later (e.g. in cparser.pyx validation of
# fast_reader dict).
kwargs = copy.deepcopy(default_kwargs)
kwargs.update(copy.deepcopy(user_kwargs))
delimiter = kwargs.pop("delimiter", " ")
self.delimiter = str(delimiter) if delimiter is not None else None
self.write_comment = kwargs.get("comment", "# ")
self.comment = kwargs.pop("comment", "#")
if self.comment is not None:
self.comment = str(self.comment)
self.quotechar = str(kwargs.pop("quotechar", '"'))
self.header_start = kwargs.pop("header_start", 0)
# If data_start is not specified, start reading
# data right after the header line
data_start_default = user_kwargs.get(
"data_start", self.header_start + 1 if self.header_start is not None else 1
)
self.data_start = kwargs.pop("data_start", data_start_default)
self.kwargs = kwargs
self.strip_whitespace_lines = True
self.strip_whitespace_fields = True
def _read_header(self):
# Use the tokenizer by default -- this method
# can be overridden for specialized headers
self.engine.read_header()
def read(self, table):
"""
Read input data (file-like object, filename, list of strings, or
single string) into a Table and return the result.
"""
if self.comment is not None and len(self.comment) != 1:
raise core.ParameterError("The C reader does not support a comment regex")
elif self.data_start is None:
raise core.ParameterError(
"The C reader does not allow data_start to be None"
)
elif (
self.header_start is not None
and self.header_start < 0
and not isinstance(self, FastCommentedHeader)
):
raise core.ParameterError(
"The C reader does not allow header_start to be "
"negative except for commented-header files"
)
elif self.data_start < 0:
raise core.ParameterError(
"The C reader does not allow data_start to be negative"
)
elif len(self.delimiter) != 1:
raise core.ParameterError("The C reader only supports 1-char delimiters")
elif len(self.quotechar) != 1:
raise core.ParameterError(
"The C reader only supports a length-1 quote character"
)
elif "converters" in self.kwargs:
raise core.ParameterError(
"The C reader does not support passing specialized converters"
)
elif "encoding" in self.kwargs:
raise core.ParameterError(
"The C reader does not use the encoding parameter"
)
elif "outputter_cls" in self.kwargs:
raise core.ParameterError(
"The C reader does not use the outputter_cls parameter"
)
elif "inputter_cls" in self.kwargs:
raise core.ParameterError(
"The C reader does not use the inputter_cls parameter"
)
elif "data_splitter_cls" in self.kwargs or "header_splitter_cls" in self.kwargs:
raise core.ParameterError("The C reader does not use a Splitter class")
self.strict_names = self.kwargs.pop("strict_names", False)
# Process fast_reader kwarg, which may or may not exist (though ui.py will always
# pass this as a dict with at least 'enable' set).
fast_reader = self.kwargs.get("fast_reader", True)
if not isinstance(fast_reader, dict):
fast_reader = {}
fast_reader.pop("enable", None)
self.return_header_chars = fast_reader.pop("return_header_chars", False)
# Put fast_reader dict back into kwargs.
self.kwargs["fast_reader"] = fast_reader
self.engine = cparser.CParser(
table,
self.strip_whitespace_lines,
self.strip_whitespace_fields,
delimiter=self.delimiter,
header_start=self.header_start,
comment=self.comment,
quotechar=self.quotechar,
data_start=self.data_start,
fill_extra_cols=self.fill_extra_cols,
**self.kwargs,
)
conversion_info = self._read_header()
self.check_header()
if conversion_info is not None:
try_int, try_float, try_string = conversion_info
else:
try_int = {}
try_float = {}
try_string = {}
with _set_locale("C"):
data, comments = self.engine.read(try_int, try_float, try_string)
out = self.make_table(data, comments)
if self.return_header_chars:
out.meta["__ascii_fast_reader_header_chars__"] = self.engine.header_chars
return out
def make_table(self, data, comments):
"""Actually make the output table give the data and comments."""
meta = {}
if comments:
meta["comments"] = comments
names = core._deduplicate_names(self.engine.get_names())
return Table(data, names=names, meta=meta)
def check_header(self):
names = self.engine.get_header_names() or self.engine.get_names()
if self.strict_names:
# Impose strict requirements on column names (normally used in guessing)
bads = [" ", ",", "|", "\t", "'", '"']
for name in names:
if (
core._is_number(name)
or len(name) == 0
or name[0] in bads
or name[-1] in bads
):
raise ValueError(
f"Column name {name!r} does not meet strict name requirements"
)
# When guessing require at least two columns
if self.guessing and len(names) <= 1:
raise ValueError(
f"Table format guessing requires at least two columns, got {names}"
)
def write(self, table, output):
"""
Use a fast Cython method to write table data to output,
where output is a filename or file-like object.
"""
self._write(table, output, {})
def _write(
self, table, output, default_kwargs, header_output=True, output_types=False
):
# Fast writer supports only 1-d columns
core._check_multidim_table(table, max_ndim=1)
write_kwargs = {
"delimiter": self.delimiter,
"quotechar": self.quotechar,
"strip_whitespace": self.strip_whitespace_fields,
"comment": self.write_comment,
}
write_kwargs.update(default_kwargs)
# user kwargs take precedence over default kwargs
write_kwargs.update(self.kwargs)
writer = cparser.FastWriter(table, **write_kwargs)
writer.write(output, header_output, output_types)
| FastBasic |
python | huggingface__transformers | src/transformers/models/hubert/modular_hubert.py | {
"start": 4482,
"end": 7750
} | class ____(PreTrainedModel):
config: HubertConfig
base_model_prefix = "hubert"
main_input_name = "input_values"
input_modalities = "audio"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm1d)):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, nn.Conv1d):
if is_deepspeed_zero3_enabled():
import deepspeed
if hasattr(module, "weight_v") and hasattr(module, "weight_g"):
with deepspeed.zero.GatheredParameters([module.weight_v, module.weight_g], modifier_rank=0):
init.kaiming_normal_(module.weight)
else:
with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0):
init.kaiming_normal_(module.weight)
else:
init.kaiming_normal_(module.weight)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, HubertModel):
if hasattr(module, "masked_spec_embed"):
init.uniform_(module.masked_spec_embed)
elif isinstance(module, HubertForSequenceClassification):
if hasattr(module, "layer_weights"):
init.constant_(module.layer_weights, 1.0 / (self.config.num_hidden_layers + 1))
def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
return input_lengths
def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
batch_size = attention_mask.shape[0]
attention_mask = torch.zeros(
(batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
)
# these two operations makes sure that all values before the output lengths idxs are attended to
attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
return attention_mask
| HubertPreTrainedModel |
python | scipy__scipy | scipy/interpolate/tests/test_bsplines.py | {
"start": 45276,
"end": 65121
} | class ____:
#
# Test basic ways of constructing interpolating splines.
#
xx = np.linspace(0., 2.*np.pi)
yy = np.sin(xx)
def _get_xy(self, xp):
return xp.asarray(self.xx), xp.asarray(self.yy)
def test_non_int_order(self):
with assert_raises(TypeError):
make_interp_spline(self.xx, self.yy, k=2.5)
def test_order_0(self, xp):
xx, yy = self._get_xy(xp)
b = make_interp_spline(xx, yy, k=0)
xp_assert_close(b(xx), yy, atol=1e-14, rtol=1e-14)
b = make_interp_spline(xx, yy, k=0, axis=-1)
xp_assert_close(b(xx), yy, atol=1e-14, rtol=1e-14)
def test_linear(self, xp):
xx, yy = self._get_xy(xp)
b = make_interp_spline(xx, yy, k=1)
xp_assert_close(b(xx), yy, atol=1e-14, rtol=1e-14)
b = make_interp_spline(xx, yy, k=1, axis=-1)
xp_assert_close(b(xx), yy, atol=1e-14, rtol=1e-14)
@pytest.mark.parametrize('k', [0, 1, 2, 3])
def test_incompatible_x_y(self, k):
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6, 7]
with assert_raises(ValueError, match="Shapes of x"):
make_interp_spline(x, y, k=k)
@pytest.mark.parametrize('k', [0, 1, 2, 3])
def test_broken_x(self, k):
x = [0, 1, 1, 2, 3, 4] # duplicates
y = [0, 1, 2, 3, 4, 5]
with assert_raises(ValueError, match="x to not have duplicates"):
make_interp_spline(x, y, k=k)
x = [0, 2, 1, 3, 4, 5] # unsorted
with assert_raises(ValueError, match="Expect x to be a 1D strictly"):
make_interp_spline(x, y, k=k)
x = [0, 1, 2, 3, 4, 5]
x = np.asarray(x).reshape((1, -1)) # 1D
with assert_raises(ValueError, match="Expect x to be a 1D strictly"):
make_interp_spline(x, y, k=k)
def test_not_a_knot(self, xp):
xx, yy = self._get_xy(xp)
for k in [2, 3, 4, 5, 6, 7]:
b = make_interp_spline(xx, yy, k)
xp_assert_close(b(xx), yy, atol=1e-14, rtol=1e-14)
def test_periodic(self, xp):
xx, yy = self._get_xy(xp)
# k = 5 here for more derivatives
b = make_interp_spline(xx, yy, k=5, bc_type='periodic')
xp_assert_close(b(xx), yy, atol=1e-14, rtol=1e-14)
# in periodic case it is expected equality of k-1 first
# derivatives at the boundaries
for i in range(1, 5):
xp_assert_close(b(xx[0], nu=i), b(xx[-1], nu=i), atol=1e-11)
# tests for axis=-1
b = make_interp_spline(xx, yy, k=5, bc_type='periodic', axis=-1)
xp_assert_close(b(xx), yy, atol=1e-14, rtol=1e-14)
for i in range(1, 5):
xp_assert_close(b(xx[0], nu=i), b(xx[-1], nu=i), atol=1e-11)
@pytest.mark.parametrize('k', [2, 3, 4, 5, 6, 7])
def test_periodic_random(self, k, xp):
# tests for both cases (k > n and k <= n)
n = 5
rng = np.random.RandomState(1234)
x = np.sort(rng.random_sample(n) * 10)
y = rng.random_sample(n) * 100
y[0] = y[-1]
x, y = xp.asarray(x), xp.asarray(y)
b = make_interp_spline(x, y, k=k, bc_type='periodic')
xp_assert_close(b(x), y, atol=1e-14)
def test_periodic_axis(self, xp):
n = self.xx.shape[0]
rng = np.random.RandomState(1234)
x = rng.random_sample(n) * 2 * np.pi
x = np.sort(x)
x[0] = 0.
x[-1] = 2 * np.pi
y = np.zeros((2, n))
y[0] = np.sin(x)
y[1] = np.cos(x)
x, y = xp.asarray(x), xp.asarray(y)
b = make_interp_spline(x, y, k=5, bc_type='periodic', axis=1)
for i in range(n):
xp_assert_close(b(x[i]), y[:, i], atol=1e-14)
xp_assert_close(b(x[0]), b(x[-1]), atol=1e-14)
def test_periodic_points_exception(self):
# first and last points should match when periodic case expected
rng = np.random.RandomState(1234)
k = 5
n = 8
x = np.sort(rng.random_sample(n))
y = rng.random_sample(n)
y[0] = y[-1] - 1 # to be sure that they are not equal
with assert_raises(ValueError):
make_interp_spline(x, y, k=k, bc_type='periodic')
def test_periodic_knots_exception(self):
# `periodic` case does not work with passed vector of knots
rng = np.random.RandomState(1234)
k = 3
n = 7
x = np.sort(rng.random_sample(n))
y = rng.random_sample(n)
t = np.zeros(n + 2 * k)
with assert_raises(ValueError):
make_interp_spline(x, y, k, t, 'periodic')
@pytest.mark.parametrize('k', [2, 3, 4, 5])
def test_periodic_splev(self, k):
# comparison values of periodic b-spline with splev
b = make_interp_spline(self.xx, self.yy, k=k, bc_type='periodic')
tck = splrep(self.xx, self.yy, per=True, k=k)
spl = splev(self.xx, tck)
xp_assert_close(spl, b(self.xx), atol=1e-14)
# comparison derivatives of periodic b-spline with splev
for i in range(1, k):
spl = splev(self.xx, tck, der=i)
xp_assert_close(spl, b(self.xx, nu=i), atol=1e-10)
def test_periodic_cubic(self):
# comparison values of cubic periodic b-spline with CubicSpline
b = make_interp_spline(self.xx, self.yy, k=3, bc_type='periodic')
cub = CubicSpline(self.xx, self.yy, bc_type='periodic')
xp_assert_close(b(self.xx), cub(self.xx), atol=1e-14)
# edge case: Cubic interpolation on 3 points
rng = np.random.RandomState(1234)
n = 3
x = np.sort(rng.random_sample(n) * 10)
y = rng.random_sample(n) * 100
y[0] = y[-1]
b = make_interp_spline(x, y, k=3, bc_type='periodic')
cub = CubicSpline(x, y, bc_type='periodic')
xp_assert_close(b(x), cub(x), atol=1e-14)
def test_periodic_full_matrix(self):
# comparison values of cubic periodic b-spline with
# solution of the system with full matrix
k = 3
b = make_interp_spline(self.xx, self.yy, k=k, bc_type='periodic')
t = _periodic_knots(self.xx, k)
c = _make_interp_per_full_matr(self.xx, self.yy, t, k)
b1 = np.vectorize(lambda x: _naive_eval(x, t, c, k, xp=np))
xp_assert_close(b(self.xx), b1(self.xx), atol=1e-14)
def test_quadratic_deriv(self, xp):
xx, yy = self._get_xy(xp)
der = [(1, 8.)] # order, value: f'(x) = 8.
# derivative at right-hand edge
b = make_interp_spline(xx, yy, k=2, bc_type=(None, der))
xp_assert_close(b(xx), yy, atol=1e-14, rtol=1e-14)
xp_assert_close(
b(xx[-1], 1),
xp.asarray(der[0][1], dtype=xp.float64),
atol=1e-14, rtol=1e-14, check_0d=False
)
# derivative at left-hand edge
b = make_interp_spline(xx, yy, k=2, bc_type=(der, None))
xp_assert_close(b(xx), yy, atol=1e-14, rtol=1e-14)
xp_assert_close(
b(xx[0], 1),
xp.asarray(der[0][1], dtype=xp.float64),
atol=1e-14, rtol=1e-14, check_0d=False
)
def test_cubic_deriv(self, xp):
xx, yy = self._get_xy(xp)
k = 3
# first derivatives at left & right edges:
der_l, der_r = [(1, 3.)], [(1, 4.)]
b = make_interp_spline(xx, yy, k, bc_type=(der_l, der_r))
xp_assert_close(b(xx), yy, atol=1e-14, rtol=1e-14)
xp_assert_close(
b(xx[0], 1),
xp.asarray(der_l[0][1], dtype=xp.float64), atol=1e-14, rtol=1e-14
)
xp_assert_close(
b(xx[-1], 1),
xp.asarray(der_r[0][1], dtype=xp.float64), atol=1e-14, rtol=1e-14
)
# 'natural' cubic spline, zero out 2nd derivatives at the boundaries
der_l, der_r = [(2, 0)], [(2, 0)]
b = make_interp_spline(xx, yy, k, bc_type=(der_l, der_r))
xp_assert_close(b(xx), yy, atol=1e-14, rtol=1e-14)
def test_quintic_derivs(self, xp):
k, n = 5, 7
x = xp.arange(n, dtype=xp.float64)
y = xp.sin(x)
der_l = [(1, -12.), (2, 1)]
der_r = [(1, 8.), (2, 3.)]
b = make_interp_spline(x, y, k=k, bc_type=(der_l, der_r))
xp_assert_close(b(x), y, atol=1e-14, rtol=1e-14)
xp_assert_close(xp.stack([b(x[0], 1), b(x[0], 2)]),
xp.asarray([val for (nu, val) in der_l], dtype=xp.float64))
xp_assert_close(xp.stack([b(x[-1], 1), b(x[-1], 2)]),
xp.asarray([val for (nu, val) in der_r], dtype=xp.float64))
@pytest.mark.xfail(reason='unstable')
def test_cubic_deriv_unstable(self):
# 1st and 2nd derivative at x[0], no derivative information at x[-1]
# The problem is not that it fails [who would use this anyway],
# the problem is that it fails *silently*, and I've no idea
# how to detect this sort of instability.
# In this particular case: it's OK for len(t) < 20, goes haywire
# at larger `len(t)`.
k = 3
t = _augknt(self.xx, k)
der_l = [(1, 3.), (2, 4.)]
b = make_interp_spline(self.xx, self.yy, k, t, bc_type=(der_l, None))
xp_assert_close(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
def test_knots_not_data_sites(self, xp):
# Knots need not coincide with the data sites.
# use a quadratic spline, knots are at data averages,
# two additional constraints are zero 2nd derivatives at edges
k = 2
xx, yy = self._get_xy(xp)
t = concat_1d(xp,
xp.ones(k+1) * xx[0],
(xx[1:] + xx[:-1]) / 2.,
xp.ones(k+1) * xx[-1]
)
b = make_interp_spline(xx, yy, k, t,
bc_type=([(2, 0)], [(2, 0)]))
xp_assert_close(b(xx), yy, atol=1e-14, rtol=1e-14)
assert math.isclose(b(xx[0], 2), 0.0, abs_tol=1e-14)
assert math.isclose(b(xx[-1], 2), 0.0, abs_tol=1e-14)
def test_minimum_points_and_deriv(self, xp):
# interpolation of f(x) = x**3 between 0 and 1. f'(x) = 3 * xx**2 and
# f'(0) = 0, f'(1) = 3.
k = 3
x = xp.asarray([0., 1.])
y = xp.asarray([0., 1.])
b = make_interp_spline(x, y, k, bc_type=([(1, 0.)], [(1, 3.)]))
xx = xp.linspace(0., 1., 21, dtype=xp.float64)
yy = xx**3
xp_assert_close(b(xx), yy, atol=1e-14, rtol=1e-14)
def test_deriv_spec(self):
# If one of the derivatives is omitted, the spline definition is
# incomplete.
x = y = [1.0, 2, 3, 4, 5, 6]
with assert_raises(ValueError):
make_interp_spline(x, y, bc_type=([(1, 0.)], None))
with assert_raises(ValueError):
make_interp_spline(x, y, bc_type=(1, 0.))
with assert_raises(ValueError):
make_interp_spline(x, y, bc_type=[(1, 0.)])
with assert_raises(ValueError):
make_interp_spline(x, y, bc_type=42)
# CubicSpline expects`bc_type=(left_pair, right_pair)`, while
# here we expect `bc_type=(iterable, iterable)`.
l, r = (1, 0.0), (1, 0.0)
with assert_raises(ValueError):
make_interp_spline(x, y, bc_type=(l, r))
def test_deriv_order_too_large(self, xp):
x = xp.arange(7)
y = x**2
l, r = [(6, 0)], [(1, 0)] # 6th derivative = 0 at x[0] for k=3
with assert_raises(ValueError, match="Bad boundary conditions at 0."):
# cannot fix 6th derivative at x[0]: does not segfault
make_interp_spline(x, y, bc_type=(l, r))
l, r = [(1, 0)], [(-6, 0)] # derivative order < 0 at x[-1]
with assert_raises(ValueError, match="Bad boundary conditions at 6."):
# does not segfault
make_interp_spline(x, y, bc_type=(l, r))
def test_complex(self, xp):
k = 3
xx, yy = self._get_xy(xp)
yy = yy + 1.j*yy
# first derivatives at left & right edges:
der_l, der_r = [(1, 3.j)], [(1, 4.+2.j)]
b = make_interp_spline(xx, yy, k, bc_type=(der_l, der_r))
xp_assert_close(b(xx), yy, atol=1e-14, rtol=1e-14)
assert cmath.isclose(b(xx[0], 1), der_l[0][1], abs_tol=1e-14)
assert cmath.isclose(b(xx[-1], 1), der_r[0][1], abs_tol=1e-14)
# also test zero and first order
for k in (0, 1):
b = make_interp_spline(xx, yy, k=k)
xp_assert_close(b(xx), yy, atol=1e-14, rtol=1e-14)
def test_int_xy(self, xp):
x = xp.arange(10, dtype=xp.int32)
y = xp.arange(10, dtype=xp.int32)
# Cython chokes on "buffer type mismatch" (construction) or
# "no matching signature found" (evaluation)
for k in (0, 1, 2, 3):
b = make_interp_spline(x, y, k=k)
b(x)
def test_sliced_input(self, xp):
# Cython code chokes on non C contiguous arrays
xx = xp.linspace(-1, 1, 100)
x = xx[::5]
y = xx[::5]
for k in (0, 1, 2, 3):
make_interp_spline(x, y, k=k)
def test_check_finite(self, xp):
# check_finite defaults to True; nans and such trigger a ValueError
x = xp.arange(10, dtype=xp.float64)
y = x**2
for z in [xp.nan, xp.inf, -xp.inf]:
y = xpx.at(y, -1).set(z)
assert_raises(ValueError, make_interp_spline, x, y)
@pytest.mark.parametrize('k', [1, 2, 3, 5])
def test_list_input(self, k):
# regression test for gh-8714: TypeError for x, y being lists and k=2
x = list(range(10))
y = [a**2 for a in x]
make_interp_spline(x, y, k=k)
def test_multiple_rhs(self, xp):
xx, yy = self._get_xy(xp)
yy = xp.stack((xx, yy), axis=1)
der_l = [(1, [1., 2.])]
der_r = [(1, [3., 4.])]
b = make_interp_spline(xx, yy, k=3, bc_type=(der_l, der_r))
xp_assert_close(b(xx), yy, atol=1e-14, rtol=1e-14)
xp_assert_close(
b(xx[0], 1),
xp.asarray(der_l[0][1], dtype=xp.float64), atol=1e-14, rtol=1e-14
)
xp_assert_close(
b(xx[-1], 1),
xp.asarray(der_r[0][1], dtype=xp.float64), atol=1e-14, rtol=1e-14
)
def test_shapes(self):
rng = np.random.RandomState(1234)
k, n = 3, 22
x = np.sort(rng.random(size=n))
y = rng.random(size=(n, 5, 6, 7))
b = make_interp_spline(x, y, k)
assert b.c.shape == (n, 5, 6, 7)
# now throw in some derivatives
d_l = [(1, rng.random((5, 6, 7)))]
d_r = [(1, rng.random((5, 6, 7)))]
b = make_interp_spline(x, y, k, bc_type=(d_l, d_r))
assert b.c.shape == (n + k - 1, 5, 6, 7)
def test_string_aliases(self, xp):
xx, yy = self._get_xy(xp)
yy = xp.sin(xx)
# a single string is duplicated
b1 = make_interp_spline(xx, yy, k=3, bc_type='natural')
b2 = make_interp_spline(xx, yy, k=3, bc_type=([(2, 0)], [(2, 0)]))
xp_assert_close(b1.c, b2.c, atol=1e-15)
# two strings are handled
b1 = make_interp_spline(xx, yy, k=3,
bc_type=('natural', 'clamped'))
b2 = make_interp_spline(xx, yy, k=3,
bc_type=([(2, 0)], [(1, 0)]))
xp_assert_close(b1.c, b2.c, atol=1e-15)
# one-sided BCs are OK
b1 = make_interp_spline(xx, yy, k=2, bc_type=(None, 'clamped'))
b2 = make_interp_spline(xx, yy, k=2, bc_type=(None, [(1, 0.0)]))
xp_assert_close(b1.c, b2.c, atol=1e-15)
# 'not-a-knot' is equivalent to None
b1 = make_interp_spline(xx, yy, k=3, bc_type='not-a-knot')
b2 = make_interp_spline(xx, yy, k=3, bc_type=None)
xp_assert_close(b1.c, b2.c, atol=1e-15)
# unknown strings do not pass
with assert_raises(ValueError):
make_interp_spline(xx, yy, k=3, bc_type='typo')
# string aliases are handled for 2D values
yy = xp.stack((xp.sin(xx), xp.cos(xx)), axis=1)
der_l = [(1, [0., 0.])]
der_r = [(2, [0., 0.])]
b2 = make_interp_spline(xx, yy, k=3, bc_type=(der_l, der_r))
b1 = make_interp_spline(xx, yy, k=3,
bc_type=('clamped', 'natural'))
xp_assert_close(b1.c, b2.c, atol=1e-15)
# ... and for N-D values:
rng = np.random.RandomState(1234)
k, n = 3, 22
x = np.sort(rng.random(size=n))
y = rng.random(size=(n, 5, 6, 7))
x, y = xp.asarray(x), xp.asarray(y)
# now throw in some derivatives
d_l = [(1, xp.zeros((5, 6, 7)))]
d_r = [(1, xp.zeros((5, 6, 7)))]
b1 = make_interp_spline(x, y, k, bc_type=(d_l, d_r))
b2 = make_interp_spline(x, y, k, bc_type='clamped')
xp_assert_close(b1.c, b2.c, atol=1e-15)
def test_full_matrix(self, xp):
rng = np.random.RandomState(1234)
k, n = 3, 7
x_np = np.sort(rng.random(size=n))
y_np = rng.random(size=n)
t_np = _not_a_knot(x_np, k)
cf = make_interp_full_matr(x_np, y_np, t_np, k)
cf = xp.asarray(cf)
x, y, t = map(xp.asarray, (x_np, y_np, t_np))
b = make_interp_spline(x, y, k, t)
xp_assert_close(b.c, cf, atol=1e-14, rtol=1e-14)
def test_woodbury(self):
'''
Random elements in diagonal matrix with blocks in the
left lower and right upper corners checking the
implementation of Woodbury algorithm.
'''
rng = np.random.RandomState(1234)
n = 201
for k in range(3, 32, 2):
offset = int((k - 1) / 2)
a = np.diagflat(rng.random((1, n)))
for i in range(1, offset + 1):
a[:-i, i:] += np.diagflat(rng.random((1, n - i)))
a[i:, :-i] += np.diagflat(rng.random((1, n - i)))
ur = rng.random((offset, offset))
a[:offset, -offset:] = ur
ll = rng.random((offset, offset))
a[-offset:, :offset] = ll
d = np.zeros((k, n))
for i, j in enumerate(range(offset, -offset - 1, -1)):
if j < 0:
d[i, :j] = np.diagonal(a, offset=j)
else:
d[i, j:] = np.diagonal(a, offset=j)
b = rng.random(n)
xp_assert_close(_woodbury_algorithm(d, ur, ll, b, k),
np.linalg.solve(a, b), atol=1e-14)
def make_interp_full_matr(x, y, t, k):
"""Assemble an spline order k with knots t to interpolate
y(x) using full matrices.
Not-a-knot BC only.
This routine is here for testing only (even though it's functional).
"""
assert x.size == y.size
assert t.size == x.size + k + 1
n = x.size
A = np.zeros((n, n), dtype=np.float64)
for j in range(n):
xval = x[j]
if xval == t[k]:
left = k
else:
left = np.searchsorted(t, xval) - 1
# fill a row
bb = _dierckx.evaluate_all_bspl(t, k, xval, left)
A[j, left-k:left+1] = bb
c = sl.solve(A, y)
return c
def make_lsq_full_matrix(x, y, t, k=3):
"""Make the least-square spline, full matrices."""
x, y, t = map(np.asarray, (x, y, t))
m = x.size
n = t.size - k - 1
A = np.zeros((m, n), dtype=np.float64)
for j in range(m):
xval = x[j]
# find interval
if xval == t[k]:
left = k
else:
left = np.searchsorted(t, xval) - 1
# fill a row
bb = _dierckx.evaluate_all_bspl(t, k, xval, left)
A[j, left-k:left+1] = bb
# have observation matrix, can solve the LSQ problem
B = np.dot(A.T, A)
Y = np.dot(A.T, y)
c = sl.solve(B, Y)
return c, (A, Y)
parametrize_lsq_methods = pytest.mark.parametrize("method", ["norm-eq", "qr"])
@make_xp_test_case(make_lsq_spline)
| TestInterp |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.